src/gromacs/mdrun/minimize.cpp (alexxy/gromacs.git, commit d055c942e662f9483f3b4b1de618f019d129d502)
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5  * Copyright (c) 2001-2004, The GROMACS development team.
6  * Copyright (c) 2013,2014,2015,2016,2017 The GROMACS development team.
7  * Copyright (c) 2018,2019,2020,2021, by the GROMACS development team, led by
8  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
9  * and including many others, as listed in the AUTHORS file in the
10  * top-level source directory and at http://www.gromacs.org.
11  *
12  * GROMACS is free software; you can redistribute it and/or
13  * modify it under the terms of the GNU Lesser General Public License
14  * as published by the Free Software Foundation; either version 2.1
15  * of the License, or (at your option) any later version.
16  *
17  * GROMACS is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
20  * Lesser General Public License for more details.
21  *
22  * You should have received a copy of the GNU Lesser General Public
23  * License along with GROMACS; if not, see
24  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
25  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
26  *
27  * If you want to redistribute modifications to GROMACS, please
28  * consider that scientific software is very special. Version
29  * control is crucial - bugs must be traceable. We will be happy to
30  * consider code for inclusion in the official distribution, but
31  * derived work must not be called official GROMACS. Details are found
32  * in the README & COPYING files - if they are missing, get the
33  * official version at http://www.gromacs.org.
34  *
35  * To help us fund GROMACS development, we humbly ask that you cite
36  * the research papers on the package. Check out http://www.gromacs.org.
37  */
38 /*! \internal \file
39  *
40  * \brief This file defines integrators for energy minimization
41  *
42  * \author Berk Hess <hess@kth.se>
43  * \author Erik Lindahl <erik@kth.se>
44  * \ingroup module_mdrun
45  */
46 #include "gmxpre.h"
47
48 #include "config.h"
49
50 #include <cmath>
51 #include <cstring>
52 #include <ctime>
53
54 #include <algorithm>
55 #include <limits>
56 #include <vector>
57
58 #include "gromacs/commandline/filenm.h"
59 #include "gromacs/domdec/collect.h"
60 #include "gromacs/domdec/dlbtiming.h"
61 #include "gromacs/domdec/domdec.h"
62 #include "gromacs/domdec/domdec_struct.h"
63 #include "gromacs/domdec/mdsetup.h"
64 #include "gromacs/domdec/partition.h"
65 #include "gromacs/ewald/pme_pp.h"
66 #include "gromacs/fileio/confio.h"
67 #include "gromacs/fileio/mtxio.h"
68 #include "gromacs/gmxlib/network.h"
69 #include "gromacs/gmxlib/nrnb.h"
70 #include "gromacs/imd/imd.h"
71 #include "gromacs/linearalgebra/sparsematrix.h"
72 #include "gromacs/listed_forces/listed_forces.h"
73 #include "gromacs/math/functions.h"
74 #include "gromacs/math/vec.h"
75 #include "gromacs/mdlib/constr.h"
76 #include "gromacs/mdlib/coupling.h"
77 #include "gromacs/mdlib/dispersioncorrection.h"
78 #include "gromacs/mdlib/ebin.h"
79 #include "gromacs/mdlib/enerdata_utils.h"
80 #include "gromacs/mdlib/energyoutput.h"
81 #include "gromacs/mdlib/force.h"
82 #include "gromacs/mdlib/force_flags.h"
83 #include "gromacs/mdlib/forcerec.h"
84 #include "gromacs/mdlib/gmx_omp_nthreads.h"
85 #include "gromacs/mdlib/md_support.h"
86 #include "gromacs/mdlib/mdatoms.h"
87 #include "gromacs/mdlib/stat.h"
88 #include "gromacs/mdlib/tgroup.h"
89 #include "gromacs/mdlib/trajectory_writing.h"
90 #include "gromacs/mdlib/update.h"
91 #include "gromacs/mdlib/vsite.h"
92 #include "gromacs/mdrunutility/handlerestart.h"
93 #include "gromacs/mdrunutility/printtime.h"
94 #include "gromacs/mdtypes/checkpointdata.h"
95 #include "gromacs/mdtypes/commrec.h"
96 #include "gromacs/mdtypes/forcebuffers.h"
97 #include "gromacs/mdtypes/forcerec.h"
98 #include "gromacs/mdtypes/inputrec.h"
99 #include "gromacs/mdtypes/interaction_const.h"
100 #include "gromacs/mdtypes/md_enums.h"
101 #include "gromacs/mdtypes/mdatom.h"
102 #include "gromacs/mdtypes/mdrunoptions.h"
103 #include "gromacs/mdtypes/observablesreducer.h"
104 #include "gromacs/mdtypes/state.h"
105 #include "gromacs/pbcutil/pbc.h"
106 #include "gromacs/timing/wallcycle.h"
107 #include "gromacs/timing/walltime_accounting.h"
108 #include "gromacs/topology/mtop_util.h"
109 #include "gromacs/topology/topology.h"
110 #include "gromacs/utility/cstringutil.h"
111 #include "gromacs/utility/exceptions.h"
112 #include "gromacs/utility/fatalerror.h"
113 #include "gromacs/utility/logger.h"
114 #include "gromacs/utility/smalloc.h"
115
116 #include "legacysimulator.h"
117 #include "shellfc.h"
118
119 using gmx::ArrayRef;
120 using gmx::MdrunScheduleWorkload;
121 using gmx::RVec;
122 using gmx::VirtualSitesHandler;
123
124 //! Utility structure for manipulating states during EM
125 typedef struct em_state
126 {
127     //! Copy of the global state
128     t_state s;
129     //! Force array
130     gmx::ForceBuffers f;
131     //! Potential energy
132     real epot;
133     //! Norm of the force
134     real fnorm;
135     //! Maximum force
136     real fmax;
137     //! Atom index of the maximum force
138     int a_fmax;
139 } em_state_t;
140
141 //! Print the EM starting conditions
142 static void print_em_start(FILE*                     fplog,
143                            const t_commrec*          cr,
144                            gmx_walltime_accounting_t walltime_accounting,
145                            gmx_wallcycle*            wcycle,
146                            const char*               name)
147 {
148     walltime_accounting_start_time(walltime_accounting);
149     wallcycle_start(wcycle, WallCycleCounter::Run);
150     print_start(fplog, cr, walltime_accounting, name);
151 }
152
153 //! Stop counting time for EM
154 static void em_time_end(gmx_walltime_accounting_t walltime_accounting, gmx_wallcycle* wcycle)
155 {
156     wallcycle_stop(wcycle, WallCycleCounter::Run);
157
158     walltime_accounting_end_time(walltime_accounting);
159 }
160
161 //! Print a header for the minimizer to the given output file (console or log)
162 static void sp_header(FILE* out, const char* minimizer, real ftol, int nsteps)
163 {
164     fprintf(out, "\n");
165     fprintf(out, "%s:\n", minimizer);
166     fprintf(out, "   Tolerance (Fmax)   = %12.5e\n", ftol);
167     fprintf(out, "   Number of steps    = %12d\n", nsteps);
168 }
169
170 //! Print warning message
171 static void warn_step(FILE* fp, real ftol, real fmax, gmx_bool bLastStep, gmx_bool bConstrain)
172 {
173     constexpr bool realIsDouble = GMX_DOUBLE;
174     char           buffer[2048];
175
176     if (!std::isfinite(fmax))
177     {
178         sprintf(buffer,
179                 "\nEnergy minimization has stopped because the force "
180                 "on at least one atom is not finite. This usually means "
181                 "atoms are overlapping. Modify the input coordinates to "
182                 "remove atom overlap or use soft-core potentials with "
183                 "the free energy code to avoid infinite forces.\n%s",
184                 !realIsDouble ? "You could also be lucky that switching to double precision "
185                                 "is sufficient to obtain finite forces.\n"
186                               : "");
187     }
188     else if (bLastStep)
189     {
190         sprintf(buffer,
191                 "\nEnergy minimization reached the maximum number "
192                 "of steps before the forces reached the requested "
193                 "precision Fmax < %g.\n",
194                 ftol);
195     }
196     else
197     {
198         sprintf(buffer,
199                 "\nEnergy minimization has stopped, but the forces have "
200                 "not converged to the requested precision Fmax < %g (which "
201                 "may not be possible for your system). It stopped "
202                 "because the algorithm tried to make a new step whose size "
203                 "was too small, or there was no change in the energy since "
204                 "last step. Either way, we regard the minimization as "
205                 "converged to within the available machine precision, "
206                 "given your starting configuration and EM parameters.\n%s%s",
207                 ftol,
208                 !realIsDouble ? "\nDouble precision normally gives you higher accuracy, but "
209                                 "this is often not needed for preparing to run molecular "
210                                 "dynamics.\n"
211                               : "",
212                 bConstrain ? "You might need to increase your constraint accuracy, or turn\n"
213                              "off constraints altogether (set constraints = none in mdp file)\n"
214                            : "");
215     }
216
217     fputs(wrap_lines(buffer, 78, 0, FALSE), stderr);
218     fputs(wrap_lines(buffer, 78, 0, FALSE), fp);
219 }
220
221 //! Print message about convergence of the EM
222 static void print_converged(FILE*             fp,
223                             const char*       alg,
224                             real              ftol,
225                             int64_t           count,
226                             gmx_bool          bDone,
227                             int64_t           nsteps,
228                             const em_state_t* ems,
229                             double            sqrtNumAtoms)
230 {
231     char buf[STEPSTRSIZE];
232
233     if (bDone)
234     {
235         fprintf(fp, "\n%s converged to Fmax < %g in %s steps\n", alg, ftol, gmx_step_str(count, buf));
236     }
237     else if (count < nsteps)
238     {
239         fprintf(fp,
240                 "\n%s converged to machine precision in %s steps,\n"
241                 "but did not reach the requested Fmax < %g.\n",
242                 alg,
243                 gmx_step_str(count, buf),
244                 ftol);
245     }
246     else
247     {
248         fprintf(fp, "\n%s did not converge to Fmax < %g in %s steps.\n", alg, ftol, gmx_step_str(count, buf));
249     }
250
251 #if GMX_DOUBLE
252     fprintf(fp, "Potential Energy  = %21.14e\n", ems->epot);
253     fprintf(fp, "Maximum force     = %21.14e on atom %d\n", ems->fmax, ems->a_fmax + 1);
254     fprintf(fp, "Norm of force     = %21.14e\n", ems->fnorm / sqrtNumAtoms);
255 #else
256     fprintf(fp, "Potential Energy  = %14.7e\n", ems->epot);
257     fprintf(fp, "Maximum force     = %14.7e on atom %d\n", ems->fmax, ems->a_fmax + 1);
258     fprintf(fp, "Norm of force     = %14.7e\n", ems->fnorm / sqrtNumAtoms);
259 #endif
260 }
261
262 //! Compute the norm and max of the force array in parallel
263 static void get_f_norm_max(const t_commrec*               cr,
264                            const t_grpopts*               opts,
265                            t_mdatoms*                     mdatoms,
266                            gmx::ArrayRef<const gmx::RVec> f,
267                            real*                          fnorm,
268                            real*                          fmax,
269                            int*                           a_fmax)
270 {
271     double fnorm2, *sum;
272     real   fmax2, fam;
273     int    la_max, a_max, start, end, i, m, gf;
274
275     /* This routine computes the norm and the maximum of the force and returns
276      * them via the fnorm/fmax/a_fmax pointers. On parallel machines the global values are taken.
277      */
278     fnorm2 = 0;
279     fmax2  = 0;
280     la_max = -1;
281     start  = 0;
282     end    = mdatoms->homenr;
283     if (mdatoms->cFREEZE)
284     {
285         for (i = start; i < end; i++)
286         {
287             gf  = mdatoms->cFREEZE[i];
288             fam = 0;
289             for (m = 0; m < DIM; m++)
290             {
291                 if (!opts->nFreeze[gf][m])
292                 {
293                     fam += gmx::square(f[i][m]);
294                 }
295             }
296             fnorm2 += fam;
297             if (fam > fmax2)
298             {
299                 fmax2  = fam;
300                 la_max = i;
301             }
302         }
303     }
304     else
305     {
306         for (i = start; i < end; i++)
307         {
308             fam = norm2(f[i]);
309             fnorm2 += fam;
310             if (fam > fmax2)
311             {
312                 fmax2  = fam;
313                 la_max = i;
314             }
315         }
316     }
317
318     if (la_max >= 0 && DOMAINDECOMP(cr))
319     {
320         a_max = cr->dd->globalAtomIndices[la_max];
321     }
322     else
323     {
324         a_max = la_max;
325     }
326     if (PAR(cr))
327     {
328         snew(sum, 2 * cr->nnodes + 1);
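        /* Reduction buffer layout: each rank writes its local fmax^2 into
         * sum[2*nodeid] and the corresponding global atom index into
         * sum[2*nodeid + 1], and stores its local fnorm^2 in sum[2*nnodes].
         * After gmx_sumd the last element holds the global fnorm^2 and the
         * per-rank pairs are scanned below for the global maximum.
         */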
329         sum[2 * cr->nodeid]     = fmax2;
330         sum[2 * cr->nodeid + 1] = a_max;
331         sum[2 * cr->nnodes]     = fnorm2;
332         gmx_sumd(2 * cr->nnodes + 1, sum, cr);
333         fnorm2 = sum[2 * cr->nnodes];
334         /* Determine the global maximum */
335         for (i = 0; i < cr->nnodes; i++)
336         {
337             if (sum[2 * i] > fmax2)
338             {
339                 fmax2 = sum[2 * i];
340                 a_max = gmx::roundToInt(sum[2 * i + 1]);
341             }
342         }
343         sfree(sum);
344     }
345
346     if (fnorm)
347     {
348         *fnorm = sqrt(fnorm2);
349     }
350     if (fmax)
351     {
352         *fmax = sqrt(fmax2);
353     }
354     if (a_fmax)
355     {
356         *a_fmax = a_max;
357     }
358 }
359
360 //! Compute the norm and maximum of the force for an EM state
361 static void get_state_f_norm_max(const t_commrec* cr, const t_grpopts* opts, t_mdatoms* mdatoms, em_state_t* ems)
362 {
363     get_f_norm_max(cr, opts, mdatoms, ems->f.view().force(), &ems->fnorm, &ems->fmax, &ems->a_fmax);
364 }
365
366 //! Initialize the energy minimization
367 static void init_em(FILE*                fplog,
368                     const gmx::MDLogger& mdlog,
369                     const char*          title,
370                     const t_commrec*     cr,
371                     const t_inputrec*    ir,
372                     gmx::ImdSession*     imdSession,
373                     pull_t*              pull_work,
374                     t_state*             state_global,
375                     const gmx_mtop_t&    top_global,
376                     em_state_t*          ems,
377                     gmx_localtop_t*      top,
378                     t_nrnb*              nrnb,
379                     t_forcerec*          fr,
380                     gmx::MDAtoms*        mdAtoms,
381                     gmx_global_stat_t*   gstat,
382                     VirtualSitesHandler* vsite,
383                     gmx::Constraints*    constr,
384                     gmx_shellfc_t**      shellfc)
385 {
386     real dvdl_constr;
387
388     if (fplog)
389     {
390         fprintf(fplog, "Initiating %s\n", title);
391     }
392
393     if (MASTER(cr))
394     {
395         state_global->ngtc = 0;
396     }
397     int*                fep_state = MASTER(cr) ? &state_global->fep_state : nullptr;
398     gmx::ArrayRef<real> lambda    = MASTER(cr) ? state_global->lambda : gmx::ArrayRef<real>();
399     initialize_lambdas(fplog,
400                        ir->efep,
401                        ir->bSimTemp,
402                        *ir->fepvals,
403                        ir->simtempvals->temperatures,
404                        gmx::arrayRefFromArray(ir->opts.ref_t, ir->opts.ngtc),
405                        MASTER(cr),
406                        fep_state,
407                        lambda);
408
409     if (ir->eI == IntegrationAlgorithm::NM)
410     {
411         GMX_ASSERT(shellfc != nullptr, "With NM we always support shells");
412
413         *shellfc = init_shell_flexcon(stdout,
414                                       top_global,
415                                       constr ? constr->numFlexibleConstraints() : 0,
416                                       ir->nstcalcenergy,
417                                       DOMAINDECOMP(cr),
418                                       thisRankHasDuty(cr, DUTY_PME));
419     }
420     else
421     {
422         GMX_ASSERT(EI_ENERGY_MINIMIZATION(ir->eI),
423                    "This else currently only handles energy minimizers, consider if your algorithm "
424                    "needs shell/flexible-constraint support");
425
426         /* With energy minimization, shells and flexible constraints are
427          * automatically minimized when treated like normal DOFs.
428          */
429         if (shellfc != nullptr)
430         {
431             *shellfc = nullptr;
432         }
433     }
434
435     if (DOMAINDECOMP(cr))
436     {
437         dd_init_local_state(*cr->dd, state_global, &ems->s);
438
439         /* Distribute the charge groups over the nodes from the master node */
440         dd_partition_system(fplog,
441                             mdlog,
442                             ir->init_step,
443                             cr,
444                             TRUE,
445                             1,
446                             state_global,
447                             top_global,
448                             *ir,
449                             imdSession,
450                             pull_work,
451                             &ems->s,
452                             &ems->f,
453                             mdAtoms,
454                             top,
455                             fr,
456                             vsite,
457                             constr,
458                             nrnb,
459                             nullptr,
460                             FALSE);
461         dd_store_state(*cr->dd, &ems->s);
462     }
463     else
464     {
465         state_change_natoms(state_global, state_global->natoms);
466         /* Just copy the state */
467         ems->s = *state_global;
468         state_change_natoms(&ems->s, ems->s.natoms);
469
470         mdAlgorithmsSetupAtomData(
471                 cr, *ir, top_global, top, fr, &ems->f, mdAtoms, constr, vsite, shellfc ? *shellfc : nullptr);
472     }
473
474     update_mdatoms(mdAtoms->mdatoms(), ems->s.lambda[FreeEnergyPerturbationCouplingType::Mass]);
475
476     if (constr)
477     {
478         // TODO how should this cross-module support dependency be managed?
479         if (ir->eConstrAlg == ConstraintAlgorithm::Shake && gmx_mtop_ftype_count(top_global, F_CONSTR) > 0)
480         {
481             gmx_fatal(FARGS,
482                       "Cannot do energy minimization with %s, use %s\n",
483                       enumValueToString(ConstraintAlgorithm::Shake),
484                       enumValueToString(ConstraintAlgorithm::Lincs));
485         }
486
487         if (!ir->bContinuation)
488         {
489             /* Constrain the starting coordinates */
490             bool needsLogging  = true;
491             bool computeEnergy = true;
492             bool computeVirial = false;
493             dvdl_constr        = 0;
494             constr->apply(needsLogging,
495                           computeEnergy,
496                           -1,
497                           0,
498                           1.0,
499                           ems->s.x.arrayRefWithPadding(),
500                           ems->s.x.arrayRefWithPadding(),
501                           ArrayRef<RVec>(),
502                           ems->s.box,
503                           ems->s.lambda[FreeEnergyPerturbationCouplingType::Fep],
504                           &dvdl_constr,
505                           gmx::ArrayRefWithPadding<RVec>(),
506                           computeVirial,
507                           nullptr,
508                           gmx::ConstraintVariable::Positions);
509         }
510     }
511
512     if (PAR(cr))
513     {
514         *gstat = global_stat_init(ir);
515     }
516     else
517     {
518         *gstat = nullptr;
519     }
520
521     calc_shifts(ems->s.box, fr->shift_vec);
522 }
523
524 //! Finalize the minimization
525 static void finish_em(const t_commrec*          cr,
526                       gmx_mdoutf_t              outf,
527                       gmx_walltime_accounting_t walltime_accounting,
528                       gmx_wallcycle*            wcycle)
529 {
530     if (!thisRankHasDuty(cr, DUTY_PME))
531     {
532         /* Tell the PME-only node to finish */
533         gmx_pme_send_finish(cr);
534     }
535
536     done_mdoutf(outf);
537
538     em_time_end(walltime_accounting, wcycle);
539 }
540
541 //! Swap two different EM states during minimization
542 static void swap_em_state(em_state_t** ems1, em_state_t** ems2)
543 {
544     em_state_t* tmp;
545
546     tmp   = *ems1;
547     *ems1 = *ems2;
548     *ems2 = tmp;
549 }
550
551 //! Save the EM trajectory
552 static void write_em_traj(FILE*               fplog,
553                           const t_commrec*    cr,
554                           gmx_mdoutf_t        outf,
555                           gmx_bool            bX,
556                           gmx_bool            bF,
557                           const char*         confout,
558                           const gmx_mtop_t&   top_global,
559                           const t_inputrec*   ir,
560                           int64_t             step,
561                           em_state_t*         state,
562                           t_state*            state_global,
563                           ObservablesHistory* observablesHistory)
564 {
565     int mdof_flags = 0;
566
567     if (bX)
568     {
569         mdof_flags |= MDOF_X;
570     }
571     if (bF)
572     {
573         mdof_flags |= MDOF_F;
574     }
575
576     /* If we want IMD output, set appropriate MDOF flag */
577     if (ir->bIMD)
578     {
579         mdof_flags |= MDOF_IMD;
580     }
581
582     gmx::WriteCheckpointDataHolder checkpointDataHolder;
583     mdoutf_write_to_trajectory_files(fplog,
584                                      cr,
585                                      outf,
586                                      mdof_flags,
587                                      top_global.natoms,
588                                      step,
589                                      static_cast<double>(step),
590                                      &state->s,
591                                      state_global,
592                                      observablesHistory,
593                                      state->f.view().force(),
594                                      &checkpointDataHolder);
595
596     if (confout != nullptr)
597     {
598         if (DOMAINDECOMP(cr))
599         {
600             /* If bX=true, x was collected to state_global in the call above */
601             if (!bX)
602             {
603                 auto globalXRef = MASTER(cr) ? state_global->x : gmx::ArrayRef<gmx::RVec>();
604                 dd_collect_vec(
605                         cr->dd, state->s.ddp_count, state->s.ddp_count_cg_gl, state->s.cg_gl, state->s.x, globalXRef);
606             }
607         }
608         else
609         {
610             /* Copy the local state pointer */
611             state_global = &state->s;
612         }
613
614         if (MASTER(cr))
615         {
616             if (ir->pbcType != PbcType::No && !ir->bPeriodicMols && DOMAINDECOMP(cr))
617             {
618                 /* Make molecules whole only for confout writing */
619                 do_pbc_mtop(ir->pbcType, state->s.box, &top_global, state_global->x.rvec_array());
620             }
621
622             write_sto_conf_mtop(confout,
623                                 *top_global.name,
624                                 top_global,
625                                 state_global->x.rvec_array(),
626                                 nullptr,
627                                 ir->pbcType,
628                                 state->s.box);
629         }
630     }
631 }
632
633 //! \brief Do one minimization step
634 //
635 // \returns true when the step succeeded, false when a constraint error occurred
636 static bool do_em_step(const t_commrec*                          cr,
637                        const t_inputrec*                         ir,
638                        t_mdatoms*                                md,
639                        em_state_t*                               ems1,
640                        real                                      a,
641                        gmx::ArrayRefWithPadding<const gmx::RVec> force,
642                        em_state_t*                               ems2,
643                        gmx::Constraints*                         constr,
644                        int64_t                                   count)
645
646 {
647     t_state *    s1, *s2;
648     int          start, end;
649     real         dvdl_constr;
650     int nthreads gmx_unused;
651
652     bool validStep = true;
653
654     s1 = &ems1->s;
655     s2 = &ems2->s;
656
657     if (DOMAINDECOMP(cr) && s1->ddp_count != cr->dd->ddp_count)
658     {
659         gmx_incons("state mismatch in do_em_step");
660     }
661
662     s2->flags = s1->flags;
663
664     if (s2->natoms != s1->natoms)
665     {
666         state_change_natoms(s2, s1->natoms);
667         ems2->f.resize(s2->natoms);
668     }
669     if (DOMAINDECOMP(cr) && s2->cg_gl.size() != s1->cg_gl.size())
670     {
671         s2->cg_gl.resize(s1->cg_gl.size());
672     }
673
674     copy_mat(s1->box, s2->box);
675     /* Copy free energy state */
676     s2->lambda = s1->lambda;
678
679     start = 0;
680     end   = md->homenr;
681
682     nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Update);
683 #pragma omp parallel num_threads(nthreads)
684     {
685         const rvec* x1 = s1->x.rvec_array();
686         rvec*       x2 = s2->x.rvec_array();
687         const rvec* f  = as_rvec_array(force.unpaddedArrayRef().data());
688
689         int gf = 0;
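        /* The step itself: x2 = x1 + a*f along the supplied force or search
         * direction, with frozen dimensions keeping their old coordinates. */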
690 #pragma omp for schedule(static) nowait
691         for (int i = start; i < end; i++)
692         {
693             try
694             {
695                 if (md->cFREEZE)
696                 {
697                     gf = md->cFREEZE[i];
698                 }
699                 for (int m = 0; m < DIM; m++)
700                 {
701                     if (ir->opts.nFreeze[gf][m])
702                     {
703                         x2[i][m] = x1[i][m];
704                     }
705                     else
706                     {
707                         x2[i][m] = x1[i][m] + a * f[i][m];
708                     }
709                 }
710             }
711             GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
712         }
713
714         if (s2->flags & enumValueToBitMask(StateEntry::Cgp))
715         {
716             /* Copy the CG p vector */
717             const rvec* p1 = s1->cg_p.rvec_array();
718             rvec*       p2 = s2->cg_p.rvec_array();
719 #pragma omp for schedule(static) nowait
720             for (int i = start; i < end; i++)
721             {
722                 // Trivial OpenMP block that does not throw
723                 copy_rvec(p1[i], p2[i]);
724             }
725         }
726
727         if (DOMAINDECOMP(cr))
728         {
729             /* OpenMP does not support unsigned loop variables */
730 #pragma omp for schedule(static) nowait
731             for (gmx::index i = 0; i < gmx::ssize(s2->cg_gl); i++)
732             {
733                 s2->cg_gl[i] = s1->cg_gl[i];
734             }
735         }
736     }
737
738     if (DOMAINDECOMP(cr))
739     {
740         s2->ddp_count       = s1->ddp_count;
741         s2->ddp_count_cg_gl = s1->ddp_count_cg_gl;
742     }
743
744     if (constr)
745     {
746         dvdl_constr = 0;
747         validStep   = constr->apply(TRUE,
748                                   TRUE,
749                                   count,
750                                   0,
751                                   1.0,
752                                   s1->x.arrayRefWithPadding(),
753                                   s2->x.arrayRefWithPadding(),
754                                   ArrayRef<RVec>(),
755                                   s2->box,
756                                   s2->lambda[FreeEnergyPerturbationCouplingType::Bonded],
757                                   &dvdl_constr,
758                                   gmx::ArrayRefWithPadding<RVec>(),
759                                   false,
760                                   nullptr,
761                                   gmx::ConstraintVariable::Positions);
762
763         if (cr->nnodes > 1)
764         {
765             /* This global reduction will affect performance at high
766              * parallelization, but we cannot really avoid it.
767              * Usually EM is not run at high parallelization anyway.
768              */
769             int reductionBuffer = static_cast<int>(!validStep);
770             gmx_sumi(1, &reductionBuffer, cr);
771             validStep = (reductionBuffer == 0);
772         }
773
774         // We should move this check to the different minimizers
775         if (!validStep && ir->eI != IntegrationAlgorithm::Steep)
776         {
777             gmx_fatal(FARGS,
778                       "The coordinates could not be constrained. Minimizer '%s' cannot handle "
779                       "constraint failures, use minimizer '%s' before using '%s'.",
780                       enumValueToString(ir->eI),
781                       enumValueToString(IntegrationAlgorithm::Steep),
782                       enumValueToString(ir->eI));
783         }
784     }
785
786     return validStep;
787 }
788
789 //! Prepare EM for using domain decomposition parallelization
790 static void em_dd_partition_system(FILE*                fplog,
791                                    const gmx::MDLogger& mdlog,
792                                    int                  step,
793                                    const t_commrec*     cr,
794                                    const gmx_mtop_t&    top_global,
795                                    const t_inputrec*    ir,
796                                    gmx::ImdSession*     imdSession,
797                                    pull_t*              pull_work,
798                                    em_state_t*          ems,
799                                    gmx_localtop_t*      top,
800                                    gmx::MDAtoms*        mdAtoms,
801                                    t_forcerec*          fr,
802                                    VirtualSitesHandler* vsite,
803                                    gmx::Constraints*    constr,
804                                    t_nrnb*              nrnb,
805                                    gmx_wallcycle*       wcycle)
806 {
807     /* Repartition the domain decomposition */
808     dd_partition_system(fplog,
809                         mdlog,
810                         step,
811                         cr,
812                         FALSE,
813                         1,
814                         nullptr,
815                         top_global,
816                         *ir,
817                         imdSession,
818                         pull_work,
819                         &ems->s,
820                         &ems->f,
821                         mdAtoms,
822                         top,
823                         fr,
824                         vsite,
825                         constr,
826                         nrnb,
827                         wcycle,
828                         FALSE);
829     dd_store_state(*cr->dd, &ems->s);
830 }
831
832 namespace
833 {
834
835 //! Copy coordinates, OpenMP parallelized, from \p refCoords to \p coords
836 void setCoordinates(std::vector<RVec>* coords, ArrayRef<const RVec> refCoords)
837 {
838     coords->resize(refCoords.size());
839
840     const int gmx_unused nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Update);
841 #pragma omp parallel for num_threads(nthreads) schedule(static)
842     for (int i = 0; i < ssize(refCoords); i++)
843     {
844         (*coords)[i] = refCoords[i];
845     }
846 }
847
848 //! Returns the maximum distance any atom moved between two coordinate sets, over all ranks
849 real maxCoordinateDifference(ArrayRef<const RVec> coords1, ArrayRef<const RVec> coords2, MPI_Comm mpiCommMyGroup)
850 {
851     GMX_RELEASE_ASSERT(coords1.size() == coords2.size(), "Coordinate counts should match");
852
853     real maxDiffSquared = 0;
854
855     const int gmx_unused nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Update);
856 #pragma omp parallel for reduction(max : maxDiffSquared) num_threads(nthreads) schedule(static)
857     for (int i = 0; i < ssize(coords1); i++)
858     {
859         maxDiffSquared = std::max(maxDiffSquared, gmx::norm2(coords1[i] - coords2[i]));
860     }
861
862 #if GMX_MPI
863     int numRanks = 1;
864     if (mpiCommMyGroup != MPI_COMM_NULL)
865     {
866         MPI_Comm_size(mpiCommMyGroup, &numRanks);
867     }
868     if (numRanks > 1)
869     {
870         real maxDiffSquaredReduced;
871         MPI_Allreduce(
872                 &maxDiffSquared, &maxDiffSquaredReduced, 1, GMX_DOUBLE ? MPI_DOUBLE : MPI_FLOAT, MPI_MAX, mpiCommMyGroup);
873         maxDiffSquared = maxDiffSquaredReduced;
874     }
875 #else
876     GMX_UNUSED_VALUE(mpiCommMyGroup);
877 #endif
878
879     return std::sqrt(maxDiffSquared);
880 }
881
882 /*! \brief Class to handle the work of setting and doing an energy evaluation.
883  *
884  * This class is a mere aggregate of parameters to pass to evaluate an
885  * energy, so that future changes to names and types of them consume
886  * less time when refactoring other code.
887  *
888  * Aggregate initialization is used, for which the chief risk is that
889  * if a member is added at the end and not all initializer lists are
890  * updated, then the member will be value initialized, which will
891  * typically mean initialization to zero.
892  *
893  * Use a braced initializer list to construct one of these. */
894 class EnergyEvaluator
895 {
896 public:
897     /*! \brief Evaluates an energy on the state in \c ems.
898      *
899      * \todo In practice, the same objects mu_tot, vir, and pres
900      * are always passed to this function, so we would rather have
901      * them as data members. However, their C-array types are
902      * unsuited for aggregate initialization. When the types
903      * improve, the call signature of this method can be reduced.
904      */
905     void run(em_state_t* ems, rvec mu_tot, tensor vir, tensor pres, int64_t count, gmx_bool bFirst, int64_t step);
906     //! Handles logging (deprecated).
907     FILE* fplog;
908     //! Handles logging.
909     const gmx::MDLogger& mdlog;
910     //! Handles communication.
911     const t_commrec* cr;
912     //! Coordinates multi-simulations.
913     const gmx_multisim_t* ms;
914     //! Holds the simulation topology.
915     const gmx_mtop_t& top_global;
916     //! Holds the domain topology.
917     gmx_localtop_t* top;
918     //! User input options.
919     const t_inputrec* inputrec;
920     //! The Interactive Molecular Dynamics session.
921     gmx::ImdSession* imdSession;
922     //! The pull work object.
923     pull_t* pull_work;
924     //! Manages flop accounting.
925     t_nrnb* nrnb;
926     //! Manages wall cycle accounting.
927     gmx_wallcycle* wcycle;
928     //! Legacy coordinator of global reduction.
929     gmx_global_stat_t gstat;
930     //! Coordinates reduction for observables
931     gmx::ObservablesReducer* observablesReducer;
932     //! Handles virtual sites.
933     VirtualSitesHandler* vsite;
934     //! Handles constraints.
935     gmx::Constraints* constr;
936     //! Per-atom data for this domain.
937     gmx::MDAtoms* mdAtoms;
938     //! Handles how to calculate the forces.
939     t_forcerec* fr;
940     //! Schedule of force-calculation work each step for this task.
941     MdrunScheduleWorkload* runScheduleWork;
942     //! Stores the computed energies.
943     gmx_enerdata_t* enerd;
944     //! The DD partitioning count at which the pair list was generated
945     int ddpCountPairSearch;
946     //! The local coordinates that were used for pair searching, stored for computing displacements
947     std::vector<RVec> pairSearchCoordinates;
948 };
949
950 void EnergyEvaluator::run(em_state_t* ems, rvec mu_tot, tensor vir, tensor pres, int64_t count, gmx_bool bFirst, int64_t step)
951 {
952     real     t;
953     gmx_bool bNS;
954     tensor   force_vir, shake_vir, ekin;
955     real     dvdl_constr;
956     real     terminate = 0;
957
958     /* Set the time to the initial time, the time does not change during EM */
959     t = inputrec->init_t;
960
961     if (vsite)
962     {
963         vsite->construct(ems->s.x, {}, ems->s.box, gmx::VSiteOperation::Positions);
964     }
965
966     // Compute the buffer size of the pair list
967     const real bufferSize = inputrec->rlist - std::max(inputrec->rcoulomb, inputrec->rvdw);
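    // This is the Verlet-buffer slack; if it is not positive, the pair list
    // has to be regenerated at every step.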
968
969     if (bFirst || bufferSize <= 0 || (DOMAINDECOMP(cr) && ems->s.ddp_count != ddpCountPairSearch))
970     {
971         /* This is the first state or an old state used before the last ns */
972         bNS = TRUE;
973     }
974     else
975     {
976         // We need to generate a new pairlist when one atom moved more than half the buffer size
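        // (The factor 2 below is conservative: two atoms can approach each other,
        // each moving by up to the maximum displacement measured here.)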
977         ArrayRef<const RVec> localCoordinates =
978                 ArrayRef<const RVec>(ems->s.x).subArray(0, mdAtoms->mdatoms()->homenr);
979         bNS = 2 * maxCoordinateDifference(pairSearchCoordinates, localCoordinates, cr->mpi_comm_mygroup)
980               > bufferSize;
981     }
982
983     if (DOMAINDECOMP(cr) && bNS)
984     {
985         /* Repartition the domain decomposition */
986         em_dd_partition_system(
987                 fplog, mdlog, count, cr, top_global, inputrec, imdSession, pull_work, ems, top, mdAtoms, fr, vsite, constr, nrnb, wcycle);
988         ddpCountPairSearch = cr->dd->ddp_count;
989     }
990
991     /* Store the local coordinates that will be used in the pair search, after we re-partitioned */
992     if (bufferSize > 0 && bNS)
993     {
994         ArrayRef<const RVec> localCoordinates =
995                 constArrayRefFromArray(ems->s.x.data(), mdAtoms->mdatoms()->homenr);
996         setCoordinates(&pairSearchCoordinates, localCoordinates);
997     }
998
999     /* Calc force & energy on new trial position  */
1000     /* do_force always puts the charge groups in the box and shifts again
1001      * We do not unshift, so molecules are always whole in congrad.c
1002      */
1003     do_force(fplog,
1004              cr,
1005              ms,
1006              *inputrec,
1007              nullptr,
1008              nullptr,
1009              imdSession,
1010              pull_work,
1011              count,
1012              nrnb,
1013              wcycle,
1014              top,
1015              ems->s.box,
1016              ems->s.x.arrayRefWithPadding(),
1017              &ems->s.hist,
1018              &ems->f.view(),
1019              force_vir,
1020              mdAtoms->mdatoms(),
1021              enerd,
1022              ems->s.lambda,
1023              fr,
1024              runScheduleWork,
1025              vsite,
1026              mu_tot,
1027              t,
1028              nullptr,
1029              GMX_FORCE_STATECHANGED | GMX_FORCE_ALLFORCES | GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY
1030                      | (bNS ? GMX_FORCE_NS : 0),
1031              DDBalanceRegionHandler(cr));
1032
1033     /* Clear the unused shake virial and pressure */
1034     clear_mat(shake_vir);
1035     clear_mat(pres);
1036
1037     /* Communicate energies and virials when running in parallel */
1038     if (PAR(cr) && inputrec->eI != IntegrationAlgorithm::NM)
1039     {
1040         wallcycle_start(wcycle, WallCycleCounter::MoveE);
1041
1042         global_stat(*gstat,
1043                     cr,
1044                     enerd,
1045                     force_vir,
1046                     shake_vir,
1047                     *inputrec,
1048                     nullptr,
1049                     gmx::ArrayRef<real>{},
1050                     nullptr,
1051                     std::vector<real>(1, terminate),
1052                     FALSE,
1053                     CGLO_ENERGY | CGLO_PRESSURE | CGLO_CONSTRAINT,
1054                     step,
1055                     observablesReducer);
1056
1057         wallcycle_stop(wcycle, WallCycleCounter::MoveE);
1058     }
1059
1060     if (fr->dispersionCorrection)
1061     {
1062         /* Calculate long range corrections to pressure and energy */
1063         const DispersionCorrection::Correction correction = fr->dispersionCorrection->calculate(
1064                 ems->s.box, ems->s.lambda[FreeEnergyPerturbationCouplingType::Vdw]);
1065
1066         enerd->term[F_DISPCORR] = correction.energy;
1067         enerd->term[F_EPOT] += correction.energy;
1068         enerd->term[F_PRES] += correction.pressure;
1069         enerd->term[F_DVDL] += correction.dvdl;
1070     }
1071     else
1072     {
1073         enerd->term[F_DISPCORR] = 0;
1074     }
1075
1076     ems->epot = enerd->term[F_EPOT];
1077
1078     if (constr)
1079     {
1080         /* Project out the constraint components of the force */
1081         bool needsLogging  = false;
1082         bool computeEnergy = false;
1083         bool computeVirial = true;
1084         dvdl_constr        = 0;
1085         auto f             = ems->f.view().forceWithPadding();
1086         constr->apply(needsLogging,
1087                       computeEnergy,
1088                       count,
1089                       0,
1090                       1.0,
1091                       ems->s.x.arrayRefWithPadding(),
1092                       f,
1093                       f.unpaddedArrayRef(),
1094                       ems->s.box,
1095                       ems->s.lambda[FreeEnergyPerturbationCouplingType::Bonded],
1096                       &dvdl_constr,
1097                       gmx::ArrayRefWithPadding<RVec>(),
1098                       computeVirial,
1099                       shake_vir,
1100                       gmx::ConstraintVariable::ForceDispl);
1101         enerd->term[F_DVDL_CONSTR] += dvdl_constr;
1102         m_add(force_vir, shake_vir, vir);
1103     }
1104     else
1105     {
1106         copy_mat(force_vir, vir);
1107     }
1108
1109     clear_mat(ekin);
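    /* With no velocities in EM the kinetic contribution is zero, so the
     * pressure computed here is purely the virial/volume term. */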
1110     enerd->term[F_PRES] = calc_pres(fr->pbcType, inputrec->nwall, ems->s.box, ekin, vir, pres);
1111
1112     if (inputrec->efep != FreeEnergyPerturbationType::No)
1113     {
1114         accumulateKineticLambdaComponents(enerd, ems->s.lambda, *inputrec->fepvals);
1115     }
1116
1117     if (EI_ENERGY_MINIMIZATION(inputrec->eI))
1118     {
1119         get_state_f_norm_max(cr, &(inputrec->opts), mdAtoms->mdatoms(), ems);
1120     }
1121 }
1122
1123 } // namespace
1124
1125 //! Parallel utility for the Polak-Ribiere partial sum when domain decomposition has reordered the atoms
1126 static double reorder_partsum(const t_commrec*  cr,
1127                               const t_grpopts*  opts,
1128                               const gmx_mtop_t& top_global,
1129                               const em_state_t* s_min,
1130                               const em_state_t* s_b)
1131 {
1132     if (debug)
1133     {
1134         fprintf(debug, "Doing reorder_partsum\n");
1135     }
1136
1137     auto fm = s_min->f.view().force();
1138     auto fb = s_b->f.view().force();
1139
1140     /* Collect fm in a global vector fmg.
1141      * This conflicts with the spirit of domain decomposition,
1142      * but to fully optimize this a much more complicated algorithm is required.
1143      */
1144     const int natoms = top_global.natoms;
1145     rvec*     fmg;
1146     snew(fmg, natoms);
1147
1148     gmx::ArrayRef<const int> indicesMin = s_min->s.cg_gl;
1149     int                      i          = 0;
1150     for (int a : indicesMin)
1151     {
1152         copy_rvec(fm[i], fmg[a]);
1153         i++;
1154     }
1155     gmx_sum(top_global.natoms * 3, fmg[0], cr);
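    /* After the summation every rank holds the complete set of s_min forces in
     * fmg, indexed by global atom number, so it can be combined with the
     * (possibly differently ordered) local atoms of state s_b below.
     */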
1156
1157     /* Now we will determine the part of the sum for the cgs in state s_b */
1158     gmx::ArrayRef<const int> indicesB = s_b->s.cg_gl;
1159
1160     double partsum                        = 0;
1161     i                                     = 0;
1162     int                                gf = 0;
1163     gmx::ArrayRef<const unsigned char> grpnrFREEZE =
1164             top_global.groups.groupNumbers[SimulationAtomGroupType::Freeze];
1165     for (int a : indicesB)
1166     {
1167         if (!grpnrFREEZE.empty())
1168         {
1169             gf = grpnrFREEZE[a]; /* index by global atom number */
1170         }
1171         for (int m = 0; m < DIM; m++)
1172         {
1173             if (!opts->nFreeze[gf][m])
1174             {
1175                 partsum += (fb[i][m] - fmg[a][m]) * fb[i][m];
1176             }
1177         }
1178         i++;
1179     }
1180
1181     sfree(fmg);
1182
1183     return partsum;
1184 }
1185
1186 //! Compute the Polak-Ribiere beta for the next conjugate-gradient search direction
1187 static real pr_beta(const t_commrec*  cr,
1188                     const t_grpopts*  opts,
1189                     t_mdatoms*        mdatoms,
1190                     const gmx_mtop_t& top_global,
1191                     const em_state_t* s_min,
1192                     const em_state_t* s_b)
1193 {
1194     double sum;
1195
1196     /* This is just the classical Polak-Ribiere calculation of beta;
1197      * it looks a bit complicated since we take freeze groups into account,
1198      * and might have to sum it in parallel runs.
1199      */
1200
1201     if (!DOMAINDECOMP(cr)
1202         || (s_min->s.ddp_count == cr->dd->ddp_count && s_b->s.ddp_count == cr->dd->ddp_count))
1203     {
1204         auto fm = s_min->f.view().force();
1205         auto fb = s_b->f.view().force();
1206         sum     = 0;
1207         int gf  = 0;
1208         /* This part of code can be incorrect with DD,
1209          * since the atom ordering in s_b and s_min might differ.
1210          */
1211         for (int i = 0; i < mdatoms->homenr; i++)
1212         {
1213             if (mdatoms->cFREEZE)
1214             {
1215                 gf = mdatoms->cFREEZE[i];
1216             }
1217             for (int m = 0; m < DIM; m++)
1218             {
1219                 if (!opts->nFreeze[gf][m])
1220                 {
1221                     sum += (fb[i][m] - fm[i][m]) * fb[i][m];
1222                 }
1223             }
1224         }
1225     }
1226     else
1227     {
1228         /* We need to reorder cgs while summing */
1229         sum = reorder_partsum(cr, opts, top_global, s_min, s_b);
1230     }
1231     if (PAR(cr))
1232     {
1233         gmx_sumd(1, &sum, cr);
1234     }
1235
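    /* Polak-Ribiere: beta = sum_i (f_b,i - f_min,i) . f_b,i / |f_min|^2,
     * where the forces are the negative gradients and the sum above runs
     * only over non-frozen degrees of freedom.
     */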
1236     return sum / gmx::square(s_min->fnorm);
1237 }
1238
1239 namespace gmx
1240 {
1241
1242 void LegacySimulator::do_cg()
1243 {
1244     const char* CG = "Polak-Ribiere Conjugate Gradients";
1245
1246     gmx_localtop_t    top(top_global.ffparams);
1247     gmx_global_stat_t gstat;
1248     double            tmp, minstep;
1249     real              stepsize;
1250     real              a, b, c, beta = 0.0;
1251     real              epot_repl = 0;
1252     real              pnorm;
1253     gmx_bool          converged, foundlower;
1254     rvec              mu_tot = { 0 };
1255     gmx_bool          do_log = FALSE, do_ene = FALSE, do_x, do_f;
1256     tensor            vir, pres;
1257     int               number_steps, neval = 0, nstcg = inputrec->nstcgsteep;
1258     int               m, step, nminstep;
1259     auto*             mdatoms = mdAtoms->mdatoms();
1260
1261     GMX_LOG(mdlog.info)
1262             .asParagraph()
1263             .appendText(
1264                     "Note that activating conjugate gradient energy minimization via the "
1265                     "integrator .mdp option and the command gmx mdrun may "
1266                     "be available in a different form in a future version of GROMACS, "
1267                     "e.g. gmx minimize and an .mdp option.");
1268
1269     step = 0;
1270
1271     if (MASTER(cr))
1272     {
1273         // In CG, the state is extended with a search direction
1274         state_global->flags |= enumValueToBitMask(StateEntry::Cgp);
1275
1276         // Ensure the extra per-atom state array gets allocated
1277         state_change_natoms(state_global, state_global->natoms);
1278
1279         // Initialize the search direction to zero
1280         for (RVec& cg_p : state_global->cg_p)
1281         {
1282             cg_p = { 0, 0, 0 };
1283         }
1284     }
1285
1286     /* Create 4 states on the stack and extract pointers that we will swap */
1287     em_state_t  s0{}, s1{}, s2{}, s3{};
1288     em_state_t* s_min = &s0;
1289     em_state_t* s_a   = &s1;
1290     em_state_t* s_b   = &s2;
1291     em_state_t* s_c   = &s3;
1292
1293     ObservablesReducer observablesReducer = observablesReducerBuilder->build();
1294
1295     /* Init em and store the local state in s_min */
1296     init_em(fplog,
1297             mdlog,
1298             CG,
1299             cr,
1300             inputrec,
1301             imdSession,
1302             pull_work,
1303             state_global,
1304             top_global,
1305             s_min,
1306             &top,
1307             nrnb,
1308             fr,
1309             mdAtoms,
1310             &gstat,
1311             vsite,
1312             constr,
1313             nullptr);
1314     const bool        simulationsShareState = false;
1315     gmx_mdoutf*       outf                  = init_mdoutf(fplog,
1316                                    nfile,
1317                                    fnm,
1318                                    mdrunOptions,
1319                                    cr,
1320                                    outputProvider,
1321                                    mdModulesNotifiers,
1322                                    inputrec,
1323                                    top_global,
1324                                    nullptr,
1325                                    wcycle,
1326                                    StartingBehavior::NewSimulation,
1327                                    simulationsShareState,
1328                                    ms);
1329     gmx::EnergyOutput energyOutput(mdoutf_get_fp_ene(outf),
1330                                    top_global,
1331                                    *inputrec,
1332                                    pull_work,
1333                                    nullptr,
1334                                    false,
1335                                    StartingBehavior::NewSimulation,
1336                                    simulationsShareState,
1337                                    mdModulesNotifiers);
1338
1339     /* Print to log file */
1340     print_em_start(fplog, cr, walltime_accounting, wcycle, CG);
1341
1342     /* Max number of steps */
1343     number_steps = inputrec->nsteps;
1344
1345     if (MASTER(cr))
1346     {
1347         sp_header(stderr, CG, inputrec->em_tol, number_steps);
1348     }
1349     if (fplog)
1350     {
1351         sp_header(fplog, CG, inputrec->em_tol, number_steps);
1352     }
1353
1354     EnergyEvaluator energyEvaluator{ fplog,
1355                                      mdlog,
1356                                      cr,
1357                                      ms,
1358                                      top_global,
1359                                      &top,
1360                                      inputrec,
1361                                      imdSession,
1362                                      pull_work,
1363                                      nrnb,
1364                                      wcycle,
1365                                      gstat,
1366                                      &observablesReducer,
1367                                      vsite,
1368                                      constr,
1369                                      mdAtoms,
1370                                      fr,
1371                                      runScheduleWork,
1372                                      enerd,
1373                                      -1,
1374                                      {} };
1375     /* Call the force routine and some auxiliary (neighboursearching etc.) */
1376     /* do_force always puts the charge groups in the box and shifts again
1377      * We do not unshift, so molecules are always whole in congrad.c
1378      */
1379     energyEvaluator.run(s_min, mu_tot, vir, pres, -1, TRUE, step);
1380
1381     if (MASTER(cr))
1382     {
1383         /* Copy stuff to the energy bin for easy printing etc. */
1384         matrix nullBox = {};
1385         energyOutput.addDataAtEnergyStep(false,
1386                                          false,
1387                                          static_cast<double>(step),
1388                                          mdatoms->tmass,
1389                                          enerd,
1390                                          nullptr,
1391                                          nullptr,
1392                                          nullBox,
1393                                          PTCouplingArrays(),
1394                                          0,
1395                                          vir,
1396                                          pres,
1397                                          nullptr,
1398                                          mu_tot,
1399                                          constr);
1400
1401         EnergyOutput::printHeader(fplog, step, step);
1402         energyOutput.printStepToEnergyFile(
1403                 mdoutf_get_fp_ene(outf), TRUE, FALSE, FALSE, fplog, step, step, fr->fcdata.get(), nullptr);
1404     }
1405
1406     /* Estimate/guess the initial stepsize */
1407     stepsize = inputrec->em_stepsize / s_min->fnorm;
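    /* The first search direction equals the force, so this choice keeps the
     * largest atomic displacement of the first step at or below em_stepsize
     * (since fmax <= fnorm).
     */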
1408
1409     if (MASTER(cr))
1410     {
1411         double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
1412         fprintf(stderr, "   F-max             = %12.5e on atom %d\n", s_min->fmax, s_min->a_fmax + 1);
1413         fprintf(stderr, "   F-Norm            = %12.5e\n", s_min->fnorm / sqrtNumAtoms);
1414         fprintf(stderr, "\n");
1415         /* and copy to the log file too... */
1416         fprintf(fplog, "   F-max             = %12.5e on atom %d\n", s_min->fmax, s_min->a_fmax + 1);
1417         fprintf(fplog, "   F-Norm            = %12.5e\n", s_min->fnorm / sqrtNumAtoms);
1418         fprintf(fplog, "\n");
1419     }
1420     /* Start the loop over CG steps.
1421      * Each successful step is counted, and we continue until
1422      * we either converge or reach the max number of steps.
1423      */
1424     converged = FALSE;
1425     for (step = 0; (number_steps < 0 || step <= number_steps) && !converged; step++)
1426     {
1427
1428         /* Start taking steps in a new direction.
1429          * The first time we enter the routine, beta=0, and the direction is
1430          * simply the negative gradient.
1431          */
1432
1433         /* Calculate the new direction in p, and the gradient in this direction, gpa */
1434         gmx::ArrayRef<gmx::RVec>       pm  = s_min->s.cg_p;
1435         gmx::ArrayRef<const gmx::RVec> sfm = s_min->f.view().force();
1436         double                         gpa = 0;
1437         int                            gf  = 0;
1438         for (int i = 0; i < mdatoms->homenr; i++)
1439         {
1440             if (mdatoms->cFREEZE)
1441             {
1442                 gf = mdatoms->cFREEZE[i];
1443             }
1444             for (m = 0; m < DIM; m++)
1445             {
1446                 if (!inputrec->opts.nFreeze[gf][m])
1447                 {
1448                     pm[i][m] = sfm[i][m] + beta * pm[i][m];
1449                     gpa -= pm[i][m] * sfm[i][m];
1450                     /* f is negative gradient, thus the sign */
1451                 }
1452                 else
1453                 {
1454                     pm[i][m] = 0;
1455                 }
1456             }
1457         }
1458
1459         /* Sum the gradient along the line across CPUs */
1460         if (PAR(cr))
1461         {
1462             gmx_sumd(1, &gpa, cr);
1463         }
1464
1465         /* Calculate the norm of the search vector */
1466         get_f_norm_max(cr, &(inputrec->opts), mdatoms, pm, &pnorm, nullptr, nullptr);
1467
1468         /* Just in case stepsize reaches zero due to numerical precision... */
1469         if (stepsize <= 0)
1470         {
1471             stepsize = inputrec->em_stepsize / pnorm;
1472         }
1473
1474         /*
1475          * Double check the value of the derivative in the search direction.
1476          * If it is positive it must be due to the old information in the
1477          * CG formula, so just remove that and start over with beta=0.
1478          * This corresponds to a steepest descent step.
1479          */
1480         if (gpa > 0)
1481         {
1482             beta = 0;
1483             step--;   /* Don't count this step since we are restarting */
1484             continue; /* Go back to the beginning of the big for-loop */
1485         }
1486
1487         /* Calculate minimum allowed stepsize, before the average (norm)
1488          * relative change in coordinate is smaller than precision
1489          */
1490         minstep      = 0;
1491         auto s_min_x = makeArrayRef(s_min->s.x);
1492         for (int i = 0; i < mdatoms->homenr; i++)
1493         {
1494             for (m = 0; m < DIM; m++)
1495             {
1496                 tmp = fabs(s_min_x[i][m]);
1497                 if (tmp < 1.0)
1498                 {
1499                     tmp = 1.0;
1500                 }
1501                 tmp = pm[i][m] / tmp;
1502                 minstep += tmp * tmp;
1503             }
1504         }
1505         /* Add up from all CPUs */
1506         if (PAR(cr))
1507         {
1508             gmx_sumd(1, &minstep, cr);
1509         }
1510
1511         minstep = GMX_REAL_EPS / sqrt(minstep / (3 * top_global.natoms));
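             /* minstep is now the smallest step length for which the RMS relative
              * change in the coordinates still exceeds machine precision; below it
              * further progress would be lost to rounding, so we treat reaching it
              * as convergence.
              */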
1512
1513         if (stepsize < minstep)
1514         {
1515             converged = TRUE;
1516             break;
1517         }
1518
1519         /* Write coordinates if necessary */
1520         do_x = do_per_step(step, inputrec->nstxout);
1521         do_f = do_per_step(step, inputrec->nstfout);
1522
1523         write_em_traj(
1524                 fplog, cr, outf, do_x, do_f, nullptr, top_global, inputrec, step, s_min, state_global, observablesHistory);
1525
1526         /* Take a step downhill.
1527          * In theory, we should minimize the function along this direction.
1528          * That is quite possible, but it turns out to take 5-10 function evaluations
1529          * for each line. However, we don't really need to find the exact minimum -
1530          * it is much better to start a new CG step in a modified direction as soon
1531          * as we are close to it. This will save a lot of energy evaluations.
1532          *
1533          * In practice, we just try to take a single step.
1534          * If it worked (i.e. lowered the energy), we increase the stepsize but
1535          * then continue straight to the next CG step without trying to find any minimum.
1536          * If it didn't work (higher energy), there must be a minimum somewhere between
1537          * the old position and the new one.
1538          *
1539          * Due to the finite numerical accuracy, it turns out that it is a good idea
1540          * to even accept a SMALL increase in energy, if the derivative is still downhill.
1541          * This leads to lower final energies in the tests I've done. / Erik
1542          */
1543         s_a->epot = s_min->epot;
1544         a         = 0.0;
1545         c         = a + stepsize; /* reference position along line is zero */
1546
1547         if (DOMAINDECOMP(cr) && s_min->s.ddp_count < cr->dd->ddp_count)
1548         {
1549             em_dd_partition_system(fplog,
1550                                    mdlog,
1551                                    step,
1552                                    cr,
1553                                    top_global,
1554                                    inputrec,
1555                                    imdSession,
1556                                    pull_work,
1557                                    s_min,
1558                                    &top,
1559                                    mdAtoms,
1560                                    fr,
1561                                    vsite,
1562                                    constr,
1563                                    nrnb,
1564                                    wcycle);
1565         }
1566
1567         /* Take a trial step (new coords in s_c) */
1568         do_em_step(cr, inputrec, mdatoms, s_min, c, s_min->s.cg_p.constArrayRefWithPadding(), s_c, constr, -1);
1569
1570         neval++;
1571         /* Calculate energy for the trial step */
1572         energyEvaluator.run(s_c, mu_tot, vir, pres, -1, FALSE, step);
1573
1574         /* Calc derivative along line */
1575         const rvec*                    pc  = s_c->s.cg_p.rvec_array();
1576         gmx::ArrayRef<const gmx::RVec> sfc = s_c->f.view().force();
1577         double                         gpc = 0;
1578         for (int i = 0; i < mdatoms->homenr; i++)
1579         {
1580             for (m = 0; m < DIM; m++)
1581             {
1582                 gpc -= pc[i][m] * sfc[i][m]; /* f is negative gradient, thus the sign */
1583             }
1584         }
1585         /* Sum the gradient along the line across CPUs */
1586         if (PAR(cr))
1587         {
1588             gmx_sumd(1, &gpc, cr);
1589         }
1590
1591         /* This is the max amount of increase in energy we tolerate */
1592         tmp = std::sqrt(GMX_REAL_EPS) * fabs(s_a->epot);
1593
1594         /* Accept the step if the energy is lower, or if it is not significantly higher
1595          * and the line derivative is still negative.
1596          */
1597         if (s_c->epot < s_a->epot || (gpc < 0 && s_c->epot < (s_a->epot + tmp)))
1598         {
1599             foundlower = TRUE;
1600             /* Great, we found a better energy. Increase step for next iteration
1601              * if we are still going down, decrease it otherwise
1602              */
1603             if (gpc < 0)
1604             {
1605                 stepsize *= 1.618034; /* The golden section */
1606             }
1607             else
1608             {
1609                 stepsize *= 0.618034; /* 1/golden section */
1610             }
1611         }
1612         else
1613         {
1614             /* New energy is the same or higher. We will have to do some work
1615              * to find a smaller value in the interval. Take smaller step next time!
1616              */
1617             foundlower = FALSE;
1618             stepsize *= 0.618034;
1619         }
1620
1621
1622         /* OK, if we didn't find a lower value we will have to locate one now - there must
1623          * be one in the interval [a=0,c].
1624          * The same thing is valid here, though: Don't spend dozens of iterations to find
1625          * the line minimum. We try to interpolate based on the derivative at the endpoints,
1626          * and only continue until we find a lower value. In most cases this means 1-2 iterations.
1627          *
1628          * I also have a safeguard for potentially really pathological functions so we never
1629          * take more than 20 steps before we give up ...
1630          *
1631          * If we already found a lower value we just skip this step and continue to the update.
1632          */
1633         double gpb;
1634         if (!foundlower)
1635         {
1636             nminstep = 0;
1637
1638             do
1639             {
1640                 /* Select a new trial point.
1641                  * If the derivatives at points a & c have different sign we interpolate to zero,
1642                  * otherwise just do a bisection.
1643                  */
1644                 if (gpa < 0 && gpc > 0)
1645                 {
1646                     b = a + gpa * (a - c) / (gpc - gpa);
1647                 }
1648                 else
1649                 {
1650                     b = 0.5 * (a + c);
1651                 }
1652
1653                 /* safeguard if interpolation close to machine accuracy causes errors:
1654                  * never go outside the interval
1655                  */
1656                 if (b <= a || b >= c)
1657                 {
1658                     b = 0.5 * (a + c);
1659                 }
1660
1661                 if (DOMAINDECOMP(cr) && s_min->s.ddp_count != cr->dd->ddp_count)
1662                 {
1663                     /* Reload the old state */
1664                     em_dd_partition_system(fplog,
1665                                            mdlog,
1666                                            -1,
1667                                            cr,
1668                                            top_global,
1669                                            inputrec,
1670                                            imdSession,
1671                                            pull_work,
1672                                            s_min,
1673                                            &top,
1674                                            mdAtoms,
1675                                            fr,
1676                                            vsite,
1677                                            constr,
1678                                            nrnb,
1679                                            wcycle);
1680                 }
1681
1682                 /* Take a trial step to this new point - new coords in s_b */
1683                 do_em_step(cr, inputrec, mdatoms, s_min, b, s_min->s.cg_p.constArrayRefWithPadding(), s_b, constr, -1);
1684
1685                 neval++;
1686                 /* Calculate energy for the trial step */
1687                 energyEvaluator.run(s_b, mu_tot, vir, pres, -1, FALSE, step);
1688
1689                 /* p does not change within a step, but since the domain decomposition
1690                  * might change, we have to use cg_p of s_b here.
1691                  */
1692                 const rvec*                    pb  = s_b->s.cg_p.rvec_array();
1693                 gmx::ArrayRef<const gmx::RVec> sfb = s_b->f.view().force();
1694                 gpb                                = 0;
1695                 for (int i = 0; i < mdatoms->homenr; i++)
1696                 {
1697                     for (m = 0; m < DIM; m++)
1698                     {
1699                         gpb -= pb[i][m] * sfb[i][m]; /* f is negative gradient, thus the sign */
1700                     }
1701                 }
1702                 /* Sum the gradient along the line across CPUs */
1703                 if (PAR(cr))
1704                 {
1705                     gmx_sumd(1, &gpb, cr);
1706                 }
1707
1708                 if (debug)
1709                 {
1710                     fprintf(debug, "CGE: EpotA %f EpotB %f EpotC %f gpb %f\n", s_a->epot, s_b->epot, s_c->epot, gpb);
1711                 }
1712
1713                 epot_repl = s_b->epot;
1714
1715                 /* Keep one of the intervals based on the value of the derivative at the new point */
1716                 if (gpb > 0)
1717                 {
1718                     /* Replace c endpoint with b */
1719                     swap_em_state(&s_b, &s_c);
1720                     c   = b;
1721                     gpc = gpb;
1722                 }
1723                 else
1724                 {
1725                     /* Replace a endpoint with b */
1726                     swap_em_state(&s_b, &s_a);
1727                     a   = b;
1728                     gpa = gpb;
1729                 }
1730
1731                 /*
1732                  * Stop search as soon as we find a value smaller than the endpoints.
1733                  * Never run more than 20 steps, no matter what.
1734                  */
1735                 nminstep++;
1736             } while ((epot_repl > s_a->epot || epot_repl > s_c->epot) && (nminstep < 20));
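                 /* The loop exits once the newest trial point is at least as low in
                  * energy as both remaining interval endpoints, or after 20
                  * interpolation/bisection steps.
                  */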
1737
1738             if (std::fabs(epot_repl - s_min->epot) < fabs(s_min->epot) * GMX_REAL_EPS || nminstep >= 20)
1739             {
1740                 /* OK. We couldn't find a significantly lower energy.
1741                  * If beta==0 this was steepest descent, and then we give up.
1742                  * If not, set beta=0 and restart with steepest descent before quitting.
1743                  */
1744                 if (beta == 0.0)
1745                 {
1746                     /* Converged */
1747                     converged = TRUE;
1748                     break;
1749                 }
1750                 else
1751                 {
1752                     /* Reset memory before giving up */
1753                     beta = 0.0;
1754                     continue;
1755                 }
1756             }
1757
1758             /* Select min energy state of A & C, put the best in B.
1759              */
1760             if (s_c->epot < s_a->epot)
1761             {
1762                 if (debug)
1763                 {
1764                     fprintf(debug, "CGE: C (%f) is lower than A (%f), moving C to B\n", s_c->epot, s_a->epot);
1765                 }
1766                 swap_em_state(&s_b, &s_c);
1767                 gpb = gpc;
1768             }
1769             else
1770             {
1771                 if (debug)
1772                 {
1773                     fprintf(debug, "CGE: A (%f) is lower than C (%f), moving A to B\n", s_a->epot, s_c->epot);
1774                 }
1775                 swap_em_state(&s_b, &s_a);
1776                 gpb = gpa;
1777             }
1778         }
1779         else
1780         {
1781             if (debug)
1782             {
1783                 fprintf(debug, "CGE: Found a lower energy %f, moving C to B\n", s_c->epot);
1784             }
1785             swap_em_state(&s_b, &s_c);
1786             gpb = gpc;
1787         }
1788
1789         /* new search direction */
1790         /* beta = 0 means forget all memory and restart with steepest descents. */
1791         if (nstcg && ((step % nstcg) == 0))
1792         {
1793             beta = 0.0;
1794         }
1795         else
1796         {
1797             /* s_min->fnorm cannot be zero, because then we would have converged
1798              * and broken out.
1799              */
1800
1801             /* Polak-Ribiere update.
1802              * Change to fnorm2/fnorm2_old for Fletcher-Reeves
1803              */
1804             beta = pr_beta(cr, &inputrec->opts, mdatoms, top_global, s_min, s_b);
1805         }
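             /* In standard form the Polak-Ribiere coefficient is
              *   beta = f_new . (f_new - f_old) / |f_old|^2
              * with f = -grad E; Fletcher-Reeves would use |f_new|^2 / |f_old|^2
              * instead, as noted above.
              */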
1806         /* Limit beta to prevent oscillations */
1807         if (fabs(beta) > 5.0)
1808         {
1809             beta = 0.0;
1810         }
1811
1812
1813         /* update positions */
1814         swap_em_state(&s_min, &s_b);
1815         gpa = gpb;
1816
1817         /* Print it if necessary */
1818         if (MASTER(cr))
1819         {
1820             if (mdrunOptions.verbose)
1821             {
1822                 double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
1823                 fprintf(stderr,
1824                         "\rStep %d, Epot=%12.6e, Fnorm=%9.3e, Fmax=%9.3e (atom %d)\n",
1825                         step,
1826                         s_min->epot,
1827                         s_min->fnorm / sqrtNumAtoms,
1828                         s_min->fmax,
1829                         s_min->a_fmax + 1);
1830                 fflush(stderr);
1831             }
1832             /* Store the new (lower) energies */
1833             matrix nullBox = {};
1834             energyOutput.addDataAtEnergyStep(false,
1835                                              false,
1836                                              static_cast<double>(step),
1837                                              mdatoms->tmass,
1838                                              enerd,
1839                                              nullptr,
1840                                              nullptr,
1841                                              nullBox,
1842                                              PTCouplingArrays(),
1843                                              0,
1844                                              vir,
1845                                              pres,
1846                                              nullptr,
1847                                              mu_tot,
1848                                              constr);
1849
1850             do_log = do_per_step(step, inputrec->nstlog);
1851             do_ene = do_per_step(step, inputrec->nstenergy);
1852
1853             imdSession->fillEnergyRecord(step, TRUE);
1854
1855             if (do_log)
1856             {
1857                 EnergyOutput::printHeader(fplog, step, step);
1858             }
1859             energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf),
1860                                                do_ene,
1861                                                FALSE,
1862                                                FALSE,
1863                                                do_log ? fplog : nullptr,
1864                                                step,
1865                                                step,
1866                                                fr->fcdata.get(),
1867                                                nullptr);
1868         }
1869
1870         /* Send energies and positions to the IMD client if bIMD is TRUE. */
1871         if (MASTER(cr) && imdSession->run(step, TRUE, state_global->box, state_global->x, 0))
1872         {
1873             imdSession->sendPositionsAndEnergies();
1874         }
1875
1876         /* Stop when the maximum force lies below tolerance.
1877          * If we have reached machine precision, converged is already set to true.
1878          */
1879         converged = converged || (s_min->fmax < inputrec->em_tol);
1880
1881     } /* End of the loop */
1882
1883     if (converged)
1884     {
1885         step--; /* we never took that last step in this case */
1886     }
1887     if (s_min->fmax > inputrec->em_tol)
1888     {
1889         if (MASTER(cr))
1890         {
1891             warn_step(fplog, inputrec->em_tol, s_min->fmax, step - 1 == number_steps, FALSE);
1892         }
1893         converged = FALSE;
1894     }
1895
1896     if (MASTER(cr))
1897     {
1898         /* If we printed energy and/or logfile last step (which was the last step)
1899          * we don't have to do it again, but otherwise print the final values.
1900          */
1901         if (!do_log)
1902         {
1903             /* Write final value to log since we didn't do anything the last step */
1904             EnergyOutput::printHeader(fplog, step, step);
1905         }
1906         if (!do_ene || !do_log)
1907         {
1908             /* Write final energy file entries */
1909             energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf),
1910                                                !do_ene,
1911                                                FALSE,
1912                                                FALSE,
1913                                                !do_log ? fplog : nullptr,
1914                                                step,
1915                                                step,
1916                                                fr->fcdata.get(),
1917                                                nullptr);
1918         }
1919     }
1920
1921     /* Print some stuff... */
1922     if (MASTER(cr))
1923     {
1924         fprintf(stderr, "\nwriting lowest energy coordinates.\n");
1925     }
1926
1927     /* IMPORTANT!
1928      * For accurate normal mode calculation it is imperative that we
1929      * store the last conformation into the full precision binary trajectory.
1930      *
1931      * However, we should only do it if we did NOT already write this step
1932      * above (which we did if do_x or do_f was true).
1933      */
1934     /* Note that with 0 < nstfout != nstxout we can end up with two frames
1935      * in the trajectory with the same step number.
1936      */
1937     do_x = !do_per_step(step, inputrec->nstxout);
1938     do_f = (inputrec->nstfout > 0 && !do_per_step(step, inputrec->nstfout));
1939
1940     write_em_traj(
1941             fplog, cr, outf, do_x, do_f, ftp2fn(efSTO, nfile, fnm), top_global, inputrec, step, s_min, state_global, observablesHistory);
1942
1943
1944     if (MASTER(cr))
1945     {
1946         double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
1947         print_converged(stderr, CG, inputrec->em_tol, step, converged, number_steps, s_min, sqrtNumAtoms);
1948         print_converged(fplog, CG, inputrec->em_tol, step, converged, number_steps, s_min, sqrtNumAtoms);
1949
1950         fprintf(fplog, "\nPerformed %d energy evaluations in total.\n", neval);
1951     }
1952
1953     finish_em(cr, outf, walltime_accounting, wcycle);
1954
1955     /* To print the actual number of steps we needed somewhere */
1956     walltime_accounting_set_nsteps_done(walltime_accounting, step);
1957 }
1958
1959
1960 void LegacySimulator::do_lbfgs()
1961 {
1962     static const char* LBFGS = "Low-Memory BFGS Minimizer";
1963     em_state_t         ems;
1964     gmx_localtop_t     top(top_global.ffparams);
1965     gmx_global_stat_t  gstat;
1966     auto*              mdatoms = mdAtoms->mdatoms();
1967
1968     GMX_LOG(mdlog.info)
1969             .asParagraph()
1970             .appendText(
1971                     "Note that activating L-BFGS energy minimization via the "
1972                     "integrator .mdp option and the command gmx mdrun may "
1973                     "be available in a different form in a future version of GROMACS, "
1974                     "e.g. gmx minimize and an .mdp option.");
1975
1976     if (PAR(cr))
1977     {
1978         gmx_fatal(FARGS, "L-BFGS minimization only supports a single rank");
1979     }
1980
1981     if (nullptr != constr)
1982     {
1983         gmx_fatal(
1984                 FARGS,
1985                 "The combination of constraints and L-BFGS minimization is not implemented. Either "
1986                 "do not use constraints, or use another minimizer (e.g. steepest descent).");
1987     }
1988
1989     const int n        = 3 * state_global->natoms;
1990     const int nmaxcorr = inputrec->nbfgscorr;
1991
1992     std::vector<real> p(n);
1993     std::vector<real> rho(nmaxcorr);
1994     std::vector<real> alpha(nmaxcorr);
1995
1996     std::vector<std::vector<real>> dx(nmaxcorr);
1997     for (auto& dxCorr : dx)
1998     {
1999         dxCorr.resize(n);
2000     }
2001
2002     std::vector<std::vector<real>> dg(nmaxcorr);
2003     for (auto& dgCorr : dg)
2004     {
2005         dgCorr.resize(n);
2006     }
2007
2008     int step  = 0;
2009     int neval = 0;
2010
2011     ObservablesReducer observablesReducer = observablesReducerBuilder->build();
2012
2013     /* Init em */
2014     init_em(fplog,
2015             mdlog,
2016             LBFGS,
2017             cr,
2018             inputrec,
2019             imdSession,
2020             pull_work,
2021             state_global,
2022             top_global,
2023             &ems,
2024             &top,
2025             nrnb,
2026             fr,
2027             mdAtoms,
2028             &gstat,
2029             vsite,
2030             constr,
2031             nullptr);
2032     const bool        simulationsShareState = false;
2033     gmx_mdoutf*       outf                  = init_mdoutf(fplog,
2034                                    nfile,
2035                                    fnm,
2036                                    mdrunOptions,
2037                                    cr,
2038                                    outputProvider,
2039                                    mdModulesNotifiers,
2040                                    inputrec,
2041                                    top_global,
2042                                    nullptr,
2043                                    wcycle,
2044                                    StartingBehavior::NewSimulation,
2045                                    simulationsShareState,
2046                                    ms);
2047     gmx::EnergyOutput energyOutput(mdoutf_get_fp_ene(outf),
2048                                    top_global,
2049                                    *inputrec,
2050                                    pull_work,
2051                                    nullptr,
2052                                    false,
2053                                    StartingBehavior::NewSimulation,
2054                                    simulationsShareState,
2055                                    mdModulesNotifiers);
2056
2057     const int start = 0;
2058     const int end   = mdatoms->homenr;
2059
2060     /* We need 4 working states */
2061     em_state_t  s0{}, s1{}, s2{}, s3{};
2062     em_state_t* sa   = &s0;
2063     em_state_t* sb   = &s1;
2064     em_state_t* sc   = &s2;
2065     em_state_t* last = &s3;
2066     /* Initialize by copying the state from ems (we could skip x and f here) */
2067     *sa = ems;
2068     *sb = ems;
2069     *sc = ems;
2070
2071     /* Print to log file */
2072     print_em_start(fplog, cr, walltime_accounting, wcycle, LBFGS);
2073
2074     /* Max number of steps */
2075     const int number_steps = inputrec->nsteps;
2076
2077     /* Create a 3*natoms index to tell whether each degree of freedom is frozen */
2078     std::vector<bool> frozen(n);
2079     int               gf = 0;
2080     for (int i = start; i < end; i++)
2081     {
2082         if (mdatoms->cFREEZE)
2083         {
2084             gf = mdatoms->cFREEZE[i];
2085         }
2086         for (int m = 0; m < DIM; m++)
2087         {
2088             frozen[3 * i + m] = (inputrec->opts.nFreeze[gf][m] != 0);
2089         }
2090     }
2091     if (MASTER(cr))
2092     {
2093         sp_header(stderr, LBFGS, inputrec->em_tol, number_steps);
2094     }
2095     if (fplog)
2096     {
2097         sp_header(fplog, LBFGS, inputrec->em_tol, number_steps);
2098     }
2099
2100     if (vsite)
2101     {
2102         vsite->construct(state_global->x, {}, state_global->box, VSiteOperation::Positions);
2103     }
2104
2105     /* Call the force routine and some auxiliary (neighboursearching etc.) */
2106     /* do_force always puts the charge groups in the box and shifts again
2107      * We do not unshift, so molecules are always whole
2108      */
2109     neval++;
2110     EnergyEvaluator energyEvaluator{ fplog,
2111                                      mdlog,
2112                                      cr,
2113                                      ms,
2114                                      top_global,
2115                                      &top,
2116                                      inputrec,
2117                                      imdSession,
2118                                      pull_work,
2119                                      nrnb,
2120                                      wcycle,
2121                                      gstat,
2122                                      &observablesReducer,
2123                                      vsite,
2124                                      constr,
2125                                      mdAtoms,
2126                                      fr,
2127                                      runScheduleWork,
2128                                      enerd };
2129     rvec            mu_tot;
2130     tensor          vir;
2131     tensor          pres;
2132     energyEvaluator.run(&ems, mu_tot, vir, pres, -1, TRUE, step);
2133
2134     if (MASTER(cr))
2135     {
2136         /* Copy stuff to the energy bin for easy printing etc. */
2137         matrix nullBox = {};
2138         energyOutput.addDataAtEnergyStep(false,
2139                                          false,
2140                                          static_cast<double>(step),
2141                                          mdatoms->tmass,
2142                                          enerd,
2143                                          nullptr,
2144                                          nullptr,
2145                                          nullBox,
2146                                          PTCouplingArrays(),
2147                                          0,
2148                                          vir,
2149                                          pres,
2150                                          nullptr,
2151                                          mu_tot,
2152                                          constr);
2153
2154         EnergyOutput::printHeader(fplog, step, step);
2155         energyOutput.printStepToEnergyFile(
2156                 mdoutf_get_fp_ene(outf), TRUE, FALSE, FALSE, fplog, step, step, fr->fcdata.get(), nullptr);
2157     }
2158
2159     /* Set the initial step.
2160      * Since it will be multiplied by the non-normalized search direction
2161      * vector (force vector the first time), we scale it by the
2162      * norm of the force.
2163      */
2164
2165     if (MASTER(cr))
2166     {
2167         double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
2168         fprintf(stderr, "Using %d BFGS correction steps.\n\n", nmaxcorr);
2169         fprintf(stderr, "   F-max             = %12.5e on atom %d\n", ems.fmax, ems.a_fmax + 1);
2170         fprintf(stderr, "   F-Norm            = %12.5e\n", ems.fnorm / sqrtNumAtoms);
2171         fprintf(stderr, "\n");
2172         /* and copy to the log file too... */
2173         fprintf(fplog, "Using %d BFGS correction steps.\n\n", nmaxcorr);
2174         fprintf(fplog, "   F-max             = %12.5e on atom %d\n", ems.fmax, ems.a_fmax + 1);
2175         fprintf(fplog, "   F-Norm            = %12.5e\n", ems.fnorm / sqrtNumAtoms);
2176         fprintf(fplog, "\n");
2177     }
2178
2179     // Point is an index to the memory of search directions, where 0 is the first one.
2180     int point = 0;
2181
2182     // Set initial search direction to the force (-gradient), or 0 for frozen particles.
2183     real* fInit = static_cast<real*>(ems.f.view().force().data()[0]);
2184     for (int i = 0; i < n; i++)
2185     {
2186         if (!frozen[i])
2187         {
2188             dx[point][i] = fInit[i]; /* Initial search direction */
2189         }
2190         else
2191         {
2192             dx[point][i] = 0;
2193         }
2194     }
2195
2196     // Stepsize will be modified during the search, and actually it is not critical
2197     // (the main efficiency in the algorithm comes from changing directions), but
2198     // we still need an initial value, so estimate it as the inverse of the norm
2199     // so we take small steps where the potential fluctuates a lot.
2200     double stepsize = 1.0 / ems.fnorm;
2201
2202     /* Start the loop over BFGS steps.
2203      * Each successful step is counted, and we continue until
2204      * we either converge or reach the max number of steps.
2205      */
2206
2207     bool do_log = true;
2208     bool do_ene = true;
2209
2210     int ncorr = 0;
2211
2212     /* Set the gradient from the force */
2213     bool converged = false;
2214     for (int step = 0; (number_steps < 0 || step <= number_steps) && !converged; step++)
2215     {
2216
2217         /* Write coordinates if necessary */
2218         const bool do_x = do_per_step(step, inputrec->nstxout);
2219         const bool do_f = do_per_step(step, inputrec->nstfout);
2220
2221         int mdof_flags = 0;
2222         if (do_x)
2223         {
2224             mdof_flags |= MDOF_X;
2225         }
2226
2227         if (do_f)
2228         {
2229             mdof_flags |= MDOF_F;
2230         }
2231
2232         if (inputrec->bIMD)
2233         {
2234             mdof_flags |= MDOF_IMD;
2235         }
2236
2237         gmx::WriteCheckpointDataHolder checkpointDataHolder;
2238         mdoutf_write_to_trajectory_files(fplog,
2239                                          cr,
2240                                          outf,
2241                                          mdof_flags,
2242                                          top_global.natoms,
2243                                          step,
2244                                          static_cast<real>(step),
2245                                          &ems.s,
2246                                          state_global,
2247                                          observablesHistory,
2248                                          ems.f.view().force(),
2249                                          &checkpointDataHolder);
2250
2251         /* Do the linesearching in the direction dx[point][0..(n-1)] */
2252
2253         /* make s a pointer to current search direction - point=0 first time we get here */
2254         gmx::ArrayRef<const real> s = dx[point];
2255
2256         const real* xx = static_cast<real*>(ems.s.x.rvec_array()[0]);
2257         const real* ff = static_cast<real*>(ems.f.view().force().data()[0]);
2258
2259         // calculate line gradient in position A
2260         double gpa = 0;
2261         for (int i = 0; i < n; i++)
2262         {
2263             gpa -= s[i] * ff[i];
2264         }
2265
2266         /* Calculate minimum allowed stepsize along the line, before the average (norm)
2267          * relative change in coordinate is smaller than precision
2268          */
2269         double minstep = 0;
2270         for (int i = 0; i < n; i++)
2271         {
2272             double tmp = fabs(xx[i]);
2273             if (tmp < 1.0)
2274             {
2275                 tmp = 1.0;
2276             }
2277             tmp = s[i] / tmp;
2278             minstep += tmp * tmp;
2279         }
2280         minstep = GMX_REAL_EPS / sqrt(minstep / n);
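             // Same criterion as in the CG minimizer: below this step length the RMS
             // relative coordinate change would be lost to rounding.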
2281
2282         if (stepsize < minstep)
2283         {
2284             converged = true;
2285             break;
2286         }
2287
2288         // Before taking any steps along the line, store the old position
2289         *last            = ems;
2290         real*      lastx = static_cast<real*>(last->s.x.data()[0]);
2291         real*      lastf = static_cast<real*>(last->f.view().force().data()[0]);
2292         const real Epot0 = ems.epot;
2293
2294         *sa = ems;
2295
2296         /* Take a step downhill.
2297          * In theory, we should find the actual minimum of the function in this
2298          * direction, somewhere along the line.
2299          * That is quite possible, but it turns out to take 5-10 function evaluations
2300          * for each line. However, we don't really need to find the exact minimum -
2301          * it is much better to start a new BFGS step in a modified direction as soon
2302          * as we are close to it. This will save a lot of energy evaluations.
2303          *
2304          * In practice, we just try to take a single step.
2305          * If it worked (i.e. lowered the energy), we increase the stepsize but
2306          * continue straight to the next BFGS step without trying to find any minimum,
2307          * i.e. we change the search direction too. If the line was smooth, it is
2308          * likely we are in a smooth region, and then it makes sense to take longer
2309          * steps in the modified search direction too.
2310          *
2311          * If it didn't work (higher energy), there must be a minimum somewhere between
2312          * the old position and the new one. Then we need to start by finding a lower
2313          * value before we change search direction. Since the energy was apparently
2314          * quite rough, we need to decrease the step size.
2315          *
2316          * Due to the finite numerical accuracy, it turns out that it is a good idea
2317          * to accept a SMALL increase in energy, if the derivative is still downhill.
2318          * This leads to lower final energies in the tests I've done. / Erik
2319          */
2320
2321         // State "A" is the first position along the line.
2322         // reference position along line is initially zero
2323         real a = 0;
2324
2325         // Check stepsize first. We do not allow displacements
2326         // larger than emstep.
2327         //
2328         real c;
2329         real maxdelta;
2330         do
2331         {
2332             // Pick a new position C by adding stepsize to A.
2333             c = a + stepsize;
2334
2335             // Calculate what the largest change in any individual coordinate
2336             // would be (translation along line * gradient along line)
2337             maxdelta = 0;
2338             for (int i = 0; i < n; i++)
2339             {
2340                 real delta = c * s[i];
2341                 if (delta > maxdelta)
2342                 {
2343                     maxdelta = delta;
2344                 }
2345             }
2346             // If any displacement is larger than the stepsize limit, reduce the step
2347             if (maxdelta > inputrec->em_stepsize)
2348             {
2349                 stepsize *= 0.1;
2350             }
2351         } while (maxdelta > inputrec->em_stepsize);
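             // The trial step c is now small enough that c * s[i] does not exceed
             // em_stepsize for any coordinate.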
2352
2353         // Take a trial step and move the coordinate array xc[] to position C
2354         real* xc = static_cast<real*>(sc->s.x.rvec_array()[0]);
2355         for (int i = 0; i < n; i++)
2356         {
2357             xc[i] = lastx[i] + c * s[i];
2358         }
2359
2360         neval++;
2361         // Calculate energy for the trial step in position C
2362         energyEvaluator.run(sc, mu_tot, vir, pres, step, FALSE, step);
2363
2364         // Calc line gradient in position C
2365         real*  fc  = static_cast<real*>(sc->f.view().force()[0]);
2366         double gpc = 0;
2367         for (int i = 0; i < n; i++)
2368         {
2369             gpc -= s[i] * fc[i]; /* f is negative gradient, thus the sign */
2370         }
2371         /* Sum the gradient along the line across CPUs */
2372         if (PAR(cr))
2373         {
2374             gmx_sumd(1, &gpc, cr);
2375         }
2376
2377         // This is the max amount of increase in energy we tolerate.
2378         // By allowing VERY small changes (close to numerical precision) we
2379         // frequently find even better (lower) final energies.
2380         double tmp = std::sqrt(GMX_REAL_EPS) * fabs(sa->epot);
2381
2382         // Accept the step if the energy is lower in the new position C (compared to A),
2383         // or if it is not significantly higher and the line derivative is still negative.
2384         bool foundlower = sc->epot < sa->epot || (gpc < 0 && sc->epot < (sa->epot + tmp));
2385         // If true, great, we found a better energy. We no longer try to alter the
2386         // stepsize, but simply accept this new better position. Then we select a new
2387         // search direction instead, which will be much more efficient than continuing
2388         // to take smaller steps along a line. Set fnorm based on the new C position,
2389         // which will be used to update the stepsize to 1/fnorm further down.
2390
2391         // If false, the energy is NOT lower in point C, i.e. it will be the same
2392         // or higher than in point A. In this case it is pointless to move to point C,
2393         // so we will have to do more iterations along the same line to find a smaller
2394         // value in the interval [A=0.0,C].
2395         // Here, A is still 0.0, but that will change when we do a search in the interval
2396         // [0.0,C] below. That search we will do by interpolation or bisection rather
2397         // than with the stepsize, so no need to modify it. For the next search direction
2398         // it will be reset to 1/fnorm anyway.
2399
2400         double step_taken;
2401         if (!foundlower)
2402         {
2403             // OK, if we didn't find a lower value we will have to locate one now - there must
2404             // be one in the interval [a,c].
2405             // The same thing is valid here, though: Don't spend dozens of iterations to find
2406             // the line minimum. We try to interpolate based on the derivative at the endpoints,
2407             // and only continue until we find a lower value. In most cases this means 1-2 iterations.
2408             // I also have a safeguard for potentially really pathological functions so we never
2409             // take more than 20 steps before we give up.
2410             // If we already found a lower value we just skip this step and continue to the update.
2411             real fnorm    = 0;
2412             int  nminstep = 0;
2413             do
2414             {
2415                 // Select a new trial point B in the interval [A,C].
2416                 // If the derivatives at points a & c have different sign we interpolate to zero,
2417                 // otherwise just do a bisection since there might be multiple minima/maxima
2418                 // inside the interval.
2419                 real b;
2420                 if (gpa < 0 && gpc > 0)
2421                 {
2422                     b = a + gpa * (a - c) / (gpc - gpa);
2423                 }
2424                 else
2425                 {
2426                     b = 0.5 * (a + c);
2427                 }
2428
2429                 /* safeguard if interpolation close to machine accuracy causes errors:
2430                  * never go outside the interval
2431                  */
2432                 if (b <= a || b >= c)
2433                 {
2434                     b = 0.5 * (a + c);
2435                 }
2436
2437                 // Take a trial step to point B
2438                 real* xb = static_cast<real*>(sb->s.x.rvec_array()[0]);
2439                 for (int i = 0; i < n; i++)
2440                 {
2441                     xb[i] = lastx[i] + b * s[i];
2442                 }
2443
2444                 neval++;
2445                 // Calculate energy for the trial step in point B
2446                 energyEvaluator.run(sb, mu_tot, vir, pres, step, FALSE, step);
2447                 fnorm = sb->fnorm;
2448
2449                 // Calculate gradient in point B
2450                 real*  fb  = static_cast<real*>(sb->f.view().force()[0]);
2451                 double gpb = 0;
2452                 for (int i = 0; i < n; i++)
2453                 {
2454                     gpb -= s[i] * fb[i]; /* f is negative gradient, thus the sign */
2455                 }
2456                 /* Sum the gradient along the line across CPUs */
2457                 if (PAR(cr))
2458                 {
2459                     gmx_sumd(1, &gpb, cr);
2460                 }
2461
2462                 // Keep one of the intervals [A,B] or [B,C] based on the value of the derivative
2463                 // at the new point B, and rename the endpoints of this new interval A and C.
2464                 if (gpb > 0)
2465                 {
2466                     /* Replace c endpoint with b */
2467                     c = b;
2468                     /* copy state b to c */
2469                     *sc = *sb;
2470                 }
2471                 else
2472                 {
2473                     /* Replace a endpoint with b */
2474                     a = b;
2475                     /* copy state b to a */
2476                     *sa = *sb;
2477                 }
2478
2479                 /*
2480                  * Stop search as soon as we find a value smaller than the endpoints,
2481                  * or if the tolerance is below machine precision.
2482                  * Never run more than 20 steps, no matter what.
2483                  */
2484                 nminstep++;
2485             } while ((sb->epot > sa->epot || sb->epot > sc->epot) && (nminstep < 20));
2486
2487             if (std::fabs(sb->epot - Epot0) < GMX_REAL_EPS || nminstep >= 20)
2488             {
2489                 /* OK. We couldn't find a significantly lower energy.
2490                  * If ncorr==0 this was steepest descent, and then we give up.
2491                  * If not, reset memory to restart as steepest descent before quitting.
2492                  */
2493                 if (ncorr == 0)
2494                 {
2495                     /* Converged */
2496                     converged = true;
2497                     break;
2498                 }
2499                 else
2500                 {
2501                     /* Reset memory */
2502                     ncorr = 0;
2503                     /* Search in gradient direction */
2504                     for (int i = 0; i < n; i++)
2505                     {
2506                         dx[point][i] = ff[i];
2507                     }
2508                     /* Reset stepsize */
2509                     stepsize = 1.0 / fnorm;
2510                     continue;
2511                 }
2512             }
2513
2514             /* Select min energy state of A & C, put the best in xx/ff/Epot
2515              */
2516             if (sc->epot < sa->epot)
2517             {
2518                 /* Use state C */
2519                 ems        = *sc;
2520                 step_taken = c;
2521             }
2522             else
2523             {
2524                 /* Use state A */
2525                 ems        = *sa;
2526                 step_taken = a;
2527             }
2528         }
2529         else
2530         {
2531             /* found lower */
2532             /* Use state C */
2533             ems        = *sc;
2534             step_taken = c;
2535         }
2536
2537         /* Update the memory information, and calculate a new
2538          * approximation of the inverse hessian
2539          */
2540
2541         /* Have new data in Epot, xx, ff */
2542         if (ncorr < nmaxcorr)
2543         {
2544             ncorr++;
2545         }
2546
2547         for (int i = 0; i < n; i++)
2548         {
2549             dg[point][i] = lastf[i] - ff[i];
2550             dx[point][i] *= step_taken;
2551         }
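             /* With f = -grad E, dg[point] now holds y_k = grad_new - grad_old and
              * dx[point] holds s_k = x_new - x_old (the search direction scaled by
              * the step actually taken).
              */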
2552
2553         real dgdg = 0;
2554         real dgdx = 0;
2555         for (int i = 0; i < n; i++)
2556         {
2557             dgdg += dg[point][i] * dg[point][i];
2558             dgdx += dg[point][i] * dx[point][i];
2559         }
2560
2561         const real diag = dgdx / dgdg;
2562
2563         rho[point] = 1.0 / dgdx;
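             /* rho_k = 1/(y_k . s_k); diag = (s_k . y_k)/(y_k . y_k) is the usual
              * scaling gamma_k of the initial inverse-Hessian estimate H0 = gamma_k I.
              */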
2564         point++;
2565
2566         if (point >= nmaxcorr)
2567         {
2568             point = 0;
2569         }
2570
2571         /* Update */
2572         for (int i = 0; i < n; i++)
2573         {
2574             p[i] = ff[i];
2575         }
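             /* p starts as the current force (-grad E); the standard L-BFGS two-loop
              * recursion below turns it into the approximate quasi-Newton direction
              * H^-1 f = -H^-1 grad E.
              */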
2576
2577         int cp = point;
2578
2579         /* Recursive update. First go back over the memory points */
2580         for (int k = 0; k < ncorr; k++)
2581         {
2582             cp--;
2583             if (cp < 0)
2584             {
2585                 cp = ncorr - 1;
2586             }
2587
2588             real sq = 0;
2589             for (int i = 0; i < n; i++)
2590             {
2591                 sq += dx[cp][i] * p[i];
2592             }
2593
2594             alpha[cp] = rho[cp] * sq;
2595
2596             for (int i = 0; i < n; i++)
2597             {
2598                 p[i] -= alpha[cp] * dg[cp][i];
2599             }
2600         }
2601
2602         for (int i = 0; i < n; i++)
2603         {
2604             p[i] *= diag;
2605         }
2606
2607         /* And then go forward again */
2608         for (int k = 0; k < ncorr; k++)
2609         {
2610             real yr = 0;
2611             for (int i = 0; i < n; i++)
2612             {
2613                 yr += p[i] * dg[cp][i];
2614             }
2615
2616             real beta = rho[cp] * yr;
2617             beta      = alpha[cp] - beta;
2618
2619             for (int i = 0; i < n; i++)
2620             {
2621                 p[i] += beta * dx[cp][i];
2622             }
2623
2624             cp++;
2625             if (cp >= ncorr)
2626             {
2627                 cp = 0;
2628             }
2629         }
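             /* p now holds the new quasi-Newton search direction; it is copied into
              * dx[point] below, with frozen degrees of freedom zeroed.
              */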
2630
2631         for (int i = 0; i < n; i++)
2632         {
2633             if (!frozen[i])
2634             {
2635                 dx[point][i] = p[i];
2636             }
2637             else
2638             {
2639                 dx[point][i] = 0;
2640             }
2641         }
2642
2643         /* Print it if necessary */
2644         if (MASTER(cr))
2645         {
2646             if (mdrunOptions.verbose)
2647             {
2648                 double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
2649                 fprintf(stderr,
2650                         "\rStep %d, Epot=%12.6e, Fnorm=%9.3e, Fmax=%9.3e (atom %d)\n",
2651                         step,
2652                         ems.epot,
2653                         ems.fnorm / sqrtNumAtoms,
2654                         ems.fmax,
2655                         ems.a_fmax + 1);
2656                 fflush(stderr);
2657             }
2658             /* Store the new (lower) energies */
2659             matrix nullBox = {};
2660             energyOutput.addDataAtEnergyStep(false,
2661                                              false,
2662                                              static_cast<double>(step),
2663                                              mdatoms->tmass,
2664                                              enerd,
2665                                              nullptr,
2666                                              nullptr,
2667                                              nullBox,
2668                                              PTCouplingArrays(),
2669                                              0,
2670                                              vir,
2671                                              pres,
2672                                              nullptr,
2673                                              mu_tot,
2674                                              constr);
2675
2676             do_log = do_per_step(step, inputrec->nstlog);
2677             do_ene = do_per_step(step, inputrec->nstenergy);
2678
2679             imdSession->fillEnergyRecord(step, TRUE);
2680
2681             if (do_log)
2682             {
2683                 EnergyOutput::printHeader(fplog, step, step);
2684             }
2685             energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf),
2686                                                do_ene,
2687                                                FALSE,
2688                                                FALSE,
2689                                                do_log ? fplog : nullptr,
2690                                                step,
2691                                                step,
2692                                                fr->fcdata.get(),
2693                                                nullptr);
2694         }
2695
2696         /* Send x and E to IMD client, if bIMD is TRUE. */
2697         if (imdSession->run(step, TRUE, state_global->box, state_global->x, 0) && MASTER(cr))
2698         {
2699             imdSession->sendPositionsAndEnergies();
2700         }
2701
2702         // Reset stepsize if we are doing more iterations
2703         stepsize = 1.0;
2704
2705         /* Stop when the maximum force lies below tolerance.
2706          * If we have reached machine precision, converged is already set to true.
2707          */
2708         converged = converged || (ems.fmax < inputrec->em_tol);
2709
2710     } /* End of the loop */
2711
2712     if (converged)
2713     {
2714         step--; /* we never took that last step in this case */
2715     }
2716     if (ems.fmax > inputrec->em_tol)
2717     {
2718         if (MASTER(cr))
2719         {
2720             warn_step(fplog, inputrec->em_tol, ems.fmax, step - 1 == number_steps, FALSE);
2721         }
2722         converged = FALSE;
2723     }
2724
2725     /* If we printed energy and/or logfile last step (which was the last step)
2726      * we don't have to do it again, but otherwise print the final values.
2727      */
2728     if (!do_log) /* Write final value to log since we didn't do anything last step */
2729     {
2730         EnergyOutput::printHeader(fplog, step, step);
2731     }
2732     if (!do_ene || !do_log) /* Write final energy file entries */
2733     {
2734         energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf),
2735                                            !do_ene,
2736                                            FALSE,
2737                                            FALSE,
2738                                            !do_log ? fplog : nullptr,
2739                                            step,
2740                                            step,
2741                                            fr->fcdata.get(),
2742                                            nullptr);
2743     }
2744
2745     /* Print some stuff... */
2746     if (MASTER(cr))
2747     {
2748         fprintf(stderr, "\nwriting lowest energy coordinates.\n");
2749     }
2750
2751     /* IMPORTANT!
2752      * For accurate normal mode calculation it is imperative that we
2753      * store the last conformation into the full precision binary trajectory.
2754      *
2755      * However, we should only do it if we did NOT already write this step
2756      * above (which we did if do_x or do_f was true).
2757      */
2758     const bool do_x = !do_per_step(step, inputrec->nstxout);
2759     const bool do_f = !do_per_step(step, inputrec->nstfout);
2760     write_em_traj(
2761             fplog, cr, outf, do_x, do_f, ftp2fn(efSTO, nfile, fnm), top_global, inputrec, step, &ems, state_global, observablesHistory);
2762
2763     if (MASTER(cr))
2764     {
2765         double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
2766         print_converged(stderr, LBFGS, inputrec->em_tol, step, converged, number_steps, &ems, sqrtNumAtoms);
2767         print_converged(fplog, LBFGS, inputrec->em_tol, step, converged, number_steps, &ems, sqrtNumAtoms);
2768
2769         fprintf(fplog, "\nPerformed %d energy evaluations in total.\n", neval);
2770     }
2771
2772     finish_em(cr, outf, walltime_accounting, wcycle);
2773
2774     /* To print the actual number of steps we needed somewhere */
2775     walltime_accounting_set_nsteps_done(walltime_accounting, step);
2776 }
2777
2778 void LegacySimulator::do_steep()
2779 {
2780     const char*       SD = "Steepest Descents";
2781     gmx_localtop_t    top(top_global.ffparams);
2782     gmx_global_stat_t gstat;
2783     real              stepsize;
2784     real              ustep;
2785     gmx_bool          bDone, bAbort, do_x, do_f;
2786     tensor            vir, pres;
2787     rvec              mu_tot = { 0 };
2788     int               nsteps;
2789     int               count          = 0;
2790     int               steps_accepted = 0;
2791     auto*             mdatoms        = mdAtoms->mdatoms();
2792
2793     GMX_LOG(mdlog.info)
2794             .asParagraph()
2795             .appendText(
2796                     "Note that activating steepest-descent energy minimization via the "
2797                     "integrator .mdp option and the command gmx mdrun may "
2798                     "be available in a different form in a future version of GROMACS, "
2799                     "e.g. gmx minimize and an .mdp option.");
2800
2801     /* Create 2 states on the stack and extract pointers that we will swap */
2802     em_state_t  s0{}, s1{};
2803     em_state_t* s_min = &s0;
2804     em_state_t* s_try = &s1;
2805
2806     ObservablesReducer observablesReducer = observablesReducerBuilder->build();
2807
2808     /* Init em and store the local state in s_try */
2809     init_em(fplog,
2810             mdlog,
2811             SD,
2812             cr,
2813             inputrec,
2814             imdSession,
2815             pull_work,
2816             state_global,
2817             top_global,
2818             s_try,
2819             &top,
2820             nrnb,
2821             fr,
2822             mdAtoms,
2823             &gstat,
2824             vsite,
2825             constr,
2826             nullptr);
2827     const bool        simulationsShareState = false;
2828     gmx_mdoutf*       outf                  = init_mdoutf(fplog,
2829                                    nfile,
2830                                    fnm,
2831                                    mdrunOptions,
2832                                    cr,
2833                                    outputProvider,
2834                                    mdModulesNotifiers,
2835                                    inputrec,
2836                                    top_global,
2837                                    nullptr,
2838                                    wcycle,
2839                                    StartingBehavior::NewSimulation,
2840                                    simulationsShareState,
2841                                    ms);
2842     gmx::EnergyOutput energyOutput(mdoutf_get_fp_ene(outf),
2843                                    top_global,
2844                                    *inputrec,
2845                                    pull_work,
2846                                    nullptr,
2847                                    false,
2848                                    StartingBehavior::NewSimulation,
2849                                    simulationsShareState,
2850                                    mdModulesNotifiers);
2851
2852     /* Print to log file  */
2853     print_em_start(fplog, cr, walltime_accounting, wcycle, SD);
2854
2855     /* Set variables for stepsize (in nm). This is the largest
2856      * step that we are going to make in any direction.
2857      */
2858     ustep    = inputrec->em_stepsize;
2859     stepsize = 0;
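         /* stepsize is recomputed each iteration as ustep / fmax (near the end
          * of the loop below); do_em_step() displaces every coordinate by
          * stepsize times its force, so the atom feeling the largest force
          * moves by exactly ustep.
          */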
2860
2861     /* Max number of steps  */
2862     nsteps = inputrec->nsteps;
2863
2864     if (MASTER(cr))
2865     {
2866         /* Print to the screen  */
2867         sp_header(stderr, SD, inputrec->em_tol, nsteps);
2868     }
2869     if (fplog)
2870     {
2871         sp_header(fplog, SD, inputrec->em_tol, nsteps);
2872     }
2873     EnergyEvaluator energyEvaluator{ fplog,
2874                                      mdlog,
2875                                      cr,
2876                                      ms,
2877                                      top_global,
2878                                      &top,
2879                                      inputrec,
2880                                      imdSession,
2881                                      pull_work,
2882                                      nrnb,
2883                                      wcycle,
2884                                      gstat,
2885                                      &observablesReducer,
2886                                      vsite,
2887                                      constr,
2888                                      mdAtoms,
2889                                      fr,
2890                                      runScheduleWork,
2891                                      enerd };
2892
2893     /**** HERE STARTS THE LOOP ****
2894      * count is the counter for the number of steps
2895      * bDone will be TRUE when the minimization has converged
2896      * bAbort will be TRUE when nsteps steps have been performed or when
2897      * the stepsize becomes smaller than is reasonable for machine precision
2898      */
2899     count  = 0;
2900     bDone  = FALSE;
2901     bAbort = FALSE;
2902     while (!bDone && !bAbort)
2903     {
2904         bAbort = (nsteps >= 0) && (count == nsteps);
2905
2906         /* set new coordinates, except for first step */
2907         bool validStep = true;
2908         if (count > 0)
2909         {
2910             validStep = do_em_step(
2911                     cr, inputrec, mdatoms, s_min, stepsize, s_min->f.view().forceWithPadding(), s_try, constr, count);
2912         }
2913
2914         if (validStep)
2915         {
2916             energyEvaluator.run(s_try, mu_tot, vir, pres, count, count == 0, count);
2917         }
2918         else
2919         {
2920             // Signal constraint error during stepping with energy=inf
2921             s_try->epot = std::numeric_limits<real>::infinity();
2922         }
2923
2924         if (MASTER(cr))
2925         {
2926             EnergyOutput::printHeader(fplog, count, count);
2927         }
2928
2929         if (count == 0)
2930         {
2931             s_min->epot = s_try->epot;
2932         }
2933
2934         /* Print it if necessary  */
2935         if (MASTER(cr))
2936         {
2937             if (mdrunOptions.verbose)
2938             {
2939                 fprintf(stderr,
2940                         "Step=%5d, Dmax= %6.1e nm, Epot= %12.5e Fmax= %11.5e, atom= %d%c",
2941                         count,
2942                         ustep,
2943                         s_try->epot,
2944                         s_try->fmax,
2945                         s_try->a_fmax + 1,
2946                         ((count == 0) || (s_try->epot < s_min->epot)) ? '\n' : '\r');
2947                 fflush(stderr);
2948             }
2949
2950             if ((count == 0) || (s_try->epot < s_min->epot))
2951             {
2952                 /* Store the new (lower) energies  */
2953                 matrix nullBox = {};
2954                 energyOutput.addDataAtEnergyStep(false,
2955                                                  false,
2956                                                  static_cast<double>(count),
2957                                                  mdatoms->tmass,
2958                                                  enerd,
2959                                                  nullptr,
2960                                                  nullptr,
2961                                                  nullBox,
2962                                                  PTCouplingArrays(),
2963                                                  0,
2964                                                  vir,
2965                                                  pres,
2966                                                  nullptr,
2967                                                  mu_tot,
2968                                                  constr);
2969
2970                 imdSession->fillEnergyRecord(count, TRUE);
2971
2972                 const bool do_dr = do_per_step(steps_accepted, inputrec->nstdisreout);
2973                 const bool do_or = do_per_step(steps_accepted, inputrec->nstorireout);
2974                 energyOutput.printStepToEnergyFile(
2975                         mdoutf_get_fp_ene(outf), TRUE, do_dr, do_or, fplog, count, count, fr->fcdata.get(), nullptr);
2976                 fflush(fplog);
2977             }
2978         }
2979
2980         /* Accept the step if the new energy is lower than the previous
2981          * minimum, or if this is the first step (there is no previous
2982          * energy to compare against yet).
2983          */
2984
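             /* Simple adaptive step length: every accepted step except the
              * first grows the maximum displacement by 20% (ustep *= 1.2),
              * while every rejected step halves it, so ustep settles at a
              * value the local energy surface tolerates.
              */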
2985         if ((count == 0) || (s_try->epot < s_min->epot))
2986         {
2987             steps_accepted++;
2988
2989             /* Test whether the convergence criterion is met...  */
2990             bDone = (s_try->fmax < inputrec->em_tol);
2991
2992             /* Copy the arrays for force, positions and energy  */
2993             /* The 'Min' array always holds the coords and forces of the minimal
2994                sampled energy  */
2995             swap_em_state(&s_min, &s_try);
2996             if (count > 0)
2997             {
2998                 ustep *= 1.2;
2999             }
3000
3001             /* Write to trn, if necessary */
3002             do_x = do_per_step(steps_accepted, inputrec->nstxout);
3003             do_f = do_per_step(steps_accepted, inputrec->nstfout);
3004             write_em_traj(
3005                     fplog, cr, outf, do_x, do_f, nullptr, top_global, inputrec, count, s_min, state_global, observablesHistory);
3006         }
3007         else
3008         {
3009             /* If energy is not smaller make the step smaller...  */
3010             ustep *= 0.5;
3011
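                 /* With domain decomposition, the retained minimum state can
                  * belong to an older partitioning than the current one; in
                  * that case repartition around s_min before reusing it as the
                  * starting point for the next, smaller step.
                  */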
3012             if (DOMAINDECOMP(cr) && s_min->s.ddp_count != cr->dd->ddp_count)
3013             {
3014                 /* Reload the old state */
3015                 em_dd_partition_system(fplog,
3016                                        mdlog,
3017                                        count,
3018                                        cr,
3019                                        top_global,
3020                                        inputrec,
3021                                        imdSession,
3022                                        pull_work,
3023                                        s_min,
3024                                        &top,
3025                                        mdAtoms,
3026                                        fr,
3027                                        vsite,
3028                                        constr,
3029                                        nrnb,
3030                                        wcycle);
3031             }
3032         }
3033
3034         // If the force is very small after finishing minimization,
3035         // we risk dividing by zero when calculating the step size.
3036         // So we check first if the minimization has stopped before
3037         // trying to obtain a new step size.
3038         if (!bDone)
3039         {
3040             /* Determine new step  */
3041             stepsize = ustep / s_min->fmax;
3042         }
3043
3044         /* Check if stepsize is too small, with 1 nm as a characteristic length */
3045 #if GMX_DOUBLE
3046         if (count == nsteps || ustep < 1e-12)
3047 #else
3048         if (count == nsteps || ustep < 1e-6)
3049 #endif
3050         {
3051             if (MASTER(cr))
3052             {
3053                 warn_step(fplog, inputrec->em_tol, s_min->fmax, count == nsteps, constr != nullptr);
3054             }
3055             bAbort = TRUE;
3056         }
3057
3058         /* Send IMD energies and positions, if IMD is active. */
3059         if (imdSession->run(count,
3060                             TRUE,
3061                             MASTER(cr) ? state_global->box : nullptr,
3062                             MASTER(cr) ? state_global->x : gmx::ArrayRef<gmx::RVec>(),
3063                             0)
3064             && MASTER(cr))
3065         {
3066             imdSession->sendPositionsAndEnergies();
3067         }
3068
3069         count++;
3070     } /* End of the loop  */
3071
3072     /* Print some data...  */
3073     if (MASTER(cr))
3074     {
3075         fprintf(stderr, "\nwriting lowest energy coordinates.\n");
3076     }
3077     write_em_traj(fplog,
3078                   cr,
3079                   outf,
3080                   TRUE,
3081                   inputrec->nstfout != 0,
3082                   ftp2fn(efSTO, nfile, fnm),
3083                   top_global,
3084                   inputrec,
3085                   count,
3086                   s_min,
3087                   state_global,
3088                   observablesHistory);
3089
3090     if (MASTER(cr))
3091     {
3092         double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
3093
3094         print_converged(stderr, SD, inputrec->em_tol, count, bDone, nsteps, s_min, sqrtNumAtoms);
3095         print_converged(fplog, SD, inputrec->em_tol, count, bDone, nsteps, s_min, sqrtNumAtoms);
3096     }
3097
3098     finish_em(cr, outf, walltime_accounting, wcycle);
3099
3100     /* To print the actual number of steps we needed somewhere */
3101     {
3102         // TODO: Avoid changing inputrec (#3854)
3103         auto* nonConstInputrec   = const_cast<t_inputrec*>(inputrec);
3104         nonConstInputrec->nsteps = count;
3105     }
3106
3107     walltime_accounting_set_nsteps_done(walltime_accounting, count);
3108 }
3109
3110 void LegacySimulator::do_nm()
3111 {
3112     const char*         NM = "Normal Mode Analysis";
3113     int                 nnodes;
3114     gmx_localtop_t      top(top_global.ffparams);
3115     gmx_global_stat_t   gstat;
3116     tensor              vir, pres;
3117     rvec                mu_tot = { 0 };
3118     rvec*               dfdx;
3119     gmx_bool            bSparse; /* use sparse matrix storage format */
3120     size_t              sz;
3121     gmx_sparsematrix_t* sparse_matrix = nullptr;
3122     real*               full_matrix   = nullptr;
3123
3124     /* Extra variables used for normal-mode analysis, not present in mdrun */
3125     int   row, col;
3126     real  der_range = 10.0 * std::sqrt(GMX_REAL_EPS);
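         /* Displacement used for the finite differences; it is tied to the
          * machine precision (GMX_REAL_EPS) so it stays well above round-off
          * noise in the forces while remaining small enough for the harmonic
          * regime.
          */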
3127     real  x_min;
3128     bool  bIsMaster = MASTER(cr);
3129     auto* mdatoms   = mdAtoms->mdatoms();
3130
3131     GMX_LOG(mdlog.info)
3132             .asParagraph()
3133             .appendText(
3134                     "Note that activating normal-mode analysis via the integrator "
3135                     ".mdp option and the command gmx mdrun may "
3136                     "be available in a different form in a future version of GROMACS, "
3137                     "e.g. gmx normal-modes.");
3138
3139     if (constr != nullptr)
3140     {
3141         gmx_fatal(
3142                 FARGS,
3143                 "Constraints present with Normal Mode Analysis, this combination is not supported");
3144     }
3145
3146     gmx_shellfc_t* shellfc;
3147
3148     em_state_t state_work{};
3149
3150     ObservablesReducer observablesReducer = observablesReducerBuilder->build();
3151
3152     /* Init em and store the local state in state_work */
3153     init_em(fplog,
3154             mdlog,
3155             NM,
3156             cr,
3157             inputrec,
3158             imdSession,
3159             pull_work,
3160             state_global,
3161             top_global,
3162             &state_work,
3163             &top,
3164             nrnb,
3165             fr,
3166             mdAtoms,
3167             &gstat,
3168             vsite,
3169             constr,
3170             &shellfc);
3171     const bool  simulationsShareState = false;
3172     gmx_mdoutf* outf                  = init_mdoutf(fplog,
3173                                    nfile,
3174                                    fnm,
3175                                    mdrunOptions,
3176                                    cr,
3177                                    outputProvider,
3178                                    mdModulesNotifiers,
3179                                    inputrec,
3180                                    top_global,
3181                                    nullptr,
3182                                    wcycle,
3183                                    StartingBehavior::NewSimulation,
3184                                    simulationsShareState,
3185                                    ms);
3186
3187     std::vector<int>       atom_index = get_atom_index(top_global);
3188     std::vector<gmx::RVec> fneg(atom_index.size(), { 0, 0, 0 });
3189     snew(dfdx, atom_index.size());
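         /* fneg will hold the forces from the negatively displaced
          * configuration; dfdx receives the derivatives of all forces with
          * respect to one displaced coordinate, i.e. one row of the Hessian.
          */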
3190
3191 #if !GMX_DOUBLE
3192     if (bIsMaster)
3193     {
3194         fprintf(stderr,
3195                 "NOTE: This version of GROMACS has been compiled in single precision,\n"
3196                 "      which MIGHT not be accurate enough for normal mode analysis.\n"
3197                 "      GROMACS now uses sparse matrix storage, so the memory requirements\n"
3198                 "      are fairly modest even if you recompile in double precision.\n\n");
3199     }
3200 #endif
3201
3202     /* Check if we can/should use sparse storage format.
3203      *
3204      * Sparse format is only useful when the Hessian itself is sparse, which it
3205      * will be when we use a cutoff.
3206      * For small systems (n<1000) it is easier to always use full matrix format, though.
3207      */
3208     if (EEL_FULL(fr->ic->eeltype) || fr->rlist == 0.0)
3209     {
3210         GMX_LOG(mdlog.warning)
3211                 .appendText("Non-cutoff electrostatics used, forcing full Hessian format.");
3212         bSparse = FALSE;
3213     }
3214     else if (atom_index.size() < 1000)
3215     {
3216         GMX_LOG(mdlog.warning)
3217                 .appendTextFormatted("Small system size (N=%zu), using full Hessian format.",
3218                                      atom_index.size());
3219         bSparse = FALSE;
3220     }
3221     else
3222     {
3223         GMX_LOG(mdlog.warning).appendText("Using compressed symmetric sparse Hessian format.");
3224         bSparse = TRUE;
3225     }
3226
3227     /* Number of dimensions, based on real atoms only, i.e. excluding vsites and shells */
3228     sz = DIM * atom_index.size();
3229
3230     fprintf(stderr, "Allocating Hessian memory...\n\n");
3231
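         /* The full format stores all sz*sz = (3N)^2 elements, which for
          * N = 1000 atoms is already 9e6 reals; the compressed symmetric
          * sparse format only keeps non-zero elements on or above the
          * diagonal (see the col >= row test further down).
          */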
3232     if (bSparse)
3233     {
3234         sparse_matrix                       = gmx_sparsematrix_init(sz);
3235         sparse_matrix->compressed_symmetric = TRUE;
3236     }
3237     else
3238     {
3239         snew(full_matrix, sz * sz);
3240     }
3241
3242     /* Write start time and temperature */
3243     print_em_start(fplog, cr, walltime_accounting, wcycle, NM);
3244
3245     /* Fudge the number of steps to twice the number of (real) atoms */
3246     {
3247         // TODO: Avoid changing inputrec (#3854)
3248         auto* nonConstInputrec   = const_cast<t_inputrec*>(inputrec);
3249         nonConstInputrec->nsteps = atom_index.size() * 2;
3250     }
3251
3252     if (bIsMaster)
3253     {
3254         fprintf(stderr,
3255                 "starting normal mode calculation '%s'\n%" PRId64 " steps.\n\n",
3256                 *(top_global.name),
3257                 inputrec->nsteps);
3258     }
3259
3260     nnodes = cr->nnodes;
3261
3262     /* Make the energy evaluation a single-rank force calculation */
3263     cr->nnodes = 1;
3264     EnergyEvaluator energyEvaluator{ fplog,
3265                                      mdlog,
3266                                      cr,
3267                                      ms,
3268                                      top_global,
3269                                      &top,
3270                                      inputrec,
3271                                      imdSession,
3272                                      pull_work,
3273                                      nrnb,
3274                                      wcycle,
3275                                      gstat,
3276                                      &observablesReducer,
3277                                      vsite,
3278                                      constr,
3279                                      mdAtoms,
3280                                      fr,
3281                                      runScheduleWork,
3282                                      enerd };
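         /* Single force/energy evaluation at the unperturbed input structure;
          * the resulting maximum force is checked below to warn when the
          * structure is not close enough to a minimum.
          */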
3283     energyEvaluator.run(&state_work, mu_tot, vir, pres, -1, TRUE, 0);
3284     cr->nnodes = nnodes;
3285
3286     /* if forces are not small, warn user */
3287     get_state_f_norm_max(cr, &(inputrec->opts), mdatoms, &state_work);
3288
3289     GMX_LOG(mdlog.warning).appendTextFormatted("Maximum force:%12.5e", state_work.fmax);
3290     if (state_work.fmax > 1.0e-3)
3291     {
3292         GMX_LOG(mdlog.warning)
3293                 .appendText(
3294                         "The force is probably not small enough to "
3295                         "ensure that you are at a minimum.\n"
3296                         "Be aware that negative eigenvalues may occur\n"
3297                         "when the resulting matrix is diagonalized.");
3298     }
3299
3300     /***********************************************************
3301      *
3302      *      Loop over all degrees of freedom (atom, dimension),
3303      *      i.e. over the rows of the Hessian.
3304      *
3305      *      do_force is called twice per row: once with a positive
3306      *      and once with a negative displacement.
3307      ************************************************************/
3308
3309     /* Steps are divided one by one over the nodes */
3310     bool bNS          = true;
3311     auto state_work_x = makeArrayRef(state_work.s.x);
3312     auto state_work_f = state_work.f.view().force();
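         /* Atoms are distributed round-robin over the ranks: rank i handles
          * atoms i, i + nnodes, i + 2*nnodes, ...; the master rank assembles
          * the Hessian, receiving rows computed on other ranks via MPI below.
          */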
3313     for (index aid = cr->nodeid; aid < ssize(atom_index); aid += nnodes)
3314     {
3315         size_t atom = atom_index[aid];
3316         for (size_t d = 0; d < DIM; d++)
3317         {
3318             int64_t step        = 0;
3319             int     force_flags = GMX_FORCE_STATECHANGED | GMX_FORCE_ALLFORCES;
3320             double  t           = 0;
3321
3322             x_min = state_work_x[atom][d];
3323
3324             for (unsigned int dx = 0; (dx < 2); dx++)
3325             {
3326                 if (dx == 0)
3327                 {
3328                     state_work_x[atom][d] = x_min - der_range;
3329                 }
3330                 else
3331                 {
3332                     state_work_x[atom][d] = x_min + der_range;
3333                 }
3334
3335                 /* Make the energy evaluation a single-rank force calculation */
3336                 cr->nnodes = 1;
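                     /* With shell particles or flexible constraints the shell
                      * degrees of freedom are first relaxed self-consistently
                      * at the displaced geometry; otherwise a single force
                      * evaluation is sufficient.
                      */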
3337                 if (shellfc)
3338                 {
3339                     /* Now is the time to relax the shells */
3340                     relax_shell_flexcon(fplog,
3341                                         cr,
3342                                         ms,
3343                                         mdrunOptions.verbose,
3344                                         nullptr,
3345                                         step,
3346                                         inputrec,
3347                                         imdSession,
3348                                         pull_work,
3349                                         bNS,
3350                                         force_flags,
3351                                         &top,
3352                                         constr,
3353                                         enerd,
3354                                         state_work.s.natoms,
3355                                         state_work.s.x.arrayRefWithPadding(),
3356                                         state_work.s.v.arrayRefWithPadding(),
3357                                         state_work.s.box,
3358                                         state_work.s.lambda,
3359                                         &state_work.s.hist,
3360                                         &state_work.f.view(),
3361                                         vir,
3362                                         *mdatoms,
3363                                         nrnb,
3364                                         wcycle,
3365                                         shellfc,
3366                                         fr,
3367                                         runScheduleWork,
3368                                         t,
3369                                         mu_tot,
3370                                         vsite,
3371                                         DDBalanceRegionHandler(nullptr));
3372                     bNS = false;
3373                     step++;
3374                 }
3375                 else
3376                 {
3377                     energyEvaluator.run(&state_work, mu_tot, vir, pres, aid * 2 + dx, FALSE, step);
3378                 }
3379
3380                 cr->nnodes = nnodes;
3381
3382                 if (dx == 0)
3383                 {
3384                     std::copy(state_work_f.begin(), state_work_f.begin() + atom_index.size(), fneg.begin());
3385                 }
3386             }
3387
3388             /* x is restored to original */
3389             state_work_x[atom][d] = x_min;
3390
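                 /* Central-difference estimate of one Hessian row:
                  *   H(row, col) = d2E/(dx_row dx_col) = -dF_col/dx_row
                  *               ~ -(F_col(+h) - F_col(-h)) / (2h),  h = der_range.
                  * state_work_f holds the forces from the positive displacement,
                  * fneg those from the negative one.
                  */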
3391             for (size_t j = 0; j < atom_index.size(); j++)
3392             {
3393                 for (size_t k = 0; (k < DIM); k++)
3394                 {
3395                     dfdx[j][k] = -(state_work_f[atom_index[j]][k] - fneg[j][k]) / (2 * der_range);
3396                 }
3397             }
3398
3399             if (!bIsMaster)
3400             {
3401 #if GMX_MPI
3402 #    define mpi_type GMX_MPI_REAL
3403                 MPI_Send(dfdx[0], atom_index.size() * DIM, mpi_type, MASTER(cr), cr->nodeid, cr->mpi_comm_mygroup);
3404 #endif
3405             }
3406             else
3407             {
3408                 for (index node = 0; (node < nnodes && aid + node < ssize(atom_index)); node++)
3409                 {
3410                     if (node > 0)
3411                     {
3412 #if GMX_MPI
3413                         MPI_Status stat;
3414                         MPI_Recv(dfdx[0], atom_index.size() * DIM, mpi_type, node, node, cr->mpi_comm_mygroup, &stat);
3415 #    undef mpi_type
3416 #endif
3417                     }
3418
3419                     row = (aid + node) * DIM + d;
3420
3421                     for (size_t j = 0; j < atom_index.size(); j++)
3422                     {
3423                         for (size_t k = 0; k < DIM; k++)
3424                         {
3425                             col = j * DIM + k;
3426
3427                             if (bSparse)
3428                             {
3429                                 if (col >= row && dfdx[j][k] != 0.0)
3430                                 {
3431                                     gmx_sparsematrix_increment_value(sparse_matrix, row, col, dfdx[j][k]);
3432                                 }
3433                             }
3434                             else
3435                             {
3436                                 full_matrix[row * sz + col] = dfdx[j][k];
3437                             }
3438                         }
3439                     }
3440                 }
3441             }
3442
3443             if (mdrunOptions.verbose && fplog)
3444             {
3445                 fflush(fplog);
3446             }
3447         }
3448         /* write progress */
3449         if (bIsMaster && mdrunOptions.verbose)
3450         {
3451             fprintf(stderr,
3452                     "\rFinished step %d out of %td",
3453                     std::min<int>(atom + nnodes, atom_index.size()),
3454                     ssize(atom_index));
3455             fflush(stderr);
3456         }
3457     }
3458
3459     if (bIsMaster)
3460     {
3461         fprintf(stderr, "\n\nWriting Hessian...\n");
3462         gmx_mtxio_write(ftp2fn(efMTX, nfile, fnm), sz, sz, full_matrix, sparse_matrix);
3463     }
3464
3465     finish_em(cr, outf, walltime_accounting, wcycle);
3466
3467     walltime_accounting_set_nsteps_done(walltime_accounting, atom_index.size() * 2);
3468 }
3469
3470 } // namespace gmx