2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team.
6 * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
7 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8 * and including many others, as listed in the AUTHORS file in the
9 * top-level source directory and at http://www.gromacs.org.
11 * GROMACS is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public License
13 * as published by the Free Software Foundation; either version 2.1
14 * of the License, or (at your option) any later version.
16 * GROMACS is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with GROMACS; if not, see
23 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
26 * If you want to redistribute modifications to GROMACS, please
27 * consider that scientific software is very special. Version
28 * control is crucial - bugs must be traceable. We will be happy to
29 * consider code for inclusion in the official distribution, but
30 * derived work must not be called official GROMACS. Details are found
31 * in the README & COPYING files - if they are missing, get the
32 * official version at http://www.gromacs.org.
34 * To help us fund GROMACS development, we humbly ask that you cite
35 * the research papers on the package. Check out http://www.gromacs.org.
39 * \brief This file defines integrators for energy minimization
41 * \author Berk Hess <hess@kth.se>
42 * \author Erik Lindahl <erik@kth.se>
43 * \ingroup module_mdrun
56 #include "gromacs/commandline/filenm.h"
57 #include "gromacs/domdec/domdec.h"
58 #include "gromacs/domdec/domdec_struct.h"
59 #include "gromacs/ewald/pme.h"
60 #include "gromacs/fileio/confio.h"
61 #include "gromacs/fileio/mtxio.h"
62 #include "gromacs/gmxlib/network.h"
63 #include "gromacs/gmxlib/nrnb.h"
64 #include "gromacs/imd/imd.h"
65 #include "gromacs/linearalgebra/sparsematrix.h"
66 #include "gromacs/listed-forces/manage-threading.h"
67 #include "gromacs/math/functions.h"
68 #include "gromacs/math/vec.h"
69 #include "gromacs/mdlib/constr.h"
70 #include "gromacs/mdlib/force.h"
71 #include "gromacs/mdlib/forcerec.h"
72 #include "gromacs/mdlib/gmx_omp_nthreads.h"
73 #include "gromacs/mdlib/md_support.h"
74 #include "gromacs/mdlib/mdatoms.h"
75 #include "gromacs/mdlib/mdebin.h"
76 #include "gromacs/mdlib/mdrun.h"
77 #include "gromacs/mdlib/mdsetup.h"
78 #include "gromacs/mdlib/ns.h"
79 #include "gromacs/mdlib/shellfc.h"
80 #include "gromacs/mdlib/sim_util.h"
81 #include "gromacs/mdlib/tgroup.h"
82 #include "gromacs/mdlib/trajectory_writing.h"
83 #include "gromacs/mdlib/update.h"
84 #include "gromacs/mdlib/vsite.h"
85 #include "gromacs/mdtypes/commrec.h"
86 #include "gromacs/mdtypes/inputrec.h"
87 #include "gromacs/mdtypes/md_enums.h"
88 #include "gromacs/mdtypes/state.h"
89 #include "gromacs/pbcutil/mshift.h"
90 #include "gromacs/pbcutil/pbc.h"
91 #include "gromacs/timing/wallcycle.h"
92 #include "gromacs/timing/walltime_accounting.h"
93 #include "gromacs/topology/mtop_util.h"
94 #include "gromacs/topology/topology.h"
95 #include "gromacs/utility/cstringutil.h"
96 #include "gromacs/utility/exceptions.h"
97 #include "gromacs/utility/fatalerror.h"
98 #include "gromacs/utility/logger.h"
99 #include "gromacs/utility/smalloc.h"
101 #include "integrator.h"
103 //! Utility structure for manipulating states during EM
105 //! Copy of the global state
111 //! Norm of the force
119 //! Print the EM starting conditions
// Starts the walltime and wall-cycle counters and prints the run-start
// banner for an energy minimizer.
// NOTE(review): listing is missing lines here (the `cr`/`name` parameters
// and braces are elided) — body uses `cr` and `name`, so they are presumably
// parameters; confirm against the full file.
120 static void print_em_start(FILE *fplog,
122 gmx_walltime_accounting_t walltime_accounting,
123 gmx_wallcycle_t wcycle,
126 walltime_accounting_start(walltime_accounting);
127 wallcycle_start(wcycle, ewcRUN);
128 print_start(fplog, cr, walltime_accounting, name);
131 //! Stop counting time for EM
// Mirror of print_em_start: stops the ewcRUN cycle counter and closes
// walltime accounting at the end of a minimization.
132 static void em_time_end(gmx_walltime_accounting_t walltime_accounting,
133 gmx_wallcycle_t wcycle)
135 wallcycle_stop(wcycle, ewcRUN);
137 walltime_accounting_end(walltime_accounting);
140 //! Printing a log file and console header
// Prints the minimizer name, force tolerance and step budget to `out`
// (called once for stderr and once for the log file).
141 static void sp_header(FILE *out, const char *minimizer, real ftol, int nsteps)
144 fprintf(out, "%s:\n", minimizer);
145 fprintf(out, " Tolerance (Fmax) = %12.5e\n", ftol);
146 fprintf(out, " Number of steps = %12d\n", nsteps);
149 //! Print warning message
// Emits a user-facing warning when minimization ends without meeting the
// Fmax tolerance, distinguishing three cases: non-finite forces, step
// budget exhausted, and machine-precision convergence. The message is
// wrapped to 78 columns and written to both stderr and the log file.
// NOTE(review): the `fmax`/`ftol`/`bLastStep`/`bConstrain` parameters and
// the sprintf-into-`buffer` calls are elided from this listing — TODO
// confirm parameter list against the full file.
150 static void warn_step(FILE *fp,
156 constexpr bool realIsDouble = GMX_DOUBLE;
// Case 1: at least one force component is NaN/inf (usually overlapping atoms).
159 if (!std::isfinite(fmax))
162 "\nEnergy minimization has stopped because the force "
163 "on at least one atom is not finite. This usually means "
164 "atoms are overlapping. Modify the input coordinates to "
165 "remove atom overlap or use soft-core potentials with "
166 "the free energy code to avoid infinite forces.\n%s",
// Extra hint only makes sense in single precision (realIsDouble selects it).
168 "You could also be lucky that switching to double precision "
169 "is sufficient to obtain finite forces.\n" :
// Case 2: ran out of steps before reaching the requested Fmax.
175 "\nEnergy minimization reached the maximum number "
176 "of steps before the forces reached the requested "
177 "precision Fmax < %g.\n", ftol);
// Case 3: converged to machine precision without meeting the tolerance.
182 "\nEnergy minimization has stopped, but the forces have "
183 "not converged to the requested precision Fmax < %g (which "
184 "may not be possible for your system). It stopped "
185 "because the algorithm tried to make a new step whose size "
186 "was too small, or there was no change in the energy since "
187 "last step. Either way, we regard the minimization as "
188 "converged to within the available machine precision, "
189 "given your starting configuration and EM parameters.\n%s%s",
192 "\nDouble precision normally gives you higher accuracy, but "
193 "this is often not needed for preparing to run molecular "
197 "You might need to increase your constraint accuracy, or turn\n"
198 "off constraints altogether (set constraints = none in mdp file)\n" :
// Write the wrapped message to the console and the log.
202 fputs(wrap_lines(buffer, 78, 0, FALSE), stderr);
203 fputs(wrap_lines(buffer, 78, 0, FALSE), fp);
206 //! Print message about convergence of the EM
// Reports the final convergence status (converged / machine precision /
// not converged) followed by the final potential energy, maximum force and
// normalized force norm. The two fprintf groups differ only in precision:
// %21.14e vs %14.7e — presumably selected by GMX_DOUBLE; the selecting
// condition is elided from this listing, confirm in the full file.
207 static void print_converged(FILE *fp, const char *alg, real ftol,
208 gmx_int64_t count, gmx_bool bDone, gmx_int64_t nsteps,
209 const em_state_t *ems, double sqrtNumAtoms)
211 char buf[STEPSTRSIZE];
215 fprintf(fp, "\n%s converged to Fmax < %g in %s steps\n",
216 alg, ftol, gmx_step_str(count, buf));
218 else if (count < nsteps)
220 fprintf(fp, "\n%s converged to machine precision in %s steps,\n"
221 "but did not reach the requested Fmax < %g.\n",
222 alg, gmx_step_str(count, buf), ftol);
226 fprintf(fp, "\n%s did not converge to Fmax < %g in %s steps.\n",
227 alg, ftol, gmx_step_str(count, buf));
// High-precision report (double-precision build).
231 fprintf(fp, "Potential Energy = %21.14e\n", ems->epot);
232 fprintf(fp, "Maximum force = %21.14e on atom %d\n", ems->fmax, ems->a_fmax + 1);
233 fprintf(fp, "Norm of force = %21.14e\n", ems->fnorm/sqrtNumAtoms);
// Lower-precision report (single-precision build).
235 fprintf(fp, "Potential Energy = %14.7e\n", ems->epot);
236 fprintf(fp, "Maximum force = %14.7e on atom %d\n", ems->fmax, ems->a_fmax + 1);
237 fprintf(fp, "Norm of force = %14.7e\n", ems->fnorm/sqrtNumAtoms);
241 //! Compute the norm and max of the force array in parallel
// Computes, over the home atoms of this rank, the force 2-norm (*fnorm),
// the maximum squared force (*fmax) and the index of the atom carrying it
// (*a_fmax), skipping frozen dimensions. On parallel runs all three are
// reduced globally via gmx_sumd.
242 static void get_f_norm_max(const t_commrec *cr,
243 t_grpopts *opts, t_mdatoms *mdatoms, const rvec *f,
244 real *fnorm, real *fmax, int *a_fmax)
248 int la_max, a_max, start, end, i, m, gf;
250 /* This routine finds the largest force and returns it.
251 * On parallel machines the global max is taken.
257 end = mdatoms->homenr;
// Branch 1: freeze groups present — accumulate only unfrozen dimensions.
258 if (mdatoms->cFREEZE)
260 for (i = start; i < end; i++)
262 gf = mdatoms->cFREEZE[i];
264 for (m = 0; m < DIM; m++)
266 if (!opts->nFreeze[gf][m])
268 fam += gmx::square(f[i][m]);
// Branch 2 (no freeze groups) — its loop body is elided from this listing.
281 for (i = start; i < end; i++)
// With DD, translate the local max-force atom index to a global index.
293 if (la_max >= 0 && DOMAINDECOMP(cr))
295 a_max = cr->dd->globalAtomIndices[la_max];
// Pack per-rank (fmax2, a_max) pairs plus fnorm2 into one buffer so a
// single gmx_sumd call reduces everything.
303 snew(sum, 2*cr->nnodes+1);
304 sum[2*cr->nodeid] = fmax2;
305 sum[2*cr->nodeid+1] = a_max;
306 sum[2*cr->nnodes] = fnorm2;
307 gmx_sumd(2*cr->nnodes+1, sum, cr);
308 fnorm2 = sum[2*cr->nnodes];
309 /* Determine the global maximum */
310 for (i = 0; i < cr->nnodes; i++)
312 if (sum[2*i] > fmax2)
// The atom index traveled as a double; +0.5 rounds it back to int.
315 a_max = (int)(sum[2*i+1] + 0.5);
323 *fnorm = sqrt(fnorm2);
335 //! Compute the norm of the force
// Convenience wrapper: runs get_f_norm_max on the force array stored in an
// em_state_t and writes fnorm/fmax/a_fmax back into that state.
336 static void get_state_f_norm_max(const t_commrec *cr,
337 t_grpopts *opts, t_mdatoms *mdatoms,
340 get_f_norm_max(cr, opts, mdatoms, as_rvec_array(ems->f.data()),
341 &ems->fnorm, &ems->fmax, &ems->a_fmax);
344 //! Initialize the energy minimization
// One-stop setup for all energy minimizers: initializes lambdas and IMD,
// optionally sets up shells/flexible constraints (NM path), builds the
// local topology and state (with or without domain decomposition),
// constrains the starting coordinates, and creates the output, energy-data
// and energy-bin objects. Several parameters (cr, ir) appear in the body
// but their declaration lines are elided from this listing.
345 static void init_em(FILE *fplog, const char *title,
347 const gmx_multisim_t *ms,
348 gmx::IMDOutputProvider *outputProvider,
350 const MdrunOptions &mdrunOptions,
351 t_state *state_global, gmx_mtop_t *top_global,
352 em_state_t *ems, gmx_localtop_t **top,
353 t_nrnb *nrnb, rvec mu_tot,
354 t_forcerec *fr, gmx_enerdata_t **enerd,
355 t_graph **graph, gmx::MDAtoms *mdAtoms, gmx_global_stat_t *gstat,
356 gmx_vsite_t *vsite, gmx::Constraints *constr, gmx_shellfc_t **shellfc,
357 int nfile, const t_filenm fnm[],
358 gmx_mdoutf_t *outf, t_mdebin **mdebin,
359 gmx_wallcycle_t wcycle)
365 fprintf(fplog, "Initiating %s\n", title);
// EM has no temperature coupling groups.
370 state_global->ngtc = 0;
372 /* Initialize lambda variables */
373 initialize_lambdas(fplog, ir, &(state_global->fep_state), state_global->lambda, nullptr);
378 /* Interactive molecular dynamics */
379 init_IMD(ir, cr, ms, top_global, fplog, 1,
380 MASTER(cr) ? as_rvec_array(state_global->x.data()) : nullptr,
381 nfile, fnm, nullptr, mdrunOptions);
// Normal-mode analysis path: shells/flexible constraints are handled by a
// dedicated shellfc object.
385 GMX_ASSERT(shellfc != nullptr, "With NM we always support shells");
387 *shellfc = init_shell_flexcon(stdout,
389 constr ? constr->numFlexibleConstraints() : 0,
395 GMX_ASSERT(EI_ENERGY_MINIMIZATION(ir->eI), "This else currently only handles energy minimizers, consider if your algorithm needs shell/flexible-constraint support");
397 /* With energy minimization, shells and flexible constraints are
398 * automatically minimized when treated like normal DOFS.
400 if (shellfc != nullptr)
406 auto mdatoms = mdAtoms->mdatoms();
// DD path: build local topology/state and distribute charge groups.
407 if (DOMAINDECOMP(cr))
409 *top = dd_init_local_top(top_global);
411 dd_init_local_state(cr->dd, state_global, &ems->s);
413 /* Distribute the charge groups over the nodes from the master node */
414 dd_partition_system(fplog, ir->init_step, cr, TRUE, 1,
415 state_global, top_global, ir,
416 &ems->s, &ems->f, mdAtoms, *top,
418 nrnb, nullptr, FALSE);
419 dd_store_state(cr->dd, &ems->s);
// Serial path: the local minimization state is a copy of the global state.
425 state_change_natoms(state_global, state_global->natoms);
426 /* Just copy the state */
427 ems->s = *state_global;
428 state_change_natoms(&ems->s, ems->s.natoms);
429 /* We need to allocate one element extra, since we might use
430 * (unaligned) 4-wide SIMD loads to access rvec entries.
432 ems->f.resize(gmx::paddedRVecVectorSize(ems->s.natoms));
435 mdAlgorithmsSetupAtomData(cr, ir, top_global, *top, fr,
437 constr, vsite, shellfc ? *shellfc : nullptr);
441 set_vsite_top(vsite, *top, mdatoms);
445 update_mdatoms(mdAtoms->mdatoms(), ems->s.lambda[efptMASS]);
449 // TODO how should this cross-module support dependency be managed?
// SHAKE cannot be used with EM; direct the user to LINCS.
450 if (ir->eConstrAlg == econtSHAKE &&
451 gmx_mtop_ftype_count(top_global, F_CONSTR) > 0)
453 gmx_fatal(FARGS, "Can not do energy minimization with %s, use %s\n",
454 econstr_names[econtSHAKE], econstr_names[econtLINCS]);
457 if (!ir->bContinuation)
459 /* Constrain the starting coordinates */
461 constr->apply(TRUE, TRUE,
463 as_rvec_array(ems->s.x.data()),
464 as_rvec_array(ems->s.x.data()),
467 ems->s.lambda[efptFEP], &dvdl_constr,
468 nullptr, nullptr, gmx::ConstraintVariable::Positions);
474 *gstat = global_stat_init(ir);
481 *outf = init_mdoutf(fplog, nfile, fnm, mdrunOptions, cr, outputProvider, ir, top_global, nullptr, wcycle);
484 init_enerdata(top_global->groups.grps[egcENER].nr, ir->fepvals->n_lambda,
// mdebin may legitimately be null for callers that do no energy output.
487 if (mdebin != nullptr)
489 /* Init bin for energy stuff */
490 *mdebin = init_mdebin(mdoutf_get_fp_ene(*outf), top_global, ir, nullptr);
494 calc_shifts(ems->s.box, fr->shift_vec);
497 //! Finalize the minimization
// Tears down a minimization run: signals any dedicated PME ranks to stop,
// then (presumably — the intervening lines are elided) closes output files
// via `outf` and stops the timers through em_time_end.
498 static void finish_em(const t_commrec *cr, gmx_mdoutf_t outf,
499 gmx_walltime_accounting_t walltime_accounting,
500 gmx_wallcycle_t wcycle)
502 if (!thisRankHasDuty(cr, DUTY_PME))
504 /* Tell the PME only node to finish */
505 gmx_pme_send_finish(cr);
510 em_time_end(walltime_accounting, wcycle);
513 //! Swap two different EM states during minimization
// Exchanges two em_state_t pointers (body elided from this listing) so the
// minimizers can accept a trial state cheaply, without copying state data.
514 static void swap_em_state(em_state_t **ems1, em_state_t **ems2)
523 //! Save the EM trajectory
// Writes a trajectory frame (coordinates and/or forces, plus optional IMD
// output) and, when `confout` is non-null, the final configuration file on
// the master rank. The `outf` and `state` parameters are used in the body
// but their declaration lines are elided from this listing.
524 static void write_em_traj(FILE *fplog, const t_commrec *cr,
526 gmx_bool bX, gmx_bool bF, const char *confout,
527 gmx_mtop_t *top_global,
528 t_inputrec *ir, gmx_int64_t step,
530 t_state *state_global,
531 ObservablesHistory *observablesHistory)
// Build the output flag mask from what the caller requested.
537 mdof_flags |= MDOF_X;
541 mdof_flags |= MDOF_F;
544 /* If we want IMD output, set appropriate MDOF flag */
547 mdof_flags |= MDOF_IMD;
550 mdoutf_write_to_trajectory_files(fplog, cr, outf, mdof_flags,
551 top_global, step, (double)step,
552 &state->s, state_global, observablesHistory,
555 if (confout != nullptr && MASTER(cr))
557 GMX_RELEASE_ASSERT(bX, "The code below assumes that (with domain decomposition), x is collected to state_global in the call above.");
558 /* With domain decomposition the call above collected the state->s.x
559 * into state_global->x. Without DD we copy the local state pointer.
561 if (!DOMAINDECOMP(cr))
563 state_global = &state->s;
566 if (ir->ePBC != epbcNONE && !ir->bPeriodicMols && DOMAINDECOMP(cr))
568 /* Make molecules whole only for confout writing */
569 do_pbc_mtop(fplog, ir->ePBC, state->s.box, top_global,
570 as_rvec_array(state_global->x.data()));
573 write_sto_conf_mtop(confout,
574 *top_global->name, top_global,
575 as_rvec_array(state_global->x.data()), nullptr, ir->ePBC, state->s.box);
579 //! \brief Do one minimization step
581 // \returns true when the step succeeded, false when a constraint error occurred
// Takes a trial step x2 = x1 + a*f from ems1 into a second state (ems2,
// elided from the visible parameter list), copying box, lambdas, CG search
// vector and DD bookkeeping, then constrains the new coordinates. The step
// loop is OpenMP-parallel; frozen dimensions are copied unchanged.
582 static bool do_em_step(const t_commrec *cr,
583 t_inputrec *ir, t_mdatoms *md,
584 em_state_t *ems1, real a, const PaddedRVecVector *force,
586 gmx::Constraints *constr,
593 int nthreads gmx_unused;
595 bool validStep = true;
// The input state must match the current DD partitioning.
600 if (DOMAINDECOMP(cr) && s1->ddp_count != cr->dd->ddp_count)
602 gmx_incons("state mismatch in do_em_step");
605 s2->flags = s1->flags;
// Grow the destination state lazily to match the source.
607 if (s2->natoms != s1->natoms)
609 state_change_natoms(s2, s1->natoms);
610 /* We need to allocate one element extra, since we might use
611 * (unaligned) 4-wide SIMD loads to access rvec entries.
613 ems2->f.resize(gmx::paddedRVecVectorSize(s2->natoms));
615 if (DOMAINDECOMP(cr) && s2->cg_gl.size() != s1->cg_gl.size())
617 s2->cg_gl.resize(s1->cg_gl.size());
620 copy_mat(s1->box, s2->box);
621 /* Copy free energy state */
622 s2->lambda = s1->lambda;
// NOTE(review): this copy_mat duplicates the one three lines up — it is
// harmless but redundant; confirm against the full file before removing.
623 copy_mat(s1->box, s2->box);
628 // cppcheck-suppress unreadVariable
629 nthreads = gmx_omp_nthreads_get(emntUpdate);
630 #pragma omp parallel num_threads(nthreads)
632 const rvec *x1 = as_rvec_array(s1->x.data());
633 rvec *x2 = as_rvec_array(s2->x.data());
634 const rvec *f = as_rvec_array(force->data());
637 #pragma omp for schedule(static) nowait
638 for (int i = start; i < end; i++)
646 for (int m = 0; m < DIM; m++)
// Frozen dimension: the elided branch presumably copies x1 unchanged.
648 if (ir->opts.nFreeze[gf][m])
// f is the negative gradient, so +a*f moves downhill.
654 x2[i][m] = x1[i][m] + a*f[i][m];
658 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
// Carry the conjugate-gradient search vector along with the state.
661 if (s2->flags & (1<<estCGP))
663 /* Copy the CG p vector */
664 const rvec *p1 = as_rvec_array(s1->cg_p.data());
665 rvec *p2 = as_rvec_array(s2->cg_p.data());
666 #pragma omp for schedule(static) nowait
667 for (int i = start; i < end; i++)
669 // Trivial OpenMP block that does not throw
670 copy_rvec(p1[i], p2[i]);
// Replicate DD bookkeeping so s2 is valid under the same partitioning.
674 if (DOMAINDECOMP(cr))
676 s2->ddp_count = s1->ddp_count;
678 /* OpenMP does not supported unsigned loop variables */
679 #pragma omp for schedule(static) nowait
680 for (int i = 0; i < static_cast<int>(s2->cg_gl.size()); i++)
682 s2->cg_gl[i] = s1->cg_gl[i];
684 s2->ddp_count_cg_gl = s1->ddp_count_cg_gl;
// Constrain the trial coordinates; failure flips validStep (elided).
692 constr->apply(TRUE, TRUE,
694 as_rvec_array(s1->x.data()), as_rvec_array(s2->x.data()),
696 s2->lambda[efptBONDED], &dvdl_constr,
697 nullptr, nullptr, gmx::ConstraintVariable::Positions);
701 /* This global reduction will affect performance at high
702 * parallelization, but we can not really avoid it.
703 * But usually EM is not run at high parallelization.
// All ranks must agree: the step is valid only if every rank succeeded.
705 int reductionBuffer = !validStep;
706 gmx_sumi(1, &reductionBuffer, cr);
707 validStep = (reductionBuffer == 0);
710 // We should move this check to the different minimizers
// Only steepest descent can recover from a constraint failure.
711 if (!validStep && ir->eI != eiSteep)
713 gmx_fatal(FARGS, "The coordinates could not be constrained. Minimizer '%s' can not handle constraint failures, use minimizer '%s' before using '%s'.",
714 EI(ir->eI), EI(eiSteep), EI(ir->eI));
721 //! Prepare EM for using domain decomposition parallelization
// Repartitions the system for the given EM state (non-master partitioning,
// no dynamic load balancing region) and records the resulting partition
// in ems->s so later state-count checks pass.
722 static void em_dd_partition_system(FILE *fplog, int step, const t_commrec *cr,
723 gmx_mtop_t *top_global, t_inputrec *ir,
724 em_state_t *ems, gmx_localtop_t *top,
725 gmx::MDAtoms *mdAtoms, t_forcerec *fr,
726 gmx_vsite_t *vsite, gmx::Constraints *constr,
727 t_nrnb *nrnb, gmx_wallcycle_t wcycle)
729 /* Repartition the domain decomposition */
730 dd_partition_system(fplog, step, cr, FALSE, 1,
731 nullptr, top_global, ir,
733 mdAtoms, top, fr, vsite, constr,
734 nrnb, wcycle, FALSE);
735 dd_store_state(cr->dd, &ems->s);
741 /*! \brief Class to handle the work of setting and doing an energy evaluation.
743 * This class is a mere aggregate of parameters to pass to evaluate an
744 * energy, so that future changes to names and types of them consume
745 * less time when refactoring other code.
747 * Aggregate initialization is used, for which the chief risk is that
748 * if a member is added at the end and not all initializer lists are
749 * updated, then the member will be value initialized, which will
750 * typically mean initialization to zero.
752 * We only want to construct one of these with an initializer list, so
753 * we explicitly delete the default constructor. */
// NOTE(review): several member-declaration lines are elided from this
// listing (e.g. the members belonging to the "Handles communication",
// "Holds the domain topology", "Handles virtual sites" etc. doc comments);
// consult the full file for the complete member list.
754 class EnergyEvaluator
757 //! We only intend to construct such objects with an initializer list.
758 #if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)
759 // Aspects of the C++11 spec changed after GCC 4.8.5, and
760 // compilation of the initializer list construction in
761 // runner.cpp fails in GCC 4.8.5.
762 EnergyEvaluator() = delete;
764 /*! \brief Evaluates an energy on the state in \c ems.
766 * \todo In practice, the same objects mu_tot, vir, and pres
767 * are always passed to this function, so we would rather have
768 * them as data members. However, their C-array types are
769 * unsuited for aggregate initialization. When the types
770 * improve, the call signature of this method can be reduced.
772 void run(em_state_t *ems, rvec mu_tot,
773 tensor vir, tensor pres,
774 gmx_int64_t count, gmx_bool bFirst);
777 //! Handles communication.
779 //! Coordinates multi-simulations.
780 const gmx_multisim_t *ms;
781 //! Holds the simulation topology.
782 gmx_mtop_t *top_global;
783 //! Holds the domain topology.
785 //! User input options.
786 t_inputrec *inputrec;
787 //! Manages flop accounting.
789 //! Manages wall cycle accounting.
790 gmx_wallcycle_t wcycle;
791 //! Coordinates global reduction.
792 gmx_global_stat_t gstat;
793 //! Handles virtual sites.
795 //! Handles constraints.
796 gmx::Constraints *constr;
797 //! Handles strange things.
799 //! Molecular graph for SHAKE.
801 //! Per-atom data for this domain.
802 gmx::MDAtoms *mdAtoms;
803 //! Handles how to calculate the forces.
805 //! Stores the computed energies.
806 gmx_enerdata_t *enerd;
// Evaluates forces and the potential energy for the state in `ems`:
// optionally rebuilds vsites and repartitions DD, calls do_force, reduces
// energies across ranks, applies dispersion corrections, projects out
// constraint force components, and finally computes fnorm/fmax for EM.
810 EnergyEvaluator::run(em_state_t *ems, rvec mu_tot,
811 tensor vir, tensor pres,
812 gmx_int64_t count, gmx_bool bFirst)
816 tensor force_vir, shake_vir, ekin;
817 real dvdl_constr, prescorr, enercorr, dvdlcorr;
820 /* Set the time to the initial time, the time does not change during EM */
821 t = inputrec->init_t;
// Decide whether neighbor searching (bNS) is needed this evaluation.
824 (DOMAINDECOMP(cr) && ems->s.ddp_count < cr->dd->ddp_count))
826 /* This is the first state or an old state used before the last ns */
832 if (inputrec->nstlist > 0)
840 construct_vsites(vsite, as_rvec_array(ems->s.x.data()), 1, nullptr,
841 top->idef.iparams, top->idef.il,
842 fr->ePBC, fr->bMolPBC, cr, ems->s.box);
845 if (DOMAINDECOMP(cr) && bNS)
847 /* Repartition the domain decomposition */
848 em_dd_partition_system(fplog, count, cr, top_global, inputrec,
849 ems, top, mdAtoms, fr, vsite, constr,
853 /* Calc force & energy on new trial position */
854 /* do_force always puts the charge groups in the box and shifts again
855 * We do not unshift, so molecules are always whole in congrad.c
857 do_force(fplog, cr, ms, inputrec, nullptr,
858 count, nrnb, wcycle, top, &top_global->groups,
859 ems->s.box, ems->s.x, &ems->s.hist,
860 ems->f, force_vir, mdAtoms->mdatoms(), enerd, fcd,
861 ems->s.lambda, graph, fr, vsite, mu_tot, t, nullptr,
862 GMX_FORCE_STATECHANGED | GMX_FORCE_ALLFORCES |
863 GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY |
864 (bNS ? GMX_FORCE_NS : 0),
866 DdOpenBalanceRegionBeforeForceComputation::yes :
867 DdOpenBalanceRegionBeforeForceComputation::no,
869 DdCloseBalanceRegionAfterForceComputation::yes :
870 DdCloseBalanceRegionAfterForceComputation::no);
872 /* Clear the unused shake virial and pressure */
873 clear_mat(shake_vir);
876 /* Communicate stuff when parallel */
// Normal-mode analysis (eiNM) reduces elsewhere, hence the exclusion.
877 if (PAR(cr) && inputrec->eI != eiNM)
879 wallcycle_start(wcycle, ewcMoveE);
881 global_stat(gstat, cr, enerd, force_vir, shake_vir, mu_tot,
882 inputrec, nullptr, nullptr, nullptr, 1, &terminate,
888 wallcycle_stop(wcycle, ewcMoveE);
891 /* Calculate long range corrections to pressure and energy */
892 calc_dispcorr(inputrec, fr, ems->s.box, ems->s.lambda[efptVDW],
893 pres, force_vir, &prescorr, &enercorr, &dvdlcorr);
894 enerd->term[F_DISPCORR] = enercorr;
895 enerd->term[F_EPOT] += enercorr;
896 enerd->term[F_PRES] += prescorr;
897 enerd->term[F_DVDL] += dvdlcorr;
899 ems->epot = enerd->term[F_EPOT];
903 /* Project out the constraint components of the force */
905 rvec *f_rvec = as_rvec_array(ems->f.data());
// In-place force-displacement projection: f_rvec is both input and output.
906 constr->apply(FALSE, FALSE,
908 as_rvec_array(ems->s.x.data()), f_rvec, f_rvec,
910 ems->s.lambda[efptBONDED], &dvdl_constr,
911 nullptr, &shake_vir, gmx::ConstraintVariable::ForceDispl);
912 enerd->term[F_DVDL_CONSTR] += dvdl_constr;
913 m_add(force_vir, shake_vir, vir);
// No constraints: the total virial is just the force virial.
917 copy_mat(force_vir, vir);
921 enerd->term[F_PRES] =
922 calc_pres(fr->ePBC, inputrec->nwall, ems->s.box, ekin, vir, pres);
924 sum_dhdl(enerd, ems->s.lambda, inputrec->fepvals);
// Minimizers need the force norm/max; NM does not.
926 if (EI_ENERGY_MINIMIZATION(inputrec->eI))
928 get_state_f_norm_max(cr, &(inputrec->opts), mdAtoms->mdatoms(), ems);
934 //! Parallel utility summing energies and forces
// Computes the Polak-Ribiere partial sum sum((fb - fm)*fb) when the two
// states were produced under *different* DD partitionings: the s_min force
// array is first gathered into a global vector (indexed via the global
// charge-group table) so it can be matched against s_b's local ordering.
935 static double reorder_partsum(const t_commrec *cr, t_grpopts *opts, t_mdatoms *mdatoms,
936 gmx_mtop_t *top_global,
937 em_state_t *s_min, em_state_t *s_b)
940 int ncg, *cg_gl, *index, c, cg, i, a0, a1, a, gf, m;
942 unsigned char *grpnrFREEZE;
946 fprintf(debug, "Doing reorder_partsum\n");
949 const rvec *fm = as_rvec_array(s_min->f.data());
950 const rvec *fb = as_rvec_array(s_b->f.data());
952 cgs_gl = dd_charge_groups_global(cr->dd);
953 index = cgs_gl->index;
955 /* Collect fm in a global vector fmg.
956 * This conflicts with the spirit of domain decomposition,
957 * but to fully optimize this a much more complicated algorithm is required.
960 snew(fmg, top_global->natoms);
962 ncg = s_min->s.cg_gl.size();
963 cg_gl = s_min->s.cg_gl.data();
// Scatter this rank's s_min forces into their global atom slots.
965 for (c = 0; c < ncg; c++)
970 for (a = a0; a < a1; a++)
972 copy_rvec(fm[i], fmg[a]);
// All-reduce so every rank holds the complete global force vector.
976 gmx_sum(top_global->natoms*3, fmg[0], cr);
978 /* Now we will determine the part of the sum for the cgs in state s_b */
979 ncg = s_b->s.cg_gl.size();
980 cg_gl = s_b->s.cg_gl.data();
984 grpnrFREEZE = top_global->groups.grpnr[egcFREEZE];
985 for (c = 0; c < ncg; c++)
990 for (a = a0; a < a1; a++)
992 if (mdatoms->cFREEZE && grpnrFREEZE)
996 for (m = 0; m < DIM; m++)
// Skip frozen dimensions, as in pr_beta's serial path.
998 if (!opts->nFreeze[gf][m])
1000 partsum += (fb[i][m] - fmg[a][m])*fb[i][m];
1012 //! Computes the Polak-Ribiere beta = sum((fb - fm)*fb) / |fm|^2.
1013 static real pr_beta(const t_commrec *cr, t_grpopts *opts, t_mdatoms *mdatoms,
1014 gmx_mtop_t *top_global,
1015 em_state_t *s_min, em_state_t *s_b)
1019 /* This is just the classical Polak-Ribiere calculation of beta;
1020 * it looks a bit complicated since we take freeze groups into account,
1021 * and might have to sum it in parallel runs.
// Fast path: atom ordering in s_min and s_b is identical (no DD, or both
// states stem from the same DD partitioning).
1024 if (!DOMAINDECOMP(cr) ||
1025 (s_min->s.ddp_count == cr->dd->ddp_count &&
1026 s_b->s.ddp_count == cr->dd->ddp_count))
1028 const rvec *fm = as_rvec_array(s_min->f.data());
1029 const rvec *fb = as_rvec_array(s_b->f.data());
1032 /* This part of code can be incorrect with DD,
1033 * since the atom ordering in s_b and s_min might differ.
1035 for (int i = 0; i < mdatoms->homenr; i++)
1037 if (mdatoms->cFREEZE)
1039 gf = mdatoms->cFREEZE[i];
1041 for (int m = 0; m < DIM; m++)
1043 if (!opts->nFreeze[gf][m])
// f is the negative gradient; numerator of the PR formula.
1045 sum += (fb[i][m] - fm[i][m])*fb[i][m];
1052 /* We need to reorder cgs while summing */
1053 sum = reorder_partsum(cr, opts, mdatoms, top_global, s_min, s_b);
// Reduce the partial sums across ranks before normalizing.
1057 gmx_sumd(1, &sum, cr);
1060 return sum/gmx::square(s_min->fnorm);
1069 const char *CG = "Polak-Ribiere Conjugate Gradients";
1071 gmx_localtop_t *top;
1072 gmx_enerdata_t *enerd;
1073 gmx_global_stat_t gstat;
1075 double tmp, minstep;
1077 real a, b, c, beta = 0.0;
1081 gmx_bool converged, foundlower;
1083 gmx_bool do_log = FALSE, do_ene = FALSE, do_x, do_f;
1085 int number_steps, neval = 0, nstcg = inputrec->nstcgsteep;
1087 int m, step, nminstep;
1088 auto mdatoms = mdAtoms->mdatoms();
1092 // Ensure the extra per-atom state array gets allocated
1093 state_global->flags |= (1<<estCGP);
1095 /* Create 4 states on the stack and extract pointers that we will swap */
1096 em_state_t s0 {}, s1 {}, s2 {}, s3 {};
1097 em_state_t *s_min = &s0;
1098 em_state_t *s_a = &s1;
1099 em_state_t *s_b = &s2;
1100 em_state_t *s_c = &s3;
1102 /* Init em and store the local state in s_min */
1103 init_em(fplog, CG, cr, ms, outputProvider, inputrec, mdrunOptions,
1104 state_global, top_global, s_min, &top,
1105 nrnb, mu_tot, fr, &enerd, &graph, mdAtoms, &gstat,
1106 vsite, constr, nullptr,
1107 nfile, fnm, &outf, &mdebin, wcycle);
1109 /* Print to log file */
1110 print_em_start(fplog, cr, walltime_accounting, wcycle, CG);
1112 /* Max number of steps */
1113 number_steps = inputrec->nsteps;
1117 sp_header(stderr, CG, inputrec->em_tol, number_steps);
1121 sp_header(fplog, CG, inputrec->em_tol, number_steps);
1124 EnergyEvaluator energyEvaluator {
1127 inputrec, nrnb, wcycle, gstat,
1128 vsite, constr, fcd, graph,
1131 /* Call the force routine and some auxiliary (neighboursearching etc.) */
1132 /* do_force always puts the charge groups in the box and shifts again
1133 * We do not unshift, so molecules are always whole in congrad.c
1135 energyEvaluator.run(s_min, mu_tot, vir, pres, -1, TRUE);
1139 /* Copy stuff to the energy bin for easy printing etc. */
1140 upd_mdebin(mdebin, FALSE, FALSE, (double)step,
1141 mdatoms->tmass, enerd, &s_min->s, inputrec->fepvals, inputrec->expandedvals, s_min->s.box,
1142 nullptr, nullptr, vir, pres, nullptr, mu_tot, constr);
1144 print_ebin_header(fplog, step, step);
1145 print_ebin(mdoutf_get_fp_ene(outf), TRUE, FALSE, FALSE, fplog, step, step, eprNORMAL,
1146 mdebin, fcd, &(top_global->groups), &(inputrec->opts), nullptr);
1149 /* Estimate/guess the initial stepsize */
1150 stepsize = inputrec->em_stepsize/s_min->fnorm;
1154 double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
1155 fprintf(stderr, " F-max = %12.5e on atom %d\n",
1156 s_min->fmax, s_min->a_fmax+1);
1157 fprintf(stderr, " F-Norm = %12.5e\n",
1158 s_min->fnorm/sqrtNumAtoms);
1159 fprintf(stderr, "\n");
1160 /* and copy to the log file too... */
1161 fprintf(fplog, " F-max = %12.5e on atom %d\n",
1162 s_min->fmax, s_min->a_fmax+1);
1163 fprintf(fplog, " F-Norm = %12.5e\n",
1164 s_min->fnorm/sqrtNumAtoms);
1165 fprintf(fplog, "\n");
1167 /* Start the loop over CG steps.
1168 * Each successful step is counted, and we continue until
1169 * we either converge or reach the max number of steps.
1172 for (step = 0; (number_steps < 0 || step <= number_steps) && !converged; step++)
1175 /* start taking steps in a new direction
1176 * First time we enter the routine, beta=0, and the direction is
1177 * simply the negative gradient.
1180 /* Calculate the new direction in p, and the gradient in this direction, gpa */
1181 rvec *pm = as_rvec_array(s_min->s.cg_p.data());
1182 const rvec *sfm = as_rvec_array(s_min->f.data());
1185 for (int i = 0; i < mdatoms->homenr; i++)
1187 if (mdatoms->cFREEZE)
1189 gf = mdatoms->cFREEZE[i];
1191 for (m = 0; m < DIM; m++)
1193 if (!inputrec->opts.nFreeze[gf][m])
1195 pm[i][m] = sfm[i][m] + beta*pm[i][m];
1196 gpa -= pm[i][m]*sfm[i][m];
1197 /* f is negative gradient, thus the sign */
1206 /* Sum the gradient along the line across CPUs */
1209 gmx_sumd(1, &gpa, cr);
1212 /* Calculate the norm of the search vector */
1213 get_f_norm_max(cr, &(inputrec->opts), mdatoms, pm, &pnorm, nullptr, nullptr);
1215 /* Just in case stepsize reaches zero due to numerical precision... */
1218 stepsize = inputrec->em_stepsize/pnorm;
1222 * Double check the value of the derivative in the search direction.
1223 * If it is positive it must be due to the old information in the
1224 * CG formula, so just remove that and start over with beta=0.
1225 * This corresponds to a steepest descent step.
1230 step--; /* Don't count this step since we are restarting */
1231 continue; /* Go back to the beginning of the big for-loop */
1234 /* Calculate minimum allowed stepsize, before the average (norm)
1235 * relative change in coordinate is smaller than precision
1238 for (int i = 0; i < mdatoms->homenr; i++)
1240 for (m = 0; m < DIM; m++)
1242 tmp = fabs(s_min->s.x[i][m]);
1251 /* Add up from all CPUs */
1254 gmx_sumd(1, &minstep, cr);
1257 minstep = GMX_REAL_EPS/sqrt(minstep/(3*state_global->natoms));
1259 if (stepsize < minstep)
1265 /* Write coordinates if necessary */
1266 do_x = do_per_step(step, inputrec->nstxout);
1267 do_f = do_per_step(step, inputrec->nstfout);
1269 write_em_traj(fplog, cr, outf, do_x, do_f, nullptr,
1270 top_global, inputrec, step,
1271 s_min, state_global, observablesHistory);
1273 /* Take a step downhill.
1274 * In theory, we should minimize the function along this direction.
1275 * That is quite possible, but it turns out to take 5-10 function evaluations
1276 * for each line. However, we dont really need to find the exact minimum -
1277 * it is much better to start a new CG step in a modified direction as soon
1278 * as we are close to it. This will save a lot of energy evaluations.
1280 * In practice, we just try to take a single step.
1281 * If it worked (i.e. lowered the energy), we increase the stepsize but
1282 * the continue straight to the next CG step without trying to find any minimum.
1283 * If it didn't work (higher energy), there must be a minimum somewhere between
1284 * the old position and the new one.
1286 * Due to the finite numerical accuracy, it turns out that it is a good idea
1287 * to even accept a SMALL increase in energy, if the derivative is still downhill.
1288 * This leads to lower final energies in the tests I've done. / Erik
1290 s_a->epot = s_min->epot;
1292 c = a + stepsize; /* reference position along line is zero */
1294 if (DOMAINDECOMP(cr) && s_min->s.ddp_count < cr->dd->ddp_count)
1296 em_dd_partition_system(fplog, step, cr, top_global, inputrec,
1297 s_min, top, mdAtoms, fr, vsite, constr,
1301 /* Take a trial step (new coords in s_c) */
1302 do_em_step(cr, inputrec, mdatoms, s_min, c, &s_min->s.cg_p, s_c,
1306 /* Calculate energy for the trial step */
1307 energyEvaluator.run(s_c, mu_tot, vir, pres, -1, FALSE);
1309 /* Calc derivative along line */
1310 const rvec *pc = as_rvec_array(s_c->s.cg_p.data());
1311 const rvec *sfc = as_rvec_array(s_c->f.data());
1313 for (int i = 0; i < mdatoms->homenr; i++)
1315 for (m = 0; m < DIM; m++)
1317 gpc -= pc[i][m]*sfc[i][m]; /* f is negative gradient, thus the sign */
1320 /* Sum the gradient along the line across CPUs */
1323 gmx_sumd(1, &gpc, cr);
1326 /* This is the max amount of increase in energy we tolerate */
1327 tmp = std::sqrt(GMX_REAL_EPS)*fabs(s_a->epot);
1329 /* Accept the step if the energy is lower, or if it is not significantly higher
1330 * and the line derivative is still negative.
1332 if (s_c->epot < s_a->epot || (gpc < 0 && s_c->epot < (s_a->epot + tmp)))
1335 /* Great, we found a better energy. Increase step for next iteration
1336 * if we are still going down, decrease it otherwise
1340 stepsize *= 1.618034; /* The golden section */
1344 stepsize *= 0.618034; /* 1/golden section */
1349 /* New energy is the same or higher. We will have to do some work
1350 * to find a smaller value in the interval. Take smaller step next time!
1353 stepsize *= 0.618034;
1359 /* OK, if we didn't find a lower value we will have to locate one now - there must
1360 * be one in the interval [a=0,c].
1361 * The same thing is valid here, though: Don't spend dozens of iterations to find
1362 * the line minimum. We try to interpolate based on the derivative at the endpoints,
1363 * and only continue until we find a lower value. In most cases this means 1-2 iterations.
1365 * I also have a safeguard for potentially really pathological functions so we never
1366 * take more than 20 steps before we give up ...
1368 * If we already found a lower value we just skip this step and continue to the update.
1377 /* Select a new trial point.
1378 * If the derivatives at points a & c have different sign we interpolate to zero,
1379 * otherwise just do a bisection.
1381 if (gpa < 0 && gpc > 0)
1383 b = a + gpa*(a-c)/(gpc-gpa);
1390 /* safeguard if interpolation close to machine accuracy causes errors:
1391 * never go outside the interval
1393 if (b <= a || b >= c)
1398 if (DOMAINDECOMP(cr) && s_min->s.ddp_count != cr->dd->ddp_count)
1400 /* Reload the old state */
1401 em_dd_partition_system(fplog, -1, cr, top_global, inputrec,
1402 s_min, top, mdAtoms, fr, vsite, constr,
1406 /* Take a trial step to this new point - new coords in s_b */
1407 do_em_step(cr, inputrec, mdatoms, s_min, b, &s_min->s.cg_p, s_b,
1411 /* Calculate energy for the trial step */
1412 energyEvaluator.run(s_b, mu_tot, vir, pres, -1, FALSE);
1414 /* p does not change within a step, but since the domain decomposition
1415 * might change, we have to use cg_p of s_b here.
1417 const rvec *pb = as_rvec_array(s_b->s.cg_p.data());
1418 const rvec *sfb = as_rvec_array(s_b->f.data());
1420 for (int i = 0; i < mdatoms->homenr; i++)
1422 for (m = 0; m < DIM; m++)
1424 gpb -= pb[i][m]*sfb[i][m]; /* f is negative gradient, thus the sign */
1427 /* Sum the gradient along the line across CPUs */
1430 gmx_sumd(1, &gpb, cr);
1435 fprintf(debug, "CGE: EpotA %f EpotB %f EpotC %f gpb %f\n",
1436 s_a->epot, s_b->epot, s_c->epot, gpb);
1439 epot_repl = s_b->epot;
1441 /* Keep one of the intervals based on the value of the derivative at the new point */
1444 /* Replace c endpoint with b */
1445 swap_em_state(&s_b, &s_c);
1451 /* Replace a endpoint with b */
1452 swap_em_state(&s_b, &s_a);
1458 * Stop search as soon as we find a value smaller than the endpoints.
1459 * Never run more than 20 steps, no matter what.
1463 while ((epot_repl > s_a->epot || epot_repl > s_c->epot) &&
1466 if (std::fabs(epot_repl - s_min->epot) < fabs(s_min->epot)*GMX_REAL_EPS ||
1469 /* OK. We couldn't find a significantly lower energy.
1470 * If beta==0 this was steepest descent, and then we give up.
1471 * If not, set beta=0 and restart with steepest descent before quitting.
1481 /* Reset memory before giving up */
1487 /* Select min energy state of A & C, put the best in B.
1489 if (s_c->epot < s_a->epot)
1493 fprintf(debug, "CGE: C (%f) is lower than A (%f), moving C to B\n",
1494 s_c->epot, s_a->epot);
1496 swap_em_state(&s_b, &s_c);
1503 fprintf(debug, "CGE: A (%f) is lower than C (%f), moving A to B\n",
1504 s_a->epot, s_c->epot);
1506 swap_em_state(&s_b, &s_a);
1515 fprintf(debug, "CGE: Found a lower energy %f, moving C to B\n",
1518 swap_em_state(&s_b, &s_c);
1522 /* new search direction */
1523 /* beta = 0 means forget all memory and restart with steepest descents. */
1524 if (nstcg && ((step % nstcg) == 0))
1530 /* s_min->fnorm cannot be zero, because then we would have converged
1534 /* Polak-Ribiere update.
1535 * Change to fnorm2/fnorm2_old for Fletcher-Reeves
1537 beta = pr_beta(cr, &inputrec->opts, mdatoms, top_global, s_min, s_b);
1539 /* Limit beta to prevent oscillations */
1540 if (fabs(beta) > 5.0)
1546 /* update positions */
1547 swap_em_state(&s_min, &s_b);
1550 /* Print it if necessary */
1553 if (mdrunOptions.verbose)
1555 double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
1556 fprintf(stderr, "\rStep %d, Epot=%12.6e, Fnorm=%9.3e, Fmax=%9.3e (atom %d)\n",
1557 step, s_min->epot, s_min->fnorm/sqrtNumAtoms,
1558 s_min->fmax, s_min->a_fmax+1);
1561 /* Store the new (lower) energies */
1562 upd_mdebin(mdebin, FALSE, FALSE, (double)step,
1563 mdatoms->tmass, enerd, &s_min->s, inputrec->fepvals, inputrec->expandedvals, s_min->s.box,
1564 nullptr, nullptr, vir, pres, nullptr, mu_tot, constr);
1566 do_log = do_per_step(step, inputrec->nstlog);
1567 do_ene = do_per_step(step, inputrec->nstenergy);
1569 /* Prepare IMD energy record, if bIMD is TRUE. */
1570 IMD_fill_energy_record(inputrec->bIMD, inputrec->imd, enerd, step, TRUE);
1574 print_ebin_header(fplog, step, step);
1576 print_ebin(mdoutf_get_fp_ene(outf), do_ene, FALSE, FALSE,
1577 do_log ? fplog : nullptr, step, step, eprNORMAL,
1578 mdebin, fcd, &(top_global->groups), &(inputrec->opts), nullptr);
1581 /* Send energies and positions to the IMD client if bIMD is TRUE. */
1582 if (do_IMD(inputrec->bIMD, step, cr, TRUE, state_global->box, as_rvec_array(state_global->x.data()), inputrec, 0, wcycle) && MASTER(cr))
1584 IMD_send_positions(inputrec->imd);
1587 /* Stop when the maximum force lies below tolerance.
1588 * If we have reached machine precision, converged is already set to true.
1590 converged = converged || (s_min->fmax < inputrec->em_tol);
1592 } /* End of the loop */
1594 /* IMD cleanup, if bIMD is TRUE. */
1595 IMD_finalize(inputrec->bIMD, inputrec->imd);
1599 step--; /* we never took that last step in this case */
1602 if (s_min->fmax > inputrec->em_tol)
1606 warn_step(fplog, inputrec->em_tol, s_min->fmax,
1607 step-1 == number_steps, FALSE);
1614 /* If we printed energy and/or logfile last step (which was the last step)
1615 * we don't have to do it again, but otherwise print the final values.
1619 /* Write final value to log since we didn't do anything the last step */
1620 print_ebin_header(fplog, step, step);
1622 if (!do_ene || !do_log)
1624 /* Write final energy file entries */
1625 print_ebin(mdoutf_get_fp_ene(outf), !do_ene, FALSE, FALSE,
1626 !do_log ? fplog : nullptr, step, step, eprNORMAL,
1627 mdebin, fcd, &(top_global->groups), &(inputrec->opts), nullptr);
1631 /* Print some stuff... */
1634 fprintf(stderr, "\nwriting lowest energy coordinates.\n");
1638 * For accurate normal mode calculation it is imperative that we
1639 * store the last conformation into the full precision binary trajectory.
1641 * However, we should only do it if we did NOT already write this step
1642 * above (which we did if do_x or do_f was true).
1644 do_x = !do_per_step(step, inputrec->nstxout);
1645 do_f = (inputrec->nstfout > 0 && !do_per_step(step, inputrec->nstfout));
1647 write_em_traj(fplog, cr, outf, do_x, do_f, ftp2fn(efSTO, nfile, fnm),
1648 top_global, inputrec, step,
1649 s_min, state_global, observablesHistory);
1654 double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
1655 print_converged(stderr, CG, inputrec->em_tol, step, converged, number_steps,
1656 s_min, sqrtNumAtoms);
1657 print_converged(fplog, CG, inputrec->em_tol, step, converged, number_steps,
1658 s_min, sqrtNumAtoms);
1660 fprintf(fplog, "\nPerformed %d energy evaluations in total.\n", neval);
1663 finish_em(cr, outf, walltime_accounting, wcycle);
1665 /* To print the actual number of steps we needed somewhere */
1666 walltime_accounting_set_nsteps_done(walltime_accounting, step);
/* \brief Low-memory BFGS (L-BFGS) energy minimizer.
 *
 * Quasi-Newton minimization that stores only the last nbfgscorr
 * position-difference (dx) and gradient-difference (dg) pairs instead of a
 * full inverse Hessian. Serial only, and incompatible with constraints
 * (both are rejected with gmx_fatal below).
 *
 * NOTE(review): this listing is an elided extract (original file line
 * numbers are embedded at the start of each line); comments describe only
 * the code that is visible here.
 */
1671 Integrator::do_lbfgs()
1673 static const char *LBFGS = "Low-Memory BFGS Minimizer";
1675 gmx_localtop_t *top;
1676 gmx_enerdata_t *enerd;
1677 gmx_global_stat_t gstat;
/* Correction-history bookkeeping: ncorr = corrections stored so far,
 * point = current slot in the circular memory, cp = cursor used when
 * walking the history. */
1679 int ncorr, nmaxcorr, point, cp, neval, nminstep;
1680 double stepsize, step_taken, gpa, gpb, gpc, tmp, minstep;
/* rho/alpha are the scalar work arrays of the L-BFGS two-loop recursion;
 * dx/dg hold the stored position/gradient differences; p is the trial
 * search direction; s points at the current search direction. */
1681 real *rho, *alpha, *p, *s, **dx, **dg;
1682 real a, b, c, maxdelta, delta;
1684 real dgdx, dgdg, sq, yr, beta;
1688 gmx_bool do_log, do_ene, do_x, do_f, foundlower, *frozen;
1690 int start, end, number_steps;
1692 int i, k, m, n, gf, step;
1694 auto mdatoms = mdAtoms->mdatoms();
/* L-BFGS is implemented for serial runs only (see message). */
1698 gmx_fatal(FARGS, "Cannot do parallel L-BFGS Minimization - yet.\n");
1701 if (nullptr != constr)
1703 gmx_fatal(FARGS, "The combination of constraints and L-BFGS minimization is not implemented. Either do not use constraints, or use another minimizer (e.g. steepest descent).");
/* n = total number of Cartesian degrees of freedom (3 per atom). */
1706 n = 3*state_global->natoms;
1707 nmaxcorr = inputrec->nbfgscorr;
/* Allocate one history slot per correction step. */
1712 snew(rho, nmaxcorr);
1713 snew(alpha, nmaxcorr);
1716 for (i = 0; i < nmaxcorr; i++)
1722 for (i = 0; i < nmaxcorr; i++)
/* Set up the minimizer machinery and store the local state in ems. */
1731 init_em(fplog, LBFGS, cr, ms, outputProvider, inputrec, mdrunOptions,
1732 state_global, top_global, &ems, &top,
1733 nrnb, mu_tot, fr, &enerd, &graph, mdAtoms, &gstat,
1734 vsite, constr, nullptr,
1735 nfile, fnm, &outf, &mdebin, wcycle);
1738 end = mdatoms->homenr;
1740 /* We need 4 working states */
1741 em_state_t s0 {}, s1 {}, s2 {}, s3 {};
1742 em_state_t *sa = &s0;
1743 em_state_t *sb = &s1;
1744 em_state_t *sc = &s2;
1745 em_state_t *last = &s3;
1746 /* Initialize by copying the state from ems (we could skip x and f here) */
1751 /* Print to log file */
1752 print_em_start(fplog, cr, walltime_accounting, wcycle, LBFGS);
1754 do_log = do_ene = do_x = do_f = TRUE;
1756 /* Max number of steps */
1757 number_steps = inputrec->nsteps;
1759 /* Create a 3*natoms index to tell whether each degree of freedom is frozen */
1761 for (i = start; i < end; i++)
1763 if (mdatoms->cFREEZE)
1765 gf = mdatoms->cFREEZE[i];
/* Copy the per-dimension freeze flags of atom i's freeze group. */
1767 for (m = 0; m < DIM; m++)
1769 frozen[3*i+m] = inputrec->opts.nFreeze[gf][m];
1774 sp_header(stderr, LBFGS, inputrec->em_tol, number_steps);
1778 sp_header(fplog, LBFGS, inputrec->em_tol, number_steps);
/* Place virtual sites before the first energy/force evaluation. */
1783 construct_vsites(vsite, as_rvec_array(state_global->x.data()), 1, nullptr,
1784 top->idef.iparams, top->idef.il,
1785 fr->ePBC, fr->bMolPBC, cr, state_global->box);
1788 /* Call the force routine and some auxiliary (neighboursearching etc.) */
1789 /* do_force always puts the charge groups in the box and shifts again
1790 * We do not unshift, so molecules are always whole
1793 EnergyEvaluator energyEvaluator {
1796 inputrec, nrnb, wcycle, gstat,
1797 vsite, constr, fcd, graph,
/* Initial energy/force evaluation (step index -1, first call TRUE). */
1800 energyEvaluator.run(&ems, mu_tot, vir, pres, -1, TRUE);
1804 /* Copy stuff to the energy bin for easy printing etc. */
1805 upd_mdebin(mdebin, FALSE, FALSE, (double)step,
1806 mdatoms->tmass, enerd, state_global, inputrec->fepvals, inputrec->expandedvals, state_global->box,
1807 nullptr, nullptr, vir, pres, nullptr, mu_tot, constr);
1809 print_ebin_header(fplog, step, step);
1810 print_ebin(mdoutf_get_fp_ene(outf), TRUE, FALSE, FALSE, fplog, step, step, eprNORMAL,
1811 mdebin, fcd, &(top_global->groups), &(inputrec->opts), nullptr);
1814 /* Set the initial step.
1815 * since it will be multiplied by the non-normalized search direction
1816 * vector (force vector the first time), we scale it by the
1817 * norm of the force.
/* Report initial Fmax / normalized Fnorm to screen and log. */
1822 double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
1823 fprintf(stderr, "Using %d BFGS correction steps.\n\n", nmaxcorr);
1824 fprintf(stderr, " F-max = %12.5e on atom %d\n", ems.fmax, ems.a_fmax + 1);
1825 fprintf(stderr, " F-Norm = %12.5e\n", ems.fnorm/sqrtNumAtoms);
1826 fprintf(stderr, "\n");
1827 /* and copy to the log file too... */
1828 fprintf(fplog, "Using %d BFGS correction steps.\n\n", nmaxcorr);
1829 fprintf(fplog, " F-max = %12.5e on atom %d\n", ems.fmax, ems.a_fmax + 1);
1830 fprintf(fplog, " F-Norm = %12.5e\n", ems.fnorm/sqrtNumAtoms);
1831 fprintf(fplog, "\n");
1834 // Point is an index to the memory of search directions, where 0 is the first one.
1837 // Set initial search direction to the force (-gradient), or 0 for frozen particles.
1838 real *fInit = static_cast<real *>(as_rvec_array(ems.f.data())[0]);
1839 for (i = 0; i < n; i++)
1843 dx[point][i] = fInit[i]; /* Initial search direction */
1851 // Stepsize will be modified during the search, and actually it is not critical
1852 // (the main efficiency in the algorithm comes from changing directions), but
1853 // we still need an initial value, so estimate it as the inverse of the norm
1854 // so we take small steps where the potential fluctuates a lot.
1855 stepsize = 1.0/ems.fnorm;
1857 /* Start the loop over BFGS steps.
1858 * Each successful step is counted, and we continue until
1859 * we either converge or reach the max number of steps.
1864 /* Set the gradient from the force */
/* number_steps < 0 means run until convergence (no step limit). */
1866 for (step = 0; (number_steps < 0 || step <= number_steps) && !converged; step++)
1869 /* Write coordinates if necessary */
1870 do_x = do_per_step(step, inputrec->nstxout);
1871 do_f = do_per_step(step, inputrec->nstfout);
1876 mdof_flags |= MDOF_X;
1881 mdof_flags |= MDOF_F;
1886 mdof_flags |= MDOF_IMD;
1889 mdoutf_write_to_trajectory_files(fplog, cr, outf, mdof_flags,
1890 top_global, step, (real)step, &ems.s, state_global, observablesHistory, ems.f);
1892 /* Do the linesearching in the direction dx[point][0..(n-1)] */
1894 /* make s a pointer to current search direction - point=0 first time we get here */
/* Flat real* views of the current coordinates and forces. */
1897 real *xx = static_cast<real *>(as_rvec_array(ems.s.x.data())[0]);
1898 real *ff = static_cast<real *>(as_rvec_array(ems.f.data())[0]);
1900 // calculate line gradient in position A
1901 for (gpa = 0, i = 0; i < n; i++)
1906 /* Calculate minimum allowed stepsize along the line, before the average (norm)
1907 * relative change in coordinate is smaller than precision
1909 for (minstep = 0, i = 0; i < n; i++)
1919 minstep = GMX_REAL_EPS/sqrt(minstep/n);
/* Converged to machine precision: the step can no longer change coordinates. */
1921 if (stepsize < minstep)
1927 // Before taking any steps along the line, store the old position
1929 real *lastx = static_cast<real *>(as_rvec_array(last->s.x.data())[0]);
1930 real *lastf = static_cast<real *>(as_rvec_array(last->f.data())[0]);
1935 /* Take a step downhill.
1936 * In theory, we should find the actual minimum of the function in this
1937 * direction, somewhere along the line.
1938 * That is quite possible, but it turns out to take 5-10 function evaluations
1939 * for each line. However, we don't really need to find the exact minimum -
1940 * it is much better to start a new BFGS step in a modified direction as soon
1941 * as we are close to it. This will save a lot of energy evaluations.
1943 * In practice, we just try to take a single step.
1944 * If it worked (i.e. lowered the energy), we increase the stepsize but
1945 * continue straight to the next BFGS step without trying to find any minimum,
1946 * i.e. we change the search direction too. If the line was smooth, it is
1947 * likely we are in a smooth region, and then it makes sense to take longer
1948 * steps in the modified search direction too.
1950 * If it didn't work (higher energy), there must be a minimum somewhere between
1951 * the old position and the new one. Then we need to start by finding a lower
1952 * value before we change search direction. Since the energy was apparently
1953 * quite rough, we need to decrease the step size.
1955 * Due to the finite numerical accuracy, it turns out that it is a good idea
1956 * to accept a SMALL increase in energy, if the derivative is still downhill.
1957 * This leads to lower final energies in the tests I've done. / Erik
1960 // State "A" is the first position along the line.
1961 // reference position along line is initially zero
1964 // Check stepsize first. We do not allow displacements
1965 // larger than emstep.
1969 // Pick a new position C by adding stepsize to A.
1972 // Calculate what the largest change in any individual coordinate
1973 // would be (translation along line * gradient along line)
1975 for (i = 0; i < n; i++)
1978 if (delta > maxdelta)
1983 // If any displacement is larger than the stepsize limit, reduce the step
1984 if (maxdelta > inputrec->em_stepsize)
1989 while (maxdelta > inputrec->em_stepsize);
1991 // Take a trial step and move the coordinate array xc[] to position C
1992 real *xc = static_cast<real *>(as_rvec_array(sc->s.x.data())[0]);
1993 for (i = 0; i < n; i++)
1995 xc[i] = lastx[i] + c*s[i];
1999 // Calculate energy for the trial step in position C
2000 energyEvaluator.run(sc, mu_tot, vir, pres, step, FALSE);
2002 // Calc line gradient in position C
2003 real *fc = static_cast<real *>(as_rvec_array(sc->f.data())[0]);
2004 for (gpc = 0, i = 0; i < n; i++)
2006 gpc -= s[i]*fc[i]; /* f is negative gradient, thus the sign */
2008 /* Sum the gradient along the line across CPUs */
2011 gmx_sumd(1, &gpc, cr);
2014 // This is the max amount of increase in energy we tolerate.
2015 // By allowing VERY small changes (close to numerical precision) we
2016 // frequently find even better (lower) final energies.
2017 tmp = std::sqrt(GMX_REAL_EPS)*fabs(sa->epot);
2019 // Accept the step if the energy is lower in the new position C (compared to A),
2020 // or if it is not significantly higher and the line derivative is still negative.
2021 if (sc->epot < sa->epot || (gpc < 0 && sc->epot < (sa->epot + tmp)))
2023 // Great, we found a better energy. We no longer try to alter the
2024 // stepsize, but simply accept this new better position. Then we select a new
2025 // search direction instead, which will be much more efficient than continuing
2026 // to take smaller steps along a line. Set fnorm based on the new C position,
2027 // which will be used to update the stepsize to 1/fnorm further down.
2032 // If we got here, the energy is NOT lower in point C, i.e. it will be the same
2033 // or higher than in point A. In this case it is pointless to move to point C,
2034 // so we will have to do more iterations along the same line to find a smaller
2035 // value in the interval [A=0.0,C].
2036 // Here, A is still 0.0, but that will change when we do a search in the interval
2037 // [0.0,C] below. That search we will do by interpolation or bisection rather
2038 // than with the stepsize, so no need to modify it. For the next search direction
2039 // it will be reset to 1/fnorm anyway.
2045 // OK, if we didn't find a lower value we will have to locate one now - there must
2046 // be one in the interval [a,c].
2047 // The same thing is valid here, though: Don't spend dozens of iterations to find
2048 // the line minimum. We try to interpolate based on the derivative at the endpoints,
2049 // and only continue until we find a lower value. In most cases this means 1-2 iterations.
2050 // I also have a safeguard for potentially really pathological functions so we never
2051 // take more than 20 steps before we give up.
2052 // If we already found a lower value we just skip this step and continue to the update.
2057 // Select a new trial point B in the interval [A,C].
2058 // If the derivatives at points a & c have different sign we interpolate to zero,
2059 // otherwise just do a bisection since there might be multiple minima/maxima
2060 // inside the interval.
2061 if (gpa < 0 && gpc > 0)
/* Secant step: root of the linearly-interpolated line derivative. */
2063 b = a + gpa*(a-c)/(gpc-gpa);
2070 /* safeguard if interpolation close to machine accuracy causes errors:
2071 * never go outside the interval
2073 if (b <= a || b >= c)
2078 // Take a trial step to point B
2079 real *xb = static_cast<real *>(as_rvec_array(sb->s.x.data())[0]);
2080 for (i = 0; i < n; i++)
2082 xb[i] = lastx[i] + b*s[i];
2086 // Calculate energy for the trial step in point B
2087 energyEvaluator.run(sb, mu_tot, vir, pres, step, FALSE);
2090 // Calculate gradient in point B
2091 real *fb = static_cast<real *>(as_rvec_array(sb->f.data())[0]);
2092 for (gpb = 0, i = 0; i < n; i++)
2094 gpb -= s[i]*fb[i]; /* f is negative gradient, thus the sign */
2097 /* Sum the gradient along the line across CPUs */
2100 gmx_sumd(1, &gpb, cr);
2103 // Keep one of the intervals [A,B] or [B,C] based on the value of the derivative
2104 // at the new point B, and rename the endpoints of this new interval A and C.
2107 /* Replace c endpoint with b */
2109 /* swap states b and c */
2110 swap_em_state(&sb, &sc);
2114 /* Replace a endpoint with b */
2116 /* swap states a and b */
2117 swap_em_state(&sa, &sb);
2121 * Stop search as soon as we find a value smaller than the endpoints,
2122 * or if the tolerance is below machine precision.
2123 * Never run more than 20 steps, no matter what.
2127 while ((sb->epot > sa->epot || sb->epot > sc->epot) && (nminstep < 20));
/* No significant improvement found within 20 refinement steps. */
2129 if (std::fabs(sb->epot - Epot0) < GMX_REAL_EPS || nminstep >= 20)
2131 /* OK. We couldn't find a significantly lower energy.
2132 * If ncorr==0 this was steepest descent, and then we give up.
2133 * If not, reset memory to restart as steepest descent before quitting.
2145 /* Search in gradient direction */
2146 for (i = 0; i < n; i++)
2148 dx[point][i] = ff[i];
2150 /* Reset stepsize */
2151 stepsize = 1.0/fnorm;
2156 /* Select min energy state of A & C, put the best in xx/ff/Epot
2158 if (sc->epot < sa->epot)
2180 /* Update the memory information, and calculate a new
2181 * approximation of the inverse hessian
2184 /* Have new data in Epot, xx, ff */
2185 if (ncorr < nmaxcorr)
/* Store this step's gradient difference and scale the stored direction
 * by the step length actually taken. */
2190 for (i = 0; i < n; i++)
2192 dg[point][i] = lastf[i]-ff[i];
2193 dx[point][i] *= step_taken;
2198 for (i = 0; i < n; i++)
2200 dgdg += dg[point][i]*dg[point][i];
2201 dgdx += dg[point][i]*dx[point][i];
/* rho = 1 / (dg . dx), the curvature scaling of the L-BFGS update. */
2206 rho[point] = 1.0/dgdx;
/* Wrap the circular history index. */
2209 if (point >= nmaxcorr)
2215 for (i = 0; i < n; i++)
2222 /* Recursive update. First go back over the memory points */
/* This matches the standard L-BFGS two-loop recursion: the backward pass
 * computes alpha[cp] and subtracts alpha*dg from p ... */
2223 for (k = 0; k < ncorr; k++)
2232 for (i = 0; i < n; i++)
2234 sq += dx[cp][i]*p[i];
2237 alpha[cp] = rho[cp]*sq;
2239 for (i = 0; i < n; i++)
2241 p[i] -= alpha[cp]*dg[cp][i];
2245 for (i = 0; i < n; i++)
2250 /* And then go forward again */
/* ... and the forward pass adds (alpha-beta)*dx back in. */
2251 for (k = 0; k < ncorr; k++)
2254 for (i = 0; i < n; i++)
2256 yr += p[i]*dg[cp][i];
2260 beta = alpha[cp]-beta;
2262 for (i = 0; i < n; i++)
2264 p[i] += beta*dx[cp][i];
/* Store the new search direction in the current history slot. */
2274 for (i = 0; i < n; i++)
2278 dx[point][i] = p[i];
2286 /* Print it if necessary */
2289 if (mdrunOptions.verbose)
2291 double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
2292 fprintf(stderr, "\rStep %d, Epot=%12.6e, Fnorm=%9.3e, Fmax=%9.3e (atom %d)\n",
2293 step, ems.epot, ems.fnorm/sqrtNumAtoms, ems.fmax, ems.a_fmax + 1);
2296 /* Store the new (lower) energies */
2297 upd_mdebin(mdebin, FALSE, FALSE, (double)step,
2298 mdatoms->tmass, enerd, state_global, inputrec->fepvals, inputrec->expandedvals, state_global->box,
2299 nullptr, nullptr, vir, pres, nullptr, mu_tot, constr);
2300 do_log = do_per_step(step, inputrec->nstlog);
2301 do_ene = do_per_step(step, inputrec->nstenergy);
2304 print_ebin_header(fplog, step, step);
2306 print_ebin(mdoutf_get_fp_ene(outf), do_ene, FALSE, FALSE,
2307 do_log ? fplog : nullptr, step, step, eprNORMAL,
2308 mdebin, fcd, &(top_global->groups), &(inputrec->opts), nullptr);
2311 /* Send x and E to IMD client, if bIMD is TRUE. */
2312 if (do_IMD(inputrec->bIMD, step, cr, TRUE, state_global->box, as_rvec_array(state_global->x.data()), inputrec, 0, wcycle) && MASTER(cr))
2314 IMD_send_positions(inputrec->imd);
2317 // Reset stepsize if we are doing more iterations
2318 stepsize = 1.0/ems.fnorm;
2320 /* Stop when the maximum force lies below tolerance.
2321 * If we have reached machine precision, converged is already set to true.
2323 converged = converged || (ems.fmax < inputrec->em_tol);
2325 } /* End of the loop */
2327 /* IMD cleanup, if bIMD is TRUE. */
2328 IMD_finalize(inputrec->bIMD, inputrec->imd);
2332 step--; /* we never took that last step in this case */
/* Warn if we ran out of steps before reaching em_tol. */
2335 if (ems.fmax > inputrec->em_tol)
2339 warn_step(fplog, inputrec->em_tol, ems.fmax,
2340 step-1 == number_steps, FALSE);
2345 /* If we printed energy and/or logfile last step (which was the last step)
2346 * we don't have to do it again, but otherwise print the final values.
2348 if (!do_log) /* Write final value to log since we didn't do anything last step */
2350 print_ebin_header(fplog, step, step);
2352 if (!do_ene || !do_log) /* Write final energy file entries */
2354 print_ebin(mdoutf_get_fp_ene(outf), !do_ene, FALSE, FALSE,
2355 !do_log ? fplog : nullptr, step, step, eprNORMAL,
2356 mdebin, fcd, &(top_global->groups), &(inputrec->opts), nullptr);
2359 /* Print some stuff... */
2362 fprintf(stderr, "\nwriting lowest energy coordinates.\n");
2366 * For accurate normal mode calculation it is imperative that we
2367 * store the last conformation into the full precision binary trajectory.
2369 * However, we should only do it if we did NOT already write this step
2370 * above (which we did if do_x or do_f was true).
2372 do_x = !do_per_step(step, inputrec->nstxout);
2373 do_f = !do_per_step(step, inputrec->nstfout);
2374 write_em_traj(fplog, cr, outf, do_x, do_f, ftp2fn(efSTO, nfile, fnm),
2375 top_global, inputrec, step,
2376 &ems, state_global, observablesHistory);
/* Final convergence report to screen and log. */
2380 double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
2381 print_converged(stderr, LBFGS, inputrec->em_tol, step, converged,
2382 number_steps, &ems, sqrtNumAtoms);
2383 print_converged(fplog, LBFGS, inputrec->em_tol, step, converged,
2384 number_steps, &ems, sqrtNumAtoms);
2386 fprintf(fplog, "\nPerformed %d energy evaluations in total.\n", neval);
2389 finish_em(cr, outf, walltime_accounting, wcycle);
2391 /* To print the actual number of steps we needed somewhere */
2392 walltime_accounting_set_nsteps_done(walltime_accounting, step);
/* \brief Steepest-descent energy minimizer.
 *
 * Repeatedly steps along the force (negative gradient): an accepted step
 * (lower energy) increases the trusted displacement ustep, a rejected one
 * shrinks it, until either fmax < em_tol, nsteps is reached, or the step
 * becomes smaller than machine precision allows.
 *
 * NOTE(review): this listing is an elided extract (original file line
 * numbers are embedded at the start of each line); comments describe only
 * the code that is visible here.
 */
2396 Integrator::do_steep()
2398 const char *SD = "Steepest Descents";
2399 gmx_localtop_t *top;
2400 gmx_enerdata_t *enerd;
2401 gmx_global_stat_t gstat;
2407 gmx_bool bDone, bAbort, do_x, do_f;
2412 int steps_accepted = 0;
2413 auto mdatoms = mdAtoms->mdatoms();
2415 /* Create 2 states on the stack and extract pointers that we will swap */
2416 em_state_t s0 {}, s1 {};
2417 em_state_t *s_min = &s0;
2418 em_state_t *s_try = &s1;
2420 /* Init em and store the local state in s_try */
2421 init_em(fplog, SD, cr, ms, outputProvider, inputrec, mdrunOptions,
2422 state_global, top_global, s_try, &top,
2423 nrnb, mu_tot, fr, &enerd, &graph, mdAtoms, &gstat,
2424 vsite, constr, nullptr,
2425 nfile, fnm, &outf, &mdebin, wcycle);
2427 /* Print to log file */
2428 print_em_start(fplog, cr, walltime_accounting, wcycle, SD);
2430 /* Set variables for stepsize (in nm). This is the largest
2431 * step that we are going to make in any direction.
2433 ustep = inputrec->em_stepsize;
2436 /* Max number of steps */
2437 nsteps = inputrec->nsteps;
2441 /* Print to the screen */
2442 sp_header(stderr, SD, inputrec->em_tol, nsteps);
2446 sp_header(fplog, SD, inputrec->em_tol, nsteps);
2448 EnergyEvaluator energyEvaluator {
2451 inputrec, nrnb, wcycle, gstat,
2452 vsite, constr, fcd, graph,
2456 /**** HERE STARTS THE LOOP ****
2457 * count is the counter for the number of steps
2458 * bDone will be TRUE when the minimization has converged
2459 * bAbort will be TRUE when nsteps steps have been performed or when
2460 * the stepsize becomes smaller than is reasonable for machine precision
2465 while (!bDone && !bAbort)
/* nsteps < 0 means no step limit. */
2467 bAbort = (nsteps >= 0) && (count == nsteps);
2469 /* set new coordinates, except for first step */
2470 bool validStep = true;
/* Trial step from s_min along the force into s_try. */
2474 do_em_step(cr, inputrec, mdatoms,
2475 s_min, stepsize, &s_min->f, s_try,
2481 energyEvaluator.run(s_try, mu_tot, vir, pres, count, count == 0);
2485 // Signal constraint error during stepping with energy=inf
2486 s_try->epot = std::numeric_limits<real>::infinity();
2491 print_ebin_header(fplog, count, count);
2496 s_min->epot = s_try->epot;
2499 /* Print it if necessary */
2502 if (mdrunOptions.verbose)
/* '\n' on accepted steps keeps the line; '\r' overwrites rejected ones. */
2504 fprintf(stderr, "Step=%5d, Dmax= %6.1e nm, Epot= %12.5e Fmax= %11.5e, atom= %d%c",
2505 count, ustep, s_try->epot, s_try->fmax, s_try->a_fmax+1,
2506 ( (count == 0) || (s_try->epot < s_min->epot) ) ? '\n' : '\r');
2510 if ( (count == 0) || (s_try->epot < s_min->epot) )
2512 /* Store the new (lower) energies */
2513 upd_mdebin(mdebin, FALSE, FALSE, (double)count,
2514 mdatoms->tmass, enerd, &s_try->s, inputrec->fepvals, inputrec->expandedvals,
2515 s_try->s.box, nullptr, nullptr, vir, pres, nullptr, mu_tot, constr);
2517 /* Prepare IMD energy record, if bIMD is TRUE. */
2518 IMD_fill_energy_record(inputrec->bIMD, inputrec->imd, enerd, count, TRUE);
2520 print_ebin(mdoutf_get_fp_ene(outf), TRUE,
2521 do_per_step(steps_accepted, inputrec->nstdisreout),
2522 do_per_step(steps_accepted, inputrec->nstorireout),
2523 fplog, count, count, eprNORMAL,
2524 mdebin, fcd, &(top_global->groups), &(inputrec->opts), nullptr);
2529 /* Now if the new energy is smaller than the previous...
2530 * or if this is the first step!
2531 * or if we did random steps!
2534 if ( (count == 0) || (s_try->epot < s_min->epot) )
2538 /* Test whether the convergence criterion is met... */
2539 bDone = (s_try->fmax < inputrec->em_tol);
2541 /* Copy the arrays for force, positions and energy */
2542 /* The 'Min' array always holds the coords and forces of the minimal
/* Accept: s_try becomes the new s_min (pointer swap, no copying). */
2544 swap_em_state(&s_min, &s_try);
2550 /* Write to trn, if necessary */
2551 do_x = do_per_step(steps_accepted, inputrec->nstxout);
2552 do_f = do_per_step(steps_accepted, inputrec->nstfout);
2553 write_em_traj(fplog, cr, outf, do_x, do_f, nullptr,
2554 top_global, inputrec, count,
2555 s_min, state_global, observablesHistory);
2559 /* If energy is not smaller make the step smaller... */
/* With domain decomposition, repartition back to the kept (old) state. */
2562 if (DOMAINDECOMP(cr) && s_min->s.ddp_count != cr->dd->ddp_count)
2564 /* Reload the old state */
2565 em_dd_partition_system(fplog, count, cr, top_global, inputrec,
2566 s_min, top, mdAtoms, fr, vsite, constr,
2571 /* Determine new step */
/* Scale so the largest per-atom displacement equals ustep. */
2572 stepsize = ustep/s_min->fmax;
2574 /* Check if stepsize is too small, with 1 nm as a characteristic length */
2576 if (count == nsteps || ustep < 1e-12)
2578 if (count == nsteps || ustep < 1e-6)
2583 warn_step(fplog, inputrec->em_tol, s_min->fmax,
2584 count == nsteps, constr != nullptr);
2589 /* Send IMD energies and positions, if bIMD is TRUE. */
2590 if (do_IMD(inputrec->bIMD, count, cr, TRUE, state_global->box,
2591 MASTER(cr) ? as_rvec_array(state_global->x.data()) : nullptr,
2592 inputrec, 0, wcycle) &&
2595 IMD_send_positions(inputrec->imd);
2599 } /* End of the loop */
2601 /* IMD cleanup, if bIMD is TRUE. */
2602 IMD_finalize(inputrec->bIMD, inputrec->imd);
2604 /* Print some data... */
2607 fprintf(stderr, "\nwriting lowest energy coordinates.\n");
/* Always write the minimum-energy structure at the end. */
2609 write_em_traj(fplog, cr, outf, TRUE, inputrec->nstfout, ftp2fn(efSTO, nfile, fnm),
2610 top_global, inputrec, count,
2611 s_min, state_global, observablesHistory);
/* Final convergence report to screen and log. */
2615 double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
2617 print_converged(stderr, SD, inputrec->em_tol, count, bDone, nsteps,
2618 s_min, sqrtNumAtoms);
2619 print_converged(fplog, SD, inputrec->em_tol, count, bDone, nsteps,
2620 s_min, sqrtNumAtoms);
2623 finish_em(cr, outf, walltime_accounting, wcycle);
2625 /* To print the actual number of steps we needed somewhere */
2626 inputrec->nsteps = count;
2628 walltime_accounting_set_nsteps_done(walltime_accounting, count);
2634 const char *NM = "Normal Mode Analysis";
2637 gmx_localtop_t *top;
2638 gmx_enerdata_t *enerd;
2639 gmx_global_stat_t gstat;
2644 gmx_bool bSparse; /* use sparse matrix storage format */
2646 gmx_sparsematrix_t * sparse_matrix = nullptr;
2647 real * full_matrix = nullptr;
2649 /* added with respect to mdrun */
2651 real der_range = 10.0*std::sqrt(GMX_REAL_EPS);
2653 bool bIsMaster = MASTER(cr);
2654 auto mdatoms = mdAtoms->mdatoms();
2656 if (constr != nullptr)
2658 gmx_fatal(FARGS, "Constraints present with Normal Mode Analysis, this combination is not supported");
2661 gmx_shellfc_t *shellfc;
2663 em_state_t state_work {};
2665 /* Init em and store the local state in state_minimum */
2666 init_em(fplog, NM, cr, ms, outputProvider, inputrec, mdrunOptions,
2667 state_global, top_global, &state_work, &top,
2668 nrnb, mu_tot, fr, &enerd, &graph, mdAtoms, &gstat,
2669 vsite, constr, &shellfc,
2670 nfile, fnm, &outf, nullptr, wcycle);
2672 std::vector<size_t> atom_index = get_atom_index(top_global);
2673 snew(fneg, atom_index.size());
2674 snew(dfdx, atom_index.size());
2680 "NOTE: This version of GROMACS has been compiled in single precision,\n"
2681 " which MIGHT not be accurate enough for normal mode analysis.\n"
2682 " GROMACS now uses sparse matrix storage, so the memory requirements\n"
2683 " are fairly modest even if you recompile in double precision.\n\n");
2687 /* Check if we can/should use sparse storage format.
2689 * Sparse format is only useful when the Hessian itself is sparse, which it
2690 * will be when we use a cutoff.
2691 * For small systems (n<1000) it is easier to always use full matrix format, though.
2693 if (EEL_FULL(fr->ic->eeltype) || fr->rlist == 0.0)
2695 GMX_LOG(mdlog.warning).appendText("Non-cutoff electrostatics used, forcing full Hessian format.");
2698 else if (atom_index.size() < 1000)
2700 GMX_LOG(mdlog.warning).appendTextFormatted("Small system size (N=%zu), using full Hessian format.",
2706 GMX_LOG(mdlog.warning).appendText("Using compressed symmetric sparse Hessian format.");
2710 /* Number of dimensions, based on real atoms, that is not vsites or shell */
2711 sz = DIM*atom_index.size();
2713 fprintf(stderr, "Allocating Hessian memory...\n\n");
2717 sparse_matrix = gmx_sparsematrix_init(sz);
2718 sparse_matrix->compressed_symmetric = TRUE;
2722 snew(full_matrix, sz*sz);
2728 /* Write start time and temperature */
2729 print_em_start(fplog, cr, walltime_accounting, wcycle, NM);
2731 /* fudge nr of steps to nr of atoms */
2732 inputrec->nsteps = atom_index.size()*2;
2736 fprintf(stderr, "starting normal mode calculation '%s'\n%d steps.\n\n",
2737 *(top_global->name), (int)inputrec->nsteps);
2740 nnodes = cr->nnodes;
2742 /* Make evaluate_energy do a single node force calculation */
2744 EnergyEvaluator energyEvaluator {
2747 inputrec, nrnb, wcycle, gstat,
2748 vsite, constr, fcd, graph,
2751 energyEvaluator.run(&state_work, mu_tot, vir, pres, -1, TRUE);
2752 cr->nnodes = nnodes;
2754 /* if forces are not small, warn user */
2755 get_state_f_norm_max(cr, &(inputrec->opts), mdatoms, &state_work);
2757 GMX_LOG(mdlog.warning).appendTextFormatted("Maximum force:%12.5e", state_work.fmax);
2758 if (state_work.fmax > 1.0e-3)
2760 GMX_LOG(mdlog.warning).appendText(
2761 "The force is probably not small enough to "
2762 "ensure that you are at a minimum.\n"
2763 "Be aware that negative eigenvalues may occur\n"
2764 "when the resulting matrix is diagonalized.");
2767 /***********************************************************
2769 * Loop over all pairs in matrix
2771 * do_force called twice. Once with positive and
2772 * once with negative displacement
2774 ************************************************************/
2776 /* Steps are divided one by one over the nodes */
2778 for (unsigned int aid = cr->nodeid; aid < atom_index.size(); aid += nnodes)
2780 size_t atom = atom_index[aid];
2781 for (size_t d = 0; d < DIM; d++)
2783 gmx_int64_t step = 0;
2784 int force_flags = GMX_FORCE_STATECHANGED | GMX_FORCE_ALLFORCES;
2787 x_min = state_work.s.x[atom][d];
2789 for (unsigned int dx = 0; (dx < 2); dx++)
2793 state_work.s.x[atom][d] = x_min - der_range;
2797 state_work.s.x[atom][d] = x_min + der_range;
2800 /* Make evaluate_energy do a single node force calculation */
2804 /* Now is the time to relax the shells */
2805 relax_shell_flexcon(fplog,
2808 mdrunOptions.verbose,
2824 &top_global->groups,
2830 DdOpenBalanceRegionBeforeForceComputation::no,
2831 DdCloseBalanceRegionAfterForceComputation::no);
2837 energyEvaluator.run(&state_work, mu_tot, vir, pres, atom*2+dx, FALSE);
2840 cr->nnodes = nnodes;
2844 for (size_t i = 0; i < atom_index.size(); i++)
2846 copy_rvec(state_work.f[atom_index[i]], fneg[i]);
2851 /* x is restored to original */
2852 state_work.s.x[atom][d] = x_min;
2854 for (size_t j = 0; j < atom_index.size(); j++)
2856 for (size_t k = 0; (k < DIM); k++)
2859 -(state_work.f[atom_index[j]][k] - fneg[j][k])/(2*der_range);
2866 #define mpi_type GMX_MPI_REAL
2867 MPI_Send(dfdx[0], atom_index.size()*DIM, mpi_type, MASTER(cr),
2868 cr->nodeid, cr->mpi_comm_mygroup);
2873 for (node = 0; (node < nnodes && atom+node < atom_index.size()); node++)
2879 MPI_Recv(dfdx[0], atom_index.size()*DIM, mpi_type, node, node,
2880 cr->mpi_comm_mygroup, &stat);
2885 row = (atom + node)*DIM + d;
2887 for (size_t j = 0; j < atom_index.size(); j++)
2889 for (size_t k = 0; k < DIM; k++)
2895 if (col >= row && dfdx[j][k] != 0.0)
2897 gmx_sparsematrix_increment_value(sparse_matrix,
2898 row, col, dfdx[j][k]);
2903 full_matrix[row*sz+col] = dfdx[j][k];
2910 if (mdrunOptions.verbose && fplog)
2915 /* write progress */
2916 if (bIsMaster && mdrunOptions.verbose)
2918 fprintf(stderr, "\rFinished step %d out of %d",
2919 static_cast<int>(std::min(atom+nnodes, atom_index.size())),
2920 static_cast<int>(atom_index.size()));
2927 fprintf(stderr, "\n\nWriting Hessian...\n");
2928 gmx_mtxio_write(ftp2fn(efMTX, nfile, fnm), sz, sz, full_matrix, sparse_matrix);
2931 finish_em(cr, outf, walltime_accounting, wcycle);
2933 walltime_accounting_set_nsteps_done(walltime_accounting, atom_index.size()*2);