2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2008, The GROMACS development team.
6 * Copyright (c) 2013,2014,2015,2016,2017 by the GROMACS development team.
7 * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
8 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
9 * and including many others, as listed in the AUTHORS file in the
10 * top-level source directory and at http://www.gromacs.org.
12 * GROMACS is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU Lesser General Public License
14 * as published by the Free Software Foundation; either version 2.1
15 * of the License, or (at your option) any later version.
17 * GROMACS is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * Lesser General Public License for more details.
22 * You should have received a copy of the GNU Lesser General Public
23 * License along with GROMACS; if not, see
24 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
25 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
27 * If you want to redistribute modifications to GROMACS, please
28 * consider that scientific software is very special. Version
29 * control is crucial - bugs must be traceable. We will be happy to
30 * consider code for inclusion in the official distribution, but
31 * derived work must not be called official GROMACS. Details are found
32 * in the README & COPYING files - if they are missing, get the
33 * official version at http://www.gromacs.org.
35 * To help us fund GROMACS development, we humbly ask that you cite
36 * the research papers on the package. Check out http://www.gromacs.org.
50 #include "gromacs/domdec/dlbtiming.h"
51 #include "gromacs/domdec/domdec.h"
52 #include "gromacs/domdec/domdec_struct.h"
53 #include "gromacs/domdec/mdsetup.h"
54 #include "gromacs/gmxlib/network.h"
55 #include "gromacs/math/functions.h"
56 #include "gromacs/math/units.h"
57 #include "gromacs/math/vec.h"
58 #include "gromacs/math/vecdump.h"
59 #include "gromacs/mdlib/constr.h"
60 #include "gromacs/mdlib/enerdata_utils.h"
61 #include "gromacs/mdlib/force.h"
62 #include "gromacs/mdlib/force_flags.h"
63 #include "gromacs/mdlib/gmx_omp_nthreads.h"
64 #include "gromacs/mdlib/mdatoms.h"
65 #include "gromacs/mdlib/vsite.h"
66 #include "gromacs/mdtypes/commrec.h"
67 #include "gromacs/mdtypes/enerdata.h"
68 #include "gromacs/mdtypes/forcerec.h"
69 #include "gromacs/mdtypes/inputrec.h"
70 #include "gromacs/mdtypes/md_enums.h"
71 #include "gromacs/mdtypes/mdatom.h"
72 #include "gromacs/mdtypes/state.h"
73 #include "gromacs/pbcutil/pbc.h"
74 #include "gromacs/topology/ifunc.h"
75 #include "gromacs/topology/mtop_lookup.h"
76 #include "gromacs/topology/mtop_util.h"
77 #include "gromacs/utility/arrayref.h"
78 #include "gromacs/utility/arraysize.h"
79 #include "gromacs/utility/cstringutil.h"
80 #include "gromacs/utility/fatalerror.h"
81 #include "gromacs/utility/gmxassert.h"
84 using gmx::ArrayRefWithPadding;
/* NOTE(review): this is the field list of the shell-particle record
 * (presumably struct t_shell; the struct declaration itself is not visible
 * in this extract -- confirm against the full source).  One instance
 * describes one shell and the one-to-three nuclei it is bonded to. */
89 int nnucl = 0; /* The number of nuclei */
90 int shellIndex = -1; /* The shell index */
91 int nucl1 = -1; /* The first nuclei connected to the shell */
92 int nucl2 = -1; /* The second nuclei connected to the shell */
93 int nucl3 = -1; /* The third nuclei connected to the shell */
94 real k = 0; /* force constant */
/* k_1 is the reciprocal of k; it is used directly as the per-dimension
 * steepest-descent step size (see shell_pos_sd below). */
95 real k_1 = 0; /* 1 over force constant */
96 rvec xold; /* The old shell coordinates */
97 rvec fold; /* The old force on the shell */
98 rvec step; /* Step size for steepest descents */
/* NOTE(review): field list of the shell/flexible-constraint bookkeeping
 * object (presumably struct gmx_shellfc_t; the declaration line is not
 * visible in this extract -- confirm against the full source). */
103 /* Shell counts, indices, parameters and working data */
104 std::vector<t_shell> shell_gl; /* All the shells (for DD only) */
105 std::vector<int> shell_index_gl; /* Global shell index (for DD only) */
106 gmx_bool bInterCG; /* Are there inter charge-group shells? */
107 std::vector<t_shell> shells; /* The local shells */
108 bool predictShells = false; /* Predict shell positions */
109 bool requireInit = false; /* Require initialization of shell positions */
110 int nflexcon = 0; /* The number of flexible constraints */
/* Double buffers: index Min holds the best state so far, index Try the
 * trial state of the current relaxation iteration (see relax_shell_flexcon). */
112 std::array<PaddedHostVector<RVec>, 2> x; /* Coordinate buffers for iterative minimization */
113 std::array<PaddedHostVector<RVec>, 2> f; /* Force buffers for iterative minimization */
115 /* Flexible constraint working data */
116 std::vector<RVec> acc_dir; /* Acceleration direction for flexcon */
117 gmx::PaddedVector<RVec> x_old; /* Old coordinates for flexcon */
118 gmx::PaddedVector<RVec> adir_xnold; /* Work space for init_adir */
119 gmx::PaddedVector<RVec> adir_xnew; /* Work space for init_adir */
/* Statistics reported by done_shellfc at the end of the run */
120 std::int64_t numForceEvaluations; /* Total number of force evaluations */
121 int numConvergedIterations; /* Total number of iterations that converged */
/* Print a human-readable table of all shells to fplog: for each shell its
 * atom index, its force constant (recovered as 1/k_1) and the indices of
 * the one to three nuclei it is connected to. */
125 static void pr_shell(FILE* fplog, ArrayRef<const t_shell> shells)
127 fprintf(fplog, "SHELL DATA\n");
128 fprintf(fplog, "%5s %8s %5s %5s %5s\n", "Shell", "Force k", "Nucl1", "Nucl2", "Nucl3");
129 for (const t_shell& shell : shells)
/* Every shell has at least one nucleus; nucl2/nucl3 are printed only when
 * nnucl indicates they are present. */
131 fprintf(fplog, "%5d %8.3f %5d", shell.shellIndex, 1.0 / shell.k_1, shell.nucl1);
132 if (shell.nnucl == 2)
134 fprintf(fplog, " %5d\n", shell.nucl2);
136 else if (shell.nnucl == 3)
138 fprintf(fplog, " %5d %5d\n", shell.nucl2, shell.nucl3);
142 fprintf(fplog, "\n");
147 /* TODO The remain call of this function passes non-NULL mass and NULL
148 * mtop, so this routine can be simplified.
150 * The other code path supported doing prediction before the MD loop
151 * started, but even when called, the prediction was always
152 * over-written by a subsequent call in the MD loop, so has been
/* Predict new shell positions by displacing each shell with the
 * (mass-weighted) motion of its bonded nuclei over one time step.
 * NOTE(review): parts of the signature and the switch over nnucl are
 * elided in this extract; the visible branches handle 1, 2 and 3 nuclei. */
154 static void predict_shells(FILE* fplog,
158 ArrayRef<const t_shell> shells,
164 real dt_1, fudge, tm, m1, m2, m3;
/* Masses can come either from a plain array or be looked up in the
 * global topology; at least one source must be available. */
166 GMX_RELEASE_ASSERT(mass || mtop, "Must have masses or a way to look them up");
168 /* We introduce a fudge factor for performance reasons: with this choice
169 * the initial force on the shells is about a factor of two lower than
179 fprintf(fplog, "RELAX: Using prediction for initial shell placement\n");
191 for (const t_shell& shell : shells)
193 const int s1 = shell.shellIndex;
/* One nucleus: move the shell with that nucleus' displacement/velocity. */
202 for (m = 0; (m < DIM); m++)
204 x[s1][m] += xOrV[n1][m] * dt_1;
/* Two nuclei: use the mass-weighted average motion of both. */
217 /* Not the correct masses with FE, but it is just a prediction... */
218 m1 = mtopGetAtomMass(mtop, n1, &molb);
219 m2 = mtopGetAtomMass(mtop, n2, &molb);
221 tm = dt_1 / (m1 + m2);
222 for (m = 0; (m < DIM); m++)
224 x[s1][m] += (m1 * xOrV[n1][m] + m2 * xOrV[n2][m]) * tm;
/* Three nuclei: mass-weighted average motion of all three. */
239 /* Not the correct masses with FE, but it is just a prediction... */
240 m1 = mtopGetAtomMass(mtop, n1, &molb);
241 m2 = mtopGetAtomMass(mtop, n2, &molb);
242 m3 = mtopGetAtomMass(mtop, n3, &molb);
244 tm = dt_1 / (m1 + m2 + m3);
245 for (m = 0; (m < DIM); m++)
247 x[s1][m] += (m1 * xOrV[n1][m] + m2 * xOrV[n2][m] + m3 * xOrV[n3][m]) * tm;
/* More than three bonded nuclei is not supported. */
250 default: gmx_fatal(FARGS, "Shell %d has %d nuclei!", s1, shell.nnucl);
/* Build and return the shell/flexible-constraint bookkeeping object for the
 * whole run, or nullptr when the system has neither shells nor flexible
 * constraints.  Scans all bonded interactions of the supported types to
 * find shell-nucleus bonds and accumulates each shell's force constant.
 * NOTE(review): many lines (braces, some declarations, case labels) are
 * elided in this extract; code lines are kept byte-identical. */
255 gmx_shellfc_t* init_shell_flexcon(FILE* fplog, const gmx_mtop_t* mtop, int nflexcon, int nstcalcenergy, bool usingDomainDecomposition)
260 int i, j, type, a_offset, mol, ftype, nra;
262 int aS, aN = 0; /* Shell and nucleus */
/* Interaction types that may connect a shell to a nucleus. */
263 int bondtypes[] = { F_BONDS, F_HARMONIC, F_CUBICBONDS, F_POLARIZATION, F_ANHARM_POL, F_WATER_POL };
264 #define NBT asize(bondtypes)
265 const gmx_ffparams_t* ffparams;
267 const std::array<int, eptNR> numParticles = gmx_mtop_particletype_count(*mtop);
270 /* Print the number of each particle type */
272 for (const auto& n : numParticles)
276 fprintf(fplog, "There are: %d %ss\n", n, ptype_str[pType]);
282 nshell = numParticles[eptShell];
284 if (nshell == 0 && nflexcon == 0)
286 /* We're not doing shells or flexible constraints */
290 shfc = new gmx_shellfc_t;
291 shfc->nflexcon = nflexcon;
295 /* Only flexible constraints, no shells.
296 * Note that make_local_shells() does not need to be called.
/* Shells require energy evaluation every step. */
301 if (nstcalcenergy != 1)
304 "You have nstcalcenergy set to a value (%d) that is different from 1.\nThis is "
305 "not supported in combination with shell particles.\nPlease make a new tpr file.",
308 if (usingDomainDecomposition)
312 "Shell particles are not implemented with domain decomposition, use a single rank");
315 /* We have shells: fill the shell data structure */
317 /* Global system sized array, this should be avoided */
318 std::vector<int> shell_index(mtop->natoms);
/* Map each global atom index of a shell particle to a compact shell index. */
321 for (const AtomProxy atomP : AtomRange(*mtop))
323 const t_atom& local = atomP.atom();
324 int i = atomP.globalAtomNumber();
325 if (local.ptype == eptShell)
327 shell_index[i] = nshell++;
331 std::vector<t_shell> shell(nshell);
333 ffparams = &mtop->ffparams;
335 /* Now fill the structures */
336 /* TODO: See if we can use update groups that cover shell constructions */
337 shfc->bInterCG = FALSE;
/* Walk all molecule blocks / molecules / supported bonded types, looking
 * for interactions that involve a shell particle. */
340 for (size_t mb = 0; mb < mtop->molblock.size(); mb++)
342 const gmx_molblock_t* molb = &mtop->molblock[mb];
343 const gmx_moltype_t* molt = &mtop->moltype[molb->type];
345 const t_atom* atom = molt->atoms.atom;
346 for (mol = 0; mol < molb->nmol; mol++)
348 for (j = 0; (j < NBT); j++)
350 const int* ia = molt->ilist[bondtypes[j]].iatoms.data();
351 for (i = 0; (i < molt->ilist[bondtypes[j]].size());)
354 ftype = ffparams->functype[type];
355 nra = interaction_function[ftype].nratoms;
357 /* Check whether we have a bond with a shell */
/* Identify which atom of the interaction is the shell (aS) and which
 * is the nucleus (aN); water polarization has fixed atom slots. */
360 switch (bondtypes[j])
367 if (atom[ia[1]].ptype == eptShell)
372 else if (atom[ia[2]].ptype == eptShell)
379 aN = ia[4]; /* Dummy */
380 aS = ia[5]; /* Shell */
382 default: gmx_fatal(FARGS, "Death Horror: %s, %d", __FILE__, __LINE__);
389 /* Check whether one of the particles is a shell... */
390 nsi = shell_index[a_offset + aS];
391 if ((nsi < 0) || (nsi >= nshell))
393 gmx_fatal(FARGS, "nsi is %d should be within 0 - %d. aS = %d", nsi, nshell, aS);
/* Record the shell's global atom index; sanity-check that the same
 * compact index is never claimed by a different atom. */
395 if (shell[nsi].shellIndex == -1)
397 shell[nsi].shellIndex = a_offset + aS;
400 else if (shell[nsi].shellIndex != a_offset + aS)
402 gmx_fatal(FARGS, "Weird stuff in %s, %d", __FILE__, __LINE__);
/* Attach the nucleus to the first free slot; at most three are allowed. */
405 if (shell[nsi].nucl1 == -1)
407 shell[nsi].nucl1 = a_offset + aN;
409 else if (shell[nsi].nucl2 == -1)
411 shell[nsi].nucl2 = a_offset + aN;
413 else if (shell[nsi].nucl3 == -1)
415 shell[nsi].nucl3 = a_offset + aN;
421 pr_shell(fplog, shell);
423 gmx_fatal(FARGS, "Can not handle more than three bonds per shell\n");
427 /* shell[nsi].bInterCG = TRUE; */
428 shfc->bInterCG = TRUE;
/* Accumulate the shell force constant k from the interaction parameters;
 * for polarization types k is derived from the shell charge and the
 * polarizability (k = q^2 * ONE_4PI_EPS0 / alpha). */
431 switch (bondtypes[j])
435 shell[nsi].k += ffparams->iparams[type].harmonic.krA;
438 shell[nsi].k += ffparams->iparams[type].cubic.kb;
/* Free-energy perturbation of the shell charge is not supported here. */
442 if (!gmx_within_tol(qS, atom[aS].qB, GMX_REAL_EPS * 10))
445 "polarize can not be used with qA(%e) != qB(%e) for "
446 "atom %d of molecule block %zu",
447 qS, atom[aS].qB, aS + 1, mb + 1);
449 shell[nsi].k += gmx::square(qS) * ONE_4PI_EPS0
450 / ffparams->iparams[type].polarize.alpha;
453 if (!gmx_within_tol(qS, atom[aS].qB, GMX_REAL_EPS * 10))
456 "water_pol can not be used with qA(%e) != qB(%e) for "
457 "atom %d of molecule block %zu",
458 qS, atom[aS].qB, aS + 1, mb + 1);
/* Water polarization: use the mean of the three axial polarizabilities. */
460 alpha = (ffparams->iparams[type].wpol.al_x
461 + ffparams->iparams[type].wpol.al_y
462 + ffparams->iparams[type].wpol.al_z)
464 shell[nsi].k += gmx::square(qS) * ONE_4PI_EPS0 / alpha;
466 default: gmx_fatal(FARGS, "Death Horror: %s, %d", __FILE__, __LINE__);
474 a_offset += molt->atoms.nr;
476 /* Done with this molecule type */
479 /* Verify whether it's all correct */
482 gmx_fatal(FARGS, "Something weird with shells. They may not be bonded to something");
/* Store the reciprocal force constant used as the SD step size. */
485 for (i = 0; (i < ns); i++)
487 shell[i].k_1 = 1.0 / shell[i].k;
492 pr_shell(debug, shell);
496 shfc->shell_gl = shell;
497 shfc->shell_index_gl = shell_index;
/* Environment-variable overrides for shell position prediction. */
499 shfc->predictShells = (getenv("GMX_NOPREDICT") == nullptr);
500 shfc->requireInit = false;
501 if (!shfc->predictShells)
505 fprintf(fplog, "\nWill never predict shell positions\n");
510 shfc->requireInit = (getenv("GMX_REQUIRE_SHELL_INIT") != nullptr);
511 if (shfc->requireInit && fplog)
513 fprintf(fplog, "\nWill always initiate shell positions\n");
517 if (shfc->predictShells)
524 "\nNOTE: in the current version shell prediction during the crun is "
527 /* Prediction improves performance, so we should implement either:
528 * 1. communication for the atoms needed for prediction
529 * 2. prediction using the velocities of shells; currently the
530 * shell velocities are zeroed, it's a bit tricky to keep
531 * track of the shell displacements and thus the velocity.
533 shfc->predictShells = false;
/* Fill shfc->shells with the shells local to this rank.  Without domain
 * decomposition all global shells are simply copied; with DD, the home
 * atoms of type eptShell are selected and their shell/nucleus indices are
 * remapped from global to local numbering.
 * NOTE(review): the DD branch structure is partially elided here. */
540 void gmx::make_local_shells(const t_commrec* cr, const t_mdatoms* md, gmx_shellfc_t* shfc)
543 gmx_domdec_t* dd = nullptr;
545 if (DOMAINDECOMP(cr))
549 a1 = dd_numHomeAtoms(*dd);
553 /* Single node: we need all shells, copy them */
554 shfc->shells = shfc->shell_gl;
559 ArrayRef<const int> ind = shfc->shell_index_gl;
561 std::vector<t_shell>& shells = shfc->shells;
563 for (int i = a0; i < a1; i++)
565 if (md->ptype[i] == eptShell)
/* With DD, translate the local atom index to a global one before the
 * shell-index lookup; otherwise local == global. */
569 shells.push_back(shfc->shell_gl[ind[dd->globalAtomIndices[i]]]);
573 shells.push_back(shfc->shell_gl[ind[i]]);
575 t_shell& shell = shells.back();
577 /* With inter-cg shells we can no do shell prediction,
578 * so we do not need the nuclei numbers.
/* Rebase the nucleus indices on the local shell index, preserving the
 * global offset nucl - shellIndex. */
582 shell.nucl1 = i + shell.nucl1 - shell.shellIndex;
585 shell.nucl2 = i + shell.nucl2 - shell.shellIndex;
589 shell.nucl3 = i + shell.nucl3 - shell.shellIndex;
592 shell.shellIndex = i;
/* Advance one position with a scalar step size; presumably
 * xnew = xold + f * step per dimension (the body is not visible in this
 * extract -- TODO confirm against the full source). */
597 static void do_1pos(rvec xnew, const rvec xold, const rvec f, real step)
/* Advance one position with a per-dimension step size: the displacement in
 * each dimension is f[d] * step[d].  NOTE(review): the assignments applying
 * dx/dy/dz to xnew are elided in this extract -- presumably
 * xnew = xold + (dx, dy, dz); confirm against the full source. */
615 static void do_1pos3(rvec xnew, const rvec xold, const rvec f, const rvec step)
624 dx = f[XX] * step[XX];
625 dy = f[YY] * step[YY];
626 dz = f[ZZ] * step[ZZ];
/* Steepest descent along precomputed acceleration directions: for the
 * first homenr atoms, move xold to xnew by acc_dir[i] scaled with the
 * (scalar) step.  NOTE(review): the parameters xnew, homenr and step are
 * part of the signature but their declaration lines are elided here. */
633 static void directional_sd(ArrayRef<const RVec> xold,
635 ArrayRef<const RVec> acc_dir,
639 const rvec* xo = as_rvec_array(xold.data());
640 rvec* xn = as_rvec_array(xnew.data());
642 for (int i = 0; i < homenr; i++)
644 do_1pos(xn[i], xo[i], acc_dir[i], step);
/* One steepest-descent update of all shell positions, with an adaptive
 * per-dimension step size.  On the first pass the step is initialized to
 * 1/k; afterwards it is rescaled from the local force-constant estimate
 * k_est ~ -dx/df built from the change in position and force since the
 * previous iteration.  The shell's previous position/force are saved in
 * shell.xold/shell.fold for the next call.
 * NOTE(review): several enclosing conditionals are elided in this extract. */
648 static void shell_pos_sd(ArrayRef<const RVec> xcur,
650 ArrayRef<const RVec> f,
651 ArrayRef<t_shell> shells,
/* Step-scaling window: factors interpolate between 0.8 and 1.2. */
654 const real step_scale_min = 0.8, step_scale_increment = 0.2, step_scale_max = 1.2,
655 step_scale_multiple = (step_scale_max - step_scale_min) / step_scale_increment;
660 real step_min, step_max;
665 for (t_shell& shell : shells)
667 const int ind = shell.shellIndex;
/* First iteration: seed the step with the reciprocal force constant. */
670 for (d = 0; d < DIM; d++)
672 shell.step[d] = shell.k_1;
674 step_min = std::min(step_min, shell.step[d]);
675 step_max = std::max(step_max, shell.step[d]);
/* Later iterations: adapt the step per dimension. */
681 for (d = 0; d < DIM; d++)
683 dx = xcur[ind][d] - shell.xold[d];
684 df = f[ind][d] - shell.fold[d];
685 /* -dx/df gets used to generate an interpolated value, but would
686 * cause a NaN if df were binary-equal to zero. Values close to
687 * zero won't cause problems (because of the min() and max()), so
688 * just testing for binary inequality is OK. */
692 /* Scale the step size by a factor interpolated from
693 * step_scale_min to step_scale_max, as k_est goes from 0 to
694 * step_scale_multiple * shell.step[d] */
695 shell.step[d] = step_scale_min * shell.step[d]
696 + step_scale_increment
697 * std::min(step_scale_multiple * shell.step[d],
698 std::max(k_est, zero));
703 if (gmx_numzero(dx)) /* 0 == dx */
705 /* Likely this will never happen, but if it does just
706 * don't scale the step. */
710 shell.step[d] *= step_scale_max;
714 step_min = std::min(step_min, shell.step[d]);
715 step_max = std::max(step_max, shell.step[d]);
/* Remember this iteration's position and force for the next adaptation. */
719 copy_rvec(xcur[ind], shell.xold);
720 copy_rvec(f[ind], shell.fold);
/* Take the actual SD step with the per-dimension step vector. */
722 do_1pos3(xnew[ind], xcur[ind], f[ind], shell.step);
726 fprintf(debug, "shell = %d\n", ind);
727 pr_rvec(debug, 0, "fshell", f[ind], DIM, TRUE);
728 pr_rvec(debug, 0, "xold", xcur[ind], DIM, TRUE);
729 pr_rvec(debug, 0, "step", shell.step, DIM, TRUE);
730 pr_rvec(debug, 0, "xnew", xnew[ind], DIM, TRUE);
734 printf("step %.3e %.3e\n", step_min, step_max);
738 static void decrease_step_size(ArrayRef<t_shell> shells)
740 for (t_shell& shell : shells)
742 svmul(0.8, shell.step, shell.step);
/* Report the state of one relaxation iteration to fp: step/iteration
 * counters, potential energy and RMS shell force; when flexible
 * constraints are present (ndir > 0), also the directional RMS force.
 * NOTE(review): the declaration of the step-string buffer 'buf' is elided
 * in this extract. */
746 static void print_epot(FILE* fp, int64_t mdstep, int count, real epot, real df, int ndir, real sf_dir)
750 fprintf(fp, "MDStep=%5s/%2d EPot: %12.8e, rmsF: %6.2e", gmx_step_str(mdstep, buf), count, epot, df);
753 fprintf(fp, ", dir. rmsF: %6.2e\n", std::sqrt(sf_dir / ndir));
/* Return the root-mean-square force over all shells, reduced over ranks
 * via gmx_sumd when running in parallel (buf[1] carries the shell count
 * through the reduction).  Returns 0 when there are no shells.
 * NOTE(review): the declaration of 'buf' and the parallel-run condition
 * guarding gmx_sumd are elided in this extract. */
762 static real rms_force(const t_commrec* cr,
763 ArrayRef<const RVec> force,
764 ArrayRef<const t_shell> shells,
770 const rvec* f = as_rvec_array(force.data());
/* Accumulate |f|^2 of every shell into buf[0]. */
773 for (const t_shell& shell : shells)
775 buf[0] += norm2(f[shell.shellIndex]);
777 int ntot = shells.ssize();
/* Parallel: sum the accumulators over all ranks and recover the total
 * shell count from the reduced buffer. */
784 gmx_sumd(4, buf, cr);
785 ntot = gmx::roundToInt(buf[1]);
791 return (ntot ? std::sqrt(buf[0] / ntot) : 0);
/* Debug dump: print the force vector and magnitude of shells whose force
 * is notable relative to ftol (the squared threshold ft2 is computed but
 * the comparison line is elided in this extract -- presumably shells with
 * ff2 above a multiple of ft2 are printed; confirm against full source). */
794 static void dump_shells(FILE* fp, ArrayRef<RVec> f, real ftol, ArrayRef<const t_shell> shells)
798 ft2 = gmx::square(ftol);
800 for (const t_shell& shell : shells)
802 const int ind = shell.shellIndex;
803 ff2 = iprod(f[ind], f[ind]);
806 fprintf(fp, "SHELL %5d, force %10.5f %10.5f %10.5f, |f| %10.5f\n", ind, f[ind][XX],
807 f[ind][YY], f[ind][ZZ], std::sqrt(ff2));
/* Compute acceleration directions (acc_dir) for flexible constraints.
 * Builds two trial coordinate sets (xnold from the previous displacement,
 * xnew from a leap-frog-like extrapolation with f/m), constrains both,
 * derives the constraint-induced acceleration from their difference, and
 * finally projects it on the old bond directions via a Deriv_FlexCon
 * constraint application.
 * NOTE(review): several parameter and local declarations are elided in
 * this extract; code lines are kept byte-identical. */
812 static void init_adir(gmx_shellfc_t* shfc,
813 gmx::Constraints* constr,
814 const t_inputrec* ir,
820 ArrayRefWithPadding<RVec> xOld,
821 ArrayRef<RVec> x_init,
822 ArrayRefWithPadding<RVec> xCurrent,
824 ArrayRef<RVec> acc_dir,
826 ArrayRef<const real> lambda,
831 unsigned short* ptype;
833 if (DOMAINDECOMP(cr))
/* (Re)size the scratch buffers to the current atom count. */
841 shfc->adir_xnold.resizeWithPadding(n);
842 shfc->adir_xnew.resizeWithPadding(n);
843 rvec* xnold = as_rvec_array(shfc->adir_xnold.data());
844 rvec* xnew = as_rvec_array(shfc->adir_xnew.data());
845 rvec* x_old = as_rvec_array(xOld.paddedArrayRef().data());
846 rvec* x = as_rvec_array(xCurrent.paddedArrayRef().data());
852 /* Does NOT work with freeze or acceleration groups (yet) */
853 for (n = 0; n < end; n++)
855 w_dt = md->invmass[n] * dt;
857 for (d = 0; d < DIM; d++)
/* Real atoms get extrapolated trial positions; virtual sites and shells
 * are kept fixed at the current position. */
859 if ((ptype[n] != eptVSite) && (ptype[n] != eptShell))
861 xnold[n][d] = x[n][d] - (x_init[n][d] - x_old[n][d]);
862 xnew[n][d] = 2 * x[n][d] - x_old[n][d] + f[n][d] * w_dt * dt;
866 xnold[n][d] = x[n][d];
867 xnew[n][d] = x[n][d];
871 bool needsLogging = false;
872 bool computeEnergy = false;
873 bool computeVirial = false;
/* Constrain both trial coordinate sets against the current positions. */
874 constr->apply(needsLogging, computeEnergy, step, 0, 1.0, xCurrent,
875 shfc->adir_xnold.arrayRefWithPadding(), {}, box, lambda[efptBONDED],
876 &(dvdlambda[efptBONDED]), {}, computeVirial, nullptr,
877 gmx::ConstraintVariable::Positions);
878 constr->apply(needsLogging, computeEnergy, step, 0, 1.0, xCurrent,
879 shfc->adir_xnew.arrayRefWithPadding(), {}, box, lambda[efptBONDED],
880 &(dvdlambda[efptBONDED]), {}, computeVirial, nullptr,
881 gmx::ConstraintVariable::Positions);
/* Turn the constrained trial positions into an acceleration estimate,
 * removing the direct f/m contribution. */
883 for (n = 0; n < end; n++)
885 for (d = 0; d < DIM; d++)
887 xnew[n][d] = -(2 * x[n][d] - xnold[n][d] - xnew[n][d]) / gmx::square(dt)
888 - f[n][d] * md->invmass[n];
890 clear_rvec(acc_dir[n]);
893 /* Project the acceleration on the old bond directions */
894 constr->apply(needsLogging, computeEnergy, step, 0, 1.0, xOld, shfc->adir_xnew.arrayRefWithPadding(),
895 acc_dir, box, lambda[efptBONDED], &(dvdlambda[efptBONDED]), {}, computeVirial,
896 nullptr, gmx::ConstraintVariable::Deriv_FlexCon);
/* Main entry point: relax shell positions (and flexible constraints) at one
 * MD step by iterative energy minimization.  Keeps two coordinate/force
 * buffers (Min = best so far, Try = current trial), evaluates forces with
 * do_force, takes steepest-descent steps until the RMS force drops below
 * inputrec->em_tol or inputrec->niter iterations are reached, then copies
 * the best coordinates and forces back to the caller's arrays.
 * NOTE(review): many lines (braces, some declarations and conditions) are
 * elided in this extract; code lines are kept byte-identical. */
899 void relax_shell_flexcon(FILE* fplog,
901 const gmx_multisim_t* ms,
903 gmx_enfrot* enforcedRotation,
905 const t_inputrec* inputrec,
906 gmx::ImdSession* imdSession,
910 const gmx_localtop_t* top,
911 gmx::Constraints* constr,
912 gmx_enerdata_t* enerd,
914 ArrayRefWithPadding<RVec> xPadded,
915 ArrayRefWithPadding<RVec> vPadded,
917 ArrayRef<real> lambda,
919 ArrayRefWithPadding<RVec> f,
923 gmx_wallcycle_t wcycle,
926 gmx::MdrunScheduleWorkload* runScheduleWork,
929 gmx::VirtualSitesHandler* vsite,
930 const DDBalanceRegionHandler& ddBalanceRegionHandler)
936 int nat, dd_ac0, dd_ac1 = 0, i;
937 int homenr = md->homenr, end = homenr;
/* Min/Try index the double buffers; swapping Min flips both. */
938 int d, Min = 0, count = 0;
939 #define Try (1 - Min) /* At start Try = 1 */
941 const bool bCont = (mdstep == inputrec->init_step) && inputrec->bContinuation;
942 const bool bInit = (mdstep == inputrec->init_step) || shfc->requireInit;
943 const real ftol = inputrec->em_tol;
944 const int number_steps = inputrec->niter;
945 ArrayRef<t_shell> shells = shfc->shells;
946 const int nflexcon = shfc->nflexcon;
/* With DD the buffers must also cover communicated vsite/constraint atoms. */
948 if (DOMAINDECOMP(cr))
950 nat = dd_natoms_vsite(cr->dd);
953 dd_get_constraint_range(cr->dd, &dd_ac0, &dd_ac1);
954 nat = std::max(nat, dd_ac1);
962 for (i = 0; (i < 2); i++)
964 shfc->x[i].resizeWithPadding(nat);
965 shfc->f[i].resizeWithPadding(nat);
968 /* Create views that we can swap for trail and minimum for positions and forces */
969 ArrayRefWithPadding<RVec> posWithPadding[2];
970 ArrayRefWithPadding<RVec> forceWithPadding[2];
971 ArrayRef<RVec> pos[2];
972 ArrayRef<RVec> force[2];
973 for (i = 0; (i < 2); i++)
975 posWithPadding[i] = shfc->x[i].arrayRefWithPadding();
976 pos[i] = posWithPadding[i].paddedArrayRef();
977 forceWithPadding[i] = shfc->f[i].arrayRefWithPadding();
978 force[i] = forceWithPadding[i].paddedArrayRef();
981 ArrayRef<RVec> x = xPadded.unpaddedArrayRef();
982 ArrayRef<RVec> v = vPadded.unpaddedArrayRef();
984 if (bDoNS && inputrec->pbcType != PbcType::No && !DOMAINDECOMP(cr))
986 /* This is the only time where the coordinates are used
987 * before do_force is called, which normally puts all
988 * charge groups in the box.
990 put_atoms_in_box_omp(fr->pbcType, box, x.subArray(0, md->homenr),
991 gmx_omp_nthreads_get(emntDefault));
/* Flexible constraints need the previous-step positions, reconstructed
 * here from the current positions and velocities. */
996 shfc->acc_dir.resize(nat);
997 shfc->x_old.resizeWithPadding(nat);
998 ArrayRef<RVec> x_old = shfc->x_old.arrayRefWithPadding().unpaddedArrayRef();
999 for (i = 0; i < homenr; i++)
1001 for (d = 0; d < DIM; d++)
1003 x_old[i][d] = x[i][d] - v[i][d] * inputrec->delta_t;
1008 /* Do a prediction of the shell positions, when appropriate.
1009 * Without velocities (EM, NM, BD) we only do initial prediction.
1011 if (shfc->predictShells && !bCont && (EI_STATE_VELOCITY(inputrec->eI) || bInit))
1013 predict_shells(fplog, x, v, inputrec->delta_t, shells, md->massT, nullptr, bInit);
1016 /* Calculate the forces first time around */
1019 pr_rvecs(debug, 0, "x b4 do_force", as_rvec_array(x.data()), homenr);
1021 int shellfc_flags = force_flags | (bVerbose ? GMX_FORCE_ENERGY : 0);
1022 do_force(fplog, cr, ms, inputrec, nullptr, enforcedRotation, imdSession, pull_work, mdstep,
1023 nrnb, wcycle, top, box, xPadded, hist, forceWithPadding[Min], force_vir, md, enerd,
1024 lambda, fr, runScheduleWork, vsite, mu_tot, t, nullptr,
1025 (bDoNS ? GMX_FORCE_NS : 0) | shellfc_flags, ddBalanceRegionHandler);
/* Initial flexible-constraint directions and their force contribution. */
1030 init_adir(shfc, constr, inputrec, cr, dd_ac1, mdstep, md, end, shfc->x_old.arrayRefWithPadding(),
1031 x, xPadded, force[Min], shfc->acc_dir, box, lambda, &dum);
1033 for (i = 0; i < end; i++)
1035 sf_dir += md->massT[i] * norm2(shfc->acc_dir[i]);
1038 accumulatePotentialEnergies(enerd, lambda, inputrec->fepvals);
1039 Epot[Min] = enerd->term[F_EPOT];
1041 df[Min] = rms_force(cr, forceWithPadding[Min].paddedArrayRef(), shells, nflexcon, &sf_dir, &Epot[Min]);
1045 fprintf(debug, "df = %g %g\n", df[Min], df[Try]);
1050 pr_rvecs(debug, 0, "force0", as_rvec_array(force[Min].data()), md->nr);
1053 if (!shells.empty() || nflexcon > 0)
1055 /* Copy x to pos[Min] & pos[Try]: during minimization only the
1056 * shell positions are updated, therefore the other particles must
1057 * be set here, in advance.
1059 std::copy(xPadded.paddedArrayRef().begin(), xPadded.paddedArrayRef().end(),
1060 posWithPadding[Min].paddedArrayRef().begin());
1061 std::copy(xPadded.paddedArrayRef().begin(), xPadded.paddedArrayRef().end(),
1062 posWithPadding[Try].paddedArrayRef().begin());
1065 if (bVerbose && MASTER(cr))
1067 print_epot(stdout, mdstep, 0, Epot[Min], df[Min], nflexcon, sf_dir);
1072 fprintf(debug, "%17s: %14.10e\n", interaction_function[F_EKIN].longname, enerd->term[F_EKIN]);
1073 fprintf(debug, "%17s: %14.10e\n", interaction_function[F_EPOT].longname, enerd->term[F_EPOT]);
1074 fprintf(debug, "%17s: %14.10e\n", interaction_function[F_ETOT].longname, enerd->term[F_ETOT]);
1075 fprintf(debug, "SHELLSTEP %s\n", gmx_step_str(mdstep, sbuf));
1078 /* First check whether we should do shells, or whether the force is
1079 * low enough even without minimization.
1081 bool bConverged = (df[Min] < ftol);
/* Main minimization loop: iterate until converged or niter reached. */
1083 for (count = 1; (!(bConverged) && (count < number_steps)); count++)
1087 vsite->construct(pos[Min], inputrec->delta_t, v, box);
1092 init_adir(shfc, constr, inputrec, cr, dd_ac1, mdstep, md, end,
1093 shfc->x_old.arrayRefWithPadding(), x, posWithPadding[Min], force[Min],
1094 shfc->acc_dir, box, lambda, &dum);
1096 directional_sd(pos[Min], pos[Try], shfc->acc_dir, end, fr->fc_stepsize);
1099 /* New positions, Steepest descent */
1100 shell_pos_sd(pos[Min], pos[Try], force[Min], shells, count);
1104 pr_rvecs(debug, 0, "RELAX: pos[Min] ", as_rvec_array(pos[Min].data()), homenr);
1105 pr_rvecs(debug, 0, "RELAX: pos[Try] ", as_rvec_array(pos[Try].data()), homenr);
1107 /* Try the new positions */
1108 do_force(fplog, cr, ms, inputrec, nullptr, enforcedRotation, imdSession, pull_work, 1, nrnb,
1109 wcycle, top, box, posWithPadding[Try], hist, forceWithPadding[Try], force_vir, md,
1110 enerd, lambda, fr, runScheduleWork, vsite, mu_tot, t, nullptr, shellfc_flags,
1111 ddBalanceRegionHandler);
1112 accumulatePotentialEnergies(enerd, lambda, inputrec->fepvals);
1115 pr_rvecs(debug, 0, "RELAX: force[Min]", as_rvec_array(force[Min].data()), homenr);
1116 pr_rvecs(debug, 0, "RELAX: force[Try]", as_rvec_array(force[Try].data()), homenr);
1121 init_adir(shfc, constr, inputrec, cr, dd_ac1, mdstep, md, end,
1122 shfc->x_old.arrayRefWithPadding(), x, posWithPadding[Try], force[Try],
1123 shfc->acc_dir, box, lambda, &dum);
1125 ArrayRef<const RVec> acc_dir = shfc->acc_dir;
1126 for (i = 0; i < end; i++)
1128 sf_dir += md->massT[i] * norm2(acc_dir[i]);
1132 Epot[Try] = enerd->term[F_EPOT];
1134 df[Try] = rms_force(cr, force[Try], shells, nflexcon, &sf_dir, &Epot[Try]);
1138 fprintf(debug, "df = %g %g\n", df[Min], df[Try]);
1145 pr_rvecs(debug, 0, "F na do_force", as_rvec_array(force[Try].data()), homenr);
1149 fprintf(debug, "SHELL ITER %d\n", count);
1150 dump_shells(debug, force[Try], ftol, shells);
1154 if (bVerbose && MASTER(cr))
1156 print_epot(stdout, mdstep, count, Epot[Try], df[Try], nflexcon, sf_dir);
1159 bConverged = (df[Try] < ftol);
/* Accept the trial state when it lowered the RMS force. */
1161 if ((df[Try] < df[Min]))
1165 fprintf(debug, "Swapping Min and Try\n");
1169 /* Correct the velocities for the flexible constraints */
1170 invdt = 1 / inputrec->delta_t;
1171 for (i = 0; i < end; i++)
1173 for (d = 0; d < DIM; d++)
1175 v[i][d] += (pos[Try][i][d] - pos[Min][i][d]) * invdt;
/* Trial rejected: shrink the SD step before the next attempt. */
1183 decrease_step_size(shells);
1186 shfc->numForceEvaluations += count;
1189 shfc->numConvergedIterations++;
1191 if (MASTER(cr) && !(bConverged))
1193 /* Note that the energies and virial are incorrect when not converged */
1196 fprintf(fplog, "step %s: EM did not converge in %d iterations, RMS force %6.2e\n",
1197 gmx_step_str(mdstep, sbuf), number_steps, df[Min]);
1199 fprintf(stderr, "step %s: EM did not converge in %d iterations, RMS force %6.2e\n",
1200 gmx_step_str(mdstep, sbuf), number_steps, df[Min]);
1203 /* Copy back the coordinates and the forces */
1204 std::copy(pos[Min].begin(), pos[Min].end(), x.data());
1205 std::copy(force[Min].begin(), force[Min].end(), f.unpaddedArrayRef().begin());
1208 void done_shellfc(FILE* fplog, gmx_shellfc_t* shfc, int64_t numSteps)
1210 if (shfc && fplog && numSteps > 0)
1212 double numStepsAsDouble = static_cast<double>(numSteps);
1213 fprintf(fplog, "Fraction of iterations that converged: %.2f %%\n",
1214 (shfc->numConvergedIterations * 100.0) / numStepsAsDouble);
1215 fprintf(fplog, "Average number of force evaluations per MD step: %.2f\n\n",
1216 shfc->numForceEvaluations / numStepsAsDouble);