Use ArrayRef in signatures of constraints and some pulling
[alexxy/gromacs.git] / src / gromacs / mdrun / md.cpp
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5  * Copyright (c) 2001-2004, The GROMACS development team.
6  * Copyright (c) 2011-2019,2020,2021, by the GROMACS development team, led by
7  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8  * and including many others, as listed in the AUTHORS file in the
9  * top-level source directory and at http://www.gromacs.org.
10  *
11  * GROMACS is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public License
13  * as published by the Free Software Foundation; either version 2.1
14  * of the License, or (at your option) any later version.
15  *
16  * GROMACS is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with GROMACS; if not, see
23  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
25  *
26  * If you want to redistribute modifications to GROMACS, please
27  * consider that scientific software is very special. Version
28  * control is crucial - bugs must be traceable. We will be happy to
29  * consider code for inclusion in the official distribution, but
30  * derived work must not be called official GROMACS. Details are found
31  * in the README & COPYING files - if they are missing, get the
32  * official version at http://www.gromacs.org.
33  *
34  * To help us fund GROMACS development, we humbly ask that you cite
35  * the research papers on the package. Check out http://www.gromacs.org.
36  */
37 /*! \internal \file
38  *
39  * \brief Implements the integrator for normal molecular dynamics simulations
40  *
41  * \author David van der Spoel <david.vanderspoel@icm.uu.se>
42  * \ingroup module_mdrun
43  */
44 #include "gmxpre.h"
45
46 #include <cinttypes>
47 #include <cmath>
48 #include <cstdio>
49 #include <cstdlib>
50
51 #include <algorithm>
52 #include <memory>
53 #include <numeric>
54
55 #include "gromacs/applied_forces/awh/awh.h"
56 #include "gromacs/applied_forces/awh/read_params.h"
57 #include "gromacs/commandline/filenm.h"
58 #include "gromacs/domdec/collect.h"
59 #include "gromacs/domdec/dlbtiming.h"
60 #include "gromacs/domdec/domdec.h"
61 #include "gromacs/domdec/domdec_network.h"
62 #include "gromacs/domdec/domdec_struct.h"
63 #include "gromacs/domdec/gpuhaloexchange.h"
64 #include "gromacs/domdec/mdsetup.h"
65 #include "gromacs/domdec/partition.h"
66 #include "gromacs/essentialdynamics/edsam.h"
67 #include "gromacs/ewald/pme_load_balancing.h"
68 #include "gromacs/ewald/pme_pp.h"
69 #include "gromacs/fileio/trxio.h"
70 #include "gromacs/gmxlib/network.h"
71 #include "gromacs/gmxlib/nrnb.h"
72 #include "gromacs/gpu_utils/device_stream_manager.h"
73 #include "gromacs/gpu_utils/gpu_utils.h"
74 #include "gromacs/imd/imd.h"
75 #include "gromacs/listed_forces/listed_forces.h"
76 #include "gromacs/math/functions.h"
77 #include "gromacs/math/invertmatrix.h"
78 #include "gromacs/math/vec.h"
79 #include "gromacs/math/vectypes.h"
80 #include "gromacs/mdlib/checkpointhandler.h"
81 #include "gromacs/mdlib/compute_io.h"
82 #include "gromacs/mdlib/constr.h"
83 #include "gromacs/mdlib/coupling.h"
84 #include "gromacs/mdlib/ebin.h"
85 #include "gromacs/mdlib/enerdata_utils.h"
86 #include "gromacs/mdlib/energyoutput.h"
87 #include "gromacs/mdlib/expanded.h"
88 #include "gromacs/mdlib/force.h"
89 #include "gromacs/mdlib/force_flags.h"
90 #include "gromacs/mdlib/forcerec.h"
91 #include "gromacs/mdlib/freeenergyparameters.h"
92 #include "gromacs/mdlib/md_support.h"
93 #include "gromacs/mdlib/mdatoms.h"
94 #include "gromacs/mdlib/mdoutf.h"
95 #include "gromacs/mdlib/membed.h"
96 #include "gromacs/mdlib/resethandler.h"
97 #include "gromacs/mdlib/sighandler.h"
98 #include "gromacs/mdlib/simulationsignal.h"
99 #include "gromacs/mdlib/stat.h"
100 #include "gromacs/mdlib/stophandler.h"
101 #include "gromacs/mdlib/tgroup.h"
102 #include "gromacs/mdlib/trajectory_writing.h"
103 #include "gromacs/mdlib/update.h"
104 #include "gromacs/mdlib/update_constrain_gpu.h"
105 #include "gromacs/mdlib/update_vv.h"
106 #include "gromacs/mdlib/vcm.h"
107 #include "gromacs/mdlib/vsite.h"
108 #include "gromacs/mdrunutility/handlerestart.h"
109 #include "gromacs/mdrunutility/multisim.h"
110 #include "gromacs/mdrunutility/printtime.h"
111 #include "gromacs/mdtypes/awh_history.h"
112 #include "gromacs/mdtypes/awh_params.h"
113 #include "gromacs/mdtypes/commrec.h"
114 #include "gromacs/mdtypes/df_history.h"
115 #include "gromacs/mdtypes/energyhistory.h"
116 #include "gromacs/mdtypes/fcdata.h"
117 #include "gromacs/mdtypes/forcebuffers.h"
118 #include "gromacs/mdtypes/forcerec.h"
119 #include "gromacs/mdtypes/group.h"
120 #include "gromacs/mdtypes/inputrec.h"
121 #include "gromacs/mdtypes/interaction_const.h"
122 #include "gromacs/mdtypes/md_enums.h"
123 #include "gromacs/mdtypes/mdatom.h"
124 #include "gromacs/mdtypes/mdrunoptions.h"
125 #include "gromacs/mdtypes/multipletimestepping.h"
126 #include "gromacs/mdtypes/observableshistory.h"
127 #include "gromacs/mdtypes/pullhistory.h"
128 #include "gromacs/mdtypes/simulation_workload.h"
129 #include "gromacs/mdtypes/state.h"
130 #include "gromacs/mdtypes/state_propagator_data_gpu.h"
131 #include "gromacs/modularsimulator/energydata.h"
132 #include "gromacs/nbnxm/gpu_data_mgmt.h"
133 #include "gromacs/nbnxm/nbnxm.h"
134 #include "gromacs/pbcutil/pbc.h"
135 #include "gromacs/pulling/output.h"
136 #include "gromacs/pulling/pull.h"
137 #include "gromacs/swap/swapcoords.h"
138 #include "gromacs/timing/wallcycle.h"
139 #include "gromacs/timing/walltime_accounting.h"
140 #include "gromacs/topology/atoms.h"
141 #include "gromacs/topology/idef.h"
142 #include "gromacs/topology/mtop_util.h"
143 #include "gromacs/topology/topology.h"
144 #include "gromacs/trajectory/trajectoryframe.h"
145 #include "gromacs/utility/basedefinitions.h"
146 #include "gromacs/utility/cstringutil.h"
147 #include "gromacs/utility/fatalerror.h"
148 #include "gromacs/utility/logger.h"
149 #include "gromacs/utility/real.h"
150 #include "gromacs/utility/smalloc.h"
151
152 #include "legacysimulator.h"
153 #include "replicaexchange.h"
154 #include "shellfc.h"
155
156 using gmx::SimulationSignaller;
157
158 void gmx::LegacySimulator::do_md()
159 {
160     // TODO Historically, the EM and MD "integrators" used different
161     // names for the t_inputrec *parameter, but these must have the
162     // same name, now that it's a member of a struct. We use this ir
163     // alias to avoid a large ripple of nearly useless changes.
164     // t_inputrec is being replaced by IMdpOptionsProvider, so this
165     // will go away eventually.
166     const t_inputrec* ir = inputrec;
167
168     int64_t      step, step_rel;
169     double       t, t0 = ir->init_t;
170     gmx_bool     bGStatEveryStep, bGStat, bCalcVir, bCalcEnerStep, bCalcEner;
171     gmx_bool     bNS = FALSE, bNStList, bStopCM, bFirstStep, bInitStep, bLastStep = FALSE;
172     gmx_bool     bDoDHDL = FALSE, bDoFEP = FALSE, bDoExpanded = FALSE;
173     gmx_bool     do_ene, do_log, do_verbose;
174     gmx_bool     bMasterState;
175     unsigned int force_flags;
176     tensor force_vir = { { 0 } }, shake_vir = { { 0 } }, total_vir = { { 0 } }, pres = { { 0 } };
177     int    i, m;
178     rvec   mu_tot;
179     matrix pressureCouplingMu, M;
180     gmx_repl_ex_t     repl_ex = nullptr;
181     gmx_global_stat_t gstat;
182     gmx_shellfc_t*    shellfc;
183     gmx_bool          bSumEkinhOld, bDoReplEx, bExchanged, bNeedRepartition;
184     gmx_bool          bTrotter;
185     real              dvdl_constr;
186     std::vector<RVec> cbuf;
187     matrix            lastbox;
188     int               lamnew = 0;
189     /* for FEP */
190     int       nstfep = 0;
191     double    cycles;
192     real      saved_conserved_quantity = 0;
193     real      last_ekin                = 0;
194     t_extmass MassQ;
195     char      sbuf[STEPSTRSIZE], sbuf2[STEPSTRSIZE];
196
197     /* PME load balancing data for GPU kernels */
198     gmx_bool bPMETune         = FALSE;
199     gmx_bool bPMETunePrinting = FALSE;
200
201     bool bInteractiveMDstep = false;
202
203     /* Domain decomposition could incorrectly miss a bonded
204        interaction, but checking for that requires a global
205        communication stage, which does not otherwise happen in DD
206        code. So we do that alongside the first global energy reduction
207        after a new DD is made. These variables handle whether the
208        check happens, and the result it returns. */
209     bool shouldCheckNumberOfBondedInteractions = false;
210     int  totalNumberOfBondedInteractions       = -1;
211
212     SimulationSignals signals;
213     // Most global communication stages don't propagate mdrun
214     // signals, and use this object to achieve that.
215     SimulationSignaller nullSignaller(nullptr, nullptr, nullptr, false, false);
216
217     if (!mdrunOptions.writeConfout)
218     {
219         // This is on by default, and the main known use case for
220         // turning it off is for convenience in benchmarking, which is
221         // something that should not show up in the general user
222         // interface.
223         GMX_LOG(mdlog.info)
224                 .asParagraph()
225                 .appendText(
226                         "The -noconfout functionality is deprecated, and may be removed in a "
227                         "future version.");
228     }
229
230     /* md-vv uses averaged full step velocities for T-control
231        md-vv-avek uses averaged half step velocities for T-control (but full step ekin for P control)
232        md uses averaged half step kinetic energies to determine temperature unless defined otherwise by GMX_EKIN_AVE_VEL. */
233     bTrotter = (EI_VV(ir->eI)
234                 && (inputrecNptTrotter(ir) || inputrecNphTrotter(ir) || inputrecNvtTrotter(ir)));
235
236     const bool bRerunMD = false;
237
238     int nstglobalcomm = computeGlobalCommunicationPeriod(mdlog, ir, cr);
239     bGStatEveryStep   = (nstglobalcomm == 1);
240
241     const SimulationGroups* groups = &top_global.groups;
242
243     std::unique_ptr<EssentialDynamics> ed = nullptr;
244     if (opt2bSet("-ei", nfile, fnm))
245     {
246         /* Initialize essential dynamics sampling */
247         ed = init_edsam(mdlog,
248                         opt2fn_null("-ei", nfile, fnm),
249                         opt2fn("-eo", nfile, fnm),
250                         top_global,
251                         *ir,
252                         cr,
253                         constr,
254                         state_global,
255                         observablesHistory,
256                         oenv,
257                         startingBehavior);
258     }
259     else if (observablesHistory->edsamHistory)
260     {
261         gmx_fatal(FARGS,
262                   "The checkpoint is from a run with essential dynamics sampling, "
263                   "but the current run did not specify the -ei option. "
264                   "Either specify the -ei option to mdrun, or do not use this checkpoint file.");
265     }
266
267     int*                fep_state = MASTER(cr) ? &state_global->fep_state : nullptr;
268     gmx::ArrayRef<real> lambda    = MASTER(cr) ? state_global->lambda : gmx::ArrayRef<real>();
269     initialize_lambdas(fplog, *ir, MASTER(cr), fep_state, lambda);
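    // Note on the ArrayRef usage above: on non-master ranks lambda is a
    // default-constructed gmx::ArrayRef<real>, i.e. an empty, non-owning view
    // (data() == nullptr, size() == 0); initialize_lambdas() is also passed
    // MASTER(cr), presumably so it knows whether there is global lambda state
    // to fill in on this rank.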
270     Update upd(*ir, deform);
271     bool   doSimulatedAnnealing = false;
272     {
273         // TODO: Avoid changing inputrec (#3854)
274         // Simulated annealing updates the reference temperature.
275         auto* nonConstInputrec = const_cast<t_inputrec*>(inputrec);
276         doSimulatedAnnealing   = initSimulatedAnnealing(nonConstInputrec, &upd);
277     }
278     const bool useReplicaExchange = (replExParams.exchangeInterval > 0);
279
280     const t_fcdata& fcdata = *fr->fcdata;
281
282     bool simulationsShareState = false;
283     int  nstSignalComm         = nstglobalcomm;
284     {
285         // TODO This implementation of ensemble orientation restraints is nasty because
286         // a user can't just do multi-sim with single-sim orientation restraints.
287         bool usingEnsembleRestraints =
288                 (fcdata.disres->nsystems > 1) || ((ms != nullptr) && (fcdata.orires->nr != 0));
289         bool awhUsesMultiSim = (ir->bDoAwh && ir->awhParams->shareBiasMultisim() && (ms != nullptr));
290
291         // Replica exchange, ensemble restraints and AWH need all
292         // simulations to remain synchronized, so they need
293         // checkpoints and stop conditions to act on the same step, so
294         // the propagation of such signals must take place between
295         // simulations, not just within simulations.
296         // TODO: Make algorithm initializers set these flags.
297         simulationsShareState = useReplicaExchange || usingEnsembleRestraints || awhUsesMultiSim;
298
299         if (simulationsShareState)
300         {
301             // Inter-simulation signal communication does not need to happen
302             // often, so we use a minimum of 200 steps to reduce overhead.
303             const int c_minimumInterSimulationSignallingInterval = 200;
304             nstSignalComm = ((c_minimumInterSimulationSignallingInterval + nstglobalcomm - 1) / nstglobalcomm)
305                             * nstglobalcomm;
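            // As a concrete illustration of the rounding above: with
            // nstglobalcomm = 100 this gives nstSignalComm = 200, with
            // nstglobalcomm = 150 it gives 300, and with nstglobalcomm = 250
            // it gives 250, i.e. the smallest multiple of nstglobalcomm that
            // is at least c_minimumInterSimulationSignallingInterval.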
306         }
307     }
308
309     if (startingBehavior != StartingBehavior::RestartWithAppending)
310     {
311         pleaseCiteCouplingAlgorithms(fplog, *ir);
312     }
313     gmx_mdoutf*       outf = init_mdoutf(fplog,
314                                    nfile,
315                                    fnm,
316                                    mdrunOptions,
317                                    cr,
318                                    outputProvider,
319                                    mdModulesNotifier,
320                                    ir,
321                                    top_global,
322                                    oenv,
323                                    wcycle,
324                                    startingBehavior,
325                                    simulationsShareState,
326                                    ms);
327     gmx::EnergyOutput energyOutput(mdoutf_get_fp_ene(outf),
328                                    top_global,
329                                    *ir,
330                                    pull_work,
331                                    mdoutf_get_fp_dhdl(outf),
332                                    false,
333                                    startingBehavior,
334                                    simulationsShareState,
335                                    mdModulesNotifier);
336
337     gstat = global_stat_init(ir);
338
339     const auto& simulationWork     = runScheduleWork->simulationWork;
340     const bool  useGpuForPme       = simulationWork.useGpuPme;
341     const bool  useGpuForNonbonded = simulationWork.useGpuNonbonded;
342     const bool  useGpuForBufferOps = simulationWork.useGpuBufferOps;
343     const bool  useGpuForUpdate    = simulationWork.useGpuUpdate;
344
345     /* Check for polarizable models and flexible constraints */
346     shellfc = init_shell_flexcon(fplog,
347                                  top_global,
348                                  constr ? constr->numFlexibleConstraints() : 0,
349                                  ir->nstcalcenergy,
350                                  DOMAINDECOMP(cr),
351                                  useGpuForPme);
352
353     {
354         double io = compute_io(ir, top_global.natoms, *groups, energyOutput.numEnergyTerms(), 1);
355         if ((io > 2000) && MASTER(cr))
356         {
357             fprintf(stderr, "\nWARNING: This run will generate roughly %.0f Mb of data\n\n", io);
358         }
359     }
360
361     // Local state only becomes valid now.
362     std::unique_ptr<t_state> stateInstance;
363     t_state*                 state;
364
365     gmx_localtop_t top(top_global.ffparams);
366
367     auto mdatoms = mdAtoms->mdatoms();
368
369     ForceBuffers f(fr->useMts,
370                    ((useGpuForNonbonded && useGpuForBufferOps) || useGpuForUpdate)
371                            ? PinningPolicy::PinnedIfSupported
372                            : PinningPolicy::CannotBePinned);
373     if (DOMAINDECOMP(cr))
374     {
375         stateInstance = std::make_unique<t_state>();
376         state         = stateInstance.get();
377         dd_init_local_state(*cr->dd, state_global, state);
378
379         /* Distribute the charge groups over the nodes from the master node */
380         dd_partition_system(fplog,
381                             mdlog,
382                             ir->init_step,
383                             cr,
384                             TRUE,
385                             1,
386                             state_global,
387                             top_global,
388                             *ir,
389                             imdSession,
390                             pull_work,
391                             state,
392                             &f,
393                             mdAtoms,
394                             &top,
395                             fr,
396                             vsite,
397                             constr,
398                             nrnb,
399                             nullptr,
400                             FALSE);
401         shouldCheckNumberOfBondedInteractions = true;
402         upd.setNumAtoms(state->natoms);
403     }
404     else
405     {
406         state_change_natoms(state_global, state_global->natoms);
407         /* Copy the pointer to the global state */
408         state = state_global;
409
410         /* Generate and initialize new topology */
411         mdAlgorithmsSetupAtomData(cr, *ir, top_global, &top, fr, &f, mdAtoms, constr, vsite, shellfc);
412
413         upd.setNumAtoms(state->natoms);
414     }
415
416     std::unique_ptr<UpdateConstrainGpu> integrator;
417
418     StatePropagatorDataGpu* stateGpu = fr->stateGpu;
419
420     // TODO: the assertions below should be handled by UpdateConstraintsBuilder.
421     if (useGpuForUpdate)
422     {
423         GMX_RELEASE_ASSERT(!DOMAINDECOMP(cr) || ddUsesUpdateGroups(*cr->dd) || constr == nullptr
424                                    || constr->numConstraintsTotal() == 0,
425                            "Constraints in domain decomposition are only supported with update "
426                            "groups if using GPU update.\n");
427         GMX_RELEASE_ASSERT(ir->eConstrAlg != ConstraintAlgorithm::Shake || constr == nullptr
428                                    || constr->numConstraintsTotal() == 0,
429                            "SHAKE is not supported with GPU update.");
430         GMX_RELEASE_ASSERT(useGpuForPme || (useGpuForNonbonded && simulationWork.useGpuBufferOps),
431                            "Either PME or short-ranged non-bonded interaction tasks must run on "
432                            "the GPU to use GPU update.\n");
433         GMX_RELEASE_ASSERT(ir->eI == IntegrationAlgorithm::MD,
434                            "Only the md integrator is supported with the GPU update.\n");
435         GMX_RELEASE_ASSERT(
436                 ir->etc != TemperatureCoupling::NoseHoover,
437                 "Nose-Hoover temperature coupling is not supported with the GPU update.\n");
438         GMX_RELEASE_ASSERT(
439                 ir->epc == PressureCoupling::No || ir->epc == PressureCoupling::ParrinelloRahman
440                         || ir->epc == PressureCoupling::Berendsen || ir->epc == PressureCoupling::CRescale,
441                 "Only Parrinello-Rahman, Berendsen, and C-rescale pressure coupling are supported "
442                 "with the GPU update.\n");
443         GMX_RELEASE_ASSERT(!mdatoms->haveVsites,
444                            "Virtual sites are not supported with the GPU update.\n");
445         GMX_RELEASE_ASSERT(ed == nullptr,
446                            "Essential dynamics is not supported with the GPU update.\n");
447         GMX_RELEASE_ASSERT(!ir->bPull || !pull_have_constraint(*ir->pull),
448                            "Constraints pulling is not supported with the GPU update.\n");
449         GMX_RELEASE_ASSERT(fcdata.orires->nr == 0,
450                            "Orientation restraints are not supported with the GPU update.\n");
451         GMX_RELEASE_ASSERT(
452                 ir->efep == FreeEnergyPerturbationType::No
453                         || (!haveFepPerturbedMasses(top_global) && !havePerturbedConstraints(top_global)),
454                 "Free energy perturbation of masses and constraints are not supported with the GPU "
455                 "update.");
456
457         if (constr != nullptr && constr->numConstraintsTotal() > 0)
458         {
459             GMX_LOG(mdlog.info)
460                     .asParagraph()
461                     .appendText("Updating coordinates and applying constraints on the GPU.");
462         }
463         else
464         {
465             GMX_LOG(mdlog.info).asParagraph().appendText("Updating coordinates on the GPU.");
466         }
467         GMX_RELEASE_ASSERT(fr->deviceStreamManager != nullptr,
468                            "Device stream manager should be initialized in order to use GPU "
469                            "update-constraints.");
470         GMX_RELEASE_ASSERT(
471                 fr->deviceStreamManager->streamIsValid(gmx::DeviceStreamType::UpdateAndConstraints),
472                 "Update stream should be initialized in order to use GPU "
473                 "update-constraints.");
474         integrator = std::make_unique<UpdateConstrainGpu>(
475                 *ir,
476                 top_global,
477                 ekind->ngtc,
478                 fr->deviceStreamManager->context(),
479                 fr->deviceStreamManager->stream(gmx::DeviceStreamType::UpdateAndConstraints),
480                 stateGpu->xUpdatedOnDevice(),
481                 wcycle);
482
483         integrator->setPbc(PbcType::Xyz, state->box);
484     }
485
486     if (useGpuForPme || (useGpuForNonbonded && useGpuForBufferOps) || useGpuForUpdate)
487     {
488         changePinningPolicy(&state->x, PinningPolicy::PinnedIfSupported);
489     }
490     if (useGpuForUpdate)
491     {
492         changePinningPolicy(&state->v, PinningPolicy::PinnedIfSupported);
493     }
494
495     // NOTE: The global state is no longer used at this point.
496     // But state_global is still used as temporary storage space for writing
497     // the global state to file and potentially for replica exchange.
498     // (Global topology should persist.)
499
500     update_mdatoms(mdatoms, state->lambda[FreeEnergyPerturbationCouplingType::Mass]);
501
502     if (ir->bExpanded)
503     {
504         /* Check nstexpanded here, because the grompp check was broken */
505         if (ir->expandedvals->nstexpanded % ir->nstcalcenergy != 0)
506         {
507             gmx_fatal(FARGS,
508                       "With expanded ensemble, nstexpanded should be a multiple of nstcalcenergy");
509         }
510         init_expanded_ensemble(startingBehavior != StartingBehavior::NewSimulation, ir, state->dfhist);
511     }
512
513     if (MASTER(cr))
514     {
515         EnergyData::initializeEnergyHistory(startingBehavior, observablesHistory, &energyOutput);
516     }
517
518     preparePrevStepPullCom(ir,
519                            pull_work,
520                            gmx::arrayRefFromArray(mdatoms->massT, mdatoms->nr),
521                            state,
522                            state_global,
523                            cr,
524                            startingBehavior != StartingBehavior::NewSimulation);
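    // The call above shows the ArrayRef pattern this change introduces:
    // gmx::arrayRefFromArray(ptr, n) wraps a raw pointer plus a size into a
    // non-owning gmx::ArrayRef<real> view, so callees such as
    // preparePrevStepPullCom() no longer take pointer/size pairs. A minimal
    // sketch (hypothetical data, for illustration only):
    //
    //     std::vector<real> masses = { 1.008, 15.999 };
    //     gmx::ArrayRef<const real> view1 = masses;  // implicit, from a container
    //     gmx::ArrayRef<const real> view2 =
    //             gmx::arrayRefFromArray(masses.data(), masses.size());
    //     GMX_ASSERT(view1.size() == view2.size(), "Same elements, two views");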
525
526     // TODO: Remove this by converting AWH into a ForceProvider
527     auto awh = prepareAwhModule(fplog,
528                                 *ir,
529                                 state_global,
530                                 cr,
531                                 ms,
532                                 startingBehavior != StartingBehavior::NewSimulation,
533                                 shellfc != nullptr,
534                                 opt2fn("-awh", nfile, fnm),
535                                 pull_work);
536
537     if (useReplicaExchange && MASTER(cr))
538     {
539         repl_ex = init_replica_exchange(fplog, ms, top_global.natoms, ir, replExParams);
540     }
541     /* PME tuning is only supported in the Verlet scheme, with PME for
542      * Coulomb. It is not supported with only LJ PME. */
543     bPMETune = (mdrunOptions.tunePme && EEL_PME(fr->ic->eeltype) && !mdrunOptions.reproducible
544                 && ir->cutoff_scheme != CutoffScheme::Group);
545
546     pme_load_balancing_t* pme_loadbal = nullptr;
547     if (bPMETune)
548     {
549         pme_loadbal_init(
550                 &pme_loadbal, cr, mdlog, *ir, state->box, *fr->ic, *fr->nbv, fr->pmedata, fr->nbv->useGpu());
551     }
552
553     if (!ir->bContinuation)
554     {
555         if (state->flags & enumValueToBitMask(StateEntry::V))
556         {
557             auto v = makeArrayRef(state->v);
558             /* Set the velocities of vsites, shells and frozen atoms to zero */
559             for (i = 0; i < mdatoms->homenr; i++)
560             {
561                 if (mdatoms->ptype[i] == ParticleType::Shell)
562                 {
563                     clear_rvec(v[i]);
564                 }
565                 else if (mdatoms->cFREEZE)
566                 {
567                     for (m = 0; m < DIM; m++)
568                     {
569                         if (ir->opts.nFreeze[mdatoms->cFREEZE[i]][m])
570                         {
571                             v[i][m] = 0;
572                         }
573                     }
574                 }
575             }
576         }
577
578         if (constr)
579         {
580             /* Constrain the initial coordinates and velocities */
581             do_constrain_first(fplog,
582                                constr,
583                                ir,
584                                mdatoms->nr,
585                                mdatoms->homenr,
586                                state->x.arrayRefWithPadding(),
587                                state->v.arrayRefWithPadding(),
588                                state->box,
589                                state->lambda[FreeEnergyPerturbationCouplingType::Bonded]);
590         }
591     }
592
593     if (ir->efep != FreeEnergyPerturbationType::No)
594     {
595         /* Set the free energy calculation frequency to the greatest common divisor
596          * of nstdhdl, nstexpanded, the replica-exchange interval and the AWH sampling interval. */
597         nstfep = ir->fepvals->nstdhdl;
598         if (ir->bExpanded)
599         {
600             nstfep = std::gcd(ir->expandedvals->nstexpanded, nstfep);
601         }
602         if (useReplicaExchange)
603         {
604             nstfep = std::gcd(replExParams.exchangeInterval, nstfep);
605         }
606         if (ir->bDoAwh)
607         {
608             nstfep = std::gcd(ir->awhParams->nstSampleCoord(), nstfep);
609         }
610     }
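    // As a worked example of the gcd chain above (illustrative values only):
    // with nstdhdl = 50, nstexpanded = 20 and a replica-exchange interval of
    // 500, nstfep becomes gcd(500, gcd(20, 50)) = gcd(500, 10) = 10, so dH/dl
    // is available on every step where any of those consumers needs it.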
611
612     /* Be REALLY careful about what flags you set here. You CANNOT assume
613      * this is the first step, since we might be restarting from a checkpoint,
614      * and in that case we should not do any modifications to the state.
615      */
616     bStopCM = (ir->comm_mode != ComRemovalAlgorithm::No && !ir->bContinuation);
617
618     // When restarting from a checkpoint, it can be appropriate to
619     // initialize ekind from quantities in the checkpoint. Otherwise,
620     // compute_globals must initialize ekind before the simulation
621     // starts/restarts. However, only the master rank knows what was
622     // found in the checkpoint file, so we have to communicate in
623     // order to coordinate the restart.
624     //
625     // TODO Consider removing this communication if/when checkpoint
626     // reading directly follows .tpr reading, because all ranks can
627     // agree on hasReadEkinState at that time.
628     bool hasReadEkinState = MASTER(cr) ? state_global->ekinstate.hasReadEkinState : false;
629     if (PAR(cr))
630     {
631         gmx_bcast(sizeof(hasReadEkinState), &hasReadEkinState, cr->mpi_comm_mygroup);
632     }
633     if (hasReadEkinState)
634     {
635         restore_ekinstate_from_state(cr, ekind, &state_global->ekinstate);
636     }
637
638     unsigned int cglo_flags =
639             (CGLO_TEMPERATURE | CGLO_GSTAT | (EI_VV(ir->eI) ? CGLO_PRESSURE : 0)
640              | (EI_VV(ir->eI) ? CGLO_CONSTRAINT : 0) | (hasReadEkinState ? CGLO_READEKIN : 0));
641
642     bSumEkinhOld = FALSE;
643
644     t_vcm vcm(top_global.groups, *ir);
645     reportComRemovalInfo(fplog, vcm);
646
647     /* To minimize communication, compute_globals computes the COM velocity
648      * and the kinetic energy for the velocities without COM motion removed.
649      * Thus to get the kinetic energy without the COM contribution, we need
650      * to call compute_globals twice.
651      */
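    /* A minimal illustration of why two passes are needed: two particles of
     * mass m moving with the same velocity v have kinetic energy m*v^2 before
     * COM motion removal and zero afterwards; only the second call reports the
     * thermally meaningful value, once CGLO_STOPCM has removed the drift. */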
652     for (int cgloIteration = 0; cgloIteration < (bStopCM ? 2 : 1); cgloIteration++)
653     {
654         unsigned int cglo_flags_iteration = cglo_flags;
655         if (bStopCM && cgloIteration == 0)
656         {
657             cglo_flags_iteration |= CGLO_STOPCM;
658             cglo_flags_iteration &= ~CGLO_TEMPERATURE;
659         }
660         compute_globals(gstat,
661                         cr,
662                         ir,
663                         fr,
664                         ekind,
665                         makeConstArrayRef(state->x),
666                         makeConstArrayRef(state->v),
667                         state->box,
668                         mdatoms,
669                         nrnb,
670                         &vcm,
671                         nullptr,
672                         enerd,
673                         force_vir,
674                         shake_vir,
675                         total_vir,
676                         pres,
677                         gmx::ArrayRef<real>{},
678                         &nullSignaller,
679                         state->box,
680                         &totalNumberOfBondedInteractions,
681                         &bSumEkinhOld,
682                         cglo_flags_iteration
683                                 | (shouldCheckNumberOfBondedInteractions ? CGLO_CHECK_NUMBER_OF_BONDED_INTERACTIONS
684                                                                          : 0));
685         if (cglo_flags_iteration & CGLO_STOPCM)
686         {
687             /* At initialization, do not pass x with acceleration-correction mode
688              * to avoid (incorrect) correction of the initial coordinates.
689              */
690             auto x = (vcm.mode == ComRemovalAlgorithm::LinearAccelerationCorrection)
691                              ? ArrayRef<RVec>()
692                              : makeArrayRef(state->x);
693             process_and_stopcm_grp(fplog, &vcm, *mdatoms, x, makeArrayRef(state->v));
694             inc_nrnb(nrnb, eNR_STOPCM, mdatoms->homenr);
695         }
696     }
697     checkNumberOfBondedInteractions(mdlog,
698                                     cr,
699                                     totalNumberOfBondedInteractions,
700                                     top_global,
701                                     &top,
702                                     makeConstArrayRef(state->x),
703                                     state->box,
704                                     &shouldCheckNumberOfBondedInteractions);
705     if (ir->eI == IntegrationAlgorithm::VVAK)
706     {
707         /* a second call to get the half step temperature initialized as well */
708         /* we do the same call as above, but turn the pressure off -- internally to
709            compute_globals, this is recognized as a velocity verlet half-step
710            kinetic energy calculation. This minimizes excess variables, but
711            perhaps loses some logic? */
712
713         compute_globals(gstat,
714                         cr,
715                         ir,
716                         fr,
717                         ekind,
718                         makeConstArrayRef(state->x),
719                         makeConstArrayRef(state->v),
720                         state->box,
721                         mdatoms,
722                         nrnb,
723                         &vcm,
724                         nullptr,
725                         enerd,
726                         force_vir,
727                         shake_vir,
728                         total_vir,
729                         pres,
730                         gmx::ArrayRef<real>{},
731                         &nullSignaller,
732                         state->box,
733                         nullptr,
734                         &bSumEkinhOld,
735                         cglo_flags & ~CGLO_PRESSURE);
736     }
737
738     /* Calculate the initial half step temperature, and save the ekinh_old */
739     if (startingBehavior == StartingBehavior::NewSimulation)
740     {
741         for (i = 0; (i < ir->opts.ngtc); i++)
742         {
743             copy_mat(ekind->tcstat[i].ekinh, ekind->tcstat[i].ekinh_old);
744         }
745     }
746
747     /* Make an initialization call to get the Trotter variables set, as well as other constants
748        for non-Trotter temperature control */
749     auto trotter_seq = init_npt_vars(ir, state, &MassQ, bTrotter);
750
751     if (MASTER(cr))
752     {
753         if (!ir->bContinuation)
754         {
755             if (constr && ir->eConstrAlg == ConstraintAlgorithm::Lincs)
756             {
757                 fprintf(fplog,
758                         "RMS relative constraint deviation after constraining: %.2e\n",
759                         constr->rmsd());
760             }
761             if (EI_STATE_VELOCITY(ir->eI))
762             {
763                 real temp = enerd->term[F_TEMP];
764                 if (ir->eI != IntegrationAlgorithm::VV)
765                 {
766                     /* Result of Ekin averaged over velocities of -half
767                      * and +half step, while we only have -half step here.
768                      */
769                     temp *= 2;
770                 }
771                 fprintf(fplog, "Initial temperature: %g K\n", temp);
772             }
773         }
774
775         char tbuf[20];
776         fprintf(stderr, "starting mdrun '%s'\n", *(top_global.name));
777         if (ir->nsteps >= 0)
778         {
779             sprintf(tbuf, "%8.1f", (ir->init_step + ir->nsteps) * ir->delta_t);
780         }
781         else
782         {
783             sprintf(tbuf, "%s", "infinite");
784         }
785         if (ir->init_step > 0)
786         {
787             fprintf(stderr,
788                     "%s steps, %s ps (continuing from step %s, %8.1f ps).\n",
789                     gmx_step_str(ir->init_step + ir->nsteps, sbuf),
790                     tbuf,
791                     gmx_step_str(ir->init_step, sbuf2),
792                     ir->init_step * ir->delta_t);
793         }
794         else
795         {
796             fprintf(stderr, "%s steps, %s ps.\n", gmx_step_str(ir->nsteps, sbuf), tbuf);
797         }
798         fprintf(fplog, "\n");
799     }
800
801     walltime_accounting_start_time(walltime_accounting);
802     wallcycle_start(wcycle, ewcRUN);
803     print_start(fplog, cr, walltime_accounting, "mdrun");
804
805     /***********************************************************
806      *
807      *             Loop over MD steps
808      *
809      ************************************************************/
810
811     bFirstStep = TRUE;
812     /* Skip the first Nose-Hoover integration when we get the state from tpx */
813     bInitStep        = startingBehavior == StartingBehavior::NewSimulation || EI_VV(ir->eI);
814     bSumEkinhOld     = FALSE;
815     bExchanged       = FALSE;
816     bNeedRepartition = FALSE;
817
818     step     = ir->init_step;
819     step_rel = 0;
820
821     auto stopHandler = stopHandlerBuilder->getStopHandlerMD(
822             compat::not_null<SimulationSignal*>(&signals[eglsSTOPCOND]),
823             simulationsShareState,
824             MASTER(cr),
825             ir->nstlist,
826             mdrunOptions.reproducible,
827             nstSignalComm,
828             mdrunOptions.maximumHoursToRun,
829             ir->nstlist == 0,
830             fplog,
831             step,
832             bNS,
833             walltime_accounting);
834
835     auto checkpointHandler = std::make_unique<CheckpointHandler>(
836             compat::make_not_null<SimulationSignal*>(&signals[eglsCHKPT]),
837             simulationsShareState,
838             ir->nstlist == 0,
839             MASTER(cr),
840             mdrunOptions.writeConfout,
841             mdrunOptions.checkpointOptions.period);
842
843     const bool resetCountersIsLocal = true;
844     auto       resetHandler         = std::make_unique<ResetHandler>(
845             compat::make_not_null<SimulationSignal*>(&signals[eglsRESETCOUNTERS]),
846             !resetCountersIsLocal,
847             ir->nsteps,
848             MASTER(cr),
849             mdrunOptions.timingOptions.resetHalfway,
850             mdrunOptions.maximumHoursToRun,
851             mdlog,
852             wcycle,
853             walltime_accounting);
854
855     const DDBalanceRegionHandler ddBalanceRegionHandler(cr);
856
857     if (MASTER(cr) && isMultiSim(ms) && !useReplicaExchange)
858     {
859         logInitialMultisimStatus(ms, cr, mdlog, simulationsShareState, ir->nsteps, ir->init_step);
860     }
861
862     /* and stop now if we should */
863     bLastStep = (bLastStep || (ir->nsteps >= 0 && step_rel > ir->nsteps));
864     while (!bLastStep)
865     {
866
867         /* Determine if this is a neighbor search step */
868         bNStList = (ir->nstlist > 0 && step % ir->nstlist == 0);
869
870         if (bPMETune && bNStList)
871         {
872             // This has to be here because PME load balancing is called so early.
873             // TODO: Move to after all booleans are defined.
874             if (useGpuForUpdate && !bFirstStep)
875             {
876                 stateGpu->copyCoordinatesFromGpu(state->x, AtomLocality::Local);
877                 stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
878             }
879             /* PME grid + cut-off optimization with GPUs or PME nodes */
880             pme_loadbal_do(pme_loadbal,
881                            cr,
882                            (mdrunOptions.verbose && MASTER(cr)) ? stderr : nullptr,
883                            fplog,
884                            mdlog,
885                            *ir,
886                            fr,
887                            state->box,
888                            state->x,
889                            wcycle,
890                            step,
891                            step_rel,
892                            &bPMETunePrinting,
893                            simulationWork.useGpuPmePpCommunication);
894         }
895
896         wallcycle_start(wcycle, ewcSTEP);
897
898         bLastStep = (step_rel == ir->nsteps);
899         t         = t0 + step * ir->delta_t;
900
901         // TODO Refactor this, so that nstfep does not need a default value of zero
902         if (ir->efep != FreeEnergyPerturbationType::No || ir->bSimTemp)
903         {
904             /* find and set the current lambdas */
905             state->lambda = currentLambdas(step, *(ir->fepvals), state->fep_state);
906
907             bDoDHDL = do_per_step(step, ir->fepvals->nstdhdl);
908             bDoFEP  = ((ir->efep != FreeEnergyPerturbationType::No) && do_per_step(step, nstfep));
909             bDoExpanded = (do_per_step(step, ir->expandedvals->nstexpanded) && (ir->bExpanded)
910                            && (!bFirstStep));
911         }
912
913         bDoReplEx = (useReplicaExchange && (step > 0) && !bLastStep
914                      && do_per_step(step, replExParams.exchangeInterval));
915
916         if (doSimulatedAnnealing)
917         {
918             // TODO: Avoid changing inputrec (#3854)
919             // Simulated annealing updates the reference temperature.
920             auto* nonConstInputrec = const_cast<t_inputrec*>(inputrec);
921             update_annealing_target_temp(nonConstInputrec, t, &upd);
922         }
923
924         /* Stop Center of Mass motion */
925         bStopCM = (ir->comm_mode != ComRemovalAlgorithm::No && do_per_step(step, ir->nstcomm));
926
927         /* Determine whether to do neighbour searching */
928         bNS = (bFirstStep || bNStList || bExchanged || bNeedRepartition);
929
930         /* Note that the stopHandler will cause termination at nstglobalcomm
931          * steps. Since this coincides with nstcalcenergy, nsttcouple and/or
932          * nstpcouple steps, we have computed the half-step kinetic energy
933          * of the previous step and can always output energies at the last step.
934          */
935         bLastStep = bLastStep || stopHandler->stoppingAfterCurrentStep(bNS);
936
937         /* do_log triggers energy and virial calculation. Because this leads
938          * to different code paths, forces can be different. Thus for exact
939          * continuation we should avoid extra log output.
940          * Note that the || bLastStep can result in non-exact continuation
941          * beyond the last step. But we don't consider that to be an issue.
942          */
943         do_log     = (do_per_step(step, ir->nstlog)
944                   || (bFirstStep && startingBehavior == StartingBehavior::NewSimulation) || bLastStep);
945         do_verbose = mdrunOptions.verbose
946                      && (step % mdrunOptions.verboseStepPrintInterval == 0 || bFirstStep || bLastStep);
947
948         if (useGpuForUpdate && !bFirstStep && bNS)
949         {
950             // Copy velocities from the GPU on search steps to keep a copy on host (device buffers are reinitialized).
951             stateGpu->copyVelocitiesFromGpu(state->v, AtomLocality::Local);
952             stateGpu->waitVelocitiesReadyOnHost(AtomLocality::Local);
953             // Copy coordinates from the GPU when needed at the search step.
954             // NOTE: The cases where coordinates are needed on the CPU for force evaluation are handled in sim_utils.
955             // NOTE: If the coordinates are to be written to the output file, they are also copied separately before the output.
956             stateGpu->copyCoordinatesFromGpu(state->x, AtomLocality::Local);
957             stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
958         }
959
960         // We only need to calculate virtual velocities if we are writing them in the current step
961         const bool needVirtualVelocitiesThisStep =
962                 (vsite != nullptr)
963                 && (do_per_step(step, ir->nstvout) || checkpointHandler->isCheckpointingStep());
964
965         if (vsite != nullptr)
966         {
967             // Virtual sites need to be updated before domain decomposition and forces are calculated
968             wallcycle_start(wcycle, ewcVSITECONSTR);
969             // md-vv calculates virtual velocities once it has full-step real velocities
970             vsite->construct(state->x,
971                              state->v,
972                              state->box,
973                              (!EI_VV(inputrec->eI) && needVirtualVelocitiesThisStep)
974                                      ? VSiteOperation::PositionsAndVelocities
975                                      : VSiteOperation::Positions);
976             wallcycle_stop(wcycle, ewcVSITECONSTR);
977         }
978
979         if (bNS && !(bFirstStep && ir->bContinuation))
980         {
981             bMasterState = FALSE;
982             /* Correct the new box if it is too skewed */
983             if (inputrecDynamicBox(ir))
984             {
985                 if (correct_box(fplog, step, state->box))
986                 {
987                     bMasterState = TRUE;
988                     // If update is offloaded, it should be informed about the box size change
989                     if (useGpuForUpdate)
990                     {
991                         integrator->setPbc(PbcType::Xyz, state->box);
992                     }
993                 }
994             }
995             if (DOMAINDECOMP(cr) && bMasterState)
996             {
997                 dd_collect_state(cr->dd, state, state_global);
998             }
999
1000             if (DOMAINDECOMP(cr))
1001             {
1002                 /* Repartition the domain decomposition */
1003                 dd_partition_system(fplog,
1004                                     mdlog,
1005                                     step,
1006                                     cr,
1007                                     bMasterState,
1008                                     nstglobalcomm,
1009                                     state_global,
1010                                     top_global,
1011                                     *ir,
1012                                     imdSession,
1013                                     pull_work,
1014                                     state,
1015                                     &f,
1016                                     mdAtoms,
1017                                     &top,
1018                                     fr,
1019                                     vsite,
1020                                     constr,
1021                                     nrnb,
1022                                     wcycle,
1023                                     do_verbose && !bPMETunePrinting);
1024                 shouldCheckNumberOfBondedInteractions = true;
1025                 upd.setNumAtoms(state->natoms);
1026             }
1027         }
1028
1029         // Allocate or re-size GPU halo exchange object, if necessary
1030         if (bNS && havePPDomainDecomposition(cr) && simulationWork.useGpuHaloExchange)
1031         {
1032             GMX_RELEASE_ASSERT(fr->deviceStreamManager != nullptr,
1033                                "GPU device manager has to be initialized to use GPU "
1034                                "version of halo exchange.");
1035             constructGpuHaloExchange(mdlog, *cr, *fr->deviceStreamManager, wcycle);
1036         }
1037
1038         if (MASTER(cr) && do_log)
1039         {
1040             gmx::EnergyOutput::printHeader(
1041                     fplog, step, t); /* can we improve the information printed here? */
1042         }
1043
1044         if (ir->efep != FreeEnergyPerturbationType::No)
1045         {
1046             update_mdatoms(mdatoms, state->lambda[FreeEnergyPerturbationCouplingType::Mass]);
1047         }
1048
1049         if (bExchanged)
1050         {
1051             /* We need the kinetic energy at the minus half step for determining
1052              * the full step kinetic energy and possibly for T-coupling. */
1053             /* This may not be quite working correctly yet . . . . */
1054             compute_globals(gstat,
1055                             cr,
1056                             ir,
1057                             fr,
1058                             ekind,
1059                             makeConstArrayRef(state->x),
1060                             makeConstArrayRef(state->v),
1061                             state->box,
1062                             mdatoms,
1063                             nrnb,
1064                             &vcm,
1065                             wcycle,
1066                             enerd,
1067                             nullptr,
1068                             nullptr,
1069                             nullptr,
1070                             nullptr,
1071                             gmx::ArrayRef<real>{},
1072                             &nullSignaller,
1073                             state->box,
1074                             &totalNumberOfBondedInteractions,
1075                             &bSumEkinhOld,
1076                             CGLO_GSTAT | CGLO_TEMPERATURE | CGLO_CHECK_NUMBER_OF_BONDED_INTERACTIONS);
1077             checkNumberOfBondedInteractions(mdlog,
1078                                             cr,
1079                                             totalNumberOfBondedInteractions,
1080                                             top_global,
1081                                             &top,
1082                                             makeConstArrayRef(state->x),
1083                                             state->box,
1084                                             &shouldCheckNumberOfBondedInteractions);
1085         }
1086         clear_mat(force_vir);
1087
1088         checkpointHandler->decideIfCheckpointingThisStep(bNS, bFirstStep, bLastStep);
1089
1090         /* Determine the energy and pressure:
1091          * at nstcalcenergy steps and at energy output steps (set below).
1092          */
1093         if (EI_VV(ir->eI) && (!bInitStep))
1094         {
1095             bCalcEnerStep = do_per_step(step, ir->nstcalcenergy);
1096             bCalcVir      = bCalcEnerStep
1097                        || (ir->epc != PressureCoupling::No
1098                            && (do_per_step(step, ir->nstpcouple) || do_per_step(step - 1, ir->nstpcouple)));
1099         }
1100         else
1101         {
1102             bCalcEnerStep = do_per_step(step, ir->nstcalcenergy);
1103             bCalcVir      = bCalcEnerStep
1104                        || (ir->epc != PressureCoupling::No && do_per_step(step, ir->nstpcouple));
1105         }
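        // For example (illustrative numbers): with pressure coupling active and
        // nstpcouple = 25, the md-vv branch above computes the virial on steps
        // 25 and 26, 50 and 51, ... (the coupling step and the step after it),
        // whereas the leap-frog branch needs it only on steps 25, 50, 75, ...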
1106         bCalcEner = bCalcEnerStep;
1107
1108         do_ene = (do_per_step(step, ir->nstenergy) || bLastStep);
1109
1110         if (do_ene || do_log || bDoReplEx)
1111         {
1112             bCalcVir  = TRUE;
1113             bCalcEner = TRUE;
1114         }
1115
1116         /* Do we need global communication ? */
1117         bGStat = (bCalcVir || bCalcEner || bStopCM || do_per_step(step, nstglobalcomm)
1118                   || (EI_VV(ir->eI) && inputrecNvtTrotter(ir) && do_per_step(step - 1, nstglobalcomm)));
1119
1120         force_flags = (GMX_FORCE_STATECHANGED | ((inputrecDynamicBox(ir)) ? GMX_FORCE_DYNAMICBOX : 0)
1121                        | GMX_FORCE_ALLFORCES | (bCalcVir ? GMX_FORCE_VIRIAL : 0)
1122                        | (bCalcEner ? GMX_FORCE_ENERGY : 0) | (bDoFEP ? GMX_FORCE_DHDL : 0));
1123         if (fr->useMts && !do_per_step(step, ir->nstfout))
1124         {
1125             force_flags |= GMX_FORCE_DO_NOT_NEED_NORMAL_FORCE;
1126         }
1127
1128         if (shellfc)
1129         {
1130             /* Now is the time to relax the shells */
1131             relax_shell_flexcon(fplog,
1132                                 cr,
1133                                 ms,
1134                                 mdrunOptions.verbose,
1135                                 enforcedRotation,
1136                                 step,
1137                                 ir,
1138                                 imdSession,
1139                                 pull_work,
1140                                 bNS,
1141                                 force_flags,
1142                                 &top,
1143                                 constr,
1144                                 enerd,
1145                                 state->natoms,
1146                                 state->x.arrayRefWithPadding(),
1147                                 state->v.arrayRefWithPadding(),
1148                                 state->box,
1149                                 state->lambda,
1150                                 &state->hist,
1151                                 &f.view(),
1152                                 force_vir,
1153                                 *mdatoms,
1154                                 nrnb,
1155                                 wcycle,
1156                                 shellfc,
1157                                 fr,
1158                                 runScheduleWork,
1159                                 t,
1160                                 mu_tot,
1161                                 vsite,
1162                                 ddBalanceRegionHandler);
1163         }
1164         else
1165         {
1166             /* The AWH history needs to be saved _before_ doing force calculations where the AWH bias
1167                is updated (or the AWH update will be performed twice for one step when continuing).
1168                It would be best to call this update function from do_md_trajectory_writing but that
1169                would occur after do_force. One would have to divide the update_awh function into one
1170                function applying the AWH force and one doing the AWH bias update. The update AWH
1171                bias function could then be called after do_md_trajectory_writing (then containing
1172                update_awh_history). The checkpointing will in the future probably be moved to the start
1173                of the md loop, which will get rid of this issue. */
1174             if (awh && checkpointHandler->isCheckpointingStep() && MASTER(cr))
1175             {
1176                 awh->updateHistory(state_global->awhHistory.get());
1177             }
1178
1179             /* The coordinates (x) are shifted (to get whole molecules)
1180              * in do_force.
1181              * This is parallelized as well, and does communication too.
1182              * Check comments in sim_util.c
1183              */
1184             do_force(fplog,
1185                      cr,
1186                      ms,
1187                      *ir,
1188                      awh.get(),
1189                      enforcedRotation,
1190                      imdSession,
1191                      pull_work,
1192                      step,
1193                      nrnb,
1194                      wcycle,
1195                      &top,
1196                      state->box,
1197                      state->x.arrayRefWithPadding(),
1198                      &state->hist,
1199                      &f.view(),
1200                      force_vir,
1201                      mdatoms,
1202                      enerd,
1203                      state->lambda,
1204                      fr,
1205                      runScheduleWork,
1206                      vsite,
1207                      mu_tot,
1208                      t,
1209                      ed ? ed->getLegacyED() : nullptr,
1210                      (bNS ? GMX_FORCE_NS : 0) | force_flags,
1211                      ddBalanceRegionHandler);
1212         }
1213
1214         // VV integrators do not need the following velocity half step
1215         // if it is the first step after starting from a checkpoint.
1216         // That is, the half step is needed on all other steps, and
1217         // also the first step when starting from a .tpr file.
1218         if (EI_VV(ir->eI))
1219         {
1220             integrateVVFirstStep(step,
1221                                  bFirstStep,
1222                                  bInitStep,
1223                                  startingBehavior,
1224                                  nstglobalcomm,
1225                                  ir,
1226                                  fr,
1227                                  cr,
1228                                  state,
1229                                  mdatoms,
1230                                  fcdata,
1231                                  &MassQ,
1232                                  &vcm,
1233                                  top_global,
1234                                  top,
1235                                  enerd,
1236                                  ekind,
1237                                  gstat,
1238                                  &last_ekin,
1239                                  bCalcVir,
1240                                  total_vir,
1241                                  shake_vir,
1242                                  force_vir,
1243                                  pres,
1244                                  M,
1245                                  do_log,
1246                                  do_ene,
1247                                  bCalcEner,
1248                                  bGStat,
1249                                  bStopCM,
1250                                  bTrotter,
1251                                  bExchanged,
1252                                  &bSumEkinhOld,
1253                                  &shouldCheckNumberOfBondedInteractions,
1254                                  &saved_conserved_quantity,
1255                                  &f,
1256                                  &upd,
1257                                  constr,
1258                                  &nullSignaller,
1259                                  trotter_seq,
1260                                  nrnb,
1261                                  mdlog,
1262                                  fplog,
1263                                  wcycle);
1264             if (vsite != nullptr && needVirtualVelocitiesThisStep)
1265             {
1266                 // Positions were calculated earlier
1267                 wallcycle_start(wcycle, ewcVSITECONSTR);
1268                 vsite->construct(state->x, state->v, state->box, VSiteOperation::Velocities);
1269                 wallcycle_stop(wcycle, ewcVSITECONSTR);
1270             }
1271         }
1272
1273         /* ########  END FIRST UPDATE STEP  ############## */
1274         /* ########  If doing VV, we now have v(dt) ###### */
1275         if (bDoExpanded)
1276         {
1277             /* perform extended ensemble sampling in lambda - we don't
1278                actually move to the new state before outputting
1279                statistics, but if performing simulated tempering, we
1280                do update the velocities and the tau_t. */
1281             // TODO: Avoid changing inputrec (#3854)
1282             // Simulated tempering updates the reference temperature.
1283             // Expanded ensemble without simulated tempering does not change the inputrec.
1284             auto* nonConstInputrec = const_cast<t_inputrec*>(inputrec);
1285             lamnew                 = ExpandedEnsembleDynamics(fplog,
1286                                               nonConstInputrec,
1287                                               enerd,
1288                                               state,
1289                                               &MassQ,
1290                                               state->fep_state,
1291                                               state->dfhist,
1292                                               step,
1293                                               state->v.rvec_array(),
1294                                               mdatoms);
1295             /* history is maintained in state->dfhist, but state_global is what is sent to trajectory and log output */
1296             if (MASTER(cr))
1297             {
1298                 copy_df_history(state_global->dfhist, state->dfhist);
1299             }
1300         }
1301
1302         // Copy coordinates from the GPU for the output/checkpointing if the update is offloaded and
1303         // coordinates have not already been copied for i) search or ii) CPU force tasks.
1304         if (useGpuForUpdate && !bNS && !runScheduleWork->domainWork.haveCpuLocalForceWork
1305             && (do_per_step(step, ir->nstxout) || do_per_step(step, ir->nstxout_compressed)
1306                 || checkpointHandler->isCheckpointingStep()))
1307         {
1308             stateGpu->copyCoordinatesFromGpu(state->x, AtomLocality::Local);
1309             stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
1310         }
1311         // Copy velocities if needed for the output/checkpointing.
1312         // NOTE: Copy on the search steps is done at the beginning of the step.
1313         if (useGpuForUpdate && !bNS
1314             && (do_per_step(step, ir->nstvout) || checkpointHandler->isCheckpointingStep()))
1315         {
1316             stateGpu->copyVelocitiesFromGpu(state->v, AtomLocality::Local);
1317             stateGpu->waitVelocitiesReadyOnHost(AtomLocality::Local);
1318         }
1319         // Copy forces for the output if the forces were reduced on the GPU (not the case on virial steps)
1320         // and the update is offloaded, in which case the forces are kept on the GPU for the update
1321         // and have not already been transferred in do_force().
1322         // TODO: There should be an improved, explicit mechanism that ensures this copy is only executed
1323         //       when the forces are ready on the GPU -- the same synchronizer should be used as the one
1324         //       prior to GPU update.
1325         // TODO: When the output flags are included in the step workload, this copy can be combined with the
1326         //       copy call in do_force(...).
1327         // NOTE: The forces should not be copied here if the vsites are present, since they were modified
1328         //       on the host after the D2H copy in do_force(...).
1329         if (runScheduleWork->stepWork.useGpuFBufferOps && (simulationWork.useGpuUpdate && !vsite)
1330             && do_per_step(step, ir->nstfout))
1331         {
1332             stateGpu->copyForcesFromGpu(f.view().force(), AtomLocality::Local);
1333             stateGpu->waitForcesReadyOnHost(AtomLocality::Local);
1334         }
1335         /* Now we have the energies and forces corresponding to the
1336          * coordinates at time t. We must output all of this before
1337          * the update.
1338          */
1339         do_md_trajectory_writing(fplog,
1340                                  cr,
1341                                  nfile,
1342                                  fnm,
1343                                  step,
1344                                  step_rel,
1345                                  t,
1346                                  ir,
1347                                  state,
1348                                  state_global,
1349                                  observablesHistory,
1350                                  top_global,
1351                                  fr,
1352                                  outf,
1353                                  energyOutput,
1354                                  ekind,
1355                                  f.view().force(),
1356                                  checkpointHandler->isCheckpointingStep(),
1357                                  bRerunMD,
1358                                  bLastStep,
1359                                  mdrunOptions.writeConfout,
1360                                  bSumEkinhOld);
1361         /* Check if IMD step and do IMD communication, if bIMD is TRUE. */
1362         bInteractiveMDstep = imdSession->run(step, bNS, state->box, state->x.rvec_array(), t);
1363
1364         /* kludge -- virial is lost with restart for MTTK NPT control. Must reload (saved earlier). */
1365         if (startingBehavior != StartingBehavior::NewSimulation && bFirstStep
1366             && (inputrecNptTrotter(ir) || inputrecNphTrotter(ir)))
1367         {
1368             copy_mat(state->svir_prev, shake_vir);
1369             copy_mat(state->fvir_prev, force_vir);
1370         }
1371
1372         stopHandler->setSignal();
1373         resetHandler->setSignal(walltime_accounting);
1374
1375         if (bGStat || !PAR(cr))
1376         {
1377             /* In parallel we only have to check for checkpointing in steps
1378              * where we do global communication,
1379              *  otherwise the other nodes don't know.
1380              */
1381             checkpointHandler->setSignal(walltime_accounting);
1382         }
1383
1384         /* #########   START SECOND UPDATE STEP ################# */
1385
1386         /* At the start of the step, randomize or scale the velocities (if doing VV). The restrictions
1387            on Andersen coupling are checked during preprocessing */
1388
1389         if (ETC_ANDERSEN(ir->etc)) /* keep this outside of update_tcouple because of the extra info that needs to be passed */
1390         {
1391             gmx_bool bIfRandomize;
1392             bIfRandomize = update_randomize_velocities(ir, step, cr, mdatoms, state->v, &upd, constr);
1393             /* if we have constraints, we have to remove the kinetic energy parallel to the bonds */
1394             if (constr && bIfRandomize)
1395             {
1396                 constrain_velocities(constr, do_log, do_ene, step, state, nullptr, false, nullptr);
1397             }
1398         }
1399         /* Box is changed in update() when we do pressure coupling,
1400          * but we should still use the old box for energy corrections and when
1401          * writing it to the energy file, so it matches the trajectory files for
1402          * the same timestep above. Make a copy in a separate array.
1403          */
1404         copy_mat(state->box, lastbox);
1405
1406         dvdl_constr = 0;
1407
1408         if (!useGpuForUpdate)
1409         {
1410             wallcycle_start(wcycle, ewcUPDATE);
1411         }
1412         /* UPDATE PRESSURE VARIABLES IN TROTTER FORMULATION WITH CONSTRAINTS */
1413         if (bTrotter)
1414         {
1415             trotter_update(ir, step, ekind, enerd, state, total_vir, mdatoms, &MassQ, trotter_seq, ettTSEQ3);
1416             /* We can only do Berendsen coupling after we have summed
1417              * the kinetic energy or virial. Since this happens
1418              * in global_state after update, we should only do it at
1419              * step % nstlist = 1 with bGStatEveryStep=FALSE.
1420              */
1421         }
1422         else
1423         {
1424             update_tcouple(step, ir, state, ekind, &MassQ, mdatoms);
1425             update_pcouple_before_coordinates(fplog, step, ir, state, pressureCouplingMu, M, bInitStep);
1426         }
1427
1428         /* With leap-frog type integrators we compute the kinetic energy
1429          * at a whole time step as the average of the half-time step kinetic
1430          * energies of two subsequent steps. Therefore we also need to compute the
1431          * half-step kinetic energy when energies are needed at the next step.
1432          */
1433         const bool needHalfStepKineticEnergy =
1434                 (!EI_VV(ir->eI) && (do_per_step(step + 1, nstglobalcomm) || step_rel + 1 == ir->nsteps));
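        // I.e., with leap-frog Ekin(t) is taken as [Ekin(t - dt/2) + Ekin(t + dt/2)] / 2, so the
        // (t + dt/2) half-step kinetic energy has to be computed whenever energies are needed at step + 1.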
1435
1436         // Parrinello-Rahman requires the pressure to be available before the update to compute
1437         // the velocity scaling matrix. Hence, it runs one step after the nstpcouple step.
1438         const bool doParrinelloRahman = (ir->epc == PressureCoupling::ParrinelloRahman
1439                                          && do_per_step(step + ir->nstpcouple - 1, ir->nstpcouple));
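        // For example, assuming do_per_step(step, n) tests step % n == 0 and nstpcouple = 10:
        // the pressure-coupling steps are 0, 10, 20, ..., while doParrinelloRahman becomes true
        // one step later, at steps 1, 11, 21, ...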
1440
1441         if (EI_VV(ir->eI))
1442         {
1443             GMX_ASSERT(!useGpuForUpdate, "GPU update is not supported with VVAK integrator.");
1444
1445             integrateVVSecondStep(step,
1446                                   ir,
1447                                   fr,
1448                                   cr,
1449                                   state,
1450                                   mdatoms,
1451                                   fcdata,
1452                                   &MassQ,
1453                                   &vcm,
1454                                   pull_work,
1455                                   enerd,
1456                                   ekind,
1457                                   gstat,
1458                                   &dvdl_constr,
1459                                   bCalcVir,
1460                                   total_vir,
1461                                   shake_vir,
1462                                   force_vir,
1463                                   pres,
1464                                   M,
1465                                   lastbox,
1466                                   do_log,
1467                                   do_ene,
1468                                   bGStat,
1469                                   &bSumEkinhOld,
1470                                   &f,
1471                                   &cbuf,
1472                                   &upd,
1473                                   constr,
1474                                   &nullSignaller,
1475                                   trotter_seq,
1476                                   nrnb,
1477                                   wcycle);
1478         }
1479         else
1480         {
1481             if (useGpuForUpdate)
1482             {
1483                 if (bNS && (bFirstStep || DOMAINDECOMP(cr)))
1484                 {
1485                     integrator->set(stateGpu->getCoordinates(),
1486                                     stateGpu->getVelocities(),
1487                                     stateGpu->getForces(),
1488                                     top.idef,
1489                                     *mdatoms);
1490
1491                     // Copy data to the GPU after the buffers might have been reinitialized
1492                     stateGpu->copyVelocitiesToGpu(state->v, AtomLocality::Local);
1493                     stateGpu->copyCoordinatesToGpu(state->x, AtomLocality::Local);
1494                 }
1495
1496                 if (simulationWork.useGpuPme && !runScheduleWork->simulationWork.useGpuPmePpCommunication
1497                     && !thisRankHasDuty(cr, DUTY_PME))
1498                 {
1499                     // The PME forces were received on the host, so they have to be copied
1500                     stateGpu->copyForcesToGpu(f.view().force(), AtomLocality::All);
1501                 }
1502                 else if (!runScheduleWork->stepWork.useGpuFBufferOps)
1503                 {
1504                     // The buffer ops were not offloaded this step, so the forces are on the
1505                     // host and have to be copied
1506                     stateGpu->copyForcesToGpu(f.view().force(), AtomLocality::Local);
1507                 }
1508
1509                 const bool doTemperatureScaling =
1510                         (ir->etc != TemperatureCoupling::No
1511                          && do_per_step(step + ir->nsttcouple - 1, ir->nsttcouple));
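                // Like doParrinelloRahman above, this triggers one step after the nsttcouple step,
                // presumably so that the temperature data in ekind->tcstat, computed at the preceding
                // global-communication step, can be used for the scaling factors.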
1512
1513                 // This applies Leap-Frog, LINCS and SETTLE in succession
1514                 integrator->integrate(
1515                         stateGpu->getForcesReadyOnDeviceEvent(
1516                                 AtomLocality::Local, runScheduleWork->stepWork.useGpuFBufferOps),
1517                         ir->delta_t,
1518                         true,
1519                         bCalcVir,
1520                         shake_vir,
1521                         doTemperatureScaling,
1522                         ekind->tcstat,
1523                         doParrinelloRahman,
1524                         ir->nstpcouple * ir->delta_t,
1525                         M);
1526
1527                 // Copy velocities D2H after update if:
1528                 // - Globals are computed this step (includes the energy output steps).
1529                 // - Temperature is needed for the next step.
1530                 if (bGStat || needHalfStepKineticEnergy)
1531                 {
1532                     stateGpu->copyVelocitiesFromGpu(state->v, AtomLocality::Local);
1533                     stateGpu->waitVelocitiesReadyOnHost(AtomLocality::Local);
1534                 }
1535             }
1536             else
1537             {
1538                 /* With multiple time stepping we need to do an additional normal
1539                  * update step to obtain the virial, as the actual MTS integration
1540                  * uses an acceleration where the slow forces are multiplied by mtsFactor.
1541                  * Using that acceleration would result in a virial where the slow
1542                  * force contribution is a factor mtsFactor too large.
1543                  */
1544                 if (fr->useMts && bCalcVir && constr != nullptr)
1545                 {
1546                     upd.update_for_constraint_virial(
1547                             *ir, *mdatoms, *state, f.view().forceWithPadding(), *ekind);
1548
1549                     constrain_coordinates(constr,
1550                                           do_log,
1551                                           do_ene,
1552                                           step,
1553                                           state,
1554                                           upd.xp()->arrayRefWithPadding(),
1555                                           &dvdl_constr,
1556                                           bCalcVir,
1557                                           shake_vir);
1558                 }
1559
1560                 ArrayRefWithPadding<const RVec> forceCombined =
1561                         (fr->useMts && step % ir->mtsLevels[1].stepFactor == 0)
1562                                 ? f.view().forceMtsCombinedWithPadding()
1563                                 : f.view().forceWithPadding();
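                // On steps where the slow MTS forces were just evaluated (step is a multiple of the
                // level-1 step factor), the combined buffer holding the fast plus mtsFactor-scaled slow
                // forces is used; on all other steps only the fast forces propagate the coordinates.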
1564                 upd.update_coords(
1565                         *ir, step, mdatoms, state, forceCombined, fcdata, ekind, M, etrtPOSITION, cr, constr != nullptr);
1566
1567                 wallcycle_stop(wcycle, ewcUPDATE);
1568
1569                 constrain_coordinates(constr,
1570                                       do_log,
1571                                       do_ene,
1572                                       step,
1573                                       state,
1574                                       upd.xp()->arrayRefWithPadding(),
1575                                       &dvdl_constr,
1576                                       bCalcVir && !fr->useMts,
1577                                       shake_vir);
1578
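                // For stochastic dynamics (sd) this applies the second, friction-plus-noise half of
                // the update after the constraints; for other integrators it should be a no-op.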
1579                 upd.update_sd_second_half(
1580                         *ir, step, &dvdl_constr, mdatoms, state, cr, nrnb, wcycle, constr, do_log, do_ene);
1581                 upd.finish_update(*ir, mdatoms, state, wcycle, constr != nullptr);
1582             }
1583
1584             if (ir->bPull && ir->pull->bSetPbcRefToPrevStepCOM)
1585             {
1586                 updatePrevStepPullCom(pull_work, state);
1587             }
1588
1589             enerd->term[F_DVDL_CONSTR] += dvdl_constr;
1590         }
1591
1592         /* ############## IF NOT VV, Calculate globals HERE  ############ */
1593         /* With Leap-Frog we can skip compute_globals at
1594          * non-communication steps, but we need to calculate
1595          * the kinetic energy one step before communication.
1596          */
1597         {
1598             // Organize to do inter-simulation signalling on steps if
1599             // and when algorithms require it.
1600             const bool doInterSimSignal = (simulationsShareState && do_per_step(step, nstSignalComm));
1601
1602             if (bGStat || needHalfStepKineticEnergy || doInterSimSignal)
1603             {
1604                 // Copy coordinates when needed to stop the CM motion.
1605                 if (useGpuForUpdate && !EI_VV(ir->eI) && bStopCM)
1606                 {
1607                     stateGpu->copyCoordinatesFromGpu(state->x, AtomLocality::Local);
1608                     stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
1609                 }
1610                 // Since we're already communicating at this step, we
1611                 // can propagate intra-simulation signals. Note that
1612                 // check_nstglobalcomm is responsible for choosing
1613                 // the value of nstglobalcomm, which is one way that
1614                 // bGStat becomes true, so we can't get into a
1615                 // situation where e.g. checkpointing can't be
1616                 // signalled.
1617                 bool                doIntraSimSignal = true;
1618                 SimulationSignaller signaller(&signals, cr, ms, doInterSimSignal, doIntraSimSignal);
1619
1620                 compute_globals(
1621                         gstat,
1622                         cr,
1623                         ir,
1624                         fr,
1625                         ekind,
1626                         makeConstArrayRef(state->x),
1627                         makeConstArrayRef(state->v),
1628                         state->box,
1629                         mdatoms,
1630                         nrnb,
1631                         &vcm,
1632                         wcycle,
1633                         enerd,
1634                         force_vir,
1635                         shake_vir,
1636                         total_vir,
1637                         pres,
1638                         (!EI_VV(ir->eI) && bCalcEner && constr != nullptr) ? constr->rmsdData()
1639                                                                            : gmx::ArrayRef<real>{},
1640                         &signaller,
1641                         lastbox,
1642                         &totalNumberOfBondedInteractions,
1643                         &bSumEkinhOld,
1644                         (bGStat ? CGLO_GSTAT : 0) | (!EI_VV(ir->eI) && bCalcEner ? CGLO_ENERGY : 0)
1645                                 | (!EI_VV(ir->eI) && bStopCM ? CGLO_STOPCM : 0)
1646                                 | (!EI_VV(ir->eI) ? CGLO_TEMPERATURE : 0)
1647                                 | (!EI_VV(ir->eI) ? CGLO_PRESSURE : 0) | CGLO_CONSTRAINT
1648                                 | (shouldCheckNumberOfBondedInteractions ? CGLO_CHECK_NUMBER_OF_BONDED_INTERACTIONS
1649                                                                          : 0));
1650                 checkNumberOfBondedInteractions(mdlog,
1651                                                 cr,
1652                                                 totalNumberOfBondedInteractions,
1653                                                 top_global,
1654                                                 &top,
1655                                                 makeConstArrayRef(state->x),
1656                                                 state->box,
1657                                                 &shouldCheckNumberOfBondedInteractions);
1658                 if (!EI_VV(ir->eI) && bStopCM)
1659                 {
1660                     process_and_stopcm_grp(
1661                             fplog, &vcm, *mdatoms, makeArrayRef(state->x), makeArrayRef(state->v));
1662                     inc_nrnb(nrnb, eNR_STOPCM, mdatoms->homenr);
1663
1664                     // TODO: The special case of removing CM motion should be dealt with more gracefully
1665                     if (useGpuForUpdate)
1666                     {
1667                         stateGpu->copyCoordinatesToGpu(state->x, AtomLocality::Local);
1668                         // Here we block until the H2D copy completes because event sync with the
1669                         // force kernels that use the coordinates on the next steps is not implemented
1670                         // (not because of a race on state->x being modified on the CPU while H2D is in progress).
1671                         stateGpu->waitCoordinatesCopiedToDevice(AtomLocality::Local);
1672                         // If the COM removal changed the velocities on the CPU, this has to be accounted for.
1673                         if (vcm.mode != ComRemovalAlgorithm::No)
1674                         {
1675                             stateGpu->copyVelocitiesToGpu(state->v, AtomLocality::Local);
1676                         }
1677                     }
1678                 }
1679             }
1680         }
1681
1682         /* #############  END CALC EKIN AND PRESSURE ################# */
1683
1684         /* Note: this is OK, but there are some numerical precision issues with using the convergence of
1685            the virial that should probably be addressed eventually. state->veta has better properties,
1686            but what we actually need entering the new cycle is the new shake_vir value. Ideally, we could
1687            generate the new shake_vir, but test the veta value for convergence.  This will take some thought. */
1688
1689         if (ir->efep != FreeEnergyPerturbationType::No && !EI_VV(ir->eI))
1690         {
1691             /* Sum up the foreign energy and dK/dl terms for md and sd.
1692                Currently done every step so that dH/dl is correct in the .edr */
1693             accumulateKineticLambdaComponents(enerd, state->lambda, *ir->fepvals);
1694         }
1695
1696         update_pcouple_after_coordinates(
1697                 fplog, step, ir, mdatoms, pres, force_vir, shake_vir, pressureCouplingMu, state, nrnb, upd.deform(), !useGpuForUpdate);
1698
1699         const bool doBerendsenPressureCoupling = (inputrec->epc == PressureCoupling::Berendsen
1700                                                   && do_per_step(step, inputrec->nstpcouple));
1701         const bool doCRescalePressureCoupling  = (inputrec->epc == PressureCoupling::CRescale
1702                                                  && do_per_step(step, inputrec->nstpcouple));
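        // Note that Berendsen and C-rescale scaling is applied on the nstpcouple step itself,
        // whereas the Parrinello-Rahman flag computed further above lags that step by one.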
1703         if (useGpuForUpdate
1704             && (doBerendsenPressureCoupling || doCRescalePressureCoupling || doParrinelloRahman))
1705         {
1706             integrator->scaleCoordinates(pressureCouplingMu);
1707             if (doCRescalePressureCoupling)
1708             {
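                // C-rescale also rescales the velocities, using the inverse of the coordinate
                // scaling matrix, hence mu is inverted here before the velocities are scaled on the GPU.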
1709                 matrix pressureCouplingInvMu;
1710                 gmx::invertBoxMatrix(pressureCouplingMu, pressureCouplingInvMu);
1711                 integrator->scaleVelocities(pressureCouplingInvMu);
1712             }
1713             integrator->setPbc(PbcType::Xyz, state->box);
1714         }
1715
1716         /* ################# END UPDATE STEP 2 ################# */
1717         /* #### We now have r(t+dt) and v(t+dt/2)  ############# */
1718
1719         /* The coordinates (x) were unshifted in update */
1720         if (!bGStat)
1721         {
1722             /* We will not sum ekinh_old,
1723              * so signal that we still have to do it.
1724              */
1725             bSumEkinhOld = TRUE;
1726         }
1727
1728         if (bCalcEner)
1729         {
1730             /* #########  BEGIN PREPARING EDR OUTPUT  ###########  */
1731
1732             /* use the kinetic energy determined directly from the last velocities, not the average of the half steps */
1733             if (bTrotter && ir->eI == IntegrationAlgorithm::VV)
1734             {
1735                 enerd->term[F_EKIN] = last_ekin;
1736             }
1737             enerd->term[F_ETOT] = enerd->term[F_EPOT] + enerd->term[F_EKIN];
1738
1739             if (integratorHasConservedEnergyQuantity(ir))
1740             {
1741                 if (EI_VV(ir->eI))
1742                 {
1743                     enerd->term[F_ECONSERVED] = enerd->term[F_ETOT] + saved_conserved_quantity;
1744                 }
1745                 else
1746                 {
1747                     enerd->term[F_ECONSERVED] = enerd->term[F_ETOT] + NPT_energy(ir, state, &MassQ);
1748                 }
1749             }
1750             /* #########  END PREPARING EDR OUTPUT  ###########  */
1751         }
1752
1753         /* Output stuff */
1754         if (MASTER(cr))
1755         {
1756             if (fplog && do_log && bDoExpanded)
1757             {
1758                 /* only needed if doing expanded ensemble */
1759                 PrintFreeEnergyInfoToFile(fplog,
1760                                           ir->fepvals.get(),
1761                                           ir->expandedvals.get(),
1762                                           ir->bSimTemp ? ir->simtempvals.get() : nullptr,
1763                                           state_global->dfhist,
1764                                           state->fep_state,
1765                                           ir->nstlog,
1766                                           step);
1767             }
1768             if (bCalcEner)
1769             {
1770                 energyOutput.addDataAtEnergyStep(bDoDHDL,
1771                                                  bCalcEnerStep,
1772                                                  t,
1773                                                  mdatoms->tmass,
1774                                                  enerd,
1775                                                  ir->fepvals.get(),
1776                                                  ir->expandedvals.get(),
1777                                                  lastbox,
1778                                                  PTCouplingArrays{ state->boxv,
1779                                                                    state->nosehoover_xi,
1780                                                                    state->nosehoover_vxi,
1781                                                                    state->nhpres_xi,
1782                                                                    state->nhpres_vxi },
1783                                                  state->fep_state,
1784                                                  shake_vir,
1785                                                  force_vir,
1786                                                  total_vir,
1787                                                  pres,
1788                                                  ekind,
1789                                                  mu_tot,
1790                                                  constr);
1791             }
1792             else
1793             {
1794                 energyOutput.recordNonEnergyStep();
1795             }
1796
1797             gmx_bool do_dr = do_per_step(step, ir->nstdisreout);
1798             gmx_bool do_or = do_per_step(step, ir->nstorireout);
1799
1800             if (doSimulatedAnnealing)
1801             {
1802                 gmx::EnergyOutput::printAnnealingTemperatures(
1803                         do_log ? fplog : nullptr, groups, &(ir->opts));
1804             }
1805             if (do_log || do_ene || do_dr || do_or)
1806             {
1807                 energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf),
1808                                                    do_ene,
1809                                                    do_dr,
1810                                                    do_or,
1811                                                    do_log ? fplog : nullptr,
1812                                                    step,
1813                                                    t,
1814                                                    fr->fcdata.get(),
1815                                                    awh.get());
1816             }
1817             if (do_log && ir->bDoAwh && awh->hasFepLambdaDimension())
1818             {
1819                 const bool isInitialOutput = false;
1820                 printLambdaStateToLog(fplog, state->lambda, isInitialOutput);
1821             }
1822
1823             if (ir->bPull)
1824             {
1825                 pull_print_output(pull_work, step, t);
1826             }
1827
1828             if (do_per_step(step, ir->nstlog))
1829             {
1830                 if (fflush(fplog) != 0)
1831                 {
1832                     gmx_fatal(FARGS, "Cannot flush logfile - maybe you are out of disk space?");
1833                 }
1834             }
1835         }
1836         if (bDoExpanded)
1837         {
1838             /* Have to do this part _after_ outputting the logfile and the edr file */
1839             /* Gets written into the state at the beginning of the next loop iteration */
1840             state->fep_state = lamnew;
1841         }
1842         else if (ir->bDoAwh && awh->needForeignEnergyDifferences(step))
1843         {
1844             state->fep_state = awh->fepLambdaState();
1845         }
1846         /* Print the remaining wall clock time for the run */
1847         if (isMasterSimMasterRank(ms, MASTER(cr)) && (do_verbose || gmx_got_usr_signal()) && !bPMETunePrinting)
1848         {
1849             if (shellfc)
1850             {
1851                 fprintf(stderr, "\n");
1852             }
1853             print_time(stderr, walltime_accounting, step, ir, cr);
1854         }
1855
1856         /* Ion/water position swapping.
1857          * Not done in the last step since trajectory writing happens before this call
1858          * in the MD loop and exchanges would be lost anyway. */
1859         bNeedRepartition = FALSE;
1860         if ((ir->eSwapCoords != SwapType::No) && (step > 0) && !bLastStep
1861             && do_per_step(step, ir->swap->nstswap))
1862         {
1863             bNeedRepartition = do_swapcoords(cr,
1864                                              step,
1865                                              t,
1866                                              ir,
1867                                              swap,
1868                                              wcycle,
1869                                              as_rvec_array(state->x.data()),
1870                                              state->box,
1871                                              MASTER(cr) && mdrunOptions.verbose,
1872                                              bRerunMD);
1873
1874             if (bNeedRepartition && DOMAINDECOMP(cr))
1875             {
1876                 dd_collect_state(cr->dd, state, state_global);
1877             }
1878         }
1879
1880         /* Replica exchange */
1881         bExchanged = FALSE;
1882         if (bDoReplEx)
1883         {
1884             bExchanged = replica_exchange(fplog, cr, ms, repl_ex, state_global, enerd, state, step, t);
1885         }
1886
1887         if ((bExchanged || bNeedRepartition) && DOMAINDECOMP(cr))
1888         {
1889             dd_partition_system(fplog,
1890                                 mdlog,
1891                                 step,
1892                                 cr,
1893                                 TRUE,
1894                                 1,
1895                                 state_global,
1896                                 top_global,
1897                                 *ir,
1898                                 imdSession,
1899                                 pull_work,
1900                                 state,
1901                                 &f,
1902                                 mdAtoms,
1903                                 &top,
1904                                 fr,
1905                                 vsite,
1906                                 constr,
1907                                 nrnb,
1908                                 wcycle,
1909                                 FALSE);
1910             shouldCheckNumberOfBondedInteractions = true;
1911             upd.setNumAtoms(state->natoms);
1912         }
1913
1914         bFirstStep = FALSE;
1915         bInitStep  = FALSE;
1916
1917         /* #######  SET VARIABLES FOR NEXT ITERATION IF THEY STILL NEED IT ###### */
1918         /* With all integrators, except VV, we need to retain the pressure
1919          * at the current step for coupling at the next step.
1920          */
1921         if ((state->flags & enumValueToBitMask(StateEntry::PressurePrevious))
1922             && (bGStatEveryStep || (ir->nstpcouple > 0 && step % ir->nstpcouple == 0)))
1923         {
1924             /* Store the pressure in t_state for pressure coupling
1925              * at the next MD step.
1926              */
1927             copy_mat(pres, state->pres_prev);
1928         }
1929
1930         /* #######  END SET VARIABLES FOR NEXT ITERATION ###### */
1931
1932         if ((membed != nullptr) && (!bLastStep))
1933         {
1934             rescale_membed(step_rel, membed, as_rvec_array(state_global->x.data()));
1935         }
1936
1937         cycles = wallcycle_stop(wcycle, ewcSTEP);
1938         if (DOMAINDECOMP(cr) && wcycle)
1939         {
1940             dd_cycles_add(cr->dd, cycles, ddCyclStep);
1941         }
1942
1943         /* increase the MD step number */
1944         step++;
1945         step_rel++;
1946
1947 #if GMX_FAHCORE
1948         if (MASTER(cr))
1949         {
1950             fcReportProgress(ir->nsteps + ir->init_step, step);
1951         }
1952 #endif
1953
1954         resetHandler->resetCounters(
1955                 step, step_rel, mdlog, fplog, cr, fr->nbv.get(), nrnb, fr->pmedata, pme_loadbal, wcycle, walltime_accounting);
1956
1957         /* If bIMD is TRUE, the master updates the IMD energy record and sends positions to the VMD client */
1958         imdSession->updateEnergyRecordAndSendPositionsAndEnergies(bInteractiveMDstep, step, bCalcEner);
1959     }
1960     /* End of main MD loop */
1961
1962     /* Closing TNG files can include compressing data. Therefore it is good to do that
1963      * before stopping the time measurements. */
1964     mdoutf_tng_close(outf);
1965
1966     /* Stop measuring walltime */
1967     walltime_accounting_end_time(walltime_accounting);
1968
1969     if (!thisRankHasDuty(cr, DUTY_PME))
1970     {
1971         /* Tell the PME-only node to finish */
1972         gmx_pme_send_finish(cr);
1973     }
1974
1975     if (MASTER(cr))
1976     {
1977         if (ir->nstcalcenergy > 0)
1978         {
1979             energyOutput.printEnergyConservation(fplog, ir->simulation_part, EI_MD(ir->eI));
1980
1981             gmx::EnergyOutput::printAnnealingTemperatures(fplog, groups, &(ir->opts));
1982             energyOutput.printAverages(fplog, groups);
1983         }
1984     }
1985     done_mdoutf(outf);
1986
1987     if (bPMETune)
1988     {
1989         pme_loadbal_done(pme_loadbal, fplog, mdlog, fr->nbv->useGpu());
1990     }
1991
1992     done_shellfc(fplog, shellfc, step_rel);
1993
1994     if (useReplicaExchange && MASTER(cr))
1995     {
1996         print_replica_exchange_statistics(fplog, repl_ex);
1997     }
1998
1999     walltime_accounting_set_nsteps_done(walltime_accounting, step_rel);
2000
2001     global_stat_destroy(gstat);
2002 }