/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2011-2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Implements the integrator for normal molecular dynamics simulations
 *
 * \author David van der Spoel <david.vanderspoel@icm.uu.se>
 * \ingroup module_mdrun
 */
#include "gmxpre.h"

#include <cinttypes>
#include <cmath>
#include <cstdio>
#include <cstdlib>

#include <algorithm>
#include <memory>
#include <numeric>

#include "gromacs/applied_forces/awh/awh.h"
#include "gromacs/applied_forces/awh/read_params.h"
#include "gromacs/commandline/filenm.h"
#include "gromacs/domdec/collect.h"
#include "gromacs/domdec/dlbtiming.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_network.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/domdec/gpuhaloexchange.h"
#include "gromacs/domdec/mdsetup.h"
#include "gromacs/domdec/partition.h"
#include "gromacs/essentialdynamics/edsam.h"
#include "gromacs/ewald/pme_load_balancing.h"
#include "gromacs/ewald/pme_pp.h"
#include "gromacs/fileio/trxio.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/gpu_utils/device_stream_manager.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/imd/imd.h"
#include "gromacs/listed_forces/listed_forces.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/invertmatrix.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/mdlib/checkpointhandler.h"
#include "gromacs/mdlib/compute_io.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdlib/coupling.h"
#include "gromacs/mdlib/ebin.h"
#include "gromacs/mdlib/enerdata_utils.h"
#include "gromacs/mdlib/energyoutput.h"
#include "gromacs/mdlib/expanded.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/force_flags.h"
#include "gromacs/mdlib/forcerec.h"
#include "gromacs/mdlib/freeenergyparameters.h"
#include "gromacs/mdlib/md_support.h"
#include "gromacs/mdlib/mdatoms.h"
#include "gromacs/mdlib/mdoutf.h"
#include "gromacs/mdlib/membed.h"
#include "gromacs/mdlib/resethandler.h"
#include "gromacs/mdlib/sighandler.h"
#include "gromacs/mdlib/simulationsignal.h"
#include "gromacs/mdlib/stat.h"
#include "gromacs/mdlib/stophandler.h"
#include "gromacs/mdlib/tgroup.h"
#include "gromacs/mdlib/trajectory_writing.h"
#include "gromacs/mdlib/update.h"
#include "gromacs/mdlib/update_constrain_gpu.h"
#include "gromacs/mdlib/update_vv.h"
#include "gromacs/mdlib/vcm.h"
#include "gromacs/mdlib/vsite.h"
#include "gromacs/mdrunutility/handlerestart.h"
#include "gromacs/mdrunutility/multisim.h"
#include "gromacs/mdrunutility/printtime.h"
#include "gromacs/mdtypes/awh_history.h"
#include "gromacs/mdtypes/awh_params.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/df_history.h"
#include "gromacs/mdtypes/energyhistory.h"
#include "gromacs/mdtypes/fcdata.h"
#include "gromacs/mdtypes/forcebuffers.h"
#include "gromacs/mdtypes/forcerec.h"
#include "gromacs/mdtypes/group.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/interaction_const.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/mdatom.h"
#include "gromacs/mdtypes/mdrunoptions.h"
#include "gromacs/mdtypes/multipletimestepping.h"
#include "gromacs/mdtypes/observableshistory.h"
#include "gromacs/mdtypes/pullhistory.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/mdtypes/state_propagator_data_gpu.h"
#include "gromacs/modularsimulator/energydata.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/output.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/swap/swapcoords.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/topology/atoms.h"
#include "gromacs/topology/idef.h"
#include "gromacs/topology/mtop_util.h"
#include "gromacs/topology/topology.h"
#include "gromacs/trajectory/trajectoryframe.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/real.h"
#include "gromacs/utility/smalloc.h"

#include "legacysimulator.h"
#include "replicaexchange.h"
#include "shellfc.h"

using gmx::SimulationSignaller;

void gmx::LegacySimulator::do_md()
{
    // TODO Historically, the EM and MD "integrators" used different
    // names for the t_inputrec *parameter, but these must have the
    // same name, now that it's a member of a struct. We use this ir
    // alias to avoid a large ripple of nearly useless changes.
    // t_inputrec is being replaced by IMdpOptionsProvider, so this
    // will go away eventually.
    const t_inputrec* ir = inputrec;

    int64_t      step, step_rel;
    double       t, t0 = ir->init_t;
    gmx_bool     bGStatEveryStep, bGStat, bCalcVir, bCalcEnerStep, bCalcEner;
    gmx_bool     bNS = FALSE, bNStList, bStopCM, bFirstStep, bInitStep, bLastStep = FALSE;
    gmx_bool     bDoDHDL = FALSE, bDoFEP = FALSE, bDoExpanded = FALSE;
    gmx_bool     do_ene, do_log, do_verbose;
    gmx_bool     bMasterState;
    unsigned int force_flags;
    tensor force_vir = { { 0 } }, shake_vir = { { 0 } }, total_vir = { { 0 } }, pres = { { 0 } };
    int    i, m;
    rvec   mu_tot;
    matrix pressureCouplingMu, M;
    gmx_repl_ex_t     repl_ex = nullptr;
    gmx_global_stat_t gstat;
    gmx_shellfc_t*    shellfc;
    gmx_bool          bSumEkinhOld, bDoReplEx, bExchanged, bNeedRepartition;
    gmx_bool          bTrotter;
    real              dvdl_constr;
    std::vector<RVec> cbuf;
    matrix            lastbox;
    int               lamnew = 0;
    /* for FEP */
    int       nstfep = 0;
    double    cycles;
    real      saved_conserved_quantity = 0;
    real      last_ekin                = 0;
    t_extmass MassQ;
    char      sbuf[STEPSTRSIZE], sbuf2[STEPSTRSIZE];

    /* PME load balancing data for GPU kernels */
    gmx_bool bPMETune         = FALSE;
    gmx_bool bPMETunePrinting = FALSE;

    bool bInteractiveMDstep = false;

    /* Domain decomposition could incorrectly miss a bonded
       interaction, but checking for that requires a global
       communication stage, which does not otherwise happen in DD
       code. So we do that alongside the first global energy reduction
       after a new DD is made. These variables handle whether the
       check happens, and the result it returns. */
    bool shouldCheckNumberOfBondedInteractions = false;
    int  totalNumberOfBondedInteractions       = -1;

    SimulationSignals signals;
    // Most global communication stages don't propagate mdrun
    // signals, and use this object to achieve that.
    SimulationSignaller nullSignaller(nullptr, nullptr, nullptr, false, false);

    if (!mdrunOptions.writeConfout)
    {
        // This is on by default, and the main known use case for
        // turning it off is for convenience in benchmarking, which is
        // something that should not show up in the general user
        // interface.
        GMX_LOG(mdlog.info)
                .asParagraph()
                .appendText(
                        "The -noconfout functionality is deprecated, and may be removed in a "
                        "future version.");
    }

    /* md-vv uses averaged full step velocities for T-control
       md-vv-avek uses averaged half step velocities for T-control (but full step ekin for P control)
       md uses averaged half step kinetic energies to determine temperature unless defined otherwise by GMX_EKIN_AVE_VEL; */
    bTrotter = (EI_VV(ir->eI)
                && (inputrecNptTrotter(ir) || inputrecNphTrotter(ir) || inputrecNvtTrotter(ir)));

    const bool bRerunMD = false;

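    // Decide how often (in steps) quantities are reduced over all ranks;
    // bGStatEveryStep records whether that global communication happens every step.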
    int nstglobalcomm = computeGlobalCommunicationPeriod(mdlog, ir, cr);
    bGStatEveryStep   = (nstglobalcomm == 1);

    const SimulationGroups* groups = &top_global.groups;

    std::unique_ptr<EssentialDynamics> ed = nullptr;
    if (opt2bSet("-ei", nfile, fnm))
    {
        /* Initialize essential dynamics sampling */
        ed = init_edsam(mdlog,
                        opt2fn_null("-ei", nfile, fnm),
                        opt2fn("-eo", nfile, fnm),
                        top_global,
                        *ir,
                        cr,
                        constr,
                        state_global,
                        observablesHistory,
                        oenv,
                        startingBehavior);
    }
    else if (observablesHistory->edsamHistory)
    {
        gmx_fatal(FARGS,
                  "The checkpoint is from a run with essential dynamics sampling, "
                  "but the current run did not specify the -ei option. "
                  "Either specify the -ei option to mdrun, or do not use this checkpoint file.");
    }

    int*                fep_state = MASTER(cr) ? &state_global->fep_state : nullptr;
    gmx::ArrayRef<real> lambda    = MASTER(cr) ? state_global->lambda : gmx::ArrayRef<real>();
    initialize_lambdas(fplog, *ir, MASTER(cr), fep_state, lambda);
    Update upd(*ir, deform);
    bool   doSimulatedAnnealing = false;
    {
        // TODO: Avoid changing inputrec (#3854)
        // Simulated annealing updates the reference temperature.
        auto* nonConstInputrec = const_cast<t_inputrec*>(inputrec);
        doSimulatedAnnealing   = initSimulatedAnnealing(nonConstInputrec, &upd);
    }
    const bool useReplicaExchange = (replExParams.exchangeInterval > 0);

    const t_fcdata& fcdata = *fr->fcdata;

    bool simulationsShareState = false;
    int  nstSignalComm         = nstglobalcomm;
    {
        // TODO This implementation of ensemble orientation restraints is nasty because
        // a user can't just do multi-sim with single-sim orientation restraints.
        bool usingEnsembleRestraints =
                (fcdata.disres->nsystems > 1) || ((ms != nullptr) && (fcdata.orires->nr != 0));
        bool awhUsesMultiSim = (ir->bDoAwh && ir->awhParams->shareBiasMultisim() && (ms != nullptr));

        // Replica exchange, ensemble restraints and AWH need all
        // simulations to remain synchronized, so they need
        // checkpoints and stop conditions to act on the same step, so
        // the propagation of such signals must take place between
        // simulations, not just within simulations.
        // TODO: Make algorithm initializers set these flags.
        simulationsShareState = useReplicaExchange || usingEnsembleRestraints || awhUsesMultiSim;

        if (simulationsShareState)
        {
            // Inter-simulation signal communication does not need to happen
            // often, so we use a minimum of 200 steps to reduce overhead.
            const int c_minimumInterSimulationSignallingInterval = 200;
            nstSignalComm = ((c_minimumInterSimulationSignallingInterval + nstglobalcomm - 1) / nstglobalcomm)
                            * nstglobalcomm;
        }
    }

    if (startingBehavior != StartingBehavior::RestartWithAppending)
    {
        pleaseCiteCouplingAlgorithms(fplog, *ir);
    }
    gmx_mdoutf*       outf = init_mdoutf(fplog,
                                   nfile,
                                   fnm,
                                   mdrunOptions,
                                   cr,
                                   outputProvider,
                                   mdModulesNotifier,
                                   ir,
                                   top_global,
                                   oenv,
                                   wcycle,
                                   startingBehavior,
                                   simulationsShareState,
                                   ms);
    gmx::EnergyOutput energyOutput(mdoutf_get_fp_ene(outf),
                                   top_global,
                                   *ir,
                                   pull_work,
                                   mdoutf_get_fp_dhdl(outf),
                                   false,
                                   startingBehavior,
                                   simulationsShareState,
                                   mdModulesNotifier);

    gstat = global_stat_init(ir);

    const auto& simulationWork     = runScheduleWork->simulationWork;
    const bool  useGpuForPme       = simulationWork.useGpuPme;
    const bool  useGpuForNonbonded = simulationWork.useGpuNonbonded;
    const bool  useGpuForBufferOps = simulationWork.useGpuBufferOps;
    const bool  useGpuForUpdate    = simulationWork.useGpuUpdate;

    /* Check for polarizable models and flexible constraints */
    shellfc = init_shell_flexcon(fplog,
                                 top_global,
                                 constr ? constr->numFlexibleConstraints() : 0,
                                 ir->nstcalcenergy,
                                 DOMAINDECOMP(cr),
                                 useGpuForPme);

    {
        double io = compute_io(ir, top_global.natoms, *groups, energyOutput.numEnergyTerms(), 1);
        if ((io > 2000) && MASTER(cr))
        {
            fprintf(stderr, "\nWARNING: This run will generate roughly %.0f Mb of data\n\n", io);
        }
    }

    // Local state only becomes valid now.
    std::unique_ptr<t_state> stateInstance;
    t_state*                 state;

    gmx_localtop_t top(top_global.ffparams);

    auto mdatoms = mdAtoms->mdatoms();

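    // Use a pinned host-side force buffer when GPU force buffer ops or the GPU update
    // are in use, so that host-device copies of the forces can use page-locked memory.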
    ForceBuffers f(fr->useMts,
                   ((useGpuForNonbonded && useGpuForBufferOps) || useGpuForUpdate)
                           ? PinningPolicy::PinnedIfSupported
                           : PinningPolicy::CannotBePinned);
    if (DOMAINDECOMP(cr))
    {
        stateInstance = std::make_unique<t_state>();
        state         = stateInstance.get();
        dd_init_local_state(*cr->dd, state_global, state);

        /* Distribute the charge groups over the nodes from the master node */
        dd_partition_system(fplog,
                            mdlog,
                            ir->init_step,
                            cr,
                            TRUE,
                            1,
                            state_global,
                            top_global,
                            *ir,
                            imdSession,
                            pull_work,
                            state,
                            &f,
                            mdAtoms,
                            &top,
                            fr,
                            vsite,
                            constr,
                            nrnb,
                            nullptr,
                            FALSE);
        shouldCheckNumberOfBondedInteractions = true;
        upd.setNumAtoms(state->natoms);
    }
    else
    {
        state_change_natoms(state_global, state_global->natoms);
        /* Copy the pointer to the global state */
        state = state_global;

        /* Generate and initialize new topology */
        mdAlgorithmsSetupAtomData(cr, *ir, top_global, &top, fr, &f, mdAtoms, constr, vsite, shellfc);

        upd.setNumAtoms(state->natoms);
    }

    std::unique_ptr<UpdateConstrainGpu> integrator;

    StatePropagatorDataGpu* stateGpu = fr->stateGpu;

    // TODO: the assertions below should be handled by UpdateConstraintsBuilder.
    if (useGpuForUpdate)
    {
        GMX_RELEASE_ASSERT(!DOMAINDECOMP(cr) || ddUsesUpdateGroups(*cr->dd) || constr == nullptr
                                   || constr->numConstraintsTotal() == 0,
                           "Constraints in domain decomposition are only supported with update "
                           "groups if using GPU update.\n");
        GMX_RELEASE_ASSERT(ir->eConstrAlg != ConstraintAlgorithm::Shake || constr == nullptr
                                   || constr->numConstraintsTotal() == 0,
                           "SHAKE is not supported with GPU update.");
        GMX_RELEASE_ASSERT(useGpuForPme || (useGpuForNonbonded && simulationWork.useGpuBufferOps),
                           "Either PME or short-ranged non-bonded interaction tasks must run on "
                           "the GPU to use GPU update.\n");
        GMX_RELEASE_ASSERT(ir->eI == IntegrationAlgorithm::MD,
                           "Only the md integrator is supported with the GPU update.\n");
        GMX_RELEASE_ASSERT(
                ir->etc != TemperatureCoupling::NoseHoover,
                "Nose-Hoover temperature coupling is not supported with the GPU update.\n");
        GMX_RELEASE_ASSERT(
                ir->epc == PressureCoupling::No || ir->epc == PressureCoupling::ParrinelloRahman
                        || ir->epc == PressureCoupling::Berendsen || ir->epc == PressureCoupling::CRescale,
                "Only Parrinello-Rahman, Berendsen, and C-rescale pressure coupling are supported "
                "with the GPU update.\n");
        GMX_RELEASE_ASSERT(!mdatoms->haveVsites,
                           "Virtual sites are not supported with the GPU update.\n");
        GMX_RELEASE_ASSERT(ed == nullptr,
                           "Essential dynamics is not supported with the GPU update.\n");
        GMX_RELEASE_ASSERT(!ir->bPull || !pull_have_constraint(*ir->pull),
                           "Constraints pulling is not supported with the GPU update.\n");
        GMX_RELEASE_ASSERT(fcdata.orires->nr == 0,
                           "Orientation restraints are not supported with the GPU update.\n");
        GMX_RELEASE_ASSERT(
                ir->efep == FreeEnergyPerturbationType::No
                        || (!haveFepPerturbedMasses(top_global) && !havePerturbedConstraints(top_global)),
                "Free energy perturbation of masses and constraints are not supported with the GPU "
                "update.");

        if (constr != nullptr && constr->numConstraintsTotal() > 0)
        {
            GMX_LOG(mdlog.info)
                    .asParagraph()
                    .appendText("Updating coordinates and applying constraints on the GPU.");
        }
        else
        {
            GMX_LOG(mdlog.info).asParagraph().appendText("Updating coordinates on the GPU.");
        }
        GMX_RELEASE_ASSERT(fr->deviceStreamManager != nullptr,
                           "Device stream manager should be initialized in order to use GPU "
                           "update-constraints.");
        GMX_RELEASE_ASSERT(
                fr->deviceStreamManager->streamIsValid(gmx::DeviceStreamType::UpdateAndConstraints),
                "Update stream should be initialized in order to use GPU "
                "update-constraints.");
        integrator = std::make_unique<UpdateConstrainGpu>(
                *ir,
                top_global,
                ekind->ngtc,
                fr->deviceStreamManager->context(),
                fr->deviceStreamManager->stream(gmx::DeviceStreamType::UpdateAndConstraints),
                stateGpu->xUpdatedOnDevice(),
                wcycle);

        integrator->setPbc(PbcType::Xyz, state->box);
    }

    if (useGpuForPme || (useGpuForNonbonded && useGpuForBufferOps) || useGpuForUpdate)
    {
        changePinningPolicy(&state->x, PinningPolicy::PinnedIfSupported);
    }
    if (useGpuForUpdate)
    {
        changePinningPolicy(&state->v, PinningPolicy::PinnedIfSupported);
    }

    // NOTE: The global state is no longer used at this point.
    // But state_global is still used as temporary storage space for writing
    // the global state to file and potentially for replica exchange.
    // (Global topology should persist.)

    update_mdatoms(mdatoms, state->lambda[FreeEnergyPerturbationCouplingType::Mass]);

    if (ir->bExpanded)
    {
        /* Check nstexpanded here, because the grompp check was broken */
        if (ir->expandedvals->nstexpanded % ir->nstcalcenergy != 0)
        {
            gmx_fatal(FARGS,
                      "With expanded ensemble, nstexpanded should be a multiple of nstcalcenergy");
        }
        init_expanded_ensemble(startingBehavior != StartingBehavior::NewSimulation, ir, state->dfhist);
    }

    if (MASTER(cr))
    {
        EnergyData::initializeEnergyHistory(startingBehavior, observablesHistory, &energyOutput);
    }

    preparePrevStepPullCom(
            ir, pull_work, mdatoms->massT, state, state_global, cr, startingBehavior != StartingBehavior::NewSimulation);

    // TODO: Remove this by converting AWH into a ForceProvider
    auto awh = prepareAwhModule(fplog,
                                *ir,
                                state_global,
                                cr,
                                ms,
                                startingBehavior != StartingBehavior::NewSimulation,
                                shellfc != nullptr,
                                opt2fn("-awh", nfile, fnm),
                                pull_work);

    if (useReplicaExchange && MASTER(cr))
    {
        repl_ex = init_replica_exchange(fplog, ms, top_global.natoms, ir, replExParams);
    }
    /* PME tuning is only supported in the Verlet scheme, with PME for
     * Coulomb. It is not supported with only LJ PME. */
    bPMETune = (mdrunOptions.tunePme && EEL_PME(fr->ic->eeltype) && !mdrunOptions.reproducible
                && ir->cutoff_scheme != CutoffScheme::Group);

    pme_load_balancing_t* pme_loadbal = nullptr;
    if (bPMETune)
    {
        pme_loadbal_init(
                &pme_loadbal, cr, mdlog, *ir, state->box, *fr->ic, *fr->nbv, fr->pmedata, fr->nbv->useGpu());
    }

    if (!ir->bContinuation)
    {
        if (state->flags & enumValueToBitMask(StateEntry::V))
        {
            auto v = makeArrayRef(state->v);
            /* Set the velocities of vsites, shells and frozen atoms to zero */
            for (i = 0; i < mdatoms->homenr; i++)
            {
                if (mdatoms->ptype[i] == ParticleType::Shell)
                {
                    clear_rvec(v[i]);
                }
                else if (mdatoms->cFREEZE)
                {
                    for (m = 0; m < DIM; m++)
                    {
                        if (ir->opts.nFreeze[mdatoms->cFREEZE[i]][m])
                        {
                            v[i][m] = 0;
                        }
                    }
                }
            }
        }

        if (constr)
        {
            /* Constrain the initial coordinates and velocities */
            do_constrain_first(fplog,
                               constr,
                               ir,
                               mdatoms->nr,
                               mdatoms->homenr,
                               state->x.arrayRefWithPadding(),
                               state->v.arrayRefWithPadding(),
                               state->box,
                               state->lambda[FreeEnergyPerturbationCouplingType::Bonded]);
        }
    }

    if (ir->efep != FreeEnergyPerturbationType::No)
    {
        /* Set the free energy calculation frequency as the greatest common
         * divisor of nstdhdl, nstexpanded, the replica exchange interval
         * and the AWH coordinate sampling interval. */
        nstfep = ir->fepvals->nstdhdl;
        if (ir->bExpanded)
        {
            nstfep = std::gcd(ir->expandedvals->nstexpanded, nstfep);
        }
        if (useReplicaExchange)
        {
            nstfep = std::gcd(replExParams.exchangeInterval, nstfep);
        }
        if (ir->bDoAwh)
        {
            nstfep = std::gcd(ir->awhParams->nstSampleCoord(), nstfep);
        }
    }

    /* Be REALLY careful about what flags you set here. You CANNOT assume
     * this is the first step, since we might be restarting from a checkpoint,
     * and in that case we should not do any modifications to the state.
     */
    bStopCM = (ir->comm_mode != ComRemovalAlgorithm::No && !ir->bContinuation);

    // When restarting from a checkpoint, it can be appropriate to
    // initialize ekind from quantities in the checkpoint. Otherwise,
    // compute_globals must initialize ekind before the simulation
    // starts/restarts. However, only the master rank knows what was
    // found in the checkpoint file, so we have to communicate in
    // order to coordinate the restart.
    //
    // TODO Consider removing this communication if/when checkpoint
    // reading directly follows .tpr reading, because all ranks can
    // agree on hasReadEkinState at that time.
    bool hasReadEkinState = MASTER(cr) ? state_global->ekinstate.hasReadEkinState : false;
    if (PAR(cr))
    {
        gmx_bcast(sizeof(hasReadEkinState), &hasReadEkinState, cr->mpi_comm_mygroup);
    }
    if (hasReadEkinState)
    {
        restore_ekinstate_from_state(cr, ekind, &state_global->ekinstate);
    }

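    // Flags for the initial compute_globals call: always reduce the temperature,
    // include the pressure and constraint-virial contributions for velocity-Verlet
    // integrators, and reuse the kinetic energy read from the checkpoint if present.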
    unsigned int cglo_flags =
            (CGLO_TEMPERATURE | CGLO_GSTAT | (EI_VV(ir->eI) ? CGLO_PRESSURE : 0)
             | (EI_VV(ir->eI) ? CGLO_CONSTRAINT : 0) | (hasReadEkinState ? CGLO_READEKIN : 0));

    bSumEkinhOld = FALSE;

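    // Center-of-mass motion removal data, set up from the COM-removal groups and
    // settings in the inputrec.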
    t_vcm vcm(top_global.groups, *ir);
    reportComRemovalInfo(fplog, vcm);

    /* To minimize communication, compute_globals computes the COM velocity
     * and the kinetic energy for the velocities without COM motion removed.
     * Thus to get the kinetic energy without the COM contribution, we need
     * to call compute_globals twice.
     */
    for (int cgloIteration = 0; cgloIteration < (bStopCM ? 2 : 1); cgloIteration++)
    {
        unsigned int cglo_flags_iteration = cglo_flags;
        if (bStopCM && cgloIteration == 0)
        {
            cglo_flags_iteration |= CGLO_STOPCM;
            cglo_flags_iteration &= ~CGLO_TEMPERATURE;
        }
        compute_globals(gstat,
                        cr,
                        ir,
                        fr,
                        ekind,
                        makeConstArrayRef(state->x),
                        makeConstArrayRef(state->v),
                        state->box,
                        mdatoms,
                        nrnb,
                        &vcm,
                        nullptr,
                        enerd,
                        force_vir,
                        shake_vir,
                        total_vir,
                        pres,
                        gmx::ArrayRef<real>{},
                        &nullSignaller,
                        state->box,
                        &totalNumberOfBondedInteractions,
                        &bSumEkinhOld,
                        cglo_flags_iteration
                                | (shouldCheckNumberOfBondedInteractions ? CGLO_CHECK_NUMBER_OF_BONDED_INTERACTIONS
                                                                         : 0));
        if (cglo_flags_iteration & CGLO_STOPCM)
        {
            /* At initialization, do not pass x with acceleration-correction mode
             * to avoid (incorrect) correction of the initial coordinates.
             */
            auto x = (vcm.mode == ComRemovalAlgorithm::LinearAccelerationCorrection)
                             ? ArrayRef<RVec>()
                             : makeArrayRef(state->x);
            process_and_stopcm_grp(fplog, &vcm, *mdatoms, x, makeArrayRef(state->v));
            inc_nrnb(nrnb, eNR_STOPCM, mdatoms->homenr);
        }
    }
    checkNumberOfBondedInteractions(mdlog,
                                    cr,
                                    totalNumberOfBondedInteractions,
                                    top_global,
                                    &top,
                                    makeConstArrayRef(state->x),
                                    state->box,
                                    &shouldCheckNumberOfBondedInteractions);
    if (ir->eI == IntegrationAlgorithm::VVAK)
    {
        /* a second call to get the half step temperature initialized as well */
        /* we do the same call as above, but turn the pressure off -- internally to
           compute_globals, this is recognized as a velocity Verlet half-step
           kinetic energy calculation. This minimizes excess variables, but
           perhaps loses some logic? */

        compute_globals(gstat,
                        cr,
                        ir,
                        fr,
                        ekind,
                        makeConstArrayRef(state->x),
                        makeConstArrayRef(state->v),
                        state->box,
                        mdatoms,
                        nrnb,
                        &vcm,
                        nullptr,
                        enerd,
                        force_vir,
                        shake_vir,
                        total_vir,
                        pres,
                        gmx::ArrayRef<real>{},
                        &nullSignaller,
                        state->box,
                        nullptr,
                        &bSumEkinhOld,
                        cglo_flags & ~CGLO_PRESSURE);
    }

    /* Calculate the initial half step temperature, and save the ekinh_old */
    if (startingBehavior == StartingBehavior::NewSimulation)
    {
        for (i = 0; (i < ir->opts.ngtc); i++)
        {
            copy_mat(ekind->tcstat[i].ekinh, ekind->tcstat[i].ekinh_old);
        }
    }

    /* Make an initialization call to get the Trotter variables set, as well as other
       constants for non-Trotter temperature control */
    auto trotter_seq = init_npt_vars(ir, state, &MassQ, bTrotter);

    if (MASTER(cr))
    {
        if (!ir->bContinuation)
        {
            if (constr && ir->eConstrAlg == ConstraintAlgorithm::Lincs)
            {
                fprintf(fplog,
                        "RMS relative constraint deviation after constraining: %.2e\n",
                        constr->rmsd());
            }
            if (EI_STATE_VELOCITY(ir->eI))
            {
                real temp = enerd->term[F_TEMP];
                if (ir->eI != IntegrationAlgorithm::VV)
                {
                    /* Result of Ekin averaged over velocities of -half
                     * and +half step, while we only have -half step here.
                     */
                    temp *= 2;
                }
                fprintf(fplog, "Initial temperature: %g K\n", temp);
            }
        }

        char tbuf[20];
        fprintf(stderr, "starting mdrun '%s'\n", *(top_global.name));
        if (ir->nsteps >= 0)
        {
            sprintf(tbuf, "%8.1f", (ir->init_step + ir->nsteps) * ir->delta_t);
        }
        else
        {
            sprintf(tbuf, "%s", "infinite");
        }
        if (ir->init_step > 0)
        {
            fprintf(stderr,
                    "%s steps, %s ps (continuing from step %s, %8.1f ps).\n",
                    gmx_step_str(ir->init_step + ir->nsteps, sbuf),
                    tbuf,
                    gmx_step_str(ir->init_step, sbuf2),
                    ir->init_step * ir->delta_t);
        }
        else
        {
            fprintf(stderr, "%s steps, %s ps.\n", gmx_step_str(ir->nsteps, sbuf), tbuf);
        }
        fprintf(fplog, "\n");
    }

    walltime_accounting_start_time(walltime_accounting);
    wallcycle_start(wcycle, ewcRUN);
    print_start(fplog, cr, walltime_accounting, "mdrun");

    /***********************************************************
     *
     *             Loop over MD steps
     *
     ************************************************************/

    bFirstStep = TRUE;
    /* Skip the first Nose-Hoover integration when we get the state from tpx */
    bInitStep        = startingBehavior == StartingBehavior::NewSimulation || EI_VV(ir->eI);
    bSumEkinhOld     = FALSE;
    bExchanged       = FALSE;
    bNeedRepartition = FALSE;

    step     = ir->init_step;
    step_rel = 0;

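    // The stop handler turns stop signals (e.g. from the signal handler or the
    // maximum run time) into a request to finish the MD loop at a suitable step.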
    auto stopHandler = stopHandlerBuilder->getStopHandlerMD(
            compat::not_null<SimulationSignal*>(&signals[eglsSTOPCOND]),
            simulationsShareState,
            MASTER(cr),
            ir->nstlist,
            mdrunOptions.reproducible,
            nstSignalComm,
            mdrunOptions.maximumHoursToRun,
            ir->nstlist == 0,
            fplog,
            step,
            bNS,
            walltime_accounting);

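    // The checkpoint handler decides, based on the checkpoint signal and the
    // requested checkpoint period, on which steps checkpoint files are written.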
    auto checkpointHandler = std::make_unique<CheckpointHandler>(
            compat::make_not_null<SimulationSignal*>(&signals[eglsCHKPT]),
            simulationsShareState,
            ir->nstlist == 0,
            MASTER(cr),
            mdrunOptions.writeConfout,
            mdrunOptions.checkpointOptions.period);

    const bool resetCountersIsLocal = true;
    auto       resetHandler         = std::make_unique<ResetHandler>(
            compat::make_not_null<SimulationSignal*>(&signals[eglsRESETCOUNTERS]),
            !resetCountersIsLocal,
            ir->nsteps,
            MASTER(cr),
            mdrunOptions.timingOptions.resetHalfway,
            mdrunOptions.maximumHoursToRun,
            mdlog,
            wcycle,
            walltime_accounting);

    const DDBalanceRegionHandler ddBalanceRegionHandler(cr);

    if (MASTER(cr) && isMultiSim(ms) && !useReplicaExchange)
    {
        logInitialMultisimStatus(ms, cr, mdlog, simulationsShareState, ir->nsteps, ir->init_step);
    }

    /* and stop now if we should */
    bLastStep = (bLastStep || (ir->nsteps >= 0 && step_rel > ir->nsteps));
    while (!bLastStep)
    {

        /* Determine if this is a neighbor search step */
        bNStList = (ir->nstlist > 0 && step % ir->nstlist == 0);

        if (bPMETune && bNStList)
        {
            // This has to be here because PME load balancing is called so early.
            // TODO: Move to after all booleans are defined.
            if (useGpuForUpdate && !bFirstStep)
            {
                stateGpu->copyCoordinatesFromGpu(state->x, AtomLocality::Local);
                stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
            }
            /* PME grid + cut-off optimization with GPUs or PME nodes */
            pme_loadbal_do(pme_loadbal,
                           cr,
                           (mdrunOptions.verbose && MASTER(cr)) ? stderr : nullptr,
                           fplog,
                           mdlog,
                           *ir,
                           fr,
                           state->box,
                           state->x,
                           wcycle,
                           step,
                           step_rel,
                           &bPMETunePrinting,
                           simulationWork.useGpuPmePpCommunication);
        }

        wallcycle_start(wcycle, ewcSTEP);

        bLastStep = (step_rel == ir->nsteps);
        t         = t0 + step * ir->delta_t;

        // TODO Refactor this, so that nstfep does not need a default value of zero
        if (ir->efep != FreeEnergyPerturbationType::No || ir->bSimTemp)
        {
            /* find and set the current lambdas */
            state->lambda = currentLambdas(step, *(ir->fepvals), state->fep_state);

            bDoDHDL = do_per_step(step, ir->fepvals->nstdhdl);
            bDoFEP  = ((ir->efep != FreeEnergyPerturbationType::No) && do_per_step(step, nstfep));
            bDoExpanded = (do_per_step(step, ir->expandedvals->nstexpanded) && (ir->bExpanded)
                           && (!bFirstStep));
        }

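        // Replica exchange is attempted only on exchange-interval steps after the
        // first step, and never on the last step.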
        bDoReplEx = (useReplicaExchange && (step > 0) && !bLastStep
                     && do_per_step(step, replExParams.exchangeInterval));

        if (doSimulatedAnnealing)
        {
            // TODO: Avoid changing inputrec (#3854)
            // Simulated annealing updates the reference temperature.
            auto* nonConstInputrec = const_cast<t_inputrec*>(inputrec);
            update_annealing_target_temp(nonConstInputrec, t, &upd);
        }

        /* Stop Center of Mass motion */
        bStopCM = (ir->comm_mode != ComRemovalAlgorithm::No && do_per_step(step, ir->nstcomm));

        /* Determine whether or not to do Neighbour Searching */
        bNS = (bFirstStep || bNStList || bExchanged || bNeedRepartition);

        /* Note that the stopHandler will cause termination at nstglobalcomm
         * steps. Since this coincides with nstcalcenergy, nsttcouple and/or
         * nstpcouple steps, we have computed the half-step kinetic energy
         * of the previous step and can always output energies at the last step.
         */
        bLastStep = bLastStep || stopHandler->stoppingAfterCurrentStep(bNS);

        /* do_log triggers energy and virial calculation. Because this leads
         * to different code paths, forces can be different. Thus for exact
         * continuation we should avoid extra log output.
         * Note that the || bLastStep can result in non-exact continuation
         * beyond the last step. But we don't consider that to be an issue.
         */
        do_log     = (do_per_step(step, ir->nstlog)
                  || (bFirstStep && startingBehavior == StartingBehavior::NewSimulation) || bLastStep);
        do_verbose = mdrunOptions.verbose
                     && (step % mdrunOptions.verboseStepPrintInterval == 0 || bFirstStep || bLastStep);

        if (useGpuForUpdate && !bFirstStep && bNS)
        {
            // Copy velocities from the GPU on search steps to keep a copy on host (device buffers are reinitialized).
            stateGpu->copyVelocitiesFromGpu(state->v, AtomLocality::Local);
            stateGpu->waitVelocitiesReadyOnHost(AtomLocality::Local);
            // Copy coordinates from the GPU when needed at the search step.
            // NOTE: The cases when coordinates needed on CPU for force evaluation are handled in sim_utils.
            // NOTE: If the coordinates are to be written into output file they are also copied separately before the output.
            stateGpu->copyCoordinatesFromGpu(state->x, AtomLocality::Local);
            stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
        }

        // We only need to calculate virtual velocities if we are writing them in the current step
        const bool needVirtualVelocitiesThisStep =
                (vsite != nullptr)
                && (do_per_step(step, ir->nstvout) || checkpointHandler->isCheckpointingStep());

        if (vsite != nullptr)
        {
            // Virtual sites need to be updated before domain decomposition and forces are calculated
            wallcycle_start(wcycle, ewcVSITECONSTR);
            // md-vv calculates virtual velocities once it has full-step real velocities
            vsite->construct(state->x,
                             state->v,
                             state->box,
                             (!EI_VV(inputrec->eI) && needVirtualVelocitiesThisStep)
                                     ? VSiteOperation::PositionsAndVelocities
                                     : VSiteOperation::Positions);
            wallcycle_stop(wcycle, ewcVSITECONSTR);
        }

        if (bNS && !(bFirstStep && ir->bContinuation))
        {
            bMasterState = FALSE;
            /* Correct the new box if it is too skewed */
            if (inputrecDynamicBox(ir))
            {
                if (correct_box(fplog, step, state->box))
                {
                    bMasterState = TRUE;
                    // If update is offloaded, it should be informed about the box size change
                    if (useGpuForUpdate)
                    {
                        integrator->setPbc(PbcType::Xyz, state->box);
                    }
                }
            }
            if (DOMAINDECOMP(cr) && bMasterState)
            {
                dd_collect_state(cr->dd, state, state_global);
            }

            if (DOMAINDECOMP(cr))
            {
                /* Repartition the domain decomposition */
                dd_partition_system(fplog,
                                    mdlog,
                                    step,
                                    cr,
                                    bMasterState,
                                    nstglobalcomm,
                                    state_global,
                                    top_global,
                                    *ir,
                                    imdSession,
                                    pull_work,
                                    state,
                                    &f,
                                    mdAtoms,
                                    &top,
                                    fr,
                                    vsite,
                                    constr,
                                    nrnb,
                                    wcycle,
                                    do_verbose && !bPMETunePrinting);
                shouldCheckNumberOfBondedInteractions = true;
                upd.setNumAtoms(state->natoms);
            }
        }

        // Allocate or re-size GPU halo exchange object, if necessary
        if (bNS && havePPDomainDecomposition(cr) && simulationWork.useGpuHaloExchange)
        {
            GMX_RELEASE_ASSERT(fr->deviceStreamManager != nullptr,
                               "GPU device manager has to be initialized to use GPU "
                               "version of halo exchange.");
            constructGpuHaloExchange(mdlog, *cr, *fr->deviceStreamManager, wcycle);
        }

        if (MASTER(cr) && do_log)
        {
            gmx::EnergyOutput::printHeader(
                    fplog, step, t); /* can we improve the information printed here? */
        }

        if (ir->efep != FreeEnergyPerturbationType::No)
        {
            update_mdatoms(mdatoms, state->lambda[FreeEnergyPerturbationCouplingType::Mass]);
        }

        if (bExchanged)
        {
            /* We need the kinetic energy at the minus half step for determining
             * the full step kinetic energy and possibly for T-coupling. */
            /* This may not be quite working correctly yet . . . . */
            compute_globals(gstat,
                            cr,
                            ir,
                            fr,
                            ekind,
                            makeConstArrayRef(state->x),
                            makeConstArrayRef(state->v),
                            state->box,
                            mdatoms,
                            nrnb,
                            &vcm,
                            wcycle,
                            enerd,
                            nullptr,
                            nullptr,
                            nullptr,
                            nullptr,
                            gmx::ArrayRef<real>{},
                            &nullSignaller,
                            state->box,
                            &totalNumberOfBondedInteractions,
                            &bSumEkinhOld,
                            CGLO_GSTAT | CGLO_TEMPERATURE | CGLO_CHECK_NUMBER_OF_BONDED_INTERACTIONS);
            checkNumberOfBondedInteractions(mdlog,
                                            cr,
                                            totalNumberOfBondedInteractions,
                                            top_global,
                                            &top,
                                            makeConstArrayRef(state->x),
                                            state->box,
                                            &shouldCheckNumberOfBondedInteractions);
        }
        clear_mat(force_vir);

        checkpointHandler->decideIfCheckpointingThisStep(bNS, bFirstStep, bLastStep);

        /* Determine the energy and pressure:
         * at nstcalcenergy steps and at energy output steps (set below).
         */
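        /* With velocity Verlet the virial is also computed on the step directly
         * after a pressure-coupling step (hence the step - 1 check below). */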
        if (EI_VV(ir->eI) && (!bInitStep))
        {
            bCalcEnerStep = do_per_step(step, ir->nstcalcenergy);
            bCalcVir      = bCalcEnerStep
                       || (ir->epc != PressureCoupling::No
                           && (do_per_step(step, ir->nstpcouple) || do_per_step(step - 1, ir->nstpcouple)));
        }
        else
        {
            bCalcEnerStep = do_per_step(step, ir->nstcalcenergy);
            bCalcVir      = bCalcEnerStep
                       || (ir->epc != PressureCoupling::No && do_per_step(step, ir->nstpcouple));
        }
        bCalcEner = bCalcEnerStep;

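        // Energy-file output is written on nstenergy steps and always on the last step.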
        do_ene = (do_per_step(step, ir->nstenergy) || bLastStep);

        if (do_ene || do_log || bDoReplEx)
        {
            bCalcVir  = TRUE;
            bCalcEner = TRUE;
        }

        /* Do we need global communication ? */
        bGStat = (bCalcVir || bCalcEner || bStopCM || do_per_step(step, nstglobalcomm)
                  || (EI_VV(ir->eI) && inputrecNvtTrotter(ir) && do_per_step(step - 1, nstglobalcomm)));

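        // Assemble the flags for this step's force calculation; the virial, energy and
        // dH/dlambda contributions are only requested on steps where they are needed.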
        force_flags = (GMX_FORCE_STATECHANGED | ((inputrecDynamicBox(ir)) ? GMX_FORCE_DYNAMICBOX : 0)
                       | GMX_FORCE_ALLFORCES | (bCalcVir ? GMX_FORCE_VIRIAL : 0)
                       | (bCalcEner ? GMX_FORCE_ENERGY : 0) | (bDoFEP ? GMX_FORCE_DHDL : 0));
        if (fr->useMts && !do_per_step(step, ir->nstfout))
        {
            force_flags |= GMX_FORCE_DO_NOT_NEED_NORMAL_FORCE;
        }

        if (shellfc)
        {
            /* Now is the time to relax the shells */
            relax_shell_flexcon(fplog,
                                cr,
                                ms,
                                mdrunOptions.verbose,
                                enforcedRotation,
                                step,
                                ir,
                                imdSession,
                                pull_work,
                                bNS,
                                force_flags,
                                &top,
                                constr,
                                enerd,
                                state->natoms,
                                state->x.arrayRefWithPadding(),
                                state->v.arrayRefWithPadding(),
                                state->box,
                                state->lambda,
                                &state->hist,
                                &f.view(),
                                force_vir,
                                *mdatoms,
                                nrnb,
                                wcycle,
                                shellfc,
                                fr,
                                runScheduleWork,
                                t,
                                mu_tot,
                                vsite,
                                ddBalanceRegionHandler);
        }
        else
        {
            /* The AWH history needs to be saved _before_ doing force calculations where the AWH bias
               is updated (or the AWH update will be performed twice for one step when continuing).
               It would be best to call this update function from do_md_trajectory_writing but that
               would occur after do_force. One would have to divide the update_awh function into one
               function applying the AWH force and one doing the AWH bias update. The update AWH
               bias function could then be called after do_md_trajectory_writing (then containing
               update_awh_history). The checkpointing will in the future probably be moved to the
               start of the md loop, which will get rid of this issue. */
            if (awh && checkpointHandler->isCheckpointingStep() && MASTER(cr))
            {
                awh->updateHistory(state_global->awhHistory.get());
            }

            /* The coordinates (x) are shifted (to get whole molecules)
             * in do_force.
             * This is parallelized as well, and does communication too.
             * Check comments in sim_util.c
             */
            do_force(fplog,
                     cr,
                     ms,
                     *ir,
                     awh.get(),
                     enforcedRotation,
                     imdSession,
                     pull_work,
                     step,
                     nrnb,
                     wcycle,
                     &top,
                     state->box,
                     state->x.arrayRefWithPadding(),
                     &state->hist,
                     &f.view(),
                     force_vir,
                     mdatoms,
                     enerd,
                     state->lambda,
                     fr,
                     runScheduleWork,
                     vsite,
                     mu_tot,
                     t,
                     ed ? ed->getLegacyED() : nullptr,
                     (bNS ? GMX_FORCE_NS : 0) | force_flags,
                     ddBalanceRegionHandler);
        }

        // VV integrators do not need the following velocity half step
        // if it is the first step after starting from a checkpoint.
        // That is, the half step is needed on all other steps, and
        // also the first step when starting from a .tpr file.
        if (EI_VV(ir->eI))
        {
            integrateVVFirstStep(step,
                                 bFirstStep,
                                 bInitStep,
                                 startingBehavior,
                                 nstglobalcomm,
                                 ir,
                                 fr,
                                 cr,
                                 state,
                                 mdatoms,
                                 fcdata,
                                 &MassQ,
                                 &vcm,
                                 top_global,
                                 top,
                                 enerd,
                                 ekind,
                                 gstat,
                                 &last_ekin,
                                 bCalcVir,
                                 total_vir,
                                 shake_vir,
                                 force_vir,
                                 pres,
                                 M,
                                 do_log,
                                 do_ene,
                                 bCalcEner,
                                 bGStat,
                                 bStopCM,
                                 bTrotter,
                                 bExchanged,
                                 &bSumEkinhOld,
                                 &shouldCheckNumberOfBondedInteractions,
                                 &saved_conserved_quantity,
                                 &f,
                                 &upd,
                                 constr,
                                 &nullSignaller,
                                 trotter_seq,
                                 nrnb,
                                 mdlog,
                                 fplog,
                                 wcycle);
            if (vsite != nullptr && needVirtualVelocitiesThisStep)
            {
                // Positions were calculated earlier
                wallcycle_start(wcycle, ewcVSITECONSTR);
                vsite->construct(state->x, state->v, state->box, VSiteOperation::Velocities);
                wallcycle_stop(wcycle, ewcVSITECONSTR);
            }
        }

        /* ########  END FIRST UPDATE STEP  ############## */
        /* ########  If doing VV, we now have v(dt) ###### */
        if (bDoExpanded)
        {
            /* perform extended ensemble sampling in lambda - we don't
               actually move to the new state before outputting
               statistics, but if performing simulated tempering, we
               do update the velocities and the tau_t. */
            // TODO: Avoid changing inputrec (#3854)
            // Simulated tempering updates the reference temperature.
            // Expanded ensemble without simulated tempering does not change the inputrec.
            auto* nonConstInputrec = const_cast<t_inputrec*>(inputrec);
            lamnew                 = ExpandedEnsembleDynamics(fplog,
                                              nonConstInputrec,
                                              enerd,
                                              state,
                                              &MassQ,
                                              state->fep_state,
                                              state->dfhist,
                                              step,
                                              state->v.rvec_array(),
                                              mdatoms);
            /* history is maintained in state->dfhist, but state_global is what is sent to trajectory and log output */
            if (MASTER(cr))
            {
                copy_df_history(state_global->dfhist, state->dfhist);
            }
        }

1297         // Copy coordinates from the GPU for the output/checkpointing if the update is offloaded and
1298         // coordinates have not already been copied for i) search or ii) CPU force tasks.
1299         if (useGpuForUpdate && !bNS && !runScheduleWork->domainWork.haveCpuLocalForceWork
1300             && (do_per_step(step, ir->nstxout) || do_per_step(step, ir->nstxout_compressed)
1301                 || checkpointHandler->isCheckpointingStep()))
1302         {
1303             stateGpu->copyCoordinatesFromGpu(state->x, AtomLocality::Local);
1304             stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
1305         }
1306         // Copy velocities if needed for the output/checkpointing.
1307         // NOTE: Copy on the search steps is done at the beginning of the step.
1308         if (useGpuForUpdate && !bNS
1309             && (do_per_step(step, ir->nstvout) || checkpointHandler->isCheckpointingStep()))
1310         {
1311             stateGpu->copyVelocitiesFromGpu(state->v, AtomLocality::Local);
1312             stateGpu->waitVelocitiesReadyOnHost(AtomLocality::Local);
1313         }
1314         // Copy forces for the output if the forces were reduced on the GPU (not the case on virial steps)
1315         // and the update is offloaded, in which case the forces are kept on the GPU for the update
1316         // and have not already been transferred in do_force().
1317         // TODO: There should be an improved, explicit mechanism that ensures this copy is only executed
1318         //       when the forces are ready on the GPU -- the same synchronizer should be used as the one
1319         //       prior to GPU update.
1320         // TODO: When the output flags are included in the step workload, this copy can be combined with
1321         //       the copy call in do_force(...).
1322         // NOTE: The forces should not be copied here if the vsites are present, since they were modified
1323         //       on host after the D2H copy in do_force(...).
1324         if (runScheduleWork->stepWork.useGpuFBufferOps && (simulationWork.useGpuUpdate && !vsite)
1325             && do_per_step(step, ir->nstfout))
1326         {
1327             stateGpu->copyForcesFromGpu(f.view().force(), AtomLocality::Local);
1328             stateGpu->waitForcesReadyOnHost(AtomLocality::Local);
1329         }
1330         /* Now we have the energies and forces corresponding to the
1331          * coordinates at time t. We must output all of this before
1332          * the update.
1333          */
1334         do_md_trajectory_writing(fplog,
1335                                  cr,
1336                                  nfile,
1337                                  fnm,
1338                                  step,
1339                                  step_rel,
1340                                  t,
1341                                  ir,
1342                                  state,
1343                                  state_global,
1344                                  observablesHistory,
1345                                  top_global,
1346                                  fr,
1347                                  outf,
1348                                  energyOutput,
1349                                  ekind,
1350                                  f.view().force(),
1351                                  checkpointHandler->isCheckpointingStep(),
1352                                  bRerunMD,
1353                                  bLastStep,
1354                                  mdrunOptions.writeConfout,
1355                                  bSumEkinhOld);
1356         /* Check if IMD step and do IMD communication, if bIMD is TRUE. */
1357         bInteractiveMDstep = imdSession->run(step, bNS, state->box, state->x.rvec_array(), t);
1358
1359         /* kludge -- virial is lost with restart for MTTK NPT control. Must reload (saved earlier). */
1360         if (startingBehavior != StartingBehavior::NewSimulation && bFirstStep
1361             && (inputrecNptTrotter(ir) || inputrecNphTrotter(ir)))
1362         {
1363             copy_mat(state->svir_prev, shake_vir);
1364             copy_mat(state->fvir_prev, force_vir);
1365         }
1366
1367         stopHandler->setSignal();
1368         resetHandler->setSignal(walltime_accounting);
1369
1370         if (bGStat || !PAR(cr))
1371         {
1372             /* In parallel we only have to check for checkpointing in steps
1373              * where we do global communication,
1374              * otherwise the other nodes don't know.
1375              */
1376             checkpointHandler->setSignal(walltime_accounting);
1377         }
1378
1379         /* #########   START SECOND UPDATE STEP ################# */
1380
1381         /* At the start of the step, randomize or scale the velocities (if doing VV). Restrictions on
1382            Andersen coupling are checked during preprocessing. */
1383
1384         if (ETC_ANDERSEN(ir->etc)) /* keep this outside of update_tcouple because of the extra info required to pass */
1385         {
1386             gmx_bool bIfRandomize;
1387             bIfRandomize = update_randomize_velocities(ir, step, cr, mdatoms, state->v, &upd, constr);
1388             /* if we have constraints, we have to remove the kinetic energy parallel to the bonds */
1389             if (constr && bIfRandomize)
1390             {
1391                 constrain_velocities(constr, do_log, do_ene, step, state, nullptr, false, nullptr);
1392             }
1393         }
1394         /* Box is changed in update() when we do pressure coupling,
1395          * but we should still use the old box for energy corrections and when
1396          * writing it to the energy file, so it matches the trajectory files for
1397          * the same timestep above. Make a copy in a separate array.
1398          */
1399         copy_mat(state->box, lastbox);
1400
1401         dvdl_constr = 0;
1402
1403         if (!useGpuForUpdate)
1404         {
1405             wallcycle_start(wcycle, ewcUPDATE);
1406         }
1407         /* UPDATE PRESSURE VARIABLES IN TROTTER FORMULATION WITH CONSTRAINTS */
1408         if (bTrotter)
1409         {
1410             trotter_update(ir, step, ekind, enerd, state, total_vir, mdatoms, &MassQ, trotter_seq, ettTSEQ3);
1411             /* We can only do Berendsen coupling after we have summed
1412              * the kinetic energy or virial. Since this happens
1413              * in global_state after the update, we should only do it at
1414              * step % nstlist = 1 with bGStatEveryStep=FALSE.
1415              */
1416         }
1417         else
1418         {
1419             update_tcouple(step, ir, state, ekind, &MassQ, mdatoms);
1420             update_pcouple_before_coordinates(fplog, step, ir, state, pressureCouplingMu, M, bInitStep);
1421         }
1422
1423         /* With leap-frog type integrators we compute the kinetic energy
1424          * at a whole time step as the average of the half-time step kinetic
1425          * energies of two subsequent steps. Therefore we need to compute the
1426          * half step kinetic energy also if we need energies at the next step.
1427          */
1428         const bool needHalfStepKineticEnergy =
1429                 (!EI_VV(ir->eI) && (do_per_step(step + 1, nstglobalcomm) || step_rel + 1 == ir->nsteps));
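        // Illustration of the averaging mentioned above (a sketch, not code used here):
        //     Ekin(t) ~= 0.5 * ( Ekin(t - dt/2) + Ekin(t + dt/2) )
        // so producing the full-step kinetic energy on an output/communication step also
        // needs the half-step kinetic energy from the step before it.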
1430
1431         // Parrinello-Rahman requires the pressure to be available before the update to compute
1432         // the velocity scaling matrix. Hence, it runs one step after the nstpcouple step.
1433         const bool doParrinelloRahman = (ir->epc == PressureCoupling::ParrinelloRahman
1434                                          && do_per_step(step + ir->nstpcouple - 1, ir->nstpcouple));
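        // Illustration of the offset above, assuming do_per_step(n, m) means n is a multiple
        // of m: with nstpcouple = 10 this is true on steps 1, 11, 21, ..., i.e. one step after
        // each step on which the pressure itself was computed.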
1435
1436         if (EI_VV(ir->eI))
1437         {
1438             GMX_ASSERT(!useGpuForUpdate, "GPU update is not supported with VVAK integrator.");
1439
1440             integrateVVSecondStep(step,
1441                                   ir,
1442                                   fr,
1443                                   cr,
1444                                   state,
1445                                   mdatoms,
1446                                   fcdata,
1447                                   &MassQ,
1448                                   &vcm,
1449                                   pull_work,
1450                                   enerd,
1451                                   ekind,
1452                                   gstat,
1453                                   &dvdl_constr,
1454                                   bCalcVir,
1455                                   total_vir,
1456                                   shake_vir,
1457                                   force_vir,
1458                                   pres,
1459                                   M,
1460                                   lastbox,
1461                                   do_log,
1462                                   do_ene,
1463                                   bGStat,
1464                                   &bSumEkinhOld,
1465                                   &f,
1466                                   &cbuf,
1467                                   &upd,
1468                                   constr,
1469                                   &nullSignaller,
1470                                   trotter_seq,
1471                                   nrnb,
1472                                   wcycle);
1473         }
1474         else
1475         {
1476             if (useGpuForUpdate)
1477             {
1478                 if (bNS && (bFirstStep || DOMAINDECOMP(cr)))
1479                 {
1480                     integrator->set(stateGpu->getCoordinates(),
1481                                     stateGpu->getVelocities(),
1482                                     stateGpu->getForces(),
1483                                     top.idef,
1484                                     *mdatoms);
1485
1486                     // Copy data to the GPU after the buffers might have been reinitialized
1487                     stateGpu->copyVelocitiesToGpu(state->v, AtomLocality::Local);
1488                     stateGpu->copyCoordinatesToGpu(state->x, AtomLocality::Local);
1489                 }
1490
1491                 if (simulationWork.useGpuPme && !runScheduleWork->simulationWork.useGpuPmePpCommunication
1492                     && !thisRankHasDuty(cr, DUTY_PME))
1493                 {
1494                     // The PME forces were received on the host, so they have to be copied
1495                     stateGpu->copyForcesToGpu(f.view().force(), AtomLocality::All);
1496                 }
1497                 else if (!runScheduleWork->stepWork.useGpuFBufferOps)
1498                 {
1499                     // The buffer ops were not offloaded this step, so the forces are on the
1500                     // host and have to be copied
1501                     stateGpu->copyForcesToGpu(f.view().force(), AtomLocality::Local);
1502                 }
1503
1504                 const bool doTemperatureScaling =
1505                         (ir->etc != TemperatureCoupling::No
1506                          && do_per_step(step + ir->nsttcouple - 1, ir->nsttcouple));
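                // Note: this uses the same one-step offset as Parrinello-Rahman above, since
                // the kinetic energy needed for the scaling comes from the global reduction
                // on the previous step.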
1507
1508                 // This applies Leap-Frog, LINCS and SETTLE in succession
1509                 integrator->integrate(
1510                         stateGpu->getForcesReadyOnDeviceEvent(
1511                                 AtomLocality::Local, runScheduleWork->stepWork.useGpuFBufferOps),
1512                         ir->delta_t,
1513                         true,
1514                         bCalcVir,
1515                         shake_vir,
1516                         doTemperatureScaling,
1517                         ekind->tcstat,
1518                         doParrinelloRahman,
1519                         ir->nstpcouple * ir->delta_t,
1520                         M);
1521
1522                 // Copy velocities D2H after update if:
1523                 // - Globals are computed this step (includes the energy output steps).
1524                 // - Temperature is needed for the next step.
1525                 if (bGStat || needHalfStepKineticEnergy)
1526                 {
1527                     stateGpu->copyVelocitiesFromGpu(state->v, AtomLocality::Local);
1528                     stateGpu->waitVelocitiesReadyOnHost(AtomLocality::Local);
1529                 }
1530             }
1531             else
1532             {
1533                 /* With multiple time stepping we need to do an additional normal
1534                  * update step to obtain the virial, as the actual MTS integration
1535                  * uses an acceleration where the slow forces are multiplied by mtsFactor.
1536                  * Using that acceleration would result in a virial where the slow
1537                  * force contribution is a factor mtsFactor too large.
1538                  */
1539                 if (fr->useMts && bCalcVir && constr != nullptr)
1540                 {
1541                     upd.update_for_constraint_virial(
1542                             *ir, *mdatoms, *state, f.view().forceWithPadding(), *ekind);
1543
1544                     constrain_coordinates(constr,
1545                                           do_log,
1546                                           do_ene,
1547                                           step,
1548                                           state,
1549                                           upd.xp()->arrayRefWithPadding(),
1550                                           &dvdl_constr,
1551                                           bCalcVir,
1552                                           shake_vir);
1553                 }
1554
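                // Note on the selection below: on steps that are a multiple of
                // mtsLevels[1].stepFactor the combined force buffer (fast plus mtsFactor-scaled
                // slow forces, cf. the comment above) is used for the coordinate update; on all
                // other steps only the fast forces are used.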
1555                 ArrayRefWithPadding<const RVec> forceCombined =
1556                         (fr->useMts && step % ir->mtsLevels[1].stepFactor == 0)
1557                                 ? f.view().forceMtsCombinedWithPadding()
1558                                 : f.view().forceWithPadding();
1559                 upd.update_coords(
1560                         *ir, step, mdatoms, state, forceCombined, fcdata, ekind, M, etrtPOSITION, cr, constr != nullptr);
1561
1562                 wallcycle_stop(wcycle, ewcUPDATE);
1563
1564                 constrain_coordinates(constr,
1565                                       do_log,
1566                                       do_ene,
1567                                       step,
1568                                       state,
1569                                       upd.xp()->arrayRefWithPadding(),
1570                                       &dvdl_constr,
1571                                       bCalcVir && !fr->useMts,
1572                                       shake_vir);
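                // With MTS the constraint virial was already obtained in the extra update and
                // constraint pass above, hence bCalcVir && !fr->useMts here.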
1573
1574                 upd.update_sd_second_half(
1575                         *ir, step, &dvdl_constr, mdatoms, state, cr, nrnb, wcycle, constr, do_log, do_ene);
1576                 upd.finish_update(*ir, mdatoms, state, wcycle, constr != nullptr);
1577             }
1578
1579             if (ir->bPull && ir->pull->bSetPbcRefToPrevStepCOM)
1580             {
1581                 updatePrevStepPullCom(pull_work, state);
1582             }
1583
1584             enerd->term[F_DVDL_CONSTR] += dvdl_constr;
1585         }
1586
1587         /* ############## IF NOT VV, Calculate globals HERE  ############ */
1588         /* With Leap-Frog we can skip compute_globals at
1589          * non-communication steps, but we need to calculate
1590          * the kinetic energy one step before communication.
1591          */
1592         {
1593             // Organize to do inter-simulation signalling on steps if
1594             // and when algorithms require it.
1595             const bool doInterSimSignal = (simulationsShareState && do_per_step(step, nstSignalComm));
1596
1597             if (bGStat || needHalfStepKineticEnergy || doInterSimSignal)
1598             {
1599                 // Copy coordinates when needed to stop the CM motion.
1600                 if (useGpuForUpdate && !EI_VV(ir->eI) && bStopCM)
1601                 {
1602                     stateGpu->copyCoordinatesFromGpu(state->x, AtomLocality::Local);
1603                     stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
1604                 }
1605                 // Since we're already communicating at this step, we
1606                 // can propagate intra-simulation signals. Note that
1607                 // check_nstglobalcomm is responsible for choosing
1608                 // the value of nstglobalcomm, which is one way bGStat
1609                 // becomes true, so we can't get into a
1610                 // situation where e.g. checkpointing can't be
1611                 // signalled.
1612                 bool                doIntraSimSignal = true;
1613                 SimulationSignaller signaller(&signals, cr, ms, doInterSimSignal, doIntraSimSignal);
1614
1615                 compute_globals(
1616                         gstat,
1617                         cr,
1618                         ir,
1619                         fr,
1620                         ekind,
1621                         makeConstArrayRef(state->x),
1622                         makeConstArrayRef(state->v),
1623                         state->box,
1624                         mdatoms,
1625                         nrnb,
1626                         &vcm,
1627                         wcycle,
1628                         enerd,
1629                         force_vir,
1630                         shake_vir,
1631                         total_vir,
1632                         pres,
1633                         (!EI_VV(ir->eI) && bCalcEner && constr != nullptr) ? constr->rmsdData()
1634                                                                            : gmx::ArrayRef<real>{},
1635                         &signaller,
1636                         lastbox,
1637                         &totalNumberOfBondedInteractions,
1638                         &bSumEkinhOld,
1639                         (bGStat ? CGLO_GSTAT : 0) | (!EI_VV(ir->eI) && bCalcEner ? CGLO_ENERGY : 0)
1640                                 | (!EI_VV(ir->eI) && bStopCM ? CGLO_STOPCM : 0)
1641                                 | (!EI_VV(ir->eI) ? CGLO_TEMPERATURE : 0)
1642                                 | (!EI_VV(ir->eI) ? CGLO_PRESSURE : 0) | CGLO_CONSTRAINT
1643                                 | (shouldCheckNumberOfBondedInteractions ? CGLO_CHECK_NUMBER_OF_BONDED_INTERACTIONS
1644                                                                          : 0));
1645                 checkNumberOfBondedInteractions(mdlog,
1646                                                 cr,
1647                                                 totalNumberOfBondedInteractions,
1648                                                 top_global,
1649                                                 &top,
1650                                                 makeConstArrayRef(state->x),
1651                                                 state->box,
1652                                                 &shouldCheckNumberOfBondedInteractions);
1653                 if (!EI_VV(ir->eI) && bStopCM)
1654                 {
1655                     process_and_stopcm_grp(
1656                             fplog, &vcm, *mdatoms, makeArrayRef(state->x), makeArrayRef(state->v));
1657                     inc_nrnb(nrnb, eNR_STOPCM, mdatoms->homenr);
1658
1659                     // TODO: The special case of removing CM motion should be dealt with more gracefully
1660                     if (useGpuForUpdate)
1661                     {
1662                         stateGpu->copyCoordinatesToGpu(state->x, AtomLocality::Local);
1663                         // Here we block until the H2D copy completes because event sync with the
1664                         // force kernels that use the coordinates on the next steps is not implemented
1665                         // (not because of a race on state->x being modified on the CPU while H2D is in progress).
1666                         stateGpu->waitCoordinatesCopiedToDevice(AtomLocality::Local);
1667                         // If the COM removal changed the velocities on the CPU, this has to be accounted for.
1668                         if (vcm.mode != ComRemovalAlgorithm::No)
1669                         {
1670                             stateGpu->copyVelocitiesToGpu(state->v, AtomLocality::Local);
1671                         }
1672                     }
1673                 }
1674             }
1675         }
1676
1677         /* #############  END CALC EKIN AND PRESSURE ################# */
1678
1679         /* Note: this is OK, but there are some numerical precision issues with using the convergence of
1680            the virial that should probably be addressed eventually. state->veta has better properties,
1681            but what we actually need entering the new cycle is the new shake_vir value. Ideally, we could
1682            generate the new shake_vir, but test the veta value for convergence.  This will take some thought. */
1683
1684         if (ir->efep != FreeEnergyPerturbationType::No && !EI_VV(ir->eI))
1685         {
1686             /* Sum up the foreign energy and dK/dl terms for md and sd.
1687                Currently done every step so that dH/dl is correct in the .edr */
1688             accumulateKineticLambdaComponents(enerd, state->lambda, *ir->fepvals);
1689         }
1690
1691         update_pcouple_after_coordinates(
1692                 fplog, step, ir, mdatoms, pres, force_vir, shake_vir, pressureCouplingMu, state, nrnb, upd.deform(), !useGpuForUpdate);
1693
1694         const bool doBerendsenPressureCoupling = (inputrec->epc == PressureCoupling::Berendsen
1695                                                   && do_per_step(step, inputrec->nstpcouple));
1696         const bool doCRescalePressureCoupling  = (inputrec->epc == PressureCoupling::CRescale
1697                                                  && do_per_step(step, inputrec->nstpcouple));
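        // For the GPU update path the box scaling from the pressure coupling above is applied
        // directly on the device below: the coordinates are scaled by pressureCouplingMu and,
        // for C-rescale, the velocities by its inverse, after which the integrator's PBC
        // information is refreshed for the new box.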
1698         if (useGpuForUpdate
1699             && (doBerendsenPressureCoupling || doCRescalePressureCoupling || doParrinelloRahman))
1700         {
1701             integrator->scaleCoordinates(pressureCouplingMu);
1702             if (doCRescalePressureCoupling)
1703             {
1704                 matrix pressureCouplingInvMu;
1705                 gmx::invertBoxMatrix(pressureCouplingMu, pressureCouplingInvMu);
1706                 integrator->scaleVelocities(pressureCouplingInvMu);
1707             }
1708             integrator->setPbc(PbcType::Xyz, state->box);
1709         }
1710
1711         /* ################# END UPDATE STEP 2 ################# */
1712         /* #### We now have r(t+dt) and v(t+dt/2)  ############# */
1713
1714         /* The coordinates (x) were unshifted in update */
1715         if (!bGStat)
1716         {
1717             /* We will not sum ekinh_old,
1718              * so signal that we still have to do it.
1719              */
1720             bSumEkinhOld = TRUE;
1721         }
1722
1723         if (bCalcEner)
1724         {
1725             /* #########  BEGIN PREPARING EDR OUTPUT  ###########  */
1726
1727             /* use the directly determined last velocity, not actually the averaged half steps */
1728             if (bTrotter && ir->eI == IntegrationAlgorithm::VV)
1729             {
1730                 enerd->term[F_EKIN] = last_ekin;
1731             }
1732             enerd->term[F_ETOT] = enerd->term[F_EPOT] + enerd->term[F_EKIN];
1733
1734             if (integratorHasConservedEnergyQuantity(ir))
1735             {
1736                 if (EI_VV(ir->eI))
1737                 {
1738                     enerd->term[F_ECONSERVED] = enerd->term[F_ETOT] + saved_conserved_quantity;
1739                 }
1740                 else
1741                 {
1742                     enerd->term[F_ECONSERVED] = enerd->term[F_ETOT] + NPT_energy(ir, state, &MassQ);
1743                 }
1744             }
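            // The block above adds the coupling (thermostat/barostat) contribution to the total
            // energy: for VV it comes from saved_conserved_quantity (passed to
            // integrateVVFirstStep above), while for leap-frog it is evaluated here via
            // NPT_energy().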
1745             /* #########  END PREPARING EDR OUTPUT  ###########  */
1746         }
1747
1748         /* Output stuff */
1749         if (MASTER(cr))
1750         {
1751             if (fplog && do_log && bDoExpanded)
1752             {
1753                 /* only needed if doing expanded ensemble */
1754                 PrintFreeEnergyInfoToFile(fplog,
1755                                           ir->fepvals.get(),
1756                                           ir->expandedvals.get(),
1757                                           ir->bSimTemp ? ir->simtempvals.get() : nullptr,
1758                                           state_global->dfhist,
1759                                           state->fep_state,
1760                                           ir->nstlog,
1761                                           step);
1762             }
1763             if (bCalcEner)
1764             {
1765                 energyOutput.addDataAtEnergyStep(bDoDHDL,
1766                                                  bCalcEnerStep,
1767                                                  t,
1768                                                  mdatoms->tmass,
1769                                                  enerd,
1770                                                  ir->fepvals.get(),
1771                                                  ir->expandedvals.get(),
1772                                                  lastbox,
1773                                                  PTCouplingArrays{ state->boxv,
1774                                                                    state->nosehoover_xi,
1775                                                                    state->nosehoover_vxi,
1776                                                                    state->nhpres_xi,
1777                                                                    state->nhpres_vxi },
1778                                                  state->fep_state,
1779                                                  shake_vir,
1780                                                  force_vir,
1781                                                  total_vir,
1782                                                  pres,
1783                                                  ekind,
1784                                                  mu_tot,
1785                                                  constr);
1786             }
1787             else
1788             {
1789                 energyOutput.recordNonEnergyStep();
1790             }
1791
1792             gmx_bool do_dr = do_per_step(step, ir->nstdisreout);
1793             gmx_bool do_or = do_per_step(step, ir->nstorireout);
1794
1795             if (doSimulatedAnnealing)
1796             {
1797                 gmx::EnergyOutput::printAnnealingTemperatures(
1798                         do_log ? fplog : nullptr, groups, &(ir->opts));
1799             }
1800             if (do_log || do_ene || do_dr || do_or)
1801             {
1802                 energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf),
1803                                                    do_ene,
1804                                                    do_dr,
1805                                                    do_or,
1806                                                    do_log ? fplog : nullptr,
1807                                                    step,
1808                                                    t,
1809                                                    fr->fcdata.get(),
1810                                                    awh.get());
1811             }
1812             if (do_log && ir->bDoAwh && awh->hasFepLambdaDimension())
1813             {
1814                 const bool isInitialOutput = false;
1815                 printLambdaStateToLog(fplog, state->lambda, isInitialOutput);
1816             }
1817
1818             if (ir->bPull)
1819             {
1820                 pull_print_output(pull_work, step, t);
1821             }
1822
1823             if (do_per_step(step, ir->nstlog))
1824             {
1825                 if (fflush(fplog) != 0)
1826                 {
1827                     gmx_fatal(FARGS, "Cannot flush logfile - maybe you are out of disk space?");
1828                 }
1829             }
1830         }
1831         if (bDoExpanded)
1832         {
1833             /* Have to do this part _after_ outputting the logfile and the edr file */
1834             /* Gets written into the state at the beginning of the next loop iteration */
1835             state->fep_state = lamnew;
1836         }
1837         else if (ir->bDoAwh && awh->needForeignEnergyDifferences(step))
1838         {
1839             state->fep_state = awh->fepLambdaState();
1840         }
1841         /* Print the remaining wall clock time for the run */
1842         if (isMasterSimMasterRank(ms, MASTER(cr)) && (do_verbose || gmx_got_usr_signal()) && !bPMETunePrinting)
1843         {
1844             if (shellfc)
1845             {
1846                 fprintf(stderr, "\n");
1847             }
1848             print_time(stderr, walltime_accounting, step, ir, cr);
1849         }
1850
1851         /* Ion/water position swapping.
1852          * Not done in last step since trajectory writing happens before this call
1853          * in the MD loop and exchanges would be lost anyway. */
1854         bNeedRepartition = FALSE;
1855         if ((ir->eSwapCoords != SwapType::No) && (step > 0) && !bLastStep
1856             && do_per_step(step, ir->swap->nstswap))
1857         {
1858             bNeedRepartition = do_swapcoords(cr,
1859                                              step,
1860                                              t,
1861                                              ir,
1862                                              swap,
1863                                              wcycle,
1864                                              as_rvec_array(state->x.data()),
1865                                              state->box,
1866                                              MASTER(cr) && mdrunOptions.verbose,
1867                                              bRerunMD);
1868
1869             if (bNeedRepartition && DOMAINDECOMP(cr))
1870             {
1871                 dd_collect_state(cr->dd, state, state_global);
1872             }
1873         }
1874
1875         /* Replica exchange */
1876         bExchanged = FALSE;
1877         if (bDoReplEx)
1878         {
1879             bExchanged = replica_exchange(fplog, cr, ms, repl_ex, state_global, enerd, state, step, t);
1880         }
1881
1882         if ((bExchanged || bNeedRepartition) && DOMAINDECOMP(cr))
1883         {
1884             dd_partition_system(fplog,
1885                                 mdlog,
1886                                 step,
1887                                 cr,
1888                                 TRUE,
1889                                 1,
1890                                 state_global,
1891                                 top_global,
1892                                 *ir,
1893                                 imdSession,
1894                                 pull_work,
1895                                 state,
1896                                 &f,
1897                                 mdAtoms,
1898                                 &top,
1899                                 fr,
1900                                 vsite,
1901                                 constr,
1902                                 nrnb,
1903                                 wcycle,
1904                                 FALSE);
1905             shouldCheckNumberOfBondedInteractions = true;
1906             upd.setNumAtoms(state->natoms);
1907         }
1908
1909         bFirstStep = FALSE;
1910         bInitStep  = FALSE;
1911
1912         /* #######  SET VARIABLES FOR NEXT ITERATION IF THEY STILL NEED IT ###### */
1913         /* With all integrators, except VV, we need to retain the pressure
1914          * at the current step for coupling at the next step.
1915          */
1916         if ((state->flags & enumValueToBitMask(StateEntry::PressurePrevious))
1917             && (bGStatEveryStep || (ir->nstpcouple > 0 && step % ir->nstpcouple == 0)))
1918         {
1919             /* Store the pressure in t_state for pressure coupling
1920              * at the next MD step.
1921              */
1922             copy_mat(pres, state->pres_prev);
1923         }
1924
1925         /* #######  END SET VARIABLES FOR NEXT ITERATION ###### */
1926
1927         if ((membed != nullptr) && (!bLastStep))
1928         {
1929             rescale_membed(step_rel, membed, as_rvec_array(state_global->x.data()));
1930         }
1931
1932         cycles = wallcycle_stop(wcycle, ewcSTEP);
1933         if (DOMAINDECOMP(cr) && wcycle)
1934         {
1935             dd_cycles_add(cr->dd, cycles, ddCyclStep);
1936         }
1937
1938         /* increase the MD step number */
1939         step++;
1940         step_rel++;
1941
1942 #if GMX_FAHCORE
1943         if (MASTER(cr))
1944         {
1945             fcReportProgress(ir->nsteps + ir->init_step, step);
1946         }
1947 #endif
1948
1949         resetHandler->resetCounters(
1950                 step, step_rel, mdlog, fplog, cr, fr->nbv.get(), nrnb, fr->pmedata, pme_loadbal, wcycle, walltime_accounting);
1951
1952         /* If bIMD is TRUE, the master updates the IMD energy record and sends positions to the VMD client */
1953         imdSession->updateEnergyRecordAndSendPositionsAndEnergies(bInteractiveMDstep, step, bCalcEner);
1954     }
1955     /* End of main MD loop */
1956
1957     /* Closing TNG files can include compressing data. Therefore it is good to do that
1958      * before stopping the time measurements. */
1959     mdoutf_tng_close(outf);
1960
1961     /* Stop measuring walltime */
1962     walltime_accounting_end_time(walltime_accounting);
1963
1964     if (!thisRankHasDuty(cr, DUTY_PME))
1965     {
1966         /* Tell the PME-only node to finish */
1967         gmx_pme_send_finish(cr);
1968     }
1969
1970     if (MASTER(cr))
1971     {
1972         if (ir->nstcalcenergy > 0)
1973         {
1974             energyOutput.printEnergyConservation(fplog, ir->simulation_part, EI_MD(ir->eI));
1975
1976             gmx::EnergyOutput::printAnnealingTemperatures(fplog, groups, &(ir->opts));
1977             energyOutput.printAverages(fplog, groups);
1978         }
1979     }
1980     done_mdoutf(outf);
1981
1982     if (bPMETune)
1983     {
1984         pme_loadbal_done(pme_loadbal, fplog, mdlog, fr->nbv->useGpu());
1985     }
1986
1987     done_shellfc(fplog, shellfc, step_rel);
1988
1989     if (useReplicaExchange && MASTER(cr))
1990     {
1991         print_replica_exchange_statistics(fplog, repl_ex);
1992     }
1993
1994     walltime_accounting_set_nsteps_done(walltime_accounting, step_rel);
1995
1996     global_stat_destroy(gstat);
1997 }