Modernize wallcycle counting
[alexxy/gromacs.git] / src / gromacs / mdrun / md.cpp
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5  * Copyright (c) 2001-2004, The GROMACS development team.
6  * Copyright (c) 2011-2019,2020,2021, by the GROMACS development team, led by
7  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8  * and including many others, as listed in the AUTHORS file in the
9  * top-level source directory and at http://www.gromacs.org.
10  *
11  * GROMACS is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public License
13  * as published by the Free Software Foundation; either version 2.1
14  * of the License, or (at your option) any later version.
15  *
16  * GROMACS is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with GROMACS; if not, see
23  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
25  *
26  * If you want to redistribute modifications to GROMACS, please
27  * consider that scientific software is very special. Version
28  * control is crucial - bugs must be traceable. We will be happy to
29  * consider code for inclusion in the official distribution, but
30  * derived work must not be called official GROMACS. Details are found
31  * in the README & COPYING files - if they are missing, get the
32  * official version at http://www.gromacs.org.
33  *
34  * To help us fund GROMACS development, we humbly ask that you cite
35  * the research papers on the package. Check out http://www.gromacs.org.
36  */
37 /*! \internal \file
38  *
39  * \brief Implements the integrator for normal molecular dynamics simulations
40  *
41  * \author David van der Spoel <david.vanderspoel@icm.uu.se>
42  * \ingroup module_mdrun
43  */
44 #include "gmxpre.h"
45
46 #include <cinttypes>
47 #include <cmath>
48 #include <cstdio>
49 #include <cstdlib>
50
51 #include <algorithm>
52 #include <memory>
53 #include <numeric>
54
55 #include "gromacs/applied_forces/awh/awh.h"
56 #include "gromacs/applied_forces/awh/read_params.h"
57 #include "gromacs/commandline/filenm.h"
58 #include "gromacs/domdec/collect.h"
59 #include "gromacs/domdec/dlbtiming.h"
60 #include "gromacs/domdec/domdec.h"
61 #include "gromacs/domdec/domdec_network.h"
62 #include "gromacs/domdec/domdec_struct.h"
63 #include "gromacs/domdec/gpuhaloexchange.h"
64 #include "gromacs/domdec/mdsetup.h"
65 #include "gromacs/domdec/partition.h"
66 #include "gromacs/essentialdynamics/edsam.h"
67 #include "gromacs/ewald/pme_load_balancing.h"
68 #include "gromacs/ewald/pme_pp.h"
69 #include "gromacs/fileio/trxio.h"
70 #include "gromacs/gmxlib/network.h"
71 #include "gromacs/gmxlib/nrnb.h"
72 #include "gromacs/gpu_utils/device_stream_manager.h"
73 #include "gromacs/gpu_utils/gpu_utils.h"
74 #include "gromacs/imd/imd.h"
75 #include "gromacs/listed_forces/listed_forces.h"
76 #include "gromacs/math/functions.h"
77 #include "gromacs/math/invertmatrix.h"
78 #include "gromacs/math/vec.h"
79 #include "gromacs/math/vectypes.h"
80 #include "gromacs/mdlib/checkpointhandler.h"
81 #include "gromacs/mdlib/compute_io.h"
82 #include "gromacs/mdlib/constr.h"
83 #include "gromacs/mdlib/coupling.h"
84 #include "gromacs/mdlib/ebin.h"
85 #include "gromacs/mdlib/enerdata_utils.h"
86 #include "gromacs/mdlib/energyoutput.h"
87 #include "gromacs/mdlib/expanded.h"
88 #include "gromacs/mdlib/force.h"
89 #include "gromacs/mdlib/force_flags.h"
90 #include "gromacs/mdlib/forcerec.h"
91 #include "gromacs/mdlib/freeenergyparameters.h"
92 #include "gromacs/mdlib/md_support.h"
93 #include "gromacs/mdlib/mdatoms.h"
94 #include "gromacs/mdlib/mdoutf.h"
95 #include "gromacs/mdlib/membed.h"
96 #include "gromacs/mdlib/resethandler.h"
97 #include "gromacs/mdlib/sighandler.h"
98 #include "gromacs/mdlib/simulationsignal.h"
99 #include "gromacs/mdlib/stat.h"
100 #include "gromacs/mdlib/stophandler.h"
101 #include "gromacs/mdlib/tgroup.h"
102 #include "gromacs/mdlib/trajectory_writing.h"
103 #include "gromacs/mdlib/update.h"
104 #include "gromacs/mdlib/update_constrain_gpu.h"
105 #include "gromacs/mdlib/update_vv.h"
106 #include "gromacs/mdlib/vcm.h"
107 #include "gromacs/mdlib/vsite.h"
108 #include "gromacs/mdrunutility/handlerestart.h"
109 #include "gromacs/mdrunutility/multisim.h"
110 #include "gromacs/mdrunutility/printtime.h"
111 #include "gromacs/mdtypes/awh_history.h"
112 #include "gromacs/mdtypes/awh_params.h"
113 #include "gromacs/mdtypes/commrec.h"
114 #include "gromacs/mdtypes/df_history.h"
115 #include "gromacs/mdtypes/energyhistory.h"
116 #include "gromacs/mdtypes/fcdata.h"
117 #include "gromacs/mdtypes/forcebuffers.h"
118 #include "gromacs/mdtypes/forcerec.h"
119 #include "gromacs/mdtypes/group.h"
120 #include "gromacs/mdtypes/inputrec.h"
121 #include "gromacs/mdtypes/interaction_const.h"
122 #include "gromacs/mdtypes/md_enums.h"
123 #include "gromacs/mdtypes/mdatom.h"
124 #include "gromacs/mdtypes/mdrunoptions.h"
125 #include "gromacs/mdtypes/multipletimestepping.h"
126 #include "gromacs/mdtypes/observableshistory.h"
127 #include "gromacs/mdtypes/pullhistory.h"
128 #include "gromacs/mdtypes/simulation_workload.h"
129 #include "gromacs/mdtypes/state.h"
130 #include "gromacs/mdtypes/state_propagator_data_gpu.h"
131 #include "gromacs/modularsimulator/energydata.h"
132 #include "gromacs/nbnxm/gpu_data_mgmt.h"
133 #include "gromacs/nbnxm/nbnxm.h"
134 #include "gromacs/pbcutil/pbc.h"
135 #include "gromacs/pulling/output.h"
136 #include "gromacs/pulling/pull.h"
137 #include "gromacs/swap/swapcoords.h"
138 #include "gromacs/timing/wallcycle.h"
139 #include "gromacs/timing/walltime_accounting.h"
140 #include "gromacs/topology/atoms.h"
141 #include "gromacs/topology/idef.h"
142 #include "gromacs/topology/mtop_util.h"
143 #include "gromacs/topology/topology.h"
144 #include "gromacs/trajectory/trajectoryframe.h"
145 #include "gromacs/utility/basedefinitions.h"
146 #include "gromacs/utility/cstringutil.h"
147 #include "gromacs/utility/fatalerror.h"
148 #include "gromacs/utility/logger.h"
149 #include "gromacs/utility/real.h"
150 #include "gromacs/utility/smalloc.h"
151
152 #include "legacysimulator.h"
153 #include "replicaexchange.h"
154 #include "shellfc.h"
155
156 using gmx::SimulationSignaller;
157
158 void gmx::LegacySimulator::do_md()
159 {
160     // TODO Historically, the EM and MD "integrators" used different
161     // names for the t_inputrec *parameter, but these must have the
162     // same name, now that it's a member of a struct. We use this ir
163     // alias to avoid a large ripple of nearly useless changes.
164     // t_inputrec is being replaced by IMdpOptionsProvider, so this
165     // will go away eventually.
166     const t_inputrec* ir = inputrec;
167
168     int64_t      step, step_rel;
169     double       t, t0 = ir->init_t;
170     gmx_bool     bGStatEveryStep, bGStat, bCalcVir, bCalcEnerStep, bCalcEner;
171     gmx_bool     bNS = FALSE, bNStList, bStopCM, bFirstStep, bInitStep, bLastStep = FALSE;
172     gmx_bool     bDoDHDL = FALSE, bDoFEP = FALSE, bDoExpanded = FALSE;
173     gmx_bool     do_ene, do_log, do_verbose;
174     gmx_bool     bMasterState;
175     unsigned int force_flags;
176     tensor force_vir = { { 0 } }, shake_vir = { { 0 } }, total_vir = { { 0 } }, pres = { { 0 } };
177     int    i, m;
178     rvec   mu_tot;
179     matrix pressureCouplingMu, M;
180     gmx_repl_ex_t     repl_ex = nullptr;
181     gmx_global_stat_t gstat;
182     gmx_shellfc_t*    shellfc;
183     gmx_bool          bSumEkinhOld, bDoReplEx, bExchanged, bNeedRepartition;
184     gmx_bool          bTrotter;
185     real              dvdl_constr;
186     std::vector<RVec> cbuf;
187     matrix            lastbox;
188     int               lamnew = 0;
189     /* for FEP */
190     int       nstfep = 0;
191     double    cycles;
192     real      saved_conserved_quantity = 0;
193     real      last_ekin                = 0;
194     t_extmass MassQ;
195     char      sbuf[STEPSTRSIZE], sbuf2[STEPSTRSIZE];
196
197     /* PME load balancing data for GPU kernels */
198     gmx_bool bPMETune         = FALSE;
199     gmx_bool bPMETunePrinting = FALSE;
200
201     bool bInteractiveMDstep = false;
202
203     SimulationSignals signals;
204     // Most global communication stages don't propagate mdrun
205     // signals, and will use this object to achieve that.
206     SimulationSignaller nullSignaller(nullptr, nullptr, nullptr, false, false);
207
208     if (!mdrunOptions.writeConfout)
209     {
210         // This is on by default, and the main known use case for
211         // turning it off is for convenience in benchmarking, which is
212         // something that should not show up in the general user
213         // interface.
214         GMX_LOG(mdlog.info)
215                 .asParagraph()
216                 .appendText(
217                         "The -noconfout functionality is deprecated, and may be removed in a "
218                         "future version.");
219     }
220
221     /* md-vv uses averaged full step velocities for T-control
222        md-vv-avek uses averaged half step velocities for T-control (but full step ekin for P control)
223        md uses averaged half step kinetic energies to determine temperature unless defined otherwise by GMX_EKIN_AVE_VEL; */
224     bTrotter = (EI_VV(ir->eI)
225                 && (inputrecNptTrotter(ir) || inputrecNphTrotter(ir) || inputrecNvtTrotter(ir)));
226
227     const bool bRerunMD = false;
228
229     int nstglobalcomm = computeGlobalCommunicationPeriod(mdlog, ir, cr);
230     bGStatEveryStep   = (nstglobalcomm == 1);
231
232     const SimulationGroups* groups = &top_global.groups;
233
234     std::unique_ptr<EssentialDynamics> ed = nullptr;
235     if (opt2bSet("-ei", nfile, fnm))
236     {
237         /* Initialize essential dynamics sampling */
238         ed = init_edsam(mdlog,
239                         opt2fn_null("-ei", nfile, fnm),
240                         opt2fn("-eo", nfile, fnm),
241                         top_global,
242                         *ir,
243                         cr,
244                         constr,
245                         state_global,
246                         observablesHistory,
247                         oenv,
248                         startingBehavior);
249     }
250     else if (observablesHistory->edsamHistory)
251     {
252         gmx_fatal(FARGS,
253                   "The checkpoint is from a run with essential dynamics sampling, "
254                   "but the current run did not specify the -ei option. "
255                   "Either specify the -ei option to mdrun, or do not use this checkpoint file.");
256     }
257
258     int*                fep_state = MASTER(cr) ? &state_global->fep_state : nullptr;
259     gmx::ArrayRef<real> lambda    = MASTER(cr) ? state_global->lambda : gmx::ArrayRef<real>();
260     initialize_lambdas(
261             fplog, *ir, gmx::arrayRefFromArray(ir->opts.ref_t, ir->opts.ngtc), MASTER(cr), fep_state, lambda);
262     Update upd(*ir, deform);
263     bool   doSimulatedAnnealing = false;
264     {
265         // TODO: Avoid changing inputrec (#3854)
266         // Simulated annealing updates the reference temperature.
267         auto* nonConstInputrec = const_cast<t_inputrec*>(inputrec);
268         doSimulatedAnnealing   = initSimulatedAnnealing(nonConstInputrec, &upd);
269     }
270     const bool useReplicaExchange = (replExParams.exchangeInterval > 0);
271
272     const t_fcdata& fcdata = *fr->fcdata;
273
274     bool simulationsShareState = false;
275     int  nstSignalComm         = nstglobalcomm;
276     {
277         // TODO This implementation of ensemble orientation restraints is nasty because
278         // a user can't just do multi-sim with single-sim orientation restraints.
279         bool usingEnsembleRestraints =
280                 (fcdata.disres->nsystems > 1) || ((ms != nullptr) && (fcdata.orires->nr != 0));
281         bool awhUsesMultiSim = (ir->bDoAwh && ir->awhParams->shareBiasMultisim() && (ms != nullptr));
282
283         // Replica exchange, ensemble restraints and AWH need all
284         // simulations to remain synchronized, so they need
285         // checkpoints and stop conditions to act on the same step, so
286         // the propagation of such signals must take place between
287         // simulations, not just within simulations.
288         // TODO: Make algorithm initializers set these flags.
289         simulationsShareState = useReplicaExchange || usingEnsembleRestraints || awhUsesMultiSim;
290
291         if (simulationsShareState)
292         {
293             // Inter-simulation signal communication does not need to happen
294             // often, so we use a minimum of 200 steps to reduce overhead.
295             const int c_minimumInterSimulationSignallingInterval = 200;
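            // Round the minimum up to the nearest multiple of nstglobalcomm, so that
            // inter-simulation signalling falls on steps that already perform
            // intra-simulation communication (e.g. nstglobalcomm = 150 gives nstSignalComm = 300).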
296             nstSignalComm = ((c_minimumInterSimulationSignallingInterval + nstglobalcomm - 1) / nstglobalcomm)
297                             * nstglobalcomm;
298         }
299     }
300
301     if (startingBehavior != StartingBehavior::RestartWithAppending)
302     {
303         pleaseCiteCouplingAlgorithms(fplog, *ir);
304     }
305     gmx_mdoutf*       outf = init_mdoutf(fplog,
306                                    nfile,
307                                    fnm,
308                                    mdrunOptions,
309                                    cr,
310                                    outputProvider,
311                                    mdModulesNotifiers,
312                                    ir,
313                                    top_global,
314                                    oenv,
315                                    wcycle,
316                                    startingBehavior,
317                                    simulationsShareState,
318                                    ms);
319     gmx::EnergyOutput energyOutput(mdoutf_get_fp_ene(outf),
320                                    top_global,
321                                    *ir,
322                                    pull_work,
323                                    mdoutf_get_fp_dhdl(outf),
324                                    false,
325                                    startingBehavior,
326                                    simulationsShareState,
327                                    mdModulesNotifiers);
328
329     gstat = global_stat_init(ir);
330
331     const auto& simulationWork     = runScheduleWork->simulationWork;
332     const bool  useGpuForPme       = simulationWork.useGpuPme;
333     const bool  useGpuForNonbonded = simulationWork.useGpuNonbonded;
334     const bool  useGpuForBufferOps = simulationWork.useGpuBufferOps;
335     const bool  useGpuForUpdate    = simulationWork.useGpuUpdate;
336
337     /* Check for polarizable models and flexible constraints */
338     shellfc = init_shell_flexcon(fplog,
339                                  top_global,
340                                  constr ? constr->numFlexibleConstraints() : 0,
341                                  ir->nstcalcenergy,
342                                  DOMAINDECOMP(cr),
343                                  useGpuForPme);
344
345     {
346         double io = compute_io(ir, top_global.natoms, *groups, energyOutput.numEnergyTerms(), 1);
347         if ((io > 2000) && MASTER(cr))
348         {
349             fprintf(stderr, "\nWARNING: This run will generate roughly %.0f Mb of data\n\n", io);
350         }
351     }
352
353     // Local state only becomes valid now.
354     std::unique_ptr<t_state> stateInstance;
355     t_state*                 state;
356
357     gmx_localtop_t top(top_global.ffparams);
358
359     auto mdatoms = mdAtoms->mdatoms();
360
361     ForceBuffers     f(fr->useMts,
362                    ((useGpuForNonbonded && useGpuForBufferOps) || useGpuForUpdate)
363                            ? PinningPolicy::PinnedIfSupported
364                            : PinningPolicy::CannotBePinned);
365     const t_mdatoms* md = mdAtoms->mdatoms();
366     if (DOMAINDECOMP(cr))
367     {
368         stateInstance = std::make_unique<t_state>();
369         state         = stateInstance.get();
370         dd_init_local_state(*cr->dd, state_global, state);
371
372         /* Distribute the atoms over the ranks from the master rank */
373         dd_partition_system(fplog,
374                             mdlog,
375                             ir->init_step,
376                             cr,
377                             TRUE,
378                             1,
379                             state_global,
380                             top_global,
381                             *ir,
382                             imdSession,
383                             pull_work,
384                             state,
385                             &f,
386                             mdAtoms,
387                             &top,
388                             fr,
389                             vsite,
390                             constr,
391                             nrnb,
392                             nullptr,
393                             FALSE);
394         upd.updateAfterPartition(state->natoms,
395                                  md->cFREEZE ? gmx::arrayRefFromArray(md->cFREEZE, md->nr)
396                                              : gmx::ArrayRef<const unsigned short>(),
397                                  md->cTC ? gmx::arrayRefFromArray(md->cTC, md->nr)
398                                          : gmx::ArrayRef<const unsigned short>());
399     }
400     else
401     {
402         state_change_natoms(state_global, state_global->natoms);
403         /* Copy the pointer to the global state */
404         state = state_global;
405
406         /* Generate and initialize new topology */
407         mdAlgorithmsSetupAtomData(cr, *ir, top_global, &top, fr, &f, mdAtoms, constr, vsite, shellfc);
408
409         upd.updateAfterPartition(state->natoms,
410                                  md->cFREEZE ? gmx::arrayRefFromArray(md->cFREEZE, md->nr)
411                                              : gmx::ArrayRef<const unsigned short>(),
412                                  md->cTC ? gmx::arrayRefFromArray(md->cTC, md->nr)
413                                          : gmx::ArrayRef<const unsigned short>());
414     }
415
416     std::unique_ptr<UpdateConstrainGpu> integrator;
417
418     StatePropagatorDataGpu* stateGpu = fr->stateGpu;
419
420     // TODO: the assertions below should be handled by UpdateConstraintsBuilder.
421     if (useGpuForUpdate)
422     {
423         GMX_RELEASE_ASSERT(!DOMAINDECOMP(cr) || ddUsesUpdateGroups(*cr->dd) || constr == nullptr
424                                    || constr->numConstraintsTotal() == 0,
425                            "Constraints in domain decomposition are only supported with update "
426                            "groups if using GPU update.\n");
427         GMX_RELEASE_ASSERT(ir->eConstrAlg != ConstraintAlgorithm::Shake || constr == nullptr
428                                    || constr->numConstraintsTotal() == 0,
429                            "SHAKE is not supported with GPU update.");
430         GMX_RELEASE_ASSERT(useGpuForPme || (useGpuForNonbonded && simulationWork.useGpuBufferOps),
431                            "Either PME or short-ranged non-bonded interaction tasks must run on "
432                            "the GPU to use GPU update.\n");
433         GMX_RELEASE_ASSERT(ir->eI == IntegrationAlgorithm::MD,
434                            "Only the md integrator is supported with the GPU update.\n");
435         GMX_RELEASE_ASSERT(
436                 ir->etc != TemperatureCoupling::NoseHoover,
437                 "Nose-Hoover temperature coupling is not supported with the GPU update.\n");
438         GMX_RELEASE_ASSERT(
439                 ir->epc == PressureCoupling::No || ir->epc == PressureCoupling::ParrinelloRahman
440                         || ir->epc == PressureCoupling::Berendsen || ir->epc == PressureCoupling::CRescale,
441                 "Only Parrinello-Rahman, Berendsen, and C-rescale pressure coupling are supported "
442                 "with the GPU update.\n");
443         GMX_RELEASE_ASSERT(!mdatoms->haveVsites,
444                            "Virtual sites are not supported with the GPU update.\n");
445         GMX_RELEASE_ASSERT(ed == nullptr,
446                            "Essential dynamics is not supported with the GPU update.\n");
447         GMX_RELEASE_ASSERT(!ir->bPull || !pull_have_constraint(*ir->pull),
448                            "Constraints pulling is not supported with the GPU update.\n");
449         GMX_RELEASE_ASSERT(fcdata.orires->nr == 0,
450                            "Orientation restraints are not supported with the GPU update.\n");
451         GMX_RELEASE_ASSERT(
452                 ir->efep == FreeEnergyPerturbationType::No
453                         || (!haveFepPerturbedMasses(top_global) && !havePerturbedConstraints(top_global)),
454                 "Free energy perturbation of masses and constraints are not supported with the GPU "
455                 "update.");
456
457         if (constr != nullptr && constr->numConstraintsTotal() > 0)
458         {
459             GMX_LOG(mdlog.info)
460                     .asParagraph()
461                     .appendText("Updating coordinates and applying constraints on the GPU.");
462         }
463         else
464         {
465             GMX_LOG(mdlog.info).asParagraph().appendText("Updating coordinates on the GPU.");
466         }
467         GMX_RELEASE_ASSERT(fr->deviceStreamManager != nullptr,
468                            "Device stream manager should be initialized in order to use GPU "
469                            "update-constraints.");
470         GMX_RELEASE_ASSERT(
471                 fr->deviceStreamManager->streamIsValid(gmx::DeviceStreamType::UpdateAndConstraints),
472                 "Update stream should be initialized in order to use GPU "
473                 "update-constraints.");
474         integrator = std::make_unique<UpdateConstrainGpu>(
475                 *ir,
476                 top_global,
477                 ekind->ngtc,
478                 fr->deviceStreamManager->context(),
479                 fr->deviceStreamManager->stream(gmx::DeviceStreamType::UpdateAndConstraints),
480                 stateGpu->xUpdatedOnDevice(),
481                 wcycle);
482
483         integrator->setPbc(PbcType::Xyz, state->box);
484     }
485
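    // Pinning the host-side coordinate (and, with GPU update, velocity) buffers allows
    // host-device copies to be asynchronous where the hardware supports it.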
486     if (useGpuForPme || (useGpuForNonbonded && useGpuForBufferOps) || useGpuForUpdate)
487     {
488         changePinningPolicy(&state->x, PinningPolicy::PinnedIfSupported);
489     }
490     if (useGpuForUpdate)
491     {
492         changePinningPolicy(&state->v, PinningPolicy::PinnedIfSupported);
493     }
494
495     // NOTE: The global state is no longer used at this point.
496     // But state_global is still used as temporary storage space for writing
497     // the global state to file and potentially for replica exchange.
498     // (Global topology should persist.)
499
500     update_mdatoms(mdatoms, state->lambda[FreeEnergyPerturbationCouplingType::Mass]);
501
502     if (ir->bExpanded)
503     {
504         /* Check nstexpanded here, because the grompp check was broken */
505         if (ir->expandedvals->nstexpanded % ir->nstcalcenergy != 0)
506         {
507             gmx_fatal(FARGS,
508                       "With expanded ensemble, nstexpanded should be a multiple of nstcalcenergy");
509         }
510         init_expanded_ensemble(startingBehavior != StartingBehavior::NewSimulation, ir, state->dfhist);
511     }
512
513     if (MASTER(cr))
514     {
515         EnergyData::initializeEnergyHistory(startingBehavior, observablesHistory, &energyOutput);
516     }
517
518     preparePrevStepPullCom(ir,
519                            pull_work,
520                            gmx::arrayRefFromArray(mdatoms->massT, mdatoms->nr),
521                            state,
522                            state_global,
523                            cr,
524                            startingBehavior != StartingBehavior::NewSimulation);
525
526     // TODO: Remove this by converting AWH into a ForceProvider
527     auto awh = prepareAwhModule(fplog,
528                                 *ir,
529                                 state_global,
530                                 cr,
531                                 ms,
532                                 startingBehavior != StartingBehavior::NewSimulation,
533                                 shellfc != nullptr,
534                                 opt2fn("-awh", nfile, fnm),
535                                 pull_work);
536
537     if (useReplicaExchange && MASTER(cr))
538     {
539         repl_ex = init_replica_exchange(fplog, ms, top_global.natoms, ir, replExParams);
540     }
541     /* PME tuning is only supported in the Verlet scheme, with PME for
542      * Coulomb. It is not supported with only LJ PME. */
543     bPMETune = (mdrunOptions.tunePme && EEL_PME(fr->ic->eeltype) && !mdrunOptions.reproducible
544                 && ir->cutoff_scheme != CutoffScheme::Group);
545
546     pme_load_balancing_t* pme_loadbal = nullptr;
547     if (bPMETune)
548     {
549         pme_loadbal_init(
550                 &pme_loadbal, cr, mdlog, *ir, state->box, *fr->ic, *fr->nbv, fr->pmedata, fr->nbv->useGpu());
551     }
552
553     if (!ir->bContinuation)
554     {
555         if (state->flags & enumValueToBitMask(StateEntry::V))
556         {
557             auto v = makeArrayRef(state->v);
558             /* Set the velocities of shells and frozen atoms to zero */
559             for (i = 0; i < mdatoms->homenr; i++)
560             {
561                 if (mdatoms->ptype[i] == ParticleType::Shell)
562                 {
563                     clear_rvec(v[i]);
564                 }
565                 else if (mdatoms->cFREEZE)
566                 {
567                     for (m = 0; m < DIM; m++)
568                     {
569                         if (ir->opts.nFreeze[mdatoms->cFREEZE[i]][m])
570                         {
571                             v[i][m] = 0;
572                         }
573                     }
574                 }
575             }
576         }
577
578         if (constr)
579         {
580             /* Constrain the initial coordinates and velocities */
581             do_constrain_first(fplog,
582                                constr,
583                                ir,
584                                mdatoms->nr,
585                                mdatoms->homenr,
586                                state->x.arrayRefWithPadding(),
587                                state->v.arrayRefWithPadding(),
588                                state->box,
589                                state->lambda[FreeEnergyPerturbationCouplingType::Bonded]);
590         }
591     }
592
593     if (ir->efep != FreeEnergyPerturbationType::No)
594     {
595         /* Set the free energy calculation frequency as the greatest common
596          * divisor of nstdhdl, nstexpanded, repl_ex_nst and the AWH sampling interval. */
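        /* For example, nstdhdl = 100 combined with a replica exchange interval of
           60 gives nstfep = gcd(100, 60) = 20. */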
597         nstfep = ir->fepvals->nstdhdl;
598         if (ir->bExpanded)
599         {
600             nstfep = std::gcd(ir->expandedvals->nstexpanded, nstfep);
601         }
602         if (useReplicaExchange)
603         {
604             nstfep = std::gcd(replExParams.exchangeInterval, nstfep);
605         }
606         if (ir->bDoAwh)
607         {
608             nstfep = std::gcd(ir->awhParams->nstSampleCoord(), nstfep);
609         }
610     }
611
612     /* Be REALLY careful about what flags you set here. You CANNOT assume
613      * this is the first step, since we might be restarting from a checkpoint,
614      * and in that case we should not do any modifications to the state.
615      */
616     bStopCM = (ir->comm_mode != ComRemovalAlgorithm::No && !ir->bContinuation);
617
618     // When restarting from a checkpoint, it can be appropriate to
619     // initialize ekind from quantities in the checkpoint. Otherwise,
620     // compute_globals must initialize ekind before the simulation
621     // starts/restarts. However, only the master rank knows what was
622     // found in the checkpoint file, so we have to communicate in
623     // order to coordinate the restart.
624     //
625     // TODO Consider removing this communication if/when checkpoint
626     // reading directly follows .tpr reading, because all ranks can
627     // agree on hasReadEkinState at that time.
628     bool hasReadEkinState = MASTER(cr) ? state_global->ekinstate.hasReadEkinState : false;
629     if (PAR(cr))
630     {
631         gmx_bcast(sizeof(hasReadEkinState), &hasReadEkinState, cr->mpi_comm_mygroup);
632     }
633     if (hasReadEkinState)
634     {
635         restore_ekinstate_from_state(cr, ekind, &state_global->ekinstate);
636     }
637
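    // Flags for the initial call(s) to compute_globals: do a global reduction and compute the
    // temperature; with velocity-Verlet integrators also the pressure and the constraint
    // contributions, and restore the kinetic energy read from a checkpoint when one was present.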
638     unsigned int cglo_flags =
639             (CGLO_TEMPERATURE | CGLO_GSTAT | (EI_VV(ir->eI) ? CGLO_PRESSURE : 0)
640              | (EI_VV(ir->eI) ? CGLO_CONSTRAINT : 0) | (hasReadEkinState ? CGLO_READEKIN : 0));
641
642     bSumEkinhOld = FALSE;
643
644     t_vcm vcm(top_global.groups, *ir);
645     reportComRemovalInfo(fplog, vcm);
646
647     /* To minimize communication, compute_globals computes the COM velocity
648      * and the kinetic energy for the velocities without COM motion removed.
649      * Thus to get the kinetic energy without the COM contribution, we need
650      * to call compute_globals twice.
651      */
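    // On the first pass the COM motion is removed (CGLO_STOPCM) with the temperature flag
    // cleared; the second pass then computes the temperature from the corrected velocities.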
652     for (int cgloIteration = 0; cgloIteration < (bStopCM ? 2 : 1); cgloIteration++)
653     {
654         unsigned int cglo_flags_iteration = cglo_flags;
655         if (bStopCM && cgloIteration == 0)
656         {
657             cglo_flags_iteration |= CGLO_STOPCM;
658             cglo_flags_iteration &= ~CGLO_TEMPERATURE;
659         }
660         if (DOMAINDECOMP(cr) && shouldCheckNumberOfBondedInteractions(*cr->dd) && cgloIteration == 0)
661         {
662             cglo_flags_iteration |= CGLO_CHECK_NUMBER_OF_BONDED_INTERACTIONS;
663         }
664         compute_globals(gstat,
665                         cr,
666                         ir,
667                         fr,
668                         ekind,
669                         makeConstArrayRef(state->x),
670                         makeConstArrayRef(state->v),
671                         state->box,
672                         mdatoms,
673                         nrnb,
674                         &vcm,
675                         nullptr,
676                         enerd,
677                         force_vir,
678                         shake_vir,
679                         total_vir,
680                         pres,
681                         gmx::ArrayRef<real>{},
682                         &nullSignaller,
683                         state->box,
684                         &bSumEkinhOld,
685                         cglo_flags_iteration);
686         if (cglo_flags_iteration & CGLO_STOPCM)
687         {
688             /* At initialization, do not pass x with acceleration-correction mode
689              * to avoid (incorrect) correction of the initial coordinates.
690              */
691             auto x = (vcm.mode == ComRemovalAlgorithm::LinearAccelerationCorrection)
692                              ? ArrayRef<RVec>()
693                              : makeArrayRef(state->x);
694             process_and_stopcm_grp(fplog, &vcm, *mdatoms, x, makeArrayRef(state->v));
695             inc_nrnb(nrnb, eNR_STOPCM, mdatoms->homenr);
696         }
697     }
698     if (DOMAINDECOMP(cr))
699     {
700         checkNumberOfBondedInteractions(
701                 mdlog, cr, top_global, &top, makeConstArrayRef(state->x), state->box);
702     }
703     if (ir->eI == IntegrationAlgorithm::VVAK)
704     {
705         /* a second call to get the half step temperature initialized as well */
706         /* we do the same call as above, but turn the pressure off -- internally to
707            compute_globals, this is recognized as a velocity Verlet half-step
708            kinetic energy calculation. This minimizes excess variables, but
709            perhaps loses some logic? */
710
711         compute_globals(gstat,
712                         cr,
713                         ir,
714                         fr,
715                         ekind,
716                         makeConstArrayRef(state->x),
717                         makeConstArrayRef(state->v),
718                         state->box,
719                         mdatoms,
720                         nrnb,
721                         &vcm,
722                         nullptr,
723                         enerd,
724                         force_vir,
725                         shake_vir,
726                         total_vir,
727                         pres,
728                         gmx::ArrayRef<real>{},
729                         &nullSignaller,
730                         state->box,
731                         &bSumEkinhOld,
732                         cglo_flags & ~CGLO_PRESSURE);
733     }
734
735     /* Calculate the initial half step temperature, and save the ekinh_old */
736     if (startingBehavior == StartingBehavior::NewSimulation)
737     {
738         for (i = 0; (i < ir->opts.ngtc); i++)
739         {
740             copy_mat(ekind->tcstat[i].ekinh, ekind->tcstat[i].ekinh_old);
741         }
742     }
743
744     /* need to make an initialization call to get the Trotter variables set, as well as other constants
745        for non-Trotter temperature control */
746     auto trotter_seq = init_npt_vars(ir, state, &MassQ, bTrotter);
747
748     if (MASTER(cr))
749     {
750         if (!ir->bContinuation)
751         {
752             if (constr && ir->eConstrAlg == ConstraintAlgorithm::Lincs)
753             {
754                 fprintf(fplog,
755                         "RMS relative constraint deviation after constraining: %.2e\n",
756                         constr->rmsd());
757             }
758             if (EI_STATE_VELOCITY(ir->eI))
759             {
760                 real temp = enerd->term[F_TEMP];
761                 if (ir->eI != IntegrationAlgorithm::VV)
762                 {
763                     /* Result of Ekin averaged over velocities of -half
764                      * and +half step, while we only have -half step here.
765                      */
766                     temp *= 2;
767                 }
768                 fprintf(fplog, "Initial temperature: %g K\n", temp);
769             }
770         }
771
772         char tbuf[20];
773         fprintf(stderr, "starting mdrun '%s'\n", *(top_global.name));
774         if (ir->nsteps >= 0)
775         {
776             sprintf(tbuf, "%8.1f", (ir->init_step + ir->nsteps) * ir->delta_t);
777         }
778         else
779         {
780             sprintf(tbuf, "%s", "infinite");
781         }
782         if (ir->init_step > 0)
783         {
784             fprintf(stderr,
785                     "%s steps, %s ps (continuing from step %s, %8.1f ps).\n",
786                     gmx_step_str(ir->init_step + ir->nsteps, sbuf),
787                     tbuf,
788                     gmx_step_str(ir->init_step, sbuf2),
789                     ir->init_step * ir->delta_t);
790         }
791         else
792         {
793             fprintf(stderr, "%s steps, %s ps.\n", gmx_step_str(ir->nsteps, sbuf), tbuf);
794         }
795         fprintf(fplog, "\n");
796     }
797
798     walltime_accounting_start_time(walltime_accounting);
799     wallcycle_start(wcycle, WallCycleCounter::Run);
800     print_start(fplog, cr, walltime_accounting, "mdrun");
801
802     /***********************************************************
803      *
804      *             Loop over MD steps
805      *
806      ************************************************************/
807
808     bFirstStep = TRUE;
809     /* Skip the first Nose-Hoover integration when we get the state from tpx */
810     bInitStep        = startingBehavior == StartingBehavior::NewSimulation || EI_VV(ir->eI);
811     bSumEkinhOld     = FALSE;
812     bExchanged       = FALSE;
813     bNeedRepartition = FALSE;
814
815     step     = ir->init_step;
816     step_rel = 0;
817
818     auto stopHandler = stopHandlerBuilder->getStopHandlerMD(
819             compat::not_null<SimulationSignal*>(&signals[eglsSTOPCOND]),
820             simulationsShareState,
821             MASTER(cr),
822             ir->nstlist,
823             mdrunOptions.reproducible,
824             nstSignalComm,
825             mdrunOptions.maximumHoursToRun,
826             ir->nstlist == 0,
827             fplog,
828             step,
829             bNS,
830             walltime_accounting);
831
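    // The checkpoint handler acts on the eglsCHKPT signal, which is propagated between ranks
    // (and between simulations when they share state) so that all ranks checkpoint on the same step.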
832     auto checkpointHandler = std::make_unique<CheckpointHandler>(
833             compat::make_not_null<SimulationSignal*>(&signals[eglsCHKPT]),
834             simulationsShareState,
835             ir->nstlist == 0,
836             MASTER(cr),
837             mdrunOptions.writeConfout,
838             mdrunOptions.checkpointOptions.period);
839
840     const bool resetCountersIsLocal = true;
841     auto       resetHandler         = std::make_unique<ResetHandler>(
842             compat::make_not_null<SimulationSignal*>(&signals[eglsRESETCOUNTERS]),
843             !resetCountersIsLocal,
844             ir->nsteps,
845             MASTER(cr),
846             mdrunOptions.timingOptions.resetHalfway,
847             mdrunOptions.maximumHoursToRun,
848             mdlog,
849             wcycle,
850             walltime_accounting);
851
852     const DDBalanceRegionHandler ddBalanceRegionHandler(cr);
853
854     if (MASTER(cr) && isMultiSim(ms) && !useReplicaExchange)
855     {
856         logInitialMultisimStatus(ms, cr, mdlog, simulationsShareState, ir->nsteps, ir->init_step);
857     }
858
859     /* and stop now if we should */
860     bLastStep = (bLastStep || (ir->nsteps >= 0 && step_rel > ir->nsteps));
861     while (!bLastStep)
862     {
863
864         /* Determine if this is a neighbor search step */
865         bNStList = (ir->nstlist > 0 && step % ir->nstlist == 0);
866
867         if (bPMETune && bNStList)
868         {
869             // This has to be here because PME load balancing is called so early.
870             // TODO: Move to after all booleans are defined.
871             if (useGpuForUpdate && !bFirstStep)
872             {
873                 stateGpu->copyCoordinatesFromGpu(state->x, AtomLocality::Local);
874                 stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
875             }
876             /* PME grid + cut-off optimization with GPUs or PME nodes */
877             pme_loadbal_do(pme_loadbal,
878                            cr,
879                            (mdrunOptions.verbose && MASTER(cr)) ? stderr : nullptr,
880                            fplog,
881                            mdlog,
882                            *ir,
883                            fr,
884                            state->box,
885                            state->x,
886                            wcycle,
887                            step,
888                            step_rel,
889                            &bPMETunePrinting,
890                            simulationWork.useGpuPmePpCommunication);
891         }
892
893         wallcycle_start(wcycle, WallCycleCounter::Step);
894
895         bLastStep = (step_rel == ir->nsteps);
896         t         = t0 + step * ir->delta_t;
897
898         // TODO Refactor this, so that nstfep does not need a default value of zero
899         if (ir->efep != FreeEnergyPerturbationType::No || ir->bSimTemp)
900         {
901             /* find and set the current lambdas */
902             state->lambda = currentLambdas(step, *(ir->fepvals), state->fep_state);
903
904             bDoDHDL = do_per_step(step, ir->fepvals->nstdhdl);
905             bDoFEP  = ((ir->efep != FreeEnergyPerturbationType::No) && do_per_step(step, nstfep));
906             bDoExpanded = (do_per_step(step, ir->expandedvals->nstexpanded) && (ir->bExpanded)
907                            && (!bFirstStep));
908         }
909
910         bDoReplEx = (useReplicaExchange && (step > 0) && !bLastStep
911                      && do_per_step(step, replExParams.exchangeInterval));
912
913         if (doSimulatedAnnealing)
914         {
915             // TODO: Avoid changing inputrec (#3854)
916             // Simulated annealing updates the reference temperature.
917             auto* nonConstInputrec = const_cast<t_inputrec*>(inputrec);
918             update_annealing_target_temp(nonConstInputrec, t, &upd);
919         }
920
921         /* Stop Center of Mass motion */
922         bStopCM = (ir->comm_mode != ComRemovalAlgorithm::No && do_per_step(step, ir->nstcomm));
923
924         /* Determine whether or not to do Neighbour Searching */
925         bNS = (bFirstStep || bNStList || bExchanged || bNeedRepartition);
926
927         /* Note that the stopHandler will cause termination at nstglobalcomm
928          * steps. Since this coincides with nstcalcenergy, nsttcouple and/or
929          * nstpcouple steps, we have computed the half-step kinetic energy
930          * of the previous step and can always output energies at the last step.
931          */
932         bLastStep = bLastStep || stopHandler->stoppingAfterCurrentStep(bNS);
933
934         /* do_log triggers energy and virial calculation. Because this leads
935          * to different code paths, forces can be different. Thus for exact
936          * continuation we should avoid extra log output.
937          * Note that the || bLastStep can result in non-exact continuation
938          * beyond the last step. But we don't consider that to be an issue.
939          */
940         do_log     = (do_per_step(step, ir->nstlog)
941                   || (bFirstStep && startingBehavior == StartingBehavior::NewSimulation) || bLastStep);
942         do_verbose = mdrunOptions.verbose
943                      && (step % mdrunOptions.verboseStepPrintInterval == 0 || bFirstStep || bLastStep);
944
945         if (useGpuForUpdate && !bFirstStep && bNS)
946         {
947             // Copy velocities from the GPU on search steps to keep a copy on host (device buffers are reinitialized).
948             stateGpu->copyVelocitiesFromGpu(state->v, AtomLocality::Local);
949             stateGpu->waitVelocitiesReadyOnHost(AtomLocality::Local);
950             // Copy coordinates from the GPU when needed at the search step.
951             // NOTE: The cases when coordinates are needed on the CPU for force evaluation are handled in sim_utils.
952             // NOTE: If the coordinates are to be written to an output file, they are also copied separately before the output.
953             stateGpu->copyCoordinatesFromGpu(state->x, AtomLocality::Local);
954             stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
955         }
956
957         // We only need to calculate virtual velocities if we are writing them in the current step
958         const bool needVirtualVelocitiesThisStep =
959                 (vsite != nullptr)
960                 && (do_per_step(step, ir->nstvout) || checkpointHandler->isCheckpointingStep());
961
962         if (vsite != nullptr)
963         {
964             // Virtual sites need to be updated before domain decomposition and forces are calculated
965             wallcycle_start(wcycle, WallCycleCounter::VsiteConstr);
966             // md-vv calculates virtual velocities once it has full-step real velocities
967             vsite->construct(state->x,
968                              state->v,
969                              state->box,
970                              (!EI_VV(inputrec->eI) && needVirtualVelocitiesThisStep)
971                                      ? VSiteOperation::PositionsAndVelocities
972                                      : VSiteOperation::Positions);
973             wallcycle_stop(wcycle, WallCycleCounter::VsiteConstr);
974         }
975
976         if (bNS && !(bFirstStep && ir->bContinuation))
977         {
978             bMasterState = FALSE;
979             /* Correct the new box if it is too skewed */
980             if (inputrecDynamicBox(ir))
981             {
982                 if (correct_box(fplog, step, state->box))
983                 {
984                     bMasterState = TRUE;
985                     // If update is offloaded, it should be informed about the box size change
986                     if (useGpuForUpdate)
987                     {
988                         integrator->setPbc(PbcType::Xyz, state->box);
989                     }
990                 }
991             }
992             if (DOMAINDECOMP(cr) && bMasterState)
993             {
994                 dd_collect_state(cr->dd, state, state_global);
995             }
996
997             if (DOMAINDECOMP(cr))
998             {
999                 /* Repartition the domain decomposition */
1000                 dd_partition_system(fplog,
1001                                     mdlog,
1002                                     step,
1003                                     cr,
1004                                     bMasterState,
1005                                     nstglobalcomm,
1006                                     state_global,
1007                                     top_global,
1008                                     *ir,
1009                                     imdSession,
1010                                     pull_work,
1011                                     state,
1012                                     &f,
1013                                     mdAtoms,
1014                                     &top,
1015                                     fr,
1016                                     vsite,
1017                                     constr,
1018                                     nrnb,
1019                                     wcycle,
1020                                     do_verbose && !bPMETunePrinting);
1021                 upd.updateAfterPartition(state->natoms,
1022                                          md->cFREEZE ? gmx::arrayRefFromArray(md->cFREEZE, md->nr)
1023                                                      : gmx::ArrayRef<const unsigned short>(),
1024                                          md->cTC ? gmx::arrayRefFromArray(md->cTC, md->nr)
1025                                                  : gmx::ArrayRef<const unsigned short>());
1026             }
1027         }
1028
1029         // Allocate or re-size GPU halo exchange object, if necessary
1030         if (bNS && havePPDomainDecomposition(cr) && simulationWork.useGpuHaloExchange)
1031         {
1032             GMX_RELEASE_ASSERT(fr->deviceStreamManager != nullptr,
1033                                "GPU device manager has to be initialized to use GPU "
1034                                "version of halo exchange.");
1035             constructGpuHaloExchange(mdlog, *cr, *fr->deviceStreamManager, wcycle);
1036         }
1037
1038         if (MASTER(cr) && do_log)
1039         {
1040             gmx::EnergyOutput::printHeader(
1041                     fplog, step, t); /* can we improve the information printed here? */
1042         }
1043
1044         if (ir->efep != FreeEnergyPerturbationType::No)
1045         {
1046             update_mdatoms(mdatoms, state->lambda[FreeEnergyPerturbationCouplingType::Mass]);
1047         }
1048
1049         if (bExchanged)
1050         {
1051             /* We need the kinetic energy at the minus-half step for determining
1052              * the full-step kinetic energy and possibly for T-coupling. */
1053             /* This may not be quite working correctly yet . . . . */
1054             int cglo_flags = CGLO_GSTAT | CGLO_TEMPERATURE;
1055             if (DOMAINDECOMP(cr) && shouldCheckNumberOfBondedInteractions(*cr->dd))
1056             {
1057                 cglo_flags |= CGLO_CHECK_NUMBER_OF_BONDED_INTERACTIONS;
1058             }
1059             compute_globals(gstat,
1060                             cr,
1061                             ir,
1062                             fr,
1063                             ekind,
1064                             makeConstArrayRef(state->x),
1065                             makeConstArrayRef(state->v),
1066                             state->box,
1067                             mdatoms,
1068                             nrnb,
1069                             &vcm,
1070                             wcycle,
1071                             enerd,
1072                             nullptr,
1073                             nullptr,
1074                             nullptr,
1075                             nullptr,
1076                             gmx::ArrayRef<real>{},
1077                             &nullSignaller,
1078                             state->box,
1079                             &bSumEkinhOld,
1080                             cglo_flags);
1081             if (DOMAINDECOMP(cr))
1082             {
1083                 checkNumberOfBondedInteractions(
1084                         mdlog, cr, top_global, &top, makeConstArrayRef(state->x), state->box);
1085             }
1086         }
1087         clear_mat(force_vir);
1088
1089         checkpointHandler->decideIfCheckpointingThisStep(bNS, bFirstStep, bLastStep);
1090
1091         /* Determine the energy and pressure:
1092          * at nstcalcenergy steps and at energy output steps (set below).
1093          */
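        // With velocity-Verlet integrators the virial is computed both on pressure-coupling
        // steps and on the step immediately following them.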
1094         if (EI_VV(ir->eI) && (!bInitStep))
1095         {
1096             bCalcEnerStep = do_per_step(step, ir->nstcalcenergy);
1097             bCalcVir      = bCalcEnerStep
1098                        || (ir->epc != PressureCoupling::No
1099                            && (do_per_step(step, ir->nstpcouple) || do_per_step(step - 1, ir->nstpcouple)));
1100         }
1101         else
1102         {
1103             bCalcEnerStep = do_per_step(step, ir->nstcalcenergy);
1104             bCalcVir      = bCalcEnerStep
1105                        || (ir->epc != PressureCoupling::No && do_per_step(step, ir->nstpcouple));
1106         }
1107         bCalcEner = bCalcEnerStep;
1108
1109         do_ene = (do_per_step(step, ir->nstenergy) || bLastStep);
1110
1111         if (do_ene || do_log || bDoReplEx)
1112         {
1113             bCalcVir  = TRUE;
1114             bCalcEner = TRUE;
1115         }
1116
1117         /* Do we need global communication ? */
1118         bGStat = (bCalcVir || bCalcEner || bStopCM || do_per_step(step, nstglobalcomm)
1119                   || (EI_VV(ir->eI) && inputrecNvtTrotter(ir) && do_per_step(step - 1, nstglobalcomm)));
1120
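        // Assemble the force flags for this step: the state has changed and all forces are
        // needed; virial, energy and dH/dlambda contributions are requested only on the steps
        // where they are used, and with MTS the separately stored normal forces are only
        // needed on steps where forces are written out.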
1121         force_flags = (GMX_FORCE_STATECHANGED | ((inputrecDynamicBox(ir)) ? GMX_FORCE_DYNAMICBOX : 0)
1122                        | GMX_FORCE_ALLFORCES | (bCalcVir ? GMX_FORCE_VIRIAL : 0)
1123                        | (bCalcEner ? GMX_FORCE_ENERGY : 0) | (bDoFEP ? GMX_FORCE_DHDL : 0));
1124         if (fr->useMts && !do_per_step(step, ir->nstfout))
1125         {
1126             force_flags |= GMX_FORCE_DO_NOT_NEED_NORMAL_FORCE;
1127         }
1128
1129         if (shellfc)
1130         {
1131             /* Now is the time to relax the shells */
1132             relax_shell_flexcon(fplog,
1133                                 cr,
1134                                 ms,
1135                                 mdrunOptions.verbose,
1136                                 enforcedRotation,
1137                                 step,
1138                                 ir,
1139                                 imdSession,
1140                                 pull_work,
1141                                 bNS,
1142                                 force_flags,
1143                                 &top,
1144                                 constr,
1145                                 enerd,
1146                                 state->natoms,
1147                                 state->x.arrayRefWithPadding(),
1148                                 state->v.arrayRefWithPadding(),
1149                                 state->box,
1150                                 state->lambda,
1151                                 &state->hist,
1152                                 &f.view(),
1153                                 force_vir,
1154                                 *mdatoms,
1155                                 nrnb,
1156                                 wcycle,
1157                                 shellfc,
1158                                 fr,
1159                                 runScheduleWork,
1160                                 t,
1161                                 mu_tot,
1162                                 vsite,
1163                                 ddBalanceRegionHandler);
1164         }
1165         else
1166         {
1167             /* The AWH history needs to be saved _before_ doing force calculations where the AWH bias
1168                is updated (or the AWH update will be performed twice for one step when continuing).
1169                It would be best to call this update function from do_md_trajectory_writing but that
1170                would occur after do_force. One would have to divide the update_awh function into one
1171                function applying the AWH force and one doing the AWH bias update. The update AWH
1172                bias function could then be called after do_md_trajectory_writing (then containing
1173                update_awh_history). The checkpointing will in the future probably be moved to the start
1174                of the md loop, which will get rid of this issue. */
1175             if (awh && checkpointHandler->isCheckpointingStep() && MASTER(cr))
1176             {
1177                 awh->updateHistory(state_global->awhHistory.get());
1178             }
1179
1180             /* The coordinates (x) are shifted (to get whole molecules)
1181              * in do_force.
1182              * This is parallelized as well, and does communication too.
1183              * Check comments in sim_util.c
1184              */
1185             do_force(fplog,
1186                      cr,
1187                      ms,
1188                      *ir,
1189                      awh.get(),
1190                      enforcedRotation,
1191                      imdSession,
1192                      pull_work,
1193                      step,
1194                      nrnb,
1195                      wcycle,
1196                      &top,
1197                      state->box,
1198                      state->x.arrayRefWithPadding(),
1199                      &state->hist,
1200                      &f.view(),
1201                      force_vir,
1202                      mdatoms,
1203                      enerd,
1204                      state->lambda,
1205                      fr,
1206                      runScheduleWork,
1207                      vsite,
1208                      mu_tot,
1209                      t,
1210                      ed ? ed->getLegacyED() : nullptr,
1211                      (bNS ? GMX_FORCE_NS : 0) | force_flags,
1212                      ddBalanceRegionHandler);
1213         }
1214
1215         // VV integrators do not need the following velocity half step
1216         // if it is the first step after starting from a checkpoint.
1217         // That is, the half step is needed on all other steps, and
1218         // also the first step when starting from a .tpr file.
1219         if (EI_VV(ir->eI))
1220         {
1221             integrateVVFirstStep(step,
1222                                  bFirstStep,
1223                                  bInitStep,
1224                                  startingBehavior,
1225                                  nstglobalcomm,
1226                                  ir,
1227                                  fr,
1228                                  cr,
1229                                  state,
1230                                  mdatoms,
1231                                  fcdata,
1232                                  &MassQ,
1233                                  &vcm,
1234                                  top_global,
1235                                  top,
1236                                  enerd,
1237                                  ekind,
1238                                  gstat,
1239                                  &last_ekin,
1240                                  bCalcVir,
1241                                  total_vir,
1242                                  shake_vir,
1243                                  force_vir,
1244                                  pres,
1245                                  M,
1246                                  do_log,
1247                                  do_ene,
1248                                  bCalcEner,
1249                                  bGStat,
1250                                  bStopCM,
1251                                  bTrotter,
1252                                  bExchanged,
1253                                  &bSumEkinhOld,
1254                                  &saved_conserved_quantity,
1255                                  &f,
1256                                  &upd,
1257                                  constr,
1258                                  &nullSignaller,
1259                                  trotter_seq,
1260                                  nrnb,
1261                                  mdlog,
1262                                  fplog,
1263                                  wcycle);
1264             if (vsite != nullptr && needVirtualVelocitiesThisStep)
1265             {
1266                 // Positions were calculated earlier
1267                 wallcycle_start(wcycle, WallCycleCounter::VsiteConstr);
1268                 vsite->construct(state->x, state->v, state->box, VSiteOperation::Velocities);
1269                 wallcycle_stop(wcycle, WallCycleCounter::VsiteConstr);
1270             }
1271         }
1272
1273         /* ########  END FIRST UPDATE STEP  ############## */
1274         /* ########  If doing VV, we now have v(dt) ###### */
1275         if (bDoExpanded)
1276         {
1277             /* perform extended ensemble sampling in lambda - we don't
1278                actually move to the new state before outputting
1279                statistics, but if performing simulated tempering, we
1280                do update the velocities and the tau_t. */
1281             // TODO: Avoid changing inputrec (#3854)
1282             // Simulated tempering updates the reference temperature.
1283             // Expanded ensemble without simulated tempering does not change the inputrec.
1284             auto* nonConstInputrec = const_cast<t_inputrec*>(inputrec);
1285             lamnew                 = ExpandedEnsembleDynamics(fplog,
1286                                               nonConstInputrec,
1287                                               enerd,
1288                                               state,
1289                                               &MassQ,
1290                                               state->fep_state,
1291                                               state->dfhist,
1292                                               step,
1293                                               state->v.rvec_array(),
1294                                               mdatoms);
1295             /* history is maintained in state->dfhist, but state_global is what is sent to trajectory and log output */
1296             if (MASTER(cr))
1297             {
1298                 copy_df_history(state_global->dfhist, state->dfhist);
1299             }
1300         }
1301
1302         // Copy coordinates from the GPU for the output/checkpointing if the update is offloaded and
1303         // coordinates have not already been copied for i) search or ii) CPU force tasks.
1304         if (useGpuForUpdate && !bNS && !runScheduleWork->domainWork.haveCpuLocalForceWork
1305             && (do_per_step(step, ir->nstxout) || do_per_step(step, ir->nstxout_compressed)
1306                 || checkpointHandler->isCheckpointingStep()))
1307         {
1308             stateGpu->copyCoordinatesFromGpu(state->x, AtomLocality::Local);
1309             stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
1310         }
1311         // Copy velocities if needed for the output/checkpointing.
1312         // NOTE: Copy on the search steps is done at the beginning of the step.
1313         if (useGpuForUpdate && !bNS
1314             && (do_per_step(step, ir->nstvout) || checkpointHandler->isCheckpointingStep()))
1315         {
1316             stateGpu->copyVelocitiesFromGpu(state->v, AtomLocality::Local);
1317             stateGpu->waitVelocitiesReadyOnHost(AtomLocality::Local);
1318         }
1319         // Copy forces for the output if the forces were reduced on the GPU (not the case on virial steps)
1320         // and the update is offloaded, hence the forces are kept on the GPU for the update and have not been
1321         // already transferred in do_force().
1322         // TODO: There should be an improved, explicit mechanism that ensures this copy is only executed
1323         //       when the forces are ready on the GPU -- the same synchronizer should be used as the one
1324         //       prior to GPU update.
1325         // TODO: When the output flags are included in the step workload, this copy can be combined with the
1326         //       copy call in do_force(...).
1327         // NOTE: The forces should not be copied here if the vsites are present, since they were modified
1328         //       on the host after the D2H copy in do_force(...).
1329         if (runScheduleWork->stepWork.useGpuFBufferOps && (simulationWork.useGpuUpdate && !vsite)
1330             && do_per_step(step, ir->nstfout))
1331         {
1332             stateGpu->copyForcesFromGpu(f.view().force(), AtomLocality::Local);
1333             stateGpu->waitForcesReadyOnHost(AtomLocality::Local);
1334         }
1335         /* Now we have the energies and forces corresponding to the
1336          * coordinates at time t. We must output all of this before
1337          * the update.
1338          */
1339         do_md_trajectory_writing(fplog,
1340                                  cr,
1341                                  nfile,
1342                                  fnm,
1343                                  step,
1344                                  step_rel,
1345                                  t,
1346                                  ir,
1347                                  state,
1348                                  state_global,
1349                                  observablesHistory,
1350                                  top_global,
1351                                  fr,
1352                                  outf,
1353                                  energyOutput,
1354                                  ekind,
1355                                  f.view().force(),
1356                                  checkpointHandler->isCheckpointingStep(),
1357                                  bRerunMD,
1358                                  bLastStep,
1359                                  mdrunOptions.writeConfout,
1360                                  bSumEkinhOld);
1361         /* Check whether this is an IMD step, and do IMD communication if bIMD is TRUE. */
1362         bInteractiveMDstep = imdSession->run(step, bNS, state->box, state->x, t);
1363
1364         /* kludge -- virial is lost with restart for MTTK NPT control. Must reload (saved earlier). */
1365         if (startingBehavior != StartingBehavior::NewSimulation && bFirstStep
1366             && (inputrecNptTrotter(ir) || inputrecNphTrotter(ir)))
1367         {
1368             copy_mat(state->svir_prev, shake_vir);
1369             copy_mat(state->fvir_prev, force_vir);
1370         }
1371
1372         stopHandler->setSignal();
1373         resetHandler->setSignal(walltime_accounting);
1374
1375         if (bGStat || !PAR(cr))
1376         {
1377             /* In parallel we only have to check for checkpointing in steps
1378              * where we do global communication,
1379              * otherwise the other nodes don't know.
1380              */
1381             checkpointHandler->setSignal(walltime_accounting);
1382         }
1383
1384         /* #########   START SECOND UPDATE STEP ################# */
1385
1386         /* At the start of the step, randomize or scale the velocities (if VV). The restrictions
1387            on the Andersen thermostat are controlled in preprocessing */
1388
1389         if (ETC_ANDERSEN(ir->etc)) /* keep this outside of update_tcouple because of the extra info that needs to be passed */
1390         {
1391             const gmx_bool bIfRandomize =
1392                     update_randomize_velocities(ir, step, cr, mdatoms, state->v, &upd, constr);
1393             /* if we have constraints, we have to remove the kinetic energy parallel to the bonds */
1394             if (constr && bIfRandomize)
1395             {
1396                 constrain_velocities(constr, do_log, do_ene, step, state, nullptr, false, nullptr);
1397             }
1398         }
1399         /* Box is changed in update() when we do pressure coupling,
1400          * but we should still use the old box for energy corrections and when
1401          * writing it to the energy file, so it matches the trajectory files for
1402          * the same timestep above. Make a copy in a separate array.
1403          */
1404         copy_mat(state->box, lastbox);
1405
1406         dvdl_constr = 0;
1407
1408         if (!useGpuForUpdate)
1409         {
1410             wallcycle_start(wcycle, WallCycleCounter::Update);
1411         }
1412         /* UPDATE PRESSURE VARIABLES IN TROTTER FORMULATION WITH CONSTRAINTS */
1413         if (bTrotter)
1414         {
1415             trotter_update(ir, step, ekind, enerd, state, total_vir, mdatoms, &MassQ, trotter_seq, ettTSEQ3);
1416             /* We can only do Berendsen coupling after we have summed
1417              * the kinetic energy or virial. Since that happens
1418              * in global_state after update, we should only do it at
1419              * step % nstlist = 1 with bGStatEveryStep=FALSE.
1420              */
1421         }
1422         else
1423         {
1424             update_tcouple(step, ir, state, ekind, &MassQ, mdatoms);
1425             update_pcouple_before_coordinates(fplog, step, ir, state, pressureCouplingMu, M, bInitStep);
1426         }
1427
1428         /* With leap-frog type integrators we compute the kinetic energy
1429          * at a whole time step as the average of the half-time step kinetic
1430          * energies of two subsequent steps. Therefore we need to compute the
1431          * half step kinetic energy also if we need energies at the next step.
1432          */
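             /* As a sketch of the averaging used here (leap-frog convention):
              *   Ekin(t) ~ 0.5*(Ekin(t - dt/2) + Ekin(t + dt/2)),
              * which is why the half-step kinetic energy also has to be computed
              * when energies are needed at the next step.
              */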
1433         const bool needHalfStepKineticEnergy =
1434                 (!EI_VV(ir->eI) && (do_per_step(step + 1, nstglobalcomm) || step_rel + 1 == ir->nsteps));
1435
1436         // Parrinello-Rahman requires the pressure to be available before the update to compute
1437         // the velocity scaling matrix. Hence, it runs one step after the nstpcouple step.
1438         const bool doParrinelloRahman = (ir->epc == PressureCoupling::ParrinelloRahman
1439                                          && do_per_step(step + ir->nstpcouple - 1, ir->nstpcouple));
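             // A sketch of the scheduling, assuming do_per_step(s, n) means (s % n == 0):
             // the condition above holds when step % nstpcouple == 1, i.e. one step after
             // the steps (0, nstpcouple, 2*nstpcouple, ...) on which the pressure is computed.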
1440
1441         if (EI_VV(ir->eI))
1442         {
1443             GMX_ASSERT(!useGpuForUpdate, "GPU update is not supported with VVAK integrator.");
1444
1445             integrateVVSecondStep(step,
1446                                   ir,
1447                                   fr,
1448                                   cr,
1449                                   state,
1450                                   mdatoms,
1451                                   fcdata,
1452                                   &MassQ,
1453                                   &vcm,
1454                                   pull_work,
1455                                   enerd,
1456                                   ekind,
1457                                   gstat,
1458                                   &dvdl_constr,
1459                                   bCalcVir,
1460                                   total_vir,
1461                                   shake_vir,
1462                                   force_vir,
1463                                   pres,
1464                                   M,
1465                                   lastbox,
1466                                   do_log,
1467                                   do_ene,
1468                                   bGStat,
1469                                   &bSumEkinhOld,
1470                                   &f,
1471                                   &cbuf,
1472                                   &upd,
1473                                   constr,
1474                                   &nullSignaller,
1475                                   trotter_seq,
1476                                   nrnb,
1477                                   wcycle);
1478         }
1479         else
1480         {
1481             if (useGpuForUpdate)
1482             {
1483                 if (bNS && (bFirstStep || DOMAINDECOMP(cr)))
1484                 {
1485                     integrator->set(stateGpu->getCoordinates(),
1486                                     stateGpu->getVelocities(),
1487                                     stateGpu->getForces(),
1488                                     top.idef,
1489                                     *mdatoms);
1490
1491                     // Copy data to the GPU after buffers might have been reinitialized
1492                     stateGpu->copyVelocitiesToGpu(state->v, AtomLocality::Local);
1493                     stateGpu->copyCoordinatesToGpu(state->x, AtomLocality::Local);
1494                 }
1495
1496                 if (simulationWork.useGpuPme && !runScheduleWork->simulationWork.useGpuPmePpCommunication
1497                     && !thisRankHasDuty(cr, DUTY_PME))
1498                 {
1499                     // The PME forces were received on the host, so they have to be copied
1500                     stateGpu->copyForcesToGpu(f.view().force(), AtomLocality::All);
1501                 }
1502                 else if (!runScheduleWork->stepWork.useGpuFBufferOps)
1503                 {
1504                     // The buffer ops were not offloaded this step, so the forces are on the
1505                     // host and have to be copied
1506                     stateGpu->copyForcesToGpu(f.view().force(), AtomLocality::Local);
1507                 }
1508
1509                 const bool doTemperatureScaling =
1510                         (ir->etc != TemperatureCoupling::No
1511                          && do_per_step(step + ir->nsttcouple - 1, ir->nsttcouple));
1512
1513                 // This applies Leap-Frog, LINCS and SETTLE in succession
1514                 integrator->integrate(
1515                         stateGpu->getForcesReadyOnDeviceEvent(
1516                                 AtomLocality::Local, runScheduleWork->stepWork.useGpuFBufferOps),
1517                         ir->delta_t,
1518                         true,
1519                         bCalcVir,
1520                         shake_vir,
1521                         doTemperatureScaling,
1522                         ekind->tcstat,
1523                         doParrinelloRahman,
1524                         ir->nstpcouple * ir->delta_t,
1525                         M);
1526
1527                 // Copy velocities D2H after update if:
1528                 // - Globals are computed this step (includes the energy output steps).
1529                 // - Temperature is needed for the next step.
1530                 if (bGStat || needHalfStepKineticEnergy)
1531                 {
1532                     stateGpu->copyVelocitiesFromGpu(state->v, AtomLocality::Local);
1533                     stateGpu->waitVelocitiesReadyOnHost(AtomLocality::Local);
1534                 }
1535             }
1536             else
1537             {
1538                 /* With multiple time stepping we need to do an additional normal
1539                  * update step to obtain the virial, as the actual MTS integration
1540                  * uses an acceleration where the slow forces are multiplied by mtsFactor.
1541                  * Using that acceleration would result in a virial in which the slow
1542                  * force contribution is a factor mtsFactor too large.
1543                  */
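                     /* Sketch: with mtsFactor = 2 the MTS integration force is
                      * f_fast + 2*f_slow on the steps where the slow forces are evaluated,
                      * so a virial computed from it would count the slow contribution twice.
                      * The extra plain update below therefore uses the unscaled forces,
                      * solely to obtain the constraint virial.
                      */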
1544                 if (fr->useMts && bCalcVir && constr != nullptr)
1545                 {
1546                     upd.update_for_constraint_virial(
1547                             *ir, *mdatoms, *state, f.view().forceWithPadding(), *ekind);
1548
1549                     constrain_coordinates(constr,
1550                                           do_log,
1551                                           do_ene,
1552                                           step,
1553                                           state,
1554                                           upd.xp()->arrayRefWithPadding(),
1555                                           &dvdl_constr,
1556                                           bCalcVir,
1557                                           shake_vir);
1558                 }
1559
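                     // With MTS, the combined buffer (fast forces plus mtsFactor-weighted slow
                     // forces) is integrated on the steps where the slow forces were evaluated;
                     // on all other steps (and without MTS) the plain force buffer is used.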
1560                 ArrayRefWithPadding<const RVec> forceCombined =
1561                         (fr->useMts && step % ir->mtsLevels[1].stepFactor == 0)
1562                                 ? f.view().forceMtsCombinedWithPadding()
1563                                 : f.view().forceWithPadding();
1564                 upd.update_coords(
1565                         *ir, step, mdatoms, state, forceCombined, fcdata, ekind, M, etrtPOSITION, cr, constr != nullptr);
1566
1567                 wallcycle_stop(wcycle, WallCycleCounter::Update);
1568
1569                 constrain_coordinates(constr,
1570                                       do_log,
1571                                       do_ene,
1572                                       step,
1573                                       state,
1574                                       upd.xp()->arrayRefWithPadding(),
1575                                       &dvdl_constr,
1576                                       bCalcVir && !fr->useMts,
1577                                       shake_vir);
1578
1579                 upd.update_sd_second_half(
1580                         *ir, step, &dvdl_constr, mdatoms, state, cr, nrnb, wcycle, constr, do_log, do_ene);
1581                 upd.finish_update(*ir, mdatoms, state, wcycle, constr != nullptr);
1582             }
1583
1584             if (ir->bPull && ir->pull->bSetPbcRefToPrevStepCOM)
1585             {
1586                 updatePrevStepPullCom(pull_work, state);
1587             }
1588
1589             enerd->term[F_DVDL_CONSTR] += dvdl_constr;
1590         }
1591
1592         /* ############## IF NOT VV, Calculate globals HERE  ############ */
1593         /* With Leap-Frog we can skip compute_globals at
1594          * non-communication steps, but we need to calculate
1595          * the kinetic energy one step before communication.
1596          */
1597         {
1598             // Organize to do inter-simulation signalling on steps if
1599             // and when algorithms require it.
1600             const bool doInterSimSignal = (simulationsShareState && do_per_step(step, nstSignalComm));
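                 // (Inter-simulation signalling keeps e.g. stop and checkpoint signals
                 // synchronized across the members of a multi-simulation.)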
1601
1602             if (bGStat || needHalfStepKineticEnergy || doInterSimSignal)
1603             {
1604                 // Copy coordinates when needed to stop the CM motion.
1605                 if (useGpuForUpdate && !EI_VV(ir->eI) && bStopCM)
1606                 {
1607                     stateGpu->copyCoordinatesFromGpu(state->x, AtomLocality::Local);
1608                     stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
1609                 }
1610                 // Since we're already communicating at this step, we
1611                 // can propagate intra-simulation signals. Note that
1612                 // check_nstglobalcomm is responsible for choosing the
1613                 // value of nstglobalcomm, which is one way bGStat
1614                 // becomes true, so we can't get into a
1615                 // situation where e.g. checkpointing can't be
1616                 // signalled.
1617                 bool                doIntraSimSignal = true;
1618                 SimulationSignaller signaller(&signals, cr, ms, doInterSimSignal, doIntraSimSignal);
1619
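                     // Note: compute_globals() performs the global reductions (kinetic energy,
                     // virial, energies, COM motion); the CGLO_* bitmask below selects which of
                     // these are actually needed on this step.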
1620                 compute_globals(
1621                         gstat,
1622                         cr,
1623                         ir,
1624                         fr,
1625                         ekind,
1626                         makeConstArrayRef(state->x),
1627                         makeConstArrayRef(state->v),
1628                         state->box,
1629                         mdatoms,
1630                         nrnb,
1631                         &vcm,
1632                         wcycle,
1633                         enerd,
1634                         force_vir,
1635                         shake_vir,
1636                         total_vir,
1637                         pres,
1638                         (!EI_VV(ir->eI) && bCalcEner && constr != nullptr) ? constr->rmsdData()
1639                                                                            : gmx::ArrayRef<real>{},
1640                         &signaller,
1641                         lastbox,
1642                         &bSumEkinhOld,
1643                         (bGStat ? CGLO_GSTAT : 0) | (!EI_VV(ir->eI) && bCalcEner ? CGLO_ENERGY : 0)
1644                                 | (!EI_VV(ir->eI) && bStopCM ? CGLO_STOPCM : 0)
1645                                 | (!EI_VV(ir->eI) ? CGLO_TEMPERATURE : 0)
1646                                 | (!EI_VV(ir->eI) ? CGLO_PRESSURE : 0) | CGLO_CONSTRAINT
1647                                 | (DOMAINDECOMP(cr) && shouldCheckNumberOfBondedInteractions(*cr->dd)
1648                                            ? CGLO_CHECK_NUMBER_OF_BONDED_INTERACTIONS
1649                                            : 0));
1650                 if (DOMAINDECOMP(cr))
1651                 {
1652                     checkNumberOfBondedInteractions(
1653                             mdlog, cr, top_global, &top, makeConstArrayRef(state->x), state->box);
1654                 }
1655                 if (!EI_VV(ir->eI) && bStopCM)
1656                 {
1657                     process_and_stopcm_grp(
1658                             fplog, &vcm, *mdatoms, makeArrayRef(state->x), makeArrayRef(state->v));
1659                     inc_nrnb(nrnb, eNR_STOPCM, mdatoms->homenr);
1660
1661                     // TODO: The special case of removing CM motion should be dealt with more gracefully
1662                     if (useGpuForUpdate)
1663                     {
1664                         stateGpu->copyCoordinatesToGpu(state->x, AtomLocality::Local);
1665                         // Here we block until the H2D copy completes because event sync with the
1666                         // force kernels that use the coordinates on the next steps is not implemented
1667                         // (not because of a race on state->x being modified on the CPU while H2D is in progress).
1668                         stateGpu->waitCoordinatesCopiedToDevice(AtomLocality::Local);
1669                         // If the COM removal changed the velocities on the CPU, this has to be accounted for.
1670                         if (vcm.mode != ComRemovalAlgorithm::No)
1671                         {
1672                             stateGpu->copyVelocitiesToGpu(state->v, AtomLocality::Local);
1673                         }
1674                     }
1675                 }
1676             }
1677         }
1678
1679         /* #############  END CALC EKIN AND PRESSURE ################# */
1680
1681         /* Note: this is OK, but there are some numerical precision issues with using the convergence of
1682            the virial that should probably be addressed eventually. state->veta has better properties,
1683            but what we actually need entering the new cycle is the new shake_vir value. Ideally, we could
1684            generate the new shake_vir, but test the veta value for convergence.  This will take some thought. */
1685
1686         if (ir->efep != FreeEnergyPerturbationType::No && !EI_VV(ir->eI))
1687         {
1688             /* Sum up the foreign energy and dK/dl terms for md and sd.
1689                Currently done every step so that dH/dl is correct in the .edr */
1690             accumulateKineticLambdaComponents(enerd, state->lambda, *ir->fepvals);
1691         }
1692
1693         update_pcouple_after_coordinates(
1694                 fplog, step, ir, mdatoms, pres, force_vir, shake_vir, pressureCouplingMu, state, nrnb, upd.deform(), !useGpuForUpdate);
1695
1696         const bool doBerendsenPressureCoupling = (inputrec->epc == PressureCoupling::Berendsen
1697                                                   && do_per_step(step, inputrec->nstpcouple));
1698         const bool doCRescalePressureCoupling  = (inputrec->epc == PressureCoupling::CRescale
1699                                                  && do_per_step(step, inputrec->nstpcouple));
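             // With GPU update, the coordinate scaling was skipped on the CPU side above
             // (update_pcouple_after_coordinates() was passed !useGpuForUpdate), so the
             // scaling matrix is applied to the device buffers here instead.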
1700         if (useGpuForUpdate
1701             && (doBerendsenPressureCoupling || doCRescalePressureCoupling || doParrinelloRahman))
1702         {
1703             integrator->scaleCoordinates(pressureCouplingMu);
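                 // C-rescale additionally rescales the velocities, by the inverse of the
                 // coordinate scaling matrix (a sketch of stochastic cell rescaling:
                 // x' = mu * x, v' = mu^-1 * v).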
1704             if (doCRescalePressureCoupling)
1705             {
1706                 matrix pressureCouplingInvMu;
1707                 gmx::invertBoxMatrix(pressureCouplingMu, pressureCouplingInvMu);
1708                 integrator->scaleVelocities(pressureCouplingInvMu);
1709             }
1710             integrator->setPbc(PbcType::Xyz, state->box);
1711         }
1712
1713         /* ################# END UPDATE STEP 2 ################# */
1714         /* #### We now have r(t+dt) and v(t+dt/2)  ############# */
1715
1716         /* The coordinates (x) were unshifted in update */
1717         if (!bGStat)
1718         {
1719             /* We will not sum ekinh_old,
1720              * so signal that we still have to do it.
1721              */
1722             bSumEkinhOld = TRUE;
1723         }
1724
1725         if (bCalcEner)
1726         {
1727             /* #########  BEGIN PREPARING EDR OUTPUT  ###########  */
1728
1729             /* use the directly determined last velocity, not actually the averaged half steps */
1730             if (bTrotter && ir->eI == IntegrationAlgorithm::VV)
1731             {
1732                 enerd->term[F_EKIN] = last_ekin;
1733             }
1734             enerd->term[F_ETOT] = enerd->term[F_EPOT] + enerd->term[F_EKIN];
1735
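                 // A sketch of what follows: the conserved quantity is the total energy plus
                 // the energy stored in the thermostat/barostat degrees of freedom; for VV that
                 // contribution was saved in saved_conserved_quantity during the first half
                 // step, for leap-frog it is obtained from NPT_energy().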
1736             if (integratorHasConservedEnergyQuantity(ir))
1737             {
1738                 if (EI_VV(ir->eI))
1739                 {
1740                     enerd->term[F_ECONSERVED] = enerd->term[F_ETOT] + saved_conserved_quantity;
1741                 }
1742                 else
1743                 {
1744                     enerd->term[F_ECONSERVED] = enerd->term[F_ETOT] + NPT_energy(ir, state, &MassQ);
1745                 }
1746             }
1747             /* #########  END PREPARING EDR OUTPUT  ###########  */
1748         }
1749
1750         /* Output stuff */
1751         if (MASTER(cr))
1752         {
1753             if (fplog && do_log && bDoExpanded)
1754             {
1755                 /* only needed if doing expanded ensemble */
1756                 PrintFreeEnergyInfoToFile(fplog,
1757                                           ir->fepvals.get(),
1758                                           ir->expandedvals.get(),
1759                                           ir->bSimTemp ? ir->simtempvals.get() : nullptr,
1760                                           state_global->dfhist,
1761                                           state->fep_state,
1762                                           ir->nstlog,
1763                                           step);
1764             }
1765             if (bCalcEner)
1766             {
1767                 energyOutput.addDataAtEnergyStep(bDoDHDL,
1768                                                  bCalcEnerStep,
1769                                                  t,
1770                                                  mdatoms->tmass,
1771                                                  enerd,
1772                                                  ir->fepvals.get(),
1773                                                  ir->expandedvals.get(),
1774                                                  lastbox,
1775                                                  PTCouplingArrays{ state->boxv,
1776                                                                    state->nosehoover_xi,
1777                                                                    state->nosehoover_vxi,
1778                                                                    state->nhpres_xi,
1779                                                                    state->nhpres_vxi },
1780                                                  state->fep_state,
1781                                                  shake_vir,
1782                                                  force_vir,
1783                                                  total_vir,
1784                                                  pres,
1785                                                  ekind,
1786                                                  mu_tot,
1787                                                  constr);
1788             }
1789             else
1790             {
1791                 energyOutput.recordNonEnergyStep();
1792             }
1793
1794             gmx_bool do_dr = do_per_step(step, ir->nstdisreout);
1795             gmx_bool do_or = do_per_step(step, ir->nstorireout);
1796
1797             if (doSimulatedAnnealing)
1798             {
1799                 gmx::EnergyOutput::printAnnealingTemperatures(
1800                         do_log ? fplog : nullptr, groups, &(ir->opts));
1801             }
1802             if (do_log || do_ene || do_dr || do_or)
1803             {
1804                 energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf),
1805                                                    do_ene,
1806                                                    do_dr,
1807                                                    do_or,
1808                                                    do_log ? fplog : nullptr,
1809                                                    step,
1810                                                    t,
1811                                                    fr->fcdata.get(),
1812                                                    awh.get());
1813             }
1814             if (do_log && ir->bDoAwh && awh->hasFepLambdaDimension())
1815             {
1816                 const bool isInitialOutput = false;
1817                 printLambdaStateToLog(fplog, state->lambda, isInitialOutput);
1818             }
1819
1820             if (ir->bPull)
1821             {
1822                 pull_print_output(pull_work, step, t);
1823             }
1824
1825             if (do_per_step(step, ir->nstlog))
1826             {
1827                 if (fflush(fplog) != 0)
1828                 {
1829                     gmx_fatal(FARGS, "Cannot flush logfile - maybe you are out of disk space?");
1830                 }
1831             }
1832         }
1833         if (bDoExpanded)
1834         {
1835             /* Have to do this part _after_ outputting the logfile and the edr file */
1836             /* Gets written into the state at the beginning of the next loop */
1837             state->fep_state = lamnew;
1838         }
1839         else if (ir->bDoAwh && awh->needForeignEnergyDifferences(step))
1840         {
1841             state->fep_state = awh->fepLambdaState();
1842         }
1843         /* Print the remaining wall clock time for the run */
1844         if (isMasterSimMasterRank(ms, MASTER(cr)) && (do_verbose || gmx_got_usr_signal()) && !bPMETunePrinting)
1845         {
1846             if (shellfc)
1847             {
1848                 fprintf(stderr, "\n");
1849             }
1850             print_time(stderr, walltime_accounting, step, ir, cr);
1851         }
1852
1853         /* Ion/water position swapping.
1854          * Not done in last step since trajectory writing happens before this call
1855          * in the MD loop and exchanges would be lost anyway. */
1856         bNeedRepartition = FALSE;
1857         if ((ir->eSwapCoords != SwapType::No) && (step > 0) && !bLastStep
1858             && do_per_step(step, ir->swap->nstswap))
1859         {
1860             bNeedRepartition = do_swapcoords(cr,
1861                                              step,
1862                                              t,
1863                                              ir,
1864                                              swap,
1865                                              wcycle,
1866                                              as_rvec_array(state->x.data()),
1867                                              state->box,
1868                                              MASTER(cr) && mdrunOptions.verbose,
1869                                              bRerunMD);
1870
1871             if (bNeedRepartition && DOMAINDECOMP(cr))
1872             {
1873                 dd_collect_state(cr->dd, state, state_global);
1874             }
1875         }
1876
1877         /* Replica exchange */
1878         bExchanged = FALSE;
1879         if (bDoReplEx)
1880         {
1881             bExchanged = replica_exchange(fplog, cr, ms, repl_ex, state_global, enerd, state, step, t);
1882         }
1883
1884         if ((bExchanged || bNeedRepartition) && DOMAINDECOMP(cr))
1885         {
1886             dd_partition_system(fplog,
1887                                 mdlog,
1888                                 step,
1889                                 cr,
1890                                 TRUE,
1891                                 1,
1892                                 state_global,
1893                                 top_global,
1894                                 *ir,
1895                                 imdSession,
1896                                 pull_work,
1897                                 state,
1898                                 &f,
1899                                 mdAtoms,
1900                                 &top,
1901                                 fr,
1902                                 vsite,
1903                                 constr,
1904                                 nrnb,
1905                                 wcycle,
1906                                 FALSE);
1907             upd.updateAfterPartition(state->natoms,
1908                                      md->cFREEZE ? gmx::arrayRefFromArray(md->cFREEZE, md->nr)
1909                                                  : gmx::ArrayRef<const unsigned short>(),
1910                                      md->cTC ? gmx::arrayRefFromArray(md->cTC, md->nr)
1911                                              : gmx::ArrayRef<const unsigned short>());
1912         }
1913
1914         bFirstStep = FALSE;
1915         bInitStep  = FALSE;
1916
1917         /* #######  SET VARIABLES FOR NEXT ITERATION IF THEY STILL NEED IT ###### */
1918         /* With all integrators, except VV, we need to retain the pressure
1919          * at the current step for coupling at the next step.
1920          */
1921         if ((state->flags & enumValueToBitMask(StateEntry::PressurePrevious))
1922             && (bGStatEveryStep || (ir->nstpcouple > 0 && step % ir->nstpcouple == 0)))
1923         {
1924             /* Store the pressure in t_state for pressure coupling
1925              * at the next MD step.
1926              */
1927             copy_mat(pres, state->pres_prev);
1928         }
1929
1930         /* #######  END SET VARIABLES FOR NEXT ITERATION ###### */
1931
1932         if ((membed != nullptr) && (!bLastStep))
1933         {
1934             rescale_membed(step_rel, membed, as_rvec_array(state_global->x.data()));
1935         }
1936
1937         cycles = wallcycle_stop(wcycle, WallCycleCounter::Step);
1938         if (DOMAINDECOMP(cr) && wcycle)
1939         {
1940             dd_cycles_add(cr->dd, cycles, ddCyclStep);
1941         }
1942
1943         /* increase the MD step number */
1944         step++;
1945         step_rel++;
1946
1947 #if GMX_FAHCORE
1948         if (MASTER(cr))
1949         {
1950             fcReportProgress(ir->nsteps + ir->init_step, step);
1951         }
1952 #endif
1953
1954         resetHandler->resetCounters(
1955                 step, step_rel, mdlog, fplog, cr, fr->nbv.get(), nrnb, fr->pmedata, pme_loadbal, wcycle, walltime_accounting);
1956
1957         /* If bIMD is TRUE, the master updates the IMD energy record and sends positions to the VMD client */
1958         imdSession->updateEnergyRecordAndSendPositionsAndEnergies(bInteractiveMDstep, step, bCalcEner);
1959     }
1960     /* End of main MD loop */
1961
1962     /* Closing TNG files can include compressing data. Therefore it is good to do that
1963      * before stopping the time measurements. */
1964     mdoutf_tng_close(outf);
1965
1966     /* Stop measuring walltime */
1967     walltime_accounting_end_time(walltime_accounting);
1968
1969     if (!thisRankHasDuty(cr, DUTY_PME))
1970     {
1971         /* Tell the PME-only rank to finish */
1972         gmx_pme_send_finish(cr);
1973     }
1974
1975     if (MASTER(cr))
1976     {
1977         if (ir->nstcalcenergy > 0)
1978         {
1979             energyOutput.printEnergyConservation(fplog, ir->simulation_part, EI_MD(ir->eI));
1980
1981             gmx::EnergyOutput::printAnnealingTemperatures(fplog, groups, &(ir->opts));
1982             energyOutput.printAverages(fplog, groups);
1983         }
1984     }
1985     done_mdoutf(outf);
1986
1987     if (bPMETune)
1988     {
1989         pme_loadbal_done(pme_loadbal, fplog, mdlog, fr->nbv->useGpu());
1990     }
1991
1992     done_shellfc(fplog, shellfc, step_rel);
1993
1994     if (useReplicaExchange && MASTER(cr))
1995     {
1996         print_replica_exchange_statistics(fplog, repl_ex);
1997     }
1998
1999     walltime_accounting_set_nsteps_done(walltime_accounting, step_rel);
2000
2001     global_stat_destroy(gstat);
2002 }