1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5  * Copyright (c) 2001-2004, The GROMACS development team.
6  * Copyright (c) 2011-2019,2020,2021, by the GROMACS development team, led by
7  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8  * and including many others, as listed in the AUTHORS file in the
9  * top-level source directory and at http://www.gromacs.org.
10  *
11  * GROMACS is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public License
13  * as published by the Free Software Foundation; either version 2.1
14  * of the License, or (at your option) any later version.
15  *
16  * GROMACS is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with GROMACS; if not, see
23  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
25  *
26  * If you want to redistribute modifications to GROMACS, please
27  * consider that scientific software is very special. Version
28  * control is crucial - bugs must be traceable. We will be happy to
29  * consider code for inclusion in the official distribution, but
30  * derived work must not be called official GROMACS. Details are found
31  * in the README & COPYING files - if they are missing, get the
32  * official version at http://www.gromacs.org.
33  *
34  * To help us fund GROMACS development, we humbly ask that you cite
35  * the research papers on the package. Check out http://www.gromacs.org.
36  */
37 /*! \internal \file
38  *
39  * \brief Implements the integrator for normal molecular dynamics simulations
40  *
41  * \author David van der Spoel <david.vanderspoel@icm.uu.se>
42  * \ingroup module_mdrun
43  */
44 #include "gmxpre.h"
45
46 #include <cinttypes>
47 #include <cmath>
48 #include <cstdio>
49 #include <cstdlib>
50
51 #include <algorithm>
52 #include <memory>
53 #include <numeric>
54
55 #include "gromacs/applied_forces/awh/awh.h"
56 #include "gromacs/applied_forces/awh/read_params.h"
57 #include "gromacs/commandline/filenm.h"
58 #include "gromacs/domdec/collect.h"
59 #include "gromacs/domdec/dlbtiming.h"
60 #include "gromacs/domdec/domdec.h"
61 #include "gromacs/domdec/domdec_network.h"
62 #include "gromacs/domdec/domdec_struct.h"
63 #include "gromacs/domdec/gpuhaloexchange.h"
64 #include "gromacs/domdec/mdsetup.h"
65 #include "gromacs/domdec/partition.h"
66 #include "gromacs/essentialdynamics/edsam.h"
67 #include "gromacs/ewald/pme_load_balancing.h"
68 #include "gromacs/ewald/pme_pp.h"
69 #include "gromacs/fileio/trxio.h"
70 #include "gromacs/gmxlib/network.h"
71 #include "gromacs/gmxlib/nrnb.h"
72 #include "gromacs/gpu_utils/device_stream_manager.h"
73 #include "gromacs/gpu_utils/gpu_utils.h"
74 #include "gromacs/imd/imd.h"
75 #include "gromacs/listed_forces/listed_forces.h"
76 #include "gromacs/math/functions.h"
77 #include "gromacs/math/invertmatrix.h"
78 #include "gromacs/math/vec.h"
79 #include "gromacs/math/vectypes.h"
80 #include "gromacs/mdlib/checkpointhandler.h"
81 #include "gromacs/mdlib/compute_io.h"
82 #include "gromacs/mdlib/constr.h"
83 #include "gromacs/mdlib/coupling.h"
84 #include "gromacs/mdlib/ebin.h"
85 #include "gromacs/mdlib/enerdata_utils.h"
86 #include "gromacs/mdlib/energyoutput.h"
87 #include "gromacs/mdlib/expanded.h"
88 #include "gromacs/mdlib/force.h"
89 #include "gromacs/mdlib/force_flags.h"
90 #include "gromacs/mdlib/forcerec.h"
91 #include "gromacs/mdlib/freeenergyparameters.h"
92 #include "gromacs/mdlib/md_support.h"
93 #include "gromacs/mdlib/mdatoms.h"
94 #include "gromacs/mdlib/mdoutf.h"
95 #include "gromacs/mdlib/membed.h"
96 #include "gromacs/mdlib/resethandler.h"
97 #include "gromacs/mdlib/sighandler.h"
98 #include "gromacs/mdlib/simulationsignal.h"
99 #include "gromacs/mdlib/stat.h"
100 #include "gromacs/mdlib/stophandler.h"
101 #include "gromacs/mdlib/tgroup.h"
102 #include "gromacs/mdlib/trajectory_writing.h"
103 #include "gromacs/mdlib/update.h"
104 #include "gromacs/mdlib/update_constrain_gpu.h"
105 #include "gromacs/mdlib/update_vv.h"
106 #include "gromacs/mdlib/vcm.h"
107 #include "gromacs/mdlib/vsite.h"
108 #include "gromacs/mdrunutility/handlerestart.h"
109 #include "gromacs/mdrunutility/multisim.h"
110 #include "gromacs/mdrunutility/printtime.h"
111 #include "gromacs/mdtypes/awh_history.h"
112 #include "gromacs/mdtypes/awh_params.h"
113 #include "gromacs/mdtypes/commrec.h"
114 #include "gromacs/mdtypes/df_history.h"
115 #include "gromacs/mdtypes/energyhistory.h"
116 #include "gromacs/mdtypes/fcdata.h"
117 #include "gromacs/mdtypes/forcebuffers.h"
118 #include "gromacs/mdtypes/forcerec.h"
119 #include "gromacs/mdtypes/group.h"
120 #include "gromacs/mdtypes/inputrec.h"
121 #include "gromacs/mdtypes/interaction_const.h"
122 #include "gromacs/mdtypes/md_enums.h"
123 #include "gromacs/mdtypes/mdatom.h"
124 #include "gromacs/mdtypes/mdrunoptions.h"
125 #include "gromacs/mdtypes/multipletimestepping.h"
126 #include "gromacs/mdtypes/observableshistory.h"
127 #include "gromacs/mdtypes/pullhistory.h"
128 #include "gromacs/mdtypes/simulation_workload.h"
129 #include "gromacs/mdtypes/state.h"
130 #include "gromacs/mdtypes/state_propagator_data_gpu.h"
131 #include "gromacs/modularsimulator/energydata.h"
132 #include "gromacs/nbnxm/gpu_data_mgmt.h"
133 #include "gromacs/nbnxm/nbnxm.h"
134 #include "gromacs/pbcutil/pbc.h"
135 #include "gromacs/pulling/output.h"
136 #include "gromacs/pulling/pull.h"
137 #include "gromacs/swap/swapcoords.h"
138 #include "gromacs/timing/wallcycle.h"
139 #include "gromacs/timing/walltime_accounting.h"
140 #include "gromacs/topology/atoms.h"
141 #include "gromacs/topology/idef.h"
142 #include "gromacs/topology/mtop_util.h"
143 #include "gromacs/topology/topology.h"
144 #include "gromacs/trajectory/trajectoryframe.h"
145 #include "gromacs/utility/basedefinitions.h"
146 #include "gromacs/utility/cstringutil.h"
147 #include "gromacs/utility/fatalerror.h"
148 #include "gromacs/utility/logger.h"
149 #include "gromacs/utility/real.h"
150 #include "gromacs/utility/smalloc.h"
151
152 #include "legacysimulator.h"
153 #include "replicaexchange.h"
154 #include "shellfc.h"
155
156 using gmx::SimulationSignaller;
157
158 void gmx::LegacySimulator::do_md()
159 {
160     // TODO Historically, the EM and MD "integrators" used different
161     // names for the t_inputrec *parameter, but these must have the
162     // same name, now that it's a member of a struct. We use this ir
163     // alias to avoid a large ripple of nearly useless changes.
164     // t_inputrec is being replaced by IMdpOptionsProvider, so this
165     // will go away eventually.
166     const t_inputrec* ir = inputrec;
167
168     int64_t      step, step_rel;
169     double       t, t0 = ir->init_t;
170     gmx_bool     bGStatEveryStep, bGStat, bCalcVir, bCalcEnerStep, bCalcEner;
171     gmx_bool     bNS = FALSE, bNStList, bStopCM, bFirstStep, bInitStep, bLastStep = FALSE;
172     gmx_bool     bDoDHDL = FALSE, bDoFEP = FALSE, bDoExpanded = FALSE;
173     gmx_bool     do_ene, do_log, do_verbose;
174     gmx_bool     bMasterState;
175     unsigned int force_flags;
176     tensor force_vir = { { 0 } }, shake_vir = { { 0 } }, total_vir = { { 0 } }, pres = { { 0 } };
177     int    i, m;
178     rvec   mu_tot;
179     matrix pressureCouplingMu, M;
180     gmx_repl_ex_t     repl_ex = nullptr;
181     gmx_global_stat_t gstat;
182     gmx_shellfc_t*    shellfc;
183     gmx_bool          bSumEkinhOld, bDoReplEx, bExchanged, bNeedRepartition;
184     gmx_bool          bTrotter;
185     real              dvdl_constr;
186     std::vector<RVec> cbuf;
187     matrix            lastbox;
188     int               lamnew = 0;
189     /* for FEP */
190     int       nstfep = 0;
191     double    cycles;
192     real      saved_conserved_quantity = 0;
193     real      last_ekin                = 0;
194     t_extmass MassQ;
195     char      sbuf[STEPSTRSIZE], sbuf2[STEPSTRSIZE];
196
197     /* PME load balancing data for GPU kernels */
198     gmx_bool bPMETune         = FALSE;
199     gmx_bool bPMETunePrinting = FALSE;
200
201     bool bInteractiveMDstep = false;
202
203     SimulationSignals signals;
204     // Most global communication stages don't propagate mdrun
205     // signals, and will use this object to achieve that.
206     SimulationSignaller nullSignaller(nullptr, nullptr, nullptr, false, false);
207
208     if (!mdrunOptions.writeConfout)
209     {
210         // This is on by default, and the main known use case for
211         // turning it off is for convenience in benchmarking, which is
212         // something that should not show up in the general user
213         // interface.
214         GMX_LOG(mdlog.info)
215                 .asParagraph()
216                 .appendText(
217                         "The -noconfout functionality is deprecated, and may be removed in a "
218                         "future version.");
219     }
220
221     /* md-vv uses averaged full step velocities for T-control
222        md-vv-avek uses averaged half step velocities for T-control (but full step ekin for P control)
223        md uses averaged half step kinetic energies to determine temperature unless defined otherwise by GMX_EKIN_AVE_VEL; */
224     bTrotter = (EI_VV(ir->eI)
225                 && (inputrecNptTrotter(ir) || inputrecNphTrotter(ir) || inputrecNvtTrotter(ir)));
226
227     const bool bRerunMD = false;
228
229     int nstglobalcomm = computeGlobalCommunicationPeriod(mdlog, ir, cr);
230     bGStatEveryStep   = (nstglobalcomm == 1);
231
232     const SimulationGroups* groups = &top_global.groups;
233
234     std::unique_ptr<EssentialDynamics> ed = nullptr;
235     if (opt2bSet("-ei", nfile, fnm))
236     {
237         /* Initialize essential dynamics sampling */
238         ed = init_edsam(mdlog,
239                         opt2fn_null("-ei", nfile, fnm),
240                         opt2fn("-eo", nfile, fnm),
241                         top_global,
242                         *ir,
243                         cr,
244                         constr,
245                         state_global,
246                         observablesHistory,
247                         oenv,
248                         startingBehavior);
249     }
250     else if (observablesHistory->edsamHistory)
251     {
252         gmx_fatal(FARGS,
253                   "The checkpoint is from a run with essential dynamics sampling, "
254                   "but the current run did not specify the -ei option. "
255                   "Either specify the -ei option to mdrun, or do not use this checkpoint file.");
256     }
257
258     int*                fep_state = MASTER(cr) ? &state_global->fep_state : nullptr;
259     gmx::ArrayRef<real> lambda    = MASTER(cr) ? state_global->lambda : gmx::ArrayRef<real>();
260     initialize_lambdas(fplog,
261                        ir->efep,
262                        ir->bSimTemp,
263                        *ir->fepvals,
264                        ir->simtempvals->temperatures,
265                        gmx::arrayRefFromArray(ir->opts.ref_t, ir->opts.ngtc),
266                        MASTER(cr),
267                        fep_state,
268                        lambda);
269     Update upd(*ir, deform);
270     bool   doSimulatedAnnealing = false;
271     {
272         // TODO: Avoid changing inputrec (#3854)
273         // Simulated annealing updates the reference temperature.
274         auto* nonConstInputrec = const_cast<t_inputrec*>(inputrec);
275         doSimulatedAnnealing   = initSimulatedAnnealing(nonConstInputrec, &upd);
276     }
277     const bool useReplicaExchange = (replExParams.exchangeInterval > 0);
278
279     const t_fcdata& fcdata = *fr->fcdata;
280
281     bool simulationsShareState = false;
282     int  nstSignalComm         = nstglobalcomm;
283     {
284         // TODO This implementation of ensemble orientation restraints is nasty because
285         // a user can't just do multi-sim with single-sim orientation restraints.
286         bool usingEnsembleRestraints =
287                 (fcdata.disres->nsystems > 1) || ((ms != nullptr) && (fcdata.orires->nr != 0));
288         bool awhUsesMultiSim = (ir->bDoAwh && ir->awhParams->shareBiasMultisim() && (ms != nullptr));
289
290         // Replica exchange, ensemble restraints and AWH need all
291         // simulations to remain synchronized, so they need
292         // checkpoints and stop conditions to act on the same step, so
293         // the propagation of such signals must take place between
294         // simulations, not just within simulations.
295         // TODO: Make algorithm initializers set these flags.
296         simulationsShareState = useReplicaExchange || usingEnsembleRestraints || awhUsesMultiSim;
297
298         if (simulationsShareState)
299         {
300             // Inter-simulation signal communication does not need to happen
301             // often, so we use a minimum of 200 steps to reduce overhead.
302             const int c_minimumInterSimulationSignallingInterval = 200;
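                // Round this minimum up to a multiple of nstglobalcomm, so that inter-simulation
                // signalling coincides with global communication steps.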
303             nstSignalComm = ((c_minimumInterSimulationSignallingInterval + nstglobalcomm - 1) / nstglobalcomm)
304                             * nstglobalcomm;
305         }
306     }
307
308     if (startingBehavior != StartingBehavior::RestartWithAppending)
309     {
310         pleaseCiteCouplingAlgorithms(fplog, *ir);
311     }
312     gmx_mdoutf*       outf = init_mdoutf(fplog,
313                                    nfile,
314                                    fnm,
315                                    mdrunOptions,
316                                    cr,
317                                    outputProvider,
318                                    mdModulesNotifiers,
319                                    ir,
320                                    top_global,
321                                    oenv,
322                                    wcycle,
323                                    startingBehavior,
324                                    simulationsShareState,
325                                    ms);
326     gmx::EnergyOutput energyOutput(mdoutf_get_fp_ene(outf),
327                                    top_global,
328                                    *ir,
329                                    pull_work,
330                                    mdoutf_get_fp_dhdl(outf),
331                                    false,
332                                    startingBehavior,
333                                    simulationsShareState,
334                                    mdModulesNotifiers);
335
336     gstat = global_stat_init(ir);
337
338     const auto& simulationWork     = runScheduleWork->simulationWork;
339     const bool  useGpuForPme       = simulationWork.useGpuPme;
340     const bool  useGpuForNonbonded = simulationWork.useGpuNonbonded;
341     const bool  useGpuForBufferOps = simulationWork.useGpuBufferOps;
342     const bool  useGpuForUpdate    = simulationWork.useGpuUpdate;
343
344     /* Check for polarizable models and flexible constraints */
345     shellfc = init_shell_flexcon(fplog,
346                                  top_global,
347                                  constr ? constr->numFlexibleConstraints() : 0,
348                                  ir->nstcalcenergy,
349                                  DOMAINDECOMP(cr),
350                                  useGpuForPme);
351
352     {
353         double io = compute_io(ir, top_global.natoms, *groups, energyOutput.numEnergyTerms(), 1);
354         if ((io > 2000) && MASTER(cr))
355         {
356             fprintf(stderr, "\nWARNING: This run will generate roughly %.0f Mb of data\n\n", io);
357         }
358     }
359
360     // Local state only becomes valid now.
361     std::unique_ptr<t_state> stateInstance;
362     t_state*                 state;
363
364     gmx_localtop_t top(top_global.ffparams);
365
366     ForceBuffers     f(fr->useMts,
367                    ((useGpuForNonbonded && useGpuForBufferOps) || useGpuForUpdate)
368                            ? PinningPolicy::PinnedIfSupported
369                            : PinningPolicy::CannotBePinned);
370     const t_mdatoms* md = mdAtoms->mdatoms();
371     if (DOMAINDECOMP(cr))
372     {
373         stateInstance = std::make_unique<t_state>();
374         state         = stateInstance.get();
375         dd_init_local_state(*cr->dd, state_global, state);
376
377         /* Distribute the charge groups over the nodes from the master node */
378         dd_partition_system(fplog,
379                             mdlog,
380                             ir->init_step,
381                             cr,
382                             TRUE,
383                             1,
384                             state_global,
385                             top_global,
386                             *ir,
387                             imdSession,
388                             pull_work,
389                             state,
390                             &f,
391                             mdAtoms,
392                             &top,
393                             fr,
394                             vsite,
395                             constr,
396                             nrnb,
397                             nullptr,
398                             FALSE);
399         upd.updateAfterPartition(state->natoms,
400                                  md->cFREEZE ? gmx::arrayRefFromArray(md->cFREEZE, md->nr)
401                                              : gmx::ArrayRef<const unsigned short>(),
402                                  md->cTC ? gmx::arrayRefFromArray(md->cTC, md->nr)
403                                          : gmx::ArrayRef<const unsigned short>());
404     }
405     else
406     {
407         state_change_natoms(state_global, state_global->natoms);
408         /* Copy the pointer to the global state */
409         state = state_global;
410
411         /* Generate and initialize new topology */
412         mdAlgorithmsSetupAtomData(cr, *ir, top_global, &top, fr, &f, mdAtoms, constr, vsite, shellfc);
413
414         upd.updateAfterPartition(state->natoms,
415                                  md->cFREEZE ? gmx::arrayRefFromArray(md->cFREEZE, md->nr)
416                                              : gmx::ArrayRef<const unsigned short>(),
417                                  md->cTC ? gmx::arrayRefFromArray(md->cTC, md->nr)
418                                          : gmx::ArrayRef<const unsigned short>());
419     }
420
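        // GPU update-constraints object; it is only constructed below when useGpuForUpdate is set.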
421     std::unique_ptr<UpdateConstrainGpu> integrator;
422
423     StatePropagatorDataGpu* stateGpu = fr->stateGpu;
424
425     // TODO: the assertions below should be handled by UpdateConstraintsBuilder.
426     if (useGpuForUpdate)
427     {
428         GMX_RELEASE_ASSERT(!DOMAINDECOMP(cr) || ddUsesUpdateGroups(*cr->dd) || constr == nullptr
429                                    || constr->numConstraintsTotal() == 0,
430                            "Constraints in domain decomposition are only supported with update "
431                            "groups if using GPU update.\n");
432         GMX_RELEASE_ASSERT(ir->eConstrAlg != ConstraintAlgorithm::Shake || constr == nullptr
433                                    || constr->numConstraintsTotal() == 0,
434                            "SHAKE is not supported with GPU update.");
435         GMX_RELEASE_ASSERT(useGpuForPme || (useGpuForNonbonded && simulationWork.useGpuBufferOps),
436                            "Either PME or short-ranged non-bonded interaction tasks must run on "
437                            "the GPU to use GPU update.\n");
438         GMX_RELEASE_ASSERT(ir->eI == IntegrationAlgorithm::MD,
439                            "Only the md integrator is supported with the GPU update.\n");
440         GMX_RELEASE_ASSERT(
441                 ir->etc != TemperatureCoupling::NoseHoover,
442                 "Nose-Hoover temperature coupling is not supported with the GPU update.\n");
443         GMX_RELEASE_ASSERT(
444                 ir->epc == PressureCoupling::No || ir->epc == PressureCoupling::ParrinelloRahman
445                         || ir->epc == PressureCoupling::Berendsen || ir->epc == PressureCoupling::CRescale,
446                 "Only Parrinello-Rahman, Berendsen, and C-rescale pressure coupling are supported "
447                 "with the GPU update.\n");
448         GMX_RELEASE_ASSERT(!md->haveVsites,
449                            "Virtual sites are not supported with the GPU update.\n");
450         GMX_RELEASE_ASSERT(ed == nullptr,
451                            "Essential dynamics is not supported with the GPU update.\n");
452         GMX_RELEASE_ASSERT(!ir->bPull || !pull_have_constraint(*ir->pull),
453                            "Constraints pulling is not supported with the GPU update.\n");
454         GMX_RELEASE_ASSERT(fcdata.orires->nr == 0,
455                            "Orientation restraints are not supported with the GPU update.\n");
456         GMX_RELEASE_ASSERT(
457                 ir->efep == FreeEnergyPerturbationType::No
458                         || (!haveFepPerturbedMasses(top_global) && !havePerturbedConstraints(top_global)),
459                 "Free energy perturbation of masses and constraints are not supported with the GPU "
460                 "update.");
461
462         if (constr != nullptr && constr->numConstraintsTotal() > 0)
463         {
464             GMX_LOG(mdlog.info)
465                     .asParagraph()
466                     .appendText("Updating coordinates and applying constraints on the GPU.");
467         }
468         else
469         {
470             GMX_LOG(mdlog.info).asParagraph().appendText("Updating coordinates on the GPU.");
471         }
472         GMX_RELEASE_ASSERT(fr->deviceStreamManager != nullptr,
473                            "Device stream manager should be initialized in order to use GPU "
474                            "update-constraints.");
475         GMX_RELEASE_ASSERT(
476                 fr->deviceStreamManager->streamIsValid(gmx::DeviceStreamType::UpdateAndConstraints),
477                 "Update stream should be initialized in order to use GPU "
478                 "update-constraints.");
479         integrator = std::make_unique<UpdateConstrainGpu>(
480                 *ir,
481                 top_global,
482                 ekind->ngtc,
483                 fr->deviceStreamManager->context(),
484                 fr->deviceStreamManager->stream(gmx::DeviceStreamType::UpdateAndConstraints),
485                 stateGpu->xUpdatedOnDevice(),
486                 wcycle);
487
488         integrator->setPbc(PbcType::Xyz, state->box);
489     }
490
491     if (useGpuForPme || (useGpuForNonbonded && useGpuForBufferOps) || useGpuForUpdate)
492     {
493         changePinningPolicy(&state->x, PinningPolicy::PinnedIfSupported);
494     }
495     if (useGpuForUpdate)
496     {
497         changePinningPolicy(&state->v, PinningPolicy::PinnedIfSupported);
498     }
499
500     // NOTE: The global state is no longer used at this point.
501     // But state_global is still used as temporary storage space for writing
502     // the global state to file and potentially for replica exchange.
503     // (Global topology should persist.)
504
505     update_mdatoms(mdAtoms->mdatoms(), state->lambda[FreeEnergyPerturbationCouplingType::Mass]);
506
507     if (ir->bExpanded)
508     {
509         /* Check nstexpanded here, because the grompp check was broken */
510         if (ir->expandedvals->nstexpanded % ir->nstcalcenergy != 0)
511         {
512             gmx_fatal(FARGS,
513                       "With expanded ensemble, nstexpanded should be a multiple of nstcalcenergy");
514         }
515         init_expanded_ensemble(startingBehavior != StartingBehavior::NewSimulation, ir, state->dfhist);
516     }
517
518     if (MASTER(cr))
519     {
520         EnergyData::initializeEnergyHistory(startingBehavior, observablesHistory, &energyOutput);
521     }
522
523     preparePrevStepPullCom(ir,
524                            pull_work,
525                            gmx::arrayRefFromArray(md->massT, md->nr),
526                            state,
527                            state_global,
528                            cr,
529                            startingBehavior != StartingBehavior::NewSimulation);
530
531     // TODO: Remove this by converting AWH into a ForceProvider
532     auto awh = prepareAwhModule(fplog,
533                                 *ir,
534                                 state_global,
535                                 cr,
536                                 ms,
537                                 startingBehavior != StartingBehavior::NewSimulation,
538                                 shellfc != nullptr,
539                                 opt2fn("-awh", nfile, fnm),
540                                 pull_work);
541
542     if (useReplicaExchange && MASTER(cr))
543     {
544         repl_ex = init_replica_exchange(fplog, ms, top_global.natoms, ir, replExParams);
545     }
546     /* PME tuning is only supported in the Verlet scheme, with PME for
547      * Coulomb. It is not supported with only LJ PME. */
548     bPMETune = (mdrunOptions.tunePme && EEL_PME(fr->ic->eeltype) && !mdrunOptions.reproducible
549                 && ir->cutoff_scheme != CutoffScheme::Group);
550
551     pme_load_balancing_t* pme_loadbal = nullptr;
552     if (bPMETune)
553     {
554         pme_loadbal_init(
555                 &pme_loadbal, cr, mdlog, *ir, state->box, *fr->ic, *fr->nbv, fr->pmedata, fr->nbv->useGpu());
556     }
557
558     if (!ir->bContinuation)
559     {
560         if (state->flags & enumValueToBitMask(StateEntry::V))
561         {
562             auto v = makeArrayRef(state->v);
563             /* Set the velocities of vsites, shells and frozen atoms to zero */
564             for (i = 0; i < md->homenr; i++)
565             {
566                 if (md->ptype[i] == ParticleType::Shell)
567                 {
568                     clear_rvec(v[i]);
569                 }
570                 else if (md->cFREEZE)
571                 {
572                     for (m = 0; m < DIM; m++)
573                     {
574                         if (ir->opts.nFreeze[md->cFREEZE[i]][m])
575                         {
576                             v[i][m] = 0;
577                         }
578                     }
579                 }
580             }
581         }
582
583         if (constr)
584         {
585             /* Constrain the initial coordinates and velocities */
586             do_constrain_first(fplog,
587                                constr,
588                                ir,
589                                md->nr,
590                                md->homenr,
591                                state->x.arrayRefWithPadding(),
592                                state->v.arrayRefWithPadding(),
593                                state->box,
594                                state->lambda[FreeEnergyPerturbationCouplingType::Bonded]);
595         }
596     }
597
598     if (ir->efep != FreeEnergyPerturbationType::No)
599     {
600         /* Set the free energy calculation frequency as the greatest common
601          * divisor of nstdhdl, nstexpanded, the replica exchange interval and the AWH sampling interval. */
602         nstfep = ir->fepvals->nstdhdl;
603         if (ir->bExpanded)
604         {
605             nstfep = std::gcd(ir->expandedvals->nstexpanded, nstfep);
606         }
607         if (useReplicaExchange)
608         {
609             nstfep = std::gcd(replExParams.exchangeInterval, nstfep);
610         }
611         if (ir->bDoAwh)
612         {
613             nstfep = std::gcd(ir->awhParams->nstSampleCoord(), nstfep);
614         }
615     }
616
617     /* Be REALLY careful about what flags you set here. You CANNOT assume
618      * this is the first step, since we might be restarting from a checkpoint,
619      * and in that case we should not do any modifications to the state.
620      */
621     bStopCM = (ir->comm_mode != ComRemovalAlgorithm::No && !ir->bContinuation);
622
623     // When restarting from a checkpoint, it can be appropriate to
624     // initialize ekind from quantities in the checkpoint. Otherwise,
625     // compute_globals must initialize ekind before the simulation
626     // starts/restarts. However, only the master rank knows what was
627     // found in the checkpoint file, so we have to communicate in
628     // order to coordinate the restart.
629     //
630     // TODO Consider removing this communication if/when checkpoint
631     // reading directly follows .tpr reading, because all ranks can
632     // agree on hasReadEkinState at that time.
633     bool hasReadEkinState = MASTER(cr) ? state_global->ekinstate.hasReadEkinState : false;
634     if (PAR(cr))
635     {
636         gmx_bcast(sizeof(hasReadEkinState), &hasReadEkinState, cr->mpi_comm_mygroup);
637     }
638     if (hasReadEkinState)
639     {
640         restore_ekinstate_from_state(cr, ekind, &state_global->ekinstate);
641     }
642
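        /* Flags for the initial call(s) to compute_globals below: CGLO_TEMPERATURE and CGLO_GSTAT
           are always requested, velocity-Verlet integrators additionally set CGLO_PRESSURE and
           CGLO_CONSTRAINT, and CGLO_READEKIN is set when the kinetic energy state was read from a
           checkpoint. */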
643     unsigned int cglo_flags =
644             (CGLO_TEMPERATURE | CGLO_GSTAT | (EI_VV(ir->eI) ? CGLO_PRESSURE : 0)
645              | (EI_VV(ir->eI) ? CGLO_CONSTRAINT : 0) | (hasReadEkinState ? CGLO_READEKIN : 0));
646
647     bSumEkinhOld = FALSE;
648
649     t_vcm vcm(top_global.groups, *ir);
650     reportComRemovalInfo(fplog, vcm);
651
652     /* To minimize communication, compute_globals computes the COM velocity
653      * and the kinetic energy for the velocities without COM motion removed.
654      * Thus to get the kinetic energy without the COM contribution, we need
655      * to call compute_globals twice.
656      */
657     for (int cgloIteration = 0; cgloIteration < (bStopCM ? 2 : 1); cgloIteration++)
658     {
659         unsigned int cglo_flags_iteration = cglo_flags;
660         if (bStopCM && cgloIteration == 0)
661         {
662             cglo_flags_iteration |= CGLO_STOPCM;
663             cglo_flags_iteration &= ~CGLO_TEMPERATURE;
664         }
665         if (DOMAINDECOMP(cr) && shouldCheckNumberOfBondedInteractions(*cr->dd) && cgloIteration == 0)
666         {
667             cglo_flags_iteration |= CGLO_CHECK_NUMBER_OF_BONDED_INTERACTIONS;
668         }
669         compute_globals(gstat,
670                         cr,
671                         ir,
672                         fr,
673                         ekind,
674                         makeConstArrayRef(state->x),
675                         makeConstArrayRef(state->v),
676                         state->box,
677                         md,
678                         nrnb,
679                         &vcm,
680                         nullptr,
681                         enerd,
682                         force_vir,
683                         shake_vir,
684                         total_vir,
685                         pres,
686                         gmx::ArrayRef<real>{},
687                         &nullSignaller,
688                         state->box,
689                         &bSumEkinhOld,
690                         cglo_flags_iteration);
691         if (cglo_flags_iteration & CGLO_STOPCM)
692         {
693             /* At initialization, do not pass x with acceleration-correction mode
694              * to avoid (incorrect) correction of the initial coordinates.
695              */
696             auto x = (vcm.mode == ComRemovalAlgorithm::LinearAccelerationCorrection)
697                              ? ArrayRef<RVec>()
698                              : makeArrayRef(state->x);
699             process_and_stopcm_grp(fplog, &vcm, *md, x, makeArrayRef(state->v));
700             inc_nrnb(nrnb, eNR_STOPCM, md->homenr);
701         }
702     }
703     if (DOMAINDECOMP(cr))
704     {
705         checkNumberOfBondedInteractions(
706                 mdlog, cr, top_global, &top, makeConstArrayRef(state->x), state->box);
707     }
708     if (ir->eI == IntegrationAlgorithm::VVAK)
709     {
710         /* a second call to get the half step temperature initialized as well */
711         /* we do the same call as above, but turn the pressure off -- internally to
712            compute_globals, this is recognized as a velocity verlet half-step
713            kinetic energy calculation. This minimizes the number of extra variables, but
714            perhaps obscures the logic. */
715
716         compute_globals(gstat,
717                         cr,
718                         ir,
719                         fr,
720                         ekind,
721                         makeConstArrayRef(state->x),
722                         makeConstArrayRef(state->v),
723                         state->box,
724                         md,
725                         nrnb,
726                         &vcm,
727                         nullptr,
728                         enerd,
729                         force_vir,
730                         shake_vir,
731                         total_vir,
732                         pres,
733                         gmx::ArrayRef<real>{},
734                         &nullSignaller,
735                         state->box,
736                         &bSumEkinhOld,
737                         cglo_flags & ~CGLO_PRESSURE);
738     }
739
740     /* Calculate the initial half step temperature, and save the ekinh_old */
741     if (startingBehavior == StartingBehavior::NewSimulation)
742     {
743         for (i = 0; (i < ir->opts.ngtc); i++)
744         {
745             copy_mat(ekind->tcstat[i].ekinh, ekind->tcstat[i].ekinh_old);
746         }
747     }
748
749     /* We need to make an initialization call to get the Trotter variables set, as well as other
750        constants for non-Trotter temperature control */
751     auto trotter_seq = init_npt_vars(ir, state, &MassQ, bTrotter);
752
753     if (MASTER(cr))
754     {
755         if (!ir->bContinuation)
756         {
757             if (constr && ir->eConstrAlg == ConstraintAlgorithm::Lincs)
758             {
759                 fprintf(fplog,
760                         "RMS relative constraint deviation after constraining: %.2e\n",
761                         constr->rmsd());
762             }
763             if (EI_STATE_VELOCITY(ir->eI))
764             {
765                 real temp = enerd->term[F_TEMP];
766                 if (ir->eI != IntegrationAlgorithm::VV)
767                 {
768                     /* Result of Ekin averaged over velocities of -half
769                      * and +half step, while we only have -half step here.
770                      */
771                     temp *= 2;
772                 }
773                 fprintf(fplog, "Initial temperature: %g K\n", temp);
774             }
775         }
776
777         char tbuf[20];
778         fprintf(stderr, "starting mdrun '%s'\n", *(top_global.name));
779         if (ir->nsteps >= 0)
780         {
781             sprintf(tbuf, "%8.1f", (ir->init_step + ir->nsteps) * ir->delta_t);
782         }
783         else
784         {
785             sprintf(tbuf, "%s", "infinite");
786         }
787         if (ir->init_step > 0)
788         {
789             fprintf(stderr,
790                     "%s steps, %s ps (continuing from step %s, %8.1f ps).\n",
791                     gmx_step_str(ir->init_step + ir->nsteps, sbuf),
792                     tbuf,
793                     gmx_step_str(ir->init_step, sbuf2),
794                     ir->init_step * ir->delta_t);
795         }
796         else
797         {
798             fprintf(stderr, "%s steps, %s ps.\n", gmx_step_str(ir->nsteps, sbuf), tbuf);
799         }
800         fprintf(fplog, "\n");
801     }
802
803     walltime_accounting_start_time(walltime_accounting);
804     wallcycle_start(wcycle, WallCycleCounter::Run);
805     print_start(fplog, cr, walltime_accounting, "mdrun");
806
807     /***********************************************************
808      *
809      *             Loop over MD steps
810      *
811      ************************************************************/
812
813     bFirstStep = TRUE;
814     /* Skip the first Nose-Hoover integration when we get the state from tpx */
815     bInitStep        = startingBehavior == StartingBehavior::NewSimulation || EI_VV(ir->eI);
816     bSumEkinhOld     = FALSE;
817     bExchanged       = FALSE;
818     bNeedRepartition = FALSE;
819
820     step     = ir->init_step;
821     step_rel = 0;
822
823     auto stopHandler = stopHandlerBuilder->getStopHandlerMD(
824             compat::not_null<SimulationSignal*>(&signals[eglsSTOPCOND]),
825             simulationsShareState,
826             MASTER(cr),
827             ir->nstlist,
828             mdrunOptions.reproducible,
829             nstSignalComm,
830             mdrunOptions.maximumHoursToRun,
831             ir->nstlist == 0,
832             fplog,
833             step,
834             bNS,
835             walltime_accounting);
836
837     auto checkpointHandler = std::make_unique<CheckpointHandler>(
838             compat::make_not_null<SimulationSignal*>(&signals[eglsCHKPT]),
839             simulationsShareState,
840             ir->nstlist == 0,
841             MASTER(cr),
842             mdrunOptions.writeConfout,
843             mdrunOptions.checkpointOptions.period);
844
845     const bool resetCountersIsLocal = true;
846     auto       resetHandler         = std::make_unique<ResetHandler>(
847             compat::make_not_null<SimulationSignal*>(&signals[eglsRESETCOUNTERS]),
848             !resetCountersIsLocal,
849             ir->nsteps,
850             MASTER(cr),
851             mdrunOptions.timingOptions.resetHalfway,
852             mdrunOptions.maximumHoursToRun,
853             mdlog,
854             wcycle,
855             walltime_accounting);
856
857     const DDBalanceRegionHandler ddBalanceRegionHandler(cr);
858
859     if (MASTER(cr) && isMultiSim(ms) && !useReplicaExchange)
860     {
861         logInitialMultisimStatus(ms, cr, mdlog, simulationsShareState, ir->nsteps, ir->init_step);
862     }
863
864     /* and stop now if we should */
865     bLastStep = (bLastStep || (ir->nsteps >= 0 && step_rel > ir->nsteps));
866     while (!bLastStep)
867     {
868
869         /* Determine if this is a neighbor search step */
870         bNStList = (ir->nstlist > 0 && step % ir->nstlist == 0);
871
872         if (bPMETune && bNStList)
873         {
874             // This has to be here because PME load balancing is called so early.
875             // TODO: Move to after all booleans are defined.
876             if (useGpuForUpdate && !bFirstStep)
877             {
878                 stateGpu->copyCoordinatesFromGpu(state->x, AtomLocality::Local);
879                 stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
880             }
881             /* PME grid + cut-off optimization with GPUs or PME nodes */
882             pme_loadbal_do(pme_loadbal,
883                            cr,
884                            (mdrunOptions.verbose && MASTER(cr)) ? stderr : nullptr,
885                            fplog,
886                            mdlog,
887                            *ir,
888                            fr,
889                            state->box,
890                            state->x,
891                            wcycle,
892                            step,
893                            step_rel,
894                            &bPMETunePrinting,
895                            simulationWork.useGpuPmePpCommunication);
896         }
897
898         wallcycle_start(wcycle, WallCycleCounter::Step);
899
900         bLastStep = (step_rel == ir->nsteps);
901         t         = t0 + step * ir->delta_t;
902
903         // TODO Refactor this, so that nstfep does not need a default value of zero
904         if (ir->efep != FreeEnergyPerturbationType::No || ir->bSimTemp)
905         {
906             /* find and set the current lambdas */
907             state->lambda = currentLambdas(step, *(ir->fepvals), state->fep_state);
908
909             bDoDHDL = do_per_step(step, ir->fepvals->nstdhdl);
910             bDoFEP  = ((ir->efep != FreeEnergyPerturbationType::No) && do_per_step(step, nstfep));
911             bDoExpanded = (do_per_step(step, ir->expandedvals->nstexpanded) && (ir->bExpanded)
912                            && (!bFirstStep));
913         }
914
915         bDoReplEx = (useReplicaExchange && (step > 0) && !bLastStep
916                      && do_per_step(step, replExParams.exchangeInterval));
917
918         if (doSimulatedAnnealing)
919         {
920             // TODO: Avoid changing inputrec (#3854)
921             // Simulated annealing updates the reference temperature.
922             auto* nonConstInputrec = const_cast<t_inputrec*>(inputrec);
923             update_annealing_target_temp(nonConstInputrec, t, &upd);
924         }
925
926         /* Stop Center of Mass motion */
927         bStopCM = (ir->comm_mode != ComRemovalAlgorithm::No && do_per_step(step, ir->nstcomm));
928
929         /* Determine whether or not to do Neighbour Searching */
930         bNS = (bFirstStep || bNStList || bExchanged || bNeedRepartition);
931
932         /* Note that the stopHandler will cause termination at nstglobalcomm
933          * steps. Since this coincides with nstcalcenergy, nsttcouple and/or
934          * nstpcouple steps, we have computed the half-step kinetic energy
935          * of the previous step and can always output energies at the last step.
936          */
937         bLastStep = bLastStep || stopHandler->stoppingAfterCurrentStep(bNS);
938
939         /* do_log triggers energy and virial calculation. Because this leads
940          * to different code paths, forces can be different. Thus for exact
941          * continuation we should avoid extra log output.
942          * Note that the || bLastStep can result in non-exact continuation
943          * beyond the last step. But we don't consider that to be an issue.
944          */
945         do_log     = (do_per_step(step, ir->nstlog)
946                   || (bFirstStep && startingBehavior == StartingBehavior::NewSimulation) || bLastStep);
947         do_verbose = mdrunOptions.verbose
948                      && (step % mdrunOptions.verboseStepPrintInterval == 0 || bFirstStep || bLastStep);
949
950         if (useGpuForUpdate && !bFirstStep && bNS)
951         {
952             // Copy velocities from the GPU on search steps to keep a copy on host (device buffers are reinitialized).
953             stateGpu->copyVelocitiesFromGpu(state->v, AtomLocality::Local);
954             stateGpu->waitVelocitiesReadyOnHost(AtomLocality::Local);
955             // Copy coordinates from the GPU when needed at the search step.
956             // NOTE: The cases when coordinates are needed on the CPU for force evaluation are handled in sim_utils.
957             // NOTE: If the coordinates are to be written into output file they are also copied separately before the output.
958             stateGpu->copyCoordinatesFromGpu(state->x, AtomLocality::Local);
959             stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
960         }
961
962         // We only need to calculate virtual velocities if we are writing them in the current step
963         const bool needVirtualVelocitiesThisStep =
964                 (vsite != nullptr)
965                 && (do_per_step(step, ir->nstvout) || checkpointHandler->isCheckpointingStep());
966
967         if (vsite != nullptr)
968         {
969             // Virtual sites need to be updated before domain decomposition and forces are calculated
970             wallcycle_start(wcycle, WallCycleCounter::VsiteConstr);
971             // md-vv calculates virtual velocities once it has full-step real velocities
972             vsite->construct(state->x,
973                              state->v,
974                              state->box,
975                              (!EI_VV(inputrec->eI) && needVirtualVelocitiesThisStep)
976                                      ? VSiteOperation::PositionsAndVelocities
977                                      : VSiteOperation::Positions);
978             wallcycle_stop(wcycle, WallCycleCounter::VsiteConstr);
979         }
980
981         if (bNS && !(bFirstStep && ir->bContinuation))
982         {
983             bMasterState = FALSE;
984             /* Correct the new box if it is too skewed */
985             if (inputrecDynamicBox(ir))
986             {
987                 if (correct_box(fplog, step, state->box))
988                 {
989                     bMasterState = TRUE;
990                     // If update is offloaded, it should be informed about the box size change
991                     if (useGpuForUpdate)
992                     {
993                         integrator->setPbc(PbcType::Xyz, state->box);
994                     }
995                 }
996             }
997             if (DOMAINDECOMP(cr) && bMasterState)
998             {
999                 dd_collect_state(cr->dd, state, state_global);
1000             }
1001
1002             if (DOMAINDECOMP(cr))
1003             {
1004                 /* Repartition the domain decomposition */
1005                 dd_partition_system(fplog,
1006                                     mdlog,
1007                                     step,
1008                                     cr,
1009                                     bMasterState,
1010                                     nstglobalcomm,
1011                                     state_global,
1012                                     top_global,
1013                                     *ir,
1014                                     imdSession,
1015                                     pull_work,
1016                                     state,
1017                                     &f,
1018                                     mdAtoms,
1019                                     &top,
1020                                     fr,
1021                                     vsite,
1022                                     constr,
1023                                     nrnb,
1024                                     wcycle,
1025                                     do_verbose && !bPMETunePrinting);
1026                 upd.updateAfterPartition(state->natoms,
1027                                          md->cFREEZE ? gmx::arrayRefFromArray(md->cFREEZE, md->nr)
1028                                                      : gmx::ArrayRef<const unsigned short>(),
1029                                          md->cTC ? gmx::arrayRefFromArray(md->cTC, md->nr)
1030                                                  : gmx::ArrayRef<const unsigned short>());
1031             }
1032         }
1033
1034         // Allocate or re-size GPU halo exchange object, if necessary
1035         if (bNS && havePPDomainDecomposition(cr) && simulationWork.useGpuHaloExchange)
1036         {
1037             GMX_RELEASE_ASSERT(fr->deviceStreamManager != nullptr,
1038                                "GPU device manager has to be initialized to use GPU "
1039                                "version of halo exchange.");
1040             constructGpuHaloExchange(mdlog, *cr, *fr->deviceStreamManager, wcycle);
1041         }
1042
1043         if (MASTER(cr) && do_log)
1044         {
1045             gmx::EnergyOutput::printHeader(
1046                     fplog, step, t); /* can we improve the information printed here? */
1047         }
1048
1049         if (ir->efep != FreeEnergyPerturbationType::No)
1050         {
1051             update_mdatoms(mdAtoms->mdatoms(), state->lambda[FreeEnergyPerturbationCouplingType::Mass]);
1052         }
1053
1054         if (bExchanged)
1055         {
1056             /* We need the kinetic energy at minus the half step for determining
1057              * the full step kinetic energy and possibly for T-coupling.*/
1058             /* This may not be quite working correctly yet . . . . */
1059             int cglo_flags = CGLO_GSTAT | CGLO_TEMPERATURE;
1060             if (DOMAINDECOMP(cr) && shouldCheckNumberOfBondedInteractions(*cr->dd))
1061             {
1062                 cglo_flags |= CGLO_CHECK_NUMBER_OF_BONDED_INTERACTIONS;
1063             }
1064             compute_globals(gstat,
1065                             cr,
1066                             ir,
1067                             fr,
1068                             ekind,
1069                             makeConstArrayRef(state->x),
1070                             makeConstArrayRef(state->v),
1071                             state->box,
1072                             md,
1073                             nrnb,
1074                             &vcm,
1075                             wcycle,
1076                             enerd,
1077                             nullptr,
1078                             nullptr,
1079                             nullptr,
1080                             nullptr,
1081                             gmx::ArrayRef<real>{},
1082                             &nullSignaller,
1083                             state->box,
1084                             &bSumEkinhOld,
1085                             cglo_flags);
1086             if (DOMAINDECOMP(cr))
1087             {
1088                 checkNumberOfBondedInteractions(
1089                         mdlog, cr, top_global, &top, makeConstArrayRef(state->x), state->box);
1090             }
1091         }
1092         clear_mat(force_vir);
1093
1094         checkpointHandler->decideIfCheckpointingThisStep(bNS, bFirstStep, bLastStep);
1095
1096         /* Determine the energy and pressure:
1097          * at nstcalcenergy steps and at energy output steps (set below).
1098          */
1099         if (EI_VV(ir->eI) && (!bInitStep))
1100         {
1101             bCalcEnerStep = do_per_step(step, ir->nstcalcenergy);
1102             bCalcVir      = bCalcEnerStep
1103                        || (ir->epc != PressureCoupling::No
1104                            && (do_per_step(step, ir->nstpcouple) || do_per_step(step - 1, ir->nstpcouple)));
1105         }
1106         else
1107         {
1108             bCalcEnerStep = do_per_step(step, ir->nstcalcenergy);
1109             bCalcVir      = bCalcEnerStep
1110                        || (ir->epc != PressureCoupling::No && do_per_step(step, ir->nstpcouple));
1111         }
1112         bCalcEner = bCalcEnerStep;
1113
1114         do_ene = (do_per_step(step, ir->nstenergy) || bLastStep);
1115
1116         if (do_ene || do_log || bDoReplEx)
1117         {
1118             bCalcVir  = TRUE;
1119             bCalcEner = TRUE;
1120         }
1121
1122         /* Do we need global communication ? */
1123         bGStat = (bCalcVir || bCalcEner || bStopCM || do_per_step(step, nstglobalcomm)
1124                   || (EI_VV(ir->eI) && inputrecNvtTrotter(ir) && do_per_step(step - 1, nstglobalcomm)));
1125
1126         force_flags = (GMX_FORCE_STATECHANGED | ((inputrecDynamicBox(ir)) ? GMX_FORCE_DYNAMICBOX : 0)
1127                        | GMX_FORCE_ALLFORCES | (bCalcVir ? GMX_FORCE_VIRIAL : 0)
1128                        | (bCalcEner ? GMX_FORCE_ENERGY : 0) | (bDoFEP ? GMX_FORCE_DHDL : 0));
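             // With multiple time stepping, the normal force buffer is only needed on steps where
             // forces are written out (nstfout), so we can skip producing it otherwise.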
1129         if (fr->useMts && !do_per_step(step, ir->nstfout))
1130         {
1131             force_flags |= GMX_FORCE_DO_NOT_NEED_NORMAL_FORCE;
1132         }
1133
1134         if (shellfc)
1135         {
1136             /* Now is the time to relax the shells */
1137             relax_shell_flexcon(fplog,
1138                                 cr,
1139                                 ms,
1140                                 mdrunOptions.verbose,
1141                                 enforcedRotation,
1142                                 step,
1143                                 ir,
1144                                 imdSession,
1145                                 pull_work,
1146                                 bNS,
1147                                 force_flags,
1148                                 &top,
1149                                 constr,
1150                                 enerd,
1151                                 state->natoms,
1152                                 state->x.arrayRefWithPadding(),
1153                                 state->v.arrayRefWithPadding(),
1154                                 state->box,
1155                                 state->lambda,
1156                                 &state->hist,
1157                                 &f.view(),
1158                                 force_vir,
1159                                 *md,
1160                                 nrnb,
1161                                 wcycle,
1162                                 shellfc,
1163                                 fr,
1164                                 runScheduleWork,
1165                                 t,
1166                                 mu_tot,
1167                                 vsite,
1168                                 ddBalanceRegionHandler);
1169         }
1170         else
1171         {
1172             /* The AWH history needs to be saved _before_ doing force calculations where the AWH bias
1173                is updated (or the AWH update will be performed twice for one step when continuing).
1174                It would be best to call this update function from do_md_trajectory_writing but that
1175                would occur after do_force. One would have to divide the update_awh function into one
1176                function applying the AWH force and one doing the AWH bias update. The update AWH
1177                bias function could then be called after do_md_trajectory_writing (then containing
1178                update_awh_history). The checkpointing will in the future probably be moved to the
1179                start of the md loop, which will get rid of this issue. */
1180             if (awh && checkpointHandler->isCheckpointingStep() && MASTER(cr))
1181             {
1182                 awh->updateHistory(state_global->awhHistory.get());
1183             }
1184
1185             /* The coordinates (x) are shifted (to get whole molecules)
1186              * in do_force.
1187              * This is parallelized as well, and does communication too.
1188              * Check comments in sim_util.c
1189              */
1190             do_force(fplog,
1191                      cr,
1192                      ms,
1193                      *ir,
1194                      awh.get(),
1195                      enforcedRotation,
1196                      imdSession,
1197                      pull_work,
1198                      step,
1199                      nrnb,
1200                      wcycle,
1201                      &top,
1202                      state->box,
1203                      state->x.arrayRefWithPadding(),
1204                      &state->hist,
1205                      &f.view(),
1206                      force_vir,
1207                      md,
1208                      enerd,
1209                      state->lambda,
1210                      fr,
1211                      runScheduleWork,
1212                      vsite,
1213                      mu_tot,
1214                      t,
1215                      ed ? ed->getLegacyED() : nullptr,
1216                      (bNS ? GMX_FORCE_NS : 0) | force_flags,
1217                      ddBalanceRegionHandler);
1218         }
1219
1220         // VV integrators do not need the following velocity half step
1221         // if it is the first step after starting from a checkpoint.
1222         // That is, the half step is needed on all other steps, and
1223         // also the first step when starting from a .tpr file.
1224         if (EI_VV(ir->eI))
1225         {
1226             integrateVVFirstStep(step,
1227                                  bFirstStep,
1228                                  bInitStep,
1229                                  startingBehavior,
1230                                  nstglobalcomm,
1231                                  ir,
1232                                  fr,
1233                                  cr,
1234                                  state,
1235                                  mdAtoms->mdatoms(),
1236                                  fcdata,
1237                                  &MassQ,
1238                                  &vcm,
1239                                  top_global,
1240                                  top,
1241                                  enerd,
1242                                  ekind,
1243                                  gstat,
1244                                  &last_ekin,
1245                                  bCalcVir,
1246                                  total_vir,
1247                                  shake_vir,
1248                                  force_vir,
1249                                  pres,
1250                                  M,
1251                                  do_log,
1252                                  do_ene,
1253                                  bCalcEner,
1254                                  bGStat,
1255                                  bStopCM,
1256                                  bTrotter,
1257                                  bExchanged,
1258                                  &bSumEkinhOld,
1259                                  &saved_conserved_quantity,
1260                                  &f,
1261                                  &upd,
1262                                  constr,
1263                                  &nullSignaller,
1264                                  trotter_seq,
1265                                  nrnb,
1266                                  mdlog,
1267                                  fplog,
1268                                  wcycle);
1269             if (vsite != nullptr && needVirtualVelocitiesThisStep)
1270             {
1271                 // Positions were calculated earlier
1272                 wallcycle_start(wcycle, WallCycleCounter::VsiteConstr);
1273                 vsite->construct(state->x, state->v, state->box, VSiteOperation::Velocities);
1274                 wallcycle_stop(wcycle, WallCycleCounter::VsiteConstr);
1275             }
1276         }
1277
1278         /* ########  END FIRST UPDATE STEP  ############## */
1279         /* ########  If doing VV, we now have v(dt) ###### */
1280         if (bDoExpanded)
1281         {
1282             /* perform extended ensemble sampling in lambda - we don't
1283                actually move to the new state before outputting
1284                statistics, but if performing simulated tempering, we
1285                do update the velocities and the tau_t. */
1286             // TODO: Avoid changing inputrec (#3854)
1287             // Simulated tempering updates the reference temperature.
1288             // Expanded ensemble without simulated tempering does not change the inputrec.
1289             auto* nonConstInputrec = const_cast<t_inputrec*>(inputrec);
1290             lamnew                 = ExpandedEnsembleDynamics(fplog,
1291                                               nonConstInputrec,
1292                                               enerd,
1293                                               state,
1294                                               &MassQ,
1295                                               state->fep_state,
1296                                               state->dfhist,
1297                                               step,
1298                                               state->v.rvec_array(),
1299                                               md->homenr,
1300                                               md->cTC ? gmx::arrayRefFromArray(md->cTC, md->nr)
1301                                                       : gmx::ArrayRef<const unsigned short>());
1302             /* history is maintained in state->dfhist, but state_global is what is sent to trajectory and log output */
1303             if (MASTER(cr))
1304             {
1305                 copy_df_history(state_global->dfhist, state->dfhist);
1306             }
1307         }
1308
1309         // Copy coordinates from the GPU for output/checkpointing if the update is offloaded and
1310         // the coordinates have not already been copied for i) search or ii) CPU force tasks.
1311         if (useGpuForUpdate && !bNS && !runScheduleWork->domainWork.haveCpuLocalForceWork
1312             && (do_per_step(step, ir->nstxout) || do_per_step(step, ir->nstxout_compressed)
1313                 || checkpointHandler->isCheckpointingStep()))
1314         {
1315             stateGpu->copyCoordinatesFromGpu(state->x, AtomLocality::Local);
1316             stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
1317         }
1318         // Copy velocities if needed for the output/checkpointing.
1319         // NOTE: Copy on the search steps is done at the beginning of the step.
1320         if (useGpuForUpdate && !bNS
1321             && (do_per_step(step, ir->nstvout) || checkpointHandler->isCheckpointingStep()))
1322         {
1323             stateGpu->copyVelocitiesFromGpu(state->v, AtomLocality::Local);
1324             stateGpu->waitVelocitiesReadyOnHost(AtomLocality::Local);
1325         }
1326         // Copy forces for the output if the forces were reduced on the GPU (not the case on virial steps)
1327         // and update is offloaded hence forces are kept on the GPU for update and have not been
1328         // already transferred in do_force().
1329         // TODO: There should be an improved, explicit mechanism that ensures this copy is only executed
1330         //       when the forces are ready on the GPU -- the same synchronizer should be used as the one
1331         //       prior to GPU update.
1332         // TODO: When the output flags are included in the step workload, this copy can be combined with the
1333         //       copy call in do_force(...).
1334         // NOTE: The forces should not be copied here if the vsites are present, since they were modified
1335         //       on host after the D2H copy in do_force(...).
1336         if (runScheduleWork->stepWork.useGpuFBufferOps && (simulationWork.useGpuUpdate && !vsite)
1337             && do_per_step(step, ir->nstfout))
1338         {
1339             stateGpu->copyForcesFromGpu(f.view().force(), AtomLocality::Local);
1340             stateGpu->waitForcesReadyOnHost(AtomLocality::Local);
1341         }
1342         /* Now we have the energies and forces corresponding to the
1343          * coordinates at time t. We must output all of this before
1344          * the update.
1345          */
1346         do_md_trajectory_writing(fplog,
1347                                  cr,
1348                                  nfile,
1349                                  fnm,
1350                                  step,
1351                                  step_rel,
1352                                  t,
1353                                  ir,
1354                                  state,
1355                                  state_global,
1356                                  observablesHistory,
1357                                  top_global,
1358                                  fr,
1359                                  outf,
1360                                  energyOutput,
1361                                  ekind,
1362                                  f.view().force(),
1363                                  checkpointHandler->isCheckpointingStep(),
1364                                  bRerunMD,
1365                                  bLastStep,
1366                                  mdrunOptions.writeConfout,
1367                                  bSumEkinhOld);
1368         /* Check if IMD step and do IMD communication, if bIMD is TRUE. */
1369         bInteractiveMDstep = imdSession->run(step, bNS, state->box, state->x, t);
1370
1371         /* kludge -- virial is lost with restart for MTTK NPT control. Must reload (saved earlier). */
1372         if (startingBehavior != StartingBehavior::NewSimulation && bFirstStep
1373             && (inputrecNptTrotter(ir) || inputrecNphTrotter(ir)))
1374         {
1375             copy_mat(state->svir_prev, shake_vir);
1376             copy_mat(state->fvir_prev, force_vir);
1377         }
1378
1379         stopHandler->setSignal();
1380         resetHandler->setSignal(walltime_accounting);
1381
1382         if (bGStat || !PAR(cr))
1383         {
1384             /* In parallel we only have to check for checkpointing in steps
1385              * where we do global communication,
1386              *  otherwise the other nodes don't know.
1387              */
1388             checkpointHandler->setSignal(walltime_accounting);
1389         }
1390
1391         /* #########   START SECOND UPDATE STEP ################# */
1392
1393         /* At the start of the step, randomize or scale the velocities (if doing VV). Restrictions on
1394            Andersen coupling are checked during preprocessing. */
1395
1396         if (ETC_ANDERSEN(ir->etc)) /* keep this outside of update_tcouple because of the extra info that needs to be passed */
1397         {
1398             gmx_bool bIfRandomize;
1399             bIfRandomize = update_randomize_velocities(ir, step, cr, md, state->v, &upd, constr);
1400             /* if we have constraints, we have to remove the kinetic energy parallel to the bonds */
1401             if (constr && bIfRandomize)
1402             {
1403                 constrain_velocities(constr, do_log, do_ene, step, state, nullptr, false, nullptr);
1404             }
1405         }
1406         /* Box is changed in update() when we do pressure coupling,
1407          * but we should still use the old box for energy corrections and when
1408          * writing it to the energy file, so it matches the trajectory files for
1409          * the same timestep above. Make a copy in a separate array.
1410          */
1411         copy_mat(state->box, lastbox);
1412
1413         dvdl_constr = 0;
1414
1415         if (!useGpuForUpdate)
1416         {
1417             wallcycle_start(wcycle, WallCycleCounter::Update);
1418         }
1419         /* UPDATE PRESSURE VARIABLES IN TROTTER FORMULATION WITH CONSTRAINTS */
1420         if (bTrotter)
1421         {
1422             trotter_update(ir, step, ekind, enerd, state, total_vir, md, &MassQ, trotter_seq, ettTSEQ3);
1423             /* We can only do Berendsen coupling after we have summed
1424              * the kinetic energy or virial. Since that happens
1425              * in global_state after update, we should only do it at
1426              * step % nstlist = 1 with bGStatEveryStep=FALSE.
1427              */
1428         }
1429         else
1430         {
1431             update_tcouple(step, ir, state, ekind, &MassQ, md);
1432             update_pcouple_before_coordinates(fplog, step, ir, state, pressureCouplingMu, M, bInitStep);
1433         }
1434
1435         /* With leap-frog type integrators we compute the kinetic energy
1436          * at a whole time step as the average of the half-time step kinetic
1437          * energies of two subsequent steps. Therefore we need to compute the
1438          * half step kinetic energy also if we need energies at the next step.
1439          */
1440         const bool needHalfStepKineticEnergy =
1441                 (!EI_VV(ir->eI) && (do_per_step(step + 1, nstglobalcomm) || step_rel + 1 == ir->nsteps));
1442
1443         // Parrinello-Rahman requires the pressure to be available before the update to compute
1444         // the velocity scaling matrix. Hence, it runs one step after the nstpcouple step.
1445         const bool doParrinelloRahman = (ir->epc == PressureCoupling::ParrinelloRahman
1446                                          && do_per_step(step + ir->nstpcouple - 1, ir->nstpcouple));
1447
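             // Second half of the update: the velocity-Verlet branch always runs on the CPU
             // (GPU update is not supported for VV, as asserted below), while the leap-frog
             // branch can run on the GPU integrator when useGpuForUpdate is set.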
1448         if (EI_VV(ir->eI))
1449         {
1450             GMX_ASSERT(!useGpuForUpdate, "GPU update is not supported with VVAK integrator.");
1451
1452             integrateVVSecondStep(step,
1453                                   ir,
1454                                   fr,
1455                                   cr,
1456                                   state,
1457                                   mdAtoms->mdatoms(),
1458                                   fcdata,
1459                                   &MassQ,
1460                                   &vcm,
1461                                   pull_work,
1462                                   enerd,
1463                                   ekind,
1464                                   gstat,
1465                                   &dvdl_constr,
1466                                   bCalcVir,
1467                                   total_vir,
1468                                   shake_vir,
1469                                   force_vir,
1470                                   pres,
1471                                   M,
1472                                   lastbox,
1473                                   do_log,
1474                                   do_ene,
1475                                   bGStat,
1476                                   &bSumEkinhOld,
1477                                   &f,
1478                                   &cbuf,
1479                                   &upd,
1480                                   constr,
1481                                   &nullSignaller,
1482                                   trotter_seq,
1483                                   nrnb,
1484                                   wcycle);
1485         }
1486         else
1487         {
1488             if (useGpuForUpdate)
1489             {
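                     // On search steps (and on the first step) the local atom set and the
                     // topology may have changed, so the GPU integrator is re-initialized
                     // with the current device buffers before the host data are copied back.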
1490                 if (bNS && (bFirstStep || DOMAINDECOMP(cr)))
1491                 {
1492                     integrator->set(stateGpu->getCoordinates(),
1493                                     stateGpu->getVelocities(),
1494                                     stateGpu->getForces(),
1495                                     top.idef,
1496                                     *md);
1497
1498                     // Copy data to the GPU after the buffers might have been reinitialized
1499                     stateGpu->copyVelocitiesToGpu(state->v, AtomLocality::Local);
1500                     stateGpu->copyCoordinatesToGpu(state->x, AtomLocality::Local);
1501                 }
1502
1503                 if (simulationWork.useGpuPme && !runScheduleWork->simulationWork.useGpuPmePpCommunication
1504                     && !thisRankHasDuty(cr, DUTY_PME))
1505                 {
1506                     // The PME forces were received on the host, so they have to be copied
1507                     stateGpu->copyForcesToGpu(f.view().force(), AtomLocality::All);
1508                 }
1509                 else if (!runScheduleWork->stepWork.useGpuFBufferOps)
1510                 {
1511                     // The buffer ops were not offloaded this step, so the forces are on the
1512                     // host and have to be copied
1513                     stateGpu->copyForcesToGpu(f.view().force(), AtomLocality::Local);
1514                 }
1515
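                     // Decide whether thermostat velocity scaling is applied in this GPU update
                     // step; the step-offset check mirrors the Parrinello-Rahman check above.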
1516                 const bool doTemperatureScaling =
1517                         (ir->etc != TemperatureCoupling::No
1518                          && do_per_step(step + ir->nsttcouple - 1, ir->nsttcouple));
1519
1520                 // This applies Leap-Frog, LINCS and SETTLE in succession
1521                 integrator->integrate(
1522                         stateGpu->getForcesReadyOnDeviceEvent(
1523                                 AtomLocality::Local, runScheduleWork->stepWork.useGpuFBufferOps),
1524                         ir->delta_t,
1525                         true,
1526                         bCalcVir,
1527                         shake_vir,
1528                         doTemperatureScaling,
1529                         ekind->tcstat,
1530                         doParrinelloRahman,
1531                         ir->nstpcouple * ir->delta_t,
1532                         M);
1533
1534                 // Copy velocities D2H after update if:
1535                 // - Globals are computed this step (includes the energy output steps).
1536                 // - Temperature is needed for the next step.
1537                 if (bGStat || needHalfStepKineticEnergy)
1538                 {
1539                     stateGpu->copyVelocitiesFromGpu(state->v, AtomLocality::Local);
1540                     stateGpu->waitVelocitiesReadyOnHost(AtomLocality::Local);
1541                 }
1542             }
1543             else
1544             {
1545                 /* With multiple time stepping we need to do an additional normal
1546                  * update step to obtain the virial, as the actual MTS integration
1547                  * uses an acceleration where the slow forces are multiplied by mtsFactor.
1548                  * Using that acceleration would result in a virial where the slow
1549                  * force contribution is a factor mtsFactor too large.
1550                  */
1551                 if (fr->useMts && bCalcVir && constr != nullptr)
1552                 {
1553                     upd.update_for_constraint_virial(*ir,
1554                                                      md->homenr,
1555                                                      md->havePartiallyFrozenAtoms,
1556                                                      gmx::arrayRefFromArray(md->invmass, md->nr),
1557                                                      gmx::arrayRefFromArray(md->invMassPerDim, md->nr),
1558                                                      *state,
1559                                                      f.view().forceWithPadding(),
1560                                                      *ekind);
1561
1562                     constrain_coordinates(constr,
1563                                           do_log,
1564                                           do_ene,
1565                                           step,
1566                                           state,
1567                                           upd.xp()->arrayRefWithPadding(),
1568                                           &dvdl_constr,
1569                                           bCalcVir,
1570                                           shake_vir);
1571                 }
1572
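                     // With MTS, steps on which the slow forces were evaluated use the combined
                     // (fast + slow) force buffer for the update; all other steps use only the
                     // fast forces.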
1573                 ArrayRefWithPadding<const RVec> forceCombined =
1574                         (fr->useMts && step % ir->mtsLevels[1].stepFactor == 0)
1575                                 ? f.view().forceMtsCombinedWithPadding()
1576                                 : f.view().forceWithPadding();
1577                 upd.update_coords(*ir,
1578                                   step,
1579                                   md->homenr,
1580                                   md->havePartiallyFrozenAtoms,
1581                                   gmx::arrayRefFromArray(md->ptype, md->nr),
1582                                   gmx::arrayRefFromArray(md->invmass, md->nr),
1583                                   gmx::arrayRefFromArray(md->invMassPerDim, md->nr),
1584                                   state,
1585                                   forceCombined,
1586                                   fcdata,
1587                                   ekind,
1588                                   M,
1589                                   etrtPOSITION,
1590                                   cr,
1591                                   constr != nullptr);
1592
1593                 wallcycle_stop(wcycle, WallCycleCounter::Update);
1594
1595                 constrain_coordinates(constr,
1596                                       do_log,
1597                                       do_ene,
1598                                       step,
1599                                       state,
1600                                       upd.xp()->arrayRefWithPadding(),
1601                                       &dvdl_constr,
1602                                       bCalcVir && !fr->useMts,
1603                                       shake_vir);
1604
1605                 upd.update_sd_second_half(*ir,
1606                                           step,
1607                                           &dvdl_constr,
1608                                           md->homenr,
1609                                           gmx::arrayRefFromArray(md->ptype, md->nr),
1610                                           gmx::arrayRefFromArray(md->invmass, md->nr),
1611                                           state,
1612                                           cr,
1613                                           nrnb,
1614                                           wcycle,
1615                                           constr,
1616                                           do_log,
1617                                           do_ene);
1618                 upd.finish_update(
1619                         *ir, md->havePartiallyFrozenAtoms, md->homenr, state, wcycle, constr != nullptr);
1620             }
1621
1622             if (ir->bPull && ir->pull->bSetPbcRefToPrevStepCOM)
1623             {
1624                 updatePrevStepPullCom(pull_work, state);
1625             }
1626
1627             enerd->term[F_DVDL_CONSTR] += dvdl_constr;
1628         }
1629
1630         /* ############## IF NOT VV, Calculate globals HERE  ############ */
1631         /* With Leap-Frog we can skip compute_globals at
1632          * non-communication steps, but we need to calculate
1633          * the kinetic energy one step before communication.
1634          */
1635         {
1636             // Organize to do inter-simulation signalling on steps if
1637             // and when algorithms require it.
1638             const bool doInterSimSignal = (simulationsShareState && do_per_step(step, nstSignalComm));
1639
1640             if (bGStat || needHalfStepKineticEnergy || doInterSimSignal)
1641             {
1642                 // Copy coordinates when needed to stop the CM motion.
1643                 if (useGpuForUpdate && !EI_VV(ir->eI) && bStopCM)
1644                 {
1645                     stateGpu->copyCoordinatesFromGpu(state->x, AtomLocality::Local);
1646                     stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
1647                 }
1648                 // Since we're already communicating at this step, we
1649                 // can propagate intra-simulation signals. Note that
1650                 // check_nstglobalcomm has the responsibility for
1651                 // choosing the value of nstglobalcomm, which is one way that
1652                 // bGStat becomes true, so we can't get into a
1653                 // situation where e.g. checkpointing can't be
1654                 // signalled.
1655                 bool                doIntraSimSignal = true;
1656                 SimulationSignaller signaller(&signals, cr, ms, doInterSimSignal, doIntraSimSignal);
1657
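                     // compute_globals() sums kinetic energy, virials and signals over all ranks.
                     // For leap-frog-type integrators the temperature and pressure are requested
                     // here through the CGLO flags; the VV path obtained them earlier in the step.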
1658                 compute_globals(
1659                         gstat,
1660                         cr,
1661                         ir,
1662                         fr,
1663                         ekind,
1664                         makeConstArrayRef(state->x),
1665                         makeConstArrayRef(state->v),
1666                         state->box,
1667                         md,
1668                         nrnb,
1669                         &vcm,
1670                         wcycle,
1671                         enerd,
1672                         force_vir,
1673                         shake_vir,
1674                         total_vir,
1675                         pres,
1676                         (!EI_VV(ir->eI) && bCalcEner && constr != nullptr) ? constr->rmsdData()
1677                                                                            : gmx::ArrayRef<real>{},
1678                         &signaller,
1679                         lastbox,
1680                         &bSumEkinhOld,
1681                         (bGStat ? CGLO_GSTAT : 0) | (!EI_VV(ir->eI) && bCalcEner ? CGLO_ENERGY : 0)
1682                                 | (!EI_VV(ir->eI) && bStopCM ? CGLO_STOPCM : 0)
1683                                 | (!EI_VV(ir->eI) ? CGLO_TEMPERATURE : 0)
1684                                 | (!EI_VV(ir->eI) ? CGLO_PRESSURE : 0) | CGLO_CONSTRAINT
1685                                 | (DOMAINDECOMP(cr) && shouldCheckNumberOfBondedInteractions(*cr->dd)
1686                                            ? CGLO_CHECK_NUMBER_OF_BONDED_INTERACTIONS
1687                                            : 0));
1688                 if (DOMAINDECOMP(cr))
1689                 {
1690                     checkNumberOfBondedInteractions(
1691                             mdlog, cr, top_global, &top, makeConstArrayRef(state->x), state->box);
1692                 }
1693                 if (!EI_VV(ir->eI) && bStopCM)
1694                 {
1695                     process_and_stopcm_grp(
1696                             fplog, &vcm, *md, makeArrayRef(state->x), makeArrayRef(state->v));
1697                     inc_nrnb(nrnb, eNR_STOPCM, md->homenr);
1698
1699                     // TODO: The special case of removing CM motion should be dealt with more gracefully
1700                     if (useGpuForUpdate)
1701                     {
1702                         stateGpu->copyCoordinatesToGpu(state->x, AtomLocality::Local);
1703                         // Here we block until the H2D copy completes because event sync with the
1704                         // force kernels that use the coordinates on the next steps is not implemented
1705                         // (not because of a race on state->x being modified on the CPU while H2D is in progress).
1706                         stateGpu->waitCoordinatesCopiedToDevice(AtomLocality::Local);
1707                         // If the COM removal changed the velocities on the CPU, this has to be accounted for.
1708                         if (vcm.mode != ComRemovalAlgorithm::No)
1709                         {
1710                             stateGpu->copyVelocitiesToGpu(state->v, AtomLocality::Local);
1711                         }
1712                     }
1713                 }
1714             }
1715         }
1716
1717         /* #############  END CALC EKIN AND PRESSURE ################# */
1718
1719         /* Note: this is OK, but there are some numerical precision issues with using the convergence of
1720            the virial that should probably be addressed eventually. state->veta has better properties,
1721            but what we actually need entering the new cycle is the new shake_vir value. Ideally, we could
1722            generate the new shake_vir, but test the veta value for convergence.  This will take some thought. */
1723
1724         if (ir->efep != FreeEnergyPerturbationType::No && !EI_VV(ir->eI))
1725         {
1726             /* Sum up the foreign energy and dK/dl terms for md and sd.
1727                Currently done every step so that dH/dl is correct in the .edr */
1728             accumulateKineticLambdaComponents(enerd, state->lambda, *ir->fepvals);
1729         }
1730
1731         update_pcouple_after_coordinates(
1732                 fplog, step, ir, md, pres, force_vir, shake_vir, pressureCouplingMu, state, nrnb, upd.deform(), !useGpuForUpdate);
1733
1734         const bool doBerendsenPressureCoupling = (inputrec->epc == PressureCoupling::Berendsen
1735                                                   && do_per_step(step, inputrec->nstpcouple));
1736         const bool doCRescalePressureCoupling  = (inputrec->epc == PressureCoupling::CRescale
1737                                                  && do_per_step(step, inputrec->nstpcouple));
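        // When the update is offloaded, the box scaling computed by the CPU pressure-coupling
        // code above still has to be applied to the coordinates on the device; for C-rescale
        // the velocities are scaled with the inverse matrix, and the integrator PBC is refreshed.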
1738         if (useGpuForUpdate
1739             && (doBerendsenPressureCoupling || doCRescalePressureCoupling || doParrinelloRahman))
1740         {
1741             integrator->scaleCoordinates(pressureCouplingMu);
1742             if (doCRescalePressureCoupling)
1743             {
1744                 matrix pressureCouplingInvMu;
1745                 gmx::invertBoxMatrix(pressureCouplingMu, pressureCouplingInvMu);
1746                 integrator->scaleVelocities(pressureCouplingInvMu);
1747             }
1748             integrator->setPbc(PbcType::Xyz, state->box);
1749         }
1750
1751         /* ################# END UPDATE STEP 2 ################# */
1752         /* #### We now have r(t+dt) and v(t+dt/2)  ############# */
1753
1754         /* The coordinates (x) were unshifted in update */
1755         if (!bGStat)
1756         {
1757             /* We will not sum ekinh_old,
1758              * so signal that we still have to do it.
1759              */
1760             bSumEkinhOld = TRUE;
1761         }
1762
1763         if (bCalcEner)
1764         {
1765             /* #########  BEGIN PREPARING EDR OUTPUT  ###########  */
1766
1767             /* use the directly determined last velocity, not actually the averaged half steps */
1768             if (bTrotter && ir->eI == IntegrationAlgorithm::VV)
1769             {
1770                 enerd->term[F_EKIN] = last_ekin;
1771             }
1772             enerd->term[F_ETOT] = enerd->term[F_EPOT] + enerd->term[F_EKIN];
1773
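             // For VV the conserved-quantity contribution saved during the first half of the
             // step is added here; for leap-frog-type integrators the thermostat/barostat
             // contribution is evaluated from the current coupling state via NPT_energy().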
1774             if (integratorHasConservedEnergyQuantity(ir))
1775             {
1776                 if (EI_VV(ir->eI))
1777                 {
1778                     enerd->term[F_ECONSERVED] = enerd->term[F_ETOT] + saved_conserved_quantity;
1779                 }
1780                 else
1781                 {
1782                     enerd->term[F_ECONSERVED] = enerd->term[F_ETOT] + NPT_energy(ir, state, &MassQ);
1783                 }
1784             }
1785             /* #########  END PREPARING EDR OUTPUT  ###########  */
1786         }
1787
1788         /* Output stuff */
1789         if (MASTER(cr))
1790         {
1791             if (fplog && do_log && bDoExpanded)
1792             {
1793                 /* only needed if doing expanded ensemble */
1794                 PrintFreeEnergyInfoToFile(fplog,
1795                                           ir->fepvals.get(),
1796                                           ir->expandedvals.get(),
1797                                           ir->bSimTemp ? ir->simtempvals.get() : nullptr,
1798                                           state_global->dfhist,
1799                                           state->fep_state,
1800                                           ir->nstlog,
1801                                           step);
1802             }
1803             if (bCalcEner)
1804             {
1805                 energyOutput.addDataAtEnergyStep(bDoDHDL,
1806                                                  bCalcEnerStep,
1807                                                  t,
1808                                                  md->tmass,
1809                                                  enerd,
1810                                                  ir->fepvals.get(),
1811                                                  ir->expandedvals.get(),
1812                                                  lastbox,
1813                                                  PTCouplingArrays{ state->boxv,
1814                                                                    state->nosehoover_xi,
1815                                                                    state->nosehoover_vxi,
1816                                                                    state->nhpres_xi,
1817                                                                    state->nhpres_vxi },
1818                                                  state->fep_state,
1819                                                  shake_vir,
1820                                                  force_vir,
1821                                                  total_vir,
1822                                                  pres,
1823                                                  ekind,
1824                                                  mu_tot,
1825                                                  constr);
1826             }
1827             else
1828             {
1829                 energyOutput.recordNonEnergyStep();
1830             }
1831
1832             gmx_bool do_dr = do_per_step(step, ir->nstdisreout);
1833             gmx_bool do_or = do_per_step(step, ir->nstorireout);
1834
1835             if (doSimulatedAnnealing)
1836             {
1837                 gmx::EnergyOutput::printAnnealingTemperatures(
1838                         do_log ? fplog : nullptr, groups, &(ir->opts));
1839             }
1840             if (do_log || do_ene || do_dr || do_or)
1841             {
1842                 energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf),
1843                                                    do_ene,
1844                                                    do_dr,
1845                                                    do_or,
1846                                                    do_log ? fplog : nullptr,
1847                                                    step,
1848                                                    t,
1849                                                    fr->fcdata.get(),
1850                                                    awh.get());
1851             }
1852             if (do_log && ir->bDoAwh && awh->hasFepLambdaDimension())
1853             {
1854                 const bool isInitialOutput = false;
1855                 printLambdaStateToLog(fplog, state->lambda, isInitialOutput);
1856             }
1857
1858             if (ir->bPull)
1859             {
1860                 pull_print_output(pull_work, step, t);
1861             }
1862
1863             if (do_per_step(step, ir->nstlog))
1864             {
1865                 if (fflush(fplog) != 0)
1866                 {
1867                     gmx_fatal(FARGS, "Cannot flush logfile - maybe you are out of disk space?");
1868                 }
1869             }
1870         }
1871         if (bDoExpanded)
1872         {
1873             /* Have to do this part _after_ outputting the logfile and the edr file */
1874             /* Gets written into the state at the beginning of the next loop iteration */
1875             state->fep_state = lamnew;
1876         }
1877         else if (ir->bDoAwh && awh->needForeignEnergyDifferences(step))
1878         {
1879             state->fep_state = awh->fepLambdaState();
1880         }
1881         /* Print the remaining wall clock time for the run */
1882         if (isMasterSimMasterRank(ms, MASTER(cr)) && (do_verbose || gmx_got_usr_signal()) && !bPMETunePrinting)
1883         {
1884             if (shellfc)
1885             {
1886                 fprintf(stderr, "\n");
1887             }
1888             print_time(stderr, walltime_accounting, step, ir, cr);
1889         }
1890
1891         /* Ion/water position swapping.
1892          * Not done in the last step, since trajectory writing happens before this call
1893          * in the MD loop and exchanges would be lost anyway. */
1894         bNeedRepartition = FALSE;
1895         if ((ir->eSwapCoords != SwapType::No) && (step > 0) && !bLastStep
1896             && do_per_step(step, ir->swap->nstswap))
1897         {
1898             bNeedRepartition = do_swapcoords(cr,
1899                                              step,
1900                                              t,
1901                                              ir,
1902                                              swap,
1903                                              wcycle,
1904                                              as_rvec_array(state->x.data()),
1905                                              state->box,
1906                                              MASTER(cr) && mdrunOptions.verbose,
1907                                              bRerunMD);
1908
1909             if (bNeedRepartition && DOMAINDECOMP(cr))
1910             {
1911                 dd_collect_state(cr->dd, state, state_global);
1912             }
1913         }
1914
1915         /* Replica exchange */
1916         bExchanged = FALSE;
1917         if (bDoReplEx)
1918         {
1919             bExchanged = replica_exchange(fplog, cr, ms, repl_ex, state_global, enerd, state, step, t);
1920         }
1921
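        // After a replica exchange or a coordinate swap the local state is stale, so the
        // system is repartitioned from the master (global) state and the update object is
        // given the new local atom ranges.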
1922         if ((bExchanged || bNeedRepartition) && DOMAINDECOMP(cr))
1923         {
1924             dd_partition_system(fplog,
1925                                 mdlog,
1926                                 step,
1927                                 cr,
1928                                 TRUE,
1929                                 1,
1930                                 state_global,
1931                                 top_global,
1932                                 *ir,
1933                                 imdSession,
1934                                 pull_work,
1935                                 state,
1936                                 &f,
1937                                 mdAtoms,
1938                                 &top,
1939                                 fr,
1940                                 vsite,
1941                                 constr,
1942                                 nrnb,
1943                                 wcycle,
1944                                 FALSE);
1945             upd.updateAfterPartition(state->natoms,
1946                                      md->cFREEZE ? gmx::arrayRefFromArray(md->cFREEZE, md->nr)
1947                                                  : gmx::ArrayRef<const unsigned short>(),
1948                                      md->cTC ? gmx::arrayRefFromArray(md->cTC, md->nr)
1949                                              : gmx::ArrayRef<const unsigned short>());
1950         }
1951
1952         bFirstStep = FALSE;
1953         bInitStep  = FALSE;
1954
1955         /* #######  SET VARIABLES FOR NEXT ITERATION IF THEY STILL NEED IT ###### */
1956         /* With all integrators, except VV, we need to retain the pressure
1957          * at the current step for coupling at the next step.
1958          */
1959         if ((state->flags & enumValueToBitMask(StateEntry::PressurePrevious))
1960             && (bGStatEveryStep || (ir->nstpcouple > 0 && step % ir->nstpcouple == 0)))
1961         {
1962             /* Store the pressure in t_state for pressure coupling
1963              * at the next MD step.
1964              */
1965             copy_mat(pres, state->pres_prev);
1966         }
1967
1968         /* #######  END SET VARIABLES FOR NEXT ITERATION ###### */
1969
1970         if ((membed != nullptr) && (!bLastStep))
1971         {
1972             rescale_membed(step_rel, membed, as_rvec_array(state_global->x.data()));
1973         }
1974
1975         cycles = wallcycle_stop(wcycle, WallCycleCounter::Step);
1976         if (DOMAINDECOMP(cr) && wcycle)
1977         {
1978             dd_cycles_add(cr->dd, cycles, ddCyclStep);
1979         }
1980
1981         /* increase the MD step number */
1982         step++;
1983         step_rel++;
1984
1985 #if GMX_FAHCORE
1986         if (MASTER(cr))
1987         {
1988             fcReportProgress(ir->nsteps + ir->init_step, step);
1989         }
1990 #endif
1991
1992         resetHandler->resetCounters(
1993                 step, step_rel, mdlog, fplog, cr, fr->nbv.get(), nrnb, fr->pmedata, pme_loadbal, wcycle, walltime_accounting);
1994
1995         /* If bIMD is TRUE, the master updates the IMD energy record and sends positions to the VMD client */
1996         imdSession->updateEnergyRecordAndSendPositionsAndEnergies(bInteractiveMDstep, step, bCalcEner);
1997     }
1998     /* End of main MD loop */
1999
2000     /* Closing TNG files can include compressing data. Therefore it is good to do that
2001      * before stopping the time measurements. */
2002     mdoutf_tng_close(outf);
2003
2004     /* Stop measuring walltime */
2005     walltime_accounting_end_time(walltime_accounting);
2006
2007     if (!thisRankHasDuty(cr, DUTY_PME))
2008     {
2009         /* Tell the PME-only node to finish */
2010         gmx_pme_send_finish(cr);
2011     }
2012
2013     if (MASTER(cr))
2014     {
2015         if (ir->nstcalcenergy > 0)
2016         {
2017             energyOutput.printEnergyConservation(fplog, ir->simulation_part, EI_MD(ir->eI));
2018
2019             gmx::EnergyOutput::printAnnealingTemperatures(fplog, groups, &(ir->opts));
2020             energyOutput.printAverages(fplog, groups);
2021         }
2022     }
2023     done_mdoutf(outf);
2024
2025     if (bPMETune)
2026     {
2027         pme_loadbal_done(pme_loadbal, fplog, mdlog, fr->nbv->useGpu());
2028     }
2029
2030     done_shellfc(fplog, shellfc, step_rel);
2031
2032     if (useReplicaExchange && MASTER(cr))
2033     {
2034         print_replica_exchange_statistics(fplog, repl_ex);
2035     }
2036
2037     walltime_accounting_set_nsteps_done(walltime_accounting, step_rel);
2038
2039     global_stat_destroy(gstat);
2040 }