1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5  * Copyright (c) 2001-2004, The GROMACS development team.
6  * Copyright (c) 2011-2019,2020,2021, by the GROMACS development team, led by
7  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8  * and including many others, as listed in the AUTHORS file in the
9  * top-level source directory and at http://www.gromacs.org.
10  *
11  * GROMACS is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public License
13  * as published by the Free Software Foundation; either version 2.1
14  * of the License, or (at your option) any later version.
15  *
16  * GROMACS is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with GROMACS; if not, see
23  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
25  *
26  * If you want to redistribute modifications to GROMACS, please
27  * consider that scientific software is very special. Version
28  * control is crucial - bugs must be traceable. We will be happy to
29  * consider code for inclusion in the official distribution, but
30  * derived work must not be called official GROMACS. Details are found
31  * in the README & COPYING files - if they are missing, get the
32  * official version at http://www.gromacs.org.
33  *
34  * To help us fund GROMACS development, we humbly ask that you cite
35  * the research papers on the package. Check out http://www.gromacs.org.
36  */
37 /*! \internal \file
38  *
39  * \brief Implements the integrator for normal molecular dynamics simulations
40  *
41  * \author David van der Spoel <david.vanderspoel@icm.uu.se>
42  * \ingroup module_mdrun
43  */
44 #include "gmxpre.h"
45
46 #include <cinttypes>
47 #include <cmath>
48 #include <cstdio>
49 #include <cstdlib>
50
51 #include <algorithm>
52 #include <memory>
53 #include <numeric>
54
55 #include "gromacs/applied_forces/awh/awh.h"
56 #include "gromacs/applied_forces/awh/read_params.h"
57 #include "gromacs/commandline/filenm.h"
58 #include "gromacs/domdec/collect.h"
59 #include "gromacs/domdec/dlbtiming.h"
60 #include "gromacs/domdec/domdec.h"
61 #include "gromacs/domdec/domdec_network.h"
62 #include "gromacs/domdec/domdec_struct.h"
63 #include "gromacs/domdec/gpuhaloexchange.h"
64 #include "gromacs/domdec/mdsetup.h"
65 #include "gromacs/domdec/partition.h"
66 #include "gromacs/essentialdynamics/edsam.h"
67 #include "gromacs/ewald/pme_load_balancing.h"
68 #include "gromacs/ewald/pme_pp.h"
69 #include "gromacs/fileio/trxio.h"
70 #include "gromacs/gmxlib/network.h"
71 #include "gromacs/gmxlib/nrnb.h"
72 #include "gromacs/gpu_utils/device_stream_manager.h"
73 #include "gromacs/gpu_utils/gpu_utils.h"
74 #include "gromacs/imd/imd.h"
75 #include "gromacs/listed_forces/listed_forces.h"
76 #include "gromacs/math/functions.h"
77 #include "gromacs/math/invertmatrix.h"
78 #include "gromacs/math/vec.h"
79 #include "gromacs/math/vectypes.h"
80 #include "gromacs/mdlib/checkpointhandler.h"
81 #include "gromacs/mdlib/compute_io.h"
82 #include "gromacs/mdlib/constr.h"
83 #include "gromacs/mdlib/coupling.h"
84 #include "gromacs/mdlib/ebin.h"
85 #include "gromacs/mdlib/enerdata_utils.h"
86 #include "gromacs/mdlib/energyoutput.h"
87 #include "gromacs/mdlib/expanded.h"
88 #include "gromacs/mdlib/force.h"
89 #include "gromacs/mdlib/force_flags.h"
90 #include "gromacs/mdlib/forcerec.h"
91 #include "gromacs/mdlib/freeenergyparameters.h"
92 #include "gromacs/mdlib/md_support.h"
93 #include "gromacs/mdlib/mdatoms.h"
94 #include "gromacs/mdlib/mdoutf.h"
95 #include "gromacs/mdlib/membed.h"
96 #include "gromacs/mdlib/resethandler.h"
97 #include "gromacs/mdlib/sighandler.h"
98 #include "gromacs/mdlib/simulationsignal.h"
99 #include "gromacs/mdlib/stat.h"
100 #include "gromacs/mdlib/stophandler.h"
101 #include "gromacs/mdlib/tgroup.h"
102 #include "gromacs/mdlib/trajectory_writing.h"
103 #include "gromacs/mdlib/update.h"
104 #include "gromacs/mdlib/update_constrain_gpu.h"
105 #include "gromacs/mdlib/update_vv.h"
106 #include "gromacs/mdlib/vcm.h"
107 #include "gromacs/mdlib/vsite.h"
108 #include "gromacs/mdrunutility/handlerestart.h"
109 #include "gromacs/mdrunutility/multisim.h"
110 #include "gromacs/mdrunutility/printtime.h"
111 #include "gromacs/mdtypes/awh_history.h"
112 #include "gromacs/mdtypes/awh_params.h"
113 #include "gromacs/mdtypes/commrec.h"
114 #include "gromacs/mdtypes/df_history.h"
115 #include "gromacs/mdtypes/energyhistory.h"
116 #include "gromacs/mdtypes/fcdata.h"
117 #include "gromacs/mdtypes/forcebuffers.h"
118 #include "gromacs/mdtypes/forcerec.h"
119 #include "gromacs/mdtypes/group.h"
120 #include "gromacs/mdtypes/inputrec.h"
121 #include "gromacs/mdtypes/interaction_const.h"
122 #include "gromacs/mdtypes/md_enums.h"
123 #include "gromacs/mdtypes/mdatom.h"
124 #include "gromacs/mdtypes/mdrunoptions.h"
125 #include "gromacs/mdtypes/multipletimestepping.h"
126 #include "gromacs/mdtypes/observableshistory.h"
127 #include "gromacs/mdtypes/pullhistory.h"
128 #include "gromacs/mdtypes/simulation_workload.h"
129 #include "gromacs/mdtypes/state.h"
130 #include "gromacs/mdtypes/state_propagator_data_gpu.h"
131 #include "gromacs/modularsimulator/energydata.h"
132 #include "gromacs/nbnxm/gpu_data_mgmt.h"
133 #include "gromacs/nbnxm/nbnxm.h"
134 #include "gromacs/pbcutil/pbc.h"
135 #include "gromacs/pulling/output.h"
136 #include "gromacs/pulling/pull.h"
137 #include "gromacs/swap/swapcoords.h"
138 #include "gromacs/timing/wallcycle.h"
139 #include "gromacs/timing/walltime_accounting.h"
140 #include "gromacs/topology/atoms.h"
141 #include "gromacs/topology/idef.h"
142 #include "gromacs/topology/mtop_util.h"
143 #include "gromacs/topology/topology.h"
144 #include "gromacs/trajectory/trajectoryframe.h"
145 #include "gromacs/utility/basedefinitions.h"
146 #include "gromacs/utility/cstringutil.h"
147 #include "gromacs/utility/fatalerror.h"
148 #include "gromacs/utility/logger.h"
149 #include "gromacs/utility/real.h"
150 #include "gromacs/utility/smalloc.h"
151
152 #include "legacysimulator.h"
153 #include "replicaexchange.h"
154 #include "shellfc.h"
155
156 using gmx::SimulationSignaller;
157
158 void gmx::LegacySimulator::do_md()
159 {
160     // TODO Historically, the EM and MD "integrators" used different
161     // names for the t_inputrec *parameter, but these must have the
162     // same name, now that it's a member of a struct. We use this ir
163     // alias to avoid a large ripple of nearly useless changes.
164     // t_inputrec is being replaced by IMdpOptionsProvider, so this
165     // will go away eventually.
166     const t_inputrec* ir = inputrec;
167
168     int64_t      step, step_rel;
169     double       t, t0 = ir->init_t;
170     gmx_bool     bGStatEveryStep, bGStat, bCalcVir, bCalcEnerStep, bCalcEner;
171     gmx_bool     bNS = FALSE, bNStList, bStopCM, bFirstStep, bInitStep, bLastStep = FALSE;
172     gmx_bool     bDoDHDL = FALSE, bDoFEP = FALSE, bDoExpanded = FALSE;
173     gmx_bool     do_ene, do_log, do_verbose;
174     gmx_bool     bMasterState;
175     unsigned int force_flags;
176     tensor force_vir = { { 0 } }, shake_vir = { { 0 } }, total_vir = { { 0 } }, pres = { { 0 } };
177     int    i, m;
178     rvec   mu_tot;
179     matrix pressureCouplingMu, M;
180     gmx_repl_ex_t     repl_ex = nullptr;
181     gmx_global_stat_t gstat;
182     gmx_shellfc_t*    shellfc;
183     gmx_bool          bSumEkinhOld, bDoReplEx, bExchanged, bNeedRepartition;
184     gmx_bool          bTrotter;
185     real              dvdl_constr;
186     std::vector<RVec> cbuf;
187     matrix            lastbox;
188     int               lamnew = 0;
189     /* for FEP */
190     int       nstfep = 0;
191     double    cycles;
192     real      saved_conserved_quantity = 0;
193     real      last_ekin                = 0;
194     t_extmass MassQ;
195     char      sbuf[STEPSTRSIZE], sbuf2[STEPSTRSIZE];
196
197     /* PME load balancing data for GPU kernels */
198     gmx_bool bPMETune         = FALSE;
199     gmx_bool bPMETunePrinting = FALSE;
200
201     bool bInteractiveMDstep = false;
202
203     SimulationSignals signals;
204     // Most global communication stages don't propagate mdrun
205     // signals, and will use this object to achieve that.
206     SimulationSignaller nullSignaller(nullptr, nullptr, nullptr, false, false);
207
208     if (!mdrunOptions.writeConfout)
209     {
210         // This is on by default, and the main known use case for
211         // turning it off is for convenience in benchmarking, which is
212         // something that should not show up in the general user
213         // interface.
214         GMX_LOG(mdlog.info)
215                 .asParagraph()
216                 .appendText(
217                         "The -noconfout functionality is deprecated, and may be removed in a "
218                         "future version.");
219     }
220
221     /* md-vv uses averaged full step velocities for T-control
222        md-vv-avek uses averaged half step velocities for T-control (but full step ekin for P control)
223        md uses averaged half step kinetic energies to determine temperature unless defined otherwise by GMX_EKIN_AVE_VEL; */
224     bTrotter = (EI_VV(ir->eI)
225                 && (inputrecNptTrotter(ir) || inputrecNphTrotter(ir) || inputrecNvtTrotter(ir)));
226
227     const bool bRerunMD = false;
228
229     int nstglobalcomm = computeGlobalCommunicationPeriod(mdlog, ir, cr);
230     bGStatEveryStep   = (nstglobalcomm == 1);
231
232     const SimulationGroups* groups = &top_global.groups;
233
234     std::unique_ptr<EssentialDynamics> ed = nullptr;
235     if (opt2bSet("-ei", nfile, fnm))
236     {
237         /* Initialize essential dynamics sampling */
238         ed = init_edsam(mdlog,
239                         opt2fn_null("-ei", nfile, fnm),
240                         opt2fn("-eo", nfile, fnm),
241                         top_global,
242                         *ir,
243                         cr,
244                         constr,
245                         state_global,
246                         observablesHistory,
247                         oenv,
248                         startingBehavior);
249     }
250     else if (observablesHistory->edsamHistory)
251     {
252         gmx_fatal(FARGS,
253                   "The checkpoint is from a run with essential dynamics sampling, "
254                   "but the current run did not specify the -ei option. "
255                   "Either specify the -ei option to mdrun, or do not use this checkpoint file.");
256     }
257
258     int*                fep_state = MASTER(cr) ? &state_global->fep_state : nullptr;
259     gmx::ArrayRef<real> lambda    = MASTER(cr) ? state_global->lambda : gmx::ArrayRef<real>();
260     initialize_lambdas(
261             fplog, *ir, gmx::arrayRefFromArray(ir->opts.ref_t, ir->opts.ngtc), MASTER(cr), fep_state, lambda);
262     Update upd(*ir, deform);
263     bool   doSimulatedAnnealing = false;
264     {
265         // TODO: Avoid changing inputrec (#3854)
266         // Simulated annealing updates the reference temperature.
267         auto* nonConstInputrec = const_cast<t_inputrec*>(inputrec);
268         doSimulatedAnnealing   = initSimulatedAnnealing(nonConstInputrec, &upd);
269     }
270     const bool useReplicaExchange = (replExParams.exchangeInterval > 0);
271
272     const t_fcdata& fcdata = *fr->fcdata;
273
274     bool simulationsShareState = false;
275     int  nstSignalComm         = nstglobalcomm;
276     {
277         // TODO This implementation of ensemble orientation restraints is nasty because
278         // a user can't just do multi-sim with single-sim orientation restraints.
279         bool usingEnsembleRestraints =
280                 (fcdata.disres->nsystems > 1) || ((ms != nullptr) && (fcdata.orires->nr != 0));
281         bool awhUsesMultiSim = (ir->bDoAwh && ir->awhParams->shareBiasMultisim() && (ms != nullptr));
282
283         // Replica exchange, ensemble restraints and AWH need all
284         // simulations to remain synchronized, so checkpoint and stop
285         // signals must act on the same step in every simulation; the
286         // propagation of such signals therefore has to take place between
287         // simulations, not just within them.
288         // TODO: Make algorithm initializers set these flags.
289         simulationsShareState = useReplicaExchange || usingEnsembleRestraints || awhUsesMultiSim;
290
291         if (simulationsShareState)
292         {
293             // Inter-simulation signal communication does not need to happen
294             // often, so we use a minimum of 200 steps to reduce overhead.
295             const int c_minimumInterSimulationSignallingInterval = 200;
296             nstSignalComm = ((c_minimumInterSimulationSignallingInterval + nstglobalcomm - 1) / nstglobalcomm)
297                             * nstglobalcomm;
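            // Illustrative (hypothetical) numbers: with nstglobalcomm = 15, the
            // expression above rounds the 200-step minimum up to the next multiple
            // of nstglobalcomm, giving nstSignalComm = 210.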
298         }
299     }
300
301     if (startingBehavior != StartingBehavior::RestartWithAppending)
302     {
303         pleaseCiteCouplingAlgorithms(fplog, *ir);
304     }
305     gmx_mdoutf*       outf = init_mdoutf(fplog,
306                                    nfile,
307                                    fnm,
308                                    mdrunOptions,
309                                    cr,
310                                    outputProvider,
311                                    mdModulesNotifiers,
312                                    ir,
313                                    top_global,
314                                    oenv,
315                                    wcycle,
316                                    startingBehavior,
317                                    simulationsShareState,
318                                    ms);
319     gmx::EnergyOutput energyOutput(mdoutf_get_fp_ene(outf),
320                                    top_global,
321                                    *ir,
322                                    pull_work,
323                                    mdoutf_get_fp_dhdl(outf),
324                                    false,
325                                    startingBehavior,
326                                    simulationsShareState,
327                                    mdModulesNotifiers);
328
329     gstat = global_stat_init(ir);
330
331     const auto& simulationWork     = runScheduleWork->simulationWork;
332     const bool  useGpuForPme       = simulationWork.useGpuPme;
333     const bool  useGpuForNonbonded = simulationWork.useGpuNonbonded;
334     const bool  useGpuForBufferOps = simulationWork.useGpuBufferOps;
335     const bool  useGpuForUpdate    = simulationWork.useGpuUpdate;
336
337     /* Check for polarizable models and flexible constraints */
338     shellfc = init_shell_flexcon(fplog,
339                                  top_global,
340                                  constr ? constr->numFlexibleConstraints() : 0,
341                                  ir->nstcalcenergy,
342                                  DOMAINDECOMP(cr),
343                                  useGpuForPme);
344
345     {
346         double io = compute_io(ir, top_global.natoms, *groups, energyOutput.numEnergyTerms(), 1);
347         if ((io > 2000) && MASTER(cr))
348         {
349             fprintf(stderr, "\nWARNING: This run will generate roughly %.0f Mb of data\n\n", io);
350         }
351     }
352
353     // Local state only becomes valid now.
354     std::unique_ptr<t_state> stateInstance;
355     t_state*                 state;
356
357     gmx_localtop_t top(top_global.ffparams);
358
359     auto mdatoms = mdAtoms->mdatoms();
360
361     ForceBuffers     f(fr->useMts,
362                    ((useGpuForNonbonded && useGpuForBufferOps) || useGpuForUpdate)
363                            ? PinningPolicy::PinnedIfSupported
364                            : PinningPolicy::CannotBePinned);
365     const t_mdatoms* md = mdAtoms->mdatoms();
366     if (DOMAINDECOMP(cr))
367     {
368         stateInstance = std::make_unique<t_state>();
369         state         = stateInstance.get();
370         dd_init_local_state(*cr->dd, state_global, state);
371
372         /* Distribute the charge groups over the nodes from the master node */
373         dd_partition_system(fplog,
374                             mdlog,
375                             ir->init_step,
376                             cr,
377                             TRUE,
378                             1,
379                             state_global,
380                             top_global,
381                             *ir,
382                             imdSession,
383                             pull_work,
384                             state,
385                             &f,
386                             mdAtoms,
387                             &top,
388                             fr,
389                             vsite,
390                             constr,
391                             nrnb,
392                             nullptr,
393                             FALSE);
394         upd.updateAfterPartition(state->natoms,
395                                  md->cFREEZE ? gmx::arrayRefFromArray(md->cFREEZE, md->nr)
396                                              : gmx::ArrayRef<const unsigned short>(),
397                                  md->cTC ? gmx::arrayRefFromArray(md->cTC, md->nr)
398                                          : gmx::ArrayRef<const unsigned short>());
399     }
400     else
401     {
402         state_change_natoms(state_global, state_global->natoms);
403         /* Copy the pointer to the global state */
404         state = state_global;
405
406         /* Generate and initialize new topology */
407         mdAlgorithmsSetupAtomData(cr, *ir, top_global, &top, fr, &f, mdAtoms, constr, vsite, shellfc);
408
409         upd.updateAfterPartition(state->natoms,
410                                  md->cFREEZE ? gmx::arrayRefFromArray(md->cFREEZE, md->nr)
411                                              : gmx::ArrayRef<const unsigned short>(),
412                                  md->cTC ? gmx::arrayRefFromArray(md->cTC, md->nr)
413                                          : gmx::ArrayRef<const unsigned short>());
414     }
415
416     std::unique_ptr<UpdateConstrainGpu> integrator;
417
418     StatePropagatorDataGpu* stateGpu = fr->stateGpu;
419
420     // TODO: the assertions below should be handled by UpdateConstraintsBuilder.
421     if (useGpuForUpdate)
422     {
423         GMX_RELEASE_ASSERT(!DOMAINDECOMP(cr) || ddUsesUpdateGroups(*cr->dd) || constr == nullptr
424                                    || constr->numConstraintsTotal() == 0,
425                            "Constraints in domain decomposition are only supported with update "
426                            "groups if using GPU update.\n");
427         GMX_RELEASE_ASSERT(ir->eConstrAlg != ConstraintAlgorithm::Shake || constr == nullptr
428                                    || constr->numConstraintsTotal() == 0,
429                            "SHAKE is not supported with GPU update.");
430         GMX_RELEASE_ASSERT(useGpuForPme || (useGpuForNonbonded && simulationWork.useGpuBufferOps),
431                            "Either PME or short-ranged non-bonded interaction tasks must run on "
432                            "the GPU to use GPU update.\n");
433         GMX_RELEASE_ASSERT(ir->eI == IntegrationAlgorithm::MD,
434                            "Only the md integrator is supported with the GPU update.\n");
435         GMX_RELEASE_ASSERT(
436                 ir->etc != TemperatureCoupling::NoseHoover,
437                 "Nose-Hoover temperature coupling is not supported with the GPU update.\n");
438         GMX_RELEASE_ASSERT(
439                 ir->epc == PressureCoupling::No || ir->epc == PressureCoupling::ParrinelloRahman
440                         || ir->epc == PressureCoupling::Berendsen || ir->epc == PressureCoupling::CRescale,
441                 "Only Parrinello-Rahman, Berendsen, and C-rescale pressure coupling are supported "
442                 "with the GPU update.\n");
443         GMX_RELEASE_ASSERT(!mdatoms->haveVsites,
444                            "Virtual sites are not supported with the GPU update.\n");
445         GMX_RELEASE_ASSERT(ed == nullptr,
446                            "Essential dynamics is not supported with the GPU update.\n");
447         GMX_RELEASE_ASSERT(!ir->bPull || !pull_have_constraint(*ir->pull),
448                            "Constraints pulling is not supported with the GPU update.\n");
449         GMX_RELEASE_ASSERT(fcdata.orires->nr == 0,
450                            "Orientation restraints are not supported with the GPU update.\n");
451         GMX_RELEASE_ASSERT(
452                 ir->efep == FreeEnergyPerturbationType::No
453                         || (!haveFepPerturbedMasses(top_global) && !havePerturbedConstraints(top_global)),
454                 "Free energy perturbation of masses and constraints is not supported with the GPU "
455                 "update.");
456
457         if (constr != nullptr && constr->numConstraintsTotal() > 0)
458         {
459             GMX_LOG(mdlog.info)
460                     .asParagraph()
461                     .appendText("Updating coordinates and applying constraints on the GPU.");
462         }
463         else
464         {
465             GMX_LOG(mdlog.info).asParagraph().appendText("Updating coordinates on the GPU.");
466         }
467         GMX_RELEASE_ASSERT(fr->deviceStreamManager != nullptr,
468                            "Device stream manager should be initialized in order to use GPU "
469                            "update-constraints.");
470         GMX_RELEASE_ASSERT(
471                 fr->deviceStreamManager->streamIsValid(gmx::DeviceStreamType::UpdateAndConstraints),
472                 "Update stream should be initialized in order to use GPU "
473                 "update-constraints.");
474         integrator = std::make_unique<UpdateConstrainGpu>(
475                 *ir,
476                 top_global,
477                 ekind->ngtc,
478                 fr->deviceStreamManager->context(),
479                 fr->deviceStreamManager->stream(gmx::DeviceStreamType::UpdateAndConstraints),
480                 stateGpu->xUpdatedOnDevice(),
481                 wcycle);
482
483         integrator->setPbc(PbcType::Xyz, state->box);
484     }
485
486     if (useGpuForPme || (useGpuForNonbonded && useGpuForBufferOps) || useGpuForUpdate)
487     {
488         changePinningPolicy(&state->x, PinningPolicy::PinnedIfSupported);
489     }
490     if (useGpuForUpdate)
491     {
492         changePinningPolicy(&state->v, PinningPolicy::PinnedIfSupported);
493     }
494
495     // NOTE: From here on the global state is no longer used for propagation;
496     // state_global only serves as temporary storage space for writing the
497     // global state to file and potentially for replica exchange.
498     // (The global topology should persist.)
499
500     update_mdatoms(mdatoms, state->lambda[FreeEnergyPerturbationCouplingType::Mass]);
501
502     if (ir->bExpanded)
503     {
504         /* Check nstexpanded here, because the grompp check was broken */
505         if (ir->expandedvals->nstexpanded % ir->nstcalcenergy != 0)
506         {
507             gmx_fatal(FARGS,
508                       "With expanded ensemble, nstexpanded should be a multiple of nstcalcenergy");
509         }
510         init_expanded_ensemble(startingBehavior != StartingBehavior::NewSimulation, ir, state->dfhist);
511     }
512
513     if (MASTER(cr))
514     {
515         EnergyData::initializeEnergyHistory(startingBehavior, observablesHistory, &energyOutput);
516     }
517
518     preparePrevStepPullCom(ir,
519                            pull_work,
520                            gmx::arrayRefFromArray(mdatoms->massT, mdatoms->nr),
521                            state,
522                            state_global,
523                            cr,
524                            startingBehavior != StartingBehavior::NewSimulation);
525
526     // TODO: Remove this by converting AWH into a ForceProvider
527     auto awh = prepareAwhModule(fplog,
528                                 *ir,
529                                 state_global,
530                                 cr,
531                                 ms,
532                                 startingBehavior != StartingBehavior::NewSimulation,
533                                 shellfc != nullptr,
534                                 opt2fn("-awh", nfile, fnm),
535                                 pull_work);
536
537     if (useReplicaExchange && MASTER(cr))
538     {
539         repl_ex = init_replica_exchange(fplog, ms, top_global.natoms, ir, replExParams);
540     }
541     /* PME tuning is only supported in the Verlet scheme, with PME for
542      * Coulomb. It is not supported with only LJ PME. */
543     bPMETune = (mdrunOptions.tunePme && EEL_PME(fr->ic->eeltype) && !mdrunOptions.reproducible
544                 && ir->cutoff_scheme != CutoffScheme::Group);
545
546     pme_load_balancing_t* pme_loadbal = nullptr;
547     if (bPMETune)
548     {
549         pme_loadbal_init(
550                 &pme_loadbal, cr, mdlog, *ir, state->box, *fr->ic, *fr->nbv, fr->pmedata, fr->nbv->useGpu());
551     }
552
553     if (!ir->bContinuation)
554     {
555         if (state->flags & enumValueToBitMask(StateEntry::V))
556         {
557             auto v = makeArrayRef(state->v);
558             /* Set the velocities of vsites, shells and frozen atoms to zero */
559             for (i = 0; i < mdatoms->homenr; i++)
560             {
561                 if (mdatoms->ptype[i] == ParticleType::Shell)
562                 {
563                     clear_rvec(v[i]);
564                 }
565                 else if (mdatoms->cFREEZE)
566                 {
567                     for (m = 0; m < DIM; m++)
568                     {
569                         if (ir->opts.nFreeze[mdatoms->cFREEZE[i]][m])
570                         {
571                             v[i][m] = 0;
572                         }
573                     }
574                 }
575             }
576         }
577
578         if (constr)
579         {
580             /* Constrain the initial coordinates and velocities */
581             do_constrain_first(fplog,
582                                constr,
583                                ir,
584                                mdatoms->nr,
585                                mdatoms->homenr,
586                                state->x.arrayRefWithPadding(),
587                                state->v.arrayRefWithPadding(),
588                                state->box,
589                                state->lambda[FreeEnergyPerturbationCouplingType::Bonded]);
590         }
591     }
592
593     if (ir->efep != FreeEnergyPerturbationType::No)
594     {
595         /* Set the free energy calculation frequency as the greatest common
596          * divisor of nstdhdl, nstexpanded, repl_ex_nst and the AWH sampling interval. */
597         nstfep = ir->fepvals->nstdhdl;
598         if (ir->bExpanded)
599         {
600             nstfep = std::gcd(ir->expandedvals->nstexpanded, nstfep);
601         }
602         if (useReplicaExchange)
603         {
604             nstfep = std::gcd(replExParams.exchangeInterval, nstfep);
605         }
606         if (ir->bDoAwh)
607         {
608             nstfep = std::gcd(ir->awhParams->nstSampleCoord(), nstfep);
609         }
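        // Hypothetical example: with nstdhdl = 50 and nstexpanded = 20 (and neither
        // replica exchange nor AWH), nstfep becomes gcd(20, 50) = 10, so free energies
        // are computed on every step at which either interval is due.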
610     }
611
612     /* Be REALLY careful about what flags you set here. You CANNOT assume
613      * this is the first step, since we might be restarting from a checkpoint,
614      * and in that case we should not do any modifications to the state.
615      */
616     bStopCM = (ir->comm_mode != ComRemovalAlgorithm::No && !ir->bContinuation);
617
618     // When restarting from a checkpoint, it can be appropriate to
619     // initialize ekind from quantities in the checkpoint. Otherwise,
620     // compute_globals must initialize ekind before the simulation
621     // starts/restarts. However, only the master rank knows what was
622     // found in the checkpoint file, so we have to communicate in
623     // order to coordinate the restart.
624     //
625     // TODO Consider removing this communication if/when checkpoint
626     // reading directly follows .tpr reading, because all ranks can
627     // agree on hasReadEkinState at that time.
628     bool hasReadEkinState = MASTER(cr) ? state_global->ekinstate.hasReadEkinState : false;
629     if (PAR(cr))
630     {
631         gmx_bcast(sizeof(hasReadEkinState), &hasReadEkinState, cr->mpi_comm_mygroup);
632     }
633     if (hasReadEkinState)
634     {
635         restore_ekinstate_from_state(cr, ekind, &state_global->ekinstate);
636     }
637
638     unsigned int cglo_flags =
639             (CGLO_TEMPERATURE | CGLO_GSTAT | (EI_VV(ir->eI) ? CGLO_PRESSURE : 0)
640              | (EI_VV(ir->eI) ? CGLO_CONSTRAINT : 0) | (hasReadEkinState ? CGLO_READEKIN : 0));
641
642     bSumEkinhOld = FALSE;
643
644     t_vcm vcm(top_global.groups, *ir);
645     reportComRemovalInfo(fplog, vcm);
646
647     /* To minimize communication, compute_globals computes the COM velocity
648      * and the kinetic energy for the velocities without COM motion removed.
649      * Thus to get the kinetic energy without the COM contribution, we need
650      * to call compute_globals twice.
651      */
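    // In the first pass below CGLO_STOPCM is set and CGLO_TEMPERATURE cleared, so
    // the COM velocity is determined and removed; the second pass then computes the
    // temperature from the COM-corrected velocities.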
652     for (int cgloIteration = 0; cgloIteration < (bStopCM ? 2 : 1); cgloIteration++)
653     {
654         unsigned int cglo_flags_iteration = cglo_flags;
655         if (bStopCM && cgloIteration == 0)
656         {
657             cglo_flags_iteration |= CGLO_STOPCM;
658             cglo_flags_iteration &= ~CGLO_TEMPERATURE;
659         }
660         if (DOMAINDECOMP(cr) && shouldCheckNumberOfBondedInteractions(*cr->dd) && cgloIteration == 0)
661         {
662             cglo_flags_iteration |= CGLO_CHECK_NUMBER_OF_BONDED_INTERACTIONS;
663         }
664         compute_globals(gstat,
665                         cr,
666                         ir,
667                         fr,
668                         ekind,
669                         makeConstArrayRef(state->x),
670                         makeConstArrayRef(state->v),
671                         state->box,
672                         mdatoms,
673                         nrnb,
674                         &vcm,
675                         nullptr,
676                         enerd,
677                         force_vir,
678                         shake_vir,
679                         total_vir,
680                         pres,
681                         gmx::ArrayRef<real>{},
682                         &nullSignaller,
683                         state->box,
684                         &bSumEkinhOld,
685                         cglo_flags_iteration);
686         if (cglo_flags_iteration & CGLO_STOPCM)
687         {
688             /* At initialization, do not pass x with acceleration-correction mode
689              * to avoid (incorrect) correction of the initial coordinates.
690              */
691             auto x = (vcm.mode == ComRemovalAlgorithm::LinearAccelerationCorrection)
692                              ? ArrayRef<RVec>()
693                              : makeArrayRef(state->x);
694             process_and_stopcm_grp(fplog, &vcm, *mdatoms, x, makeArrayRef(state->v));
695             inc_nrnb(nrnb, eNR_STOPCM, mdatoms->homenr);
696         }
697     }
698     if (DOMAINDECOMP(cr))
699     {
700         checkNumberOfBondedInteractions(
701                 mdlog, cr, top_global, &top, makeConstArrayRef(state->x), state->box);
702     }
703     if (ir->eI == IntegrationAlgorithm::VVAK)
704     {
705         /* A second call to get the half-step temperature initialized as well. */
706         /* We do the same call as above, but turn the pressure off -- internally to
707            compute_globals, this is recognized as a velocity-verlet half-step
708            kinetic energy calculation. This minimizes excess variables, but
709            perhaps loses some logic? */
710
711         compute_globals(gstat,
712                         cr,
713                         ir,
714                         fr,
715                         ekind,
716                         makeConstArrayRef(state->x),
717                         makeConstArrayRef(state->v),
718                         state->box,
719                         mdatoms,
720                         nrnb,
721                         &vcm,
722                         nullptr,
723                         enerd,
724                         force_vir,
725                         shake_vir,
726                         total_vir,
727                         pres,
728                         gmx::ArrayRef<real>{},
729                         &nullSignaller,
730                         state->box,
731                         &bSumEkinhOld,
732                         cglo_flags & ~CGLO_PRESSURE);
733     }
734
735     /* Calculate the initial half step temperature, and save the ekinh_old */
736     if (startingBehavior == StartingBehavior::NewSimulation)
737     {
738         for (i = 0; (i < ir->opts.ngtc); i++)
739         {
740             copy_mat(ekind->tcstat[i].ekinh, ekind->tcstat[i].ekinh_old);
741         }
742     }
743
744     /* We need to make an initialization call to get the Trotter variables set, as well as
745        other constants for non-Trotter temperature control. */
746     auto trotter_seq = init_npt_vars(ir, state, &MassQ, bTrotter);
747
748     if (MASTER(cr))
749     {
750         if (!ir->bContinuation)
751         {
752             if (constr && ir->eConstrAlg == ConstraintAlgorithm::Lincs)
753             {
754                 fprintf(fplog,
755                         "RMS relative constraint deviation after constraining: %.2e\n",
756                         constr->rmsd());
757             }
758             if (EI_STATE_VELOCITY(ir->eI))
759             {
760                 real temp = enerd->term[F_TEMP];
761                 if (ir->eI != IntegrationAlgorithm::VV)
762                 {
763                     /* Result of Ekin averaged over velocities of -half
764                      * and +half step, while we only have -half step here.
765                      */
766                     temp *= 2;
767                 }
768                 fprintf(fplog, "Initial temperature: %g K\n", temp);
769             }
770         }
771
772         char tbuf[20];
773         fprintf(stderr, "starting mdrun '%s'\n", *(top_global.name));
774         if (ir->nsteps >= 0)
775         {
776             sprintf(tbuf, "%8.1f", (ir->init_step + ir->nsteps) * ir->delta_t);
777         }
778         else
779         {
780             sprintf(tbuf, "%s", "infinite");
781         }
782         if (ir->init_step > 0)
783         {
784             fprintf(stderr,
785                     "%s steps, %s ps (continuing from step %s, %8.1f ps).\n",
786                     gmx_step_str(ir->init_step + ir->nsteps, sbuf),
787                     tbuf,
788                     gmx_step_str(ir->init_step, sbuf2),
789                     ir->init_step * ir->delta_t);
790         }
791         else
792         {
793             fprintf(stderr, "%s steps, %s ps.\n", gmx_step_str(ir->nsteps, sbuf), tbuf);
794         }
795         fprintf(fplog, "\n");
796     }
797
798     walltime_accounting_start_time(walltime_accounting);
799     wallcycle_start(wcycle, WallCycleCounter::Run);
800     print_start(fplog, cr, walltime_accounting, "mdrun");
801
802     /***********************************************************
803      *
804      *             Loop over MD steps
805      *
806      ************************************************************/
807
808     bFirstStep = TRUE;
809     /* Skip the first Nose-Hoover integration when we get the state from tpx */
810     bInitStep        = startingBehavior == StartingBehavior::NewSimulation || EI_VV(ir->eI);
811     bSumEkinhOld     = FALSE;
812     bExchanged       = FALSE;
813     bNeedRepartition = FALSE;
814
815     step     = ir->init_step;
816     step_rel = 0;
817
818     auto stopHandler = stopHandlerBuilder->getStopHandlerMD(
819             compat::not_null<SimulationSignal*>(&signals[eglsSTOPCOND]),
820             simulationsShareState,
821             MASTER(cr),
822             ir->nstlist,
823             mdrunOptions.reproducible,
824             nstSignalComm,
825             mdrunOptions.maximumHoursToRun,
826             ir->nstlist == 0,
827             fplog,
828             step,
829             bNS,
830             walltime_accounting);
831
832     auto checkpointHandler = std::make_unique<CheckpointHandler>(
833             compat::make_not_null<SimulationSignal*>(&signals[eglsCHKPT]),
834             simulationsShareState,
835             ir->nstlist == 0,
836             MASTER(cr),
837             mdrunOptions.writeConfout,
838             mdrunOptions.checkpointOptions.period);
839
840     const bool resetCountersIsLocal = true;
841     auto       resetHandler         = std::make_unique<ResetHandler>(
842             compat::make_not_null<SimulationSignal*>(&signals[eglsRESETCOUNTERS]),
843             !resetCountersIsLocal,
844             ir->nsteps,
845             MASTER(cr),
846             mdrunOptions.timingOptions.resetHalfway,
847             mdrunOptions.maximumHoursToRun,
848             mdlog,
849             wcycle,
850             walltime_accounting);
851
852     const DDBalanceRegionHandler ddBalanceRegionHandler(cr);
853
854     if (MASTER(cr) && isMultiSim(ms) && !useReplicaExchange)
855     {
856         logInitialMultisimStatus(ms, cr, mdlog, simulationsShareState, ir->nsteps, ir->init_step);
857     }
858
859     /* and stop now if we should */
860     bLastStep = (bLastStep || (ir->nsteps >= 0 && step_rel > ir->nsteps));
861     while (!bLastStep)
862     {
863
864         /* Determine if this is a neighbor search step */
865         bNStList = (ir->nstlist > 0 && step % ir->nstlist == 0);
866
867         if (bPMETune && bNStList)
868         {
869             // This has to be here because PME load balancing is called so early.
870             // TODO: Move to after all booleans are defined.
871             if (useGpuForUpdate && !bFirstStep)
872             {
873                 stateGpu->copyCoordinatesFromGpu(state->x, AtomLocality::Local);
874                 stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
875             }
876             /* PME grid + cut-off optimization with GPUs or PME nodes */
877             pme_loadbal_do(pme_loadbal,
878                            cr,
879                            (mdrunOptions.verbose && MASTER(cr)) ? stderr : nullptr,
880                            fplog,
881                            mdlog,
882                            *ir,
883                            fr,
884                            state->box,
885                            state->x,
886                            wcycle,
887                            step,
888                            step_rel,
889                            &bPMETunePrinting,
890                            simulationWork.useGpuPmePpCommunication);
891         }
892
893         wallcycle_start(wcycle, WallCycleCounter::Step);
894
895         bLastStep = (step_rel == ir->nsteps);
896         t         = t0 + step * ir->delta_t;
897
898         // TODO Refactor this, so that nstfep does not need a default value of zero
899         if (ir->efep != FreeEnergyPerturbationType::No || ir->bSimTemp)
900         {
901             /* find and set the current lambdas */
902             state->lambda = currentLambdas(step, *(ir->fepvals), state->fep_state);
903
904             bDoDHDL = do_per_step(step, ir->fepvals->nstdhdl);
905             bDoFEP  = ((ir->efep != FreeEnergyPerturbationType::No) && do_per_step(step, nstfep));
906             bDoExpanded = (do_per_step(step, ir->expandedvals->nstexpanded) && (ir->bExpanded)
907                            && (!bFirstStep));
908         }
909
910         bDoReplEx = (useReplicaExchange && (step > 0) && !bLastStep
911                      && do_per_step(step, replExParams.exchangeInterval));
912
913         if (doSimulatedAnnealing)
914         {
915             // TODO: Avoid changing inputrec (#3854)
916             // Simulated annealing updates the reference temperature.
917             auto* nonConstInputrec = const_cast<t_inputrec*>(inputrec);
918             update_annealing_target_temp(nonConstInputrec, t, &upd);
919         }
920
921         /* Stop Center of Mass motion */
922         bStopCM = (ir->comm_mode != ComRemovalAlgorithm::No && do_per_step(step, ir->nstcomm));
923
924         /* Determine whether or not to do Neighbour Searching */
925         bNS = (bFirstStep || bNStList || bExchanged || bNeedRepartition);
926
927         /* Note that the stopHandler will cause termination at nstglobalcomm
928          * steps. Since this coincides with nstcalcenergy, nsttcouple and/or
929          * nstpcouple steps, we have computed the half-step kinetic energy
930          * of the previous step and can always output energies at the last step.
931          */
932         bLastStep = bLastStep || stopHandler->stoppingAfterCurrentStep(bNS);
933
934         /* do_log triggers energy and virial calculation. Because this leads
935          * to different code paths, forces can be different. Thus for exact
936          * continuation we should avoid extra log output.
937          * Note that the || bLastStep can result in non-exact continuation
938          * beyond the last step. But we don't consider that to be an issue.
939          */
940         do_log     = (do_per_step(step, ir->nstlog)
941                   || (bFirstStep && startingBehavior == StartingBehavior::NewSimulation) || bLastStep);
942         do_verbose = mdrunOptions.verbose
943                      && (step % mdrunOptions.verboseStepPrintInterval == 0 || bFirstStep || bLastStep);
944
945         if (useGpuForUpdate && !bFirstStep && bNS)
946         {
947             // Copy velocities from the GPU on search steps to keep a copy on host (device buffers are reinitialized).
948             stateGpu->copyVelocitiesFromGpu(state->v, AtomLocality::Local);
949             stateGpu->waitVelocitiesReadyOnHost(AtomLocality::Local);
950             // Copy coordinates from the GPU when needed at the search step.
951             // NOTE: The cases where coordinates are needed on the CPU for force evaluation are handled in sim_utils.
952             // NOTE: If the coordinates are to be written into output file they are also copied separately before the output.
953             stateGpu->copyCoordinatesFromGpu(state->x, AtomLocality::Local);
954             stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
955         }
956
957         // We only need to calculate virtual site velocities if we are writing them in the current step
958         const bool needVirtualVelocitiesThisStep =
959                 (vsite != nullptr)
960                 && (do_per_step(step, ir->nstvout) || checkpointHandler->isCheckpointingStep());
961
962         if (vsite != nullptr)
963         {
964             // Virtual sites need to be updated before domain decomposition and forces are calculated
965             wallcycle_start(wcycle, WallCycleCounter::VsiteConstr);
966             // md-vv calculates virtual velocities once it has full-step real velocities
967             vsite->construct(state->x,
968                              state->v,
969                              state->box,
970                              (!EI_VV(inputrec->eI) && needVirtualVelocitiesThisStep)
971                                      ? VSiteOperation::PositionsAndVelocities
972                                      : VSiteOperation::Positions);
973             wallcycle_stop(wcycle, WallCycleCounter::VsiteConstr);
974         }
975
976         if (bNS && !(bFirstStep && ir->bContinuation))
977         {
978             bMasterState = FALSE;
979             /* Correct the new box if it is too skewed */
980             if (inputrecDynamicBox(ir))
981             {
982                 if (correct_box(fplog, step, state->box))
983                 {
984                     bMasterState = TRUE;
985                     // If update is offloaded, it should be informed about the box size change
986                     if (useGpuForUpdate)
987                     {
988                         integrator->setPbc(PbcType::Xyz, state->box);
989                     }
990                 }
991             }
992             if (DOMAINDECOMP(cr) && bMasterState)
993             {
994                 dd_collect_state(cr->dd, state, state_global);
995             }
996
997             if (DOMAINDECOMP(cr))
998             {
999                 /* Repartition the domain decomposition */
1000                 dd_partition_system(fplog,
1001                                     mdlog,
1002                                     step,
1003                                     cr,
1004                                     bMasterState,
1005                                     nstglobalcomm,
1006                                     state_global,
1007                                     top_global,
1008                                     *ir,
1009                                     imdSession,
1010                                     pull_work,
1011                                     state,
1012                                     &f,
1013                                     mdAtoms,
1014                                     &top,
1015                                     fr,
1016                                     vsite,
1017                                     constr,
1018                                     nrnb,
1019                                     wcycle,
1020                                     do_verbose && !bPMETunePrinting);
1021                 upd.updateAfterPartition(state->natoms,
1022                                          md->cFREEZE ? gmx::arrayRefFromArray(md->cFREEZE, md->nr)
1023                                                      : gmx::ArrayRef<const unsigned short>(),
1024                                          md->cTC ? gmx::arrayRefFromArray(md->cTC, md->nr)
1025                                                  : gmx::ArrayRef<const unsigned short>());
1026             }
1027         }
1028
1029         // Allocate or re-size GPU halo exchange object, if necessary
1030         if (bNS && havePPDomainDecomposition(cr) && simulationWork.useGpuHaloExchange)
1031         {
1032             GMX_RELEASE_ASSERT(fr->deviceStreamManager != nullptr,
1033                                "GPU device manager has to be initialized to use GPU "
1034                                "version of halo exchange.");
1035             constructGpuHaloExchange(mdlog, *cr, *fr->deviceStreamManager, wcycle);
1036         }
1037
1038         if (MASTER(cr) && do_log)
1039         {
1040             gmx::EnergyOutput::printHeader(
1041                     fplog, step, t); /* can we improve the information printed here? */
1042         }
1043
1044         if (ir->efep != FreeEnergyPerturbationType::No)
1045         {
1046             update_mdatoms(mdatoms, state->lambda[FreeEnergyPerturbationCouplingType::Mass]);
1047         }
1048
1049         if (bExchanged)
1050         {
1051             /* We need the kinetic energy at minus the half step for determining
1052              * the full step kinetic energy and possibly for T-coupling.*/
1053             /* This may not be quite working correctly yet . . . . */
1054             int cglo_flags = CGLO_GSTAT | CGLO_TEMPERATURE;
1055             if (DOMAINDECOMP(cr) && shouldCheckNumberOfBondedInteractions(*cr->dd))
1056             {
1057                 cglo_flags |= CGLO_CHECK_NUMBER_OF_BONDED_INTERACTIONS;
1058             }
1059             compute_globals(gstat,
1060                             cr,
1061                             ir,
1062                             fr,
1063                             ekind,
1064                             makeConstArrayRef(state->x),
1065                             makeConstArrayRef(state->v),
1066                             state->box,
1067                             mdatoms,
1068                             nrnb,
1069                             &vcm,
1070                             wcycle,
1071                             enerd,
1072                             nullptr,
1073                             nullptr,
1074                             nullptr,
1075                             nullptr,
1076                             gmx::ArrayRef<real>{},
1077                             &nullSignaller,
1078                             state->box,
1079                             &bSumEkinhOld,
1080                             cglo_flags);
1081             if (DOMAINDECOMP(cr))
1082             {
1083                 checkNumberOfBondedInteractions(
1084                         mdlog, cr, top_global, &top, makeConstArrayRef(state->x), state->box);
1085             }
1086         }
1087         clear_mat(force_vir);
1088
1089         checkpointHandler->decideIfCheckpointingThisStep(bNS, bFirstStep, bLastStep);
1090
1091         /* Determine the energy and pressure:
1092          * at nstcalcenergy steps and at energy output steps (set below).
1093          */
1094         if (EI_VV(ir->eI) && (!bInitStep))
1095         {
1096             bCalcEnerStep = do_per_step(step, ir->nstcalcenergy);
1097             bCalcVir      = bCalcEnerStep
1098                        || (ir->epc != PressureCoupling::No
1099                            && (do_per_step(step, ir->nstpcouple) || do_per_step(step - 1, ir->nstpcouple)));
1100         }
1101         else
1102         {
1103             bCalcEnerStep = do_per_step(step, ir->nstcalcenergy);
1104             bCalcVir      = bCalcEnerStep
1105                        || (ir->epc != PressureCoupling::No && do_per_step(step, ir->nstpcouple));
1106         }
1107         bCalcEner = bCalcEnerStep;
1108
1109         do_ene = (do_per_step(step, ir->nstenergy) || bLastStep);
1110
1111         if (do_ene || do_log || bDoReplEx)
1112         {
1113             bCalcVir  = TRUE;
1114             bCalcEner = TRUE;
1115         }
1116
1117         /* Do we need global communication? */
1118         bGStat = (bCalcVir || bCalcEner || bStopCM || do_per_step(step, nstglobalcomm)
1119                   || (EI_VV(ir->eI) && inputrecNvtTrotter(ir) && do_per_step(step - 1, nstglobalcomm)));
1120
1121         force_flags = (GMX_FORCE_STATECHANGED | ((inputrecDynamicBox(ir)) ? GMX_FORCE_DYNAMICBOX : 0)
1122                        | GMX_FORCE_ALLFORCES | (bCalcVir ? GMX_FORCE_VIRIAL : 0)
1123                        | (bCalcEner ? GMX_FORCE_ENERGY : 0) | (bDoFEP ? GMX_FORCE_DHDL : 0));
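        // When using multiple time stepping, the normal force buffer is only needed
        // on steps where forces are written (nstfout); the flag set below marks it as
        // not needed on the other steps.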
1124         if (fr->useMts && !do_per_step(step, ir->nstfout))
1125         {
1126             force_flags |= GMX_FORCE_DO_NOT_NEED_NORMAL_FORCE;
1127         }
1128
1129         if (shellfc)
1130         {
1131             /* Now is the time to relax the shells */
1132             relax_shell_flexcon(fplog,
1133                                 cr,
1134                                 ms,
1135                                 mdrunOptions.verbose,
1136                                 enforcedRotation,
1137                                 step,
1138                                 ir,
1139                                 imdSession,
1140                                 pull_work,
1141                                 bNS,
1142                                 force_flags,
1143                                 &top,
1144                                 constr,
1145                                 enerd,
1146                                 state->natoms,
1147                                 state->x.arrayRefWithPadding(),
1148                                 state->v.arrayRefWithPadding(),
1149                                 state->box,
1150                                 state->lambda,
1151                                 &state->hist,
1152                                 &f.view(),
1153                                 force_vir,
1154                                 *mdatoms,
1155                                 nrnb,
1156                                 wcycle,
1157                                 shellfc,
1158                                 fr,
1159                                 runScheduleWork,
1160                                 t,
1161                                 mu_tot,
1162                                 vsite,
1163                                 ddBalanceRegionHandler);
1164         }
1165         else
1166         {
1167             /* The AWH history needs to be saved _before_ doing force calculations where the AWH bias
1168                is updated (or the AWH update will be performed twice for one step when continuing).
1169                It would be best to call this update function from do_md_trajectory_writing, but that
1170                would occur after do_force. One would have to divide the update_awh function into one
1171                function applying the AWH force and one doing the AWH bias update. The update-AWH-bias
1172                function could then be called after do_md_trajectory_writing (then containing
1173                update_awh_history). The checkpointing will probably be moved to the start of the md
1174                loop in the future, which will get rid of this issue. */
1175             if (awh && checkpointHandler->isCheckpointingStep() && MASTER(cr))
1176             {
1177                 awh->updateHistory(state_global->awhHistory.get());
1178             }
1179
1180             /* The coordinates (x) are shifted (to get whole molecules)
1181              * in do_force.
1182              * This is parallelized as well, and does communication too.
1183              * Check comments in sim_util.c
1184              */
1185             do_force(fplog,
1186                      cr,
1187                      ms,
1188                      *ir,
1189                      awh.get(),
1190                      enforcedRotation,
1191                      imdSession,
1192                      pull_work,
1193                      step,
1194                      nrnb,
1195                      wcycle,
1196                      &top,
1197                      state->box,
1198                      state->x.arrayRefWithPadding(),
1199                      &state->hist,
1200                      &f.view(),
1201                      force_vir,
1202                      mdatoms,
1203                      enerd,
1204                      state->lambda,
1205                      fr,
1206                      runScheduleWork,
1207                      vsite,
1208                      mu_tot,
1209                      t,
1210                      ed ? ed->getLegacyED() : nullptr,
1211                      (bNS ? GMX_FORCE_NS : 0) | force_flags,
1212                      ddBalanceRegionHandler);
1213         }
1214
1215         // VV integrators do not need the following velocity half step
1216         // if it is the first step after starting from a checkpoint.
1217         // That is, the half step is needed on all other steps, and
1218         // also the first step when starting from a .tpr file.
1219         if (EI_VV(ir->eI))
1220         {
1221             integrateVVFirstStep(step,
1222                                  bFirstStep,
1223                                  bInitStep,
1224                                  startingBehavior,
1225                                  nstglobalcomm,
1226                                  ir,
1227                                  fr,
1228                                  cr,
1229                                  state,
1230                                  mdatoms,
1231                                  fcdata,
1232                                  &MassQ,
1233                                  &vcm,
1234                                  top_global,
1235                                  top,
1236                                  enerd,
1237                                  ekind,
1238                                  gstat,
1239                                  &last_ekin,
1240                                  bCalcVir,
1241                                  total_vir,
1242                                  shake_vir,
1243                                  force_vir,
1244                                  pres,
1245                                  M,
1246                                  do_log,
1247                                  do_ene,
1248                                  bCalcEner,
1249                                  bGStat,
1250                                  bStopCM,
1251                                  bTrotter,
1252                                  bExchanged,
1253                                  &bSumEkinhOld,
1254                                  &saved_conserved_quantity,
1255                                  &f,
1256                                  &upd,
1257                                  constr,
1258                                  &nullSignaller,
1259                                  trotter_seq,
1260                                  nrnb,
1261                                  mdlog,
1262                                  fplog,
1263                                  wcycle);
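            // Virtual sites are massless interaction sites without independent dynamics, so after
            // the velocity half-step their velocities are constructed from those of the atoms that
            // define them (VSiteOperation::Velocities below).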
1264             if (vsite != nullptr && needVirtualVelocitiesThisStep)
1265             {
1266                 // Positions were calculated earlier
1267                 wallcycle_start(wcycle, WallCycleCounter::VsiteConstr);
1268                 vsite->construct(state->x, state->v, state->box, VSiteOperation::Velocities);
1269                 wallcycle_stop(wcycle, WallCycleCounter::VsiteConstr);
1270             }
1271         }
1272
1273         /* ########  END FIRST UPDATE STEP  ############## */
1274         /* ########  If doing VV, we now have v(dt) ###### */
1275         if (bDoExpanded)
1276         {
1277             /* Perform extended-ensemble sampling in lambda - we don't
1278                actually move to the new state until the statistics have been
1279                output, but if performing simulated tempering, we do update
1280                the velocities and the tau_t. */
1281             // TODO: Avoid changing inputrec (#3854)
1282             // Simulated tempering updates the reference temperature.
1283             // Expanded ensemble without simulated tempering does not change the inputrec.
1284             auto* nonConstInputrec = const_cast<t_inputrec*>(inputrec);
1285             lamnew                 = ExpandedEnsembleDynamics(fplog,
1286                                               nonConstInputrec,
1287                                               enerd,
1288                                               state,
1289                                               &MassQ,
1290                                               state->fep_state,
1291                                               state->dfhist,
1292                                               step,
1293                                               state->v.rvec_array(),
1294                                               mdatoms);
1295             /* history is maintained in state->dfhist, but state_global is what is sent to trajectory and log output */
1296             if (MASTER(cr))
1297             {
1298                 copy_df_history(state_global->dfhist, state->dfhist);
1299             }
1300         }
1301
1302         // Copy coordinates from the GPU for output/checkpointing if the update is offloaded and
1303         // coordinates have not already been copied for i) search or ii) CPU force tasks.
1304         if (useGpuForUpdate && !bNS && !runScheduleWork->domainWork.haveCpuLocalForceWork
1305             && (do_per_step(step, ir->nstxout) || do_per_step(step, ir->nstxout_compressed)
1306                 || checkpointHandler->isCheckpointingStep()))
1307         {
1308             stateGpu->copyCoordinatesFromGpu(state->x, AtomLocality::Local);
1309             stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
1310         }
1311         // Copy velocities if needed for the output/checkpointing.
1312         // NOTE: Copy on the search steps is done at the beginning of the step.
1313         if (useGpuForUpdate && !bNS
1314             && (do_per_step(step, ir->nstvout) || checkpointHandler->isCheckpointingStep()))
1315         {
1316             stateGpu->copyVelocitiesFromGpu(state->v, AtomLocality::Local);
1317             stateGpu->waitVelocitiesReadyOnHost(AtomLocality::Local);
1318         }
1319         // Copy forces for output if the forces were reduced on the GPU (not the case on virial steps)
1320         // and the update is offloaded, in which case the forces are kept on the GPU for the update
1321         // and have not already been transferred in do_force().
1322         // TODO: There should be an improved, explicit mechanism that ensures this copy is only executed
1323         //       when the forces are ready on the GPU -- the same synchronizer should be used as the one
1324         //       prior to the GPU update.
1325         // TODO: Once the output flags are included in the step workload, this copy can be combined with the
1326         //       copy call in do_force(...).
1327         // NOTE: The forces should not be copied here if vsites are present, since they were modified
1328         //       on the host after the D2H copy in do_force(...).
1329         if (runScheduleWork->stepWork.useGpuFBufferOps && (simulationWork.useGpuUpdate && !vsite)
1330             && do_per_step(step, ir->nstfout))
1331         {
1332             stateGpu->copyForcesFromGpu(f.view().force(), AtomLocality::Local);
1333             stateGpu->waitForcesReadyOnHost(AtomLocality::Local);
1334         }
1335         /* Now we have the energies and forces corresponding to the
1336          * coordinates at time t. We must output all of this before
1337          * the update.
1338          */
1339         do_md_trajectory_writing(fplog,
1340                                  cr,
1341                                  nfile,
1342                                  fnm,
1343                                  step,
1344                                  step_rel,
1345                                  t,
1346                                  ir,
1347                                  state,
1348                                  state_global,
1349                                  observablesHistory,
1350                                  top_global,
1351                                  fr,
1352                                  outf,
1353                                  energyOutput,
1354                                  ekind,
1355                                  f.view().force(),
1356                                  checkpointHandler->isCheckpointingStep(),
1357                                  bRerunMD,
1358                                  bLastStep,
1359                                  mdrunOptions.writeConfout,
1360                                  bSumEkinhOld);
1361         /* Check if IMD step and do IMD communication, if bIMD is TRUE. */
1362         bInteractiveMDstep = imdSession->run(step, bNS, state->box, state->x, t);
1363
1364         /* Kludge: the virial is lost on restart with MTTK NPT control, so reload the values saved earlier. */
1365         if (startingBehavior != StartingBehavior::NewSimulation && bFirstStep
1366             && (inputrecNptTrotter(ir) || inputrecNphTrotter(ir)))
1367         {
1368             copy_mat(state->svir_prev, shake_vir);
1369             copy_mat(state->fvir_prev, force_vir);
1370         }
1371
1372         stopHandler->setSignal();
1373         resetHandler->setSignal(walltime_accounting);
1374
1375         if (bGStat || !PAR(cr))
1376         {
1377             /* In parallel we only have to check for checkpointing in steps
1378              * where we do global communication,
1379              * otherwise the other nodes would not know about the signal.
1380              */
1381             checkpointHandler->setSignal(walltime_accounting);
1382         }
1383
1384         /* #########   START SECOND UPDATE STEP ################# */
1385
1386         /* At the start of the step, randomize or scale the velocities (if VV); restrictions
1387            on Andersen coupling are enforced in preprocessing. */
1388
1389         if (ETC_ANDERSEN(ir->etc)) /* keep this outside of update_tcouple because of the extra info required to pass */
1390         {
1391             gmx_bool bIfRandomize;
1392             bIfRandomize = update_randomize_velocities(ir, step, cr, mdatoms, state->v, &upd, constr);
1393             /* if we have constraints, we have to remove the kinetic energy parallel to the bonds */
1394             if (constr && bIfRandomize)
1395             {
1396                 constrain_velocities(constr, do_log, do_ene, step, state, nullptr, false, nullptr);
1397             }
1398         }
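        // Andersen-type coupling reassigns velocities from the Maxwell-Boltzmann distribution at the
        // coupling temperature; any component along constrained bonds introduced by the redraw is
        // removed again by the constrain_velocities() call above.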
1399         /* Box is changed in update() when we do pressure coupling,
1400          * but we should still use the old box for energy corrections and when
1401          * writing it to the energy file, so it matches the trajectory files for
1402          * the same timestep above. Make a copy in a separate array.
1403          */
1404         copy_mat(state->box, lastbox);
1405
1406         dvdl_constr = 0;
1407
1408         if (!useGpuForUpdate)
1409         {
1410             wallcycle_start(wcycle, WallCycleCounter::Update);
1411         }
1412         /* UPDATE PRESSURE VARIABLES IN TROTTER FORMULATION WITH CONSTRAINTS */
1413         if (bTrotter)
1414         {
1415             trotter_update(ir, step, ekind, enerd, state, total_vir, mdatoms, &MassQ, trotter_seq, ettTSEQ3);
1416             /* We can only do Berendsen coupling after we have summed
1417              * the kinetic energy or virial. Since that happens
1418              * in global_state after the update, we should only do it at
1419              * step % nstlist = 1 with bGStatEveryStep=FALSE.
1420              */
1421         }
1422         else
1423         {
1424             update_tcouple(step, ir, state, ekind, &MassQ, mdatoms);
1425             update_pcouple_before_coordinates(fplog, step, ir, state, pressureCouplingMu, M, bInitStep);
1426         }
1427
1428         /* With leap-frog type integrators we compute the kinetic energy
1429          * at a whole time step as the average of the half-time step kinetic
1430          * energies of two subsequent steps. Therefore we also need to compute
1431          * the half-step kinetic energy when energies are needed at the next step.
1432          */
1433         const bool needHalfStepKineticEnergy =
1434                 (!EI_VV(ir->eI) && (do_per_step(step + 1, nstglobalcomm) || step_rel + 1 == ir->nsteps));
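        // Illustration: with leap-frog the full-step kinetic energy at time t is approximated as
        //     Ekin(t) ~= 1/2 * ( Ekin(t - dt/2) + Ekin(t + dt/2) ),
        // so the half-step kinetic energy of step+1 is also needed when energies are requested at
        // the next communication step or at the final step.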
1435
1436         // Parrinello-Rahman requires the pressure to be available before the update to compute
1437         // the velocity scaling matrix. Hence, it runs one step after the nstpcouple step.
1438         const bool doParrinelloRahman = (ir->epc == PressureCoupling::ParrinelloRahman
1439                                          && do_per_step(step + ir->nstpcouple - 1, ir->nstpcouple));
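        // For example, with nstpcouple = N > 1 and assuming do_per_step(s, n) is true when s is a
        // multiple of n, this condition holds on steps with step % N == 1, i.e. one step after the
        // pressure has been computed on the nstpcouple steps (step % N == 0).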
1440
1441         if (EI_VV(ir->eI))
1442         {
1443             GMX_ASSERT(!useGpuForUpdate, "GPU update is not supported with VVAK integrator.");
1444
1445             integrateVVSecondStep(step,
1446                                   ir,
1447                                   fr,
1448                                   cr,
1449                                   state,
1450                                   mdatoms,
1451                                   fcdata,
1452                                   &MassQ,
1453                                   &vcm,
1454                                   pull_work,
1455                                   enerd,
1456                                   ekind,
1457                                   gstat,
1458                                   &dvdl_constr,
1459                                   bCalcVir,
1460                                   total_vir,
1461                                   shake_vir,
1462                                   force_vir,
1463                                   pres,
1464                                   M,
1465                                   lastbox,
1466                                   do_log,
1467                                   do_ene,
1468                                   bGStat,
1469                                   &bSumEkinhOld,
1470                                   &f,
1471                                   &cbuf,
1472                                   &upd,
1473                                   constr,
1474                                   &nullSignaller,
1475                                   trotter_seq,
1476                                   nrnb,
1477                                   wcycle);
1478         }
1479         else
1480         {
1481             if (useGpuForUpdate)
1482             {
1483                 if (bNS && (bFirstStep || DOMAINDECOMP(cr)))
1484                 {
1485                     integrator->set(stateGpu->getCoordinates(),
1486                                     stateGpu->getVelocities(),
1487                                     stateGpu->getForces(),
1488                                     top.idef,
1489                                     *mdatoms);
1490
1491                     // Copy data to the GPU, since the buffers might have been reinitialized
1492                     stateGpu->copyVelocitiesToGpu(state->v, AtomLocality::Local);
1493                     stateGpu->copyCoordinatesToGpu(state->x, AtomLocality::Local);
1494                 }
1495
1496                 if (simulationWork.useGpuPme && !runScheduleWork->simulationWork.useGpuPmePpCommunication
1497                     && !thisRankHasDuty(cr, DUTY_PME))
1498                 {
1499                     // The PME forces were received on the host, so they have to be copied
1500                     stateGpu->copyForcesToGpu(f.view().force(), AtomLocality::All);
1501                 }
1502                 else if (!runScheduleWork->stepWork.useGpuFBufferOps)
1503                 {
1504                     // The buffer ops were not offloaded this step, so the forces are on the
1505                     // host and have to be copied
1506                     stateGpu->copyForcesToGpu(f.view().force(), AtomLocality::Local);
1507                 }
1508
1509                 const bool doTemperatureScaling =
1510                         (ir->etc != TemperatureCoupling::No
1511                          && do_per_step(step + ir->nsttcouple - 1, ir->nsttcouple));
1512
1513                 // This applies Leap-Frog, LINCS and SETTLE in succession
1514                 integrator->integrate(
1515                         stateGpu->getForcesReadyOnDeviceEvent(
1516                                 AtomLocality::Local, runScheduleWork->stepWork.useGpuFBufferOps),
1517                         ir->delta_t,
1518                         true,
1519                         bCalcVir,
1520                         shake_vir,
1521                         doTemperatureScaling,
1522                         ekind->tcstat,
1523                         doParrinelloRahman,
1524                         ir->nstpcouple * ir->delta_t,
1525                         M);
1526
1527                 // Copy velocities D2H after update if:
1528                 // - Globals are computed this step (includes the energy output steps).
1529                 // - Temperature is needed for the next step.
1530                 if (bGStat || needHalfStepKineticEnergy)
1531                 {
1532                     stateGpu->copyVelocitiesFromGpu(state->v, AtomLocality::Local);
1533                     stateGpu->waitVelocitiesReadyOnHost(AtomLocality::Local);
1534                 }
1535             }
1536             else
1537             {
1538                 /* With multiple time stepping we need to do an additional normal
1539                  * update step to obtain the virial, as the actual MTS integration
1540                  * uses an acceleration where the slow forces are multiplied by mtsFactor.
1541                  * Using that acceleration would result in a virial where the slow
1542                  * force contribution is a factor mtsFactor too large.
1543                  */
1544                 if (fr->useMts && bCalcVir && constr != nullptr)
1545                 {
1546                     upd.update_for_constraint_virial(
1547                             *ir,
1548                             mdatoms->homenr,
1549                             mdatoms->havePartiallyFrozenAtoms,
1550                             gmx::arrayRefFromArray(mdatoms->invmass, mdatoms->nr),
1551                             gmx::arrayRefFromArray(mdatoms->invMassPerDim, mdatoms->nr),
1552                             *state,
1553                             f.view().forceWithPadding(),
1554                             *ekind);
1555
1556                     constrain_coordinates(constr,
1557                                           do_log,
1558                                           do_ene,
1559                                           step,
1560                                           state,
1561                                           upd.xp()->arrayRefWithPadding(),
1562                                           &dvdl_constr,
1563                                           bCalcVir,
1564                                           shake_vir);
1565                 }
1566
1567                 ArrayRefWithPadding<const RVec> forceCombined =
1568                         (fr->useMts && step % ir->mtsLevels[1].stepFactor == 0)
1569                                 ? f.view().forceMtsCombinedWithPadding()
1570                                 : f.view().forceWithPadding();
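                // For example, with mtsLevels[1].stepFactor == 2 the slow forces enter every second
                // step: on those steps the combined buffer (fast forces plus mtsFactor-scaled slow
                // forces, cf. the MTS virial comment above) drives the update, otherwise only the
                // fast-force buffer is used.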
1571                 upd.update_coords(*ir,
1572                                   step,
1573                                   mdatoms->homenr,
1574                                   mdatoms->havePartiallyFrozenAtoms,
1575                                   gmx::arrayRefFromArray(md->ptype, md->nr),
1576                                   gmx::arrayRefFromArray(md->invmass, md->nr),
1577                                   gmx::arrayRefFromArray(md->invMassPerDim, md->nr),
1578                                   state,
1579                                   forceCombined,
1580                                   fcdata,
1581                                   ekind,
1582                                   M,
1583                                   etrtPOSITION,
1584                                   cr,
1585                                   constr != nullptr);
1586
1587                 wallcycle_stop(wcycle, WallCycleCounter::Update);
1588
1589                 constrain_coordinates(constr,
1590                                       do_log,
1591                                       do_ene,
1592                                       step,
1593                                       state,
1594                                       upd.xp()->arrayRefWithPadding(),
1595                                       &dvdl_constr,
1596                                       bCalcVir && !fr->useMts,
1597                                       shake_vir);
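                // Note that the virial is requested here only when MTS is not in use
                // (bCalcVir && !fr->useMts): with MTS the constraint virial was already obtained
                // from the separate update_for_constraint_virial() pass above.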
1598
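                // For stochastic dynamics (sd) the velocity update is split into two halves around
                // the constraint step; update_sd_second_half() applies the second, friction/noise
                // half here and is expected to be a no-op for the deterministic integrators.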
1599                 upd.update_sd_second_half(*ir,
1600                                           step,
1601                                           &dvdl_constr,
1602                                           mdatoms->homenr,
1603                                           gmx::arrayRefFromArray(md->ptype, md->nr),
1604                                           gmx::arrayRefFromArray(md->invmass, md->nr),
1605                                           state,
1606                                           cr,
1607                                           nrnb,
1608                                           wcycle,
1609                                           constr,
1610                                           do_log,
1611                                           do_ene);
1612                 upd.finish_update(*ir, mdatoms, state, wcycle, constr != nullptr);
1613             }
1614
1615             if (ir->bPull && ir->pull->bSetPbcRefToPrevStepCOM)
1616             {
1617                 updatePrevStepPullCom(pull_work, state);
1618             }
1619
1620             enerd->term[F_DVDL_CONSTR] += dvdl_constr;
1621         }
1622
1623         /* ############## IF NOT VV, Calculate globals HERE  ############ */
1624         /* With Leap-Frog we can skip compute_globals at
1625          * non-communication steps, but we need to calculate
1626          * the kinetic energy one step before communication.
1627          */
1628         {
1629             // Organize to do inter-simulation signalling on steps if
1630             // and when algorithms require it.
1631             const bool doInterSimSignal = (simulationsShareState && do_per_step(step, nstSignalComm));
1632
1633             if (bGStat || needHalfStepKineticEnergy || doInterSimSignal)
1634             {
1635                 // Copy coordinates when needed to stop the CM motion.
1636                 if (useGpuForUpdate && !EI_VV(ir->eI) && bStopCM)
1637                 {
1638                     stateGpu->copyCoordinatesFromGpu(state->x, AtomLocality::Local);
1639                     stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
1640                 }
1641                 // Since we're already communicating at this step, we
1642                 // can propagate intra-simulation signals. Note that
1643                 // check_nstglobalcomm is responsible for choosing
1644                 // the value of nstglobalcomm, which is one way that
1645                 // bGStat becomes true, so we cannot get into a
1646                 // situation where e.g. checkpointing cannot be
1647                 // signalled.
1648                 bool                doIntraSimSignal = true;
1649                 SimulationSignaller signaller(&signals, cr, ms, doInterSimSignal, doIntraSimSignal);
1650
1651                 compute_globals(
1652                         gstat,
1653                         cr,
1654                         ir,
1655                         fr,
1656                         ekind,
1657                         makeConstArrayRef(state->x),
1658                         makeConstArrayRef(state->v),
1659                         state->box,
1660                         mdatoms,
1661                         nrnb,
1662                         &vcm,
1663                         wcycle,
1664                         enerd,
1665                         force_vir,
1666                         shake_vir,
1667                         total_vir,
1668                         pres,
1669                         (!EI_VV(ir->eI) && bCalcEner && constr != nullptr) ? constr->rmsdData()
1670                                                                            : gmx::ArrayRef<real>{},
1671                         &signaller,
1672                         lastbox,
1673                         &bSumEkinhOld,
1674                         (bGStat ? CGLO_GSTAT : 0) | (!EI_VV(ir->eI) && bCalcEner ? CGLO_ENERGY : 0)
1675                                 | (!EI_VV(ir->eI) && bStopCM ? CGLO_STOPCM : 0)
1676                                 | (!EI_VV(ir->eI) ? CGLO_TEMPERATURE : 0)
1677                                 | (!EI_VV(ir->eI) ? CGLO_PRESSURE : 0) | CGLO_CONSTRAINT
1678                                 | (DOMAINDECOMP(cr) && shouldCheckNumberOfBondedInteractions(*cr->dd)
1679                                            ? CGLO_CHECK_NUMBER_OF_BONDED_INTERACTIONS
1680                                            : 0));
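                // For example, on a leap-frog global-communication step with energy calculation,
                // no COM-motion removal and no bonded-interaction check, the flags evaluate to
                //     CGLO_GSTAT | CGLO_ENERGY | CGLO_TEMPERATURE | CGLO_PRESSURE | CGLO_CONSTRAINT.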
1681                 if (DOMAINDECOMP(cr))
1682                 {
1683                     checkNumberOfBondedInteractions(
1684                             mdlog, cr, top_global, &top, makeConstArrayRef(state->x), state->box);
1685                 }
1686                 if (!EI_VV(ir->eI) && bStopCM)
1687                 {
1688                     process_and_stopcm_grp(
1689                             fplog, &vcm, *mdatoms, makeArrayRef(state->x), makeArrayRef(state->v));
1690                     inc_nrnb(nrnb, eNR_STOPCM, mdatoms->homenr);
1691
1692                     // TODO: The special case of removing CM motion should be dealt with more gracefully
1693                     if (useGpuForUpdate)
1694                     {
1695                         stateGpu->copyCoordinatesToGpu(state->x, AtomLocality::Local);
1696                         // Here we block until the H2D copy completes because event sync with the
1697                         // force kernels that use the coordinates on the next steps is not implemented
1698                         // (not because of a race on state->x being modified on the CPU while H2D is in progress).
1699                         stateGpu->waitCoordinatesCopiedToDevice(AtomLocality::Local);
1700                         // If the COM removal changed the velocities on the CPU, this has to be accounted for.
1701                         if (vcm.mode != ComRemovalAlgorithm::No)
1702                         {
1703                             stateGpu->copyVelocitiesToGpu(state->v, AtomLocality::Local);
1704                         }
1705                     }
1706                 }
1707             }
1708         }
1709
1710         /* #############  END CALC EKIN AND PRESSURE ################# */
1711
1712         /* Note: this is OK, but there are some numerical precision issues with using the convergence of
1713            the virial that should probably be addressed eventually. state->veta has better properties,
1714            but what we actually need entering the new cycle is the new shake_vir value. Ideally, we could
1715            generate the new shake_vir, but test the veta value for convergence.  This will take some thought. */
1716
1717         if (ir->efep != FreeEnergyPerturbationType::No && !EI_VV(ir->eI))
1718         {
1719             /* Sum up the foreign energy and dK/dl terms for md and sd.
1720                Currently done every step so that dH/dl is correct in the .edr */
1721             accumulateKineticLambdaComponents(enerd, state->lambda, *ir->fepvals);
1722         }
1723
1724         update_pcouple_after_coordinates(
1725                 fplog, step, ir, mdatoms, pres, force_vir, shake_vir, pressureCouplingMu, state, nrnb, upd.deform(), !useGpuForUpdate);
1726
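        // When the update is offloaded to the GPU, Berendsen, c-rescale, and Parrinello-Rahman box
        // scaling is applied below by scaling the device coordinates with pressureCouplingMu; for
        // c-rescale the velocities are additionally scaled with the inverted matrix, after which
        // the PBC is reset to the new box.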
1727         const bool doBerendsenPressureCoupling = (inputrec->epc == PressureCoupling::Berendsen
1728                                                   && do_per_step(step, inputrec->nstpcouple));
1729         const bool doCRescalePressureCoupling  = (inputrec->epc == PressureCoupling::CRescale
1730                                                  && do_per_step(step, inputrec->nstpcouple));
1731         if (useGpuForUpdate
1732             && (doBerendsenPressureCoupling || doCRescalePressureCoupling || doParrinelloRahman))
1733         {
1734             integrator->scaleCoordinates(pressureCouplingMu);
1735             if (doCRescalePressureCoupling)
1736             {
1737                 matrix pressureCouplingInvMu;
1738                 gmx::invertBoxMatrix(pressureCouplingMu, pressureCouplingInvMu);
1739                 integrator->scaleVelocities(pressureCouplingInvMu);
1740             }
1741             integrator->setPbc(PbcType::Xyz, state->box);
1742         }
1743
1744         /* ################# END UPDATE STEP 2 ################# */
1745         /* #### We now have r(t+dt) and v(t+dt/2)  ############# */
1746
1747         /* The coordinates (x) were unshifted in update */
1748         if (!bGStat)
1749         {
1750             /* We will not sum ekinh_old,
1751              * so signal that we still have to do it.
1752              */
1753             bSumEkinhOld = TRUE;
1754         }
1755
1756         if (bCalcEner)
1757         {
1758             /* #########  BEGIN PREPARING EDR OUTPUT  ###########  */
1759
1760             /* use the directly determined last velocity, not actually the averaged half steps */
1761             if (bTrotter && ir->eI == IntegrationAlgorithm::VV)
1762             {
1763                 enerd->term[F_EKIN] = last_ekin;
1764             }
1765             enerd->term[F_ETOT] = enerd->term[F_EPOT] + enerd->term[F_EKIN];
1766
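            // The conserved-quantity bookkeeping below adds the coupling contribution to the total
            // energy: for VV integrators the quantity saved earlier in the step
            // (saved_conserved_quantity), otherwise the thermostat/barostat energy that NPT_energy()
            // is assumed to return (e.g. Nose-Hoover / Parrinello-Rahman terms).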
1767             if (integratorHasConservedEnergyQuantity(ir))
1768             {
1769                 if (EI_VV(ir->eI))
1770                 {
1771                     enerd->term[F_ECONSERVED] = enerd->term[F_ETOT] + saved_conserved_quantity;
1772                 }
1773                 else
1774                 {
1775                     enerd->term[F_ECONSERVED] = enerd->term[F_ETOT] + NPT_energy(ir, state, &MassQ);
1776                 }
1777             }
1778             /* #########  END PREPARING EDR OUTPUT  ###########  */
1779         }
1780
1781         /* Output stuff */
1782         if (MASTER(cr))
1783         {
1784             if (fplog && do_log && bDoExpanded)
1785             {
1786                 /* only needed if doing expanded ensemble */
1787                 PrintFreeEnergyInfoToFile(fplog,
1788                                           ir->fepvals.get(),
1789                                           ir->expandedvals.get(),
1790                                           ir->bSimTemp ? ir->simtempvals.get() : nullptr,
1791                                           state_global->dfhist,
1792                                           state->fep_state,
1793                                           ir->nstlog,
1794                                           step);
1795             }
1796             if (bCalcEner)
1797             {
1798                 energyOutput.addDataAtEnergyStep(bDoDHDL,
1799                                                  bCalcEnerStep,
1800                                                  t,
1801                                                  mdatoms->tmass,
1802                                                  enerd,
1803                                                  ir->fepvals.get(),
1804                                                  ir->expandedvals.get(),
1805                                                  lastbox,
1806                                                  PTCouplingArrays{ state->boxv,
1807                                                                    state->nosehoover_xi,
1808                                                                    state->nosehoover_vxi,
1809                                                                    state->nhpres_xi,
1810                                                                    state->nhpres_vxi },
1811                                                  state->fep_state,
1812                                                  shake_vir,
1813                                                  force_vir,
1814                                                  total_vir,
1815                                                  pres,
1816                                                  ekind,
1817                                                  mu_tot,
1818                                                  constr);
1819             }
1820             else
1821             {
1822                 energyOutput.recordNonEnergyStep();
1823             }
1824
1825             gmx_bool do_dr = do_per_step(step, ir->nstdisreout);
1826             gmx_bool do_or = do_per_step(step, ir->nstorireout);
1827
1828             if (doSimulatedAnnealing)
1829             {
1830                 gmx::EnergyOutput::printAnnealingTemperatures(
1831                         do_log ? fplog : nullptr, groups, &(ir->opts));
1832             }
1833             if (do_log || do_ene || do_dr || do_or)
1834             {
1835                 energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf),
1836                                                    do_ene,
1837                                                    do_dr,
1838                                                    do_or,
1839                                                    do_log ? fplog : nullptr,
1840                                                    step,
1841                                                    t,
1842                                                    fr->fcdata.get(),
1843                                                    awh.get());
1844             }
1845             if (do_log && ir->bDoAwh && awh->hasFepLambdaDimension())
1846             {
1847                 const bool isInitialOutput = false;
1848                 printLambdaStateToLog(fplog, state->lambda, isInitialOutput);
1849             }
1850
1851             if (ir->bPull)
1852             {
1853                 pull_print_output(pull_work, step, t);
1854             }
1855
1856             if (do_per_step(step, ir->nstlog))
1857             {
1858                 if (fflush(fplog) != 0)
1859                 {
1860                     gmx_fatal(FARGS, "Cannot flush logfile - maybe you are out of disk space?");
1861                 }
1862             }
1863         }
1864         if (bDoExpanded)
1865         {
1866             /* Have to do this part _after_ outputting the logfile and the edr file */
1867             /* Gets written into the state at the beginning of the next loop iteration */
1868             state->fep_state = lamnew;
1869         }
1870         else if (ir->bDoAwh && awh->needForeignEnergyDifferences(step))
1871         {
1872             state->fep_state = awh->fepLambdaState();
1873         }
1874         /* Print the remaining wall clock time for the run */
1875         if (isMasterSimMasterRank(ms, MASTER(cr)) && (do_verbose || gmx_got_usr_signal()) && !bPMETunePrinting)
1876         {
1877             if (shellfc)
1878             {
1879                 fprintf(stderr, "\n");
1880             }
1881             print_time(stderr, walltime_accounting, step, ir, cr);
1882         }
1883
1884         /* Ion/water position swapping.
1885          * Not done in last step since trajectory writing happens before this call
1886          * in the MD loop and exchanges would be lost anyway. */
1887         bNeedRepartition = FALSE;
1888         if ((ir->eSwapCoords != SwapType::No) && (step > 0) && !bLastStep
1889             && do_per_step(step, ir->swap->nstswap))
1890         {
1891             bNeedRepartition = do_swapcoords(cr,
1892                                              step,
1893                                              t,
1894                                              ir,
1895                                              swap,
1896                                              wcycle,
1897                                              as_rvec_array(state->x.data()),
1898                                              state->box,
1899                                              MASTER(cr) && mdrunOptions.verbose,
1900                                              bRerunMD);
1901
1902             if (bNeedRepartition && DOMAINDECOMP(cr))
1903             {
1904                 dd_collect_state(cr->dd, state, state_global);
1905             }
1906         }
1907
1908         /* Replica exchange */
1909         bExchanged = FALSE;
1910         if (bDoReplEx)
1911         {
1912             bExchanged = replica_exchange(fplog, cr, ms, repl_ex, state_global, enerd, state, step, t);
1913         }
1914
1915         if ((bExchanged || bNeedRepartition) && DOMAINDECOMP(cr))
1916         {
1917             dd_partition_system(fplog,
1918                                 mdlog,
1919                                 step,
1920                                 cr,
1921                                 TRUE,
1922                                 1,
1923                                 state_global,
1924                                 top_global,
1925                                 *ir,
1926                                 imdSession,
1927                                 pull_work,
1928                                 state,
1929                                 &f,
1930                                 mdAtoms,
1931                                 &top,
1932                                 fr,
1933                                 vsite,
1934                                 constr,
1935                                 nrnb,
1936                                 wcycle,
1937                                 FALSE);
1938             upd.updateAfterPartition(state->natoms,
1939                                      md->cFREEZE ? gmx::arrayRefFromArray(md->cFREEZE, md->nr)
1940                                                  : gmx::ArrayRef<const unsigned short>(),
1941                                      md->cTC ? gmx::arrayRefFromArray(md->cTC, md->nr)
1942                                              : gmx::ArrayRef<const unsigned short>());
1943         }
1944
1945         bFirstStep = FALSE;
1946         bInitStep  = FALSE;
1947
1948         /* #######  SET VARIABLES FOR NEXT ITERATION IF THEY STILL NEED IT ###### */
1949         /* With all integrators, except VV, we need to retain the pressure
1950          * at the current step for coupling at the next step.
1951          */
1952         if ((state->flags & enumValueToBitMask(StateEntry::PressurePrevious))
1953             && (bGStatEveryStep || (ir->nstpcouple > 0 && step % ir->nstpcouple == 0)))
1954         {
1955             /* Store the pressure in t_state for pressure coupling
1956              * at the next MD step.
1957              */
1958             copy_mat(pres, state->pres_prev);
1959         }
1960
1961         /* #######  END SET VARIABLES FOR NEXT ITERATION ###### */
1962
1963         if ((membed != nullptr) && (!bLastStep))
1964         {
1965             rescale_membed(step_rel, membed, as_rvec_array(state_global->x.data()));
1966         }
1967
1968         cycles = wallcycle_stop(wcycle, WallCycleCounter::Step);
1969         if (DOMAINDECOMP(cr) && wcycle)
1970         {
1971             dd_cycles_add(cr->dd, cycles, ddCyclStep);
1972         }
1973
1974         /* increase the MD step number */
1975         step++;
1976         step_rel++;
1977
1978 #if GMX_FAHCORE
1979         if (MASTER(cr))
1980         {
1981             fcReportProgress(ir->nsteps + ir->init_step, step);
1982         }
1983 #endif
1984
1985         resetHandler->resetCounters(
1986                 step, step_rel, mdlog, fplog, cr, fr->nbv.get(), nrnb, fr->pmedata, pme_loadbal, wcycle, walltime_accounting);
1987
1988         /* If bIMD is TRUE, the master updates the IMD energy record and sends positions to the VMD client */
1989         imdSession->updateEnergyRecordAndSendPositionsAndEnergies(bInteractiveMDstep, step, bCalcEner);
1990     }
1991     /* End of main MD loop */
1992
1993     /* Closing TNG files can include compressing data. Therefore it is good to do that
1994      * before stopping the time measurements. */
1995     mdoutf_tng_close(outf);
1996
1997     /* Stop measuring walltime */
1998     walltime_accounting_end_time(walltime_accounting);
1999
2000     if (!thisRankHasDuty(cr, DUTY_PME))
2001     {
2002         /* Tell the PME-only node to finish */
2003         gmx_pme_send_finish(cr);
2004     }
2005
2006     if (MASTER(cr))
2007     {
2008         if (ir->nstcalcenergy > 0)
2009         {
2010             energyOutput.printEnergyConservation(fplog, ir->simulation_part, EI_MD(ir->eI));
2011
2012             gmx::EnergyOutput::printAnnealingTemperatures(fplog, groups, &(ir->opts));
2013             energyOutput.printAverages(fplog, groups);
2014         }
2015     }
2016     done_mdoutf(outf);
2017
2018     if (bPMETune)
2019     {
2020         pme_loadbal_done(pme_loadbal, fplog, mdlog, fr->nbv->useGpu());
2021     }
2022
2023     done_shellfc(fplog, shellfc, step_rel);
2024
2025     if (useReplicaExchange && MASTER(cr))
2026     {
2027         print_replica_exchange_statistics(fplog, repl_ex);
2028     }
2029
2030     walltime_accounting_set_nsteps_done(walltime_accounting, step_rel);
2031
2032     global_stat_destroy(gstat);
2033 }