/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gmxpre.h"

#include "config.h"

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

#include <array>

#include "gromacs/awh/awh.h"
#include "gromacs/domdec/dlbtiming.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/domdec/partition.h"
#include "gromacs/essentialdynamics/edsam.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/gmxlib/chargegroup.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nonbonded/nb_free_energy.h"
#include "gromacs/gmxlib/nonbonded/nb_kernel.h"
#include "gromacs/gmxlib/nonbonded/nonbonded.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/imd/imd.h"
#include "gromacs/listed_forces/disre.h"
#include "gromacs/listed_forces/gpubonded.h"
#include "gromacs/listed_forces/listed_forces.h"
#include "gromacs/listed_forces/manage_threading.h"
#include "gromacs/listed_forces/orires.h"
#include "gromacs/math/arrayrefwithpadding.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vecdump.h"
#include "gromacs/mdlib/calcmu.h"
#include "gromacs/mdlib/calcvir.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdlib/enerdata_utils.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/forcerec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/ppforceworkload.h"
#include "gromacs/mdlib/qmmm.h"
#include "gromacs/mdlib/update.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/enerdata.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/iforceprovider.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/nbnxm/atomdata.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/mshift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/pulling/pull_rotation.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/wallcyclereporting.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/strconvert.h"
#include "gromacs/utility/sysinfo.h"

using gmx::ForceOutputs;

// TODO: this environment variable allows us to verify before release
// that on less common architectures the total cost of polling is not larger than
// a blocking wait (so polling does not introduce overhead when the static
// PME-first ordering would suffice).
static const bool c_disableAlternatingWait = (getenv("GMX_DISABLE_ALTERNATING_GPU_WAIT") != nullptr);

// environment variable to enable GPU buffer ops, to allow incremental and optional
// introduction of this functionality.
// TODO eventually tie this in with other existing GPU flags.
static const bool c_enableGpuBufOps = (getenv("GMX_USE_GPU_BUFFER_OPS") != nullptr);

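/*! \brief Add the forces in \p forceToAdd to \p f, parallelized over OpenMP threads. */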
static void sum_forces(rvec f[], gmx::ArrayRef<const gmx::RVec> forceToAdd)
{
    const int      end = forceToAdd.size();

    int gmx_unused nt = gmx_omp_nthreads_get(emntDefault);
#pragma omp parallel for num_threads(nt) schedule(static)
    for (int i = 0; i < end; i++)
    {
        rvec_inc(f[i], forceToAdd[i]);
    }
}

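/*! \brief Compute the short-range virial contribution of the local atoms from the forces and shift forces. */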
static void calc_virial(int start, int homenr, const rvec x[],
                        const gmx::ForceWithShiftForces &forceWithShiftForces,
                        tensor vir_part, const t_graph *graph, const matrix box,
                        t_nrnb *nrnb, const t_forcerec *fr, int ePBC)
{
    /* The short-range virial from surrounding boxes */
    const rvec *fshift = as_rvec_array(forceWithShiftForces.shiftForces().data());
    calc_vir(SHIFTS, fr->shift_vec, fshift, vir_part, ePBC == epbcSCREW, box);
    inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);

    /* Calculate partial virial, for local atoms only, based on short range.
     * Total virial is computed in global_stat, called from do_md
     */
    const rvec *f = as_rvec_array(forceWithShiftForces.force().data());
    f_calc_vir(start, start+homenr, x, f, vir_part, graph, box);
    inc_nrnb(nrnb, eNR_VIRIAL, homenr);

    if (debug)
    {
        pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
    }
}

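/*! \brief Compute the COM pull potential and forces, accumulating the energy and dvdl contributions into \p enerd. */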
static void pull_potential_wrapper(const t_commrec *cr,
                                   const t_inputrec *ir,
                                   const matrix box, gmx::ArrayRef<const gmx::RVec> x,
                                   gmx::ForceWithVirial *force,
                                   const t_mdatoms *mdatoms,
                                   gmx_enerdata_t *enerd,
                                   pull_t *pull_work,
                                   const real *lambda,
                                   double t,
                                   gmx_wallcycle_t wcycle)
{
    t_pbc  pbc;
    real   dvdl;

    /* Calculate the center of mass forces; this requires communication,
     * which is why pull_potential is called close to other communication.
     */
    wallcycle_start(wcycle, ewcPULLPOT);
    set_pbc(&pbc, ir->ePBC, box);
    dvdl                     = 0;
    enerd->term[F_COM_PULL] +=
        pull_potential(pull_work, mdatoms, &pbc,
                       cr, t, lambda[efptRESTRAINT], as_rvec_array(x.data()), force, &dvdl);
    enerd->dvdl_lin[efptRESTRAINT] += dvdl;
    wallcycle_stop(wcycle, ewcPULLPOT);
}

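/*! \brief Receive the long-range forces, energies and dvdl from a separate PME rank and accumulate them into \p forceWithVirial and \p enerd. */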
static void pme_receive_force_ener(const t_commrec      *cr,
                                   gmx::ForceWithVirial *forceWithVirial,
                                   gmx_enerdata_t       *enerd,
                                   gmx_wallcycle_t       wcycle)
{
    real   e_q, e_lj, dvdl_q, dvdl_lj;
    float  cycles_ppdpme, cycles_seppme;

    cycles_ppdpme = wallcycle_stop(wcycle, ewcPPDURINGPME);
    dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);

    /* In case of node-splitting, the PP nodes receive the long-range
     * forces, virial and energy from the PME nodes here.
     */
    wallcycle_start(wcycle, ewcPP_PMEWAITRECVF);
    dvdl_q  = 0;
    dvdl_lj = 0;
    gmx_pme_receive_f(cr, forceWithVirial, &e_q, &e_lj, &dvdl_q, &dvdl_lj,
                      &cycles_seppme);
    enerd->term[F_COUL_RECIP] += e_q;
    enerd->term[F_LJ_RECIP]   += e_lj;
    enerd->dvdl_lin[efptCOUL] += dvdl_q;
    enerd->dvdl_lin[efptVDW]  += dvdl_lj;

    if (wcycle)
    {
        dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);
    }
    wallcycle_stop(wcycle, ewcPP_PMEWAITRECVF);
}

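/*! \brief Print all local atoms with a force above \p forceTolerance or with a non-finite force; exit with a fatal error when non-finite forces are present. */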
static void print_large_forces(FILE            *fp,
                               const t_mdatoms *md,
                               const t_commrec *cr,
                               int64_t          step,
                               real             forceTolerance,
                               const rvec      *x,
                               const rvec      *f)
{
    real           force2Tolerance = gmx::square(forceTolerance);
    gmx::index     numNonFinite    = 0;
    for (int i = 0; i < md->homenr; i++)
    {
        real force2    = norm2(f[i]);
        bool nonFinite = !std::isfinite(force2);
        if (force2 >= force2Tolerance || nonFinite)
        {
            fprintf(fp, "step %" PRId64 " atom %6d  x %8.3f %8.3f %8.3f  force %12.5e\n",
                    step,
                    ddglatnr(cr->dd, i), x[i][XX], x[i][YY], x[i][ZZ], std::sqrt(force2));
        }
        if (nonFinite)
        {
            numNonFinite++;
        }
    }
    if (numNonFinite > 0)
    {
        /* Note that with MPI this fatal call on one rank might interrupt
         * the printing on other ranks. But we can only avoid that with
         * an expensive MPI barrier that we would need at each step.
         */
        gmx_fatal(FARGS, "At step %" PRId64 " detected non-finite forces on %td atoms", step, numNonFinite);
    }
}

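/*! \brief Post-process the forces: spread the vsite mesh forces, reduce the direct-virial force buffer and its virial into the outputs, and print large forces when requested. */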
static void post_process_forces(const t_commrec       *cr,
                                int64_t                step,
                                t_nrnb                *nrnb,
                                gmx_wallcycle_t        wcycle,
                                const gmx_localtop_t  *top,
                                const matrix           box,
                                const rvec             x[],
                                ForceOutputs          *forceOutputs,
                                tensor                 vir_force,
                                const t_mdatoms       *mdatoms,
                                const t_graph         *graph,
                                const t_forcerec      *fr,
                                const gmx_vsite_t     *vsite,
                                const gmx::ForceFlags &forceFlags)
{
    rvec *f = as_rvec_array(forceOutputs->forceWithShiftForces().force().data());

    if (fr->haveDirectVirialContributions)
    {
        auto &forceWithVirial = forceOutputs->forceWithVirial();
        rvec *fDirectVir      = as_rvec_array(forceWithVirial.force_.data());

        if (vsite)
        {
            /* Spread the mesh force on virtual sites to the other particles...
             * This is parallelized. MPI communication is performed
             * if the constructing atoms aren't local.
             */
            matrix virial = { { 0 } };
            spread_vsite_f(vsite, x, fDirectVir, nullptr,
                           forceFlags.computeVirial, virial,
                           nrnb,
                           &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr, wcycle);
            forceWithVirial.addVirialContribution(virial);
        }

        if (forceFlags.computeVirial)
        {
            /* Now add the forces, this is local */
            sum_forces(f, forceWithVirial.force_);

            /* Add the direct virial contributions */
            GMX_ASSERT(forceWithVirial.computeVirial_, "forceWithVirial should request virial computation when we request the virial");
            m_add(vir_force, forceWithVirial.getVirial(), vir_force);

            if (debug)
            {
                pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
            }
        }
    }

    if (fr->print_force >= 0)
    {
        print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
    }
}

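/*! \brief Dispatch the nonbonded kernel for \p ilocality, first pruning the CPU pair list when a dynamic pruning step is due. */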
static void do_nb_verlet(t_forcerec                       *fr,
                         const interaction_const_t        *ic,
                         gmx_enerdata_t                   *enerd,
                         int                               legacyForceFlags,
                         const gmx::ForceFlags            &forceFlags,
                         const Nbnxm::InteractionLocality  ilocality,
                         const int                         clearF,
                         const int64_t                     step,
                         t_nrnb                           *nrnb,
                         gmx_wallcycle_t                   wcycle)
{
    if (!(legacyForceFlags & GMX_FORCE_NONBONDED))
    {
        /* skip non-bonded calculation */
        return;
    }

    nonbonded_verlet_t *nbv  = fr->nbv.get();

    /* GPU kernel launch overhead is already timed separately */
    if (fr->cutoff_scheme != ecutsVERLET)
    {
        gmx_incons("Invalid cut-off scheme passed!");
    }

    if (!nbv->useGpu())
    {
        /* When dynamic pair-list pruning is requested, we need to prune
         * at nstlistPrune steps.
         */
        if (nbv->isDynamicPruningStepCpu(step))
        {
            /* Prune the pair-list beyond fr->ic->rlistPrune using
             * the current coordinates of the atoms.
             */
            wallcycle_sub_start(wcycle, ewcsNONBONDED_PRUNING);
            nbv->dispatchPruneKernelCpu(ilocality, fr->shift_vec);
            wallcycle_sub_stop(wcycle, ewcsNONBONDED_PRUNING);
        }
    }

    nbv->dispatchNonbondedKernel(ilocality, *ic, legacyForceFlags, forceFlags, clearF, *fr, enerd, nrnb);
}

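/*! \brief Zero \p n rvec elements of \p v, using OpenMP threads only when there is enough work to benefit. */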
static inline void clear_rvecs_omp(int n, rvec v[])
{
    int nth = gmx_omp_nthreads_get_simple_rvec_task(emntDefault, n);

    /* Note that we would like to avoid this conditional by putting it
     * into the omp pragma instead, but then we still take the full
     * omp parallel for overhead (at least with gcc5).
     */
    if (nth == 1)
    {
        for (int i = 0; i < n; i++)
        {
            clear_rvec(v[i]);
        }
    }
    else
    {
#pragma omp parallel for num_threads(nth) schedule(static)
        for (int i = 0; i < n; i++)
        {
            clear_rvec(v[i]);
        }
    }
}

/*! \brief Return an estimate of the average kinetic energy or 0 when unreliable
 *
 * \param groupOptions  Group options, containing T-coupling options
 */
static real averageKineticEnergyEstimate(const t_grpopts &groupOptions)
{
    real nrdfCoupled   = 0;
    real nrdfUncoupled = 0;
    real kineticEnergy = 0;
    for (int g = 0; g < groupOptions.ngtc; g++)
    {
        if (groupOptions.tau_t[g] >= 0)
        {
            nrdfCoupled   += groupOptions.nrdf[g];
            kineticEnergy += groupOptions.nrdf[g]*0.5*groupOptions.ref_t[g]*BOLTZ;
        }
        else
        {
            nrdfUncoupled += groupOptions.nrdf[g];
        }
    }

    /* This conditional with > also catches nrdf=0 */
    if (nrdfCoupled > nrdfUncoupled)
    {
        return kineticEnergy*(nrdfCoupled + nrdfUncoupled)/nrdfCoupled;
    }
    else
    {
        return 0;
    }
}

/*! \brief This routine checks that the potential energy is finite.
 *
 * Always checks that the potential energy is finite. If step equals
 * inputrec.init_step also checks that the magnitude of the potential energy
 * is reasonable. Terminates with a fatal error when a check fails.
 * Note that passing this check does not guarantee finite forces,
 * since those use slightly different arithmetic. But in most cases
 * there is just a narrow coordinate range where forces are not finite
 * and energies are finite.
 *
 * \param[in] step      The step number, used for checking and printing
 * \param[in] enerd     The energy data; the non-bonded group energies need to be added to enerd.term[F_EPOT] before calling this routine
 * \param[in] inputrec  The input record
 */
static void checkPotentialEnergyValidity(int64_t               step,
                                         const gmx_enerdata_t &enerd,
                                         const t_inputrec     &inputrec)
{
    /* Threshold valid for comparing absolute potential energy against
     * the kinetic energy. Normally one should not consider absolute
     * potential energy values, but with a factor of one million
     * we should never get false positives.
     */
    constexpr real c_thresholdFactor = 1e6;

    bool           energyIsNotFinite    = !std::isfinite(enerd.term[F_EPOT]);
    real           averageKineticEnergy = 0;
    /* We only check for large potential energy at the initial step,
     * because that is by far the most likely step for this to occur
     * and because computing the average kinetic energy is not free.
     * Note: nstcalcenergy >> 1 often does not allow catching large energies
     * before they become NaN.
     */
    if (step == inputrec.init_step && EI_DYNAMICS(inputrec.eI))
    {
        averageKineticEnergy = averageKineticEnergyEstimate(inputrec.opts);
    }

    if (energyIsNotFinite || (averageKineticEnergy > 0 &&
                              enerd.term[F_EPOT] > c_thresholdFactor*averageKineticEnergy))
    {
        gmx_fatal(FARGS, "Step %" PRId64 ": The total potential energy is %g, which is %s. The LJ and electrostatic contributions to the energy are %g and %g, respectively. A %s potential energy can be caused by overlapping interactions in bonded interactions or very large%s coordinate values. Usually this is caused by a badly- or non-equilibrated initial configuration, incorrect interactions or parameters in the topology.",
                  step,
                  enerd.term[F_EPOT],
                  energyIsNotFinite ? "not finite" : "extremely high",
                  enerd.term[F_LJ],
                  enerd.term[F_COUL_SR],
                  energyIsNotFinite ? "non-finite" : "very high",
                  energyIsNotFinite ? " or NaN" : "");
    }
}

/*! \brief Return true if there are special forces computed this step.
 *
 * The conditionals exactly correspond to those in computeSpecialForces().
 */
static bool
haveSpecialForces(const t_inputrec              *inputrec,
                  ForceProviders                *forceProviders,
                  const pull_t                  *pull_work,
                  const bool                     computeForces,
                  const gmx_edsam               *ed)
{
    return
        ((computeForces && forceProviders->hasForceProvider()) ||         // forceProviders
         (inputrec->bPull && pull_have_potential(pull_work)) ||           // pull
         inputrec->bRot ||                                                // enforced rotation
         (ed != nullptr) ||                                               // flooding
         (inputrec->bIMD && computeForces));                              // IMD
}

/*! \brief Compute forces and/or energies for special algorithms
 *
 * The intention is to collect all calls to algorithms that compute
 * forces on local atoms only and that do not contribute to the local
 * virial sum (but add their virial contribution separately).
 * Eventually these should likely all become ForceProviders.
 * Within this function the intention is to have algorithms that do
 * global communication at the end, so global barriers within the MD loop
 * are as close together as possible.
 *
 * \param[in]     fplog            The log file
 * \param[in]     cr               The communication record
 * \param[in]     inputrec         The input record
 * \param[in]     awh              The Awh module (nullptr if none in use).
 * \param[in]     enforcedRotation Enforced rotation module.
 * \param[in]     imdSession       The IMD session
 * \param[in]     pull_work        The pull work structure.
 * \param[in]     step             The current MD step
 * \param[in]     t                The current time
 * \param[in,out] wcycle           Wallcycle accounting struct
 * \param[in,out] forceProviders   Pointer to a list of force providers
 * \param[in]     box              The unit cell
 * \param[in]     x                The coordinates
 * \param[in]     mdatoms          Per atom properties
 * \param[in]     lambda           Array of free-energy lambda values
 * \param[in]     forceFlags       Force schedule flags
 * \param[in,out] forceWithVirial  Force and virial buffers
 * \param[in,out] enerd            Energy buffer
 * \param[in,out] ed               Essential dynamics pointer
 * \param[in]     didNeighborSearch Tells if we did neighbor searching this step, used for ED sampling
 *
 * \todo Remove didNeighborSearch, which is used incorrectly.
 * \todo Convert all other algorithms called here to ForceProviders.
 */
static void
computeSpecialForces(FILE                          *fplog,
                     const t_commrec               *cr,
                     const t_inputrec              *inputrec,
                     gmx::Awh                      *awh,
                     gmx_enfrot                    *enforcedRotation,
                     gmx::ImdSession               *imdSession,
                     pull_t                        *pull_work,
                     int64_t                        step,
                     double                         t,
                     gmx_wallcycle_t                wcycle,
                     ForceProviders                *forceProviders,
                     const matrix                   box,
                     gmx::ArrayRef<const gmx::RVec> x,
                     const t_mdatoms               *mdatoms,
                     real                          *lambda,
                     const gmx::ForceFlags         &forceFlags,
                     gmx::ForceWithVirial          *forceWithVirial,
                     gmx_enerdata_t                *enerd,
                     gmx_edsam                     *ed,
                     bool                           didNeighborSearch)
{
    /* NOTE: Currently all ForceProviders only provide forces.
     *       When they also provide energies, remove this conditional.
     */
    if (forceFlags.computeForces)
    {
        gmx::ForceProviderInput  forceProviderInput(x, *mdatoms, t, box, *cr);
        gmx::ForceProviderOutput forceProviderOutput(forceWithVirial, enerd);

        /* Collect forces from modules */
        forceProviders->calculateForces(forceProviderInput, &forceProviderOutput);
    }

    if (inputrec->bPull && pull_have_potential(pull_work))
    {
        pull_potential_wrapper(cr, inputrec, box, x,
                               forceWithVirial,
                               mdatoms, enerd, pull_work, lambda, t,
                               wcycle);

        if (awh)
        {
            enerd->term[F_COM_PULL] +=
                awh->applyBiasForcesAndUpdateBias(inputrec->ePBC, *mdatoms, box,
                                                  forceWithVirial,
                                                  t, step, wcycle, fplog);
        }
    }

    rvec *f = as_rvec_array(forceWithVirial->force_.data());

    /* Add the forces from enforced rotation potentials (if any) */
    if (inputrec->bRot)
    {
        wallcycle_start(wcycle, ewcROTadd);
        enerd->term[F_COM_PULL] += add_rot_forces(enforcedRotation, f, cr, step, t);
        wallcycle_stop(wcycle, ewcROTadd);
    }

    if (ed)
    {
        /* Note that since init_edsam() is called after the initialization
         * of forcerec, edsam doesn't request the noVirSum force buffer.
         * Thus if no other algorithm (e.g. PME) requires it, the forces
         * here will contribute to the virial.
         */
        do_flood(cr, inputrec, as_rvec_array(x.data()), f, ed, box, step, didNeighborSearch);
    }

    /* Add forces from interactive molecular dynamics (IMD), if any */
    if (inputrec->bIMD && forceFlags.computeForces)
    {
        imdSession->applyForces(f);
    }
}

/*! \brief Launch the prepare_step and spread stages of PME GPU.
 *
 * \param[in]  pmedata       The PME structure
 * \param[in]  box           The box matrix
 * \param[in]  x             Coordinate array
 * \param[in]  forceFlags    Force schedule flags
 * \param[in]  pmeFlags      PME flags
 * \param[in]  wcycle        The wallcycle structure
 */
static inline void launchPmeGpuSpread(gmx_pme_t             *pmedata,
                                      const matrix           box,
                                      const rvec             x[],
                                      const gmx::ForceFlags &forceFlags,
                                      int                    pmeFlags,
                                      gmx_wallcycle_t        wcycle)
{
    pme_gpu_prepare_computation(pmedata, forceFlags.haveDynamicBox, box, wcycle, pmeFlags);
    pme_gpu_launch_spread(pmedata, x, wcycle);
}

/*! \brief Launch the FFT and gather stages of PME GPU
 *
 * This function only implements setting the output forces (no accumulation).
 *
 * \param[in]  pmedata        The PME structure
 * \param[in]  wcycle         The wallcycle structure
 * \param[in]  useGpuFPmeReduction Whether forces will be reduced on GPU
 */
static void launchPmeGpuFftAndGather(gmx_pme_t        *pmedata,
                                     gmx_wallcycle_t   wcycle,
                                     bool              useGpuFPmeReduction)
{
    pme_gpu_launch_complex_transforms(pmedata, wcycle);
    pme_gpu_launch_gather(pmedata, wcycle, PmeForceOutputHandling::Set, useGpuFPmeReduction);
}

/*! \brief
 *  Polling wait for either of the PME or nonbonded GPU tasks.
 *
 * Instead of a static order in waiting for GPU tasks, this function
 * polls checking which of the two tasks completes first, and does the
 * associated force buffer reduction overlapped with the other task.
 * By doing that, unlike static scheduling order, it can always overlap
 * one of the reductions, regardless of the GPU task completion order.
 *
 * \param[in]     nbv              Nonbonded verlet structure
 * \param[in,out] pmedata          PME module data
 * \param[in,out] forceOutputs     Output buffer for the forces and virial
 * \param[in,out] enerd            Energy data structure results are reduced into
 * \param[in]     forceFlags       Force schedule flags
 * \param[in]     pmeFlags         PME flags
 * \param[in]     wcycle           The wallcycle structure
 */
static void alternatePmeNbGpuWaitReduce(nonbonded_verlet_t                  *nbv,
                                        gmx_pme_t                           *pmedata,
                                        gmx::ForceOutputs                   *forceOutputs,
                                        gmx_enerdata_t                      *enerd,
                                        const gmx::ForceFlags               &forceFlags,
                                        int                                  pmeFlags,
                                        gmx_wallcycle_t                      wcycle)
{
    bool isPmeGpuDone = false;
    bool isNbGpuDone  = false;

    gmx::ForceWithShiftForces      &forceWithShiftForces = forceOutputs->forceWithShiftForces();
    gmx::ForceWithVirial           &forceWithVirial      = forceOutputs->forceWithVirial();

    gmx::ArrayRef<const gmx::RVec>  pmeGpuForces;

    while (!isPmeGpuDone || !isNbGpuDone)
    {
        if (!isPmeGpuDone)
        {
            GpuTaskCompletion completionType = (isNbGpuDone) ? GpuTaskCompletion::Wait : GpuTaskCompletion::Check;
            isPmeGpuDone = pme_gpu_try_finish_task(pmedata, pmeFlags, wcycle, &forceWithVirial, enerd, completionType);
        }

        if (!isNbGpuDone)
        {
            GpuTaskCompletion completionType = (isPmeGpuDone) ? GpuTaskCompletion::Wait : GpuTaskCompletion::Check;
            isNbGpuDone = Nbnxm::gpu_try_finish_task(nbv->gpu_nbv,
                                                     forceFlags,
                                                     Nbnxm::AtomLocality::Local,
                                                     enerd->grpp.ener[egLJSR].data(),
                                                     enerd->grpp.ener[egCOULSR].data(),
                                                     forceWithShiftForces.shiftForces(), completionType, wcycle);

            if (isNbGpuDone)
            {
                nbv->atomdata_add_nbat_f_to_f(Nbnxm::AtomLocality::Local,
                                              forceWithShiftForces.force());
            }
        }
    }
}

/*! \brief Set up the different force buffers; also does clearing.
 *
 * \param[in] fr        force record pointer
 * \param[in] pull_work The pull work object.
 * \param[in] inputrec  input record
 * \param[in] force     force array
 * \param[in] forceFlags Force schedule flags
 * \param[in,out] wcycle wallcycle recording structure
 *
 * \returns             Cleared force output structure
 */
static ForceOutputs
setupForceOutputs(t_forcerec                          *fr,
                  pull_t                              *pull_work,
                  const t_inputrec                    &inputrec,
                  gmx::ArrayRefWithPadding<gmx::RVec>  force,
                  const gmx::ForceFlags               &forceFlags,
                  gmx_wallcycle_t                      wcycle)
{
    wallcycle_sub_start(wcycle, ewcsCLEAR_FORCE_BUFFER);

    /* NOTE: We assume fr->shiftForces is all zeros here */
    gmx::ForceWithShiftForces forceWithShiftForces(force, forceFlags.computeVirial, fr->shiftForces);

    if (forceFlags.computeForces)
    {
        /* Clear the short- and long-range forces */
        clear_rvecs_omp(fr->natoms_force_constr,
                        as_rvec_array(forceWithShiftForces.force().data()));
    }

    /* If we need to compute the virial, we might need a separate
     * force buffer for algorithms for which the virial is calculated
     * directly, such as PME. Otherwise, forceWithVirial uses the
     * same force (f in legacy calls) buffer as other algorithms.
     */
    const bool useSeparateForceWithVirialBuffer = (forceFlags.computeForces &&
                                                   (forceFlags.computeVirial && fr->haveDirectVirialContributions));
    /* forceWithVirial uses the local atom range only */
    gmx::ForceWithVirial forceWithVirial(useSeparateForceWithVirialBuffer ?
                                         fr->forceBufferForDirectVirialContributions : force.unpaddedArrayRef(),
                                         forceFlags.computeVirial);

    if (useSeparateForceWithVirialBuffer)
    {
        /* TODO: update comment
         * We only compute forces on local atoms. Note that vsites can
         * spread to non-local atoms, but that part of the buffer is
         * cleared separately in the vsite spreading code.
         */
        clear_rvecs_omp(forceWithVirial.force_.size(), as_rvec_array(forceWithVirial.force_.data()));
    }

    if (inputrec.bPull && pull_have_constraint(pull_work))
    {
        clear_pull_forces(pull_work);
    }

    wallcycle_sub_stop(wcycle, ewcsCLEAR_FORCE_BUFFER);

    return ForceOutputs(forceWithShiftForces, forceWithVirial);
}


/*! \brief Set up flags that indicate what type of work there is to compute.
 *
 * Currently we only update it at search steps,
 * but some properties may change more frequently (e.g. virial/non-virial step),
 * so when including those either the frequency of update (per-step) or the scope
 * of a flag will change (i.e. a set of flags for nstlist steps).
 *
 */
static void
setupForceWorkload(gmx::PpForceWorkload  *forceWork,
                   const t_inputrec      *inputrec,
                   const t_forcerec      *fr,
                   const pull_t          *pull_work,
                   const gmx_edsam       *ed,
                   const t_idef          &idef,
                   const t_fcdata        *fcd,
                   const gmx::ForceFlags &forceFlags
                   )
{
    forceWork->haveSpecialForces      = haveSpecialForces(inputrec, fr->forceProviders, pull_work, forceFlags.computeForces, ed);
    forceWork->haveCpuBondedWork      = haveCpuBondeds(*fr);
    forceWork->haveGpuBondedWork      = ((fr->gpuBonded != nullptr) && fr->gpuBonded->haveInteractions());
    forceWork->haveRestraintsWork     = havePositionRestraints(idef, *fcd);
    forceWork->haveCpuListedForceWork = haveCpuListedForces(*fr, idef, *fcd);
}

/*! \brief Set up the force flag struct from the force bitmask.
 *
 * \param[out]     flags                Force schedule flags
 * \param[in]      legacyFlags          Force bitmask flags used to construct the new flags
 * \param[in]      isNonbondedOn        Global override; if false, all nonbonded calculation is turned off.
 */
static void
setupForceFlags(gmx::ForceFlags *flags,
                const int        legacyFlags,
                const bool       isNonbondedOn)
{
    flags->stateChanged           = ((legacyFlags & GMX_FORCE_STATECHANGED) != 0);
    flags->haveDynamicBox         = ((legacyFlags & GMX_FORCE_DYNAMICBOX) != 0);
    flags->doNeighborSearch       = ((legacyFlags & GMX_FORCE_NS) != 0);
    flags->computeVirial          = ((legacyFlags & GMX_FORCE_VIRIAL) != 0);
    flags->computeEnergy          = ((legacyFlags & GMX_FORCE_ENERGY) != 0);
    flags->computeForces          = ((legacyFlags & GMX_FORCE_FORCES) != 0);
    flags->computeListedForces    = ((legacyFlags & GMX_FORCE_LISTED) != 0);
    flags->computeNonbondedForces = ((legacyFlags & GMX_FORCE_NONBONDED) != 0) && isNonbondedOn;
}


/*! \brief Launch end-of-step GPU tasks: buffer clearing and rolling pruning.
 *
 * TODO: eliminate \p useGpuNonbonded and \p useGpuPme when these are
 * incorporated in PpForceWorkload.
 */
static void
launchGpuEndOfStepTasks(nonbonded_verlet_t            *nbv,
                        gmx::GpuBonded                *gpuBonded,
                        gmx_pme_t                     *pmedata,
                        gmx_enerdata_t                *enerd,
                        const gmx::MdScheduleWorkload &mdScheduleWork,
                        bool                           useGpuNonbonded,
                        bool                           useGpuPme,
                        int64_t                        step,
                        gmx_wallcycle_t                wcycle)
{
    if (useGpuNonbonded)
    {
        /* Launch pruning before buffer clearing because the API overhead of the
         * clear kernel launches can leave the GPU idle while it could be running
         * the prune kernel.
         */
        if (nbv->isDynamicPruningStepGpu(step))
        {
            nbv->dispatchPruneKernelGpu(step);
        }

        /* now clear the GPU outputs while we finish the step on the CPU */
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        Nbnxm::gpu_clear_outputs(nbv->gpu_nbv, mdScheduleWork.forceFlags.computeVirial);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
    }

    if (useGpuPme)
    {
        pme_gpu_reinit_computation(pmedata, wcycle);
    }

    if (mdScheduleWork.forceWork.haveGpuBondedWork && mdScheduleWork.forceFlags.computeEnergy)
    {
        // in principle this should be included in the DD balancing region,
        // but generally it is infrequent so we'll omit it for the sake of
        // simpler code
        gpuBonded->waitAccumulateEnergyTerms(enerd);

        gpuBonded->clearEnergies();
    }
}


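/*! \brief Compute forces and, when requested, energies and the virial for one MD step.
 *
 * Launches the GPU tasks (nonbonded, PME, bonded) early so that they overlap
 * with CPU-side work, then reduces all contributions into \p force,
 * \p vir_force and \p enerd.
 */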
void do_force(FILE                                     *fplog,
              const t_commrec                          *cr,
              const gmx_multisim_t                     *ms,
              const t_inputrec                         *inputrec,
              gmx::Awh                                 *awh,
              gmx_enfrot                               *enforcedRotation,
              gmx::ImdSession                          *imdSession,
              pull_t                                   *pull_work,
              int64_t                                   step,
              t_nrnb                                   *nrnb,
              gmx_wallcycle_t                           wcycle,
              const gmx_localtop_t                     *top,
              const matrix                              box,
              gmx::ArrayRefWithPadding<gmx::RVec>       x,
              history_t                                *hist,
              gmx::ArrayRefWithPadding<gmx::RVec>       force,
              tensor                                    vir_force,
              const t_mdatoms                          *mdatoms,
              gmx_enerdata_t                           *enerd,
              t_fcdata                                 *fcd,
              gmx::ArrayRef<real>                       lambda,
              t_graph                                  *graph,
              t_forcerec                               *fr,
              gmx::MdScheduleWorkload                  *mdScheduleWork,
              const gmx_vsite_t                        *vsite,
              rvec                                      mu_tot,
              double                                    t,
              gmx_edsam                                *ed,
              int                                       flags,
              const DDBalanceRegionHandler             &ddBalanceRegionHandler)
{
    int                  i, j;
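    // mu stores the local dipole contributions of topology states A and B
    // contiguously (DIM components each); they are reduced over ranks below.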
    double               mu[2*DIM];
    gmx_bool             bFillGrid, bCalcCGCM;
    gmx_bool             bUseGPU, bUseOrEmulGPU;
    nonbonded_verlet_t  *nbv = fr->nbv.get();
    interaction_const_t *ic  = fr->ic;

    // TODO remove the code below when the legacy flags are not in use anymore
    /* modify force flag if not doing nonbonded */
    if (!fr->bNonbonded)
    {
        flags &= ~GMX_FORCE_NONBONDED;
    }
    setupForceFlags(&mdScheduleWork->forceFlags, flags, fr->bNonbonded);

    const gmx::ForceFlags &forceFlags = mdScheduleWork->forceFlags;

    bFillGrid     = (forceFlags.doNeighborSearch && forceFlags.stateChanged);
    bCalcCGCM     = (bFillGrid && !DOMAINDECOMP(cr));
    bUseGPU       = fr->nbv->useGpu();
    bUseOrEmulGPU = bUseGPU || fr->nbv->emulateGpu();

    const auto pmeRunMode = fr->pmedata ? pme_run_mode(fr->pmedata) : PmeRunMode::CPU;
    // TODO slim this conditional down - inputrec and duty checks should mean the same in proper code!
    const bool useGpuPme  = EEL_PME(fr->ic->eeltype) && thisRankHasDuty(cr, DUTY_PME) &&
        ((pmeRunMode == PmeRunMode::GPU) || (pmeRunMode == PmeRunMode::Mixed));
    const int  pmeFlags = GMX_PME_SPREAD | GMX_PME_SOLVE |
        (forceFlags.computeVirial ? GMX_PME_CALC_ENER_VIR : 0) |
        (forceFlags.computeEnergy ? GMX_PME_CALC_ENER_VIR : 0) |
        (forceFlags.computeForces ? GMX_PME_CALC_F : 0);

    // Switches on whether to use GPU for position and force buffer operations
    // TODO consider all possible combinations of triggers, and how to combine optimally in each case.
    const BufferOpsUseGpu useGpuXBufOps = (c_enableGpuBufOps && bUseGPU && (GMX_GPU == GMX_GPU_CUDA)) ?
        BufferOpsUseGpu::True : BufferOpsUseGpu::False;
    // GPU Force buffer ops are disabled on virial steps, because the virial calc is not yet ported to GPU
    const BufferOpsUseGpu useGpuFBufOps = (c_enableGpuBufOps && bUseGPU && (GMX_GPU == GMX_GPU_CUDA))
        && !(forceFlags.computeVirial || forceFlags.computeEnergy) ?
        BufferOpsUseGpu::True : BufferOpsUseGpu::False;
    // TODO: move / add this flag to the internal PME GPU data structures
    const bool useGpuFPmeReduction = (useGpuFBufOps == BufferOpsUseGpu::True) &&
        thisRankHasDuty(cr, DUTY_PME) && useGpuPme; // only supported if this rank is performing PME on the GPU

    /* At a search step we need to start the first balancing region
     * somewhere early inside the step after communication during domain
     * decomposition (and not during the previous step as usual).
     */
    if (forceFlags.doNeighborSearch)
    {
        ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::yes);
    }

    const int start  = 0;
    const int homenr = mdatoms->homenr;

    clear_mat(vir_force);

    if (forceFlags.stateChanged)
    {
        if (inputrecNeedMutot(inputrec))
        {
            /* Calculate total (local) dipole moment in a temporary common array.
             * This makes it possible to sum them over nodes faster.
             */
            calc_mu(start, homenr,
                    x.unpaddedArrayRef(), mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
                    mu, mu+DIM);
        }
    }

    if (fr->ePBC != epbcNONE)
    {
        /* Compute shift vectors every step,
         * because of pressure coupling or box deformation!
         */
        if (forceFlags.haveDynamicBox && forceFlags.stateChanged)
        {
            calc_shifts(box, fr->shift_vec);
        }

        if (bCalcCGCM)
        {
            put_atoms_in_box_omp(fr->ePBC, box, x.unpaddedArrayRef().subArray(0, homenr), gmx_omp_nthreads_get(emntDefault));
            inc_nrnb(nrnb, eNR_SHIFTX, homenr);
        }
        else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
        {
            unshift_self(graph, box, as_rvec_array(x.unpaddedArrayRef().data()));
        }
    }

    nbnxn_atomdata_copy_shiftvec(forceFlags.haveDynamicBox,
                                 fr->shift_vec, nbv->nbat.get());

#if GMX_MPI
    if (!thisRankHasDuty(cr, DUTY_PME))
    {
        /* Send particle coordinates to the pme nodes.
         * Since this is only implemented for domain decomposition
         * and domain decomposition does not use the graph,
         * we do not need to worry about shifting.
         */
        gmx_pme_send_coordinates(cr, box, as_rvec_array(x.unpaddedArrayRef().data()),
                                 lambda[efptCOUL], lambda[efptVDW],
                                 (forceFlags.computeVirial || forceFlags.computeEnergy),
                                 step, wcycle);
    }
#endif /* GMX_MPI */

    if (useGpuPme)
    {
        launchPmeGpuSpread(fr->pmedata, box, as_rvec_array(x.unpaddedArrayRef().data()), forceFlags, pmeFlags, wcycle);
    }

    /* do gridding for pair search */
    if (forceFlags.doNeighborSearch)
    {
        if (graph && forceFlags.stateChanged)
        {
            /* Calculate intramolecular shift vectors to make molecules whole */
            mk_mshift(fplog, graph, fr->ePBC, box, as_rvec_array(x.unpaddedArrayRef().data()));
        }

        // TODO
        // - vzero is constant, do we need to pass it?
        // - box_diag should be passed directly to nbnxn_put_on_grid
        //
        rvec vzero;
        clear_rvec(vzero);

        rvec box_diag;
        box_diag[XX] = box[XX][XX];
        box_diag[YY] = box[YY][YY];
        box_diag[ZZ] = box[ZZ][ZZ];

        wallcycle_start(wcycle, ewcNS);
        if (!DOMAINDECOMP(cr))
        {
            wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
            nbnxn_put_on_grid(nbv, box,
                              0, vzero, box_diag,
                              nullptr, 0, mdatoms->homenr, -1,
                              fr->cginfo, x.unpaddedArrayRef(),
                              0, nullptr);
            wallcycle_sub_stop(wcycle, ewcsNBS_GRID_LOCAL);
        }
        else
        {
            wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
            nbnxn_put_on_grid_nonlocal(nbv, domdec_zones(cr->dd),
                                       fr->cginfo, x.unpaddedArrayRef());
            wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);
        }

        nbv->setAtomProperties(*mdatoms, fr->cginfo);

        wallcycle_stop(wcycle, ewcNS);

        /* initialize the GPU nbnxm atom data and bonded data structures */
        if (bUseGPU)
        {
            wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);

            wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
            Nbnxm::gpu_init_atomdata(nbv->gpu_nbv, nbv->nbat.get());
            wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);

            if (fr->gpuBonded)
            {
                /* Now that all atoms are on the grid, we can assign bonded
                 * interactions to the GPU, where the grid order is
                 * needed. Also the xq, f and fshift device buffers have
                 * been reallocated if needed, so the bonded code can
                 * learn about them. */
                // TODO the xq, f, and fshift buffers are now shared
                // resources, so they should be maintained by a
                // higher-level object than the nb module.
                fr->gpuBonded->updateInteractionListsAndDeviceBuffers(nbv->getGridIndices(),
                                                                      top->idef,
                                                                      Nbnxm::gpu_get_xq(nbv->gpu_nbv),
                                                                      Nbnxm::gpu_get_f(nbv->gpu_nbv),
                                                                      Nbnxm::gpu_get_fshift(nbv->gpu_nbv));
            }
            wallcycle_stop(wcycle, ewcLAUNCH_GPU);
        }
    }

    // Call it per-step as force-flags can change.
    // Need to run after the GPU-offload bonded interaction lists
    // are set up to be able to determine whether there is bonded work.
    setupForceWorkload(&mdScheduleWork->forceWork,
                       inputrec,
                       fr,
                       pull_work,
                       ed,
                       top->idef,
                       fcd,
                       forceFlags);

    const gmx::PpForceWorkload &forceWork = mdScheduleWork->forceWork;

    /* do local pair search */
    if (forceFlags.doNeighborSearch)
    {
        // TODO: fuse this branch with the above forceFlags.doNeighborSearch block
        wallcycle_start_nocount(wcycle, ewcNS);
        wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
        /* Note that with a GPU the launch overhead of the list transfer is not timed separately */
        nbv->constructPairlist(Nbnxm::InteractionLocality::Local,
                               &top->excls, step, nrnb);

        nbv->setupGpuShortRangeWork(fr->gpuBonded, Nbnxm::InteractionLocality::Local);

        wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_LOCAL);
        wallcycle_stop(wcycle, ewcNS);

        if (useGpuXBufOps == BufferOpsUseGpu::True)
        {
            nbv->atomdata_init_copy_x_to_nbat_x_gpu();
        }
        // For force buffer ops, we use the below condition rather than
        // useGpuFBufOps to ensure that init is performed even if this
        // NS step is also a virial step (on which f buf ops are deactivated).
        if (c_enableGpuBufOps && bUseGPU && (GMX_GPU == GMX_GPU_CUDA))
        {
            nbv->atomdata_init_add_nbat_f_to_f_gpu();
        }
    }
1120     else
1121     {
1122         nbv->setCoordinates(Nbnxm::AtomLocality::Local, false,
1123                             x.unpaddedArrayRef(), useGpuXBufOps, pme_gpu_get_device_x(fr->pmedata));
1124     }
1125
1126     if (bUseGPU)
1127     {
1128         ddBalanceRegionHandler.openBeforeForceComputationGpu();
1129
1130         wallcycle_start(wcycle, ewcLAUNCH_GPU);
1131
1132         wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1133         Nbnxm::gpu_upload_shiftvec(nbv->gpu_nbv, nbv->nbat.get());
1134         if (forceFlags.doNeighborSearch || (useGpuXBufOps == BufferOpsUseGpu::False))
1135         {
1136             Nbnxm::gpu_copy_xq_to_gpu(nbv->gpu_nbv, nbv->nbat.get(),
1137                                       Nbnxm::AtomLocality::Local);
1138         }
1139         wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1140         // with X buffer ops offloaded to the GPU on all but the search steps
1141
1142         // bonded work not split into separate local and non-local, so with DD
1143         // we can only launch the kernel after non-local coordinates have been received.
1144         if (forceWork.haveGpuBondedWork && !havePPDomainDecomposition(cr))
1145         {
1146             wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_BONDED);
1147             fr->gpuBonded->launchKernel(fr, forceFlags, box);
1148             wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_BONDED);
1149         }
1150
1151         /* launch local nonbonded work on GPU */
1152         wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1153         do_nb_verlet(fr, ic, enerd, flags, forceFlags, Nbnxm::InteractionLocality::Local, enbvClearFNo,
1154                      step, nrnb, wcycle);
1155         wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1156         wallcycle_stop(wcycle, ewcLAUNCH_GPU);
1157     }
1158
1159     if (useGpuPme)
1160     {
1161         // In PME GPU and mixed mode we launch FFT / gather after the
1162         // X copy/transform to allow overlap as well as after the GPU NB
1163         // launch to avoid FFT launch overhead hijacking the CPU and delaying
1164         // the nonbonded kernel.
1165         launchPmeGpuFftAndGather(fr->pmedata, wcycle, useGpuFPmeReduction);
1166     }
1167
1168     /* Communicate coordinates and sum dipole if necessary +
1169        do non-local pair search */
1170     if (havePPDomainDecomposition(cr))
1171     {
1172         if (forceFlags.doNeighborSearch)
1173         {
1174             // TODO: fuse this branch with the above large forceFlags.doNeighborSearch block
1175             wallcycle_start_nocount(wcycle, ewcNS);
1176             wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);
1177             /* Note that with a GPU the launch overhead of the list transfer is not timed separately */
1178             nbv->constructPairlist(Nbnxm::InteractionLocality::NonLocal,
1179                                    &top->excls, step, nrnb);
1180
1181             nbv->setupGpuShortRangeWork(fr->gpuBonded, Nbnxm::InteractionLocality::NonLocal);
1182             wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);
1183             wallcycle_stop(wcycle, ewcNS);
1184         }
1185         else
1186         {
            dd_move_x(cr->dd, box, x.unpaddedArrayRef(), wcycle);

            nbv->setCoordinates(Nbnxm::AtomLocality::NonLocal, false,
                                x.unpaddedArrayRef(), useGpuXBufOps, pme_gpu_get_device_x(fr->pmedata));
        }

        if (bUseGPU)
        {
            wallcycle_start(wcycle, ewcLAUNCH_GPU);

            if (forceFlags.doNeighborSearch || (useGpuXBufOps == BufferOpsUseGpu::False))
            {
                wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
                Nbnxm::gpu_copy_xq_to_gpu(nbv->gpu_nbv, nbv->nbat.get(),
                                          Nbnxm::AtomLocality::NonLocal);
                wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
            }

            if (forceWork.haveGpuBondedWork)
            {
                wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_BONDED);
                fr->gpuBonded->launchKernel(fr, forceFlags, box);
                wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_BONDED);
            }

            /* launch non-local nonbonded tasks on GPU */
            wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
            do_nb_verlet(fr, ic, enerd, flags, forceFlags, Nbnxm::InteractionLocality::NonLocal, enbvClearFNo,
                         step, nrnb, wcycle);
            wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);

            wallcycle_stop(wcycle, ewcLAUNCH_GPU);
        }
    }

    if (bUseGPU)
    {
        /* launch D2H copy-back F */
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);

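        // The D2H copy of the nonbonded forces can be skipped when the GPU
        // buffer ops path will instead reduce the forces on the device.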
        bool copyBackNbForce  = (useGpuFBufOps == BufferOpsUseGpu::False);

        if (havePPDomainDecomposition(cr))
        {
            Nbnxm::gpu_launch_cpyback(nbv->gpu_nbv, nbv->nbat.get(),
                                      forceFlags, Nbnxm::AtomLocality::NonLocal, copyBackNbForce);
        }
        Nbnxm::gpu_launch_cpyback(nbv->gpu_nbv, nbv->nbat.get(),
                                  forceFlags, Nbnxm::AtomLocality::Local, copyBackNbForce);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);

        if (forceWork.haveGpuBondedWork && forceFlags.computeEnergy)
        {
            fr->gpuBonded->launchEnergyTransfer();
        }
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
    }

    if (forceFlags.stateChanged && inputrecNeedMutot(inputrec))
    {
        if (PAR(cr))
        {
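            /* Reduce the partial dipole moments over all ranks; mu holds
             * the state A and state B dipoles back to back, hence 2*DIM */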
            gmx_sumd(2*DIM, mu, cr);

            ddBalanceRegionHandler.reopenRegionCpu();
        }

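        /* Unpack the flat reduction buffer into the two per-state dipole vectors */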
        for (i = 0; i < 2; i++)
        {
            for (j = 0; j < DIM; j++)
            {
                fr->mu_tot[i][j] = mu[i*DIM + j];
            }
        }
    }
    if (fr->efep == efepNO)
    {
        copy_rvec(fr->mu_tot[0], mu_tot);
    }
    else
    {
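        /* With free-energy perturbation, interpolate the total dipole
         * linearly in the coulomb lambda between the A and B states:
         * mu_tot = (1 - lambda)*mu_A + lambda*mu_B */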
        for (j = 0; j < DIM; j++)
        {
            mu_tot[j] =
                (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] +
                lambda[efptCOUL]*fr->mu_tot[1][j];
        }
    }

    /* Reset energies */
    reset_enerdata(enerd);
    /* Clear the shift forces */
    // TODO: This should be linked to the shift force buffer in use, or cleared before use instead
    for (gmx::RVec &elem : fr->shiftForces)
    {
        elem = { 0.0_real, 0.0_real, 0.0_real };
    }

    if (DOMAINDECOMP(cr) && !thisRankHasDuty(cr, DUTY_PME))
    {
        wallcycle_start(wcycle, ewcPPDURINGPME);
        dd_force_flop_start(cr->dd, nrnb);
    }

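    /* Compute the enforced-rotation potentials and forces for the rotation groups */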
    if (inputrec->bRot)
    {
        wallcycle_start(wcycle, ewcROT);
        do_rotation(cr, enforcedRotation, box, as_rvec_array(x.unpaddedArrayRef().data()), t, step, forceFlags.doNeighborSearch);
        wallcycle_stop(wcycle, ewcROT);
    }

    /* Start the force cycle counter.
     * Note that a different counter is used for dynamic load balancing.
     */
    wallcycle_start(wcycle, ewcFORCE);

    // Set up and clear force outputs.
    // We use std::move only to keep the compiler happy; it has no effect.
    ForceOutputs forceOut = setupForceOutputs(fr, pull_work, *inputrec, std::move(force), forceFlags, wcycle);

    /* When the non-bonded forces are computed on the CPU, we do that here.
     * We do this before calling do_force_lowlevel, because in that
     * function the listed forces are calculated before PME, which
     * does communication. With this order, non-bonded and listed
     * force calculation imbalance can be balanced out by the domain
     * decomposition load balancing.
     */

    if (!bUseOrEmulGPU)
    {
        do_nb_verlet(fr, ic, enerd, flags, forceFlags, Nbnxm::InteractionLocality::Local, enbvClearFYes,
                     step, nrnb, wcycle);
    }

    if (fr->efep != efepNO)
    {
        /* Calculate the local and non-local free energy interactions here.
         * Happens here on the CPU both with and without GPU.
         */
        nbv->dispatchFreeEnergyKernel(Nbnxm::InteractionLocality::Local,
                                      fr, as_rvec_array(x.unpaddedArrayRef().data()), &forceOut.forceWithShiftForces(), *mdatoms,
                                      inputrec->fepvals, lambda.data(),
                                      enerd, flags, nrnb);

        if (havePPDomainDecomposition(cr))
        {
            nbv->dispatchFreeEnergyKernel(Nbnxm::InteractionLocality::NonLocal,
                                          fr, as_rvec_array(x.unpaddedArrayRef().data()), &forceOut.forceWithShiftForces(), *mdatoms,
                                          inputrec->fepvals, lambda.data(),
                                          enerd, flags, nrnb);
        }
    }

    if (!bUseOrEmulGPU)
    {
        if (havePPDomainDecomposition(cr))
        {
            do_nb_verlet(fr, ic, enerd, flags, forceFlags, Nbnxm::InteractionLocality::NonLocal, enbvClearFNo,
                         step, nrnb, wcycle);
        }

        /* Add all the non-bonded forces to the normal force array.
         * This can be split into a local and a non-local part when overlapping
         * communication with computation under domain decomposition.
         */
        wallcycle_stop(wcycle, ewcFORCE);
        nbv->atomdata_add_nbat_f_to_f(Nbnxm::AtomLocality::All, forceOut.forceWithShiftForces().force());
        wallcycle_start_nocount(wcycle, ewcFORCE);

        /* If there are multiple fshift output buffers we need to reduce them */
        if (forceFlags.computeVirial)
        {
            /* This is not in a subcounter because it takes a
               negligible and constant-sized amount of time */
            nbnxn_atomdata_add_nbat_fshift_to_fshift(*nbv->nbat,
                                                     forceOut.forceWithShiftForces().shiftForces());
        }
    }

    /* update QMMMrec, if necessary */
    if (fr->bQMMM)
    {
        update_QMMMrec(cr, fr, as_rvec_array(x.unpaddedArrayRef().data()), mdatoms, box);
    }

    /* Compute the bonded and non-bonded energies and optionally forces */
    do_force_lowlevel(fr, inputrec, &(top->idef),
                      cr, ms, nrnb, wcycle, mdatoms,
                      x, hist, &forceOut, enerd, fcd,
                      box, lambda.data(), graph, fr->mu_tot,
                      flags,
                      ddBalanceRegionHandler);

    wallcycle_stop(wcycle, ewcFORCE);

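    /* Compute forces from the "special" algorithms, e.g. COM pulling, AWH,
     * enforced rotation, IMD and any registered external force providers */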
    computeSpecialForces(fplog, cr, inputrec, awh, enforcedRotation,
                         imdSession, pull_work, step, t, wcycle,
                         fr->forceProviders, box, x.unpaddedArrayRef(), mdatoms, lambda.data(),
                         forceFlags, &forceOut.forceWithVirial(), enerd,
                         ed, forceFlags.doNeighborSearch);

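    // Determine whether any force contributions were computed on the CPU this
    // step; if so, they have to be included in the GPU-based force reduction.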
    bool                   useCpuFPmeReduction = thisRankHasDuty(cr, DUTY_PME) && !useGpuFPmeReduction;
    bool                   haveCpuForces       = (forceWork.haveSpecialForces || forceWork.haveCpuListedForceWork || useCpuFPmeReduction);

    // Stores the number of cycles spent waiting for the GPU,
    // which is used later in the DLB accounting.
    float cycles_wait_gpu = 0;
    if (bUseOrEmulGPU)
    {
        auto &forceWithShiftForces = forceOut.forceWithShiftForces();
        rvec *f                    = as_rvec_array(forceWithShiftForces.force().data());

        /* wait for non-local forces (or calculate in emulation mode) */
        if (havePPDomainDecomposition(cr))
        {
            if (bUseGPU)
            {
                cycles_wait_gpu += Nbnxm::gpu_wait_finish_task(nbv->gpu_nbv,
                                                               forceFlags, Nbnxm::AtomLocality::NonLocal,
                                                               enerd->grpp.ener[egLJSR].data(),
                                                               enerd->grpp.ener[egCOULSR].data(),
                                                               forceWithShiftForces.shiftForces(),
                                                               wcycle);
            }
            else
            {
                wallcycle_start_nocount(wcycle, ewcFORCE);
                do_nb_verlet(fr, ic, enerd, flags, forceFlags, Nbnxm::InteractionLocality::NonLocal, enbvClearFYes,
                             step, nrnb, wcycle);
                wallcycle_stop(wcycle, ewcFORCE);
            }

            if (useGpuFBufOps == BufferOpsUseGpu::True && haveCpuForces)
            {
                nbv->launch_copy_f_to_gpu(f, Nbnxm::AtomLocality::NonLocal);
            }

            // Flag to specify whether forces should be accumulated in the force
            // buffer ops. For the non-local part this depends only on whether CPU forces are present.
            bool accumulateForce = (useGpuFBufOps == BufferOpsUseGpu::True) && haveCpuForces;
            nbv->atomdata_add_nbat_f_to_f(Nbnxm::AtomLocality::NonLocal,
                                          forceWithShiftForces.force(), pme_gpu_get_device_f(fr->pmedata),
                                          pme_gpu_get_f_ready_synchronizer(fr->pmedata),
                                          useGpuFBufOps, useGpuFPmeReduction, accumulateForce);
            if (useGpuFBufOps == BufferOpsUseGpu::True)
            {
                nbv->launch_copy_f_from_gpu(f, Nbnxm::AtomLocality::NonLocal);
            }

            if (fr->nbv->emulateGpu() && forceFlags.computeVirial)
            {
                nbnxn_atomdata_add_nbat_fshift_to_fshift(*nbv->nbat,
                                                         forceWithShiftForces.shiftForces());
            }
        }
    }

    if (havePPDomainDecomposition(cr))
    {
        /* We are done with the CPU compute.
         * We will now communicate the non-local forces.
         * If we use a GPU this will overlap with GPU work, so in that case
         * we do not close the DD force balancing region here.
         */
        ddBalanceRegionHandler.closeAfterForceComputationCpu();

        if (forceFlags.computeForces)
        {
            if (useGpuFBufOps == BufferOpsUseGpu::True)
            {
                nbv->wait_for_gpu_force_reduction(Nbnxm::AtomLocality::NonLocal);
            }
            dd_move_f(cr->dd, &forceOut.forceWithShiftForces(), wcycle);
        }
    }

    // With both nonbonded and PME offloaded to a GPU on the same rank, we use
    // an alternating wait/reduction scheme.
    bool alternateGpuWait = (!c_disableAlternatingWait && useGpuPme && bUseGPU && !DOMAINDECOMP(cr) &&
                             (useGpuFBufOps == BufferOpsUseGpu::False));
    if (alternateGpuWait)
    {
        alternatePmeNbGpuWaitReduce(fr->nbv.get(), fr->pmedata, &forceOut, enerd,
                                    forceFlags, pmeFlags, wcycle);
    }

    if (!alternateGpuWait && useGpuPme)
    {
        pme_gpu_wait_and_reduce(fr->pmedata, pmeFlags, wcycle, &forceOut.forceWithVirial(), enerd, useGpuFPmeReduction);
    }

    /* Wait for local GPU NB outputs on the non-alternating wait path */
    if (!alternateGpuWait && bUseGPU)
    {
        /* Measured overhead on CUDA and OpenCL with(out) GPU sharing
         * is between 0.5 and 1.5 Mcycles. So 2 Mcycles is an overestimate,
         * but even with a step of 0.1 ms the difference is less than 1%
         * of the step time.
         */
        const float gpuWaitApiOverheadMargin = 2e6F; /* cycles */
        const float waitCycles               =
            Nbnxm::gpu_wait_finish_task(nbv->gpu_nbv,
                                        forceFlags, Nbnxm::AtomLocality::Local,
                                        enerd->grpp.ener[egLJSR].data(),
                                        enerd->grpp.ener[egCOULSR].data(),
                                        forceOut.forceWithShiftForces().shiftForces(),
                                        wcycle);

        if (ddBalanceRegionHandler.useBalancingRegion())
        {
            DdBalanceRegionWaitedForGpu waitedForGpu = DdBalanceRegionWaitedForGpu::yes;
            if (forceFlags.computeForces && waitCycles <= gpuWaitApiOverheadMargin)
            {
                /* We measured only a few cycles, so it could be that the kernel
                 * and transfer finished earlier and there was no actual
                 * wait time, only API call overhead.
                 * Then the actual time could be anywhere between 0 and
                 * waitCycles. We will use half of waitCycles.
                 */
                waitedForGpu = DdBalanceRegionWaitedForGpu::no;
            }
            ddBalanceRegionHandler.closeAfterForceComputationGpu(cycles_wait_gpu, waitedForGpu);
        }
    }

    if (fr->nbv->emulateGpu())
    {
        // NOTE: emulation kernel is not included in the balancing region,
        // but emulation mode does not target performance anyway
        wallcycle_start_nocount(wcycle, ewcFORCE);
        do_nb_verlet(fr, ic, enerd, flags, forceFlags, Nbnxm::InteractionLocality::Local,
                     DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes,
                     step, nrnb, wcycle);
        wallcycle_stop(wcycle, ewcFORCE);
    }

    /* Do the nonbonded GPU (or emulation) force buffer reduction
     * on the non-alternating path. */
    if (bUseOrEmulGPU && !alternateGpuWait)
    {
        gmx::ArrayRef<gmx::RVec>  force = forceOut.forceWithShiftForces().force();
        rvec                     *f     = as_rvec_array(force.data());

        // TODO: move these steps as early as possible:
        // - the CPU f H2D copy should happen as soon as all CPU-side forces are done
        // - waiting for the force reduction does not need to block the host (at least not here; it is
        //   sufficient to wait before the next CPU task that consumes the forces: vsite spread or update)
        //
        if (useGpuFBufOps == BufferOpsUseGpu::True && (haveCpuForces || DOMAINDECOMP(cr)))
        {
            nbv->launch_copy_f_to_gpu(f, Nbnxm::AtomLocality::Local);
        }
        // Flag to specify whether forces should be accumulated in the force
        // buffer ops. For the local part this depends on whether CPU
        // forces are present, or on whether DD is active (in which case the
        // halo exchange has resulted in contributions from the
        // non-local part).
        bool accumulateForce = (useGpuFBufOps == BufferOpsUseGpu::True) &&
            (haveCpuForces || DOMAINDECOMP(cr));
        nbv->atomdata_add_nbat_f_to_f(Nbnxm::AtomLocality::Local,
                                      force, pme_gpu_get_device_f(fr->pmedata),
                                      pme_gpu_get_f_ready_synchronizer(fr->pmedata),
                                      useGpuFBufOps, useGpuFPmeReduction, accumulateForce);
        if (useGpuFBufOps == BufferOpsUseGpu::True)
        {
            nbv->launch_copy_f_from_gpu(f, Nbnxm::AtomLocality::Local);
            nbv->wait_for_gpu_force_reduction(Nbnxm::AtomLocality::Local);
        }
    }

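    /* Launch any remaining end-of-step GPU tasks; these can overlap with
     * the CPU work that follows */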
    launchGpuEndOfStepTasks(nbv, fr->gpuBonded, fr->pmedata, enerd,
                            *mdScheduleWork,
                            bUseGPU, useGpuPme,
                            step,
                            wcycle);

    if (DOMAINDECOMP(cr))
    {
        dd_force_flop_stop(cr->dd, nrnb);
    }

    if (forceFlags.computeForces)
    {
        rvec *f = as_rvec_array(forceOut.forceWithShiftForces().force().data());

        /* If we have forces without direct virial contributions (NoVirSum),
         * but we do not calculate the virial, they are summed into the
         * normal force output later, in post_process_forces.
         */
        if (vsite && !(fr->haveDirectVirialContributions && !forceFlags.computeVirial))
        {
            rvec *fshift = as_rvec_array(forceOut.forceWithShiftForces().shiftForces().data());
            spread_vsite_f(vsite, as_rvec_array(x.unpaddedArrayRef().data()), f, fshift, FALSE, nullptr, nrnb,
                           &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr, wcycle);
        }

        if (forceFlags.computeVirial)
        {
            /* Calculation of the virial must be done after vsites! */
            calc_virial(0, mdatoms->homenr, as_rvec_array(x.unpaddedArrayRef().data()),
                        forceOut.forceWithShiftForces(),
                        vir_force, graph, box, nrnb, fr, inputrec->ePBC);
        }
    }

    if (PAR(cr) && !thisRankHasDuty(cr, DUTY_PME))
    {
        /* In case of node-splitting, the PP nodes receive the long-range
         * forces, virial and energy from the PME nodes here.
         */
        pme_receive_force_ener(cr, &forceOut.forceWithVirial(), enerd, wcycle);
    }

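    /* Post-processing: combine the separately accumulated force and virial
     * contributions into the final force and virial outputs where needed */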
    if (forceFlags.computeForces)
    {
        post_process_forces(cr, step, nrnb, wcycle,
                            top, box, as_rvec_array(x.unpaddedArrayRef().data()), &forceOut,
                            vir_force, mdatoms, graph, fr, vsite,
                            forceFlags);
    }

    if (forceFlags.computeEnergy)
    {
        /* Sum the potential energy terms from group contributions */
        sum_epot(&(enerd->grpp), enerd->term);

        if (!EI_TPI(inputrec->eI))
        {
            checkPotentialEnergyValidity(step, *enerd, *inputrec);
        }
    }

    /* In case we don't have constraints and are using GPUs, the next balancing
     * region starts here.
     * Some "special" work at the end of do_force_cuts?, such as vsite spread,
     * virial calculation and COM pulling, is thus not included in
     * the balance timing, which is OK as most tasks do communication.
     */
    ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::no);
}