alexxy/gromacs.git: src/gromacs/mdlib/sim_util.cpp (commit 5208ffc5c1c5c40e877551e139f97d7d3367d622)
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5  * Copyright (c) 2001-2004, The GROMACS development team.
6  * Copyright (c) 2013-2019,2020,2021, by the GROMACS development team, led by
7  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8  * and including many others, as listed in the AUTHORS file in the
9  * top-level source directory and at http://www.gromacs.org.
10  *
11  * GROMACS is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public License
13  * as published by the Free Software Foundation; either version 2.1
14  * of the License, or (at your option) any later version.
15  *
16  * GROMACS is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with GROMACS; if not, see
23  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
25  *
26  * If you want to redistribute modifications to GROMACS, please
27  * consider that scientific software is very special. Version
28  * control is crucial - bugs must be traceable. We will be happy to
29  * consider code for inclusion in the official distribution, but
30  * derived work must not be called official GROMACS. Details are found
31  * in the README & COPYING files - if they are missing, get the
32  * official version at http://www.gromacs.org.
33  *
34  * To help us fund GROMACS development, we humbly ask that you cite
35  * the research papers on the package. Check out http://www.gromacs.org.
36  */
37 #include "gmxpre.h"
38
39 #include "config.h"
40
41 #include <cmath>
42 #include <cstdint>
43 #include <cstdio>
44 #include <cstring>
45
46 #include <array>
47 #include <optional>
48
49 #include "gromacs/applied_forces/awh/awh.h"
50 #include "gromacs/domdec/dlbtiming.h"
51 #include "gromacs/domdec/domdec.h"
52 #include "gromacs/domdec/domdec_struct.h"
53 #include "gromacs/domdec/gpuhaloexchange.h"
54 #include "gromacs/domdec/partition.h"
55 #include "gromacs/essentialdynamics/edsam.h"
56 #include "gromacs/ewald/pme.h"
57 #include "gromacs/ewald/pme_pp.h"
58 #include "gromacs/ewald/pme_pp_comm_gpu.h"
59 #include "gromacs/gmxlib/network.h"
60 #include "gromacs/gmxlib/nonbonded/nb_free_energy.h"
61 #include "gromacs/gmxlib/nonbonded/nb_kernel.h"
62 #include "gromacs/gmxlib/nonbonded/nonbonded.h"
63 #include "gromacs/gpu_utils/gpu_utils.h"
64 #include "gromacs/imd/imd.h"
65 #include "gromacs/listed_forces/disre.h"
66 #include "gromacs/listed_forces/gpubonded.h"
67 #include "gromacs/listed_forces/listed_forces.h"
68 #include "gromacs/listed_forces/orires.h"
69 #include "gromacs/math/arrayrefwithpadding.h"
70 #include "gromacs/math/functions.h"
71 #include "gromacs/math/units.h"
72 #include "gromacs/math/vec.h"
73 #include "gromacs/math/vecdump.h"
74 #include "gromacs/mdlib/calcmu.h"
75 #include "gromacs/mdlib/calcvir.h"
76 #include "gromacs/mdlib/constr.h"
77 #include "gromacs/mdlib/dispersioncorrection.h"
78 #include "gromacs/mdlib/enerdata_utils.h"
79 #include "gromacs/mdlib/force.h"
80 #include "gromacs/mdlib/force_flags.h"
81 #include "gromacs/mdlib/forcerec.h"
82 #include "gromacs/mdlib/gmx_omp_nthreads.h"
83 #include "gromacs/mdlib/update.h"
84 #include "gromacs/mdlib/vsite.h"
85 #include "gromacs/mdlib/wall.h"
86 #include "gromacs/mdlib/wholemoleculetransform.h"
87 #include "gromacs/mdtypes/commrec.h"
88 #include "gromacs/mdtypes/enerdata.h"
89 #include "gromacs/mdtypes/forcebuffers.h"
90 #include "gromacs/mdtypes/forceoutput.h"
91 #include "gromacs/mdtypes/forcerec.h"
92 #include "gromacs/mdtypes/iforceprovider.h"
93 #include "gromacs/mdtypes/inputrec.h"
94 #include "gromacs/mdtypes/md_enums.h"
95 #include "gromacs/mdtypes/mdatom.h"
96 #include "gromacs/mdtypes/multipletimestepping.h"
97 #include "gromacs/mdtypes/simulation_workload.h"
98 #include "gromacs/mdtypes/state.h"
99 #include "gromacs/mdtypes/state_propagator_data_gpu.h"
100 #include "gromacs/nbnxm/gpu_data_mgmt.h"
101 #include "gromacs/nbnxm/nbnxm.h"
102 #include "gromacs/nbnxm/nbnxm_gpu.h"
103 #include "gromacs/pbcutil/ishift.h"
104 #include "gromacs/pbcutil/pbc.h"
105 #include "gromacs/pulling/pull.h"
106 #include "gromacs/pulling/pull_rotation.h"
107 #include "gromacs/timing/cyclecounter.h"
108 #include "gromacs/timing/gpu_timing.h"
109 #include "gromacs/timing/wallcycle.h"
110 #include "gromacs/timing/wallcyclereporting.h"
111 #include "gromacs/timing/walltime_accounting.h"
112 #include "gromacs/topology/topology.h"
113 #include "gromacs/utility/arrayref.h"
114 #include "gromacs/utility/basedefinitions.h"
115 #include "gromacs/utility/cstringutil.h"
116 #include "gromacs/utility/exceptions.h"
117 #include "gromacs/utility/fatalerror.h"
118 #include "gromacs/utility/fixedcapacityvector.h"
119 #include "gromacs/utility/gmxassert.h"
120 #include "gromacs/utility/gmxmpi.h"
121 #include "gromacs/utility/logger.h"
122 #include "gromacs/utility/smalloc.h"
123 #include "gromacs/utility/strconvert.h"
124 #include "gromacs/utility/sysinfo.h"
125
126 #include "gpuforcereduction.h"
127
128 using gmx::ArrayRef;
129 using gmx::AtomLocality;
130 using gmx::DomainLifetimeWorkload;
131 using gmx::ForceOutputs;
132 using gmx::ForceWithShiftForces;
133 using gmx::InteractionLocality;
134 using gmx::RVec;
135 using gmx::SimulationWorkload;
136 using gmx::StepWorkload;
137
138 // TODO: this environment variable allows us to verify before release
139 // that on less common architectures the total cost of polling is not larger than
140 // a blocking wait (so polling does not introduce overhead when the static
141 // PME-first ordering would suffice).
142 static const bool c_disableAlternatingWait = (getenv("GMX_DISABLE_ALTERNATING_GPU_WAIT") != nullptr);
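/* For example, the alternating wait can be disabled for a run by setting the variable
 * in the launch environment:
 *   GMX_DISABLE_ALTERNATING_GPU_WAIT=1 gmx mdrun -deffnm prod
 * (the -deffnm prefix here is only a placeholder).
 */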
143
144 static void sum_forces(ArrayRef<RVec> f, ArrayRef<const RVec> forceToAdd)
145 {
146     GMX_ASSERT(f.size() >= forceToAdd.size(), "Accumulation buffer should be sufficiently large");
147     const int end = forceToAdd.size();
148
149     int gmx_unused nt = gmx_omp_nthreads_get(emntDefault);
150 #pragma omp parallel for num_threads(nt) schedule(static)
151     for (int i = 0; i < end; i++)
152     {
153         rvec_inc(f[i], forceToAdd[i]);
154     }
155 }
156
157 static void calc_virial(int                              start,
158                         int                              homenr,
159                         const rvec                       x[],
160                         const gmx::ForceWithShiftForces& forceWithShiftForces,
161                         tensor                           vir_part,
162                         const matrix                     box,
163                         t_nrnb*                          nrnb,
164                         const t_forcerec*                fr,
165                         PbcType                          pbcType)
166 {
167     /* The short-range virial from surrounding boxes */
168     const rvec* fshift = as_rvec_array(forceWithShiftForces.shiftForces().data());
169     calc_vir(SHIFTS, fr->shift_vec, fshift, vir_part, pbcType == PbcType::Screw, box);
170     inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);
171
172     /* Calculate partial virial, for local atoms only, based on short range.
173      * Total virial is computed in global_stat, called from do_md
174      */
175     const rvec* f = as_rvec_array(forceWithShiftForces.force().data());
176     f_calc_vir(start, start + homenr, x, f, vir_part, box);
177     inc_nrnb(nrnb, eNR_VIRIAL, homenr);
178
179     if (debug)
180     {
181         pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
182     }
183 }
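
/* For reference, both contributions above follow the usual GROMACS virial convention,
 *   Xi = -1/2 * sum_i r_i (outer product) F_i,
 * accumulated once over the shift vectors with the shift forces and once over the
 * local atoms with their forces; see the reference manual for the full derivation.
 */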
184
185 static void pull_potential_wrapper(const t_commrec*               cr,
186                                    const t_inputrec&              ir,
187                                    const matrix                   box,
188                                    gmx::ArrayRef<const gmx::RVec> x,
189                                    gmx::ForceWithVirial*          force,
190                                    const t_mdatoms*               mdatoms,
191                                    gmx_enerdata_t*                enerd,
192                                    pull_t*                        pull_work,
193                                    const real*                    lambda,
194                                    double                         t,
195                                    gmx_wallcycle_t                wcycle)
196 {
197     t_pbc pbc;
198     real  dvdl;
199
200     /* Calculate the center of mass forces; this requires communication,
201      * which is why pull_potential is called close to other communication.
202      */
203     wallcycle_start(wcycle, ewcPULLPOT);
204     set_pbc(&pbc, ir.pbcType, box);
205     dvdl = 0;
206     enerd->term[F_COM_PULL] +=
207             pull_potential(pull_work,
208                            mdatoms->massT,
209                            &pbc,
210                            cr,
211                            t,
212                            lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Restraint)],
213                            as_rvec_array(x.data()),
214                            force,
215                            &dvdl);
216     enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Restraint] += dvdl;
217     wallcycle_stop(wcycle, ewcPULLPOT);
218 }
219
220 static void pme_receive_force_ener(t_forcerec*           fr,
221                                    const t_commrec*      cr,
222                                    gmx::ForceWithVirial* forceWithVirial,
223                                    gmx_enerdata_t*       enerd,
224                                    bool                  useGpuPmePpComms,
225                                    bool                  receivePmeForceToGpu,
226                                    gmx_wallcycle_t       wcycle)
227 {
228     real  e_q, e_lj, dvdl_q, dvdl_lj;
229     float cycles_ppdpme, cycles_seppme;
230
231     cycles_ppdpme = wallcycle_stop(wcycle, ewcPPDURINGPME);
232     dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);
233
234     /* In case of node-splitting, the PP nodes receive the long-range
235      * forces, virial and energy from the PME nodes here.
236      */
237     wallcycle_start(wcycle, ewcPP_PMEWAITRECVF);
238     dvdl_q  = 0;
239     dvdl_lj = 0;
240     gmx_pme_receive_f(fr->pmePpCommGpu.get(),
241                       cr,
242                       forceWithVirial,
243                       &e_q,
244                       &e_lj,
245                       &dvdl_q,
246                       &dvdl_lj,
247                       useGpuPmePpComms,
248                       receivePmeForceToGpu,
249                       &cycles_seppme);
250     enerd->term[F_COUL_RECIP] += e_q;
251     enerd->term[F_LJ_RECIP] += e_lj;
252     enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Coul] += dvdl_q;
253     enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Vdw] += dvdl_lj;
254
255     if (wcycle)
256     {
257         dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);
258     }
259     wallcycle_stop(wcycle, ewcPP_PMEWAITRECVF);
260 }
261
262 static void print_large_forces(FILE*                fp,
263                                const t_mdatoms*     md,
264                                const t_commrec*     cr,
265                                int64_t              step,
266                                real                 forceTolerance,
267                                ArrayRef<const RVec> x,
268                                ArrayRef<const RVec> f)
269 {
270     real       force2Tolerance = gmx::square(forceTolerance);
271     gmx::index numNonFinite    = 0;
272     for (int i = 0; i < md->homenr; i++)
273     {
274         real force2    = norm2(f[i]);
275         bool nonFinite = !std::isfinite(force2);
276         if (force2 >= force2Tolerance || nonFinite)
277         {
278             fprintf(fp,
279                     "step %" PRId64 " atom %6d  x %8.3f %8.3f %8.3f  force %12.5e\n",
280                     step,
281                     ddglatnr(cr->dd, i),
282                     x[i][XX],
283                     x[i][YY],
284                     x[i][ZZ],
285                     std::sqrt(force2));
286         }
287         if (nonFinite)
288         {
289             numNonFinite++;
290         }
291     }
292     if (numNonFinite > 0)
293     {
294         /* Note that with MPI this fatal call on one rank might interrupt
295          * the printing on other ranks. But we can only avoid that with
296          * an expensive MPI barrier that we would need at each step.
297          */
298         gmx_fatal(FARGS, "At step %" PRId64 " detected non-finite forces on %td atoms", step, numNonFinite);
299     }
300 }
301
302 //! When necessary, spreads forces on vsites and computes the virial for \p forceOutputs->forceWithShiftForces()
303 static void postProcessForceWithShiftForces(t_nrnb*                   nrnb,
304                                             gmx_wallcycle_t           wcycle,
305                                             const matrix              box,
306                                             ArrayRef<const RVec>      x,
307                                             ForceOutputs*             forceOutputs,
308                                             tensor                    vir_force,
309                                             const t_mdatoms&          mdatoms,
310                                             const t_forcerec&         fr,
311                                             gmx::VirtualSitesHandler* vsite,
312                                             const StepWorkload&       stepWork)
313 {
314     ForceWithShiftForces& forceWithShiftForces = forceOutputs->forceWithShiftForces();
315
316     /* If we have NoVirSum forces, but we do not calculate the virial,
317      * we later sum the forceWithShiftForces buffer together with
318      * the noVirSum buffer and spread the combined vsite forces at once.
319      */
320     if (vsite && (!forceOutputs->haveForceWithVirial() || stepWork.computeVirial))
321     {
322         using VirialHandling = gmx::VirtualSitesHandler::VirialHandling;
323
324         auto                 f      = forceWithShiftForces.force();
325         auto                 fshift = forceWithShiftForces.shiftForces();
326         const VirialHandling virialHandling =
327                 (stepWork.computeVirial ? VirialHandling::Pbc : VirialHandling::None);
328         vsite->spreadForces(x, f, virialHandling, fshift, nullptr, nrnb, box, wcycle);
329         forceWithShiftForces.haveSpreadVsiteForces() = true;
330     }
331
332     if (stepWork.computeVirial)
333     {
334         /* Calculation of the virial must be done after vsites! */
335         calc_virial(
336                 0, mdatoms.homenr, as_rvec_array(x.data()), forceWithShiftForces, vir_force, box, nrnb, &fr, fr.pbcType);
337     }
338 }
339
340 //! When necessary, spreads forces on vsites, computes the virial and sums the force buffers
341 static void postProcessForces(const t_commrec*          cr,
342                               int64_t                   step,
343                               t_nrnb*                   nrnb,
344                               gmx_wallcycle_t           wcycle,
345                               const matrix              box,
346                               ArrayRef<const RVec>      x,
347                               ForceOutputs*             forceOutputs,
348                               tensor                    vir_force,
349                               const t_mdatoms*          mdatoms,
350                               const t_forcerec*         fr,
351                               gmx::VirtualSitesHandler* vsite,
352                               const StepWorkload&       stepWork)
353 {
354     // Extract the final output force buffer, which is also the buffer for forces with shift forces
355     ArrayRef<RVec> f = forceOutputs->forceWithShiftForces().force();
356
357     if (forceOutputs->haveForceWithVirial())
358     {
359         auto& forceWithVirial = forceOutputs->forceWithVirial();
360
361         if (vsite)
362         {
363             /* Spread the mesh force on virtual sites to the other particles...
364              * This is parallelized. MPI communication is performed
365              * if the constructing atoms aren't local.
366              */
367             GMX_ASSERT(!stepWork.computeVirial || f.data() != forceWithVirial.force_.data(),
368                        "We need separate force buffers for shift and virial forces when "
369                        "computing the virial");
370             GMX_ASSERT(!stepWork.computeVirial
371                                || forceOutputs->forceWithShiftForces().haveSpreadVsiteForces(),
372                        "We should spread the force with shift forces separately when computing "
373                        "the virial");
374             const gmx::VirtualSitesHandler::VirialHandling virialHandling =
375                     (stepWork.computeVirial ? gmx::VirtualSitesHandler::VirialHandling::NonLinear
376                                             : gmx::VirtualSitesHandler::VirialHandling::None);
377             matrix virial = { { 0 } };
378             vsite->spreadForces(x, forceWithVirial.force_, virialHandling, {}, virial, nrnb, box, wcycle);
379             forceWithVirial.addVirialContribution(virial);
380         }
381
382         if (stepWork.computeVirial)
383         {
384             /* Now add the forces, this is local */
385             sum_forces(f, forceWithVirial.force_);
386
387             /* Add the direct virial contributions */
388             GMX_ASSERT(
389                     forceWithVirial.computeVirial_,
390                     "forceWithVirial should request virial computation when we request the virial");
391             m_add(vir_force, forceWithVirial.getVirial(), vir_force);
392
393             if (debug)
394             {
395                 pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
396             }
397         }
398     }
399     else
400     {
401         GMX_ASSERT(vsite == nullptr || forceOutputs->forceWithShiftForces().haveSpreadVsiteForces(),
402                    "We should have spread the vsite forces (earlier)");
403     }
404
405     if (fr->print_force >= 0)
406     {
407         print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
408     }
409 }
410
411 static void do_nb_verlet(t_forcerec*                fr,
412                          const interaction_const_t* ic,
413                          gmx_enerdata_t*            enerd,
414                          const StepWorkload&        stepWork,
415                          const InteractionLocality  ilocality,
416                          const int                  clearF,
417                          const int64_t              step,
418                          t_nrnb*                    nrnb,
419                          gmx_wallcycle_t            wcycle)
420 {
421     if (!stepWork.computeNonbondedForces)
422     {
423         /* skip non-bonded calculation */
424         return;
425     }
426
427     nonbonded_verlet_t* nbv = fr->nbv.get();
428
429     /* GPU kernel launch overhead is already timed separately */
430     if (!nbv->useGpu())
431     {
432         /* When dynamic pair-list pruning is requested, we need to prune
433          * at nstlistPrune steps.
434          */
435         if (nbv->isDynamicPruningStepCpu(step))
436         {
437             /* Prune the pair-list beyond fr->ic->rlistPrune using
438              * the current coordinates of the atoms.
439              */
440             wallcycle_sub_start(wcycle, ewcsNONBONDED_PRUNING);
441             nbv->dispatchPruneKernelCpu(ilocality, fr->shift_vec);
442             wallcycle_sub_stop(wcycle, ewcsNONBONDED_PRUNING);
443         }
444     }
445
446     nbv->dispatchNonbondedKernel(ilocality, *ic, stepWork, clearF, *fr, enerd, nrnb);
447 }
448
449 static inline void clearRVecs(ArrayRef<RVec> v, const bool useOpenmpThreading)
450 {
451     int nth = gmx_omp_nthreads_get_simple_rvec_task(emntDefault, v.ssize());
452
453     /* Note that we would like to avoid this conditional by putting it
454      * into the omp pragma instead, but then we still take the full
455      * omp parallel for overhead (at least with gcc5).
456      */
457     if (!useOpenmpThreading || nth == 1)
458     {
459         for (RVec& elem : v)
460         {
461             clear_rvec(elem);
462         }
463     }
464     else
465     {
466 #pragma omp parallel for num_threads(nth) schedule(static)
467         for (gmx::index i = 0; i < v.ssize(); i++)
468         {
469             clear_rvec(v[i]);
470         }
471     }
472 }
473
474 /*! \brief Return an estimate of the average kinetic energy or 0 when unreliable
475  *
476  * \param groupOptions  Group options, containing T-coupling options
477  */
478 static real averageKineticEnergyEstimate(const t_grpopts& groupOptions)
479 {
480     real nrdfCoupled   = 0;
481     real nrdfUncoupled = 0;
482     real kineticEnergy = 0;
483     for (int g = 0; g < groupOptions.ngtc; g++)
484     {
485         if (groupOptions.tau_t[g] >= 0)
486         {
487             nrdfCoupled += groupOptions.nrdf[g];
488             kineticEnergy += groupOptions.nrdf[g] * 0.5 * groupOptions.ref_t[g] * gmx::c_boltz;
489         }
490         else
491         {
492             nrdfUncoupled += groupOptions.nrdf[g];
493         }
494     }
495
496     /* This conditional with > also catches nrdf=0 */
497     if (nrdfCoupled > nrdfUncoupled)
498     {
499         return kineticEnergy * (nrdfCoupled + nrdfUncoupled) / nrdfCoupled;
500     }
501     else
502     {
503         return 0;
504     }
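
/* Worked example with illustrative numbers: a single T-coupled group with
 * nrdf = 3000 degrees of freedom at ref_t = 300 K gives
 *   0.5 * 3000 * kB * 300 K ~ 0.5 * 3000 * 0.008314 kJ/(mol K) * 300 K ~ 3.7e3 kJ/mol,
 * and with no uncoupled groups the scaling factor (nrdfCoupled + nrdfUncoupled) / nrdfCoupled
 * equals 1, so this value is returned unchanged.
 */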
505 }
506
507 /*! \brief This routine checks that the potential energy is finite.
508  *
509  * Always checks that the potential energy is finite. If step equals
510  * inputrec.init_step also checks that the magnitude of the potential energy
511  * is reasonable. Terminates with a fatal error when a check fails.
512  * Note that passing this check does not guarantee finite forces,
513  * since those use slightly different arithmetic. But in most cases
514  * there is just a narrow coordinate range where forces are not finite
515  * and energies are finite.
516  *
517  * \param[in] step      The step number, used for checking and printing
518  * \param[in] enerd     The energy data; the non-bonded group energies need to be added to
519  * enerd.term[F_EPOT] before calling this routine \param[in] inputrec  The input record
520  */
521 static void checkPotentialEnergyValidity(int64_t step, const gmx_enerdata_t& enerd, const t_inputrec& inputrec)
522 {
523     /* Threshold valid for comparing absolute potential energy against
524      * the kinetic energy. Normally one should not consider absolute
525      * potential energy values, but with a factor of one million
526      * we should never get false positives.
527      */
528     constexpr real c_thresholdFactor = 1e6;
529
530     bool energyIsNotFinite    = !std::isfinite(enerd.term[F_EPOT]);
531     real averageKineticEnergy = 0;
532     /* We only check for large potential energy at the initial step,
533      * because that is by far the most likely step for this to occur
534      * and because computing the average kinetic energy is not free.
535      * Note: nstcalcenergy >> 1 often does not allow catching large energies
536      * before they become NaN.
537      */
538     if (step == inputrec.init_step && EI_DYNAMICS(inputrec.eI))
539     {
540         averageKineticEnergy = averageKineticEnergyEstimate(inputrec.opts);
541     }
542
543     if (energyIsNotFinite
544         || (averageKineticEnergy > 0 && enerd.term[F_EPOT] > c_thresholdFactor * averageKineticEnergy))
545     {
546         gmx_fatal(
547                 FARGS,
548                 "Step %" PRId64
549                 ": The total potential energy is %g, which is %s. The LJ and electrostatic "
550                 "contributions to the energy are %g and %g, respectively. A %s potential energy "
551                 "can be caused by overlapping interactions in bonded interactions or very large%s "
552                 "coordinate values. Usually this is caused by a badly- or non-equilibrated initial "
553                 "configuration, incorrect interactions or parameters in the topology.",
554                 step,
555                 enerd.term[F_EPOT],
556                 energyIsNotFinite ? "not finite" : "extremely high",
557                 enerd.term[F_LJ],
558                 enerd.term[F_COUL_SR],
559                 energyIsNotFinite ? "non-finite" : "very high",
560                 energyIsNotFinite ? " or NaN" : "");
561     }
562 }
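
/* Together with the estimate above this means that, for a finite but "extremely high"
 * potential energy, the fatal error only triggers when enerd.term[F_EPOT] exceeds
 * c_thresholdFactor (1e6) times the average kinetic energy estimate, i.e. far beyond
 * anything a well-behaved system produces, so false positives are not expected.
 */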
563
564 /*! \brief Return true if there are special forces computed this step.
565  *
566  * The conditionals exactly correspond to those in computeSpecialForces().
567  */
568 static bool haveSpecialForces(const t_inputrec&          inputrec,
569                               const gmx::ForceProviders& forceProviders,
570                               const pull_t*              pull_work,
571                               const bool                 computeForces,
572                               const gmx_edsam*           ed)
573 {
574
575     return ((computeForces && forceProviders.hasForceProvider()) || // forceProviders
576             (inputrec.bPull && pull_have_potential(*pull_work)) ||  // pull
577             inputrec.bRot ||                                        // enforced rotation
578             (ed != nullptr) ||                                      // flooding
579             (inputrec.bIMD && computeForces));                      // IMD
580 }
581
582 /*! \brief Compute forces and/or energies for special algorithms
583  *
584  * The intention is to collect all calls to algorithms that compute
585  * forces on local atoms only and that do not contribute to the local
586  * virial sum (but add their virial contribution separately).
587  * Eventually these should likely all become ForceProviders.
588  * Within this function the intention is to have algorithms that do
589  * global communication at the end, so global barriers within the MD loop
590  * are as close together as possible.
591  *
592  * \param[in]     fplog            The log file
593  * \param[in]     cr               The communication record
594  * \param[in]     inputrec         The input record
595  * \param[in]     awh              The Awh module (nullptr if none in use).
596  * \param[in]     enforcedRotation Enforced rotation module.
597  * \param[in]     imdSession       The IMD session
598  * \param[in]     pull_work        The pull work structure.
599  * \param[in]     step             The current MD step
600  * \param[in]     t                The current time
601  * \param[in,out] wcycle           Wallcycle accounting struct
602  * \param[in,out] forceProviders   Pointer to a list of force providers
603  * \param[in]     box              The unit cell
604  * \param[in]     x                The coordinates
605  * \param[in]     mdatoms          Per atom properties
606  * \param[in]     lambda           Array of free-energy lambda values
607  * \param[in]     stepWork         Step schedule flags
608  * \param[in,out] forceWithVirialMtsLevel0  Force and virial for MTS level0 forces
609  * \param[in,out] forceWithVirialMtsLevel1  Force and virial for MTS level1 forces, can be nullptr
610  * \param[in,out] enerd            Energy buffer
611  * \param[in,out] ed               Essential dynamics pointer
612  * \param[in]     didNeighborSearch Tells if we did neighbor searching this step, used for ED sampling
613  *
614  * \todo Remove didNeighborSearch, which is used incorrectly.
615  * \todo Convert all other algorithms called here to ForceProviders.
616  */
617 static void computeSpecialForces(FILE*                          fplog,
618                                  const t_commrec*               cr,
619                                  const t_inputrec&              inputrec,
620                                  gmx::Awh*                      awh,
621                                  gmx_enfrot*                    enforcedRotation,
622                                  gmx::ImdSession*               imdSession,
623                                  pull_t*                        pull_work,
624                                  int64_t                        step,
625                                  double                         t,
626                                  gmx_wallcycle_t                wcycle,
627                                  gmx::ForceProviders*           forceProviders,
628                                  const matrix                   box,
629                                  gmx::ArrayRef<const gmx::RVec> x,
630                                  const t_mdatoms*               mdatoms,
631                                  gmx::ArrayRef<const real>      lambda,
632                                  const StepWorkload&            stepWork,
633                                  gmx::ForceWithVirial*          forceWithVirialMtsLevel0,
634                                  gmx::ForceWithVirial*          forceWithVirialMtsLevel1,
635                                  gmx_enerdata_t*                enerd,
636                                  gmx_edsam*                     ed,
637                                  bool                           didNeighborSearch)
638 {
639     /* NOTE: Currently all ForceProviders only provide forces.
640      *       When they also provide energies, remove this conditional.
641      */
642     if (stepWork.computeForces)
643     {
644         gmx::ForceProviderInput  forceProviderInput(x, *mdatoms, t, box, *cr);
645         gmx::ForceProviderOutput forceProviderOutput(forceWithVirialMtsLevel0, enerd);
646
647         /* Collect forces from modules */
648         forceProviders->calculateForces(forceProviderInput, &forceProviderOutput);
649     }
650
651     if (inputrec.bPull && pull_have_potential(*pull_work))
652     {
653         const int mtsLevel = forceGroupMtsLevel(inputrec.mtsLevels, gmx::MtsForceGroups::Pull);
654         if (mtsLevel == 0 || stepWork.computeSlowForces)
655         {
656             auto& forceWithVirial = (mtsLevel == 0) ? forceWithVirialMtsLevel0 : forceWithVirialMtsLevel1;
657             pull_potential_wrapper(
658                     cr, inputrec, box, x, forceWithVirial, mdatoms, enerd, pull_work, lambda.data(), t, wcycle);
659         }
660     }
661     if (awh)
662     {
663         const int mtsLevel = forceGroupMtsLevel(inputrec.mtsLevels, gmx::MtsForceGroups::Pull);
664         if (mtsLevel == 0 || stepWork.computeSlowForces)
665         {
666             const bool needForeignEnergyDifferences = awh->needForeignEnergyDifferences(step);
667             std::vector<double> foreignLambdaDeltaH, foreignLambdaDhDl;
668             if (needForeignEnergyDifferences)
669             {
670                 enerd->foreignLambdaTerms.finalizePotentialContributions(
671                         enerd->dvdl_lin, lambda, *inputrec.fepvals);
672                 std::tie(foreignLambdaDeltaH, foreignLambdaDhDl) = enerd->foreignLambdaTerms.getTerms(cr);
673             }
674
675             auto& forceWithVirial = (mtsLevel == 0) ? forceWithVirialMtsLevel0 : forceWithVirialMtsLevel1;
676             enerd->term[F_COM_PULL] += awh->applyBiasForcesAndUpdateBias(inputrec.pbcType,
677                                                                          mdatoms->massT,
678                                                                          foreignLambdaDeltaH,
679                                                                          foreignLambdaDhDl,
680                                                                          box,
681                                                                          forceWithVirial,
682                                                                          t,
683                                                                          step,
684                                                                          wcycle,
685                                                                          fplog);
686         }
687     }
688
689     rvec* f = as_rvec_array(forceWithVirialMtsLevel0->force_.data());
690
691     /* Add the forces from enforced rotation potentials (if any) */
692     if (inputrec.bRot)
693     {
694         wallcycle_start(wcycle, ewcROTadd);
695         enerd->term[F_COM_PULL] += add_rot_forces(enforcedRotation, f, cr, step, t);
696         wallcycle_stop(wcycle, ewcROTadd);
697     }
698
699     if (ed)
700     {
701         /* Note that since init_edsam() is called after the initialization
702          * of forcerec, edsam doesn't request the noVirSum force buffer.
703          * Thus if no other algorithm (e.g. PME) requires it, the forces
704          * here will contribute to the virial.
705          */
706         do_flood(cr, inputrec, as_rvec_array(x.data()), f, ed, box, step, didNeighborSearch);
707     }
708
709     /* Add forces from interactive molecular dynamics (IMD), if any */
710     if (inputrec.bIMD && stepWork.computeForces)
711     {
712         imdSession->applyForces(f);
713     }
714 }
715
716 /*! \brief Launch the prepare_step and spread stages of PME GPU.
717  *
718  * \param[in]  pmedata              The PME structure
719  * \param[in]  box                  The box matrix
720  * \param[in]  stepWork             Step schedule flags
721  * \param[in]  xReadyOnDevice       Event synchronizer indicating that the coordinates are ready in the device memory.
722  * \param[in]  lambdaQ              The Coulomb lambda of the current state.
723  * \param[in]  wcycle               The wallcycle structure
724  */
725 static inline void launchPmeGpuSpread(gmx_pme_t*            pmedata,
726                                       const matrix          box,
727                                       const StepWorkload&   stepWork,
728                                       GpuEventSynchronizer* xReadyOnDevice,
729                                       const real            lambdaQ,
730                                       gmx_wallcycle_t       wcycle)
731 {
732     pme_gpu_prepare_computation(pmedata, box, wcycle, stepWork);
733     pme_gpu_launch_spread(pmedata, xReadyOnDevice, wcycle, lambdaQ);
734 }
735
736 /*! \brief Launch the FFT and gather stages of PME GPU
737  *
738  * This function only implements setting the output forces (no accumulation).
739  *
740  * \param[in]  pmedata        The PME structure
741  * \param[in]  lambdaQ        The Coulomb lambda of the current system state.
742  * \param[in]  wcycle         The wallcycle structure
743  * \param[in]  stepWork       Step schedule flags
744  */
745 static void launchPmeGpuFftAndGather(gmx_pme_t*               pmedata,
746                                      const real               lambdaQ,
747                                      gmx_wallcycle_t          wcycle,
748                                      const gmx::StepWorkload& stepWork)
749 {
750     pme_gpu_launch_complex_transforms(pmedata, wcycle, stepWork);
751     pme_gpu_launch_gather(pmedata, wcycle, lambdaQ);
752 }
753
754 /*! \brief
755  *  Polling wait for either of the PME or nonbonded GPU tasks.
756  *
757  * Instead of waiting for the GPU tasks in a static order, this function
758  * polls to check which of the two tasks completes first, and overlaps the
759  * associated force-buffer reduction with the other task.
760  * By doing that, unlike a static scheduling order, it can always overlap
761  * one of the reductions, regardless of the order in which the GPU tasks complete.
762  *
763  * \param[in]     nbv              Nonbonded verlet structure
764  * \param[in,out] pmedata          PME module data
765  * \param[in,out] forceOutputsNonbonded  Force outputs for the non-bonded forces and shift forces
766  * \param[in,out] forceOutputsPme  Force outputs for the PME forces and virial
767  * \param[in,out] enerd            Energy data structure results are reduced into
768  * \param[in]     lambdaQ          The Coulomb lambda of the current system state.
769  * \param[in]     stepWork         Step schedule flags
770  * \param[in]     wcycle           The wallcycle structure
771  */
772 static void alternatePmeNbGpuWaitReduce(nonbonded_verlet_t* nbv,
773                                         gmx_pme_t*          pmedata,
774                                         gmx::ForceOutputs*  forceOutputsNonbonded,
775                                         gmx::ForceOutputs*  forceOutputsPme,
776                                         gmx_enerdata_t*     enerd,
777                                         const real          lambdaQ,
778                                         const StepWorkload& stepWork,
779                                         gmx_wallcycle_t     wcycle)
780 {
781     bool isPmeGpuDone = false;
782     bool isNbGpuDone  = false;
783
784     gmx::ArrayRef<const gmx::RVec> pmeGpuForces;
785
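    /* While neither task is done, both are polled non-blockingly (GpuTaskCompletion::Check);
     * once one of them has completed, the remaining task is waited on blockingly
     * (GpuTaskCompletion::Wait), since there is no more work left to overlap with it.
     */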
786     while (!isPmeGpuDone || !isNbGpuDone)
787     {
788         if (!isPmeGpuDone)
789         {
790             GpuTaskCompletion completionType =
791                     (isNbGpuDone) ? GpuTaskCompletion::Wait : GpuTaskCompletion::Check;
792             isPmeGpuDone = pme_gpu_try_finish_task(
793                     pmedata, stepWork, wcycle, &forceOutputsPme->forceWithVirial(), enerd, lambdaQ, completionType);
794         }
795
796         if (!isNbGpuDone)
797         {
798             auto&             forceBuffersNonbonded = forceOutputsNonbonded->forceWithShiftForces();
799             GpuTaskCompletion completionType =
800                     (isPmeGpuDone) ? GpuTaskCompletion::Wait : GpuTaskCompletion::Check;
801             isNbGpuDone = Nbnxm::gpu_try_finish_task(
802                     nbv->gpu_nbv,
803                     stepWork,
804                     AtomLocality::Local,
805                     enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::LJSR].data(),
806                     enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::CoulombSR].data(),
807                     forceBuffersNonbonded.shiftForces(),
808                     completionType,
809                     wcycle);
810
811             if (isNbGpuDone)
812             {
813                 nbv->atomdata_add_nbat_f_to_f(AtomLocality::Local, forceBuffersNonbonded.force());
814             }
815         }
816     }
817 }
818
819 /*! \brief Set up the different force buffers; also does clearing.
820  *
821  * \param[in] forceHelperBuffers  Helper force buffers
822  * \param[in] force     force array
823  * \param[in] stepWork  Step schedule flags
824  * \param[out] wcycle   wallcycle recording structure
825  *
826  * \returns             Cleared force output structure
827  */
828 static ForceOutputs setupForceOutputs(ForceHelperBuffers*                 forceHelperBuffers,
829                                       gmx::ArrayRefWithPadding<gmx::RVec> force,
830                                       const StepWorkload&                 stepWork,
831                                       gmx_wallcycle_t                     wcycle)
832 {
833     wallcycle_sub_start(wcycle, ewcsCLEAR_FORCE_BUFFER);
834
835     /* NOTE: We assume fr->shiftForces is all zeros here */
836     gmx::ForceWithShiftForces forceWithShiftForces(
837             force, stepWork.computeVirial, forceHelperBuffers->shiftForces());
838
839     if (stepWork.computeForces)
840     {
841         /* Clear the short- and long-range forces */
842         clearRVecs(forceWithShiftForces.force(), true);
843
844         /* Clear the shift forces */
845         clearRVecs(forceWithShiftForces.shiftForces(), false);
846     }
847
848     /* If we need to compute the virial, we might need a separate
849      * force buffer for algorithms for which the virial is calculated
850      * directly, such as PME. Otherwise, forceWithVirial uses the
851      * directly, such as PME. Otherwise, forceWithVirial uses
852      */
853     const bool useSeparateForceWithVirialBuffer =
854             (stepWork.computeForces
855              && (stepWork.computeVirial && forceHelperBuffers->haveDirectVirialContributions()));
856     /* forceWithVirial uses the local atom range only */
857     gmx::ForceWithVirial forceWithVirial(
858             useSeparateForceWithVirialBuffer ? forceHelperBuffers->forceBufferForDirectVirialContributions()
859                                              : force.unpaddedArrayRef(),
860             stepWork.computeVirial);
861
862     if (useSeparateForceWithVirialBuffer)
863     {
864         /* TODO: update comment
865          * We only compute forces on local atoms. Note that vsites can
866          * spread to non-local atoms, but that part of the buffer is
867          * cleared separately in the vsite spreading code.
868          */
869         clearRVecs(forceWithVirial.force_, true);
870     }
871
872     wallcycle_sub_stop(wcycle, ewcsCLEAR_FORCE_BUFFER);
873
874     return ForceOutputs(
875             forceWithShiftForces, forceHelperBuffers->haveDirectVirialContributions(), forceWithVirial);
876 }
877
878
879 /*! \brief Set up flags with the lifetime of the domain, indicating what types of work there are to compute.
880  */
881 static DomainLifetimeWorkload setupDomainLifetimeWorkload(const t_inputrec&         inputrec,
882                                                           const t_forcerec&         fr,
883                                                           const pull_t*             pull_work,
884                                                           const gmx_edsam*          ed,
885                                                           const t_mdatoms&          mdatoms,
886                                                           const SimulationWorkload& simulationWork,
887                                                           const StepWorkload&       stepWork)
888 {
889     DomainLifetimeWorkload domainWork;
890     // Note that haveSpecialForces is constant over the whole run
891     domainWork.haveSpecialForces =
892             haveSpecialForces(inputrec, *fr.forceProviders, pull_work, stepWork.computeForces, ed);
893     domainWork.haveCpuListedForceWork = false;
894     domainWork.haveCpuBondedWork      = false;
895     for (const auto& listedForces : fr.listedForces)
896     {
897         if (listedForces.haveCpuListedForces(*fr.fcdata))
898         {
899             domainWork.haveCpuListedForceWork = true;
900         }
901         if (listedForces.haveCpuBondeds())
902         {
903             domainWork.haveCpuBondedWork = true;
904         }
905     }
906     domainWork.haveGpuBondedWork = ((fr.gpuBonded != nullptr) && fr.gpuBonded->haveInteractions());
907     // Note that haveFreeEnergyWork is constant over the whole run
908     domainWork.haveFreeEnergyWork =
909             (fr.efep != FreeEnergyPerturbationType::No && mdatoms.nPerturbed != 0);
910     // We assume we have local force work if there are CPU
911     // force tasks including PME or nonbondeds.
912     domainWork.haveCpuLocalForceWork =
913             domainWork.haveSpecialForces || domainWork.haveCpuListedForceWork
914             || domainWork.haveFreeEnergyWork || simulationWork.useCpuNonbonded || simulationWork.useCpuPme
915             || simulationWork.haveEwaldSurfaceContribution || inputrec.nwall > 0;
916
917     return domainWork;
918 }
919
920 /*! \brief Set up the force flag struct from the force bitmask.
921  *
922  * \param[in]      legacyFlags          Force bitmask flags used to construct the new flags
923  * \param[in]      mtsLevels            The multiple time-stepping levels, either empty or 2 levels
924  * \param[in]      step                 The current MD step
925  * \param[in]      simulationWork       Simulation workload description.
926  * \param[in]      rankHasPmeDuty       If this rank computes PME.
927  *
928  * \returns New StepWorkload description.
929  */
930 static StepWorkload setupStepWorkload(const int                     legacyFlags,
931                                       ArrayRef<const gmx::MtsLevel> mtsLevels,
932                                       const int64_t                 step,
933                                       const SimulationWorkload&     simulationWork,
934                                       const bool                    rankHasPmeDuty)
935 {
936     GMX_ASSERT(mtsLevels.empty() || mtsLevels.size() == 2, "Expect 0 or 2 MTS levels");
937     const bool computeSlowForces = (mtsLevels.empty() || step % mtsLevels[1].stepFactor == 0);
938
939     StepWorkload flags;
940     flags.stateChanged        = ((legacyFlags & GMX_FORCE_STATECHANGED) != 0);
941     flags.haveDynamicBox      = ((legacyFlags & GMX_FORCE_DYNAMICBOX) != 0);
942     flags.doNeighborSearch    = ((legacyFlags & GMX_FORCE_NS) != 0);
943     flags.computeSlowForces   = computeSlowForces;
944     flags.computeVirial       = ((legacyFlags & GMX_FORCE_VIRIAL) != 0);
945     flags.computeEnergy       = ((legacyFlags & GMX_FORCE_ENERGY) != 0);
946     flags.computeForces       = ((legacyFlags & GMX_FORCE_FORCES) != 0);
947     flags.computeListedForces = ((legacyFlags & GMX_FORCE_LISTED) != 0);
948     flags.computeNonbondedForces =
949             ((legacyFlags & GMX_FORCE_NONBONDED) != 0) && simulationWork.computeNonbonded
950             && !(simulationWork.computeNonbondedAtMtsLevel1 && !computeSlowForces);
951     flags.computeDhdl = ((legacyFlags & GMX_FORCE_DHDL) != 0);
952
953     if (simulationWork.useGpuBufferOps)
954     {
955         GMX_ASSERT(simulationWork.useGpuNonbonded,
956                    "Can only offload buffer ops if nonbonded computation is also offloaded");
957     }
958     flags.useGpuXBufferOps = simulationWork.useGpuBufferOps;
959     // on virial steps the CPU reduction path is taken
960     flags.useGpuFBufferOps = simulationWork.useGpuBufferOps && !flags.computeVirial;
961     flags.useGpuPmeFReduction = flags.computeSlowForces && flags.useGpuFBufferOps && simulationWork.useGpuPme
962                                 && (rankHasPmeDuty || simulationWork.useGpuPmePpCommunication);
963     flags.useGpuXHalo = simulationWork.useGpuHaloExchange;
964     flags.useGpuFHalo = simulationWork.useGpuHaloExchange && flags.useGpuFBufferOps;
965
966     return flags;
967 }
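
/* Illustrative use, mirroring the call made from do_force() further below: a dynamics step
 * that needs forces and energies but no virial could pass, e.g.,
 *
 *   const int legacyFlags = GMX_FORCE_STATECHANGED | GMX_FORCE_FORCES | GMX_FORCE_LISTED
 *                           | GMX_FORCE_NONBONDED | GMX_FORCE_ENERGY;
 *   runScheduleWork->stepWork = setupStepWorkload(
 *           legacyFlags, inputrec.mtsLevels, step, simulationWork, thisRankHasDuty(cr, DUTY_PME));
 *
 * which yields computeForces == computeEnergy == true and computeVirial == false.
 */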
968
969
970 /*! \brief Launch end-of-step GPU tasks: buffer clearing and rolling pruning.
971  *
972  * TODO: eliminate \p useGpuPmeOnThisRank when this is
973  * incorporated in DomainLifetimeWorkload.
974  */
975 static void launchGpuEndOfStepTasks(nonbonded_verlet_t*               nbv,
976                                     gmx::GpuBonded*                   gpuBonded,
977                                     gmx_pme_t*                        pmedata,
978                                     gmx_enerdata_t*                   enerd,
979                                     const gmx::MdrunScheduleWorkload& runScheduleWork,
980                                     bool                              useGpuPmeOnThisRank,
981                                     int64_t                           step,
982                                     gmx_wallcycle_t                   wcycle)
983 {
984     if (runScheduleWork.simulationWork.useGpuNonbonded && runScheduleWork.stepWork.computeNonbondedForces)
985     {
986         /* Launch pruning before buffer clearing because the API overhead of the
987          * clear kernel launches can leave the GPU idle while it could be running
988          * the prune kernel.
989          */
990         if (nbv->isDynamicPruningStepGpu(step))
991         {
992             nbv->dispatchPruneKernelGpu(step);
993         }
994
995         /* now clear the GPU outputs while we finish the step on the CPU */
996         wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
997         wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
998         Nbnxm::gpu_clear_outputs(nbv->gpu_nbv, runScheduleWork.stepWork.computeVirial);
999         wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1000         wallcycle_stop(wcycle, ewcLAUNCH_GPU);
1001     }
1002
1003     if (useGpuPmeOnThisRank)
1004     {
1005         pme_gpu_reinit_computation(pmedata, wcycle);
1006     }
1007
1008     if (runScheduleWork.domainWork.haveGpuBondedWork && runScheduleWork.stepWork.computeEnergy)
1009     {
1010         // in principle this should be included in the DD balancing region,
1011         // but generally it is infrequent so we'll omit it for the sake of
1012         // simpler code
1013         gpuBonded->waitAccumulateEnergyTerms(enerd);
1014
1015         gpuBonded->clearEnergies();
1016     }
1017 }
1018
1019 //! \brief Data structure to hold dipole-related data and staging arrays
1020 struct DipoleData
1021 {
1022     //! Dipole staging for fast summing over MPI
1023     gmx::DVec muStaging[2] = { { 0.0, 0.0, 0.0 } };
1024     //! Dipole staging for states A and B (index 0 and 1 resp.)
1025     gmx::RVec muStateAB[2] = { { 0.0_real, 0.0_real, 0.0_real } };
1026 };
1027
1028
1029 static void reduceAndUpdateMuTot(DipoleData*                   dipoleData,
1030                                  const t_commrec*              cr,
1031                                  const bool                    haveFreeEnergy,
1032                                  gmx::ArrayRef<const real>     lambda,
1033                                  rvec                          muTotal,
1034                                  const DDBalanceRegionHandler& ddBalanceRegionHandler)
1035 {
1036     if (PAR(cr))
1037     {
1038         gmx_sumd(2 * DIM, dipoleData->muStaging[0], cr);
1039         ddBalanceRegionHandler.reopenRegionCpu();
1040     }
1041     for (int i = 0; i < 2; i++)
1042     {
1043         for (int j = 0; j < DIM; j++)
1044         {
1045             dipoleData->muStateAB[i][j] = dipoleData->muStaging[i][j];
1046         }
1047     }
1048
1049     if (!haveFreeEnergy)
1050     {
1051         copy_rvec(dipoleData->muStateAB[0], muTotal);
1052     }
1053     else
1054     {
1055         for (int j = 0; j < DIM; j++)
1056         {
1057             muTotal[j] = (1.0 - lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)])
1058                                  * dipoleData->muStateAB[0][j]
1059                          + lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)]
1060                                    * dipoleData->muStateAB[1][j];
1061         }
1062     }
1063 }
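
/* In the free-energy branch above the total dipole is the linear interpolation
 *   mu_total = (1 - lambda_coul) * mu_A + lambda_coul * mu_B
 * between the state A and state B dipoles, which reduces to mu_A at lambda_coul = 0.
 */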
1064
1065 /*! \brief Combines the MTS level0 and level1 force buffers into a full force buffer and an MTS-combined force buffer.
1066  *
1067  * \param[in]     numAtoms        The number of atoms to combine forces for
1068  * \param[in,out] forceMtsLevel0  Input: F_level0, output: F_level0 + F_level1
1069  * \param[in,out] forceMts        Input: F_level1, output: F_level0 + mtsFactor * F_level1
1070  * \param[in]     mtsFactor       The factor between the level0 and level1 time step
1071  */
1072 static void combineMtsForces(const int      numAtoms,
1073                              ArrayRef<RVec> forceMtsLevel0,
1074                              ArrayRef<RVec> forceMts,
1075                              const real     mtsFactor)
1076 {
1077     const int gmx_unused numThreads = gmx_omp_nthreads_get(emntDefault);
1078 #pragma omp parallel for num_threads(numThreads) schedule(static)
1079     for (int i = 0; i < numAtoms; i++)
1080     {
1081         const RVec forceMtsLevel0Tmp = forceMtsLevel0[i];
1082         forceMtsLevel0[i] += forceMts[i];
1083         forceMts[i] = forceMtsLevel0Tmp + mtsFactor * forceMts[i];
1084     }
1085 }
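
/* Worked example with illustrative numbers: with mtsFactor = 2 and, for some atom i,
 * F_level0 = (1, 0, 0) and F_level1 = (0.5, 0, 0), the loop above leaves
 *   forceMtsLevel0[i] = (1.5, 0, 0)   (the full force, F_level0 + F_level1)
 *   forceMts[i]       = (2, 0, 0)     (the MTS-combined force, F_level0 + mtsFactor * F_level1)
 */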
1086
1087 /*! \brief Setup for the local and non-local GPU force reductions:
1088  * reinitialization plus the registration of forces and dependencies.
1089  *
1090  * \param [in] runScheduleWork               Schedule workload flag structure
1091  * \param [in] cr                            Communication record object
1092  * \param [in] fr                            Force record object
1093  */
1094 static void setupGpuForceReductions(gmx::MdrunScheduleWorkload* runScheduleWork,
1095                                     const t_commrec*            cr,
1096                                     t_forcerec*                 fr)
1097 {
1098
1099     nonbonded_verlet_t*          nbv      = fr->nbv.get();
1100     gmx::StatePropagatorDataGpu* stateGpu = fr->stateGpu;
1101
1102     // (re-)initialize local GPU force reduction
1103     const bool accumulate =
1104             runScheduleWork->domainWork.haveCpuLocalForceWork || havePPDomainDecomposition(cr);
1105     const int atomStart = 0;
1106     fr->gpuForceReduction[gmx::AtomLocality::Local]->reinit(stateGpu->getForces(),
1107                                                             nbv->getNumAtoms(AtomLocality::Local),
1108                                                             nbv->getGridIndices(),
1109                                                             atomStart,
1110                                                             accumulate,
1111                                                             stateGpu->fReducedOnDevice());
1112
1113     // register forces and add dependencies
1114     fr->gpuForceReduction[gmx::AtomLocality::Local]->registerNbnxmForce(nbv->getGpuForces());
1115
1116     if (runScheduleWork->simulationWork.useGpuPme
1117         && (thisRankHasDuty(cr, DUTY_PME) || runScheduleWork->simulationWork.useGpuPmePpCommunication))
1118     {
1119         void* forcePtr = thisRankHasDuty(cr, DUTY_PME) ? pme_gpu_get_device_f(fr->pmedata)
1120                                                        : // PME force buffer on same GPU
1121                                  fr->pmePpCommGpu->getGpuForceStagingPtr(); // buffer received from other GPU
1122         fr->gpuForceReduction[gmx::AtomLocality::Local]->registerRvecForce(forcePtr);
1123
1124         GpuEventSynchronizer* const pmeSynchronizer =
1125                 (thisRankHasDuty(cr, DUTY_PME) ? pme_gpu_get_f_ready_synchronizer(fr->pmedata)
1126                                                : // PME force buffer on same GPU
1127                          fr->pmePpCommGpu->getForcesReadySynchronizer()); // buffer received from other GPU
1128         fr->gpuForceReduction[gmx::AtomLocality::Local]->addDependency(pmeSynchronizer);
1129     }
1130
1131     if ((runScheduleWork->domainWork.haveCpuLocalForceWork || havePPDomainDecomposition(cr))
1132         && !runScheduleWork->simulationWork.useGpuHaloExchange)
1133     {
1134         auto forcesReadyLocality = havePPDomainDecomposition(cr) ? AtomLocality::Local : AtomLocality::All;
1135         const bool useGpuForceBufferOps = true;
1136         fr->gpuForceReduction[gmx::AtomLocality::Local]->addDependency(
1137                 stateGpu->getForcesReadyOnDeviceEvent(forcesReadyLocality, useGpuForceBufferOps));
1138     }
1139
1140     if (runScheduleWork->simulationWork.useGpuHaloExchange)
1141     {
1142         fr->gpuForceReduction[gmx::AtomLocality::Local]->addDependency(
1143                 cr->dd->gpuHaloExchange[0][0]->getForcesReadyOnDeviceEvent());
1144     }
1145
1146     if (havePPDomainDecomposition(cr))
1147     {
1148         // (re-)initialize non-local GPU force reduction
1149         const bool accumulate = runScheduleWork->domainWork.haveCpuBondedWork
1150                                 || runScheduleWork->domainWork.haveFreeEnergyWork;
1151         const int atomStart = dd_numHomeAtoms(*cr->dd);
1152         fr->gpuForceReduction[gmx::AtomLocality::NonLocal]->reinit(stateGpu->getForces(),
1153                                                                    nbv->getNumAtoms(AtomLocality::NonLocal),
1154                                                                    nbv->getGridIndices(),
1155                                                                    atomStart,
1156                                                                    accumulate);
1157
1158         // register forces and add dependencies
1159         fr->gpuForceReduction[gmx::AtomLocality::NonLocal]->registerNbnxmForce(nbv->getGpuForces());
1160         if (runScheduleWork->domainWork.haveCpuBondedWork || runScheduleWork->domainWork.haveFreeEnergyWork)
1161         {
1162             fr->gpuForceReduction[gmx::AtomLocality::NonLocal]->addDependency(
1163                     stateGpu->getForcesReadyOnDeviceEvent(AtomLocality::NonLocal, true));
1164         }
1165     }
1166 }
1167
1168
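     /* Rough outline of do_force() (the code below is authoritative): set up the
      * step and domain workloads, transfer/send coordinates to the GPU and/or PME
      * ranks, do pair search or coordinate conversion, launch GPU nonbonded and
      * bonded kernels, compute CPU-side forces (free-energy, listed and special
      * forces), communicate halo forces, reduce GPU and CPU force contributions,
      * and post-process forces, virial and energies.
      */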
1169 void do_force(FILE*                               fplog,
1170               const t_commrec*                    cr,
1171               const gmx_multisim_t*               ms,
1172               const t_inputrec&                   inputrec,
1173               gmx::Awh*                           awh,
1174               gmx_enfrot*                         enforcedRotation,
1175               gmx::ImdSession*                    imdSession,
1176               pull_t*                             pull_work,
1177               int64_t                             step,
1178               t_nrnb*                             nrnb,
1179               gmx_wallcycle_t                     wcycle,
1180               const gmx_localtop_t*               top,
1181               const matrix                        box,
1182               gmx::ArrayRefWithPadding<gmx::RVec> x,
1183               const history_t*                    hist,
1184               gmx::ForceBuffersView*              forceView,
1185               tensor                              vir_force,
1186               const t_mdatoms*                    mdatoms,
1187               gmx_enerdata_t*                     enerd,
1188               gmx::ArrayRef<const real>           lambda,
1189               t_forcerec*                         fr,
1190               gmx::MdrunScheduleWorkload*         runScheduleWork,
1191               gmx::VirtualSitesHandler*           vsite,
1192               rvec                                muTotal,
1193               double                              t,
1194               gmx_edsam*                          ed,
1195               int                                 legacyFlags,
1196               const DDBalanceRegionHandler&       ddBalanceRegionHandler)
1197 {
1198     auto force = forceView->forceWithPadding();
1199     GMX_ASSERT(force.unpaddedArrayRef().ssize() >= fr->natoms_force_constr,
1200                "The size of the force buffer should be at least the number of atoms to compute "
1201                "forces for");
1202
1203     nonbonded_verlet_t*  nbv = fr->nbv.get();
1204     interaction_const_t* ic  = fr->ic.get();
1205
1206     gmx::StatePropagatorDataGpu* stateGpu = fr->stateGpu;
1207
1208     const SimulationWorkload& simulationWork = runScheduleWork->simulationWork;
1209
1210     runScheduleWork->stepWork = setupStepWorkload(
1211             legacyFlags, inputrec.mtsLevels, step, simulationWork, thisRankHasDuty(cr, DUTY_PME));
1212     const StepWorkload& stepWork = runScheduleWork->stepWork;
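         // Note on workload granularity: simulationWork flags are constant for the
         // whole run, domainWork flags for the lifetime of a domain (between
         // repartitionings), and stepWork flags can change from step to step.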
1213
1214     const bool useGpuPmeOnThisRank =
1215             simulationWork.useGpuPme && thisRankHasDuty(cr, DUTY_PME) && stepWork.computeSlowForces;
1216
1217     /* At a search step we need to start the first balancing region
1218      * somewhere early inside the step after communication during domain
1219      * decomposition (and not during the previous step as usual).
1220      */
1221     if (stepWork.doNeighborSearch)
1222     {
1223         ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::yes);
1224     }
1225
1226     clear_mat(vir_force);
1227
1228     if (fr->pbcType != PbcType::No)
1229     {
1230         /* Compute shift vectors every step,
1231          * because of pressure coupling or box deformation!
1232          */
1233         if (stepWork.haveDynamicBox && stepWork.stateChanged)
1234         {
1235             calc_shifts(box, fr->shift_vec);
1236         }
1237
1238         const bool fillGrid = (stepWork.doNeighborSearch && stepWork.stateChanged);
1239         const bool calcCGCM = (fillGrid && !DOMAINDECOMP(cr));
1240         if (calcCGCM)
1241         {
1242             put_atoms_in_box_omp(fr->pbcType,
1243                                  box,
1244                                  x.unpaddedArrayRef().subArray(0, mdatoms->homenr),
1245                                  gmx_omp_nthreads_get(emntDefault));
1246             inc_nrnb(nrnb, eNR_SHIFTX, mdatoms->homenr);
1247         }
1248     }
1249
1250     nbnxn_atomdata_copy_shiftvec(stepWork.haveDynamicBox, fr->shift_vec, nbv->nbat.get());
1251
1252     const bool pmeSendCoordinatesFromGpu =
1253             GMX_MPI && simulationWork.useGpuPmePpCommunication && !(stepWork.doNeighborSearch);
1254     const bool reinitGpuPmePpComms =
1255             GMX_MPI && simulationWork.useGpuPmePpCommunication && (stepWork.doNeighborSearch);
1256
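         // Event signalling that the local coordinates are ready on the device; it is
         // consumed below as a dependency by the PME send/spread, the X buffer ops
         // and the GPU halo exchange.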
1257     const auto localXReadyOnDevice = (useGpuPmeOnThisRank || simulationWork.useGpuBufferOps)
1258                                              ? stateGpu->getCoordinatesReadyOnDeviceEvent(
1259                                                        AtomLocality::Local, simulationWork, stepWork)
1260                                              : nullptr;
1261
1262     // Copy coordinates from the GPU if update is done on the GPU and there
1263     // are forces to be computed on the CPU, or for the computation of the
1264     // virial, or if host-side data will be transferred from this task
1265     // to a remote task for halo exchange or PME-PP communication. At
1266     // search steps the current coordinates are already on the host,
1267     // hence the copy is not needed.
1268     const bool haveHostPmePpComms =
1269             !thisRankHasDuty(cr, DUTY_PME) && !simulationWork.useGpuPmePpCommunication;
1270
1271     GMX_ASSERT(simulationWork.useGpuHaloExchange
1272                        == ((cr->dd != nullptr) && (!cr->dd->gpuHaloExchange[0].empty())),
1273                "The GPU halo exchange is active, but it has not been constructed.");
1274     const bool haveHostHaloExchangeComms =
1275             havePPDomainDecomposition(cr) && !simulationWork.useGpuHaloExchange;
1276
1277     bool gmx_used_in_debug haveCopiedXFromGpu = false;
1278     if (simulationWork.useGpuUpdate && !stepWork.doNeighborSearch
1279         && (runScheduleWork->domainWork.haveCpuLocalForceWork || stepWork.computeVirial
1280             || haveHostPmePpComms || haveHostHaloExchangeComms))
1281     {
1282         stateGpu->copyCoordinatesFromGpu(x.unpaddedArrayRef(), AtomLocality::Local);
1283         haveCopiedXFromGpu = true;
1284     }
1285
1286     // If coordinates are to be sent to PME task from CPU memory, perform that send here.
1287     // Otherwise the send will occur after H2D coordinate transfer.
1288     if (GMX_MPI && !thisRankHasDuty(cr, DUTY_PME) && !pmeSendCoordinatesFromGpu && stepWork.computeSlowForces)
1289     {
1290         /* Send particle coordinates to the pme nodes */
1291         if (!stepWork.doNeighborSearch && simulationWork.useGpuUpdate)
1292         {
1293             stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
1294         }
1295
1296         gmx_pme_send_coordinates(fr,
1297                                  cr,
1298                                  box,
1299                                  as_rvec_array(x.unpaddedArrayRef().data()),
1300                                  lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)],
1301                                  lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Vdw)],
1302                                  (stepWork.computeVirial || stepWork.computeEnergy),
1303                                  step,
1304                                  simulationWork.useGpuPmePpCommunication,
1305                                  reinitGpuPmePpComms,
1306                                  pmeSendCoordinatesFromGpu,
1307                                  localXReadyOnDevice,
1308                                  wcycle);
1309     }
1310
1311     // Coordinates on the device are needed if PME or BufferOps are offloaded.
1312     // The local coordinates can be copied right away.
1313     // NOTE: Consider moving this copy to right after they are updated and constrained,
1314     //       if the latter is not offloaded.
1315     if (useGpuPmeOnThisRank || stepWork.useGpuXBufferOps)
1316     {
1317         if (stepWork.doNeighborSearch)
1318         {
1319             // TODO refactor this to do_md, after partitioning.
1320             stateGpu->reinit(mdatoms->homenr,
1321                              cr->dd != nullptr ? dd_numAtomsZones(*cr->dd) : mdatoms->homenr);
1322             if (useGpuPmeOnThisRank)
1323             {
1324                 // TODO: This should be moved into PME setup function ( pme_gpu_prepare_computation(...) )
1325                 pme_gpu_set_device_x(fr->pmedata, stateGpu->getCoordinates());
1326             }
1327         }
1328         // We need to copy coordinates when:
1329         // 1. Update is not offloaded
1330         // 2. The buffers were reinitialized on a search step
1331         if (!simulationWork.useGpuUpdate || stepWork.doNeighborSearch)
1332         {
1333             GMX_ASSERT(stateGpu != nullptr, "stateGpu should not be null");
1334             stateGpu->copyCoordinatesToGpu(x.unpaddedArrayRef(), AtomLocality::Local);
1335         }
1336     }
1337
1338     // If coordinates are to be sent to PME task from GPU memory, perform that send here.
1339     // Otherwise the send will occur before the H2D coordinate transfer.
1340     if (!thisRankHasDuty(cr, DUTY_PME) && pmeSendCoordinatesFromGpu)
1341     {
1342         /* Send particle coordinates to the pme nodes */
1343         gmx_pme_send_coordinates(fr,
1344                                  cr,
1345                                  box,
1346                                  as_rvec_array(x.unpaddedArrayRef().data()),
1347                                  lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)],
1348                                  lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Vdw)],
1349                                  (stepWork.computeVirial || stepWork.computeEnergy),
1350                                  step,
1351                                  simulationWork.useGpuPmePpCommunication,
1352                                  reinitGpuPmePpComms,
1353                                  pmeSendCoordinatesFromGpu,
1354                                  localXReadyOnDevice,
1355                                  wcycle);
1356     }
1357
1358     if (useGpuPmeOnThisRank)
1359     {
1360         launchPmeGpuSpread(fr->pmedata,
1361                            box,
1362                            stepWork,
1363                            localXReadyOnDevice,
1364                            lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)],
1365                            wcycle);
1366     }
1367
1368     const gmx::DomainLifetimeWorkload& domainWork = runScheduleWork->domainWork;
1369
1370     /* do gridding for pair search */
1371     if (stepWork.doNeighborSearch)
1372     {
1373         if (fr->wholeMoleculeTransform && stepWork.stateChanged)
1374         {
1375             fr->wholeMoleculeTransform->updateForAtomPbcJumps(x.unpaddedArrayRef(), box);
1376         }
1377
1378         wallcycle_start(wcycle, ewcNS);
1379         if (!DOMAINDECOMP(cr))
1380         {
1381             const rvec vzero       = { 0.0_real, 0.0_real, 0.0_real };
1382             const rvec boxDiagonal = { box[XX][XX], box[YY][YY], box[ZZ][ZZ] };
1383             wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
1384             nbnxn_put_on_grid(nbv,
1385                               box,
1386                               0,
1387                               vzero,
1388                               boxDiagonal,
1389                               nullptr,
1390                               { 0, mdatoms->homenr },
1391                               -1,
1392                               fr->cginfo,
1393                               x.unpaddedArrayRef(),
1394                               0,
1395                               nullptr);
1396             wallcycle_sub_stop(wcycle, ewcsNBS_GRID_LOCAL);
1397         }
1398         else
1399         {
1400             wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
1401             nbnxn_put_on_grid_nonlocal(nbv, domdec_zones(cr->dd), fr->cginfo, x.unpaddedArrayRef());
1402             wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);
1403         }
1404
1405         nbv->setAtomProperties(gmx::constArrayRefFromArray(mdatoms->typeA, mdatoms->nr),
1406                                gmx::constArrayRefFromArray(mdatoms->chargeA, mdatoms->nr),
1407                                fr->cginfo);
1408
1409         wallcycle_stop(wcycle, ewcNS);
1410
1411         /* initialize the GPU nbnxm atom data and bonded data structures */
1412         if (simulationWork.useGpuNonbonded)
1413         {
1414             // Note: cycle counting covers only the nonbondeds; gpuBonded counts internally
1415             wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
1416             wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1417             Nbnxm::gpu_init_atomdata(nbv->gpu_nbv, nbv->nbat.get());
1418             wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1419             wallcycle_stop(wcycle, ewcLAUNCH_GPU);
1420
1421             if (fr->gpuBonded)
1422             {
1423                 /* Now that we have put all atoms on the grid, we can assign bonded
1424                  * interactions to the GPU, where the grid order is
1425                  * needed. Also the xq, f and fshift device buffers have
1426                  * been reallocated if needed, so the bonded code can
1427                  * learn about them. */
1428                 // TODO the xq, f, and fshift buffers are now shared
1429                 // resources, so they should be maintained by a
1430                 // higher-level object than the nb module.
1431                 fr->gpuBonded->updateInteractionListsAndDeviceBuffers(nbv->getGridIndices(),
1432                                                                       top->idef,
1433                                                                       Nbnxm::gpu_get_xq(nbv->gpu_nbv),
1434                                                                       Nbnxm::gpu_get_f(nbv->gpu_nbv),
1435                                                                       Nbnxm::gpu_get_fshift(nbv->gpu_nbv));
1436             }
1437         }
1438
1439         // This needs to run after the GPU-offloaded bonded interaction lists
1440         // are set up, to be able to determine whether there is bonded work.
1441         runScheduleWork->domainWork = setupDomainLifetimeWorkload(
1442                 inputrec, *fr, pull_work, ed, *mdatoms, simulationWork, stepWork);
1443
1444         wallcycle_start_nocount(wcycle, ewcNS);
1445         wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
1446         /* Note that with a GPU the launch overhead of the list transfer is not timed separately */
1447         nbv->constructPairlist(InteractionLocality::Local, top->excls, step, nrnb);
1448
1449         nbv->setupGpuShortRangeWork(fr->gpuBonded, InteractionLocality::Local);
1450
1451         wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_LOCAL);
1452         wallcycle_stop(wcycle, ewcNS);
1453
1454         if (stepWork.useGpuXBufferOps)
1455         {
1456             nbv->atomdata_init_copy_x_to_nbat_x_gpu();
1457         }
1458
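             // On a search step the grids and device buffers may have changed, so the
             // GPU force reduction and its dependencies are (re-)established here.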
1459         if (simulationWork.useGpuBufferOps)
1460         {
1461             setupGpuForceReductions(runScheduleWork, cr, fr);
1462         }
1463     }
1464     else if (!EI_TPI(inputrec.eI) && stepWork.computeNonbondedForces)
1465     {
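             // Off search steps the coordinates only need to be converted to the nbnxm
             // layout: on the GPU when X buffer ops are offloaded, otherwise on the CPU
             // after making sure any GPU-updated coordinates have arrived on the host.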
1466         if (stepWork.useGpuXBufferOps)
1467         {
1468             GMX_ASSERT(stateGpu, "stateGpu should be valid when buffer ops are offloaded");
1469             nbv->convertCoordinatesGpu(AtomLocality::Local, stateGpu->getCoordinates(), localXReadyOnDevice);
1470         }
1471         else
1472         {
1473             if (simulationWork.useGpuUpdate)
1474             {
1475                 GMX_ASSERT(stateGpu, "need a valid stateGpu object");
1476                 GMX_ASSERT(haveCopiedXFromGpu,
1477                            "a wait should only be triggered if copy has been scheduled");
1478                 stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
1479             }
1480             nbv->convertCoordinates(AtomLocality::Local, x.unpaddedArrayRef());
1481         }
1482     }
1483
1484     if (simulationWork.useGpuNonbonded && (stepWork.computeNonbondedForces || domainWork.haveGpuBondedWork))
1485     {
1486         ddBalanceRegionHandler.openBeforeForceComputationGpu();
1487
1488         wallcycle_start(wcycle, ewcLAUNCH_GPU);
1489         wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1490         Nbnxm::gpu_upload_shiftvec(nbv->gpu_nbv, nbv->nbat.get());
1491         if (stepWork.doNeighborSearch || !stepWork.useGpuXBufferOps)
1492         {
1493             Nbnxm::gpu_copy_xq_to_gpu(nbv->gpu_nbv, nbv->nbat.get(), AtomLocality::Local);
1494         }
1495         wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1496         wallcycle_stop(wcycle, ewcLAUNCH_GPU);
1497         // With X buffer ops offloaded to the GPU, the copy above is needed only on search steps.
1498
1499         // The bonded work is not split into separate local and non-local parts, so with DD
1500         // we can only launch the kernel after the non-local coordinates have been received.
1501         if (domainWork.haveGpuBondedWork && !havePPDomainDecomposition(cr))
1502         {
1503             fr->gpuBonded->setPbcAndlaunchKernel(fr->pbcType, box, fr->bMolPBC, stepWork);
1504         }
1505
1506         /* launch local nonbonded work on GPU */
1507         wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
1508         wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1509         do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::Local, enbvClearFNo, step, nrnb, wcycle);
1510         wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1511         wallcycle_stop(wcycle, ewcLAUNCH_GPU);
1512     }
1513
1514     if (useGpuPmeOnThisRank)
1515     {
1516         // In PME GPU and mixed mode we launch FFT / gather after the
1517         // X copy/transform to allow overlap as well as after the GPU NB
1518         // launch to avoid FFT launch overhead hijacking the CPU and delaying
1519         // the nonbonded kernel.
1520         launchPmeGpuFftAndGather(fr->pmedata,
1521                                  lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)],
1522                                  wcycle,
1523                                  stepWork);
1524     }
1525
1526     /* Communicate coordinates and sum dipole if necessary +
1527        do non-local pair search */
1528     if (havePPDomainDecomposition(cr))
1529     {
1530         if (stepWork.doNeighborSearch)
1531         {
1532             // TODO: fuse this branch with the above large stepWork.doNeighborSearch block
1533             wallcycle_start_nocount(wcycle, ewcNS);
1534             wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);
1535             /* Note that with a GPU the launch overhead of the list transfer is not timed separately */
1536             nbv->constructPairlist(InteractionLocality::NonLocal, top->excls, step, nrnb);
1537
1538             nbv->setupGpuShortRangeWork(fr->gpuBonded, InteractionLocality::NonLocal);
1539             wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);
1540             wallcycle_stop(wcycle, ewcNS);
1541             // TODO refactor this GPU halo exchange re-initialisation
1542             // to the location in do_md where the GPU halo exchange is
1543             // constructed at partitioning, after the stateGpu
1544             // re-initialization above has similarly been refactored
1545             if (simulationWork.useGpuHaloExchange)
1546             {
1547                 reinitGpuHaloExchange(*cr, stateGpu->getCoordinates(), stateGpu->getForces());
1548             }
1549         }
1550         else
1551         {
1552             if (stepWork.useGpuXHalo)
1553             {
1554                 // The following must be called after local setCoordinates (which records an event
1555                 // when the coordinate data has been copied to the device).
1556                 communicateGpuHaloCoordinates(*cr, box, localXReadyOnDevice);
1557
1558                 if (domainWork.haveCpuBondedWork || domainWork.haveFreeEnergyWork)
1559                 {
1560                     // non-local part of coordinate buffer must be copied back to host for CPU work
1561                     stateGpu->copyCoordinatesFromGpu(x.unpaddedArrayRef(), AtomLocality::NonLocal);
1562                 }
1563             }
1564             else
1565             {
1566                 if (simulationWork.useGpuUpdate)
1567                 {
1568                     GMX_ASSERT(haveCopiedXFromGpu,
1569                                "a wait should only be triggered if copy has been scheduled");
1570                     stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
1571                 }
1572                 dd_move_x(cr->dd, box, x.unpaddedArrayRef(), wcycle);
1573             }
1574
1575             if (stepWork.useGpuXBufferOps)
1576             {
1577                 if (!useGpuPmeOnThisRank && !stepWork.useGpuXHalo)
1578                 {
1579                     stateGpu->copyCoordinatesToGpu(x.unpaddedArrayRef(), AtomLocality::NonLocal);
1580                 }
1581                 nbv->convertCoordinatesGpu(AtomLocality::NonLocal,
1582                                            stateGpu->getCoordinates(),
1583                                            stateGpu->getCoordinatesReadyOnDeviceEvent(
1584                                                    AtomLocality::NonLocal, simulationWork, stepWork));
1585             }
1586             else
1587             {
1588                 nbv->convertCoordinates(AtomLocality::NonLocal, x.unpaddedArrayRef());
1589             }
1590         }
1591
1592         if (simulationWork.useGpuNonbonded)
1593         {
1594
1595             if (stepWork.doNeighborSearch || !stepWork.useGpuXBufferOps)
1596             {
1597                 wallcycle_start(wcycle, ewcLAUNCH_GPU);
1598                 wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1599                 Nbnxm::gpu_copy_xq_to_gpu(nbv->gpu_nbv, nbv->nbat.get(), AtomLocality::NonLocal);
1600                 wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1601                 wallcycle_stop(wcycle, ewcLAUNCH_GPU);
1602             }
1603
1604             if (domainWork.haveGpuBondedWork)
1605             {
1606                 fr->gpuBonded->setPbcAndlaunchKernel(fr->pbcType, box, fr->bMolPBC, stepWork);
1607             }
1608
1609             /* launch non-local nonbonded tasks on GPU */
1610             wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
1611             wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1612             do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::NonLocal, enbvClearFNo, step, nrnb, wcycle);
1613             wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1614             wallcycle_stop(wcycle, ewcLAUNCH_GPU);
1615         }
1616     }
1617
1618     if (simulationWork.useGpuNonbonded && stepWork.computeNonbondedForces)
1619     {
1620         /* launch D2H copy-back F */
1621         wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
1622         wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1623
1624         if (havePPDomainDecomposition(cr))
1625         {
1626             Nbnxm::gpu_launch_cpyback(nbv->gpu_nbv, nbv->nbat.get(), stepWork, AtomLocality::NonLocal);
1627         }
1628         Nbnxm::gpu_launch_cpyback(nbv->gpu_nbv, nbv->nbat.get(), stepWork, AtomLocality::Local);
1629         wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1630
1631         if (domainWork.haveGpuBondedWork && stepWork.computeEnergy)
1632         {
1633             fr->gpuBonded->launchEnergyTransfer();
1634         }
1635         wallcycle_stop(wcycle, ewcLAUNCH_GPU);
1636     }
1637
1638     gmx::ArrayRef<const gmx::RVec> xWholeMolecules;
1639     if (fr->wholeMoleculeTransform)
1640     {
1641         xWholeMolecules = fr->wholeMoleculeTransform->wholeMoleculeCoordinates(x.unpaddedArrayRef(), box);
1642     }
1643
1644     DipoleData dipoleData;
1645
1646     if (simulationWork.computeMuTot)
1647     {
1648         const int start = 0;
1649
1650         /* Calculate the total (local) dipole moment in a temporary common array.
1651          * This makes it possible to sum it over the nodes faster.
1652          */
1653         gmx::ArrayRef<const gmx::RVec> xRef =
1654                 (xWholeMolecules.empty() ? x.unpaddedArrayRef() : xWholeMolecules);
1655         calc_mu(start,
1656                 mdatoms->homenr,
1657                 xRef,
1658                 mdatoms->chargeA,
1659                 mdatoms->chargeB,
1660                 mdatoms->nChargePerturbed,
1661                 dipoleData.muStaging[0],
1662                 dipoleData.muStaging[1]);
1663
1664         reduceAndUpdateMuTot(
1665                 &dipoleData, cr, (fr->efep != FreeEnergyPerturbationType::No), lambda, muTotal, ddBalanceRegionHandler);
1666     }
1667
1668     /* Reset energies */
1669     reset_enerdata(enerd);
1670
1671     if (DOMAINDECOMP(cr) && !thisRankHasDuty(cr, DUTY_PME))
1672     {
1673         wallcycle_start(wcycle, ewcPPDURINGPME);
1674         dd_force_flop_start(cr->dd, nrnb);
1675     }
1676
1677     // For the rest of the CPU tasks that depend on GPU-update produced coordinates,
1678     // this wait ensures that the D2H transfer is complete.
1679     if (simulationWork.useGpuUpdate && !stepWork.doNeighborSearch
1680         && (runScheduleWork->domainWork.haveCpuLocalForceWork || stepWork.computeVirial))
1681     {
1682         GMX_ASSERT(haveCopiedXFromGpu, "a wait should only be triggered if copy has been scheduled");
1683         stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
1684     }
1685
1686     if (inputrec.bRot)
1687     {
1688         wallcycle_start(wcycle, ewcROT);
1689         do_rotation(cr, enforcedRotation, box, as_rvec_array(x.unpaddedArrayRef().data()), t, step, stepWork.doNeighborSearch);
1690         wallcycle_stop(wcycle, ewcROT);
1691     }
1692
1693     /* Start the force cycle counter.
1694      * Note that a different counter is used for dynamic load balancing.
1695      */
1696     wallcycle_start(wcycle, ewcFORCE);
1697
1698     /* Set up and clear force outputs:
1699      * forceOutMtsLevel0:  everything except what is in the other two outputs
1700      * forceOutMtsLevel1:  PME-mesh and listed-forces group 1
1701      * forceOutNonbonded: non-bonded forces
1702      * Without multiple time stepping all point to the same object.
1703      * With multiple time-stepping the use is different for MTS fast (level0 only) and slow steps.
1704      */
1705     ForceOutputs forceOutMtsLevel0 =
1706             setupForceOutputs(&fr->forceHelperBuffers[0], force, stepWork, wcycle);
1707
1708     // Force output for MTS combined forces, only set at level1 MTS steps
1709     std::optional<ForceOutputs> forceOutMts =
1710             (fr->useMts && stepWork.computeSlowForces)
1711                     ? std::optional(setupForceOutputs(&fr->forceHelperBuffers[1],
1712                                                       forceView->forceMtsCombinedWithPadding(),
1713                                                       stepWork,
1714                                                       wcycle))
1715                     : std::nullopt;
1716
1717     ForceOutputs* forceOutMtsLevel1 =
1718             fr->useMts ? (stepWork.computeSlowForces ? &forceOutMts.value() : nullptr) : &forceOutMtsLevel0;
1719
1720     const bool nonbondedAtMtsLevel1 = runScheduleWork->simulationWork.computeNonbondedAtMtsLevel1;
1721
1722     ForceOutputs* forceOutNonbonded = nonbondedAtMtsLevel1 ? forceOutMtsLevel1 : &forceOutMtsLevel0;
1723
1724     if (inputrec.bPull && pull_have_constraint(*pull_work))
1725     {
1726         clear_pull_forces(pull_work);
1727     }
1728
1729     /* We calculate the non-bonded forces, when done on the CPU, here.
1730      * We do this before calling do_force_lowlevel, because in that
1731      * function, the listed forces are calculated before PME, which
1732      * does communication.  With this order, non-bonded and listed
1733      * force calculation imbalance can be balanced out by the domain
1734      * decomposition load balancing.
1735      */
1736
1737     const bool useOrEmulateGpuNb = simulationWork.useGpuNonbonded || fr->nbv->emulateGpu();
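         // emulateGpu() means the GPU nonbonded kernels are emulated on the CPU, but
         // the schedule still follows the GPU code path (presumably for testing).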
1738
1739     if (!useOrEmulateGpuNb)
1740     {
1741         do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::Local, enbvClearFYes, step, nrnb, wcycle);
1742     }
1743
1744     if (fr->efep != FreeEnergyPerturbationType::No && stepWork.computeNonbondedForces)
1745     {
1746         /* Calculate the local and non-local free energy interactions here.
1747          * Happens here on the CPU both with and without GPU.
1748          */
1749         nbv->dispatchFreeEnergyKernel(InteractionLocality::Local,
1750                                       fr,
1751                                       as_rvec_array(x.unpaddedArrayRef().data()),
1752                                       &forceOutNonbonded->forceWithShiftForces(),
1753                                       *mdatoms,
1754                                       inputrec.fepvals.get(),
1755                                       lambda,
1756                                       enerd,
1757                                       stepWork,
1758                                       nrnb);
1759
1760         if (havePPDomainDecomposition(cr))
1761         {
1762             nbv->dispatchFreeEnergyKernel(InteractionLocality::NonLocal,
1763                                           fr,
1764                                           as_rvec_array(x.unpaddedArrayRef().data()),
1765                                           &forceOutNonbonded->forceWithShiftForces(),
1766                                           *mdatoms,
1767                                           inputrec.fepvals.get(),
1768                                           lambda,
1769                                           enerd,
1770                                           stepWork,
1771                                           nrnb);
1772         }
1773     }
1774
1775     if (stepWork.computeNonbondedForces && !useOrEmulateGpuNb)
1776     {
1777         if (havePPDomainDecomposition(cr))
1778         {
1779             do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::NonLocal, enbvClearFNo, step, nrnb, wcycle);
1780         }
1781
1782         if (stepWork.computeForces)
1783         {
1784             /* Add all the non-bonded forces to the normal force array.
1785              * This can be split into a local and a non-local part when overlapping
1786              * communication with computation under domain decomposition.
1787              */
1788             wallcycle_stop(wcycle, ewcFORCE);
1789             nbv->atomdata_add_nbat_f_to_f(AtomLocality::All,
1790                                           forceOutNonbonded->forceWithShiftForces().force());
1791             wallcycle_start_nocount(wcycle, ewcFORCE);
1792         }
1793
1794         /* If there are multiple fshift output buffers we need to reduce them */
1795         if (stepWork.computeVirial)
1796         {
1797             /* This is not in a subcounter because it takes a
1798                negligible and constant-sized amount of time */
1799             nbnxn_atomdata_add_nbat_fshift_to_fshift(
1800                     *nbv->nbat, forceOutNonbonded->forceWithShiftForces().shiftForces());
1801         }
1802     }
1803
1804     // TODO Force flags should include haveFreeEnergyWork for this domain
1805     if (stepWork.useGpuXHalo && (domainWork.haveCpuBondedWork || domainWork.haveFreeEnergyWork))
1806     {
1807         wallcycle_stop(wcycle, ewcFORCE);
1808         /* Wait for non-local coordinate data to be copied from device */
1809         stateGpu->waitCoordinatesReadyOnHost(AtomLocality::NonLocal);
1810         wallcycle_start_nocount(wcycle, ewcFORCE);
1811     }
1812
1813     // Compute wall interactions, when present.
1814     // Note: should be moved to special forces.
1815     if (inputrec.nwall && stepWork.computeNonbondedForces)
1816     {
1817         /* foreign lambda component for walls */
1818         real dvdl_walls = do_walls(inputrec,
1819                                    *fr,
1820                                    box,
1821                                    *mdatoms,
1822                                    x.unpaddedConstArrayRef(),
1823                                    &forceOutMtsLevel0.forceWithVirial(),
1824                                    lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Vdw)],
1825                                    enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::LJSR].data(),
1826                                    nrnb);
1827         enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Vdw] += dvdl_walls;
1828     }
1829
1830     if (stepWork.computeListedForces)
1831     {
1832         /* Check whether we need to take into account PBC in listed interactions */
1833         bool needMolPbc = false;
1834         for (const auto& listedForces : fr->listedForces)
1835         {
1836             if (listedForces.haveCpuListedForces(*fr->fcdata))
1837             {
1838                 needMolPbc = fr->bMolPBC;
1839             }
1840         }
1841
1842         t_pbc pbc;
1843
1844         if (needMolPbc)
1845         {
1846             /* Since all atoms are in the rectangular or triclinic unit-cell,
1847              * only single box vector shifts (2 in x) are required.
1848              */
1849             set_pbc_dd(&pbc, fr->pbcType, DOMAINDECOMP(cr) ? cr->dd->numCells : nullptr, TRUE, box);
1850         }
1851
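             // With MTS, fr->listedForces[0] presumably holds the interactions evaluated
             // every step and fr->listedForces[1] those evaluated only on slow steps;
             // their outputs go to forceOutMtsLevel0 and forceOutMtsLevel1 respectively.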
1852         for (int mtsIndex = 0; mtsIndex < (fr->useMts && stepWork.computeSlowForces ? 2 : 1); mtsIndex++)
1853         {
1854             ListedForces& listedForces = fr->listedForces[mtsIndex];
1855             ForceOutputs& forceOut     = (mtsIndex == 0 ? forceOutMtsLevel0 : *forceOutMtsLevel1);
1856             listedForces.calculate(wcycle,
1857                                    box,
1858                                    inputrec.fepvals.get(),
1859                                    cr,
1860                                    ms,
1861                                    x,
1862                                    xWholeMolecules,
1863                                    fr->fcdata.get(),
1864                                    hist,
1865                                    &forceOut,
1866                                    fr,
1867                                    &pbc,
1868                                    enerd,
1869                                    nrnb,
1870                                    lambda,
1871                                    mdatoms,
1872                                    DOMAINDECOMP(cr) ? cr->dd->globalAtomIndices.data() : nullptr,
1873                                    stepWork);
1874         }
1875     }
1876
1877     if (stepWork.computeSlowForces)
1878     {
1879         calculateLongRangeNonbondeds(fr,
1880                                      inputrec,
1881                                      cr,
1882                                      nrnb,
1883                                      wcycle,
1884                                      mdatoms,
1885                                      x.unpaddedConstArrayRef(),
1886                                      &forceOutMtsLevel1->forceWithVirial(),
1887                                      enerd,
1888                                      box,
1889                                      lambda,
1890                                      dipoleData.muStateAB,
1891                                      stepWork,
1892                                      ddBalanceRegionHandler);
1893     }
1894
1895     wallcycle_stop(wcycle, ewcFORCE);
1896
1897     // VdW dispersion correction, only computed on master rank to avoid double counting
1898     if ((stepWork.computeEnergy || stepWork.computeVirial) && fr->dispersionCorrection && MASTER(cr))
1899     {
1900         // Calculate long range corrections to pressure and energy
1901         const DispersionCorrection::Correction correction = fr->dispersionCorrection->calculate(
1902                 box, lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Vdw)]);
1903
1904         if (stepWork.computeEnergy)
1905         {
1906             enerd->term[F_DISPCORR] = correction.energy;
1907             enerd->term[F_DVDL_VDW] += correction.dvdl;
1908             enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Vdw] += correction.dvdl;
1909         }
1910         if (stepWork.computeVirial)
1911         {
1912             correction.correctVirial(vir_force);
1913             enerd->term[F_PDISPCORR] = correction.pressure;
1914         }
1915     }
1916
1917     computeSpecialForces(fplog,
1918                          cr,
1919                          inputrec,
1920                          awh,
1921                          enforcedRotation,
1922                          imdSession,
1923                          pull_work,
1924                          step,
1925                          t,
1926                          wcycle,
1927                          fr->forceProviders,
1928                          box,
1929                          x.unpaddedArrayRef(),
1930                          mdatoms,
1931                          lambda,
1932                          stepWork,
1933                          &forceOutMtsLevel0.forceWithVirial(),
1934                          forceOutMtsLevel1 ? &forceOutMtsLevel1->forceWithVirial() : nullptr,
1935                          enerd,
1936                          ed,
1937                          stepWork.doNeighborSearch);
1938
1939     if (havePPDomainDecomposition(cr) && stepWork.computeForces && stepWork.useGpuFHalo
1940         && domainWork.haveCpuLocalForceWork)
1941     {
1942         stateGpu->copyForcesToGpu(forceOutMtsLevel0.forceWithShiftForces().force(), AtomLocality::Local);
1943     }
1944
1945     GMX_ASSERT(!(nonbondedAtMtsLevel1 && stepWork.useGpuFBufferOps),
1946                "The schedule below does not allow for nonbonded MTS with GPU buffer ops");
1947     GMX_ASSERT(!(nonbondedAtMtsLevel1 && stepWork.useGpuFHalo),
1948                "The schedule below does not allow for nonbonded MTS with GPU halo exchange");
1949     // Will store the number of cycles spent waiting for the GPU that
1950     // will later be used in the DLB accounting.
1951     float cycles_wait_gpu = 0;
1952     if (useOrEmulateGpuNb && stepWork.computeNonbondedForces)
1953     {
1954         auto& forceWithShiftForces = forceOutNonbonded->forceWithShiftForces();
1955
1956         /* wait for non-local forces (or calculate in emulation mode) */
1957         if (havePPDomainDecomposition(cr))
1958         {
1959             if (simulationWork.useGpuNonbonded)
1960             {
1961                 cycles_wait_gpu += Nbnxm::gpu_wait_finish_task(
1962                         nbv->gpu_nbv,
1963                         stepWork,
1964                         AtomLocality::NonLocal,
1965                         enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::LJSR].data(),
1966                         enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::CoulombSR].data(),
1967                         forceWithShiftForces.shiftForces(),
1968                         wcycle);
1969             }
1970             else
1971             {
1972                 wallcycle_start_nocount(wcycle, ewcFORCE);
1973                 do_nb_verlet(
1974                         fr, ic, enerd, stepWork, InteractionLocality::NonLocal, enbvClearFYes, step, nrnb, wcycle);
1975                 wallcycle_stop(wcycle, ewcFORCE);
1976             }
1977
1978             if (stepWork.useGpuFBufferOps)
1979             {
1980                 // TODO: move this into DomainLifetimeWorkload, including the second part of the
1981                 // condition. The bonded and free energy CPU tasks can have non-local force
1982                 // contributions, which are a dependency for the GPU force reduction.
1983                 bool haveNonLocalForceContribInCpuBuffer =
1984                         domainWork.haveCpuBondedWork || domainWork.haveFreeEnergyWork;
1985
1986                 if (haveNonLocalForceContribInCpuBuffer)
1987                 {
1988                     stateGpu->copyForcesToGpu(forceOutMtsLevel0.forceWithShiftForces().force(),
1989                                               AtomLocality::NonLocal);
1990                 }
1991
1992
1993                 fr->gpuForceReduction[gmx::AtomLocality::NonLocal]->execute();
1994
1995                 if (!stepWork.useGpuFHalo)
1996                 {
1997                     // copy forces from the GPU to serve as input for dd_move_f()
1998                     stateGpu->copyForcesFromGpu(forceOutMtsLevel0.forceWithShiftForces().force(),
1999                                                 AtomLocality::NonLocal);
2000                 }
2001             }
2002             else
2003             {
2004                 nbv->atomdata_add_nbat_f_to_f(AtomLocality::NonLocal, forceWithShiftForces.force());
2005             }
2006
2007             if (fr->nbv->emulateGpu() && stepWork.computeVirial)
2008             {
2009                 nbnxn_atomdata_add_nbat_fshift_to_fshift(*nbv->nbat, forceWithShiftForces.shiftForces());
2010             }
2011         }
2012     }
2013
2014     /* Combining the forces for multiple time stepping before the halo exchange, when possible,
2015      * avoids an extra halo exchange (when DD is used) and post-processing step.
2016      */
2017     const bool combineMtsForcesBeforeHaloExchange =
2018             (stepWork.computeForces && fr->useMts && stepWork.computeSlowForces
2019              && (legacyFlags & GMX_FORCE_DO_NOT_NEED_NORMAL_FORCE) != 0
2020              && !(stepWork.computeVirial || simulationWork.useGpuNonbonded || useGpuPmeOnThisRank));
2021     if (combineMtsForcesBeforeHaloExchange)
2022     {
2023         const int numAtoms = havePPDomainDecomposition(cr) ? dd_numAtomsZones(*cr->dd) : mdatoms->homenr;
2024         combineMtsForces(numAtoms,
2025                          force.unpaddedArrayRef(),
2026                          forceView->forceMtsCombined(),
2027                          inputrec.mtsLevels[1].stepFactor);
2028     }
2029
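     // A sketch of what combineMtsForces() does (see its implementation for the
     // exact form): for each of the first numAtoms atoms it effectively forms
     //   f_combined[i] = f_fast[i] + stepFactor * f_slow[i]
     // so that the slow forces, evaluated only every stepFactor steps, contribute
     // the same average impulse as if they were evaluated every step.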
2030     if (havePPDomainDecomposition(cr))
2031     {
2032         /* We are done with the CPU compute.
2033          * We will now communicate the non-local forces.
2034          * If we use a GPU this will overlap with GPU work, so in that case
2035          * we do not close the DD force balancing region here.
2036          */
2037         ddBalanceRegionHandler.closeAfterForceComputationCpu();
2038
2039         if (stepWork.computeForces)
2040         {
2041
2042             if (stepWork.useGpuFHalo)
2043             {
2044                 // If CPU force contributions exist, data from the halo exchange should accumulate into them
2045                 bool accumulateForces = domainWork.haveCpuLocalForceWork;
2046                 if (!accumulateForces)
2047                 {
2048                     // The force halo exchange will overwrite a subset of local atoms with remote non-local data.
2049                     // First clear the local portion of the force array, so that untouched atoms are zero.
2050                     stateGpu->clearForcesOnGpu(AtomLocality::Local);
2051                 }
2052                 communicateGpuHaloForces(*cr, accumulateForces);
2053             }
2054             else
2055             {
2056                 if (stepWork.useGpuFBufferOps)
2057                 {
2058                     stateGpu->waitForcesReadyOnHost(AtomLocality::NonLocal);
2059                 }
2060
2061                 // Without MTS or with MTS at slow steps with uncombined forces we need to
2062                 // communicate the fast forces
2063                 if (!fr->useMts || !combineMtsForcesBeforeHaloExchange)
2064                 {
2065                     dd_move_f(cr->dd, &forceOutMtsLevel0.forceWithShiftForces(), wcycle);
2066                 }
2067                 // With MTS we need to communicate the slow or combined (in forceOutMtsLevel1) forces
2068                 if (fr->useMts && stepWork.computeSlowForces)
2069                 {
2070                     dd_move_f(cr->dd, &forceOutMtsLevel1->forceWithShiftForces(), wcycle);
2071                 }
2072             }
2073         }
2074     }
2075
2076     // With both nonbonded and PME offloaded to a GPU on the same rank, we use
2077     // an alternating wait/reduction scheme.
2078     bool alternateGpuWait = (!c_disableAlternatingWait && useGpuPmeOnThisRank && simulationWork.useGpuNonbonded
2079                              && !DOMAINDECOMP(cr) && !stepWork.useGpuFBufferOps);
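         // In the alternating scheme the code polls both the PME and the nonbonded GPU
         // streams and reduces whichever output becomes ready first, rather than
         // waiting for them in a fixed order.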
2080     if (alternateGpuWait)
2081     {
2082         alternatePmeNbGpuWaitReduce(fr->nbv.get(),
2083                                     fr->pmedata,
2084                                     forceOutNonbonded,
2085                                     forceOutMtsLevel1,
2086                                     enerd,
2087                                     lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)],
2088                                     stepWork,
2089                                     wcycle);
2090     }
2091
2092     if (!alternateGpuWait && useGpuPmeOnThisRank)
2093     {
2094         pme_gpu_wait_and_reduce(fr->pmedata,
2095                                 stepWork,
2096                                 wcycle,
2097                                 &forceOutMtsLevel1->forceWithVirial(),
2098                                 enerd,
2099                                 lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)]);
2100     }
2101
2102     /* Wait for local GPU NB outputs on the non-alternating wait path */
2103     if (!alternateGpuWait && stepWork.computeNonbondedForces && simulationWork.useGpuNonbonded)
2104     {
2105         /* The measured overhead on CUDA and OpenCL, with and without GPU sharing,
2106          * is between 0.5 and 1.5 Mcycles. So 2 Mcycles is an overestimate,
2107          * but even with a step of 0.1 ms the difference is less than 1%
2108          * of the step time.
2109          */
2110         const float gpuWaitApiOverheadMargin = 2e6F; /* cycles */
2111         const float waitCycles               = Nbnxm::gpu_wait_finish_task(
2112                 nbv->gpu_nbv,
2113                 stepWork,
2114                 AtomLocality::Local,
2115                 enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::LJSR].data(),
2116                 enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::CoulombSR].data(),
2117                 forceOutNonbonded->forceWithShiftForces().shiftForces(),
2118                 wcycle);
2119
2120         if (ddBalanceRegionHandler.useBalancingRegion())
2121         {
2122             DdBalanceRegionWaitedForGpu waitedForGpu = DdBalanceRegionWaitedForGpu::yes;
2123             if (stepWork.computeForces && waitCycles <= gpuWaitApiOverheadMargin)
2124             {
2125                 /* We measured few cycles, so it could be that the kernel
2126                  * and transfer finished earlier and there was no actual
2127                  * wait time, only API call overhead.
2128                  * Then the actual time could be anywhere between 0 and
2129                  * cycles_wait_est; we will use half of cycles_wait_est.
2130                  */
2131                 waitedForGpu = DdBalanceRegionWaitedForGpu::no;
2132             }
2133             ddBalanceRegionHandler.closeAfterForceComputationGpu(cycles_wait_gpu, waitedForGpu);
2134         }
2135     }
2136
2137     if (fr->nbv->emulateGpu())
2138     {
2139         // NOTE: emulation kernel is not included in the balancing region,
2140         // but emulation mode does not target performance anyway
2141         wallcycle_start_nocount(wcycle, ewcFORCE);
2142         do_nb_verlet(fr,
2143                      ic,
2144                      enerd,
2145                      stepWork,
2146                      InteractionLocality::Local,
2147                      DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes,
2148                      step,
2149                      nrnb,
2150                      wcycle);
2151         wallcycle_stop(wcycle, ewcFORCE);
2152     }
2153
2154     // If on GPU PME-PP comms path, receive forces from PME before GPU buffer ops
2155     // TODO refactor this and unify with below default-path call to the same function
2156     if (PAR(cr) && !thisRankHasDuty(cr, DUTY_PME) && stepWork.computeSlowForces
2157         && simulationWork.useGpuPmePpCommunication)
2158     {
2159         /* In case of node-splitting, the PP nodes receive the long-range
2160          * forces, virial and energy from the PME nodes here.
2161          */
2162         pme_receive_force_ener(fr,
2163                                cr,
2164                                &forceOutMtsLevel1->forceWithVirial(),
2165                                enerd,
2166                                simulationWork.useGpuPmePpCommunication,
2167                                stepWork.useGpuPmeFReduction,
2168                                wcycle);
2169     }
2170
2171
2172     /* Do the nonbonded GPU (or emulation) force buffer reduction
2173      * on the non-alternating path. */
2174     GMX_ASSERT(!(nonbondedAtMtsLevel1 && stepWork.useGpuFBufferOps),
2175                "The schedule below does not allow for nonbonded MTS with GPU buffer ops");
2176     if (useOrEmulateGpuNb && !alternateGpuWait)
2177     {
2178         if (stepWork.useGpuFBufferOps)
2179         {
2180             ArrayRef<gmx::RVec> forceWithShift = forceOutNonbonded->forceWithShiftForces().force();
2181
2182             // Flag to specify whether the CPU force buffer has contributions to
2183             // local atoms. This depends on whether there are CPU-based force tasks
2184             // or, when DD is active, whether the halo exchange has resulted in
2185             // contributions from the non-local part.
2186             const bool haveLocalForceContribInCpuBuffer =
2187                     (domainWork.haveCpuLocalForceWork || havePPDomainDecomposition(cr));
2188
2189             // TODO: move these steps as early as possible:
2190             // - the CPU force H2D copy should happen as soon as all CPU-side forces are done
2191             // - the wait for the force reduction does not need to block the host (at least not here; it is
2192             //   sufficient to wait before the next CPU task that consumes the forces: vsite spread or update)
2193             // - the copy is not performed if GPU force halo exchange is active, because it would overwrite the
2194             //   result of the halo exchange. In that case the copy is instead performed above, before the
2195             //   exchange. These should be unified.
2196             if (haveLocalForceContribInCpuBuffer && !stepWork.useGpuFHalo)
2197             {
2198                 // Note: AtomLocality::All is used for the non-DD case because in this case
2199                 // copyForcesToGpu() uses a separate stream, which allows the CPU force H2D copy
2200                 // to overlap with GPU force tasks on all streams, including those in the
2201                 // local stream which would otherwise be implicit dependencies for the
2202                 // transfer and would not overlap.
2203                 auto locality = havePPDomainDecomposition(cr) ? AtomLocality::Local : AtomLocality::All;
2204
2205                 stateGpu->copyForcesToGpu(forceWithShift, locality);
2206             }
2207
2208             if (stepWork.computeNonbondedForces)
2209             {
2210                 fr->gpuForceReduction[gmx::AtomLocality::Local]->execute();
2211             }
2212
2213             // Copy forces to host if they are needed for update or if virtual sites are enabled.
2214             // If there are vsites, we need to copy forces every step to spread vsite forces on host.
2215             // TODO: Once the output flags are included in the step workload, this copy can be combined with the
2216             //       copy call done in sim_utils(...) for the output.
2217             // NOTE: If there are virtual sites, the forces are modified on host after this D2H copy. Hence,
2218             //       they should not be copied in do_md(...) for the output.
2219             if (!simulationWork.useGpuUpdate
2220                 || (simulationWork.useGpuUpdate && DOMAINDECOMP(cr) && haveHostPmePpComms) || vsite)
2221             {
2222                 stateGpu->copyForcesFromGpu(forceWithShift, AtomLocality::Local);
2223                 stateGpu->waitForcesReadyOnHost(AtomLocality::Local);
2224             }
2225         }
2226         else if (stepWork.computeNonbondedForces)
2227         {
2228             ArrayRef<gmx::RVec> forceWithShift = forceOutNonbonded->forceWithShiftForces().force();
2229             nbv->atomdata_add_nbat_f_to_f(AtomLocality::Local, forceWithShift);
2230         }
2231     }
2232
2233     launchGpuEndOfStepTasks(
2234             nbv, fr->gpuBonded, fr->pmedata, enerd, *runScheduleWork, useGpuPmeOnThisRank, step, wcycle);
2235
2236     if (DOMAINDECOMP(cr))
2237     {
2238         dd_force_flop_stop(cr->dd, nrnb);
2239     }
2240
2241     const bool haveCombinedMtsForces = (stepWork.computeForces && fr->useMts && stepWork.computeSlowForces
2242                                         && combineMtsForcesBeforeHaloExchange);
2243     if (stepWork.computeForces)
2244     {
2245         postProcessForceWithShiftForces(
2246                 nrnb, wcycle, box, x.unpaddedArrayRef(), &forceOutMtsLevel0, vir_force, *mdatoms, *fr, vsite, stepWork);
2247
2248         if (fr->useMts && stepWork.computeSlowForces && !haveCombinedMtsForces)
2249         {
2250             postProcessForceWithShiftForces(
2251                     nrnb, wcycle, box, x.unpaddedArrayRef(), forceOutMtsLevel1, vir_force, *mdatoms, *fr, vsite, stepWork);
2252         }
2253     }
2254
2255     // TODO refactor this and unify with above GPU PME-PP / GPU update path call to the same function
2256     if (PAR(cr) && !thisRankHasDuty(cr, DUTY_PME) && !simulationWork.useGpuPmePpCommunication
2257         && stepWork.computeSlowForces)
2258     {
2259         /* In case of node-splitting, the PP nodes receive the long-range
2260          * forces, virial and energy from the PME nodes here.
2261          */
2262         pme_receive_force_ener(fr,
2263                                cr,
2264                                &forceOutMtsLevel1->forceWithVirial(),
2265                                enerd,
2266                                simulationWork.useGpuPmePpCommunication,
2267                                false,
2268                                wcycle);
2269     }
2270
2271     if (stepWork.computeForces)
2272     {
2273         /* If we don't use MTS, or if we already combined the MTS forces before, we only
2274          * need to post-process one ForceOutputs object here, called forceOutCombined;
2275          * otherwise we have to post-process two outputs and then combine them.
2276          */
2277         ForceOutputs& forceOutCombined = (haveCombinedMtsForces ? forceOutMts.value() : forceOutMtsLevel0);
2278         postProcessForces(
2279                 cr, step, nrnb, wcycle, box, x.unpaddedArrayRef(), &forceOutCombined, vir_force, mdatoms, fr, vsite, stepWork);
2280
2281         if (fr->useMts && stepWork.computeSlowForces && !haveCombinedMtsForces)
2282         {
2283             postProcessForces(
2284                     cr, step, nrnb, wcycle, box, x.unpaddedArrayRef(), forceOutMtsLevel1, vir_force, mdatoms, fr, vsite, stepWork);
2285
2286             combineMtsForces(mdatoms->homenr,
2287                              force.unpaddedArrayRef(),
2288                              forceView->forceMtsCombined(),
2289                              inputrec.mtsLevels[1].stepFactor);
2290         }
2291     }
2292
2293     if (stepWork.computeEnergy)
2294     {
2295         /* Compute the final potential energy terms */
2296         accumulatePotentialEnergies(enerd, lambda, inputrec.fepvals.get());
2297
2298         if (!EI_TPI(inputrec.eI))
2299         {
2300             checkPotentialEnergyValidity(step, *enerd, inputrec);
2301         }
2302     }
2303
2304     /* In case we don't have constraints and are using GPUs, the next balancing
2305      * region starts here.
2306      * Some "special" work at the end of do_force_cuts?, such as vsite spread,
2307      * virial calculation and COM pulling, is thus not included in
2308      * the balance timing, which is ok as most tasks do communication.
2309      */
2310     ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::no);
2311 }