/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013-2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gmxpre.h"

#include "config.h"

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

#include <array>
#include <optional>

#include "gromacs/applied_forces/awh/awh.h"
#include "gromacs/domdec/dlbtiming.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/domdec/gpuhaloexchange.h"
#include "gromacs/domdec/partition.h"
#include "gromacs/essentialdynamics/edsam.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/ewald/pme_pp.h"
#include "gromacs/ewald/pme_pp_comm_gpu.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nonbonded/nb_free_energy.h"
#include "gromacs/gmxlib/nonbonded/nb_kernel.h"
#include "gromacs/gmxlib/nonbonded/nonbonded.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/imd/imd.h"
#include "gromacs/listed_forces/disre.h"
#include "gromacs/listed_forces/gpubonded.h"
#include "gromacs/listed_forces/listed_forces.h"
#include "gromacs/listed_forces/orires.h"
#include "gromacs/math/arrayrefwithpadding.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vecdump.h"
#include "gromacs/mdlib/calcmu.h"
#include "gromacs/mdlib/calcvir.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdlib/dispersioncorrection.h"
#include "gromacs/mdlib/enerdata_utils.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/force_flags.h"
#include "gromacs/mdlib/forcerec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/update.h"
#include "gromacs/mdlib/vsite.h"
#include "gromacs/mdlib/wall.h"
#include "gromacs/mdlib/wholemoleculetransform.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/enerdata.h"
#include "gromacs/mdtypes/forcebuffers.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/forcerec.h"
#include "gromacs/mdtypes/iforceprovider.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/mdatom.h"
#include "gromacs/mdtypes/multipletimestepping.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/mdtypes/state_propagator_data_gpu.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/nbnxm_gpu.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/pulling/pull_rotation.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/wallcyclereporting.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/fixedcapacityvector.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/strconvert.h"
#include "gromacs/utility/sysinfo.h"

#include "gpuforcereduction.h"

using gmx::ArrayRef;
using gmx::AtomLocality;
using gmx::DomainLifetimeWorkload;
using gmx::ForceOutputs;
using gmx::ForceWithShiftForces;
using gmx::InteractionLocality;
using gmx::RVec;
using gmx::SimulationWorkload;
using gmx::StepWorkload;

// TODO: this environment variable allows us to verify before release
// that on less common architectures the total cost of polling is not larger than
// a blocking wait (so polling does not introduce overhead when the static
// PME-first ordering would suffice).
static const bool c_disableAlternatingWait = (getenv("GMX_DISABLE_ALTERNATING_GPU_WAIT") != nullptr);

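/* Usage sketch (illustrative only; the run name below is a placeholder): the
 * polling path can be disabled from the shell before launching a run, e.g.
 *
 *   GMX_DISABLE_ALTERNATING_GPU_WAIT=1 gmx mdrun -deffnm topol
 *
 * Only the presence of the variable matters; its value is never inspected.
 */
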
static void sum_forces(ArrayRef<RVec> f, ArrayRef<const RVec> forceToAdd)
{
    GMX_ASSERT(f.size() >= forceToAdd.size(), "Accumulation buffer should be sufficiently large");
    const int end = forceToAdd.size();

    int gmx_unused nt = gmx_omp_nthreads_get(emntDefault);
#pragma omp parallel for num_threads(nt) schedule(static)
    for (int i = 0; i < end; i++)
    {
        rvec_inc(f[i], forceToAdd[i]);
    }
}

static void calc_virial(int                              start,
                        int                              homenr,
                        const rvec                       x[],
                        const gmx::ForceWithShiftForces& forceWithShiftForces,
                        tensor                           vir_part,
                        const matrix                     box,
                        t_nrnb*                          nrnb,
                        const t_forcerec*                fr,
                        PbcType                          pbcType)
{
    /* The short-range virial from surrounding boxes */
    const rvec* fshift = as_rvec_array(forceWithShiftForces.shiftForces().data());
    calc_vir(SHIFTS, fr->shift_vec, fshift, vir_part, pbcType == PbcType::Screw, box);
    inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);

    /* Calculate partial virial, for local atoms only, based on short range.
     * Total virial is computed in global_stat, called from do_md
     */
    const rvec* f = as_rvec_array(forceWithShiftForces.force().data());
    f_calc_vir(start, start + homenr, x, f, vir_part, box);
    inc_nrnb(nrnb, eNR_VIRIAL, homenr);

    if (debug)
    {
        pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
    }
}

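/* For reference: up to the -1/2 prefactor applied inside calc_vir() and
 * f_calc_vir(), the two contributions above together form the single-sum
 * virial
 *
 *   Xi = -1/2 * sum_i x_i \otimes f_i
 *
 * where the shift-force term makes the result independent of which periodic
 * image each atom happens to occupy.
 */
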
static void pull_potential_wrapper(const t_commrec*               cr,
                                   const t_inputrec&              ir,
                                   const matrix                   box,
                                   gmx::ArrayRef<const gmx::RVec> x,
                                   gmx::ForceWithVirial*          force,
                                   const t_mdatoms*               mdatoms,
                                   gmx_enerdata_t*                enerd,
                                   pull_t*                        pull_work,
                                   const real*                    lambda,
                                   double                         t,
                                   gmx_wallcycle_t                wcycle)
{
    t_pbc pbc;
    real  dvdl;

    /* Calculate the center-of-mass forces; this requires communication,
     * which is why pull_potential is called close to other communication.
     */
    wallcycle_start(wcycle, ewcPULLPOT);
    set_pbc(&pbc, ir.pbcType, box);
    dvdl = 0;
    enerd->term[F_COM_PULL] +=
            pull_potential(pull_work,
                           mdatoms->massT,
                           &pbc,
                           cr,
                           t,
                           lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Restraint)],
                           as_rvec_array(x.data()),
                           force,
                           &dvdl);
    enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Restraint] += dvdl;
    wallcycle_stop(wcycle, ewcPULLPOT);
}

static void pme_receive_force_ener(t_forcerec*           fr,
                                   const t_commrec*      cr,
                                   gmx::ForceWithVirial* forceWithVirial,
                                   gmx_enerdata_t*       enerd,
                                   bool                  useGpuPmePpComms,
                                   bool                  receivePmeForceToGpu,
                                   gmx_wallcycle_t       wcycle)
{
    real  e_q, e_lj, dvdl_q, dvdl_lj;
    float cycles_ppdpme, cycles_seppme;

    cycles_ppdpme = wallcycle_stop(wcycle, ewcPPDURINGPME);
    dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);

    /* In case of node-splitting, the PP nodes receive the long-range
     * forces, virial and energy from the PME nodes here.
     */
    wallcycle_start(wcycle, ewcPP_PMEWAITRECVF);
    dvdl_q  = 0;
    dvdl_lj = 0;
    gmx_pme_receive_f(fr->pmePpCommGpu.get(),
                      cr,
                      forceWithVirial,
                      &e_q,
                      &e_lj,
                      &dvdl_q,
                      &dvdl_lj,
                      useGpuPmePpComms,
                      receivePmeForceToGpu,
                      &cycles_seppme);
    enerd->term[F_COUL_RECIP] += e_q;
    enerd->term[F_LJ_RECIP] += e_lj;
    enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Coul] += dvdl_q;
    enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Vdw] += dvdl_lj;

    if (wcycle)
    {
        dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);
    }
    wallcycle_stop(wcycle, ewcPP_PMEWAITRECVF);
}

static void print_large_forces(FILE*                fp,
                               const t_mdatoms*     md,
                               const t_commrec*     cr,
                               int64_t              step,
                               real                 forceTolerance,
                               ArrayRef<const RVec> x,
                               ArrayRef<const RVec> f)
{
    real       force2Tolerance = gmx::square(forceTolerance);
    gmx::index numNonFinite    = 0;
    for (int i = 0; i < md->homenr; i++)
    {
        real force2    = norm2(f[i]);
        bool nonFinite = !std::isfinite(force2);
        if (force2 >= force2Tolerance || nonFinite)
        {
            fprintf(fp,
                    "step %" PRId64 " atom %6d  x %8.3f %8.3f %8.3f  force %12.5e\n",
                    step,
                    ddglatnr(cr->dd, i),
                    x[i][XX],
                    x[i][YY],
                    x[i][ZZ],
                    std::sqrt(force2));
        }
        if (nonFinite)
        {
            numNonFinite++;
        }
    }
    if (numNonFinite > 0)
    {
        /* Note that with MPI this fatal call on one rank might interrupt
         * the printing on other ranks. But we can only avoid that with
         * an expensive MPI barrier that we would need at each step.
         */
        gmx_fatal(FARGS, "At step %" PRId64 " detected non-finite forces on %td atoms", step, numNonFinite);
    }
}

//! When necessary, spreads forces on vsites and computes the virial for \p forceOutputs->forceWithShiftForces()
static void postProcessForceWithShiftForces(t_nrnb*                   nrnb,
                                            gmx_wallcycle_t           wcycle,
                                            const matrix              box,
                                            ArrayRef<const RVec>      x,
                                            ForceOutputs*             forceOutputs,
                                            tensor                    vir_force,
                                            const t_mdatoms&          mdatoms,
                                            const t_forcerec&         fr,
                                            gmx::VirtualSitesHandler* vsite,
                                            const StepWorkload&       stepWork)
{
    ForceWithShiftForces& forceWithShiftForces = forceOutputs->forceWithShiftForces();

    /* If we have NoVirSum forces, but we do not calculate the virial,
     * we later sum the forceWithShiftForces buffer together with
     * the noVirSum buffer and spread the combined vsite forces at once.
     */
    if (vsite && (!forceOutputs->haveForceWithVirial() || stepWork.computeVirial))
    {
        using VirialHandling = gmx::VirtualSitesHandler::VirialHandling;

        auto                 f      = forceWithShiftForces.force();
        auto                 fshift = forceWithShiftForces.shiftForces();
        const VirialHandling virialHandling =
                (stepWork.computeVirial ? VirialHandling::Pbc : VirialHandling::None);
        vsite->spreadForces(x, f, virialHandling, fshift, nullptr, nrnb, box, wcycle);
        forceWithShiftForces.haveSpreadVsiteForces() = true;
    }

    if (stepWork.computeVirial)
    {
        /* Calculation of the virial must be done after vsites! */
        calc_virial(
                0, mdatoms.homenr, as_rvec_array(x.data()), forceWithShiftForces, vir_force, box, nrnb, &fr, fr.pbcType);
    }
}

//! Spread, compute virial for and sum forces, when necessary
static void postProcessForces(const t_commrec*          cr,
                              int64_t                   step,
                              t_nrnb*                   nrnb,
                              gmx_wallcycle_t           wcycle,
                              const matrix              box,
                              ArrayRef<const RVec>      x,
                              ForceOutputs*             forceOutputs,
                              tensor                    vir_force,
                              const t_mdatoms*          mdatoms,
                              const t_forcerec*         fr,
                              gmx::VirtualSitesHandler* vsite,
                              const StepWorkload&       stepWork)
{
    // Extract the final output force buffer, which is also the buffer for forces with shift forces
    ArrayRef<RVec> f = forceOutputs->forceWithShiftForces().force();

    if (forceOutputs->haveForceWithVirial())
    {
        auto& forceWithVirial = forceOutputs->forceWithVirial();

        if (vsite)
        {
            /* Spread the mesh force on virtual sites to the other particles...
             * This is parallelized. MPI communication is performed
             * if the constructing atoms aren't local.
             */
            GMX_ASSERT(!stepWork.computeVirial || f.data() != forceWithVirial.force_.data(),
                       "We need separate force buffers for shift and virial forces when "
                       "computing the virial");
            GMX_ASSERT(!stepWork.computeVirial
                               || forceOutputs->forceWithShiftForces().haveSpreadVsiteForces(),
                       "We should spread the force with shift forces separately when computing "
                       "the virial");
            const gmx::VirtualSitesHandler::VirialHandling virialHandling =
                    (stepWork.computeVirial ? gmx::VirtualSitesHandler::VirialHandling::NonLinear
                                            : gmx::VirtualSitesHandler::VirialHandling::None);
            matrix virial = { { 0 } };
            vsite->spreadForces(x, forceWithVirial.force_, virialHandling, {}, virial, nrnb, box, wcycle);
            forceWithVirial.addVirialContribution(virial);
        }

        if (stepWork.computeVirial)
        {
            /* Now add the forces, this is local */
            sum_forces(f, forceWithVirial.force_);

            /* Add the direct virial contributions */
            GMX_ASSERT(
                    forceWithVirial.computeVirial_,
                    "forceWithVirial should request virial computation when we request the virial");
            m_add(vir_force, forceWithVirial.getVirial(), vir_force);

            if (debug)
            {
                pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
            }
        }
    }
    else
    {
        GMX_ASSERT(vsite == nullptr || forceOutputs->forceWithShiftForces().haveSpreadVsiteForces(),
                   "We should have spread the vsite forces (earlier)");
    }

    if (fr->print_force >= 0)
    {
        print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
    }
}

static void do_nb_verlet(t_forcerec*                fr,
                         const interaction_const_t* ic,
                         gmx_enerdata_t*            enerd,
                         const StepWorkload&        stepWork,
                         const InteractionLocality  ilocality,
                         const int                  clearF,
                         const int64_t              step,
                         t_nrnb*                    nrnb,
                         gmx_wallcycle_t            wcycle)
{
    if (!stepWork.computeNonbondedForces)
    {
        /* skip non-bonded calculation */
        return;
    }

    nonbonded_verlet_t* nbv = fr->nbv.get();

    /* GPU kernel launch overhead is already timed separately */
    if (!nbv->useGpu())
    {
        /* When dynamic pair-list pruning is requested, we need to prune
         * at nstlistPrune steps.
         */
        if (nbv->isDynamicPruningStepCpu(step))
        {
            /* Prune the pair-list beyond fr->ic->rlistPrune using
             * the current coordinates of the atoms.
             */
            wallcycle_sub_start(wcycle, ewcsNONBONDED_PRUNING);
            nbv->dispatchPruneKernelCpu(ilocality, fr->shift_vec);
            wallcycle_sub_stop(wcycle, ewcsNONBONDED_PRUNING);
        }
    }

    nbv->dispatchNonbondedKernel(ilocality, *ic, stepWork, clearF, *fr, enerd, nrnb);
}

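/* Illustrative schedule (values assumed, not taken from this file): with
 * nstlist = 100 and nstlistPrune = 10, the pair list is rebuilt at steps
 * 0, 100, 200, ..., while the CPU prune kernel above trims it at the
 * intermediate steps 10, 20, ..., 90, so the nonbonded kernel processes
 * fewer pairs as atoms drift out of range.
 */
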
static inline void clearRVecs(ArrayRef<RVec> v, const bool useOpenmpThreading)
{
    int nth = gmx_omp_nthreads_get_simple_rvec_task(emntDefault, v.ssize());

    /* Note that we would like to avoid this conditional by putting it
     * into the omp pragma instead, but then we still take the full
     * omp parallel for overhead (at least with gcc5).
     */
    if (!useOpenmpThreading || nth == 1)
    {
        for (RVec& elem : v)
        {
            clear_rvec(elem);
        }
    }
    else
    {
#pragma omp parallel for num_threads(nth) schedule(static)
        for (gmx::index i = 0; i < v.ssize(); i++)
        {
            clear_rvec(v[i]);
        }
    }
}

/*! \brief Return an estimate of the average kinetic energy or 0 when unreliable
 *
 * \param groupOptions  Group options, containing T-coupling options
 */
static real averageKineticEnergyEstimate(const t_grpopts& groupOptions)
{
    real nrdfCoupled   = 0;
    real nrdfUncoupled = 0;
    real kineticEnergy = 0;
    for (int g = 0; g < groupOptions.ngtc; g++)
    {
        if (groupOptions.tau_t[g] >= 0)
        {
            nrdfCoupled += groupOptions.nrdf[g];
            kineticEnergy += groupOptions.nrdf[g] * 0.5 * groupOptions.ref_t[g] * gmx::c_boltz;
        }
        else
        {
            nrdfUncoupled += groupOptions.nrdf[g];
        }
    }

    /* This conditional with > also catches nrdf=0 */
    if (nrdfCoupled > nrdfUncoupled)
    {
        return kineticEnergy * (nrdfCoupled + nrdfUncoupled) / nrdfCoupled;
    }
    else
    {
        return 0;
    }
}

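/* The estimate above is the equipartition value
 *
 *   <E_kin> = sum_g ndf_g * 1/2 * k_B * T_ref,g
 *
 * over the temperature-coupled groups, rescaled by
 * (ndf_coupled + ndf_uncoupled) / ndf_coupled to extrapolate to all degrees
 * of freedom. As a rough worked example, a single coupled group with 30000
 * degrees of freedom at 300 K gives about 30000 * 0.5 * 0.00831 * 300,
 * i.e. ~3.7e4 kJ/mol.
 */
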
/*! \brief This routine checks that the potential energy is finite.
 *
 * Always checks that the potential energy is finite. If step equals
 * inputrec.init_step also checks that the magnitude of the potential energy
 * is reasonable. Terminates with a fatal error when a check fails.
 * Note that passing this check does not guarantee finite forces,
 * since those use slightly different arithmetics. But in most cases
 * there is just a narrow coordinate range where forces are not finite
 * and energies are finite.
 *
 * \param[in] step      The step number, used for checking and printing
 * \param[in] enerd     The energy data; the non-bonded group energies need to be added
 *                      to enerd.term[F_EPOT] before calling this routine
 * \param[in] inputrec  The input record
 */
static void checkPotentialEnergyValidity(int64_t step, const gmx_enerdata_t& enerd, const t_inputrec& inputrec)
{
    /* Threshold valid for comparing absolute potential energy against
     * the kinetic energy. Normally one should not consider absolute
     * potential energy values, but with a factor of one million
     * we should never get false positives.
     */
    constexpr real c_thresholdFactor = 1e6;

    bool energyIsNotFinite    = !std::isfinite(enerd.term[F_EPOT]);
    real averageKineticEnergy = 0;
    /* We only check for large potential energy at the initial step,
     * because that is by far the most likely step for this to occur
     * and because computing the average kinetic energy is not free.
     * Note: nstcalcenergy >> 1 often does not allow catching large energies
     * before they become NaN.
     */
    if (step == inputrec.init_step && EI_DYNAMICS(inputrec.eI))
    {
        averageKineticEnergy = averageKineticEnergyEstimate(inputrec.opts);
    }

    if (energyIsNotFinite
        || (averageKineticEnergy > 0 && enerd.term[F_EPOT] > c_thresholdFactor * averageKineticEnergy))
    {
        gmx_fatal(
                FARGS,
                "Step %" PRId64
                ": The total potential energy is %g, which is %s. The LJ and electrostatic "
                "contributions to the energy are %g and %g, respectively. A %s potential energy "
                "can be caused by overlapping interactions in bonded interactions or very large%s "
                "coordinate values. Usually this is caused by a badly- or non-equilibrated initial "
                "configuration, incorrect interactions or parameters in the topology.",
                step,
                enerd.term[F_EPOT],
                energyIsNotFinite ? "not finite" : "extremely high",
                enerd.term[F_LJ],
                enerd.term[F_COUL_SR],
                energyIsNotFinite ? "non-finite" : "very high",
                energyIsNotFinite ? " or NaN" : "");
    }
}

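/* Continuing the rough example above: with c_thresholdFactor = 1e6 and an
 * average kinetic energy of ~3.7e4 kJ/mol, the fatal error fires only for a
 * potential energy above ~3.7e10 kJ/mol, far outside anything a healthy
 * system produces.
 */
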
/*! \brief Return true if there are special forces computed this step.
 *
 * The conditionals exactly correspond to those in computeSpecialForces().
 */
static bool haveSpecialForces(const t_inputrec&          inputrec,
                              const gmx::ForceProviders& forceProviders,
                              const pull_t*              pull_work,
                              const bool                 computeForces,
                              const gmx_edsam*           ed)
{

    return ((computeForces && forceProviders.hasForceProvider()) || // forceProviders
            (inputrec.bPull && pull_have_potential(*pull_work)) ||  // pull
            inputrec.bRot ||                                        // enforced rotation
            (ed != nullptr) ||                                      // flooding
            (inputrec.bIMD && computeForces));                      // IMD
}

/*! \brief Compute forces and/or energies for special algorithms
 *
 * The intention is to collect all calls to algorithms that compute
 * forces on local atoms only and that do not contribute to the local
 * virial sum (but add their virial contribution separately).
 * Eventually these should likely all become ForceProviders.
 * Within this function the intention is to have algorithms that do
 * global communication at the end, so global barriers within the MD loop
 * are as close together as possible.
 *
 * \param[in]     fplog            The log file
 * \param[in]     cr               The communication record
 * \param[in]     inputrec         The input record
 * \param[in]     awh              The Awh module (nullptr if none in use).
 * \param[in]     enforcedRotation Enforced rotation module.
 * \param[in]     imdSession       The IMD session
 * \param[in]     pull_work        The pull work structure.
 * \param[in]     step             The current MD step
 * \param[in]     t                The current time
 * \param[in,out] wcycle           Wallcycle accounting struct
 * \param[in,out] forceProviders   Pointer to a list of force providers
 * \param[in]     box              The unit cell
 * \param[in]     x                The coordinates
 * \param[in]     mdatoms          Per atom properties
 * \param[in]     lambda           Array of free-energy lambda values
 * \param[in]     stepWork         Step schedule flags
 * \param[in,out] forceWithVirialMtsLevel0  Force and virial for MTS level0 forces
 * \param[in,out] forceWithVirialMtsLevel1  Force and virial for MTS level1 forces, can be nullptr
 * \param[in,out] enerd            Energy buffer
 * \param[in,out] ed               Essential dynamics pointer
 * \param[in]     didNeighborSearch Tells if we did neighbor searching this step, used for ED sampling
 *
 * \todo Remove didNeighborSearch, which is used incorrectly.
 * \todo Convert all other algorithms called here to ForceProviders.
 */
static void computeSpecialForces(FILE*                          fplog,
                                 const t_commrec*               cr,
                                 const t_inputrec&              inputrec,
                                 gmx::Awh*                      awh,
                                 gmx_enfrot*                    enforcedRotation,
                                 gmx::ImdSession*               imdSession,
                                 pull_t*                        pull_work,
                                 int64_t                        step,
                                 double                         t,
                                 gmx_wallcycle_t                wcycle,
                                 gmx::ForceProviders*           forceProviders,
                                 const matrix                   box,
                                 gmx::ArrayRef<const gmx::RVec> x,
                                 const t_mdatoms*               mdatoms,
                                 gmx::ArrayRef<const real>      lambda,
                                 const StepWorkload&            stepWork,
                                 gmx::ForceWithVirial*          forceWithVirialMtsLevel0,
                                 gmx::ForceWithVirial*          forceWithVirialMtsLevel1,
                                 gmx_enerdata_t*                enerd,
                                 gmx_edsam*                     ed,
                                 bool                           didNeighborSearch)
{
    /* NOTE: Currently all ForceProviders only provide forces.
     *       When they also provide energies, remove this conditional.
     */
    if (stepWork.computeForces)
    {
        gmx::ForceProviderInput forceProviderInput(
                x,
                mdatoms->homenr,
                gmx::arrayRefFromArray(mdatoms->chargeA, mdatoms->homenr),
                gmx::arrayRefFromArray(mdatoms->massT, mdatoms->homenr),
                t,
                box,
                *cr);
        gmx::ForceProviderOutput forceProviderOutput(forceWithVirialMtsLevel0, enerd);

        /* Collect forces from modules */
        forceProviders->calculateForces(forceProviderInput, &forceProviderOutput);
    }

    if (inputrec.bPull && pull_have_potential(*pull_work))
    {
        const int mtsLevel = forceGroupMtsLevel(inputrec.mtsLevels, gmx::MtsForceGroups::Pull);
        if (mtsLevel == 0 || stepWork.computeSlowForces)
        {
            auto& forceWithVirial = (mtsLevel == 0) ? forceWithVirialMtsLevel0 : forceWithVirialMtsLevel1;
            pull_potential_wrapper(
                    cr, inputrec, box, x, forceWithVirial, mdatoms, enerd, pull_work, lambda.data(), t, wcycle);
        }
    }
    if (awh)
    {
        const int mtsLevel = forceGroupMtsLevel(inputrec.mtsLevels, gmx::MtsForceGroups::Pull);
        if (mtsLevel == 0 || stepWork.computeSlowForces)
        {
            const bool needForeignEnergyDifferences = awh->needForeignEnergyDifferences(step);
            std::vector<double> foreignLambdaDeltaH, foreignLambdaDhDl;
            if (needForeignEnergyDifferences)
            {
                enerd->foreignLambdaTerms.finalizePotentialContributions(
                        enerd->dvdl_lin, lambda, *inputrec.fepvals);
                std::tie(foreignLambdaDeltaH, foreignLambdaDhDl) = enerd->foreignLambdaTerms.getTerms(cr);
            }

            auto& forceWithVirial = (mtsLevel == 0) ? forceWithVirialMtsLevel0 : forceWithVirialMtsLevel1;
            enerd->term[F_COM_PULL] += awh->applyBiasForcesAndUpdateBias(inputrec.pbcType,
                                                                         mdatoms->massT,
                                                                         foreignLambdaDeltaH,
                                                                         foreignLambdaDhDl,
                                                                         box,
                                                                         forceWithVirial,
                                                                         t,
                                                                         step,
                                                                         wcycle,
                                                                         fplog);
        }
    }

    rvec* f = as_rvec_array(forceWithVirialMtsLevel0->force_.data());

    /* Add the forces from enforced rotation potentials (if any) */
    if (inputrec.bRot)
    {
        wallcycle_start(wcycle, ewcROTadd);
        enerd->term[F_COM_PULL] += add_rot_forces(enforcedRotation, f, cr, step, t);
        wallcycle_stop(wcycle, ewcROTadd);
    }

    if (ed)
    {
        /* Note that since init_edsam() is called after the initialization
         * of forcerec, edsam doesn't request the noVirSum force buffer.
         * Thus if no other algorithm (e.g. PME) requires it, the forces
         * here will contribute to the virial.
         */
        do_flood(cr, inputrec, as_rvec_array(x.data()), f, ed, box, step, didNeighborSearch);
    }

    /* Add forces from interactive molecular dynamics (IMD), if any */
    if (inputrec.bIMD && stepWork.computeForces)
    {
        imdSession->applyForces(f);
    }
}

/*! \brief Launch the prepare_step and spread stages of PME GPU.
 *
 * \param[in]  pmedata              The PME structure
 * \param[in]  box                  The box matrix
 * \param[in]  stepWork             Step schedule flags
 * \param[in]  xReadyOnDevice       Event synchronizer indicating that the coordinates are ready in the device memory.
 * \param[in]  lambdaQ              The Coulomb lambda of the current state.
 * \param[in]  wcycle               The wallcycle structure
 */
static inline void launchPmeGpuSpread(gmx_pme_t*            pmedata,
                                      const matrix          box,
                                      const StepWorkload&   stepWork,
                                      GpuEventSynchronizer* xReadyOnDevice,
                                      const real            lambdaQ,
                                      gmx_wallcycle_t       wcycle)
{
    pme_gpu_prepare_computation(pmedata, box, wcycle, stepWork);
    pme_gpu_launch_spread(pmedata, xReadyOnDevice, wcycle, lambdaQ);
}

/*! \brief Launch the FFT and gather stages of PME GPU
 *
 * This function only implements setting the output forces (no accumulation).
 *
 * \param[in]  pmedata        The PME structure
 * \param[in]  lambdaQ        The Coulomb lambda of the current system state.
 * \param[in]  wcycle         The wallcycle structure
 * \param[in]  stepWork       Step schedule flags
 */
static void launchPmeGpuFftAndGather(gmx_pme_t*               pmedata,
                                     const real               lambdaQ,
                                     gmx_wallcycle_t          wcycle,
                                     const gmx::StepWorkload& stepWork)
{
    pme_gpu_launch_complex_transforms(pmedata, wcycle, stepWork);
    pme_gpu_launch_gather(pmedata, wcycle, lambdaQ);
}

/*! \brief
 *  Polling wait for either of the PME or nonbonded GPU tasks.
 *
 * Instead of a static order in waiting for GPU tasks, this function
 * polls checking which of the two tasks completes first, and does the
 * associated force buffer reduction overlapped with the other task.
 * By doing that, unlike static scheduling order, it can always overlap
 * one of the reductions, regardless of the GPU task completion order.
 *
 * \param[in]     nbv              Nonbonded verlet structure
 * \param[in,out] pmedata          PME module data
 * \param[in,out] forceOutputsNonbonded  Force outputs for the non-bonded forces and shift forces
 * \param[in,out] forceOutputsPme  Force outputs for the PME forces and virial
 * \param[in,out] enerd            Energy data structure results are reduced into
 * \param[in]     lambdaQ          The Coulomb lambda of the current system state.
 * \param[in]     stepWork         Step schedule flags
 * \param[in]     wcycle           The wallcycle structure
 */
static void alternatePmeNbGpuWaitReduce(nonbonded_verlet_t* nbv,
                                        gmx_pme_t*          pmedata,
                                        gmx::ForceOutputs*  forceOutputsNonbonded,
                                        gmx::ForceOutputs*  forceOutputsPme,
                                        gmx_enerdata_t*     enerd,
                                        const real          lambdaQ,
                                        const StepWorkload& stepWork,
                                        gmx_wallcycle_t     wcycle)
{
    bool isPmeGpuDone = false;
    bool isNbGpuDone  = false;

    gmx::ArrayRef<const gmx::RVec> pmeGpuForces;

    while (!isPmeGpuDone || !isNbGpuDone)
    {
        if (!isPmeGpuDone)
        {
            GpuTaskCompletion completionType =
                    (isNbGpuDone) ? GpuTaskCompletion::Wait : GpuTaskCompletion::Check;
            isPmeGpuDone = pme_gpu_try_finish_task(
                    pmedata, stepWork, wcycle, &forceOutputsPme->forceWithVirial(), enerd, lambdaQ, completionType);
        }

        if (!isNbGpuDone)
        {
            auto&             forceBuffersNonbonded = forceOutputsNonbonded->forceWithShiftForces();
            GpuTaskCompletion completionType =
                    (isPmeGpuDone) ? GpuTaskCompletion::Wait : GpuTaskCompletion::Check;
            isNbGpuDone = Nbnxm::gpu_try_finish_task(
                    nbv->gpu_nbv,
                    stepWork,
                    AtomLocality::Local,
                    enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::LJSR].data(),
                    enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::CoulombSR].data(),
                    forceBuffersNonbonded.shiftForces(),
                    completionType,
                    wcycle);

            if (isNbGpuDone)
            {
                nbv->atomdata_add_nbat_f_to_f(AtomLocality::Local, forceBuffersNonbonded.force());
            }
        }
    }
}

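/* Note the structure of the loop above: while both tasks are pending, each is
 * polled with GpuTaskCompletion::Check; once one task has completed, the other
 * is waited on with GpuTaskCompletion::Wait. This way exactly one of the two
 * force reductions always overlaps with the still-running GPU task, whichever
 * finishes first.
 */
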
/*! \brief Set up the different force buffers; also does clearing.
 *
 * \param[in] forceHelperBuffers  Helper force buffers
 * \param[in] force     force array
 * \param[in] stepWork  Step schedule flags
 * \param[out] wcycle   wallcycle recording structure
 *
 * \returns             Cleared force output structure
 */
static ForceOutputs setupForceOutputs(ForceHelperBuffers*                 forceHelperBuffers,
                                      gmx::ArrayRefWithPadding<gmx::RVec> force,
                                      const StepWorkload&                 stepWork,
                                      gmx_wallcycle_t                     wcycle)
{
    wallcycle_sub_start(wcycle, ewcsCLEAR_FORCE_BUFFER);

    /* NOTE: We assume fr->shiftForces is all zeros here */
    gmx::ForceWithShiftForces forceWithShiftForces(
            force, stepWork.computeVirial, forceHelperBuffers->shiftForces());

    if (stepWork.computeForces)
    {
        /* Clear the short- and long-range forces */
        clearRVecs(forceWithShiftForces.force(), true);

        /* Clear the shift forces */
        clearRVecs(forceWithShiftForces.shiftForces(), false);
    }

    /* If we need to compute the virial, we might need a separate
     * force buffer for algorithms for which the virial is calculated
     * directly, such as PME. Otherwise, forceWithVirial uses the
     * same force (f in legacy calls) buffer as other algorithms.
     */
    const bool useSeparateForceWithVirialBuffer =
            (stepWork.computeForces
             && (stepWork.computeVirial && forceHelperBuffers->haveDirectVirialContributions()));
    /* forceWithVirial uses the local atom range only */
    gmx::ForceWithVirial forceWithVirial(
            useSeparateForceWithVirialBuffer ? forceHelperBuffers->forceBufferForDirectVirialContributions()
                                             : force.unpaddedArrayRef(),
            stepWork.computeVirial);

    if (useSeparateForceWithVirialBuffer)
    {
        /* TODO: update comment
         * We only compute forces on local atoms. Note that vsites can
         * spread to non-local atoms, but that part of the buffer is
         * cleared separately in the vsite spreading code.
         */
        clearRVecs(forceWithVirial.force_, true);
    }

    wallcycle_sub_stop(wcycle, ewcsCLEAR_FORCE_BUFFER);

    return ForceOutputs(
            forceWithShiftForces, forceHelperBuffers->haveDirectVirialContributions(), forceWithVirial);
}

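/* Note on the buffer setup above: in the common case without direct virial
 * contributions, forceWithVirial and forceWithShiftForces alias the same
 * buffer, so algorithms such as PME add their forces straight into the main
 * force array and no extra summation is needed in postProcessForces().
 */
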
/*! \brief Set up flags that have the lifetime of the domain indicating what type of work is there to compute.
 */
static DomainLifetimeWorkload setupDomainLifetimeWorkload(const t_inputrec&         inputrec,
                                                          const t_forcerec&         fr,
                                                          const pull_t*             pull_work,
                                                          const gmx_edsam*          ed,
                                                          const t_mdatoms&          mdatoms,
                                                          const SimulationWorkload& simulationWork,
                                                          const StepWorkload&       stepWork)
{
    DomainLifetimeWorkload domainWork;
    // Note that haveSpecialForces is constant over the whole run
    domainWork.haveSpecialForces =
            haveSpecialForces(inputrec, *fr.forceProviders, pull_work, stepWork.computeForces, ed);
    domainWork.haveCpuListedForceWork = false;
    domainWork.haveCpuBondedWork      = false;
    for (const auto& listedForces : fr.listedForces)
    {
        if (listedForces.haveCpuListedForces(*fr.fcdata))
        {
            domainWork.haveCpuListedForceWork = true;
        }
        if (listedForces.haveCpuBondeds())
        {
            domainWork.haveCpuBondedWork = true;
        }
    }
    domainWork.haveGpuBondedWork = ((fr.gpuBonded != nullptr) && fr.gpuBonded->haveInteractions());
    // Note that haveFreeEnergyWork is constant over the whole run
    domainWork.haveFreeEnergyWork =
            (fr.efep != FreeEnergyPerturbationType::No && mdatoms.nPerturbed != 0);
    // We assume we have local force work if there are CPU
    // force tasks including PME or nonbondeds.
    domainWork.haveCpuLocalForceWork =
            domainWork.haveSpecialForces || domainWork.haveCpuListedForceWork
            || domainWork.haveFreeEnergyWork || simulationWork.useCpuNonbonded || simulationWork.useCpuPme
            || simulationWork.haveEwaldSurfaceContribution || inputrec.nwall > 0;

    return domainWork;
}

/*! \brief Set up force flag struct from the force bitmask.
 *
 * \param[in]      legacyFlags          Force bitmask flags used to construct the new flags
 * \param[in]      mtsLevels            The multiple time-stepping levels, either empty or 2 levels
 * \param[in]      step                 The current MD step
 * \param[in]      simulationWork       Simulation workload description.
 * \param[in]      rankHasPmeDuty       If this rank computes PME.
 *
 * \returns New StepWorkload description.
 */
static StepWorkload setupStepWorkload(const int                     legacyFlags,
                                      ArrayRef<const gmx::MtsLevel> mtsLevels,
                                      const int64_t                 step,
                                      const SimulationWorkload&     simulationWork,
                                      const bool                    rankHasPmeDuty)
{
    GMX_ASSERT(mtsLevels.empty() || mtsLevels.size() == 2, "Expect 0 or 2 MTS levels");
    const bool computeSlowForces = (mtsLevels.empty() || step % mtsLevels[1].stepFactor == 0);

    StepWorkload flags;
    flags.stateChanged        = ((legacyFlags & GMX_FORCE_STATECHANGED) != 0);
    flags.haveDynamicBox      = ((legacyFlags & GMX_FORCE_DYNAMICBOX) != 0);
    flags.doNeighborSearch    = ((legacyFlags & GMX_FORCE_NS) != 0);
    flags.computeSlowForces   = computeSlowForces;
    flags.computeVirial       = ((legacyFlags & GMX_FORCE_VIRIAL) != 0);
    flags.computeEnergy       = ((legacyFlags & GMX_FORCE_ENERGY) != 0);
    flags.computeForces       = ((legacyFlags & GMX_FORCE_FORCES) != 0);
    flags.computeListedForces = ((legacyFlags & GMX_FORCE_LISTED) != 0);
    flags.computeNonbondedForces =
            ((legacyFlags & GMX_FORCE_NONBONDED) != 0) && simulationWork.computeNonbonded
            && !(simulationWork.computeNonbondedAtMtsLevel1 && !computeSlowForces);
    flags.computeDhdl = ((legacyFlags & GMX_FORCE_DHDL) != 0);

    if (simulationWork.useGpuBufferOps)
    {
        GMX_ASSERT(simulationWork.useGpuNonbonded,
                   "Can only offload buffer ops if nonbonded computation is also offloaded");
    }
    flags.useGpuXBufferOps = simulationWork.useGpuBufferOps;
    // on virial steps the CPU reduction path is taken
    flags.useGpuFBufferOps = simulationWork.useGpuBufferOps && !flags.computeVirial;
    flags.useGpuPmeFReduction = flags.computeSlowForces && flags.useGpuFBufferOps && simulationWork.useGpuPme
                                && (rankHasPmeDuty || simulationWork.useGpuPmePpCommunication);
    flags.useGpuXHalo = simulationWork.useGpuHaloExchange;
    flags.useGpuFHalo = simulationWork.useGpuHaloExchange && flags.useGpuFBufferOps;

    return flags;
}

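/* Sketch of the MTS gating above, assuming two levels with
 * mtsLevels[1].stepFactor == 4: computeSlowForces is true at steps 0, 4, 8, ...
 * and false in between, so the slow (level-1) forces are evaluated only every
 * fourth step and are later scaled up by the step factor in combineMtsForces().
 */
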
/*! \brief Launch end-of-step GPU tasks: buffer clearing and rolling pruning.
 *
 * TODO: eliminate \p useGpuPmeOnThisRank when this is
 * incorporated in DomainLifetimeWorkload.
 */
static void launchGpuEndOfStepTasks(nonbonded_verlet_t*               nbv,
                                    gmx::GpuBonded*                   gpuBonded,
                                    gmx_pme_t*                        pmedata,
                                    gmx_enerdata_t*                   enerd,
                                    const gmx::MdrunScheduleWorkload& runScheduleWork,
                                    bool                              useGpuPmeOnThisRank,
                                    int64_t                           step,
                                    gmx_wallcycle_t                   wcycle)
{
    if (runScheduleWork.simulationWork.useGpuNonbonded && runScheduleWork.stepWork.computeNonbondedForces)
    {
        /* Launch pruning before buffer clearing because the API overhead of the
         * clear kernel launches can leave the GPU idle while it could be running
         * the prune kernel.
         */
        if (nbv->isDynamicPruningStepGpu(step))
        {
            nbv->dispatchPruneKernelGpu(step);
        }

        /* now clear the GPU outputs while we finish the step on the CPU */
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        Nbnxm::gpu_clear_outputs(nbv->gpu_nbv, runScheduleWork.stepWork.computeVirial);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
    }

    if (useGpuPmeOnThisRank)
    {
        pme_gpu_reinit_computation(pmedata, wcycle);
    }

    if (runScheduleWork.domainWork.haveGpuBondedWork && runScheduleWork.stepWork.computeEnergy)
    {
        // in principle this should be included in the DD balancing region,
        // but generally it is infrequent so we'll omit it for the sake of
        // simpler code
        gpuBonded->waitAccumulateEnergyTerms(enerd);

        gpuBonded->clearEnergies();
    }
}

//! \brief Data structure to hold dipole-related data and staging arrays
struct DipoleData
{
    //! Dipole staging for fast summing over MPI
    gmx::DVec muStaging[2] = { { 0.0, 0.0, 0.0 } };
    //! Dipole staging for states A and B (index 0 and 1 resp.)
    gmx::RVec muStateAB[2] = { { 0.0_real, 0.0_real, 0.0_real } };
};

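/* Staging both states contiguously lets reduceAndUpdateMuTot() below sum all
 * six components across ranks with a single gmx_sumd(2 * DIM, ...) call
 * instead of two separate MPI reductions.
 */
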
static void reduceAndUpdateMuTot(DipoleData*                   dipoleData,
                                 const t_commrec*              cr,
                                 const bool                    haveFreeEnergy,
                                 gmx::ArrayRef<const real>     lambda,
                                 rvec                          muTotal,
                                 const DDBalanceRegionHandler& ddBalanceRegionHandler)
{
    if (PAR(cr))
    {
        gmx_sumd(2 * DIM, dipoleData->muStaging[0], cr);
        ddBalanceRegionHandler.reopenRegionCpu();
    }
    for (int i = 0; i < 2; i++)
    {
        for (int j = 0; j < DIM; j++)
        {
            dipoleData->muStateAB[i][j] = dipoleData->muStaging[i][j];
        }
    }

    if (!haveFreeEnergy)
    {
        copy_rvec(dipoleData->muStateAB[0], muTotal);
    }
    else
    {
        for (int j = 0; j < DIM; j++)
        {
            muTotal[j] = (1.0 - lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)])
                                 * dipoleData->muStateAB[0][j]
                         + lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)]
                                   * dipoleData->muStateAB[1][j];
        }
    }
}

/*! \brief Combines the MTS level0 and level1 force buffers into a full force buffer and an MTS-combined force buffer.
 *
 * \param[in]     numAtoms        The number of atoms to combine forces for
 * \param[in,out] forceMtsLevel0  Input: F_level0, output: F_level0 + F_level1
 * \param[in,out] forceMts        Input: F_level1, output: F_level0 + mtsFactor * F_level1
 * \param[in]     mtsFactor       The factor between the level0 and level1 time step
 */
static void combineMtsForces(const int      numAtoms,
                             ArrayRef<RVec> forceMtsLevel0,
                             ArrayRef<RVec> forceMts,
                             const real     mtsFactor)
{
    const int gmx_unused numThreads = gmx_omp_nthreads_get(emntDefault);
#pragma omp parallel for num_threads(numThreads) schedule(static)
    for (int i = 0; i < numAtoms; i++)
    {
        const RVec forceMtsLevel0Tmp = forceMtsLevel0[i];
        forceMtsLevel0[i] += forceMts[i];
        forceMts[i] = forceMtsLevel0Tmp + mtsFactor * forceMts[i];
    }
}

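/* Worked example, assuming mtsFactor = 2: for an atom with F0 = (1,0,0) and
 * F1 = (0,1,0), the loop above leaves forceMtsLevel0 = F0 + F1 = (1,1,0), the
 * plain sum, and forceMts = F0 + 2*F1 = (1,2,0), where the level-1 force is
 * scaled up to compensate for being applied only every other step.
 */
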
1095 /*! \brief Setup for the local and non-local GPU force reductions:
1096  * reinitialization plus the registration of forces and dependencies.
1097  *
1098  * \param [in] runScheduleWork               Schedule workload flag structure
1099  * \param [in] cr                            Communication record object
1100  * \param [in] fr                            Force record object
1101  */
1102 static void setupGpuForceReductions(gmx::MdrunScheduleWorkload* runScheduleWork,
1103                                     const t_commrec*            cr,
1104                                     t_forcerec*                 fr)
1105 {
1106
1107     nonbonded_verlet_t*          nbv      = fr->nbv.get();
1108     gmx::StatePropagatorDataGpu* stateGpu = fr->stateGpu;
1109
1110     // (re-)initialize local GPU force reduction
1111     const bool accumulate =
1112             runScheduleWork->domainWork.haveCpuLocalForceWork || havePPDomainDecomposition(cr);
1113     const int atomStart = 0;
1114     fr->gpuForceReduction[gmx::AtomLocality::Local]->reinit(stateGpu->getForces(),
1115                                                             nbv->getNumAtoms(AtomLocality::Local),
1116                                                             nbv->getGridIndices(),
1117                                                             atomStart,
1118                                                             accumulate,
1119                                                             stateGpu->fReducedOnDevice());
1120
1121     // register forces and add dependencies
1122     fr->gpuForceReduction[gmx::AtomLocality::Local]->registerNbnxmForce(nbv->getGpuForces());
1123
1124     if (runScheduleWork->simulationWork.useGpuPme
1125         && (thisRankHasDuty(cr, DUTY_PME) || runScheduleWork->simulationWork.useGpuPmePpCommunication))
1126     {
1127         DeviceBuffer<gmx::RVec> forcePtr =
1128                 thisRankHasDuty(cr, DUTY_PME) ? pme_gpu_get_device_f(fr->pmedata)
1129                                               :                    // PME force buffer on same GPU
1130                         fr->pmePpCommGpu->getGpuForceStagingPtr(); // buffer received from other GPU
1131         fr->gpuForceReduction[gmx::AtomLocality::Local]->registerRvecForce(forcePtr);
1132
1133         GpuEventSynchronizer* const pmeSynchronizer =
1134                 (thisRankHasDuty(cr, DUTY_PME) ? pme_gpu_get_f_ready_synchronizer(fr->pmedata)
1135                                                : // PME force buffer on same GPU
1136                          fr->pmePpCommGpu->getForcesReadySynchronizer()); // buffer received from other GPU
1137         fr->gpuForceReduction[gmx::AtomLocality::Local]->addDependency(pmeSynchronizer);
1138     }
1139
1140     if ((runScheduleWork->domainWork.haveCpuLocalForceWork || havePPDomainDecomposition(cr))
1141         && !runScheduleWork->simulationWork.useGpuHaloExchange)
1142     {
1143         auto forcesReadyLocality = havePPDomainDecomposition(cr) ? AtomLocality::Local : AtomLocality::All;
1144         const bool useGpuForceBufferOps = true;
1145         fr->gpuForceReduction[gmx::AtomLocality::Local]->addDependency(
1146                 stateGpu->getForcesReadyOnDeviceEvent(forcesReadyLocality, useGpuForceBufferOps));
1147     }
1148
1149     if (runScheduleWork->simulationWork.useGpuHaloExchange)
1150     {
1151         fr->gpuForceReduction[gmx::AtomLocality::Local]->addDependency(
1152                 cr->dd->gpuHaloExchange[0][0]->getForcesReadyOnDeviceEvent());
1153     }
1154
1155     if (havePPDomainDecomposition(cr))
1156     {
1157         // (re-)initialize non-local GPU force reduction
1158         const bool accumulate = runScheduleWork->domainWork.haveCpuBondedWork
1159                                 || runScheduleWork->domainWork.haveFreeEnergyWork;
1160         const int atomStart = dd_numHomeAtoms(*cr->dd);
1161         fr->gpuForceReduction[gmx::AtomLocality::NonLocal]->reinit(stateGpu->getForces(),
1162                                                                    nbv->getNumAtoms(AtomLocality::NonLocal),
1163                                                                    nbv->getGridIndices(),
1164                                                                    atomStart,
1165                                                                    accumulate);
1166
1167         // register forces and add dependencies
1168         fr->gpuForceReduction[gmx::AtomLocality::NonLocal]->registerNbnxmForce(nbv->getGpuForces());
1169         if (runScheduleWork->domainWork.haveCpuBondedWork || runScheduleWork->domainWork.haveFreeEnergyWork)
1170         {
1171             fr->gpuForceReduction[gmx::AtomLocality::NonLocal]->addDependency(
1172                     stateGpu->getForcesReadyOnDeviceEvent(AtomLocality::NonLocal, true));
1173         }
1174     }
1175 }
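// The reductions set up above are consumed later in do_force(), once all
// registered dependencies have been satisfied; schematically (mirroring the
// calls further down in this file):
//
//     if (stepWork.useGpuFBufferOps)
//     {
//         fr->gpuForceReduction[gmx::AtomLocality::Local]->execute();
//     }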
1176
1177
1178 void do_force(FILE*                               fplog,
1179               const t_commrec*                    cr,
1180               const gmx_multisim_t*               ms,
1181               const t_inputrec&                   inputrec,
1182               gmx::Awh*                           awh,
1183               gmx_enfrot*                         enforcedRotation,
1184               gmx::ImdSession*                    imdSession,
1185               pull_t*                             pull_work,
1186               int64_t                             step,
1187               t_nrnb*                             nrnb,
1188               gmx_wallcycle_t                     wcycle,
1189               const gmx_localtop_t*               top,
1190               const matrix                        box,
1191               gmx::ArrayRefWithPadding<gmx::RVec> x,
1192               const history_t*                    hist,
1193               gmx::ForceBuffersView*              forceView,
1194               tensor                              vir_force,
1195               const t_mdatoms*                    mdatoms,
1196               gmx_enerdata_t*                     enerd,
1197               gmx::ArrayRef<const real>           lambda,
1198               t_forcerec*                         fr,
1199               gmx::MdrunScheduleWorkload*         runScheduleWork,
1200               gmx::VirtualSitesHandler*           vsite,
1201               rvec                                muTotal,
1202               double                              t,
1203               gmx_edsam*                          ed,
1204               int                                 legacyFlags,
1205               const DDBalanceRegionHandler&       ddBalanceRegionHandler)
1206 {
1207     auto force = forceView->forceWithPadding();
1208     GMX_ASSERT(force.unpaddedArrayRef().ssize() >= fr->natoms_force_constr,
1209                "The size of the force buffer should be at least the number of atoms to compute "
1210                "forces for");
1211
1212     nonbonded_verlet_t*  nbv = fr->nbv.get();
1213     interaction_const_t* ic  = fr->ic.get();
1214
1215     gmx::StatePropagatorDataGpu* stateGpu = fr->stateGpu;
1216
1217     const SimulationWorkload& simulationWork = runScheduleWork->simulationWork;
1218
1219     runScheduleWork->stepWork = setupStepWorkload(
1220             legacyFlags, inputrec.mtsLevels, step, simulationWork, thisRankHasDuty(cr, DUTY_PME));
1221     const StepWorkload& stepWork = runScheduleWork->stepWork;
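    // Note that stepWork is recomputed from the legacy flags on every call, while
    // simulationWork was set up once and stays constant for the whole run.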
1222
1223     const bool useGpuPmeOnThisRank =
1224             simulationWork.useGpuPme && thisRankHasDuty(cr, DUTY_PME) && stepWork.computeSlowForces;
1225
1226     /* At a search step we need to start the first balancing region
1227      * somewhere early inside the step after communication during domain
1228      * decomposition (and not during the previous step as usual).
1229      */
1230     if (stepWork.doNeighborSearch)
1231     {
1232         ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::yes);
1233     }
1234
1235     clear_mat(vir_force);
1236
1237     if (fr->pbcType != PbcType::No)
1238     {
1239         /* Compute shift vectors every step,
1240          * because of pressure coupling or box deformation!
1241          */
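        // The shift vectors are the periodic-image translations
        // S(i,j,k) = i*a + j*b + k*c that the kernels add to coordinates when
        // computing interactions with a periodic image; they only change when
        // the box changes.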
1242         if (stepWork.haveDynamicBox && stepWork.stateChanged)
1243         {
1244             calc_shifts(box, fr->shift_vec);
1245         }
1246
1247         const bool fillGrid = (stepWork.doNeighborSearch && stepWork.stateChanged);
1248         const bool calcCGCM = (fillGrid && !DOMAINDECOMP(cr));
1249         if (calcCGCM)
1250         {
1251             put_atoms_in_box_omp(fr->pbcType,
1252                                  box,
1253                                  x.unpaddedArrayRef().subArray(0, mdatoms->homenr),
1254                                  gmx_omp_nthreads_get(emntDefault));
1255             inc_nrnb(nrnb, eNR_SHIFTX, mdatoms->homenr);
1256         }
1257     }
1258
1259     nbnxn_atomdata_copy_shiftvec(stepWork.haveDynamicBox, fr->shift_vec, nbv->nbat.get());
1260
1261     const bool pmeSendCoordinatesFromGpu =
1262             GMX_MPI && simulationWork.useGpuPmePpCommunication && !stepWork.doNeighborSearch;
1263     const bool reinitGpuPmePpComms =
1264             GMX_MPI && simulationWork.useGpuPmePpCommunication && stepWork.doNeighborSearch;
1265
1266     const auto localXReadyOnDevice = (useGpuPmeOnThisRank || simulationWork.useGpuBufferOps)
1267                                              ? stateGpu->getCoordinatesReadyOnDeviceEvent(
1268                                                        AtomLocality::Local, simulationWork, stepWork)
1269                                              : nullptr;
1270
1271     // Copy coordinate from the GPU if update is on the GPU and there
1272     // are forces to be computed on the CPU, or for the computation of
1273     // virial, or if host-side data will be transferred from this task
1274     // to a remote task for halo exchange or PME-PP communication. At
1275     // search steps the current coordinates are already on the host,
1276     // hence copy is not needed.
1277     const bool haveHostPmePpComms =
1278             !thisRankHasDuty(cr, DUTY_PME) && !simulationWork.useGpuPmePpCommunication;
1279
1280     GMX_ASSERT(simulationWork.useGpuHaloExchange
1281                        == ((cr->dd != nullptr) && (!cr->dd->gpuHaloExchange[0].empty())),
1282                "The GPU halo exchange is active, but it has not been constructed.");
1283     const bool haveHostHaloExchangeComms =
1284             havePPDomainDecomposition(cr) && !simulationWork.useGpuHaloExchange;
1285
1286     bool gmx_used_in_debug haveCopiedXFromGpu = false;
1287     if (simulationWork.useGpuUpdate && !stepWork.doNeighborSearch
1288         && (runScheduleWork->domainWork.haveCpuLocalForceWork || stepWork.computeVirial
1289             || haveHostPmePpComms || haveHostHaloExchangeComms))
1290     {
1291         stateGpu->copyCoordinatesFromGpu(x.unpaddedArrayRef(), AtomLocality::Local);
1292         haveCopiedXFromGpu = true;
1293     }
1294
1295     // If coordinates are to be sent to PME task from CPU memory, perform that send here.
1296     // Otherwise the send will occur after H2D coordinate transfer.
1297     if (GMX_MPI && !thisRankHasDuty(cr, DUTY_PME) && !pmeSendCoordinatesFromGpu && stepWork.computeSlowForces)
1298     {
1299         /* Send particle coordinates to the pme nodes */
1300         if (!stepWork.doNeighborSearch && simulationWork.useGpuUpdate)
1301         {
1302             stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
1303         }
1304
1305         gmx_pme_send_coordinates(fr,
1306                                  cr,
1307                                  box,
1308                                  as_rvec_array(x.unpaddedArrayRef().data()),
1309                                  lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)],
1310                                  lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Vdw)],
1311                                  (stepWork.computeVirial || stepWork.computeEnergy),
1312                                  step,
1313                                  simulationWork.useGpuPmePpCommunication,
1314                                  reinitGpuPmePpComms,
1315                                  pmeSendCoordinatesFromGpu,
1316                                  localXReadyOnDevice,
1317                                  wcycle);
1318     }
1319
1320     // Coordinates on the device are needed if PME or BufferOps are offloaded.
1321     // The local coordinates can be copied right away.
1322     // NOTE: Consider moving this copy to right after they are updated and constrained,
1323     //       if the latter is not offloaded.
1324     if (useGpuPmeOnThisRank || stepWork.useGpuXBufferOps)
1325     {
1326         if (stepWork.doNeighborSearch)
1327         {
1328             // TODO refactor this to do_md, after partitioning.
1329             stateGpu->reinit(mdatoms->homenr,
1330                              cr->dd != nullptr ? dd_numAtomsZones(*cr->dd) : mdatoms->homenr);
1331             if (useGpuPmeOnThisRank)
1332             {
1333                 // TODO: This should be moved into PME setup function ( pme_gpu_prepare_computation(...) )
1334                 pme_gpu_set_device_x(fr->pmedata, stateGpu->getCoordinates());
1335             }
1336         }
1337         // We need to copy coordinates when:
1338         // 1. Update is not offloaded
1339         // 2. The buffers were reinitialized on search step
1340         if (!simulationWork.useGpuUpdate || stepWork.doNeighborSearch)
1341         {
1342             GMX_ASSERT(stateGpu != nullptr, "stateGpu should not be null");
1343             stateGpu->copyCoordinatesToGpu(x.unpaddedArrayRef(), AtomLocality::Local);
1344         }
1345     }
1346
1347     // If coordinates are to be sent to PME task from GPU memory, perform that send here.
1348     // Otherwise the send will occur before the H2D coordinate transfer.
1349     if (!thisRankHasDuty(cr, DUTY_PME) && pmeSendCoordinatesFromGpu)
1350     {
1351         /* Send particle coordinates to the pme nodes */
1352         gmx_pme_send_coordinates(fr,
1353                                  cr,
1354                                  box,
1355                                  as_rvec_array(x.unpaddedArrayRef().data()),
1356                                  lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)],
1357                                  lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Vdw)],
1358                                  (stepWork.computeVirial || stepWork.computeEnergy),
1359                                  step,
1360                                  simulationWork.useGpuPmePpCommunication,
1361                                  reinitGpuPmePpComms,
1362                                  pmeSendCoordinatesFromGpu,
1363                                  localXReadyOnDevice,
1364                                  wcycle);
1365     }
1366
1367     if (useGpuPmeOnThisRank)
1368     {
1369         launchPmeGpuSpread(fr->pmedata,
1370                            box,
1371                            stepWork,
1372                            localXReadyOnDevice,
1373                            lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)],
1374                            wcycle);
1375     }
1376
1377     const gmx::DomainLifetimeWorkload& domainWork = runScheduleWork->domainWork;
1378
1379     /* do gridding for pair search */
1380     if (stepWork.doNeighborSearch)
1381     {
1382         if (fr->wholeMoleculeTransform && stepWork.stateChanged)
1383         {
1384             fr->wholeMoleculeTransform->updateForAtomPbcJumps(x.unpaddedArrayRef(), box);
1385         }
1386
1387         wallcycle_start(wcycle, ewcNS);
1388         if (!DOMAINDECOMP(cr))
1389         {
1390             const rvec vzero       = { 0.0_real, 0.0_real, 0.0_real };
1391             const rvec boxDiagonal = { box[XX][XX], box[YY][YY], box[ZZ][ZZ] };
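            // Without domain decomposition a single grid spans the whole unit
            // cell, with lower corner vzero and upper corner boxDiagonal.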
1392             wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
1393             nbnxn_put_on_grid(nbv,
1394                               box,
1395                               0,
1396                               vzero,
1397                               boxDiagonal,
1398                               nullptr,
1399                               { 0, mdatoms->homenr },
1400                               -1,
1401                               fr->cginfo,
1402                               x.unpaddedArrayRef(),
1403                               0,
1404                               nullptr);
1405             wallcycle_sub_stop(wcycle, ewcsNBS_GRID_LOCAL);
1406         }
1407         else
1408         {
1409             wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
1410             nbnxn_put_on_grid_nonlocal(nbv, domdec_zones(cr->dd), fr->cginfo, x.unpaddedArrayRef());
1411             wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);
1412         }
1413
1414         nbv->setAtomProperties(gmx::constArrayRefFromArray(mdatoms->typeA, mdatoms->nr),
1415                                gmx::constArrayRefFromArray(mdatoms->chargeA, mdatoms->nr),
1416                                fr->cginfo);
1417
1418         wallcycle_stop(wcycle, ewcNS);
1419
1420         /* initialize the GPU nbnxm atom data and bonded data structures */
1421         if (simulationWork.useGpuNonbonded)
1422         {
1423             // Note: cycle counting only nonbondeds, gpuBonded counts internally
1424             wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
1425             wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1426             Nbnxm::gpu_init_atomdata(nbv->gpu_nbv, nbv->nbat.get());
1427             wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1428             wallcycle_stop(wcycle, ewcLAUNCH_GPU);
1429
1430             if (fr->gpuBonded)
1431             {
1432                 /* Now we put all atoms on the grid, we can assign bonded
1433                  * interactions to the GPU, where the grid order is
1434                  * needed. Also the xq, f and fshift device buffers have
1435                  * been reallocated if needed, so the bonded code can
1436                  * learn about them. */
1437                 // TODO the xq, f, and fshift buffers are now shared
1438                 // resources, so they should be maintained by a
1439                 // higher-level object than the nb module.
1440                 fr->gpuBonded->updateInteractionListsAndDeviceBuffers(nbv->getGridIndices(),
1441                                                                       top->idef,
1442                                                                       Nbnxm::gpu_get_xq(nbv->gpu_nbv),
1443                                                                       Nbnxm::gpu_get_f(nbv->gpu_nbv),
1444                                                                       Nbnxm::gpu_get_fshift(nbv->gpu_nbv));
1445             }
1446         }
1447
1448         // Need to run after the GPU-offload bonded interaction lists
1449         // are set up to be able to determine whether there is bonded work.
1450         runScheduleWork->domainWork = setupDomainLifetimeWorkload(
1451                 inputrec, *fr, pull_work, ed, *mdatoms, simulationWork, stepWork);
1452
1453         wallcycle_start_nocount(wcycle, ewcNS);
1454         wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
1455         /* Note that with a GPU the launch overhead of the list transfer is not timed separately */
1456         nbv->constructPairlist(InteractionLocality::Local, top->excls, step, nrnb);
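        // The local list holds only pairs among home atoms; with domain
        // decomposition, pairs involving halo atoms go into the NonLocal list
        // constructed further below.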
1457
1458         nbv->setupGpuShortRangeWork(fr->gpuBonded, InteractionLocality::Local);
1459
1460         wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_LOCAL);
1461         wallcycle_stop(wcycle, ewcNS);
1462
1463         if (stepWork.useGpuXBufferOps)
1464         {
1465             nbv->atomdata_init_copy_x_to_nbat_x_gpu();
1466         }
1467
1468         if (simulationWork.useGpuBufferOps)
1469         {
1470             setupGpuForceReductions(runScheduleWork, cr, fr);
1471         }
1472     }
1473     else if (!EI_TPI(inputrec.eI) && stepWork.computeNonbondedForces)
1474     {
1475         if (stepWork.useGpuXBufferOps)
1476         {
1477             GMX_ASSERT(stateGpu, "stateGpu should be valid when buffer ops are offloaded");
1478             nbv->convertCoordinatesGpu(AtomLocality::Local, stateGpu->getCoordinates(), localXReadyOnDevice);
1479         }
1480         else
1481         {
1482             if (simulationWork.useGpuUpdate)
1483             {
1484                 GMX_ASSERT(stateGpu, "need a valid stateGpu object");
1485                 GMX_ASSERT(haveCopiedXFromGpu,
1486                            "a wait should only be triggered if copy has been scheduled");
1487                 stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
1488             }
1489             nbv->convertCoordinates(AtomLocality::Local, x.unpaddedArrayRef());
1490         }
1491     }
1492
1493     if (simulationWork.useGpuNonbonded && (stepWork.computeNonbondedForces || domainWork.haveGpuBondedWork))
1494     {
1495         ddBalanceRegionHandler.openBeforeForceComputationGpu();
1496
1497         wallcycle_start(wcycle, ewcLAUNCH_GPU);
1498         wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1499         Nbnxm::gpu_upload_shiftvec(nbv->gpu_nbv, nbv->nbat.get());
1500         if (stepWork.doNeighborSearch || !stepWork.useGpuXBufferOps)
1501         {
1502             Nbnxm::gpu_copy_xq_to_gpu(nbv->gpu_nbv, nbv->nbat.get(), AtomLocality::Local);
1503         }
1504         wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1505         wallcycle_stop(wcycle, ewcLAUNCH_GPU);
1506         // With X buffer ops offloaded to the GPU, the xq copy above is only needed on search steps.
1507
1508         // Bonded work is not split into separate local and non-local parts, so with DD
1509         // we can only launch the kernel after the non-local coordinates have been received.
1510         if (domainWork.haveGpuBondedWork && !havePPDomainDecomposition(cr))
1511         {
1512             fr->gpuBonded->setPbcAndlaunchKernel(fr->pbcType, box, fr->bMolPBC, stepWork);
1513         }
1514
1515         /* launch local nonbonded work on GPU */
1516         wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
1517         wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1518         do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::Local, enbvClearFNo, step, nrnb, wcycle);
1519         wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1520         wallcycle_stop(wcycle, ewcLAUNCH_GPU);
1521     }
1522
1523     if (useGpuPmeOnThisRank)
1524     {
1525         // In PME GPU and mixed mode we launch FFT / gather after the
1526         // X copy/transform to allow overlap as well as after the GPU NB
1527         // launch to avoid FFT launch overhead hijacking the CPU and delaying
1528         // the nonbonded kernel.
1529         launchPmeGpuFftAndGather(fr->pmedata,
1530                                  lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)],
1531                                  wcycle,
1532                                  stepWork);
1533     }
1534
1535     /* Communicate coordinates and sum dipole if necessary +
1536        do non-local pair search */
1537     if (havePPDomainDecomposition(cr))
1538     {
1539         if (stepWork.doNeighborSearch)
1540         {
1541             // TODO: fuse this branch with the above large stepWork.doNeighborSearch block
1542             wallcycle_start_nocount(wcycle, ewcNS);
1543             wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);
1544             /* Note that with a GPU the launch overhead of the list transfer is not timed separately */
1545             nbv->constructPairlist(InteractionLocality::NonLocal, top->excls, step, nrnb);
1546
1547             nbv->setupGpuShortRangeWork(fr->gpuBonded, InteractionLocality::NonLocal);
1548             wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);
1549             wallcycle_stop(wcycle, ewcNS);
1550             // TODO refactor this GPU halo exchange re-initialisation
1551             // to location in do_md where GPU halo exchange is
1552             // constructed at partitioning, after above stateGpu
1553             // re-initialization has similarly been refactored
1554             if (simulationWork.useGpuHaloExchange)
1555             {
1556                 reinitGpuHaloExchange(*cr, stateGpu->getCoordinates(), stateGpu->getForces());
1557             }
1558         }
1559         else
1560         {
1561             if (stepWork.useGpuXHalo)
1562             {
1563                 // The following must be called after local setCoordinates (which records an event
1564                 // when the coordinate data has been copied to the device).
1565                 communicateGpuHaloCoordinates(*cr, box, localXReadyOnDevice);
1566
1567                 if (domainWork.haveCpuBondedWork || domainWork.haveFreeEnergyWork)
1568                 {
1569                     // non-local part of coordinate buffer must be copied back to host for CPU work
1570                     stateGpu->copyCoordinatesFromGpu(x.unpaddedArrayRef(), AtomLocality::NonLocal);
1571                 }
1572             }
1573             else
1574             {
1575                 if (simulationWork.useGpuUpdate)
1576                 {
1577                     GMX_ASSERT(haveCopiedXFromGpu,
1578                                "a wait should only be triggered if copy has been scheduled");
1579                     stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
1580                 }
1581                 dd_move_x(cr->dd, box, x.unpaddedArrayRef(), wcycle);
1582             }
1583
1584             if (stepWork.useGpuXBufferOps)
1585             {
1586                 if (!useGpuPmeOnThisRank && !stepWork.useGpuXHalo)
1587                 {
1588                     stateGpu->copyCoordinatesToGpu(x.unpaddedArrayRef(), AtomLocality::NonLocal);
1589                 }
1590                 nbv->convertCoordinatesGpu(AtomLocality::NonLocal,
1591                                            stateGpu->getCoordinates(),
1592                                            stateGpu->getCoordinatesReadyOnDeviceEvent(
1593                                                    AtomLocality::NonLocal, simulationWork, stepWork));
1594             }
1595             else
1596             {
1597                 nbv->convertCoordinates(AtomLocality::NonLocal, x.unpaddedArrayRef());
1598             }
1599         }
1600
1601         if (simulationWork.useGpuNonbonded)
1602         {
1604             if (stepWork.doNeighborSearch || !stepWork.useGpuXBufferOps)
1605             {
1606                 wallcycle_start(wcycle, ewcLAUNCH_GPU);
1607                 wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1608                 Nbnxm::gpu_copy_xq_to_gpu(nbv->gpu_nbv, nbv->nbat.get(), AtomLocality::NonLocal);
1609                 wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1610                 wallcycle_stop(wcycle, ewcLAUNCH_GPU);
1611             }
1612
1613             if (domainWork.haveGpuBondedWork)
1614             {
1615                 fr->gpuBonded->setPbcAndlaunchKernel(fr->pbcType, box, fr->bMolPBC, stepWork);
1616             }
1617
1618             /* launch non-local nonbonded tasks on GPU */
1619             wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
1620             wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1621             do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::NonLocal, enbvClearFNo, step, nrnb, wcycle);
1622             wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1623             wallcycle_stop(wcycle, ewcLAUNCH_GPU);
1624         }
1625     }
1626
1627     if (simulationWork.useGpuNonbonded && stepWork.computeNonbondedForces)
1628     {
1629         /* launch D2H copy-back F */
1630         wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
1631         wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1632
1633         if (havePPDomainDecomposition(cr))
1634         {
1635             Nbnxm::gpu_launch_cpyback(nbv->gpu_nbv, nbv->nbat.get(), stepWork, AtomLocality::NonLocal);
1636         }
1637         Nbnxm::gpu_launch_cpyback(nbv->gpu_nbv, nbv->nbat.get(), stepWork, AtomLocality::Local);
1638         wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1639
1640         if (domainWork.haveGpuBondedWork && stepWork.computeEnergy)
1641         {
1642             fr->gpuBonded->launchEnergyTransfer();
1643         }
1644         wallcycle_stop(wcycle, ewcLAUNCH_GPU);
1645     }
1646
1647     gmx::ArrayRef<const gmx::RVec> xWholeMolecules;
1648     if (fr->wholeMoleculeTransform)
1649     {
1650         xWholeMolecules = fr->wholeMoleculeTransform->wholeMoleculeCoordinates(x.unpaddedArrayRef(), box);
1651     }
1652
1653     DipoleData dipoleData;
1654
1655     if (simulationWork.computeMuTot)
1656     {
1657         const int start = 0;
1658
1659         /* Calculate total (local) dipole moment in a temporary common array.
1660          * This makes it possible to sum them over nodes faster.
1661          */
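        // In essence calc_mu accumulates mu = sum_i q_i * x_i over the home atoms,
        // once with the A-state and once with the B-state charges, so that
        // reduceAndUpdateMuTot only needs a single sum over the ranks before
        // interpolating the two states when charges are perturbed.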
1662         gmx::ArrayRef<const gmx::RVec> xRef =
1663                 (xWholeMolecules.empty() ? x.unpaddedArrayRef() : xWholeMolecules);
1664         calc_mu(start,
1665                 mdatoms->homenr,
1666                 xRef,
1667                 gmx::arrayRefFromArray(mdatoms->chargeA, mdatoms->nr),
1668                 gmx::arrayRefFromArray(mdatoms->chargeB, mdatoms->nr),
1669                 mdatoms->nChargePerturbed != 0,
1670                 dipoleData.muStaging[0],
1671                 dipoleData.muStaging[1]);
1672
1673         reduceAndUpdateMuTot(
1674                 &dipoleData, cr, (fr->efep != FreeEnergyPerturbationType::No), lambda, muTotal, ddBalanceRegionHandler);
1675     }
1676
1677     /* Reset energies */
1678     reset_enerdata(enerd);
1679
1680     if (DOMAINDECOMP(cr) && !thisRankHasDuty(cr, DUTY_PME))
1681     {
1682         wallcycle_start(wcycle, ewcPPDURINGPME);
1683         dd_force_flop_start(cr->dd, nrnb);
1684     }
1685
1686     // For the rest of the CPU tasks that depend on GPU-update produced coordinates,
1687     // this wait ensures that the D2H transfer is complete.
1688     if (simulationWork.useGpuUpdate && !stepWork.doNeighborSearch
1689         && (runScheduleWork->domainWork.haveCpuLocalForceWork || stepWork.computeVirial))
1690     {
1691         GMX_ASSERT(haveCopiedXFromGpu, "a wait should only be triggered if copy has been scheduled");
1692         stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
1693     }
1694
1695     if (inputrec.bRot)
1696     {
1697         wallcycle_start(wcycle, ewcROT);
1698         do_rotation(cr, enforcedRotation, box, as_rvec_array(x.unpaddedArrayRef().data()), t, step, stepWork.doNeighborSearch);
1699         wallcycle_stop(wcycle, ewcROT);
1700     }
1701
1702     /* Start the force cycle counter.
1703      * Note that a different counter is used for dynamic load balancing.
1704      */
1705     wallcycle_start(wcycle, ewcFORCE);
1706
1707     /* Set up and clear force outputs:
1708      * forceOutMtsLevel0:  everything except what is in the other two outputs
1709      * forceOutMtsLevel1:  PME-mesh and listed-forces group 1
1710      * forceOutNonbonded: non-bonded forces
1711      * Without multiple time stepping all point to the same object.
1712      * With multiple time-stepping the use is different for MTS fast (level0 only) and slow steps.
1713      */
1714     ForceOutputs forceOutMtsLevel0 =
1715             setupForceOutputs(&fr->forceHelperBuffers[0], force, stepWork, wcycle);
1716
1717     // Force output for MTS combined forces, only set at level1 MTS steps
1718     std::optional<ForceOutputs> forceOutMts =
1719             (fr->useMts && stepWork.computeSlowForces)
1720                     ? std::optional(setupForceOutputs(&fr->forceHelperBuffers[1],
1721                                                       forceView->forceMtsCombinedWithPadding(),
1722                                                       stepWork,
1723                                                       wcycle))
1724                     : std::nullopt;
1725
1726     ForceOutputs* forceOutMtsLevel1 =
1727             fr->useMts ? (stepWork.computeSlowForces ? &forceOutMts.value() : nullptr) : &forceOutMtsLevel0;
1728
1729     const bool nonbondedAtMtsLevel1 = runScheduleWork->simulationWork.computeNonbondedAtMtsLevel1;
1730
1731     ForceOutputs* forceOutNonbonded = nonbondedAtMtsLevel1 ? forceOutMtsLevel1 : &forceOutMtsLevel0;
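    // To summarize the mapping above: without MTS all three names refer to
    // forceOutMtsLevel0; at MTS fast steps forceOutMtsLevel1 is nullptr since no
    // slow forces are computed; at MTS slow steps it points to the separate
    // forceOutMts object, and forceOutNonbonded selects the level at which the
    // nonbonded forces are integrated.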
1732
1733     if (inputrec.bPull && pull_have_constraint(*pull_work))
1734     {
1735         clear_pull_forces(pull_work);
1736     }
1737
1738     /* When the non-bonded forces are computed on the CPU, we calculate them here,
1739      * before calling do_force_lowlevel, because in that
1740      * function, the listed forces are calculated before PME, which
1741      * does communication.  With this order, non-bonded and listed
1742      * force calculation imbalance can be balanced out by the domain
1743      * decomposition load balancing.
1744      */
1745
1746     const bool useOrEmulateGpuNb = simulationWork.useGpuNonbonded || fr->nbv->emulateGpu();
1747
1748     if (!useOrEmulateGpuNb)
1749     {
1750         do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::Local, enbvClearFYes, step, nrnb, wcycle);
1751     }
1752
1753     if (fr->efep != FreeEnergyPerturbationType::No && stepWork.computeNonbondedForces)
1754     {
1755         /* Calculate the local and non-local free energy interactions here.
1756          * Happens here on the CPU both with and without GPU.
1757          */
1758         nbv->dispatchFreeEnergyKernel(InteractionLocality::Local,
1759                                       fr,
1760                                       as_rvec_array(x.unpaddedArrayRef().data()),
1761                                       &forceOutNonbonded->forceWithShiftForces(),
1762                                       *mdatoms,
1763                                       inputrec.fepvals.get(),
1764                                       lambda,
1765                                       enerd,
1766                                       stepWork,
1767                                       nrnb);
1768
1769         if (havePPDomainDecomposition(cr))
1770         {
1771             nbv->dispatchFreeEnergyKernel(InteractionLocality::NonLocal,
1772                                           fr,
1773                                           as_rvec_array(x.unpaddedArrayRef().data()),
1774                                           &forceOutNonbonded->forceWithShiftForces(),
1775                                           *mdatoms,
1776                                           inputrec.fepvals.get(),
1777                                           lambda,
1778                                           enerd,
1779                                           stepWork,
1780                                           nrnb);
1781         }
1782     }
1783
1784     if (stepWork.computeNonbondedForces && !useOrEmulateGpuNb)
1785     {
1786         if (havePPDomainDecomposition(cr))
1787         {
1788             do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::NonLocal, enbvClearFNo, step, nrnb, wcycle);
1789         }
1790
1791         if (stepWork.computeForces)
1792         {
1793             /* Add all the non-bonded forces to the normal force array.
1794              * This can be split into a local and a non-local part when overlapping
1795              * communication with computation under domain decomposition.
1796              */
1797             wallcycle_stop(wcycle, ewcFORCE);
1798             nbv->atomdata_add_nbat_f_to_f(AtomLocality::All,
1799                                           forceOutNonbonded->forceWithShiftForces().force());
1800             wallcycle_start_nocount(wcycle, ewcFORCE);
1801         }
1802
1803         /* If there are multiple fshift output buffers we need to reduce them */
1804         if (stepWork.computeVirial)
1805         {
1806             /* This is not in a subcounter because it takes a
1807                negligible and constant-sized amount of time */
1808             nbnxn_atomdata_add_nbat_fshift_to_fshift(
1809                     *nbv->nbat, forceOutNonbonded->forceWithShiftForces().shiftForces());
1810         }
1811     }
1812
1813     // TODO Force flags should include haveFreeEnergyWork for this domain
1814     if (stepWork.useGpuXHalo && (domainWork.haveCpuBondedWork || domainWork.haveFreeEnergyWork))
1815     {
1816         wallcycle_stop(wcycle, ewcFORCE);
1817         /* Wait for non-local coordinate data to be copied from device */
1818         stateGpu->waitCoordinatesReadyOnHost(AtomLocality::NonLocal);
1819         wallcycle_start_nocount(wcycle, ewcFORCE);
1820     }
1821
1822     // Compute wall interactions, when present.
1823     // Note: should be moved to special forces.
1824     if (inputrec.nwall && stepWork.computeNonbondedForces)
1825     {
1826         /* foreign lambda component for walls */
1827         real dvdl_walls = do_walls(inputrec,
1828                                    *fr,
1829                                    box,
1830                                    *mdatoms,
1831                                    x.unpaddedConstArrayRef(),
1832                                    &forceOutMtsLevel0.forceWithVirial(),
1833                                    lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Vdw)],
1834                                    enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::LJSR].data(),
1835                                    nrnb);
1836         enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Vdw] += dvdl_walls;
1837     }
1838
1839     if (stepWork.computeListedForces)
1840     {
1841         /* Check whether we need to take into account PBC in listed interactions */
1842         bool needMolPbc = false;
1843         for (const auto& listedForces : fr->listedForces)
1844         {
1845             if (listedForces.haveCpuListedForces(*fr->fcdata))
1846             {
1847                 needMolPbc = fr->bMolPBC;
1848             }
1849         }
1850
1851         t_pbc pbc;
1852
1853         if (needMolPbc)
1854         {
1855             /* Since all atoms are in the rectangular or triclinic unit-cell,
1856              * only single box vector shifts (2 in x) are required.
1857              */
1858             set_pbc_dd(&pbc, fr->pbcType, DOMAINDECOMP(cr) ? cr->dd->numCells : nullptr, TRUE, box);
1859         }
1860
1861         for (int mtsIndex = 0; mtsIndex < (fr->useMts && stepWork.computeSlowForces ? 2 : 1); mtsIndex++)
1862         {
1863             ListedForces& listedForces = fr->listedForces[mtsIndex];
1864             ForceOutputs& forceOut     = (mtsIndex == 0 ? forceOutMtsLevel0 : *forceOutMtsLevel1);
1865             listedForces.calculate(wcycle,
1866                                    box,
1867                                    inputrec.fepvals.get(),
1868                                    cr,
1869                                    ms,
1870                                    x,
1871                                    xWholeMolecules,
1872                                    fr->fcdata.get(),
1873                                    hist,
1874                                    &forceOut,
1875                                    fr,
1876                                    &pbc,
1877                                    enerd,
1878                                    nrnb,
1879                                    lambda,
1880                                    mdatoms,
1881                                    DOMAINDECOMP(cr) ? cr->dd->globalAtomIndices.data() : nullptr,
1882                                    stepWork);
1883         }
1884     }
1885
1886     if (stepWork.computeSlowForces)
1887     {
1888         calculateLongRangeNonbondeds(fr,
1889                                      inputrec,
1890                                      cr,
1891                                      nrnb,
1892                                      wcycle,
1893                                      mdatoms,
1894                                      x.unpaddedConstArrayRef(),
1895                                      &forceOutMtsLevel1->forceWithVirial(),
1896                                      enerd,
1897                                      box,
1898                                      lambda,
1899                                      dipoleData.muStateAB,
1900                                      stepWork,
1901                                      ddBalanceRegionHandler);
1902     }
1903
1904     wallcycle_stop(wcycle, ewcFORCE);
1905
1906     // VdW dispersion correction, only computed on master rank to avoid double counting
1907     if ((stepWork.computeEnergy || stepWork.computeVirial) && fr->dispersionCorrection && MASTER(cr))
1908     {
1909         // Calculate long range corrections to pressure and energy
1910         const DispersionCorrection::Correction correction = fr->dispersionCorrection->calculate(
1911                 box, lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Vdw)]);
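        // For a homogeneous system the leading energy term is the analytic
        // Lennard-Jones tail, schematically E_disp = -(2/3) pi N rho <C6> rc^-3;
        // this is only a sketch, the exact expressions, including the pressure
        // terms, live in DispersionCorrection.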
1912
1913         if (stepWork.computeEnergy)
1914         {
1915             enerd->term[F_DISPCORR] = correction.energy;
1916             enerd->term[F_DVDL_VDW] += correction.dvdl;
1917             enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Vdw] += correction.dvdl;
1918         }
1919         if (stepWork.computeVirial)
1920         {
1921             correction.correctVirial(vir_force);
1922             enerd->term[F_PDISPCORR] = correction.pressure;
1923         }
1924     }
1925
1926     computeSpecialForces(fplog,
1927                          cr,
1928                          inputrec,
1929                          awh,
1930                          enforcedRotation,
1931                          imdSession,
1932                          pull_work,
1933                          step,
1934                          t,
1935                          wcycle,
1936                          fr->forceProviders,
1937                          box,
1938                          x.unpaddedArrayRef(),
1939                          mdatoms,
1940                          lambda,
1941                          stepWork,
1942                          &forceOutMtsLevel0.forceWithVirial(),
1943                          forceOutMtsLevel1 ? &forceOutMtsLevel1->forceWithVirial() : nullptr,
1944                          enerd,
1945                          ed,
1946                          stepWork.doNeighborSearch);
1947
1948     if (havePPDomainDecomposition(cr) && stepWork.computeForces && stepWork.useGpuFHalo
1949         && domainWork.haveCpuLocalForceWork)
1950     {
1951         stateGpu->copyForcesToGpu(forceOutMtsLevel0.forceWithShiftForces().force(), AtomLocality::Local);
1952     }
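    // This early H2D copy makes the CPU-side local force contributions available
    // to the GPU force halo exchange below; the usual copy on the buffer-ops path
    // is skipped in that case because it would overwrite the exchanged result.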
1953
1954     GMX_ASSERT(!(nonbondedAtMtsLevel1 && stepWork.useGpuFBufferOps),
1955                "The schedule below does not allow for nonbonded MTS with GPU buffer ops");
1956     GMX_ASSERT(!(nonbondedAtMtsLevel1 && stepWork.useGpuFHalo),
1957                "The schedule below does not allow for nonbonded MTS with GPU halo exchange");
1958     // Will store the number of cycles spent waiting for the GPU,
1959     // which is later used in the DLB accounting.
1960     float cycles_wait_gpu = 0;
1961     if (useOrEmulateGpuNb && stepWork.computeNonbondedForces)
1962     {
1963         auto& forceWithShiftForces = forceOutNonbonded->forceWithShiftForces();
1964
1965         /* wait for non-local forces (or calculate in emulation mode) */
1966         if (havePPDomainDecomposition(cr))
1967         {
1968             if (simulationWork.useGpuNonbonded)
1969             {
1970                 cycles_wait_gpu += Nbnxm::gpu_wait_finish_task(
1971                         nbv->gpu_nbv,
1972                         stepWork,
1973                         AtomLocality::NonLocal,
1974                         enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::LJSR].data(),
1975                         enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::CoulombSR].data(),
1976                         forceWithShiftForces.shiftForces(),
1977                         wcycle);
1978             }
1979             else
1980             {
1981                 wallcycle_start_nocount(wcycle, ewcFORCE);
1982                 do_nb_verlet(
1983                         fr, ic, enerd, stepWork, InteractionLocality::NonLocal, enbvClearFYes, step, nrnb, wcycle);
1984                 wallcycle_stop(wcycle, ewcFORCE);
1985             }
1986
1987             if (stepWork.useGpuFBufferOps)
1988             {
1989                 // TODO: move this into DomainLifetimeWorkload, including the second part of the
1990                 // condition. The bonded and free energy CPU tasks can have non-local force
1991                 // contributions which are a dependency for the GPU force reduction.
1992                 bool haveNonLocalForceContribInCpuBuffer =
1993                         domainWork.haveCpuBondedWork || domainWork.haveFreeEnergyWork;
1994
1995                 if (haveNonLocalForceContribInCpuBuffer)
1996                 {
1997                     stateGpu->copyForcesToGpu(forceOutMtsLevel0.forceWithShiftForces().force(),
1998                                               AtomLocality::NonLocal);
1999                 }
2000
2002                 fr->gpuForceReduction[gmx::AtomLocality::NonLocal]->execute();
2003
2004                 if (!stepWork.useGpuFHalo)
2005                 {
2006                     // copy from GPU input for dd_move_f()
2007                     stateGpu->copyForcesFromGpu(forceOutMtsLevel0.forceWithShiftForces().force(),
2008                                                 AtomLocality::NonLocal);
2009                 }
2010             }
2011             else
2012             {
2013                 nbv->atomdata_add_nbat_f_to_f(AtomLocality::NonLocal, forceWithShiftForces.force());
2014             }
2015
2016             if (fr->nbv->emulateGpu() && stepWork.computeVirial)
2017             {
2018                 nbnxn_atomdata_add_nbat_fshift_to_fshift(*nbv->nbat, forceWithShiftForces.shiftForces());
2019             }
2020         }
2021     }
2022
2023     /* Combining the forces for multiple time stepping before the halo exchange, when possible,
2024      * avoids an extra halo exchange (when DD is used) and post-processing step.
2025      */
2026     const bool combineMtsForcesBeforeHaloExchange =
2027             (stepWork.computeForces && fr->useMts && stepWork.computeSlowForces
2028              && (legacyFlags & GMX_FORCE_DO_NOT_NEED_NORMAL_FORCE) != 0
2029              && !(stepWork.computeVirial || simulationWork.useGpuNonbonded || useGpuPmeOnThisRank));
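    // Combining implements the MTS impulse scheme: since the slow forces act only
    // every mtsLevels[1].stepFactor-th step, the combined force is, schematically,
    //     f_combined = f_level0 + stepFactor * f_level1
    // (see combineMtsForces for the exact buffer handling).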
2030     if (combineMtsForcesBeforeHaloExchange)
2031     {
2032         const int numAtoms = havePPDomainDecomposition(cr) ? dd_numAtomsZones(*cr->dd) : mdatoms->homenr;
2033         combineMtsForces(numAtoms,
2034                          force.unpaddedArrayRef(),
2035                          forceView->forceMtsCombined(),
2036                          inputrec.mtsLevels[1].stepFactor);
2037     }
2038
2039     if (havePPDomainDecomposition(cr))
2040     {
2041         /* We are done with the CPU compute.
2042          * We will now communicate the non-local forces.
2043          * If we use a GPU this will overlap with GPU work, so in that case
2044          * we do not close the DD force balancing region here.
2045          */
2046         ddBalanceRegionHandler.closeAfterForceComputationCpu();
2047
2048         if (stepWork.computeForces)
2049         {
2051             if (stepWork.useGpuFHalo)
2052             {
2053                 // If there exist CPU forces, data from halo exchange should accumulate into these
2054                 bool accumulateForces = domainWork.haveCpuLocalForceWork;
2055                 if (!accumulateForces)
2056                 {
2057                     // Force halo exchange will set a subset of local atoms with remote non-local data
2058                     // First clear local portion of force array, so that untouched atoms are zero
2059                     stateGpu->clearForcesOnGpu(AtomLocality::Local);
2060                 }
2061                 communicateGpuHaloForces(*cr, accumulateForces);
2062             }
2063             else
2064             {
2065                 if (stepWork.useGpuFBufferOps)
2066                 {
2067                     stateGpu->waitForcesReadyOnHost(AtomLocality::NonLocal);
2068                 }
2069
2070                 // Without MTS or with MTS at slow steps with uncombined forces we need to
2071                 // communicate the fast forces
2072                 if (!fr->useMts || !combineMtsForcesBeforeHaloExchange)
2073                 {
2074                     dd_move_f(cr->dd, &forceOutMtsLevel0.forceWithShiftForces(), wcycle);
2075                 }
2076                 // With MTS we need to communicate the slow or combined (in forceOutMtsLevel1) forces
2077                 if (fr->useMts && stepWork.computeSlowForces)
2078                 {
2079                     dd_move_f(cr->dd, &forceOutMtsLevel1->forceWithShiftForces(), wcycle);
2080                 }
2081             }
2082         }
2083     }
2084
2085     // With both nonbonded and PME offloaded to a GPU on the same rank, we use
2086     // an alternating wait/reduction scheme.
2087     bool alternateGpuWait = (!c_disableAlternatingWait && useGpuPmeOnThisRank && simulationWork.useGpuNonbonded
2088                              && !DOMAINDECOMP(cr) && !stepWork.useGpuFBufferOps);
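    // The alternating scheme polls both the PME and the nonbonded GPU task and
    // reduces whichever output becomes available first, rather than waiting for
    // them in a fixed order.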
2089     if (alternateGpuWait)
2090     {
2091         alternatePmeNbGpuWaitReduce(fr->nbv.get(),
2092                                     fr->pmedata,
2093                                     forceOutNonbonded,
2094                                     forceOutMtsLevel1,
2095                                     enerd,
2096                                     lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)],
2097                                     stepWork,
2098                                     wcycle);
2099     }
2100
2101     if (!alternateGpuWait && useGpuPmeOnThisRank)
2102     {
2103         pme_gpu_wait_and_reduce(fr->pmedata,
2104                                 stepWork,
2105                                 wcycle,
2106                                 &forceOutMtsLevel1->forceWithVirial(),
2107                                 enerd,
2108                                 lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)]);
2109     }
2110
2111     /* Wait for local GPU NB outputs on the non-alternating wait path */
2112     if (!alternateGpuWait && stepWork.computeNonbondedForces && simulationWork.useGpuNonbonded)
2113     {
2114         /* Measured overhead on CUDA and OpenCL with(out) GPU sharing
2115          * is between 0.5 and 1.5 Mcycles. So 2 MCycles is an overestimate,
2116          * but even with a step of 0.1 ms the difference is less than 1%
2117          * of the step time.
2118          */
2119         const float gpuWaitApiOverheadMargin = 2e6F; /* cycles */
2120         const float waitCycles               = Nbnxm::gpu_wait_finish_task(
2121                 nbv->gpu_nbv,
2122                 stepWork,
2123                 AtomLocality::Local,
2124                 enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::LJSR].data(),
2125                 enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::CoulombSR].data(),
2126                 forceOutNonbonded->forceWithShiftForces().shiftForces(),
2127                 wcycle);
2128
2129         if (ddBalanceRegionHandler.useBalancingRegion())
2130         {
2131             DdBalanceRegionWaitedForGpu waitedForGpu = DdBalanceRegionWaitedForGpu::yes;
2132             if (stepWork.computeForces && waitCycles <= gpuWaitApiOverheadMargin)
2133             {
2134                 /* We measured only a few cycles, so it could be that the kernel
2135                  * and transfer finished earlier and there was no actual
2136                  * wait time, only API call overhead.
2137                  * Then the actual time could be anywhere between 0 and
2138                  * cycles_wait_est. We will use half of cycles_wait_est.
2139                  */
2140                 waitedForGpu = DdBalanceRegionWaitedForGpu::no;
2141             }
2142             ddBalanceRegionHandler.closeAfterForceComputationGpu(cycles_wait_gpu, waitedForGpu);
2143         }
2144     }
2145
2146     if (fr->nbv->emulateGpu())
2147     {
2148         // NOTE: emulation kernel is not included in the balancing region,
2149         // but emulation mode does not target performance anyway
2150         wallcycle_start_nocount(wcycle, ewcFORCE);
2151         do_nb_verlet(fr,
2152                      ic,
2153                      enerd,
2154                      stepWork,
2155                      InteractionLocality::Local,
2156                      DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes,
2157                      step,
2158                      nrnb,
2159                      wcycle);
2160         wallcycle_stop(wcycle, ewcFORCE);
2161     }
2162
2163     // If on GPU PME-PP comms path, receive forces from PME before GPU buffer ops
2164     // TODO refactor this and unify with below default-path call to the same function
2165     if (PAR(cr) && !thisRankHasDuty(cr, DUTY_PME) && stepWork.computeSlowForces
2166         && simulationWork.useGpuPmePpCommunication)
2167     {
2168         /* In case of node-splitting, the PP nodes receive the long-range
2169          * forces, virial and energy from the PME nodes here.
2170          */
2171         pme_receive_force_ener(fr,
2172                                cr,
2173                                &forceOutMtsLevel1->forceWithVirial(),
2174                                enerd,
2175                                simulationWork.useGpuPmePpCommunication,
2176                                stepWork.useGpuPmeFReduction,
2177                                wcycle);
2178     }
2179
2181     /* Do the nonbonded GPU (or emulation) force buffer reduction
2182      * on the non-alternating path. */
2183     GMX_ASSERT(!(nonbondedAtMtsLevel1 && stepWork.useGpuFBufferOps),
2184                "The schedule below does not allow for nonbonded MTS with GPU buffer ops");
2185     if (useOrEmulateGpuNb && !alternateGpuWait)
2186     {
2187         if (stepWork.useGpuFBufferOps)
2188         {
2189             ArrayRef<gmx::RVec> forceWithShift = forceOutNonbonded->forceWithShiftForces().force();
2190
2191             // Flag to specify whether the CPU force buffer has contributions to
2192             // local atoms. This depends on whether there are CPU-based force tasks
2193             // or, when DD is active, whether the halo exchange has added
2194             // contributions from the non-local part.
2195             const bool haveLocalForceContribInCpuBuffer =
2196                     (domainWork.haveCpuLocalForceWork || havePPDomainDecomposition(cr));
2197
2198             // TODO: move these steps as early as possible:
2199             // - CPU f H2D should be as soon as all CPU-side forces are done
2200             // - wait for force reduction does not need to block host (at least not here, it's sufficient to wait
2201             //   before the next CPU task that consumes the forces: vsite spread or update)
2202             // - copy is not performed if GPU force halo exchange is active, because it would overwrite the result
2203             //   of the halo exchange. In that case the copy is instead performed above, before the exchange.
2204             //   These should be unified.
2205             if (haveLocalForceContribInCpuBuffer && !stepWork.useGpuFHalo)
2206             {
2207                 // Note: AtomLocality::All is used for the non-DD case because in this
2208                 // case copyForcesToGpu() uses a separate stream, which allows overlap of
2209                 // the CPU force H2D with GPU force tasks on all streams, including the
2210                 // local stream whose tasks would otherwise be implicit dependencies for
2211                 // the transfer and would prevent overlap.
2212                 auto locality = havePPDomainDecomposition(cr) ? AtomLocality::Local : AtomLocality::All;
2213
2214                 stateGpu->copyForcesToGpu(forceWithShift, locality);
2215             }
2216
2217             if (stepWork.computeNonbondedForces)
2218             {
2219                 fr->gpuForceReduction[gmx::AtomLocality::Local]->execute();
2220             }
2221
2222             // Copy forces to host if they are needed for update or if virtual sites are enabled.
2223             // If there are vsites, we need to copy forces every step to spread vsite forces on host.
2224             // TODO: When the output flags are included in the step workload, this copy can be combined with the
2225             //       copy call done in sim_utils(...) for the output.
2226             // NOTE: If there are virtual sites, the forces are modified on host after this D2H copy. Hence,
2227             //       they should not be copied in do_md(...) for the output.
2228             if (!simulationWork.useGpuUpdate
2229                 || (simulationWork.useGpuUpdate && DOMAINDECOMP(cr) && haveHostPmePpComms) || vsite)
2230             {
2231                 stateGpu->copyForcesFromGpu(forceWithShift, AtomLocality::Local);
2232                 stateGpu->waitForcesReadyOnHost(AtomLocality::Local);
2233             }
2234         }
2235         else if (stepWork.computeNonbondedForces)
2236         {
2237             ArrayRef<gmx::RVec> forceWithShift = forceOutNonbonded->forceWithShiftForces().force();
2238             nbv->atomdata_add_nbat_f_to_f(AtomLocality::Local, forceWithShift);
2239         }
2240     }
2241
2242     launchGpuEndOfStepTasks(
2243             nbv, fr->gpuBonded, fr->pmedata, enerd, *runScheduleWork, useGpuPmeOnThisRank, step, wcycle);
2244
2245     if (DOMAINDECOMP(cr))
2246     {
2247         dd_force_flop_stop(cr->dd, nrnb);
2248     }
2249
2250     const bool haveCombinedMtsForces = (stepWork.computeForces && fr->useMts && stepWork.computeSlowForces
2251                                         && combineMtsForcesBeforeHaloExchange);
2252     if (stepWork.computeForces)
2253     {
2254         postProcessForceWithShiftForces(
2255                 nrnb, wcycle, box, x.unpaddedArrayRef(), &forceOutMtsLevel0, vir_force, *mdatoms, *fr, vsite, stepWork);
2256
2257         if (fr->useMts && stepWork.computeSlowForces && !haveCombinedMtsForces)
2258         {
2259             postProcessForceWithShiftForces(
2260                     nrnb, wcycle, box, x.unpaddedArrayRef(), forceOutMtsLevel1, vir_force, *mdatoms, *fr, vsite, stepWork);
2261         }
2262     }
2263
2264     // TODO refactor this and unify with above GPU PME-PP / GPU update path call to the same function
2265     if (PAR(cr) && !thisRankHasDuty(cr, DUTY_PME) && !simulationWork.useGpuPmePpCommunication
2266         && stepWork.computeSlowForces)
2267     {
2268         /* In case of node-splitting, the PP nodes receive the long-range
2269          * forces, virial and energy from the PME nodes here.
2270          */
2271         pme_receive_force_ener(fr,
2272                                cr,
2273                                &forceOutMtsLevel1->forceWithVirial(),
2274                                enerd,
2275                                simulationWork.useGpuPmePpCommunication,
2276                                false,
2277                                wcycle);
2278     }
2279
2280     if (stepWork.computeForces)
2281     {
2282         /* If we don't use MTS or if we already combined the MTS forces before, we only
2283          * need to post-process one ForceOutputs object here, called forceOutCombined,
2284          * otherwise we have to post-process two outputs and then combine them.
2285          */
2286         ForceOutputs& forceOutCombined = (haveCombinedMtsForces ? forceOutMts.value() : forceOutMtsLevel0);
2287         postProcessForces(
2288                 cr, step, nrnb, wcycle, box, x.unpaddedArrayRef(), &forceOutCombined, vir_force, mdatoms, fr, vsite, stepWork);
2289
2290         if (fr->useMts && stepWork.computeSlowForces && !haveCombinedMtsForces)
2291         {
2292             postProcessForces(
2293                     cr, step, nrnb, wcycle, box, x.unpaddedArrayRef(), forceOutMtsLevel1, vir_force, mdatoms, fr, vsite, stepWork);
2294
2295             combineMtsForces(mdatoms->homenr,
2296                              force.unpaddedArrayRef(),
2297                              forceView->forceMtsCombined(),
2298                              inputrec.mtsLevels[1].stepFactor);
2299         }
2300     }
2301
2302     if (stepWork.computeEnergy)
2303     {
2304         /* Compute the final potential energy terms */
2305         accumulatePotentialEnergies(enerd, lambda, inputrec.fepvals.get());
2306
2307         if (!EI_TPI(inputrec.eI))
2308         {
2309             checkPotentialEnergyValidity(step, *enerd, inputrec);
2310         }
2311     }
2312
2313     /* In case we don't have constraints and are using GPUs, the next balancing
2314      * region starts here.
2315      * Some "special" work at the end of do_force, such as vsite spread,
2316      * virial calculation and COM pulling, is thus not included in
2317      * the balance timing, which is ok as most tasks do communication.
2318      */
2319     ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::no);
2320 }