Clean up ewald module internals
[alexxy/gromacs.git] / src / gromacs / mdlib / sim_util.cpp
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5  * Copyright (c) 2001-2004, The GROMACS development team.
6  * Copyright (c) 2013-2019,2020, by the GROMACS development team, led by
7  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8  * and including many others, as listed in the AUTHORS file in the
9  * top-level source directory and at http://www.gromacs.org.
10  *
11  * GROMACS is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public License
13  * as published by the Free Software Foundation; either version 2.1
14  * of the License, or (at your option) any later version.
15  *
16  * GROMACS is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with GROMACS; if not, see
23  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
25  *
26  * If you want to redistribute modifications to GROMACS, please
27  * consider that scientific software is very special. Version
28  * control is crucial - bugs must be traceable. We will be happy to
29  * consider code for inclusion in the official distribution, but
30  * derived work must not be called official GROMACS. Details are found
31  * in the README & COPYING files - if they are missing, get the
32  * official version at http://www.gromacs.org.
33  *
34  * To help us fund GROMACS development, we humbly ask that you cite
35  * the research papers on the package. Check out http://www.gromacs.org.
36  */
37 #include "gmxpre.h"
38
39 #include "config.h"
40
41 #include <cmath>
42 #include <cstdint>
43 #include <cstdio>
44 #include <cstring>
45
46 #include <array>
47
48 #include "gromacs/awh/awh.h"
49 #include "gromacs/domdec/dlbtiming.h"
50 #include "gromacs/domdec/domdec.h"
51 #include "gromacs/domdec/domdec_struct.h"
52 #include "gromacs/domdec/gpuhaloexchange.h"
53 #include "gromacs/domdec/partition.h"
54 #include "gromacs/essentialdynamics/edsam.h"
55 #include "gromacs/ewald/pme.h"
56 #include "gromacs/ewald/pme_pp.h"
57 #include "gromacs/ewald/pme_pp_comm_gpu.h"
58 #include "gromacs/gmxlib/network.h"
59 #include "gromacs/gmxlib/nonbonded/nb_free_energy.h"
60 #include "gromacs/gmxlib/nonbonded/nb_kernel.h"
61 #include "gromacs/gmxlib/nonbonded/nonbonded.h"
62 #include "gromacs/gpu_utils/gpu_utils.h"
63 #include "gromacs/imd/imd.h"
64 #include "gromacs/listed_forces/disre.h"
65 #include "gromacs/listed_forces/gpubonded.h"
66 #include "gromacs/listed_forces/listed_forces.h"
67 #include "gromacs/listed_forces/manage_threading.h"
68 #include "gromacs/listed_forces/orires.h"
69 #include "gromacs/math/arrayrefwithpadding.h"
70 #include "gromacs/math/functions.h"
71 #include "gromacs/math/units.h"
72 #include "gromacs/math/vec.h"
73 #include "gromacs/math/vecdump.h"
74 #include "gromacs/mdlib/calcmu.h"
75 #include "gromacs/mdlib/calcvir.h"
76 #include "gromacs/mdlib/constr.h"
77 #include "gromacs/mdlib/enerdata_utils.h"
78 #include "gromacs/mdlib/force.h"
79 #include "gromacs/mdlib/forcerec.h"
80 #include "gromacs/mdlib/gmx_omp_nthreads.h"
81 #include "gromacs/mdlib/qmmm.h"
82 #include "gromacs/mdlib/update.h"
83 #include "gromacs/mdtypes/commrec.h"
84 #include "gromacs/mdtypes/enerdata.h"
85 #include "gromacs/mdtypes/forceoutput.h"
86 #include "gromacs/mdtypes/iforceprovider.h"
87 #include "gromacs/mdtypes/inputrec.h"
88 #include "gromacs/mdtypes/md_enums.h"
89 #include "gromacs/mdtypes/simulation_workload.h"
90 #include "gromacs/mdtypes/state.h"
91 #include "gromacs/mdtypes/state_propagator_data_gpu.h"
92 #include "gromacs/nbnxm/gpu_data_mgmt.h"
93 #include "gromacs/nbnxm/nbnxm.h"
94 #include "gromacs/pbcutil/ishift.h"
95 #include "gromacs/pbcutil/mshift.h"
96 #include "gromacs/pbcutil/pbc.h"
97 #include "gromacs/pulling/pull.h"
98 #include "gromacs/pulling/pull_rotation.h"
99 #include "gromacs/timing/cyclecounter.h"
100 #include "gromacs/timing/gpu_timing.h"
101 #include "gromacs/timing/wallcycle.h"
102 #include "gromacs/timing/wallcyclereporting.h"
103 #include "gromacs/timing/walltime_accounting.h"
104 #include "gromacs/topology/topology.h"
105 #include "gromacs/utility/arrayref.h"
106 #include "gromacs/utility/basedefinitions.h"
107 #include "gromacs/utility/cstringutil.h"
108 #include "gromacs/utility/exceptions.h"
109 #include "gromacs/utility/fatalerror.h"
110 #include "gromacs/utility/fixedcapacityvector.h"
111 #include "gromacs/utility/gmxassert.h"
112 #include "gromacs/utility/gmxmpi.h"
113 #include "gromacs/utility/logger.h"
114 #include "gromacs/utility/smalloc.h"
115 #include "gromacs/utility/strconvert.h"
116 #include "gromacs/utility/sysinfo.h"
117
118 using gmx::AtomLocality;
119 using gmx::DomainLifetimeWorkload;
120 using gmx::ForceOutputs;
121 using gmx::InteractionLocality;
122 using gmx::SimulationWorkload;
123 using gmx::StepWorkload;
124
125 // TODO: this environment variable allows us to verify before release
126 // that on less common architectures the total cost of polling is not larger than
127 // a blocking wait (so polling does not introduce overhead when the static
128 // PME-first ordering would suffice).
129 static const bool c_disableAlternatingWait = (getenv("GMX_DISABLE_ALTERNATING_GPU_WAIT") != nullptr);
130
131 static void sum_forces(rvec f[], gmx::ArrayRef<const gmx::RVec> forceToAdd)
132 {
133     const int end = forceToAdd.size();
134
135     int gmx_unused nt = gmx_omp_nthreads_get(emntDefault);
136 #pragma omp parallel for num_threads(nt) schedule(static)
137     for (int i = 0; i < end; i++)
138     {
139         rvec_inc(f[i], forceToAdd[i]);
140     }
141 }
142
143 static void calc_virial(int                              start,
144                         int                              homenr,
145                         const rvec                       x[],
146                         const gmx::ForceWithShiftForces& forceWithShiftForces,
147                         tensor                           vir_part,
148                         const t_graph*                   graph,
149                         const matrix                     box,
150                         t_nrnb*                          nrnb,
151                         const t_forcerec*                fr,
152                         int                              ePBC)
153 {
154     /* The short-range virial from surrounding boxes */
155     const rvec* fshift = as_rvec_array(forceWithShiftForces.shiftForces().data());
156     calc_vir(SHIFTS, fr->shift_vec, fshift, vir_part, ePBC == epbcSCREW, box);
157     inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);
158
159     /* Calculate partial virial, for local atoms only, based on short range.
160      * Total virial is computed in global_stat, called from do_md
161      */
162     const rvec* f = as_rvec_array(forceWithShiftForces.force().data());
163     f_calc_vir(start, start + homenr, x, f, vir_part, graph, box);
164     inc_nrnb(nrnb, eNR_VIRIAL, homenr);
165
166     if (debug)
167     {
168         pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
169     }
170 }
171
172 static void pull_potential_wrapper(const t_commrec*               cr,
173                                    const t_inputrec*              ir,
174                                    const matrix                   box,
175                                    gmx::ArrayRef<const gmx::RVec> x,
176                                    gmx::ForceWithVirial*          force,
177                                    const t_mdatoms*               mdatoms,
178                                    gmx_enerdata_t*                enerd,
179                                    pull_t*                        pull_work,
180                                    const real*                    lambda,
181                                    double                         t,
182                                    gmx_wallcycle_t                wcycle)
183 {
184     t_pbc pbc;
185     real  dvdl;
186
187     /* Calculate the center of mass forces, this requires communication,
188      * which is why pull_potential is called close to other communication.
189      */
190     wallcycle_start(wcycle, ewcPULLPOT);
191     set_pbc(&pbc, ir->ePBC, box);
192     dvdl = 0;
193     enerd->term[F_COM_PULL] += pull_potential(pull_work, mdatoms, &pbc, cr, t, lambda[efptRESTRAINT],
194                                               as_rvec_array(x.data()), force, &dvdl);
195     enerd->dvdl_lin[efptRESTRAINT] += dvdl;
196     wallcycle_stop(wcycle, ewcPULLPOT);
197 }
198
199 static void pme_receive_force_ener(t_forcerec*           fr,
200                                    const t_commrec*      cr,
201                                    gmx::ForceWithVirial* forceWithVirial,
202                                    gmx_enerdata_t*       enerd,
203                                    bool                  useGpuPmePpComms,
204                                    bool                  receivePmeForceToGpu,
205                                    gmx_wallcycle_t       wcycle)
206 {
207     real  e_q, e_lj, dvdl_q, dvdl_lj;
208     float cycles_ppdpme, cycles_seppme;
209
210     cycles_ppdpme = wallcycle_stop(wcycle, ewcPPDURINGPME);
211     dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);
212
213     /* In case of node-splitting, the PP nodes receive the long-range
214      * forces, virial and energy from the PME nodes here.
215      */
216     wallcycle_start(wcycle, ewcPP_PMEWAITRECVF);
217     dvdl_q  = 0;
218     dvdl_lj = 0;
219     gmx_pme_receive_f(fr->pmePpCommGpu.get(), cr, forceWithVirial, &e_q, &e_lj, &dvdl_q, &dvdl_lj,
220                       useGpuPmePpComms, receivePmeForceToGpu, &cycles_seppme);
221     enerd->term[F_COUL_RECIP] += e_q;
222     enerd->term[F_LJ_RECIP] += e_lj;
223     enerd->dvdl_lin[efptCOUL] += dvdl_q;
224     enerd->dvdl_lin[efptVDW] += dvdl_lj;
225
226     if (wcycle)
227     {
228         dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);
229     }
230     wallcycle_stop(wcycle, ewcPP_PMEWAITRECVF);
231 }
232
233 static void print_large_forces(FILE*            fp,
234                                const t_mdatoms* md,
235                                const t_commrec* cr,
236                                int64_t          step,
237                                real             forceTolerance,
238                                const rvec*      x,
239                                const rvec*      f)
240 {
241     real       force2Tolerance = gmx::square(forceTolerance);
242     gmx::index numNonFinite    = 0;
243     for (int i = 0; i < md->homenr; i++)
244     {
245         real force2    = norm2(f[i]);
246         bool nonFinite = !std::isfinite(force2);
247         if (force2 >= force2Tolerance || nonFinite)
248         {
249             fprintf(fp, "step %" PRId64 " atom %6d  x %8.3f %8.3f %8.3f  force %12.5e\n", step,
250                     ddglatnr(cr->dd, i), x[i][XX], x[i][YY], x[i][ZZ], std::sqrt(force2));
251         }
252         if (nonFinite)
253         {
254             numNonFinite++;
255         }
256     }
257     if (numNonFinite > 0)
258     {
259         /* Note that with MPI this fatal call on one rank might interrupt
260          * the printing on other ranks. But we can only avoid that with
261          * an expensive MPI barrier that we would need at each step.
262          */
263         gmx_fatal(FARGS, "At step %" PRId64 " detected non-finite forces on %td atoms", step, numNonFinite);
264     }
265 }
266
267 static void post_process_forces(const t_commrec*      cr,
268                                 int64_t               step,
269                                 t_nrnb*               nrnb,
270                                 gmx_wallcycle_t       wcycle,
271                                 const gmx_localtop_t* top,
272                                 const matrix          box,
273                                 const rvec            x[],
274                                 ForceOutputs*         forceOutputs,
275                                 tensor                vir_force,
276                                 const t_mdatoms*      mdatoms,
277                                 const t_graph*        graph,
278                                 const t_forcerec*     fr,
279                                 const gmx_vsite_t*    vsite,
280                                 const StepWorkload&   stepWork)
281 {
282     rvec* f = as_rvec_array(forceOutputs->forceWithShiftForces().force().data());
283
284     if (fr->haveDirectVirialContributions)
285     {
286         auto& forceWithVirial = forceOutputs->forceWithVirial();
287         rvec* fDirectVir      = as_rvec_array(forceWithVirial.force_.data());
288
289         if (vsite)
290         {
291             /* Spread the mesh force on virtual sites to the other particles...
292              * This is parallellized. MPI communication is performed
293              * if the constructing atoms aren't local.
294              */
295             matrix virial = { { 0 } };
296             spread_vsite_f(vsite, x, fDirectVir, nullptr, stepWork.computeVirial, virial, nrnb,
297                            &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr, wcycle);
298             forceWithVirial.addVirialContribution(virial);
299         }
300
301         if (stepWork.computeVirial)
302         {
303             /* Now add the forces, this is local */
304             sum_forces(f, forceWithVirial.force_);
305
306             /* Add the direct virial contributions */
307             GMX_ASSERT(
308                     forceWithVirial.computeVirial_,
309                     "forceWithVirial should request virial computation when we request the virial");
310             m_add(vir_force, forceWithVirial.getVirial(), vir_force);
311
312             if (debug)
313             {
314                 pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
315             }
316         }
317     }
318
319     if (fr->print_force >= 0)
320     {
321         print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
322     }
323 }
324
325 static void do_nb_verlet(t_forcerec*                fr,
326                          const interaction_const_t* ic,
327                          gmx_enerdata_t*            enerd,
328                          const StepWorkload&        stepWork,
329                          const InteractionLocality  ilocality,
330                          const int                  clearF,
331                          const int64_t              step,
332                          t_nrnb*                    nrnb,
333                          gmx_wallcycle_t            wcycle)
334 {
335     if (!stepWork.computeNonbondedForces)
336     {
337         /* skip non-bonded calculation */
338         return;
339     }
340
341     nonbonded_verlet_t* nbv = fr->nbv.get();
342
343     /* GPU kernel launch overhead is already timed separately */
344     if (fr->cutoff_scheme != ecutsVERLET)
345     {
346         gmx_incons("Invalid cut-off scheme passed!");
347     }
348
349     if (!nbv->useGpu())
350     {
351         /* When dynamic pair-list  pruning is requested, we need to prune
352          * at nstlistPrune steps.
353          */
354         if (nbv->isDynamicPruningStepCpu(step))
355         {
356             /* Prune the pair-list beyond fr->ic->rlistPrune using
357              * the current coordinates of the atoms.
358              */
359             wallcycle_sub_start(wcycle, ewcsNONBONDED_PRUNING);
360             nbv->dispatchPruneKernelCpu(ilocality, fr->shift_vec);
361             wallcycle_sub_stop(wcycle, ewcsNONBONDED_PRUNING);
362         }
363     }
364
365     nbv->dispatchNonbondedKernel(ilocality, *ic, stepWork, clearF, *fr, enerd, nrnb);
366 }
367
368 static inline void clear_rvecs_omp(int n, rvec v[])
369 {
370     int nth = gmx_omp_nthreads_get_simple_rvec_task(emntDefault, n);
371
372     /* Note that we would like to avoid this conditional by putting it
373      * into the omp pragma instead, but then we still take the full
374      * omp parallel for overhead (at least with gcc5).
375      */
376     if (nth == 1)
377     {
378         for (int i = 0; i < n; i++)
379         {
380             clear_rvec(v[i]);
381         }
382     }
383     else
384     {
385 #pragma omp parallel for num_threads(nth) schedule(static)
386         for (int i = 0; i < n; i++)
387         {
388             clear_rvec(v[i]);
389         }
390     }
391 }
392
393 /*! \brief Return an estimate of the average kinetic energy or 0 when unreliable
394  *
395  * \param groupOptions  Group options, containing T-coupling options
396  */
397 static real averageKineticEnergyEstimate(const t_grpopts& groupOptions)
398 {
399     real nrdfCoupled   = 0;
400     real nrdfUncoupled = 0;
401     real kineticEnergy = 0;
402     for (int g = 0; g < groupOptions.ngtc; g++)
403     {
404         if (groupOptions.tau_t[g] >= 0)
405         {
406             nrdfCoupled += groupOptions.nrdf[g];
407             kineticEnergy += groupOptions.nrdf[g] * 0.5 * groupOptions.ref_t[g] * BOLTZ;
408         }
409         else
410         {
411             nrdfUncoupled += groupOptions.nrdf[g];
412         }
413     }
414
415     /* This conditional with > also catches nrdf=0 */
416     if (nrdfCoupled > nrdfUncoupled)
417     {
418         return kineticEnergy * (nrdfCoupled + nrdfUncoupled) / nrdfCoupled;
419     }
420     else
421     {
422         return 0;
423     }
424 }
425
426 /*! \brief This routine checks that the potential energy is finite.
427  *
428  * Always checks that the potential energy is finite. If step equals
429  * inputrec.init_step also checks that the magnitude of the potential energy
430  * is reasonable. Terminates with a fatal error when a check fails.
431  * Note that passing this check does not guarantee finite forces,
432  * since those use slightly different arithmetics. But in most cases
433  * there is just a narrow coordinate range where forces are not finite
434  * and energies are finite.
435  *
436  * \param[in] step      The step number, used for checking and printing
437  * \param[in] enerd     The energy data; the non-bonded group energies need to be added to
438  * enerd.term[F_EPOT] before calling this routine \param[in] inputrec  The input record
439  */
440 static void checkPotentialEnergyValidity(int64_t step, const gmx_enerdata_t& enerd, const t_inputrec& inputrec)
441 {
442     /* Threshold valid for comparing absolute potential energy against
443      * the kinetic energy. Normally one should not consider absolute
444      * potential energy values, but with a factor of one million
445      * we should never get false positives.
446      */
447     constexpr real c_thresholdFactor = 1e6;
448
449     bool energyIsNotFinite    = !std::isfinite(enerd.term[F_EPOT]);
450     real averageKineticEnergy = 0;
451     /* We only check for large potential energy at the initial step,
452      * because that is by far the most likely step for this too occur
453      * and because computing the average kinetic energy is not free.
454      * Note: nstcalcenergy >> 1 often does not allow to catch large energies
455      * before they become NaN.
456      */
457     if (step == inputrec.init_step && EI_DYNAMICS(inputrec.eI))
458     {
459         averageKineticEnergy = averageKineticEnergyEstimate(inputrec.opts);
460     }
461
462     if (energyIsNotFinite
463         || (averageKineticEnergy > 0 && enerd.term[F_EPOT] > c_thresholdFactor * averageKineticEnergy))
464     {
465         gmx_fatal(
466                 FARGS,
467                 "Step %" PRId64
468                 ": The total potential energy is %g, which is %s. The LJ and electrostatic "
469                 "contributions to the energy are %g and %g, respectively. A %s potential energy "
470                 "can be caused by overlapping interactions in bonded interactions or very large%s "
471                 "coordinate values. Usually this is caused by a badly- or non-equilibrated initial "
472                 "configuration, incorrect interactions or parameters in the topology.",
473                 step, enerd.term[F_EPOT], energyIsNotFinite ? "not finite" : "extremely high",
474                 enerd.term[F_LJ], enerd.term[F_COUL_SR],
475                 energyIsNotFinite ? "non-finite" : "very high", energyIsNotFinite ? " or Nan" : "");
476     }
477 }
478
479 /*! \brief Return true if there are special forces computed this step.
480  *
481  * The conditionals exactly correspond to those in computeSpecialForces().
482  */
483 static bool haveSpecialForces(const t_inputrec&          inputrec,
484                               const gmx::ForceProviders& forceProviders,
485                               const pull_t*              pull_work,
486                               const bool                 computeForces,
487                               const gmx_edsam*           ed)
488 {
489
490     return ((computeForces && forceProviders.hasForceProvider()) || // forceProviders
491             (inputrec.bPull && pull_have_potential(pull_work)) ||   // pull
492             inputrec.bRot ||                                        // enforced rotation
493             (ed != nullptr) ||                                      // flooding
494             (inputrec.bIMD && computeForces));                      // IMD
495 }
496
497 /*! \brief Compute forces and/or energies for special algorithms
498  *
499  * The intention is to collect all calls to algorithms that compute
500  * forces on local atoms only and that do not contribute to the local
501  * virial sum (but add their virial contribution separately).
502  * Eventually these should likely all become ForceProviders.
503  * Within this function the intention is to have algorithms that do
504  * global communication at the end, so global barriers within the MD loop
505  * are as close together as possible.
506  *
507  * \param[in]     fplog            The log file
508  * \param[in]     cr               The communication record
509  * \param[in]     inputrec         The input record
510  * \param[in]     awh              The Awh module (nullptr if none in use).
511  * \param[in]     enforcedRotation Enforced rotation module.
512  * \param[in]     imdSession       The IMD session
513  * \param[in]     pull_work        The pull work structure.
514  * \param[in]     step             The current MD step
515  * \param[in]     t                The current time
516  * \param[in,out] wcycle           Wallcycle accounting struct
517  * \param[in,out] forceProviders   Pointer to a list of force providers
518  * \param[in]     box              The unit cell
519  * \param[in]     x                The coordinates
520  * \param[in]     mdatoms          Per atom properties
521  * \param[in]     lambda           Array of free-energy lambda values
522  * \param[in]     stepWork         Step schedule flags
523  * \param[in,out] forceWithVirial  Force and virial buffers
524  * \param[in,out] enerd            Energy buffer
525  * \param[in,out] ed               Essential dynamics pointer
526  * \param[in]     didNeighborSearch Tells if we did neighbor searching this step, used for ED sampling
527  *
528  * \todo Remove didNeighborSearch, which is used incorrectly.
529  * \todo Convert all other algorithms called here to ForceProviders.
530  */
531 static void computeSpecialForces(FILE*                          fplog,
532                                  const t_commrec*               cr,
533                                  const t_inputrec*              inputrec,
534                                  gmx::Awh*                      awh,
535                                  gmx_enfrot*                    enforcedRotation,
536                                  gmx::ImdSession*               imdSession,
537                                  pull_t*                        pull_work,
538                                  int64_t                        step,
539                                  double                         t,
540                                  gmx_wallcycle_t                wcycle,
541                                  gmx::ForceProviders*           forceProviders,
542                                  const matrix                   box,
543                                  gmx::ArrayRef<const gmx::RVec> x,
544                                  const t_mdatoms*               mdatoms,
545                                  real*                          lambda,
546                                  const StepWorkload&            stepWork,
547                                  gmx::ForceWithVirial*          forceWithVirial,
548                                  gmx_enerdata_t*                enerd,
549                                  gmx_edsam*                     ed,
550                                  bool                           didNeighborSearch)
551 {
552     /* NOTE: Currently all ForceProviders only provide forces.
553      *       When they also provide energies, remove this conditional.
554      */
555     if (stepWork.computeForces)
556     {
557         gmx::ForceProviderInput  forceProviderInput(x, *mdatoms, t, box, *cr);
558         gmx::ForceProviderOutput forceProviderOutput(forceWithVirial, enerd);
559
560         /* Collect forces from modules */
561         forceProviders->calculateForces(forceProviderInput, &forceProviderOutput);
562     }
563
564     if (inputrec->bPull && pull_have_potential(pull_work))
565     {
566         pull_potential_wrapper(cr, inputrec, box, x, forceWithVirial, mdatoms, enerd, pull_work,
567                                lambda, t, wcycle);
568
569         if (awh)
570         {
571             enerd->term[F_COM_PULL] += awh->applyBiasForcesAndUpdateBias(
572                     inputrec->ePBC, *mdatoms, box, forceWithVirial, t, step, wcycle, fplog);
573         }
574     }
575
576     rvec* f = as_rvec_array(forceWithVirial->force_.data());
577
578     /* Add the forces from enforced rotation potentials (if any) */
579     if (inputrec->bRot)
580     {
581         wallcycle_start(wcycle, ewcROTadd);
582         enerd->term[F_COM_PULL] += add_rot_forces(enforcedRotation, f, cr, step, t);
583         wallcycle_stop(wcycle, ewcROTadd);
584     }
585
586     if (ed)
587     {
588         /* Note that since init_edsam() is called after the initialization
589          * of forcerec, edsam doesn't request the noVirSum force buffer.
590          * Thus if no other algorithm (e.g. PME) requires it, the forces
591          * here will contribute to the virial.
592          */
593         do_flood(cr, inputrec, as_rvec_array(x.data()), f, ed, box, step, didNeighborSearch);
594     }
595
596     /* Add forces from interactive molecular dynamics (IMD), if any */
597     if (inputrec->bIMD && stepWork.computeForces)
598     {
599         imdSession->applyForces(f);
600     }
601 }
602
603 /*! \brief Makes PME flags from StepWorkload data.
604  *
605  * \param[in]  stepWork     Step schedule flags
606  * \returns                 PME flags
607  */
608 static int makePmeFlags(const StepWorkload& stepWork)
609 {
610     return GMX_PME_SPREAD | GMX_PME_SOLVE | (stepWork.computeVirial ? GMX_PME_CALC_ENER_VIR : 0)
611            | (stepWork.computeEnergy ? GMX_PME_CALC_ENER_VIR : 0)
612            | (stepWork.computeForces ? GMX_PME_CALC_F : 0);
613 }
614
615 /*! \brief Launch the prepare_step and spread stages of PME GPU.
616  *
617  * \param[in]  pmedata              The PME structure
618  * \param[in]  box                  The box matrix
619  * \param[in]  stepWork             Step schedule flags
620  * \param[in]  pmeFlags             PME flags
621  * \param[in]  xReadyOnDevice       Event synchronizer indicating that the coordinates are ready in
622  * the device memory. \param[in]  wcycle               The wallcycle structure
623  */
624 static inline void launchPmeGpuSpread(gmx_pme_t*            pmedata,
625                                       const matrix          box,
626                                       const StepWorkload&   stepWork,
627                                       int                   pmeFlags,
628                                       GpuEventSynchronizer* xReadyOnDevice,
629                                       gmx_wallcycle_t       wcycle)
630 {
631     pme_gpu_prepare_computation(pmedata, stepWork.haveDynamicBox, box, wcycle, pmeFlags,
632                                 stepWork.useGpuPmeFReduction);
633     pme_gpu_launch_spread(pmedata, xReadyOnDevice, wcycle);
634 }
635
636 /*! \brief Launch the FFT and gather stages of PME GPU
637  *
638  * This function only implements setting the output forces (no accumulation).
639  *
640  * \param[in]  pmedata        The PME structure
641  * \param[in]  wcycle         The wallcycle structure
642  */
643 static void launchPmeGpuFftAndGather(gmx_pme_t* pmedata, gmx_wallcycle_t wcycle)
644 {
645     pme_gpu_launch_complex_transforms(pmedata, wcycle);
646     pme_gpu_launch_gather(pmedata, wcycle, PmeForceOutputHandling::Set);
647 }
648
649 /*! \brief
650  *  Polling wait for either of the PME or nonbonded GPU tasks.
651  *
652  * Instead of a static order in waiting for GPU tasks, this function
653  * polls checking which of the two tasks completes first, and does the
654  * associated force buffer reduction overlapped with the other task.
655  * By doing that, unlike static scheduling order, it can always overlap
656  * one of the reductions, regardless of the GPU task completion order.
657  *
658  * \param[in]     nbv              Nonbonded verlet structure
659  * \param[in,out] pmedata          PME module data
660  * \param[in,out] forceOutputs     Output buffer for the forces and virial
661  * \param[in,out] enerd            Energy data structure results are reduced into
662  * \param[in]     stepWork         Step schedule flags
663  * \param[in]     pmeFlags         PME flags
664  * \param[in]     wcycle           The wallcycle structure
665  */
666 static void alternatePmeNbGpuWaitReduce(nonbonded_verlet_t* nbv,
667                                         gmx_pme_t*          pmedata,
668                                         gmx::ForceOutputs*  forceOutputs,
669                                         gmx_enerdata_t*     enerd,
670                                         const StepWorkload& stepWork,
671                                         int                 pmeFlags,
672                                         gmx_wallcycle_t     wcycle)
673 {
674     bool isPmeGpuDone = false;
675     bool isNbGpuDone  = false;
676
677
678     gmx::ForceWithShiftForces& forceWithShiftForces = forceOutputs->forceWithShiftForces();
679     gmx::ForceWithVirial&      forceWithVirial      = forceOutputs->forceWithVirial();
680
681     gmx::ArrayRef<const gmx::RVec> pmeGpuForces;
682
683     while (!isPmeGpuDone || !isNbGpuDone)
684     {
685         if (!isPmeGpuDone)
686         {
687             GpuTaskCompletion completionType =
688                     (isNbGpuDone) ? GpuTaskCompletion::Wait : GpuTaskCompletion::Check;
689             isPmeGpuDone = pme_gpu_try_finish_task(pmedata, pmeFlags, wcycle, &forceWithVirial,
690                                                    enerd, completionType);
691         }
692
693         if (!isNbGpuDone)
694         {
695             GpuTaskCompletion completionType =
696                     (isPmeGpuDone) ? GpuTaskCompletion::Wait : GpuTaskCompletion::Check;
697             isNbGpuDone = Nbnxm::gpu_try_finish_task(
698                     nbv->gpu_nbv, stepWork, AtomLocality::Local, enerd->grpp.ener[egLJSR].data(),
699                     enerd->grpp.ener[egCOULSR].data(), forceWithShiftForces.shiftForces(),
700                     completionType, wcycle);
701
702             if (isNbGpuDone)
703             {
704                 nbv->atomdata_add_nbat_f_to_f(AtomLocality::Local, forceWithShiftForces.force());
705             }
706         }
707     }
708 }
709
710 /*! \brief Set up the different force buffers; also does clearing.
711  *
712  * \param[in] fr        force record pointer
713  * \param[in] pull_work The pull work object.
714  * \param[in] inputrec  input record
715  * \param[in] force     force array
716  * \param[in] stepWork  Step schedule flags
717  * \param[out] wcycle   wallcycle recording structure
718  *
719  * \returns             Cleared force output structure
720  */
721 static ForceOutputs setupForceOutputs(t_forcerec*                         fr,
722                                       pull_t*                             pull_work,
723                                       const t_inputrec&                   inputrec,
724                                       gmx::ArrayRefWithPadding<gmx::RVec> force,
725                                       const StepWorkload&                 stepWork,
726                                       gmx_wallcycle_t                     wcycle)
727 {
728     wallcycle_sub_start(wcycle, ewcsCLEAR_FORCE_BUFFER);
729
730     /* NOTE: We assume fr->shiftForces is all zeros here */
731     gmx::ForceWithShiftForces forceWithShiftForces(force, stepWork.computeVirial, fr->shiftForces);
732
733     if (stepWork.computeForces)
734     {
735         /* Clear the short- and long-range forces */
736         clear_rvecs_omp(fr->natoms_force_constr, as_rvec_array(forceWithShiftForces.force().data()));
737     }
738
739     /* If we need to compute the virial, we might need a separate
740      * force buffer for algorithms for which the virial is calculated
741      * directly, such as PME. Otherwise, forceWithVirial uses the
742      * the same force (f in legacy calls) buffer as other algorithms.
743      */
744     const bool useSeparateForceWithVirialBuffer =
745             (stepWork.computeForces && (stepWork.computeVirial && fr->haveDirectVirialContributions));
746     /* forceWithVirial uses the local atom range only */
747     gmx::ForceWithVirial forceWithVirial(useSeparateForceWithVirialBuffer ? fr->forceBufferForDirectVirialContributions
748                                                                           : force.unpaddedArrayRef(),
749                                          stepWork.computeVirial);
750
751     if (useSeparateForceWithVirialBuffer)
752     {
753         /* TODO: update comment
754          * We only compute forces on local atoms. Note that vsites can
755          * spread to non-local atoms, but that part of the buffer is
756          * cleared separately in the vsite spreading code.
757          */
758         clear_rvecs_omp(forceWithVirial.force_.size(), as_rvec_array(forceWithVirial.force_.data()));
759     }
760
761     if (inputrec.bPull && pull_have_constraint(pull_work))
762     {
763         clear_pull_forces(pull_work);
764     }
765
766     wallcycle_sub_stop(wcycle, ewcsCLEAR_FORCE_BUFFER);
767
768     return ForceOutputs(forceWithShiftForces, forceWithVirial);
769 }
770
771
772 /*! \brief Set up flags that have the lifetime of the domain indicating what type of work is there to compute.
773  */
774 static DomainLifetimeWorkload setupDomainLifetimeWorkload(const t_inputrec&         inputrec,
775                                                           const t_forcerec&         fr,
776                                                           const pull_t*             pull_work,
777                                                           const gmx_edsam*          ed,
778                                                           const t_idef&             idef,
779                                                           const t_fcdata&           fcd,
780                                                           const t_mdatoms&          mdatoms,
781                                                           const SimulationWorkload& simulationWork,
782                                                           const StepWorkload&       stepWork)
783 {
784     DomainLifetimeWorkload domainWork;
785     // Note that haveSpecialForces is constant over the whole run
786     domainWork.haveSpecialForces =
787             haveSpecialForces(inputrec, *fr.forceProviders, pull_work, stepWork.computeForces, ed);
788     domainWork.haveCpuBondedWork  = haveCpuBondeds(fr);
789     domainWork.haveGpuBondedWork  = ((fr.gpuBonded != nullptr) && fr.gpuBonded->haveInteractions());
790     domainWork.haveRestraintsWork = haveRestraints(idef, fcd);
791     domainWork.haveCpuListedForceWork = haveCpuListedForces(fr, idef, fcd);
792     // Note that haveFreeEnergyWork is constant over the whole run
793     domainWork.haveFreeEnergyWork = (fr.efep != efepNO && mdatoms.nPerturbed != 0);
794     // We assume we have local force work if there are CPU
795     // force tasks including PME or nonbondeds.
796     domainWork.haveCpuLocalForceWork =
797             domainWork.haveSpecialForces || domainWork.haveCpuListedForceWork
798             || domainWork.haveFreeEnergyWork || simulationWork.useCpuNonbonded || simulationWork.useCpuPme
799             || simulationWork.haveEwaldSurfaceContribution || inputrec.nwall > 0;
800
801     return domainWork;
802 }
803
804 /*! \brief Set up force flag stuct from the force bitmask.
805  *
806  * \param[in]      legacyFlags          Force bitmask flags used to construct the new flags
807  * \param[in]      isNonbondedOn        Global override, if false forces to turn off all nonbonded calculation.
808  * \param[in]      simulationWork       Simulation workload description.
809  * \param[in]      rankHasPmeDuty       If this rank computes PME.
810  *
811  * \returns New Stepworkload description.
812  */
813 static StepWorkload setupStepWorkload(const int                 legacyFlags,
814                                       const bool                isNonbondedOn,
815                                       const SimulationWorkload& simulationWork,
816                                       const bool                rankHasPmeDuty)
817 {
818     StepWorkload flags;
819     flags.stateChanged           = ((legacyFlags & GMX_FORCE_STATECHANGED) != 0);
820     flags.haveDynamicBox         = ((legacyFlags & GMX_FORCE_DYNAMICBOX) != 0);
821     flags.doNeighborSearch       = ((legacyFlags & GMX_FORCE_NS) != 0);
822     flags.computeVirial          = ((legacyFlags & GMX_FORCE_VIRIAL) != 0);
823     flags.computeEnergy          = ((legacyFlags & GMX_FORCE_ENERGY) != 0);
824     flags.computeForces          = ((legacyFlags & GMX_FORCE_FORCES) != 0);
825     flags.computeListedForces    = ((legacyFlags & GMX_FORCE_LISTED) != 0);
826     flags.computeNonbondedForces = ((legacyFlags & GMX_FORCE_NONBONDED) != 0) && isNonbondedOn;
827     flags.computeDhdl            = ((legacyFlags & GMX_FORCE_DHDL) != 0);
828
829     if (simulationWork.useGpuBufferOps)
830     {
831         GMX_ASSERT(simulationWork.useGpuNonbonded,
832                    "Can only offload buffer ops if nonbonded computation is also offloaded");
833     }
834     flags.useGpuXBufferOps = simulationWork.useGpuBufferOps;
835     // on virial steps the CPU reduction path is taken
836     flags.useGpuFBufferOps    = simulationWork.useGpuBufferOps && !flags.computeVirial;
837     flags.useGpuPmeFReduction = flags.useGpuFBufferOps
838                                 && (simulationWork.useGpuPme
839                                     && (rankHasPmeDuty || simulationWork.useGpuPmePpCommunication));
840
841     return flags;
842 }
843
844
845 /* \brief Launch end-of-step GPU tasks: buffer clearing and rolling pruning.
846  *
847  * TODO: eliminate \p useGpuPmeOnThisRank when this is
848  * incorporated in DomainLifetimeWorkload.
849  */
850 static void launchGpuEndOfStepTasks(nonbonded_verlet_t*               nbv,
851                                     gmx::GpuBonded*                   gpuBonded,
852                                     gmx_pme_t*                        pmedata,
853                                     gmx_enerdata_t*                   enerd,
854                                     const gmx::MdrunScheduleWorkload& runScheduleWork,
855                                     bool                              useGpuPmeOnThisRank,
856                                     int64_t                           step,
857                                     gmx_wallcycle_t                   wcycle)
858 {
859     if (runScheduleWork.simulationWork.useGpuNonbonded)
860     {
861         /* Launch pruning before buffer clearing because the API overhead of the
862          * clear kernel launches can leave the GPU idle while it could be running
863          * the prune kernel.
864          */
865         if (nbv->isDynamicPruningStepGpu(step))
866         {
867             nbv->dispatchPruneKernelGpu(step);
868         }
869
870         /* now clear the GPU outputs while we finish the step on the CPU */
871         wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
872         wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
873         Nbnxm::gpu_clear_outputs(nbv->gpu_nbv, runScheduleWork.stepWork.computeVirial);
874         wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
875         wallcycle_stop(wcycle, ewcLAUNCH_GPU);
876     }
877
878     if (useGpuPmeOnThisRank)
879     {
880         pme_gpu_reinit_computation(pmedata, wcycle);
881     }
882
883     if (runScheduleWork.domainWork.haveGpuBondedWork && runScheduleWork.stepWork.computeEnergy)
884     {
885         // in principle this should be included in the DD balancing region,
886         // but generally it is infrequent so we'll omit it for the sake of
887         // simpler code
888         gpuBonded->waitAccumulateEnergyTerms(enerd);
889
890         gpuBonded->clearEnergies();
891     }
892 }
893
894
895 void do_force(FILE*                               fplog,
896               const t_commrec*                    cr,
897               const gmx_multisim_t*               ms,
898               const t_inputrec*                   inputrec,
899               gmx::Awh*                           awh,
900               gmx_enfrot*                         enforcedRotation,
901               gmx::ImdSession*                    imdSession,
902               pull_t*                             pull_work,
903               int64_t                             step,
904               t_nrnb*                             nrnb,
905               gmx_wallcycle_t                     wcycle,
906               const gmx_localtop_t*               top,
907               const matrix                        box,
908               gmx::ArrayRefWithPadding<gmx::RVec> x,
909               history_t*                          hist,
910               gmx::ArrayRefWithPadding<gmx::RVec> force,
911               tensor                              vir_force,
912               const t_mdatoms*                    mdatoms,
913               gmx_enerdata_t*                     enerd,
914               t_fcdata*                           fcd,
915               gmx::ArrayRef<real>                 lambda,
916               t_graph*                            graph,
917               t_forcerec*                         fr,
918               gmx::MdrunScheduleWorkload*         runScheduleWork,
919               const gmx_vsite_t*                  vsite,
920               rvec                                mu_tot,
921               double                              t,
922               gmx_edsam*                          ed,
923               int                                 legacyFlags,
924               const DDBalanceRegionHandler&       ddBalanceRegionHandler)
925 {
926     int                          i, j;
927     double                       mu[2 * DIM];
928     nonbonded_verlet_t*          nbv      = fr->nbv.get();
929     interaction_const_t*         ic       = fr->ic;
930     gmx::StatePropagatorDataGpu* stateGpu = fr->stateGpu;
931
932     // TODO remove the code below when the legacy flags are not in use anymore
933     /* modify force flag if not doing nonbonded */
934     if (!fr->bNonbonded)
935     {
936         legacyFlags &= ~GMX_FORCE_NONBONDED;
937     }
938
939     const SimulationWorkload& simulationWork = runScheduleWork->simulationWork;
940
941
942     runScheduleWork->stepWork    = setupStepWorkload(legacyFlags, fr->bNonbonded, simulationWork,
943                                                   thisRankHasDuty(cr, DUTY_PME));
944     const StepWorkload& stepWork = runScheduleWork->stepWork;
945
946
947     const bool useGpuPmeOnThisRank = simulationWork.useGpuPme && thisRankHasDuty(cr, DUTY_PME);
948     const int  pmeFlags            = makePmeFlags(stepWork);
949
950     // Switches on whether to use GPU for position and force buffer operations
951     // TODO consider all possible combinations of triggers, and how to combine optimally in each case.
952     const BufferOpsUseGpu useGpuXBufOps =
953             stepWork.useGpuXBufferOps ? BufferOpsUseGpu::True : BufferOpsUseGpu::False;
954     // GPU Force buffer ops are disabled on virial steps, because the virial calc is not yet ported to GPU
955     const BufferOpsUseGpu useGpuFBufOps =
956             stepWork.useGpuFBufferOps ? BufferOpsUseGpu::True : BufferOpsUseGpu::False;
957
958     /* At a search step we need to start the first balancing region
959      * somewhere early inside the step after communication during domain
960      * decomposition (and not during the previous step as usual).
961      */
962     if (stepWork.doNeighborSearch)
963     {
964         ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::yes);
965     }
966
967     const int start  = 0;
968     const int homenr = mdatoms->homenr;
969
970     clear_mat(vir_force);
971
972     if (stepWork.stateChanged)
973     {
974         if (inputrecNeedMutot(inputrec))
975         {
976             /* Calculate total (local) dipole moment in a temporary common array.
977              * This makes it possible to sum them over nodes faster.
978              */
979             calc_mu(start, homenr, x.unpaddedArrayRef(), mdatoms->chargeA, mdatoms->chargeB,
980                     mdatoms->nChargePerturbed, mu, mu + DIM);
981         }
982     }
983
984     if (fr->ePBC != epbcNONE)
985     {
986         /* Compute shift vectors every step,
987          * because of pressure coupling or box deformation!
988          */
989         if (stepWork.haveDynamicBox && stepWork.stateChanged)
990         {
991             calc_shifts(box, fr->shift_vec);
992         }
993
994         const bool fillGrid = (stepWork.doNeighborSearch && stepWork.stateChanged);
995         const bool calcCGCM = (fillGrid && !DOMAINDECOMP(cr));
996         if (calcCGCM)
997         {
998             put_atoms_in_box_omp(fr->ePBC, box, x.unpaddedArrayRef().subArray(0, homenr),
999                                  gmx_omp_nthreads_get(emntDefault));
1000             inc_nrnb(nrnb, eNR_SHIFTX, homenr);
1001         }
1002         else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
1003         {
1004             unshift_self(graph, box, as_rvec_array(x.unpaddedArrayRef().data()));
1005         }
1006     }
1007
1008     nbnxn_atomdata_copy_shiftvec(stepWork.haveDynamicBox, fr->shift_vec, nbv->nbat.get());
1009
1010     // Coordinates on the device are needed if PME or BufferOps are offloaded.
1011     // The local coordinates can be copied right away.
1012     // NOTE: Consider moving this copy to right after they are updated and constrained,
1013     //       if the later is not offloaded.
1014     if (useGpuPmeOnThisRank || useGpuXBufOps == BufferOpsUseGpu::True)
1015     {
1016         if (stepWork.doNeighborSearch)
1017         {
1018             stateGpu->reinit(mdatoms->homenr,
1019                              cr->dd != nullptr ? dd_numAtomsZones(*cr->dd) : mdatoms->homenr);
1020             if (useGpuPmeOnThisRank)
1021             {
1022                 // TODO: This should be moved into PME setup function ( pme_gpu_prepare_computation(...) )
1023                 pme_gpu_set_device_x(fr->pmedata, stateGpu->getCoordinates());
1024             }
1025         }
1026         // We need to copy coordinates when:
1027         // 1. Update is not offloaded
1028         // 2. The buffers were reinitialized on search step
1029         if (!simulationWork.useGpuUpdate || stepWork.doNeighborSearch)
1030         {
1031             stateGpu->copyCoordinatesToGpu(x.unpaddedArrayRef(), AtomLocality::Local);
1032         }
1033     }
1034
1035     // TODO Update this comment when introducing SimulationWorkload
1036     //
1037     // The conditions for gpuHaloExchange e.g. using GPU buffer
1038     // operations were checked before construction, so here we can
1039     // just use it and assert upon any conditions.
1040     gmx::GpuHaloExchange* gpuHaloExchange =
1041             (havePPDomainDecomposition(cr) ? cr->dd->gpuHaloExchange.get() : nullptr);
1042     const bool ddUsesGpuDirectCommunication = (gpuHaloExchange != nullptr);
1043     GMX_ASSERT(!ddUsesGpuDirectCommunication || (useGpuXBufOps == BufferOpsUseGpu::True),
1044                "Must use coordinate buffer ops with GPU halo exchange");
1045     const bool useGpuForcesHaloExchange =
1046             ddUsesGpuDirectCommunication && (useGpuFBufOps == BufferOpsUseGpu::True);
1047
1048     // Copy coordinate from the GPU if update is on the GPU and there
1049     // are forces to be computed on the CPU, or for the computation of
1050     // virial, or if host-side data will be transferred from this task
1051     // to a remote task for halo exchange or PME-PP communication. At
1052     // search steps the current coordinates are already on the host,
1053     // hence copy is not needed.
1054     const bool haveHostPmePpComms =
1055             !thisRankHasDuty(cr, DUTY_PME) && !simulationWork.useGpuPmePpCommunication;
1056     const bool haveHostHaloExchangeComms = havePPDomainDecomposition(cr) && !ddUsesGpuDirectCommunication;
1057
1058     bool gmx_used_in_debug haveCopiedXFromGpu = false;
1059     if (simulationWork.useGpuUpdate && !stepWork.doNeighborSearch
1060         && (runScheduleWork->domainWork.haveCpuLocalForceWork || stepWork.computeVirial
1061             || haveHostPmePpComms || haveHostHaloExchangeComms))
1062     {
1063         stateGpu->copyCoordinatesFromGpu(x.unpaddedArrayRef(), AtomLocality::Local);
1064         haveCopiedXFromGpu = true;
1065     }
1066
1067     const auto localXReadyOnDevice = (stateGpu != nullptr)
1068                                              ? stateGpu->getCoordinatesReadyOnDeviceEvent(
1069                                                        AtomLocality::Local, simulationWork, stepWork)
1070                                              : nullptr;
1071
1072 #if GMX_MPI
1073     if (!thisRankHasDuty(cr, DUTY_PME))
1074     {
1075         /* Send particle coordinates to the pme nodes.
1076          * Since this is only implemented for domain decomposition
1077          * and domain decomposition does not use the graph,
1078          * we do not need to worry about shifting.
1079          */
1080         bool reinitGpuPmePpComms = simulationWork.useGpuPmePpCommunication && (stepWork.doNeighborSearch);
1081         bool sendCoordinatesFromGpu =
1082                 simulationWork.useGpuPmePpCommunication && !(stepWork.doNeighborSearch);
1083
1084         if (!stepWork.doNeighborSearch && simulationWork.useGpuUpdate && !sendCoordinatesFromGpu)
1085         {
1086             GMX_RELEASE_ASSERT(false,
1087                                "GPU update and separate PME ranks are only supported with GPU "
1088                                "direct communication!");
1089             // TODO: when this code-path becomes supported add:
1090             // stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
1091         }
1092
1093         gmx_pme_send_coordinates(fr, cr, box, as_rvec_array(x.unpaddedArrayRef().data()), lambda[efptCOUL],
1094                                  lambda[efptVDW], (stepWork.computeVirial || stepWork.computeEnergy),
1095                                  step, simulationWork.useGpuPmePpCommunication, reinitGpuPmePpComms,
1096                                  sendCoordinatesFromGpu, localXReadyOnDevice, wcycle);
1097     }
1098 #endif /* GMX_MPI */
1099
1100     if (useGpuPmeOnThisRank)
1101     {
1102         launchPmeGpuSpread(fr->pmedata, box, stepWork, pmeFlags, localXReadyOnDevice, wcycle);
1103     }
1104
1105     /* do gridding for pair search */
1106     if (stepWork.doNeighborSearch)
1107     {
1108         if (graph && stepWork.stateChanged)
1109         {
1110             /* Calculate intramolecular shift vectors to make molecules whole */
1111             mk_mshift(fplog, graph, fr->ePBC, box, as_rvec_array(x.unpaddedArrayRef().data()));
1112         }
1113
1114         // TODO
1115         // - vzero is constant, do we need to pass it?
1116         // - box_diag should be passed directly to nbnxn_put_on_grid
1117         //
1118         rvec vzero;
1119         clear_rvec(vzero);
1120
1121         rvec box_diag;
1122         box_diag[XX] = box[XX][XX];
1123         box_diag[YY] = box[YY][YY];
1124         box_diag[ZZ] = box[ZZ][ZZ];
1125
1126         wallcycle_start(wcycle, ewcNS);
1127         if (!DOMAINDECOMP(cr))
1128         {
1129             wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
1130             nbnxn_put_on_grid(nbv, box, 0, vzero, box_diag, nullptr, { 0, mdatoms->homenr }, -1,
1131                               fr->cginfo, x.unpaddedArrayRef(), 0, nullptr);
1132             wallcycle_sub_stop(wcycle, ewcsNBS_GRID_LOCAL);
1133         }
1134         else
1135         {
1136             wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
1137             nbnxn_put_on_grid_nonlocal(nbv, domdec_zones(cr->dd), fr->cginfo, x.unpaddedArrayRef());
1138             wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);
1139         }
1140
1141         nbv->setAtomProperties(*mdatoms, fr->cginfo);
1142
1143         wallcycle_stop(wcycle, ewcNS);
1144
1145         /* initialize the GPU nbnxm atom data and bonded data structures */
1146         if (simulationWork.useGpuNonbonded)
1147         {
1148             wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
1149
1150             wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1151             Nbnxm::gpu_init_atomdata(nbv->gpu_nbv, nbv->nbat.get());
1152             wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1153
1154             if (fr->gpuBonded)
1155             {
1156                 /* Now we put all atoms on the grid, we can assign bonded
1157                  * interactions to the GPU, where the grid order is
1158                  * needed. Also the xq, f and fshift device buffers have
1159                  * been reallocated if needed, so the bonded code can
1160                  * learn about them. */
1161                 // TODO the xq, f, and fshift buffers are now shared
1162                 // resources, so they should be maintained by a
1163                 // higher-level object than the nb module.
1164                 fr->gpuBonded->updateInteractionListsAndDeviceBuffers(
1165                         nbv->getGridIndices(), top->idef, Nbnxm::gpu_get_xq(nbv->gpu_nbv),
1166                         Nbnxm::gpu_get_f(nbv->gpu_nbv), Nbnxm::gpu_get_fshift(nbv->gpu_nbv));
1167             }
1168             wallcycle_stop(wcycle, ewcLAUNCH_GPU);
1169         }
1170
1171         // Need to run after the GPU-offload bonded interaction lists
1172         // are set up to be able to determine whether there is bonded work.
1173         runScheduleWork->domainWork = setupDomainLifetimeWorkload(
1174                 *inputrec, *fr, pull_work, ed, top->idef, *fcd, *mdatoms, simulationWork, stepWork);
1175
1176         wallcycle_start_nocount(wcycle, ewcNS);
1177         wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
1178         /* Note that with a GPU the launch overhead of the list transfer is not timed separately */
1179         nbv->constructPairlist(InteractionLocality::Local, top->excls, step, nrnb);
1180
1181         nbv->setupGpuShortRangeWork(fr->gpuBonded, InteractionLocality::Local);
1182
1183         wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_LOCAL);
1184         wallcycle_stop(wcycle, ewcNS);
1185
1186         if (useGpuXBufOps == BufferOpsUseGpu::True)
1187         {
1188             nbv->atomdata_init_copy_x_to_nbat_x_gpu();
1189         }
1190         // For force buffer ops, we use the below conditon rather than
1191         // useGpuFBufOps to ensure that init is performed even if this
1192         // NS step is also a virial step (on which f buf ops are deactivated).
1193         if (simulationWork.useGpuBufferOps && simulationWork.useGpuNonbonded && (GMX_GPU == GMX_GPU_CUDA))
1194         {
1195             GMX_ASSERT(stateGpu, "stateGpu should be valid when buffer ops are offloaded");
1196             nbv->atomdata_init_add_nbat_f_to_f_gpu(stateGpu->fReducedOnDevice());
1197         }
1198     }
1199     else if (!EI_TPI(inputrec->eI))
1200     {
1201         if (useGpuXBufOps == BufferOpsUseGpu::True)
1202         {
1203             GMX_ASSERT(stateGpu, "stateGpu should be valid when buffer ops are offloaded");
1204             nbv->convertCoordinatesGpu(AtomLocality::Local, false, stateGpu->getCoordinates(),
1205                                        localXReadyOnDevice);
1206         }
1207         else
1208         {
1209             if (simulationWork.useGpuUpdate)
1210             {
1211                 GMX_ASSERT(stateGpu, "need a valid stateGpu object");
1212                 GMX_ASSERT(haveCopiedXFromGpu,
1213                            "a wait should only be triggered if copy has been scheduled");
1214                 stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
1215             }
1216             nbv->convertCoordinates(AtomLocality::Local, false, x.unpaddedArrayRef());
1217         }
1218     }
1219
1220     const gmx::DomainLifetimeWorkload& domainWork = runScheduleWork->domainWork;
1221
1222     if (simulationWork.useGpuNonbonded)
1223     {
1224         ddBalanceRegionHandler.openBeforeForceComputationGpu();
1225
1226         wallcycle_start(wcycle, ewcLAUNCH_GPU);
1227
1228         wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1229         Nbnxm::gpu_upload_shiftvec(nbv->gpu_nbv, nbv->nbat.get());
1230         if (stepWork.doNeighborSearch || (useGpuXBufOps == BufferOpsUseGpu::False))
1231         {
1232             Nbnxm::gpu_copy_xq_to_gpu(nbv->gpu_nbv, nbv->nbat.get(), AtomLocality::Local);
1233         }
1234         wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1235         // with X buffer ops offloaded to the GPU on all but the search steps
1236
1237         // bonded work not split into separate local and non-local, so with DD
1238         // we can only launch the kernel after non-local coordinates have been received.
1239         if (domainWork.haveGpuBondedWork && !havePPDomainDecomposition(cr))
1240         {
1241             wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_BONDED);
1242             fr->gpuBonded->launchKernel(fr, stepWork, box);
1243             wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_BONDED);
1244         }
1245
1246         /* launch local nonbonded work on GPU */
1247         wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1248         do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::Local, enbvClearFNo, step, nrnb, wcycle);
1249         wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1250         wallcycle_stop(wcycle, ewcLAUNCH_GPU);
1251     }
1252
1253     if (useGpuPmeOnThisRank)
1254     {
1255         // In PME GPU and mixed mode we launch FFT / gather after the
1256         // X copy/transform to allow overlap as well as after the GPU NB
1257         // launch to avoid FFT launch overhead hijacking the CPU and delaying
1258         // the nonbonded kernel.
1259         launchPmeGpuFftAndGather(fr->pmedata, wcycle);
1260     }
1261
1262     /* Communicate coordinates and sum dipole if necessary +
1263        do non-local pair search */
1264     if (havePPDomainDecomposition(cr))
1265     {
1266         if (stepWork.doNeighborSearch)
1267         {
1268             // TODO: fuse this branch with the above large stepWork.doNeighborSearch block
1269             wallcycle_start_nocount(wcycle, ewcNS);
1270             wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);
1271             /* Note that with a GPU the launch overhead of the list transfer is not timed separately */
1272             nbv->constructPairlist(InteractionLocality::NonLocal, top->excls, step, nrnb);
1273
1274             nbv->setupGpuShortRangeWork(fr->gpuBonded, InteractionLocality::NonLocal);
1275             wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);
1276             wallcycle_stop(wcycle, ewcNS);
1277             if (ddUsesGpuDirectCommunication)
1278             {
1279                 gpuHaloExchange->reinitHalo(stateGpu->getCoordinates(), stateGpu->getForces());
1280             }
1281         }
1282         else
1283         {
1284             if (ddUsesGpuDirectCommunication)
1285             {
1286                 // The following must be called after local setCoordinates (which records an event
1287                 // when the coordinate data has been copied to the device).
1288                 gpuHaloExchange->communicateHaloCoordinates(box, localXReadyOnDevice);
1289
1290                 if (domainWork.haveCpuBondedWork || domainWork.haveFreeEnergyWork)
1291                 {
1292                     // non-local part of coordinate buffer must be copied back to host for CPU work
1293                     stateGpu->copyCoordinatesFromGpu(x.unpaddedArrayRef(), AtomLocality::NonLocal);
1294                 }
1295             }
1296             else
1297             {
1298                 // Note: GPU update + DD without direct communication is not supported,
1299                 // a waitCoordinatesReadyOnHost() should be issued if it will be.
1300                 GMX_ASSERT(!simulationWork.useGpuUpdate,
1301                            "GPU update is not supported with CPU halo exchange");
1302                 dd_move_x(cr->dd, box, x.unpaddedArrayRef(), wcycle);
1303             }
1304
1305             if (useGpuXBufOps == BufferOpsUseGpu::True)
1306             {
1307                 // The condition here was (pme != nullptr && pme_gpu_get_device_x(fr->pmedata) != nullptr)
1308                 if (!useGpuPmeOnThisRank && !ddUsesGpuDirectCommunication)
1309                 {
1310                     stateGpu->copyCoordinatesToGpu(x.unpaddedArrayRef(), AtomLocality::NonLocal);
1311                 }
1312                 nbv->convertCoordinatesGpu(AtomLocality::NonLocal, false, stateGpu->getCoordinates(),
1313                                            stateGpu->getCoordinatesReadyOnDeviceEvent(
1314                                                    AtomLocality::NonLocal, simulationWork, stepWork));
1315             }
1316             else
1317             {
1318                 nbv->convertCoordinates(AtomLocality::NonLocal, false, x.unpaddedArrayRef());
1319             }
1320         }
1321
1322         if (simulationWork.useGpuNonbonded)
1323         {
1324             wallcycle_start(wcycle, ewcLAUNCH_GPU);
1325
1326             if (stepWork.doNeighborSearch || (useGpuXBufOps == BufferOpsUseGpu::False))
1327             {
1328                 wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1329                 Nbnxm::gpu_copy_xq_to_gpu(nbv->gpu_nbv, nbv->nbat.get(), AtomLocality::NonLocal);
1330                 wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1331             }
1332
1333             if (domainWork.haveGpuBondedWork)
1334             {
1335                 wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_BONDED);
1336                 fr->gpuBonded->launchKernel(fr, stepWork, box);
1337                 wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_BONDED);
1338             }
1339
1340             /* launch non-local nonbonded tasks on GPU */
1341             wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1342             do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::NonLocal, enbvClearFNo, step,
1343                          nrnb, wcycle);
1344             wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1345
1346             wallcycle_stop(wcycle, ewcLAUNCH_GPU);
1347         }
1348     }
1349
1350     if (simulationWork.useGpuNonbonded)
1351     {
1352         /* launch D2H copy-back F */
1353         wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
1354         wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1355
1356         if (havePPDomainDecomposition(cr))
1357         {
1358             Nbnxm::gpu_launch_cpyback(nbv->gpu_nbv, nbv->nbat.get(), stepWork, AtomLocality::NonLocal);
1359         }
1360         Nbnxm::gpu_launch_cpyback(nbv->gpu_nbv, nbv->nbat.get(), stepWork, AtomLocality::Local);
1361         wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1362
1363         if (domainWork.haveGpuBondedWork && stepWork.computeEnergy)
1364         {
1365             fr->gpuBonded->launchEnergyTransfer();
1366         }
1367         wallcycle_stop(wcycle, ewcLAUNCH_GPU);
1368     }
1369
1370     if (stepWork.stateChanged && inputrecNeedMutot(inputrec))
1371     {
1372         if (PAR(cr))
1373         {
1374             gmx_sumd(2 * DIM, mu, cr);
1375
1376             ddBalanceRegionHandler.reopenRegionCpu();
1377         }
1378
1379         for (i = 0; i < 2; i++)
1380         {
1381             for (j = 0; j < DIM; j++)
1382             {
1383                 fr->mu_tot[i][j] = mu[i * DIM + j];
1384             }
1385         }
1386     }
1387     if (mdatoms->nChargePerturbed == 0)
1388     {
1389         copy_rvec(fr->mu_tot[0], mu_tot);
1390     }
1391     else
1392     {
1393         for (j = 0; j < DIM; j++)
1394         {
1395             mu_tot[j] = (1.0 - lambda[efptCOUL]) * fr->mu_tot[0][j] + lambda[efptCOUL] * fr->mu_tot[1][j];
1396         }
1397     }
1398
1399     /* Reset energies */
1400     reset_enerdata(enerd);
1401     /* Clear the shift forces */
1402     // TODO: This should be linked to the shift force buffer in use, or cleared before use instead
1403     for (gmx::RVec& elem : fr->shiftForces)
1404     {
1405         elem = { 0.0_real, 0.0_real, 0.0_real };
1406     }
1407
1408     if (DOMAINDECOMP(cr) && !thisRankHasDuty(cr, DUTY_PME))
1409     {
1410         wallcycle_start(wcycle, ewcPPDURINGPME);
1411         dd_force_flop_start(cr->dd, nrnb);
1412     }
1413
1414     // For the rest of the CPU tasks that depend on GPU-update produced coordinates,
1415     // this wait ensures that the D2H transfer is complete.
1416     if ((simulationWork.useGpuUpdate)
1417         && (runScheduleWork->domainWork.haveCpuLocalForceWork || stepWork.computeVirial))
1418     {
1419         stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
1420     }
1421
1422     if (inputrec->bRot)
1423     {
1424         wallcycle_start(wcycle, ewcROT);
1425         do_rotation(cr, enforcedRotation, box, as_rvec_array(x.unpaddedArrayRef().data()), t, step,
1426                     stepWork.doNeighborSearch);
1427         wallcycle_stop(wcycle, ewcROT);
1428     }
1429
1430     /* Start the force cycle counter.
1431      * Note that a different counter is used for dynamic load balancing.
1432      */
1433     wallcycle_start(wcycle, ewcFORCE);
1434
1435     // Set up and clear force outputs.
1436     // We use std::move to keep the compiler happy, it has no effect.
1437     ForceOutputs forceOut =
1438             setupForceOutputs(fr, pull_work, *inputrec, std::move(force), stepWork, wcycle);
1439
1440     /* We calculate the non-bonded forces, when done on the CPU, here.
1441      * We do this before calling do_force_lowlevel, because in that
1442      * function, the listed forces are calculated before PME, which
1443      * does communication.  With this order, non-bonded and listed
1444      * force calculation imbalance can be balanced out by the domain
1445      * decomposition load balancing.
1446      */
1447
1448     const bool useOrEmulateGpuNb = simulationWork.useGpuNonbonded || fr->nbv->emulateGpu();
1449
1450     if (!useOrEmulateGpuNb)
1451     {
1452         do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::Local, enbvClearFYes, step, nrnb, wcycle);
1453     }
1454
1455     if (fr->efep != efepNO)
1456     {
1457         /* Calculate the local and non-local free energy interactions here.
1458          * Happens here on the CPU both with and without GPU.
1459          */
1460         nbv->dispatchFreeEnergyKernel(InteractionLocality::Local, fr,
1461                                       as_rvec_array(x.unpaddedArrayRef().data()),
1462                                       &forceOut.forceWithShiftForces(), *mdatoms, inputrec->fepvals,
1463                                       lambda.data(), enerd, stepWork, nrnb);
1464
1465         if (havePPDomainDecomposition(cr))
1466         {
1467             nbv->dispatchFreeEnergyKernel(InteractionLocality::NonLocal, fr,
1468                                           as_rvec_array(x.unpaddedArrayRef().data()),
1469                                           &forceOut.forceWithShiftForces(), *mdatoms,
1470                                           inputrec->fepvals, lambda.data(), enerd, stepWork, nrnb);
1471         }
1472     }
1473
1474     if (!useOrEmulateGpuNb)
1475     {
1476         if (havePPDomainDecomposition(cr))
1477         {
1478             do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::NonLocal, enbvClearFNo, step,
1479                          nrnb, wcycle);
1480         }
1481
1482         if (stepWork.computeForces)
1483         {
1484             /* Add all the non-bonded force to the normal force array.
1485              * This can be split into a local and a non-local part when overlapping
1486              * communication with calculation with domain decomposition.
1487              */
1488             wallcycle_stop(wcycle, ewcFORCE);
1489             nbv->atomdata_add_nbat_f_to_f(AtomLocality::All, forceOut.forceWithShiftForces().force());
1490             wallcycle_start_nocount(wcycle, ewcFORCE);
1491         }
1492
1493         /* If there are multiple fshift output buffers we need to reduce them */
1494         if (stepWork.computeVirial)
1495         {
1496             /* This is not in a subcounter because it takes a
1497                negligible and constant-sized amount of time */
1498             nbnxn_atomdata_add_nbat_fshift_to_fshift(*nbv->nbat,
1499                                                      forceOut.forceWithShiftForces().shiftForces());
1500         }
1501     }
1502
1503     /* update QMMMrec, if necessary */
1504     if (fr->bQMMM)
1505     {
1506         update_QMMMrec(cr, fr, as_rvec_array(x.unpaddedArrayRef().data()), mdatoms, box);
1507     }
1508
1509     // TODO Force flags should include haveFreeEnergyWork for this domain
1510     if (ddUsesGpuDirectCommunication && (domainWork.haveCpuBondedWork || domainWork.haveFreeEnergyWork))
1511     {
1512         /* Wait for non-local coordinate data to be copied from device */
1513         stateGpu->waitCoordinatesReadyOnHost(AtomLocality::NonLocal);
1514     }
1515     /* Compute the bonded and non-bonded energies and optionally forces */
1516     do_force_lowlevel(fr, inputrec, &(top->idef), cr, ms, nrnb, wcycle, mdatoms, x, hist, &forceOut, enerd,
1517                       fcd, box, lambda.data(), graph, fr->mu_tot, stepWork, ddBalanceRegionHandler);
1518
1519     wallcycle_stop(wcycle, ewcFORCE);
1520
1521     computeSpecialForces(fplog, cr, inputrec, awh, enforcedRotation, imdSession, pull_work, step, t,
1522                          wcycle, fr->forceProviders, box, x.unpaddedArrayRef(), mdatoms, lambda.data(),
1523                          stepWork, &forceOut.forceWithVirial(), enerd, ed, stepWork.doNeighborSearch);
1524
1525
1526     // Will store the amount of cycles spent waiting for the GPU that
1527     // will be later used in the DLB accounting.
1528     float cycles_wait_gpu = 0;
1529     if (useOrEmulateGpuNb)
1530     {
1531         auto& forceWithShiftForces = forceOut.forceWithShiftForces();
1532
1533         /* wait for non-local forces (or calculate in emulation mode) */
1534         if (havePPDomainDecomposition(cr))
1535         {
1536             if (simulationWork.useGpuNonbonded)
1537             {
1538                 cycles_wait_gpu += Nbnxm::gpu_wait_finish_task(
1539                         nbv->gpu_nbv, stepWork, AtomLocality::NonLocal, enerd->grpp.ener[egLJSR].data(),
1540                         enerd->grpp.ener[egCOULSR].data(), forceWithShiftForces.shiftForces(), wcycle);
1541             }
1542             else
1543             {
1544                 wallcycle_start_nocount(wcycle, ewcFORCE);
1545                 do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::NonLocal, enbvClearFYes,
1546                              step, nrnb, wcycle);
1547                 wallcycle_stop(wcycle, ewcFORCE);
1548             }
1549
1550             if (useGpuFBufOps == BufferOpsUseGpu::True)
1551             {
1552                 gmx::FixedCapacityVector<GpuEventSynchronizer*, 1> dependencyList;
1553
1554                 // TODO: move this into DomainLifetimeWorkload, including the second part of the
1555                 // condition The bonded and free energy CPU tasks can have non-local force
1556                 // contributions which are a dependency for the GPU force reduction.
1557                 bool haveNonLocalForceContribInCpuBuffer =
1558                         domainWork.haveCpuBondedWork || domainWork.haveFreeEnergyWork;
1559
1560                 if (haveNonLocalForceContribInCpuBuffer)
1561                 {
1562                     stateGpu->copyForcesToGpu(forceOut.forceWithShiftForces().force(),
1563                                               AtomLocality::NonLocal);
1564                     dependencyList.push_back(stateGpu->getForcesReadyOnDeviceEvent(
1565                             AtomLocality::NonLocal, useGpuFBufOps == BufferOpsUseGpu::True));
1566                 }
1567
1568                 nbv->atomdata_add_nbat_f_to_f_gpu(AtomLocality::NonLocal, stateGpu->getForces(),
1569                                                   pme_gpu_get_device_f(fr->pmedata), dependencyList,
1570                                                   false, haveNonLocalForceContribInCpuBuffer);
1571                 if (!useGpuForcesHaloExchange)
1572                 {
1573                     // copy from GPU input for dd_move_f()
1574                     stateGpu->copyForcesFromGpu(forceOut.forceWithShiftForces().force(),
1575                                                 AtomLocality::NonLocal);
1576                 }
1577             }
1578             else
1579             {
1580                 nbv->atomdata_add_nbat_f_to_f(AtomLocality::NonLocal, forceWithShiftForces.force());
1581             }
1582
1583
1584             if (fr->nbv->emulateGpu() && stepWork.computeVirial)
1585             {
1586                 nbnxn_atomdata_add_nbat_fshift_to_fshift(*nbv->nbat, forceWithShiftForces.shiftForces());
1587             }
1588         }
1589     }
1590
1591     if (havePPDomainDecomposition(cr))
1592     {
1593         /* We are done with the CPU compute.
1594          * We will now communicate the non-local forces.
1595          * If we use a GPU this will overlap with GPU work, so in that case
1596          * we do not close the DD force balancing region here.
1597          */
1598         ddBalanceRegionHandler.closeAfterForceComputationCpu();
1599
1600         if (stepWork.computeForces)
1601         {
1602
1603             if (useGpuForcesHaloExchange)
1604             {
1605                 if (domainWork.haveCpuLocalForceWork)
1606                 {
1607                     stateGpu->copyForcesToGpu(forceOut.forceWithShiftForces().force(), AtomLocality::Local);
1608                 }
1609                 gpuHaloExchange->communicateHaloForces(domainWork.haveCpuLocalForceWork);
1610             }
1611             else
1612             {
1613                 if (useGpuFBufOps == BufferOpsUseGpu::True)
1614                 {
1615                     stateGpu->waitForcesReadyOnHost(AtomLocality::NonLocal);
1616                 }
1617                 dd_move_f(cr->dd, &forceOut.forceWithShiftForces(), wcycle);
1618             }
1619         }
1620     }
1621
1622     // With both nonbonded and PME offloaded a GPU on the same rank, we use
1623     // an alternating wait/reduction scheme.
1624     bool alternateGpuWait = (!c_disableAlternatingWait && useGpuPmeOnThisRank && simulationWork.useGpuNonbonded
1625                              && !DOMAINDECOMP(cr) && (useGpuFBufOps == BufferOpsUseGpu::False));
1626     if (alternateGpuWait)
1627     {
1628         alternatePmeNbGpuWaitReduce(fr->nbv.get(), fr->pmedata, &forceOut, enerd, stepWork, pmeFlags, wcycle);
1629     }
1630
1631     if (!alternateGpuWait && useGpuPmeOnThisRank)
1632     {
1633         pme_gpu_wait_and_reduce(fr->pmedata, pmeFlags, wcycle, &forceOut.forceWithVirial(), enerd);
1634     }
1635
1636     /* Wait for local GPU NB outputs on the non-alternating wait path */
1637     if (!alternateGpuWait && simulationWork.useGpuNonbonded)
1638     {
1639         /* Measured overhead on CUDA and OpenCL with(out) GPU sharing
1640          * is between 0.5 and 1.5 Mcycles. So 2 MCycles is an overestimate,
1641          * but even with a step of 0.1 ms the difference is less than 1%
1642          * of the step time.
1643          */
1644         const float gpuWaitApiOverheadMargin = 2e6F; /* cycles */
1645         const float waitCycles               = Nbnxm::gpu_wait_finish_task(
1646                 nbv->gpu_nbv, stepWork, AtomLocality::Local, enerd->grpp.ener[egLJSR].data(),
1647                 enerd->grpp.ener[egCOULSR].data(), forceOut.forceWithShiftForces().shiftForces(), wcycle);
1648
1649         if (ddBalanceRegionHandler.useBalancingRegion())
1650         {
1651             DdBalanceRegionWaitedForGpu waitedForGpu = DdBalanceRegionWaitedForGpu::yes;
1652             if (stepWork.computeForces && waitCycles <= gpuWaitApiOverheadMargin)
1653             {
1654                 /* We measured few cycles, it could be that the kernel
1655                  * and transfer finished earlier and there was no actual
1656                  * wait time, only API call overhead.
1657                  * Then the actual time could be anywhere between 0 and
1658                  * cycles_wait_est. We will use half of cycles_wait_est.
1659                  */
1660                 waitedForGpu = DdBalanceRegionWaitedForGpu::no;
1661             }
1662             ddBalanceRegionHandler.closeAfterForceComputationGpu(cycles_wait_gpu, waitedForGpu);
1663         }
1664     }
1665
1666     if (fr->nbv->emulateGpu())
1667     {
1668         // NOTE: emulation kernel is not included in the balancing region,
1669         // but emulation mode does not target performance anyway
1670         wallcycle_start_nocount(wcycle, ewcFORCE);
1671         do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::Local,
1672                      DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes, step, nrnb, wcycle);
1673         wallcycle_stop(wcycle, ewcFORCE);
1674     }
1675
1676     // If on GPU PME-PP comms or GPU update path, receive forces from PME before GPU buffer ops
1677     // TODO refactor this and unify with below default-path call to the same function
1678     if (PAR(cr) && !thisRankHasDuty(cr, DUTY_PME)
1679         && (simulationWork.useGpuPmePpCommunication || simulationWork.useGpuUpdate))
1680     {
1681         /* In case of node-splitting, the PP nodes receive the long-range
1682          * forces, virial and energy from the PME nodes here.
1683          */
1684         pme_receive_force_ener(fr, cr, &forceOut.forceWithVirial(), enerd,
1685                                simulationWork.useGpuPmePpCommunication,
1686                                stepWork.useGpuPmeFReduction, wcycle);
1687     }
1688
1689
1690     /* Do the nonbonded GPU (or emulation) force buffer reduction
1691      * on the non-alternating path. */
1692     if (useOrEmulateGpuNb && !alternateGpuWait)
1693     {
1694         // TODO simplify the below conditionals. Pass buffer and sync pointers at init stage rather than here. Unify getter fns for sameGPU/otherGPU cases.
1695         void* pmeForcePtr =
1696                 stepWork.useGpuPmeFReduction
1697                         ? (thisRankHasDuty(cr, DUTY_PME) ? pme_gpu_get_device_f(fr->pmedata)
1698                                                          : // PME force buffer on same GPU
1699                                    fr->pmePpCommGpu->getGpuForceStagingPtr()) // buffer received from other GPU
1700                         : nullptr; // PME reduction not active on GPU
1701
1702         GpuEventSynchronizer* const pmeSynchronizer =
1703                 stepWork.useGpuPmeFReduction
1704                         ? (thisRankHasDuty(cr, DUTY_PME) ? pme_gpu_get_f_ready_synchronizer(fr->pmedata)
1705                                                          : // PME force buffer on same GPU
1706                                    static_cast<GpuEventSynchronizer*>(
1707                                            fr->pmePpCommGpu->getForcesReadySynchronizer())) // buffer received from other GPU
1708                         : nullptr; // PME reduction not active on GPU
1709
1710         gmx::FixedCapacityVector<GpuEventSynchronizer*, 3> dependencyList;
1711
1712         if (stepWork.useGpuPmeFReduction)
1713         {
1714             dependencyList.push_back(pmeSynchronizer);
1715         }
1716
1717         gmx::ArrayRef<gmx::RVec> forceWithShift = forceOut.forceWithShiftForces().force();
1718
1719         if (useGpuFBufOps == BufferOpsUseGpu::True)
1720         {
1721             // Flag to specify whether the CPU force buffer has contributions to
1722             // local atoms. This depends on whether there are CPU-based force tasks
1723             // or when DD is active the halo exchange has resulted in contributions
1724             // from the non-local part.
1725             const bool haveLocalForceContribInCpuBuffer =
1726                     (domainWork.haveCpuLocalForceWork || havePPDomainDecomposition(cr));
1727
1728             // TODO: move these steps as early as possible:
1729             // - CPU f H2D should be as soon as all CPU-side forces are done
1730             // - wait for force reduction does not need to block host (at least not here, it's sufficient to wait
1731             //   before the next CPU task that consumes the forces: vsite spread or update)
1732             // - copy is not perfomed if GPU force halo exchange is active, because it would overwrite the result
1733             //   of the halo exchange. In that case the copy is instead performed above, before the exchange.
1734             //   These should be unified.
1735             if (haveLocalForceContribInCpuBuffer && !useGpuForcesHaloExchange)
1736             {
1737                 // Note: AtomLocality::All is used for the non-DD case because, as in this
1738                 // case copyForcesToGpu() uses a separate stream, it allows overlap of
1739                 // CPU force H2D with GPU force tasks on all streams including those in the
1740                 // local stream which would otherwise be implicit dependencies for the
1741                 // transfer and would not overlap.
1742                 auto locality = havePPDomainDecomposition(cr) ? AtomLocality::Local : AtomLocality::All;
1743
1744                 stateGpu->copyForcesToGpu(forceWithShift, locality);
1745                 dependencyList.push_back(stateGpu->getForcesReadyOnDeviceEvent(
1746                         locality, useGpuFBufOps == BufferOpsUseGpu::True));
1747             }
1748             if (useGpuForcesHaloExchange)
1749             {
1750                 dependencyList.push_back(gpuHaloExchange->getForcesReadyOnDeviceEvent());
1751             }
1752             nbv->atomdata_add_nbat_f_to_f_gpu(AtomLocality::Local, stateGpu->getForces(), pmeForcePtr,
1753                                               dependencyList, stepWork.useGpuPmeFReduction,
1754                                               haveLocalForceContribInCpuBuffer);
1755             // Copy forces to host if they are needed for update or if virtual sites are enabled.
1756             // If there are vsites, we need to copy forces every step to spread vsite forces on host.
1757             // TODO: When the output flags will be included in step workload, this copy can be combined with the
1758             //       copy call done in sim_utils(...) for the output.
1759             // NOTE: If there are virtual sites, the forces are modified on host after this D2H copy. Hence,
1760             //       they should not be copied in do_md(...) for the output.
1761             if (!simulationWork.useGpuUpdate || vsite)
1762             {
1763                 stateGpu->copyForcesFromGpu(forceWithShift, AtomLocality::Local);
1764                 stateGpu->waitForcesReadyOnHost(AtomLocality::Local);
1765             }
1766         }
1767         else
1768         {
1769             nbv->atomdata_add_nbat_f_to_f(AtomLocality::Local, forceWithShift);
1770         }
1771     }
1772
1773     launchGpuEndOfStepTasks(nbv, fr->gpuBonded, fr->pmedata, enerd, *runScheduleWork,
1774                             useGpuPmeOnThisRank, step, wcycle);
1775
1776     if (DOMAINDECOMP(cr))
1777     {
1778         dd_force_flop_stop(cr->dd, nrnb);
1779     }
1780
1781     if (stepWork.computeForces)
1782     {
1783         rvec* f = as_rvec_array(forceOut.forceWithShiftForces().force().data());
1784
1785         /* If we have NoVirSum forces, but we do not calculate the virial,
1786          * we sum fr->f_novirsum=forceOut.f later.
1787          */
1788         if (vsite && !(fr->haveDirectVirialContributions && !stepWork.computeVirial))
1789         {
1790             rvec* fshift = as_rvec_array(forceOut.forceWithShiftForces().shiftForces().data());
1791             spread_vsite_f(vsite, as_rvec_array(x.unpaddedArrayRef().data()), f, fshift, FALSE,
1792                            nullptr, nrnb, &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr, wcycle);
1793         }
1794
1795         if (stepWork.computeVirial)
1796         {
1797             /* Calculation of the virial must be done after vsites! */
1798             calc_virial(0, mdatoms->homenr, as_rvec_array(x.unpaddedArrayRef().data()),
1799                         forceOut.forceWithShiftForces(), vir_force, graph, box, nrnb, fr, inputrec->ePBC);
1800         }
1801     }
1802
1803     // TODO refactor this and unify with above GPU PME-PP / GPU update path call to the same function
1804     if (PAR(cr) && !thisRankHasDuty(cr, DUTY_PME) && !simulationWork.useGpuPmePpCommunication
1805         && !simulationWork.useGpuUpdate)
1806     {
1807         /* In case of node-splitting, the PP nodes receive the long-range
1808          * forces, virial and energy from the PME nodes here.
1809          */
1810         pme_receive_force_ener(fr, cr, &forceOut.forceWithVirial(), enerd,
1811                                simulationWork.useGpuPmePpCommunication, false, wcycle);
1812     }
1813
1814     if (stepWork.computeForces)
1815     {
1816         post_process_forces(cr, step, nrnb, wcycle, top, box, as_rvec_array(x.unpaddedArrayRef().data()),
1817                             &forceOut, vir_force, mdatoms, graph, fr, vsite, stepWork);
1818     }
1819
1820     if (stepWork.computeEnergy)
1821     {
1822         /* Sum the potential energy terms from group contributions */
1823         sum_epot(&(enerd->grpp), enerd->term);
1824
1825         if (!EI_TPI(inputrec->eI))
1826         {
1827             checkPotentialEnergyValidity(step, *enerd, *inputrec);
1828         }
1829     }
1830
1831     /* In case we don't have constraints and are using GPUs, the next balancing
1832      * region starts here.
1833      * Some "special" work at the end of do_force_cuts?, such as vsite spread,
1834      * virial calculation and COM pulling, is not thus not included in
1835      * the balance timing, which is ok as most tasks do communication.
1836      */
1837     ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::no);
1838 }