/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gmxpre.h"

#include "config.h"

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

#include <array>

#include "gromacs/awh/awh.h"
#include "gromacs/domdec/dlbtiming.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/domdec/gpuhaloexchange.h"
#include "gromacs/domdec/partition.h"
#include "gromacs/essentialdynamics/edsam.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/gmxlib/chargegroup.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nonbonded/nb_free_energy.h"
#include "gromacs/gmxlib/nonbonded/nb_kernel.h"
#include "gromacs/gmxlib/nonbonded/nonbonded.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/imd/imd.h"
#include "gromacs/listed_forces/disre.h"
#include "gromacs/listed_forces/gpubonded.h"
#include "gromacs/listed_forces/listed_forces.h"
#include "gromacs/listed_forces/manage_threading.h"
#include "gromacs/listed_forces/orires.h"
#include "gromacs/math/arrayrefwithpadding.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vecdump.h"
#include "gromacs/mdlib/calcmu.h"
#include "gromacs/mdlib/calcvir.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdlib/enerdata_utils.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/forcerec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/qmmm.h"
#include "gromacs/mdlib/update.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/enerdata.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/iforceprovider.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/nbnxm/atomdata.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/mshift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/pulling/pull_rotation.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/wallcyclereporting.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/strconvert.h"
#include "gromacs/utility/sysinfo.h"

using gmx::ForceOutputs;
using gmx::StepWorkload;
using gmx::DomainLifetimeWorkload;

// TODO: this environment variable allows us to verify before release
// that on less common architectures the total cost of polling is not larger than
// a blocking wait (so polling does not introduce overhead when the static
// PME-first ordering would suffice).
static const bool c_disableAlternatingWait = (getenv("GMX_DISABLE_ALTERNATING_GPU_WAIT") != nullptr);

// environment variable to enable GPU buffer ops, to allow incremental and optional
// introduction of this functionality.
// TODO eventually tie this in with other existing GPU flags.
static const bool c_enableGpuBufOps = (getenv("GMX_USE_GPU_BUFFER_OPS") != nullptr);

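/*! \brief Add the forces in \p forceToAdd to \p f, element-wise, using OpenMP parallelization over atoms. */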
static void sum_forces(rvec f[], gmx::ArrayRef<const gmx::RVec> forceToAdd)
{
    const int      end = forceToAdd.size();

    int gmx_unused nt = gmx_omp_nthreads_get(emntDefault);
#pragma omp parallel for num_threads(nt) schedule(static)
    for (int i = 0; i < end; i++)
    {
        rvec_inc(f[i], forceToAdd[i]);
    }
}

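/*! \brief Compute the short-range virial contribution of the local atoms, including the shift-force term from periodic images. */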
static void calc_virial(int start, int homenr, const rvec x[],
                        const gmx::ForceWithShiftForces &forceWithShiftForces,
                        tensor vir_part, const t_graph *graph, const matrix box,
                        t_nrnb *nrnb, const t_forcerec *fr, int ePBC)
{
    /* The short-range virial from surrounding boxes */
    const rvec *fshift = as_rvec_array(forceWithShiftForces.shiftForces().data());
    calc_vir(SHIFTS, fr->shift_vec, fshift, vir_part, ePBC == epbcSCREW, box);
    inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);

    /* Calculate partial virial, for local atoms only, based on short range.
     * Total virial is computed in global_stat, called from do_md
     */
    const rvec *f = as_rvec_array(forceWithShiftForces.force().data());
    f_calc_vir(start, start+homenr, x, f, vir_part, graph, box);
    inc_nrnb(nrnb, eNR_VIRIAL, homenr);

    if (debug)
    {
        pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
    }
}

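/*! \brief Compute the COM pull potential and accumulate its energy, force and dH/dlambda contributions. */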
static void pull_potential_wrapper(const t_commrec *cr,
                                   const t_inputrec *ir,
                                   const matrix box, gmx::ArrayRef<const gmx::RVec> x,
                                   gmx::ForceWithVirial *force,
                                   const t_mdatoms *mdatoms,
                                   gmx_enerdata_t *enerd,
                                   pull_t *pull_work,
                                   const real *lambda,
                                   double t,
                                   gmx_wallcycle_t wcycle)
{
    t_pbc  pbc;
    real   dvdl;

    /* Calculate the center of mass forces, this requires communication,
     * which is why pull_potential is called close to other communication.
     */
    wallcycle_start(wcycle, ewcPULLPOT);
    set_pbc(&pbc, ir->ePBC, box);
    dvdl                     = 0;
    enerd->term[F_COM_PULL] +=
        pull_potential(pull_work, mdatoms, &pbc,
                       cr, t, lambda[efptRESTRAINT], as_rvec_array(x.data()), force, &dvdl);
    enerd->dvdl_lin[efptRESTRAINT] += dvdl;
    wallcycle_stop(wcycle, ewcPULLPOT);
}

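/*! \brief Receive the long-range forces, virial and energies from the separate PME ranks and accumulate them into the local output buffers. */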
static void pme_receive_force_ener(const t_commrec      *cr,
                                   gmx::ForceWithVirial *forceWithVirial,
                                   gmx_enerdata_t       *enerd,
                                   gmx_wallcycle_t       wcycle)
{
    real   e_q, e_lj, dvdl_q, dvdl_lj;
    float  cycles_ppdpme, cycles_seppme;

    cycles_ppdpme = wallcycle_stop(wcycle, ewcPPDURINGPME);
    dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);

    /* In case of node-splitting, the PP nodes receive the long-range
     * forces, virial and energy from the PME nodes here.
     */
    wallcycle_start(wcycle, ewcPP_PMEWAITRECVF);
    dvdl_q  = 0;
    dvdl_lj = 0;
    gmx_pme_receive_f(cr, forceWithVirial, &e_q, &e_lj, &dvdl_q, &dvdl_lj,
                      &cycles_seppme);
    enerd->term[F_COUL_RECIP] += e_q;
    enerd->term[F_LJ_RECIP]   += e_lj;
    enerd->dvdl_lin[efptCOUL] += dvdl_q;
    enerd->dvdl_lin[efptVDW]  += dvdl_lj;

    if (wcycle)
    {
        dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);
    }
    wallcycle_stop(wcycle, ewcPP_PMEWAITRECVF);
}

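/*! \brief Print all atoms with a force larger than \p forceTolerance, and exit with a fatal error when non-finite forces are detected. */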
static void print_large_forces(FILE            *fp,
                               const t_mdatoms *md,
                               const t_commrec *cr,
                               int64_t          step,
                               real             forceTolerance,
                               const rvec      *x,
                               const rvec      *f)
{
    real           force2Tolerance = gmx::square(forceTolerance);
    gmx::index     numNonFinite    = 0;
    for (int i = 0; i < md->homenr; i++)
    {
        real force2    = norm2(f[i]);
        bool nonFinite = !std::isfinite(force2);
        if (force2 >= force2Tolerance || nonFinite)
        {
            fprintf(fp, "step %" PRId64 " atom %6d  x %8.3f %8.3f %8.3f  force %12.5e\n",
                    step,
                    ddglatnr(cr->dd, i), x[i][XX], x[i][YY], x[i][ZZ], std::sqrt(force2));
        }
        if (nonFinite)
        {
            numNonFinite++;
        }
    }
    if (numNonFinite > 0)
    {
        /* Note that with MPI this fatal call on one rank might interrupt
         * the printing on other ranks. But we can only avoid that with
         * an expensive MPI barrier that we would need at each step.
         */
        gmx_fatal(FARGS, "At step %" PRId64 " detected non-finite forces on %td atoms", step, numNonFinite);
    }
}

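/*! \brief Spread vsite mesh forces, reduce the separate direct-virial force buffer into the main force buffer and accumulate its virial; optionally prints large forces. */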
static void post_process_forces(const t_commrec       *cr,
                                int64_t                step,
                                t_nrnb                *nrnb,
                                gmx_wallcycle_t        wcycle,
                                const gmx_localtop_t  *top,
                                const matrix           box,
                                const rvec             x[],
                                ForceOutputs          *forceOutputs,
                                tensor                 vir_force,
                                const t_mdatoms       *mdatoms,
                                const t_graph         *graph,
                                const t_forcerec      *fr,
                                const gmx_vsite_t     *vsite,
                                const StepWorkload    &stepWork)
{
    rvec *f = as_rvec_array(forceOutputs->forceWithShiftForces().force().data());

    if (fr->haveDirectVirialContributions)
    {
        auto &forceWithVirial = forceOutputs->forceWithVirial();
        rvec *fDirectVir      = as_rvec_array(forceWithVirial.force_.data());

        if (vsite)
        {
            /* Spread the mesh force on virtual sites to the other particles...
             * This is parallelized. MPI communication is performed
             * if the constructing atoms aren't local.
             */
            matrix virial = { { 0 } };
            spread_vsite_f(vsite, x, fDirectVir, nullptr,
                           stepWork.computeVirial, virial,
                           nrnb,
                           &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr, wcycle);
            forceWithVirial.addVirialContribution(virial);
        }

        if (stepWork.computeVirial)
        {
            /* Now add the forces, this is local */
            sum_forces(f, forceWithVirial.force_);

            /* Add the direct virial contributions */
            GMX_ASSERT(forceWithVirial.computeVirial_, "forceWithVirial should request virial computation when we request the virial");
            m_add(vir_force, forceWithVirial.getVirial(), vir_force);

            if (debug)
            {
                pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
            }
        }
    }

    if (fr->print_force >= 0)
    {
        print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
    }
}

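/*! \brief Dispatch the nonbonded kernel for the given interaction locality, performing CPU dynamic pair-list pruning first when it is due. */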
static void do_nb_verlet(t_forcerec                       *fr,
                         const interaction_const_t        *ic,
                         gmx_enerdata_t                   *enerd,
                         const StepWorkload               &stepWork,
                         const Nbnxm::InteractionLocality  ilocality,
                         const int                         clearF,
                         const int64_t                     step,
                         t_nrnb                           *nrnb,
                         gmx_wallcycle_t                   wcycle)
{
    if (!stepWork.computeNonbondedForces)
    {
        /* skip non-bonded calculation */
        return;
    }

    nonbonded_verlet_t *nbv = fr->nbv.get();

    /* GPU kernel launch overhead is already timed separately */
    if (fr->cutoff_scheme != ecutsVERLET)
    {
        gmx_incons("Invalid cut-off scheme passed!");
    }

    if (!nbv->useGpu())
    {
        /* When dynamic pair-list pruning is requested, we need to prune
         * at nstlistPrune steps.
         */
        if (nbv->isDynamicPruningStepCpu(step))
        {
            /* Prune the pair-list beyond fr->ic->rlistPrune using
             * the current coordinates of the atoms.
             */
            wallcycle_sub_start(wcycle, ewcsNONBONDED_PRUNING);
            nbv->dispatchPruneKernelCpu(ilocality, fr->shift_vec);
            wallcycle_sub_stop(wcycle, ewcsNONBONDED_PRUNING);
        }
    }

    nbv->dispatchNonbondedKernel(ilocality, *ic, stepWork, clearF, *fr, enerd, nrnb);
}

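/*! \brief Clear \p n rvecs, using OpenMP only when more than one thread is beneficial for the task size. */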
static inline void clear_rvecs_omp(int n, rvec v[])
{
    int nth = gmx_omp_nthreads_get_simple_rvec_task(emntDefault, n);

    /* Note that we would like to avoid this conditional by putting it
     * into the omp pragma instead, but then we still take the full
     * omp parallel for overhead (at least with gcc5).
     */
    if (nth == 1)
    {
        for (int i = 0; i < n; i++)
        {
            clear_rvec(v[i]);
        }
    }
    else
    {
#pragma omp parallel for num_threads(nth) schedule(static)
        for (int i = 0; i < n; i++)
        {
            clear_rvec(v[i]);
        }
    }
}

/*! \brief Return an estimate of the average kinetic energy or 0 when unreliable
 *
 * \param groupOptions  Group options, containing T-coupling options
 */
static real averageKineticEnergyEstimate(const t_grpopts &groupOptions)
{
    real nrdfCoupled   = 0;
    real nrdfUncoupled = 0;
    real kineticEnergy = 0;
    for (int g = 0; g < groupOptions.ngtc; g++)
    {
        if (groupOptions.tau_t[g] >= 0)
        {
            nrdfCoupled   += groupOptions.nrdf[g];
            kineticEnergy += groupOptions.nrdf[g]*0.5*groupOptions.ref_t[g]*BOLTZ;
        }
        else
        {
            nrdfUncoupled += groupOptions.nrdf[g];
        }
    }

    /* This conditional with > also catches nrdf=0 */
    if (nrdfCoupled > nrdfUncoupled)
    {
        return kineticEnergy*(nrdfCoupled + nrdfUncoupled)/nrdfCoupled;
    }
    else
    {
        return 0;
    }
}

/*! \brief This routine checks that the potential energy is finite.
 *
 * Always checks that the potential energy is finite. If step equals
 * inputrec.init_step also checks that the magnitude of the potential energy
 * is reasonable. Terminates with a fatal error when a check fails.
 * Note that passing this check does not guarantee finite forces,
 * since those use slightly different arithmetic. But in most cases
 * there is just a narrow coordinate range where forces are not finite
 * and energies are finite.
 *
 * \param[in] step      The step number, used for checking and printing
 * \param[in] enerd     The energy data; the non-bonded group energies need to be added to enerd.term[F_EPOT] before calling this routine
 * \param[in] inputrec  The input record
 */
static void checkPotentialEnergyValidity(int64_t               step,
                                         const gmx_enerdata_t &enerd,
                                         const t_inputrec     &inputrec)
{
    /* Threshold valid for comparing absolute potential energy against
     * the kinetic energy. Normally one should not consider absolute
     * potential energy values, but with a factor of one million
     * we should never get false positives.
     */
    constexpr real c_thresholdFactor = 1e6;

    bool           energyIsNotFinite    = !std::isfinite(enerd.term[F_EPOT]);
    real           averageKineticEnergy = 0;
    /* We only check for large potential energy at the initial step,
     * because that is by far the most likely step for this to occur
     * and because computing the average kinetic energy is not free.
     * Note: nstcalcenergy >> 1 often does not allow catching large energies
     * before they become NaN.
     */
    if (step == inputrec.init_step && EI_DYNAMICS(inputrec.eI))
    {
        averageKineticEnergy = averageKineticEnergyEstimate(inputrec.opts);
    }

    if (energyIsNotFinite || (averageKineticEnergy > 0 &&
                              enerd.term[F_EPOT] > c_thresholdFactor*averageKineticEnergy))
    {
        gmx_fatal(FARGS, "Step %" PRId64 ": The total potential energy is %g, which is %s. The LJ and electrostatic contributions to the energy are %g and %g, respectively. A %s potential energy can be caused by overlapping bonded interactions or very large%s coordinate values. Usually this is caused by a badly- or non-equilibrated initial configuration, or by incorrect interactions or parameters in the topology.",
                  step,
                  enerd.term[F_EPOT],
                  energyIsNotFinite ? "not finite" : "extremely high",
                  enerd.term[F_LJ],
                  enerd.term[F_COUL_SR],
                  energyIsNotFinite ? "non-finite" : "very high",
                  energyIsNotFinite ? " or NaN" : "");
    }
}

/*! \brief Return true if there are special forces computed this step.
 *
 * The conditionals exactly correspond to those in computeSpecialForces().
 */
static bool
haveSpecialForces(const t_inputrec              &inputrec,
                  const gmx::ForceProviders     &forceProviders,
                  const pull_t                  *pull_work,
                  const bool                     computeForces,
                  const gmx_edsam               *ed)
{
    return
        ((computeForces && forceProviders.hasForceProvider()) ||         // forceProviders
         (inputrec.bPull && pull_have_potential(pull_work)) ||           // pull
         inputrec.bRot ||                                                // enforced rotation
         (ed != nullptr) ||                                              // flooding
         (inputrec.bIMD && computeForces));                              // IMD
}

/*! \brief Compute forces and/or energies for special algorithms
 *
 * The intention is to collect all calls to algorithms that compute
 * forces on local atoms only and that do not contribute to the local
 * virial sum (but add their virial contribution separately).
 * Eventually these should likely all become ForceProviders.
 * Within this function the intention is to have algorithms that do
 * global communication at the end, so global barriers within the MD loop
 * are as close together as possible.
 *
 * \param[in]     fplog            The log file
 * \param[in]     cr               The communication record
 * \param[in]     inputrec         The input record
 * \param[in]     awh              The Awh module (nullptr if none in use).
 * \param[in]     enforcedRotation Enforced rotation module.
 * \param[in]     imdSession       The IMD session
 * \param[in]     pull_work        The pull work structure.
 * \param[in]     step             The current MD step
 * \param[in]     t                The current time
 * \param[in,out] wcycle           Wallcycle accounting struct
 * \param[in,out] forceProviders   Pointer to a list of force providers
 * \param[in]     box              The unit cell
 * \param[in]     x                The coordinates
 * \param[in]     mdatoms          Per atom properties
 * \param[in]     lambda           Array of free-energy lambda values
 * \param[in]     stepWork         Step schedule flags
 * \param[in,out] forceWithVirial  Force and virial buffers
 * \param[in,out] enerd            Energy buffer
 * \param[in,out] ed               Essential dynamics pointer
 * \param[in]     didNeighborSearch Tells if we did neighbor searching this step, used for ED sampling
 *
 * \todo Remove didNeighborSearch, which is used incorrectly.
 * \todo Convert all other algorithms called here to ForceProviders.
 */
static void
computeSpecialForces(FILE                          *fplog,
                     const t_commrec               *cr,
                     const t_inputrec              *inputrec,
                     gmx::Awh                      *awh,
                     gmx_enfrot                    *enforcedRotation,
                     gmx::ImdSession               *imdSession,
                     pull_t                        *pull_work,
                     int64_t                        step,
                     double                         t,
                     gmx_wallcycle_t                wcycle,
                     gmx::ForceProviders           *forceProviders,
                     const matrix                   box,
                     gmx::ArrayRef<const gmx::RVec> x,
                     const t_mdatoms               *mdatoms,
                     real                          *lambda,
                     const StepWorkload            &stepWork,
                     gmx::ForceWithVirial          *forceWithVirial,
                     gmx_enerdata_t                *enerd,
                     gmx_edsam                     *ed,
                     bool                           didNeighborSearch)
{
    /* NOTE: Currently all ForceProviders only provide forces.
     *       When they also provide energies, remove this conditional.
     */
    if (stepWork.computeForces)
    {
        gmx::ForceProviderInput  forceProviderInput(x, *mdatoms, t, box, *cr);
        gmx::ForceProviderOutput forceProviderOutput(forceWithVirial, enerd);

        /* Collect forces from modules */
        forceProviders->calculateForces(forceProviderInput, &forceProviderOutput);
    }

    if (inputrec->bPull && pull_have_potential(pull_work))
    {
        pull_potential_wrapper(cr, inputrec, box, x,
                               forceWithVirial,
                               mdatoms, enerd, pull_work, lambda, t,
                               wcycle);

        if (awh)
        {
            enerd->term[F_COM_PULL] +=
                awh->applyBiasForcesAndUpdateBias(inputrec->ePBC, *mdatoms, box,
                                                  forceWithVirial,
                                                  t, step, wcycle, fplog);
        }
    }

    rvec *f = as_rvec_array(forceWithVirial->force_.data());

    /* Add the forces from enforced rotation potentials (if any) */
    if (inputrec->bRot)
    {
        wallcycle_start(wcycle, ewcROTadd);
        enerd->term[F_COM_PULL] += add_rot_forces(enforcedRotation, f, cr, step, t);
        wallcycle_stop(wcycle, ewcROTadd);
    }

    if (ed)
    {
        /* Note that since init_edsam() is called after the initialization
         * of forcerec, edsam doesn't request the noVirSum force buffer.
         * Thus if no other algorithm (e.g. PME) requires it, the forces
         * here will contribute to the virial.
         */
        do_flood(cr, inputrec, as_rvec_array(x.data()), f, ed, box, step, didNeighborSearch);
    }

    /* Add forces from interactive molecular dynamics (IMD), if any */
    if (inputrec->bIMD && stepWork.computeForces)
    {
        imdSession->applyForces(f);
    }
}

/*! \brief Launch the prepare_step and spread stages of PME GPU.
 *
 * \param[in]  pmedata              The PME structure
 * \param[in]  box                  The box matrix
 * \param[in]  x                    Coordinate array
 * \param[in]  stepWork             Step schedule flags
 * \param[in]  pmeFlags             PME flags
 * \param[in]  useGpuForceReduction True if GPU-based force reduction is active this step
 * \param[in]  wcycle               The wallcycle structure
 */
static inline void launchPmeGpuSpread(gmx_pme_t          *pmedata,
                                      const matrix        box,
                                      const rvec          x[],
                                      const StepWorkload &stepWork,
                                      int                 pmeFlags,
                                      bool                useGpuForceReduction,
                                      gmx_wallcycle_t     wcycle)
{
    pme_gpu_prepare_computation(pmedata, stepWork.haveDynamicBox, box, wcycle, pmeFlags, useGpuForceReduction);
    pme_gpu_copy_coordinates_to_gpu(pmedata, x, wcycle);
    pme_gpu_launch_spread(pmedata, wcycle);
}

/*! \brief Launch the FFT and gather stages of PME GPU
 *
 * This function only implements setting the output forces (no accumulation).
 *
 * \param[in]  pmedata        The PME structure
 * \param[in]  wcycle         The wallcycle structure
 */
static void launchPmeGpuFftAndGather(gmx_pme_t        *pmedata,
                                     gmx_wallcycle_t   wcycle)
{
    pme_gpu_launch_complex_transforms(pmedata, wcycle);
    pme_gpu_launch_gather(pmedata, wcycle, PmeForceOutputHandling::Set);
}

/*! \brief
 *  Polling wait for either of the PME or nonbonded GPU tasks.
 *
 * Instead of a static order in waiting for GPU tasks, this function
 * polls checking which of the two tasks completes first, and does the
 * associated force buffer reduction overlapped with the other task.
 * By doing that, unlike static scheduling order, it can always overlap
 * one of the reductions, regardless of the GPU task completion order.
 *
 * \param[in]     nbv              Nonbonded verlet structure
 * \param[in,out] pmedata          PME module data
 * \param[in,out] forceOutputs     Output buffer for the forces and virial
 * \param[in,out] enerd            Energy data structure results are reduced into
 * \param[in]     stepWork         Step schedule flags
 * \param[in]     pmeFlags         PME flags
 * \param[in]     wcycle           The wallcycle structure
 */
static void alternatePmeNbGpuWaitReduce(nonbonded_verlet_t *nbv,
                                        gmx_pme_t          *pmedata,
                                        gmx::ForceOutputs  *forceOutputs,
                                        gmx_enerdata_t     *enerd,
                                        const StepWorkload &stepWork,
                                        int                 pmeFlags,
                                        gmx_wallcycle_t     wcycle)
{
    bool isPmeGpuDone = false;
    bool isNbGpuDone  = false;

    gmx::ForceWithShiftForces      &forceWithShiftForces = forceOutputs->forceWithShiftForces();
    gmx::ForceWithVirial           &forceWithVirial      = forceOutputs->forceWithVirial();

    gmx::ArrayRef<const gmx::RVec>  pmeGpuForces;

    while (!isPmeGpuDone || !isNbGpuDone)
    {
        if (!isPmeGpuDone)
        {
            GpuTaskCompletion completionType = (isNbGpuDone) ? GpuTaskCompletion::Wait : GpuTaskCompletion::Check;
            isPmeGpuDone = pme_gpu_try_finish_task(pmedata, pmeFlags, wcycle, &forceWithVirial, enerd, completionType);
        }

        if (!isNbGpuDone)
        {
            GpuTaskCompletion completionType = (isPmeGpuDone) ? GpuTaskCompletion::Wait : GpuTaskCompletion::Check;
            isNbGpuDone = Nbnxm::gpu_try_finish_task(nbv->gpu_nbv,
                                                     stepWork,
                                                     Nbnxm::AtomLocality::Local,
                                                     enerd->grpp.ener[egLJSR].data(),
                                                     enerd->grpp.ener[egCOULSR].data(),
                                                     forceWithShiftForces.shiftForces(), completionType, wcycle);

            if (isNbGpuDone)
            {
                nbv->atomdata_add_nbat_f_to_f(Nbnxm::AtomLocality::Local,
                                              forceWithShiftForces.force());
            }
        }
    }
}

/*! \brief Set up the different force buffers; also does clearing.
 *
 * \param[in] fr        force record pointer
 * \param[in] pull_work The pull work object.
 * \param[in] inputrec  input record
 * \param[in] force     force array
 * \param[in] stepWork  Step schedule flags
 * \param[out] wcycle   wallcycle recording structure
 *
 * \returns             Cleared force output structure
 */
static ForceOutputs
setupForceOutputs(t_forcerec                          *fr,
                  pull_t                              *pull_work,
                  const t_inputrec                    &inputrec,
                  gmx::ArrayRefWithPadding<gmx::RVec>  force,
                  const StepWorkload                  &stepWork,
                  gmx_wallcycle_t                      wcycle)
{
    wallcycle_sub_start(wcycle, ewcsCLEAR_FORCE_BUFFER);

    /* NOTE: We assume fr->shiftForces is all zeros here */
    gmx::ForceWithShiftForces forceWithShiftForces(force, stepWork.computeVirial, fr->shiftForces);

    if (stepWork.computeForces)
    {
        /* Clear the short- and long-range forces */
        clear_rvecs_omp(fr->natoms_force_constr,
                        as_rvec_array(forceWithShiftForces.force().data()));
    }

    /* If we need to compute the virial, we might need a separate
     * force buffer for algorithms for which the virial is calculated
     * directly, such as PME. Otherwise, forceWithVirial uses the
     * same force (f in legacy calls) buffer as other algorithms.
     */
    const bool useSeparateForceWithVirialBuffer = (stepWork.computeForces &&
                                                   (stepWork.computeVirial && fr->haveDirectVirialContributions));
    /* forceWithVirial uses the local atom range only */
    gmx::ForceWithVirial forceWithVirial(useSeparateForceWithVirialBuffer ?
                                         fr->forceBufferForDirectVirialContributions : force.unpaddedArrayRef(),
                                         stepWork.computeVirial);

    if (useSeparateForceWithVirialBuffer)
    {
        /* TODO: update comment
         * We only compute forces on local atoms. Note that vsites can
         * spread to non-local atoms, but that part of the buffer is
         * cleared separately in the vsite spreading code.
         */
        clear_rvecs_omp(forceWithVirial.force_.size(), as_rvec_array(forceWithVirial.force_.data()));
    }

    if (inputrec.bPull && pull_have_constraint(pull_work))
    {
        clear_pull_forces(pull_work);
    }

    wallcycle_sub_stop(wcycle, ewcsCLEAR_FORCE_BUFFER);

    return ForceOutputs(forceWithShiftForces, forceWithVirial);
}

/*! \brief Set up flags that have the lifetime of the domain indicating what type of work is there to compute.
 */
static void
setupDomainLifetimeWorkload(DomainLifetimeWorkload *domainWork,
                            const t_inputrec       &inputrec,
                            const t_forcerec       &fr,
                            const pull_t           *pull_work,
                            const gmx_edsam        *ed,
                            const t_idef           &idef,
                            const t_fcdata         &fcd,
                            const t_mdatoms        &mdatoms,
                            const StepWorkload     &stepWork)
{
    // Note that haveSpecialForces is constant over the whole run
    domainWork->haveSpecialForces      = haveSpecialForces(inputrec, *fr.forceProviders, pull_work, stepWork.computeForces, ed);
    domainWork->haveCpuBondedWork      = haveCpuBondeds(fr);
    domainWork->haveGpuBondedWork      = ((fr.gpuBonded != nullptr) && fr.gpuBonded->haveInteractions());
    domainWork->haveRestraintsWork     = havePositionRestraints(idef, fcd);
    domainWork->haveCpuListedForceWork = haveCpuListedForces(fr, idef, fcd);
    // Note that haveFreeEnergyWork is constant over the whole run
    domainWork->haveFreeEnergyWork     = (fr.efep != efepNO && mdatoms.nPerturbed != 0);
}

/*! \brief Set up the force flag struct from the force bitmask.
 *
 * \param[out]     flags                Force schedule flags
 * \param[in]      legacyFlags          Force bitmask flags used to construct the new flags
 * \param[in]      isNonbondedOn        Global override; when false, all nonbonded calculation is turned off.
 */
static void
setupStepWorkload(StepWorkload *flags,
                  const int     legacyFlags,
                  const bool    isNonbondedOn)
{
    flags->stateChanged           = ((legacyFlags & GMX_FORCE_STATECHANGED) != 0);
    flags->haveDynamicBox         = ((legacyFlags & GMX_FORCE_DYNAMICBOX) != 0);
    flags->doNeighborSearch       = ((legacyFlags & GMX_FORCE_NS) != 0);
    flags->computeVirial          = ((legacyFlags & GMX_FORCE_VIRIAL) != 0);
    flags->computeEnergy          = ((legacyFlags & GMX_FORCE_ENERGY) != 0);
    flags->computeForces          = ((legacyFlags & GMX_FORCE_FORCES) != 0);
    flags->computeListedForces    = ((legacyFlags & GMX_FORCE_LISTED) != 0);
    flags->computeNonbondedForces = ((legacyFlags & GMX_FORCE_NONBONDED) != 0) && isNonbondedOn;
    flags->computeDhdl            = ((legacyFlags & GMX_FORCE_DHDL) != 0);
}

/*! \brief Launch end-of-step GPU tasks: buffer clearing and rolling pruning.
 *
 * TODO: eliminate \p useGpuNonbonded and \p useGpuPme when these are
 * incorporated in DomainLifetimeWorkload.
 */
static void
launchGpuEndOfStepTasks(nonbonded_verlet_t               *nbv,
                        gmx::GpuBonded                   *gpuBonded,
                        gmx_pme_t                        *pmedata,
                        gmx_enerdata_t                   *enerd,
                        const gmx::MdrunScheduleWorkload &runScheduleWork,
                        bool                              useGpuNonbonded,
                        bool                              useGpuPme,
                        int64_t                           step,
                        gmx_wallcycle_t                   wcycle)
{
    if (useGpuNonbonded)
    {
        /* Launch pruning before buffer clearing because the API overhead of the
         * clear kernel launches can leave the GPU idle while it could be running
         * the prune kernel.
         */
        if (nbv->isDynamicPruningStepGpu(step))
        {
            nbv->dispatchPruneKernelGpu(step);
        }

        /* now clear the GPU outputs while we finish the step on the CPU */
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        Nbnxm::gpu_clear_outputs(nbv->gpu_nbv, runScheduleWork.stepWork.computeVirial);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
    }

    if (useGpuPme)
    {
        pme_gpu_reinit_computation(pmedata, wcycle);
    }

    if (runScheduleWork.domainWork.haveGpuBondedWork && runScheduleWork.stepWork.computeEnergy)
    {
        // in principle this should be included in the DD balancing region,
        // but generally it is infrequent so we'll omit it for the sake of
        // simpler code
        gpuBonded->waitAccumulateEnergyTerms(enerd);

        gpuBonded->clearEnergies();
    }
}

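/*! \brief Compute forces and, when requested, energies and the virial for the current step,
 * launching and synchronizing CPU and GPU tasks according to the schedule in \p runScheduleWork. */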
void do_force(FILE                                     *fplog,
              const t_commrec                          *cr,
              const gmx_multisim_t                     *ms,
              const t_inputrec                         *inputrec,
              gmx::Awh                                 *awh,
              gmx_enfrot                               *enforcedRotation,
              gmx::ImdSession                          *imdSession,
              pull_t                                   *pull_work,
              int64_t                                   step,
              t_nrnb                                   *nrnb,
              gmx_wallcycle_t                           wcycle,
              const gmx_localtop_t                     *top,
              const matrix                              box,
              gmx::ArrayRefWithPadding<gmx::RVec>       x,
              history_t                                *hist,
              gmx::ArrayRefWithPadding<gmx::RVec>       force,
              tensor                                    vir_force,
              const t_mdatoms                          *mdatoms,
              gmx_enerdata_t                           *enerd,
              t_fcdata                                 *fcd,
              gmx::ArrayRef<real>                       lambda,
              t_graph                                  *graph,
              t_forcerec                               *fr,
              gmx::MdrunScheduleWorkload               *runScheduleWork,
              const gmx_vsite_t                        *vsite,
              rvec                                      mu_tot,
              double                                    t,
              gmx_edsam                                *ed,
              int                                       legacyFlags,
              const DDBalanceRegionHandler             &ddBalanceRegionHandler)
{
    int                  i, j;
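    // mu holds the local dipole moment in one flat array: the components for
    // free-energy state A first, then those for state B (filled by calc_mu below)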
    double               mu[2*DIM];
    gmx_bool             bFillGrid, bCalcCGCM;
    gmx_bool             bUseGPU, bUseOrEmulGPU;
    nonbonded_verlet_t  *nbv = fr->nbv.get();
    interaction_const_t *ic  = fr->ic;

    // TODO remove the code below when the legacy flags are not in use anymore
    /* modify force flag if not doing nonbonded */
    if (!fr->bNonbonded)
    {
        legacyFlags &= ~GMX_FORCE_NONBONDED;
    }
    setupStepWorkload(&runScheduleWork->stepWork, legacyFlags, fr->bNonbonded);

    const gmx::StepWorkload &stepWork = runScheduleWork->stepWork;

    bFillGrid     = (stepWork.doNeighborSearch && stepWork.stateChanged);
    bCalcCGCM     = (bFillGrid && !DOMAINDECOMP(cr));
    bUseGPU       = fr->nbv->useGpu();
    bUseOrEmulGPU = bUseGPU || fr->nbv->emulateGpu();

    const auto pmeRunMode = fr->pmedata ? pme_run_mode(fr->pmedata) : PmeRunMode::CPU;
    // TODO slim this conditional down - inputrec and duty checks should mean the same in proper code!
    const bool useGpuPme  = EEL_PME(fr->ic->eeltype) && thisRankHasDuty(cr, DUTY_PME) &&
        ((pmeRunMode == PmeRunMode::GPU) || (pmeRunMode == PmeRunMode::Mixed));
    const int  pmeFlags = GMX_PME_SPREAD | GMX_PME_SOLVE |
        (stepWork.computeVirial ? GMX_PME_CALC_ENER_VIR : 0) |
        (stepWork.computeEnergy ? GMX_PME_CALC_ENER_VIR : 0) |
        (stepWork.computeForces ? GMX_PME_CALC_F : 0);

    // Switches on whether to use GPU for position and force buffer operations
    // TODO consider all possible combinations of triggers, and how to combine optimally in each case.
    const BufferOpsUseGpu useGpuXBufOps = (c_enableGpuBufOps && bUseGPU && (GMX_GPU == GMX_GPU_CUDA)) ?
        BufferOpsUseGpu::True : BufferOpsUseGpu::False;
    // GPU Force buffer ops are disabled on virial steps, because the virial calc is not yet ported to GPU
    const BufferOpsUseGpu useGpuFBufOps = (c_enableGpuBufOps && bUseGPU && (GMX_GPU == GMX_GPU_CUDA))
        && !(stepWork.computeVirial || stepWork.computeEnergy) ?
        BufferOpsUseGpu::True : BufferOpsUseGpu::False;
    // TODO: move / add this flag to the internal PME GPU data structures
    const bool useGpuPmeFReduction = (useGpuFBufOps == BufferOpsUseGpu::True) &&
        thisRankHasDuty(cr, DUTY_PME) && useGpuPme; // only supported if this rank is performing PME on the GPU

    /* At a search step we need to start the first balancing region
     * somewhere early inside the step after communication during domain
     * decomposition (and not during the previous step as usual).
     */
    if (stepWork.doNeighborSearch)
    {
        ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::yes);
    }

    const int start  = 0;
    const int homenr = mdatoms->homenr;

    clear_mat(vir_force);

    if (stepWork.stateChanged)
    {
        if (inputrecNeedMutot(inputrec))
        {
            /* Calculate total (local) dipole moment in a temporary common array.
             * This makes it possible to sum them over nodes faster.
             */
            calc_mu(start, homenr,
                    x.unpaddedArrayRef(), mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
                    mu, mu+DIM);
        }
    }

    if (fr->ePBC != epbcNONE)
    {
        /* Compute shift vectors every step,
         * because of pressure coupling or box deformation!
         */
        if (stepWork.haveDynamicBox && stepWork.stateChanged)
        {
            calc_shifts(box, fr->shift_vec);
        }

        if (bCalcCGCM)
        {
            put_atoms_in_box_omp(fr->ePBC, box, x.unpaddedArrayRef().subArray(0, homenr), gmx_omp_nthreads_get(emntDefault));
            inc_nrnb(nrnb, eNR_SHIFTX, homenr);
        }
        else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
        {
            unshift_self(graph, box, as_rvec_array(x.unpaddedArrayRef().data()));
        }
    }

    nbnxn_atomdata_copy_shiftvec(stepWork.haveDynamicBox,
                                 fr->shift_vec, nbv->nbat.get());

#if GMX_MPI
    if (!thisRankHasDuty(cr, DUTY_PME))
    {
        /* Send particle coordinates to the pme nodes.
         * Since this is only implemented for domain decomposition
         * and domain decomposition does not use the graph,
         * we do not need to worry about shifting.
         */
        gmx_pme_send_coordinates(cr, box, as_rvec_array(x.unpaddedArrayRef().data()),
                                 lambda[efptCOUL], lambda[efptVDW],
                                 (stepWork.computeVirial || stepWork.computeEnergy),
                                 step, wcycle);
    }
#endif /* GMX_MPI */

    if (useGpuPme)
    {
        launchPmeGpuSpread(fr->pmedata, box, as_rvec_array(x.unpaddedArrayRef().data()), stepWork, pmeFlags, useGpuPmeFReduction, wcycle);
    }

    /* do gridding for pair search */
    if (stepWork.doNeighborSearch)
    {
        if (graph && stepWork.stateChanged)
        {
            /* Calculate intramolecular shift vectors to make molecules whole */
            mk_mshift(fplog, graph, fr->ePBC, box, as_rvec_array(x.unpaddedArrayRef().data()));
        }

        // TODO
        // - vzero is constant, do we need to pass it?
        // - box_diag should be passed directly to nbnxn_put_on_grid
        //
        rvec vzero;
        clear_rvec(vzero);

        rvec box_diag;
        box_diag[XX] = box[XX][XX];
        box_diag[YY] = box[YY][YY];
        box_diag[ZZ] = box[ZZ][ZZ];

        wallcycle_start(wcycle, ewcNS);
        if (!DOMAINDECOMP(cr))
        {
            wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
            nbnxn_put_on_grid(nbv, box,
                              0, vzero, box_diag,
                              nullptr, 0, mdatoms->homenr, -1,
                              fr->cginfo, x.unpaddedArrayRef(),
                              0, nullptr);
            wallcycle_sub_stop(wcycle, ewcsNBS_GRID_LOCAL);
        }
        else
        {
            wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
            nbnxn_put_on_grid_nonlocal(nbv, domdec_zones(cr->dd),
                                       fr->cginfo, x.unpaddedArrayRef());
            wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);
        }

        nbv->setAtomProperties(*mdatoms, fr->cginfo);

        wallcycle_stop(wcycle, ewcNS);

        /* initialize the GPU nbnxm atom data and bonded data structures */
        if (bUseGPU)
        {
            wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);

            wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
            Nbnxm::gpu_init_atomdata(nbv->gpu_nbv, nbv->nbat.get());
            wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);

            if (fr->gpuBonded)
            {
                /* Now we put all atoms on the grid, we can assign bonded
                 * interactions to the GPU, where the grid order is
                 * needed. Also the xq, f and fshift device buffers have
                 * been reallocated if needed, so the bonded code can
                 * learn about them. */
                // TODO the xq, f, and fshift buffers are now shared
                // resources, so they should be maintained by a
                // higher-level object than the nb module.
                fr->gpuBonded->updateInteractionListsAndDeviceBuffers(nbv->getGridIndices(),
                                                                      top->idef,
                                                                      Nbnxm::gpu_get_xq(nbv->gpu_nbv),
                                                                      Nbnxm::gpu_get_f(nbv->gpu_nbv),
                                                                      Nbnxm::gpu_get_fshift(nbv->gpu_nbv));
            }
            wallcycle_stop(wcycle, ewcLAUNCH_GPU);
        }
    }

    if (stepWork.doNeighborSearch)
    {
        // Need to run after the GPU-offload bonded interaction lists
        // are set up to be able to determine whether there is bonded work.
        setupDomainLifetimeWorkload(&runScheduleWork->domainWork,
                                    *inputrec,
                                    *fr,
                                    pull_work,
                                    ed,
                                    top->idef,
                                    *fcd,
                                    *mdatoms,
                                    stepWork);
    }

    const gmx::DomainLifetimeWorkload &domainWork = runScheduleWork->domainWork;

    /* do local pair search */
    if (stepWork.doNeighborSearch)
    {
        // TODO: fuse this branch with the above stepWork.doNeighborSearch block
        wallcycle_start_nocount(wcycle, ewcNS);
        wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
        /* Note that with a GPU the launch overhead of the list transfer is not timed separately */
        nbv->constructPairlist(Nbnxm::InteractionLocality::Local,
                               &top->excls, step, nrnb);

        nbv->setupGpuShortRangeWork(fr->gpuBonded, Nbnxm::InteractionLocality::Local);

        wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_LOCAL);
        wallcycle_stop(wcycle, ewcNS);

        if (useGpuXBufOps == BufferOpsUseGpu::True)
        {
            nbv->atomdata_init_copy_x_to_nbat_x_gpu();
        }
        // For force buffer ops, we use the condition below rather than
        // useGpuFBufOps to ensure that init is performed even if this
        // NS step is also a virial step (on which f buf ops are deactivated).
        if (c_enableGpuBufOps && bUseGPU && (GMX_GPU == GMX_GPU_CUDA))
        {
            nbv->atomdata_init_add_nbat_f_to_f_gpu();
        }
    }
    else if (!EI_TPI(inputrec->eI))
    {
        if (useGpuXBufOps == BufferOpsUseGpu::True)
        {
            // The condition here was (pme != nullptr && pme_gpu_get_device_x(fr->pmedata) != nullptr)
            if (!useGpuPme)
            {
                nbv->copyCoordinatesToGpu(Nbnxm::AtomLocality::Local, false,
                                          x.unpaddedArrayRef());
            }
            nbv->convertCoordinatesGpu(Nbnxm::AtomLocality::Local, false,
                                       useGpuPme ? pme_gpu_get_device_x(fr->pmedata) : nbv->getDeviceCoordinates());
        }
        else
        {
            nbv->convertCoordinates(Nbnxm::AtomLocality::Local, false,
                                    x.unpaddedArrayRef());
        }
    }

    if (bUseGPU)
    {
        ddBalanceRegionHandler.openBeforeForceComputationGpu();

        wallcycle_start(wcycle, ewcLAUNCH_GPU);

        wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        Nbnxm::gpu_upload_shiftvec(nbv->gpu_nbv, nbv->nbat.get());
        if (stepWork.doNeighborSearch || (useGpuXBufOps == BufferOpsUseGpu::False))
        {
            Nbnxm::gpu_copy_xq_to_gpu(nbv->gpu_nbv, nbv->nbat.get(),
                                      Nbnxm::AtomLocality::Local);
        }
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        // With X buffer ops offloaded to the GPU, the coordinate copy above is
        // only needed on the search steps.

        // bonded work not split into separate local and non-local, so with DD
        // we can only launch the kernel after non-local coordinates have been received.
        if (domainWork.haveGpuBondedWork && !havePPDomainDecomposition(cr))
        {
            wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_BONDED);
            fr->gpuBonded->launchKernel(fr, stepWork, box);
            wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_BONDED);
        }

        /* launch local nonbonded work on GPU */
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        do_nb_verlet(fr, ic, enerd, stepWork, Nbnxm::InteractionLocality::Local, enbvClearFNo,
                     step, nrnb, wcycle);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
    }

    if (useGpuPme)
    {
        // In PME GPU and mixed mode we launch FFT / gather after the
        // X copy/transform to allow overlap as well as after the GPU NB
        // launch to avoid FFT launch overhead hijacking the CPU and delaying
        // the nonbonded kernel.
        launchPmeGpuFftAndGather(fr->pmedata, wcycle);
    }

    // TODO Update this comment when introducing SimulationWorkload
    //
    // The conditions for gpuHaloExchange e.g. using GPU buffer
    // operations were checked before construction, so here we can
    // just use it and assert upon any conditions.
    gmx::GpuHaloExchange *gpuHaloExchange              = (havePPDomainDecomposition(cr) ? cr->dd->gpuHaloExchange.get() : nullptr);
    const bool            ddUsesGpuDirectCommunication = (gpuHaloExchange != nullptr);
    GMX_ASSERT(!ddUsesGpuDirectCommunication || (useGpuXBufOps == BufferOpsUseGpu::True),
               "Must use coordinate buffer ops with GPU halo exchange");

1196     /* Communicate coordinates and sum dipole if necessary +
1197        do non-local pair search */
1198     if (havePPDomainDecomposition(cr))
1199     {
1200         if (stepWork.doNeighborSearch)
1201         {
1202             // TODO: fuse this branch with the above large stepWork.doNeighborSearch block
1203             wallcycle_start_nocount(wcycle, ewcNS);
1204             wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);
1205             /* Note that with a GPU the launch overhead of the list transfer is not timed separately */
1206             nbv->constructPairlist(Nbnxm::InteractionLocality::NonLocal,
1207                                    &top->excls, step, nrnb);
1208
1209             nbv->setupGpuShortRangeWork(fr->gpuBonded, Nbnxm::InteractionLocality::NonLocal);
1210             wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);
1211             wallcycle_stop(wcycle, ewcNS);
1212             if (ddUsesGpuDirectCommunication)
1213             {
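                // The pair search above may have reallocated the device
                // coordinate and force buffers, so the halo exchange object
                // has to be re-initialized with the current device pointers.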
                rvec* d_x    = static_cast<rvec *> (nbv->get_gpu_xrvec());
                rvec* d_f    = static_cast<rvec *> (nbv->get_gpu_frvec());
                gpuHaloExchange->reinitHalo(d_x, d_f);
            }
        }
        else
        {
            if (ddUsesGpuDirectCommunication)
            {
                // The following must be called after local setCoordinates (which records an event
                // when the coordinate data has been copied to the device).
                gpuHaloExchange->communicateHaloCoordinates(box);

                if (domainWork.haveCpuBondedWork || domainWork.haveFreeEnergyWork)
                {
                    // The non-local part of the coordinate buffer must be
                    // copied back to the host for the CPU work.
                    nbv->launch_copy_x_from_gpu(as_rvec_array(x.unpaddedArrayRef().data()), Nbnxm::AtomLocality::NonLocal);
                }
            }
            else
            {
                dd_move_x(cr->dd, box, x.unpaddedArrayRef(), wcycle);
            }

            if (useGpuXBufOps == BufferOpsUseGpu::True)
            {
                // The condition here was (pme != nullptr && pme_gpu_get_device_x(fr->pmedata) != nullptr)
                if (!useGpuPme && !ddUsesGpuDirectCommunication)
                {
                    nbv->copyCoordinatesToGpu(Nbnxm::AtomLocality::NonLocal, false,
                                              x.unpaddedArrayRef());
                }
                nbv->convertCoordinatesGpu(Nbnxm::AtomLocality::NonLocal, false,
                                           useGpuPme ? pme_gpu_get_device_x(fr->pmedata) : nbv->getDeviceCoordinates());
            }
            else
            {
                nbv->convertCoordinates(Nbnxm::AtomLocality::NonLocal, false,
                                        x.unpaddedArrayRef());
            }

        }

        if (bUseGPU)
        {
            wallcycle_start(wcycle, ewcLAUNCH_GPU);

            if (stepWork.doNeighborSearch || (useGpuXBufOps == BufferOpsUseGpu::False))
            {
                wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
                Nbnxm::gpu_copy_xq_to_gpu(nbv->gpu_nbv, nbv->nbat.get(),
                                          Nbnxm::AtomLocality::NonLocal);
                wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
            }

            if (domainWork.haveGpuBondedWork)
            {
                wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_BONDED);
                fr->gpuBonded->launchKernel(fr, stepWork, box);
                wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_BONDED);
            }

            /* launch non-local nonbonded tasks on GPU */
            wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
            do_nb_verlet(fr, ic, enerd, stepWork, Nbnxm::InteractionLocality::NonLocal, enbvClearFNo,
                         step, nrnb, wcycle);
            wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);

            wallcycle_stop(wcycle, ewcLAUNCH_GPU);
        }
    }

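    // Launch the D2H transfers of the nonbonded force outputs. The copies
    // are asynchronous with respect to the host, so the CPU force work
    // below can overlap with them.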
    if (bUseGPU)
    {
        /* launch D2H copy-back F */
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);

        bool copyBackNbForce  = (useGpuFBufOps == BufferOpsUseGpu::False);
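        // When the force buffer ops run on the GPU, the nonbonded force D2H
        // copy is instead launched after the GPU force reduction, so it is
        // skipped here.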

        if (havePPDomainDecomposition(cr))
        {
            Nbnxm::gpu_launch_cpyback(nbv->gpu_nbv, nbv->nbat.get(),
                                      stepWork, Nbnxm::AtomLocality::NonLocal, copyBackNbForce);
        }
        Nbnxm::gpu_launch_cpyback(nbv->gpu_nbv, nbv->nbat.get(),
                                  stepWork, Nbnxm::AtomLocality::Local, copyBackNbForce);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);

        if (domainWork.haveGpuBondedWork && stepWork.computeEnergy)
        {
            fr->gpuBonded->launchEnergyTransfer();
        }
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
    }

    if (stepWork.stateChanged && inputrecNeedMutot(inputrec))
    {
        if (PAR(cr))
        {
            gmx_sumd(2*DIM, mu, cr);

            ddBalanceRegionHandler.reopenRegionCpu();
        }
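        // mu holds the dipole components of the two free-energy end states
        // concatenated: mu[0..DIM-1] is state A, mu[DIM..2*DIM-1] is state B.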

        for (i = 0; i < 2; i++)
        {
            for (j = 0; j < DIM; j++)
            {
                fr->mu_tot[i][j] = mu[i*DIM + j];
            }
        }
    }
    if (mdatoms->nChargePerturbed == 0)
    {
        copy_rvec(fr->mu_tot[0], mu_tot);
    }
    else
    {
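        /* With perturbed charges the total dipole is interpolated linearly
         * between the end states:
         *   mu_tot = (1 - lambda_coul)*mu_A + lambda_coul*mu_B
         */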
        for (j = 0; j < DIM; j++)
        {
            mu_tot[j] =
                (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] +
                lambda[efptCOUL]*fr->mu_tot[1][j];
        }
    }

    /* Reset energies */
    reset_enerdata(enerd);
    /* Clear the shift forces */
    // TODO: This should be linked to the shift force buffer in use, or cleared before use instead
    for (gmx::RVec &elem : fr->shiftForces)
    {
        elem = { 0.0_real, 0.0_real, 0.0_real };
    }

    if (DOMAINDECOMP(cr) && !thisRankHasDuty(cr, DUTY_PME))
    {
        wallcycle_start(wcycle, ewcPPDURINGPME);
        dd_force_flop_start(cr->dd, nrnb);
    }

    if (inputrec->bRot)
    {
        wallcycle_start(wcycle, ewcROT);
        do_rotation(cr, enforcedRotation, box, as_rvec_array(x.unpaddedArrayRef().data()), t, step, stepWork.doNeighborSearch);
        wallcycle_stop(wcycle, ewcROT);
    }

    /* Start the force cycle counter.
     * Note that a different counter is used for dynamic load balancing.
     */
    wallcycle_start(wcycle, ewcFORCE);

    // Set up and clear force outputs.
    // We use std::move to keep the compiler happy; it has no effect.
    ForceOutputs forceOut = setupForceOutputs(fr, pull_work, *inputrec, std::move(force), stepWork, wcycle);
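    // forceOut bundles two views of the force output buffers:
    // forceWithShiftForces() for contributions that also produce shift
    // forces (e.g. the nonbonded kernels) and forceWithVirial() for
    // contributions with direct virial terms.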

    /* We calculate the non-bonded forces, when done on the CPU, here.
     * We do this before calling do_force_lowlevel, because in that
     * function, the listed forces are calculated before PME, which
     * does communication.  With this order, non-bonded and listed
     * force calculation imbalance can be balanced out by the domain
     * decomposition load balancing.
     */

    if (!bUseOrEmulGPU)
    {
        do_nb_verlet(fr, ic, enerd, stepWork, Nbnxm::InteractionLocality::Local, enbvClearFYes,
                     step, nrnb, wcycle);
    }

    if (fr->efep != efepNO)
    {
        /* Calculate the local and non-local free energy interactions here.
         * Happens here on the CPU both with and without GPU.
         */
        nbv->dispatchFreeEnergyKernel(Nbnxm::InteractionLocality::Local,
                                      fr, as_rvec_array(x.unpaddedArrayRef().data()), &forceOut.forceWithShiftForces(), *mdatoms,
                                      inputrec->fepvals, lambda.data(),
                                      enerd, stepWork, nrnb);

        if (havePPDomainDecomposition(cr))
        {
            nbv->dispatchFreeEnergyKernel(Nbnxm::InteractionLocality::NonLocal,
                                          fr, as_rvec_array(x.unpaddedArrayRef().data()), &forceOut.forceWithShiftForces(), *mdatoms,
                                          inputrec->fepvals, lambda.data(),
                                          enerd, stepWork, nrnb);
        }
    }

    if (!bUseOrEmulGPU)
    {
        if (havePPDomainDecomposition(cr))
        {
            do_nb_verlet(fr, ic, enerd, stepWork, Nbnxm::InteractionLocality::NonLocal, enbvClearFNo,
                         step, nrnb, wcycle);
        }

        if (stepWork.computeForces)
        {
            /* Add all the non-bonded forces to the normal force array.
             * With domain decomposition this can be split into a local and
             * a non-local part to overlap communication with calculation.
             */
            wallcycle_stop(wcycle, ewcFORCE);
            nbv->atomdata_add_nbat_f_to_f(Nbnxm::AtomLocality::All, forceOut.forceWithShiftForces().force());
            wallcycle_start_nocount(wcycle, ewcFORCE);
        }

        /* If there are multiple fshift output buffers we need to reduce them */
        if (stepWork.computeVirial)
        {
            /* This is not in a subcounter because it takes a
               negligible and constant-sized amount of time */
            nbnxn_atomdata_add_nbat_fshift_to_fshift(*nbv->nbat,
                                                     forceOut.forceWithShiftForces().shiftForces());
        }
    }

    /* update QMMMrec, if necessary */
    if (fr->bQMMM)
    {
        update_QMMMrec(cr, fr, as_rvec_array(x.unpaddedArrayRef().data()), mdatoms, box);
    }

    // TODO Force flags should include haveFreeEnergyWork for this domain
    if (ddUsesGpuDirectCommunication &&
        (domainWork.haveCpuBondedWork || domainWork.haveFreeEnergyWork))
    {
        /* Wait for non-local coordinate data to be copied from device */
        nbv->wait_nonlocal_x_copy_D2H_done();
    }
    /* Compute the bonded and non-bonded energies and optionally forces */
    do_force_lowlevel(fr, inputrec, &(top->idef),
                      cr, ms, nrnb, wcycle, mdatoms,
                      x, hist, &forceOut, enerd, fcd,
                      box, lambda.data(), graph, fr->mu_tot,
                      stepWork,
                      ddBalanceRegionHandler);

    wallcycle_stop(wcycle, ewcFORCE);

    computeSpecialForces(fplog, cr, inputrec, awh, enforcedRotation,
                         imdSession, pull_work, step, t, wcycle,
                         fr->forceProviders, box, x.unpaddedArrayRef(), mdatoms, lambda.data(),
                         stepWork, &forceOut.forceWithVirial(), enerd,
                         ed, stepWork.doNeighborSearch);

    // Will store the number of cycles spent waiting for the GPU,
    // used later in the DLB accounting.
    float cycles_wait_gpu = 0;
    if (bUseOrEmulGPU)
    {
        auto &forceWithShiftForces = forceOut.forceWithShiftForces();

        /* wait for non-local forces (or calculate in emulation mode) */
        if (havePPDomainDecomposition(cr))
        {
            if (bUseGPU)
            {
                cycles_wait_gpu += Nbnxm::gpu_wait_finish_task(nbv->gpu_nbv,
                                                               stepWork, Nbnxm::AtomLocality::NonLocal,
                                                               enerd->grpp.ener[egLJSR].data(),
                                                               enerd->grpp.ener[egCOULSR].data(),
                                                               forceWithShiftForces.shiftForces(),
                                                               wcycle);
            }
            else
            {
                wallcycle_start_nocount(wcycle, ewcFORCE);
                do_nb_verlet(fr, ic, enerd, stepWork, Nbnxm::InteractionLocality::NonLocal, enbvClearFYes,
                             step, nrnb, wcycle);
                wallcycle_stop(wcycle, ewcFORCE);
            }

            if (useGpuFBufOps == BufferOpsUseGpu::True)
            {
                // TODO: move this into DomainLifetimeWorkload, including the second part of the condition
                // The bonded and free energy CPU tasks can have non-local force contributions
                // which are a dependency for the GPU force reduction.
                bool  haveNonLocalForceContribInCpuBuffer = domainWork.haveCpuBondedWork || domainWork.haveFreeEnergyWork;

                rvec *f = as_rvec_array(forceWithShiftForces.force().data());
                if (haveNonLocalForceContribInCpuBuffer)
                {
                    nbv->launch_copy_f_to_gpu(f, Nbnxm::AtomLocality::NonLocal);
                }
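                // The GPU force reduction sums the nbnxm-layout forces on the
                // device with the (optional) PME device forces and the
                // CPU-side contributions copied in above.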
                nbv->atomdata_add_nbat_f_to_f_gpu(Nbnxm::AtomLocality::NonLocal,
                                                  nbv->getDeviceForces(),
                                                  pme_gpu_get_device_f(fr->pmedata),
                                                  pme_gpu_get_f_ready_synchronizer(fr->pmedata),
                                                  useGpuPmeFReduction, haveNonLocalForceContribInCpuBuffer);
                nbv->launch_copy_f_from_gpu(f, Nbnxm::AtomLocality::NonLocal);
            }
            else
            {
                nbv->atomdata_add_nbat_f_to_f(Nbnxm::AtomLocality::NonLocal,
                                              forceWithShiftForces.force());
            }

            if (fr->nbv->emulateGpu() && stepWork.computeVirial)
            {
                nbnxn_atomdata_add_nbat_fshift_to_fshift(*nbv->nbat,
                                                         forceWithShiftForces.shiftForces());
            }
        }
    }

    const bool useGpuForcesHaloExchange = ddUsesGpuDirectCommunication && (useGpuFBufOps == BufferOpsUseGpu::True);
    const bool useCpuPmeFReduction      = thisRankHasDuty(cr, DUTY_PME) && !useGpuPmeFReduction;
    // TODO: move this into DomainLifetimeWorkload, including the second part of the condition
    const bool haveCpuLocalForces       = (domainWork.haveSpecialForces || domainWork.haveCpuListedForceWork || useCpuPmeFReduction ||
                                           (fr->efep != efepNO));

    if (havePPDomainDecomposition(cr))
    {
        /* We are done with the CPU compute.
         * We will now communicate the non-local forces.
         * If we use a GPU this will overlap with GPU work, so in that case
         * we do not close the DD force balancing region here.
         */
        ddBalanceRegionHandler.closeAfterForceComputationCpu();

        if (stepWork.computeForces)
        {
            gmx::ArrayRef<gmx::RVec>  force  = forceOut.forceWithShiftForces().force();
            rvec                     *f      = as_rvec_array(force.data());

            if (useGpuForcesHaloExchange)
            {
                if (haveCpuLocalForces)
                {
                    nbv->launch_copy_f_to_gpu(f, Nbnxm::AtomLocality::Local);
                }
                bool accumulateHaloForces = haveCpuLocalForces;
                gpuHaloExchange->communicateHaloForces(accumulateHaloForces);
            }
            else
            {
                if (useGpuFBufOps == BufferOpsUseGpu::True)
                {
                    nbv->wait_for_gpu_force_reduction(Nbnxm::AtomLocality::NonLocal);
                }
                dd_move_f(cr->dd, &forceOut.forceWithShiftForces(), wcycle);
            }

        }
    }

    // With both nonbonded and PME offloaded to a GPU on the same rank, we use
    // an alternating wait/reduction scheme.
    bool alternateGpuWait = (!c_disableAlternatingWait && useGpuPme && bUseGPU && !DOMAINDECOMP(cr) &&
                             (useGpuFBufOps == BufferOpsUseGpu::False));
    if (alternateGpuWait)
    {
        alternatePmeNbGpuWaitReduce(fr->nbv.get(), fr->pmedata, &forceOut, enerd,
                                    stepWork, pmeFlags, wcycle);
    }
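    // The alternating wait polls both GPU outputs and reduces whichever is
    // ready first instead of waiting in a fixed order; schematically:
    //
    //   while (PME or NB output outstanding)
    //   {
    //       if (PME output ready) { reduce PME forces/energies; }
    //       if (NB  output ready) { reduce NB  forces/energies; }
    //   }
    //
    // which avoids idling on the slower task when both share a device.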

    if (!alternateGpuWait && useGpuPme)
    {
        pme_gpu_wait_and_reduce(fr->pmedata, pmeFlags, wcycle, &forceOut.forceWithVirial(), enerd);
    }

    /* Wait for local GPU NB outputs on the non-alternating wait path */
    if (!alternateGpuWait && bUseGPU)
    {
        /* Measured overhead on CUDA and OpenCL with(out) GPU sharing
         * is between 0.5 and 1.5 Mcycles. So 2 MCycles is an overestimate,
         * but even with a step of 0.1 ms the difference is less than 1%
         * of the step time.
         */
        const float gpuWaitApiOverheadMargin = 2e6F; /* cycles */
        const float waitCycles               =
            Nbnxm::gpu_wait_finish_task(nbv->gpu_nbv,
                                        stepWork, Nbnxm::AtomLocality::Local,
                                        enerd->grpp.ener[egLJSR].data(),
                                        enerd->grpp.ener[egCOULSR].data(),
                                        forceOut.forceWithShiftForces().shiftForces(),
                                        wcycle);

        if (ddBalanceRegionHandler.useBalancingRegion())
        {
            DdBalanceRegionWaitedForGpu waitedForGpu = DdBalanceRegionWaitedForGpu::yes;
            if (stepWork.computeForces && waitCycles <= gpuWaitApiOverheadMargin)
            {
                /* We measured few cycles, it could be that the kernel
                 * and transfer finished earlier and there was no actual
                 * wait time, only API call overhead.
                 * Then the actual time could be anywhere between 0 and
                 * cycles_wait_est. We will use half of cycles_wait_est.
                 */
                waitedForGpu = DdBalanceRegionWaitedForGpu::no;
            }
            ddBalanceRegionHandler.closeAfterForceComputationGpu(cycles_wait_gpu, waitedForGpu);
        }
    }

    if (fr->nbv->emulateGpu())
    {
        // NOTE: emulation kernel is not included in the balancing region,
        // but emulation mode does not target performance anyway
        wallcycle_start_nocount(wcycle, ewcFORCE);
        do_nb_verlet(fr, ic, enerd, stepWork, Nbnxm::InteractionLocality::Local,
                     DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes,
                     step, nrnb, wcycle);
        wallcycle_stop(wcycle, ewcFORCE);
    }

    /* Do the nonbonded GPU (or emulation) force buffer reduction
     * on the non-alternating path. */
    if (bUseOrEmulGPU && !alternateGpuWait)
    {
        gmx::ArrayRef<gmx::RVec>  forceWithShift = forceOut.forceWithShiftForces().force();

        if (useGpuFBufOps == BufferOpsUseGpu::True)
        {
            // Flag to specify whether the CPU force buffer has contributions
            // to local atoms. This is true when there are CPU-based force
            // tasks, or, when DD is active, when the halo exchange has added
            // contributions from the non-local part.
            const bool haveLocalForceContribInCpuBuffer = (haveCpuLocalForces || havePPDomainDecomposition(cr));

            // TODO: move these steps as early as possible:
            // - CPU f H2D should be as soon as all CPU-side forces are done
            // - wait for force reduction does not need to block host (at least not here, it's sufficient to wait
            //   before the next CPU task that consumes the forces: vsite spread or update)
            // - copy is not performed if GPU force halo exchange is active, because it would overwrite the result
            //   of the halo exchange. In that case the copy is instead performed above, before the exchange.
            //   These should be unified.
            rvec *f = as_rvec_array(forceWithShift.data());
            if (haveLocalForceContribInCpuBuffer && !useGpuForcesHaloExchange)
            {
                nbv->launch_copy_f_to_gpu(f, Nbnxm::AtomLocality::Local);
            }
            if (useGpuForcesHaloExchange)
            {
                // Add a stream synchronization to satisfy a dependency
                // for the local buffer ops on the result of GPU halo
                // exchange, which operates in the non-local stream and
                // writes to the local part of the force buffer.
                // TODO improve this through use of an event - see Redmine #3093
                nbv->stream_local_wait_for_nonlocal();
            }
            nbv->atomdata_add_nbat_f_to_f_gpu(Nbnxm::AtomLocality::Local,
                                              nbv->getDeviceForces(),
                                              pme_gpu_get_device_f(fr->pmedata),
                                              pme_gpu_get_f_ready_synchronizer(fr->pmedata),
                                              useGpuPmeFReduction, haveLocalForceContribInCpuBuffer);
            nbv->launch_copy_f_from_gpu(f, Nbnxm::AtomLocality::Local);
            nbv->wait_for_gpu_force_reduction(Nbnxm::AtomLocality::Local);
        }
        else
        {
            nbv->atomdata_add_nbat_f_to_f(Nbnxm::AtomLocality::Local, forceWithShift);
        }

    }

    launchGpuEndOfStepTasks(nbv, fr->gpuBonded, fr->pmedata, enerd,
                            *runScheduleWork,
                            bUseGPU, useGpuPme,
                            step,
                            wcycle);

    if (DOMAINDECOMP(cr))
    {
        dd_force_flop_stop(cr->dd, nrnb);
    }

    if (stepWork.computeForces)
    {
        rvec *f = as_rvec_array(forceOut.forceWithShiftForces().force().data());

        /* If we have NoVirSum forces, but we do not calculate the virial,
         * we sum fr->f_novirsum=forceOut.f later.
         */
        if (vsite && !(fr->haveDirectVirialContributions && !stepWork.computeVirial))
        {
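            // Virtual sites carry no mass, so the forces acting on them have
            // to be spread onto the constructing atoms to enter the
            // integration and the virial correctly.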
            rvec *fshift = as_rvec_array(forceOut.forceWithShiftForces().shiftForces().data());
            spread_vsite_f(vsite, as_rvec_array(x.unpaddedArrayRef().data()), f, fshift, FALSE, nullptr, nrnb,
                           &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr, wcycle);
        }

        if (stepWork.computeVirial)
        {
            /* Calculation of the virial must be done after vsites! */
            calc_virial(0, mdatoms->homenr, as_rvec_array(x.unpaddedArrayRef().data()),
                        forceOut.forceWithShiftForces(),
                        vir_force, graph, box, nrnb, fr, inputrec->ePBC);
        }
    }

    if (PAR(cr) && !thisRankHasDuty(cr, DUTY_PME))
    {
        /* In case of node-splitting, the PP nodes receive the long-range
         * forces, virial and energy from the PME nodes here.
         */
        pme_receive_force_ener(cr, &forceOut.forceWithVirial(), enerd, wcycle);
    }

    if (stepWork.computeForces)
    {
        post_process_forces(cr, step, nrnb, wcycle,
                            top, box, as_rvec_array(x.unpaddedArrayRef().data()), &forceOut,
                            vir_force, mdatoms, graph, fr, vsite,
                            stepWork);
    }

    if (stepWork.computeEnergy)
    {
        /* Sum the potential energy terms from group contributions */
        sum_epot(&(enerd->grpp), enerd->term);

        if (!EI_TPI(inputrec->eI))
        {
            checkPotentialEnergyValidity(step, *enerd, *inputrec);
        }
    }

    /* In case we don't have constraints and are using GPUs, the next balancing
     * region starts here.
     * Some "special" work at the end of do_force_cuts?, such as vsite spread,
     * virial calculation and COM pulling, is thus not included in
     * the balance timing, which is OK as most of these tasks do communication.
     */
    ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::no);
}