2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team.
6 * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
7 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8 * and including many others, as listed in the AUTHORS file in the
9 * top-level source directory and at http://www.gromacs.org.
11 * GROMACS is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public License
13 * as published by the Free Software Foundation; either version 2.1
14 * of the License, or (at your option) any later version.
16 * GROMACS is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with GROMACS; if not, see
23 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
26 * If you want to redistribute modifications to GROMACS, please
27 * consider that scientific software is very special. Version
28 * control is crucial - bugs must be traceable. We will be happy to
29 * consider code for inclusion in the official distribution, but
30 * derived work must not be called official GROMACS. Details are found
31 * in the README & COPYING files - if they are missing, get the
32 * official version at http://www.gromacs.org.
34 * To help us fund GROMACS development, we humbly ask that you cite
35 * the research papers on the package. Check out http://www.gromacs.org.
48 #include "gromacs/awh/awh.h"
49 #include "gromacs/domdec/dlbtiming.h"
50 #include "gromacs/domdec/domdec.h"
51 #include "gromacs/domdec/domdec_struct.h"
52 #include "gromacs/domdec/partition.h"
53 #include "gromacs/essentialdynamics/edsam.h"
54 #include "gromacs/ewald/pme.h"
55 #include "gromacs/gmxlib/chargegroup.h"
56 #include "gromacs/gmxlib/network.h"
57 #include "gromacs/gmxlib/nrnb.h"
58 #include "gromacs/gmxlib/nonbonded/nb_free_energy.h"
59 #include "gromacs/gmxlib/nonbonded/nb_kernel.h"
60 #include "gromacs/gmxlib/nonbonded/nonbonded.h"
61 #include "gromacs/gpu_utils/gpu_utils.h"
62 #include "gromacs/imd/imd.h"
63 #include "gromacs/listed_forces/bonded.h"
64 #include "gromacs/listed_forces/disre.h"
65 #include "gromacs/listed_forces/gpubonded.h"
66 #include "gromacs/listed_forces/listed_forces.h"
67 #include "gromacs/listed_forces/manage_threading.h"
68 #include "gromacs/listed_forces/orires.h"
69 #include "gromacs/math/arrayrefwithpadding.h"
70 #include "gromacs/math/functions.h"
71 #include "gromacs/math/units.h"
72 #include "gromacs/math/vec.h"
73 #include "gromacs/math/vecdump.h"
74 #include "gromacs/mdlib/calcmu.h"
75 #include "gromacs/mdlib/calcvir.h"
76 #include "gromacs/mdlib/constr.h"
77 #include "gromacs/mdlib/enerdata_utils.h"
78 #include "gromacs/mdlib/force.h"
79 #include "gromacs/mdlib/forcerec.h"
80 #include "gromacs/mdlib/gmx_omp_nthreads.h"
81 #include "gromacs/mdlib/ppforceworkload.h"
82 #include "gromacs/mdlib/qmmm.h"
83 #include "gromacs/mdlib/update.h"
84 #include "gromacs/mdtypes/commrec.h"
85 #include "gromacs/mdtypes/enerdata.h"
86 #include "gromacs/mdtypes/forceoutput.h"
87 #include "gromacs/mdtypes/iforceprovider.h"
88 #include "gromacs/mdtypes/inputrec.h"
89 #include "gromacs/mdtypes/md_enums.h"
90 #include "gromacs/mdtypes/state.h"
91 #include "gromacs/nbnxm/atomdata.h"
92 #include "gromacs/nbnxm/gpu_data_mgmt.h"
93 #include "gromacs/nbnxm/nbnxm.h"
94 #include "gromacs/pbcutil/ishift.h"
95 #include "gromacs/pbcutil/mshift.h"
96 #include "gromacs/pbcutil/pbc.h"
97 #include "gromacs/pulling/pull.h"
98 #include "gromacs/pulling/pull_rotation.h"
99 #include "gromacs/timing/cyclecounter.h"
100 #include "gromacs/timing/gpu_timing.h"
101 #include "gromacs/timing/wallcycle.h"
102 #include "gromacs/timing/wallcyclereporting.h"
103 #include "gromacs/timing/walltime_accounting.h"
104 #include "gromacs/topology/topology.h"
105 #include "gromacs/utility/arrayref.h"
106 #include "gromacs/utility/basedefinitions.h"
107 #include "gromacs/utility/cstringutil.h"
108 #include "gromacs/utility/exceptions.h"
109 #include "gromacs/utility/fatalerror.h"
110 #include "gromacs/utility/gmxassert.h"
111 #include "gromacs/utility/gmxmpi.h"
112 #include "gromacs/utility/logger.h"
113 #include "gromacs/utility/smalloc.h"
114 #include "gromacs/utility/strconvert.h"
115 #include "gromacs/utility/sysinfo.h"
117 // TODO: this environment variable allows us to verify before release
118 // that on less common architectures the total cost of polling is not larger than
119 // a blocking wait (so polling does not introduce overhead when the static
120 // PME-first ordering would suffice).
121 static const bool c_disableAlternatingWait = (getenv("GMX_DISABLE_ALTERNATING_GPU_WAIT") != nullptr);
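// For example, the alternating wait can be disabled at run time (no rebuild
// needed) with something like: GMX_DISABLE_ALTERNATING_GPU_WAIT=1 gmx mdrun ...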
123 // environment variable to enable GPU buffer ops, to allow incremental and optional
124 // introduction of this functionality.
125 // TODO eventually tie this in with other existing GPU flags.
126 static const bool c_enableGpuBufOps = (getenv("GMX_USE_GPU_BUFFER_OPS") != nullptr);
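/*! \brief Add the forces in \p forceToAdd to \p f, parallelized over atoms with OpenMP. */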
128 static void sum_forces(rvec f[], gmx::ArrayRef<const gmx::RVec> forceToAdd)
130 const int end = forceToAdd.size();
132 int gmx_unused nt = gmx_omp_nthreads_get(emntDefault);
133 #pragma omp parallel for num_threads(nt) schedule(static)
134 for (int i = 0; i < end; i++)
136 rvec_inc(f[i], forceToAdd[i]);
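/*! \brief Accumulate the short-range virial: the shift-force contribution from
 *         the surrounding cells plus the per-atom contribution of the home atoms. */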
140 static void calc_virial(int start, int homenr, const rvec x[], const rvec f[],
141 tensor vir_part, const t_graph *graph, const matrix box,
142 t_nrnb *nrnb, const t_forcerec *fr, int ePBC)
144 /* The short-range virial from surrounding boxes */
145 calc_vir(SHIFTS, fr->shift_vec, fr->fshift, vir_part, ePBC == epbcSCREW, box);
146 inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);
148 /* Calculate partial virial, for local atoms only, based on short range.
149 * Total virial is computed in global_stat, called from do_md
151 f_calc_vir(start, start+homenr, x, f, vir_part, graph, box);
152 inc_nrnb(nrnb, eNR_VIRIAL, homenr);
156 pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
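/*! \brief Thin wrapper around pull_potential() that accumulates the COM pull
 *         energy and its free-energy derivative into \p enerd. */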
160 static void pull_potential_wrapper(const t_commrec *cr,
161 const t_inputrec *ir,
162 const matrix box, gmx::ArrayRef<const gmx::RVec> x,
163 gmx::ForceWithVirial *force,
164 const t_mdatoms *mdatoms,
165 gmx_enerdata_t *enerd,
169 gmx_wallcycle_t wcycle)
174 /* Calculate the center of mass forces; this requires communication,
175 * which is why pull_potential is called close to other communication.
177 wallcycle_start(wcycle, ewcPULLPOT);
178 set_pbc(&pbc, ir->ePBC, box);
180 enerd->term[F_COM_PULL] +=
181 pull_potential(pull_work, mdatoms, &pbc,
182 cr, t, lambda[efptRESTRAINT], as_rvec_array(x.data()), force, &dvdl);
183 enerd->dvdl_lin[efptRESTRAINT] += dvdl;
184 wallcycle_stop(wcycle, ewcPULLPOT);
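/*! \brief Receive the long-range forces, virial and energies from the separate
 *         PME ranks and accumulate them into the local output buffers. */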
187 static void pme_receive_force_ener(const t_commrec *cr,
188 gmx::ForceWithVirial *forceWithVirial,
189 gmx_enerdata_t *enerd,
190 gmx_wallcycle_t wcycle)
192 real e_q, e_lj, dvdl_q, dvdl_lj;
193 float cycles_ppdpme, cycles_seppme;
195 cycles_ppdpme = wallcycle_stop(wcycle, ewcPPDURINGPME);
196 dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);
198 /* In case of node-splitting, the PP nodes receive the long-range
199 * forces, virial and energy from the PME nodes here.
201 wallcycle_start(wcycle, ewcPP_PMEWAITRECVF);
204 gmx_pme_receive_f(cr, forceWithVirial, &e_q, &e_lj, &dvdl_q, &dvdl_lj,
206 enerd->term[F_COUL_RECIP] += e_q;
207 enerd->term[F_LJ_RECIP] += e_lj;
208 enerd->dvdl_lin[efptCOUL] += dvdl_q;
209 enerd->dvdl_lin[efptVDW] += dvdl_lj;
213 dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);
215 wallcycle_stop(wcycle, ewcPP_PMEWAITRECVF);
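/*! \brief Print all home atoms whose force exceeds the requested tolerance and
 *         issue a fatal error when any non-finite force is encountered. */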
218 static void print_large_forces(FILE *fp,
226 real force2Tolerance = gmx::square(forceTolerance);
227 gmx::index numNonFinite = 0;
228 for (int i = 0; i < md->homenr; i++)
230 real force2 = norm2(f[i]);
231 bool nonFinite = !std::isfinite(force2);
232 if (force2 >= force2Tolerance || nonFinite)
234 fprintf(fp, "step %" PRId64 " atom %6d x %8.3f %8.3f %8.3f force %12.5e\n",
236 ddglatnr(cr->dd, i), x[i][XX], x[i][YY], x[i][ZZ], std::sqrt(force2));
243 if (numNonFinite > 0)
245 /* Note that with MPI this fatal call on one rank might interrupt
246 * the printing on other ranks. But we can only avoid that with
247 * an expensive MPI barrier that we would need at each step.
249 gmx_fatal(FARGS, "At step %" PRId64 " detected non-finite forces on %td atoms", step, numNonFinite);
253 static void post_process_forces(const t_commrec *cr,
256 gmx_wallcycle_t wcycle,
257 const gmx_localtop_t *top,
261 gmx::ForceWithVirial *forceWithVirial,
263 const t_mdatoms *mdatoms,
264 const t_graph *graph,
265 const t_forcerec *fr,
266 const gmx_vsite_t *vsite,
269 if (fr->haveDirectVirialContributions)
271 rvec *fDirectVir = as_rvec_array(forceWithVirial->force_.data());
275 /* Spread the mesh force on virtual sites to the other particles...
276 * This is parallelized. MPI communication is performed
277 * if the constructing atoms aren't local.
279 matrix virial = { { 0 } };
280 spread_vsite_f(vsite, x, fDirectVir, nullptr,
281 (flags & GMX_FORCE_VIRIAL) != 0, virial,
283 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr, wcycle);
284 forceWithVirial->addVirialContribution(virial);
287 if (flags & GMX_FORCE_VIRIAL)
289 /* Now add the forces, this is local */
290 sum_forces(f, forceWithVirial->force_);
292 /* Add the direct virial contributions */
293 GMX_ASSERT(forceWithVirial->computeVirial_, "forceWithVirial should request virial computation when we request the virial");
294 m_add(vir_force, forceWithVirial->getVirial(), vir_force);
298 pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
303 if (fr->print_force >= 0)
305 print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
309 static void do_nb_verlet(t_forcerec *fr,
310 const interaction_const_t *ic,
311 gmx_enerdata_t *enerd,
313 const Nbnxm::InteractionLocality ilocality,
317 gmx_wallcycle_t wcycle)
319 if (!(flags & GMX_FORCE_NONBONDED))
321 /* skip non-bonded calculation */
325 nonbonded_verlet_t *nbv = fr->nbv.get();
327 /* GPU kernel launch overhead is already timed separately */
328 if (fr->cutoff_scheme != ecutsVERLET)
330 gmx_incons("Invalid cut-off scheme passed!");
335 /* When dynamic pair-list pruning is requested, we need to prune
336 * at nstlistPrune steps.
338 if (nbv->isDynamicPruningStepCpu(step))
340 /* Prune the pair-list beyond fr->ic->rlistPrune using
341 * the current coordinates of the atoms.
343 wallcycle_sub_start(wcycle, ewcsNONBONDED_PRUNING);
344 nbv->dispatchPruneKernelCpu(ilocality, fr->shift_vec);
345 wallcycle_sub_stop(wcycle, ewcsNONBONDED_PRUNING);
349 nbv->dispatchNonbondedKernel(ilocality, *ic, flags, clearF, *fr, enerd, nrnb, wcycle);
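/*! \brief Zero the first \p n entries of \p v, using OpenMP only when \p n is
 *         large enough for the parallelization overhead to pay off. */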
352 static inline void clear_rvecs_omp(int n, rvec v[])
354 int nth = gmx_omp_nthreads_get_simple_rvec_task(emntDefault, n);
356 /* Note that we would like to avoid this conditional by putting it
357 * into the omp pragma instead, but then we still take the full
358 * omp parallel for overhead (at least with gcc5).
362 for (int i = 0; i < n; i++)
369 #pragma omp parallel for num_threads(nth) schedule(static)
370 for (int i = 0; i < n; i++)
377 /*! \brief Return an estimate of the average kinetic energy or 0 when unreliable
379 * \param groupOptions Group options, containing T-coupling options
381 static real averageKineticEnergyEstimate(const t_grpopts &groupOptions)
383 real nrdfCoupled = 0;
384 real nrdfUncoupled = 0;
385 real kineticEnergy = 0;
386 for (int g = 0; g < groupOptions.ngtc; g++)
388 if (groupOptions.tau_t[g] >= 0)
390 nrdfCoupled += groupOptions.nrdf[g];
391 kineticEnergy += groupOptions.nrdf[g]*0.5*groupOptions.ref_t[g]*BOLTZ;
395 nrdfUncoupled += groupOptions.nrdf[g];
399 /* This conditional with > also catches nrdf=0 */
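/* Scaling by (nrdfCoupled + nrdfUncoupled)/nrdfCoupled extrapolates the kinetic
 * energy of the temperature-coupled degrees of freedom to all degrees of freedom. */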
400 if (nrdfCoupled > nrdfUncoupled)
402 return kineticEnergy*(nrdfCoupled + nrdfUncoupled)/nrdfCoupled;
410 /*! \brief This routine checks that the potential energy is finite.
412 * Always checks that the potential energy is finite. If step equals
413 * inputrec.init_step, it also checks that the magnitude of the potential energy
414 * is reasonable. Terminates with a fatal error when a check fails.
415 * Note that passing this check does not guarantee finite forces,
416 * since those use slightly different arithmetic. But in most cases
417 * there is just a narrow coordinate range where forces are not finite
418 * and energies are finite.
420 * \param[in] step The step number, used for checking and printing
421 * \param[in] enerd The energy data; the non-bonded group energies need to be added to enerd.term[F_EPOT] before calling this routine
422 * \param[in] inputrec The input record
424 static void checkPotentialEnergyValidity(int64_t step,
425 const gmx_enerdata_t &enerd,
426 const t_inputrec &inputrec)
428 /* Threshold valid for comparing absolute potential energy against
429 * the kinetic energy. Normally one should not consider absolute
430 * potential energy values, but with a factor of one million
431 * we should never get false positives.
433 constexpr real c_thresholdFactor = 1e6;
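/* Illustrative example (assumed numbers): ~1e5 coupled degrees of freedom at
 * 300 K give an estimate of about 1e5*0.5*kB*300 ~= 1.2e5 kJ/mol, so this
 * check only triggers for potential energies above roughly 1e11 kJ/mol. */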
435 bool energyIsNotFinite = !std::isfinite(enerd.term[F_EPOT]);
436 real averageKineticEnergy = 0;
437 /* We only check for large potential energy at the initial step,
438 * because that is by far the most likely step for this to occur
439 * and because computing the average kinetic energy is not free.
440 * Note: nstcalcenergy >> 1 often does not allow catching large energies
441 * before they become NaN.
443 if (step == inputrec.init_step && EI_DYNAMICS(inputrec.eI))
445 averageKineticEnergy = averageKineticEnergyEstimate(inputrec.opts);
448 if (energyIsNotFinite || (averageKineticEnergy > 0 &&
449 enerd.term[F_EPOT] > c_thresholdFactor*averageKineticEnergy))
451 gmx_fatal(FARGS, "Step %" PRId64 ": The total potential energy is %g, which is %s. The LJ and electrostatic contributions to the energy are %g and %g, respectively. A %s potential energy can be caused by overlapping interactions in bonded interactions or very large%s coordinate values. Usually this is caused by a badly- or non-equilibrated initial configuration, incorrect interactions or parameters in the topology.",
454 energyIsNotFinite ? "not finite" : "extremely high",
456 enerd.term[F_COUL_SR],
457 energyIsNotFinite ? "non-finite" : "very high",
458 energyIsNotFinite ? " or NaN" : "");
462 /*! \brief Return true if there are special forces computed this step.
464 * The conditionals exactly correspond to those in computeSpecialForces().
467 haveSpecialForces(const t_inputrec *inputrec,
468 ForceProviders *forceProviders,
469 const pull_t *pull_work,
473 const bool computeForces = (forceFlags & GMX_FORCE_FORCES) != 0;
476 ((computeForces && forceProviders->hasForceProvider()) || // forceProviders
477 (inputrec->bPull && pull_have_potential(pull_work)) || // pull
478 inputrec->bRot || // enforced rotation
479 (ed != nullptr) || // flooding
480 (inputrec->bIMD && computeForces)); // IMD
483 /*! \brief Compute forces and/or energies for special algorithms
485 * The intention is to collect all calls to algorithms that compute
486 * forces on local atoms only and that do not contribute to the local
487 * virial sum (but add their virial contribution separately).
488 * Eventually these should likely all become ForceProviders.
489 * Within this function the intention is to have algorithms that do
490 * global communication at the end, so global barriers within the MD loop
491 * are as close together as possible.
493 * \param[in] fplog The log file
494 * \param[in] cr The communication record
495 * \param[in] inputrec The input record
496 * \param[in] awh The Awh module (nullptr if none in use).
497 * \param[in] enforcedRotation Enforced rotation module.
498 * \param[in] imdSession The IMD session
499 * \param[in] pull_work The pull work structure.
500 * \param[in] step The current MD step
501 * \param[in] t The current time
502 * \param[in,out] wcycle Wallcycle accounting struct
503 * \param[in,out] forceProviders Pointer to a list of force providers
504 * \param[in] box The unit cell
505 * \param[in] x The coordinates
506 * \param[in] mdatoms Per atom properties
507 * \param[in] lambda Array of free-energy lambda values
508 * \param[in] forceFlags Flags that tell whether we should compute forces/energies/virial
509 * \param[in,out] forceWithVirial Force and virial buffers
510 * \param[in,out] enerd Energy buffer
511 * \param[in,out] ed Essential dynamics pointer
512 * \param[in] bNS Tells if we did neighbor searching this step, used for ED sampling
514 * \todo Remove bNS, which is used incorrectly.
515 * \todo Convert all other algorithms called here to ForceProviders.
518 computeSpecialForces(FILE *fplog,
520 const t_inputrec *inputrec,
522 gmx_enfrot *enforcedRotation,
523 gmx::ImdSession *imdSession,
527 gmx_wallcycle_t wcycle,
528 ForceProviders *forceProviders,
530 gmx::ArrayRef<const gmx::RVec> x,
531 const t_mdatoms *mdatoms,
534 gmx::ForceWithVirial *forceWithVirial,
535 gmx_enerdata_t *enerd,
539 const bool computeForces = (forceFlags & GMX_FORCE_FORCES) != 0;
541 /* NOTE: Currently all ForceProviders only provide forces.
542 * When they also provide energies, remove this conditional.
546 gmx::ForceProviderInput forceProviderInput(x, *mdatoms, t, box, *cr);
547 gmx::ForceProviderOutput forceProviderOutput(forceWithVirial, enerd);
549 /* Collect forces from modules */
550 forceProviders->calculateForces(forceProviderInput, &forceProviderOutput);
553 if (inputrec->bPull && pull_have_potential(pull_work))
555 pull_potential_wrapper(cr, inputrec, box, x,
557 mdatoms, enerd, pull_work, lambda, t,
562 enerd->term[F_COM_PULL] +=
563 awh->applyBiasForcesAndUpdateBias(inputrec->ePBC, *mdatoms, box,
565 t, step, wcycle, fplog);
569 rvec *f = as_rvec_array(forceWithVirial->force_.data());
571 /* Add the forces from enforced rotation potentials (if any) */
574 wallcycle_start(wcycle, ewcROTadd);
575 enerd->term[F_COM_PULL] += add_rot_forces(enforcedRotation, f, cr, step, t);
576 wallcycle_stop(wcycle, ewcROTadd);
581 /* Note that since init_edsam() is called after the initialization
582 * of forcerec, edsam doesn't request the noVirSum force buffer.
583 * Thus if no other algorithm (e.g. PME) requires it, the forces
584 * here will contribute to the virial.
586 do_flood(cr, inputrec, as_rvec_array(x.data()), f, ed, box, step, bNS);
589 /* Add forces from interactive molecular dynamics (IMD), if any */
590 if (inputrec->bIMD && computeForces)
592 imdSession->applyForces(f);
596 /*! \brief Launch the prepare_step and spread stages of PME GPU.
598 * \param[in] pmedata The PME structure
599 * \param[in] box The box matrix
600 * \param[in] x Coordinate array
601 * \param[in] flags Force flags
602 * \param[in] pmeFlags PME flags
603 * \param[in] wcycle The wallcycle structure
605 static inline void launchPmeGpuSpread(gmx_pme_t *pmedata,
610 gmx_wallcycle_t wcycle)
612 pme_gpu_prepare_computation(pmedata, (flags & GMX_FORCE_DYNAMICBOX) != 0, box, wcycle, pmeFlags);
613 pme_gpu_launch_spread(pmedata, x, wcycle);
616 /*! \brief Launch the FFT and gather stages of PME GPU
618 * This function only implements setting the output forces (no accumulation).
620 * \param[in] pmedata The PME structure
621 * \param[in] wcycle The wallcycle structure
623 static void launchPmeGpuFftAndGather(gmx_pme_t *pmedata,
624 gmx_wallcycle_t wcycle)
626 pme_gpu_launch_complex_transforms(pmedata, wcycle);
627 pme_gpu_launch_gather(pmedata, wcycle, PmeForceOutputHandling::Set);
631 * Polling wait for either of the PME or nonbonded GPU tasks.
633 * Instead of a static order in waiting for GPU tasks, this function
634 * polls checking which of the two tasks completes first, and does the
635 * associated force buffer reduction overlapped with the other task.
636 * By doing that, unlike static scheduling order, it can always overlap
637 * one of the reductions, regardless of the GPU task completion order.
639 * \param[in] nbv Nonbonded verlet structure
640 * \param[in,out] pmedata PME module data
641 * \param[in,out] force Force array to reduce task outputs into.
642 * \param[in,out] forceWithVirial Force and virial buffers
643 * \param[in,out] fshift Shift force output vector results are reduced into
644 * \param[in,out] enerd Energy data structure results are reduced into
645 * \param[in] flags Force flags
646 * \param[in] pmeFlags PME flags
647 * \param[in] wcycle The wallcycle structure
649 static void alternatePmeNbGpuWaitReduce(nonbonded_verlet_t *nbv,
651 gmx::ArrayRefWithPadding<gmx::RVec> *force,
652 gmx::ForceWithVirial *forceWithVirial,
654 gmx_enerdata_t *enerd,
657 gmx_wallcycle_t wcycle)
659 bool isPmeGpuDone = false;
660 bool isNbGpuDone = false;
663 gmx::ArrayRef<const gmx::RVec> pmeGpuForces;
665 while (!isPmeGpuDone || !isNbGpuDone)
669 GpuTaskCompletion completionType = (isNbGpuDone) ? GpuTaskCompletion::Wait : GpuTaskCompletion::Check;
670 isPmeGpuDone = pme_gpu_try_finish_task(pmedata, pmeFlags, wcycle, forceWithVirial, enerd, completionType);
675 GpuTaskCompletion completionType = (isPmeGpuDone) ? GpuTaskCompletion::Wait : GpuTaskCompletion::Check;
676 wallcycle_start_nocount(wcycle, ewcWAIT_GPU_NB_L);
677 isNbGpuDone = Nbnxm::gpu_try_finish_task(nbv->gpu_nbv,
679 Nbnxm::AtomLocality::Local,
680 enerd->grpp.ener[egLJSR].data(),
681 enerd->grpp.ener[egCOULSR].data(),
682 fshift, completionType);
683 wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);
684 // To get the call count right, we issue an extra start/stop pair here
685 // once the task has finished.
686 // TODO: move the ewcWAIT_GPU_NB_L cycle counting into nbnxn_gpu_try_finish_task()
687 // and ewcNB_XF_BUF_OPS counting into nbnxn_atomdata_add_nbat_f_to_f().
690 wallcycle_start(wcycle, ewcWAIT_GPU_NB_L);
691 wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);
693 nbv->atomdata_add_nbat_f_to_f(Nbnxm::AtomLocality::Local,
694 as_rvec_array(force->unpaddedArrayRef().data()),
695 BufferOpsUseGpu::False,
696 GpuBufferOpsAccumulateForce::Null,
703 /*! \brief Hack structure with force output buffers for do_force for the home atoms for this domain */
707 ForceOutputs(rvec *f, gmx::ForceWithVirial const forceWithVirial) :
709 forceWithVirial(forceWithVirial) {}
711 //! Force output buffer used by legacy modules (without SIMD padding)
713 //! Force with direct virial contribution (if there are any; without SIMD padding)
714 gmx::ForceWithVirial forceWithVirial;
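// Note: the two views can alias; when no separate direct-virial buffer is
// needed, forceWithVirial wraps the same memory as f (see setupForceOutputs()).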
717 /*! \brief Set up the different force buffers; also does clearing.
719 * \param[in] fr force record pointer
720 * \param[in] pull_work The pull work object.
721 * \param[in] inputrec input record
722 * \param[in] force force array
723 * \param[in] bDoForces True if force are computed this step
724 * \param[in] doVirial True if virial is computed this step
725 * \param[out] wcycle wallcycle recording structure
727 * \returns Cleared force output structure
730 setupForceOutputs(const t_forcerec *fr,
732 const t_inputrec &inputrec,
733 gmx::ArrayRefWithPadding<gmx::RVec> force,
734 const bool bDoForces,
736 gmx_wallcycle_t wcycle)
738 wallcycle_sub_start(wcycle, ewcsCLEAR_FORCE_BUFFER);
740 /* Temporary solution until all routines take PaddedRVecVector */
741 rvec *const f = as_rvec_array(force.unpaddedArrayRef().data());
744 /* Clear the short- and long-range forces */
745 clear_rvecs_omp(fr->natoms_force_constr, f);
748 /* If we need to compute the virial, we might need a separate
749 * force buffer for algorithms for which the virial is calculated
750 * directly, such as PME. Otherwise, forceWithVirial uses the
751 * same force (f in legacy calls) buffer as other algorithms.
753 const bool useSeparateForceWithVirialBuffer = (bDoForces && (doVirial && fr->haveDirectVirialContributions));
756 /* forceWithVirial uses the local atom range only */
757 gmx::ForceWithVirial forceWithVirial (useSeparateForceWithVirialBuffer ?
758 *fr->forceBufferForDirectVirialContributions : force.unpaddedArrayRef(),
761 if (useSeparateForceWithVirialBuffer)
763 /* TODO: update comment
764 * We only compute forces on local atoms. Note that vsites can
765 * spread to non-local atoms, but that part of the buffer is
766 * cleared separately in the vsite spreading code.
768 clear_rvecs_omp(forceWithVirial.force_.size(), as_rvec_array(forceWithVirial.force_.data()));
771 if (inputrec.bPull && pull_have_constraint(pull_work))
773 clear_pull_forces(pull_work);
776 wallcycle_sub_stop(wcycle, ewcsCLEAR_FORCE_BUFFER);
778 return ForceOutputs(f, forceWithVirial);
782 /*! \brief Set up flags that indicate what type of work there is to compute.
784 * Currently we only update it at search steps,
785 * but some properties may change more frequently (e.g. virial/non-virial step),
786 * so when including those either the frequency of update (per-step) or the scope
787 * of a flag will change (i.e. a set of flags for nstlist steps).
791 setupForceWorkload(gmx::PpForceWorkload *forceWork,
792 const t_inputrec *inputrec,
793 const t_forcerec *fr,
794 const pull_t *pull_work,
801 forceWork->haveSpecialForces = haveSpecialForces(inputrec, fr->forceProviders, pull_work, forceFlags, ed);
802 forceWork->haveCpuBondedWork = haveCpuBondeds(*fr);
803 forceWork->haveGpuBondedWork = ((fr->gpuBonded != nullptr) && fr->gpuBonded->haveInteractions());
804 forceWork->haveRestraintsWork = havePositionRestraints(idef, *fcd);
805 forceWork->haveCpuListedForceWork = haveCpuListedForces(*fr, idef, *fcd);
809 /*! \brief Launch end-of-step GPU tasks: buffer clearing and rolling pruning.
811 * TODO: eliminate the \p useGpuNonbonded and \p useGpuPme when these are
812 * incorporated in PpForceWorkload.
815 launchGpuEndOfStepTasks(nonbonded_verlet_t *nbv,
816 gmx::GpuBonded *gpuBonded,
818 gmx_enerdata_t *enerd,
819 const gmx::PpForceWorkload &forceWorkload,
820 bool useGpuNonbonded,
824 gmx_wallcycle_t wcycle)
828 /* Launch pruning before buffer clearing because the API overhead of the
829 * clear kernel launches can leave the GPU idle while it could be running
832 if (nbv->isDynamicPruningStepGpu(step))
834 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
835 wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
836 nbv->dispatchPruneKernelGpu(step);
837 wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
838 wallcycle_stop(wcycle, ewcLAUNCH_GPU);
841 /* now clear the GPU outputs while we finish the step on the CPU */
842 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
843 wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
844 Nbnxm::gpu_clear_outputs(nbv->gpu_nbv, flags);
845 wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
846 wallcycle_stop(wcycle, ewcLAUNCH_GPU);
851 pme_gpu_reinit_computation(pmedata, wcycle);
854 if (forceWorkload.haveGpuBondedWork && (flags & GMX_FORCE_ENERGY))
856 // in principle this should be included in the DD balancing region,
857 // but generally it is infrequent so we'll omit it for the sake of simplicity.
859 gpuBonded->waitAccumulateEnergyTerms(enerd);
861 gpuBonded->clearEnergies();
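/* Main force-computation entry point for a single MD step with the Verlet
 * cut-off scheme; the helper stages above are launched and reduced from here. */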
866 void do_force(FILE *fplog,
868 const gmx_multisim_t *ms,
869 const t_inputrec *inputrec,
871 gmx_enfrot *enforcedRotation,
872 gmx::ImdSession *imdSession,
876 gmx_wallcycle_t wcycle,
877 const gmx_localtop_t *top,
879 gmx::ArrayRefWithPadding<gmx::RVec> x, //NOLINT(performance-unnecessary-value-param)
881 gmx::ArrayRefWithPadding<gmx::RVec> force, //NOLINT(performance-unnecessary-value-param)
883 const t_mdatoms *mdatoms,
884 gmx_enerdata_t *enerd,
886 gmx::ArrayRef<real> lambda,
889 gmx::PpForceWorkload *ppForceWorkload,
890 const gmx_vsite_t *vsite,
895 const DDBalanceRegionHandler &ddBalanceRegionHandler)
899 gmx_bool bStateChanged, bNS, bFillGrid, bCalcCGCM;
900 gmx_bool bDoForces, bUseGPU, bUseOrEmulGPU;
901 nonbonded_verlet_t *nbv = fr->nbv.get();
902 interaction_const_t *ic = fr->ic;
904 /* modify force flag if not doing nonbonded */
907 flags &= ~GMX_FORCE_NONBONDED;
909 bStateChanged = ((flags & GMX_FORCE_STATECHANGED) != 0);
910 bNS = ((flags & GMX_FORCE_NS) != 0);
911 bFillGrid = (bNS && bStateChanged);
912 bCalcCGCM = (bFillGrid && !DOMAINDECOMP(cr));
913 bDoForces = ((flags & GMX_FORCE_FORCES) != 0);
914 bUseGPU = fr->nbv->useGpu();
915 bUseOrEmulGPU = bUseGPU || fr->nbv->emulateGpu();
917 const auto pmeRunMode = fr->pmedata ? pme_run_mode(fr->pmedata) : PmeRunMode::CPU;
918 // TODO slim this conditional down - inputrec and duty checks should mean the same in proper code!
919 const bool useGpuPme = EEL_PME(fr->ic->eeltype) && thisRankHasDuty(cr, DUTY_PME) &&
920 ((pmeRunMode == PmeRunMode::GPU) || (pmeRunMode == PmeRunMode::Mixed));
921 const int pmeFlags = GMX_PME_SPREAD | GMX_PME_SOLVE |
922 ((flags & GMX_FORCE_VIRIAL) ? GMX_PME_CALC_ENER_VIR : 0) |
923 ((flags & GMX_FORCE_ENERGY) ? GMX_PME_CALC_ENER_VIR : 0) |
924 ((flags & GMX_FORCE_FORCES) ? GMX_PME_CALC_F : 0);
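// Note: the GPU buffer ops below are opt-in (GMX_USE_GPU_BUFFER_OPS), require a
// CUDA build with GPU nonbonded work, and force buffer ops are additionally
// disabled on virial/energy steps (see the conditions below).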
925 const BufferOpsUseGpu useGpuFBufOps = (c_enableGpuBufOps && bUseGPU && (GMX_GPU == GMX_GPU_CUDA))
926 && !(flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)) ?
927 BufferOpsUseGpu::True : BufferOpsUseGpu::False;
929 const BufferOpsUseGpu useGpuXBufOps = (c_enableGpuBufOps && bUseGPU && (GMX_GPU == GMX_GPU_CUDA)) ?
930 BufferOpsUseGpu::True : BufferOpsUseGpu::False;
932 /* At a search step we need to start the first balancing region
933 * somewhere early inside the step after communication during domain
934 * decomposition (and not during the previous step as usual).
938 ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::yes);
942 const int homenr = mdatoms->homenr;
944 clear_mat(vir_force);
948 if (inputrecNeedMutot(inputrec))
950 /* Calculate total (local) dipole moment in a temporary common array.
951 * This makes it possible to sum them over nodes faster.
953 calc_mu(start, homenr,
954 x.unpaddedArrayRef(), mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
959 if (fr->ePBC != epbcNONE)
961 /* Compute shift vectors every step,
962 * because of pressure coupling or box deformation!
964 if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
966 calc_shifts(box, fr->shift_vec);
971 put_atoms_in_box_omp(fr->ePBC, box, x.unpaddedArrayRef().subArray(0, homenr), gmx_omp_nthreads_get(emntDefault));
972 inc_nrnb(nrnb, eNR_SHIFTX, homenr);
974 else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
976 unshift_self(graph, box, as_rvec_array(x.unpaddedArrayRef().data()));
980 nbnxn_atomdata_copy_shiftvec((flags & GMX_FORCE_DYNAMICBOX) != 0,
981 fr->shift_vec, nbv->nbat.get());
984 if (!thisRankHasDuty(cr, DUTY_PME))
986 /* Send particle coordinates to the pme nodes.
987 * Since this is only implemented for domain decomposition
988 * and domain decomposition does not use the graph,
989 * we do not need to worry about shifting.
991 gmx_pme_send_coordinates(cr, box, as_rvec_array(x.unpaddedArrayRef().data()),
992 lambda[efptCOUL], lambda[efptVDW],
993 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)) != 0,
1000 launchPmeGpuSpread(fr->pmedata, box, as_rvec_array(x.unpaddedArrayRef().data()), flags, pmeFlags, wcycle);
1003 /* do gridding for pair search */
1006 if (graph && bStateChanged)
1008 /* Calculate intramolecular shift vectors to make molecules whole */
1009 mk_mshift(fplog, graph, fr->ePBC, box, as_rvec_array(x.unpaddedArrayRef().data()));
1013 // - vzero is constant, do we need to pass it?
1014 // - box_diag should be passed directly to nbnxn_put_on_grid
1020 box_diag[XX] = box[XX][XX];
1021 box_diag[YY] = box[YY][YY];
1022 box_diag[ZZ] = box[ZZ][ZZ];
1024 wallcycle_start(wcycle, ewcNS);
1025 if (!DOMAINDECOMP(cr))
1027 wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
1028 nbnxn_put_on_grid(nbv, box,
1030 nullptr, 0, mdatoms->homenr, -1,
1031 fr->cginfo, x.unpaddedArrayRef(),
1033 wallcycle_sub_stop(wcycle, ewcsNBS_GRID_LOCAL);
1037 wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
1038 nbnxn_put_on_grid_nonlocal(nbv, domdec_zones(cr->dd),
1039 fr->cginfo, x.unpaddedArrayRef());
1040 wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);
1043 nbv->setAtomProperties(*mdatoms, fr->cginfo);
1045 wallcycle_stop(wcycle, ewcNS);
1047 /* initialize the GPU nbnxm atom data and bonded data structures */
1050 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
1052 wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1053 Nbnxm::gpu_init_atomdata(nbv->gpu_nbv, nbv->nbat.get());
1054 wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1058 /* Now that all atoms are on the grid, we can assign bonded
1059 * interactions to the GPU, where the grid order is
1060 * needed. Also the xq, f and fshift device buffers have
1061 * been reallocated if needed, so the bonded code can
1062 * learn about them. */
1063 // TODO the xq, f, and fshift buffers are now shared
1064 // resources, so they should be maintained by a
1065 // higher-level object than the nb module.
1066 fr->gpuBonded->updateInteractionListsAndDeviceBuffers(nbv->getGridIndices(),
1068 Nbnxm::gpu_get_xq(nbv->gpu_nbv),
1069 Nbnxm::gpu_get_f(nbv->gpu_nbv),
1070 Nbnxm::gpu_get_fshift(nbv->gpu_nbv));
1072 wallcycle_stop(wcycle, ewcLAUNCH_GPU);
1075 // Need to run after the GPU-offload bonded interaction lists
1076 // are set up to be able to determine whether there is bonded work.
1077 setupForceWorkload(ppForceWorkload,
1087 /* do local pair search */
1090 // TODO: fuse this branch with the above bNS block
1091 wallcycle_start_nocount(wcycle, ewcNS);
1092 wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
1093 /* Note that with a GPU the launch overhead of the list transfer is not timed separately */
1094 nbv->constructPairlist(Nbnxm::InteractionLocality::Local,
1095 &top->excls, step, nrnb);
1097 nbv->setupGpuShortRangeWork(fr->gpuBonded, Nbnxm::InteractionLocality::Local);
1099 wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_LOCAL);
1100 wallcycle_stop(wcycle, ewcNS);
1102 if (useGpuXBufOps == BufferOpsUseGpu::True)
1104 nbv->atomdata_init_copy_x_to_nbat_x_gpu();
1106 // For force buffer ops, we use the below condition rather than
1107 // useGpuFBufOps to ensure that init is performed even if this
1108 // NS step is also a virial step (on which f buf ops are deactivated).
1109 if (c_enableGpuBufOps && bUseGPU && (GMX_GPU == GMX_GPU_CUDA))
1111 nbv->atomdata_init_add_nbat_f_to_f_gpu(wcycle);
1116 nbv->setCoordinates(Nbnxm::AtomLocality::Local, false,
1117 x.unpaddedArrayRef(), useGpuXBufOps, pme_gpu_get_device_x(fr->pmedata), wcycle);
1122 ddBalanceRegionHandler.openBeforeForceComputationGpu();
1124 wallcycle_start(wcycle, ewcLAUNCH_GPU);
1126 wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1127 Nbnxm::gpu_upload_shiftvec(nbv->gpu_nbv, nbv->nbat.get());
1128 if (bNS || (useGpuXBufOps == BufferOpsUseGpu::False))
1130 Nbnxm::gpu_copy_xq_to_gpu(nbv->gpu_nbv, nbv->nbat.get(),
1131 Nbnxm::AtomLocality::Local);
1133 wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1134 // with X buffer ops offloaded to the GPU on all but the search steps
1136 // bonded work not split into separate local and non-local, so with DD
1137 // we can only launch the kernel after non-local coordinates have been received.
1138 if (ppForceWorkload->haveGpuBondedWork && !havePPDomainDecomposition(cr))
1140 wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_BONDED);
1141 fr->gpuBonded->launchKernel(fr, flags, box);
1142 wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_BONDED);
1145 /* launch local nonbonded work on GPU */
1146 wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1147 do_nb_verlet(fr, ic, enerd, flags, Nbnxm::InteractionLocality::Local, enbvClearFNo,
1148 step, nrnb, wcycle);
1149 wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1150 wallcycle_stop(wcycle, ewcLAUNCH_GPU);
1155 // In PME GPU and mixed mode we launch FFT / gather after the
1156 // X copy/transform to allow overlap as well as after the GPU NB
1157 // launch to avoid FFT launch overhead hijacking the CPU and delaying
1158 // the nonbonded kernel.
1159 launchPmeGpuFftAndGather(fr->pmedata, wcycle);
1162 /* Communicate coordinates and sum dipole if necessary +
1163 do non-local pair search */
1164 if (havePPDomainDecomposition(cr))
1168 // TODO: fuse this branch with the above large bNS block
1169 wallcycle_start_nocount(wcycle, ewcNS);
1170 wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);
1171 /* Note that with a GPU the launch overhead of the list transfer is not timed separately */
1172 nbv->constructPairlist(Nbnxm::InteractionLocality::NonLocal,
1173 &top->excls, step, nrnb);
1175 nbv->setupGpuShortRangeWork(fr->gpuBonded, Nbnxm::InteractionLocality::NonLocal);
1176 wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);
1177 wallcycle_stop(wcycle, ewcNS);
1181 dd_move_x(cr->dd, box, x.unpaddedArrayRef(), wcycle);
1183 nbv->setCoordinates(Nbnxm::AtomLocality::NonLocal, false,
1184 x.unpaddedArrayRef(), useGpuXBufOps, pme_gpu_get_device_x(fr->pmedata), wcycle);
1190 wallcycle_start(wcycle, ewcLAUNCH_GPU);
1192 if (bNS || (useGpuXBufOps == BufferOpsUseGpu::False))
1194 wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1195 Nbnxm::gpu_copy_xq_to_gpu(nbv->gpu_nbv, nbv->nbat.get(),
1196 Nbnxm::AtomLocality::NonLocal);
1197 wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1200 if (ppForceWorkload->haveGpuBondedWork)
1202 wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_BONDED);
1203 fr->gpuBonded->launchKernel(fr, flags, box);
1204 wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_BONDED);
1207 /* launch non-local nonbonded tasks on GPU */
1208 wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1209 do_nb_verlet(fr, ic, enerd, flags, Nbnxm::InteractionLocality::NonLocal, enbvClearFNo,
1210 step, nrnb, wcycle);
1211 wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1213 wallcycle_stop(wcycle, ewcLAUNCH_GPU);
1219 /* launch D2H copy-back F */
1220 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
1221 wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1223 bool copyBackNbForce = (useGpuFBufOps == BufferOpsUseGpu::False);
1225 if (havePPDomainDecomposition(cr))
1227 Nbnxm::gpu_launch_cpyback(nbv->gpu_nbv, nbv->nbat.get(),
1228 flags, Nbnxm::AtomLocality::NonLocal, copyBackNbForce);
1230 Nbnxm::gpu_launch_cpyback(nbv->gpu_nbv, nbv->nbat.get(),
1231 flags, Nbnxm::AtomLocality::Local, copyBackNbForce);
1233 wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1235 if (ppForceWorkload->haveGpuBondedWork && (flags & GMX_FORCE_ENERGY))
1237 fr->gpuBonded->launchEnergyTransfer();
1239 wallcycle_stop(wcycle, ewcLAUNCH_GPU);
1242 if (bStateChanged && inputrecNeedMutot(inputrec))
1246 gmx_sumd(2*DIM, mu, cr);
1248 ddBalanceRegionHandler.reopenRegionCpu();
1251 for (i = 0; i < 2; i++)
1253 for (j = 0; j < DIM; j++)
1255 fr->mu_tot[i][j] = mu[i*DIM + j];
1259 if (fr->efep == efepNO)
1261 copy_rvec(fr->mu_tot[0], mu_tot);
1265 for (j = 0; j < DIM; j++)
1268 (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] +
1269 lambda[efptCOUL]*fr->mu_tot[1][j];
1273 /* Reset energies */
1274 reset_enerdata(enerd);
1275 clear_rvecs(SHIFTS, fr->fshift);
1277 if (DOMAINDECOMP(cr) && !thisRankHasDuty(cr, DUTY_PME))
1279 wallcycle_start(wcycle, ewcPPDURINGPME);
1280 dd_force_flop_start(cr->dd, nrnb);
1285 wallcycle_start(wcycle, ewcROT);
1286 do_rotation(cr, enforcedRotation, box, as_rvec_array(x.unpaddedArrayRef().data()), t, step, bNS);
1287 wallcycle_stop(wcycle, ewcROT);
1290 /* Start the force cycle counter.
1291 * Note that a different counter is used for dynamic load balancing.
1293 wallcycle_start(wcycle, ewcFORCE);
1295 // set up and clear force outputs
1296 struct ForceOutputs forceOut = setupForceOutputs(fr, pull_work, *inputrec, force, bDoForces,
1297 ((flags & GMX_FORCE_VIRIAL) != 0), wcycle);
1299 /* We calculate the non-bonded forces, when done on the CPU, here.
1300 * We do this before calling do_force_lowlevel, because in that
1301 * function, the listed forces are calculated before PME, which
1302 * does communication. With this order, non-bonded and listed
1303 * force calculation imbalance can be balanced out by the domain
1304 * decomposition load balancing.
1309 do_nb_verlet(fr, ic, enerd, flags, Nbnxm::InteractionLocality::Local, enbvClearFYes,
1310 step, nrnb, wcycle);
1313 if (fr->efep != efepNO)
1315 /* Calculate the local and non-local free energy interactions here.
1316 * Happens here on the CPU both with and without GPU.
1318 nbv->dispatchFreeEnergyKernel(Nbnxm::InteractionLocality::Local,
1319 fr, as_rvec_array(x.unpaddedArrayRef().data()), forceOut.f, *mdatoms,
1320 inputrec->fepvals, lambda.data(),
1321 enerd, flags, nrnb, wcycle);
1323 if (havePPDomainDecomposition(cr))
1325 nbv->dispatchFreeEnergyKernel(Nbnxm::InteractionLocality::NonLocal,
1326 fr, as_rvec_array(x.unpaddedArrayRef().data()), forceOut.f, *mdatoms,
1327 inputrec->fepvals, lambda.data(),
1328 enerd, flags, nrnb, wcycle);
1334 if (havePPDomainDecomposition(cr))
1336 do_nb_verlet(fr, ic, enerd, flags, Nbnxm::InteractionLocality::NonLocal, enbvClearFNo,
1337 step, nrnb, wcycle);
1340 /* Add all the non-bonded forces to the normal force array.
1341 * This can be split into a local and a non-local part when overlapping
1342 * communication with calculation with domain decomposition.
1344 wallcycle_stop(wcycle, ewcFORCE);
1345 nbv->atomdata_add_nbat_f_to_f(Nbnxm::AtomLocality::All, forceOut.f,
1346 BufferOpsUseGpu::False,
1347 GpuBufferOpsAccumulateForce::Null,
1350 wallcycle_start_nocount(wcycle, ewcFORCE);
1352 /* If there are multiple fshift output buffers we need to reduce them */
1353 if (flags & GMX_FORCE_VIRIAL)
1355 /* This is not in a subcounter because it takes a
1356 negligible and constant-sized amount of time */
1357 nbnxn_atomdata_add_nbat_fshift_to_fshift(nbv->nbat.get(),
1362 /* update QMMMrec, if necessary */
1365 update_QMMMrec(cr, fr, as_rvec_array(x.unpaddedArrayRef().data()), mdatoms, box);
1368 /* Compute the bonded and non-bonded energies and optionally forces */
1369 do_force_lowlevel(fr, inputrec, &(top->idef),
1370 cr, ms, nrnb, wcycle, mdatoms,
1371 x, hist, forceOut.f, &forceOut.forceWithVirial, enerd, fcd,
1372 box, lambda.data(), graph, fr->mu_tot,
1374 ddBalanceRegionHandler);
1376 wallcycle_stop(wcycle, ewcFORCE);
1378 computeSpecialForces(fplog, cr, inputrec, awh, enforcedRotation,
1379 imdSession, pull_work, step, t, wcycle,
1380 fr->forceProviders, box, x.unpaddedArrayRef(), mdatoms, lambda.data(),
1381 flags, &forceOut.forceWithVirial, enerd,
1384 // flag to specify if CPU force output is present in force
1385 // buffer. For now, this is true even when useGpuPme == true
1386 // (because on-GPU PME-nonbonded reduction will be added in
1388 // TODO adapt the below when on-GPU PME-nonbonded reduction is available.
1389 bool useCpuPmeReduction = true;
1390 bool haveCpuForces = (ppForceWorkload->haveSpecialForces || ppForceWorkload->haveCpuListedForceWork || useCpuPmeReduction);
1391 // flag to specify if forces should be accumulated in force buffer
1392 // ops. For now, this is solely determined by the above haveCpuForces
1393 // flag, but in future developments it will also depend on
1394 // e.g. whether the GPU force halo exchange is active.
1395 GpuBufferOpsAccumulateForce accumulateForce = (useGpuFBufOps == BufferOpsUseGpu::True) &&
1396 haveCpuForces ? GpuBufferOpsAccumulateForce::True :
1397 GpuBufferOpsAccumulateForce::False;
1399 // Will store the number of cycles spent waiting for the GPU that
1400 // will later be used in the DLB accounting.
1401 float cycles_wait_gpu = 0;
1404 /* wait for non-local forces (or calculate in emulation mode) */
1405 if (havePPDomainDecomposition(cr))
1409 wallcycle_start(wcycle, ewcWAIT_GPU_NB_NL);
1410 Nbnxm::gpu_wait_finish_task(nbv->gpu_nbv,
1411 flags, Nbnxm::AtomLocality::NonLocal,
1412 enerd->grpp.ener[egLJSR].data(),
1413 enerd->grpp.ener[egCOULSR].data(),
1415 cycles_wait_gpu += wallcycle_stop(wcycle, ewcWAIT_GPU_NB_NL);
1419 wallcycle_start_nocount(wcycle, ewcFORCE);
1420 do_nb_verlet(fr, ic, enerd, flags, Nbnxm::InteractionLocality::NonLocal, enbvClearFYes,
1421 step, nrnb, wcycle);
1422 wallcycle_stop(wcycle, ewcFORCE);
1425 if (useGpuFBufOps == BufferOpsUseGpu::True && haveCpuForces)
1427 nbv->launch_copy_f_to_gpu(forceOut.f, Nbnxm::AtomLocality::NonLocal);
1429 nbv->atomdata_add_nbat_f_to_f(Nbnxm::AtomLocality::NonLocal,
1430 forceOut.f, useGpuFBufOps, accumulateForce, wcycle);
1431 if (useGpuFBufOps == BufferOpsUseGpu::True)
1433 nbv->launch_copy_f_from_gpu(forceOut.f, Nbnxm::AtomLocality::NonLocal);
1436 if (fr->nbv->emulateGpu() && (flags & GMX_FORCE_VIRIAL))
1438 nbnxn_atomdata_add_nbat_fshift_to_fshift(nbv->nbat.get(),
1444 if (havePPDomainDecomposition(cr))
1446 /* We are done with the CPU compute.
1447 * We will now communicate the non-local forces.
1448 * If we use a GPU this will overlap with GPU work, so in that case
1449 * we do not close the DD force balancing region here.
1451 ddBalanceRegionHandler.closeAfterForceComputationCpu();
1455 if (useGpuFBufOps == BufferOpsUseGpu::True)
1457 nbv->wait_for_gpu_force_reduction(Nbnxm::AtomLocality::NonLocal);
1459 dd_move_f(cr->dd, force.unpaddedArrayRef(), fr->fshift, wcycle);
1463 // With both nonbonded and PME offloaded to a GPU on the same rank, we use
1464 // an alternating wait/reduction scheme.
1465 bool alternateGpuWait = (!c_disableAlternatingWait && useGpuPme && bUseGPU && !DOMAINDECOMP(cr) &&
1466 (useGpuFBufOps == BufferOpsUseGpu::False));
1467 if (alternateGpuWait)
1469 alternatePmeNbGpuWaitReduce(fr->nbv.get(), fr->pmedata, &force, &forceOut.forceWithVirial, fr->fshift, enerd,
1470 flags, pmeFlags, wcycle);
1473 if (!alternateGpuWait && useGpuPme)
1475 pme_gpu_wait_and_reduce(fr->pmedata, pmeFlags, wcycle, &forceOut.forceWithVirial, enerd);
1478 /* Wait for local GPU NB outputs on the non-alternating wait path */
1479 if (!alternateGpuWait && bUseGPU)
1481 /* Measured overhead on CUDA and OpenCL with(out) GPU sharing
1482 * is between 0.5 and 1.5 Mcycles. So 2 Mcycles is an overestimate,
1483 * but even with a step of 0.1 ms the difference is less than 1%
1486 const float gpuWaitApiOverheadMargin = 2e6f; /* cycles */
1488 wallcycle_start(wcycle, ewcWAIT_GPU_NB_L);
1489 Nbnxm::gpu_wait_finish_task(nbv->gpu_nbv,
1490 flags, Nbnxm::AtomLocality::Local,
1491 enerd->grpp.ener[egLJSR].data(),
1492 enerd->grpp.ener[egCOULSR].data(),
1494 float cycles_tmp = wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);
1496 if (ddBalanceRegionHandler.useBalancingRegion())
1498 DdBalanceRegionWaitedForGpu waitedForGpu = DdBalanceRegionWaitedForGpu::yes;
1499 if (bDoForces && cycles_tmp <= gpuWaitApiOverheadMargin)
1501 /* We measured few cycles, so it could be that the kernel
1502 * and transfer finished earlier and there was no actual
1503 * wait time, only API call overhead.
1504 * Then the actual time could be anywhere between 0 and
1505 * cycles_wait_est. We will use half of cycles_wait_est.
1507 waitedForGpu = DdBalanceRegionWaitedForGpu::no;
1509 ddBalanceRegionHandler.closeAfterForceComputationGpu(cycles_wait_gpu, waitedForGpu);
1513 if (fr->nbv->emulateGpu())
1515 // NOTE: emulation kernel is not included in the balancing region,
1516 // but emulation mode does not target performance anyway
1517 wallcycle_start_nocount(wcycle, ewcFORCE);
1518 do_nb_verlet(fr, ic, enerd, flags, Nbnxm::InteractionLocality::Local,
1519 DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes,
1520 step, nrnb, wcycle);
1521 wallcycle_stop(wcycle, ewcFORCE);
1524 /* Do the nonbonded GPU (or emulation) force buffer reduction
1525 * on the non-alternating path. */
1526 if (bUseOrEmulGPU && !alternateGpuWait)
1529 // TODO: move these steps as early as possible:
1530 // - CPU f H2D should be as soon as all CPU-side forces are done
1531 // - wait for force reduction does not need to block host (at least not here, it's sufficient to wait
1532 // before the next CPU task that consumes the forces: vsite spread or update)
1534 if (useGpuFBufOps == BufferOpsUseGpu::True && haveCpuForces)
1536 nbv->launch_copy_f_to_gpu(forceOut.f, Nbnxm::AtomLocality::Local);
1538 nbv->atomdata_add_nbat_f_to_f(Nbnxm::AtomLocality::Local,
1539 forceOut.f, useGpuFBufOps, accumulateForce, wcycle);
1540 if (useGpuFBufOps == BufferOpsUseGpu::True)
1542 nbv->launch_copy_f_from_gpu(forceOut.f, Nbnxm::AtomLocality::Local);
1543 nbv->wait_for_gpu_force_reduction(Nbnxm::AtomLocality::Local);
1547 launchGpuEndOfStepTasks(nbv, fr->gpuBonded, fr->pmedata, enerd,
1554 if (DOMAINDECOMP(cr))
1556 dd_force_flop_stop(cr->dd, nrnb);
1561 /* If we have NoVirSum forces, but we do not calculate the virial,
1562 * we sum fr->f_novirsum=forceOut.f later.
1564 if (vsite && !(fr->haveDirectVirialContributions && !(flags & GMX_FORCE_VIRIAL)))
1566 spread_vsite_f(vsite, as_rvec_array(x.unpaddedArrayRef().data()), forceOut.f, fr->fshift, FALSE, nullptr, nrnb,
1567 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr, wcycle);
1570 if (flags & GMX_FORCE_VIRIAL)
1572 /* Calculation of the virial must be done after vsites! */
1573 calc_virial(0, mdatoms->homenr, as_rvec_array(x.unpaddedArrayRef().data()), forceOut.f,
1574 vir_force, graph, box, nrnb, fr, inputrec->ePBC);
1578 if (PAR(cr) && !thisRankHasDuty(cr, DUTY_PME))
1580 /* In case of node-splitting, the PP nodes receive the long-range
1581 * forces, virial and energy from the PME nodes here.
1583 pme_receive_force_ener(cr, &forceOut.forceWithVirial, enerd, wcycle);
1588 post_process_forces(cr, step, nrnb, wcycle,
1589 top, box, as_rvec_array(x.unpaddedArrayRef().data()), forceOut.f, &forceOut.forceWithVirial,
1590 vir_force, mdatoms, graph, fr, vsite,
1594 if (flags & GMX_FORCE_ENERGY)
1596 /* Sum the potential energy terms from group contributions */
1597 sum_epot(&(enerd->grpp), enerd->term);
1599 if (!EI_TPI(inputrec->eI))
1601 checkPotentialEnergyValidity(step, *enerd, *inputrec);
1605 /* In case we don't have constraints and are using GPUs, the next balancing
1606 * region starts here.
1607 * Some "special" work at the end of do_force_cuts?, such as vsite spread,
1608 * virial calculation and COM pulling, is thus not included in
1609 * the balance timing, which is ok as most tasks do communication.
1611 ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::no);