 * This file is part of the GROMACS molecular simulation package.
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013-2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
#include "gromacs/applied_forces/awh/awh.h"
#include "gromacs/domdec/dlbtiming.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/domdec/gpuhaloexchange.h"
#include "gromacs/domdec/partition.h"
#include "gromacs/essentialdynamics/edsam.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/ewald/pme_coordinate_receiver_gpu.h"
#include "gromacs/ewald/pme_pp.h"
#include "gromacs/ewald/pme_pp_comm_gpu.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nonbonded/nb_free_energy.h"
#include "gromacs/gmxlib/nonbonded/nonbonded.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/imd/imd.h"
#include "gromacs/listed_forces/disre.h"
#include "gromacs/listed_forces/listed_forces_gpu.h"
#include "gromacs/listed_forces/listed_forces.h"
#include "gromacs/listed_forces/orires.h"
#include "gromacs/math/arrayrefwithpadding.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vecdump.h"
#include "gromacs/mdlib/calcmu.h"
#include "gromacs/mdlib/calcvir.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdlib/dispersioncorrection.h"
#include "gromacs/mdlib/enerdata_utils.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/force_flags.h"
#include "gromacs/mdlib/forcerec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/update.h"
#include "gromacs/mdlib/vsite.h"
#include "gromacs/mdlib/wall.h"
#include "gromacs/mdlib/wholemoleculetransform.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/enerdata.h"
#include "gromacs/mdtypes/forcebuffers.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/forcerec.h"
#include "gromacs/mdtypes/iforceprovider.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/mdatom.h"
#include "gromacs/mdtypes/multipletimestepping.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/mdtypes/state_propagator_data_gpu.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/nbnxm_gpu.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/pulling/pull_rotation.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/wallcyclereporting.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/fixedcapacityvector.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/strconvert.h"
#include "gromacs/utility/sysinfo.h"
#include "gpuforcereduction.h"
using gmx::AtomLocality;
using gmx::DomainLifetimeWorkload;
using gmx::ForceOutputs;
using gmx::ForceWithShiftForces;
using gmx::InteractionLocality;
using gmx::SimulationWorkload;
using gmx::StepWorkload;

// TODO: this environment variable allows us to verify before release
// that on less common architectures the total cost of polling is not larger than
// a blocking wait (so polling does not introduce overhead when the static
// PME-first ordering would suffice).
static const bool c_disableAlternatingWait = (getenv("GMX_DISABLE_ALTERNATING_GPU_WAIT") != nullptr);
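// When that variable is set in the environment, the code below is expected to fall back to the
// static wait order (PME completion first, then nonbondeds) instead of the polling-based
// alternating wait.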
static void sum_forces(ArrayRef<RVec> f, ArrayRef<const RVec> forceToAdd)
    GMX_ASSERT(f.size() >= forceToAdd.size(), "Accumulation buffer should be sufficiently large");
    const int end = forceToAdd.size();
    int gmx_unused nt = gmx_omp_nthreads_get(ModuleMultiThread::Default);
#pragma omp parallel for num_threads(nt) schedule(static)
    for (int i = 0; i < end; i++)
        rvec_inc(f[i], forceToAdd[i]);

static void calc_virial(int start,
                        const gmx::ForceWithShiftForces& forceWithShiftForces,
                        const t_forcerec* fr,
    /* The short-range virial from surrounding boxes */
    const rvec* fshift          = as_rvec_array(forceWithShiftForces.shiftForces().data());
    const rvec* shiftVecPointer = as_rvec_array(fr->shift_vec.data());
    calc_vir(gmx::c_numShiftVectors, shiftVecPointer, fshift, vir_part, pbcType == PbcType::Screw, box);
    inc_nrnb(nrnb, eNR_VIRIAL, gmx::c_numShiftVectors);
    /* Calculate partial virial, for local atoms only, based on short range.
     * Total virial is computed in global_stat, called from do_md
    const rvec* f = as_rvec_array(forceWithShiftForces.force().data());
    f_calc_vir(start, start + homenr, x, f, vir_part, box);
    inc_nrnb(nrnb, eNR_VIRIAL, homenr);
    pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
static void pull_potential_wrapper(const t_commrec* cr,
                                   const t_inputrec& ir,
                                   gmx::ArrayRef<const gmx::RVec> x,
                                   gmx::ForceWithVirial* force,
                                   const t_mdatoms* mdatoms,
                                   gmx_enerdata_t* enerd,
                                   gmx_wallcycle* wcycle)
    /* Calculate the center of mass forces, this requires communication,
     * which is why pull_potential is called close to other communication.
    wallcycle_start(wcycle, WallCycleCounter::PullPot);
    set_pbc(&pbc, ir.pbcType, box);
    enerd->term[F_COM_PULL] +=
            pull_potential(pull_work,
                           gmx::arrayRefFromArray(mdatoms->massT, mdatoms->nr),
                           lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Restraint)],
    enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Restraint] += dvdl;
    wallcycle_stop(wcycle, WallCycleCounter::PullPot);

static void pme_receive_force_ener(t_forcerec* fr,
                                   gmx::ForceWithVirial* forceWithVirial,
                                   gmx_enerdata_t* enerd,
                                   bool useGpuPmePpComms,
                                   bool receivePmeForceToGpu,
                                   gmx_wallcycle* wcycle)
    real  e_q, e_lj, dvdl_q, dvdl_lj;
    float cycles_ppdpme, cycles_seppme;
    cycles_ppdpme = wallcycle_stop(wcycle, WallCycleCounter::PpDuringPme);
    dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);
    /* In case of node-splitting, the PP nodes receive the long-range
     * forces, virial and energy from the PME nodes here.
    wallcycle_start(wcycle, WallCycleCounter::PpPmeWaitRecvF);
    gmx_pme_receive_f(fr->pmePpCommGpu.get(),
                      receivePmeForceToGpu,
    enerd->term[F_COUL_RECIP] += e_q;
    enerd->term[F_LJ_RECIP] += e_lj;
    enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Coul] += dvdl_q;
    enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Vdw] += dvdl_lj;
    dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);
    wallcycle_stop(wcycle, WallCycleCounter::PpPmeWaitRecvF);
static void print_large_forces(FILE* fp,
                               ArrayRef<const RVec> x,
                               ArrayRef<const RVec> f)
    real       force2Tolerance = gmx::square(forceTolerance);
    gmx::index numNonFinite    = 0;
    for (int i = 0; i < md->homenr; i++)
        real force2    = norm2(f[i]);
        bool nonFinite = !std::isfinite(force2);
        if (force2 >= force2Tolerance || nonFinite)
                    "step %" PRId64 " atom %6d x %8.3f %8.3f %8.3f force %12.5e\n",
    if (numNonFinite > 0)
        /* Note that with MPI this fatal call on one rank might interrupt
         * the printing on other ranks. But we can only avoid that with
         * an expensive MPI barrier that we would need at each step.
        gmx_fatal(FARGS, "At step %" PRId64 " detected non-finite forces on %td atoms", step, numNonFinite);
//! When necessary, spreads forces on vsites and computes the virial for \p forceOutputs->forceWithShiftForces()
static void postProcessForceWithShiftForces(t_nrnb* nrnb,
                                            gmx_wallcycle* wcycle,
                                            ArrayRef<const RVec> x,
                                            ForceOutputs* forceOutputs,
                                            const t_mdatoms& mdatoms,
                                            const t_forcerec& fr,
                                            gmx::VirtualSitesHandler* vsite,
                                            const StepWorkload& stepWork)
    ForceWithShiftForces& forceWithShiftForces = forceOutputs->forceWithShiftForces();
    /* If we have NoVirSum forces, but we do not calculate the virial,
     * we later sum the forceWithShiftForces buffer together with
     * the noVirSum buffer and spread the combined vsite forces at once.
    if (vsite && (!forceOutputs->haveForceWithVirial() || stepWork.computeVirial))
        using VirialHandling = gmx::VirtualSitesHandler::VirialHandling;
        auto f      = forceWithShiftForces.force();
        auto fshift = forceWithShiftForces.shiftForces();
        const VirialHandling virialHandling =
                (stepWork.computeVirial ? VirialHandling::Pbc : VirialHandling::None);
        vsite->spreadForces(x, f, virialHandling, fshift, nullptr, nrnb, box, wcycle);
        forceWithShiftForces.haveSpreadVsiteForces() = true;
    if (stepWork.computeVirial)
        /* Calculation of the virial must be done after vsites! */
                0, mdatoms.homenr, as_rvec_array(x.data()), forceWithShiftForces, vir_force, box, nrnb, &fr, fr.pbcType);
//! Spread, compute virial for and sum forces, when necessary
static void postProcessForces(const t_commrec* cr,
                              gmx_wallcycle* wcycle,
                              ArrayRef<const RVec> x,
                              ForceOutputs* forceOutputs,
                              const t_mdatoms* mdatoms,
                              const t_forcerec* fr,
                              gmx::VirtualSitesHandler* vsite,
                              const StepWorkload& stepWork)
    // Extract the final output force buffer, which is also the buffer for forces with shift forces
    ArrayRef<RVec> f = forceOutputs->forceWithShiftForces().force();
    if (forceOutputs->haveForceWithVirial())
        auto& forceWithVirial = forceOutputs->forceWithVirial();
            /* Spread the mesh force on virtual sites to the other particles...
             * This is parallelized. MPI communication is performed
             * if the constructing atoms aren't local.
            GMX_ASSERT(!stepWork.computeVirial || f.data() != forceWithVirial.force_.data(),
                       "We need separate force buffers for shift and virial forces when "
                       "computing the virial");
            GMX_ASSERT(!stepWork.computeVirial
                               || forceOutputs->forceWithShiftForces().haveSpreadVsiteForces(),
                       "We should spread the force with shift forces separately when computing "
            const gmx::VirtualSitesHandler::VirialHandling virialHandling =
                    (stepWork.computeVirial ? gmx::VirtualSitesHandler::VirialHandling::NonLinear
                                            : gmx::VirtualSitesHandler::VirialHandling::None);
            matrix virial = { { 0 } };
            vsite->spreadForces(x, forceWithVirial.force_, virialHandling, {}, virial, nrnb, box, wcycle);
            forceWithVirial.addVirialContribution(virial);
        if (stepWork.computeVirial)
            /* Now add the forces, this is local */
            sum_forces(f, forceWithVirial.force_);
            /* Add the direct virial contributions */
                    forceWithVirial.computeVirial_,
                    "forceWithVirial should request virial computation when we request the virial");
            m_add(vir_force, forceWithVirial.getVirial(), vir_force);
        pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
    GMX_ASSERT(vsite == nullptr || forceOutputs->forceWithShiftForces().haveSpreadVsiteForces(),
               "We should have spread the vsite forces (earlier)");
    if (fr->print_force >= 0)
        print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
static void do_nb_verlet(t_forcerec* fr,
                         const interaction_const_t* ic,
                         gmx_enerdata_t* enerd,
                         const StepWorkload& stepWork,
                         const InteractionLocality ilocality,
                         gmx_wallcycle* wcycle)
    if (!stepWork.computeNonbondedForces)
        /* skip non-bonded calculation */
    nonbonded_verlet_t* nbv = fr->nbv.get();
    /* GPU kernel launch overhead is already timed separately */
        /* When dynamic pair-list pruning is requested, we need to prune
         * at nstlistPrune steps.
        if (nbv->isDynamicPruningStepCpu(step))
            /* Prune the pair-list beyond fr->ic->rlistPrune using
             * the current coordinates of the atoms.
            wallcycle_sub_start(wcycle, WallCycleSubCounter::NonbondedPruning);
            nbv->dispatchPruneKernelCpu(ilocality, fr->shift_vec);
            wallcycle_sub_stop(wcycle, WallCycleSubCounter::NonbondedPruning);
    nbv->dispatchNonbondedKernel(
            enerd->grpp.energyGroupPairTerms[fr->haveBuckingham ? NonBondedEnergyTerms::BuckinghamSR
                                                                : NonBondedEnergyTerms::LJSR],
            enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::CoulombSR],

static inline void clearRVecs(ArrayRef<RVec> v, const bool useOpenmpThreading)
    int nth = gmx_omp_nthreads_get_simple_rvec_task(ModuleMultiThread::Default, v.ssize());
    /* Note that we would like to avoid this conditional by putting it
     * into the omp pragma instead, but then we still take the full
     * omp parallel for overhead (at least with gcc5).
    if (!useOpenmpThreading || nth == 1)
#pragma omp parallel for num_threads(nth) schedule(static)
        for (gmx::index i = 0; i < v.ssize(); i++)
/*! \brief Return an estimate of the average kinetic energy or 0 when unreliable
 * \param groupOptions  Group options, containing T-coupling options
static real averageKineticEnergyEstimate(const t_grpopts& groupOptions)
    real nrdfCoupled   = 0;
    real nrdfUncoupled = 0;
    real kineticEnergy = 0;
    for (int g = 0; g < groupOptions.ngtc; g++)
        if (groupOptions.tau_t[g] >= 0)
            nrdfCoupled += groupOptions.nrdf[g];
            kineticEnergy += groupOptions.nrdf[g] * 0.5 * groupOptions.ref_t[g] * gmx::c_boltz;
            nrdfUncoupled += groupOptions.nrdf[g];
    /* This conditional with > also catches nrdf=0 */
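    /* Each T-coupled degree of freedom contributes 0.5*kB*T_ref to the sum above; the scaling
     * by (nrdfCoupled + nrdfUncoupled)/nrdfCoupled below extrapolates that estimate to the
     * uncoupled degrees of freedom, assuming they carry a similar amount of kinetic energy. */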
    if (nrdfCoupled > nrdfUncoupled)
        return kineticEnergy * (nrdfCoupled + nrdfUncoupled) / nrdfCoupled;
/*! \brief This routine checks that the potential energy is finite.
 * Always checks that the potential energy is finite. If step equals
 * inputrec.init_step also checks that the magnitude of the potential energy
 * is reasonable. Terminates with a fatal error when a check fails.
 * Note that passing this check does not guarantee finite forces,
 * since those use slightly different arithmetic. But in most cases
 * there is just a narrow coordinate range where forces are not finite
 * and energies are finite.
 * \param[in] step      The step number, used for checking and printing
 * \param[in] enerd     The energy data; the non-bonded group energies need to be added to
 *                      enerd.term[F_EPOT] before calling this routine
 * \param[in] inputrec  The input record
static void checkPotentialEnergyValidity(int64_t step, const gmx_enerdata_t& enerd, const t_inputrec& inputrec)
    /* Threshold valid for comparing absolute potential energy against
     * the kinetic energy. Normally one should not consider absolute
     * potential energy values, but with a factor of one million
     * we should never get false positives.
    constexpr real c_thresholdFactor = 1e6;
    bool energyIsNotFinite    = !std::isfinite(enerd.term[F_EPOT]);
    real averageKineticEnergy = 0;
    /* We only check for large potential energy at the initial step,
     * because that is by far the most likely step for this to occur
     * and because computing the average kinetic energy is not free.
     * Note: nstcalcenergy >> 1 often does not allow catching large energies
     * before they become NaN.
    if (step == inputrec.init_step && EI_DYNAMICS(inputrec.eI))
        averageKineticEnergy = averageKineticEnergyEstimate(inputrec.opts);
    if (energyIsNotFinite
        || (averageKineticEnergy > 0 && enerd.term[F_EPOT] > c_thresholdFactor * averageKineticEnergy))
                  ": The total potential energy is %g, which is %s. The LJ and electrostatic "
                  "contributions to the energy are %g and %g, respectively. A %s potential energy "
                  "can be caused by overlapping interactions in bonded interactions or very large%s "
                  "coordinate values. Usually this is caused by a badly- or non-equilibrated initial "
                  "configuration, incorrect interactions or parameters in the topology.",
                  energyIsNotFinite ? "not finite" : "extremely high",
                  enerd.term[F_COUL_SR],
                  energyIsNotFinite ? "non-finite" : "very high",
                  energyIsNotFinite ? " or NaN" : "");
/*! \brief Return true if there are special forces computed this step.
 * The conditionals exactly correspond to those in computeSpecialForces().
static bool haveSpecialForces(const t_inputrec& inputrec,
                              const gmx::ForceProviders& forceProviders,
                              const pull_t* pull_work,
                              const bool computeForces,
    return ((computeForces && forceProviders.hasForceProvider()) || // forceProviders
            (inputrec.bPull && pull_have_potential(*pull_work)) ||  // pull
            inputrec.bRot ||                                        // enforced rotation
            (ed != nullptr) ||                                      // flooding
            (inputrec.bIMD && computeForces));                      // IMD
/*! \brief Compute forces and/or energies for special algorithms
 * The intention is to collect all calls to algorithms that compute
 * forces on local atoms only and that do not contribute to the local
 * virial sum (but add their virial contribution separately).
 * Eventually these should likely all become ForceProviders.
 * Within this function the intention is to have algorithms that do
 * global communication at the end, so global barriers within the MD loop
 * are as close together as possible.
 * \param[in]     fplog            The log file
 * \param[in]     cr               The communication record
 * \param[in]     inputrec         The input record
 * \param[in]     awh              The Awh module (nullptr if none in use).
 * \param[in]     enforcedRotation Enforced rotation module.
 * \param[in]     imdSession       The IMD session
 * \param[in]     pull_work        The pull work structure.
 * \param[in]     step             The current MD step
 * \param[in]     t                The current time
 * \param[in,out] wcycle           Wallcycle accounting struct
 * \param[in,out] forceProviders   Pointer to a list of force providers
 * \param[in]     box              The unit cell
 * \param[in]     x                The coordinates
 * \param[in]     mdatoms          Per atom properties
 * \param[in]     lambda           Array of free-energy lambda values
 * \param[in]     stepWork         Step schedule flags
 * \param[in,out] forceWithVirialMtsLevel0 Force and virial for MTS level0 forces
 * \param[in,out] forceWithVirialMtsLevel1 Force and virial for MTS level1 forces, can be nullptr
 * \param[in,out] enerd            Energy buffer
 * \param[in,out] ed               Essential dynamics pointer
 * \param[in]     didNeighborSearch Tells if we did neighbor searching this step, used for ED sampling
 * \todo Remove didNeighborSearch, which is used incorrectly.
 * \todo Convert all other algorithms called here to ForceProviders.
static void computeSpecialForces(FILE* fplog,
                                 const t_inputrec& inputrec,
                                 gmx_enfrot* enforcedRotation,
                                 gmx::ImdSession* imdSession,
                                 gmx_wallcycle* wcycle,
                                 gmx::ForceProviders* forceProviders,
                                 gmx::ArrayRef<const gmx::RVec> x,
                                 const t_mdatoms* mdatoms,
                                 gmx::ArrayRef<const real> lambda,
                                 const StepWorkload& stepWork,
                                 gmx::ForceWithVirial* forceWithVirialMtsLevel0,
                                 gmx::ForceWithVirial* forceWithVirialMtsLevel1,
                                 gmx_enerdata_t* enerd,
                                 bool didNeighborSearch)
    /* NOTE: Currently all ForceProviders only provide forces.
     * When they also provide energies, remove this conditional.
    if (stepWork.computeForces)
        gmx::ForceProviderInput forceProviderInput(
                gmx::arrayRefFromArray(mdatoms->chargeA, mdatoms->homenr),
                gmx::arrayRefFromArray(mdatoms->massT, mdatoms->homenr),
        gmx::ForceProviderOutput forceProviderOutput(forceWithVirialMtsLevel0, enerd);
        /* Collect forces from modules */
        forceProviders->calculateForces(forceProviderInput, &forceProviderOutput);
    if (inputrec.bPull && pull_have_potential(*pull_work))
        const int mtsLevel = forceGroupMtsLevel(inputrec.mtsLevels, gmx::MtsForceGroups::Pull);
        if (mtsLevel == 0 || stepWork.computeSlowForces)
            auto& forceWithVirial = (mtsLevel == 0) ? forceWithVirialMtsLevel0 : forceWithVirialMtsLevel1;
            pull_potential_wrapper(
                    cr, inputrec, box, x, forceWithVirial, mdatoms, enerd, pull_work, lambda.data(), t, wcycle);
        const int mtsLevel = forceGroupMtsLevel(inputrec.mtsLevels, gmx::MtsForceGroups::Pull);
        if (mtsLevel == 0 || stepWork.computeSlowForces)
            const bool needForeignEnergyDifferences = awh->needForeignEnergyDifferences(step);
            std::vector<double> foreignLambdaDeltaH, foreignLambdaDhDl;
            if (needForeignEnergyDifferences)
                enerd->foreignLambdaTerms.finalizePotentialContributions(
                        enerd->dvdl_lin, lambda, *inputrec.fepvals);
                std::tie(foreignLambdaDeltaH, foreignLambdaDhDl) = enerd->foreignLambdaTerms.getTerms(cr);
            auto& forceWithVirial = (mtsLevel == 0) ? forceWithVirialMtsLevel0 : forceWithVirialMtsLevel1;
            enerd->term[F_COM_PULL] += awh->applyBiasForcesAndUpdateBias(
                    gmx::arrayRefFromArray(mdatoms->massT, mdatoms->nr),
    /* Add the forces from enforced rotation potentials (if any) */
        wallcycle_start(wcycle, WallCycleCounter::RotAdd);
        enerd->term[F_COM_PULL] +=
                add_rot_forces(enforcedRotation, forceWithVirialMtsLevel0->force_, cr, step, t);
        wallcycle_stop(wcycle, WallCycleCounter::RotAdd);
        /* Note that since init_edsam() is called after the initialization
         * of forcerec, edsam doesn't request the noVirSum force buffer.
         * Thus if no other algorithm (e.g. PME) requires it, the forces
         * here will contribute to the virial.
        do_flood(cr, inputrec, x, forceWithVirialMtsLevel0->force_, ed, box, step, didNeighborSearch);
    /* Add forces from interactive molecular dynamics (IMD), if any */
    if (inputrec.bIMD && stepWork.computeForces)
        imdSession->applyForces(forceWithVirialMtsLevel0->force_);
/*! \brief Launch the prepare_step and spread stages of PME GPU.
 * \param[in]  pmedata          The PME structure
 * \param[in]  box              The box matrix
 * \param[in]  stepWork         Step schedule flags
 * \param[in]  xReadyOnDevice   Event synchronizer indicating that the coordinates are ready in the device memory.
 * \param[in]  lambdaQ          The Coulomb lambda of the current state.
 * \param[in]  wcycle           The wallcycle structure
static inline void launchPmeGpuSpread(gmx_pme_t* pmedata,
                                      const StepWorkload& stepWork,
                                      GpuEventSynchronizer* xReadyOnDevice,
                                      gmx_wallcycle* wcycle)
    pme_gpu_prepare_computation(pmedata, box, wcycle, stepWork);
    bool                           useGpuDirectComm         = false;
    gmx::PmeCoordinateReceiverGpu* pmeCoordinateReceiverGpu = nullptr;
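    // PME spread is launched locally here, so GPU-direct PME-PP communication does not apply;
    // the two variables above are passed to the spread launch as disabled/null placeholders.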
    pme_gpu_launch_spread(
            pmedata, xReadyOnDevice, wcycle, lambdaQ, useGpuDirectComm, pmeCoordinateReceiverGpu);

/*! \brief Launch the FFT and gather stages of PME GPU
 * This function only implements setting the output forces (no accumulation).
 * \param[in]  pmedata        The PME structure
 * \param[in]  lambdaQ        The Coulomb lambda of the current system state.
 * \param[in]  wcycle         The wallcycle structure
 * \param[in]  stepWork       Step schedule flags
static void launchPmeGpuFftAndGather(gmx_pme_t* pmedata,
                                     gmx_wallcycle* wcycle,
                                     const gmx::StepWorkload& stepWork)
    pme_gpu_launch_complex_transforms(pmedata, wcycle, stepWork);
    pme_gpu_launch_gather(pmedata, wcycle, lambdaQ);
 * Polling wait for either of the PME or nonbonded GPU tasks.
 * Instead of a static order in waiting for GPU tasks, this function
 * polls checking which of the two tasks completes first, and does the
 * associated force buffer reduction overlapped with the other task.
 * By doing that, unlike static scheduling order, it can always overlap
 * one of the reductions, regardless of the GPU task completion order.
 * \param[in]     nbv                   Nonbonded verlet structure
 * \param[in,out] pmedata               PME module data
 * \param[in,out] forceOutputsNonbonded Force outputs for the non-bonded forces and shift forces
 * \param[in,out] forceOutputsPme       Force outputs for the PME forces and virial
 * \param[in,out] enerd                 Energy data structure results are reduced into
 * \param[in]     lambdaQ               The Coulomb lambda of the current system state.
 * \param[in]     stepWork              Step schedule flags
 * \param[in]     wcycle                The wallcycle structure
static void alternatePmeNbGpuWaitReduce(nonbonded_verlet_t* nbv,
                                        gmx::ForceOutputs* forceOutputsNonbonded,
                                        gmx::ForceOutputs* forceOutputsPme,
                                        gmx_enerdata_t* enerd,
                                        const StepWorkload& stepWork,
                                        gmx_wallcycle* wcycle)
    bool isPmeGpuDone = false;
    bool isNbGpuDone  = false;
    gmx::ArrayRef<const gmx::RVec> pmeGpuForces;
    while (!isPmeGpuDone || !isNbGpuDone)
        GpuTaskCompletion completionType =
                (isNbGpuDone) ? GpuTaskCompletion::Wait : GpuTaskCompletion::Check;
        isPmeGpuDone = pme_gpu_try_finish_task(
                pmedata, stepWork, wcycle, &forceOutputsPme->forceWithVirial(), enerd, lambdaQ, completionType);
            auto& forceBuffersNonbonded = forceOutputsNonbonded->forceWithShiftForces();
            GpuTaskCompletion completionType =
                    (isPmeGpuDone) ? GpuTaskCompletion::Wait : GpuTaskCompletion::Check;
            isNbGpuDone = Nbnxm::gpu_try_finish_task(
                    enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::LJSR].data(),
                    enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::CoulombSR].data(),
                    forceBuffersNonbonded.shiftForces(),
                nbv->atomdata_add_nbat_f_to_f(AtomLocality::Local, forceBuffersNonbonded.force());
/*! \brief Set up the different force buffers; also does clearing.
 * \param[in]  forceHelperBuffers        Helper force buffers
 * \param[in]  force                     force array
 * \param[in]  domainWork                Domain lifetime workload flags
 * \param[in]  stepWork                  Step schedule flags
 * \param[in]  havePpDomainDecomposition Whether we have a PP domain decomposition
 * \param[out] wcycle                    wallcycle recording structure
 * \returns                              Cleared force output structure
static ForceOutputs setupForceOutputs(ForceHelperBuffers* forceHelperBuffers,
                                      gmx::ArrayRefWithPadding<gmx::RVec> force,
                                      const DomainLifetimeWorkload& domainWork,
                                      const StepWorkload& stepWork,
                                      const bool havePpDomainDecomposition,
                                      gmx_wallcycle* wcycle)
    wallcycle_sub_start(wcycle, WallCycleSubCounter::ClearForceBuffer);
    /* NOTE: We assume fr->shiftForces is all zeros here */
    gmx::ForceWithShiftForces forceWithShiftForces(
            force, stepWork.computeVirial, forceHelperBuffers->shiftForces());
    if (stepWork.computeForces
        && (domainWork.haveCpuLocalForceWork || !stepWork.useGpuFBufferOps
            || (havePpDomainDecomposition && !stepWork.useGpuFHalo)))
        /* Clear the short- and long-range forces */
        clearRVecs(forceWithShiftForces.force(), true);
        /* Clear the shift forces */
        clearRVecs(forceWithShiftForces.shiftForces(), false);
    /* If we need to compute the virial, we might need a separate
     * force buffer for algorithms for which the virial is calculated
     * directly, such as PME. Otherwise, forceWithVirial uses
     * the same force (f in legacy calls) buffer as other algorithms.
    const bool useSeparateForceWithVirialBuffer =
            (stepWork.computeForces
             && (stepWork.computeVirial && forceHelperBuffers->haveDirectVirialContributions()));
    /* forceWithVirial uses the local atom range only */
    gmx::ForceWithVirial forceWithVirial(
            useSeparateForceWithVirialBuffer ? forceHelperBuffers->forceBufferForDirectVirialContributions()
                                             : force.unpaddedArrayRef(),
            stepWork.computeVirial);
    if (useSeparateForceWithVirialBuffer)
        /* TODO: update comment
         * We only compute forces on local atoms. Note that vsites can
         * spread to non-local atoms, but that part of the buffer is
         * cleared separately in the vsite spreading code.
        clearRVecs(forceWithVirial.force_, true);
    wallcycle_sub_stop(wcycle, WallCycleSubCounter::ClearForceBuffer);
            forceWithShiftForces, forceHelperBuffers->haveDirectVirialContributions(), forceWithVirial);
/*! \brief Set up flags that have the lifetime of the domain indicating what type of work is there to compute.
static DomainLifetimeWorkload setupDomainLifetimeWorkload(const t_inputrec& inputrec,
                                                          const t_forcerec& fr,
                                                          const pull_t* pull_work,
                                                          const t_mdatoms& mdatoms,
                                                          const SimulationWorkload& simulationWork,
                                                          const StepWorkload& stepWork)
    DomainLifetimeWorkload domainWork;
    // Note that haveSpecialForces is constant over the whole run
    domainWork.haveSpecialForces =
            haveSpecialForces(inputrec, *fr.forceProviders, pull_work, stepWork.computeForces, ed);
    domainWork.haveCpuListedForceWork = false;
    domainWork.haveCpuBondedWork      = false;
    for (const auto& listedForces : fr.listedForces)
        if (listedForces.haveCpuListedForces(*fr.fcdata))
            domainWork.haveCpuListedForceWork = true;
        if (listedForces.haveCpuBondeds())
            domainWork.haveCpuBondedWork = true;
    domainWork.haveGpuBondedWork =
            ((fr.listedForcesGpu != nullptr) && fr.listedForcesGpu->haveInteractions());
    // Note that haveFreeEnergyWork is constant over the whole run
    domainWork.haveFreeEnergyWork =
            (fr.efep != FreeEnergyPerturbationType::No && mdatoms.nPerturbed != 0);
    // We assume we have local force work if there are CPU
    // force tasks including PME or nonbondeds.
    domainWork.haveCpuLocalForceWork =
            domainWork.haveSpecialForces || domainWork.haveCpuListedForceWork
            || domainWork.haveFreeEnergyWork || simulationWork.useCpuNonbonded || simulationWork.useCpuPme
            || simulationWork.haveEwaldSurfaceContribution || inputrec.nwall > 0;
    domainWork.haveLocalForceContribInCpuBuffer =
            domainWork.haveCpuLocalForceWork || simulationWork.havePpDomainDecomposition;
    domainWork.haveNonLocalForceContribInCpuBuffer =
            domainWork.haveCpuBondedWork || domainWork.haveFreeEnergyWork;
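    // These two flags record whether the host-side force buffer will hold local/non-local
    // contributions that a later GPU force reduction or halo exchange has to pick up; see their
    // use when the GPU force reductions are set up below.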
/*! \brief Set up force flag struct from the force bitmask.
 * \param[in]      legacyFlags        Force bitmask flags used to construct the new flags
 * \param[in]      mtsLevels          The multiple time-stepping levels, either empty or 2 levels
 * \param[in]      step               The current MD step
 * \param[in]      simulationWork     Simulation workload description.
 * \returns New StepWorkload description.
static StepWorkload setupStepWorkload(const int legacyFlags,
                                      ArrayRef<const gmx::MtsLevel> mtsLevels,
                                      const SimulationWorkload& simulationWork)
    GMX_ASSERT(mtsLevels.empty() || mtsLevels.size() == 2, "Expect 0 or 2 MTS levels");
    const bool computeSlowForces = (mtsLevels.empty() || step % mtsLevels[1].stepFactor == 0);
    flags.stateChanged                  = ((legacyFlags & GMX_FORCE_STATECHANGED) != 0);
    flags.haveDynamicBox                = ((legacyFlags & GMX_FORCE_DYNAMICBOX) != 0);
    flags.doNeighborSearch              = ((legacyFlags & GMX_FORCE_NS) != 0);
    flags.computeSlowForces             = computeSlowForces;
    flags.computeVirial                 = ((legacyFlags & GMX_FORCE_VIRIAL) != 0);
    flags.computeEnergy                 = ((legacyFlags & GMX_FORCE_ENERGY) != 0);
    flags.computeForces                 = ((legacyFlags & GMX_FORCE_FORCES) != 0);
    flags.useOnlyMtsCombinedForceBuffer = ((legacyFlags & GMX_FORCE_DO_NOT_NEED_NORMAL_FORCE) != 0);
    flags.computeListedForces           = ((legacyFlags & GMX_FORCE_LISTED) != 0);
    flags.computeNonbondedForces =
            ((legacyFlags & GMX_FORCE_NONBONDED) != 0) && simulationWork.computeNonbonded
            && !(simulationWork.computeNonbondedAtMtsLevel1 && !computeSlowForces);
    flags.computeDhdl = ((legacyFlags & GMX_FORCE_DHDL) != 0);
    if (simulationWork.useGpuBufferOps)
        GMX_ASSERT(simulationWork.useGpuNonbonded,
                   "Can only offload buffer ops if nonbonded computation is also offloaded");
    flags.useGpuXBufferOps = simulationWork.useGpuBufferOps;
    // on virial steps the CPU reduction path is taken
    flags.useGpuFBufferOps       = simulationWork.useGpuBufferOps && !flags.computeVirial;
    const bool rankHasGpuPmeTask = simulationWork.useGpuPme && !simulationWork.haveSeparatePmeRank;
    flags.useGpuPmeFReduction    = flags.computeSlowForces && flags.useGpuFBufferOps
                                && (rankHasGpuPmeTask || simulationWork.useGpuPmePpCommunication);
    flags.useGpuXHalo          = simulationWork.useGpuHaloExchange && !flags.doNeighborSearch;
    flags.useGpuFHalo          = simulationWork.useGpuHaloExchange && flags.useGpuFBufferOps;
    flags.haveGpuPmeOnThisRank = rankHasGpuPmeTask && flags.computeSlowForces;
    flags.combineMtsForcesBeforeHaloExchange =
            (flags.computeForces && simulationWork.useMts && flags.computeSlowForces
             && flags.useOnlyMtsCombinedForceBuffer
             && !(flags.computeVirial || simulationWork.useGpuNonbonded || flags.haveGpuPmeOnThisRank));
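    // Combining the MTS level0 and level1 forces before the halo exchange means only the
    // combined buffer has to be communicated; the condition above limits this to steps where
    // only the combined buffer is needed and no virial or GPU force path requires the buffers
    // to be reduced separately.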
/* \brief Launch end-of-step GPU tasks: buffer clearing and rolling pruning.
static void launchGpuEndOfStepTasks(nonbonded_verlet_t* nbv,
                                    gmx::ListedForcesGpu* listedForcesGpu,
                                    gmx_enerdata_t* enerd,
                                    const gmx::MdrunScheduleWorkload& runScheduleWork,
                                    gmx_wallcycle* wcycle)
    if (runScheduleWork.simulationWork.useGpuNonbonded && runScheduleWork.stepWork.computeNonbondedForces)
        /* Launch pruning before buffer clearing because the API overhead of the
         * clear kernel launches can leave the GPU idle while it could be running
        if (nbv->isDynamicPruningStepGpu(step))
            nbv->dispatchPruneKernelGpu(step);
        /* now clear the GPU outputs while we finish the step on the CPU */
        wallcycle_start_nocount(wcycle, WallCycleCounter::LaunchGpu);
        wallcycle_sub_start_nocount(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
        Nbnxm::gpu_clear_outputs(nbv->gpu_nbv, runScheduleWork.stepWork.computeVirial);
        wallcycle_sub_stop(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
        wallcycle_stop(wcycle, WallCycleCounter::LaunchGpu);
    if (runScheduleWork.stepWork.haveGpuPmeOnThisRank)
        pme_gpu_reinit_computation(pmedata, wcycle);
    if (runScheduleWork.domainWork.haveGpuBondedWork && runScheduleWork.stepWork.computeEnergy)
        // in principle this should be included in the DD balancing region,
        // but generally it is infrequent so we'll omit it for the sake of
        listedForcesGpu->waitAccumulateEnergyTerms(enerd);
        listedForcesGpu->clearEnergies();
//! \brief Data structure to hold dipole-related data and staging arrays
    //! Dipole staging for fast summing over MPI
    gmx::DVec muStaging[2] = { { 0.0, 0.0, 0.0 } };
    //! Dipole staging for states A and B (index 0 and 1 resp.)
    gmx::RVec muStateAB[2] = { { 0.0_real, 0.0_real, 0.0_real } };

static void reduceAndUpdateMuTot(DipoleData* dipoleData,
                                 const t_commrec* cr,
                                 const bool haveFreeEnergy,
                                 gmx::ArrayRef<const real> lambda,
                                 const DDBalanceRegionHandler& ddBalanceRegionHandler)
        gmx_sumd(2 * DIM, dipoleData->muStaging[0], cr);
        ddBalanceRegionHandler.reopenRegionCpu();
        for (int i = 0; i < 2; i++)
            for (int j = 0; j < DIM; j++)
                dipoleData->muStateAB[i][j] = dipoleData->muStaging[i][j];
    if (!haveFreeEnergy)
        copy_rvec(dipoleData->muStateAB[0], muTotal);
        for (int j = 0; j < DIM; j++)
            muTotal[j] = (1.0 - lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)])
                                 * dipoleData->muStateAB[0][j]
                         + lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)]
                                   * dipoleData->muStateAB[1][j];
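    /* With free-energy perturbation the total dipole reported above is a linear interpolation
     * between the state A and state B dipoles, using the Coulomb coupling lambda. */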
/*! \brief Combines MTS level0 and level1 force buffers into a full and MTS-combined force buffer.
 * \param[in]     numAtoms        The number of atoms to combine forces for
 * \param[in,out] forceMtsLevel0  Input: F_level0, output: F_level0 + F_level1
 * \param[in,out] forceMts        Input: F_level1, output: F_level0 + mtsFactor * F_level1
 * \param[in]     mtsFactor       The factor between the level0 and level1 time step
static void combineMtsForces(const int numAtoms,
                             ArrayRef<RVec> forceMtsLevel0,
                             ArrayRef<RVec> forceMts,
                             const real mtsFactor)
    const int gmx_unused numThreads = gmx_omp_nthreads_get(ModuleMultiThread::Default);
#pragma omp parallel for num_threads(numThreads) schedule(static)
    for (int i = 0; i < numAtoms; i++)
        const RVec forceMtsLevel0Tmp = forceMtsLevel0[i];
        forceMtsLevel0[i] += forceMts[i];
        forceMts[i] = forceMtsLevel0Tmp + mtsFactor * forceMts[i];
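    /* Example: with mtsFactor = 2 the level1 (slow) forces are only evaluated every second step,
     * so forceMts ends up holding F_level0 + 2*F_level1; the doubled level1 contribution
     * compensates for the steps on which it is not applied. */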
/*! \brief Setup for the local and non-local GPU force reductions:
 * reinitialization plus the registration of forces and dependencies.
 * \param [in] runScheduleWork               Schedule workload flag structure
 * \param [in] cr                            Communication record object
 * \param [in] fr                            Force record object
static void setupGpuForceReductions(gmx::MdrunScheduleWorkload* runScheduleWork,
                                    const t_commrec* cr,
    nonbonded_verlet_t*          nbv      = fr->nbv.get();
    gmx::StatePropagatorDataGpu* stateGpu = fr->stateGpu;
    // (re-)initialize local GPU force reduction
    const bool accumulate = runScheduleWork->domainWork.haveCpuLocalForceWork
                            || runScheduleWork->simulationWork.havePpDomainDecomposition;
    const int atomStart = 0;
    fr->gpuForceReduction[gmx::AtomLocality::Local]->reinit(
            stateGpu->getForces(),
            nbv->getNumAtoms(AtomLocality::Local),
            nbv->getGridIndices(),
            stateGpu->fReducedOnDevice(AtomLocality::Local));
    // register forces and add dependencies
    fr->gpuForceReduction[gmx::AtomLocality::Local]->registerNbnxmForce(Nbnxm::gpu_get_f(nbv->gpu_nbv));
    if (runScheduleWork->simulationWork.useGpuPme
        && (!runScheduleWork->simulationWork.haveSeparatePmeRank
            || runScheduleWork->simulationWork.useGpuPmePpCommunication))
        DeviceBuffer<gmx::RVec> forcePtr =
                runScheduleWork->simulationWork.haveSeparatePmeRank
                        ? fr->pmePpCommGpu->getGpuForceStagingPtr() // buffer received from other GPU
                        : pme_gpu_get_device_f(fr->pmedata);        // PME force buffer on same GPU
        fr->gpuForceReduction[gmx::AtomLocality::Local]->registerRvecForce(forcePtr);
        if (runScheduleWork->simulationWork.haveSeparatePmeRank)
            // PME force buffer on remote GPU -
            // event synchronizer received from other GPU only in case of thread-mpi
                GpuEventSynchronizer* const pmeSynchronizer =
                        fr->pmePpCommGpu->getForcesReadySynchronizer();
                GMX_ASSERT(pmeSynchronizer != nullptr,
                           "PME force ready cuda event should not be NULL");
                fr->gpuForceReduction[gmx::AtomLocality::Local]->addDependency(pmeSynchronizer);
            // PME force buffer on same GPU - add dependency on PME force computation
            GpuEventSynchronizer* const pmeSynchronizer = pme_gpu_get_f_ready_synchronizer(fr->pmedata);
            GMX_ASSERT(pmeSynchronizer != nullptr, "PME force ready cuda event should not be NULL");
            fr->gpuForceReduction[gmx::AtomLocality::Local]->addDependency(pmeSynchronizer);
    if (runScheduleWork->domainWork.haveCpuLocalForceWork
        || (runScheduleWork->simulationWork.havePpDomainDecomposition
            && !runScheduleWork->simulationWork.useGpuHaloExchange))
        fr->gpuForceReduction[gmx::AtomLocality::Local]->addDependency(
                stateGpu->fReadyOnDevice(AtomLocality::Local));
    if (runScheduleWork->simulationWork.useGpuHaloExchange)
        fr->gpuForceReduction[gmx::AtomLocality::Local]->addDependency(
                cr->dd->gpuHaloExchange[0][0]->getForcesReadyOnDeviceEvent());
    if (runScheduleWork->simulationWork.havePpDomainDecomposition)
        // (re-)initialize non-local GPU force reduction
        const bool accumulate = runScheduleWork->domainWork.haveCpuBondedWork
                                || runScheduleWork->domainWork.haveFreeEnergyWork;
        const int atomStart = dd_numHomeAtoms(*cr->dd);
        fr->gpuForceReduction[gmx::AtomLocality::NonLocal]->reinit(
                stateGpu->getForces(),
                nbv->getNumAtoms(AtomLocality::NonLocal),
                nbv->getGridIndices(),
                stateGpu->fReducedOnDevice(AtomLocality::NonLocal));
        // register forces and add dependencies
        fr->gpuForceReduction[gmx::AtomLocality::NonLocal]->registerNbnxmForce(
                Nbnxm::gpu_get_f(nbv->gpu_nbv));
        if (runScheduleWork->domainWork.haveNonLocalForceContribInCpuBuffer)
            fr->gpuForceReduction[gmx::AtomLocality::NonLocal]->addDependency(
                    stateGpu->fReadyOnDevice(AtomLocality::NonLocal));

/*! \brief Return the number of local atoms.
static int getLocalAtomCount(const gmx_domdec_t* dd, const t_mdatoms& mdatoms, bool havePPDomainDecomposition)
    GMX_ASSERT(!(havePPDomainDecomposition && (dd == nullptr)),
               "Can't have PP decomposition with dd uninitialized!");
    return havePPDomainDecomposition ? dd_numAtomsZones(*dd) : mdatoms.homenr;
void do_force(FILE* fplog,
              const t_commrec* cr,
              const gmx_multisim_t* ms,
              const t_inputrec& inputrec,
              gmx_enfrot* enforcedRotation,
              gmx::ImdSession* imdSession,
              gmx_wallcycle* wcycle,
              const gmx_localtop_t* top,
              gmx::ArrayRefWithPadding<gmx::RVec> x,
              const history_t* hist,
              gmx::ForceBuffersView* forceView,
              const t_mdatoms* mdatoms,
              gmx_enerdata_t* enerd,
              gmx::ArrayRef<const real> lambda,
              gmx::MdrunScheduleWorkload* runScheduleWork,
              gmx::VirtualSitesHandler* vsite,
              CpuPpLongRangeNonbondeds* longRangeNonbondeds,
              const DDBalanceRegionHandler& ddBalanceRegionHandler)
    auto force = forceView->forceWithPadding();
    GMX_ASSERT(force.unpaddedArrayRef().ssize() >= fr->natoms_force_constr,
               "The size of the force buffer should be at least the number of atoms to compute "
    nonbonded_verlet_t*  nbv = fr->nbv.get();
    interaction_const_t* ic  = fr->ic.get();
    gmx::StatePropagatorDataGpu* stateGpu = fr->stateGpu;
    const SimulationWorkload& simulationWork = runScheduleWork->simulationWork;
    runScheduleWork->stepWork = setupStepWorkload(legacyFlags, inputrec.mtsLevels, step, simulationWork);
    const StepWorkload& stepWork = runScheduleWork->stepWork;
    if (stepWork.useGpuFHalo && !runScheduleWork->domainWork.haveCpuLocalForceWork)
        // GPU Force halo exchange will set a subset of local atoms with remote non-local data
        // First clear local portion of force array, so that untouched atoms are zero.
        // The dependency for this is that forces from previous timestep have been consumed,
        // which is satisfied when getCoordinatesReadyOnDeviceEvent has been marked.
        stateGpu->clearForcesOnGpu(AtomLocality::Local,
                                   stateGpu->getCoordinatesReadyOnDeviceEvent(
                                           AtomLocality::Local, simulationWork, stepWork));
    /* At a search step we need to start the first balancing region
     * somewhere early inside the step after communication during domain
     * decomposition (and not during the previous step as usual).
    if (stepWork.doNeighborSearch)
        ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::yes);
    clear_mat(vir_force);
    if (fr->pbcType != PbcType::No)
        /* Compute shift vectors every step,
         * because of pressure coupling or box deformation!
        if (stepWork.haveDynamicBox && stepWork.stateChanged)
            calc_shifts(box, fr->shift_vec);
        const bool fillGrid = (stepWork.doNeighborSearch && stepWork.stateChanged);
        const bool calcCGCM = (fillGrid && !haveDDAtomOrdering(*cr));
            put_atoms_in_box_omp(fr->pbcType,
                                 x.unpaddedArrayRef().subArray(0, mdatoms->homenr),
                                 gmx_omp_nthreads_get(ModuleMultiThread::Default));
            inc_nrnb(nrnb, eNR_SHIFTX, mdatoms->homenr);
    nbnxn_atomdata_copy_shiftvec(stepWork.haveDynamicBox, fr->shift_vec, nbv->nbat.get());
    const bool pmeSendCoordinatesFromGpu =
            simulationWork.useGpuPmePpCommunication && !(stepWork.doNeighborSearch);
    const bool reinitGpuPmePpComms =
            simulationWork.useGpuPmePpCommunication && (stepWork.doNeighborSearch);
    auto* localXReadyOnDevice = (stepWork.haveGpuPmeOnThisRank || simulationWork.useGpuBufferOps)
                                        ? stateGpu->getCoordinatesReadyOnDeviceEvent(
                                                AtomLocality::Local, simulationWork, stepWork)
    GMX_ASSERT(simulationWork.useGpuHaloExchange
                       == ((cr->dd != nullptr) && (!cr->dd->gpuHaloExchange[0].empty())),
               "The GPU halo exchange is active, but it has not been constructed.");
    bool gmx_used_in_debug haveCopiedXFromGpu = false;
    // Copy coordinate from the GPU if update is on the GPU and there
    // are forces to be computed on the CPU, or for the computation of
    // virial, or if host-side data will be transferred from this task
    // to a remote task for halo exchange or PME-PP communication. At
    // search steps the current coordinates are already on the host,
    // hence copy is not needed.
    if (simulationWork.useGpuUpdate && !stepWork.doNeighborSearch
        && (runScheduleWork->domainWork.haveCpuLocalForceWork || stepWork.computeVirial
            || simulationWork.useCpuPmePpCommunication || simulationWork.useCpuHaloExchange
            || simulationWork.computeMuTot))
        stateGpu->copyCoordinatesFromGpu(x.unpaddedArrayRef(), AtomLocality::Local);
        haveCopiedXFromGpu = true;
    // Coordinates on the device are needed if PME or BufferOps are offloaded.
    // The local coordinates can be copied right away.
    // NOTE: Consider moving this copy to right after they are updated and constrained,
    //       if the latter is not offloaded.
    if (stepWork.haveGpuPmeOnThisRank || stepWork.useGpuXBufferOps)
        if (stepWork.doNeighborSearch)
            // TODO refactor this to do_md, after partitioning.
            stateGpu->reinit(mdatoms->homenr,
                             getLocalAtomCount(cr->dd, *mdatoms, simulationWork.havePpDomainDecomposition));
            if (stepWork.haveGpuPmeOnThisRank)
                // TODO: This should be moved into PME setup function ( pme_gpu_prepare_computation(...) )
                pme_gpu_set_device_x(fr->pmedata, stateGpu->getCoordinates());
        // We need to copy coordinates when:
        // 1. Update is not offloaded
        // 2. The buffers were reinitialized on search step
        if (!simulationWork.useGpuUpdate || stepWork.doNeighborSearch)
            GMX_ASSERT(stateGpu != nullptr, "stateGpu should not be null");
            stateGpu->copyCoordinatesToGpu(x.unpaddedArrayRef(), AtomLocality::Local);
            if (stepWork.doNeighborSearch)
                /* On NS steps, we skip X buffer ops. So, unless we use PME or direct GPU
                 * communications, we don't wait for the coordinates on the device,
                 * and we must consume the event here.
                const bool eventWillBeConsumedByGpuPme = stepWork.haveGpuPmeOnThisRank;
                const bool eventWillBeConsumedByGpuPmePPComm =
                        (simulationWork.haveSeparatePmeRank && stepWork.computeSlowForces)
                        && pmeSendCoordinatesFromGpu;
                if (!eventWillBeConsumedByGpuPme && !eventWillBeConsumedByGpuPmePPComm)
                    stateGpu->consumeCoordinatesCopiedToDeviceEvent(AtomLocality::Local);
    if (simulationWork.haveSeparatePmeRank && stepWork.computeSlowForces)
        /* Send particle coordinates to the pme nodes */
        if (!pmeSendCoordinatesFromGpu && !stepWork.doNeighborSearch && simulationWork.useGpuUpdate)
            GMX_ASSERT(haveCopiedXFromGpu,
                       "a wait should only be triggered if copy has been scheduled");
            stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
        gmx_pme_send_coordinates(fr,
                                 x.unpaddedArrayRef(),
                                 lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)],
                                 lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Vdw)],
                                 (stepWork.computeVirial || stepWork.computeEnergy),
                                 simulationWork.useGpuPmePpCommunication,
                                 reinitGpuPmePpComms,
                                 pmeSendCoordinatesFromGpu,
                                 stepWork.useGpuPmeFReduction,
                                 localXReadyOnDevice,
    if (stepWork.haveGpuPmeOnThisRank)
        launchPmeGpuSpread(fr->pmedata,
                           localXReadyOnDevice,
                           lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)],
    const gmx::DomainLifetimeWorkload& domainWork = runScheduleWork->domainWork;
    /* do gridding for pair search */
    if (stepWork.doNeighborSearch)
        if (fr->wholeMoleculeTransform && stepWork.stateChanged)
            fr->wholeMoleculeTransform->updateForAtomPbcJumps(x.unpaddedArrayRef(), box);
        wallcycle_start(wcycle, WallCycleCounter::NS);
        if (!haveDDAtomOrdering(*cr))
            const rvec vzero       = { 0.0_real, 0.0_real, 0.0_real };
            const rvec boxDiagonal = { box[XX][XX], box[YY][YY], box[ZZ][ZZ] };
            wallcycle_sub_start(wcycle, WallCycleSubCounter::NBSGridLocal);
            nbnxn_put_on_grid(nbv,
                              { 0, mdatoms->homenr },
                              x.unpaddedArrayRef(),
            wallcycle_sub_stop(wcycle, WallCycleSubCounter::NBSGridLocal);
            wallcycle_sub_start(wcycle, WallCycleSubCounter::NBSGridNonLocal);
            nbnxn_put_on_grid_nonlocal(nbv, domdec_zones(cr->dd), fr->atomInfo, x.unpaddedArrayRef());
            wallcycle_sub_stop(wcycle, WallCycleSubCounter::NBSGridNonLocal);
        nbv->setAtomProperties(gmx::constArrayRefFromArray(mdatoms->typeA, mdatoms->nr),
                               gmx::constArrayRefFromArray(mdatoms->chargeA, mdatoms->nr),
        wallcycle_stop(wcycle, WallCycleCounter::NS);
        /* initialize the GPU nbnxm atom data and bonded data structures */
        if (simulationWork.useGpuNonbonded)
            // Note: cycle counting only nonbondeds, GPU listed forces counts internally
            wallcycle_start_nocount(wcycle, WallCycleCounter::LaunchGpu);
            wallcycle_sub_start_nocount(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
            Nbnxm::gpu_init_atomdata(nbv->gpu_nbv, nbv->nbat.get());
            wallcycle_sub_stop(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
            wallcycle_stop(wcycle, WallCycleCounter::LaunchGpu);
            if (fr->listedForcesGpu)
                /* Now that we have put all atoms on the grid, we can assign bonded
                 * interactions to the GPU, where the grid order is
                 * needed. Also the xq, f and fshift device buffers have
                 * been reallocated if needed, so the bonded code can
                 * learn about them. */
                // TODO the xq, f, and fshift buffers are now shared
                // resources, so they should be maintained by a
                // higher-level object than the nb module.
                fr->listedForcesGpu->updateInteractionListsAndDeviceBuffers(
                        nbv->getGridIndices(),
                        Nbnxm::gpu_get_xq(nbv->gpu_nbv),
                        Nbnxm::gpu_get_f(nbv->gpu_nbv),
                        Nbnxm::gpu_get_fshift(nbv->gpu_nbv));
        // Need to run after the GPU-offload bonded interaction lists
        // are set up to be able to determine whether there is bonded work.
        runScheduleWork->domainWork = setupDomainLifetimeWorkload(
                inputrec, *fr, pull_work, ed, *mdatoms, simulationWork, stepWork);
        wallcycle_start_nocount(wcycle, WallCycleCounter::NS);
        wallcycle_sub_start(wcycle, WallCycleSubCounter::NBSSearchLocal);
        /* Note that with a GPU the launch overhead of the list transfer is not timed separately */
        nbv->constructPairlist(InteractionLocality::Local, top->excls, step, nrnb);
        nbv->setupGpuShortRangeWork(fr->listedForcesGpu.get(), InteractionLocality::Local);
        wallcycle_sub_stop(wcycle, WallCycleSubCounter::NBSSearchLocal);
        wallcycle_stop(wcycle, WallCycleCounter::NS);
1519 if (stepWork.useGpuXBufferOps)
1521 nbv->atomdata_init_copy_x_to_nbat_x_gpu();
1524 if (simulationWork.useGpuBufferOps)
1526 setupGpuForceReductions(runScheduleWork, cr, fr);
1529 else if (!EI_TPI(inputrec.eI) && stepWork.computeNonbondedForces)
1531 if (stepWork.useGpuXBufferOps)
1533 GMX_ASSERT(stateGpu, "stateGpu should be valid when buffer ops are offloaded");
1534 nbv->convertCoordinatesGpu(AtomLocality::Local, stateGpu->getCoordinates(), localXReadyOnDevice);
1538 if (simulationWork.useGpuUpdate)
1540 GMX_ASSERT(stateGpu, "need a valid stateGpu object");
1541 GMX_ASSERT(haveCopiedXFromGpu,
1542 "a wait should only be triggered if copy has been scheduled");
1543 stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
1545 nbv->convertCoordinates(AtomLocality::Local, x.unpaddedArrayRef());
1549 if (simulationWork.useGpuNonbonded && (stepWork.computeNonbondedForces || domainWork.haveGpuBondedWork))
1551 ddBalanceRegionHandler.openBeforeForceComputationGpu();
1553 wallcycle_start(wcycle, WallCycleCounter::LaunchGpu);
1554 wallcycle_sub_start(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
1555 Nbnxm::gpu_upload_shiftvec(nbv->gpu_nbv, nbv->nbat.get());
1556 if (stepWork.doNeighborSearch || !stepWork.useGpuXBufferOps)
1558 Nbnxm::gpu_copy_xq_to_gpu(nbv->gpu_nbv, nbv->nbat.get(), AtomLocality::Local);
1560 wallcycle_sub_stop(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
1561 wallcycle_stop(wcycle, WallCycleCounter::LaunchGpu);
1562 // with X buffer ops offloaded to the GPU on all but the search steps
1564 // bonded work not split into separate local and non-local, so with DD
1565 // we can only launch the kernel after non-local coordinates have been received.
1566 if (domainWork.haveGpuBondedWork && !simulationWork.havePpDomainDecomposition)
1568 fr->listedForcesGpu->setPbcAndlaunchKernel(fr->pbcType, box, fr->bMolPBC, stepWork);
1571 /* launch local nonbonded work on GPU */
1572 wallcycle_start_nocount(wcycle, WallCycleCounter::LaunchGpu);
1573 wallcycle_sub_start_nocount(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
1574 do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::Local, enbvClearFNo, step, nrnb, wcycle);
1575 wallcycle_sub_stop(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
1576 wallcycle_stop(wcycle, WallCycleCounter::LaunchGpu);
if (stepWork.haveGpuPmeOnThisRank)
{
    // In PME GPU and mixed mode we launch FFT / gather after the
    // X copy/transform to allow overlap as well as after the GPU NB
    // launch to avoid FFT launch overhead hijacking the CPU and delaying
    // the nonbonded kernel.
    launchPmeGpuFftAndGather(fr->pmedata,
                             lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)],
}

/* Communicate coordinates and sum dipole if necessary +
 * do the non-local pair search */
if (simulationWork.havePpDomainDecomposition)
{
    if (stepWork.doNeighborSearch)
    {
        // TODO: fuse this branch with the above large stepWork.doNeighborSearch block
        wallcycle_start_nocount(wcycle, WallCycleCounter::NS);
        wallcycle_sub_start(wcycle, WallCycleSubCounter::NBSSearchNonLocal);
        /* Note that with a GPU the launch overhead of the list transfer is not timed separately */
        nbv->constructPairlist(InteractionLocality::NonLocal, top->excls, step, nrnb);

        nbv->setupGpuShortRangeWork(fr->listedForcesGpu.get(), InteractionLocality::NonLocal);
        wallcycle_sub_stop(wcycle, WallCycleSubCounter::NBSSearchNonLocal);
        wallcycle_stop(wcycle, WallCycleCounter::NS);
        // TODO: refactor this GPU halo exchange re-initialisation to the location in
        // do_md where the GPU halo exchange is constructed at partitioning, once the
        // stateGpu re-initialization above has been refactored similarly.
        if (simulationWork.useGpuHaloExchange)
        {
            reinitGpuHaloExchange(*cr, stateGpu->getCoordinates(), stateGpu->getForces());
        }
    }
    else
    {
        GpuEventSynchronizer* gpuCoordinateHaloLaunched = nullptr;
        if (stepWork.useGpuXHalo)
        {
            // The following must be called after local setCoordinates (which records an event
            // when the coordinate data has been copied to the device).
            gpuCoordinateHaloLaunched = communicateGpuHaloCoordinates(*cr, box, localXReadyOnDevice);

            if (domainWork.haveCpuBondedWork || domainWork.haveFreeEnergyWork)
            {
                // the non-local part of the coordinate buffer must be copied back to the host for CPU work
                stateGpu->copyCoordinatesFromGpu(
                        x.unpaddedArrayRef(), AtomLocality::NonLocal, gpuCoordinateHaloLaunched);
            }
        }
        else
        {
            if (simulationWork.useGpuUpdate)
            {
                GMX_ASSERT(haveCopiedXFromGpu,
                           "a wait should only be triggered if copy has been scheduled");
                stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
            }
            dd_move_x(cr->dd, box, x.unpaddedArrayRef(), wcycle);
        }

        if (stepWork.useGpuXBufferOps)
        {
            if (!stepWork.useGpuXHalo)
            {
                stateGpu->copyCoordinatesToGpu(x.unpaddedArrayRef(), AtomLocality::NonLocal);
            }
            nbv->convertCoordinatesGpu(
                    AtomLocality::NonLocal,
                    stateGpu->getCoordinates(),
                    stateGpu->getCoordinatesReadyOnDeviceEvent(
                            AtomLocality::NonLocal, simulationWork, stepWork, gpuCoordinateHaloLaunched));
        }
        else
        {
            nbv->convertCoordinates(AtomLocality::NonLocal, x.unpaddedArrayRef());
        }
    }

    if (simulationWork.useGpuNonbonded)
    {
        if (stepWork.doNeighborSearch || !stepWork.useGpuXBufferOps)
        {
            wallcycle_start(wcycle, WallCycleCounter::LaunchGpu);
            wallcycle_sub_start(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
            Nbnxm::gpu_copy_xq_to_gpu(nbv->gpu_nbv, nbv->nbat.get(), AtomLocality::NonLocal);
            wallcycle_sub_stop(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
            wallcycle_stop(wcycle, WallCycleCounter::LaunchGpu);
        }

        if (domainWork.haveGpuBondedWork)
        {
            fr->listedForcesGpu->setPbcAndlaunchKernel(fr->pbcType, box, fr->bMolPBC, stepWork);
        }

        /* launch non-local nonbonded tasks on GPU */
        wallcycle_start_nocount(wcycle, WallCycleCounter::LaunchGpu);
        wallcycle_sub_start(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
        do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::NonLocal, enbvClearFNo, step, nrnb, wcycle);
        wallcycle_sub_stop(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
        wallcycle_stop(wcycle, WallCycleCounter::LaunchGpu);
    }
}
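// At this point both the local and the non-local short-range GPU kernels have been
// launched. Splitting the work by interaction locality is what lets the coordinate
// halo exchange above, and the force halo exchange further down, overlap with GPU compute.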
// With FEP we set up the reduction over threads for local+non-local simultaneously,
// so we need to do that here after the local and non-local pairlist construction.
if (stepWork.doNeighborSearch && fr->efep != FreeEnergyPerturbationType::No)
{
    wallcycle_sub_start(wcycle, WallCycleSubCounter::NonbondedFep);
    nbv->setupFepThreadedForceBuffer(fr->natoms_force_constr);
    wallcycle_sub_stop(wcycle, WallCycleSubCounter::NonbondedFep);
}

if (simulationWork.useGpuNonbonded && stepWork.computeNonbondedForces)
{
    /* launch D2H copy-back F */
    wallcycle_start_nocount(wcycle, WallCycleCounter::LaunchGpu);
    wallcycle_sub_start_nocount(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);

    if (simulationWork.havePpDomainDecomposition)
    {
        Nbnxm::gpu_launch_cpyback(nbv->gpu_nbv, nbv->nbat.get(), stepWork, AtomLocality::NonLocal);
    }
    Nbnxm::gpu_launch_cpyback(nbv->gpu_nbv, nbv->nbat.get(), stepWork, AtomLocality::Local);
    wallcycle_sub_stop(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);

    if (domainWork.haveGpuBondedWork && stepWork.computeEnergy)
    {
        fr->listedForcesGpu->launchEnergyTransfer();
    }
    wallcycle_stop(wcycle, WallCycleCounter::LaunchGpu);
}
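// Note that gpu_launch_cpyback() only enqueues the device-to-host force transfers;
// the results are consumed later by the gpu_wait_finish_task() / waitForcesReadyOnHost()
// calls further down in this function.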
gmx::ArrayRef<const gmx::RVec> xWholeMolecules;
if (fr->wholeMoleculeTransform)
{
    xWholeMolecules = fr->wholeMoleculeTransform->wholeMoleculeCoordinates(x.unpaddedArrayRef(), box);
}

// For the rest of the CPU tasks that depend on GPU-update produced coordinates,
// this wait ensures that the D2H transfer is complete.
if (simulationWork.useGpuUpdate && !stepWork.doNeighborSearch)
{
    const bool needCoordsOnHost = (runScheduleWork->domainWork.haveCpuLocalForceWork
                                   || stepWork.computeVirial || simulationWork.computeMuTot);
    const bool haveAlreadyWaited = simulationWork.useCpuHaloExchange;
    if (needCoordsOnHost && !haveAlreadyWaited)
    {
        GMX_ASSERT(haveCopiedXFromGpu,
                   "a wait should only be triggered if copy has been scheduled");
        stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
    }
}

DipoleData dipoleData;

if (simulationWork.computeMuTot)
{
    const int start = 0;

    /* Calculate the total (local) dipole moment in a temporary common array.
     * This makes it possible to sum them over nodes faster.
     */
    gmx::ArrayRef<const gmx::RVec> xRef =
            (xWholeMolecules.empty() ? x.unpaddedArrayRef() : xWholeMolecules);
    calc_mu(start,
            mdatoms->homenr,
            xRef,
            mdatoms->chargeA ? gmx::arrayRefFromArray(mdatoms->chargeA, mdatoms->nr)
                             : gmx::ArrayRef<real>{},
            mdatoms->chargeB ? gmx::arrayRefFromArray(mdatoms->chargeB, mdatoms->nr)
                             : gmx::ArrayRef<real>{},
            mdatoms->nChargePerturbed != 0,
            dipoleData.muStaging[0],
            dipoleData.muStaging[1]);

    reduceAndUpdateMuTot(
            &dipoleData, cr, (fr->efep != FreeEnergyPerturbationType::No), lambda, muTotal, ddBalanceRegionHandler);
}
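// The staging step above accumulates the local dipole mu = sum_i q_i * x_i over the
// home atoms (separately for the A and B charge states when charges are perturbed);
// reduceAndUpdateMuTot() then sums the contributions over the ranks and, with free
// energy perturbation, combines the two end-state dipoles using the coupling lambda.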
/* Reset energies */
reset_enerdata(enerd);

if (haveDDAtomOrdering(*cr) && simulationWork.haveSeparatePmeRank)
{
    wallcycle_start(wcycle, WallCycleCounter::PpDuringPme);
    dd_force_flop_start(cr->dd, nrnb);
}

wallcycle_start(wcycle, WallCycleCounter::Rot);
do_rotation(cr, enforcedRotation, box, x.unpaddedConstArrayRef(), t, step, stepWork.doNeighborSearch);
wallcycle_stop(wcycle, WallCycleCounter::Rot);

/* Start the force cycle counter.
 * Note that a different counter is used for dynamic load balancing.
 */
wallcycle_start(wcycle, WallCycleCounter::Force);

/* Set up and clear the force outputs:
 * forceOutMtsLevel0: everything except what is in the other two outputs
 * forceOutMtsLevel1: PME-mesh and listed-forces group 1
 * forceOutNonbonded: non-bonded forces
 * Without multiple time stepping all three point to the same object.
 * With multiple time stepping the use differs between MTS fast (level0-only) and slow steps.
 */
ForceOutputs forceOutMtsLevel0 = setupForceOutputs(
        &fr->forceHelperBuffers[0], force, domainWork, stepWork, simulationWork.havePpDomainDecomposition, wcycle);

// Force output for MTS combined forces, only set at level1 MTS steps
std::optional<ForceOutputs> forceOutMts =
        (simulationWork.useMts && stepWork.computeSlowForces)
                ? std::optional(setupForceOutputs(&fr->forceHelperBuffers[1],
                                                  forceView->forceMtsCombinedWithPadding(),
                                                  simulationWork.havePpDomainDecomposition,

ForceOutputs* forceOutMtsLevel1 =
        simulationWork.useMts ? (stepWork.computeSlowForces ? &forceOutMts.value() : nullptr)
                              : &forceOutMtsLevel0;

const bool nonbondedAtMtsLevel1 = runScheduleWork->simulationWork.computeNonbondedAtMtsLevel1;

ForceOutputs* forceOutNonbonded = nonbondedAtMtsLevel1 ? forceOutMtsLevel1 : &forceOutMtsLevel0;
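// Summary of the routing set up above: on fast (level0-only) MTS steps forceOutMtsLevel1
// is nullptr and everything accumulates into forceOutMtsLevel0; on slow steps the level1
// contributions (PME mesh, listed-forces group 1) go into forceOutMtsLevel1, and
// forceOutNonbonded points at whichever of the two receives the nonbonded forces.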
if (inputrec.bPull && pull_have_constraint(*pull_work))
{
    clear_pull_forces(pull_work);
}

/* When the non-bonded forces are computed on the CPU, we calculate them here.
 * We do this before calling do_force_lowlevel, because in that function
 * the listed forces are calculated before PME, which does communication.
 * With this order, imbalance between the non-bonded and listed force
 * calculation can be balanced out by the domain decomposition load balancing.
 */
const bool useOrEmulateGpuNb = simulationWork.useGpuNonbonded || fr->nbv->emulateGpu();

if (!useOrEmulateGpuNb)
{
    do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::Local, enbvClearFYes, step, nrnb, wcycle);
}
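// The local CPU nonbonded call above uses enbvClearFYes so the nbnxm force buffer starts
// from zero this step; the non-local call below uses enbvClearFNo and accumulates into the
// same buffer before it is reduced into the main force array.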
if (fr->efep != FreeEnergyPerturbationType::No && stepWork.computeNonbondedForces)
{
    /* Calculate the local and non-local free energy interactions here.
     * This always runs on the CPU, both with and without a GPU.
     */
    nbv->dispatchFreeEnergyKernels(
            &forceOutNonbonded->forceWithShiftForces(),
            fr->use_simd_kernels,
            mdatoms->chargeA ? gmx::arrayRefFromArray(mdatoms->chargeA, mdatoms->nr)
                             : gmx::ArrayRef<real>{},
            mdatoms->chargeB ? gmx::arrayRefFromArray(mdatoms->chargeB, mdatoms->nr)
                             : gmx::ArrayRef<real>{},
            mdatoms->typeA ? gmx::arrayRefFromArray(mdatoms->typeA, mdatoms->nr)
                           : gmx::ArrayRef<int>{},
            mdatoms->typeB ? gmx::arrayRefFromArray(mdatoms->typeB, mdatoms->nr)
                           : gmx::ArrayRef<int>{},
            inputrec.fepvals.get(),
}

if (stepWork.computeNonbondedForces && !useOrEmulateGpuNb)
{
    if (simulationWork.havePpDomainDecomposition)
    {
        do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::NonLocal, enbvClearFNo, step, nrnb, wcycle);
    }

    if (stepWork.computeForces)
    {
        /* Add all the non-bonded forces to the normal force array.
         * With domain decomposition this can be split into a local and a non-local
         * part, overlapping communication with computation.
         */
        wallcycle_stop(wcycle, WallCycleCounter::Force);
        nbv->atomdata_add_nbat_f_to_f(AtomLocality::All,
                                      forceOutNonbonded->forceWithShiftForces().force());
        wallcycle_start_nocount(wcycle, WallCycleCounter::Force);
    }

    /* If there are multiple fshift output buffers we need to reduce them */
    if (stepWork.computeVirial)
    {
        /* This is not in a subcounter because it takes a
         * negligible and constant-sized amount of time */
        nbnxn_atomdata_add_nbat_fshift_to_fshift(
                *nbv->nbat, forceOutNonbonded->forceWithShiftForces().shiftForces());
    }
}
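// The shift forces reduced here feed the virial calculation later on (see
// postProcessForceWithShiftForces() below): each periodic shift contributes an
// outer-product term of its shift vector with the force accumulated for that shift.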
// TODO Force flags should include haveFreeEnergyWork for this domain
if (stepWork.useGpuXHalo && (domainWork.haveCpuBondedWork || domainWork.haveFreeEnergyWork))
{
    wallcycle_stop(wcycle, WallCycleCounter::Force);
    /* Wait for non-local coordinate data to be copied from device */
    stateGpu->waitCoordinatesReadyOnHost(AtomLocality::NonLocal);
    wallcycle_start_nocount(wcycle, WallCycleCounter::Force);
}

// Compute wall interactions, when present.
// Note: should be moved to special forces.
if (inputrec.nwall && stepWork.computeNonbondedForces)
{
    /* foreign lambda component for walls */
    real dvdl_walls = do_walls(inputrec,
                               mdatoms->typeA ? gmx::arrayRefFromArray(mdatoms->typeA, mdatoms->nr)
                                              : gmx::ArrayRef<int>{},
                               mdatoms->typeB ? gmx::arrayRefFromArray(mdatoms->typeB, mdatoms->nr)
                                              : gmx::ArrayRef<int>{},
                               mdatoms->cENER ? gmx::arrayRefFromArray(mdatoms->cENER, mdatoms->nr)
                                              : gmx::ArrayRef<unsigned short>{},
                               mdatoms->nPerturbed,
                               x.unpaddedConstArrayRef(),
                               &forceOutMtsLevel0.forceWithVirial(),
                               lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Vdw)],
                               enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::LJSR],
    enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Vdw] += dvdl_walls;
}

if (stepWork.computeListedForces)
{
    /* Check whether we need to take into account PBC in listed interactions */
    bool needMolPbc = false;
    for (const auto& listedForces : fr->listedForces)
    {
        if (listedForces.haveCpuListedForces(*fr->fcdata))
        {
            needMolPbc = fr->bMolPBC;
        }
    }

    /* Since all atoms are in the rectangular or triclinic unit-cell,
     * only single box vector shifts (2 in x) are required.
     */
    set_pbc_dd(&pbc, fr->pbcType, haveDDAtomOrdering(*cr) ? cr->dd->numCells : nullptr, TRUE, box);

    for (int mtsIndex = 0; mtsIndex < (simulationWork.useMts && stepWork.computeSlowForces ? 2 : 1);
    {
        ListedForces& listedForces = fr->listedForces[mtsIndex];
        ForceOutputs& forceOut     = (mtsIndex == 0 ? forceOutMtsLevel0 : *forceOutMtsLevel1);
        listedForces.calculate(wcycle,
                               inputrec.fepvals.get(),
                               haveDDAtomOrdering(*cr) ? cr->dd->globalAtomIndices.data() : nullptr,
    }
}

if (stepWork.computeSlowForces)
{
    longRangeNonbondeds->calculate(fr->pmedata,
                                   x.unpaddedConstArrayRef(),
                                   &forceOutMtsLevel1->forceWithVirial(),
                                   dipoleData.muStateAB,
                                   ddBalanceRegionHandler);
}

wallcycle_stop(wcycle, WallCycleCounter::Force);

// VdW dispersion correction, only computed on the master rank to avoid double counting
if ((stepWork.computeEnergy || stepWork.computeVirial) && fr->dispersionCorrection && MASTER(cr))
{
    // Calculate long-range corrections to pressure and energy
    const DispersionCorrection::Correction correction = fr->dispersionCorrection->calculate(
            box, lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Vdw)]);

    if (stepWork.computeEnergy)
    {
        enerd->term[F_DISPCORR] = correction.energy;
        enerd->term[F_DVDL_VDW] += correction.dvdl;
        enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Vdw] += correction.dvdl;
    }
    if (stepWork.computeVirial)
    {
        correction.correctVirial(vir_force);
        enerd->term[F_PDISPCORR] = correction.pressure;
    }
}
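// For reference, the leading term of this analytic tail correction has the familiar
// form E_corr ~ -(2/3) pi N rho <C6> / rc^3, with matching pressure and virial terms;
// DispersionCorrection evaluates it for the actual cutoff scheme and for the
// requested Vdw coupling lambda.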
computeSpecialForces(fplog,
                     x.unpaddedArrayRef(),
                     &forceOutMtsLevel0.forceWithVirial(),
                     forceOutMtsLevel1 ? &forceOutMtsLevel1->forceWithVirial() : nullptr,
                     stepWork.doNeighborSearch);

if (simulationWork.havePpDomainDecomposition && stepWork.computeForces && stepWork.useGpuFHalo
    && domainWork.haveCpuLocalForceWork)
{
    stateGpu->copyForcesToGpu(forceOutMtsLevel0.forceWithShiftForces().force(), AtomLocality::Local);
}

GMX_ASSERT(!(nonbondedAtMtsLevel1 && stepWork.useGpuFBufferOps),
           "The schedule below does not allow for nonbonded MTS with GPU buffer ops");
GMX_ASSERT(!(nonbondedAtMtsLevel1 && stepWork.useGpuFHalo),
           "The schedule below does not allow for nonbonded MTS with GPU halo exchange");
// Stores the number of cycles spent waiting for the GPU, used later in the DLB accounting.
float cycles_wait_gpu = 0;
if (useOrEmulateGpuNb && stepWork.computeNonbondedForces)
{
    auto& forceWithShiftForces = forceOutNonbonded->forceWithShiftForces();

    /* wait for non-local forces (or calculate in emulation mode) */
    if (simulationWork.havePpDomainDecomposition)
    {
        if (simulationWork.useGpuNonbonded)
        {
            cycles_wait_gpu += Nbnxm::gpu_wait_finish_task(
                    AtomLocality::NonLocal,
                    enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::LJSR].data(),
                    enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::CoulombSR].data(),
                    forceWithShiftForces.shiftForces(),
        }
        else
        {
            wallcycle_start_nocount(wcycle, WallCycleCounter::Force);
            do_nb_verlet(
                    fr, ic, enerd, stepWork, InteractionLocality::NonLocal, enbvClearFYes, step, nrnb, wcycle);
            wallcycle_stop(wcycle, WallCycleCounter::Force);
        }

        if (stepWork.useGpuFBufferOps)
        {
            if (domainWork.haveNonLocalForceContribInCpuBuffer)
            {
                stateGpu->copyForcesToGpu(forceOutMtsLevel0.forceWithShiftForces().force(),
                                          AtomLocality::NonLocal);
            }

            fr->gpuForceReduction[gmx::AtomLocality::NonLocal]->execute();

            if (!stepWork.useGpuFHalo)
            {
                /* We don't explicitly wait for the forces to be reduced on the device,
                 * but wait for them to finish copying to the CPU instead.
                 * So we manually consume the event, see Issue #3988. */
                stateGpu->consumeForcesReducedOnDeviceEvent(AtomLocality::NonLocal);
                // copy from the GPU the input for dd_move_f()
                stateGpu->copyForcesFromGpu(forceOutMtsLevel0.forceWithShiftForces().force(),
                                            AtomLocality::NonLocal);
            }
        }
        else
        {
            nbv->atomdata_add_nbat_f_to_f(AtomLocality::NonLocal, forceWithShiftForces.force());
        }

        if (fr->nbv->emulateGpu() && stepWork.computeVirial)
        {
            nbnxn_atomdata_add_nbat_fshift_to_fshift(*nbv->nbat, forceWithShiftForces.shiftForces());
        }
    }
}

/* Combining the forces for multiple time stepping before the halo exchange, when possible,
 * avoids an extra halo exchange (when DD is used) and an extra post-processing step.
 */
if (stepWork.combineMtsForcesBeforeHaloExchange)
{
    combineMtsForces(getLocalAtomCount(cr->dd, *mdatoms, simulationWork.havePpDomainDecomposition),
                     force.unpaddedArrayRef(),
                     forceView->forceMtsCombined(),
                     inputrec.mtsLevels[1].stepFactor);
}
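// combineMtsForces() folds the slow (level1) forces into the fast force buffer, scaled by
// the MTS step factor, roughly f[i] += stepFactor * fSlow[i] for each local atom, so that
// averaged over a full MTS cycle the slow contribution matches an every-step evaluation.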
if (simulationWork.havePpDomainDecomposition)
{
    /* We are done with the CPU compute.
     * We will now communicate the non-local forces.
     * If we use a GPU this will overlap with GPU work, so in that case
     * we do not close the DD force balancing region here.
     */
    ddBalanceRegionHandler.closeAfterForceComputationCpu();

    if (stepWork.computeForces)
    {
        if (stepWork.useGpuFHalo)
        {
            // If CPU forces exist, the data from the halo exchange should accumulate into them
            bool accumulateForces = domainWork.haveCpuLocalForceWork;
            gmx::FixedCapacityVector<GpuEventSynchronizer*, 2> gpuForceHaloDependencies;
            gpuForceHaloDependencies.push_back(stateGpu->fReadyOnDevice(AtomLocality::Local));
            gpuForceHaloDependencies.push_back(stateGpu->fReducedOnDevice(AtomLocality::NonLocal));

            communicateGpuHaloForces(*cr, accumulateForces, &gpuForceHaloDependencies);
        }
        else
        {
            if (stepWork.useGpuFBufferOps)
            {
                stateGpu->waitForcesReadyOnHost(AtomLocality::NonLocal);
            }

            // Without MTS, or with MTS at slow steps with uncombined forces, we need to
            // communicate the fast forces
            if (!simulationWork.useMts || !stepWork.combineMtsForcesBeforeHaloExchange)
            {
                dd_move_f(cr->dd, &forceOutMtsLevel0.forceWithShiftForces(), wcycle);
            }
            // With MTS we need to communicate the slow or combined (in forceOutMtsLevel1) forces
            if (simulationWork.useMts && stepWork.computeSlowForces)
            {
                dd_move_f(cr->dd, &forceOutMtsLevel1->forceWithShiftForces(), wcycle);
            }
        }
    }
}
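// Note the direction: dd_move_f() communicates in the opposite sense to dd_move_x(),
// returning the forces computed here for non-local (halo) atoms to the ranks that own
// those atoms, where they are accumulated onto the home-atom forces.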
// With both the nonbonded and PME offloaded to a GPU on the same rank, we use
// an alternating wait/reduction scheme.
bool alternateGpuWait =
        (!c_disableAlternatingWait && stepWork.haveGpuPmeOnThisRank && simulationWork.useGpuNonbonded
         && !simulationWork.havePpDomainDecomposition && !stepWork.useGpuFBufferOps);
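// The idea of the alternating scheme is to poll both the PME and the nonbonded GPU task
// and reduce whichever output becomes available first, rather than blocking in a fixed
// order, which helps when the two tasks finish in an unpredictable order.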
if (alternateGpuWait)
{
    alternatePmeNbGpuWaitReduce(fr->nbv.get(),
                                lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)],
}

if (!alternateGpuWait && stepWork.haveGpuPmeOnThisRank)
{
    pme_gpu_wait_and_reduce(fr->pmedata,
                            &forceOutMtsLevel1->forceWithVirial(),
                            lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)]);
}

/* Wait for the local GPU NB outputs on the non-alternating wait path */
if (!alternateGpuWait && stepWork.computeNonbondedForces && simulationWork.useGpuNonbonded)
{
    /* Measured overhead on CUDA and OpenCL with(out) GPU sharing
     * is between 0.5 and 1.5 Mcycles. So 2 Mcycles is an overestimate,
     * but even with a step of 0.1 ms the difference is less than 1%
     */
    const float gpuWaitApiOverheadMargin = 2e6F; /* cycles */
    const float waitCycles               = Nbnxm::gpu_wait_finish_task(
            AtomLocality::Local,
            enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::LJSR].data(),
            enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::CoulombSR].data(),
            forceOutNonbonded->forceWithShiftForces().shiftForces(),

    if (ddBalanceRegionHandler.useBalancingRegion())
    {
        DdBalanceRegionWaitedForGpu waitedForGpu = DdBalanceRegionWaitedForGpu::yes;
        if (stepWork.computeForces && waitCycles <= gpuWaitApiOverheadMargin)
        {
            /* We measured only a few cycles, so it could be that the kernel
             * and transfer finished earlier and there was no actual
             * wait time, only API call overhead.
             * Then the actual time could be anywhere between 0 and
             * cycles_wait_est. We will use half of cycles_wait_est.
             */
            waitedForGpu = DdBalanceRegionWaitedForGpu::no;
        }
        ddBalanceRegionHandler.closeAfterForceComputationGpu(cycles_wait_gpu, waitedForGpu);
    }
}

if (fr->nbv->emulateGpu())
{
    // NOTE: the emulation kernel is not included in the balancing region,
    // but emulation mode does not target performance anyway
    wallcycle_start_nocount(wcycle, WallCycleCounter::Force);
    do_nb_verlet(fr,
                 ic,
                 enerd,
                 stepWork,
                 InteractionLocality::Local,
                 haveDDAtomOrdering(*cr) ? enbvClearFNo : enbvClearFYes,
                 step,
                 nrnb,
                 wcycle);
    wallcycle_stop(wcycle, WallCycleCounter::Force);
}

// If on the GPU PME-PP comms path, receive forces from PME before the GPU buffer ops
// TODO refactor this and unify with the below default-path call to the same function
if (PAR(cr) && simulationWork.haveSeparatePmeRank && simulationWork.useGpuPmePpCommunication
    && stepWork.computeSlowForces)
{
    /* In case of node-splitting, the PP nodes receive the long-range
     * forces, virial and energy from the PME nodes here.
     */
    pme_receive_force_ener(fr,
                           &forceOutMtsLevel1->forceWithVirial(),
                           simulationWork.useGpuPmePpCommunication,
                           stepWork.useGpuPmeFReduction,
}

/* Do the nonbonded GPU (or emulation) force buffer reduction
 * on the non-alternating path. */
GMX_ASSERT(!(nonbondedAtMtsLevel1 && stepWork.useGpuFBufferOps),
           "The schedule below does not allow for nonbonded MTS with GPU buffer ops");
if (useOrEmulateGpuNb && !alternateGpuWait)
{
    if (stepWork.useGpuFBufferOps)
    {
        ArrayRef<gmx::RVec> forceWithShift = forceOutNonbonded->forceWithShiftForces().force();

        // TODO: move these steps as early as possible:
        // - the CPU f H2D copy should happen as soon as all CPU-side forces are done
        // - the wait for the force reduction does not need to block the host (at least not here;
        //   it is sufficient to wait before the next CPU task that consumes the forces: vsite
        //   spread or update)
        // - the copy is not performed if the GPU force halo exchange is active, because it would
        //   overwrite the result of the halo exchange. In that case the copy is instead performed
        //   above, before the exchange.
        // These should be unified.
        if (domainWork.haveLocalForceContribInCpuBuffer && !stepWork.useGpuFHalo)
        {
            stateGpu->copyForcesToGpu(forceWithShift, AtomLocality::Local);
        }

        if (stepWork.computeNonbondedForces)
        {
            fr->gpuForceReduction[gmx::AtomLocality::Local]->execute();
        }

        // Copy forces to the host if they are needed for the update or if virtual sites are enabled.
        // If there are vsites, we need to copy the forces every step to spread the vsite forces on the host.
        // TODO: Once the output flags are included in the step workload, this copy can be combined with
        //       the copy call done in sim_utils(...) for the output.
        // NOTE: If there are virtual sites, the forces are modified on the host after this D2H copy. Hence,
        //       they should not be copied in do_md(...) for the output.
        if (!simulationWork.useGpuUpdate
            || (simulationWork.useGpuUpdate && haveDDAtomOrdering(*cr) && simulationWork.useCpuPmePpCommunication)
        {
            if (stepWork.computeNonbondedForces)
            {
                /* We have previously issued force reduction on the GPU, but we will
                 * not use this event, instead relying on the stream being in-order,
                 * so we manually consume the event, see Issue #3988. */
                stateGpu->consumeForcesReducedOnDeviceEvent(AtomLocality::Local);
            }
            stateGpu->copyForcesFromGpu(forceWithShift, AtomLocality::Local);
            stateGpu->waitForcesReadyOnHost(AtomLocality::Local);
        }
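        // After this wait the local forces are valid on the host, which the CPU-side
        // consumers further down (virtual-site spreading, post-processing/virial, or a
        // CPU update) rely on; without it they could read a stale or partial buffer.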
    }
    else if (stepWork.computeNonbondedForces)
    {
        ArrayRef<gmx::RVec> forceWithShift = forceOutNonbonded->forceWithShiftForces().force();
        nbv->atomdata_add_nbat_f_to_f(AtomLocality::Local, forceWithShift);
    }
}

launchGpuEndOfStepTasks(
        nbv, fr->listedForcesGpu.get(), fr->pmedata, enerd, *runScheduleWork, step, wcycle);

if (haveDDAtomOrdering(*cr))
{
    dd_force_flop_stop(cr->dd, nrnb);
}

const bool haveCombinedMtsForces = (stepWork.computeForces && simulationWork.useMts && stepWork.computeSlowForces
                                    && stepWork.combineMtsForcesBeforeHaloExchange);
if (stepWork.computeForces)
{
    postProcessForceWithShiftForces(
            nrnb, wcycle, box, x.unpaddedArrayRef(), &forceOutMtsLevel0, vir_force, *mdatoms, *fr, vsite, stepWork);

    if (simulationWork.useMts && stepWork.computeSlowForces && !haveCombinedMtsForces)
    {
        postProcessForceWithShiftForces(
                nrnb, wcycle, box, x.unpaddedArrayRef(), forceOutMtsLevel1, vir_force, *mdatoms, *fr, vsite, stepWork);
    }
}

// TODO refactor this and unify with the above GPU PME-PP / GPU update path call to the same function
if (PAR(cr) && simulationWork.haveSeparatePmeRank && simulationWork.useCpuPmePpCommunication
    && stepWork.computeSlowForces)
{
    /* In case of node-splitting, the PP nodes receive the long-range
     * forces, virial and energy from the PME nodes here.
     */
    pme_receive_force_ener(fr,
                           &forceOutMtsLevel1->forceWithVirial(),
                           simulationWork.useGpuPmePpCommunication,
}

if (stepWork.computeForces)
{
    /* If we don't use MTS, or if we already combined the MTS forces before, we only
     * need to post-process one ForceOutputs object here, called forceOutCombined;
     * otherwise we have to post-process two outputs and then combine them.
     */
    ForceOutputs& forceOutCombined = (haveCombinedMtsForces ? forceOutMts.value() : forceOutMtsLevel0);
    postProcessForces(
            cr, step, nrnb, wcycle, box, x.unpaddedArrayRef(), &forceOutCombined, vir_force, mdatoms, fr, vsite, stepWork);

    if (simulationWork.useMts && stepWork.computeSlowForces && !haveCombinedMtsForces)
    {
        postProcessForces(
                cr, step, nrnb, wcycle, box, x.unpaddedArrayRef(), forceOutMtsLevel1, vir_force, mdatoms, fr, vsite, stepWork);

        combineMtsForces(mdatoms->homenr,
                         force.unpaddedArrayRef(),
                         forceView->forceMtsCombined(),
                         inputrec.mtsLevels[1].stepFactor);
    }
}

if (stepWork.computeEnergy)
{
    /* Compute the final potential energy terms */
    accumulatePotentialEnergies(enerd, lambda, inputrec.fepvals.get());

    if (!EI_TPI(inputrec.eI))
    {
        checkPotentialEnergyValidity(step, *enerd, inputrec);
    }
}
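// checkPotentialEnergyValidity() is a cheap sanity check that catches non-finite or
// absurdly large potential energies (typically the result of overlapping atoms or an
// unstable integration) and aborts with a descriptive error rather than continuing.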
/* In case we don't have constraints and are using GPUs, the next balancing
 * region starts here.
 * Some "special" work at the end of do_force, such as vsite spread,
 * virial calculation and COM pulling, is thus not included in
 * the balance timing, which is ok as most tasks do communication.
 */
ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::no);