/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013-2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gromacs/applied_forces/awh/awh.h"
#include "gromacs/domdec/dlbtiming.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/domdec/gpuhaloexchange.h"
#include "gromacs/domdec/partition.h"
#include "gromacs/essentialdynamics/edsam.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/ewald/pme_coordinate_receiver_gpu.h"
#include "gromacs/ewald/pme_pp.h"
#include "gromacs/ewald/pme_pp_comm_gpu.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nonbonded/nb_free_energy.h"
#include "gromacs/gmxlib/nonbonded/nonbonded.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/imd/imd.h"
#include "gromacs/listed_forces/disre.h"
#include "gromacs/listed_forces/listed_forces_gpu.h"
#include "gromacs/listed_forces/listed_forces.h"
#include "gromacs/listed_forces/orires.h"
#include "gromacs/math/arrayrefwithpadding.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vecdump.h"
#include "gromacs/mdlib/calcmu.h"
#include "gromacs/mdlib/calcvir.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdlib/dispersioncorrection.h"
#include "gromacs/mdlib/enerdata_utils.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/force_flags.h"
#include "gromacs/mdlib/forcerec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/update.h"
#include "gromacs/mdlib/vsite.h"
#include "gromacs/mdlib/wall.h"
#include "gromacs/mdlib/wholemoleculetransform.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/enerdata.h"
#include "gromacs/mdtypes/forcebuffers.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/forcerec.h"
#include "gromacs/mdtypes/iforceprovider.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/mdatom.h"
#include "gromacs/mdtypes/multipletimestepping.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/mdtypes/state_propagator_data_gpu.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/nbnxm_gpu.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/pulling/pull_rotation.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/wallcyclereporting.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/fixedcapacityvector.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/strconvert.h"
#include "gromacs/utility/sysinfo.h"

#include "gpuforcereduction.h"
using gmx::ArrayRef;
using gmx::AtomLocality;
using gmx::DomainLifetimeWorkload;
using gmx::ForceOutputs;
using gmx::ForceWithShiftForces;
using gmx::InteractionLocality;
using gmx::RVec;
using gmx::SimulationWorkload;
using gmx::StepWorkload;
// TODO: this environment variable allows us to verify before release
// that on less common architectures the total cost of polling is not larger than
// a blocking wait (so polling does not introduce overhead when the static
// PME-first ordering would suffice).
static const bool c_disableAlternatingWait = (getenv("GMX_DISABLE_ALTERNATING_GPU_WAIT") != nullptr);
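
//! Adds the forces in \p forceToAdd element-wise to \p f, OpenMP-parallelized over the atoms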
static void sum_forces(ArrayRef<RVec> f, ArrayRef<const RVec> forceToAdd)
{
    GMX_ASSERT(f.size() >= forceToAdd.size(), "Accumulation buffer should be sufficiently large");
    const int end = forceToAdd.size();

    int gmx_unused nt = gmx_omp_nthreads_get(ModuleMultiThread::Default);
#pragma omp parallel for num_threads(nt) schedule(static)
    for (int i = 0; i < end; i++)
    {
        rvec_inc(f[i], forceToAdd[i]);
    }
}
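
//! Computes the virial from the shift forces and the short-range forces on the local atoms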
static void calc_virial(int                              start,
                        int                              homenr,
                        const rvec                       x[],
                        const gmx::ForceWithShiftForces& forceWithShiftForces,
                        tensor                           vir_part,
                        const matrix                     box,
                        t_nrnb*                          nrnb,
                        const t_forcerec*                fr,
                        PbcType                          pbcType)
{
    /* The short-range virial from surrounding boxes */
    const rvec* fshift          = as_rvec_array(forceWithShiftForces.shiftForces().data());
    const rvec* shiftVecPointer = as_rvec_array(fr->shift_vec.data());
    calc_vir(gmx::c_numShiftVectors, shiftVecPointer, fshift, vir_part, pbcType == PbcType::Screw, box);
    inc_nrnb(nrnb, eNR_VIRIAL, gmx::c_numShiftVectors);

    /* Calculate partial virial, for local atoms only, based on short range.
     * Total virial is computed in global_stat, called from do_md.
     */
    const rvec* f = as_rvec_array(forceWithShiftForces.force().data());
    f_calc_vir(start, start + homenr, x, f, vir_part, box);
    inc_nrnb(nrnb, eNR_VIRIAL, homenr);

    if (debug)
    {
        pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
    }
}
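
//! Computes the COM pull forces and energy via pull_potential() and accumulates the restraint dVdlambda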
static void pull_potential_wrapper(const t_commrec*               cr,
                                   const t_inputrec&              ir,
                                   const matrix                   box,
                                   gmx::ArrayRef<const gmx::RVec> x,
                                   gmx::ForceWithVirial*          force,
                                   const t_mdatoms*               mdatoms,
                                   gmx_enerdata_t*                enerd,
                                   pull_t*                        pull_work,
                                   const real*                    lambda,
                                   double                         t,
                                   gmx_wallcycle*                 wcycle)
{
    t_pbc pbc;
    real  dvdl;

    /* Calculate the center of mass forces, this requires communication,
     * which is why pull_potential is called close to other communication.
     */
    wallcycle_start(wcycle, WallCycleCounter::PullPot);
    set_pbc(&pbc, ir.pbcType, box);
    dvdl = 0;
    enerd->term[F_COM_PULL] +=
            pull_potential(pull_work,
                           gmx::arrayRefFromArray(mdatoms->massT, mdatoms->nr),
                           pbc,
                           cr,
                           t,
                           lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Restraint)],
                           x,
                           force,
                           &dvdl);
    enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Restraint] += dvdl;
    wallcycle_stop(wcycle, WallCycleCounter::PullPot);
}
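
//! Receives long-range forces, virial and energies from the separate PME ranks and accumulates them into \p forceWithVirial and \p enerd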
static void pme_receive_force_ener(t_forcerec*           fr,
                                   const t_commrec*      cr,
                                   gmx::ForceWithVirial* forceWithVirial,
                                   gmx_enerdata_t*       enerd,
                                   bool                  useGpuPmePpComms,
                                   bool                  receivePmeForceToGpu,
                                   gmx_wallcycle*        wcycle)
{
    real  e_q, e_lj, dvdl_q, dvdl_lj;
    float cycles_ppdpme, cycles_seppme;

    cycles_ppdpme = wallcycle_stop(wcycle, WallCycleCounter::PpDuringPme);
    dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);

    /* In case of node-splitting, the PP nodes receive the long-range
     * forces, virial and energy from the PME nodes here.
     */
    wallcycle_start(wcycle, WallCycleCounter::PpPmeWaitRecvF);
    dvdl_q  = 0;
    dvdl_lj = 0;
    gmx_pme_receive_f(fr->pmePpCommGpu.get(),
                      cr,
                      forceWithVirial,
                      &e_q,
                      &e_lj,
                      &dvdl_q,
                      &dvdl_lj,
                      useGpuPmePpComms,
                      receivePmeForceToGpu,
                      &cycles_seppme);
    enerd->term[F_COUL_RECIP] += e_q;
    enerd->term[F_LJ_RECIP] += e_lj;
    enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Coul] += dvdl_q;
    enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Vdw] += dvdl_lj;

    if (wcycle)
    {
        dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);
    }
    wallcycle_stop(wcycle, WallCycleCounter::PpPmeWaitRecvF);
}
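
//! Prints all local atoms whose force exceeds \p forceTolerance and issues a fatal error when non-finite forces are detected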
static void print_large_forces(FILE*                fp,
                               const t_mdatoms*     md,
                               const t_commrec*     cr,
                               int64_t              step,
                               real                 forceTolerance,
                               ArrayRef<const RVec> x,
                               ArrayRef<const RVec> f)
{
    real       force2Tolerance = gmx::square(forceTolerance);
    gmx::index numNonFinite    = 0;
    for (int i = 0; i < md->homenr; i++)
    {
        real force2    = norm2(f[i]);
        bool nonFinite = !std::isfinite(force2);
        if (force2 >= force2Tolerance || nonFinite)
        {
            fprintf(fp,
                    "step %" PRId64 " atom %6d  x %8.3f %8.3f %8.3f  force %12.5e\n",
                    step,
                    ddglatnr(cr->dd, i),
                    x[i][XX],
                    x[i][YY],
                    x[i][ZZ],
                    std::sqrt(force2));
        }
        if (nonFinite)
        {
            numNonFinite++;
        }
    }
    if (numNonFinite > 0)
    {
        /* Note that with MPI this fatal call on one rank might interrupt
         * the printing on other ranks. But we can only avoid that with
         * an expensive MPI barrier that we would need at each step.
         */
        gmx_fatal(FARGS, "At step %" PRId64 " detected non-finite forces on %td atoms", step, numNonFinite);
    }
}
//! When necessary, spreads forces on vsites and computes the virial for \p forceOutputs->forceWithShiftForces()
static void postProcessForceWithShiftForces(t_nrnb*                   nrnb,
                                            gmx_wallcycle*            wcycle,
                                            const matrix              box,
                                            ArrayRef<const RVec>      x,
                                            ForceOutputs*             forceOutputs,
                                            tensor                    vir_force,
                                            const t_mdatoms&          mdatoms,
                                            const t_forcerec&         fr,
                                            gmx::VirtualSitesHandler* vsite,
                                            const StepWorkload&       stepWork)
{
    ForceWithShiftForces& forceWithShiftForces = forceOutputs->forceWithShiftForces();

    /* If we have NoVirSum forces, but we do not calculate the virial,
     * we later sum the forceWithShiftForces buffer together with
     * the noVirSum buffer and spread the combined vsite forces at once.
     */
    if (vsite && (!forceOutputs->haveForceWithVirial() || stepWork.computeVirial))
    {
        using VirialHandling = gmx::VirtualSitesHandler::VirialHandling;

        auto                 f      = forceWithShiftForces.force();
        auto                 fshift = forceWithShiftForces.shiftForces();
        const VirialHandling virialHandling =
                (stepWork.computeVirial ? VirialHandling::Pbc : VirialHandling::None);
        vsite->spreadForces(x, f, virialHandling, fshift, nullptr, nrnb, box, wcycle);
        forceWithShiftForces.haveSpreadVsiteForces() = true;
    }

    if (stepWork.computeVirial)
    {
        /* Calculation of the virial must be done after vsites! */
        calc_virial(
                0, mdatoms.homenr, as_rvec_array(x.data()), forceWithShiftForces, vir_force, box, nrnb, &fr, fr.pbcType);
    }
}
//! Spread, compute virial for and sum forces, when necessary
static void postProcessForces(const t_commrec*          cr,
                              int64_t                   step,
                              t_nrnb*                   nrnb,
                              gmx_wallcycle*            wcycle,
                              const matrix              box,
                              ArrayRef<const RVec>      x,
                              ForceOutputs*             forceOutputs,
                              tensor                    vir_force,
                              const t_mdatoms*          mdatoms,
                              const t_forcerec*         fr,
                              gmx::VirtualSitesHandler* vsite,
                              const StepWorkload&       stepWork)
{
    // Extract the final output force buffer, which is also the buffer for forces with shift forces
    ArrayRef<RVec> f = forceOutputs->forceWithShiftForces().force();

    if (forceOutputs->haveForceWithVirial())
    {
        auto& forceWithVirial = forceOutputs->forceWithVirial();

        if (vsite)
        {
            /* Spread the mesh force on virtual sites to the other particles...
             * This is parallelized. MPI communication is performed
             * if the constructing atoms aren't local.
             */
            GMX_ASSERT(!stepWork.computeVirial || f.data() != forceWithVirial.force_.data(),
                       "We need separate force buffers for shift and virial forces when "
                       "computing the virial");
            GMX_ASSERT(!stepWork.computeVirial
                               || forceOutputs->forceWithShiftForces().haveSpreadVsiteForces(),
                       "We should spread the force with shift forces separately when computing "
                       "the virial");
            const gmx::VirtualSitesHandler::VirialHandling virialHandling =
                    (stepWork.computeVirial ? gmx::VirtualSitesHandler::VirialHandling::NonLinear
                                            : gmx::VirtualSitesHandler::VirialHandling::None);
            matrix virial = { { 0 } };
            vsite->spreadForces(x, forceWithVirial.force_, virialHandling, {}, virial, nrnb, box, wcycle);
            forceWithVirial.addVirialContribution(virial);
        }

        if (stepWork.computeVirial)
        {
            /* Now add the forces, this is local */
            sum_forces(f, forceWithVirial.force_);

            /* Add the direct virial contributions */
            GMX_ASSERT(
                    forceWithVirial.computeVirial_,
                    "forceWithVirial should request virial computation when we request the virial");
            m_add(vir_force, forceWithVirial.getVirial(), vir_force);

            if (debug)
            {
                pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
            }
        }
    }
    else
    {
        GMX_ASSERT(vsite == nullptr || forceOutputs->forceWithShiftForces().haveSpreadVsiteForces(),
                   "We should have spread the vsite forces (earlier)");
    }

    if (fr->print_force >= 0)
    {
        print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
    }
}
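
//! Dispatches the non-bonded interaction kernel for \p ilocality, pruning the CPU pair list first on dynamic-pruning steps; returns early when no non-bonded forces are computed this step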
static void do_nb_verlet(t_forcerec*                fr,
                         const interaction_const_t* ic,
                         gmx_enerdata_t*            enerd,
                         const StepWorkload&        stepWork,
                         const InteractionLocality  ilocality,
                         const int                  clearF,
                         const int64_t              step,
                         t_nrnb*                    nrnb,
                         gmx_wallcycle*             wcycle)
{
    if (!stepWork.computeNonbondedForces)
    {
        /* skip non-bonded calculation */
        return;
    }

    nonbonded_verlet_t* nbv = fr->nbv.get();

    /* GPU kernel launch overhead is already timed separately */
    if (!nbv->useGpu())
    {
        /* When dynamic pair-list pruning is requested, we need to prune
         * at nstlistPrune steps.
         */
        if (nbv->isDynamicPruningStepCpu(step))
        {
            /* Prune the pair-list beyond fr->ic->rlistPrune using
             * the current coordinates of the atoms.
             */
            wallcycle_sub_start(wcycle, WallCycleSubCounter::NonbondedPruning);
            nbv->dispatchPruneKernelCpu(ilocality, fr->shift_vec);
            wallcycle_sub_stop(wcycle, WallCycleSubCounter::NonbondedPruning);
        }
    }

    nbv->dispatchNonbondedKernel(
            ilocality,
            *ic,
            stepWork,
            clearF,
            fr->shift_vec,
            enerd->grpp.energyGroupPairTerms[fr->haveBuckingham ? NonBondedEnergyTerms::BuckinghamSR
                                                                : NonBondedEnergyTerms::LJSR],
            enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::CoulombSR],
            nrnb);
}
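
//! Zeroes all elements of \p v, optionally using OpenMP threading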
static inline void clearRVecs(ArrayRef<RVec> v, const bool useOpenmpThreading)
{
    int nth = gmx_omp_nthreads_get_simple_rvec_task(ModuleMultiThread::Default, v.ssize());

    /* Note that we would like to avoid this conditional by putting it
     * into the omp pragma instead, but then we still take the full
     * omp parallel for overhead (at least with gcc5).
     */
    if (!useOpenmpThreading || nth == 1)
    {
        for (RVec& elem : v)
        {
            clear_rvec(elem);
        }
    }
    else
    {
#pragma omp parallel for num_threads(nth) schedule(static)
        for (gmx::index i = 0; i < v.ssize(); i++)
        {
            clear_rvec(v[i]);
        }
    }
}
/*! \brief Return an estimate of the average kinetic energy or 0 when unreliable
 *
 * \param groupOptions  Group options, containing T-coupling options
 */
static real averageKineticEnergyEstimate(const t_grpopts& groupOptions)
{
    real nrdfCoupled   = 0;
    real nrdfUncoupled = 0;
    real kineticEnergy = 0;
    for (int g = 0; g < groupOptions.ngtc; g++)
    {
        if (groupOptions.tau_t[g] >= 0)
        {
            nrdfCoupled += groupOptions.nrdf[g];
            kineticEnergy += groupOptions.nrdf[g] * 0.5 * groupOptions.ref_t[g] * gmx::c_boltz;
        }
        else
        {
            nrdfUncoupled += groupOptions.nrdf[g];
        }
    }

    /* This conditional with > also catches nrdf=0 */
    if (nrdfCoupled > nrdfUncoupled)
    {
        return kineticEnergy * (nrdfCoupled + nrdfUncoupled) / nrdfCoupled;
    }
    else
    {
        return 0;
    }
}
/*! \brief This routine checks that the potential energy is finite.
 *
 * Always checks that the potential energy is finite. If step equals
 * inputrec.init_step also checks that the magnitude of the potential energy
 * is reasonable. Terminates with a fatal error when a check fails.
 * Note that passing this check does not guarantee finite forces,
 * since those use slightly different arithmetic. But in most cases
 * there is just a narrow coordinate range where forces are not finite
 * and energies are finite.
 *
 * \param[in] step      The step number, used for checking and printing
 * \param[in] enerd     The energy data; the non-bonded group energies need to be added to
 *                      \c enerd.term[F_EPOT] before calling this routine
 * \param[in] inputrec  The input record
 */
static void checkPotentialEnergyValidity(int64_t step, const gmx_enerdata_t& enerd, const t_inputrec& inputrec)
{
    /* Threshold valid for comparing absolute potential energy against
     * the kinetic energy. Normally one should not consider absolute
     * potential energy values, but with a factor of one million
     * we should never get false positives.
     */
    constexpr real c_thresholdFactor = 1e6;

    bool energyIsNotFinite    = !std::isfinite(enerd.term[F_EPOT]);
    real averageKineticEnergy = 0;
    /* We only check for large potential energy at the initial step,
     * because that is by far the most likely step for this to occur
     * and because computing the average kinetic energy is not free.
     * Note: nstcalcenergy >> 1 often does not allow catching large energies
     * before they become NaN.
     */
    if (step == inputrec.init_step && EI_DYNAMICS(inputrec.eI))
    {
        averageKineticEnergy = averageKineticEnergyEstimate(inputrec.opts);
    }

    if (energyIsNotFinite
        || (averageKineticEnergy > 0 && enerd.term[F_EPOT] > c_thresholdFactor * averageKineticEnergy))
    {
        gmx_fatal(
                FARGS,
                "Step %" PRId64
                ": The total potential energy is %g, which is %s. The LJ and electrostatic "
                "contributions to the energy are %g and %g, respectively. A %s potential energy "
                "can be caused by overlapping interactions in bonded interactions or very large%s "
                "coordinate values. Usually this is caused by a badly- or non-equilibrated initial "
                "configuration, incorrect interactions or parameters in the topology.",
                step,
                enerd.term[F_EPOT],
                energyIsNotFinite ? "not finite" : "extremely high",
                enerd.term[F_LJ],
                enerd.term[F_COUL_SR],
                energyIsNotFinite ? "non-finite" : "very high",
                energyIsNotFinite ? " or NaN" : "");
    }
}
/*! \brief Return true if there are special forces computed this step.
 *
 * The conditionals exactly correspond to those in computeSpecialForces().
 */
static bool haveSpecialForces(const t_inputrec&          inputrec,
                              const gmx::ForceProviders& forceProviders,
                              const pull_t*              pull_work,
                              const bool                 computeForces,
                              const gmx_edsam*           ed)
{
    return ((computeForces && forceProviders.hasForceProvider()) || // forceProviders
            (inputrec.bPull && pull_have_potential(*pull_work)) ||  // pull
            inputrec.bRot ||                                        // enforced rotation
            (ed != nullptr) ||                                      // flooding
            (inputrec.bIMD && computeForces));                      // IMD
}
/*! \brief Compute forces and/or energies for special algorithms
 *
 * The intention is to collect all calls to algorithms that compute
 * forces on local atoms only and that do not contribute to the local
 * virial sum (but add their virial contribution separately).
 * Eventually these should likely all become ForceProviders.
 * Within this function the intention is to have algorithms that do
 * global communication at the end, so global barriers within the MD loop
 * are as close together as possible.
 *
 * \param[in]     fplog            The log file
 * \param[in]     cr               The communication record
 * \param[in]     inputrec         The input record
 * \param[in]     awh              The Awh module (nullptr if none in use).
 * \param[in]     enforcedRotation Enforced rotation module.
 * \param[in]     imdSession       The IMD session
 * \param[in]     pull_work        The pull work structure.
 * \param[in]     step             The current MD step
 * \param[in]     t                The current time
 * \param[in,out] wcycle           Wallcycle accounting struct
 * \param[in,out] forceProviders   Pointer to a list of force providers
 * \param[in]     box              The unit cell
 * \param[in]     x                The coordinates
 * \param[in]     mdatoms          Per atom properties
 * \param[in]     lambda           Array of free-energy lambda values
 * \param[in]     stepWork         Step schedule flags
 * \param[in,out] forceWithVirialMtsLevel0 Force and virial for MTS level0 forces
 * \param[in,out] forceWithVirialMtsLevel1 Force and virial for MTS level1 forces, can be nullptr
 * \param[in,out] enerd            Energy buffer
 * \param[in,out] ed               Essential dynamics pointer
 * \param[in]     didNeighborSearch Tells if we did neighbor searching this step, used for ED sampling
 *
 * \todo Remove didNeighborSearch, which is used incorrectly.
 * \todo Convert all other algorithms called here to ForceProviders.
 */
static void computeSpecialForces(FILE*                          fplog,
                                 const t_commrec*               cr,
                                 const t_inputrec&              inputrec,
                                 gmx::Awh*                      awh,
                                 gmx_enfrot*                    enforcedRotation,
                                 gmx::ImdSession*               imdSession,
                                 pull_t*                        pull_work,
                                 int64_t                        step,
                                 double                         t,
                                 gmx_wallcycle*                 wcycle,
                                 gmx::ForceProviders*           forceProviders,
                                 const matrix                   box,
                                 gmx::ArrayRef<const gmx::RVec> x,
                                 const t_mdatoms*               mdatoms,
                                 gmx::ArrayRef<const real>      lambda,
                                 const StepWorkload&            stepWork,
                                 gmx::ForceWithVirial*          forceWithVirialMtsLevel0,
                                 gmx::ForceWithVirial*          forceWithVirialMtsLevel1,
                                 gmx_enerdata_t*                enerd,
                                 gmx_edsam*                     ed,
                                 bool                           didNeighborSearch)
{
    /* NOTE: Currently all ForceProviders only provide forces.
     *       When they also provide energies, remove this conditional.
     */
    if (stepWork.computeForces)
    {
        gmx::ForceProviderInput forceProviderInput(
                x,
                mdatoms->homenr,
                gmx::arrayRefFromArray(mdatoms->chargeA, mdatoms->homenr),
                gmx::arrayRefFromArray(mdatoms->massT, mdatoms->homenr),
                t,
                box,
                *cr);
        gmx::ForceProviderOutput forceProviderOutput(forceWithVirialMtsLevel0, enerd);

        /* Collect forces from modules */
        forceProviders->calculateForces(forceProviderInput, &forceProviderOutput);
    }

    if (inputrec.bPull && pull_have_potential(*pull_work))
    {
        const int mtsLevel = forceGroupMtsLevel(inputrec.mtsLevels, gmx::MtsForceGroups::Pull);
        if (mtsLevel == 0 || stepWork.computeSlowForces)
        {
            auto& forceWithVirial = (mtsLevel == 0) ? forceWithVirialMtsLevel0 : forceWithVirialMtsLevel1;
            pull_potential_wrapper(
                    cr, inputrec, box, x, forceWithVirial, mdatoms, enerd, pull_work, lambda.data(), t, wcycle);
        }
    }

    if (awh)
    {
        const int mtsLevel = forceGroupMtsLevel(inputrec.mtsLevels, gmx::MtsForceGroups::Pull);
        if (mtsLevel == 0 || stepWork.computeSlowForces)
        {
            const bool          needForeignEnergyDifferences = awh->needForeignEnergyDifferences(step);
            std::vector<double> foreignLambdaDeltaH, foreignLambdaDhDl;
            if (needForeignEnergyDifferences)
            {
                enerd->foreignLambdaTerms.finalizePotentialContributions(
                        enerd->dvdl_lin, lambda, *inputrec.fepvals);
                std::tie(foreignLambdaDeltaH, foreignLambdaDhDl) = enerd->foreignLambdaTerms.getTerms(cr);
            }

            auto& forceWithVirial = (mtsLevel == 0) ? forceWithVirialMtsLevel0 : forceWithVirialMtsLevel1;
            enerd->term[F_COM_PULL] += awh->applyBiasForcesAndUpdateBias(
                    inputrec.pbcType,
                    gmx::arrayRefFromArray(mdatoms->massT, mdatoms->nr),
                    foreignLambdaDeltaH,
                    foreignLambdaDhDl,
                    box,
                    forceWithVirial,
                    t,
                    step,
                    wcycle,
                    fplog);
        }
    }

    /* Add the forces from enforced rotation potentials (if any) */
    if (inputrec.bRot)
    {
        wallcycle_start(wcycle, WallCycleCounter::RotAdd);
        enerd->term[F_COM_PULL] +=
                add_rot_forces(enforcedRotation, forceWithVirialMtsLevel0->force_, cr, step, t);
        wallcycle_stop(wcycle, WallCycleCounter::RotAdd);
    }

    if (ed)
    {
        /* Note that since init_edsam() is called after the initialization
         * of forcerec, edsam doesn't request the noVirSum force buffer.
         * Thus if no other algorithm (e.g. PME) requires it, the forces
         * here will contribute to the virial.
         */
        do_flood(cr, inputrec, x, forceWithVirialMtsLevel0->force_, ed, box, step, didNeighborSearch);
    }

    /* Add forces from interactive molecular dynamics (IMD), if any */
    if (inputrec.bIMD && stepWork.computeForces)
    {
        imdSession->applyForces(forceWithVirialMtsLevel0->force_);
    }
}
/*! \brief Launch the prepare_step and spread stages of PME GPU.
 *
 * \param[in]  pmedata        The PME structure
 * \param[in]  box            The box matrix
 * \param[in]  stepWork       Step schedule flags
 * \param[in]  xReadyOnDevice Event synchronizer indicating that the coordinates are ready in the device memory.
 * \param[in]  lambdaQ        The Coulomb lambda of the current state.
 * \param[in]  wcycle         The wallcycle structure
 */
static inline void launchPmeGpuSpread(gmx_pme_t*            pmedata,
                                      const matrix          box,
                                      const StepWorkload&   stepWork,
                                      GpuEventSynchronizer* xReadyOnDevice,
                                      const real            lambdaQ,
                                      gmx_wallcycle*        wcycle)
{
    pme_gpu_prepare_computation(pmedata, box, wcycle, stepWork);
    bool                           useGpuDirectComm         = false;
    gmx::PmeCoordinateReceiverGpu* pmeCoordinateReceiverGpu = nullptr;
    pme_gpu_launch_spread(
            pmedata, xReadyOnDevice, wcycle, lambdaQ, useGpuDirectComm, pmeCoordinateReceiverGpu);
}
/*! \brief Launch the FFT and gather stages of PME GPU
 *
 * This function only implements setting the output forces (no accumulation).
 *
 * \param[in] pmedata  The PME structure
 * \param[in] lambdaQ  The Coulomb lambda of the current system state.
 * \param[in] wcycle   The wallcycle structure
 * \param[in] stepWork Step schedule flags
 */
static void launchPmeGpuFftAndGather(gmx_pme_t*               pmedata,
                                     const real               lambdaQ,
                                     gmx_wallcycle*           wcycle,
                                     const gmx::StepWorkload& stepWork)
{
    pme_gpu_launch_complex_transforms(pmedata, wcycle, stepWork);
    pme_gpu_launch_gather(pmedata, wcycle, lambdaQ);
}
/*! \brief
 * Polling wait for either of the PME or nonbonded GPU tasks.
 *
 * Instead of a static order in waiting for GPU tasks, this function
 * polls checking which of the two tasks completes first, and does the
 * associated force buffer reduction overlapped with the other task.
 * By doing that, unlike static scheduling order, it can always overlap
 * one of the reductions, regardless of the GPU task completion order.
 *
 * \param[in]     nbv                   Nonbonded verlet structure
 * \param[in,out] pmedata               PME module data
 * \param[in,out] forceOutputsNonbonded Force outputs for the non-bonded forces and shift forces
 * \param[in,out] forceOutputsPme       Force outputs for the PME forces and virial
 * \param[in,out] enerd                 Energy data structure results are reduced into
 * \param[in]     lambdaQ               The Coulomb lambda of the current system state.
 * \param[in]     stepWork              Step schedule flags
 * \param[in]     wcycle                The wallcycle structure
 */
static void alternatePmeNbGpuWaitReduce(nonbonded_verlet_t* nbv,
                                        gmx_pme_t*          pmedata,
                                        gmx::ForceOutputs*  forceOutputsNonbonded,
                                        gmx::ForceOutputs*  forceOutputsPme,
                                        gmx_enerdata_t*     enerd,
                                        const real          lambdaQ,
                                        const StepWorkload& stepWork,
                                        gmx_wallcycle*      wcycle)
{
    bool isPmeGpuDone = false;
    bool isNbGpuDone  = false;

    gmx::ArrayRef<const gmx::RVec> pmeGpuForces;

    while (!isPmeGpuDone || !isNbGpuDone)
    {
        if (!isPmeGpuDone)
        {
            GpuTaskCompletion completionType =
                    (isNbGpuDone) ? GpuTaskCompletion::Wait : GpuTaskCompletion::Check;
            isPmeGpuDone = pme_gpu_try_finish_task(
                    pmedata, stepWork, wcycle, &forceOutputsPme->forceWithVirial(), enerd, lambdaQ, completionType);
        }

        if (!isNbGpuDone)
        {
            auto&             forceBuffersNonbonded = forceOutputsNonbonded->forceWithShiftForces();
            GpuTaskCompletion completionType =
                    (isPmeGpuDone) ? GpuTaskCompletion::Wait : GpuTaskCompletion::Check;
            isNbGpuDone = Nbnxm::gpu_try_finish_task(
                    nbv->gpu_nbv,
                    stepWork,
                    AtomLocality::Local,
                    enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::LJSR].data(),
                    enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::CoulombSR].data(),
                    forceBuffersNonbonded.shiftForces(),
                    completionType,
                    wcycle);

            if (isNbGpuDone)
            {
                nbv->atomdata_add_nbat_f_to_f(AtomLocality::Local, forceBuffersNonbonded.force());
            }
        }
    }
}
/*! \brief Set up the different force buffers; also does clearing.
 *
 * \param[in]  forceHelperBuffers        Helper force buffers
 * \param[in]  force                     force array
 * \param[in]  domainWork                Domain lifetime workload flags
 * \param[in]  stepWork                  Step schedule flags
 * \param[in]  havePpDomainDecomposition Whether we have a PP domain decomposition
 * \param[out] wcycle                    wallcycle recording structure
 *
 * \returns                              Cleared force output structure
 */
static ForceOutputs setupForceOutputs(ForceHelperBuffers*                 forceHelperBuffers,
                                      gmx::ArrayRefWithPadding<gmx::RVec> force,
                                      const DomainLifetimeWorkload&       domainWork,
                                      const StepWorkload&                 stepWork,
                                      const bool                          havePpDomainDecomposition,
                                      gmx_wallcycle*                      wcycle)
{
    wallcycle_sub_start(wcycle, WallCycleSubCounter::ClearForceBuffer);

    /* NOTE: We assume fr->shiftForces is all zeros here */
    gmx::ForceWithShiftForces forceWithShiftForces(
            force, stepWork.computeVirial, forceHelperBuffers->shiftForces());

    if (stepWork.computeForces
        && (domainWork.haveCpuLocalForceWork || !stepWork.useGpuFBufferOps
            || (havePpDomainDecomposition && !stepWork.useGpuFHalo)))
    {
        /* Clear the short- and long-range forces */
        clearRVecs(forceWithShiftForces.force(), true);

        /* Clear the shift forces */
        clearRVecs(forceWithShiftForces.shiftForces(), false);
    }

    /* If we need to compute the virial, we might need a separate
     * force buffer for algorithms for which the virial is calculated
     * directly, such as PME. Otherwise, forceWithVirial uses the
     * same force (f in legacy calls) buffer as other algorithms.
     */
    const bool useSeparateForceWithVirialBuffer =
            (stepWork.computeForces
             && (stepWork.computeVirial && forceHelperBuffers->haveDirectVirialContributions()));
    /* forceWithVirial uses the local atom range only */
    gmx::ForceWithVirial forceWithVirial(
            useSeparateForceWithVirialBuffer ? forceHelperBuffers->forceBufferForDirectVirialContributions()
                                             : force.unpaddedArrayRef(),
            stepWork.computeVirial);

    if (useSeparateForceWithVirialBuffer)
    {
        /* TODO: update comment
         * We only compute forces on local atoms. Note that vsites can
         * spread to non-local atoms, but that part of the buffer is
         * cleared separately in the vsite spreading code.
         */
        clearRVecs(forceWithVirial.force_, true);
    }

    wallcycle_sub_stop(wcycle, WallCycleSubCounter::ClearForceBuffer);

    return ForceOutputs(
            forceWithShiftForces, forceHelperBuffers->haveDirectVirialContributions(), forceWithVirial);
}
/*! \brief Set up flags that have the lifetime of the domain indicating what type of work is there to compute.
 */
static DomainLifetimeWorkload setupDomainLifetimeWorkload(const t_inputrec&         inputrec,
                                                          const t_forcerec&         fr,
                                                          const pull_t*             pull_work,
                                                          const gmx_edsam*          ed,
                                                          const t_mdatoms&          mdatoms,
                                                          const SimulationWorkload& simulationWork,
                                                          const StepWorkload&       stepWork)
{
    DomainLifetimeWorkload domainWork;
    // Note that haveSpecialForces is constant over the whole run
    domainWork.haveSpecialForces =
            haveSpecialForces(inputrec, *fr.forceProviders, pull_work, stepWork.computeForces, ed);
    domainWork.haveCpuListedForceWork = false;
    domainWork.haveCpuBondedWork      = false;
    for (const auto& listedForces : fr.listedForces)
    {
        if (listedForces.haveCpuListedForces(*fr.fcdata))
        {
            domainWork.haveCpuListedForceWork = true;
        }
        if (listedForces.haveCpuBondeds())
        {
            domainWork.haveCpuBondedWork = true;
        }
    }
    domainWork.haveGpuBondedWork =
            ((fr.listedForcesGpu != nullptr) && fr.listedForcesGpu->haveInteractions());
    // Note that haveFreeEnergyWork is constant over the whole run
    domainWork.haveFreeEnergyWork =
            (fr.efep != FreeEnergyPerturbationType::No && mdatoms.nPerturbed != 0);
    // We assume we have local force work if there are CPU
    // force tasks including PME or nonbondeds.
    domainWork.haveCpuLocalForceWork =
            domainWork.haveSpecialForces || domainWork.haveCpuListedForceWork
            || domainWork.haveFreeEnergyWork || simulationWork.useCpuNonbonded || simulationWork.useCpuPme
            || simulationWork.haveEwaldSurfaceContribution || inputrec.nwall > 0;
    domainWork.haveLocalForceContribInCpuBuffer =
            domainWork.haveCpuLocalForceWork || simulationWork.havePpDomainDecomposition;
    domainWork.haveNonLocalForceContribInCpuBuffer =
            domainWork.haveCpuBondedWork || domainWork.haveFreeEnergyWork;

    return domainWork;
}
/*! \brief Set up force flag struct from the force bitmask.
 *
 * \param[in] legacyFlags    Force bitmask flags used to construct the new flags
 * \param[in] mtsLevels      The multiple time-stepping levels, either empty or 2 levels
 * \param[in] step           The current MD step
 * \param[in] simulationWork Simulation workload description.
 *
 * \returns New StepWorkload description.
 */
static StepWorkload setupStepWorkload(const int                     legacyFlags,
                                      ArrayRef<const gmx::MtsLevel> mtsLevels,
                                      const int64_t                 step,
                                      const SimulationWorkload&     simulationWork)
{
    GMX_ASSERT(mtsLevels.empty() || mtsLevels.size() == 2, "Expect 0 or 2 MTS levels");
    const bool computeSlowForces = (mtsLevels.empty() || step % mtsLevels[1].stepFactor == 0);
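    // With multiple time stepping the slow (level1) force components are
    // evaluated only every mtsLevels[1].stepFactor-th step; the slow-force
    // related flags set below are all conditioned on computeSlowForces.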
    StepWorkload flags;
    flags.stateChanged                  = ((legacyFlags & GMX_FORCE_STATECHANGED) != 0);
    flags.haveDynamicBox                = ((legacyFlags & GMX_FORCE_DYNAMICBOX) != 0);
    flags.doNeighborSearch              = ((legacyFlags & GMX_FORCE_NS) != 0);
    flags.computeSlowForces             = computeSlowForces;
    flags.computeVirial                 = ((legacyFlags & GMX_FORCE_VIRIAL) != 0);
    flags.computeEnergy                 = ((legacyFlags & GMX_FORCE_ENERGY) != 0);
    flags.computeForces                 = ((legacyFlags & GMX_FORCE_FORCES) != 0);
    flags.useOnlyMtsCombinedForceBuffer = ((legacyFlags & GMX_FORCE_DO_NOT_NEED_NORMAL_FORCE) != 0);
    flags.computeListedForces           = ((legacyFlags & GMX_FORCE_LISTED) != 0);
    flags.computeNonbondedForces =
            ((legacyFlags & GMX_FORCE_NONBONDED) != 0) && simulationWork.computeNonbonded
            && !(simulationWork.computeNonbondedAtMtsLevel1 && !computeSlowForces);
    flags.computeDhdl = ((legacyFlags & GMX_FORCE_DHDL) != 0);

    if (simulationWork.useGpuXBufferOps || simulationWork.useGpuFBufferOps)
    {
        GMX_ASSERT(simulationWork.useGpuNonbonded,
                   "Can only offload buffer ops if nonbonded computation is also offloaded");
    }
    flags.useGpuXBufferOps = simulationWork.useGpuXBufferOps && !flags.doNeighborSearch;
    // on virial steps the CPU reduction path is taken
    flags.useGpuFBufferOps       = simulationWork.useGpuFBufferOps && !flags.computeVirial;
    const bool rankHasGpuPmeTask = simulationWork.useGpuPme && !simulationWork.haveSeparatePmeRank;
    flags.useGpuPmeFReduction    = flags.computeSlowForces && flags.useGpuFBufferOps
                                && (rankHasGpuPmeTask || simulationWork.useGpuPmePpCommunication);
    flags.useGpuXHalo          = simulationWork.useGpuHaloExchange && !flags.doNeighborSearch;
    flags.useGpuFHalo          = simulationWork.useGpuHaloExchange && flags.useGpuFBufferOps;
    flags.haveGpuPmeOnThisRank = rankHasGpuPmeTask && flags.computeSlowForces;
    flags.combineMtsForcesBeforeHaloExchange =
            (flags.computeForces && simulationWork.useMts && flags.computeSlowForces
             && flags.useOnlyMtsCombinedForceBuffer
             && !(flags.computeVirial || simulationWork.useGpuNonbonded || flags.haveGpuPmeOnThisRank));

    return flags;
}
/*! \brief Launch end-of-step GPU tasks: buffer clearing and rolling pruning.
 */
static void launchGpuEndOfStepTasks(nonbonded_verlet_t*               nbv,
                                    gmx::ListedForcesGpu*             listedForcesGpu,
                                    gmx_pme_t*                        pmedata,
                                    gmx_enerdata_t*                   enerd,
                                    const gmx::MdrunScheduleWorkload& runScheduleWork,
                                    int64_t                           step,
                                    gmx_wallcycle*                    wcycle)
{
    if (runScheduleWork.simulationWork.useGpuNonbonded && runScheduleWork.stepWork.computeNonbondedForces)
    {
        /* Launch pruning before buffer clearing because the API overhead of the
         * clear kernel launches can leave the GPU idle while it could be running
         * the prune kernel.
         */
        if (nbv->isDynamicPruningStepGpu(step))
        {
            nbv->dispatchPruneKernelGpu(step);
        }

        /* now clear the GPU outputs while we finish the step on the CPU */
        wallcycle_start_nocount(wcycle, WallCycleCounter::LaunchGpu);
        wallcycle_sub_start_nocount(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
        Nbnxm::gpu_clear_outputs(nbv->gpu_nbv, runScheduleWork.stepWork.computeVirial);
        wallcycle_sub_stop(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
        wallcycle_stop(wcycle, WallCycleCounter::LaunchGpu);
    }

    if (runScheduleWork.stepWork.haveGpuPmeOnThisRank)
    {
        pme_gpu_reinit_computation(pmedata, wcycle);
    }

    if (runScheduleWork.domainWork.haveGpuBondedWork && runScheduleWork.stepWork.computeEnergy)
    {
        // in principle this should be included in the DD balancing region,
        // but generally it is infrequent so we'll omit it for the sake of
        // clarity
        listedForcesGpu->waitAccumulateEnergyTerms(enerd);

        listedForcesGpu->clearEnergies();
    }
}
//! \brief Data structure to hold dipole-related data and staging arrays
struct DipoleData
{
    //! Dipole staging for fast summing over MPI
    gmx::DVec muStaging[2] = { { 0.0, 0.0, 0.0 } };
    //! Dipole staging for states A and B (index 0 and 1 resp.)
    gmx::RVec muStateAB[2] = { { 0.0_real, 0.0_real, 0.0_real } };
};
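
//! Reduces the dipole staging buffers over MPI when running in parallel and sets \p muTotal, interpolating between the A and B states with the Coulomb lambda for free-energy runs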
static void reduceAndUpdateMuTot(DipoleData*                   dipoleData,
                                 const t_commrec*              cr,
                                 const bool                    haveFreeEnergy,
                                 gmx::ArrayRef<const real>     lambda,
                                 rvec                          muTotal,
                                 const DDBalanceRegionHandler& ddBalanceRegionHandler)
{
    if (PAR(cr))
    {
        gmx_sumd(2 * DIM, dipoleData->muStaging[0], cr);
        ddBalanceRegionHandler.reopenRegionCpu();
    }
    for (int i = 0; i < 2; i++)
    {
        for (int j = 0; j < DIM; j++)
        {
            dipoleData->muStateAB[i][j] = dipoleData->muStaging[i][j];
        }
    }

    if (!haveFreeEnergy)
    {
        copy_rvec(dipoleData->muStateAB[0], muTotal);
    }
    else
    {
        for (int j = 0; j < DIM; j++)
        {
            muTotal[j] = (1.0 - lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)])
                                 * dipoleData->muStateAB[0][j]
                         + lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)]
                                   * dipoleData->muStateAB[1][j];
        }
    }
}
/*! \brief Combines MTS level0 and level1 force buffers into a full and MTS-combined force buffer.
 *
 * \param[in]     numAtoms        The number of atoms to combine forces for
 * \param[in,out] forceMtsLevel0  Input: F_level0, output: F_level0 + F_level1
 * \param[in,out] forceMts        Input: F_level1, output: F_level0 + mtsFactor * F_level1
 * \param[in]     mtsFactor       The factor between the level0 and level1 time step
 */
static void combineMtsForces(const int      numAtoms,
                             ArrayRef<RVec> forceMtsLevel0,
                             ArrayRef<RVec> forceMts,
                             const real     mtsFactor)
{
    const int gmx_unused numThreads = gmx_omp_nthreads_get(ModuleMultiThread::Default);
#pragma omp parallel for num_threads(numThreads) schedule(static)
    for (int i = 0; i < numAtoms; i++)
    {
        const RVec forceMtsLevel0Tmp = forceMtsLevel0[i];
        forceMtsLevel0[i] += forceMts[i];
        forceMts[i] = forceMtsLevel0Tmp + mtsFactor * forceMts[i];
    }
}
/*! \brief Setup for the local GPU force reduction:
 * reinitialization plus the registration of forces and dependencies.
 *
 * \param [in] runScheduleWork   Schedule workload flag structure
 * \param [in] nbv               Non-bonded Verlet object
 * \param [in] stateGpu          GPU state propagator object
 * \param [in] gpuForceReduction GPU force reduction object
 * \param [in] pmePpCommGpu      PME-PP GPU communication object
 * \param [in] pmedata           PME data object
 * \param [in] dd                Domain decomposition object
 */
static void setupLocalGpuForceReduction(const gmx::MdrunScheduleWorkload* runScheduleWork,
                                        const nonbonded_verlet_t*         nbv,
                                        gmx::StatePropagatorDataGpu*      stateGpu,
                                        gmx::GpuForceReduction*           gpuForceReduction,
                                        gmx::PmePpCommGpu*                pmePpCommGpu,
                                        const gmx_pme_t*                  pmedata,
                                        const gmx_domdec_t*               dd)
{
    GMX_ASSERT(!runScheduleWork->simulationWork.useMts,
               "GPU force reduction is not compatible with MTS");

    // (re-)initialize local GPU force reduction
    const bool accumulate = runScheduleWork->domainWork.haveCpuLocalForceWork
                            || runScheduleWork->simulationWork.havePpDomainDecomposition;
    const int atomStart = 0;
    gpuForceReduction->reinit(stateGpu->getForces(),
                              nbv->getNumAtoms(AtomLocality::Local),
                              nbv->getGridIndices(),
                              atomStart,
                              accumulate,
                              stateGpu->fReducedOnDevice(AtomLocality::Local));

    // register forces and add dependencies
    gpuForceReduction->registerNbnxmForce(Nbnxm::gpu_get_f(nbv->gpu_nbv));

    DeviceBuffer<gmx::RVec> pmeForcePtr;
    GpuEventSynchronizer*   pmeSynchronizer     = nullptr;
    bool                    havePmeContribution = false;

    if (runScheduleWork->simulationWork.useGpuPme && !runScheduleWork->simulationWork.haveSeparatePmeRank)
    {
        pmeForcePtr         = pme_gpu_get_device_f(pmedata);
        pmeSynchronizer     = pme_gpu_get_f_ready_synchronizer(pmedata);
        havePmeContribution = true;
    }
    else if (runScheduleWork->simulationWork.useGpuPmePpCommunication)
    {
        pmeForcePtr = pmePpCommGpu->getGpuForceStagingPtr();
        if (GMX_THREAD_MPI)
        {
            pmeSynchronizer = pmePpCommGpu->getForcesReadySynchronizer();
        }
        havePmeContribution = true;
    }

    if (havePmeContribution)
    {
        gpuForceReduction->registerRvecForce(pmeForcePtr);
        if (!runScheduleWork->simulationWork.useGpuPmePpCommunication || GMX_THREAD_MPI)
        {
            GMX_ASSERT(pmeSynchronizer != nullptr, "PME force ready cuda event should not be NULL");
            gpuForceReduction->addDependency(pmeSynchronizer);
        }
    }

    if (runScheduleWork->domainWork.haveCpuLocalForceWork
        || (runScheduleWork->simulationWork.havePpDomainDecomposition
            && !runScheduleWork->simulationWork.useGpuHaloExchange))
    {
        gpuForceReduction->addDependency(stateGpu->fReadyOnDevice(AtomLocality::Local));
    }

    if (runScheduleWork->simulationWork.useGpuHaloExchange)
    {
        gpuForceReduction->addDependency(dd->gpuHaloExchange[0][0]->getForcesReadyOnDeviceEvent());
    }
}
/*! \brief Setup for the non-local GPU force reduction:
 * reinitialization plus the registration of forces and dependencies.
 *
 * \param [in] runScheduleWork   Schedule workload flag structure
 * \param [in] nbv               Non-bonded Verlet object
 * \param [in] stateGpu          GPU state propagator object
 * \param [in] gpuForceReduction GPU force reduction object
 * \param [in] dd                Domain decomposition object
 */
static void setupNonLocalGpuForceReduction(const gmx::MdrunScheduleWorkload* runScheduleWork,
                                           const nonbonded_verlet_t*         nbv,
                                           gmx::StatePropagatorDataGpu*      stateGpu,
                                           gmx::GpuForceReduction*           gpuForceReduction,
                                           const gmx_domdec_t*               dd)
{
    // (re-)initialize non-local GPU force reduction
    const bool accumulate = runScheduleWork->domainWork.haveCpuBondedWork
                            || runScheduleWork->domainWork.haveFreeEnergyWork;
    const int atomStart = dd_numHomeAtoms(*dd);
    gpuForceReduction->reinit(stateGpu->getForces(),
                              nbv->getNumAtoms(AtomLocality::NonLocal),
                              nbv->getGridIndices(),
                              atomStart,
                              accumulate,
                              stateGpu->fReducedOnDevice(AtomLocality::NonLocal));

    // register forces and add dependencies
    gpuForceReduction->registerNbnxmForce(Nbnxm::gpu_get_f(nbv->gpu_nbv));

    if (runScheduleWork->domainWork.haveNonLocalForceContribInCpuBuffer)
    {
        gpuForceReduction->addDependency(stateGpu->fReadyOnDevice(AtomLocality::NonLocal));
    }
}
/*! \brief Return the number of local atoms.
 */
static int getLocalAtomCount(const gmx_domdec_t* dd, const t_mdatoms& mdatoms, bool havePPDomainDecomposition)
{
    GMX_ASSERT(!(havePPDomainDecomposition && (dd == nullptr)),
               "Can't have PP decomposition with dd uninitialized!");
    return havePPDomainDecomposition ? dd_numAtomsZones(*dd) : mdatoms.homenr;
}
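
/* Main force-computation routine for a single MD step: schedules CPU and GPU
 * task launches, halo exchange, PME, listed and special forces, and the
 * subsequent reductions. The parameters are documented at the declaration
 * (mdlib/force.h).
 */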
void do_force(FILE*                               fplog,
              const t_commrec*                    cr,
              const gmx_multisim_t*               ms,
              const t_inputrec&                   inputrec,
              gmx::Awh*                           awh,
              gmx_enfrot*                         enforcedRotation,
              gmx::ImdSession*                    imdSession,
              pull_t*                             pull_work,
              int64_t                             step,
              t_nrnb*                             nrnb,
              gmx_wallcycle*                      wcycle,
              const gmx_localtop_t*               top,
              const matrix                        box,
              gmx::ArrayRefWithPadding<gmx::RVec> x,
              const history_t*                    hist,
              gmx::ForceBuffersView*              forceView,
              tensor                              vir_force,
              const t_mdatoms*                    mdatoms,
              gmx_enerdata_t*                     enerd,
              gmx::ArrayRef<const real>           lambda,
              t_forcerec*                         fr,
              gmx::MdrunScheduleWorkload*         runScheduleWork,
              gmx::VirtualSitesHandler*           vsite,
              rvec                                muTotal,
              double                              t,
              gmx_edsam*                          ed,
              CpuPpLongRangeNonbondeds*           longRangeNonbondeds,
              int                                 legacyFlags,
              const DDBalanceRegionHandler&       ddBalanceRegionHandler)
{
    auto force = forceView->forceWithPadding();
    GMX_ASSERT(force.unpaddedArrayRef().ssize() >= fr->natoms_force_constr,
               "The size of the force buffer should be at least the number of atoms to compute "
               "forces for");

    nonbonded_verlet_t*  nbv = fr->nbv.get();
    interaction_const_t* ic  = fr->ic.get();

    gmx::StatePropagatorDataGpu* stateGpu = fr->stateGpu;

    const SimulationWorkload& simulationWork = runScheduleWork->simulationWork;

    runScheduleWork->stepWork = setupStepWorkload(legacyFlags, inputrec.mtsLevels, step, simulationWork);
    const StepWorkload& stepWork = runScheduleWork->stepWork;
    if (stepWork.useGpuFHalo && !runScheduleWork->domainWork.haveCpuLocalForceWork)
    {
        // GPU Force halo exchange will set a subset of local atoms with remote non-local data.
        // First clear the local portion of the force array, so that untouched atoms are zero.
        // The dependency for this is that forces from the previous timestep have been consumed,
        // which is satisfied when getCoordinatesReadyOnDeviceEvent has been marked.
        stateGpu->clearForcesOnGpu(AtomLocality::Local,
                                   stateGpu->getCoordinatesReadyOnDeviceEvent(
                                           AtomLocality::Local, simulationWork, stepWork));
    }

    /* At a search step we need to start the first balancing region
     * somewhere early inside the step after communication during domain
     * decomposition (and not during the previous step as usual).
     */
    if (stepWork.doNeighborSearch)
    {
        ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::yes);
    }
    clear_mat(vir_force);

    if (fr->pbcType != PbcType::No)
    {
        /* Compute shift vectors every step,
         * because of pressure coupling or box deformation!
         */
        if (stepWork.haveDynamicBox && stepWork.stateChanged)
        {
            calc_shifts(box, fr->shift_vec);
        }

        const bool fillGrid = (stepWork.doNeighborSearch && stepWork.stateChanged);
        const bool calcCGCM = (fillGrid && !haveDDAtomOrdering(*cr));
        if (calcCGCM)
        {
            put_atoms_in_box_omp(fr->pbcType,
                                 box,
                                 x.unpaddedArrayRef().subArray(0, mdatoms->homenr),
                                 gmx_omp_nthreads_get(ModuleMultiThread::Default));
            inc_nrnb(nrnb, eNR_SHIFTX, mdatoms->homenr);
        }
    }

    nbnxn_atomdata_copy_shiftvec(stepWork.haveDynamicBox, fr->shift_vec, nbv->nbat.get());
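
    // Decide whether coordinates for the separate PME ranks are sent directly
    // from GPU memory: direct GPU communication is used on non-search steps,
    // while on search steps the PME-PP communication setup is (re)initialized.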
    const bool pmeSendCoordinatesFromGpu =
            simulationWork.useGpuPmePpCommunication && !(stepWork.doNeighborSearch);
    const bool reinitGpuPmePpComms =
            simulationWork.useGpuPmePpCommunication && (stepWork.doNeighborSearch);

    auto* localXReadyOnDevice = (stepWork.haveGpuPmeOnThisRank || simulationWork.useGpuXBufferOps)
                                        ? stateGpu->getCoordinatesReadyOnDeviceEvent(
                                                AtomLocality::Local, simulationWork, stepWork)
                                        : nullptr;

    GMX_ASSERT(simulationWork.useGpuHaloExchange
                       == ((cr->dd != nullptr) && (!cr->dd->gpuHaloExchange[0].empty())),
               "The GPU halo exchange is active, but it has not been constructed.");
    bool gmx_used_in_debug haveCopiedXFromGpu = false;
    // Copy coordinates from the GPU if update is on the GPU and there
    // are forces to be computed on the CPU, or for the computation of
    // virial, or if host-side data will be transferred from this task
    // to a remote task for halo exchange or PME-PP communication. At
    // search steps the current coordinates are already on the host,
    // hence copy is not needed.
    if (simulationWork.useGpuUpdate && !stepWork.doNeighborSearch
        && (runScheduleWork->domainWork.haveCpuLocalForceWork || stepWork.computeVirial
            || simulationWork.useCpuPmePpCommunication || simulationWork.useCpuHaloExchange
            || simulationWork.computeMuTot))
    {
        stateGpu->copyCoordinatesFromGpu(x.unpaddedArrayRef(), AtomLocality::Local);
        haveCopiedXFromGpu = true;
    }
    if (stepWork.doNeighborSearch
        && (stepWork.haveGpuPmeOnThisRank || simulationWork.useGpuXBufferOps || simulationWork.useGpuFBufferOps))
    {
        // TODO refactor this to do_md, after partitioning.
        stateGpu->reinit(mdatoms->homenr,
                         getLocalAtomCount(cr->dd, *mdatoms, simulationWork.havePpDomainDecomposition));
        if (stepWork.haveGpuPmeOnThisRank)
        {
            // TODO: This should be moved into PME setup function ( pme_gpu_prepare_computation(...) )
            pme_gpu_set_device_x(fr->pmedata, stateGpu->getCoordinates());
        }
    }

    // Coordinates on the device are needed if PME or BufferOps are offloaded.
    // The local coordinates can be copied right away.
    // NOTE: Consider moving this copy to right after they are updated and constrained,
    //       if the latter is not offloaded.
    if (stepWork.haveGpuPmeOnThisRank || stepWork.useGpuXBufferOps)
    {
        // We need to copy coordinates when:
        //   1. Update is not offloaded
        //   2. The buffers were reinitialized on search step
        if (!simulationWork.useGpuUpdate || stepWork.doNeighborSearch)
        {
            GMX_ASSERT(stateGpu != nullptr, "stateGpu should not be null");
            stateGpu->copyCoordinatesToGpu(x.unpaddedArrayRef(), AtomLocality::Local);
            if (stepWork.doNeighborSearch)
            {
                /* On NS steps, we skip X buffer ops. So, unless we use PME or direct GPU
                 * communications, we don't wait for the coordinates on the device,
                 * and we must consume the event here.
                 */
                const bool eventWillBeConsumedByGpuPme = stepWork.haveGpuPmeOnThisRank;
                const bool eventWillBeConsumedByGpuPmePPComm =
                        (simulationWork.haveSeparatePmeRank && stepWork.computeSlowForces)
                        && pmeSendCoordinatesFromGpu;
                if (!eventWillBeConsumedByGpuPme && !eventWillBeConsumedByGpuPmePPComm)
                {
                    stateGpu->consumeCoordinatesCopiedToDeviceEvent(AtomLocality::Local);
                }
            }
        }
    }
    if (simulationWork.haveSeparatePmeRank && stepWork.computeSlowForces)
    {
        /* Send particle coordinates to the pme nodes */
        if (!pmeSendCoordinatesFromGpu && !stepWork.doNeighborSearch && simulationWork.useGpuUpdate)
        {
            GMX_ASSERT(haveCopiedXFromGpu,
                       "a wait should only be triggered if copy has been scheduled");
            stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
        }

        gmx_pme_send_coordinates(fr,
                                 cr,
                                 box,
                                 x.unpaddedArrayRef(),
                                 lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)],
                                 lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Vdw)],
                                 (stepWork.computeVirial || stepWork.computeEnergy),
                                 step,
                                 simulationWork.useGpuPmePpCommunication,
                                 reinitGpuPmePpComms,
                                 pmeSendCoordinatesFromGpu,
                                 stepWork.useGpuPmeFReduction,
                                 localXReadyOnDevice,
                                 wcycle);
    }

    if (stepWork.haveGpuPmeOnThisRank)
    {
        launchPmeGpuSpread(fr->pmedata,
                           box,
                           stepWork,
                           localXReadyOnDevice,
                           lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)],
                           wcycle);
    }
    const gmx::DomainLifetimeWorkload& domainWork = runScheduleWork->domainWork;

    /* do gridding for pair search */
    if (stepWork.doNeighborSearch)
    {
        if (fr->wholeMoleculeTransform && stepWork.stateChanged)
        {
            fr->wholeMoleculeTransform->updateForAtomPbcJumps(x.unpaddedArrayRef(), box);
        }

        wallcycle_start(wcycle, WallCycleCounter::NS);
        if (!haveDDAtomOrdering(*cr))
        {
            const rvec vzero       = { 0.0_real, 0.0_real, 0.0_real };
            const rvec boxDiagonal = { box[XX][XX], box[YY][YY], box[ZZ][ZZ] };
            wallcycle_sub_start(wcycle, WallCycleSubCounter::NBSGridLocal);
            nbnxn_put_on_grid(nbv,
                              box,
                              0,
                              vzero,
                              boxDiagonal,
                              nullptr,
                              { 0, mdatoms->homenr },
                              -1,
                              fr->atomInfo,
                              x.unpaddedArrayRef(),
                              0,
                              nullptr);
            wallcycle_sub_stop(wcycle, WallCycleSubCounter::NBSGridLocal);
        }
        else
        {
            wallcycle_sub_start(wcycle, WallCycleSubCounter::NBSGridNonLocal);
            nbnxn_put_on_grid_nonlocal(nbv, domdec_zones(cr->dd), fr->atomInfo, x.unpaddedArrayRef());
            wallcycle_sub_stop(wcycle, WallCycleSubCounter::NBSGridNonLocal);
        }

        nbv->setAtomProperties(gmx::constArrayRefFromArray(mdatoms->typeA, mdatoms->nr),
                               gmx::constArrayRefFromArray(mdatoms->chargeA, mdatoms->nr),
                               fr->atomInfo);

        wallcycle_stop(wcycle, WallCycleCounter::NS);
        /* initialize the GPU nbnxm atom data and bonded data structures */
        if (simulationWork.useGpuNonbonded)
        {
            // Note: cycle counting only nonbondeds, GPU listed forces counts internally
            wallcycle_start_nocount(wcycle, WallCycleCounter::LaunchGpu);
            wallcycle_sub_start_nocount(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
            Nbnxm::gpu_init_atomdata(nbv->gpu_nbv, nbv->nbat.get());
            wallcycle_sub_stop(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
            wallcycle_stop(wcycle, WallCycleCounter::LaunchGpu);

            if (fr->listedForcesGpu)
            {
                /* Now we put all atoms on the grid, we can assign bonded
                 * interactions to the GPU, where the grid order is
                 * needed. Also the xq, f and fshift device buffers have
                 * been reallocated if needed, so the bonded code can
                 * learn about them. */
                // TODO the xq, f, and fshift buffers are now shared
                // resources, so they should be maintained by a
                // higher-level object than the nb module.
                fr->listedForcesGpu->updateInteractionListsAndDeviceBuffers(
                        nbv->getGridIndices(),
                        top->idef,
                        Nbnxm::gpu_get_xq(nbv->gpu_nbv),
                        Nbnxm::gpu_get_f(nbv->gpu_nbv),
                        Nbnxm::gpu_get_fshift(nbv->gpu_nbv));
            }
        }

        // Need to run after the GPU-offload bonded interaction lists
        // are set up to be able to determine whether there is bonded work.
        runScheduleWork->domainWork = setupDomainLifetimeWorkload(
                inputrec, *fr, pull_work, ed, *mdatoms, simulationWork, stepWork);

        wallcycle_start_nocount(wcycle, WallCycleCounter::NS);
        wallcycle_sub_start(wcycle, WallCycleSubCounter::NBSSearchLocal);
        /* Note that with a GPU the launch overhead of the list transfer is not timed separately */
        nbv->constructPairlist(InteractionLocality::Local, top->excls, step, nrnb);

        nbv->setupGpuShortRangeWork(fr->listedForcesGpu.get(), InteractionLocality::Local);

        wallcycle_sub_stop(wcycle, WallCycleSubCounter::NBSSearchLocal);
        wallcycle_stop(wcycle, WallCycleCounter::NS);

        if (simulationWork.useGpuXBufferOps)
        {
            nbv->atomdata_init_copy_x_to_nbat_x_gpu();
        }

        if (simulationWork.useGpuFBufferOps)
        {
            setupLocalGpuForceReduction(runScheduleWork,
                                        nbv,
                                        stateGpu,
                                        fr->gpuForceReduction[gmx::AtomLocality::Local].get(),
                                        fr->pmePpCommGpu.get(),
                                        fr->pmedata,
                                        cr->dd);
            if (runScheduleWork->simulationWork.havePpDomainDecomposition)
            {
                setupNonLocalGpuForceReduction(runScheduleWork,
                                               nbv,
                                               stateGpu,
                                               fr->gpuForceReduction[gmx::AtomLocality::NonLocal].get(),
                                               cr->dd);
            }
        }
    }
    else if (!EI_TPI(inputrec.eI) && stepWork.computeNonbondedForces)
    {
        if (stepWork.useGpuXBufferOps)
        {
            GMX_ASSERT(stateGpu, "stateGpu should be valid when buffer ops are offloaded");
            nbv->convertCoordinatesGpu(AtomLocality::Local, stateGpu->getCoordinates(), localXReadyOnDevice);
        }
        else
        {
            if (simulationWork.useGpuUpdate)
            {
                GMX_ASSERT(stateGpu, "need a valid stateGpu object");
                GMX_ASSERT(haveCopiedXFromGpu,
                           "a wait should only be triggered if copy has been scheduled");
                stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
            }
            nbv->convertCoordinates(AtomLocality::Local, x.unpaddedArrayRef());
        }
    }
    if (simulationWork.useGpuNonbonded && (stepWork.computeNonbondedForces || domainWork.haveGpuBondedWork))
    {
        ddBalanceRegionHandler.openBeforeForceComputationGpu();

        wallcycle_start(wcycle, WallCycleCounter::LaunchGpu);
        wallcycle_sub_start(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
        Nbnxm::gpu_upload_shiftvec(nbv->gpu_nbv, nbv->nbat.get());
        if (!stepWork.useGpuXBufferOps)
        {
            Nbnxm::gpu_copy_xq_to_gpu(nbv->gpu_nbv, nbv->nbat.get(), AtomLocality::Local);
        }
        wallcycle_sub_stop(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
        wallcycle_stop(wcycle, WallCycleCounter::LaunchGpu);
        // with X buffer ops offloaded to the GPU on all but the search steps

        // bonded work not split into separate local and non-local, so with DD
        // we can only launch the kernel after non-local coordinates have been received.
        if (domainWork.haveGpuBondedWork && !simulationWork.havePpDomainDecomposition)
        {
            fr->listedForcesGpu->setPbcAndlaunchKernel(fr->pbcType, box, fr->bMolPBC, stepWork);
        }

        /* launch local nonbonded work on GPU */
        wallcycle_start_nocount(wcycle, WallCycleCounter::LaunchGpu);
        wallcycle_sub_start_nocount(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
        do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::Local, enbvClearFNo, step, nrnb, wcycle);
        wallcycle_sub_stop(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
        wallcycle_stop(wcycle, WallCycleCounter::LaunchGpu);
    }
    if (stepWork.haveGpuPmeOnThisRank)
    {
        // In PME GPU and mixed mode we launch FFT / gather after the
        // X copy/transform to allow overlap as well as after the GPU NB
        // launch to avoid FFT launch overhead hijacking the CPU and delaying
        // the nonbonded kernel.
        launchPmeGpuFftAndGather(fr->pmedata,
                                 lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)],
                                 wcycle,
                                 stepWork);
    }
    /* Communicate coordinates and sum dipole if necessary +
       do non-local pair search */
    if (simulationWork.havePpDomainDecomposition)
    {
        if (stepWork.doNeighborSearch)
        {
            // TODO: fuse this branch with the above large stepWork.doNeighborSearch block
            wallcycle_start_nocount(wcycle, WallCycleCounter::NS);
            wallcycle_sub_start(wcycle, WallCycleSubCounter::NBSSearchNonLocal);
            /* Note that with a GPU the launch overhead of the list transfer is not timed separately */
            nbv->constructPairlist(InteractionLocality::NonLocal, top->excls, step, nrnb);

            nbv->setupGpuShortRangeWork(fr->listedForcesGpu.get(), InteractionLocality::NonLocal);
            wallcycle_sub_stop(wcycle, WallCycleSubCounter::NBSSearchNonLocal);
            wallcycle_stop(wcycle, WallCycleCounter::NS);
            // TODO refactor this GPU halo exchange re-initialisation
            // to location in do_md where GPU halo exchange is
            // constructed at partitioning, after above stateGpu
            // re-initialization has similarly been refactored
            if (simulationWork.useGpuHaloExchange)
            {
                reinitGpuHaloExchange(*cr, stateGpu->getCoordinates(), stateGpu->getForces());
            }
        }
        else
        {
            GpuEventSynchronizer* gpuCoordinateHaloLaunched = nullptr;
            if (stepWork.useGpuXHalo)
            {
                // The following must be called after local setCoordinates (which records an event
                // when the coordinate data has been copied to the device).
                gpuCoordinateHaloLaunched = communicateGpuHaloCoordinates(*cr, box, localXReadyOnDevice);

                if (domainWork.haveCpuBondedWork || domainWork.haveFreeEnergyWork)
                {
                    // non-local part of coordinate buffer must be copied back to host for CPU work
                    stateGpu->copyCoordinatesFromGpu(
                            x.unpaddedArrayRef(), AtomLocality::NonLocal, gpuCoordinateHaloLaunched);
                }
            }
            else
            {
                if (simulationWork.useGpuUpdate)
                {
                    GMX_ASSERT(haveCopiedXFromGpu,
                               "a wait should only be triggered if copy has been scheduled");
                    stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
                }
                dd_move_x(cr->dd, box, x.unpaddedArrayRef(), wcycle);
            }

            if (stepWork.useGpuXBufferOps)
            {
                if (!stepWork.useGpuXHalo)
                {
                    stateGpu->copyCoordinatesToGpu(x.unpaddedArrayRef(), AtomLocality::NonLocal);
                }
                nbv->convertCoordinatesGpu(
                        AtomLocality::NonLocal,
                        stateGpu->getCoordinates(),
                        stateGpu->getCoordinatesReadyOnDeviceEvent(
                                AtomLocality::NonLocal, simulationWork, stepWork, gpuCoordinateHaloLaunched));
            }
            else
            {
                nbv->convertCoordinates(AtomLocality::NonLocal, x.unpaddedArrayRef());
            }
        }
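        // Producer/consumer ordering here relies on GpuEventSynchronizer
        // objects rather than on stream synchronization: the producer marks an
        // event in its stream and the consumer stream enqueues a wait on it.
        // A minimal sketch of the pattern (method names as in the GROMACS GPU
        // utility wrapper):
        //   xReadyOnDevice->markEvent(copyStream);       // producer: H2D copy / halo kernel done
        //   xReadyOnDevice->enqueueWaitEvent(nbStream);  // consumer: conversion waits in-stream
        // so the host never has to block between the launches above.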
        if (simulationWork.useGpuNonbonded)
        {
            if (!stepWork.useGpuXBufferOps)
            {
                wallcycle_start(wcycle, WallCycleCounter::LaunchGpu);
                wallcycle_sub_start(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
                Nbnxm::gpu_copy_xq_to_gpu(nbv->gpu_nbv, nbv->nbat.get(), AtomLocality::NonLocal);
                wallcycle_sub_stop(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
                wallcycle_stop(wcycle, WallCycleCounter::LaunchGpu);
            }

            if (domainWork.haveGpuBondedWork)
            {
                fr->listedForcesGpu->setPbcAndlaunchKernel(fr->pbcType, box, fr->bMolPBC, stepWork);
            }

            /* launch non-local nonbonded tasks on GPU */
            wallcycle_start_nocount(wcycle, WallCycleCounter::LaunchGpu);
            wallcycle_sub_start(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
            do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::NonLocal, enbvClearFNo, step, nrnb, wcycle);
            wallcycle_sub_stop(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
            wallcycle_stop(wcycle, WallCycleCounter::LaunchGpu);
        }
    }
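    // Local and non-local interactions are issued to separate device streams,
    // so the non-local kernel can start as soon as the halo coordinates have
    // arrived, overlapping with the still-running local work.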
    // With FEP we set up the reduction over threads for local+non-local simultaneously,
    // so we need to do that here after the local and non-local pairlist construction.
    if (stepWork.doNeighborSearch && fr->efep != FreeEnergyPerturbationType::No)
    {
        wallcycle_sub_start(wcycle, WallCycleSubCounter::NonbondedFep);
        nbv->setupFepThreadedForceBuffer(fr->natoms_force_constr);
        wallcycle_sub_stop(wcycle, WallCycleSubCounter::NonbondedFep);
    }
    if (simulationWork.useGpuNonbonded && stepWork.computeNonbondedForces)
    {
        /* launch D2H copy-back F */
        wallcycle_start_nocount(wcycle, WallCycleCounter::LaunchGpu);
        wallcycle_sub_start_nocount(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);

        if (simulationWork.havePpDomainDecomposition)
        {
            Nbnxm::gpu_launch_cpyback(nbv->gpu_nbv, nbv->nbat.get(), stepWork, AtomLocality::NonLocal);
        }
        Nbnxm::gpu_launch_cpyback(nbv->gpu_nbv, nbv->nbat.get(), stepWork, AtomLocality::Local);
        wallcycle_sub_stop(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);

        if (domainWork.haveGpuBondedWork && stepWork.computeEnergy)
        {
            fr->listedForcesGpu->launchEnergyTransfer();
        }
        wallcycle_stop(wcycle, WallCycleCounter::LaunchGpu);
    }
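    // At this point all device work for the step has been enqueued, including
    // the D2H force copy-back; the CPU force tasks below overlap with it.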
    gmx::ArrayRef<const gmx::RVec> xWholeMolecules;
    if (fr->wholeMoleculeTransform)
    {
        xWholeMolecules = fr->wholeMoleculeTransform->wholeMoleculeCoordinates(x.unpaddedArrayRef(), box);
    }

    // For the rest of the CPU tasks that depend on GPU-update produced coordinates,
    // this wait ensures that the D2H transfer is complete.
    if (simulationWork.useGpuUpdate && !stepWork.doNeighborSearch)
    {
        const bool needCoordsOnHost = (runScheduleWork->domainWork.haveCpuLocalForceWork
                                       || stepWork.computeVirial || simulationWork.computeMuTot);
        const bool haveAlreadyWaited = simulationWork.useCpuHaloExchange;
        if (needCoordsOnHost && !haveAlreadyWaited)
        {
            GMX_ASSERT(haveCopiedXFromGpu,
                       "a wait should only be triggered if copy has been scheduled");
            stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
        }
    }
    DipoleData dipoleData;

    if (simulationWork.computeMuTot)
    {
        const int start = 0;

        /* Calculate total (local) dipole moment in a temporary common array.
         * This makes it possible to sum them over nodes faster.
         */
        gmx::ArrayRef<const gmx::RVec> xRef =
                (xWholeMolecules.empty() ? x.unpaddedArrayRef() : xWholeMolecules);
        calc_mu(start,
                mdatoms->homenr,
                xRef,
                mdatoms->chargeA ? gmx::arrayRefFromArray(mdatoms->chargeA, mdatoms->nr)
                                 : gmx::ArrayRef<real>{},
                mdatoms->chargeB ? gmx::arrayRefFromArray(mdatoms->chargeB, mdatoms->nr)
                                 : gmx::ArrayRef<real>{},
                mdatoms->nChargePerturbed != 0,
                dipoleData.muStaging[0],
                dipoleData.muStaging[1]);

        reduceAndUpdateMuTot(
                &dipoleData, cr, (fr->efep != FreeEnergyPerturbationType::No), lambda, muTotal, ddBalanceRegionHandler);
    }
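    // The local dipole contribution is mu = sum_i q_i x_i. With perturbed
    // charges both the state A and state B sums are staged, so a single
    // reduction over ranks suffices before the lambda interpolation in
    // reduceAndUpdateMuTot().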
    /* Reset energies */
    reset_enerdata(enerd);

    if (haveDDAtomOrdering(*cr) && simulationWork.haveSeparatePmeRank)
    {
        wallcycle_start(wcycle, WallCycleCounter::PpDuringPme);
        dd_force_flop_start(cr->dd, nrnb);
    }

    if (inputrec.bRot)
    {
        wallcycle_start(wcycle, WallCycleCounter::Rot);
        do_rotation(cr, enforcedRotation, box, x.unpaddedConstArrayRef(), t, step, stepWork.doNeighborSearch);
        wallcycle_stop(wcycle, WallCycleCounter::Rot);
    }

    /* Start the force cycle counter.
     * Note that a different counter is used for dynamic load balancing.
     */
    wallcycle_start(wcycle, WallCycleCounter::Force);
    /* Set up and clear force outputs:
     * forceOutMtsLevel0: everything except what is in the other two outputs
     * forceOutMtsLevel1: PME-mesh and listed-forces group 1
     * forceOutNonbonded: non-bonded forces
     * Without multiple time stepping all point to the same object.
     * With multiple time stepping the use differs between MTS fast (level0 only) and slow steps.
     */
    ForceOutputs forceOutMtsLevel0 = setupForceOutputs(
            &fr->forceHelperBuffers[0], force, domainWork, stepWork, simulationWork.havePpDomainDecomposition, wcycle);

    // Force output for MTS combined forces, only set at level1 MTS steps
    std::optional<ForceOutputs> forceOutMts =
            (simulationWork.useMts && stepWork.computeSlowForces)
                    ? std::optional(setupForceOutputs(&fr->forceHelperBuffers[1],
                                                      forceView->forceMtsCombinedWithPadding(),
                                                      domainWork,
                                                      stepWork,
                                                      simulationWork.havePpDomainDecomposition,
                                                      wcycle))
                    : std::nullopt;

    ForceOutputs* forceOutMtsLevel1 =
            simulationWork.useMts ? (stepWork.computeSlowForces ? &forceOutMts.value() : nullptr)
                                  : &forceOutMtsLevel0;

    const bool nonbondedAtMtsLevel1 = runScheduleWork->simulationWork.computeNonbondedAtMtsLevel1;

    ForceOutputs* forceOutNonbonded = nonbondedAtMtsLevel1 ? forceOutMtsLevel1 : &forceOutMtsLevel0;
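    // Without MTS, all three handles alias the same ForceOutputs object, so
    // the distinctions below only matter on MTS slow-force steps.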
    if (inputrec.bPull && pull_have_constraint(*pull_work))
    {
        clear_pull_forces(pull_work);
    }

    /* We calculate the non-bonded forces, when done on the CPU, here.
     * We do this before calling do_force_lowlevel, because in that
     * function, the listed forces are calculated before PME, which
     * does communication. With this order, non-bonded and listed
     * force calculation imbalance can be balanced out by the domain
     * decomposition load balancing.
     */

    const bool useOrEmulateGpuNb = simulationWork.useGpuNonbonded || fr->nbv->emulateGpu();
    if (!useOrEmulateGpuNb)
    {
        do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::Local, enbvClearFYes, step, nrnb, wcycle);
    }

    if (fr->efep != FreeEnergyPerturbationType::No && stepWork.computeNonbondedForces)
    {
        /* Calculate the local and non-local free energy interactions here.
         * Happens here on the CPU both with and without GPU.
         */
        nbv->dispatchFreeEnergyKernels(
                /* coordinate argument elided */
                &forceOutNonbonded->forceWithShiftForces(),
                fr->use_simd_kernels,
                /* interaction parameters elided */
                mdatoms->chargeA ? gmx::arrayRefFromArray(mdatoms->chargeA, mdatoms->nr)
                                 : gmx::ArrayRef<real>{},
                mdatoms->chargeB ? gmx::arrayRefFromArray(mdatoms->chargeB, mdatoms->nr)
                                 : gmx::ArrayRef<real>{},
                mdatoms->typeA ? gmx::arrayRefFromArray(mdatoms->typeA, mdatoms->nr)
                               : gmx::ArrayRef<int>{},
                mdatoms->typeB ? gmx::arrayRefFromArray(mdatoms->typeB, mdatoms->nr)
                               : gmx::ArrayRef<int>{},
                inputrec.fepvals.get(),
                /* remaining arguments elided */);
    }
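    // Note that the perturbed (FEP) nonbonded interactions are always computed
    // on the CPU in this schedule, even when the regular nonbondeds run on the
    // GPU; their contribution enters the GPU path later through the CPU force
    // buffer (domainWork.haveFreeEnergyWork).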
    if (stepWork.computeNonbondedForces && !useOrEmulateGpuNb)
    {
        if (simulationWork.havePpDomainDecomposition)
        {
            do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::NonLocal, enbvClearFNo, step, nrnb, wcycle);
        }

        if (stepWork.computeForces)
        {
            /* Add all the non-bonded forces to the normal force array.
             * This can be split into a local and a non-local part when overlapping
             * communication with calculation with domain decomposition.
             */
            wallcycle_stop(wcycle, WallCycleCounter::Force);
            nbv->atomdata_add_nbat_f_to_f(AtomLocality::All,
                                          forceOutNonbonded->forceWithShiftForces().force());
            wallcycle_start_nocount(wcycle, WallCycleCounter::Force);
        }

        /* If there are multiple fshift output buffers we need to reduce them */
        if (stepWork.computeVirial)
        {
            /* This is not in a subcounter because it takes a
               negligible and constant-sized amount of time */
            nbnxn_atomdata_add_nbat_fshift_to_fshift(
                    *nbv->nbat, forceOutNonbonded->forceWithShiftForces().shiftForces());
        }
    }
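    // atomdata_add_nbat_f_to_f() converts from the nbnxm module's internal,
    // cluster-blocked (SIMD-friendly) force layout back into the per-atom
    // rvec array used by the rest of the code.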
    // TODO Force flags should include haveFreeEnergyWork for this domain
    if (stepWork.useGpuXHalo && (domainWork.haveCpuBondedWork || domainWork.haveFreeEnergyWork))
    {
        wallcycle_stop(wcycle, WallCycleCounter::Force);
        /* Wait for non-local coordinate data to be copied from the device */
        stateGpu->waitCoordinatesReadyOnHost(AtomLocality::NonLocal);
        wallcycle_start_nocount(wcycle, WallCycleCounter::Force);
    }
    // Compute wall interactions, when present.
    // Note: this should be moved to special forces.
    if (inputrec.nwall && stepWork.computeNonbondedForces)
    {
        /* foreign lambda component for walls */
        real dvdl_walls = do_walls(inputrec,
                                   *fr,
                                   box,
                                   mdatoms->typeA ? gmx::arrayRefFromArray(mdatoms->typeA, mdatoms->nr)
                                                  : gmx::ArrayRef<int>{},
                                   mdatoms->typeB ? gmx::arrayRefFromArray(mdatoms->typeB, mdatoms->nr)
                                                  : gmx::ArrayRef<int>{},
                                   mdatoms->cENER ? gmx::arrayRefFromArray(mdatoms->cENER, mdatoms->nr)
                                                  : gmx::ArrayRef<unsigned short>{},
                                   mdatoms->homenr,
                                   mdatoms->nPerturbed,
                                   x.unpaddedConstArrayRef(),
                                   &forceOutMtsLevel0.forceWithVirial(),
                                   lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Vdw)],
                                   enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::LJSR],
                                   nrnb);
        enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Vdw] += dvdl_walls;
    }
    if (stepWork.computeListedForces)
    {
        /* Check whether we need to take into account PBC in listed interactions */
        bool needMolPbc = false;
        for (const auto& listedForces : fr->listedForces)
        {
            if (listedForces.haveCpuListedForces(*fr->fcdata))
            {
                needMolPbc = fr->bMolPBC;
            }
        }

        t_pbc pbc;

        if (needMolPbc)
        {
            /* Since all atoms are in the rectangular or triclinic unit-cell,
             * only single box vector shifts (2 in x) are required.
             */
            set_pbc_dd(&pbc, fr->pbcType, haveDDAtomOrdering(*cr) ? cr->dd->numCells : nullptr, TRUE, box);
        }

        for (int mtsIndex = 0; mtsIndex < (simulationWork.useMts && stepWork.computeSlowForces ? 2 : 1);
             mtsIndex++)
        {
            ListedForces& listedForces = fr->listedForces[mtsIndex];
            ForceOutputs& forceOut     = (mtsIndex == 0 ? forceOutMtsLevel0 : *forceOutMtsLevel1);
            listedForces.calculate(wcycle,
                                   box,
                                   inputrec.fepvals.get(),
                                   /* coordinate, output (&forceOut), energy and atom-data arguments elided */
                                   haveDDAtomOrdering(*cr) ? cr->dd->globalAtomIndices.data() : nullptr,
                                   stepWork);
        }
    }

    if (stepWork.computeSlowForces)
    {
        longRangeNonbondeds->calculate(fr->pmedata,
                                       cr,
                                       x.unpaddedConstArrayRef(),
                                       &forceOutMtsLevel1->forceWithVirial(),
                                       enerd,
                                       box,
                                       lambda,
                                       dipoleData.muStateAB,
                                       stepWork,
                                       ddBalanceRegionHandler);
    }

    wallcycle_stop(wcycle, WallCycleCounter::Force);
    // VdW dispersion correction, only computed on the master rank to avoid double counting
    if ((stepWork.computeEnergy || stepWork.computeVirial) && fr->dispersionCorrection && MASTER(cr))
    {
        // Calculate long range corrections to pressure and energy
        const DispersionCorrection::Correction correction = fr->dispersionCorrection->calculate(
                box, lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Vdw)]);

        if (stepWork.computeEnergy)
        {
            enerd->term[F_DISPCORR] = correction.energy;
            enerd->term[F_DVDL_VDW] += correction.dvdl;
            enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Vdw] += correction.dvdl;
        }
        if (stepWork.computeVirial)
        {
            correction.correctVirial(vir_force);
            enerd->term[F_PDISPCORR] = correction.pressure;
        }
    }
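    // For a homogeneous system the leading energy term of this correction is
    // the standard LJ tail expression, conceptually
    //   E_corr = -(2/3) pi N rho <C6> rc^-3
    // with <C6> the average dispersion coefficient and rc the cut-off; the
    // implementation also handles the repulsion term and the matching
    // pressure correction.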
    computeSpecialForces(fplog,
                         /* communication, input-record and bias arguments elided */
                         x.unpaddedArrayRef(),
                         /* atom-data and work-load arguments elided */
                         &forceOutMtsLevel0.forceWithVirial(),
                         forceOutMtsLevel1 ? &forceOutMtsLevel1->forceWithVirial() : nullptr,
                         /* further arguments elided */
                         stepWork.doNeighborSearch);
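    // The call above accumulates the "special" force contributions: among
    // others COM pulling, AWH biasing, enforced rotation, essential dynamics,
    // interactive MD and force providers such as electric fields, all of
    // which go into the virial-carrying force outputs.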
    if (simulationWork.havePpDomainDecomposition && stepWork.computeForces && stepWork.useGpuFHalo
        && domainWork.haveCpuLocalForceWork)
    {
        stateGpu->copyForcesToGpu(forceOutMtsLevel0.forceWithShiftForces().force(), AtomLocality::Local);
    }
    GMX_ASSERT(!(nonbondedAtMtsLevel1 && stepWork.useGpuFBufferOps),
               "The schedule below does not allow for nonbonded MTS with GPU buffer ops");
    GMX_ASSERT(!(nonbondedAtMtsLevel1 && stepWork.useGpuFHalo),
               "The schedule below does not allow for nonbonded MTS with GPU halo exchange");
    // Will store the amount of cycles spent waiting for the GPU that
    // will later be used in the DLB accounting.
    float cycles_wait_gpu = 0;
    if (useOrEmulateGpuNb && stepWork.computeNonbondedForces)
    {
        auto& forceWithShiftForces = forceOutNonbonded->forceWithShiftForces();

        /* wait for non-local forces (or calculate in emulation mode) */
        if (simulationWork.havePpDomainDecomposition)
        {
            if (simulationWork.useGpuNonbonded)
            {
                cycles_wait_gpu += Nbnxm::gpu_wait_finish_task(
                        nbv->gpu_nbv,
                        stepWork,
                        AtomLocality::NonLocal,
                        enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::LJSR].data(),
                        enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::CoulombSR].data(),
                        forceWithShiftForces.shiftForces(),
                        wcycle);
            }
            else
            {
                wallcycle_start_nocount(wcycle, WallCycleCounter::Force);
                do_nb_verlet(
                        fr, ic, enerd, stepWork, InteractionLocality::NonLocal, enbvClearFYes, step, nrnb, wcycle);
                wallcycle_stop(wcycle, WallCycleCounter::Force);
            }

            if (stepWork.useGpuFBufferOps)
            {
                if (domainWork.haveNonLocalForceContribInCpuBuffer)
                {
                    stateGpu->copyForcesToGpu(forceOutMtsLevel0.forceWithShiftForces().force(),
                                              AtomLocality::NonLocal);
                }

                fr->gpuForceReduction[gmx::AtomLocality::NonLocal]->execute();

                if (!stepWork.useGpuFHalo)
                {
                    /* We don't explicitly wait for the forces to be reduced on the device,
                     * but wait for them to finish copying to the CPU instead.
                     * So, we manually consume the event, see Issue #3988. */
                    stateGpu->consumeForcesReducedOnDeviceEvent(AtomLocality::NonLocal);
                    // copy from the GPU the input for dd_move_f()
                    stateGpu->copyForcesFromGpu(forceOutMtsLevel0.forceWithShiftForces().force(),
                                                AtomLocality::NonLocal);
                }
            }
            else
            {
                nbv->atomdata_add_nbat_f_to_f(AtomLocality::NonLocal, forceWithShiftForces.force());
            }

            if (fr->nbv->emulateGpu() && stepWork.computeVirial)
            {
                nbnxn_atomdata_add_nbat_fshift_to_fshift(*nbv->nbat, forceWithShiftForces.shiftForces());
            }
        }
    }
    /* Combining the forces for multiple time stepping before the halo exchange, when possible,
     * avoids an extra halo exchange (when DD is used) and post-processing step.
     */
    if (stepWork.combineMtsForcesBeforeHaloExchange)
    {
        combineMtsForces(getLocalAtomCount(cr->dd, *mdatoms, simulationWork.havePpDomainDecomposition),
                         force.unpaddedArrayRef(),
                         forceView->forceMtsCombined(),
                         inputrec.mtsLevels[1].stepFactor);
    }
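    // With impulse-style MTS the combination is, per atom, conceptually
    //   fCombined[i] = fFast[i] + stepFactor * fSlow[i]
    // so the slow forces contribute only every mtsLevels[1].stepFactor-th step.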
    if (simulationWork.havePpDomainDecomposition)
    {
        /* We are done with the CPU compute.
         * We will now communicate the non-local forces.
         * If we use a GPU this will overlap with GPU work, so in that case
         * we do not close the DD force balancing region here.
         */
        ddBalanceRegionHandler.closeAfterForceComputationCpu();

        if (stepWork.computeForces)
        {
            if (stepWork.useGpuFHalo)
            {
                // If there exist CPU forces, data from halo exchange should accumulate into these
                bool accumulateForces = domainWork.haveCpuLocalForceWork;
                gmx::FixedCapacityVector<GpuEventSynchronizer*, 2> gpuForceHaloDependencies;
                gpuForceHaloDependencies.push_back(stateGpu->fReadyOnDevice(AtomLocality::Local));
                gpuForceHaloDependencies.push_back(stateGpu->fReducedOnDevice(AtomLocality::NonLocal));

                communicateGpuHaloForces(*cr, accumulateForces, &gpuForceHaloDependencies);
            }
            else
            {
                if (stepWork.useGpuFBufferOps)
                {
                    stateGpu->waitForcesReadyOnHost(AtomLocality::NonLocal);
                }

                // Without MTS or with MTS at slow steps with uncombined forces we need to
                // communicate the fast forces
                if (!simulationWork.useMts || !stepWork.combineMtsForcesBeforeHaloExchange)
                {
                    dd_move_f(cr->dd, &forceOutMtsLevel0.forceWithShiftForces(), wcycle);
                }
                // With MTS we need to communicate the slow or combined (in forceOutMtsLevel1) forces
                if (simulationWork.useMts && stepWork.computeSlowForces)
                {
                    dd_move_f(cr->dd, &forceOutMtsLevel1->forceWithShiftForces(), wcycle);
                }
            }
        }
    }
    // With both nonbonded and PME offloaded to a GPU on the same rank, we use
    // an alternating wait/reduction scheme.
    bool alternateGpuWait =
            (!c_disableAlternatingWait && stepWork.haveGpuPmeOnThisRank && simulationWork.useGpuNonbonded
             && !simulationWork.havePpDomainDecomposition && !stepWork.useGpuFBufferOps);
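    // The alternating scheme polls both the PME and the nonbonded GPU tasks
    // and reduces the output of whichever finishes first, hiding one
    // reduction behind the wait for the other task.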
    if (alternateGpuWait)
    {
        alternatePmeNbGpuWaitReduce(fr->nbv.get(),
                                    fr->pmedata,
                                    forceOutNonbonded,
                                    forceOutMtsLevel1,
                                    enerd,
                                    lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)],
                                    stepWork,
                                    wcycle);
    }

    if (!alternateGpuWait && stepWork.haveGpuPmeOnThisRank)
    {
        pme_gpu_wait_and_reduce(fr->pmedata,
                                stepWork,
                                wcycle,
                                &forceOutMtsLevel1->forceWithVirial(),
                                enerd,
                                lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)]);
    }
    /* Wait for the local GPU NB outputs on the non-alternating wait path */
    if (!alternateGpuWait && stepWork.computeNonbondedForces && simulationWork.useGpuNonbonded)
    {
        /* Measured overhead on CUDA and OpenCL with(out) GPU sharing
         * is between 0.5 and 1.5 Mcycles. So 2 Mcycles is an overestimate,
         * but even with a step of 0.1 ms the difference is less than 1%
         * of the step time.
         */
        const float gpuWaitApiOverheadMargin = 2e6F; /* cycles */
        const float waitCycles = Nbnxm::gpu_wait_finish_task(
                nbv->gpu_nbv,
                stepWork,
                AtomLocality::Local,
                enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::LJSR].data(),
                enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::CoulombSR].data(),
                forceOutNonbonded->forceWithShiftForces().shiftForces(),
                wcycle);

        if (ddBalanceRegionHandler.useBalancingRegion())
        {
            DdBalanceRegionWaitedForGpu waitedForGpu = DdBalanceRegionWaitedForGpu::yes;
            if (stepWork.computeForces && waitCycles <= gpuWaitApiOverheadMargin)
            {
                /* We measured few cycles, so it could be that the kernel
                 * and transfer finished earlier and there was no actual
                 * wait time, only API call overhead.
                 * Then the actual time could be anywhere between 0 and
                 * cycles_wait_est. We will use half of cycles_wait_est.
                 */
                waitedForGpu = DdBalanceRegionWaitedForGpu::no;
            }
            ddBalanceRegionHandler.closeAfterForceComputationGpu(cycles_wait_gpu, waitedForGpu);
        }
    }
    if (fr->nbv->emulateGpu())
    {
        // NOTE: the emulation kernel is not included in the balancing region,
        // but emulation mode does not target performance anyway
        wallcycle_start_nocount(wcycle, WallCycleCounter::Force);
        do_nb_verlet(fr,
                     ic,
                     enerd,
                     stepWork,
                     InteractionLocality::Local,
                     haveDDAtomOrdering(*cr) ? enbvClearFNo : enbvClearFYes,
                     step,
                     nrnb,
                     wcycle);
        wallcycle_stop(wcycle, WallCycleCounter::Force);
    }
    // If on the GPU PME-PP comms path, receive the forces from the PME rank before GPU buffer ops
    // TODO refactor this and unify with the below default-path call to the same function
    if (PAR(cr) && simulationWork.haveSeparatePmeRank && simulationWork.useGpuPmePpCommunication
        && stepWork.computeSlowForces)
    {
        /* In case of node splitting, the PP nodes receive the long-range
         * forces, virial and energy from the PME nodes here.
         */
        pme_receive_force_ener(fr,
                               cr,
                               &forceOutMtsLevel1->forceWithVirial(),
                               enerd,
                               simulationWork.useGpuPmePpCommunication,
                               stepWork.useGpuPmeFReduction,
                               wcycle);
    }
    /* Do the nonbonded GPU (or emulation) force buffer reduction
     * on the non-alternating path. */
    GMX_ASSERT(!(nonbondedAtMtsLevel1 && stepWork.useGpuFBufferOps),
               "The schedule below does not allow for nonbonded MTS with GPU buffer ops");
    if (useOrEmulateGpuNb && !alternateGpuWait)
    {
        if (stepWork.useGpuFBufferOps)
        {
            ArrayRef<gmx::RVec> forceWithShift = forceOutNonbonded->forceWithShiftForces().force();

            // TODO: move these steps as early as possible:
            // - the CPU f H2D copy should happen as soon as all CPU-side forces are done
            // - the wait for the force reduction does not need to block the host (at least not here;
            //   it is sufficient to wait before the next CPU task that consumes the forces:
            //   vsite spread or update)
            // - the copy is not performed if GPU force halo exchange is active, because it would
            //   overwrite the result of the halo exchange. In that case the copy is instead
            //   performed above, before the exchange.
            // These should be unified.
            if (domainWork.haveLocalForceContribInCpuBuffer && !stepWork.useGpuFHalo)
            {
                stateGpu->copyForcesToGpu(forceWithShift, AtomLocality::Local);
            }

            if (stepWork.computeNonbondedForces)
            {
                fr->gpuForceReduction[gmx::AtomLocality::Local]->execute();
            }

            // Copy forces to host if they are needed for the update or if virtual sites are enabled.
            // If there are vsites, we need to copy forces every step to spread the vsite forces on the host.
            // TODO: When the output flags are included in the step workload, this copy can be combined
            // with the copy call done in sim_utils(...) for the output.
            // NOTE: If there are virtual sites, the forces are modified on the host after this D2H copy.
            // Hence, they should not be copied in do_md(...) for the output.
            if (!simulationWork.useGpuUpdate
                || (simulationWork.useGpuUpdate && haveDDAtomOrdering(*cr) && simulationWork.useCpuPmePpCommunication)
                || vsite)
            {
                if (stepWork.computeNonbondedForces)
                {
                    /* We have previously issued force reduction on the GPU, but we will
                     * not use this event, instead relying on the stream being in-order.
                     */
                    stateGpu->consumeForcesReducedOnDeviceEvent(AtomLocality::Local);
                }
                stateGpu->copyForcesFromGpu(forceWithShift, AtomLocality::Local);
                stateGpu->waitForcesReadyOnHost(AtomLocality::Local);
            }
        }
        else if (stepWork.computeNonbondedForces)
        {
            ArrayRef<gmx::RVec> forceWithShift = forceOutNonbonded->forceWithShiftForces().force();
            nbv->atomdata_add_nbat_f_to_f(AtomLocality::Local, forceWithShift);
        }
    }
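    // GpuForceReduction::execute() accumulates, on the device, the
    // nbnxm-format nonbonded output together with the dependencies registered
    // for it at search time (e.g. CPU forces copied H2D and, where
    // applicable, PME forces) into the rvec-format force buffer.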
    launchGpuEndOfStepTasks(
            nbv, fr->listedForcesGpu.get(), fr->pmedata, enerd, *runScheduleWork, step, wcycle);

    if (haveDDAtomOrdering(*cr))
    {
        dd_force_flop_stop(cr->dd, nrnb);
    }

    const bool haveCombinedMtsForces = (stepWork.computeForces && simulationWork.useMts && stepWork.computeSlowForces
                                        && stepWork.combineMtsForcesBeforeHaloExchange);
    if (stepWork.computeForces)
    {
        postProcessForceWithShiftForces(
                nrnb, wcycle, box, x.unpaddedArrayRef(), &forceOutMtsLevel0, vir_force, *mdatoms, *fr, vsite, stepWork);

        if (simulationWork.useMts && stepWork.computeSlowForces && !haveCombinedMtsForces)
        {
            postProcessForceWithShiftForces(
                    nrnb, wcycle, box, x.unpaddedArrayRef(), forceOutMtsLevel1, vir_force, *mdatoms, *fr, vsite, stepWork);
        }
    }
    // TODO refactor this and unify with the above GPU PME-PP / GPU update path call to the same function
    if (PAR(cr) && simulationWork.haveSeparatePmeRank && simulationWork.useCpuPmePpCommunication
        && stepWork.computeSlowForces)
    {
        /* In case of node splitting, the PP nodes receive the long-range
         * forces, virial and energy from the PME nodes here.
         */
        pme_receive_force_ener(fr,
                               cr,
                               &forceOutMtsLevel1->forceWithVirial(),
                               enerd,
                               simulationWork.useGpuPmePpCommunication,
                               false,
                               wcycle);
    }

    if (stepWork.computeForces)
    {
        /* If we don't use MTS or if we already combined the MTS forces before, we only
         * need to post-process one ForceOutputs object here, called forceOutCombined,
         * otherwise we have to post-process two outputs and then combine them.
         */
        ForceOutputs& forceOutCombined = (haveCombinedMtsForces ? forceOutMts.value() : forceOutMtsLevel0);
        postProcessForces(
                cr, step, nrnb, wcycle, box, x.unpaddedArrayRef(), &forceOutCombined, vir_force, mdatoms, fr, vsite, stepWork);

        if (simulationWork.useMts && stepWork.computeSlowForces && !haveCombinedMtsForces)
        {
            postProcessForces(
                    cr, step, nrnb, wcycle, box, x.unpaddedArrayRef(), forceOutMtsLevel1, vir_force, mdatoms, fr, vsite, stepWork);

            combineMtsForces(mdatoms->homenr,
                             force.unpaddedArrayRef(),
                             forceView->forceMtsCombined(),
                             inputrec.mtsLevels[1].stepFactor);
        }
    }
    if (stepWork.computeEnergy)
    {
        /* Compute the final potential energy terms */
        accumulatePotentialEnergies(enerd, lambda, inputrec.fepvals.get());

        if (!EI_TPI(inputrec.eI))
        {
            checkPotentialEnergyValidity(step, *enerd, inputrec);
        }
    }

    /* In case we don't have constraints and are using GPUs, the next balancing
     * region starts here.
     * Some "special" work at the end of do_force, such as vsite spread,
     * virial calculation and COM pulling, is thus not included in
     * the balance timing, which is OK as most tasks do communication.
     */
    ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::no);