 * This file is part of the GROMACS molecular simulation package.
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013-2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
#include "gromacs/applied_forces/awh/awh.h"
#include "gromacs/domdec/dlbtiming.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/domdec/gpuhaloexchange.h"
#include "gromacs/domdec/partition.h"
#include "gromacs/essentialdynamics/edsam.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/ewald/pme_pp.h"
#include "gromacs/ewald/pme_pp_comm_gpu.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nonbonded/nb_free_energy.h"
#include "gromacs/gmxlib/nonbonded/nonbonded.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/imd/imd.h"
#include "gromacs/listed_forces/disre.h"
#include "gromacs/listed_forces/gpubonded.h"
#include "gromacs/listed_forces/listed_forces.h"
#include "gromacs/listed_forces/orires.h"
#include "gromacs/math/arrayrefwithpadding.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vecdump.h"
#include "gromacs/mdlib/calcmu.h"
#include "gromacs/mdlib/calcvir.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdlib/dispersioncorrection.h"
#include "gromacs/mdlib/enerdata_utils.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/force_flags.h"
#include "gromacs/mdlib/forcerec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/update.h"
#include "gromacs/mdlib/vsite.h"
#include "gromacs/mdlib/wall.h"
#include "gromacs/mdlib/wholemoleculetransform.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/enerdata.h"
#include "gromacs/mdtypes/forcebuffers.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/forcerec.h"
#include "gromacs/mdtypes/iforceprovider.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/mdatom.h"
#include "gromacs/mdtypes/multipletimestepping.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/mdtypes/state_propagator_data_gpu.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/nbnxm_gpu.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/pulling/pull_rotation.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/wallcyclereporting.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/fixedcapacityvector.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/strconvert.h"
#include "gromacs/utility/sysinfo.h"
#include "gpuforcereduction.h"
using gmx::AtomLocality;
using gmx::DomainLifetimeWorkload;
using gmx::ForceOutputs;
using gmx::ForceWithShiftForces;
using gmx::InteractionLocality;
using gmx::SimulationWorkload;
using gmx::StepWorkload;
// TODO: this environment variable allows us to verify before release
// that on less common architectures the total cost of polling is not larger than
// a blocking wait (so polling does not introduce overhead when the static
// PME-first ordering would suffice).
static const bool c_disableAlternatingWait = (getenv("GMX_DISABLE_ALTERNATING_GPU_WAIT") != nullptr);
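/* Usage note (illustrative, not part of the build system): because the switch
 * is read from the environment, the blocking-wait fallback can be exercised at
 * run time without recompiling, e.g. by exporting
 * GMX_DISABLE_ALTERNATING_GPU_WAIT=1 before launching mdrun, which restores
 * the static PME-first wait order described in the TODO above.
 */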
static void sum_forces(ArrayRef<RVec> f, ArrayRef<const RVec> forceToAdd)
GMX_ASSERT(f.size() >= forceToAdd.size(), "Accumulation buffer should be sufficiently large");
const int end = forceToAdd.size();
int gmx_unused nt = gmx_omp_nthreads_get(emntDefault);
#pragma omp parallel for num_threads(nt) schedule(static)
for (int i = 0; i < end; i++)
rvec_inc(f[i], forceToAdd[i]);
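/* Usage sketch (illustrative only; the buffers here are hypothetical):
 * \code
 *   std::vector<gmx::RVec> fMain(numAtoms);  // main force buffer
 *   std::vector<gmx::RVec> fExtra(numAtoms); // e.g. direct-virial buffer
 *   sum_forces(fMain, fExtra); // fMain[i] += fExtra[i], OpenMP-parallel
 * \endcode
 * The assertion above requires the accumulation buffer to be at least as
 * large as the buffer being added, as in the postProcessForces() call below.
 */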
static void calc_virial(int start,
const gmx::ForceWithShiftForces& forceWithShiftForces,
const t_forcerec* fr,
/* The short-range virial from surrounding boxes */
const rvec* fshift = as_rvec_array(forceWithShiftForces.shiftForces().data());
const rvec* shiftVecPointer = as_rvec_array(fr->shift_vec.data());
calc_vir(gmx::c_numShiftVectors, shiftVecPointer, fshift, vir_part, pbcType == PbcType::Screw, box);
inc_nrnb(nrnb, eNR_VIRIAL, gmx::c_numShiftVectors);
/* Calculate partial virial, for local atoms only, based on short range.
 * Total virial is computed in global_stat, called from do_md
const rvec* f = as_rvec_array(forceWithShiftForces.force().data());
f_calc_vir(start, start + homenr, x, f, vir_part, box);
inc_nrnb(nrnb, eNR_VIRIAL, homenr);
pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
static void pull_potential_wrapper(const t_commrec* cr,
const t_inputrec& ir,
gmx::ArrayRef<const gmx::RVec> x,
gmx::ForceWithVirial* force,
const t_mdatoms* mdatoms,
gmx_enerdata_t* enerd,
gmx_wallcycle* wcycle)
/* Calculate the center of mass forces; this requires communication,
 * which is why pull_potential is called close to other communication.
wallcycle_start(wcycle, WallCycleCounter::PullPot);
set_pbc(&pbc, ir.pbcType, box);
enerd->term[F_COM_PULL] +=
pull_potential(pull_work,
gmx::arrayRefFromArray(mdatoms->massT, mdatoms->nr),
lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Restraint)],
enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Restraint] += dvdl;
wallcycle_stop(wcycle, WallCycleCounter::PullPot);
static void pme_receive_force_ener(t_forcerec* fr,
gmx::ForceWithVirial* forceWithVirial,
gmx_enerdata_t* enerd,
bool useGpuPmePpComms,
bool receivePmeForceToGpu,
gmx_wallcycle* wcycle)
real e_q, e_lj, dvdl_q, dvdl_lj;
float cycles_ppdpme, cycles_seppme;
cycles_ppdpme = wallcycle_stop(wcycle, WallCycleCounter::PpDuringPme);
dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);
/* In case of node-splitting, the PP nodes receive the long-range
 * forces, virial and energy from the PME nodes here.
wallcycle_start(wcycle, WallCycleCounter::PpPmeWaitRecvF);
gmx_pme_receive_f(fr->pmePpCommGpu.get(),
receivePmeForceToGpu,
enerd->term[F_COUL_RECIP] += e_q;
enerd->term[F_LJ_RECIP] += e_lj;
enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Coul] += dvdl_q;
enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Vdw] += dvdl_lj;
dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);
wallcycle_stop(wcycle, WallCycleCounter::PpPmeWaitRecvF);
static void print_large_forces(FILE* fp,
ArrayRef<const RVec> x,
ArrayRef<const RVec> f)
real force2Tolerance = gmx::square(forceTolerance);
gmx::index numNonFinite = 0;
for (int i = 0; i < md->homenr; i++)
real force2 = norm2(f[i]);
bool nonFinite = !std::isfinite(force2);
if (force2 >= force2Tolerance || nonFinite)
"step %" PRId64 " atom %6d x %8.3f %8.3f %8.3f force %12.5e\n",
if (numNonFinite > 0)
/* Note that with MPI this fatal call on one rank might interrupt
 * the printing on other ranks. But we can only avoid that with
 * an expensive MPI barrier that we would need at each step.
gmx_fatal(FARGS, "At step %" PRId64 " detected non-finite forces on %td atoms", step, numNonFinite);
//! When necessary, spreads forces on vsites and computes the virial for \p forceOutputs->forceWithShiftForces()
static void postProcessForceWithShiftForces(t_nrnb* nrnb,
gmx_wallcycle* wcycle,
ArrayRef<const RVec> x,
ForceOutputs* forceOutputs,
const t_mdatoms& mdatoms,
const t_forcerec& fr,
gmx::VirtualSitesHandler* vsite,
const StepWorkload& stepWork)
ForceWithShiftForces& forceWithShiftForces = forceOutputs->forceWithShiftForces();
/* If we have NoVirSum forces, but we do not calculate the virial,
 * we later sum the forceWithShiftForces buffer together with
 * the noVirSum buffer and spread the combined vsite forces at once.
if (vsite && (!forceOutputs->haveForceWithVirial() || stepWork.computeVirial))
using VirialHandling = gmx::VirtualSitesHandler::VirialHandling;
auto f = forceWithShiftForces.force();
auto fshift = forceWithShiftForces.shiftForces();
const VirialHandling virialHandling =
(stepWork.computeVirial ? VirialHandling::Pbc : VirialHandling::None);
vsite->spreadForces(x, f, virialHandling, fshift, nullptr, nrnb, box, wcycle);
forceWithShiftForces.haveSpreadVsiteForces() = true;
if (stepWork.computeVirial)
/* Calculation of the virial must be done after vsites! */
0, mdatoms.homenr, as_rvec_array(x.data()), forceWithShiftForces, vir_force, box, nrnb, &fr, fr.pbcType);
//! When necessary, spreads vsite forces, computes the virial, and sums the force buffers
static void postProcessForces(const t_commrec* cr,
gmx_wallcycle* wcycle,
ArrayRef<const RVec> x,
ForceOutputs* forceOutputs,
const t_mdatoms* mdatoms,
const t_forcerec* fr,
gmx::VirtualSitesHandler* vsite,
const StepWorkload& stepWork)
// Extract the final output force buffer, which is also the buffer for forces with shift forces
ArrayRef<RVec> f = forceOutputs->forceWithShiftForces().force();
if (forceOutputs->haveForceWithVirial())
auto& forceWithVirial = forceOutputs->forceWithVirial();
/* Spread the mesh force on virtual sites to the other particles...
 * This is parallelized. MPI communication is performed
 * if the constructing atoms aren't local.
GMX_ASSERT(!stepWork.computeVirial || f.data() != forceWithVirial.force_.data(),
"We need separate force buffers for shift and virial forces when "
"computing the virial");
GMX_ASSERT(!stepWork.computeVirial
|| forceOutputs->forceWithShiftForces().haveSpreadVsiteForces(),
"We should spread the force with shift forces separately when computing "
const gmx::VirtualSitesHandler::VirialHandling virialHandling =
(stepWork.computeVirial ? gmx::VirtualSitesHandler::VirialHandling::NonLinear
: gmx::VirtualSitesHandler::VirialHandling::None);
matrix virial = { { 0 } };
vsite->spreadForces(x, forceWithVirial.force_, virialHandling, {}, virial, nrnb, box, wcycle);
forceWithVirial.addVirialContribution(virial);
if (stepWork.computeVirial)
/* Now add the forces, this is local */
sum_forces(f, forceWithVirial.force_);
/* Add the direct virial contributions */
forceWithVirial.computeVirial_,
"forceWithVirial should request virial computation when we request the virial");
m_add(vir_force, forceWithVirial.getVirial(), vir_force);
pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
GMX_ASSERT(vsite == nullptr || forceOutputs->forceWithShiftForces().haveSpreadVsiteForces(),
"We should have spread the vsite forces (earlier)");
if (fr->print_force >= 0)
print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
static void do_nb_verlet(t_forcerec* fr,
const interaction_const_t* ic,
gmx_enerdata_t* enerd,
const StepWorkload& stepWork,
const InteractionLocality ilocality,
gmx_wallcycle* wcycle)
if (!stepWork.computeNonbondedForces)
/* skip non-bonded calculation */
nonbonded_verlet_t* nbv = fr->nbv.get();
/* GPU kernel launch overhead is already timed separately */
/* When dynamic pair-list pruning is requested, we need to prune
 * at nstlistPrune steps.
if (nbv->isDynamicPruningStepCpu(step))
/* Prune the pair-list beyond fr->ic->rlistPrune using
 * the current coordinates of the atoms.
wallcycle_sub_start(wcycle, WallCycleSubCounter::NonbondedPruning);
nbv->dispatchPruneKernelCpu(ilocality, fr->shift_vec);
wallcycle_sub_stop(wcycle, WallCycleSubCounter::NonbondedPruning);
nbv->dispatchNonbondedKernel(ilocality, *ic, stepWork, clearF, *fr, enerd, nrnb);
static inline void clearRVecs(ArrayRef<RVec> v, const bool useOpenmpThreading)
int nth = gmx_omp_nthreads_get_simple_rvec_task(emntDefault, v.ssize());
/* Note that we would like to avoid this conditional by putting it
 * into the omp pragma instead, but then we still take the full
 * omp parallel for overhead (at least with gcc5).
if (!useOpenmpThreading || nth == 1)
#pragma omp parallel for num_threads(nth) schedule(static)
for (gmx::index i = 0; i < v.ssize(); i++)
/*! \brief Return an estimate of the average kinetic energy or 0 when unreliable
 * \param groupOptions Group options, containing T-coupling options
static real averageKineticEnergyEstimate(const t_grpopts& groupOptions)
real nrdfCoupled = 0;
real nrdfUncoupled = 0;
real kineticEnergy = 0;
for (int g = 0; g < groupOptions.ngtc; g++)
if (groupOptions.tau_t[g] >= 0)
nrdfCoupled += groupOptions.nrdf[g];
kineticEnergy += groupOptions.nrdf[g] * 0.5 * groupOptions.ref_t[g] * gmx::c_boltz;
nrdfUncoupled += groupOptions.nrdf[g];
/* This conditional with > also catches nrdf=0 */
if (nrdfCoupled > nrdfUncoupled)
return kineticEnergy * (nrdfCoupled + nrdfUncoupled) / nrdfCoupled;
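/* Worked example (numbers illustrative): a single T-coupled group
 * (tau_t >= 0) with nrdf = 3000 degrees of freedom at ref_t = 300 K gives
 * kineticEnergy = 3000 * 0.5 * 300 * c_boltz
 *               ~ 3000 * 0.5 * 300 * 0.008314 ~ 3.74e3 kJ/mol,
 * and with no uncoupled degrees of freedom the scaling factor is 1, so the
 * estimate is returned unchanged. When uncoupled degrees of freedom dominate
 * (nrdfCoupled <= nrdfUncoupled), the estimate is considered unreliable and,
 * per the \brief above, 0 is returned.
 */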
/*! \brief This routine checks that the potential energy is finite.
 * Always checks that the potential energy is finite. If step equals
 * inputrec.init_step, it also checks that the magnitude of the potential energy
 * is reasonable. Terminates with a fatal error when a check fails.
 * Note that passing this check does not guarantee finite forces,
 * since those use slightly different arithmetic. But in most cases
 * there is just a narrow coordinate range where forces are not finite
 * and energies are finite.
 * \param[in] step The step number, used for checking and printing
 * \param[in] enerd The energy data; the non-bonded group energies need to be added to
 * enerd.term[F_EPOT] before calling this routine
 * \param[in] inputrec The input record
static void checkPotentialEnergyValidity(int64_t step, const gmx_enerdata_t& enerd, const t_inputrec& inputrec)
/* Threshold valid for comparing absolute potential energy against
 * the kinetic energy. Normally one should not consider absolute
 * potential energy values, but with a factor of one million
 * we should never get false positives.
constexpr real c_thresholdFactor = 1e6;
bool energyIsNotFinite = !std::isfinite(enerd.term[F_EPOT]);
real averageKineticEnergy = 0;
/* We only check for large potential energy at the initial step,
 * because that is by far the most likely step for this to occur
 * and because computing the average kinetic energy is not free.
 * Note: nstcalcenergy >> 1 often does not allow catching large energies
 * before they become NaN.
if (step == inputrec.init_step && EI_DYNAMICS(inputrec.eI))
averageKineticEnergy = averageKineticEnergyEstimate(inputrec.opts);
if (energyIsNotFinite
|| (averageKineticEnergy > 0 && enerd.term[F_EPOT] > c_thresholdFactor * averageKineticEnergy))
": The total potential energy is %g, which is %s. The LJ and electrostatic "
"contributions to the energy are %g and %g, respectively. A %s potential energy "
"can be caused by overlapping interactions in bonded interactions or very large%s "
"coordinate values. Usually this is caused by a badly- or non-equilibrated initial "
"configuration, incorrect interactions or parameters in the topology.",
energyIsNotFinite ? "not finite" : "extremely high",
enerd.term[F_COUL_SR],
energyIsNotFinite ? "non-finite" : "very high",
energyIsNotFinite ? " or NaN" : "");
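/* Illustrative consequence of the threshold: with the worked example above
 * (average kinetic energy estimate ~3.74e3 kJ/mol), only a potential energy
 * above c_thresholdFactor * 3.74e3 ~ 3.7e9 kJ/mol would trigger the fatal
 * error at the initial step, so well-equilibrated systems are never flagged;
 * non-finite energies are rejected at every checked step.
 */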
/*! \brief Return true if there are special forces computed this step.
 * The conditionals exactly correspond to those in computeSpecialForces().
static bool haveSpecialForces(const t_inputrec& inputrec,
const gmx::ForceProviders& forceProviders,
const pull_t* pull_work,
const bool computeForces,
return ((computeForces && forceProviders.hasForceProvider()) || // forceProviders
(inputrec.bPull && pull_have_potential(*pull_work)) || // pull
inputrec.bRot || // enforced rotation
(ed != nullptr) || // flooding
(inputrec.bIMD && computeForces)); // IMD
/*! \brief Compute forces and/or energies for special algorithms
 * The intention is to collect all calls to algorithms that compute
 * forces on local atoms only and that do not contribute to the local
 * virial sum (but add their virial contribution separately).
 * Eventually these should likely all become ForceProviders.
 * Within this function the intention is to have algorithms that do
 * global communication at the end, so global barriers within the MD loop
 * are as close together as possible.
 * \param[in] fplog The log file
 * \param[in] cr The communication record
 * \param[in] inputrec The input record
 * \param[in] awh The Awh module (nullptr if none in use).
 * \param[in] enforcedRotation Enforced rotation module.
 * \param[in] imdSession The IMD session
 * \param[in] pull_work The pull work structure.
 * \param[in] step The current MD step
 * \param[in] t The current time
 * \param[in,out] wcycle Wallcycle accounting struct
 * \param[in,out] forceProviders Pointer to a list of force providers
 * \param[in] box The unit cell
 * \param[in] x The coordinates
 * \param[in] mdatoms Per atom properties
 * \param[in] lambda Array of free-energy lambda values
 * \param[in] stepWork Step schedule flags
 * \param[in,out] forceWithVirialMtsLevel0 Force and virial for MTS level0 forces
 * \param[in,out] forceWithVirialMtsLevel1 Force and virial for MTS level1 forces, can be nullptr
 * \param[in,out] enerd Energy buffer
 * \param[in,out] ed Essential dynamics pointer
 * \param[in] didNeighborSearch Tells if we did neighbor searching this step, used for ED sampling
 * \todo Remove didNeighborSearch, which is used incorrectly.
 * \todo Convert all other algorithms called here to ForceProviders.
static void computeSpecialForces(FILE* fplog,
const t_inputrec& inputrec,
gmx_enfrot* enforcedRotation,
gmx::ImdSession* imdSession,
gmx_wallcycle* wcycle,
gmx::ForceProviders* forceProviders,
gmx::ArrayRef<const gmx::RVec> x,
const t_mdatoms* mdatoms,
gmx::ArrayRef<const real> lambda,
const StepWorkload& stepWork,
gmx::ForceWithVirial* forceWithVirialMtsLevel0,
gmx::ForceWithVirial* forceWithVirialMtsLevel1,
gmx_enerdata_t* enerd,
bool didNeighborSearch)
/* NOTE: Currently all ForceProviders only provide forces.
 * When they also provide energies, remove this conditional.
if (stepWork.computeForces)
gmx::ForceProviderInput forceProviderInput(
gmx::arrayRefFromArray(mdatoms->chargeA, mdatoms->homenr),
gmx::arrayRefFromArray(mdatoms->massT, mdatoms->homenr),
gmx::ForceProviderOutput forceProviderOutput(forceWithVirialMtsLevel0, enerd);
/* Collect forces from modules */
forceProviders->calculateForces(forceProviderInput, &forceProviderOutput);
if (inputrec.bPull && pull_have_potential(*pull_work))
const int mtsLevel = forceGroupMtsLevel(inputrec.mtsLevels, gmx::MtsForceGroups::Pull);
if (mtsLevel == 0 || stepWork.computeSlowForces)
auto& forceWithVirial = (mtsLevel == 0) ? forceWithVirialMtsLevel0 : forceWithVirialMtsLevel1;
pull_potential_wrapper(
cr, inputrec, box, x, forceWithVirial, mdatoms, enerd, pull_work, lambda.data(), t, wcycle);
const int mtsLevel = forceGroupMtsLevel(inputrec.mtsLevels, gmx::MtsForceGroups::Pull);
if (mtsLevel == 0 || stepWork.computeSlowForces)
const bool needForeignEnergyDifferences = awh->needForeignEnergyDifferences(step);
std::vector<double> foreignLambdaDeltaH, foreignLambdaDhDl;
if (needForeignEnergyDifferences)
enerd->foreignLambdaTerms.finalizePotentialContributions(
enerd->dvdl_lin, lambda, *inputrec.fepvals);
std::tie(foreignLambdaDeltaH, foreignLambdaDhDl) = enerd->foreignLambdaTerms.getTerms(cr);
auto& forceWithVirial = (mtsLevel == 0) ? forceWithVirialMtsLevel0 : forceWithVirialMtsLevel1;
enerd->term[F_COM_PULL] += awh->applyBiasForcesAndUpdateBias(
gmx::arrayRefFromArray(mdatoms->massT, mdatoms->nr),
/* Add the forces from enforced rotation potentials (if any) */
wallcycle_start(wcycle, WallCycleCounter::RotAdd);
enerd->term[F_COM_PULL] +=
add_rot_forces(enforcedRotation, forceWithVirialMtsLevel0->force_, cr, step, t);
wallcycle_stop(wcycle, WallCycleCounter::RotAdd);
/* Note that since init_edsam() is called after the initialization
 * of forcerec, edsam doesn't request the noVirSum force buffer.
 * Thus if no other algorithm (e.g. PME) requires it, the forces
 * here will contribute to the virial.
do_flood(cr, inputrec, x, forceWithVirialMtsLevel0->force_, ed, box, step, didNeighborSearch);
/* Add forces from interactive molecular dynamics (IMD), if any */
if (inputrec.bIMD && stepWork.computeForces)
imdSession->applyForces(forceWithVirialMtsLevel0->force_);
/*! \brief Launch the prepare_step and spread stages of PME GPU.
 * \param[in] pmedata The PME structure
 * \param[in] box The box matrix
 * \param[in] stepWork Step schedule flags
 * \param[in] xReadyOnDevice Event synchronizer indicating that the coordinates are ready in the device memory.
 * \param[in] lambdaQ The Coulomb lambda of the current state.
 * \param[in] wcycle The wallcycle structure
static inline void launchPmeGpuSpread(gmx_pme_t* pmedata,
const StepWorkload& stepWork,
GpuEventSynchronizer* xReadyOnDevice,
gmx_wallcycle* wcycle)
pme_gpu_prepare_computation(pmedata, box, wcycle, stepWork);
pme_gpu_launch_spread(pmedata, xReadyOnDevice, wcycle, lambdaQ);
/*! \brief Launch the FFT and gather stages of PME GPU
 * This function only implements setting the output forces (no accumulation).
 * \param[in] pmedata The PME structure
 * \param[in] lambdaQ The Coulomb lambda of the current system state.
 * \param[in] wcycle The wallcycle structure
 * \param[in] stepWork Step schedule flags
static void launchPmeGpuFftAndGather(gmx_pme_t* pmedata,
gmx_wallcycle* wcycle,
const gmx::StepWorkload& stepWork)
pme_gpu_launch_complex_transforms(pmedata, wcycle, stepWork);
pme_gpu_launch_gather(pmedata, wcycle, lambdaQ);
 * Polling wait for either of the PME or nonbonded GPU tasks.
 * Instead of a static order in waiting for GPU tasks, this function
 * polls checking which of the two tasks completes first, and does the
 * associated force buffer reduction overlapped with the other task.
 * By doing that, unlike static scheduling order, it can always overlap
 * one of the reductions, regardless of the GPU task completion order.
 * \param[in] nbv Nonbonded verlet structure
 * \param[in,out] pmedata PME module data
 * \param[in,out] forceOutputsNonbonded Force outputs for the non-bonded forces and shift forces
 * \param[in,out] forceOutputsPme Force outputs for the PME forces and virial
 * \param[in,out] enerd Energy data structure results are reduced into
 * \param[in] lambdaQ The Coulomb lambda of the current system state.
 * \param[in] stepWork Step schedule flags
 * \param[in] wcycle The wallcycle structure
static void alternatePmeNbGpuWaitReduce(nonbonded_verlet_t* nbv,
gmx::ForceOutputs* forceOutputsNonbonded,
gmx::ForceOutputs* forceOutputsPme,
gmx_enerdata_t* enerd,
const StepWorkload& stepWork,
gmx_wallcycle* wcycle)
bool isPmeGpuDone = false;
bool isNbGpuDone = false;
gmx::ArrayRef<const gmx::RVec> pmeGpuForces;
while (!isPmeGpuDone || !isNbGpuDone)
GpuTaskCompletion completionType =
(isNbGpuDone) ? GpuTaskCompletion::Wait : GpuTaskCompletion::Check;
isPmeGpuDone = pme_gpu_try_finish_task(
pmedata, stepWork, wcycle, &forceOutputsPme->forceWithVirial(), enerd, lambdaQ, completionType);
auto& forceBuffersNonbonded = forceOutputsNonbonded->forceWithShiftForces();
GpuTaskCompletion completionType =
(isPmeGpuDone) ? GpuTaskCompletion::Wait : GpuTaskCompletion::Check;
isNbGpuDone = Nbnxm::gpu_try_finish_task(
enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::LJSR].data(),
enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::CoulombSR].data(),
forceBuffersNonbonded.shiftForces(),
nbv->atomdata_add_nbat_f_to_f(AtomLocality::Local, forceBuffersNonbonded.force());
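/* Minimal sketch of the polling pattern above (names here are hypothetical,
 * not GROMACS API): each still-pending task is only checked, and we block
 * (Wait) solely on the task whose counterpart has already completed, so one
 * of the two force reductions always overlaps the other task:
 * \code
 *   bool pmeDone = false, nbDone = false;
 *   while (!pmeDone || !nbDone)
 *   {
 *       if (!pmeDone)
 *       {
 *           pmeDone = tryFinish(pmeTask, nbDone ? Mode::Wait : Mode::Check);
 *       }
 *       if (!nbDone)
 *       {
 *           nbDone = tryFinish(nbTask, pmeDone ? Mode::Wait : Mode::Check);
 *       }
 *   }
 * \endcode
 */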
/*! \brief Set up the different force buffers; also does clearing.
 * \param[in] forceHelperBuffers Helper force buffers
 * \param[in] force force array
 * \param[in] domainWork Domain lifetime workload flags
 * \param[in] stepWork Step schedule flags
 * \param[in] havePpDomainDecomposition Whether we have a PP domain decomposition
 * \param[out] wcycle wallcycle recording structure
 * \returns Cleared force output structure
static ForceOutputs setupForceOutputs(ForceHelperBuffers* forceHelperBuffers,
gmx::ArrayRefWithPadding<gmx::RVec> force,
const DomainLifetimeWorkload& domainWork,
const StepWorkload& stepWork,
const bool havePpDomainDecomposition,
gmx_wallcycle* wcycle)
wallcycle_sub_start(wcycle, WallCycleSubCounter::ClearForceBuffer);
/* NOTE: We assume fr->shiftForces is all zeros here */
gmx::ForceWithShiftForces forceWithShiftForces(
force, stepWork.computeVirial, forceHelperBuffers->shiftForces());
if (stepWork.computeForces
&& (domainWork.haveCpuLocalForceWork || !stepWork.useGpuFBufferOps
|| (havePpDomainDecomposition && !stepWork.useGpuFHalo)))
/* Clear the short- and long-range forces */
clearRVecs(forceWithShiftForces.force(), true);
/* Clear the shift forces */
clearRVecs(forceWithShiftForces.shiftForces(), false);
/* If we need to compute the virial, we might need a separate
 * force buffer for algorithms for which the virial is calculated
 * directly, such as PME. Otherwise, forceWithVirial uses the
 * same force (f in legacy calls) buffer as other algorithms.
const bool useSeparateForceWithVirialBuffer =
(stepWork.computeForces
&& (stepWork.computeVirial && forceHelperBuffers->haveDirectVirialContributions()));
/* forceWithVirial uses the local atom range only */
gmx::ForceWithVirial forceWithVirial(
useSeparateForceWithVirialBuffer ? forceHelperBuffers->forceBufferForDirectVirialContributions()
: force.unpaddedArrayRef(),
stepWork.computeVirial);
if (useSeparateForceWithVirialBuffer)
/* TODO: update comment
 * We only compute forces on local atoms. Note that vsites can
 * spread to non-local atoms, but that part of the buffer is
 * cleared separately in the vsite spreading code.
clearRVecs(forceWithVirial.force_, true);
wallcycle_sub_stop(wcycle, WallCycleSubCounter::ClearForceBuffer);
forceWithShiftForces, forceHelperBuffers->haveDirectVirialContributions(), forceWithVirial);
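/* Aliasing sketch (restating the logic above with names from this function):
 * \code
 *   const bool separate = stepWork.computeForces && stepWork.computeVirial
 *                         && forceHelperBuffers->haveDirectVirialContributions();
 *   // separate == true  -> forceWithVirial wraps its own cleared buffer and
 *   //                      is summed into the main buffer in postProcessForces()
 *   // separate == false -> forceWithVirial aliases force.unpaddedArrayRef()
 * \endcode
 */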
/*! \brief Set up flags that have the lifetime of the domain indicating what type of work there is to compute.
static DomainLifetimeWorkload setupDomainLifetimeWorkload(const t_inputrec& inputrec,
const t_forcerec& fr,
const pull_t* pull_work,
const t_mdatoms& mdatoms,
const SimulationWorkload& simulationWork,
const StepWorkload& stepWork)
DomainLifetimeWorkload domainWork;
// Note that haveSpecialForces is constant over the whole run
domainWork.haveSpecialForces =
haveSpecialForces(inputrec, *fr.forceProviders, pull_work, stepWork.computeForces, ed);
domainWork.haveCpuListedForceWork = false;
domainWork.haveCpuBondedWork = false;
for (const auto& listedForces : fr.listedForces)
if (listedForces.haveCpuListedForces(*fr.fcdata))
domainWork.haveCpuListedForceWork = true;
if (listedForces.haveCpuBondeds())
domainWork.haveCpuBondedWork = true;
domainWork.haveGpuBondedWork = ((fr.gpuBonded != nullptr) && fr.gpuBonded->haveInteractions());
// Note that haveFreeEnergyWork is constant over the whole run
domainWork.haveFreeEnergyWork =
(fr.efep != FreeEnergyPerturbationType::No && mdatoms.nPerturbed != 0);
// We assume we have local force work if there are CPU
// force tasks including PME or nonbondeds.
domainWork.haveCpuLocalForceWork =
domainWork.haveSpecialForces || domainWork.haveCpuListedForceWork
|| domainWork.haveFreeEnergyWork || simulationWork.useCpuNonbonded || simulationWork.useCpuPme
|| simulationWork.haveEwaldSurfaceContribution || inputrec.nwall > 0;
/*! \brief Set up the force flag struct from the force bitmask.
 * \param[in] legacyFlags Force bitmask flags used to construct the new flags
 * \param[in] mtsLevels The multiple time-stepping levels, either empty or 2 levels
 * \param[in] step The current MD step
 * \param[in] simulationWork Simulation workload description.
 * \param[in] rankHasPmeDuty If this rank computes PME.
 * \returns New StepWorkload description.
static StepWorkload setupStepWorkload(const int legacyFlags,
ArrayRef<const gmx::MtsLevel> mtsLevels,
const SimulationWorkload& simulationWork,
const bool rankHasPmeDuty)
GMX_ASSERT(mtsLevels.empty() || mtsLevels.size() == 2, "Expect 0 or 2 MTS levels");
const bool computeSlowForces = (mtsLevels.empty() || step % mtsLevels[1].stepFactor == 0);
flags.stateChanged = ((legacyFlags & GMX_FORCE_STATECHANGED) != 0);
flags.haveDynamicBox = ((legacyFlags & GMX_FORCE_DYNAMICBOX) != 0);
flags.doNeighborSearch = ((legacyFlags & GMX_FORCE_NS) != 0);
flags.computeSlowForces = computeSlowForces;
flags.computeVirial = ((legacyFlags & GMX_FORCE_VIRIAL) != 0);
flags.computeEnergy = ((legacyFlags & GMX_FORCE_ENERGY) != 0);
flags.computeForces = ((legacyFlags & GMX_FORCE_FORCES) != 0);
flags.computeListedForces = ((legacyFlags & GMX_FORCE_LISTED) != 0);
flags.computeNonbondedForces =
((legacyFlags & GMX_FORCE_NONBONDED) != 0) && simulationWork.computeNonbonded
&& !(simulationWork.computeNonbondedAtMtsLevel1 && !computeSlowForces);
flags.computeDhdl = ((legacyFlags & GMX_FORCE_DHDL) != 0);
if (simulationWork.useGpuBufferOps)
GMX_ASSERT(simulationWork.useGpuNonbonded,
"Can only offload buffer ops if nonbonded computation is also offloaded");
flags.useGpuXBufferOps = simulationWork.useGpuBufferOps;
// on virial steps the CPU reduction path is taken
flags.useGpuFBufferOps = simulationWork.useGpuBufferOps && !flags.computeVirial;
flags.useGpuPmeFReduction = flags.computeSlowForces && flags.useGpuFBufferOps && simulationWork.useGpuPme
&& (rankHasPmeDuty || simulationWork.useGpuPmePpCommunication);
flags.useGpuXHalo = simulationWork.useGpuHaloExchange;
flags.useGpuFHalo = simulationWork.useGpuHaloExchange && flags.useGpuFBufferOps;
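/* Usage sketch (illustrative; the flag values are the bitmask members from
 * force_flags.h used elsewhere in this file): a typical energy+force step
 * could be scheduled as
 * \code
 *   const int legacyFlags = GMX_FORCE_STATECHANGED | GMX_FORCE_NS
 *                           | GMX_FORCE_FORCES | GMX_FORCE_ENERGY;
 *   runScheduleWork->stepWork = setupStepWorkload(
 *           legacyFlags, inputrec.mtsLevels, step, simulationWork, rankHasPmeDuty);
 * \endcode
 * With two MTS levels, computeSlowForces is only true every
 * mtsLevels[1].stepFactor-th step, which above gates the non-bonded work
 * when it is assigned to level 1.
 */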
/*! \brief Launch end-of-step GPU tasks: buffer clearing and rolling pruning.
 * TODO: eliminate \p useGpuPmeOnThisRank when this is
 * incorporated in DomainLifetimeWorkload.
static void launchGpuEndOfStepTasks(nonbonded_verlet_t* nbv,
gmx::GpuBonded* gpuBonded,
gmx_enerdata_t* enerd,
const gmx::MdrunScheduleWorkload& runScheduleWork,
bool useGpuPmeOnThisRank,
gmx_wallcycle* wcycle)
if (runScheduleWork.simulationWork.useGpuNonbonded && runScheduleWork.stepWork.computeNonbondedForces)
/* Launch pruning before buffer clearing because the API overhead of the
 * clear kernel launches can leave the GPU idle while it could be running
if (nbv->isDynamicPruningStepGpu(step))
nbv->dispatchPruneKernelGpu(step);
/* now clear the GPU outputs while we finish the step on the CPU */
wallcycle_start_nocount(wcycle, WallCycleCounter::LaunchGpu);
wallcycle_sub_start_nocount(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
Nbnxm::gpu_clear_outputs(nbv->gpu_nbv, runScheduleWork.stepWork.computeVirial);
wallcycle_sub_stop(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
wallcycle_stop(wcycle, WallCycleCounter::LaunchGpu);
if (useGpuPmeOnThisRank)
pme_gpu_reinit_computation(pmedata, wcycle);
if (runScheduleWork.domainWork.haveGpuBondedWork && runScheduleWork.stepWork.computeEnergy)
// in principle this should be included in the DD balancing region,
// but generally it is infrequent so we'll omit it for the sake of
gpuBonded->waitAccumulateEnergyTerms(enerd);
gpuBonded->clearEnergies();
//! \brief Data structure to hold dipole-related data and staging arrays
//! Dipole staging for fast summing over MPI
gmx::DVec muStaging[2] = { { 0.0, 0.0, 0.0 } };
//! Dipole staging for states A and B (index 0 and 1 resp.)
gmx::RVec muStateAB[2] = { { 0.0_real, 0.0_real, 0.0_real } };
static void reduceAndUpdateMuTot(DipoleData* dipoleData,
const t_commrec* cr,
const bool haveFreeEnergy,
gmx::ArrayRef<const real> lambda,
const DDBalanceRegionHandler& ddBalanceRegionHandler)
gmx_sumd(2 * DIM, dipoleData->muStaging[0], cr);
ddBalanceRegionHandler.reopenRegionCpu();
for (int i = 0; i < 2; i++)
for (int j = 0; j < DIM; j++)
dipoleData->muStateAB[i][j] = dipoleData->muStaging[i][j];
if (!haveFreeEnergy)
copy_rvec(dipoleData->muStateAB[0], muTotal);
for (int j = 0; j < DIM; j++)
muTotal[j] = (1.0 - lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)])
* dipoleData->muStateAB[0][j]
+ lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)]
* dipoleData->muStateAB[1][j];
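/* Worked example: with free energy enabled and lambda_coul = 0.25, the loop
 * above yields muTotal = 0.75 * muStateAB[0] + 0.25 * muStateAB[1], a linear
 * interpolation between the state A and state B dipoles; without free energy,
 * muTotal is simply the state A dipole.
 */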
/*! \brief Combines the MTS level0 and level1 force buffers into a full force buffer and an MTS-combined force buffer.
 * \param[in] numAtoms The number of atoms to combine forces for
 * \param[in,out] forceMtsLevel0 Input: F_level0, output: F_level0 + F_level1
 * \param[in,out] forceMts Input: F_level1, output: F_level0 + mtsFactor * F_level1
 * \param[in] mtsFactor The factor between the level0 and level1 time step
static void combineMtsForces(const int numAtoms,
ArrayRef<RVec> forceMtsLevel0,
ArrayRef<RVec> forceMts,
const real mtsFactor)
const int gmx_unused numThreads = gmx_omp_nthreads_get(emntDefault);
#pragma omp parallel for num_threads(numThreads) schedule(static)
for (int i = 0; i < numAtoms; i++)
const RVec forceMtsLevel0Tmp = forceMtsLevel0[i];
forceMtsLevel0[i] += forceMts[i];
forceMts[i] = forceMtsLevel0Tmp + mtsFactor * forceMts[i];
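/* Worked example with mtsFactor = 2: for level0 force F0 and level1 force F1
 * on an atom, the loop above leaves
 *   forceMtsLevel0[i] = F0 + F1
 *   forceMts[i]       = F0 + 2 * F1
 * matching the \param documentation; the factor compensates for the level1
 * forces only being evaluated every mtsFactor-th step.
 */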
/*! \brief Setup for the local and non-local GPU force reductions:
 * reinitialization plus the registration of forces and dependencies.
 * \param [in] runScheduleWork Schedule workload flag structure
 * \param [in] cr Communication record object
 * \param [in] fr Force record object
static void setupGpuForceReductions(gmx::MdrunScheduleWorkload* runScheduleWork,
const t_commrec* cr,
nonbonded_verlet_t* nbv = fr->nbv.get();
gmx::StatePropagatorDataGpu* stateGpu = fr->stateGpu;
// (re-)initialize local GPU force reduction
const bool accumulate =
runScheduleWork->domainWork.haveCpuLocalForceWork || havePPDomainDecomposition(cr);
const int atomStart = 0;
fr->gpuForceReduction[gmx::AtomLocality::Local]->reinit(stateGpu->getForces(),
nbv->getNumAtoms(AtomLocality::Local),
nbv->getGridIndices(),
stateGpu->fReducedOnDevice());
// register forces and add dependencies
fr->gpuForceReduction[gmx::AtomLocality::Local]->registerNbnxmForce(nbv->getGpuForces());
if (runScheduleWork->simulationWork.useGpuPme
&& (thisRankHasDuty(cr, DUTY_PME) || runScheduleWork->simulationWork.useGpuPmePpCommunication))
DeviceBuffer<gmx::RVec> forcePtr =
thisRankHasDuty(cr, DUTY_PME) ? pme_gpu_get_device_f(fr->pmedata)
: // PME force buffer on same GPU
fr->pmePpCommGpu->getGpuForceStagingPtr(); // buffer received from other GPU
fr->gpuForceReduction[gmx::AtomLocality::Local]->registerRvecForce(forcePtr);
GpuEventSynchronizer* const pmeSynchronizer =
(thisRankHasDuty(cr, DUTY_PME) ? pme_gpu_get_f_ready_synchronizer(fr->pmedata)
: // PME force buffer on same GPU
fr->pmePpCommGpu->getForcesReadySynchronizer()); // buffer received from other GPU
GMX_ASSERT(pmeSynchronizer != nullptr, "PME force ready CUDA event should not be NULL");
fr->gpuForceReduction[gmx::AtomLocality::Local]->addDependency(pmeSynchronizer);
if ((runScheduleWork->domainWork.haveCpuLocalForceWork || havePPDomainDecomposition(cr))
&& !runScheduleWork->simulationWork.useGpuHaloExchange)
auto forcesReadyLocality = havePPDomainDecomposition(cr) ? AtomLocality::Local : AtomLocality::All;
const bool useGpuForceBufferOps = true;
fr->gpuForceReduction[gmx::AtomLocality::Local]->addDependency(
stateGpu->getForcesReadyOnDeviceEvent(forcesReadyLocality, useGpuForceBufferOps));
if (runScheduleWork->simulationWork.useGpuHaloExchange)
fr->gpuForceReduction[gmx::AtomLocality::Local]->addDependency(
cr->dd->gpuHaloExchange[0][0]->getForcesReadyOnDeviceEvent());
if (havePPDomainDecomposition(cr))
// (re-)initialize non-local GPU force reduction
const bool accumulate = runScheduleWork->domainWork.haveCpuBondedWork
|| runScheduleWork->domainWork.haveFreeEnergyWork;
const int atomStart = dd_numHomeAtoms(*cr->dd);
fr->gpuForceReduction[gmx::AtomLocality::NonLocal]->reinit(stateGpu->getForces(),
nbv->getNumAtoms(AtomLocality::NonLocal),
nbv->getGridIndices(),
// register forces and add dependencies
fr->gpuForceReduction[gmx::AtomLocality::NonLocal]->registerNbnxmForce(nbv->getGpuForces());
if (runScheduleWork->domainWork.haveCpuBondedWork || runScheduleWork->domainWork.haveFreeEnergyWork)
fr->gpuForceReduction[gmx::AtomLocality::NonLocal]->addDependency(
stateGpu->getForcesReadyOnDeviceEvent(AtomLocality::NonLocal, true));
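/* Call-order sketch of the pattern used above for each locality (the
 * ellipses stand for the arguments shown in this function):
 * \code
 *   auto& reduction = fr->gpuForceReduction[gmx::AtomLocality::Local];
 *   reduction->reinit(...);             // attach base buffer, atom range, grid indices
 *   reduction->registerNbnxmForce(...); // nonbonded force contribution
 *   reduction->registerRvecForce(...);  // PME contribution, when applicable
 *   reduction->addDependency(...);      // events the reduction must wait for
 * \endcode
 */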
void do_force(FILE* fplog,
const t_commrec* cr,
const gmx_multisim_t* ms,
const t_inputrec& inputrec,
gmx_enfrot* enforcedRotation,
gmx::ImdSession* imdSession,
gmx_wallcycle* wcycle,
const gmx_localtop_t* top,
gmx::ArrayRefWithPadding<gmx::RVec> x,
const history_t* hist,
gmx::ForceBuffersView* forceView,
const t_mdatoms* mdatoms,
gmx_enerdata_t* enerd,
gmx::ArrayRef<const real> lambda,
gmx::MdrunScheduleWorkload* runScheduleWork,
gmx::VirtualSitesHandler* vsite,
const DDBalanceRegionHandler& ddBalanceRegionHandler)
auto force = forceView->forceWithPadding();
GMX_ASSERT(force.unpaddedArrayRef().ssize() >= fr->natoms_force_constr,
"The size of the force buffer should be at least the number of atoms to compute "
nonbonded_verlet_t* nbv = fr->nbv.get();
interaction_const_t* ic = fr->ic.get();
gmx::StatePropagatorDataGpu* stateGpu = fr->stateGpu;
const SimulationWorkload& simulationWork = runScheduleWork->simulationWork;
runScheduleWork->stepWork = setupStepWorkload(
legacyFlags, inputrec.mtsLevels, step, simulationWork, thisRankHasDuty(cr, DUTY_PME));
const StepWorkload& stepWork = runScheduleWork->stepWork;
const bool useGpuPmeOnThisRank =
simulationWork.useGpuPme && thisRankHasDuty(cr, DUTY_PME) && stepWork.computeSlowForces;
/* At a search step we need to start the first balancing region
 * somewhere early inside the step after communication during domain
 * decomposition (and not during the previous step as usual).
if (stepWork.doNeighborSearch)
ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::yes);
clear_mat(vir_force);
if (fr->pbcType != PbcType::No)
/* Compute shift vectors every step,
 * because of pressure coupling or box deformation!
if (stepWork.haveDynamicBox && stepWork.stateChanged)
calc_shifts(box, fr->shift_vec);
const bool fillGrid = (stepWork.doNeighborSearch && stepWork.stateChanged);
const bool calcCGCM = (fillGrid && !DOMAINDECOMP(cr));
put_atoms_in_box_omp(fr->pbcType,
x.unpaddedArrayRef().subArray(0, mdatoms->homenr),
gmx_omp_nthreads_get(emntDefault));
inc_nrnb(nrnb, eNR_SHIFTX, mdatoms->homenr);
nbnxn_atomdata_copy_shiftvec(stepWork.haveDynamicBox, fr->shift_vec, nbv->nbat.get());
const bool pmeSendCoordinatesFromGpu =
GMX_MPI && simulationWork.useGpuPmePpCommunication && !(stepWork.doNeighborSearch);
const bool reinitGpuPmePpComms =
GMX_MPI && simulationWork.useGpuPmePpCommunication && (stepWork.doNeighborSearch);
const auto localXReadyOnDevice = (useGpuPmeOnThisRank || simulationWork.useGpuBufferOps)
? stateGpu->getCoordinatesReadyOnDeviceEvent(
AtomLocality::Local, simulationWork, stepWork)
// Copy coordinates from the GPU if update is on the GPU and there
// are forces to be computed on the CPU, or for the computation of
// the virial, or if host-side data will be transferred from this task
// to a remote task for halo exchange or PME-PP communication. At
// search steps the current coordinates are already on the host,
// hence the copy is not needed.
const bool haveHostPmePpComms =
!thisRankHasDuty(cr, DUTY_PME) && !simulationWork.useGpuPmePpCommunication;
GMX_ASSERT(simulationWork.useGpuHaloExchange
== ((cr->dd != nullptr) && (!cr->dd->gpuHaloExchange[0].empty())),
"The GPU halo exchange is active, but it has not been constructed.");
const bool haveHostHaloExchangeComms =
havePPDomainDecomposition(cr) && !simulationWork.useGpuHaloExchange;
bool gmx_used_in_debug haveCopiedXFromGpu = false;
if (simulationWork.useGpuUpdate && !stepWork.doNeighborSearch
&& (runScheduleWork->domainWork.haveCpuLocalForceWork || stepWork.computeVirial
|| haveHostPmePpComms || haveHostHaloExchangeComms || simulationWork.computeMuTot))
stateGpu->copyCoordinatesFromGpu(x.unpaddedArrayRef(), AtomLocality::Local);
haveCopiedXFromGpu = true;
// Coordinates on the device are needed if PME or BufferOps are offloaded.
// The local coordinates can be copied right away.
// NOTE: Consider moving this copy to right after they are updated and constrained,
// if the latter is not offloaded.
if (useGpuPmeOnThisRank || stepWork.useGpuXBufferOps)
if (stepWork.doNeighborSearch)
// TODO refactor this to do_md, after partitioning.
stateGpu->reinit(mdatoms->homenr,
cr->dd != nullptr ? dd_numAtomsZones(*cr->dd) : mdatoms->homenr);
if (useGpuPmeOnThisRank)
// TODO: This should be moved into PME setup function ( pme_gpu_prepare_computation(...) )
pme_gpu_set_device_x(fr->pmedata, stateGpu->getCoordinates());
// We need to copy coordinates when:
// 1. Update is not offloaded
// 2. The buffers were reinitialized on search step
if (!simulationWork.useGpuUpdate || stepWork.doNeighborSearch)
GMX_ASSERT(stateGpu != nullptr, "stateGpu should not be null");
stateGpu->copyCoordinatesToGpu(x.unpaddedArrayRef(), AtomLocality::Local);
if (GMX_MPI && !thisRankHasDuty(cr, DUTY_PME) && stepWork.computeSlowForces)
/* Send particle coordinates to the pme nodes */
if (!pmeSendCoordinatesFromGpu && !stepWork.doNeighborSearch && simulationWork.useGpuUpdate)
GMX_ASSERT(haveCopiedXFromGpu,
"a wait should only be triggered if copy has been scheduled");
stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
gmx_pme_send_coordinates(fr,
as_rvec_array(x.unpaddedArrayRef().data()),
lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)],
lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Vdw)],
(stepWork.computeVirial || stepWork.computeEnergy),
simulationWork.useGpuPmePpCommunication,
reinitGpuPmePpComms,
pmeSendCoordinatesFromGpu,
localXReadyOnDevice,
if (useGpuPmeOnThisRank)
launchPmeGpuSpread(fr->pmedata,
localXReadyOnDevice,
lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)],
const gmx::DomainLifetimeWorkload& domainWork = runScheduleWork->domainWork;
/* do gridding for pair search */
if (stepWork.doNeighborSearch)
if (fr->wholeMoleculeTransform && stepWork.stateChanged)
fr->wholeMoleculeTransform->updateForAtomPbcJumps(x.unpaddedArrayRef(), box);
wallcycle_start(wcycle, WallCycleCounter::NS);
if (!DOMAINDECOMP(cr))
const rvec vzero = { 0.0_real, 0.0_real, 0.0_real };
const rvec boxDiagonal = { box[XX][XX], box[YY][YY], box[ZZ][ZZ] };
wallcycle_sub_start(wcycle, WallCycleSubCounter::NBSGridLocal);
nbnxn_put_on_grid(nbv,
{ 0, mdatoms->homenr },
x.unpaddedArrayRef(),
wallcycle_sub_stop(wcycle, WallCycleSubCounter::NBSGridLocal);
wallcycle_sub_start(wcycle, WallCycleSubCounter::NBSGridNonLocal);
nbnxn_put_on_grid_nonlocal(nbv, domdec_zones(cr->dd), fr->cginfo, x.unpaddedArrayRef());
wallcycle_sub_stop(wcycle, WallCycleSubCounter::NBSGridNonLocal);
nbv->setAtomProperties(gmx::constArrayRefFromArray(mdatoms->typeA, mdatoms->nr),
gmx::constArrayRefFromArray(mdatoms->chargeA, mdatoms->nr),
wallcycle_stop(wcycle, WallCycleCounter::NS);
/* initialize the GPU nbnxm atom data and bonded data structures */
if (simulationWork.useGpuNonbonded)
// Note: cycle counting only nonbondeds, gpuBonded counts internally
wallcycle_start_nocount(wcycle, WallCycleCounter::LaunchGpu);
wallcycle_sub_start_nocount(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
Nbnxm::gpu_init_atomdata(nbv->gpu_nbv, nbv->nbat.get());
wallcycle_sub_stop(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
wallcycle_stop(wcycle, WallCycleCounter::LaunchGpu);
/* Now that we have put all atoms on the grid, we can assign bonded
 * interactions to the GPU, where the grid order is
 * needed. Also the xq, f and fshift device buffers have
 * been reallocated if needed, so the bonded code can
 * learn about them. */
// TODO the xq, f, and fshift buffers are now shared
// resources, so they should be maintained by a
// higher-level object than the nb module.
fr->gpuBonded->updateInteractionListsAndDeviceBuffers(nbv->getGridIndices(),
Nbnxm::gpu_get_xq(nbv->gpu_nbv),
Nbnxm::gpu_get_f(nbv->gpu_nbv),
Nbnxm::gpu_get_fshift(nbv->gpu_nbv));
// Need to run after the GPU-offload bonded interaction lists
// are set up to be able to determine whether there is bonded work.
runScheduleWork->domainWork = setupDomainLifetimeWorkload(
inputrec, *fr, pull_work, ed, *mdatoms, simulationWork, stepWork);
wallcycle_start_nocount(wcycle, WallCycleCounter::NS);
wallcycle_sub_start(wcycle, WallCycleSubCounter::NBSSearchLocal);
/* Note that with a GPU the launch overhead of the list transfer is not timed separately */
nbv->constructPairlist(InteractionLocality::Local, top->excls, step, nrnb);
nbv->setupGpuShortRangeWork(fr->gpuBonded, InteractionLocality::Local);
wallcycle_sub_stop(wcycle, WallCycleSubCounter::NBSSearchLocal);
wallcycle_stop(wcycle, WallCycleCounter::NS);
if (stepWork.useGpuXBufferOps)
nbv->atomdata_init_copy_x_to_nbat_x_gpu();
if (simulationWork.useGpuBufferOps)
setupGpuForceReductions(runScheduleWork, cr, fr);
else if (!EI_TPI(inputrec.eI) && stepWork.computeNonbondedForces)
if (stepWork.useGpuXBufferOps)
GMX_ASSERT(stateGpu, "stateGpu should be valid when buffer ops are offloaded");
nbv->convertCoordinatesGpu(AtomLocality::Local, stateGpu->getCoordinates(), localXReadyOnDevice);
if (simulationWork.useGpuUpdate)
GMX_ASSERT(stateGpu, "need a valid stateGpu object");
GMX_ASSERT(haveCopiedXFromGpu,
"a wait should only be triggered if copy has been scheduled");
stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
nbv->convertCoordinates(AtomLocality::Local, x.unpaddedArrayRef());
if (simulationWork.useGpuNonbonded && (stepWork.computeNonbondedForces || domainWork.haveGpuBondedWork))
ddBalanceRegionHandler.openBeforeForceComputationGpu();
wallcycle_start(wcycle, WallCycleCounter::LaunchGpu);
wallcycle_sub_start(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
Nbnxm::gpu_upload_shiftvec(nbv->gpu_nbv, nbv->nbat.get());
if (stepWork.doNeighborSearch || !stepWork.useGpuXBufferOps)
Nbnxm::gpu_copy_xq_to_gpu(nbv->gpu_nbv, nbv->nbat.get(), AtomLocality::Local);
wallcycle_sub_stop(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
wallcycle_stop(wcycle, WallCycleCounter::LaunchGpu);
// with X buffer ops offloaded to the GPU on all but the search steps
// Bonded work is not split into separate local and non-local kernels, so with DD
// we can only launch the kernel after non-local coordinates have been received.
if (domainWork.haveGpuBondedWork && !havePPDomainDecomposition(cr))
fr->gpuBonded->setPbcAndlaunchKernel(fr->pbcType, box, fr->bMolPBC, stepWork);
/* launch local nonbonded work on GPU */
wallcycle_start_nocount(wcycle, WallCycleCounter::LaunchGpu);
wallcycle_sub_start_nocount(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::Local, enbvClearFNo, step, nrnb, wcycle);
wallcycle_sub_stop(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
wallcycle_stop(wcycle, WallCycleCounter::LaunchGpu);
if (useGpuPmeOnThisRank)
// In PME GPU and mixed mode we launch FFT / gather after the
// X copy/transform to allow overlap as well as after the GPU NB
// launch to avoid FFT launch overhead hijacking the CPU and delaying
// the nonbonded kernel.
launchPmeGpuFftAndGather(fr->pmedata,
lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)],
/* Communicate coordinates and sum dipole if necessary +
 * do non-local pair search */
if (havePPDomainDecomposition(cr))
if (stepWork.doNeighborSearch)
// TODO: fuse this branch with the above large stepWork.doNeighborSearch block
wallcycle_start_nocount(wcycle, WallCycleCounter::NS);
wallcycle_sub_start(wcycle, WallCycleSubCounter::NBSSearchNonLocal);
/* Note that with a GPU the launch overhead of the list transfer is not timed separately */
nbv->constructPairlist(InteractionLocality::NonLocal, top->excls, step, nrnb);
nbv->setupGpuShortRangeWork(fr->gpuBonded, InteractionLocality::NonLocal);
wallcycle_sub_stop(wcycle, WallCycleSubCounter::NBSSearchNonLocal);
wallcycle_stop(wcycle, WallCycleCounter::NS);
// TODO refactor this GPU halo exchange re-initialisation
// to location in do_md where GPU halo exchange is
// constructed at partitioning, after above stateGpu
// re-initialization has similarly been refactored
if (simulationWork.useGpuHaloExchange)
reinitGpuHaloExchange(*cr, stateGpu->getCoordinates(), stateGpu->getForces());
if (stepWork.useGpuXHalo)
// The following must be called after local setCoordinates (which records an event
// when the coordinate data has been copied to the device).
communicateGpuHaloCoordinates(*cr, box, localXReadyOnDevice);
if (domainWork.haveCpuBondedWork || domainWork.haveFreeEnergyWork)
// non-local part of coordinate buffer must be copied back to host for CPU work
stateGpu->copyCoordinatesFromGpu(x.unpaddedArrayRef(), AtomLocality::NonLocal);
if (simulationWork.useGpuUpdate)
GMX_ASSERT(haveCopiedXFromGpu,
"a wait should only be triggered if copy has been scheduled");
stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
dd_move_x(cr->dd, box, x.unpaddedArrayRef(), wcycle);
if (stepWork.useGpuXBufferOps)
if (!useGpuPmeOnThisRank && !stepWork.useGpuXHalo)
stateGpu->copyCoordinatesToGpu(x.unpaddedArrayRef(), AtomLocality::NonLocal);
nbv->convertCoordinatesGpu(AtomLocality::NonLocal,
stateGpu->getCoordinates(),
stateGpu->getCoordinatesReadyOnDeviceEvent(
AtomLocality::NonLocal, simulationWork, stepWork));
nbv->convertCoordinates(AtomLocality::NonLocal, x.unpaddedArrayRef());
if (simulationWork.useGpuNonbonded)
if (stepWork.doNeighborSearch || !stepWork.useGpuXBufferOps)
wallcycle_start(wcycle, WallCycleCounter::LaunchGpu);
wallcycle_sub_start(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
Nbnxm::gpu_copy_xq_to_gpu(nbv->gpu_nbv, nbv->nbat.get(), AtomLocality::NonLocal);
wallcycle_sub_stop(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
wallcycle_stop(wcycle, WallCycleCounter::LaunchGpu);
if (domainWork.haveGpuBondedWork)
fr->gpuBonded->setPbcAndlaunchKernel(fr->pbcType, box, fr->bMolPBC, stepWork);
/* launch non-local nonbonded tasks on GPU */
wallcycle_start_nocount(wcycle, WallCycleCounter::LaunchGpu);
wallcycle_sub_start(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::NonLocal, enbvClearFNo, step, nrnb, wcycle);
wallcycle_sub_stop(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
wallcycle_stop(wcycle, WallCycleCounter::LaunchGpu);
if (simulationWork.useGpuNonbonded && stepWork.computeNonbondedForces)
/* launch D2H copy-back F */
wallcycle_start_nocount(wcycle, WallCycleCounter::LaunchGpu);
wallcycle_sub_start_nocount(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
if (havePPDomainDecomposition(cr))
Nbnxm::gpu_launch_cpyback(nbv->gpu_nbv, nbv->nbat.get(), stepWork, AtomLocality::NonLocal);
Nbnxm::gpu_launch_cpyback(nbv->gpu_nbv, nbv->nbat.get(), stepWork, AtomLocality::Local);
wallcycle_sub_stop(wcycle, WallCycleSubCounter::LaunchGpuNonBonded);
if (domainWork.haveGpuBondedWork && stepWork.computeEnergy)
fr->gpuBonded->launchEnergyTransfer();
wallcycle_stop(wcycle, WallCycleCounter::LaunchGpu);

    gmx::ArrayRef<const gmx::RVec> xWholeMolecules;
    if (fr->wholeMoleculeTransform)
    {
        xWholeMolecules = fr->wholeMoleculeTransform->wholeMoleculeCoordinates(x.unpaddedArrayRef(), box);
    }

    DipoleData dipoleData;

    if (simulationWork.computeMuTot)
    {
        const int start = 0;

        if (simulationWork.useGpuUpdate && !stepWork.doNeighborSearch)
        {
            GMX_ASSERT(haveCopiedXFromGpu,
                       "a wait should only be triggered if copy has been scheduled");
            stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
        }

        /* Calculate total (local) dipole moment in a temporary common array.
         * This makes it possible to sum them over nodes faster.
         */
        gmx::ArrayRef<const gmx::RVec> xRef =
                (xWholeMolecules.empty() ? x.unpaddedArrayRef() : xWholeMolecules);
        calc_mu(start,
                mdatoms->homenr,
                xRef,
                gmx::arrayRefFromArray(mdatoms->chargeA, mdatoms->nr),
                gmx::arrayRefFromArray(mdatoms->chargeB, mdatoms->nr),
                mdatoms->nChargePerturbed != 0,
                dipoleData.muStaging[0],
                dipoleData.muStaging[1]);

        reduceAndUpdateMuTot(
                &dipoleData, cr, (fr->efep != FreeEnergyPerturbationType::No), lambda, muTotal, ddBalanceRegionHandler);
    }
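
    // In outline: calc_mu() accumulates the local dipole contributions
    // mu_X = sum_i q_i^X * x_i for the two perturbation states X = A, B into
    // dipoleData.muStaging, and reduceAndUpdateMuTot() sums the staging array
    // over ranks and, for free-energy runs, interpolates between the A- and
    // B-state dipoles.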

    /* Reset energies */
    reset_enerdata(enerd);

    if (DOMAINDECOMP(cr) && !thisRankHasDuty(cr, DUTY_PME))
    {
        wallcycle_start(wcycle, WallCycleCounter::PpDuringPme);
        dd_force_flop_start(cr->dd, nrnb);
    }

    // For the rest of the CPU tasks that depend on GPU-update produced coordinates,
    // this wait ensures that the D2H transfer is complete.
    if (simulationWork.useGpuUpdate && !stepWork.doNeighborSearch
        && (runScheduleWork->domainWork.haveCpuLocalForceWork || stepWork.computeVirial))
    {
        GMX_ASSERT(haveCopiedXFromGpu, "a wait should only be triggered if copy has been scheduled");
        stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
    }

    if (inputrec.bRot)
    {
        wallcycle_start(wcycle, WallCycleCounter::Rot);
        do_rotation(cr, enforcedRotation, box, x.unpaddedConstArrayRef(), t, step, stepWork.doNeighborSearch);
        wallcycle_stop(wcycle, WallCycleCounter::Rot);
    }

    /* Start the force cycle counter.
     * Note that a different counter is used for dynamic load balancing.
     */
    wallcycle_start(wcycle, WallCycleCounter::Force);

    /* Set up and clear force outputs:
     * forceOutMtsLevel0: everything except what is in the other two outputs
     * forceOutMtsLevel1: PME-mesh and listed-forces group 1
     * forceOutNonbonded: non-bonded forces
     * Without multiple time stepping all point to the same object.
     * With multiple time stepping the use is different for MTS fast (level0 only) and slow steps.
     */
    ForceOutputs forceOutMtsLevel0 = setupForceOutputs(
            &fr->forceHelperBuffers[0], force, domainWork, stepWork, havePPDomainDecomposition(cr), wcycle);

    // Force output for MTS combined forces, only set at level1 MTS steps
    std::optional<ForceOutputs> forceOutMts =
            (fr->useMts && stepWork.computeSlowForces)
                    ? std::optional(setupForceOutputs(&fr->forceHelperBuffers[1],
                                                      forceView->forceMtsCombinedWithPadding(),
                                                      domainWork,
                                                      stepWork,
                                                      havePPDomainDecomposition(cr),
                                                      wcycle))
                    : std::nullopt;

    ForceOutputs* forceOutMtsLevel1 =
            fr->useMts ? (stepWork.computeSlowForces ? &forceOutMts.value() : nullptr) : &forceOutMtsLevel0;

    const bool nonbondedAtMtsLevel1 = runScheduleWork->simulationWork.computeNonbondedAtMtsLevel1;

    ForceOutputs* forceOutNonbonded = nonbondedAtMtsLevel1 ? forceOutMtsLevel1 : &forceOutMtsLevel0;

    if (inputrec.bPull && pull_have_constraint(*pull_work))
    {
        clear_pull_forces(pull_work);
    }

    /* We calculate the non-bonded forces, when done on the CPU, here.
     * We do this before calling do_force_lowlevel, because in that
     * function, the listed forces are calculated before PME, which
     * does communication. With this order, non-bonded and listed
     * force calculation imbalance can be balanced out by the domain
     * decomposition load balancing.
     */

    const bool useOrEmulateGpuNb = simulationWork.useGpuNonbonded || fr->nbv->emulateGpu();

    if (!useOrEmulateGpuNb)
    {
        do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::Local, enbvClearFYes, step, nrnb, wcycle);
    }
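
    // The enbvClearFYes on this first do_nb_verlet() call zeroes the nbnxm force
    // buffer before accumulation; subsequent calls for other localities pass
    // enbvClearFNo so that they accumulate into the same buffer.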

    if (fr->efep != FreeEnergyPerturbationType::No && stepWork.computeNonbondedForces)
    {
        /* Calculate the local and non-local free energy interactions here.
         * Happens here on the CPU both with and without GPU.
         */
        nbv->dispatchFreeEnergyKernel(InteractionLocality::Local,
                                      fr,
                                      x.unpaddedArrayRef(),
                                      &forceOutNonbonded->forceWithShiftForces(),
                                      gmx::arrayRefFromArray(mdatoms->chargeA, mdatoms->nr),
                                      gmx::arrayRefFromArray(mdatoms->chargeB, mdatoms->nr),
                                      gmx::arrayRefFromArray(mdatoms->typeA, mdatoms->nr),
                                      gmx::arrayRefFromArray(mdatoms->typeB, mdatoms->nr),
                                      inputrec.fepvals.get(),
                                      lambda,
                                      enerd,
                                      stepWork,
                                      nrnb);

        if (havePPDomainDecomposition(cr))
        {
            nbv->dispatchFreeEnergyKernel(InteractionLocality::NonLocal,
                                          fr,
                                          x.unpaddedArrayRef(),
                                          &forceOutNonbonded->forceWithShiftForces(),
                                          gmx::arrayRefFromArray(mdatoms->chargeA, mdatoms->nr),
                                          gmx::arrayRefFromArray(mdatoms->chargeB, mdatoms->nr),
                                          gmx::arrayRefFromArray(mdatoms->typeA, mdatoms->nr),
                                          gmx::arrayRefFromArray(mdatoms->typeB, mdatoms->nr),
                                          inputrec.fepvals.get(),
                                          lambda,
                                          enerd,
                                          stepWork,
                                          nrnb);
        }
    }
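
    // The perturbed interactions always run on the CPU, so when the regular
    // nonbondeds run on a GPU their contribution lives in the CPU force buffer;
    // this is what the haveFreeEnergyWork checks around the GPU force reduction
    // below account for.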

    if (stepWork.computeNonbondedForces && !useOrEmulateGpuNb)
    {
        if (havePPDomainDecomposition(cr))
        {
            do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::NonLocal, enbvClearFNo, step, nrnb, wcycle);
        }

        if (stepWork.computeForces)
        {
            /* Add all the non-bonded forces to the normal force array.
             * This can be split into a local and a non-local part when overlapping
             * communication with calculation with domain decomposition.
             */
            wallcycle_stop(wcycle, WallCycleCounter::Force);
            nbv->atomdata_add_nbat_f_to_f(AtomLocality::All,
                                          forceOutNonbonded->forceWithShiftForces().force());
            wallcycle_start_nocount(wcycle, WallCycleCounter::Force);
        }

        /* If there are multiple fshift output buffers we need to reduce them */
        if (stepWork.computeVirial)
        {
            /* This is not in a subcounter because it takes a
             * negligible and constant-sized amount of time */
            nbnxn_atomdata_add_nbat_fshift_to_fshift(
                    *nbv->nbat, forceOutNonbonded->forceWithShiftForces().shiftForces());
        }
    }
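
    // The shift forces reduced here enter only the virial calculation, which is
    // why the extra reduction can be skipped on steps where stepWork.computeVirial
    // is false.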

    // TODO Force flags should include haveFreeEnergyWork for this domain
    if (stepWork.useGpuXHalo && (domainWork.haveCpuBondedWork || domainWork.haveFreeEnergyWork))
    {
        wallcycle_stop(wcycle, WallCycleCounter::Force);
        /* Wait for non-local coordinate data to be copied from device */
        stateGpu->waitCoordinatesReadyOnHost(AtomLocality::NonLocal);
        wallcycle_start_nocount(wcycle, WallCycleCounter::Force);
    }

    // Compute wall interactions, when present.
    // Note: should be moved to special forces.
    if (inputrec.nwall && stepWork.computeNonbondedForces)
    {
        /* foreign lambda component for walls */
        real dvdl_walls = do_walls(inputrec,
                                   *fr,
                                   box,
                                   *mdatoms,
                                   x.unpaddedConstArrayRef(),
                                   &forceOutMtsLevel0.forceWithVirial(),
                                   lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Vdw)],
                                   enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::LJSR].data(),
                                   nrnb);
        enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Vdw] += dvdl_walls;
    }

    if (stepWork.computeListedForces)
    {
        /* Check whether we need to take into account PBC in listed interactions */
        bool needMolPbc = false;
        for (const auto& listedForces : fr->listedForces)
        {
            if (listedForces.haveCpuListedForces(*fr->fcdata))
            {
                needMolPbc = fr->bMolPBC;
            }
        }

        t_pbc pbc;

        if (needMolPbc)
        {
            /* Since all atoms are in the rectangular or triclinic unit-cell,
             * only single box vector shifts (2 in x) are required.
             */
            set_pbc_dd(&pbc, fr->pbcType, DOMAINDECOMP(cr) ? cr->dd->numCells : nullptr, TRUE, box);
        }

        for (int mtsIndex = 0; mtsIndex < (fr->useMts && stepWork.computeSlowForces ? 2 : 1); mtsIndex++)
        {
            ListedForces& listedForces = fr->listedForces[mtsIndex];
            ForceOutputs& forceOut = (mtsIndex == 0 ? forceOutMtsLevel0 : *forceOutMtsLevel1);
            listedForces.calculate(wcycle,
                                   box,
                                   inputrec.fepvals.get(),
                                   cr,
                                   ms,
                                   x,
                                   xWholeMolecules,
                                   fr->fcdata.get(),
                                   hist,
                                   &forceOut,
                                   fr,
                                   &pbc,
                                   enerd,
                                   nrnb,
                                   lambda,
                                   mdatoms,
                                   DOMAINDECOMP(cr) ? cr->dd->globalAtomIndices.data() : nullptr,
                                   stepWork);
        }
    }

    if (stepWork.computeSlowForces)
    {
        calculateLongRangeNonbondeds(fr,
                                     inputrec,
                                     cr,
                                     nrnb,
                                     wcycle,
                                     mdatoms,
                                     x.unpaddedConstArrayRef(),
                                     &forceOutMtsLevel1->forceWithVirial(),
                                     enerd,
                                     box,
                                     lambda,
                                     dipoleData.muStateAB,
                                     stepWork,
                                     ddBalanceRegionHandler);
    }

    wallcycle_stop(wcycle, WallCycleCounter::Force);

    // VdW dispersion correction, only computed on master rank to avoid double counting
    if ((stepWork.computeEnergy || stepWork.computeVirial) && fr->dispersionCorrection && MASTER(cr))
    {
        // Calculate long range corrections to pressure and energy
        const DispersionCorrection::Correction correction = fr->dispersionCorrection->calculate(
                box, lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Vdw)]);

        if (stepWork.computeEnergy)
        {
            enerd->term[F_DISPCORR] = correction.energy;
            enerd->term[F_DVDL_VDW] += correction.dvdl;
            enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Vdw] += correction.dvdl;
        }
        if (stepWork.computeVirial)
        {
            correction.correctVirial(vir_force);
            enerd->term[F_PDISPCORR] = correction.pressure;
        }
    }
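
    // As a rough guide: for a plain Lennard-Jones cut-off at r_c in a homogeneous
    // system, the leading term of this energy correction is
    //   E_corr = -(2/3) * pi * N * rho * <C6> / r_c^3,
    // with a corresponding pressure term; the exact expressions depend on the
    // cut-off scheme and are evaluated inside DispersionCorrection.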

    computeSpecialForces(fplog,
                         cr,
                         inputrec,
                         awh,
                         enforcedRotation,
                         imdSession,
                         pull_work,
                         step,
                         t,
                         wcycle,
                         fr->forceProviders,
                         box,
                         x.unpaddedArrayRef(),
                         mdatoms,
                         lambda,
                         stepWork,
                         &forceOutMtsLevel0.forceWithVirial(),
                         forceOutMtsLevel1 ? &forceOutMtsLevel1->forceWithVirial() : nullptr,
                         enerd,
                         ed,
                         stepWork.doNeighborSearch);

    if (havePPDomainDecomposition(cr) && stepWork.computeForces && stepWork.useGpuFHalo
        && domainWork.haveCpuLocalForceWork)
    {
        stateGpu->copyForcesToGpu(forceOutMtsLevel0.forceWithShiftForces().force(), AtomLocality::Local);
    }

    GMX_ASSERT(!(nonbondedAtMtsLevel1 && stepWork.useGpuFBufferOps),
               "The schedule below does not allow for nonbonded MTS with GPU buffer ops");
    GMX_ASSERT(!(nonbondedAtMtsLevel1 && stepWork.useGpuFHalo),
               "The schedule below does not allow for nonbonded MTS with GPU halo exchange");
    // Will store the number of cycles spent waiting for the GPU that
    // will later be used in the DLB accounting.
    float cycles_wait_gpu = 0;

    if (useOrEmulateGpuNb && stepWork.computeNonbondedForces)
    {
        auto& forceWithShiftForces = forceOutNonbonded->forceWithShiftForces();

        /* wait for non-local forces (or calculate in emulation mode) */
        if (havePPDomainDecomposition(cr))
        {
            if (simulationWork.useGpuNonbonded)
            {
                cycles_wait_gpu += Nbnxm::gpu_wait_finish_task(
                        nbv->gpu_nbv,
                        stepWork,
                        AtomLocality::NonLocal,
                        enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::LJSR].data(),
                        enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::CoulombSR].data(),
                        forceWithShiftForces.shiftForces(),
                        wcycle);
            }
            else
            {
                wallcycle_start_nocount(wcycle, WallCycleCounter::Force);
                do_nb_verlet(
                        fr, ic, enerd, stepWork, InteractionLocality::NonLocal, enbvClearFYes, step, nrnb, wcycle);
                wallcycle_stop(wcycle, WallCycleCounter::Force);
            }
        }

        if (stepWork.useGpuFBufferOps)
        {
            // TODO: move this into DomainLifetimeWorkload, including the second part of the
            // condition. The bonded and free energy CPU tasks can have non-local force
            // contributions which are a dependency for the GPU force reduction.
            bool haveNonLocalForceContribInCpuBuffer =
                    domainWork.haveCpuBondedWork || domainWork.haveFreeEnergyWork;

            if (haveNonLocalForceContribInCpuBuffer)
            {
                stateGpu->copyForcesToGpu(forceOutMtsLevel0.forceWithShiftForces().force(),
                                          AtomLocality::NonLocal);
            }

            fr->gpuForceReduction[gmx::AtomLocality::NonLocal]->execute();

            if (!stepWork.useGpuFHalo)
            {
                // copy from GPU input for dd_move_f()
                stateGpu->copyForcesFromGpu(forceOutMtsLevel0.forceWithShiftForces().force(),
                                            AtomLocality::NonLocal);
            }
        }
        else
        {
            nbv->atomdata_add_nbat_f_to_f(AtomLocality::NonLocal, forceWithShiftForces.force());
        }

        if (fr->nbv->emulateGpu() && stepWork.computeVirial)
        {
            nbnxn_atomdata_add_nbat_fshift_to_fshift(*nbv->nbat, forceWithShiftForces.shiftForces());
        }
    }

    /* Combining the forces for multiple time stepping before the halo exchange, when possible,
     * avoids an extra halo exchange (when DD is used) and post-processing step.
     */
    const bool combineMtsForcesBeforeHaloExchange =
            (stepWork.computeForces && fr->useMts && stepWork.computeSlowForces
             && (legacyFlags & GMX_FORCE_DO_NOT_NEED_NORMAL_FORCE) != 0
             && !(stepWork.computeVirial || simulationWork.useGpuNonbonded || useGpuPmeOnThisRank));
    if (combineMtsForcesBeforeHaloExchange)
    {
        const int numAtoms = havePPDomainDecomposition(cr) ? dd_numAtomsZones(*cr->dd) : mdatoms->homenr;
        combineMtsForces(numAtoms,
                         force.unpaddedArrayRef(),
                         forceView->forceMtsCombined(),
                         inputrec.mtsLevels[1].stepFactor);
    }
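
    // In outline, combineMtsForces() forms
    //   f_combined[i] = f_fast[i] + stepFactor * f_slow[i]
    // (impulse-style MTS), so slow forces evaluated only every stepFactor-th step
    // contribute with the correct average impulse.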

    if (havePPDomainDecomposition(cr))
    {
        /* We are done with the CPU compute.
         * We will now communicate the non-local forces.
         * If we use a GPU this will overlap with GPU work, so in that case
         * we do not close the DD force balancing region here.
         */
        ddBalanceRegionHandler.closeAfterForceComputationCpu();

        if (stepWork.computeForces)
        {
            if (stepWork.useGpuFHalo)
            {
                // If there exist CPU forces, data from halo exchange should accumulate into these
                bool accumulateForces = domainWork.haveCpuLocalForceWork;
                if (!accumulateForces)
                {
                    // Force halo exchange will set a subset of local atoms with remote non-local data.
                    // First clear the local portion of the force array, so that untouched atoms are zero.
                    stateGpu->clearForcesOnGpu(AtomLocality::Local);
                }
                communicateGpuHaloForces(*cr, accumulateForces);
            }
            else
            {
                if (stepWork.useGpuFBufferOps)
                {
                    stateGpu->waitForcesReadyOnHost(AtomLocality::NonLocal);
                }

                // Without MTS, or with MTS at slow steps with uncombined forces, we need to
                // communicate the fast forces
                if (!fr->useMts || !combineMtsForcesBeforeHaloExchange)
                {
                    dd_move_f(cr->dd, &forceOutMtsLevel0.forceWithShiftForces(), wcycle);
                }
                // With MTS we need to communicate the slow or combined (in forceOutMtsLevel1) forces
                if (fr->useMts && stepWork.computeSlowForces)
                {
                    dd_move_f(cr->dd, &forceOutMtsLevel1->forceWithShiftForces(), wcycle);
                }
            }
        }
    }

    // With both nonbonded and PME offloaded to a GPU on the same rank, we use
    // an alternating wait/reduction scheme.
    bool alternateGpuWait = (!c_disableAlternatingWait && useGpuPmeOnThisRank && simulationWork.useGpuNonbonded
                             && !DOMAINDECOMP(cr) && !stepWork.useGpuFBufferOps);
    if (alternateGpuWait)
    {
        alternatePmeNbGpuWaitReduce(fr->nbv.get(),
                                    fr->pmedata,
                                    forceOutNonbonded,
                                    forceOutMtsLevel1,
                                    enerd,
                                    lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)],
                                    stepWork,
                                    wcycle);
    }

    if (!alternateGpuWait && useGpuPmeOnThisRank)
    {
        pme_gpu_wait_and_reduce(fr->pmedata,
                                stepWork,
                                wcycle,
                                &forceOutMtsLevel1->forceWithVirial(),
                                enerd,
                                lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)]);
    }
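
    // In outline: the alternating path polls both the PME and the nonbonded GPU
    // tasks and reduces the output of whichever finishes first, which helps when
    // neither task reliably completes before the other.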

    /* Wait for local GPU NB outputs on the non-alternating wait path */
    if (!alternateGpuWait && stepWork.computeNonbondedForces && simulationWork.useGpuNonbonded)
    {
        /* Measured overhead on CUDA and OpenCL with(out) GPU sharing
         * is between 0.5 and 1.5 Mcycles. So 2 MCycles is an overestimate,
         * but even with a step of 0.1 ms the difference is less than 1%
         * of the step time.
         */
        const float gpuWaitApiOverheadMargin = 2e6F; /* cycles */
        const float waitCycles = Nbnxm::gpu_wait_finish_task(
                nbv->gpu_nbv,
                stepWork,
                AtomLocality::Local,
                enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::LJSR].data(),
                enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::CoulombSR].data(),
                forceOutNonbonded->forceWithShiftForces().shiftForces(),
                wcycle);

        if (ddBalanceRegionHandler.useBalancingRegion())
        {
            DdBalanceRegionWaitedForGpu waitedForGpu = DdBalanceRegionWaitedForGpu::yes;
            if (stepWork.computeForces && waitCycles <= gpuWaitApiOverheadMargin)
            {
                /* We measured few cycles, it could be that the kernel
                 * and transfer finished earlier and there was no actual
                 * wait time, only API call overhead.
                 * Then the actual time could be anywhere between 0 and
                 * cycles_wait_est. We will use half of cycles_wait_est.
                 */
                waitedForGpu = DdBalanceRegionWaitedForGpu::no;
            }
            ddBalanceRegionHandler.closeAfterForceComputationGpu(cycles_wait_gpu, waitedForGpu);
        }
    }

    if (fr->nbv->emulateGpu())
    {
        // NOTE: emulation kernel is not included in the balancing region,
        // but emulation mode does not target performance anyway
        wallcycle_start_nocount(wcycle, WallCycleCounter::Force);
        do_nb_verlet(fr,
                     ic,
                     enerd,
                     stepWork,
                     InteractionLocality::Local,
                     DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes,
                     step,
                     nrnb,
                     wcycle);
        wallcycle_stop(wcycle, WallCycleCounter::Force);
    }

    // If on the GPU PME-PP comms path, receive forces from PME before GPU buffer ops
    // TODO refactor this and unify with below default-path call to the same function
    if (PAR(cr) && !thisRankHasDuty(cr, DUTY_PME) && stepWork.computeSlowForces
        && simulationWork.useGpuPmePpCommunication)
    {
        /* In case of node-splitting, the PP nodes receive the long-range
         * forces, virial and energy from the PME nodes here.
         */
        pme_receive_force_ener(fr,
                               cr,
                               &forceOutMtsLevel1->forceWithVirial(),
                               enerd,
                               simulationWork.useGpuPmePpCommunication,
                               stepWork.useGpuPmeFReduction,
                               wcycle);
    }

    /* Do the nonbonded GPU (or emulation) force buffer reduction
     * on the non-alternating path. */
    GMX_ASSERT(!(nonbondedAtMtsLevel1 && stepWork.useGpuFBufferOps),
               "The schedule below does not allow for nonbonded MTS with GPU buffer ops");
    if (useOrEmulateGpuNb && !alternateGpuWait)
    {
        if (stepWork.useGpuFBufferOps)
        {
            ArrayRef<gmx::RVec> forceWithShift = forceOutNonbonded->forceWithShiftForces().force();

            // Flag to specify whether the CPU force buffer has contributions to
            // local atoms. This depends on whether there are CPU-based force tasks
            // or, when DD is active, whether the halo exchange has resulted in
            // contributions from the non-local part.
            const bool haveLocalForceContribInCpuBuffer =
                    (domainWork.haveCpuLocalForceWork || havePPDomainDecomposition(cr));

            // TODO: move these steps as early as possible:
            // - CPU f H2D should happen as soon as all CPU-side forces are done
            // - the wait for the force reduction does not need to block the host (at least not
            //   here; it is sufficient to wait before the next CPU task that consumes the
            //   forces: vsite spread or update)
            // - the copy is not performed if GPU force halo exchange is active, because it would
            //   overwrite the result of the halo exchange. In that case the copy is instead
            //   performed above, before the exchange. These should be unified.
            if (haveLocalForceContribInCpuBuffer && !stepWork.useGpuFHalo)
            {
                // Note: AtomLocality::All is used for the non-DD case because
                // copyForcesToGpu() then uses a separate stream, which allows the
                // CPU force H2D to overlap with GPU force tasks on all streams,
                // including those in the local stream which would otherwise be
                // implicit dependencies for the transfer and would not overlap.
                auto locality = havePPDomainDecomposition(cr) ? AtomLocality::Local : AtomLocality::All;

                stateGpu->copyForcesToGpu(forceWithShift, locality);
            }

            if (stepWork.computeNonbondedForces)
            {
                fr->gpuForceReduction[gmx::AtomLocality::Local]->execute();
            }

            // Copy forces to host if they are needed for the update or if virtual sites are enabled.
            // If there are vsites, we need to copy forces every step to spread vsite forces on the host.
            // TODO: When the output flags are included in the step workload, this copy can be combined
            //       with the copy call done in sim_utils(...) for the output.
            // NOTE: If there are virtual sites, the forces are modified on the host after this D2H copy.
            //       Hence, they should not be copied in do_md(...) for the output.
            if (!simulationWork.useGpuUpdate
                || (simulationWork.useGpuUpdate && DOMAINDECOMP(cr) && haveHostPmePpComms) || vsite)
            {
                stateGpu->copyForcesFromGpu(forceWithShift, AtomLocality::Local);
                stateGpu->waitForcesReadyOnHost(AtomLocality::Local);
            }
        }
        else if (stepWork.computeNonbondedForces)
        {
            ArrayRef<gmx::RVec> forceWithShift = forceOutNonbonded->forceWithShiftForces().force();
            nbv->atomdata_add_nbat_f_to_f(AtomLocality::Local, forceWithShift);
        }
    }
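
    // Summary of the two reduction paths above: with GPU buffer ops the nbnxm-format
    // forces are reduced into the rvec buffer on the GPU (with an optional H2D copy of
    // CPU contributions first), while on the fallback path the same reduction is done
    // on the CPU by atomdata_add_nbat_f_to_f().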

    launchGpuEndOfStepTasks(
            nbv, fr->gpuBonded, fr->pmedata, enerd, *runScheduleWork, useGpuPmeOnThisRank, step, wcycle);

    if (DOMAINDECOMP(cr))
    {
        dd_force_flop_stop(cr->dd, nrnb);
    }

    const bool haveCombinedMtsForces = (stepWork.computeForces && fr->useMts && stepWork.computeSlowForces
                                        && combineMtsForcesBeforeHaloExchange);
    if (stepWork.computeForces)
    {
        postProcessForceWithShiftForces(
                nrnb, wcycle, box, x.unpaddedArrayRef(), &forceOutMtsLevel0, vir_force, *mdatoms, *fr, vsite, stepWork);

        if (fr->useMts && stepWork.computeSlowForces && !haveCombinedMtsForces)
        {
            postProcessForceWithShiftForces(
                    nrnb, wcycle, box, x.unpaddedArrayRef(), forceOutMtsLevel1, vir_force, *mdatoms, *fr, vsite, stepWork);
        }
    }

    // TODO refactor this and unify with the above GPU PME-PP / GPU update path call to the same function
    if (PAR(cr) && !thisRankHasDuty(cr, DUTY_PME) && !simulationWork.useGpuPmePpCommunication
        && stepWork.computeSlowForces)
    {
        /* In case of node-splitting, the PP nodes receive the long-range
         * forces, virial and energy from the PME nodes here.
         */
        pme_receive_force_ener(fr,
                               cr,
                               &forceOutMtsLevel1->forceWithVirial(),
                               enerd,
                               simulationWork.useGpuPmePpCommunication,
                               false,
                               wcycle);
    }

    if (stepWork.computeForces)
    {
        /* If we don't use MTS or if we already combined the MTS forces before, we only
         * need to post-process one ForceOutputs object here, called forceOutCombined,
         * otherwise we have to post-process two outputs and then combine them.
         */
        ForceOutputs& forceOutCombined = (haveCombinedMtsForces ? forceOutMts.value() : forceOutMtsLevel0);
        postProcessForces(
                cr, step, nrnb, wcycle, box, x.unpaddedArrayRef(), &forceOutCombined, vir_force, mdatoms, fr, vsite, stepWork);

        if (fr->useMts && stepWork.computeSlowForces && !haveCombinedMtsForces)
        {
            postProcessForces(
                    cr, step, nrnb, wcycle, box, x.unpaddedArrayRef(), forceOutMtsLevel1, vir_force, mdatoms, fr, vsite, stepWork);

            combineMtsForces(mdatoms->homenr,
                             force.unpaddedArrayRef(),
                             forceView->forceMtsCombined(),
                             inputrec.mtsLevels[1].stepFactor);
        }
    }

    if (stepWork.computeEnergy)
    {
        /* Compute the final potential energy terms */
        accumulatePotentialEnergies(enerd, lambda, inputrec.fepvals.get());

        if (!EI_TPI(inputrec.eI))
        {
            checkPotentialEnergyValidity(step, *enerd, inputrec);
        }
    }
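
    // checkPotentialEnergyValidity() above fails with a clear message on non-finite
    // energies, which normally indicate an unstable system; TPI runs are exempt since
    // test-particle insertion legitimately samples very large interaction energies.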

    /* In case we don't have constraints and are using GPUs, the next balancing
     * region starts here.
     * Some "special" work at the end of do_force_cuts?, such as vsite spread,
     * virial calculation and COM pulling, is thus not included in
     * the balance timing, which is OK as most tasks do communication.
     */
    ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::no);