2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team.
6 * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
7 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8 * and including many others, as listed in the AUTHORS file in the
9 * top-level source directory and at http://www.gromacs.org.
11 * GROMACS is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public License
13 * as published by the Free Software Foundation; either version 2.1
14 * of the License, or (at your option) any later version.
16 * GROMACS is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with GROMACS; if not, see
23 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
26 * If you want to redistribute modifications to GROMACS, please
27 * consider that scientific software is very special. Version
28 * control is crucial - bugs must be traceable. We will be happy to
29 * consider code for inclusion in the official distribution, but
30 * derived work must not be called official GROMACS. Details are found
31 * in the README & COPYING files - if they are missing, get the
32 * official version at http://www.gromacs.org.
34 * To help us fund GROMACS development, we humbly ask that you cite
35 * the research papers on the package. Check out http://www.gromacs.org.
48 #include "gromacs/awh/awh.h"
49 #include "gromacs/domdec/dlbtiming.h"
50 #include "gromacs/domdec/domdec.h"
51 #include "gromacs/domdec/domdec_struct.h"
52 #include "gromacs/domdec/gpuhaloexchange.h"
53 #include "gromacs/domdec/partition.h"
54 #include "gromacs/essentialdynamics/edsam.h"
55 #include "gromacs/ewald/pme.h"
56 #include "gromacs/ewald/pme_pp_comm_gpu.h"
57 #include "gromacs/gmxlib/network.h"
58 #include "gromacs/gmxlib/nonbonded/nb_free_energy.h"
59 #include "gromacs/gmxlib/nonbonded/nb_kernel.h"
60 #include "gromacs/gmxlib/nonbonded/nonbonded.h"
61 #include "gromacs/gpu_utils/gpu_utils.h"
62 #include "gromacs/imd/imd.h"
63 #include "gromacs/listed_forces/disre.h"
64 #include "gromacs/listed_forces/gpubonded.h"
65 #include "gromacs/listed_forces/listed_forces.h"
66 #include "gromacs/listed_forces/manage_threading.h"
67 #include "gromacs/listed_forces/orires.h"
68 #include "gromacs/math/arrayrefwithpadding.h"
69 #include "gromacs/math/functions.h"
70 #include "gromacs/math/units.h"
71 #include "gromacs/math/vec.h"
72 #include "gromacs/math/vecdump.h"
73 #include "gromacs/mdlib/calcmu.h"
74 #include "gromacs/mdlib/calcvir.h"
75 #include "gromacs/mdlib/constr.h"
76 #include "gromacs/mdlib/enerdata_utils.h"
77 #include "gromacs/mdlib/force.h"
78 #include "gromacs/mdlib/forcerec.h"
79 #include "gromacs/mdlib/gmx_omp_nthreads.h"
80 #include "gromacs/mdlib/qmmm.h"
81 #include "gromacs/mdlib/update.h"
82 #include "gromacs/mdtypes/commrec.h"
83 #include "gromacs/mdtypes/enerdata.h"
84 #include "gromacs/mdtypes/forceoutput.h"
85 #include "gromacs/mdtypes/iforceprovider.h"
86 #include "gromacs/mdtypes/inputrec.h"
87 #include "gromacs/mdtypes/md_enums.h"
88 #include "gromacs/mdtypes/simulation_workload.h"
89 #include "gromacs/mdtypes/state.h"
90 #include "gromacs/mdtypes/state_propagator_data_gpu.h"
91 #include "gromacs/nbnxm/gpu_data_mgmt.h"
92 #include "gromacs/nbnxm/nbnxm.h"
93 #include "gromacs/pbcutil/ishift.h"
94 #include "gromacs/pbcutil/mshift.h"
95 #include "gromacs/pbcutil/pbc.h"
96 #include "gromacs/pulling/pull.h"
97 #include "gromacs/pulling/pull_rotation.h"
98 #include "gromacs/timing/cyclecounter.h"
99 #include "gromacs/timing/gpu_timing.h"
100 #include "gromacs/timing/wallcycle.h"
101 #include "gromacs/timing/wallcyclereporting.h"
102 #include "gromacs/timing/walltime_accounting.h"
103 #include "gromacs/topology/topology.h"
104 #include "gromacs/utility/arrayref.h"
105 #include "gromacs/utility/basedefinitions.h"
106 #include "gromacs/utility/cstringutil.h"
107 #include "gromacs/utility/exceptions.h"
108 #include "gromacs/utility/fatalerror.h"
109 #include "gromacs/utility/fixedcapacityvector.h"
110 #include "gromacs/utility/gmxassert.h"
111 #include "gromacs/utility/gmxmpi.h"
112 #include "gromacs/utility/logger.h"
113 #include "gromacs/utility/smalloc.h"
114 #include "gromacs/utility/strconvert.h"
115 #include "gromacs/utility/sysinfo.h"
117 using gmx::ForceOutputs;
118 using gmx::StepWorkload;
119 using gmx::DomainLifetimeWorkload;
120 using gmx::SimulationWorkload;
121 using gmx::AtomLocality;
122 using gmx::InteractionLocality;
124 // TODO: this environment variable allows us to verify before release
125 // that on less common architectures the total cost of polling is not larger than
126 // a blocking wait (so polling does not introduce overhead when the static
127 // PME-first ordering would suffice).
128 static const bool c_disableAlternatingWait = (getenv("GMX_DISABLE_ALTERNATING_GPU_WAIT") != nullptr);
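/*! \brief Adds \p forceToAdd to \p f.
 *
 * The reduction over the local atoms is parallelized with OpenMP using the
 * default module thread count.
 */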
130 static void sum_forces(rvec f[], gmx::ArrayRef<const gmx::RVec> forceToAdd)
132 const int end = forceToAdd.size();
134 int gmx_unused nt = gmx_omp_nthreads_get(emntDefault);
135 #pragma omp parallel for num_threads(nt) schedule(static)
136 for (int i = 0; i < end; i++)
138 rvec_inc(f[i], forceToAdd[i]);
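/*! \brief Computes the short-range virial contributions.
 *
 * Adds the virial from the shift forces of the surrounding periodic images
 * and the single-sum virial over the local atoms; the total virial is
 * assembled later in global_stat().
 */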
142 static void calc_virial(int start, int homenr, const rvec x[],
143 const gmx::ForceWithShiftForces &forceWithShiftForces,
144 tensor vir_part, const t_graph *graph, const matrix box,
145 t_nrnb *nrnb, const t_forcerec *fr, int ePBC)
147 /* The short-range virial from surrounding boxes */
148 const rvec *fshift = as_rvec_array(forceWithShiftForces.shiftForces().data());
149 calc_vir(SHIFTS, fr->shift_vec, fshift, vir_part, ePBC == epbcSCREW, box);
150 inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);
152 /* Calculate partial virial, for local atoms only, based on short range.
153 * Total virial is computed in global_stat, called from do_md
155 const rvec *f = as_rvec_array(forceWithShiftForces.force().data());
156 f_calc_vir(start, start+homenr, x, f, vir_part, graph, box);
157 inc_nrnb(nrnb, eNR_VIRIAL, homenr);
161 pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
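/*! \brief Computes the center-of-mass pull potential and forces.
 *
 * Thin wrapper around pull_potential() that sets up the PBC, adds the pull
 * energy to enerd->term[F_COM_PULL] and the restraint dVdl contribution, and
 * does the wall-cycle accounting.
 */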
165 static void pull_potential_wrapper(const t_commrec *cr,
166 const t_inputrec *ir,
167 const matrix box, gmx::ArrayRef<const gmx::RVec> x,
168 gmx::ForceWithVirial *force,
169 const t_mdatoms *mdatoms,
170 gmx_enerdata_t *enerd,
174 gmx_wallcycle_t wcycle)
/* Calculate the center of mass forces; this requires communication,
 * which is why pull_potential is called close to other communication.
182 wallcycle_start(wcycle, ewcPULLPOT);
183 set_pbc(&pbc, ir->ePBC, box);
185 enerd->term[F_COM_PULL] +=
186 pull_potential(pull_work, mdatoms, &pbc,
187 cr, t, lambda[efptRESTRAINT], as_rvec_array(x.data()), force, &dvdl);
188 enerd->dvdl_lin[efptRESTRAINT] += dvdl;
189 wallcycle_stop(wcycle, ewcPULLPOT);
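/*! \brief Receives the long-range PME forces, energies and dVdl from a separate PME rank.
 *
 * Only used with PME running on separate ranks; the received energy and dVdl
 * terms are accumulated into \p enerd.
 */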
192 static void pme_receive_force_ener(t_forcerec *fr,
194 gmx::ForceWithVirial *forceWithVirial,
195 gmx_enerdata_t *enerd,
196 bool useGpuPmePpComms,
197 bool receivePmeForceToGpu,
198 gmx_wallcycle_t wcycle)
200 real e_q, e_lj, dvdl_q, dvdl_lj;
201 float cycles_ppdpme, cycles_seppme;
203 cycles_ppdpme = wallcycle_stop(wcycle, ewcPPDURINGPME);
204 dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);
206 /* In case of node-splitting, the PP nodes receive the long-range
207 * forces, virial and energy from the PME nodes here.
209 wallcycle_start(wcycle, ewcPP_PMEWAITRECVF);
212 gmx_pme_receive_f(fr->pmePpCommGpu.get(),
213 cr, forceWithVirial, &e_q, &e_lj, &dvdl_q, &dvdl_lj,
214 useGpuPmePpComms, receivePmeForceToGpu, &cycles_seppme);
215 enerd->term[F_COUL_RECIP] += e_q;
216 enerd->term[F_LJ_RECIP] += e_lj;
217 enerd->dvdl_lin[efptCOUL] += dvdl_q;
218 enerd->dvdl_lin[efptVDW] += dvdl_lj;
222 dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);
224 wallcycle_stop(wcycle, ewcPP_PMEWAITRECVF);
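/*! \brief Prints all local atoms with a force larger than the requested tolerance
 * and issues a fatal error when non-finite forces are detected.
 */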
227 static void print_large_forces(FILE *fp,
235 real force2Tolerance = gmx::square(forceTolerance);
236 gmx::index numNonFinite = 0;
237 for (int i = 0; i < md->homenr; i++)
239 real force2 = norm2(f[i]);
240 bool nonFinite = !std::isfinite(force2);
241 if (force2 >= force2Tolerance || nonFinite)
243 fprintf(fp, "step %" PRId64 " atom %6d x %8.3f %8.3f %8.3f force %12.5e\n",
245 ddglatnr(cr->dd, i), x[i][XX], x[i][YY], x[i][ZZ], std::sqrt(force2));
252 if (numNonFinite > 0)
254 /* Note that with MPI this fatal call on one rank might interrupt
255 * the printing on other ranks. But we can only avoid that with
256 * an expensive MPI barrier that we would need at each step.
258 gmx_fatal(FARGS, "At step %" PRId64 " detected non-finite forces on %td atoms", step, numNonFinite);
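/*! \brief Post-processes the forces after the main force computation.
 *
 * Spreads the forces on virtual sites from the direct-virial buffer (e.g. the
 * PME mesh forces) to the constructing atoms, adds that buffer into the main
 * force buffer together with its virial when the virial is requested, and
 * optionally prints large forces.
 */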
262 static void post_process_forces(const t_commrec *cr,
265 gmx_wallcycle_t wcycle,
266 const gmx_localtop_t *top,
269 ForceOutputs *forceOutputs,
271 const t_mdatoms *mdatoms,
272 const t_graph *graph,
273 const t_forcerec *fr,
274 const gmx_vsite_t *vsite,
275 const StepWorkload &stepWork)
277 rvec *f = as_rvec_array(forceOutputs->forceWithShiftForces().force().data());
279 if (fr->haveDirectVirialContributions)
281 auto &forceWithVirial = forceOutputs->forceWithVirial();
282 rvec *fDirectVir = as_rvec_array(forceWithVirial.force_.data());
/* Spread the mesh force on virtual sites to the other particles...
 * This is parallelized. MPI communication is performed
 * if the constructing atoms aren't local.
290 matrix virial = { { 0 } };
291 spread_vsite_f(vsite, x, fDirectVir, nullptr,
292 stepWork.computeVirial, virial,
294 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr, wcycle);
295 forceWithVirial.addVirialContribution(virial);
298 if (stepWork.computeVirial)
300 /* Now add the forces, this is local */
301 sum_forces(f, forceWithVirial.force_);
303 /* Add the direct virial contributions */
304 GMX_ASSERT(forceWithVirial.computeVirial_, "forceWithVirial should request virial computation when we request the virial");
305 m_add(vir_force, forceWithVirial.getVirial(), vir_force);
309 pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
314 if (fr->print_force >= 0)
316 print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
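/*! \brief Dispatches the short-ranged nonbonded kernel for the given interaction locality.
 *
 * When a dynamic pair-list pruning step is due on the CPU, the prune kernel
 * is dispatched first.
 */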
320 static void do_nb_verlet(t_forcerec *fr,
321 const interaction_const_t *ic,
322 gmx_enerdata_t *enerd,
323 const StepWorkload &stepWork,
324 const InteractionLocality ilocality,
328 gmx_wallcycle_t wcycle)
330 if (!stepWork.computeNonbondedForces)
332 /* skip non-bonded calculation */
336 nonbonded_verlet_t *nbv = fr->nbv.get();
338 /* GPU kernel launch overhead is already timed separately */
339 if (fr->cutoff_scheme != ecutsVERLET)
341 gmx_incons("Invalid cut-off scheme passed!");
346 /* When dynamic pair-list pruning is requested, we need to prune
347 * at nstlistPrune steps.
349 if (nbv->isDynamicPruningStepCpu(step))
351 /* Prune the pair-list beyond fr->ic->rlistPrune using
352 * the current coordinates of the atoms.
354 wallcycle_sub_start(wcycle, ewcsNONBONDED_PRUNING);
355 nbv->dispatchPruneKernelCpu(ilocality, fr->shift_vec);
356 wallcycle_sub_stop(wcycle, ewcsNONBONDED_PRUNING);
360 nbv->dispatchNonbondedKernel(ilocality, *ic, stepWork, clearF, *fr, enerd, nrnb);
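/*! \brief Zeroes \p n rvec elements, using OpenMP threading only when the buffer is large enough for it to pay off. */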
363 static inline void clear_rvecs_omp(int n, rvec v[])
365 int nth = gmx_omp_nthreads_get_simple_rvec_task(emntDefault, n);
367 /* Note that we would like to avoid this conditional by putting it
368 * into the omp pragma instead, but then we still take the full
369 * omp parallel for overhead (at least with gcc5).
373 for (int i = 0; i < n; i++)
380 #pragma omp parallel for num_threads(nth) schedule(static)
381 for (int i = 0; i < n; i++)
388 /*! \brief Return an estimate of the average kinetic energy or 0 when unreliable
390 * \param groupOptions Group options, containing T-coupling options
392 static real averageKineticEnergyEstimate(const t_grpopts &groupOptions)
394 real nrdfCoupled = 0;
395 real nrdfUncoupled = 0;
396 real kineticEnergy = 0;
397 for (int g = 0; g < groupOptions.ngtc; g++)
399 if (groupOptions.tau_t[g] >= 0)
401 nrdfCoupled += groupOptions.nrdf[g];
402 kineticEnergy += groupOptions.nrdf[g]*0.5*groupOptions.ref_t[g]*BOLTZ;
406 nrdfUncoupled += groupOptions.nrdf[g];
410 /* This conditional with > also catches nrdf=0 */
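/* The estimate assigns 0.5*kB*T_ref per temperature-coupled degree of freedom
 * and extrapolates that average to the uncoupled degrees of freedom as well. */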
411 if (nrdfCoupled > nrdfUncoupled)
413 return kineticEnergy*(nrdfCoupled + nrdfUncoupled)/nrdfCoupled;
421 /*! \brief This routine checks that the potential energy is finite.
423 * Always checks that the potential energy is finite. If step equals
424 * inputrec.init_step also checks that the magnitude of the potential energy
425 * is reasonable. Terminates with a fatal error when a check fails.
426 * Note that passing this check does not guarantee finite forces,
 * since those use slightly different arithmetic. But in most cases
428 * there is just a narrow coordinate range where forces are not finite
429 * and energies are finite.
431 * \param[in] step The step number, used for checking and printing
432 * \param[in] enerd The energy data; the non-bonded group energies need to be added to enerd.term[F_EPOT] before calling this routine
433 * \param[in] inputrec The input record
435 static void checkPotentialEnergyValidity(int64_t step,
436 const gmx_enerdata_t &enerd,
437 const t_inputrec &inputrec)
439 /* Threshold valid for comparing absolute potential energy against
440 * the kinetic energy. Normally one should not consider absolute
441 * potential energy values, but with a factor of one million
442 * we should never get false positives.
444 constexpr real c_thresholdFactor = 1e6;
446 bool energyIsNotFinite = !std::isfinite(enerd.term[F_EPOT]);
447 real averageKineticEnergy = 0;
/* We only check for large potential energy at the initial step,
 * because that is by far the most likely step for this to occur
 * and because computing the average kinetic energy is not free.
 * Note: nstcalcenergy >> 1 often does not allow catching large energies
 * before they become NaN.
454 if (step == inputrec.init_step && EI_DYNAMICS(inputrec.eI))
456 averageKineticEnergy = averageKineticEnergyEstimate(inputrec.opts);
459 if (energyIsNotFinite || (averageKineticEnergy > 0 &&
460 enerd.term[F_EPOT] > c_thresholdFactor*averageKineticEnergy))
462 gmx_fatal(FARGS, "Step %" PRId64 ": The total potential energy is %g, which is %s. The LJ and electrostatic contributions to the energy are %g and %g, respectively. A %s potential energy can be caused by overlapping interactions in bonded interactions or very large%s coordinate values. Usually this is caused by a badly- or non-equilibrated initial configuration, incorrect interactions or parameters in the topology.",
465 energyIsNotFinite ? "not finite" : "extremely high",
467 enerd.term[F_COUL_SR],
468 energyIsNotFinite ? "non-finite" : "very high",
energyIsNotFinite ? " or NaN" : "");
473 /*! \brief Return true if there are special forces computed this step.
475 * The conditionals exactly correspond to those in computeSpecialForces().
478 haveSpecialForces(const t_inputrec &inputrec,
479 const gmx::ForceProviders &forceProviders,
480 const pull_t *pull_work,
481 const bool computeForces,
486 ((computeForces && forceProviders.hasForceProvider()) || // forceProviders
487 (inputrec.bPull && pull_have_potential(pull_work)) || // pull
488 inputrec.bRot || // enforced rotation
489 (ed != nullptr) || // flooding
490 (inputrec.bIMD && computeForces)); // IMD
493 /*! \brief Compute forces and/or energies for special algorithms
495 * The intention is to collect all calls to algorithms that compute
496 * forces on local atoms only and that do not contribute to the local
497 * virial sum (but add their virial contribution separately).
498 * Eventually these should likely all become ForceProviders.
499 * Within this function the intention is to have algorithms that do
500 * global communication at the end, so global barriers within the MD loop
501 * are as close together as possible.
503 * \param[in] fplog The log file
504 * \param[in] cr The communication record
505 * \param[in] inputrec The input record
506 * \param[in] awh The Awh module (nullptr if none in use).
507 * \param[in] enforcedRotation Enforced rotation module.
508 * \param[in] imdSession The IMD session
509 * \param[in] pull_work The pull work structure.
510 * \param[in] step The current MD step
511 * \param[in] t The current time
512 * \param[in,out] wcycle Wallcycle accounting struct
513 * \param[in,out] forceProviders Pointer to a list of force providers
514 * \param[in] box The unit cell
515 * \param[in] x The coordinates
516 * \param[in] mdatoms Per atom properties
517 * \param[in] lambda Array of free-energy lambda values
518 * \param[in] stepWork Step schedule flags
519 * \param[in,out] forceWithVirial Force and virial buffers
520 * \param[in,out] enerd Energy buffer
521 * \param[in,out] ed Essential dynamics pointer
522 * \param[in] didNeighborSearch Tells if we did neighbor searching this step, used for ED sampling
524 * \todo Remove didNeighborSearch, which is used incorrectly.
525 * \todo Convert all other algorithms called here to ForceProviders.
528 computeSpecialForces(FILE *fplog,
530 const t_inputrec *inputrec,
532 gmx_enfrot *enforcedRotation,
533 gmx::ImdSession *imdSession,
537 gmx_wallcycle_t wcycle,
538 gmx::ForceProviders *forceProviders,
540 gmx::ArrayRef<const gmx::RVec> x,
541 const t_mdatoms *mdatoms,
543 const StepWorkload &stepWork,
544 gmx::ForceWithVirial *forceWithVirial,
545 gmx_enerdata_t *enerd,
547 bool didNeighborSearch)
549 /* NOTE: Currently all ForceProviders only provide forces.
550 * When they also provide energies, remove this conditional.
552 if (stepWork.computeForces)
554 gmx::ForceProviderInput forceProviderInput(x, *mdatoms, t, box, *cr);
555 gmx::ForceProviderOutput forceProviderOutput(forceWithVirial, enerd);
557 /* Collect forces from modules */
558 forceProviders->calculateForces(forceProviderInput, &forceProviderOutput);
561 if (inputrec->bPull && pull_have_potential(pull_work))
563 pull_potential_wrapper(cr, inputrec, box, x,
565 mdatoms, enerd, pull_work, lambda, t,
570 enerd->term[F_COM_PULL] +=
571 awh->applyBiasForcesAndUpdateBias(inputrec->ePBC, *mdatoms, box,
573 t, step, wcycle, fplog);
577 rvec *f = as_rvec_array(forceWithVirial->force_.data());
579 /* Add the forces from enforced rotation potentials (if any) */
582 wallcycle_start(wcycle, ewcROTadd);
583 enerd->term[F_COM_PULL] += add_rot_forces(enforcedRotation, f, cr, step, t);
584 wallcycle_stop(wcycle, ewcROTadd);
589 /* Note that since init_edsam() is called after the initialization
590 * of forcerec, edsam doesn't request the noVirSum force buffer.
591 * Thus if no other algorithm (e.g. PME) requires it, the forces
592 * here will contribute to the virial.
594 do_flood(cr, inputrec, as_rvec_array(x.data()), f, ed, box, step, didNeighborSearch);
597 /* Add forces from interactive molecular dynamics (IMD), if any */
598 if (inputrec->bIMD && stepWork.computeForces)
600 imdSession->applyForces(f);
604 /*! \brief Makes PME flags from StepWorkload data.
606 * \param[in] stepWork Step schedule flags
609 static int makePmeFlags(const StepWorkload &stepWork)
611 return GMX_PME_SPREAD | GMX_PME_SOLVE |
612 (stepWork.computeVirial ? GMX_PME_CALC_ENER_VIR : 0) |
613 (stepWork.computeEnergy ? GMX_PME_CALC_ENER_VIR : 0) |
614 (stepWork.computeForces ? GMX_PME_CALC_F : 0);
617 /*! \brief Launch the prepare_step and spread stages of PME GPU.
619 * \param[in] pmedata The PME structure
620 * \param[in] box The box matrix
621 * \param[in] stepWork Step schedule flags
622 * \param[in] pmeFlags PME flags
623 * \param[in] xReadyOnDevice Event synchronizer indicating that the coordinates are ready in the device memory.
624 * \param[in] wcycle The wallcycle structure
626 static inline void launchPmeGpuSpread(gmx_pme_t *pmedata,
628 const StepWorkload &stepWork,
630 GpuEventSynchronizer *xReadyOnDevice,
631 gmx_wallcycle_t wcycle)
633 pme_gpu_prepare_computation(pmedata, stepWork.haveDynamicBox, box, wcycle, pmeFlags, stepWork.useGpuPmeFReduction);
634 pme_gpu_launch_spread(pmedata, xReadyOnDevice, wcycle);
637 /*! \brief Launch the FFT and gather stages of PME GPU
639 * This function only implements setting the output forces (no accumulation).
641 * \param[in] pmedata The PME structure
642 * \param[in] wcycle The wallcycle structure
644 static void launchPmeGpuFftAndGather(gmx_pme_t *pmedata,
645 gmx_wallcycle_t wcycle)
647 pme_gpu_launch_complex_transforms(pmedata, wcycle);
648 pme_gpu_launch_gather(pmedata, wcycle, PmeForceOutputHandling::Set);
652 * Polling wait for either of the PME or nonbonded GPU tasks.
 * Instead of a static order in waiting for GPU tasks, this function
 * polls to check which of the two tasks completes first, and does the
 * associated force buffer reduction overlapped with the other task.
657 * By doing that, unlike static scheduling order, it can always overlap
658 * one of the reductions, regardless of the GPU task completion order.
660 * \param[in] nbv Nonbonded verlet structure
661 * \param[in,out] pmedata PME module data
662 * \param[in,out] forceOutputs Output buffer for the forces and virial
663 * \param[in,out] enerd Energy data structure results are reduced into
664 * \param[in] stepWork Step schedule flags
665 * \param[in] pmeFlags PME flags
666 * \param[in] wcycle The wallcycle structure
668 static void alternatePmeNbGpuWaitReduce(nonbonded_verlet_t *nbv,
670 gmx::ForceOutputs *forceOutputs,
671 gmx_enerdata_t *enerd,
672 const StepWorkload &stepWork,
674 gmx_wallcycle_t wcycle)
676 bool isPmeGpuDone = false;
677 bool isNbGpuDone = false;
681 gmx::ForceWithShiftForces &forceWithShiftForces = forceOutputs->forceWithShiftForces();
682 gmx::ForceWithVirial &forceWithVirial = forceOutputs->forceWithVirial();
684 gmx::ArrayRef<const gmx::RVec> pmeGpuForces;
686 while (!isPmeGpuDone || !isNbGpuDone)
690 GpuTaskCompletion completionType = (isNbGpuDone) ? GpuTaskCompletion::Wait : GpuTaskCompletion::Check;
691 isPmeGpuDone = pme_gpu_try_finish_task(pmedata, pmeFlags, wcycle, &forceWithVirial, enerd, completionType);
696 GpuTaskCompletion completionType = (isPmeGpuDone) ? GpuTaskCompletion::Wait : GpuTaskCompletion::Check;
697 isNbGpuDone = Nbnxm::gpu_try_finish_task(nbv->gpu_nbv,
700 enerd->grpp.ener[egLJSR].data(),
701 enerd->grpp.ener[egCOULSR].data(),
702 forceWithShiftForces.shiftForces(), completionType, wcycle);
706 nbv->atomdata_add_nbat_f_to_f(AtomLocality::Local,
707 forceWithShiftForces.force());
713 /*! \brief Set up the different force buffers; also does clearing.
715 * \param[in] fr force record pointer
716 * \param[in] pull_work The pull work object.
717 * \param[in] inputrec input record
718 * \param[in] force force array
719 * \param[in] stepWork Step schedule flags
720 * \param[out] wcycle wallcycle recording structure
722 * \returns Cleared force output structure
725 setupForceOutputs(t_forcerec *fr,
727 const t_inputrec &inputrec,
728 gmx::ArrayRefWithPadding<gmx::RVec> force,
729 const StepWorkload &stepWork,
730 gmx_wallcycle_t wcycle)
732 wallcycle_sub_start(wcycle, ewcsCLEAR_FORCE_BUFFER);
734 /* NOTE: We assume fr->shiftForces is all zeros here */
735 gmx::ForceWithShiftForces forceWithShiftForces(force, stepWork.computeVirial, fr->shiftForces);
737 if (stepWork.computeForces)
739 /* Clear the short- and long-range forces */
740 clear_rvecs_omp(fr->natoms_force_constr,
741 as_rvec_array(forceWithShiftForces.force().data()));
/* If we need to compute the virial, we might need a separate
 * force buffer for algorithms for which the virial is calculated
 * directly, such as PME. Otherwise, forceWithVirial uses
 * the same force (f in legacy calls) buffer as other algorithms.
749 const bool useSeparateForceWithVirialBuffer = (stepWork.computeForces &&
750 (stepWork.computeVirial && fr->haveDirectVirialContributions));
751 /* forceWithVirial uses the local atom range only */
752 gmx::ForceWithVirial forceWithVirial(useSeparateForceWithVirialBuffer ?
753 fr->forceBufferForDirectVirialContributions : force.unpaddedArrayRef(),
754 stepWork.computeVirial);
756 if (useSeparateForceWithVirialBuffer)
758 /* TODO: update comment
759 * We only compute forces on local atoms. Note that vsites can
760 * spread to non-local atoms, but that part of the buffer is
761 * cleared separately in the vsite spreading code.
763 clear_rvecs_omp(forceWithVirial.force_.size(), as_rvec_array(forceWithVirial.force_.data()));
766 if (inputrec.bPull && pull_have_constraint(pull_work))
768 clear_pull_forces(pull_work);
771 wallcycle_sub_stop(wcycle, ewcsCLEAR_FORCE_BUFFER);
773 return ForceOutputs(forceWithShiftForces, forceWithVirial);
/*! \brief Set up flags that have the lifetime of the domain, indicating what type of work there is to compute.
779 static DomainLifetimeWorkload
780 setupDomainLifetimeWorkload(const t_inputrec &inputrec,
781 const t_forcerec &fr,
782 const pull_t *pull_work,
786 const t_mdatoms &mdatoms,
787 const SimulationWorkload &simulationWork,
788 const StepWorkload &stepWork)
790 DomainLifetimeWorkload domainWork;
791 // Note that haveSpecialForces is constant over the whole run
792 domainWork.haveSpecialForces = haveSpecialForces(inputrec, *fr.forceProviders, pull_work, stepWork.computeForces, ed);
793 domainWork.haveCpuBondedWork = haveCpuBondeds(fr);
794 domainWork.haveGpuBondedWork = ((fr.gpuBonded != nullptr) && fr.gpuBonded->haveInteractions());
795 domainWork.haveRestraintsWork = haveRestraints(idef, fcd);
796 domainWork.haveCpuListedForceWork = haveCpuListedForces(fr, idef, fcd);
797 // Note that haveFreeEnergyWork is constant over the whole run
798 domainWork.haveFreeEnergyWork = (fr.efep != efepNO && mdatoms.nPerturbed != 0);
799 // We assume we have local force work if there are CPU
800 // force tasks including PME or nonbondeds.
801 domainWork.haveCpuLocalForceWork = domainWork.haveSpecialForces || domainWork.haveCpuListedForceWork || domainWork.haveFreeEnergyWork ||
802 simulationWork.useCpuNonbonded || simulationWork.useCpuPme;
/*! \brief Set up the force flag struct from the force bitmask.
 *
 * \param[in] legacyFlags Force bitmask flags used to construct the new flags
 * \param[in] isNonbondedOn Global override; if false, all nonbonded calculation is turned off
 * \param[in] simulationWork Simulation workload description.
 * \param[in] rankHasPmeDuty If this rank computes PME.
 *
 * \returns New StepWorkload description.
816 setupStepWorkload(const int legacyFlags,
817 const bool isNonbondedOn,
818 const SimulationWorkload &simulationWork,
819 const bool rankHasPmeDuty)
822 flags.stateChanged = ((legacyFlags & GMX_FORCE_STATECHANGED) != 0);
823 flags.haveDynamicBox = ((legacyFlags & GMX_FORCE_DYNAMICBOX) != 0);
824 flags.doNeighborSearch = ((legacyFlags & GMX_FORCE_NS) != 0);
825 flags.computeVirial = ((legacyFlags & GMX_FORCE_VIRIAL) != 0);
826 flags.computeEnergy = ((legacyFlags & GMX_FORCE_ENERGY) != 0);
827 flags.computeForces = ((legacyFlags & GMX_FORCE_FORCES) != 0);
828 flags.computeListedForces = ((legacyFlags & GMX_FORCE_LISTED) != 0);
829 flags.computeNonbondedForces = ((legacyFlags & GMX_FORCE_NONBONDED) != 0) && isNonbondedOn;
830 flags.computeDhdl = ((legacyFlags & GMX_FORCE_DHDL) != 0);
832 if (simulationWork.useGpuBufferOps)
834 GMX_ASSERT(simulationWork.useGpuNonbonded, "Can only offload buffer ops if nonbonded computation is also offloaded");
836 flags.useGpuXBufferOps = simulationWork.useGpuBufferOps;
837 // on virial steps the CPU reduction path is taken
838 // TODO: remove flags.computeEnergy, ref #3128
839 flags.useGpuFBufferOps = simulationWork.useGpuBufferOps && !(flags.computeVirial || flags.computeEnergy);
840 flags.useGpuPmeFReduction = flags.useGpuFBufferOps && (simulationWork.useGpuPme &&
841 (rankHasPmeDuty || simulationWork.useGpuPmePpCommunication));
/*! \brief Launch end-of-step GPU tasks: buffer clearing and rolling pruning.
 *
 * TODO: eliminate the \p useGpuNonbonded and GPU PME flags when these are
 * incorporated in DomainLifetimeWorkload.
853 launchGpuEndOfStepTasks(nonbonded_verlet_t *nbv,
854 gmx::GpuBonded *gpuBonded,
856 gmx_enerdata_t *enerd,
857 const gmx::MdrunScheduleWorkload &runScheduleWork,
858 bool useGpuNonbonded,
861 gmx_wallcycle_t wcycle)
/* Launch pruning before buffer clearing because the API overhead of the
 * clear kernel launches can leave the GPU idle while it could be running
 * the pruning kernel.
869 if (nbv->isDynamicPruningStepGpu(step))
871 nbv->dispatchPruneKernelGpu(step);
874 /* now clear the GPU outputs while we finish the step on the CPU */
875 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
876 wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
877 Nbnxm::gpu_clear_outputs(nbv->gpu_nbv, runScheduleWork.stepWork.computeVirial);
878 wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
879 wallcycle_stop(wcycle, ewcLAUNCH_GPU);
884 pme_gpu_reinit_computation(pmedata, wcycle);
887 if (runScheduleWork.domainWork.haveGpuBondedWork && runScheduleWork.stepWork.computeEnergy)
// in principle this should be included in the DD balancing region,
// but generally it is infrequent so we'll omit it for the sake of
// simpler code
892 gpuBonded->waitAccumulateEnergyTerms(enerd);
894 gpuBonded->clearEnergies();
899 void do_force(FILE *fplog,
901 const gmx_multisim_t *ms,
902 const t_inputrec *inputrec,
904 gmx_enfrot *enforcedRotation,
905 gmx::ImdSession *imdSession,
909 gmx_wallcycle_t wcycle,
910 const gmx_localtop_t *top,
912 gmx::ArrayRefWithPadding<gmx::RVec> x,
914 gmx::ArrayRefWithPadding<gmx::RVec> force,
916 const t_mdatoms *mdatoms,
917 gmx_enerdata_t *enerd,
919 gmx::ArrayRef<real> lambda,
922 gmx::MdrunScheduleWorkload *runScheduleWork,
923 const gmx_vsite_t *vsite,
928 const DDBalanceRegionHandler &ddBalanceRegionHandler)
932 nonbonded_verlet_t *nbv = fr->nbv.get();
933 interaction_const_t *ic = fr->ic;
934 gmx::StatePropagatorDataGpu *stateGpu = fr->stateGpu;
936 // TODO remove the code below when the legacy flags are not in use anymore
937 /* modify force flag if not doing nonbonded */
940 legacyFlags &= ~GMX_FORCE_NONBONDED;
943 const SimulationWorkload &simulationWork = runScheduleWork->simulationWork;
946 runScheduleWork->stepWork = setupStepWorkload(legacyFlags, fr->bNonbonded,
947 simulationWork, thisRankHasDuty(cr, DUTY_PME));
948 const StepWorkload &stepWork = runScheduleWork->stepWork;
951 const bool useGpuPmeOnThisRank = simulationWork.useGpuPme && thisRankHasDuty(cr, DUTY_PME);
952 const int pmeFlags = makePmeFlags(stepWork);
954 // Switches on whether to use GPU for position and force buffer operations
955 // TODO consider all possible combinations of triggers, and how to combine optimally in each case.
956 const BufferOpsUseGpu useGpuXBufOps = stepWork.useGpuXBufferOps ? BufferOpsUseGpu::True : BufferOpsUseGpu::False;
957 // GPU Force buffer ops are disabled on virial steps, because the virial calc is not yet ported to GPU
958 const BufferOpsUseGpu useGpuFBufOps = stepWork.useGpuFBufferOps ? BufferOpsUseGpu::True : BufferOpsUseGpu::False;
960 /* At a search step we need to start the first balancing region
961 * somewhere early inside the step after communication during domain
962 * decomposition (and not during the previous step as usual).
964 if (stepWork.doNeighborSearch)
966 ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::yes);
970 const int homenr = mdatoms->homenr;
972 clear_mat(vir_force);
974 if (stepWork.stateChanged)
976 if (inputrecNeedMutot(inputrec))
978 /* Calculate total (local) dipole moment in a temporary common array.
979 * This makes it possible to sum them over nodes faster.
981 calc_mu(start, homenr,
982 x.unpaddedArrayRef(), mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
987 if (fr->ePBC != epbcNONE)
989 /* Compute shift vectors every step,
990 * because of pressure coupling or box deformation!
992 if (stepWork.haveDynamicBox && stepWork.stateChanged)
994 calc_shifts(box, fr->shift_vec);
997 const bool fillGrid = (stepWork.doNeighborSearch && stepWork.stateChanged);
998 const bool calcCGCM = (fillGrid && !DOMAINDECOMP(cr));
1001 put_atoms_in_box_omp(fr->ePBC, box, x.unpaddedArrayRef().subArray(0, homenr), gmx_omp_nthreads_get(emntDefault));
1002 inc_nrnb(nrnb, eNR_SHIFTX, homenr);
1004 else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
1006 unshift_self(graph, box, as_rvec_array(x.unpaddedArrayRef().data()));
1010 nbnxn_atomdata_copy_shiftvec(stepWork.haveDynamicBox,
1011 fr->shift_vec, nbv->nbat.get());
1013 // Coordinates on the device are needed if PME or BufferOps are offloaded.
1014 // The local coordinates can be copied right away.
// NOTE: Consider moving this copy to right after they are updated and constrained,
//       if the latter is not offloaded.
1017 if (useGpuPmeOnThisRank || useGpuXBufOps == BufferOpsUseGpu::True)
1019 if (stepWork.doNeighborSearch)
1021 stateGpu->reinit(mdatoms->homenr, cr->dd != nullptr ? dd_numAtomsZones(*cr->dd) : mdatoms->homenr);
1022 if (useGpuPmeOnThisRank)
1024 // TODO: This should be moved into PME setup function ( pme_gpu_prepare_computation(...) )
1025 pme_gpu_set_device_x(fr->pmedata, stateGpu->getCoordinates());
1028 // We need to copy coordinates when:
1029 // 1. Update is not offloaded
1030 // 2. The buffers were reinitialized on search step
1031 if (!simulationWork.useGpuUpdate || stepWork.doNeighborSearch)
1033 stateGpu->copyCoordinatesToGpu(x.unpaddedArrayRef(), AtomLocality::Local);
1038 if (!thisRankHasDuty(cr, DUTY_PME))
1040 /* Send particle coordinates to the pme nodes.
1041 * Since this is only implemented for domain decomposition
1042 * and domain decomposition does not use the graph,
1043 * we do not need to worry about shifting.
1045 bool reinitGpuPmePpComms = simulationWork.useGpuPmePpCommunication && (stepWork.doNeighborSearch);
1046 bool sendCoordinatesFromGpu = simulationWork.useGpuPmePpCommunication && !(stepWork.doNeighborSearch);
1047 gmx_pme_send_coordinates(fr, cr, box, as_rvec_array(x.unpaddedArrayRef().data()),
1048 lambda[efptCOUL], lambda[efptVDW],
1049 (stepWork.computeVirial || stepWork.computeEnergy),
1050 step, simulationWork.useGpuPmePpCommunication, reinitGpuPmePpComms,
1051 sendCoordinatesFromGpu, wcycle);
1053 #endif /* GMX_MPI */
1055 const auto localXReadyOnDevice = (stateGpu != nullptr) ? stateGpu->getCoordinatesReadyOnDeviceEvent(AtomLocality::Local,
1056 simulationWork, stepWork) : nullptr;
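// Event that signals when the local coordinates are ready on the device;
// used to order the PME spread and the GPU X buffer ops after the coordinates
// become available on the device.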
1057 if (useGpuPmeOnThisRank)
1059 launchPmeGpuSpread(fr->pmedata, box, stepWork, pmeFlags,
1060 localXReadyOnDevice, wcycle);
1063 /* do gridding for pair search */
1064 if (stepWork.doNeighborSearch)
1066 if (graph && stepWork.stateChanged)
1068 /* Calculate intramolecular shift vectors to make molecules whole */
1069 mk_mshift(fplog, graph, fr->ePBC, box, as_rvec_array(x.unpaddedArrayRef().data()));
1073 // - vzero is constant, do we need to pass it?
1074 // - box_diag should be passed directly to nbnxn_put_on_grid
1080 box_diag[XX] = box[XX][XX];
1081 box_diag[YY] = box[YY][YY];
1082 box_diag[ZZ] = box[ZZ][ZZ];
1084 wallcycle_start(wcycle, ewcNS);
1085 if (!DOMAINDECOMP(cr))
1087 wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
1088 nbnxn_put_on_grid(nbv, box,
1090 nullptr, { 0, mdatoms->homenr }, -1,
1091 fr->cginfo, x.unpaddedArrayRef(),
1093 wallcycle_sub_stop(wcycle, ewcsNBS_GRID_LOCAL);
1097 wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
1098 nbnxn_put_on_grid_nonlocal(nbv, domdec_zones(cr->dd),
1099 fr->cginfo, x.unpaddedArrayRef());
1100 wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);
1103 nbv->setAtomProperties(*mdatoms, fr->cginfo);
1105 wallcycle_stop(wcycle, ewcNS);
1107 /* initialize the GPU nbnxm atom data and bonded data structures */
1108 if (simulationWork.useGpuNonbonded)
1110 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
1112 wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1113 Nbnxm::gpu_init_atomdata(nbv->gpu_nbv, nbv->nbat.get());
1114 wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1118 /* Now we put all atoms on the grid, we can assign bonded
1119 * interactions to the GPU, where the grid order is
1120 * needed. Also the xq, f and fshift device buffers have
1121 * been reallocated if needed, so the bonded code can
1122 * learn about them. */
1123 // TODO the xq, f, and fshift buffers are now shared
1124 // resources, so they should be maintained by a
1125 // higher-level object than the nb module.
1126 fr->gpuBonded->updateInteractionListsAndDeviceBuffers(nbv->getGridIndices(),
1128 Nbnxm::gpu_get_xq(nbv->gpu_nbv),
1129 Nbnxm::gpu_get_f(nbv->gpu_nbv),
1130 Nbnxm::gpu_get_fshift(nbv->gpu_nbv));
1132 wallcycle_stop(wcycle, ewcLAUNCH_GPU);
1136 if (stepWork.doNeighborSearch)
1138 // Need to run after the GPU-offload bonded interaction lists
1139 // are set up to be able to determine whether there is bonded work.
1140 runScheduleWork->domainWork =
1141 setupDomainLifetimeWorkload(*inputrec,
1151 wallcycle_start_nocount(wcycle, ewcNS);
1152 wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
1153 /* Note that with a GPU the launch overhead of the list transfer is not timed separately */
1154 nbv->constructPairlist(InteractionLocality::Local,
1155 &top->excls, step, nrnb);
1157 nbv->setupGpuShortRangeWork(fr->gpuBonded, InteractionLocality::Local);
1159 wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_LOCAL);
1160 wallcycle_stop(wcycle, ewcNS);
1162 if (useGpuXBufOps == BufferOpsUseGpu::True)
1164 nbv->atomdata_init_copy_x_to_nbat_x_gpu();
// For force buffer ops, we use the below condition rather than
// useGpuFBufOps to ensure that init is performed even if this
// NS step is also a virial step (on which f buf ops are deactivated).
1169 if (simulationWork.useGpuBufferOps && simulationWork.useGpuNonbonded && (GMX_GPU == GMX_GPU_CUDA))
1171 GMX_ASSERT(stateGpu, "stateGpu should be valid when buffer ops are offloaded");
1172 nbv->atomdata_init_add_nbat_f_to_f_gpu(stateGpu->fReducedOnDevice());
1175 else if (!EI_TPI(inputrec->eI))
1177 if (useGpuXBufOps == BufferOpsUseGpu::True)
1179 GMX_ASSERT(stateGpu, "stateGpu should be valid when buffer ops are offloaded");
1180 nbv->convertCoordinatesGpu(AtomLocality::Local, false,
1181 stateGpu->getCoordinates(),
1182 localXReadyOnDevice);
1186 nbv->convertCoordinates(AtomLocality::Local, false,
1187 x.unpaddedArrayRef());
1191 const gmx::DomainLifetimeWorkload &domainWork = runScheduleWork->domainWork;
1193 if (simulationWork.useGpuNonbonded)
1195 ddBalanceRegionHandler.openBeforeForceComputationGpu();
1197 wallcycle_start(wcycle, ewcLAUNCH_GPU);
1199 wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1200 Nbnxm::gpu_upload_shiftvec(nbv->gpu_nbv, nbv->nbat.get());
1201 if (stepWork.doNeighborSearch || (useGpuXBufOps == BufferOpsUseGpu::False))
1203 Nbnxm::gpu_copy_xq_to_gpu(nbv->gpu_nbv, nbv->nbat.get(),
1204 AtomLocality::Local);
1206 wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1207 // with X buffer ops offloaded to the GPU on all but the search steps
// Bonded work is not split into separate local and non-local kernels, so with DD
// we can only launch the kernel after the non-local coordinates have been received.
1211 if (domainWork.haveGpuBondedWork && !havePPDomainDecomposition(cr))
1213 wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_BONDED);
1214 fr->gpuBonded->launchKernel(fr, stepWork, box);
1215 wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_BONDED);
1218 /* launch local nonbonded work on GPU */
1219 wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1220 do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::Local, enbvClearFNo,
1221 step, nrnb, wcycle);
1222 wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1223 wallcycle_stop(wcycle, ewcLAUNCH_GPU);
1226 if (useGpuPmeOnThisRank)
1228 // In PME GPU and mixed mode we launch FFT / gather after the
1229 // X copy/transform to allow overlap as well as after the GPU NB
1230 // launch to avoid FFT launch overhead hijacking the CPU and delaying
1231 // the nonbonded kernel.
1232 launchPmeGpuFftAndGather(fr->pmedata, wcycle);
1235 // TODO Update this comment when introducing SimulationWorkload
1237 // The conditions for gpuHaloExchange e.g. using GPU buffer
1238 // operations were checked before construction, so here we can
1239 // just use it and assert upon any conditions.
1240 gmx::GpuHaloExchange *gpuHaloExchange = (havePPDomainDecomposition(cr) ? cr->dd->gpuHaloExchange.get() : nullptr);
1241 const bool ddUsesGpuDirectCommunication = (gpuHaloExchange != nullptr);
1242 GMX_ASSERT(!ddUsesGpuDirectCommunication || (useGpuXBufOps == BufferOpsUseGpu::True),
1243 "Must use coordinate buffer ops with GPU halo exchange");
1244 const bool useGpuForcesHaloExchange = ddUsesGpuDirectCommunication && (useGpuFBufOps == BufferOpsUseGpu::True);
1246 /* Communicate coordinates and sum dipole if necessary +
1247 do non-local pair search */
1248 if (havePPDomainDecomposition(cr))
1250 if (stepWork.doNeighborSearch)
1252 // TODO: fuse this branch with the above large stepWork.doNeighborSearch block
1253 wallcycle_start_nocount(wcycle, ewcNS);
1254 wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);
1255 /* Note that with a GPU the launch overhead of the list transfer is not timed separately */
1256 nbv->constructPairlist(InteractionLocality::NonLocal,
1257 &top->excls, step, nrnb);
1259 nbv->setupGpuShortRangeWork(fr->gpuBonded, InteractionLocality::NonLocal);
1260 wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);
1261 wallcycle_stop(wcycle, ewcNS);
1262 if (ddUsesGpuDirectCommunication)
1264 gpuHaloExchange->reinitHalo(stateGpu->getCoordinates(), stateGpu->getForces());
1269 if (ddUsesGpuDirectCommunication)
1271 // The following must be called after local setCoordinates (which records an event
1272 // when the coordinate data has been copied to the device).
1273 gpuHaloExchange->communicateHaloCoordinates(box);
1275 if (domainWork.haveCpuBondedWork || domainWork.haveFreeEnergyWork)
// Non-local part of the coordinate buffer must be copied back to the host for CPU work
1278 stateGpu->copyCoordinatesFromGpu(x.unpaddedArrayRef(), AtomLocality::NonLocal);
1283 dd_move_x(cr->dd, box, x.unpaddedArrayRef(), wcycle);
1286 if (useGpuXBufOps == BufferOpsUseGpu::True)
1288 // The condition here was (pme != nullptr && pme_gpu_get_device_x(fr->pmedata) != nullptr)
1289 if (!useGpuPmeOnThisRank && !ddUsesGpuDirectCommunication)
1291 stateGpu->copyCoordinatesToGpu(x.unpaddedArrayRef(), AtomLocality::NonLocal);
1293 nbv->convertCoordinatesGpu(AtomLocality::NonLocal, false,
1294 stateGpu->getCoordinates(),
1295 stateGpu->getCoordinatesReadyOnDeviceEvent(AtomLocality::NonLocal,
1296 simulationWork, stepWork));
1300 nbv->convertCoordinates(AtomLocality::NonLocal, false,
1301 x.unpaddedArrayRef());
1306 if (simulationWork.useGpuNonbonded)
1308 wallcycle_start(wcycle, ewcLAUNCH_GPU);
1310 if (stepWork.doNeighborSearch || (useGpuXBufOps == BufferOpsUseGpu::False))
1312 wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1313 Nbnxm::gpu_copy_xq_to_gpu(nbv->gpu_nbv, nbv->nbat.get(),
1314 AtomLocality::NonLocal);
1315 wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1318 if (domainWork.haveGpuBondedWork)
1320 wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_BONDED);
1321 fr->gpuBonded->launchKernel(fr, stepWork, box);
1322 wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_BONDED);
1325 /* launch non-local nonbonded tasks on GPU */
1326 wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1327 do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::NonLocal, enbvClearFNo,
1328 step, nrnb, wcycle);
1329 wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1331 wallcycle_stop(wcycle, ewcLAUNCH_GPU);
1335 if (simulationWork.useGpuNonbonded)
1337 /* launch D2H copy-back F */
1338 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
1339 wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1341 if (havePPDomainDecomposition(cr))
1343 Nbnxm::gpu_launch_cpyback(nbv->gpu_nbv, nbv->nbat.get(),
1344 stepWork, AtomLocality::NonLocal);
1346 Nbnxm::gpu_launch_cpyback(nbv->gpu_nbv, nbv->nbat.get(),
1347 stepWork, AtomLocality::Local);
1348 wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1350 if (domainWork.haveGpuBondedWork && stepWork.computeEnergy)
1352 fr->gpuBonded->launchEnergyTransfer();
1354 wallcycle_stop(wcycle, ewcLAUNCH_GPU);
1357 if (stepWork.stateChanged && inputrecNeedMutot(inputrec))
1361 gmx_sumd(2*DIM, mu, cr);
1363 ddBalanceRegionHandler.reopenRegionCpu();
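/* Unpack the accumulated dipole components into fr->mu_tot for states A and B */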
1366 for (i = 0; i < 2; i++)
1368 for (j = 0; j < DIM; j++)
1370 fr->mu_tot[i][j] = mu[i*DIM + j];
1374 if (mdatoms->nChargePerturbed == 0)
1376 copy_rvec(fr->mu_tot[0], mu_tot);
1380 for (j = 0; j < DIM; j++)
1383 (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] +
1384 lambda[efptCOUL]*fr->mu_tot[1][j];
1388 /* Reset energies */
1389 reset_enerdata(enerd);
1390 /* Clear the shift forces */
1391 // TODO: This should be linked to the shift force buffer in use, or cleared before use instead
1392 for (gmx::RVec &elem : fr->shiftForces)
1394 elem = { 0.0_real, 0.0_real, 0.0_real };
1397 if (DOMAINDECOMP(cr) && !thisRankHasDuty(cr, DUTY_PME))
1399 wallcycle_start(wcycle, ewcPPDURINGPME);
1400 dd_force_flop_start(cr->dd, nrnb);
1405 wallcycle_start(wcycle, ewcROT);
1406 do_rotation(cr, enforcedRotation, box, as_rvec_array(x.unpaddedArrayRef().data()), t, step, stepWork.doNeighborSearch);
1407 wallcycle_stop(wcycle, ewcROT);
1410 /* Start the force cycle counter.
1411 * Note that a different counter is used for dynamic load balancing.
1413 wallcycle_start(wcycle, ewcFORCE);
1415 // Set up and clear force outputs.
// We use std::move to keep the compiler happy; it has no effect.
1417 ForceOutputs forceOut = setupForceOutputs(fr, pull_work, *inputrec, std::move(force), stepWork, wcycle);
1419 /* We calculate the non-bonded forces, when done on the CPU, here.
1420 * We do this before calling do_force_lowlevel, because in that
1421 * function, the listed forces are calculated before PME, which
1422 * does communication. With this order, non-bonded and listed
1423 * force calculation imbalance can be balanced out by the domain
1424 * decomposition load balancing.
1427 const bool useOrEmulateGpuNb = simulationWork.useGpuNonbonded || fr->nbv->emulateGpu();
1429 if (!useOrEmulateGpuNb)
1431 do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::Local, enbvClearFYes,
1432 step, nrnb, wcycle);
1435 if (fr->efep != efepNO)
1437 /* Calculate the local and non-local free energy interactions here.
1438 * Happens here on the CPU both with and without GPU.
1440 nbv->dispatchFreeEnergyKernel(InteractionLocality::Local,
1441 fr, as_rvec_array(x.unpaddedArrayRef().data()), &forceOut.forceWithShiftForces(), *mdatoms,
1442 inputrec->fepvals, lambda.data(),
1443 enerd, stepWork, nrnb);
1445 if (havePPDomainDecomposition(cr))
1447 nbv->dispatchFreeEnergyKernel(InteractionLocality::NonLocal,
1448 fr, as_rvec_array(x.unpaddedArrayRef().data()), &forceOut.forceWithShiftForces(), *mdatoms,
1449 inputrec->fepvals, lambda.data(),
1450 enerd, stepWork, nrnb);
1454 if (!useOrEmulateGpuNb)
1456 if (havePPDomainDecomposition(cr))
1458 do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::NonLocal, enbvClearFNo,
1459 step, nrnb, wcycle);
1462 if (stepWork.computeForces)
1464 /* Add all the non-bonded force to the normal force array.
1465 * This can be split into a local and a non-local part when overlapping
1466 * communication with calculation with domain decomposition.
1468 wallcycle_stop(wcycle, ewcFORCE);
1469 nbv->atomdata_add_nbat_f_to_f(AtomLocality::All, forceOut.forceWithShiftForces().force());
1470 wallcycle_start_nocount(wcycle, ewcFORCE);
1473 /* If there are multiple fshift output buffers we need to reduce them */
1474 if (stepWork.computeVirial)
1476 /* This is not in a subcounter because it takes a
1477 negligible and constant-sized amount of time */
1478 nbnxn_atomdata_add_nbat_fshift_to_fshift(*nbv->nbat,
1479 forceOut.forceWithShiftForces().shiftForces());
1483 /* update QMMMrec, if necessary */
1486 update_QMMMrec(cr, fr, as_rvec_array(x.unpaddedArrayRef().data()), mdatoms, box);
1489 // TODO Force flags should include haveFreeEnergyWork for this domain
1490 if (ddUsesGpuDirectCommunication &&
1491 (domainWork.haveCpuBondedWork || domainWork.haveFreeEnergyWork))
1493 /* Wait for non-local coordinate data to be copied from device */
1494 nbv->wait_nonlocal_x_copy_D2H_done();
1496 /* Compute the bonded and non-bonded energies and optionally forces */
1497 do_force_lowlevel(fr, inputrec, &(top->idef),
1498 cr, ms, nrnb, wcycle, mdatoms,
1499 x, hist, &forceOut, enerd, fcd,
1500 box, lambda.data(), graph, fr->mu_tot,
1502 ddBalanceRegionHandler);
1504 wallcycle_stop(wcycle, ewcFORCE);
1506 computeSpecialForces(fplog, cr, inputrec, awh, enforcedRotation,
1507 imdSession, pull_work, step, t, wcycle,
1508 fr->forceProviders, box, x.unpaddedArrayRef(), mdatoms, lambda.data(),
1509 stepWork, &forceOut.forceWithVirial(), enerd,
1510 ed, stepWork.doNeighborSearch);
// Will store the number of cycles spent waiting for the GPU, which
// will later be used in the DLB accounting.
1515 float cycles_wait_gpu = 0;
1516 if (useOrEmulateGpuNb)
1518 auto &forceWithShiftForces = forceOut.forceWithShiftForces();
1520 /* wait for non-local forces (or calculate in emulation mode) */
1521 if (havePPDomainDecomposition(cr))
1523 if (simulationWork.useGpuNonbonded)
1525 cycles_wait_gpu += Nbnxm::gpu_wait_finish_task(nbv->gpu_nbv,
1526 stepWork, AtomLocality::NonLocal,
1527 enerd->grpp.ener[egLJSR].data(),
1528 enerd->grpp.ener[egCOULSR].data(),
1529 forceWithShiftForces.shiftForces(),
1534 wallcycle_start_nocount(wcycle, ewcFORCE);
1535 do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::NonLocal, enbvClearFYes,
1536 step, nrnb, wcycle);
1537 wallcycle_stop(wcycle, ewcFORCE);
1540 if (useGpuFBufOps == BufferOpsUseGpu::True)
1542 gmx::FixedCapacityVector<GpuEventSynchronizer*, 1> dependencyList;
1544 // TODO: move this into DomainLifetimeWorkload, including the second part of the condition
1545 // The bonded and free energy CPU tasks can have non-local force contributions
1546 // which are a dependency for the GPU force reduction.
1547 bool haveNonLocalForceContribInCpuBuffer = domainWork.haveCpuBondedWork || domainWork.haveFreeEnergyWork;
1549 if (haveNonLocalForceContribInCpuBuffer)
1551 stateGpu->copyForcesToGpu(forceOut.forceWithShiftForces().force(), AtomLocality::NonLocal);
1552 dependencyList.push_back(stateGpu->getForcesReadyOnDeviceEvent(AtomLocality::NonLocal,
1553 useGpuFBufOps == BufferOpsUseGpu::True));
1556 nbv->atomdata_add_nbat_f_to_f_gpu(AtomLocality::NonLocal,
1557 stateGpu->getForces(),
1558 pme_gpu_get_device_f(fr->pmedata),
1560 false, haveNonLocalForceContribInCpuBuffer);
1561 if (!useGpuForcesHaloExchange)
1563 // copy from GPU input for dd_move_f()
1564 stateGpu->copyForcesFromGpu(forceOut.forceWithShiftForces().force(), AtomLocality::NonLocal);
1569 nbv->atomdata_add_nbat_f_to_f(AtomLocality::NonLocal,
1570 forceWithShiftForces.force());
1574 if (fr->nbv->emulateGpu() && stepWork.computeVirial)
1576 nbnxn_atomdata_add_nbat_fshift_to_fshift(*nbv->nbat,
1577 forceWithShiftForces.shiftForces());
1582 if (havePPDomainDecomposition(cr))
1584 /* We are done with the CPU compute.
1585 * We will now communicate the non-local forces.
1586 * If we use a GPU this will overlap with GPU work, so in that case
1587 * we do not close the DD force balancing region here.
1589 ddBalanceRegionHandler.closeAfterForceComputationCpu();
1591 if (stepWork.computeForces)
1594 if (useGpuForcesHaloExchange)
1596 if (domainWork.haveCpuLocalForceWork)
1598 stateGpu->copyForcesToGpu(forceOut.forceWithShiftForces().force(), AtomLocality::Local);
1600 gpuHaloExchange->communicateHaloForces(domainWork.haveCpuLocalForceWork);
1604 if (useGpuFBufOps == BufferOpsUseGpu::True)
1606 stateGpu->waitForcesReadyOnHost(AtomLocality::NonLocal);
1608 dd_move_f(cr->dd, &forceOut.forceWithShiftForces(), wcycle);
// With both nonbonded and PME offloaded to a GPU on the same rank, we use
1615 // an alternating wait/reduction scheme.
1616 bool alternateGpuWait = (!c_disableAlternatingWait && useGpuPmeOnThisRank && simulationWork.useGpuNonbonded && !DOMAINDECOMP(cr) &&
1617 (useGpuFBufOps == BufferOpsUseGpu::False));
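// The alternating wait is skipped with domain decomposition and with GPU force
// buffer ops, and can be disabled via GMX_DISABLE_ALTERNATING_GPU_WAIT
// (see c_disableAlternatingWait above).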
1618 if (alternateGpuWait)
1620 alternatePmeNbGpuWaitReduce(fr->nbv.get(), fr->pmedata, &forceOut, enerd,
1621 stepWork, pmeFlags, wcycle);
1624 if (!alternateGpuWait && useGpuPmeOnThisRank)
1626 pme_gpu_wait_and_reduce(fr->pmedata, pmeFlags, wcycle, &forceOut.forceWithVirial(), enerd);
1629 /* Wait for local GPU NB outputs on the non-alternating wait path */
1630 if (!alternateGpuWait && simulationWork.useGpuNonbonded)
/* Measured overhead on CUDA and OpenCL with(out) GPU sharing
 * is between 0.5 and 1.5 Mcycles. So 2 Mcycles is an overestimate,
 * but even with a step of 0.1 ms the difference is less than 1%
 * of the step time.
1637 const float gpuWaitApiOverheadMargin = 2e6F; /* cycles */
1638 const float waitCycles =
1639 Nbnxm::gpu_wait_finish_task(nbv->gpu_nbv,
1640 stepWork, AtomLocality::Local,
1641 enerd->grpp.ener[egLJSR].data(),
1642 enerd->grpp.ener[egCOULSR].data(),
1643 forceOut.forceWithShiftForces().shiftForces(),
1646 if (ddBalanceRegionHandler.useBalancingRegion())
1648 DdBalanceRegionWaitedForGpu waitedForGpu = DdBalanceRegionWaitedForGpu::yes;
1649 if (stepWork.computeForces && waitCycles <= gpuWaitApiOverheadMargin)
/* We measured only a few cycles, so it could be that the kernel
1652 * and transfer finished earlier and there was no actual
1653 * wait time, only API call overhead.
1654 * Then the actual time could be anywhere between 0 and
1655 * cycles_wait_est. We will use half of cycles_wait_est.
1657 waitedForGpu = DdBalanceRegionWaitedForGpu::no;
1659 ddBalanceRegionHandler.closeAfterForceComputationGpu(cycles_wait_gpu, waitedForGpu);
1663 if (fr->nbv->emulateGpu())
1665 // NOTE: emulation kernel is not included in the balancing region,
1666 // but emulation mode does not target performance anyway
1667 wallcycle_start_nocount(wcycle, ewcFORCE);
1668 do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::Local,
1669 DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes,
1670 step, nrnb, wcycle);
1671 wallcycle_stop(wcycle, ewcFORCE);
1674 // If on GPU PME-PP comms path, receive forces from PME before GPU buffer ops
// TODO refactor this and unify with the below default-path call to the same function
1676 if (PAR(cr) && !thisRankHasDuty(cr, DUTY_PME) && simulationWork.useGpuPmePpCommunication)
1678 /* In case of node-splitting, the PP nodes receive the long-range
1679 * forces, virial and energy from the PME nodes here.
1681 pme_receive_force_ener(fr, cr, &forceOut.forceWithVirial(), enerd, simulationWork.useGpuPmePpCommunication, stepWork.useGpuPmeFReduction, wcycle);
1685 /* Do the nonbonded GPU (or emulation) force buffer reduction
1686 * on the non-alternating path. */
1687 if (useOrEmulateGpuNb && !alternateGpuWait)
// TODO simplify the below conditionals. Pass buffer and sync pointers at init stage rather than here. Unify getter fns for sameGPU/otherGPU cases.
1690 void* pmeForcePtr = stepWork.useGpuPmeFReduction ?
1691 (thisRankHasDuty(cr, DUTY_PME) ?
1692 pme_gpu_get_device_f(fr->pmedata) : // PME force buffer on same GPU
1693 fr->pmePpCommGpu->getGpuForceStagingPtr()) // buffer received from other GPU
1694 : nullptr; // PME reduction not active on GPU
1696 GpuEventSynchronizer* const pmeSynchronizer = stepWork.useGpuPmeFReduction ?
1697 (thisRankHasDuty(cr, DUTY_PME) ?
1698 pme_gpu_get_f_ready_synchronizer(fr->pmedata) : // PME force buffer on same GPU
1699 static_cast<GpuEventSynchronizer*>
1700 (fr->pmePpCommGpu->getForcesReadySynchronizer())) // buffer received from other GPU
1701 : nullptr; // PME reduction not active on GPU
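// Dependencies for the GPU force reduction: the PME-force-ready event (when the
// PME reduction runs on the GPU) and, below, the event signalling that the CPU
// force contributions have been copied to the device.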
1703 gmx::FixedCapacityVector<GpuEventSynchronizer*, 2> dependencyList;
1705 if (stepWork.useGpuPmeFReduction)
1707 dependencyList.push_back(pmeSynchronizer);
1710 gmx::ArrayRef<gmx::RVec> forceWithShift = forceOut.forceWithShiftForces().force();
1712 if (useGpuFBufOps == BufferOpsUseGpu::True)
// Flag to specify whether the CPU force buffer has contributions to
// local atoms. This depends on whether there are CPU-based force tasks
// or, when DD is active, whether the halo exchange has resulted in contributions
// from the non-local part.
1718 const bool haveLocalForceContribInCpuBuffer = (domainWork.haveCpuLocalForceWork || havePPDomainDecomposition(cr));
1720 // TODO: move these steps as early as possible:
1721 // - CPU f H2D should be as soon as all CPU-side forces are done
1722 // - wait for force reduction does not need to block host (at least not here, it's sufficient to wait
1723 // before the next CPU task that consumes the forces: vsite spread or update)
// - copy is not performed if GPU force halo exchange is active, because it would overwrite the result
1725 // of the halo exchange. In that case the copy is instead performed above, before the exchange.
1726 // These should be unified.
1727 if (haveLocalForceContribInCpuBuffer && !useGpuForcesHaloExchange)
1729 stateGpu->copyForcesToGpu(forceWithShift, AtomLocality::Local);
1730 dependencyList.push_back(stateGpu->getForcesReadyOnDeviceEvent(AtomLocality::Local,
1731 useGpuFBufOps == BufferOpsUseGpu::True));
1733 if (useGpuForcesHaloExchange)
// Add a stream synchronization to satisfy a dependency
// for the local buffer ops on the result of GPU halo
// exchange, which operates in the non-local stream and
// writes to the local part of the force buffer.
1740 // TODO improve this through use of an event - see Redmine #3093
1741 // push the event into the dependencyList
1742 nbv->stream_local_wait_for_nonlocal();
1744 nbv->atomdata_add_nbat_f_to_f_gpu(AtomLocality::Local,
1745 stateGpu->getForces(),
1748 stepWork.useGpuPmeFReduction, haveLocalForceContribInCpuBuffer);
1749 stateGpu->copyForcesFromGpu(forceWithShift, AtomLocality::Local);
1750 stateGpu->waitForcesReadyOnHost(AtomLocality::Local);
1754 nbv->atomdata_add_nbat_f_to_f(AtomLocality::Local, forceWithShift);
1759 launchGpuEndOfStepTasks(nbv, fr->gpuBonded, fr->pmedata, enerd,
1761 simulationWork.useGpuNonbonded, useGpuPmeOnThisRank,
1765 if (DOMAINDECOMP(cr))
1767 dd_force_flop_stop(cr->dd, nrnb);
1770 if (stepWork.computeForces)
1772 rvec *f = as_rvec_array(forceOut.forceWithShiftForces().force().data());
1774 /* If we have NoVirSum forces, but we do not calculate the virial,
1775 * we sum fr->f_novirsum=forceOut.f later.
1777 if (vsite && !(fr->haveDirectVirialContributions && !stepWork.computeVirial))
1779 rvec *fshift = as_rvec_array(forceOut.forceWithShiftForces().shiftForces().data());
1780 spread_vsite_f(vsite, as_rvec_array(x.unpaddedArrayRef().data()), f, fshift, FALSE, nullptr, nrnb,
1781 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr, wcycle);
1784 if (stepWork.computeVirial)
1786 /* Calculation of the virial must be done after vsites! */
1787 calc_virial(0, mdatoms->homenr, as_rvec_array(x.unpaddedArrayRef().data()),
1788 forceOut.forceWithShiftForces(),
1789 vir_force, graph, box, nrnb, fr, inputrec->ePBC);
// TODO refactor this and unify with the above PME-PP GPU communication path call to the same function
1794 if (PAR(cr) && !thisRankHasDuty(cr, DUTY_PME) && !simulationWork.useGpuPmePpCommunication)
1796 /* In case of node-splitting, the PP nodes receive the long-range
1797 * forces, virial and energy from the PME nodes here.
1799 pme_receive_force_ener(fr, cr, &forceOut.forceWithVirial(), enerd,
1800 simulationWork.useGpuPmePpCommunication, false, wcycle);
1803 if (stepWork.computeForces)
1805 post_process_forces(cr, step, nrnb, wcycle,
1806 top, box, as_rvec_array(x.unpaddedArrayRef().data()), &forceOut,
1807 vir_force, mdatoms, graph, fr, vsite,
1811 if (stepWork.computeEnergy)
1813 /* Sum the potential energy terms from group contributions */
1814 sum_epot(&(enerd->grpp), enerd->term);
1816 if (!EI_TPI(inputrec->eI))
1818 checkPotentialEnergyValidity(step, *enerd, *inputrec);
/* In case we don't have constraints and are using GPUs, the next balancing
 * region starts here.
 * Some "special" work at the end of do_force_cuts?, such as vsite spread,
 * virial calculation and COM pulling, is thus not included in
 * the balance timing, which is ok as most tasks do communication.
1828 ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::no);