 * This file is part of the GROMACS molecular simulation package.
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
#include "gromacs/awh/awh.h"
#include "gromacs/domdec/dlbtiming.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/domdec/gpuhaloexchange.h"
#include "gromacs/domdec/partition.h"
#include "gromacs/essentialdynamics/edsam.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/gmxlib/chargegroup.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nonbonded/nb_free_energy.h"
#include "gromacs/gmxlib/nonbonded/nb_kernel.h"
#include "gromacs/gmxlib/nonbonded/nonbonded.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/imd/imd.h"
#include "gromacs/listed_forces/disre.h"
#include "gromacs/listed_forces/gpubonded.h"
#include "gromacs/listed_forces/listed_forces.h"
#include "gromacs/listed_forces/manage_threading.h"
#include "gromacs/listed_forces/orires.h"
#include "gromacs/math/arrayrefwithpadding.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vecdump.h"
#include "gromacs/mdlib/calcmu.h"
#include "gromacs/mdlib/calcvir.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdlib/enerdata_utils.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/forcerec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/ppforceworkload.h"
#include "gromacs/mdlib/qmmm.h"
#include "gromacs/mdlib/update.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/enerdata.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/iforceprovider.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/nbnxm/atomdata.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/mshift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/pulling/pull_rotation.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/wallcyclereporting.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/strconvert.h"
#include "gromacs/utility/sysinfo.h"
using gmx::ForceOutputs;
// TODO: this environment variable allows us to verify before release
// that on less common architectures the total cost of polling is not larger than
// a blocking wait (so polling does not introduce overhead when the static
// PME-first ordering would suffice).
static const bool c_disableAlternatingWait = (getenv("GMX_DISABLE_ALTERNATING_GPU_WAIT") != nullptr);
// environment variable to enable GPU buffer ops, to allow incremental and optional
// introduction of this functionality.
// TODO eventually tie this in with other existing GPU flags.
static const bool c_enableGpuBufOps = (getenv("GMX_USE_GPU_BUFFER_OPS") != nullptr);
/*! \brief environment variable to enable GPU P2P communication */
static const bool c_enableGpuHaloExchange = (getenv("GMX_GPU_DD_COMMS") != nullptr)
&& GMX_THREAD_MPI && (GMX_GPU == GMX_GPU_CUDA);
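// Illustrative note (not from the original sources): since only
// getenv() != nullptr is tested, setting any value enables a flag, e.g.
//   GMX_USE_GPU_BUFFER_OPS=1 gmx mdrun ...
// and even an empty value (GMX_USE_GPU_BUFFER_OPS=) switches the path on.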
static void sum_forces(rvec f[], gmx::ArrayRef<const gmx::RVec> forceToAdd)
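// Descriptive note: accumulates forceToAdd into f element-wise;
// rvec_inc(f[i], forceToAdd[i]) performs f[i] += forceToAdd[i], and the
// loop below is parallelized over the default OpenMP thread count.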
const int end = forceToAdd.size();
int gmx_unused nt = gmx_omp_nthreads_get(emntDefault);
#pragma omp parallel for num_threads(nt) schedule(static)
for (int i = 0; i < end; i++)
rvec_inc(f[i], forceToAdd[i]);
static void calc_virial(int start, int homenr, const rvec x[],
const gmx::ForceWithShiftForces &forceWithShiftForces,
tensor vir_part, const t_graph *graph, const matrix box,
t_nrnb *nrnb, const t_forcerec *fr, int ePBC)
/* The short-range virial from surrounding boxes */
const rvec *fshift = as_rvec_array(forceWithShiftForces.shiftForces().data());
calc_vir(SHIFTS, fr->shift_vec, fshift, vir_part, ePBC == epbcSCREW, box);
inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);
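/* Illustrative note (not from the original sources): both contributions
 * follow the GROMACS virial convention Xi = -1/2 sum_i r_i (x) F_i; the
 * periodic-image part above is built from the shift vectors and the
 * accumulated shift forces.
 */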
/* Calculate partial virial, for local atoms only, based on short range.
 * Total virial is computed in global_stat, called from do_md
const rvec *f = as_rvec_array(forceWithShiftForces.force().data());
f_calc_vir(start, start+homenr, x, f, vir_part, graph, box);
inc_nrnb(nrnb, eNR_VIRIAL, homenr);
pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
static void pull_potential_wrapper(const t_commrec *cr,
const t_inputrec *ir,
const matrix box, gmx::ArrayRef<const gmx::RVec> x,
gmx::ForceWithVirial *force,
const t_mdatoms *mdatoms,
gmx_enerdata_t *enerd,
gmx_wallcycle_t wcycle)
/* Calculate the center of mass forces; this requires communication,
 * which is why pull_potential is called close to other communication.
wallcycle_start(wcycle, ewcPULLPOT);
set_pbc(&pbc, ir->ePBC, box);
enerd->term[F_COM_PULL] +=
pull_potential(pull_work, mdatoms, &pbc,
cr, t, lambda[efptRESTRAINT], as_rvec_array(x.data()), force, &dvdl);
enerd->dvdl_lin[efptRESTRAINT] += dvdl;
wallcycle_stop(wcycle, ewcPULLPOT);
static void pme_receive_force_ener(const t_commrec *cr,
gmx::ForceWithVirial *forceWithVirial,
gmx_enerdata_t *enerd,
gmx_wallcycle_t wcycle)
real e_q, e_lj, dvdl_q, dvdl_lj;
float cycles_ppdpme, cycles_seppme;
cycles_ppdpme = wallcycle_stop(wcycle, ewcPPDURINGPME);
dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);
/* In case of node-splitting, the PP nodes receive the long-range
 * forces, virial and energy from the PME nodes here.
wallcycle_start(wcycle, ewcPP_PMEWAITRECVF);
gmx_pme_receive_f(cr, forceWithVirial, &e_q, &e_lj, &dvdl_q, &dvdl_lj,
enerd->term[F_COUL_RECIP] += e_q;
enerd->term[F_LJ_RECIP] += e_lj;
enerd->dvdl_lin[efptCOUL] += dvdl_q;
enerd->dvdl_lin[efptVDW] += dvdl_lj;
dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);
wallcycle_stop(wcycle, ewcPP_PMEWAITRECVF);
static void print_large_forces(FILE *fp,
real force2Tolerance = gmx::square(forceTolerance);
gmx::index numNonFinite = 0;
for (int i = 0; i < md->homenr; i++)
real force2 = norm2(f[i]);
bool nonFinite = !std::isfinite(force2);
if (force2 >= force2Tolerance || nonFinite)
fprintf(fp, "step %" PRId64 " atom %6d x %8.3f %8.3f %8.3f force %12.5e\n",
ddglatnr(cr->dd, i), x[i][XX], x[i][YY], x[i][ZZ], std::sqrt(force2));
if (numNonFinite > 0)
/* Note that with MPI this fatal call on one rank might interrupt
 * the printing on other ranks. But we can only avoid that with
 * an expensive MPI barrier that we would need at each step.
gmx_fatal(FARGS, "At step %" PRId64 " detected non-finite forces on %td atoms", step, numNonFinite);
static void post_process_forces(const t_commrec *cr,
gmx_wallcycle_t wcycle,
const gmx_localtop_t *top,
ForceOutputs *forceOutputs,
const t_mdatoms *mdatoms,
const t_graph *graph,
const t_forcerec *fr,
const gmx_vsite_t *vsite,
const gmx::ForceFlags &forceFlags)
rvec *f = as_rvec_array(forceOutputs->forceWithShiftForces().force().data());
if (fr->haveDirectVirialContributions)
auto &forceWithVirial = forceOutputs->forceWithVirial();
rvec *fDirectVir = as_rvec_array(forceWithVirial.force_.data());
/* Spread the mesh force on virtual sites to the other particles...
 * This is parallelized. MPI communication is performed
 * if the constructing atoms aren't local.
matrix virial = { { 0 } };
spread_vsite_f(vsite, x, fDirectVir, nullptr,
forceFlags.computeVirial, virial,
&top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr, wcycle);
forceWithVirial.addVirialContribution(virial);
if (forceFlags.computeVirial)
/* Now add the forces, this is local */
sum_forces(f, forceWithVirial.force_);
/* Add the direct virial contributions */
GMX_ASSERT(forceWithVirial.computeVirial_, "forceWithVirial should request virial computation when we request the virial");
m_add(vir_force, forceWithVirial.getVirial(), vir_force);
pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
if (fr->print_force >= 0)
print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
static void do_nb_verlet(t_forcerec *fr,
const interaction_const_t *ic,
gmx_enerdata_t *enerd,
const gmx::ForceFlags &forceFlags,
const Nbnxm::InteractionLocality ilocality,
gmx_wallcycle_t wcycle)
if (!forceFlags.computeNonbondedForces)
/* skip non-bonded calculation */
nonbonded_verlet_t *nbv = fr->nbv.get();
/* GPU kernel launch overhead is already timed separately */
if (fr->cutoff_scheme != ecutsVERLET)
gmx_incons("Invalid cut-off scheme passed!");
/* When dynamic pair-list pruning is requested, we need to prune
 * at nstlistPrune steps.
if (nbv->isDynamicPruningStepCpu(step))
/* Prune the pair-list beyond fr->ic->rlistPrune using
 * the current coordinates of the atoms.
wallcycle_sub_start(wcycle, ewcsNONBONDED_PRUNING);
nbv->dispatchPruneKernelCpu(ilocality, fr->shift_vec);
wallcycle_sub_stop(wcycle, ewcsNONBONDED_PRUNING);
nbv->dispatchNonbondedKernel(ilocality, *ic, forceFlags, clearF, *fr, enerd, nrnb);
static inline void clear_rvecs_omp(int n, rvec v[])
int nth = gmx_omp_nthreads_get_simple_rvec_task(emntDefault, n);
/* Note that we would like to avoid this conditional by putting it
 * into the omp pragma instead, but then we still take the full
 * omp parallel for overhead (at least with gcc5).
for (int i = 0; i < n; i++)
#pragma omp parallel for num_threads(nth) schedule(static)
for (int i = 0; i < n; i++)
/*! \brief Return an estimate of the average kinetic energy or 0 when unreliable
 * \param groupOptions Group options, containing T-coupling options
static real averageKineticEnergyEstimate(const t_grpopts &groupOptions)
real nrdfCoupled = 0;
real nrdfUncoupled = 0;
real kineticEnergy = 0;
for (int g = 0; g < groupOptions.ngtc; g++)
if (groupOptions.tau_t[g] >= 0)
nrdfCoupled += groupOptions.nrdf[g];
kineticEnergy += groupOptions.nrdf[g]*0.5*groupOptions.ref_t[g]*BOLTZ;
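/* Descriptive note (equipartition): each T-coupled degree of freedom
 * contributes kB*T_ref/2, so the sum above is the expected kinetic
 * energy of the coupled groups at their reference temperatures.
 */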
nrdfUncoupled += groupOptions.nrdf[g];
/* This conditional with > also catches nrdf=0 */
if (nrdfCoupled > nrdfUncoupled)
return kineticEnergy*(nrdfCoupled + nrdfUncoupled)/nrdfCoupled;
/*! \brief This routine checks that the potential energy is finite.
 * Always checks that the potential energy is finite. If step equals
 * inputrec.init_step also checks that the magnitude of the potential energy
 * is reasonable. Terminates with a fatal error when a check fails.
 * Note that passing this check does not guarantee finite forces,
 * since those use slightly different arithmetic. But in most cases
 * there is just a narrow coordinate range where forces are not finite
 * and energies are finite.
 * \param[in] step The step number, used for checking and printing
 * \param[in] enerd The energy data; the non-bonded group energies need to be added to enerd.term[F_EPOT] before calling this routine
 * \param[in] inputrec The input record
static void checkPotentialEnergyValidity(int64_t step,
const gmx_enerdata_t &enerd,
const t_inputrec &inputrec)
/* Threshold valid for comparing absolute potential energy against
 * the kinetic energy. Normally one should not consider absolute
 * potential energy values, but with a factor of one million
 * we should never get false positives.
constexpr real c_thresholdFactor = 1e6;
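/* Worked example (illustrative, not from the original sources): for
 * ~1e5 coupled degrees of freedom at T_ref = 300 K the estimate below is
 * about 0.5*1e5*kB*300 ~ 1.2e5 kJ/mol, so the fatal-error trigger sits
 * near 1.2e11 kJ/mol, far above any sane potential energy.
 */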
bool energyIsNotFinite = !std::isfinite(enerd.term[F_EPOT]);
real averageKineticEnergy = 0;
/* We only check for large potential energy at the initial step,
 * because that is by far the most likely step for this to occur
 * and because computing the average kinetic energy is not free.
 * Note: nstcalcenergy >> 1 often does not allow catching large energies
 * before they become NaN.
if (step == inputrec.init_step && EI_DYNAMICS(inputrec.eI))
averageKineticEnergy = averageKineticEnergyEstimate(inputrec.opts);
if (energyIsNotFinite || (averageKineticEnergy > 0 &&
enerd.term[F_EPOT] > c_thresholdFactor*averageKineticEnergy))
gmx_fatal(FARGS, "Step %" PRId64 ": The total potential energy is %g, which is %s. The LJ and electrostatic contributions to the energy are %g and %g, respectively. A %s potential energy can be caused by overlapping interactions in bonded interactions or very large%s coordinate values. Usually this is caused by a badly- or non-equilibrated initial configuration, incorrect interactions or parameters in the topology.",
energyIsNotFinite ? "not finite" : "extremely high",
enerd.term[F_COUL_SR],
energyIsNotFinite ? "non-finite" : "very high",
energyIsNotFinite ? " or NaN" : "");
/*! \brief Return true if there are special forces computed this step.
 * The conditionals exactly correspond to those in computeSpecialForces().
haveSpecialForces(const t_inputrec *inputrec,
ForceProviders *forceProviders,
const pull_t *pull_work,
const bool computeForces,
((computeForces && forceProviders->hasForceProvider()) || // forceProviders
(inputrec->bPull && pull_have_potential(pull_work)) || // pull
inputrec->bRot || // enforced rotation
(ed != nullptr) || // flooding
(inputrec->bIMD && computeForces)); // IMD
/*! \brief Compute forces and/or energies for special algorithms
 * The intention is to collect all calls to algorithms that compute
 * forces on local atoms only and that do not contribute to the local
 * virial sum (but add their virial contribution separately).
 * Eventually these should likely all become ForceProviders.
 * Within this function the intention is to have algorithms that do
 * global communication at the end, so global barriers within the MD loop
 * are as close together as possible.
 * \param[in] fplog The log file
 * \param[in] cr The communication record
 * \param[in] inputrec The input record
 * \param[in] awh The Awh module (nullptr if none in use).
 * \param[in] enforcedRotation Enforced rotation module.
 * \param[in] imdSession The IMD session
 * \param[in] pull_work The pull work structure.
 * \param[in] step The current MD step
 * \param[in] t The current time
 * \param[in,out] wcycle Wallcycle accounting struct
 * \param[in,out] forceProviders Pointer to a list of force providers
 * \param[in] box The unit cell
 * \param[in] x The coordinates
 * \param[in] mdatoms Per atom properties
 * \param[in] lambda Array of free-energy lambda values
 * \param[in] forceFlags Force schedule flags
 * \param[in,out] forceWithVirial Force and virial buffers
 * \param[in,out] enerd Energy buffer
 * \param[in,out] ed Essential dynamics pointer
 * \param[in] didNeighborSearch Tells if we did neighbor searching this step, used for ED sampling
 * \todo Remove didNeighborSearch, which is used incorrectly.
 * \todo Convert all other algorithms called here to ForceProviders.
computeSpecialForces(FILE *fplog,
const t_inputrec *inputrec,
gmx_enfrot *enforcedRotation,
gmx::ImdSession *imdSession,
gmx_wallcycle_t wcycle,
ForceProviders *forceProviders,
gmx::ArrayRef<const gmx::RVec> x,
const t_mdatoms *mdatoms,
const gmx::ForceFlags &forceFlags,
gmx::ForceWithVirial *forceWithVirial,
gmx_enerdata_t *enerd,
bool didNeighborSearch)
/* NOTE: Currently all ForceProviders only provide forces.
 * When they also provide energies, remove this conditional.
if (forceFlags.computeForces)
gmx::ForceProviderInput forceProviderInput(x, *mdatoms, t, box, *cr);
gmx::ForceProviderOutput forceProviderOutput(forceWithVirial, enerd);
/* Collect forces from modules */
forceProviders->calculateForces(forceProviderInput, &forceProviderOutput);
if (inputrec->bPull && pull_have_potential(pull_work))
pull_potential_wrapper(cr, inputrec, box, x,
mdatoms, enerd, pull_work, lambda, t,
enerd->term[F_COM_PULL] +=
awh->applyBiasForcesAndUpdateBias(inputrec->ePBC, *mdatoms, box,
t, step, wcycle, fplog);
rvec *f = as_rvec_array(forceWithVirial->force_.data());
/* Add the forces from enforced rotation potentials (if any) */
wallcycle_start(wcycle, ewcROTadd);
enerd->term[F_COM_PULL] += add_rot_forces(enforcedRotation, f, cr, step, t);
wallcycle_stop(wcycle, ewcROTadd);
/* Note that since init_edsam() is called after the initialization
 * of forcerec, edsam doesn't request the noVirSum force buffer.
 * Thus if no other algorithm (e.g. PME) requires it, the forces
 * here will contribute to the virial.
do_flood(cr, inputrec, as_rvec_array(x.data()), f, ed, box, step, didNeighborSearch);
/* Add forces from interactive molecular dynamics (IMD), if any */
if (inputrec->bIMD && forceFlags.computeForces)
imdSession->applyForces(f);
/*! \brief Launch the prepare_step and spread stages of PME GPU.
 * \param[in] pmedata The PME structure
 * \param[in] box The box matrix
 * \param[in] x Coordinate array
 * \param[in] forceFlags Force schedule flags
 * \param[in] pmeFlags PME flags
 * \param[in] useGpuForceReduction True if GPU-based force reduction is active this step
 * \param[in] wcycle The wallcycle structure
static inline void launchPmeGpuSpread(gmx_pme_t *pmedata,
const gmx::ForceFlags &forceFlags,
bool useGpuForceReduction,
gmx_wallcycle_t wcycle)
pme_gpu_prepare_computation(pmedata, forceFlags.haveDynamicBox, box, wcycle, pmeFlags, useGpuForceReduction);
pme_gpu_copy_coordinates_to_gpu(pmedata, x, wcycle);
pme_gpu_launch_spread(pmedata, wcycle);
/*! \brief Launch the FFT and gather stages of PME GPU
 * This function only implements setting the output forces (no accumulation).
 * \param[in] pmedata The PME structure
 * \param[in] wcycle The wallcycle structure
static void launchPmeGpuFftAndGather(gmx_pme_t *pmedata,
gmx_wallcycle_t wcycle)
pme_gpu_launch_complex_transforms(pmedata, wcycle);
pme_gpu_launch_gather(pmedata, wcycle, PmeForceOutputHandling::Set);
 * Polling wait for either of the PME or nonbonded GPU tasks.
 * Instead of a static order in waiting for GPU tasks, this function
 * polls to check which of the two tasks completes first, and does the
 * associated force buffer reduction overlapped with the other task.
 * By doing that, unlike static scheduling order, it can always overlap
 * one of the reductions, regardless of the GPU task completion order.
 * \param[in] nbv Nonbonded verlet structure
 * \param[in,out] pmedata PME module data
 * \param[in,out] forceOutputs Output buffer for the forces and virial
 * \param[in,out] enerd Energy data structure results are reduced into
 * \param[in] forceFlags Force schedule flags
 * \param[in] pmeFlags PME flags
 * \param[in] wcycle The wallcycle structure
static void alternatePmeNbGpuWaitReduce(nonbonded_verlet_t *nbv,
gmx::ForceOutputs *forceOutputs,
gmx_enerdata_t *enerd,
const gmx::ForceFlags &forceFlags,
gmx_wallcycle_t wcycle)
bool isPmeGpuDone = false;
bool isNbGpuDone = false;
gmx::ForceWithShiftForces &forceWithShiftForces = forceOutputs->forceWithShiftForces();
gmx::ForceWithVirial &forceWithVirial = forceOutputs->forceWithVirial();
gmx::ArrayRef<const gmx::RVec> pmeGpuForces;
while (!isPmeGpuDone || !isNbGpuDone)
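// Descriptive note: while the other task is still pending we only
// Check (non-blocking poll); once one task has completed, the other
// is finished with a blocking Wait, which guarantees progress.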
GpuTaskCompletion completionType = (isNbGpuDone) ? GpuTaskCompletion::Wait : GpuTaskCompletion::Check;
isPmeGpuDone = pme_gpu_try_finish_task(pmedata, pmeFlags, wcycle, &forceWithVirial, enerd, completionType);
GpuTaskCompletion completionType = (isPmeGpuDone) ? GpuTaskCompletion::Wait : GpuTaskCompletion::Check;
isNbGpuDone = Nbnxm::gpu_try_finish_task(nbv->gpu_nbv,
Nbnxm::AtomLocality::Local,
enerd->grpp.ener[egLJSR].data(),
enerd->grpp.ener[egCOULSR].data(),
forceWithShiftForces.shiftForces(), completionType, wcycle);
nbv->atomdata_add_nbat_f_to_f(Nbnxm::AtomLocality::Local,
forceWithShiftForces.force());
/*! \brief Set up the different force buffers; also does clearing.
 * \param[in] fr force record pointer
 * \param[in] pull_work The pull work object.
 * \param[in] inputrec input record
 * \param[in] force force array
 * \param[in] forceFlags Force schedule flags
 * \param[out] wcycle wallcycle recording structure
 * \returns Cleared force output structure
setupForceOutputs(t_forcerec *fr,
const t_inputrec &inputrec,
gmx::ArrayRefWithPadding<gmx::RVec> force,
const gmx::ForceFlags &forceFlags,
gmx_wallcycle_t wcycle)
wallcycle_sub_start(wcycle, ewcsCLEAR_FORCE_BUFFER);
/* NOTE: We assume fr->shiftForces is all zeros here */
gmx::ForceWithShiftForces forceWithShiftForces(force, forceFlags.computeVirial, fr->shiftForces);
if (forceFlags.computeForces)
/* Clear the short- and long-range forces */
clear_rvecs_omp(fr->natoms_force_constr,
as_rvec_array(forceWithShiftForces.force().data()));
/* If we need to compute the virial, we might need a separate
 * force buffer for algorithms for which the virial is calculated
 * directly, such as PME. Otherwise, forceWithVirial uses
 * the same force (f in legacy calls) buffer as other algorithms.
const bool useSeparateForceWithVirialBuffer = (forceFlags.computeForces &&
(forceFlags.computeVirial && fr->haveDirectVirialContributions));
/* forceWithVirial uses the local atom range only */
gmx::ForceWithVirial forceWithVirial(useSeparateForceWithVirialBuffer ?
fr->forceBufferForDirectVirialContributions : force.unpaddedArrayRef(),
forceFlags.computeVirial);
if (useSeparateForceWithVirialBuffer)
/* TODO: update comment
 * We only compute forces on local atoms. Note that vsites can
 * spread to non-local atoms, but that part of the buffer is
 * cleared separately in the vsite spreading code.
clear_rvecs_omp(forceWithVirial.force_.size(), as_rvec_array(forceWithVirial.force_.data()));
if (inputrec.bPull && pull_have_constraint(pull_work))
clear_pull_forces(pull_work);
wallcycle_sub_stop(wcycle, ewcsCLEAR_FORCE_BUFFER);
return ForceOutputs(forceWithShiftForces, forceWithVirial);
/*! \brief Set up flags that indicate what type of work there is to compute.
 * Currently we only update it at search steps,
 * but some properties may change more frequently (e.g. virial/non-virial step),
 * so when including those either the frequency of update (per-step) or the scope
 * of a flag will change (i.e. a set of flags for nstlist steps).
setupForceWorkload(gmx::PpForceWorkload *forceWork,
const t_inputrec *inputrec,
const t_forcerec *fr,
const pull_t *pull_work,
const gmx::ForceFlags &forceFlags
forceWork->haveSpecialForces = haveSpecialForces(inputrec, fr->forceProviders, pull_work, forceFlags.computeForces, ed);
forceWork->haveCpuBondedWork = haveCpuBondeds(*fr);
forceWork->haveGpuBondedWork = ((fr->gpuBonded != nullptr) && fr->gpuBonded->haveInteractions());
forceWork->haveRestraintsWork = havePositionRestraints(idef, *fcd);
forceWork->haveCpuListedForceWork = haveCpuListedForces(*fr, idef, *fcd);
/*! \brief Set up force flag struct from the force bitmask.
 * \param[out] flags Force schedule flags
 * \param[in] legacyFlags Force bitmask flags used to construct the new flags
 * \param[in] isNonbondedOn Global override; if false, all nonbonded calculation is turned off.
setupForceFlags(gmx::ForceFlags *flags,
const int legacyFlags,
const bool isNonbondedOn)
flags->stateChanged = ((legacyFlags & GMX_FORCE_STATECHANGED) != 0);
flags->haveDynamicBox = ((legacyFlags & GMX_FORCE_DYNAMICBOX) != 0);
flags->doNeighborSearch = ((legacyFlags & GMX_FORCE_NS) != 0);
flags->computeVirial = ((legacyFlags & GMX_FORCE_VIRIAL) != 0);
flags->computeEnergy = ((legacyFlags & GMX_FORCE_ENERGY) != 0);
flags->computeForces = ((legacyFlags & GMX_FORCE_FORCES) != 0);
flags->computeListedForces = ((legacyFlags & GMX_FORCE_LISTED) != 0);
flags->computeNonbondedForces = ((legacyFlags & GMX_FORCE_NONBONDED) != 0) && isNonbondedOn;
flags->computeDhdl = ((legacyFlags & GMX_FORCE_DHDL) != 0);
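/* Illustrative use (hypothetical bitmask value, not from the original
 * sources):
 *   setupForceFlags(&flags, GMX_FORCE_STATECHANGED | GMX_FORCE_FORCES, fr->bNonbonded);
 * sets flags.stateChanged and flags.computeForces to true and every other
 * field to false; computeNonbondedForces additionally requires both
 * GMX_FORCE_NONBONDED and the isNonbondedOn override.
 */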
/* \brief Launch end-of-step GPU tasks: buffer clearing and rolling pruning.
 * TODO: eliminate \p useGpuNonbonded and \p useGpuPme when these are
 * incorporated in PpForceWorkload.
launchGpuEndOfStepTasks(nonbonded_verlet_t *nbv,
gmx::GpuBonded *gpuBonded,
gmx_enerdata_t *enerd,
const gmx::MdScheduleWorkload &mdScheduleWork,
bool useGpuNonbonded,
gmx_wallcycle_t wcycle)
/* Launch pruning before buffer clearing because the API overhead of the
 * clear kernel launches can leave the GPU idle while it could be running
if (nbv->isDynamicPruningStepGpu(step))
nbv->dispatchPruneKernelGpu(step);
/* now clear the GPU outputs while we finish the step on the CPU */
wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
Nbnxm::gpu_clear_outputs(nbv->gpu_nbv, mdScheduleWork.forceFlags.computeVirial);
wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
wallcycle_stop(wcycle, ewcLAUNCH_GPU);
pme_gpu_reinit_computation(pmedata, wcycle);
if (mdScheduleWork.forceWork.haveGpuBondedWork && mdScheduleWork.forceFlags.computeEnergy)
// in principle this should be included in the DD balancing region,
// but generally it is infrequent so we'll omit it for the sake of
gpuBonded->waitAccumulateEnergyTerms(enerd);
gpuBonded->clearEnergies();
void do_force(FILE *fplog,
const gmx_multisim_t *ms,
const t_inputrec *inputrec,
gmx_enfrot *enforcedRotation,
gmx::ImdSession *imdSession,
gmx_wallcycle_t wcycle,
const gmx_localtop_t *top,
gmx::ArrayRefWithPadding<gmx::RVec> x,
gmx::ArrayRefWithPadding<gmx::RVec> force,
const t_mdatoms *mdatoms,
gmx_enerdata_t *enerd,
gmx::ArrayRef<real> lambda,
gmx::MdScheduleWorkload *mdScheduleWork,
const gmx_vsite_t *vsite,
const DDBalanceRegionHandler &ddBalanceRegionHandler)
gmx_bool bFillGrid, bCalcCGCM;
gmx_bool bUseGPU, bUseOrEmulGPU;
nonbonded_verlet_t *nbv = fr->nbv.get();
interaction_const_t *ic = fr->ic;
// TODO remove the code below when the legacy flags are not in use anymore
/* modify force flag if not doing nonbonded */
legacyFlags &= ~GMX_FORCE_NONBONDED;
setupForceFlags(&mdScheduleWork->forceFlags, legacyFlags, fr->bNonbonded);
const gmx::ForceFlags &forceFlags = mdScheduleWork->forceFlags;
bFillGrid = (forceFlags.doNeighborSearch && forceFlags.stateChanged);
bCalcCGCM = (bFillGrid && !DOMAINDECOMP(cr));
bUseGPU = fr->nbv->useGpu();
bUseOrEmulGPU = bUseGPU || fr->nbv->emulateGpu();
const auto pmeRunMode = fr->pmedata ? pme_run_mode(fr->pmedata) : PmeRunMode::CPU;
// TODO slim this conditional down - inputrec and duty checks should mean the same in proper code!
const bool useGpuPme = EEL_PME(fr->ic->eeltype) && thisRankHasDuty(cr, DUTY_PME) &&
((pmeRunMode == PmeRunMode::GPU) || (pmeRunMode == PmeRunMode::Mixed));
const int pmeFlags = GMX_PME_SPREAD | GMX_PME_SOLVE |
(forceFlags.computeVirial ? GMX_PME_CALC_ENER_VIR : 0) |
(forceFlags.computeEnergy ? GMX_PME_CALC_ENER_VIR : 0) |
(forceFlags.computeForces ? GMX_PME_CALC_F : 0);
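// Illustrative example (not from the original sources): on a regular MD
// step that needs forces but neither energy nor virial, this evaluates to
// GMX_PME_SPREAD | GMX_PME_SOLVE | GMX_PME_CALC_F, i.e. the energy/virial
// reduction in the PME kernels is skipped.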
// Switches on whether to use GPU for position and force buffer operations
// TODO consider all possible combinations of triggers, and how to combine optimally in each case.
const BufferOpsUseGpu useGpuXBufOps = (c_enableGpuBufOps && bUseGPU && (GMX_GPU == GMX_GPU_CUDA)) ?
BufferOpsUseGpu::True : BufferOpsUseGpu::False;
// GPU Force buffer ops are disabled on virial steps, because the virial calc is not yet ported to GPU
const BufferOpsUseGpu useGpuFBufOps = (c_enableGpuBufOps && bUseGPU && (GMX_GPU == GMX_GPU_CUDA))
&& !(forceFlags.computeVirial || forceFlags.computeEnergy) ?
BufferOpsUseGpu::True : BufferOpsUseGpu::False;
// TODO: move / add this flag to the internal PME GPU data structures
const bool useGpuPmeFReduction = (useGpuFBufOps == BufferOpsUseGpu::True) &&
thisRankHasDuty(cr, DUTY_PME) && useGpuPme; // only supported if this rank is performing PME on the GPU
/* At a search step we need to start the first balancing region
 * somewhere early inside the step after communication during domain
 * decomposition (and not during the previous step as usual).
if (forceFlags.doNeighborSearch)
ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::yes);
const int homenr = mdatoms->homenr;
clear_mat(vir_force);
if (forceFlags.stateChanged)
if (inputrecNeedMutot(inputrec))
/* Calculate total (local) dipole moment in a temporary common array.
 * This makes it possible to sum them over nodes faster.
calc_mu(start, homenr,
x.unpaddedArrayRef(), mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
if (fr->ePBC != epbcNONE)
/* Compute shift vectors every step,
 * because of pressure coupling or box deformation!
if (forceFlags.haveDynamicBox && forceFlags.stateChanged)
calc_shifts(box, fr->shift_vec);
put_atoms_in_box_omp(fr->ePBC, box, x.unpaddedArrayRef().subArray(0, homenr), gmx_omp_nthreads_get(emntDefault));
inc_nrnb(nrnb, eNR_SHIFTX, homenr);
else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
unshift_self(graph, box, as_rvec_array(x.unpaddedArrayRef().data()));
nbnxn_atomdata_copy_shiftvec(forceFlags.haveDynamicBox,
fr->shift_vec, nbv->nbat.get());
if (!thisRankHasDuty(cr, DUTY_PME))
/* Send particle coordinates to the pme nodes.
 * Since this is only implemented for domain decomposition
 * and domain decomposition does not use the graph,
 * we do not need to worry about shifting.
gmx_pme_send_coordinates(cr, box, as_rvec_array(x.unpaddedArrayRef().data()),
lambda[efptCOUL], lambda[efptVDW],
(forceFlags.computeVirial || forceFlags.computeEnergy),
#endif /* GMX_MPI */
launchPmeGpuSpread(fr->pmedata, box, as_rvec_array(x.unpaddedArrayRef().data()), forceFlags, pmeFlags, useGpuPmeFReduction, wcycle);
/* do gridding for pair search */
if (forceFlags.doNeighborSearch)
if (graph && forceFlags.stateChanged)
/* Calculate intramolecular shift vectors to make molecules whole */
mk_mshift(fplog, graph, fr->ePBC, box, as_rvec_array(x.unpaddedArrayRef().data()));
// - vzero is constant, do we need to pass it?
// - box_diag should be passed directly to nbnxn_put_on_grid
box_diag[XX] = box[XX][XX];
box_diag[YY] = box[YY][YY];
box_diag[ZZ] = box[ZZ][ZZ];
wallcycle_start(wcycle, ewcNS);
if (!DOMAINDECOMP(cr))
wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
nbnxn_put_on_grid(nbv, box,
nullptr, 0, mdatoms->homenr, -1,
fr->cginfo, x.unpaddedArrayRef(),
wallcycle_sub_stop(wcycle, ewcsNBS_GRID_LOCAL);
wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
nbnxn_put_on_grid_nonlocal(nbv, domdec_zones(cr->dd),
fr->cginfo, x.unpaddedArrayRef());
wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);
nbv->setAtomProperties(*mdatoms, fr->cginfo);
wallcycle_stop(wcycle, ewcNS);
/* initialize the GPU nbnxm atom data and bonded data structures */
wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
Nbnxm::gpu_init_atomdata(nbv->gpu_nbv, nbv->nbat.get());
wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
/* Now that we have put all atoms on the grid, we can assign bonded
 * interactions to the GPU, where the grid order is
 * needed. Also the xq, f and fshift device buffers have
 * been reallocated if needed, so the bonded code can
 * learn about them. */
// TODO the xq, f, and fshift buffers are now shared
// resources, so they should be maintained by a
// higher-level object than the nb module.
fr->gpuBonded->updateInteractionListsAndDeviceBuffers(nbv->getGridIndices(),
Nbnxm::gpu_get_xq(nbv->gpu_nbv),
Nbnxm::gpu_get_f(nbv->gpu_nbv),
Nbnxm::gpu_get_fshift(nbv->gpu_nbv));
wallcycle_stop(wcycle, ewcLAUNCH_GPU);
// Call it per-step as force-flags can change.
// Need to run after the GPU-offload bonded interaction lists
// are set up to be able to determine whether there is bonded work.
setupForceWorkload(&mdScheduleWork->forceWork,
const gmx::PpForceWorkload &forceWork = mdScheduleWork->forceWork;
/* do local pair search */
if (forceFlags.doNeighborSearch)
// TODO: fuse this branch with the above forceFlags.doNeighborSearch block
wallcycle_start_nocount(wcycle, ewcNS);
wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
/* Note that with a GPU the launch overhead of the list transfer is not timed separately */
nbv->constructPairlist(Nbnxm::InteractionLocality::Local,
&top->excls, step, nrnb);
nbv->setupGpuShortRangeWork(fr->gpuBonded, Nbnxm::InteractionLocality::Local);
wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_LOCAL);
wallcycle_stop(wcycle, ewcNS);
if (useGpuXBufOps == BufferOpsUseGpu::True)
nbv->atomdata_init_copy_x_to_nbat_x_gpu();
// For force buffer ops, we use the below condition rather than
// useGpuFBufOps to ensure that init is performed even if this
// NS step is also a virial step (on which f buf ops are deactivated).
if (c_enableGpuBufOps && bUseGPU && (GMX_GPU == GMX_GPU_CUDA))
nbv->atomdata_init_add_nbat_f_to_f_gpu();
else if (!EI_TPI(inputrec->eI))
if (useGpuXBufOps == BufferOpsUseGpu::True)
// The condition here was (pme != nullptr && pme_gpu_get_device_x(fr->pmedata) != nullptr)
nbv->copyCoordinatesToGpu(Nbnxm::AtomLocality::Local, false,
x.unpaddedArrayRef());
nbv->convertCoordinatesGpu(Nbnxm::AtomLocality::Local, false,
useGpuPme ? pme_gpu_get_device_x(fr->pmedata) : nbv->getDeviceCoordinates());
nbv->convertCoordinates(Nbnxm::AtomLocality::Local, false,
x.unpaddedArrayRef());
ddBalanceRegionHandler.openBeforeForceComputationGpu();
wallcycle_start(wcycle, ewcLAUNCH_GPU);
wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
Nbnxm::gpu_upload_shiftvec(nbv->gpu_nbv, nbv->nbat.get());
if (forceFlags.doNeighborSearch || (useGpuXBufOps == BufferOpsUseGpu::False))
Nbnxm::gpu_copy_xq_to_gpu(nbv->gpu_nbv, nbv->nbat.get(),
Nbnxm::AtomLocality::Local);
wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
// with X buffer ops offloaded to the GPU on all but the search steps
// bonded work is not split into separate local and non-local, so with DD
// we can only launch the kernel after non-local coordinates have been received.
if (forceWork.haveGpuBondedWork && !havePPDomainDecomposition(cr))
wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_BONDED);
fr->gpuBonded->launchKernel(fr, forceFlags, box);
wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_BONDED);
/* launch local nonbonded work on GPU */
wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
do_nb_verlet(fr, ic, enerd, forceFlags, Nbnxm::InteractionLocality::Local, enbvClearFNo,
step, nrnb, wcycle);
wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
wallcycle_stop(wcycle, ewcLAUNCH_GPU);
// In PME GPU and mixed mode we launch FFT / gather after the
// X copy/transform to allow overlap as well as after the GPU NB
// launch to avoid FFT launch overhead hijacking the CPU and delaying
// the nonbonded kernel.
launchPmeGpuFftAndGather(fr->pmedata, wcycle);
const bool ddUsesGpuDirectCommunication
= c_enableGpuHaloExchange && c_enableGpuBufOps && bUseGPU && havePPDomainDecomposition(cr);
gmx::GpuHaloExchange *gpuHaloExchange = ddUsesGpuDirectCommunication ? cr->dd->gpuHaloExchange.get() : nullptr;
GMX_ASSERT(!ddUsesGpuDirectCommunication || gpuHaloExchange != nullptr,
"Must have valid gpuHaloExchange when doing halo exchange on the GPU");
/* Communicate coordinates and sum dipole if necessary +
   do non-local pair search */
if (havePPDomainDecomposition(cr))
if (forceFlags.doNeighborSearch)
// TODO: fuse this branch with the above large forceFlags.doNeighborSearch block
wallcycle_start_nocount(wcycle, ewcNS);
wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);
/* Note that with a GPU the launch overhead of the list transfer is not timed separately */
nbv->constructPairlist(Nbnxm::InteractionLocality::NonLocal,
&top->excls, step, nrnb);
nbv->setupGpuShortRangeWork(fr->gpuBonded, Nbnxm::InteractionLocality::NonLocal);
wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);
wallcycle_stop(wcycle, ewcNS);
if (ddUsesGpuDirectCommunication)
rvec* d_x = static_cast<rvec *> (nbv->get_gpu_xrvec());
rvec* d_f = static_cast<rvec *> (nbv->get_gpu_frvec());
gpuHaloExchange->reinitHalo(d_x, d_f);
if (ddUsesGpuDirectCommunication)
// The following must be called after local setCoordinates (which records an event
// when the coordinate data has been copied to the device).
gpuHaloExchange->communicateHaloCoordinates(box);
// TODO Force flags should include haveFreeEnergyWork for this domain
if (forceWork.haveCpuBondedWork || (fr->efep != efepNO))
// non-local part of coordinate buffer must be copied back to host for CPU work
nbv->launch_copy_x_from_gpu(as_rvec_array(x.unpaddedArrayRef().data()), Nbnxm::AtomLocality::NonLocal);
dd_move_x(cr->dd, box, x.unpaddedArrayRef(), wcycle);
if (useGpuXBufOps == BufferOpsUseGpu::True)
// The condition here was (pme != nullptr && pme_gpu_get_device_x(fr->pmedata) != nullptr)
if (!useGpuPme && !ddUsesGpuDirectCommunication)
nbv->copyCoordinatesToGpu(Nbnxm::AtomLocality::NonLocal, false,
x.unpaddedArrayRef());
nbv->convertCoordinatesGpu(Nbnxm::AtomLocality::NonLocal, false,
useGpuPme ? pme_gpu_get_device_x(fr->pmedata) : nbv->getDeviceCoordinates());
nbv->convertCoordinates(Nbnxm::AtomLocality::NonLocal, false,
x.unpaddedArrayRef());
wallcycle_start(wcycle, ewcLAUNCH_GPU);
if (forceFlags.doNeighborSearch || (useGpuXBufOps == BufferOpsUseGpu::False))
wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
Nbnxm::gpu_copy_xq_to_gpu(nbv->gpu_nbv, nbv->nbat.get(),
Nbnxm::AtomLocality::NonLocal);
wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
if (forceWork.haveGpuBondedWork)
wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_BONDED);
fr->gpuBonded->launchKernel(fr, forceFlags, box);
wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_BONDED);
/* launch non-local nonbonded tasks on GPU */
wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
do_nb_verlet(fr, ic, enerd, forceFlags, Nbnxm::InteractionLocality::NonLocal, enbvClearFNo,
step, nrnb, wcycle);
wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
wallcycle_stop(wcycle, ewcLAUNCH_GPU);
/* launch D2H copy-back F */
wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
bool copyBackNbForce = (useGpuFBufOps == BufferOpsUseGpu::False);
if (havePPDomainDecomposition(cr))
Nbnxm::gpu_launch_cpyback(nbv->gpu_nbv, nbv->nbat.get(),
forceFlags, Nbnxm::AtomLocality::NonLocal, copyBackNbForce);
Nbnxm::gpu_launch_cpyback(nbv->gpu_nbv, nbv->nbat.get(),
forceFlags, Nbnxm::AtomLocality::Local, copyBackNbForce);
wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
if (forceWork.haveGpuBondedWork && forceFlags.computeEnergy)
fr->gpuBonded->launchEnergyTransfer();
wallcycle_stop(wcycle, ewcLAUNCH_GPU);
if (forceFlags.stateChanged && inputrecNeedMutot(inputrec))
gmx_sumd(2*DIM, mu, cr);
ddBalanceRegionHandler.reopenRegionCpu();
for (i = 0; i < 2; i++)
for (j = 0; j < DIM; j++)
fr->mu_tot[i][j] = mu[i*DIM + j];
if (fr->efep == efepNO)
copy_rvec(fr->mu_tot[0], mu_tot);
for (j = 0; j < DIM; j++)
(1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] +
lambda[efptCOUL]*fr->mu_tot[1][j];
/* Reset energies */
reset_enerdata(enerd);
/* Clear the shift forces */
// TODO: This should be linked to the shift force buffer in use, or cleared before use instead
for (gmx::RVec &elem : fr->shiftForces)
elem = { 0.0_real, 0.0_real, 0.0_real };
if (DOMAINDECOMP(cr) && !thisRankHasDuty(cr, DUTY_PME))
wallcycle_start(wcycle, ewcPPDURINGPME);
dd_force_flop_start(cr->dd, nrnb);
wallcycle_start(wcycle, ewcROT);
do_rotation(cr, enforcedRotation, box, as_rvec_array(x.unpaddedArrayRef().data()), t, step, forceFlags.doNeighborSearch);
wallcycle_stop(wcycle, ewcROT);
/* Start the force cycle counter.
 * Note that a different counter is used for dynamic load balancing.
wallcycle_start(wcycle, ewcFORCE);
// Set up and clear force outputs.
// We use std::move to keep the compiler happy; it has no effect.
ForceOutputs forceOut = setupForceOutputs(fr, pull_work, *inputrec, std::move(force), forceFlags, wcycle);
/* We calculate the non-bonded forces, when done on the CPU, here.
 * We do this before calling do_force_lowlevel, because in that
 * function, the listed forces are calculated before PME, which
 * does communication. With this order, non-bonded and listed
 * force calculation imbalance can be balanced out by the domain
 * decomposition load balancing.
do_nb_verlet(fr, ic, enerd, forceFlags, Nbnxm::InteractionLocality::Local, enbvClearFYes,
step, nrnb, wcycle);
if (fr->efep != efepNO)
/* Calculate the local and non-local free energy interactions here.
 * Happens here on the CPU both with and without GPU.
nbv->dispatchFreeEnergyKernel(Nbnxm::InteractionLocality::Local,
fr, as_rvec_array(x.unpaddedArrayRef().data()), &forceOut.forceWithShiftForces(), *mdatoms,
inputrec->fepvals, lambda.data(),
enerd, forceFlags, nrnb);
if (havePPDomainDecomposition(cr))
nbv->dispatchFreeEnergyKernel(Nbnxm::InteractionLocality::NonLocal,
fr, as_rvec_array(x.unpaddedArrayRef().data()), &forceOut.forceWithShiftForces(), *mdatoms,
inputrec->fepvals, lambda.data(),
enerd, forceFlags, nrnb);
if (havePPDomainDecomposition(cr))
do_nb_verlet(fr, ic, enerd, forceFlags, Nbnxm::InteractionLocality::NonLocal, enbvClearFNo,
step, nrnb, wcycle);
if (forceFlags.computeForces)
/* Add all the non-bonded forces to the normal force array.
 * This can be split into a local and a non-local part when overlapping
 * communication with calculation with domain decomposition.
wallcycle_stop(wcycle, ewcFORCE);
nbv->atomdata_add_nbat_f_to_f(Nbnxm::AtomLocality::All, forceOut.forceWithShiftForces().force());
wallcycle_start_nocount(wcycle, ewcFORCE);
/* If there are multiple fshift output buffers we need to reduce them */
if (forceFlags.computeVirial)
/* This is not in a subcounter because it takes a
   negligible and constant-sized amount of time */
nbnxn_atomdata_add_nbat_fshift_to_fshift(*nbv->nbat,
forceOut.forceWithShiftForces().shiftForces());
/* update QMMMrec, if necessary */
update_QMMMrec(cr, fr, as_rvec_array(x.unpaddedArrayRef().data()), mdatoms, box);
// TODO Force flags should include haveFreeEnergyWork for this domain
if (ddUsesGpuDirectCommunication &&
(forceWork.haveCpuBondedWork || (fr->efep != efepNO)))
/* Wait for non-local coordinate data to be copied from device */
nbv->wait_nonlocal_x_copy_D2H_done();
/* Compute the bonded and non-bonded energies and optionally forces */
do_force_lowlevel(fr, inputrec, &(top->idef),
cr, ms, nrnb, wcycle, mdatoms,
x, hist, &forceOut, enerd, fcd,
box, lambda.data(), graph, fr->mu_tot,
ddBalanceRegionHandler);
wallcycle_stop(wcycle, ewcFORCE);
computeSpecialForces(fplog, cr, inputrec, awh, enforcedRotation,
imdSession, pull_work, step, t, wcycle,
fr->forceProviders, box, x.unpaddedArrayRef(), mdatoms, lambda.data(),
forceFlags, &forceOut.forceWithVirial(), enerd,
ed, forceFlags.doNeighborSearch);
// Will store the number of cycles spent waiting for the GPU,
// which is later used in the DLB accounting.
float cycles_wait_gpu = 0;
auto &forceWithShiftForces = forceOut.forceWithShiftForces();
/* wait for non-local forces (or calculate in emulation mode) */
if (havePPDomainDecomposition(cr))
cycles_wait_gpu += Nbnxm::gpu_wait_finish_task(nbv->gpu_nbv,
forceFlags, Nbnxm::AtomLocality::NonLocal,
enerd->grpp.ener[egLJSR].data(),
enerd->grpp.ener[egCOULSR].data(),
forceWithShiftForces.shiftForces(),
wallcycle_start_nocount(wcycle, ewcFORCE);
do_nb_verlet(fr, ic, enerd, forceFlags, Nbnxm::InteractionLocality::NonLocal, enbvClearFYes,
step, nrnb, wcycle);
wallcycle_stop(wcycle, ewcFORCE);
if (useGpuFBufOps == BufferOpsUseGpu::True)
// TODO: move this into DomainLifetimeWorkload, including the second part of the condition
// The bonded and free energy CPU tasks can have non-local force contributions
// which are a dependency for the GPU force reduction.
bool haveNonLocalForceContribInCpuBuffer = forceWork.haveCpuBondedWork || (fr->efep != efepNO);
rvec *f = as_rvec_array(forceWithShiftForces.force().data());
if (haveNonLocalForceContribInCpuBuffer)
nbv->launch_copy_f_to_gpu(f, Nbnxm::AtomLocality::NonLocal);
nbv->atomdata_add_nbat_f_to_f_gpu(Nbnxm::AtomLocality::NonLocal,
nbv->getDeviceForces(),
pme_gpu_get_device_f(fr->pmedata),
pme_gpu_get_f_ready_synchronizer(fr->pmedata),
useGpuPmeFReduction, haveNonLocalForceContribInCpuBuffer);
nbv->launch_copy_f_from_gpu(f, Nbnxm::AtomLocality::NonLocal);
nbv->atomdata_add_nbat_f_to_f(Nbnxm::AtomLocality::NonLocal,
forceWithShiftForces.force());
if (fr->nbv->emulateGpu() && forceFlags.computeVirial)
nbnxn_atomdata_add_nbat_fshift_to_fshift(*nbv->nbat,
forceWithShiftForces.shiftForces());
const bool useGpuForcesHaloExchange = ddUsesGpuDirectCommunication && (useGpuFBufOps == BufferOpsUseGpu::True);
const bool useCpuPmeFReduction = thisRankHasDuty(cr, DUTY_PME) && !useGpuPmeFReduction;
// TODO: move this into DomainLifetimeWorkload, including the second part of the condition
const bool haveCpuLocalForces = (forceWork.haveSpecialForces || forceWork.haveCpuListedForceWork || useCpuPmeFReduction ||
(fr->efep != efepNO));
if (havePPDomainDecomposition(cr))
/* We are done with the CPU compute.
 * We will now communicate the non-local forces.
 * If we use a GPU this will overlap with GPU work, so in that case
 * we do not close the DD force balancing region here.
ddBalanceRegionHandler.closeAfterForceComputationCpu();
if (forceFlags.computeForces)
gmx::ArrayRef<gmx::RVec> force = forceOut.forceWithShiftForces().force();
rvec *f = as_rvec_array(force.data());
if (useGpuForcesHaloExchange)
if (haveCpuLocalForces)
nbv->launch_copy_f_to_gpu(f, Nbnxm::AtomLocality::Local);
bool accumulateHaloForces = haveCpuLocalForces;
gpuHaloExchange->communicateHaloForces(accumulateHaloForces);
if (useGpuFBufOps == BufferOpsUseGpu::True)
nbv->wait_for_gpu_force_reduction(Nbnxm::AtomLocality::NonLocal);
dd_move_f(cr->dd, &forceOut.forceWithShiftForces(), wcycle);
// With both nonbonded and PME offloaded to a GPU on the same rank, we use
// an alternating wait/reduction scheme.
bool alternateGpuWait = (!c_disableAlternatingWait && useGpuPme && bUseGPU && !DOMAINDECOMP(cr) &&
(useGpuFBufOps == BufferOpsUseGpu::False));
if (alternateGpuWait)
alternatePmeNbGpuWaitReduce(fr->nbv.get(), fr->pmedata, &forceOut, enerd,
forceFlags, pmeFlags, wcycle);
if (!alternateGpuWait && useGpuPme)
pme_gpu_wait_and_reduce(fr->pmedata, pmeFlags, wcycle, &forceOut.forceWithVirial(), enerd);
/* Wait for local GPU NB outputs on the non-alternating wait path */
if (!alternateGpuWait && bUseGPU)
/* Measured overhead on CUDA and OpenCL with(out) GPU sharing
 * is between 0.5 and 1.5 Mcycles. So 2 MCycles is an overestimate,
 * but even with a step of 0.1 ms the difference is less than 1%
const float gpuWaitApiOverheadMargin = 2e6F; /* cycles */
const float waitCycles =
Nbnxm::gpu_wait_finish_task(nbv->gpu_nbv,
forceFlags, Nbnxm::AtomLocality::Local,
enerd->grpp.ener[egLJSR].data(),
enerd->grpp.ener[egCOULSR].data(),
forceOut.forceWithShiftForces().shiftForces(),
if (ddBalanceRegionHandler.useBalancingRegion())
DdBalanceRegionWaitedForGpu waitedForGpu = DdBalanceRegionWaitedForGpu::yes;
if (forceFlags.computeForces && waitCycles <= gpuWaitApiOverheadMargin)
/* We measured few cycles; it could be that the kernel
 * and transfer finished earlier and there was no actual
 * wait time, only API call overhead.
 * Then the actual time could be anywhere between 0 and
 * cycles_wait_est. We will use half of cycles_wait_est.
waitedForGpu = DdBalanceRegionWaitedForGpu::no;
ddBalanceRegionHandler.closeAfterForceComputationGpu(cycles_wait_gpu, waitedForGpu);
if (fr->nbv->emulateGpu())
// NOTE: emulation kernel is not included in the balancing region,
// but emulation mode does not target performance anyway
wallcycle_start_nocount(wcycle, ewcFORCE);
do_nb_verlet(fr, ic, enerd, forceFlags, Nbnxm::InteractionLocality::Local,
DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes,
step, nrnb, wcycle);
wallcycle_stop(wcycle, ewcFORCE);
/* Do the nonbonded GPU (or emulation) force buffer reduction
 * on the non-alternating path. */
if (bUseOrEmulGPU && !alternateGpuWait)
gmx::ArrayRef<gmx::RVec> forceWithShift = forceOut.forceWithShiftForces().force();
if (useGpuFBufOps == BufferOpsUseGpu::True)
// Flag to specify whether the CPU force buffer has contributions to
// local atoms. This depends on whether there are CPU-based force tasks,
// or, when DD is active, whether the halo exchange has resulted in
// contributions from the non-local part.
const bool haveLocalForceContribInCpuBuffer = (haveCpuLocalForces || havePPDomainDecomposition(cr));
// TODO: move these steps as early as possible:
// - CPU f H2D should be as soon as all CPU-side forces are done
// - wait for force reduction does not need to block host (at least not here, it's sufficient to wait
//   before the next CPU task that consumes the forces: vsite spread or update)
// - copy is not performed if GPU force halo exchange is active, because it would overwrite the result
//   of the halo exchange. In that case the copy is instead performed above, before the exchange.
//   These should be unified.
rvec *f = as_rvec_array(forceWithShift.data());
if (haveLocalForceContribInCpuBuffer && !useGpuForcesHaloExchange)
nbv->launch_copy_f_to_gpu(f, Nbnxm::AtomLocality::Local);
if (useGpuForcesHaloExchange)
// Add a stream synchronization to satisfy a dependency
// for the local buffer ops on the result of GPU halo
// exchange, which operates in the non-local stream and
// writes to the local part of the force buffer.
// TODO improve this through use of an event - see Redmine #3093
nbv->stream_local_wait_for_nonlocal();
nbv->atomdata_add_nbat_f_to_f_gpu(Nbnxm::AtomLocality::Local,
nbv->getDeviceForces(),
pme_gpu_get_device_f(fr->pmedata),
pme_gpu_get_f_ready_synchronizer(fr->pmedata),
useGpuPmeFReduction, haveLocalForceContribInCpuBuffer);
nbv->launch_copy_f_from_gpu(f, Nbnxm::AtomLocality::Local);
nbv->wait_for_gpu_force_reduction(Nbnxm::AtomLocality::Local);
nbv->atomdata_add_nbat_f_to_f(Nbnxm::AtomLocality::Local, forceWithShift);
launchGpuEndOfStepTasks(nbv, fr->gpuBonded, fr->pmedata, enerd,
if (DOMAINDECOMP(cr))
dd_force_flop_stop(cr->dd, nrnb);
if (forceFlags.computeForces)
rvec *f = as_rvec_array(forceOut.forceWithShiftForces().force().data());
/* If we have NoVirSum forces, but we do not calculate the virial,
 * we sum fr->f_novirsum=forceOut.f later.
if (vsite && !(fr->haveDirectVirialContributions && !forceFlags.computeVirial))
rvec *fshift = as_rvec_array(forceOut.forceWithShiftForces().shiftForces().data());
spread_vsite_f(vsite, as_rvec_array(x.unpaddedArrayRef().data()), f, fshift, FALSE, nullptr, nrnb,
&top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr, wcycle);
if (forceFlags.computeVirial)
/* Calculation of the virial must be done after vsites! */
calc_virial(0, mdatoms->homenr, as_rvec_array(x.unpaddedArrayRef().data()),
forceOut.forceWithShiftForces(),
vir_force, graph, box, nrnb, fr, inputrec->ePBC);
if (PAR(cr) && !thisRankHasDuty(cr, DUTY_PME))
/* In case of node-splitting, the PP nodes receive the long-range
 * forces, virial and energy from the PME nodes here.
pme_receive_force_ener(cr, &forceOut.forceWithVirial(), enerd, wcycle);
if (forceFlags.computeForces)
post_process_forces(cr, step, nrnb, wcycle,
top, box, as_rvec_array(x.unpaddedArrayRef().data()), &forceOut,
vir_force, mdatoms, graph, fr, vsite,
if (forceFlags.computeEnergy)
/* Sum the potential energy terms from group contributions */
sum_epot(&(enerd->grpp), enerd->term);
if (!EI_TPI(inputrec->eI))
checkPotentialEnergyValidity(step, *enerd, *inputrec);
/* In case we don't have constraints and are using GPUs, the next balancing
 * region starts here.
 * Some "special" work at the end of do_force_cuts?, such as vsite spread,
 * virial calculation and COM pulling, is thus not included in
 * the balance timing, which is ok as most tasks do communication.
ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::no);