/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013-2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gromacs/awh/awh.h"
#include "gromacs/domdec/dlbtiming.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/domdec/gpuhaloexchange.h"
#include "gromacs/domdec/partition.h"
#include "gromacs/essentialdynamics/edsam.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/ewald/pme_pp.h"
#include "gromacs/ewald/pme_pp_comm_gpu.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nonbonded/nb_free_energy.h"
#include "gromacs/gmxlib/nonbonded/nb_kernel.h"
#include "gromacs/gmxlib/nonbonded/nonbonded.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/imd/imd.h"
#include "gromacs/listed_forces/disre.h"
#include "gromacs/listed_forces/gpubonded.h"
#include "gromacs/listed_forces/listed_forces.h"
#include "gromacs/listed_forces/manage_threading.h"
#include "gromacs/listed_forces/orires.h"
#include "gromacs/math/arrayrefwithpadding.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vecdump.h"
#include "gromacs/mdlib/calcmu.h"
#include "gromacs/mdlib/calcvir.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdlib/enerdata_utils.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/forcerec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/qmmm.h"
#include "gromacs/mdlib/update.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/enerdata.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/iforceprovider.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/mdtypes/state_propagator_data_gpu.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/nbnxm_gpu.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/mshift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/pulling/pull_rotation.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/wallcyclereporting.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/fixedcapacityvector.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/strconvert.h"
#include "gromacs/utility/sysinfo.h"
using gmx::AtomLocality;
using gmx::DomainLifetimeWorkload;
using gmx::ForceOutputs;
using gmx::InteractionLocality;
using gmx::SimulationWorkload;
using gmx::StepWorkload;
// TODO: this environment variable allows us to verify before release
// that on less common architectures the total cost of polling is not larger than
// a blocking wait (so polling does not introduce overhead when the static
// PME-first ordering would suffice).
static const bool c_disableAlternatingWait = (getenv("GMX_DISABLE_ALTERNATING_GPU_WAIT") != nullptr);
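// For example, launching mdrun with GMX_DISABLE_ALTERNATING_GPU_WAIT=1 set in
// the environment skips alternatePmeNbGpuWaitReduce() below and falls back to
// the static ordering that waits for PME first and then for the nonbondeds.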
static void sum_forces(rvec f[], gmx::ArrayRef<const gmx::RVec> forceToAdd)
{
    const int end = forceToAdd.size();

    int gmx_unused nt = gmx_omp_nthreads_get(emntDefault);
#pragma omp parallel for num_threads(nt) schedule(static)
    for (int i = 0; i < end; i++)
    {
        rvec_inc(f[i], forceToAdd[i]);
    }
}
static void calc_virial(int                              start,
                        int                              homenr,
                        const rvec                       x[],
                        const gmx::ForceWithShiftForces& forceWithShiftForces,
                        tensor                           vir_part,
                        const t_graph*                   graph,
                        const matrix                     box,
                        t_nrnb*                          nrnb,
                        const t_forcerec*                fr,
                        PbcType                          pbcType)
{
    /* The short-range virial from surrounding boxes */
    const rvec* fshift = as_rvec_array(forceWithShiftForces.shiftForces().data());
    calc_vir(SHIFTS, fr->shift_vec, fshift, vir_part, pbcType == PbcType::Screw, box);
    inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);

    /* Calculate partial virial, for local atoms only, based on short range.
     * Total virial is computed in global_stat, called from do_md
     */
    const rvec* f = as_rvec_array(forceWithShiftForces.force().data());
    f_calc_vir(start, start + homenr, x, f, vir_part, graph, box);
    inc_nrnb(nrnb, eNR_VIRIAL, homenr);

    if (debug)
    {
        pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
    }
}
static void pull_potential_wrapper(const t_commrec*               cr,
                                   const t_inputrec*              ir,
                                   const matrix                   box,
                                   gmx::ArrayRef<const gmx::RVec> x,
                                   gmx::ForceWithVirial*          force,
                                   const t_mdatoms*               mdatoms,
                                   gmx_enerdata_t*                enerd,
                                   pull_t*                        pull_work,
                                   const real*                    lambda,
                                   double                         t,
                                   gmx_wallcycle_t                wcycle)
{
    t_pbc pbc;
    real  dvdl;

    /* Calculate the center of mass forces, this requires communication,
     * which is why pull_potential is called close to other communication.
     */
    wallcycle_start(wcycle, ewcPULLPOT);
    set_pbc(&pbc, ir->pbcType, box);
    dvdl = 0;
    enerd->term[F_COM_PULL] += pull_potential(pull_work, mdatoms, &pbc, cr, t, lambda[efptRESTRAINT],
                                              as_rvec_array(x.data()), force, &dvdl);
    enerd->dvdl_lin[efptRESTRAINT] += dvdl;
    wallcycle_stop(wcycle, ewcPULLPOT);
}
static void pme_receive_force_ener(t_forcerec*           fr,
                                   const t_commrec*      cr,
                                   gmx::ForceWithVirial* forceWithVirial,
                                   gmx_enerdata_t*       enerd,
                                   bool                  useGpuPmePpComms,
                                   bool                  receivePmeForceToGpu,
                                   gmx_wallcycle_t       wcycle)
{
    real  e_q, e_lj, dvdl_q, dvdl_lj;
    float cycles_ppdpme, cycles_seppme;

    cycles_ppdpme = wallcycle_stop(wcycle, ewcPPDURINGPME);
    dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);

    /* In case of node-splitting, the PP nodes receive the long-range
     * forces, virial and energy from the PME nodes here.
     */
    wallcycle_start(wcycle, ewcPP_PMEWAITRECVF);
    dvdl_q  = 0;
    dvdl_lj = 0;
    gmx_pme_receive_f(fr->pmePpCommGpu.get(), cr, forceWithVirial, &e_q, &e_lj, &dvdl_q, &dvdl_lj,
                      useGpuPmePpComms, receivePmeForceToGpu, &cycles_seppme);
    enerd->term[F_COUL_RECIP] += e_q;
    enerd->term[F_LJ_RECIP] += e_lj;
    enerd->dvdl_lin[efptCOUL] += dvdl_q;
    enerd->dvdl_lin[efptVDW] += dvdl_lj;

    if (wcycle)
    {
        dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);
    }
    wallcycle_stop(wcycle, ewcPP_PMEWAITRECVF);
}
static void print_large_forces(FILE*            fp,
                               const t_mdatoms* md,
                               const t_commrec* cr,
                               int64_t          step,
                               real             forceTolerance,
                               const rvec*      x,
                               const rvec*      f)
{
    real       force2Tolerance = gmx::square(forceTolerance);
    gmx::index numNonFinite    = 0;
    for (int i = 0; i < md->homenr; i++)
    {
        real force2    = norm2(f[i]);
        bool nonFinite = !std::isfinite(force2);
        if (force2 >= force2Tolerance || nonFinite)
        {
            fprintf(fp, "step %" PRId64 " atom %6d x %8.3f %8.3f %8.3f force %12.5e\n", step,
                    ddglatnr(cr->dd, i), x[i][XX], x[i][YY], x[i][ZZ], std::sqrt(force2));
        }
        if (nonFinite)
        {
            numNonFinite++;
        }
    }
    if (numNonFinite > 0)
    {
        /* Note that with MPI this fatal call on one rank might interrupt
         * the printing on other ranks. But we can only avoid that with
         * an expensive MPI barrier that we would need at each step.
         */
        gmx_fatal(FARGS, "At step %" PRId64 " detected non-finite forces on %td atoms", step, numNonFinite);
    }
}
static void post_process_forces(const t_commrec*      cr,
                                int64_t               step,
                                t_nrnb*               nrnb,
                                gmx_wallcycle_t       wcycle,
                                const gmx_localtop_t* top,
                                const matrix          box,
                                const rvec            x[],
                                ForceOutputs*         forceOutputs,
                                tensor                vir_force,
                                const t_mdatoms*      mdatoms,
                                const t_graph*        graph,
                                const t_forcerec*     fr,
                                const gmx_vsite_t*    vsite,
                                const StepWorkload&   stepWork)
{
    rvec* f = as_rvec_array(forceOutputs->forceWithShiftForces().force().data());

    if (fr->haveDirectVirialContributions)
    {
        auto& forceWithVirial = forceOutputs->forceWithVirial();
        rvec* fDirectVir      = as_rvec_array(forceWithVirial.force_.data());

        if (vsite)
        {
            /* Spread the mesh force on virtual sites to the other particles...
             * This is parallellized. MPI communication is performed
             * if the constructing atoms aren't local.
             */
            matrix virial = { { 0 } };
            spread_vsite_f(vsite, x, fDirectVir, nullptr, stepWork.computeVirial, virial, nrnb,
                           &top->idef, fr->pbcType, fr->bMolPBC, graph, box, cr, wcycle);
            forceWithVirial.addVirialContribution(virial);
        }

        if (stepWork.computeVirial)
        {
            /* Now add the forces, this is local */
            sum_forces(f, forceWithVirial.force_);

            /* Add the direct virial contributions */
            GMX_ASSERT(
                    forceWithVirial.computeVirial_,
                    "forceWithVirial should request virial computation when we request the virial");
            m_add(vir_force, forceWithVirial.getVirial(), vir_force);

            if (debug)
            {
                pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
            }
        }
    }

    if (fr->print_force >= 0)
    {
        print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
    }
}
static void do_nb_verlet(t_forcerec*                fr,
                         const interaction_const_t* ic,
                         gmx_enerdata_t*            enerd,
                         const StepWorkload&        stepWork,
                         const InteractionLocality  ilocality,
                         const int                  clearF,
                         const int64_t              step,
                         t_nrnb*                    nrnb,
                         gmx_wallcycle_t            wcycle)
{
    if (!stepWork.computeNonbondedForces)
    {
        /* skip non-bonded calculation */
        return;
    }

    nonbonded_verlet_t* nbv = fr->nbv.get();

    /* GPU kernel launch overhead is already timed separately */
    if (fr->cutoff_scheme != ecutsVERLET)
    {
        gmx_incons("Invalid cut-off scheme passed!");
    }

    if (!nbv->useGpu())
    {
        /* When dynamic pair-list pruning is requested, we need to prune
         * at nstlistPrune steps.
         */
        if (nbv->isDynamicPruningStepCpu(step))
        {
            /* Prune the pair-list beyond fr->ic->rlistPrune using
             * the current coordinates of the atoms.
             */
            wallcycle_sub_start(wcycle, ewcsNONBONDED_PRUNING);
            nbv->dispatchPruneKernelCpu(ilocality, fr->shift_vec);
            wallcycle_sub_stop(wcycle, ewcsNONBONDED_PRUNING);
        }
    }

    nbv->dispatchNonbondedKernel(ilocality, *ic, stepWork, clearF, *fr, enerd, nrnb);
}
static inline void clear_rvecs_omp(int n, rvec v[])
{
    int nth = gmx_omp_nthreads_get_simple_rvec_task(emntDefault, n);

    /* Note that we would like to avoid this conditional by putting it
     * into the omp pragma instead, but then we still take the full
     * omp parallel for overhead (at least with gcc5).
     */
    if (nth == 1)
    {
        for (int i = 0; i < n; i++)
        {
            clear_rvec(v[i]);
        }
    }
    else
    {
#pragma omp parallel for num_threads(nth) schedule(static)
        for (int i = 0; i < n; i++)
        {
            clear_rvec(v[i]);
        }
    }
}
/*! \brief Return an estimate of the average kinetic energy or 0 when unreliable
 *
 * \param groupOptions  Group options, containing T-coupling options
 */
static real averageKineticEnergyEstimate(const t_grpopts& groupOptions)
{
    real nrdfCoupled   = 0;
    real nrdfUncoupled = 0;
    real kineticEnergy = 0;
    for (int g = 0; g < groupOptions.ngtc; g++)
    {
        if (groupOptions.tau_t[g] >= 0)
        {
            nrdfCoupled += groupOptions.nrdf[g];
            kineticEnergy += groupOptions.nrdf[g] * 0.5 * groupOptions.ref_t[g] * BOLTZ;
        }
        else
        {
            nrdfUncoupled += groupOptions.nrdf[g];
        }
    }

    /* This conditional with > also catches nrdf=0 */
    if (nrdfCoupled > nrdfUncoupled)
    {
        return kineticEnergy * (nrdfCoupled + nrdfUncoupled) / nrdfCoupled;
    }
    else
    {
        return 0;
    }
}
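/* A rough worked example of the estimate above (numbers assumed for
 * illustration only): a single coupled group with nrdf = 3e5 degrees of
 * freedom at ref_t = 300 K contributes
 *   0.5 * 3e5 * BOLTZ * 300 ~= 3.7e5 kJ/mol   (BOLTZ ~= 0.00831 kJ/(mol K)),
 * and the (nrdfCoupled + nrdfUncoupled)/nrdfCoupled factor extrapolates the
 * coupled-group estimate to any uncoupled degrees of freedom.
 */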
/*! \brief This routine checks that the potential energy is finite.
 *
 * Always checks that the potential energy is finite. If step equals
 * inputrec.init_step also checks that the magnitude of the potential energy
 * is reasonable. Terminates with a fatal error when a check fails.
 * Note that passing this check does not guarantee finite forces,
 * since those use slightly different arithmetics. But in most cases
 * there is just a narrow coordinate range where forces are not finite
 * and energies are finite.
 *
 * \param[in] step     The step number, used for checking and printing
 * \param[in] enerd    The energy data; the non-bonded group energies need to be added to
 *                     enerd.term[F_EPOT] before calling this routine
 * \param[in] inputrec The input record
 */
static void checkPotentialEnergyValidity(int64_t step, const gmx_enerdata_t& enerd, const t_inputrec& inputrec)
{
    /* Threshold valid for comparing absolute potential energy against
     * the kinetic energy. Normally one should not consider absolute
     * potential energy values, but with a factor of one million
     * we should never get false positives.
     */
    constexpr real c_thresholdFactor = 1e6;

    bool energyIsNotFinite    = !std::isfinite(enerd.term[F_EPOT]);
    real averageKineticEnergy = 0;
    /* We only check for large potential energy at the initial step,
     * because that is by far the most likely step for this to occur
     * and because computing the average kinetic energy is not free.
     * Note: nstcalcenergy >> 1 often does not allow to catch large energies
     * before they become NaN.
     */
    if (step == inputrec.init_step && EI_DYNAMICS(inputrec.eI))
    {
        averageKineticEnergy = averageKineticEnergyEstimate(inputrec.opts);
    }

    if (energyIsNotFinite
        || (averageKineticEnergy > 0 && enerd.term[F_EPOT] > c_thresholdFactor * averageKineticEnergy))
    {
        gmx_fatal(
                FARGS,
                "Step %" PRId64
                ": The total potential energy is %g, which is %s. The LJ and electrostatic "
                "contributions to the energy are %g and %g, respectively. A %s potential energy "
                "can be caused by overlapping interactions in bonded interactions or very large%s "
                "coordinate values. Usually this is caused by a badly- or non-equilibrated initial "
                "configuration, incorrect interactions or parameters in the topology.",
                step, enerd.term[F_EPOT], energyIsNotFinite ? "not finite" : "extremely high",
                enerd.term[F_LJ], enerd.term[F_COUL_SR],
                energyIsNotFinite ? "non-finite" : "very high", energyIsNotFinite ? " or NaN" : "");
    }
}
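/* Continuing the illustration above: with an average kinetic energy estimate
 * of ~3.7e5 kJ/mol, the c_thresholdFactor check only triggers at the initial
 * step once the potential energy exceeds ~3.7e11 kJ/mol, so valid but
 * high-energy starting configurations are very unlikely to be rejected.
 */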
/*! \brief Return true if there are special forces computed this step.
 *
 * The conditionals exactly correspond to those in computeSpecialForces().
 */
static bool haveSpecialForces(const t_inputrec&          inputrec,
                              const gmx::ForceProviders& forceProviders,
                              const pull_t*              pull_work,
                              const bool                 computeForces,
                              const gmx_edsam*           ed)
{

    return ((computeForces && forceProviders.hasForceProvider()) || // forceProviders
            (inputrec.bPull && pull_have_potential(pull_work)) ||   // pull
            inputrec.bRot ||                                        // enforced rotation
            (ed != nullptr) ||                                      // flooding
            (inputrec.bIMD && computeForces));                      // IMD
}
/*! \brief Compute forces and/or energies for special algorithms
 *
 * The intention is to collect all calls to algorithms that compute
 * forces on local atoms only and that do not contribute to the local
 * virial sum (but add their virial contribution separately).
 * Eventually these should likely all become ForceProviders.
 * Within this function the intention is to have algorithms that do
 * global communication at the end, so global barriers within the MD loop
 * are as close together as possible.
 *
 * \param[in]     fplog             The log file
 * \param[in]     cr                The communication record
 * \param[in]     inputrec          The input record
 * \param[in]     awh               The Awh module (nullptr if none in use).
 * \param[in]     enforcedRotation  Enforced rotation module.
 * \param[in]     imdSession        The IMD session
 * \param[in]     pull_work         The pull work structure.
 * \param[in]     step              The current MD step
 * \param[in]     t                 The current time
 * \param[in,out] wcycle            Wallcycle accounting struct
 * \param[in,out] forceProviders    Pointer to a list of force providers
 * \param[in]     box               The unit cell
 * \param[in]     x                 The coordinates
 * \param[in]     mdatoms           Per atom properties
 * \param[in]     lambda            Array of free-energy lambda values
 * \param[in]     stepWork          Step schedule flags
 * \param[in,out] forceWithVirial   Force and virial buffers
 * \param[in,out] enerd             Energy buffer
 * \param[in,out] ed                Essential dynamics pointer
 * \param[in]     didNeighborSearch Tells if we did neighbor searching this step, used for ED sampling
 *
 * \todo Remove didNeighborSearch, which is used incorrectly.
 * \todo Convert all other algorithms called here to ForceProviders.
 */
static void computeSpecialForces(FILE*                          fplog,
                                 const t_commrec*               cr,
                                 const t_inputrec*              inputrec,
                                 gmx::Awh*                      awh,
                                 gmx_enfrot*                    enforcedRotation,
                                 gmx::ImdSession*               imdSession,
                                 pull_t*                        pull_work,
                                 int64_t                        step,
                                 double                         t,
                                 gmx_wallcycle_t                wcycle,
                                 gmx::ForceProviders*           forceProviders,
                                 const matrix                   box,
                                 gmx::ArrayRef<const gmx::RVec> x,
                                 const t_mdatoms*               mdatoms,
                                 real*                          lambda,
                                 const StepWorkload&            stepWork,
                                 gmx::ForceWithVirial*          forceWithVirial,
                                 gmx_enerdata_t*                enerd,
                                 gmx_edsam*                     ed,
                                 bool                           didNeighborSearch)
{
    /* NOTE: Currently all ForceProviders only provide forces.
     * When they also provide energies, remove this conditional.
     */
    if (stepWork.computeForces)
    {
        gmx::ForceProviderInput  forceProviderInput(x, *mdatoms, t, box, *cr);
        gmx::ForceProviderOutput forceProviderOutput(forceWithVirial, enerd);

        /* Collect forces from modules */
        forceProviders->calculateForces(forceProviderInput, &forceProviderOutput);
    }

    if (inputrec->bPull && pull_have_potential(pull_work))
    {
        pull_potential_wrapper(cr, inputrec, box, x, forceWithVirial, mdatoms, enerd, pull_work,
                               lambda, t, wcycle);

        if (awh)
        {
            enerd->term[F_COM_PULL] += awh->applyBiasForcesAndUpdateBias(
                    inputrec->pbcType, *mdatoms, box, forceWithVirial, t, step, wcycle, fplog);
        }
    }

    rvec* f = as_rvec_array(forceWithVirial->force_.data());

    /* Add the forces from enforced rotation potentials (if any) */
    if (inputrec->bRot)
    {
        wallcycle_start(wcycle, ewcROTadd);
        enerd->term[F_COM_PULL] += add_rot_forces(enforcedRotation, f, cr, step, t);
        wallcycle_stop(wcycle, ewcROTadd);
    }

    if (ed)
    {
        /* Note that since init_edsam() is called after the initialization
         * of forcerec, edsam doesn't request the noVirSum force buffer.
         * Thus if no other algorithm (e.g. PME) requires it, the forces
         * here will contribute to the virial.
         */
        do_flood(cr, inputrec, as_rvec_array(x.data()), f, ed, box, step, didNeighborSearch);
    }

    /* Add forces from interactive molecular dynamics (IMD), if any */
    if (inputrec->bIMD && stepWork.computeForces)
    {
        imdSession->applyForces(f);
    }
}
/*! \brief Makes PME flags from StepWorkload data.
 *
 * \param[in]  stepWork     Step schedule flags
 * \returns                 PME flags
 */
static int makePmeFlags(const StepWorkload& stepWork)
{
    return GMX_PME_SPREAD | GMX_PME_SOLVE | (stepWork.computeVirial ? GMX_PME_CALC_ENER_VIR : 0)
           | (stepWork.computeEnergy ? GMX_PME_CALC_ENER_VIR : 0)
           | (stepWork.computeForces ? GMX_PME_CALC_F : 0);
}
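/* For example, a step that computes forces and energies but not the virial
 * produces
 *   GMX_PME_SPREAD | GMX_PME_SOLVE | GMX_PME_CALC_ENER_VIR | GMX_PME_CALC_F;
 * note that the energy and virial requests map onto the same
 * GMX_PME_CALC_ENER_VIR bit.
 */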
/*! \brief Launch the prepare_step and spread stages of PME GPU.
 *
 * \param[in]  pmedata        The PME structure
 * \param[in]  box            The box matrix
 * \param[in]  stepWork       Step schedule flags
 * \param[in]  pmeFlags       PME flags
 * \param[in]  xReadyOnDevice Event synchronizer indicating that the coordinates are ready in
 *                            the device memory.
 * \param[in]  wcycle         The wallcycle structure
 */
static inline void launchPmeGpuSpread(gmx_pme_t*            pmedata,
                                      const matrix          box,
                                      const StepWorkload&   stepWork,
                                      int                   pmeFlags,
                                      GpuEventSynchronizer* xReadyOnDevice,
                                      gmx_wallcycle_t       wcycle)
{
    pme_gpu_prepare_computation(pmedata, stepWork.haveDynamicBox, box, wcycle, pmeFlags,
                                stepWork.useGpuPmeFReduction);
    pme_gpu_launch_spread(pmedata, xReadyOnDevice, wcycle);
}
/*! \brief Launch the FFT and gather stages of PME GPU
 *
 * This function only implements setting the output forces (no accumulation).
 *
 * \param[in]  pmedata        The PME structure
 * \param[in]  wcycle         The wallcycle structure
 */
static void launchPmeGpuFftAndGather(gmx_pme_t* pmedata, gmx_wallcycle_t wcycle)
{
    pme_gpu_launch_complex_transforms(pmedata, wcycle);
    pme_gpu_launch_gather(pmedata, wcycle, PmeForceOutputHandling::Set);
}
/*! \brief
 *  Polling wait for either of the PME or nonbonded GPU tasks.
 *
 * Instead of a static order in waiting for GPU tasks, this function
 * polls checking which of the two tasks completes first, and does the
 * associated force buffer reduction overlapped with the other task.
 * By doing that, unlike static scheduling order, it can always overlap
 * one of the reductions, regardless of the GPU task completion order.
 *
 * \param[in]     nbv              Nonbonded verlet structure
 * \param[in,out] pmedata          PME module data
 * \param[in,out] forceOutputs     Output buffer for the forces and virial
 * \param[in,out] enerd            Energy data structure results are reduced into
 * \param[in]     stepWork         Step schedule flags
 * \param[in]     pmeFlags         PME flags
 * \param[in]     wcycle           The wallcycle structure
 */
static void alternatePmeNbGpuWaitReduce(nonbonded_verlet_t* nbv,
                                        gmx_pme_t*          pmedata,
                                        gmx::ForceOutputs*  forceOutputs,
                                        gmx_enerdata_t*     enerd,
                                        const StepWorkload& stepWork,
                                        int                 pmeFlags,
                                        gmx_wallcycle_t     wcycle)
{
    bool isPmeGpuDone = false;
    bool isNbGpuDone  = false;

    gmx::ForceWithShiftForces& forceWithShiftForces = forceOutputs->forceWithShiftForces();
    gmx::ForceWithVirial&      forceWithVirial      = forceOutputs->forceWithVirial();

    gmx::ArrayRef<const gmx::RVec> pmeGpuForces;

    while (!isPmeGpuDone || !isNbGpuDone)
    {
        if (!isPmeGpuDone)
        {
            GpuTaskCompletion completionType =
                    (isNbGpuDone) ? GpuTaskCompletion::Wait : GpuTaskCompletion::Check;
            isPmeGpuDone = pme_gpu_try_finish_task(pmedata, pmeFlags, wcycle, &forceWithVirial,
                                                   enerd, completionType);
        }

        if (!isNbGpuDone)
        {
            GpuTaskCompletion completionType =
                    (isPmeGpuDone) ? GpuTaskCompletion::Wait : GpuTaskCompletion::Check;
            isNbGpuDone = Nbnxm::gpu_try_finish_task(
                    nbv->gpu_nbv, stepWork, AtomLocality::Local, enerd->grpp.ener[egLJSR].data(),
                    enerd->grpp.ener[egCOULSR].data(), forceWithShiftForces.shiftForces(),
                    completionType, wcycle);

            if (isNbGpuDone)
            {
                nbv->atomdata_add_nbat_f_to_f(AtomLocality::Local, forceWithShiftForces.force());
            }
        }
    }
}
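/* Note that the polling loop above degenerates into a blocking wait as soon
 * as one task completes: e.g. when PME finishes first, the next nonbonded
 * query is issued with GpuTaskCompletion::Wait, and by then the PME force
 * reduction has already been overlapped with the remaining nonbonded work.
 */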
/*! \brief Set up the different force buffers; also does clearing.
 *
 * \param[in] fr        force record pointer
 * \param[in] pull_work The pull work object.
 * \param[in] inputrec  input record
 * \param[in] force     force array
 * \param[in] stepWork  Step schedule flags
 * \param[out] wcycle   wallcycle recording structure
 *
 * \returns             Cleared force output structure
 */
static ForceOutputs setupForceOutputs(t_forcerec*                         fr,
                                      pull_t*                             pull_work,
                                      const t_inputrec&                   inputrec,
                                      gmx::ArrayRefWithPadding<gmx::RVec> force,
                                      const StepWorkload&                 stepWork,
                                      gmx_wallcycle_t                     wcycle)
{
    wallcycle_sub_start(wcycle, ewcsCLEAR_FORCE_BUFFER);

    /* NOTE: We assume fr->shiftForces is all zeros here */
    gmx::ForceWithShiftForces forceWithShiftForces(force, stepWork.computeVirial, fr->shiftForces);

    if (stepWork.computeForces)
    {
        /* Clear the short- and long-range forces */
        clear_rvecs_omp(fr->natoms_force_constr, as_rvec_array(forceWithShiftForces.force().data()));
    }

    /* If we need to compute the virial, we might need a separate
     * force buffer for algorithms for which the virial is calculated
     * directly, such as PME. Otherwise, forceWithVirial uses the
     * same force (f in legacy calls) buffer as other algorithms.
     */
    const bool useSeparateForceWithVirialBuffer =
            (stepWork.computeForces && (stepWork.computeVirial && fr->haveDirectVirialContributions));
    /* forceWithVirial uses the local atom range only */
    gmx::ForceWithVirial forceWithVirial(useSeparateForceWithVirialBuffer
                                                 ? fr->forceBufferForDirectVirialContributions
                                                 : force.unpaddedArrayRef(),
                                         stepWork.computeVirial);

    if (useSeparateForceWithVirialBuffer)
    {
        /* TODO: update comment
         * We only compute forces on local atoms. Note that vsites can
         * spread to non-local atoms, but that part of the buffer is
         * cleared separately in the vsite spreading code.
         */
        clear_rvecs_omp(forceWithVirial.force_.size(), as_rvec_array(forceWithVirial.force_.data()));
    }

    if (inputrec.bPull && pull_have_constraint(pull_work))
    {
        clear_pull_forces(pull_work);
    }

    wallcycle_sub_stop(wcycle, ewcsCLEAR_FORCE_BUFFER);

    return ForceOutputs(forceWithShiftForces, forceWithVirial);
}
/*! \brief Set up flags that have the lifetime of the domain indicating what type of work is there to compute.
 */
static DomainLifetimeWorkload setupDomainLifetimeWorkload(const t_inputrec&         inputrec,
                                                          const t_forcerec&         fr,
                                                          const pull_t*             pull_work,
                                                          const gmx_edsam*          ed,
                                                          const t_idef&             idef,
                                                          const t_fcdata&           fcd,
                                                          const t_mdatoms&          mdatoms,
                                                          const SimulationWorkload& simulationWork,
                                                          const StepWorkload&       stepWork)
{
    DomainLifetimeWorkload domainWork;
    // Note that haveSpecialForces is constant over the whole run
    domainWork.haveSpecialForces =
            haveSpecialForces(inputrec, *fr.forceProviders, pull_work, stepWork.computeForces, ed);
    domainWork.haveCpuBondedWork      = haveCpuBondeds(fr);
    domainWork.haveGpuBondedWork      = ((fr.gpuBonded != nullptr) && fr.gpuBonded->haveInteractions());
    domainWork.haveRestraintsWork     = haveRestraints(idef, fcd);
    domainWork.haveCpuListedForceWork = haveCpuListedForces(fr, idef, fcd);
    // Note that haveFreeEnergyWork is constant over the whole run
    domainWork.haveFreeEnergyWork = (fr.efep != efepNO && mdatoms.nPerturbed != 0);
    // We assume we have local force work if there are CPU
    // force tasks including PME or nonbondeds.
    domainWork.haveCpuLocalForceWork =
            domainWork.haveSpecialForces || domainWork.haveCpuListedForceWork
            || domainWork.haveFreeEnergyWork || simulationWork.useCpuNonbonded || simulationWork.useCpuPme
            || simulationWork.haveEwaldSurfaceContribution || inputrec.nwall > 0;

    return domainWork;
}
/*! \brief Set up force flag struct from the force bitmask.
 *
 * \param[in] legacyFlags    Force bitmask flags used to construct the new flags
 * \param[in] isNonbondedOn  Global override, if false forces to turn off all nonbonded calculation.
 * \param[in] simulationWork Simulation workload description.
 * \param[in] rankHasPmeDuty If this rank computes PME.
 *
 * \returns New StepWorkload description.
 */
static StepWorkload setupStepWorkload(const int                 legacyFlags,
                                      const bool                isNonbondedOn,
                                      const SimulationWorkload& simulationWork,
                                      const bool                rankHasPmeDuty)
{
    StepWorkload flags;
    flags.stateChanged           = ((legacyFlags & GMX_FORCE_STATECHANGED) != 0);
    flags.haveDynamicBox         = ((legacyFlags & GMX_FORCE_DYNAMICBOX) != 0);
    flags.doNeighborSearch       = ((legacyFlags & GMX_FORCE_NS) != 0);
    flags.computeVirial          = ((legacyFlags & GMX_FORCE_VIRIAL) != 0);
    flags.computeEnergy          = ((legacyFlags & GMX_FORCE_ENERGY) != 0);
    flags.computeForces          = ((legacyFlags & GMX_FORCE_FORCES) != 0);
    flags.computeListedForces    = ((legacyFlags & GMX_FORCE_LISTED) != 0);
    flags.computeNonbondedForces = ((legacyFlags & GMX_FORCE_NONBONDED) != 0) && isNonbondedOn;
    flags.computeDhdl            = ((legacyFlags & GMX_FORCE_DHDL) != 0);

    if (simulationWork.useGpuBufferOps)
    {
        GMX_ASSERT(simulationWork.useGpuNonbonded,
                   "Can only offload buffer ops if nonbonded computation is also offloaded");
    }
    flags.useGpuXBufferOps = simulationWork.useGpuBufferOps;
    // on virial steps the CPU reduction path is taken
    flags.useGpuFBufferOps    = simulationWork.useGpuBufferOps && !flags.computeVirial;
    flags.useGpuPmeFReduction = flags.useGpuFBufferOps
                                && (simulationWork.useGpuPme
                                    && (rankHasPmeDuty || simulationWork.useGpuPmePpCommunication));

    return flags;
}
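/* For example, a plain MD step that needs forces and energies but no virial
 * arrives with GMX_FORCE_FORCES and GMX_FORCE_ENERGY set in legacyFlags;
 * computeForces and computeEnergy then come out true while computeVirial
 * stays false, which keeps useGpuFBufferOps available when buffer ops are
 * offloaded.
 */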
/*! \brief Launch end-of-step GPU tasks: buffer clearing and rolling pruning.
 *
 * TODO: eliminate \p useGpuPmeOnThisRank when this is
 * incorporated in DomainLifetimeWorkload.
 */
static void launchGpuEndOfStepTasks(nonbonded_verlet_t*               nbv,
                                    gmx::GpuBonded*                   gpuBonded,
                                    gmx_pme_t*                        pmedata,
                                    gmx_enerdata_t*                   enerd,
                                    const gmx::MdrunScheduleWorkload& runScheduleWork,
                                    bool                              useGpuPmeOnThisRank,
                                    int64_t                           step,
                                    gmx_wallcycle_t                   wcycle)
{
    if (runScheduleWork.simulationWork.useGpuNonbonded)
    {
        /* Launch pruning before buffer clearing because the API overhead of the
         * clear kernel launches can leave the GPU idle while it could be running
         * the non-bonded task.
         */
        if (nbv->isDynamicPruningStepGpu(step))
        {
            nbv->dispatchPruneKernelGpu(step);
        }

        /* now clear the GPU outputs while we finish the step on the CPU */
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        Nbnxm::gpu_clear_outputs(nbv->gpu_nbv, runScheduleWork.stepWork.computeVirial);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
    }

    if (useGpuPmeOnThisRank)
    {
        pme_gpu_reinit_computation(pmedata, wcycle);
    }

    if (runScheduleWork.domainWork.haveGpuBondedWork && runScheduleWork.stepWork.computeEnergy)
    {
        // in principle this should be included in the DD balancing region,
        // but generally it is infrequent so we'll omit it for the sake of
        // simpler code
        gpuBonded->waitAccumulateEnergyTerms(enerd);

        gpuBonded->clearEnergies();
    }
}
void do_force(FILE*                               fplog,
              const t_commrec*                    cr,
              const gmx_multisim_t*               ms,
              const t_inputrec*                   inputrec,
              gmx::Awh*                           awh,
              gmx_enfrot*                         enforcedRotation,
              gmx::ImdSession*                    imdSession,
              pull_t*                             pull_work,
              int64_t                             step,
              t_nrnb*                             nrnb,
              gmx_wallcycle_t                     wcycle,
              const gmx_localtop_t*               top,
              const matrix                        box,
              gmx::ArrayRefWithPadding<gmx::RVec> x,
              history_t*                          hist,
              gmx::ArrayRefWithPadding<gmx::RVec> force,
              tensor                              vir_force,
              const t_mdatoms*                    mdatoms,
              gmx_enerdata_t*                     enerd,
              t_fcdata*                           fcd,
              gmx::ArrayRef<real>                 lambda,
              t_graph*                            graph,
              t_forcerec*                         fr,
              gmx::MdrunScheduleWorkload*         runScheduleWork,
              const gmx_vsite_t*                  vsite,
              rvec                                mu_tot,
              double                              t,
              gmx_edsam*                          ed,
              int                                 legacyFlags,
              const DDBalanceRegionHandler&       ddBalanceRegionHandler)
{
    int                          i, j;
    double                       mu[2 * DIM];
    nonbonded_verlet_t*          nbv      = fr->nbv.get();
    interaction_const_t*         ic       = fr->ic;
    gmx::StatePropagatorDataGpu* stateGpu = fr->stateGpu;

    // TODO remove the code below when the legacy flags are not in use anymore
    /* modify force flag if not doing nonbonded */
    if (!fr->bNonbonded)
    {
        legacyFlags &= ~GMX_FORCE_NONBONDED;
    }

    const SimulationWorkload& simulationWork = runScheduleWork->simulationWork;

    runScheduleWork->stepWork = setupStepWorkload(legacyFlags, fr->bNonbonded, simulationWork,
                                                  thisRankHasDuty(cr, DUTY_PME));
    const StepWorkload& stepWork = runScheduleWork->stepWork;

    const bool useGpuPmeOnThisRank = simulationWork.useGpuPme && thisRankHasDuty(cr, DUTY_PME);
    const int  pmeFlags            = makePmeFlags(stepWork);

    /* At a search step we need to start the first balancing region
     * somewhere early inside the step after communication during domain
     * decomposition (and not during the previous step as usual).
     */
    if (stepWork.doNeighborSearch)
    {
        ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::yes);
    }
    const int start  = 0;
    const int homenr = mdatoms->homenr;

    clear_mat(vir_force);

    if (stepWork.stateChanged)
    {
        if (inputrecNeedMutot(inputrec))
        {
            /* Calculate total (local) dipole moment in a temporary common array.
             * This makes it possible to sum them over nodes faster.
             */
            calc_mu(start, homenr, x.unpaddedArrayRef(), mdatoms->chargeA, mdatoms->chargeB,
                    mdatoms->nChargePerturbed, mu, mu + DIM);
        }
    }

    if (fr->pbcType != PbcType::No)
    {
        /* Compute shift vectors every step,
         * because of pressure coupling or box deformation!
         */
        if (stepWork.haveDynamicBox && stepWork.stateChanged)
        {
            calc_shifts(box, fr->shift_vec);
        }

        const bool fillGrid = (stepWork.doNeighborSearch && stepWork.stateChanged);
        const bool calcCGCM = (fillGrid && !DOMAINDECOMP(cr));
        if (calcCGCM)
        {
            put_atoms_in_box_omp(fr->pbcType, box, x.unpaddedArrayRef().subArray(0, homenr),
                                 gmx_omp_nthreads_get(emntDefault));
            inc_nrnb(nrnb, eNR_SHIFTX, homenr);
        }
        else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
        {
            unshift_self(graph, box, as_rvec_array(x.unpaddedArrayRef().data()));
        }
    }
    nbnxn_atomdata_copy_shiftvec(stepWork.haveDynamicBox, fr->shift_vec, nbv->nbat.get());

    // Coordinates on the device are needed if PME or BufferOps are offloaded.
    // The local coordinates can be copied right away.
    // NOTE: Consider moving this copy to right after they are updated and constrained,
    //       if the latter is not offloaded.
    if (useGpuPmeOnThisRank || stepWork.useGpuXBufferOps)
    {
        if (stepWork.doNeighborSearch)
        {
            stateGpu->reinit(mdatoms->homenr,
                             cr->dd != nullptr ? dd_numAtomsZones(*cr->dd) : mdatoms->homenr);
            if (useGpuPmeOnThisRank)
            {
                // TODO: This should be moved into PME setup function ( pme_gpu_prepare_computation(...) )
                pme_gpu_set_device_x(fr->pmedata, stateGpu->getCoordinates());
            }
        }
        // We need to copy coordinates when:
        // 1. Update is not offloaded
        // 2. The buffers were reinitialized on search step
        if (!simulationWork.useGpuUpdate || stepWork.doNeighborSearch)
        {
            stateGpu->copyCoordinatesToGpu(x.unpaddedArrayRef(), AtomLocality::Local);
        }
    }
    // TODO Update this comment when introducing SimulationWorkload
    //
    // The conditions for gpuHaloExchange e.g. using GPU buffer
    // operations were checked before construction, so here we can
    // just use it and assert upon any conditions.
    gmx::GpuHaloExchange* gpuHaloExchange =
            (havePPDomainDecomposition(cr) ? cr->dd->gpuHaloExchange.get() : nullptr);
    const bool ddUsesGpuDirectCommunication = (gpuHaloExchange != nullptr);
    GMX_ASSERT(!ddUsesGpuDirectCommunication || stepWork.useGpuXBufferOps,
               "Must use coordinate buffer ops with GPU halo exchange");
    const bool useGpuForcesHaloExchange = ddUsesGpuDirectCommunication && stepWork.useGpuFBufferOps;

    // Copy coordinate from the GPU if update is on the GPU and there
    // are forces to be computed on the CPU, or for the computation of
    // virial, or if host-side data will be transferred from this task
    // to a remote task for halo exchange or PME-PP communication. At
    // search steps the current coordinates are already on the host,
    // hence copy is not needed.
    const bool haveHostPmePpComms =
            !thisRankHasDuty(cr, DUTY_PME) && !simulationWork.useGpuPmePpCommunication;
    const bool haveHostHaloExchangeComms = havePPDomainDecomposition(cr) && !ddUsesGpuDirectCommunication;

    bool gmx_used_in_debug haveCopiedXFromGpu = false;
    if (simulationWork.useGpuUpdate && !stepWork.doNeighborSearch
        && (runScheduleWork->domainWork.haveCpuLocalForceWork || stepWork.computeVirial
            || haveHostPmePpComms || haveHostHaloExchangeComms))
    {
        stateGpu->copyCoordinatesFromGpu(x.unpaddedArrayRef(), AtomLocality::Local);
        haveCopiedXFromGpu = true;
    }

    const auto localXReadyOnDevice = (stateGpu != nullptr)
                                             ? stateGpu->getCoordinatesReadyOnDeviceEvent(
                                                       AtomLocality::Local, simulationWork, stepWork)
                                             : nullptr;
#if GMX_MPI
    if (!thisRankHasDuty(cr, DUTY_PME))
    {
        /* Send particle coordinates to the pme nodes.
         * Since this is only implemented for domain decomposition
         * and domain decomposition does not use the graph,
         * we do not need to worry about shifting.
         */
        bool reinitGpuPmePpComms = simulationWork.useGpuPmePpCommunication && (stepWork.doNeighborSearch);
        bool sendCoordinatesFromGpu =
                simulationWork.useGpuPmePpCommunication && !(stepWork.doNeighborSearch);

        if (!stepWork.doNeighborSearch && simulationWork.useGpuUpdate && !sendCoordinatesFromGpu)
        {
            GMX_RELEASE_ASSERT(false,
                               "GPU update and separate PME ranks are only supported with GPU "
                               "direct communication!");
            // TODO: when this code-path becomes supported add:
            // stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
        }

        gmx_pme_send_coordinates(fr, cr, box, as_rvec_array(x.unpaddedArrayRef().data()), lambda[efptCOUL],
                                 lambda[efptVDW], (stepWork.computeVirial || stepWork.computeEnergy),
                                 step, simulationWork.useGpuPmePpCommunication, reinitGpuPmePpComms,
                                 sendCoordinatesFromGpu, localXReadyOnDevice, wcycle);
    }
#endif /* GMX_MPI */
    if (useGpuPmeOnThisRank)
    {
        launchPmeGpuSpread(fr->pmedata, box, stepWork, pmeFlags, localXReadyOnDevice, wcycle);
    }
    /* do gridding for pair search */
    if (stepWork.doNeighborSearch)
    {
        if (graph && stepWork.stateChanged)
        {
            /* Calculate intramolecular shift vectors to make molecules whole */
            mk_mshift(fplog, graph, fr->pbcType, box, as_rvec_array(x.unpaddedArrayRef().data()));
        }

        // TODO
        // - vzero is constant, do we need to pass it?
        // - box_diag should be passed directly to nbnxn_put_on_grid
        //
        rvec vzero;
        clear_rvec(vzero);

        rvec box_diag;
        box_diag[XX] = box[XX][XX];
        box_diag[YY] = box[YY][YY];
        box_diag[ZZ] = box[ZZ][ZZ];

        wallcycle_start(wcycle, ewcNS);
        if (!DOMAINDECOMP(cr))
        {
            wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
            nbnxn_put_on_grid(nbv, box, 0, vzero, box_diag, nullptr, { 0, mdatoms->homenr }, -1,
                              fr->cginfo, x.unpaddedArrayRef(), 0, nullptr);
            wallcycle_sub_stop(wcycle, ewcsNBS_GRID_LOCAL);
        }
        else
        {
            wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
            nbnxn_put_on_grid_nonlocal(nbv, domdec_zones(cr->dd), fr->cginfo, x.unpaddedArrayRef());
            wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);
        }

        nbv->setAtomProperties(*mdatoms, fr->cginfo);

        wallcycle_stop(wcycle, ewcNS);

        /* initialize the GPU nbnxm atom data and bonded data structures */
        if (simulationWork.useGpuNonbonded)
        {
            wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);

            wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
            Nbnxm::gpu_init_atomdata(nbv->gpu_nbv, nbv->nbat.get());
            wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);

            if (fr->gpuBonded)
            {
                /* Now we put all atoms on the grid, we can assign bonded
                 * interactions to the GPU, where the grid order is
                 * needed. Also the xq, f and fshift device buffers have
                 * been reallocated if needed, so the bonded code can
                 * learn about them. */
                // TODO the xq, f, and fshift buffers are now shared
                // resources, so they should be maintained by a
                // higher-level object than the nb module.
                fr->gpuBonded->updateInteractionListsAndDeviceBuffers(
                        nbv->getGridIndices(), top->idef, Nbnxm::gpu_get_xq(nbv->gpu_nbv),
                        Nbnxm::gpu_get_f(nbv->gpu_nbv), Nbnxm::gpu_get_fshift(nbv->gpu_nbv));
            }
            wallcycle_stop(wcycle, ewcLAUNCH_GPU);
        }

        // Need to run after the GPU-offload bonded interaction lists
        // are set up to be able to determine whether there is bonded work.
        runScheduleWork->domainWork = setupDomainLifetimeWorkload(
                *inputrec, *fr, pull_work, ed, top->idef, *fcd, *mdatoms, simulationWork, stepWork);

        wallcycle_start_nocount(wcycle, ewcNS);
        wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
        /* Note that with a GPU the launch overhead of the list transfer is not timed separately */
        nbv->constructPairlist(InteractionLocality::Local, top->excls, step, nrnb);

        nbv->setupGpuShortRangeWork(fr->gpuBonded, InteractionLocality::Local);

        wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_LOCAL);
        wallcycle_stop(wcycle, ewcNS);
        if (stepWork.useGpuXBufferOps)
        {
            nbv->atomdata_init_copy_x_to_nbat_x_gpu();
        }
        // For force buffer ops, we use the below condition rather than
        // useGpuFBufferOps to ensure that init is performed even if this
        // NS step is also a virial step (on which f buf ops are deactivated).
        if (simulationWork.useGpuBufferOps && simulationWork.useGpuNonbonded && (GMX_GPU == GMX_GPU_CUDA))
        {
            GMX_ASSERT(stateGpu, "stateGpu should be valid when buffer ops are offloaded");
            nbv->atomdata_init_add_nbat_f_to_f_gpu(stateGpu->fReducedOnDevice());
        }
    }
    else if (!EI_TPI(inputrec->eI))
    {
        if (stepWork.useGpuXBufferOps)
        {
            GMX_ASSERT(stateGpu, "stateGpu should be valid when buffer ops are offloaded");
            nbv->convertCoordinatesGpu(AtomLocality::Local, false, stateGpu->getCoordinates(),
                                       localXReadyOnDevice);
        }
        else
        {
            if (simulationWork.useGpuUpdate)
            {
                GMX_ASSERT(stateGpu, "need a valid stateGpu object");
                GMX_ASSERT(haveCopiedXFromGpu,
                           "a wait should only be triggered if copy has been scheduled");
                stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
            }
            nbv->convertCoordinates(AtomLocality::Local, false, x.unpaddedArrayRef());
        }
    }
    const gmx::DomainLifetimeWorkload& domainWork = runScheduleWork->domainWork;

    if (simulationWork.useGpuNonbonded)
    {
        ddBalanceRegionHandler.openBeforeForceComputationGpu();

        wallcycle_start(wcycle, ewcLAUNCH_GPU);

        wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        Nbnxm::gpu_upload_shiftvec(nbv->gpu_nbv, nbv->nbat.get());
        if (stepWork.doNeighborSearch || !stepWork.useGpuXBufferOps)
        {
            Nbnxm::gpu_copy_xq_to_gpu(nbv->gpu_nbv, nbv->nbat.get(), AtomLocality::Local);
        }
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        // with X buffer ops offloaded to the GPU on all but the search steps

        // bonded work not split into separate local and non-local, so with DD
        // we can only launch the kernel after non-local coordinates have been received.
        if (domainWork.haveGpuBondedWork && !havePPDomainDecomposition(cr))
        {
            wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_BONDED);
            fr->gpuBonded->launchKernel(fr, stepWork, box);
            wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_BONDED);
        }

        /* launch local nonbonded work on GPU */
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::Local, enbvClearFNo, step, nrnb, wcycle);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
    }

    if (useGpuPmeOnThisRank)
    {
        // In PME GPU and mixed mode we launch FFT / gather after the
        // X copy/transform to allow overlap as well as after the GPU NB
        // launch to avoid FFT launch overhead hijacking the CPU and delaying
        // the nonbonded kernel.
        launchPmeGpuFftAndGather(fr->pmedata, wcycle);
    }
    /* Communicate coordinates and sum dipole if necessary +
       do non-local pair search */
    if (havePPDomainDecomposition(cr))
    {
        if (stepWork.doNeighborSearch)
        {
            // TODO: fuse this branch with the above large stepWork.doNeighborSearch block
            wallcycle_start_nocount(wcycle, ewcNS);
            wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);
            /* Note that with a GPU the launch overhead of the list transfer is not timed separately */
            nbv->constructPairlist(InteractionLocality::NonLocal, top->excls, step, nrnb);

            nbv->setupGpuShortRangeWork(fr->gpuBonded, InteractionLocality::NonLocal);
            wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);
            wallcycle_stop(wcycle, ewcNS);
            if (ddUsesGpuDirectCommunication)
            {
                gpuHaloExchange->reinitHalo(stateGpu->getCoordinates(), stateGpu->getForces());
            }
        }
        else
        {
            if (ddUsesGpuDirectCommunication)
            {
                // The following must be called after local setCoordinates (which records an event
                // when the coordinate data has been copied to the device).
                gpuHaloExchange->communicateHaloCoordinates(box, localXReadyOnDevice);

                if (domainWork.haveCpuBondedWork || domainWork.haveFreeEnergyWork)
                {
                    // non-local part of coordinate buffer must be copied back to host for CPU work
                    stateGpu->copyCoordinatesFromGpu(x.unpaddedArrayRef(), AtomLocality::NonLocal);
                }
            }
            else
            {
                // Note: GPU update + DD without direct communication is not supported,
                // a waitCoordinatesReadyOnHost() should be issued if it will be.
                GMX_ASSERT(!simulationWork.useGpuUpdate,
                           "GPU update is not supported with CPU halo exchange");
                dd_move_x(cr->dd, box, x.unpaddedArrayRef(), wcycle);
            }

            if (stepWork.useGpuXBufferOps)
            {
                if (!useGpuPmeOnThisRank && !ddUsesGpuDirectCommunication)
                {
                    stateGpu->copyCoordinatesToGpu(x.unpaddedArrayRef(), AtomLocality::NonLocal);
                }
                nbv->convertCoordinatesGpu(AtomLocality::NonLocal, false, stateGpu->getCoordinates(),
                                           stateGpu->getCoordinatesReadyOnDeviceEvent(
                                                   AtomLocality::NonLocal, simulationWork, stepWork));
            }
            else
            {
                nbv->convertCoordinates(AtomLocality::NonLocal, false, x.unpaddedArrayRef());
            }
        }

        if (simulationWork.useGpuNonbonded)
        {
            wallcycle_start(wcycle, ewcLAUNCH_GPU);

            if (stepWork.doNeighborSearch || !stepWork.useGpuXBufferOps)
            {
                wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
                Nbnxm::gpu_copy_xq_to_gpu(nbv->gpu_nbv, nbv->nbat.get(), AtomLocality::NonLocal);
                wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
            }

            if (domainWork.haveGpuBondedWork)
            {
                wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_BONDED);
                fr->gpuBonded->launchKernel(fr, stepWork, box);
                wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_BONDED);
            }

            /* launch non-local nonbonded tasks on GPU */
            wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
            do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::NonLocal, enbvClearFNo, step,
                         nrnb, wcycle);
            wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);

            wallcycle_stop(wcycle, ewcLAUNCH_GPU);
        }
    }
    if (simulationWork.useGpuNonbonded)
    {
        /* launch D2H copy-back F */
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);

        if (havePPDomainDecomposition(cr))
        {
            Nbnxm::gpu_launch_cpyback(nbv->gpu_nbv, nbv->nbat.get(), stepWork, AtomLocality::NonLocal);
        }
        Nbnxm::gpu_launch_cpyback(nbv->gpu_nbv, nbv->nbat.get(), stepWork, AtomLocality::Local);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);

        if (domainWork.haveGpuBondedWork && stepWork.computeEnergy)
        {
            fr->gpuBonded->launchEnergyTransfer();
        }
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
    }
    if (stepWork.stateChanged && inputrecNeedMutot(inputrec))
    {
        if (PAR(cr))
        {
            gmx_sumd(2 * DIM, mu, cr);

            ddBalanceRegionHandler.reopenRegionCpu();
        }

        for (i = 0; i < 2; i++)
        {
            for (j = 0; j < DIM; j++)
            {
                fr->mu_tot[i][j] = mu[i * DIM + j];
            }
        }
    }
    if (mdatoms->nChargePerturbed == 0)
    {
        copy_rvec(fr->mu_tot[0], mu_tot);
    }
    else
    {
        for (j = 0; j < DIM; j++)
        {
            mu_tot[j] = (1.0 - lambda[efptCOUL]) * fr->mu_tot[0][j] + lambda[efptCOUL] * fr->mu_tot[1][j];
        }
    }

    /* Reset energies */
    reset_enerdata(enerd);
    /* Clear the shift forces */
    // TODO: This should be linked to the shift force buffer in use, or cleared before use instead
    for (gmx::RVec& elem : fr->shiftForces)
    {
        elem = { 0.0_real, 0.0_real, 0.0_real };
    }
    if (DOMAINDECOMP(cr) && !thisRankHasDuty(cr, DUTY_PME))
    {
        wallcycle_start(wcycle, ewcPPDURINGPME);
        dd_force_flop_start(cr->dd, nrnb);
    }

    // For the rest of the CPU tasks that depend on GPU-update produced coordinates,
    // this wait ensures that the D2H transfer is complete.
    if ((simulationWork.useGpuUpdate)
        && (runScheduleWork->domainWork.haveCpuLocalForceWork || stepWork.computeVirial))
    {
        stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
    }

    if (inputrec->bRot)
    {
        wallcycle_start(wcycle, ewcROT);
        do_rotation(cr, enforcedRotation, box, as_rvec_array(x.unpaddedArrayRef().data()), t, step,
                    stepWork.doNeighborSearch);
        wallcycle_stop(wcycle, ewcROT);
    }
    /* Start the force cycle counter.
     * Note that a different counter is used for dynamic load balancing.
     */
    wallcycle_start(wcycle, ewcFORCE);

    // Set up and clear force outputs.
    // We use std::move to keep the compiler happy, it has no effect.
    ForceOutputs forceOut =
            setupForceOutputs(fr, pull_work, *inputrec, std::move(force), stepWork, wcycle);

    /* We calculate the non-bonded forces, when done on the CPU, here.
     * We do this before calling do_force_lowlevel, because in that
     * function, the listed forces are calculated before PME, which
     * does communication. With this order, non-bonded and listed
     * force calculation imbalance can be balanced out by the domain
     * decomposition load balancing.
     */

    const bool useOrEmulateGpuNb = simulationWork.useGpuNonbonded || fr->nbv->emulateGpu();

    if (!useOrEmulateGpuNb)
    {
        do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::Local, enbvClearFYes, step, nrnb, wcycle);
    }
    if (fr->efep != efepNO)
    {
        /* Calculate the local and non-local free energy interactions here.
         * Happens here on the CPU both with and without GPU.
         */
        nbv->dispatchFreeEnergyKernel(InteractionLocality::Local, fr,
                                      as_rvec_array(x.unpaddedArrayRef().data()),
                                      &forceOut.forceWithShiftForces(), *mdatoms, inputrec->fepvals,
                                      lambda.data(), enerd, stepWork, nrnb);

        if (havePPDomainDecomposition(cr))
        {
            nbv->dispatchFreeEnergyKernel(InteractionLocality::NonLocal, fr,
                                          as_rvec_array(x.unpaddedArrayRef().data()),
                                          &forceOut.forceWithShiftForces(), *mdatoms,
                                          inputrec->fepvals, lambda.data(), enerd, stepWork, nrnb);
        }
    }
    if (!useOrEmulateGpuNb)
    {
        if (havePPDomainDecomposition(cr))
        {
            do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::NonLocal, enbvClearFNo, step,
                         nrnb, wcycle);
        }

        if (stepWork.computeForces)
        {
            /* Add all the non-bonded force to the normal force array.
             * This can be split into a local and a non-local part when overlapping
             * communication with calculation with domain decomposition.
             */
            wallcycle_stop(wcycle, ewcFORCE);
            nbv->atomdata_add_nbat_f_to_f(AtomLocality::All, forceOut.forceWithShiftForces().force());
            wallcycle_start_nocount(wcycle, ewcFORCE);
        }

        /* If there are multiple fshift output buffers we need to reduce them */
        if (stepWork.computeVirial)
        {
            /* This is not in a subcounter because it takes a
               negligible and constant-sized amount of time */
            nbnxn_atomdata_add_nbat_fshift_to_fshift(*nbv->nbat,
                                                     forceOut.forceWithShiftForces().shiftForces());
        }
    }

    /* update QMMMrec, if necessary */
    if (fr->bQMMM)
    {
        update_QMMMrec(cr, fr, as_rvec_array(x.unpaddedArrayRef().data()), mdatoms, box);
    }
    // TODO Force flags should include haveFreeEnergyWork for this domain
    if (ddUsesGpuDirectCommunication && (domainWork.haveCpuBondedWork || domainWork.haveFreeEnergyWork))
    {
        /* Wait for non-local coordinate data to be copied from device */
        stateGpu->waitCoordinatesReadyOnHost(AtomLocality::NonLocal);
    }
    /* Compute the bonded and non-bonded energies and optionally forces */
    do_force_lowlevel(fr, inputrec, &(top->idef), cr, ms, nrnb, wcycle, mdatoms, x, hist, &forceOut, enerd,
                      fcd, box, lambda.data(), graph, fr->mu_tot, stepWork, ddBalanceRegionHandler);

    wallcycle_stop(wcycle, ewcFORCE);

    computeSpecialForces(fplog, cr, inputrec, awh, enforcedRotation, imdSession, pull_work, step, t,
                         wcycle, fr->forceProviders, box, x.unpaddedArrayRef(), mdatoms, lambda.data(),
                         stepWork, &forceOut.forceWithVirial(), enerd, ed, stepWork.doNeighborSearch);
    // Will store the amount of cycles spent waiting for the GPU that
    // will be later used in the DLB accounting.
    float cycles_wait_gpu = 0;
    if (useOrEmulateGpuNb)
    {
        auto& forceWithShiftForces = forceOut.forceWithShiftForces();

        /* wait for non-local forces (or calculate in emulation mode) */
        if (havePPDomainDecomposition(cr))
        {
            if (simulationWork.useGpuNonbonded)
            {
                cycles_wait_gpu += Nbnxm::gpu_wait_finish_task(
                        nbv->gpu_nbv, stepWork, AtomLocality::NonLocal, enerd->grpp.ener[egLJSR].data(),
                        enerd->grpp.ener[egCOULSR].data(), forceWithShiftForces.shiftForces(), wcycle);
            }
            else
            {
                wallcycle_start_nocount(wcycle, ewcFORCE);
                do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::NonLocal, enbvClearFYes,
                             step, nrnb, wcycle);
                wallcycle_stop(wcycle, ewcFORCE);
            }

            if (stepWork.useGpuFBufferOps)
            {
                gmx::FixedCapacityVector<GpuEventSynchronizer*, 1> dependencyList;

                // TODO: move this into DomainLifetimeWorkload, including the second part of the
                // condition. The bonded and free energy CPU tasks can have non-local force
                // contributions which are a dependency for the GPU force reduction.
                bool haveNonLocalForceContribInCpuBuffer =
                        domainWork.haveCpuBondedWork || domainWork.haveFreeEnergyWork;

                if (haveNonLocalForceContribInCpuBuffer)
                {
                    stateGpu->copyForcesToGpu(forceOut.forceWithShiftForces().force(),
                                              AtomLocality::NonLocal);
                    dependencyList.push_back(stateGpu->getForcesReadyOnDeviceEvent(
                            AtomLocality::NonLocal, stepWork.useGpuFBufferOps));
                }

                nbv->atomdata_add_nbat_f_to_f_gpu(AtomLocality::NonLocal, stateGpu->getForces(),
                                                  pme_gpu_get_device_f(fr->pmedata), dependencyList,
                                                  false, haveNonLocalForceContribInCpuBuffer);
                if (!useGpuForcesHaloExchange)
                {
                    // copy from GPU input for dd_move_f()
                    stateGpu->copyForcesFromGpu(forceOut.forceWithShiftForces().force(),
                                                AtomLocality::NonLocal);
                }
            }
            else
            {
                nbv->atomdata_add_nbat_f_to_f(AtomLocality::NonLocal, forceWithShiftForces.force());
            }

            if (fr->nbv->emulateGpu() && stepWork.computeVirial)
            {
                nbnxn_atomdata_add_nbat_fshift_to_fshift(*nbv->nbat, forceWithShiftForces.shiftForces());
            }
        }
    }
    if (havePPDomainDecomposition(cr))
    {
        /* We are done with the CPU compute.
         * We will now communicate the non-local forces.
         * If we use a GPU this will overlap with GPU work, so in that case
         * we do not close the DD force balancing region here.
         */
        ddBalanceRegionHandler.closeAfterForceComputationCpu();

        if (stepWork.computeForces)
        {
            if (useGpuForcesHaloExchange)
            {
                if (domainWork.haveCpuLocalForceWork)
                {
                    stateGpu->copyForcesToGpu(forceOut.forceWithShiftForces().force(), AtomLocality::Local);
                }
                gpuHaloExchange->communicateHaloForces(domainWork.haveCpuLocalForceWork);
            }
            else
            {
                if (stepWork.useGpuFBufferOps)
                {
                    stateGpu->waitForcesReadyOnHost(AtomLocality::NonLocal);
                }
                dd_move_f(cr->dd, &forceOut.forceWithShiftForces(), wcycle);
            }
        }
    }
    // With both nonbonded and PME offloaded to a GPU on the same rank, we use
    // an alternating wait/reduction scheme.
    bool alternateGpuWait = (!c_disableAlternatingWait && useGpuPmeOnThisRank && simulationWork.useGpuNonbonded
                             && !DOMAINDECOMP(cr) && !stepWork.useGpuFBufferOps);
    if (alternateGpuWait)
    {
        alternatePmeNbGpuWaitReduce(fr->nbv.get(), fr->pmedata, &forceOut, enerd, stepWork, pmeFlags, wcycle);
    }

    if (!alternateGpuWait && useGpuPmeOnThisRank)
    {
        pme_gpu_wait_and_reduce(fr->pmedata, pmeFlags, wcycle, &forceOut.forceWithVirial(), enerd);
    }
    /* Wait for local GPU NB outputs on the non-alternating wait path */
    if (!alternateGpuWait && simulationWork.useGpuNonbonded)
    {
        /* Measured overhead on CUDA and OpenCL with(out) GPU sharing
         * is between 0.5 and 1.5 Mcycles. So 2 MCycles is an overestimate,
         * but even with a step of 0.1 ms the difference is less than 1%
         * of the step time.
         */
        const float gpuWaitApiOverheadMargin = 2e6F; /* cycles */
        const float waitCycles               = Nbnxm::gpu_wait_finish_task(
                nbv->gpu_nbv, stepWork, AtomLocality::Local, enerd->grpp.ener[egLJSR].data(),
                enerd->grpp.ener[egCOULSR].data(), forceOut.forceWithShiftForces().shiftForces(), wcycle);

        if (ddBalanceRegionHandler.useBalancingRegion())
        {
            DdBalanceRegionWaitedForGpu waitedForGpu = DdBalanceRegionWaitedForGpu::yes;
            if (stepWork.computeForces && waitCycles <= gpuWaitApiOverheadMargin)
            {
                /* We measured few cycles, it could be that the kernel
                 * and transfer finished earlier and there was no actual
                 * wait time, only API call overhead.
                 * Then the actual time could be anywhere between 0 and
                 * cycles_wait_est. We will use half of cycles_wait_est.
                 */
                waitedForGpu = DdBalanceRegionWaitedForGpu::no;
            }
            ddBalanceRegionHandler.closeAfterForceComputationGpu(cycles_wait_gpu, waitedForGpu);
        }
    }

    if (fr->nbv->emulateGpu())
    {
        // NOTE: emulation kernel is not included in the balancing region,
        // but emulation mode does not target performance anyway
        wallcycle_start_nocount(wcycle, ewcFORCE);
        do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::Local,
                     DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes, step, nrnb, wcycle);
        wallcycle_stop(wcycle, ewcFORCE);
    }
    // If on GPU PME-PP comms or GPU update path, receive forces from PME before GPU buffer ops
    // TODO refactor this and unify with below default-path call to the same function
    if (PAR(cr) && !thisRankHasDuty(cr, DUTY_PME)
        && (simulationWork.useGpuPmePpCommunication || simulationWork.useGpuUpdate))
    {
        /* In case of node-splitting, the PP nodes receive the long-range
         * forces, virial and energy from the PME nodes here.
         */
        pme_receive_force_ener(fr, cr, &forceOut.forceWithVirial(), enerd,
                               simulationWork.useGpuPmePpCommunication,
                               stepWork.useGpuPmeFReduction, wcycle);
    }
    /* Do the nonbonded GPU (or emulation) force buffer reduction
     * on the non-alternating path. */
    if (useOrEmulateGpuNb && !alternateGpuWait)
    {
        // TODO simplify the below conditionals. Pass buffer and sync pointers at init stage rather than here. Unify getter fns for sameGPU/otherGPU cases.
        void* pmeForcePtr =
                stepWork.useGpuPmeFReduction
                        ? (thisRankHasDuty(cr, DUTY_PME) ? pme_gpu_get_device_f(fr->pmedata)
                                                         : // PME force buffer on same GPU
                                   fr->pmePpCommGpu->getGpuForceStagingPtr()) // buffer received from other GPU
                        : nullptr; // PME reduction not active on GPU

        GpuEventSynchronizer* const pmeSynchronizer =
                stepWork.useGpuPmeFReduction
                        ? (thisRankHasDuty(cr, DUTY_PME) ? pme_gpu_get_f_ready_synchronizer(fr->pmedata)
                                                         : // PME force buffer on same GPU
                                   static_cast<GpuEventSynchronizer*>(
                                           fr->pmePpCommGpu->getForcesReadySynchronizer())) // buffer received from other GPU
                        : nullptr; // PME reduction not active on GPU

        gmx::FixedCapacityVector<GpuEventSynchronizer*, 3> dependencyList;

        if (stepWork.useGpuPmeFReduction)
        {
            dependencyList.push_back(pmeSynchronizer);
        }
        gmx::ArrayRef<gmx::RVec> forceWithShift = forceOut.forceWithShiftForces().force();

        if (stepWork.useGpuFBufferOps)
        {
            // Flag to specify whether the CPU force buffer has contributions to
            // local atoms. This depends on whether there are CPU-based force tasks
            // or when DD is active the halo exchange has resulted in contributions
            // from the non-local part.
            const bool haveLocalForceContribInCpuBuffer =
                    (domainWork.haveCpuLocalForceWork || havePPDomainDecomposition(cr));

            // TODO: move these steps as early as possible:
            // - CPU f H2D should be as soon as all CPU-side forces are done
            // - wait for force reduction does not need to block host (at least not here, it's sufficient to wait
            //   before the next CPU task that consumes the forces: vsite spread or update)
            // - copy is not performed if GPU force halo exchange is active, because it would overwrite the result
            //   of the halo exchange. In that case the copy is instead performed above, before the exchange.
            //   These should be unified.
            if (haveLocalForceContribInCpuBuffer && !useGpuForcesHaloExchange)
            {
                // Note: AtomLocality::All is used for the non-DD case because, as in this
                // case copyForcesToGpu() uses a separate stream, it allows overlap of
                // CPU force H2D with GPU force tasks on all streams including those in the
                // local stream which would otherwise be implicit dependencies for the
                // transfer and would not overlap.
                auto locality = havePPDomainDecomposition(cr) ? AtomLocality::Local : AtomLocality::All;

                stateGpu->copyForcesToGpu(forceWithShift, locality);
                dependencyList.push_back(
                        stateGpu->getForcesReadyOnDeviceEvent(locality, stepWork.useGpuFBufferOps));
            }
            if (useGpuForcesHaloExchange)
            {
                dependencyList.push_back(gpuHaloExchange->getForcesReadyOnDeviceEvent());
            }
            nbv->atomdata_add_nbat_f_to_f_gpu(AtomLocality::Local, stateGpu->getForces(), pmeForcePtr,
                                              dependencyList, stepWork.useGpuPmeFReduction,
                                              haveLocalForceContribInCpuBuffer);
            // Copy forces to host if they are needed for update or if virtual sites are enabled.
            // If there are vsites, we need to copy forces every step to spread vsite forces on host.
            // TODO: When the output flags will be included in step workload, this copy can be combined with the
            //       copy call done in sim_utils(...) for the output.
            // NOTE: If there are virtual sites, the forces are modified on host after this D2H copy. Hence,
            //       they should not be copied in do_md(...) for the output.
            if (!simulationWork.useGpuUpdate || vsite)
            {
                stateGpu->copyForcesFromGpu(forceWithShift, AtomLocality::Local);
                stateGpu->waitForcesReadyOnHost(AtomLocality::Local);
            }
        }
        else
        {
            nbv->atomdata_add_nbat_f_to_f(AtomLocality::Local, forceWithShift);
        }
    }
    launchGpuEndOfStepTasks(nbv, fr->gpuBonded, fr->pmedata, enerd, *runScheduleWork,
                            useGpuPmeOnThisRank, step, wcycle);

    if (DOMAINDECOMP(cr))
    {
        dd_force_flop_stop(cr->dd, nrnb);
    }
    if (stepWork.computeForces)
    {
        rvec* f = as_rvec_array(forceOut.forceWithShiftForces().force().data());

        /* If we have NoVirSum forces, but we do not calculate the virial,
         * we sum fr->f_novirsum=forceOut.f later.
         */
        if (vsite && !(fr->haveDirectVirialContributions && !stepWork.computeVirial))
        {
            rvec* fshift = as_rvec_array(forceOut.forceWithShiftForces().shiftForces().data());
            spread_vsite_f(vsite, as_rvec_array(x.unpaddedArrayRef().data()), f, fshift, FALSE, nullptr,
                           nrnb, &top->idef, fr->pbcType, fr->bMolPBC, graph, box, cr, wcycle);
        }

        if (stepWork.computeVirial)
        {
            /* Calculation of the virial must be done after vsites! */
            calc_virial(0, mdatoms->homenr, as_rvec_array(x.unpaddedArrayRef().data()),
                        forceOut.forceWithShiftForces(), vir_force, graph, box, nrnb, fr,
                        inputrec->pbcType);
        }
    }
    // TODO refactor this and unify with above GPU PME-PP / GPU update path call to the same function
    if (PAR(cr) && !thisRankHasDuty(cr, DUTY_PME) && !simulationWork.useGpuPmePpCommunication
        && !simulationWork.useGpuUpdate)
    {
        /* In case of node-splitting, the PP nodes receive the long-range
         * forces, virial and energy from the PME nodes here.
         */
        pme_receive_force_ener(fr, cr, &forceOut.forceWithVirial(), enerd,
                               simulationWork.useGpuPmePpCommunication, false, wcycle);
    }

    if (stepWork.computeForces)
    {
        post_process_forces(cr, step, nrnb, wcycle, top, box, as_rvec_array(x.unpaddedArrayRef().data()),
                            &forceOut, vir_force, mdatoms, graph, fr, vsite, stepWork);
    }
    if (stepWork.computeEnergy)
    {
        /* Sum the potential energy terms from group contributions */
        sum_epot(&(enerd->grpp), enerd->term);

        if (!EI_TPI(inputrec->eI))
        {
            checkPotentialEnergyValidity(step, *enerd, *inputrec);
        }
    }
    /* In case we don't have constraints and are using GPUs, the next balancing
     * region starts here.
     * Some "special" work at the end of do_force_cuts?, such as vsite spread,
     * virial calculation and COM pulling, is thus not included in
     * the balance timing, which is ok as most tasks do communication.
     */
    ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::no);
}