/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gromacs/awh/awh.h"
#include "gromacs/domdec/dlbtiming.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/domdec/partition.h"
#include "gromacs/essentialdynamics/edsam.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/gmxlib/chargegroup.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/gmxlib/nonbonded/nb_free_energy.h"
#include "gromacs/gmxlib/nonbonded/nb_kernel.h"
#include "gromacs/gmxlib/nonbonded/nonbonded.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/imd/imd.h"
#include "gromacs/listed_forces/bonded.h"
#include "gromacs/listed_forces/disre.h"
#include "gromacs/listed_forces/gpubonded.h"
#include "gromacs/listed_forces/manage_threading.h"
#include "gromacs/listed_forces/orires.h"
#include "gromacs/math/arrayrefwithpadding.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vecdump.h"
#include "gromacs/mdlib/calcmu.h"
#include "gromacs/mdlib/calcvir.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/forcerec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/ppforceworkload.h"
#include "gromacs/mdlib/qmmm.h"
#include "gromacs/mdlib/update.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/enerdata.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/iforceprovider.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/nbnxm/atomdata.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/mshift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/pulling/pull_rotation.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/wallcyclereporting.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/strconvert.h"
#include "gromacs/utility/sysinfo.h"
// TODO: this environment variable allows us to verify before release
// that on less common architectures the total cost of polling is not larger than
// a blocking wait (so polling does not introduce overhead when the static
// PME-first ordering would suffice).
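// Usage sketch (assuming a typical run script; not a new mechanism, just the
// variable defined below): exporting GMX_DISABLE_ALTERNATING_GPU_WAIT=1 before
// invoking gmx mdrun falls back to the static wait ordering.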
static const bool c_disableAlternatingWait = (getenv("GMX_DISABLE_ALTERNATING_GPU_WAIT") != nullptr);
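/*! \brief Add the forces in \p forceToAdd element-wise into \p f, parallelized over OpenMP threads. */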
static void sum_forces(rvec f[], gmx::ArrayRef<const gmx::RVec> forceToAdd)
    const int end = forceToAdd.size();

    int gmx_unused nt = gmx_omp_nthreads_get(emntDefault);
#pragma omp parallel for num_threads(nt) schedule(static)
    for (int i = 0; i < end; i++)
        rvec_inc(f[i], forceToAdd[i]);
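/*! \brief Calculate the short-range virial from the local atoms plus the
 * shift forces arising from interactions with surrounding periodic images. */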
static void calc_virial(int start, int homenr, const rvec x[], const rvec f[],
                        tensor vir_part, const t_graph *graph, const matrix box,
                        t_nrnb *nrnb, const t_forcerec *fr, int ePBC)
    /* The short-range virial from surrounding boxes */
    calc_vir(SHIFTS, fr->shift_vec, fr->fshift, vir_part, ePBC == epbcSCREW, box);
    inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);

    /* Calculate partial virial, for local atoms only, based on short range.
     * Total virial is computed in global_stat, called from do_md
     */
    f_calc_vir(start, start+homenr, x, f, vir_part, graph, box);
    inc_nrnb(nrnb, eNR_VIRIAL, homenr);

    pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
static void pull_potential_wrapper(const t_commrec *cr,
                                   const t_inputrec *ir,
                                   const matrix box, gmx::ArrayRef<const gmx::RVec> x,
                                   gmx::ForceWithVirial *force,
                                   const t_mdatoms *mdatoms,
                                   gmx_enerdata_t *enerd,
                                   gmx_wallcycle_t wcycle)
    /* Calculate the center of mass forces; this requires communication,
     * which is why pull_potential is called close to other communication.
     */
    wallcycle_start(wcycle, ewcPULLPOT);
    set_pbc(&pbc, ir->ePBC, box);
    enerd->term[F_COM_PULL] +=
        pull_potential(ir->pull_work, mdatoms, &pbc,
                       cr, t, lambda[efptRESTRAINT], as_rvec_array(x.data()), force, &dvdl);
    enerd->dvdl_lin[efptRESTRAINT] += dvdl;
    wallcycle_stop(wcycle, ewcPULLPOT);
static void pme_receive_force_ener(const t_commrec *cr,
                                   gmx::ForceWithVirial *forceWithVirial,
                                   gmx_enerdata_t *enerd,
                                   gmx_wallcycle_t wcycle)
    real  e_q, e_lj, dvdl_q, dvdl_lj;
    float cycles_ppdpme, cycles_seppme;

    cycles_ppdpme = wallcycle_stop(wcycle, ewcPPDURINGPME);
    dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);

    /* In case of node-splitting, the PP nodes receive the long-range
     * forces, virial and energy from the PME nodes here.
     */
    wallcycle_start(wcycle, ewcPP_PMEWAITRECVF);
    gmx_pme_receive_f(cr, forceWithVirial, &e_q, &e_lj, &dvdl_q, &dvdl_lj,
                      &cycles_seppme);
    enerd->term[F_COUL_RECIP] += e_q;
    enerd->term[F_LJ_RECIP]   += e_lj;
    enerd->dvdl_lin[efptCOUL] += dvdl_q;
    enerd->dvdl_lin[efptVDW]  += dvdl_lj;
    dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);
    wallcycle_stop(wcycle, ewcPP_PMEWAITRECVF);
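/*! \brief Print the coordinates and force magnitude of every local atom whose
 * force exceeds \p forceTolerance, and fatal out when non-finite forces are found. */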
static void print_large_forces(FILE *fp,
    real       force2Tolerance = gmx::square(forceTolerance);
    gmx::index numNonFinite    = 0;
    for (int i = 0; i < md->homenr; i++)
        real force2    = norm2(f[i]);
        bool nonFinite = !std::isfinite(force2);
        if (force2 >= force2Tolerance || nonFinite)
            fprintf(fp, "step %" PRId64 " atom %6d x %8.3f %8.3f %8.3f force %12.5e\n",
                    step,
                    ddglatnr(cr->dd, i), x[i][XX], x[i][YY], x[i][ZZ], std::sqrt(force2));
    if (numNonFinite > 0)
        /* Note that with MPI this fatal call on one rank might interrupt
         * the printing on other ranks. But we can only avoid that with
         * an expensive MPI barrier that we would need at each step.
         */
        gmx_fatal(FARGS, "At step %" PRId64 " detected non-finite forces on %td atoms", step, numNonFinite);
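/*! \brief Post-process the forces: spread direct-virial contributions from the
 * separate force buffer via virtual sites, accumulate them into the main force
 * and virial outputs, and optionally print large forces. */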
static void post_process_forces(const t_commrec *cr,
                                gmx_wallcycle_t wcycle,
                                const gmx_localtop_t *top,
                                gmx::ForceWithVirial *forceWithVirial,
                                const t_mdatoms *mdatoms,
                                const t_graph *graph,
                                const t_forcerec *fr,
                                const gmx_vsite_t *vsite,
    if (fr->haveDirectVirialContributions)
        rvec *fDirectVir = as_rvec_array(forceWithVirial->force_.data());

        /* Spread the mesh force on virtual sites to the other particles...
         * This is parallelized. MPI communication is performed
         * if the constructing atoms aren't local.
         */
        matrix virial = { { 0 } };
        spread_vsite_f(vsite, x, fDirectVir, nullptr,
                       (flags & GMX_FORCE_VIRIAL) != 0, virial,
                       &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr, wcycle);
        forceWithVirial->addVirialContribution(virial);

        if (flags & GMX_FORCE_VIRIAL)
            /* Now add the forces; this is local */
            sum_forces(f, forceWithVirial->force_);

            /* Add the direct virial contributions */
            GMX_ASSERT(forceWithVirial->computeVirial_, "forceWithVirial should request virial computation when we request the virial");
            m_add(vir_force, forceWithVirial->getVirial(), vir_force);

    pr_rvecs(debug, 0, "vir_force", vir_force, DIM);

    if (fr->print_force >= 0)
        print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
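/*! \brief Dispatch the Verlet-scheme nonbonded kernel for the given interaction
 * locality, first running the CPU rolling-prune kernel when a dynamic pruning
 * step is due. */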
static void do_nb_verlet(t_forcerec *fr,
                         const interaction_const_t *ic,
                         gmx_enerdata_t *enerd,
                         const Nbnxm::InteractionLocality ilocality,
                         gmx_wallcycle_t wcycle)
    if (!(flags & GMX_FORCE_NONBONDED))
        /* skip non-bonded calculation */

    nonbonded_verlet_t *nbv = fr->nbv.get();

    /* GPU kernel launch overhead is already timed separately */
    if (fr->cutoff_scheme != ecutsVERLET)
        gmx_incons("Invalid cut-off scheme passed!");

    /* When dynamic pair-list pruning is requested, we need to prune
     * at nstlistPrune steps.
     */
    if (nbv->pairlistSets().isDynamicPruningStepCpu(step))
        /* Prune the pair-list beyond fr->ic->rlistPrune using
         * the current coordinates of the atoms.
         */
        wallcycle_sub_start(wcycle, ewcsNONBONDED_PRUNING);
        nbv->dispatchPruneKernelCpu(ilocality, fr->shift_vec);
        wallcycle_sub_stop(wcycle, ewcsNONBONDED_PRUNING);

    wallcycle_sub_start(wcycle, ewcsNONBONDED);
    nbv->dispatchNonbondedKernel(ilocality, *ic, flags, clearF, fr, enerd, nrnb);
    wallcycle_sub_stop(wcycle, ewcsNONBONDED);

gmx_bool use_GPU(const nonbonded_verlet_t *nbv)
    return nbv != nullptr && nbv->useGpu();
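/*! \brief Zero the first \p n entries of \p v, using OpenMP threading for large \p n. */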
static inline void clear_rvecs_omp(int n, rvec v[])
    int nth = gmx_omp_nthreads_get_simple_rvec_task(emntDefault, n);

    /* Note that we would like to avoid this conditional by putting it
     * into the omp pragma instead, but then we still take the full
     * omp parallel for overhead (at least with gcc5).
     */
    if (nth == 1)
        for (int i = 0; i < n; i++)
            clear_rvec(v[i]);
    else
#pragma omp parallel for num_threads(nth) schedule(static)
        for (int i = 0; i < n; i++)
            clear_rvec(v[i]);

/*! \brief Return an estimate of the average kinetic energy or 0 when unreliable
 *
 * \param groupOptions  Group options, containing T-coupling options
 */
static real averageKineticEnergyEstimate(const t_grpopts &groupOptions)
    real nrdfCoupled   = 0;
    real nrdfUncoupled = 0;
    real kineticEnergy = 0;
    for (int g = 0; g < groupOptions.ngtc; g++)
        if (groupOptions.tau_t[g] >= 0)
            nrdfCoupled   += groupOptions.nrdf[g];
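            /* Equipartition: each degree of freedom contributes kB*T/2 to the
             * average kinetic energy of a temperature-coupled group. */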
            kineticEnergy += groupOptions.nrdf[g]*0.5*groupOptions.ref_t[g]*BOLTZ;
        else
            nrdfUncoupled += groupOptions.nrdf[g];

    /* This conditional with > also catches nrdf=0 */
    if (nrdfCoupled > nrdfUncoupled)
        return kineticEnergy*(nrdfCoupled + nrdfUncoupled)/nrdfCoupled;
/*! \brief This routine checks that the potential energy is finite.
 *
 * Always checks that the potential energy is finite. If step equals
 * inputrec.init_step it also checks that the magnitude of the potential energy
 * is reasonable. Terminates with a fatal error when a check fails.
 * Note that passing this check does not guarantee finite forces,
 * since those use slightly different arithmetic. But in most cases
 * there is just a narrow coordinate range where forces are not finite
 * and energies are finite.
 *
 * \param[in] step      The step number, used for checking and printing
 * \param[in] enerd     The energy data; the non-bonded group energies need to be added to enerd.term[F_EPOT] before calling this routine
 * \param[in] inputrec  The input record
 */
static void checkPotentialEnergyValidity(int64_t               step,
                                         const gmx_enerdata_t &enerd,
                                         const t_inputrec     &inputrec)
    /* Threshold valid for comparing absolute potential energy against
     * the kinetic energy. Normally one should not consider absolute
     * potential energy values, but with a factor of one million
     * we should never get false positives.
     */
    constexpr real c_thresholdFactor = 1e6;

    bool energyIsNotFinite    = !std::isfinite(enerd.term[F_EPOT]);
    real averageKineticEnergy = 0;
    /* We only check for large potential energy at the initial step,
     * because that is by far the most likely step for this to occur
     * and because computing the average kinetic energy is not free.
     * Note: nstcalcenergy >> 1 often makes it impossible to catch large energies
     * before they become NaN.
     */
    if (step == inputrec.init_step && EI_DYNAMICS(inputrec.eI))
        averageKineticEnergy = averageKineticEnergyEstimate(inputrec.opts);

    if (energyIsNotFinite || (averageKineticEnergy > 0 &&
                              enerd.term[F_EPOT] > c_thresholdFactor*averageKineticEnergy))
        gmx_fatal(FARGS, "Step %" PRId64 ": The total potential energy is %g, which is %s. The LJ and electrostatic contributions to the energy are %g and %g, respectively. A %s potential energy can be caused by overlapping atoms in bonded interactions or by very large%s coordinate values. Usually this is caused by a badly- or non-equilibrated initial configuration, or by incorrect interactions or parameters in the topology.",
                  step,
                  enerd.term[F_EPOT],
                  energyIsNotFinite ? "not finite" : "extremely high",
                  enerd.term[F_LJ],
                  enerd.term[F_COUL_SR],
                  energyIsNotFinite ? "non-finite" : "very high",
                  energyIsNotFinite ? " or NaN" : "");
/*! \brief Compute forces and/or energies for special algorithms
 *
 * The intention is to collect all calls to algorithms that compute
 * forces on local atoms only and that do not contribute to the local
 * virial sum (but add their virial contribution separately).
 * Eventually these should likely all become ForceProviders.
 * Within this function the intention is to have algorithms that do
 * global communication at the end, so global barriers within the MD loop
 * are as close together as possible.
 *
 * \param[in]     fplog            The log file
 * \param[in]     cr               The communication record
 * \param[in]     inputrec         The input record
 * \param[in]     awh              The Awh module (nullptr if none in use).
 * \param[in]     enforcedRotation Enforced rotation module.
 * \param[in]     step             The current MD step
 * \param[in]     t                The current time
 * \param[in,out] wcycle           Wallcycle accounting struct
 * \param[in,out] forceProviders   Pointer to a list of force providers
 * \param[in]     box              The unit cell
 * \param[in]     x                The coordinates
 * \param[in]     mdatoms          Per atom properties
 * \param[in]     lambda           Array of free-energy lambda values
 * \param[in]     forceFlags       Flags that tell whether we should compute forces/energies/virial
 * \param[in,out] forceWithVirial  Force and virial buffers
 * \param[in,out] enerd            Energy buffer
 * \param[in,out] ed               Essential dynamics pointer
 * \param[in]     bNS              Tells if we did neighbor searching this step, used for ED sampling
 *
 * \todo Remove bNS, which is used incorrectly.
 * \todo Convert all other algorithms called here to ForceProviders.
 */
static void
computeSpecialForces(FILE *fplog,
                     const t_inputrec *inputrec,
                     gmx_enfrot *enforcedRotation,
                     gmx_wallcycle_t wcycle,
                     ForceProviders *forceProviders,
                     gmx::ArrayRef<const gmx::RVec> x,
                     const t_mdatoms *mdatoms,
                     gmx::ForceWithVirial *forceWithVirial,
                     gmx_enerdata_t *enerd,
    const bool computeForces = (forceFlags & GMX_FORCE_FORCES) != 0;

    /* NOTE: Currently all ForceProviders only provide forces.
     * When they also provide energies, remove this conditional.
     */
        gmx::ForceProviderInput  forceProviderInput(x, *mdatoms, t, box, *cr);
        gmx::ForceProviderOutput forceProviderOutput(forceWithVirial, enerd);

        /* Collect forces from modules */
        forceProviders->calculateForces(forceProviderInput, &forceProviderOutput);

    if (inputrec->bPull && pull_have_potential(inputrec->pull_work))
        pull_potential_wrapper(cr, inputrec, box, x,
                               mdatoms, enerd, lambda, t,
        enerd->term[F_COM_PULL] +=
            awh->applyBiasForcesAndUpdateBias(inputrec->ePBC, *mdatoms, box,
                                              t, step, wcycle, fplog);

    rvec *f = as_rvec_array(forceWithVirial->force_.data());

    /* Add the forces from enforced rotation potentials (if any) */
        wallcycle_start(wcycle, ewcROTadd);
        enerd->term[F_COM_PULL] += add_rot_forces(enforcedRotation, f, cr, step, t);
        wallcycle_stop(wcycle, ewcROTadd);

    /* Note that since init_edsam() is called after the initialization
     * of forcerec, edsam doesn't request the noVirSum force buffer.
     * Thus if no other algorithm (e.g. PME) requires it, the forces
     * here will contribute to the virial.
     */
    do_flood(cr, inputrec, as_rvec_array(x.data()), f, ed, box, step, bNS);

    /* Add forces from interactive molecular dynamics (IMD), if bIMD == TRUE. */
    if (inputrec->bIMD && computeForces)
        IMD_apply_forces(inputrec->bIMD, inputrec->imd, cr, f, wcycle);
/*! \brief Launch the prepare_step and spread stages of PME GPU.
 *
 * \param[in]  pmedata  The PME structure
 * \param[in]  box      The box matrix
 * \param[in]  x        Coordinate array
 * \param[in]  flags    Force flags
 * \param[in]  pmeFlags PME flags
 * \param[in]  wcycle   The wallcycle structure
 */
static inline void launchPmeGpuSpread(gmx_pme_t *pmedata,
                                      gmx_wallcycle_t wcycle)
    pme_gpu_prepare_computation(pmedata, (flags & GMX_FORCE_DYNAMICBOX) != 0, box, wcycle, pmeFlags);
    pme_gpu_launch_spread(pmedata, x, wcycle);

/*! \brief Launch the FFT and gather stages of PME GPU
 *
 * This function only implements setting the output forces (no accumulation).
 *
 * \param[in]  pmedata  The PME structure
 * \param[in]  wcycle   The wallcycle structure
 */
static void launchPmeGpuFftAndGather(gmx_pme_t *pmedata,
                                     gmx_wallcycle_t wcycle)
    pme_gpu_launch_complex_transforms(pmedata, wcycle);
    pme_gpu_launch_gather(pmedata, wcycle, PmeForceOutputHandling::Set);
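/* A minimal usage sketch of the intended call order on a PME-GPU step
 * (assuming pmedata, box, x, flags, pmeFlags, wcycle, forceWithVirial and
 * enerd are set up as in do_force_cutsVERLET() below; not part of the build):
 *
 *   launchPmeGpuSpread(pmedata, box, x, flags, pmeFlags, wcycle);
 *   // ... launch nonbonded GPU work here to overlap with the transforms ...
 *   launchPmeGpuFftAndGather(pmedata, wcycle);
 *   pme_gpu_wait_and_reduce(pmedata, pmeFlags, wcycle, &forceWithVirial, enerd);
 */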
/*! \brief
 * Polling wait for either of the PME or nonbonded GPU tasks.
 *
 * Instead of a static order in waiting for GPU tasks, this function
 * polls checking which of the two tasks completes first, and does the
 * associated force buffer reduction overlapped with the other task.
 * By doing that, unlike static scheduling order, it can always overlap
 * one of the reductions, regardless of the GPU task completion order.
 *
 * \param[in]     nbv             Nonbonded verlet structure
 * \param[in,out] pmedata         PME module data
 * \param[in,out] force           Force array to reduce task outputs into.
 * \param[in,out] forceWithVirial Force and virial buffers
 * \param[in,out] fshift          Shift force output vector results are reduced into
 * \param[in,out] enerd           Energy data structure results are reduced into
 * \param[in]     flags           Force flags
 * \param[in]     pmeFlags        PME flags
 * \param[in]     haveOtherWork   Tells whether there is other work than non-bonded in the stream(s)
 * \param[in]     wcycle          The wallcycle structure
 */
static void alternatePmeNbGpuWaitReduce(nonbonded_verlet_t                  *nbv,
                                        gmx::ArrayRefWithPadding<gmx::RVec> *force,
                                        gmx::ForceWithVirial                *forceWithVirial,
                                        gmx_enerdata_t                      *enerd,
                                        gmx_wallcycle_t                      wcycle)
    bool isPmeGpuDone = false;
    bool isNbGpuDone  = false;

    gmx::ArrayRef<const gmx::RVec> pmeGpuForces;

    while (!isPmeGpuDone || !isNbGpuDone)
        GpuTaskCompletion completionType = (isNbGpuDone) ? GpuTaskCompletion::Wait : GpuTaskCompletion::Check;
        isPmeGpuDone = pme_gpu_try_finish_task(pmedata, pmeFlags, wcycle, forceWithVirial, enerd, completionType);

        GpuTaskCompletion completionType = (isPmeGpuDone) ? GpuTaskCompletion::Wait : GpuTaskCompletion::Check;
        wallcycle_start_nocount(wcycle, ewcWAIT_GPU_NB_L);
        isNbGpuDone = Nbnxm::gpu_try_finish_task(nbv->gpu_nbv,
                                                 Nbnxm::AtomLocality::Local,
                                                 enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
                                                 fshift, completionType);
        wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);
        // To get the call count right, when the task finished we
        // issue a start/stop.
        // TODO: move the ewcWAIT_GPU_NB_L cycle counting into nbnxn_gpu_try_finish_task()
        // and ewcNB_XF_BUF_OPS counting into nbnxn_atomdata_add_nbat_f_to_f().
        wallcycle_start(wcycle, ewcWAIT_GPU_NB_L);
        wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);
        nbv->atomdata_add_nbat_f_to_f(Nbnxm::AtomLocality::Local,
                                      as_rvec_array(force->unpaddedArrayRef().data()), wcycle);
/*! \brief Hack structure with force output buffers for do_force */
    ForceOutputs(rvec *f, gmx::ForceWithVirial const forceWithVirial) :
        f(f),
        forceWithVirial(forceWithVirial) {}
    //! Force output buffer used by legacy modules
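    rvec                 *const f;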
    //! Force with direct virial contribution (if there are any)
    gmx::ForceWithVirial        forceWithVirial;
/*! \brief Set up the different force buffers; also does clearing.
 *
 * \param[in]  fr        force record pointer
 * \param[in]  inputrec  input record
 * \param[in]  force     force array
 * \param[in]  bDoForces True if forces are computed this step
 * \param[in]  doVirial  True if the virial is computed this step
 * \param[out] wcycle    wallcycle recording structure
 *
 * \returns              Cleared force output structure
 */
static ForceOutputs
setupForceOutputs(const t_forcerec *fr,
                  const t_inputrec &inputrec,
                  gmx::ArrayRefWithPadding<gmx::RVec> force,
                  const bool bDoForces,
                  gmx_wallcycle_t wcycle)
    wallcycle_sub_start(wcycle, ewcsCLEAR_FORCE_BUFFER);

    /* Temporary solution until all routines take PaddedRVecVector */
    rvec *const f = as_rvec_array(force.unpaddedArrayRef().data());

    /* Clear the short- and long-range forces */
    clear_rvecs_omp(fr->natoms_force_constr, f);

    /* If we need to compute the virial, we might need a separate
     * force buffer for algorithms for which the virial is calculated
     * directly, such as PME. Otherwise, forceWithVirial uses the
     * same force (f in legacy calls) buffer as other algorithms.
     */
    const bool useSeparateForceWithVirialBuffer = (bDoForces && (doVirial && fr->haveDirectVirialContributions));

    /* forceWithVirial uses the local atom range only */
    gmx::ForceWithVirial forceWithVirial(useSeparateForceWithVirialBuffer ?
                                         *fr->forceBufferForDirectVirialContributions : force.unpaddedArrayRef(),
                                         doVirial);

    if (useSeparateForceWithVirialBuffer)
        /* TODO: update comment
         * We only compute forces on local atoms. Note that vsites can
         * spread to non-local atoms, but that part of the buffer is
         * cleared separately in the vsite spreading code.
         */
        clear_rvecs_omp(forceWithVirial.force_.size(), as_rvec_array(forceWithVirial.force_.data()));

    if (inputrec.bPull && pull_have_constraint(inputrec.pull_work))
        clear_pull_forces(inputrec.pull_work);

    wallcycle_sub_stop(wcycle, ewcsCLEAR_FORCE_BUFFER);

    return ForceOutputs(f, forceWithVirial);
static void do_force_cutsVERLET(FILE *fplog,
                                const gmx_multisim_t *ms,
                                const t_inputrec *inputrec,
                                gmx_enfrot *enforcedRotation,
                                gmx_wallcycle_t wcycle,
                                const gmx_localtop_t *top,
                                const gmx_groups_t * /* groups */,
                                matrix box, gmx::ArrayRefWithPadding<gmx::RVec> x,
                                gmx::ArrayRefWithPadding<gmx::RVec> force,
                                const t_mdatoms *mdatoms,
                                gmx_enerdata_t *enerd, t_fcdata *fcd,
                                gmx::PpForceWorkload *ppForceWorkload,
                                interaction_const_t *ic,
                                const gmx_vsite_t *vsite,
                                const DDBalanceRegionHandler &ddBalanceRegionHandler)
    gmx_bool            bStateChanged, bNS, bFillGrid, bCalcCGCM;
    gmx_bool            bDoForces, bUseGPU, bUseOrEmulGPU;
    rvec                vzero, box_diag;
    float               cycles_pme, cycles_wait_gpu;
    nonbonded_verlet_t *nbv = fr->nbv.get();

    bStateChanged = ((flags & GMX_FORCE_STATECHANGED) != 0);
    bNS           = ((flags & GMX_FORCE_NS) != 0);
    bFillGrid     = (bNS && bStateChanged);
    bCalcCGCM     = (bFillGrid && !DOMAINDECOMP(cr));
    bDoForces     = ((flags & GMX_FORCE_FORCES) != 0);
    bUseGPU       = fr->nbv->useGpu();
    bUseOrEmulGPU = bUseGPU || fr->nbv->emulateGpu();

    const auto pmeRunMode = fr->pmedata ? pme_run_mode(fr->pmedata) : PmeRunMode::CPU;
    // TODO slim this conditional down - inputrec and duty checks should mean the same in proper code!
    const bool useGpuPme  = EEL_PME(fr->ic->eeltype) && thisRankHasDuty(cr, DUTY_PME) &&
        ((pmeRunMode == PmeRunMode::GPU) || (pmeRunMode == PmeRunMode::Mixed));
    const int  pmeFlags   = GMX_PME_SPREAD | GMX_PME_SOLVE |
        ((flags & GMX_FORCE_VIRIAL) ? GMX_PME_CALC_ENER_VIR : 0) |
        ((flags & GMX_FORCE_ENERGY) ? GMX_PME_CALC_ENER_VIR : 0) |
        ((flags & GMX_FORCE_FORCES) ? GMX_PME_CALC_F : 0);
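    // Note: both the virial and the energy force flags map to the same
    // GMX_PME_CALC_ENER_VIR bit, since energies and virial are computed
    // together on the PME path.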
    /* At a search step we need to start the first balancing region
     * somewhere early inside the step after communication during domain
     * decomposition (and not during the previous step as usual).
     */
        ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::yes);

    const int homenr = mdatoms->homenr;

    clear_mat(vir_force);

    if (DOMAINDECOMP(cr))
        cg1 = cr->dd->globalAtomGroupIndices.size();

    update_forcerec(fr, box);

    if (inputrecNeedMutot(inputrec))
        /* Calculate total (local) dipole moment in a temporary common array.
         * This makes it possible to sum them over nodes faster.
         */
        calc_mu(start, homenr,
                x.unpaddedArrayRef(), mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,

    if (fr->ePBC != epbcNONE)
        /* Compute shift vectors every step,
         * because of pressure coupling or box deformation!
         */
        if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
            calc_shifts(box, fr->shift_vec);

            put_atoms_in_box_omp(fr->ePBC, box, x.unpaddedArrayRef().subArray(0, homenr));
            inc_nrnb(nrnb, eNR_SHIFTX, homenr);
        else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
            unshift_self(graph, box, as_rvec_array(x.unpaddedArrayRef().data()));

    nbnxn_atomdata_copy_shiftvec((flags & GMX_FORCE_DYNAMICBOX) != 0,
                                 fr->shift_vec, nbv->nbat.get());

    if (!thisRankHasDuty(cr, DUTY_PME))
        /* Send particle coordinates to the pme nodes.
         * Since this is only implemented for domain decomposition
         * and domain decomposition does not use the graph,
         * we do not need to worry about shifting.
         */
        gmx_pme_send_coordinates(cr, box, as_rvec_array(x.unpaddedArrayRef().data()),
                                 lambda[efptCOUL], lambda[efptVDW],
                                 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)) != 0,

        launchPmeGpuSpread(fr->pmedata, box, as_rvec_array(x.unpaddedArrayRef().data()), flags, pmeFlags, wcycle);

    /* do gridding for pair search */
        if (graph && bStateChanged)
            /* Calculate intramolecular shift vectors to make molecules whole */
            mk_mshift(fplog, graph, fr->ePBC, box, as_rvec_array(x.unpaddedArrayRef().data()));
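        /* Only the diagonal of the box is used to set up the pair-search grid */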
        box_diag[XX] = box[XX][XX];
        box_diag[YY] = box[YY][YY];
        box_diag[ZZ] = box[ZZ][ZZ];

        wallcycle_start(wcycle, ewcNS);
        if (!DOMAINDECOMP(cr))
            wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
            nbnxn_put_on_grid(nbv, box,
                              nullptr, 0, mdatoms->homenr, -1,
                              fr->cginfo, x.unpaddedArrayRef(),
            wallcycle_sub_stop(wcycle, ewcsNBS_GRID_LOCAL);
            wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
            nbnxn_put_on_grid_nonlocal(nbv, domdec_zones(cr->dd),
                                       fr->cginfo, x.unpaddedArrayRef());
            wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);

        nbnxn_atomdata_set(nbv->nbat.get(), nbv->nbs.get(), mdatoms, fr->cginfo);

        wallcycle_stop(wcycle, ewcNS);

    /* initialize the GPU atom data and copy shift vector */
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
            Nbnxm::gpu_init_atomdata(nbv->gpu_nbv, nbv->nbat.get());
            Nbnxm::gpu_upload_shiftvec(nbv->gpu_nbv, nbv->nbat.get());
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        if (bNS && fr->gpuBonded)
            /* Now that we have put all atoms on the grid, we can assign bonded
             * interactions to the GPU, where the grid order is
             * needed. Also the xq, f and fshift device buffers have
             * been reallocated if needed, so the bonded code can
             * learn about them. */
            // TODO the xq, f, and fshift buffers are now shared
            // resources, so they should be maintained by a
            // higher-level object than the nb module.
            fr->gpuBonded->updateInteractionListsAndDeviceBuffers(nbnxn_get_gridindices(fr->nbv->nbs.get()),
                                                                  Nbnxm::gpu_get_xq(nbv->gpu_nbv),
                                                                  Nbnxm::gpu_get_f(nbv->gpu_nbv),
                                                                  Nbnxm::gpu_get_fshift(nbv->gpu_nbv));
            ppForceWorkload->haveGpuBondedWork = fr->gpuBonded->haveInteractions();

        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
    /* do local pair search */
        wallcycle_start_nocount(wcycle, ewcNS);
        wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
        /* Note that with a GPU the launch overhead of the list transfer is not timed separately */
        nbv->constructPairlist(Nbnxm::InteractionLocality::Local,
                               &top->excls, step, nrnb);
        wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_LOCAL);
        wallcycle_stop(wcycle, ewcNS);

        nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs.get(), Nbnxm::AtomLocality::Local,
                                        FALSE, as_rvec_array(x.unpaddedArrayRef().data()),
                                        nbv->nbat.get(), wcycle);

        ddBalanceRegionHandler.openBeforeForceComputationGpu();

        wallcycle_start(wcycle, ewcLAUNCH_GPU);

        wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        Nbnxm::gpu_copy_xq_to_gpu(nbv->gpu_nbv, nbv->nbat.get(),
                                  Nbnxm::AtomLocality::Local,
                                  ppForceWorkload->haveGpuBondedWork);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);

        // The bonded work is not split into separate local and non-local parts, so with DD
        // we can only launch the kernel after the non-local coordinates have been received.
        if (ppForceWorkload->haveGpuBondedWork && !havePPDomainDecomposition(cr))
            wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_BONDED);
            fr->gpuBonded->launchKernels(fr, flags, box);
            wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_BONDED);

        /* launch local nonbonded work on GPU */
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        do_nb_verlet(fr, ic, enerd, flags, Nbnxm::InteractionLocality::Local, enbvClearFNo,
                     step, nrnb, wcycle);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);

        // In PME GPU and mixed mode we launch FFT / gather after the
        // X copy/transform to allow overlap as well as after the GPU NB
        // launch to avoid FFT launch overhead hijacking the CPU and delaying
        // the nonbonded kernel.
        launchPmeGpuFftAndGather(fr->pmedata, wcycle);
    /* Communicate coordinates and sum dipole if necessary +
       do non-local pair search */
    if (havePPDomainDecomposition(cr))
            wallcycle_start_nocount(wcycle, ewcNS);
            wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);
            /* Note that with a GPU the launch overhead of the list transfer is not timed separately */
            nbv->constructPairlist(Nbnxm::InteractionLocality::NonLocal,
                                   &top->excls, step, nrnb);
            wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);
            wallcycle_stop(wcycle, ewcNS);

            dd_move_x(cr->dd, box, x.unpaddedArrayRef(), wcycle);

            nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs.get(), Nbnxm::AtomLocality::NonLocal,
                                            FALSE, as_rvec_array(x.unpaddedArrayRef().data()),
                                            nbv->nbat.get(), wcycle);

            wallcycle_start(wcycle, ewcLAUNCH_GPU);

            /* launch non-local nonbonded tasks on GPU */
            wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
            Nbnxm::gpu_copy_xq_to_gpu(nbv->gpu_nbv, nbv->nbat.get(),
                                      Nbnxm::AtomLocality::NonLocal,
                                      ppForceWorkload->haveGpuBondedWork);
            wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);

            if (ppForceWorkload->haveGpuBondedWork)
                wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_BONDED);
                fr->gpuBonded->launchKernels(fr, flags, box);
                wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_BONDED);

            wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
            do_nb_verlet(fr, ic, enerd, flags, Nbnxm::InteractionLocality::NonLocal, enbvClearFNo,
                         step, nrnb, wcycle);
            wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);

            wallcycle_stop(wcycle, ewcLAUNCH_GPU);

        /* launch D2H copy-back F */
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        if (havePPDomainDecomposition(cr))
            Nbnxm::gpu_launch_cpyback(nbv->gpu_nbv, nbv->nbat.get(),
                                      flags, Nbnxm::AtomLocality::NonLocal, ppForceWorkload->haveGpuBondedWork);
        Nbnxm::gpu_launch_cpyback(nbv->gpu_nbv, nbv->nbat.get(),
                                  flags, Nbnxm::AtomLocality::Local, ppForceWorkload->haveGpuBondedWork);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);

        if (ppForceWorkload->haveGpuBondedWork && (flags & GMX_FORCE_ENERGY))
            wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_BONDED);
            fr->gpuBonded->launchEnergyTransfer();
            wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_BONDED);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);

    if (bStateChanged && inputrecNeedMutot(inputrec))
            gmx_sumd(2*DIM, mu, cr);

            ddBalanceRegionHandler.reopenRegionCpu();

        for (i = 0; i < 2; i++)
            for (j = 0; j < DIM; j++)
                fr->mu_tot[i][j] = mu[i*DIM + j];

    if (fr->efep == efepNO)
        copy_rvec(fr->mu_tot[0], mu_tot);
        for (j = 0; j < DIM; j++)
                (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] +
                lambda[efptCOUL]*fr->mu_tot[1][j];

    /* Reset energies */
    reset_enerdata(enerd);
    clear_rvecs(SHIFTS, fr->fshift);

    if (DOMAINDECOMP(cr) && !thisRankHasDuty(cr, DUTY_PME))
        wallcycle_start(wcycle, ewcPPDURINGPME);
        dd_force_flop_start(cr->dd, nrnb);

        wallcycle_start(wcycle, ewcROT);
        do_rotation(cr, enforcedRotation, box, as_rvec_array(x.unpaddedArrayRef().data()), t, step, bNS);
        wallcycle_stop(wcycle, ewcROT);

    /* Start the force cycle counter.
     * Note that a different counter is used for dynamic load balancing.
     */
    wallcycle_start(wcycle, ewcFORCE);

    // set up and clear force outputs
    struct ForceOutputs forceOut = setupForceOutputs(fr, *inputrec, force, bDoForces, ((flags & GMX_FORCE_VIRIAL) != 0), wcycle);

    /* We calculate the non-bonded forces, when done on the CPU, here.
     * We do this before calling do_force_lowlevel, because in that
     * function, the listed forces are calculated before PME, which
     * does communication. With this order, non-bonded and listed
     * force calculation imbalance can be balanced out by the domain
     * decomposition load balancing.
     */
        do_nb_verlet(fr, ic, enerd, flags, Nbnxm::InteractionLocality::Local, enbvClearFYes,
                     step, nrnb, wcycle);

    if (fr->efep != efepNO)
        /* Calculate the local and non-local free energy interactions here.
         * Happens here on the CPU both with and without GPU.
         */
        wallcycle_sub_start(wcycle, ewcsNONBONDED);
        nbv->dispatchFreeEnergyKernel(Nbnxm::InteractionLocality::Local,
                                      fr, as_rvec_array(x.unpaddedArrayRef().data()), forceOut.f, *mdatoms,
                                      inputrec->fepvals, lambda,
                                      enerd, flags, nrnb);

        if (havePPDomainDecomposition(cr))
            nbv->dispatchFreeEnergyKernel(Nbnxm::InteractionLocality::NonLocal,
                                          fr, as_rvec_array(x.unpaddedArrayRef().data()), forceOut.f, *mdatoms,
                                          inputrec->fepvals, lambda,
                                          enerd, flags, nrnb);

        wallcycle_sub_stop(wcycle, ewcsNONBONDED);

        if (havePPDomainDecomposition(cr))
            do_nb_verlet(fr, ic, enerd, flags, Nbnxm::InteractionLocality::NonLocal, enbvClearFNo,
                         step, nrnb, wcycle);
    /* Add all the non-bonded forces to the normal force array.
     * This can be split into a local and a non-local part when overlapping
     * communication with calculation with domain decomposition.
     */
    wallcycle_stop(wcycle, ewcFORCE);

    nbv->atomdata_add_nbat_f_to_f(Nbnxm::AtomLocality::All, forceOut.f, wcycle);

    wallcycle_start_nocount(wcycle, ewcFORCE);

    /* If there are multiple fshift output buffers we need to reduce them */
    if (flags & GMX_FORCE_VIRIAL)
        /* This is not in a subcounter because it takes a
           negligible and constant-sized amount of time */
        nbnxn_atomdata_add_nbat_fshift_to_fshift(nbv->nbat.get(),

    /* update QMMMrec, if necessary */
        update_QMMMrec(cr, fr, as_rvec_array(x.unpaddedArrayRef().data()), mdatoms, box);

    /* Compute the bonded and non-bonded energies and optionally forces */
    do_force_lowlevel(fr, inputrec, &(top->idef),
                      cr, ms, nrnb, wcycle, mdatoms,
                      as_rvec_array(x.unpaddedArrayRef().data()), hist, forceOut.f, &forceOut.forceWithVirial, enerd, fcd,
                      box, inputrec->fepvals, lambda, graph, &(top->excls), fr->mu_tot,
                      &cycles_pme, ddBalanceRegionHandler);

    wallcycle_stop(wcycle, ewcFORCE);
    computeSpecialForces(fplog, cr, inputrec, awh, enforcedRotation,
                         fr->forceProviders, box, x.unpaddedArrayRef(), mdatoms, lambda,
                         flags, &forceOut.forceWithVirial, enerd,

    /* wait for non-local forces (or calculate in emulation mode) */
    if (havePPDomainDecomposition(cr))
            wallcycle_start(wcycle, ewcWAIT_GPU_NB_NL);
            Nbnxm::gpu_wait_finish_task(nbv->gpu_nbv,
                                        flags, Nbnxm::AtomLocality::NonLocal,
                                        ppForceWorkload->haveGpuBondedWork,
                                        enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
            cycles_wait_gpu += wallcycle_stop(wcycle, ewcWAIT_GPU_NB_NL);

            wallcycle_start_nocount(wcycle, ewcFORCE);
            do_nb_verlet(fr, ic, enerd, flags, Nbnxm::InteractionLocality::NonLocal, enbvClearFYes,
                         step, nrnb, wcycle);
            wallcycle_stop(wcycle, ewcFORCE);

        nbv->atomdata_add_nbat_f_to_f(Nbnxm::AtomLocality::NonLocal,
                                      forceOut.f, wcycle);

    if (havePPDomainDecomposition(cr))
        /* We are done with the CPU compute.
         * We will now communicate the non-local forces.
         * If we use a GPU this will overlap with GPU work, so in that case
         * we do not close the DD force balancing region here.
         */
        ddBalanceRegionHandler.closeAfterForceComputationCpu();

        dd_move_f(cr->dd, force.unpaddedArrayRef(), fr->fshift, wcycle);

    // With both nonbonded and PME offloaded to a GPU on the same rank, we use
    // an alternating wait/reduction scheme.
    bool alternateGpuWait = (!c_disableAlternatingWait && useGpuPme && bUseGPU && !DOMAINDECOMP(cr));
    if (alternateGpuWait)
        alternatePmeNbGpuWaitReduce(fr->nbv.get(), fr->pmedata, &force, &forceOut.forceWithVirial, fr->fshift, enerd,
                                    flags, pmeFlags, ppForceWorkload->haveGpuBondedWork, wcycle);

    if (!alternateGpuWait && useGpuPme)
        pme_gpu_wait_and_reduce(fr->pmedata, pmeFlags, wcycle, &forceOut.forceWithVirial, enerd);

    /* Wait for local GPU NB outputs on the non-alternating wait path */
    if (!alternateGpuWait && bUseGPU)
        /* Measured overhead on CUDA and OpenCL with(out) GPU sharing
         * is between 0.5 and 1.5 Mcycles. So 2 Mcycles is an overestimate,
         * but even with a step of 0.1 ms the difference is less than 1%
         */
        const float gpuWaitApiOverheadMargin = 2e6f; /* cycles */

        wallcycle_start(wcycle, ewcWAIT_GPU_NB_L);
        Nbnxm::gpu_wait_finish_task(nbv->gpu_nbv,
                                    flags, Nbnxm::AtomLocality::Local, ppForceWorkload->haveGpuBondedWork,
                                    enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
        float cycles_tmp = wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);

        if (ddBalanceRegionHandler.useBalancingRegion())
            DdBalanceRegionWaitedForGpu waitedForGpu = DdBalanceRegionWaitedForGpu::yes;
            if (bDoForces && cycles_tmp <= gpuWaitApiOverheadMargin)
                /* We measured only a few cycles, so it could be that the kernel
                 * and transfer finished earlier and there was no actual
                 * wait time, only API call overhead.
                 * Then the actual time could be anywhere between 0 and
                 * cycles_wait_est. We will use half of cycles_wait_est.
                 */
                waitedForGpu = DdBalanceRegionWaitedForGpu::no;
            ddBalanceRegionHandler.closeAfterForceComputationGpu(cycles_wait_gpu, waitedForGpu);

    if (fr->nbv->emulateGpu())
        // NOTE: emulation kernel is not included in the balancing region,
        // but emulation mode does not target performance anyway
        wallcycle_start_nocount(wcycle, ewcFORCE);
        do_nb_verlet(fr, ic, enerd, flags, Nbnxm::InteractionLocality::Local,
                     DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes,
                     step, nrnb, wcycle);
        wallcycle_stop(wcycle, ewcFORCE);
        pme_gpu_reinit_computation(fr->pmedata, wcycle);

        /* now clear the GPU outputs while we finish the step on the CPU */
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        Nbnxm::gpu_clear_outputs(nbv->gpu_nbv, flags);

        if (nbv->pairlistSets().isDynamicPruningStepGpu(step))
            nbv->dispatchPruneKernelGpu(step);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);

    if (ppForceWorkload->haveGpuBondedWork && (flags & GMX_FORCE_ENERGY))
        wallcycle_start(wcycle, ewcWAIT_GPU_BONDED);
        // in principle this should be included in the DD balancing region,
        // but generally it is infrequent so we'll omit it for the sake of
        // clearer timings
        fr->gpuBonded->accumulateEnergyTerms(enerd);
        wallcycle_stop(wcycle, ewcWAIT_GPU_BONDED);

        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_BONDED);
        fr->gpuBonded->clearEnergies();
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_BONDED);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);

    /* Do the nonbonded GPU (or emulation) force buffer reduction
     * on the non-alternating path. */
    if (bUseOrEmulGPU && !alternateGpuWait)
        nbv->atomdata_add_nbat_f_to_f(Nbnxm::AtomLocality::Local,
                                      forceOut.f, wcycle);
    if (DOMAINDECOMP(cr))
        dd_force_flop_stop(cr->dd, nrnb);

    /* If we have NoVirSum forces, but we do not calculate the virial,
     * we sum fr->f_novirsum=forceOut.f later.
     */
    if (vsite && !(fr->haveDirectVirialContributions && !(flags & GMX_FORCE_VIRIAL)))
        spread_vsite_f(vsite, as_rvec_array(x.unpaddedArrayRef().data()), forceOut.f, fr->fshift, FALSE, nullptr, nrnb,
                       &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr, wcycle);

    if (flags & GMX_FORCE_VIRIAL)
        /* Calculation of the virial must be done after vsites! */
        calc_virial(0, mdatoms->homenr, as_rvec_array(x.unpaddedArrayRef().data()), forceOut.f,
                    vir_force, graph, box, nrnb, fr, inputrec->ePBC);

    if (PAR(cr) && !thisRankHasDuty(cr, DUTY_PME))
        /* In case of node-splitting, the PP nodes receive the long-range
         * forces, virial and energy from the PME nodes here.
         */
        pme_receive_force_ener(cr, &forceOut.forceWithVirial, enerd, wcycle);

        post_process_forces(cr, step, nrnb, wcycle,
                            top, box, as_rvec_array(x.unpaddedArrayRef().data()), forceOut.f, &forceOut.forceWithVirial,
                            vir_force, mdatoms, graph, fr, vsite,

    if (flags & GMX_FORCE_ENERGY)
        /* Sum the potential energy terms from group contributions */
        sum_epot(&(enerd->grpp), enerd->term);

        if (!EI_TPI(inputrec->eI))
            checkPotentialEnergyValidity(step, *enerd, *inputrec);
static void do_force_cutsGROUP(FILE *fplog,
                               const t_commrec *cr,
                               const gmx_multisim_t *ms,
                               const t_inputrec *inputrec,
                               gmx_enfrot *enforcedRotation,
                               gmx_wallcycle_t wcycle,
                               gmx_localtop_t *top,
                               const gmx_groups_t *groups,
                               matrix box, gmx::ArrayRefWithPadding<gmx::RVec> x,
                               gmx::ArrayRefWithPadding<gmx::RVec> force,
                               const t_mdatoms *mdatoms,
                               gmx_enerdata_t *enerd,
                               const gmx_vsite_t *vsite,
                               const DDBalanceRegionHandler &ddBalanceRegionHandler)
    gmx_bool  bStateChanged, bNS, bFillGrid, bCalcCGCM;

    const int start  = 0;
    const int homenr = mdatoms->homenr;

    clear_mat(vir_force);

    if (DOMAINDECOMP(cr))
        cg1 = cr->dd->globalAtomGroupIndices.size();

    bStateChanged = ((flags & GMX_FORCE_STATECHANGED) != 0);
    bNS           = ((flags & GMX_FORCE_NS) != 0);
    /* Should we perform the long-range nonbonded evaluation inside the neighbor searching? */
    bFillGrid     = (bNS && bStateChanged);
    bCalcCGCM     = (bFillGrid && !DOMAINDECOMP(cr));
    bDoForces     = ((flags & GMX_FORCE_FORCES) != 0);

    update_forcerec(fr, box);

    if (inputrecNeedMutot(inputrec))
        /* Calculate total (local) dipole moment in a temporary common array.
         * This makes it possible to sum them over nodes faster.
         */
        calc_mu(start, homenr,
                x.unpaddedArrayRef(), mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,

    if (fr->ePBC != epbcNONE)
        /* Compute shift vectors every step,
         * because of pressure coupling or box deformation!
         */
        if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
            calc_shifts(box, fr->shift_vec);

            put_charge_groups_in_box(fplog, cg0, cg1, fr->ePBC, box,
                                     &(top->cgs), as_rvec_array(x.unpaddedArrayRef().data()), fr->cg_cm);
            inc_nrnb(nrnb, eNR_CGCM, homenr);
            inc_nrnb(nrnb, eNR_RESETX, cg1-cg0);
        else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
            unshift_self(graph, box, as_rvec_array(x.unpaddedArrayRef().data()));

            calc_cgcm(fplog, cg0, cg1, &(top->cgs), as_rvec_array(x.unpaddedArrayRef().data()), fr->cg_cm);
            inc_nrnb(nrnb, eNR_CGCM, homenr);

    if (bCalcCGCM && gmx_debug_at)
        pr_rvecs(debug, 0, "cgcm", fr->cg_cm, top->cgs.nr);
    if (!thisRankHasDuty(cr, DUTY_PME))
        /* Send particle coordinates to the pme nodes.
         * Since this is only implemented for domain decomposition
         * and domain decomposition does not use the graph,
         * we do not need to worry about shifting.
         */
        gmx_pme_send_coordinates(cr, box, as_rvec_array(x.unpaddedArrayRef().data()),
                                 lambda[efptCOUL], lambda[efptVDW],
                                 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)) != 0,
#endif /* GMX_MPI */

    /* Communicate coordinates and sum dipole if necessary */
    if (DOMAINDECOMP(cr))
        dd_move_x(cr->dd, box, x.unpaddedArrayRef(), wcycle);

        /* No GPU support, no move_x overlap, so reopen the balance region here */
        ddBalanceRegionHandler.reopenRegionCpu();

    if (inputrecNeedMutot(inputrec))
            gmx_sumd(2*DIM, mu, cr);

            ddBalanceRegionHandler.reopenRegionCpu();

        for (i = 0; i < 2; i++)
            for (j = 0; j < DIM; j++)
                fr->mu_tot[i][j] = mu[i*DIM + j];

    if (fr->efep == efepNO)
        copy_rvec(fr->mu_tot[0], mu_tot);
        for (j = 0; j < DIM; j++)
                (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] + lambda[efptCOUL]*fr->mu_tot[1][j];

    /* Reset energies */
    reset_enerdata(enerd);
    clear_rvecs(SHIFTS, fr->fshift);

        wallcycle_start(wcycle, ewcNS);

        if (graph && bStateChanged)
            /* Calculate intramolecular shift vectors to make molecules whole */
            mk_mshift(fplog, graph, fr->ePBC, box, as_rvec_array(x.unpaddedArrayRef().data()));

        /* Do the actual neighbour searching */
                 groups, top, mdatoms,
                 cr, nrnb, bFillGrid);

        wallcycle_stop(wcycle, ewcNS);

    if (DOMAINDECOMP(cr) && !thisRankHasDuty(cr, DUTY_PME))
        wallcycle_start(wcycle, ewcPPDURINGPME);
        dd_force_flop_start(cr->dd, nrnb);

        wallcycle_start(wcycle, ewcROT);
        do_rotation(cr, enforcedRotation, box, as_rvec_array(x.unpaddedArrayRef().data()), t, step, bNS);
        wallcycle_stop(wcycle, ewcROT);

    /* Start the force cycle counter.
     * Note that a different counter is used for dynamic load balancing.
     */
    wallcycle_start(wcycle, ewcFORCE);

    // set up and clear force outputs
    struct ForceOutputs forceOut = setupForceOutputs(fr, *inputrec, force, bDoForces, ((flags & GMX_FORCE_VIRIAL) != 0), wcycle);

    if (inputrec->bPull && pull_have_constraint(inputrec->pull_work))
        clear_pull_forces(inputrec->pull_work);
    /* update QMMMrec, if necessary */
        update_QMMMrec(cr, fr, as_rvec_array(x.unpaddedArrayRef().data()), mdatoms, box);

    /* Compute the bonded and non-bonded energies and optionally forces */
    do_force_lowlevel(fr, inputrec, &(top->idef),
                      cr, ms, nrnb, wcycle, mdatoms,
                      as_rvec_array(x.unpaddedArrayRef().data()), hist, forceOut.f, &forceOut.forceWithVirial, enerd, fcd,
                      box, inputrec->fepvals, lambda,
                      graph, &(top->excls), fr->mu_tot,
                      &cycles_pme, ddBalanceRegionHandler);

    wallcycle_stop(wcycle, ewcFORCE);

    if (DOMAINDECOMP(cr))
        dd_force_flop_stop(cr->dd, nrnb);

        ddBalanceRegionHandler.closeAfterForceComputationCpu();

    computeSpecialForces(fplog, cr, inputrec, awh, enforcedRotation,
                         fr->forceProviders, box, x.unpaddedArrayRef(), mdatoms, lambda,
                         flags, &forceOut.forceWithVirial, enerd,

    /* Communicate the forces */
    if (DOMAINDECOMP(cr))
        dd_move_f(cr->dd, force.unpaddedArrayRef(), fr->fshift, wcycle);
        /* Do we need to communicate the separate force array
         * for terms that do not contribute to the single sum virial?
         * Position restraints and electric fields do not introduce
         * inter-cg forces, only full electrostatics methods do.
         * When we do not calculate the virial, fr->f_novirsum = forceOut.f,
         * so we have already communicated these forces.
         */
        if (EEL_FULL(fr->ic->eeltype) && cr->dd->n_intercg_excl &&
            (flags & GMX_FORCE_VIRIAL))
            dd_move_f(cr->dd, forceOut.forceWithVirial.force_, nullptr, wcycle);

    /* If we have NoVirSum forces, but we do not calculate the virial,
     * we sum fr->f_novirsum=forceOut.f later.
     */
    if (vsite && !(fr->haveDirectVirialContributions && !(flags & GMX_FORCE_VIRIAL)))
        spread_vsite_f(vsite, as_rvec_array(x.unpaddedArrayRef().data()), forceOut.f, fr->fshift, FALSE, nullptr, nrnb,
                       &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr, wcycle);

    if (flags & GMX_FORCE_VIRIAL)
        /* Calculation of the virial must be done after vsites! */
        calc_virial(0, mdatoms->homenr, as_rvec_array(x.unpaddedArrayRef().data()), forceOut.f,
                    vir_force, graph, box, nrnb, fr, inputrec->ePBC);

    if (PAR(cr) && !thisRankHasDuty(cr, DUTY_PME))
        /* In case of node-splitting, the PP nodes receive the long-range
         * forces, virial and energy from the PME nodes here.
         */
        pme_receive_force_ener(cr, &forceOut.forceWithVirial, enerd, wcycle);

        post_process_forces(cr, step, nrnb, wcycle,
                            top, box, as_rvec_array(x.unpaddedArrayRef().data()), forceOut.f, &forceOut.forceWithVirial,
                            vir_force, mdatoms, graph, fr, vsite,

    if (flags & GMX_FORCE_ENERGY)
        /* Sum the potential energy terms from group contributions */
        sum_epot(&(enerd->grpp), enerd->term);

        if (!EI_TPI(inputrec->eI))
            checkPotentialEnergyValidity(step, *enerd, *inputrec);
void do_force(FILE                                *fplog,
              const t_commrec                     *cr,
              const gmx_multisim_t                *ms,
              const t_inputrec                    *inputrec,
              gmx_enfrot                          *enforcedRotation,
              gmx_wallcycle_t                      wcycle,
              gmx_localtop_t                      *top,
              const gmx_groups_t                  *groups,
              gmx::ArrayRefWithPadding<gmx::RVec>  x,     //NOLINT(performance-unnecessary-value-param)
              gmx::ArrayRefWithPadding<gmx::RVec>  force, //NOLINT(performance-unnecessary-value-param)
              const t_mdatoms                     *mdatoms,
              gmx_enerdata_t                      *enerd,
              gmx::ArrayRef<real>                  lambda,
              gmx::PpForceWorkload                *ppForceWorkload,
              const gmx_vsite_t                   *vsite,
              const DDBalanceRegionHandler        &ddBalanceRegionHandler)
    /* modify force flag if not doing nonbonded */
    if (!fr->bNonbonded)
        flags &= ~GMX_FORCE_NONBONDED;

    switch (inputrec->cutoff_scheme)
        case ecutsVERLET:
            do_force_cutsVERLET(fplog, cr, ms, inputrec,
                                awh, enforcedRotation, step, nrnb, wcycle,
                                lambda.data(), graph,
                                ddBalanceRegionHandler);
            break;
        case ecutsGROUP:
            do_force_cutsGROUP(fplog, cr, ms, inputrec,
                               awh, enforcedRotation, step, nrnb, wcycle,
                               lambda.data(), graph,
                               ddBalanceRegionHandler);
            break;
        default:
            gmx_incons("Invalid cut-off scheme passed!");

    /* In case we don't have constraints and are using GPUs, the next balancing
     * region starts here.
     * Some "special" work at the end of do_force_cuts?, such as vsite spread,
     * virial calculation and COM pulling, is thus not included in
     * the balance timing, which is ok as most tasks do communication.
     */
    ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::no);
void do_constrain_first(FILE *fplog, gmx::Constraints *constr,
                        const t_inputrec *ir, const t_mdatoms *md,
    int  i, m, start, end;
    real dt = ir->delta_t;

    /* We need to allocate one element extra, since we might use
     * (unaligned) 4-wide SIMD loads to access rvec entries.
     */
    snew(savex, state->natoms + 1);

    fprintf(debug, "vcm: start=%d, homenr=%d, end=%d\n",
            start, md->homenr, end);

    /* Do a first constraint pass to reset particles... */
    step = ir->init_step;
        char buf[STEPSTRSIZE];
        fprintf(fplog, "\nConstraining the starting coordinates (step %s)\n",
                gmx_step_str(step, buf));

    /* constrain the current position */
    constr->apply(TRUE, FALSE,
                  state->x.rvec_array(), state->x.rvec_array(), nullptr,
                  state->lambda[efptBONDED], &dvdl_dum,
                  nullptr, nullptr, gmx::ConstraintVariable::Positions);

    /* constrain the initial velocity, and save it */
    /* also may be useful if we need the ekin from the halfstep for velocity verlet */
    constr->apply(TRUE, FALSE,
                  state->x.rvec_array(), state->v.rvec_array(), state->v.rvec_array(),
                  state->lambda[efptBONDED], &dvdl_dum,
                  nullptr, nullptr, gmx::ConstraintVariable::Velocities);

    /* constrain the initial velocities at t-dt/2 */
    if (EI_STATE_VELOCITY(ir->eI) && ir->eI != eiVV)
        auto x = makeArrayRef(state->x).subArray(start, end);
        auto v = makeArrayRef(state->v).subArray(start, end);
        for (i = start; (i < end); i++)
            for (m = 0; (m < DIM); m++)
                /* Reverse the velocity */
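                v[i][m] = -v[i][m];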
                /* Store the position at t-dt in buf */
                savex[i][m] = x[i][m] + dt*v[i][m];

        /* Shake the positions at t=-dt with the positions at t=0
         * as reference coordinates.
         */
            char buf[STEPSTRSIZE];
            fprintf(fplog, "\nConstraining the coordinates at t0-dt (step %s)\n",
                    gmx_step_str(step, buf));

        constr->apply(TRUE, FALSE,
                      state->x.rvec_array(), savex, nullptr,
                      state->lambda[efptBONDED], &dvdl_dum,
                      state->v.rvec_array(), nullptr, gmx::ConstraintVariable::Positions);

        for (i = start; i < end; i++)
            for (m = 0; m < DIM; m++)
                /* Re-reverse the velocities */
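                v[i][m] = -v[i][m];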
void put_atoms_in_box_omp(int ePBC, const matrix box, gmx::ArrayRef<gmx::RVec> x)
    nth = gmx_omp_nthreads_get(emntDefault);

#pragma omp parallel for num_threads(nth) schedule(static)
    for (t = 0; t < nth; t++)
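            /* Distribute the atoms over the threads in contiguous chunks:
             * thread t handles indices [natoms*t/nth, natoms*(t+1)/nth). */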
            size_t natoms = x.size();
            size_t offset = (natoms*t      )/nth;
            size_t len    = (natoms*(t + 1))/nth - offset;
            put_atoms_in_box(ePBC, box, x.subArray(offset, len));
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;

void initialize_lambdas(FILE               *fplog,
                        const t_inputrec   &ir,
                        gmx::ArrayRef<real> lambda,
    /* TODO: Clean up initialization of fep_state and lambda in
       t_state. This function works, but could probably use a logic
       rewrite to keep all the different types of efep straight. */
    if ((ir.efep == efepNO) && (!ir.bSimTemp))

    const t_lambda *fep = ir.fepvals;
    *fep_state = fep->init_fep_state; /* this might overwrite the checkpoint
                                         if checkpoint is set -- a kludge is in for now
                                       */

    for (int i = 0; i < efptNR; i++)
        /* overwrite lambda state with init_lambda for now for backwards compatibility */
        if (fep->init_lambda >= 0) /* if it's -1, it was never initialized */
            thisLambda = fep->init_lambda;
        else
            thisLambda = fep->all_lambda[i][fep->init_fep_state];
        lambda[i] = thisLambda;
        if (lam0 != nullptr)
            lam0[i] = thisLambda;

        /* need to rescale control temperatures to match current state */
        for (int i = 0; i < ir.opts.ngtc; i++)
            if (ir.opts.ref_t[i] > 0)
                ir.opts.ref_t[i] = ir.simtempvals->temperatures[fep->init_fep_state];

    /* Send to the log the information on the current lambdas */
    if (fplog != nullptr)
        fprintf(fplog, "Initial vector of lambda components:[ ");
        for (const auto &l : lambda)
            fprintf(fplog, "%10.4f ", l);
        fprintf(fplog, "]\n");