/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2017,2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief Common functions for the different NBNXN GPU implementations.
 *
 * \author Szilard Pall <pall.szilard@gmail.com>
 *
 * \ingroup module_nbnxm
 */
#ifndef GMX_NBNXM_GPU_COMMON_H
#define GMX_NBNXM_GPU_COMMON_H

#include "config.h"

#include <string>

#if GMX_GPU == GMX_GPU_CUDA
#    include "cuda/nbnxm_cuda_types.h"
#endif
#if GMX_GPU == GMX_GPU_OPENCL
#    include "opencl/nbnxm_ocl_types.h"
#endif
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/listed_forces/gpubonded.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/stringutil.h"
#include "gpu_common_utils.h"
#include "nbnxm_gpu.h"

namespace gmx
{
class GpuBonded;
}

namespace Nbnxm
{

/*! \brief Check that atom locality values are valid for the GPU module.
 *
 *  In the GPU module, atom locality "all" is not supported; the local and
 *  non-local ranges are treated separately.
 *
 *  \param[in] atomLocality Atom locality specifier
 */
static inline void validateGpuAtomLocality(const AtomLocality atomLocality)
{
    std::string str = gmx::formatString(
            "Invalid atom locality passed (%d); valid here is only "
            "local (%d) or nonlocal (%d)",
            static_cast<int>(atomLocality),
            static_cast<int>(AtomLocality::Local),
            static_cast<int>(AtomLocality::NonLocal));

    GMX_ASSERT(atomLocality == AtomLocality::Local || atomLocality == AtomLocality::NonLocal, str.c_str());
}

/*! \brief Convert atom locality to interaction locality.
 *
 *   In the current implementation this is a straightforward conversion:
 *   local to local, non-local to non-local.
 *
 *   \param[in] atomLocality Atom locality specifier
 *   \returns                Interaction locality corresponding to the atom locality passed.
 */
static inline InteractionLocality gpuAtomToInteractionLocality(const AtomLocality atomLocality)
{
    validateGpuAtomLocality(atomLocality);

    /* determine interaction locality from atom locality */
    if (atomLocality == AtomLocality::Local)
    {
        return InteractionLocality::Local;
    }
    else if (atomLocality == AtomLocality::NonLocal)
    {
        return InteractionLocality::NonLocal;
    }
    else
    {
        gmx_incons("Wrong locality");
    }
}
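
/* Illustrative mapping of the conversion above (hypothetical call sites, shown
 * here only to document the behavior):
 *
 *     gpuAtomToInteractionLocality(AtomLocality::Local);    // -> InteractionLocality::Local
 *     gpuAtomToInteractionLocality(AtomLocality::NonLocal); // -> InteractionLocality::NonLocal
 *     gpuAtomToInteractionLocality(AtomLocality::All);      // asserts in validateGpuAtomLocality()
 */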

/*! \brief Set up internal flags that indicate what type of short-range work there is.
 *
 * See nbnxm_gpu.h for full documentation.
 */
//NOLINTNEXTLINE(misc-definitions-in-headers)
void setupGpuShortRangeWork(NbnxmGpu* nb, const gmx::GpuBonded* gpuBonded, const gmx::InteractionLocality iLocality)
{
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");

    // There is short-range work if the pair list for the provided
    // interaction locality contains entries or if there is any
    // bonded work (as this is not split into local/nonlocal).
    nb->haveWork[iLocality] = ((nb->plist[iLocality]->nsci != 0)
                               || (gpuBonded != nullptr && gpuBonded->haveInteractions()));
}
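
/* Sketch of the expected call pattern (hypothetical caller): after the pair
 * lists have been (re)built, record per locality whether the GPU has any
 * short-range work, so later launches/waits can be skipped on empty domains.
 *
 *     setupGpuShortRangeWork(nb, gpuBonded, gmx::InteractionLocality::Local);
 *     setupGpuShortRangeWork(nb, gpuBonded, gmx::InteractionLocality::NonLocal);
 */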

/*! \brief Returns true if there is GPU short-range work for the given interaction locality.
 *
 * Note that, unlike the nonbonded tasks, the bonded tasks are not split into
 * local/non-local; therefore, if there are GPU-offloaded bonded interactions,
 * this function returns true for all interaction localities.
 *
 * \param[in] nb        Reference to the nonbonded GPU data structure
 * \param[in] iLocality Interaction locality identifier
 */
static bool haveGpuShortRangeWork(const NbnxmGpu& nb, const gmx::InteractionLocality iLocality)
{
    return nb.haveWork[iLocality];
}

//NOLINTNEXTLINE(misc-definitions-in-headers)
bool haveGpuShortRangeWork(const NbnxmGpu* nb, const gmx::AtomLocality aLocality)
{
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");

    return haveGpuShortRangeWork(*nb, gpuAtomToInteractionLocality(aLocality));
}
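
/* Illustrative query (hypothetical caller): before waiting on the non-local
 * stream, check whether any non-local short-range work was recorded.
 *
 *     if (haveGpuShortRangeWork(nb, gmx::AtomLocality::NonLocal))
 *     {
 *         // wait on / reduce results from the non-local stream
 *     }
 */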

/*! \brief Calculate atom range and return start index and length.
 *
 * \param[in]  atomData       Atom descriptor data structure
 * \param[in]  atomLocality   Atom locality specifier
 * \param[out] atomRangeBegin Starting index of the atom range in the atom data array.
 * \param[out] atomRangeLen   Atom range length in the atom data array.
 */
template<typename AtomDataT>
static inline void getGpuAtomRange(const AtomDataT*   atomData,
                                   const AtomLocality atomLocality,
                                   int*               atomRangeBegin,
                                   int*               atomRangeLen)
{
    GMX_ASSERT(atomData, "Need a valid atomdata object");
    validateGpuAtomLocality(atomLocality);

    /* calculate the atom data index range based on locality */
    if (atomLocality == AtomLocality::Local)
    {
        *atomRangeBegin = 0;
        *atomRangeLen   = atomData->natoms_local;
    }
    else
    {
        *atomRangeBegin = atomData->natoms_local;
        *atomRangeLen   = atomData->natoms - atomData->natoms_local;
    }
}
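
/* Worked example of the range computation above, assuming a hypothetical
 * atom-data object with natoms = 1000 and natoms_local = 600:
 *
 *     int begin, len;
 *     getGpuAtomRange(adat, AtomLocality::Local, &begin, &len);    // begin = 0,   len = 600
 *     getGpuAtomRange(adat, AtomLocality::NonLocal, &begin, &len); // begin = 600, len = 400
 */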

/*! \brief Count pruning kernel time if either kernel has been triggered.
 *
 *  We do the accounting for either of the two pruning kernel flavors:
 *   - 1st pass prune: ran during the current step (prior to the force kernel);
 *   - rolling prune:  ran at the end of the previous step (prior to the current step H2D xq);
 *
 * Note that the resetting of cu_timers_t::didPrune and cu_timers_t::didRollingPrune should happen
 * after calling this function.
 *
 * \param[in]    timers  Structs with GPU timer objects
 * \param[inout] timings GPU task timing data
 * \param[in]    iloc    Interaction locality
 */
template<typename GpuTimers>
static void countPruneKernelTime(GpuTimers*                 timers,
                                 gmx_wallclock_gpu_nbnxn_t* timings,
                                 const InteractionLocality  iloc)
{
    gpu_timers_t::Interaction& iTimers = timers->interaction[iloc];

    // We might have not done any pruning (e.g. if we skipped with empty domains).
    if (!iTimers.didPrune && !iTimers.didRollingPrune)
    {
        return;
    }

    if (iTimers.didPrune)
    {
        timings->pruneTime.c++;
        timings->pruneTime.t += iTimers.prune_k.getLastRangeTime();
    }

    if (iTimers.didRollingPrune)
    {
        timings->dynamicPruneTime.c++;
        timings->dynamicPruneTime.t += iTimers.rollingPrune_k.getLastRangeTime();
    }
}
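
/* Sketch of the required ordering (hypothetical caller): the didPrune /
 * didRollingPrune flags may only be reset after the accounting is done.
 *
 *     countPruneKernelTime(timers, timings, iLocality);
 *     timers->interaction[iLocality].didPrune        = false;
 *     timers->interaction[iLocality].didRollingPrune = false;
 */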

/*! \brief Reduce data staged internally in the nbnxn module.
 *
 * Shift forces and electrostatic/LJ energies copied from the GPU into
 * a module-internal staging area are reduced into the CPU-side buffers
 * passed here, once the transfers have completed.
 *
 * Note that this function should always be called after the transfers into the
 * staging buffers have completed.
 *
 * \tparam     StagingData    Type of staging data
 * \param[in]  nbst           Nonbonded staging data
 * \param[in]  iLocality      Interaction locality specifier
 * \param[in]  reduceEnergies True if energy reduction should be done
 * \param[in]  reduceFshift   True if shift force reduction should be done
 * \param[out] e_lj           Variable to accumulate LJ energy into
 * \param[out] e_el           Variable to accumulate electrostatic energy into
 * \param[out] fshift         Pointer to the array of shift forces to accumulate into
 */
template<typename StagingData>
static inline void gpu_reduce_staged_outputs(const StagingData&        nbst,
                                             const InteractionLocality iLocality,
                                             const bool                reduceEnergies,
                                             const bool                reduceFshift,
                                             real*                     e_lj,
                                             real*                     e_el,
                                             rvec*                     fshift)
{
    /* add up energies and shift forces (only once at local F wait) */
    if (iLocality == InteractionLocality::Local)
    {
        if (reduceEnergies)
        {
            *e_lj += *nbst.e_lj;
            *e_el += *nbst.e_el;
        }

        if (reduceFshift)
        {
            for (int i = 0; i < SHIFTS; i++)
            {
                rvec_inc(fshift[i], nbst.fshift[i]);
            }
        }
    }
}
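
/* Illustrative call (hypothetical caller, mirroring gpu_try_finish_task()
 * below): once the staging D2H transfers are complete, reduce into the
 * caller's accumulators; only the local interaction locality triggers work.
 *
 *     gpu_reduce_staged_outputs(nb->nbst, InteractionLocality::Local,
 *                               stepWork.computeEnergy, stepWork.computeVirial,
 *                               e_lj, e_el, fshift);
 */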

/*! \brief Do the per-step timing accounting of the nonbonded tasks.
 *
 *  Does timing accumulation and call-count increments for the nonbonded kernels.
 *  Note that this function should be called after the current step's nonbonded
 *  tasks have completed, with the exception of the rolling pruning kernels,
 *  which are accounted for during the following step.
 *
 * NOTE: if timing with multiple GPUs (streams) becomes possible, the
 *       counters could end up being inconsistent due to not being incremented
 *       on some of the nodes when this is skipped on empty local domains!
 *
 * \tparam     GpuTimers    GPU timers type
 * \tparam     GpuPairlist  Pair list type
 * \param[out] timings      Pointer to the NB GPU timings data
 * \param[in]  timers       Pointer to GPU timers data
 * \param[in]  plist        Pointer to the pair list data
 * \param[in]  atomLocality Atom locality specifier
 * \param[in]  stepWork     Force schedule flags
 * \param[in]  doTiming     True if timing is enabled.
 */
template<typename GpuTimers, typename GpuPairlist>
static inline void gpu_accumulate_timings(gmx_wallclock_gpu_nbnxn_t* timings,
                                          GpuTimers*                 timers,
                                          const GpuPairlist*         plist,
                                          AtomLocality               atomLocality,
                                          const gmx::StepWorkload&   stepWork,
                                          bool                       doTiming)
{
    /* timing data accumulation */
    if (!doTiming)
    {
        return;
    }

    /* determine interaction locality from atom locality */
    const InteractionLocality iLocality        = gpuAtomToInteractionLocality(atomLocality);
    const bool                didEnergyKernels = stepWork.computeEnergy;

    /* only increase counter once (at local F wait) */
    if (iLocality == InteractionLocality::Local)
    {
        timings->nb_c++;
        timings->ktime[plist->haveFreshList ? 1 : 0][didEnergyKernels ? 1 : 0].c += 1;
    }

    /* kernel timings */
    timings->ktime[plist->haveFreshList ? 1 : 0][didEnergyKernels ? 1 : 0].t +=
            timers->interaction[iLocality].nb_k.getLastRangeTime();

    /* X/q H2D and F D2H timings */
    timings->nb_h2d_t += timers->xf[atomLocality].nb_h2d.getLastRangeTime();
    timings->nb_d2h_t += timers->xf[atomLocality].nb_d2h.getLastRangeTime();

    /* Count the pruning kernel times for both cases: 1st pass (at search step)
       and rolling pruning (if called at the previous step).
       We do the accounting here as this is the only sync point where we
       know (without checking or additional sync-ing) that prune tasks in
       the current stream have completed (having just blocking-waited
       for the force D2H). */
    countPruneKernelTime(timers, timings, iLocality);

    /* only count atdat at pair-search steps (add only once, at local F wait) */
    if (stepWork.doNeighborSearch && atomLocality == AtomLocality::Local)
    {
        /* atdat transfer timing */
        timings->pl_h2d_c++;
        timings->pl_h2d_t += timers->atdat.getLastRangeTime();
    }

    /* only count pair-list H2D when actually performed */
    if (timers->interaction[iLocality].didPairlistH2D)
    {
        timings->pl_h2d_t += timers->interaction[iLocality].pl_h2d.getLastRangeTime();

        /* Clear the timing flag for the next step */
        timers->interaction[iLocality].didPairlistH2D = false;
    }
}

/*! \brief Attempts to complete nonbonded GPU task.
 *
 * See documentation in nbnxm_gpu.h for details.
 *
 * \todo Move into shared source file, perhaps including
 *       cuda_runtime.h if needed for any remaining CUDA-specific
 *       objects.
 */
//NOLINTNEXTLINE(misc-definitions-in-headers)
bool gpu_try_finish_task(NbnxmGpu*                nb,
                         const gmx::StepWorkload& stepWork,
                         const AtomLocality       aloc,
                         real*                    e_lj,
                         real*                    e_el,
                         gmx::ArrayRef<gmx::RVec> shiftForces,
                         GpuTaskCompletion        completionKind,
                         gmx_wallcycle*           wcycle)
{
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");

    /* determine interaction locality from atom locality */
    const InteractionLocality iLocality = gpuAtomToInteractionLocality(aloc);

    // Transfers are launched and therefore need to be waited on if:
    // - buffer ops is not offloaded
    // - energies or virials are needed (on the local stream)
    //
    // (Note that useGpuFBufferOps and computeVirial are mutually exclusive
    // in current code as virial steps do CPU reduction.)
    const bool haveResultToWaitFor =
            (!stepWork.useGpuFBufferOps
             || (aloc == AtomLocality::Local && (stepWork.computeEnergy || stepWork.computeVirial)));

    // During the non-local phase, we skip if there was actually no work to do.
    // This is consistent with nbnxn_gpu_launch_kernel, but it also considers possible
    // bonded GPU work.
    if ((iLocality == InteractionLocality::Local) || haveGpuShortRangeWork(*nb, iLocality))
    {
        // Query the state of the GPU stream and return early if we're not done
        if (completionKind == GpuTaskCompletion::Check)
        {
            // To get the wcycle call count right, when in GpuTaskCompletion::Check mode,
            // we start without counting and only when the task has finished we issue a
            // start/stop to increment.
            // In GpuTaskCompletion::Wait mode the timing is expected to be done in the caller.
            wallcycle_start_nocount(wcycle, ewcWAIT_GPU_NB_L);

            if (!haveStreamTasksCompleted(*nb->deviceStreams[iLocality]))
            {
                wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);

                // Early return to skip the steps below that we have to do only
                // after the NB task completed
                return false;
            }

            wallcycle_increment_event_count(wcycle, ewcWAIT_GPU_NB_L);
        }
        else if (haveResultToWaitFor)
        {
            nb->deviceStreams[iLocality]->synchronize();
        }

        // TODO: this needs to be moved later because conditional wait could break timing
        // with a future OpenCL implementation, but with CUDA timing is anyway disabled
        // in all cases where we skip the wait.
        gpu_accumulate_timings(nb->timings, nb->timers, nb->plist[iLocality], aloc, stepWork, nb->bDoTime);

        if (stepWork.computeEnergy || stepWork.computeVirial)
        {
            gpu_reduce_staged_outputs(nb->nbst,
                                      iLocality,
                                      stepWork.computeEnergy,
                                      stepWork.computeVirial,
                                      e_lj,
                                      e_el,
                                      as_rvec_array(shiftForces.data()));
        }
    }

    /* Always reset both pruning flags (doesn't hurt doing it even when timing is off). */
    nb->timers->interaction[iLocality].didPrune =
            nb->timers->interaction[iLocality].didRollingPrune = false;

    /* Turn off initial list pruning (doesn't hurt if this is not a pair-search step). */
    nb->plist[iLocality]->haveFreshList = false;

    return true;
}
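
/* Sketch of a polling caller (hypothetical): with GpuTaskCompletion::Check the
 * call returns false immediately while the task is still running, so CPU work
 * can be overlapped; GpuTaskCompletion::Wait blocks until completion.
 *
 *     while (!gpu_try_finish_task(nb, stepWork, AtomLocality::Local, e_lj, e_el,
 *                                 shiftForces, GpuTaskCompletion::Check, wcycle))
 *     {
 *         doOtherCpuWork(); // hypothetical overlapped work
 *     }
 */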

/*! \brief
 * Wait for the asynchronously launched nonbonded tasks and data
 * transfers to finish.
 *
 * Also does timing accounting and reduction of the internal staging buffers.
 * As this is called at the end of the step, it also resets the pair list and
 * pruning flags.
 *
 * \param[in]  nb          The nonbonded data GPU structure
 * \param[in]  stepWork    Force schedule flags
 * \param[in]  aloc        Atom locality identifier
 * \param[out] e_lj        Pointer to the LJ energy output to accumulate into
 * \param[out] e_el        Pointer to the electrostatics energy output to accumulate into
 * \param[out] shiftForces Shift forces buffer to accumulate into
 * \param[out] wcycle      Pointer to wallcycle data structure
 * \return                 The number of cycles the gpu wait took
 */
//NOLINTNEXTLINE(misc-definitions-in-headers) TODO: move into source file
float gpu_wait_finish_task(NbnxmGpu*                nb,
                           const gmx::StepWorkload& stepWork,
                           AtomLocality             aloc,
                           real*                    e_lj,
                           real*                    e_el,
                           gmx::ArrayRef<gmx::RVec> shiftForces,
                           gmx_wallcycle*           wcycle)
{
    auto cycleCounter = (gpuAtomToInteractionLocality(aloc) == InteractionLocality::Local)
                                ? ewcWAIT_GPU_NB_L
                                : ewcWAIT_GPU_NB_NL;

    wallcycle_start(wcycle, cycleCounter);
    gpu_try_finish_task(nb, stepWork, aloc, e_lj, e_el, shiftForces, GpuTaskCompletion::Wait, wcycle);
    float waitTime = wallcycle_stop(wcycle, cycleCounter);

    return waitTime;
}

} // namespace Nbnxm

#endif