/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2017 by the GROMACS development team.
 * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \libinternal \file
 * \brief Declare interface for GPU execution for NBNXN module
 *
 * \author Szilard Pall <pall.szilard@gmail.com>
 * \author Mark Abraham <mark.j.abraham@gmail.com>
 * \ingroup module_nbnxm
 */
44 #ifndef GMX_NBNXM_NBNXM_GPU_H
45 #define GMX_NBNXM_NBNXM_GPU_H
47 #include "gromacs/gpu_utils/gpu_macros.h"
48 #include "gromacs/math/vectypes.h"
49 #include "gromacs/mdtypes/locality.h"
50 #include "gromacs/nbnxm/atomdata.h"
51 #include "gromacs/utility/basedefinitions.h"
52 #include "gromacs/utility/real.h"
54 struct interaction_const_t;
55 struct nbnxn_atomdata_t;
57 enum class GpuTaskCompletion;
/*! \brief Nbnxm electrostatic GPU kernel flavors.
 *
 * Types of electrostatics implementations available in the GPU non-bonded
 * force kernels. These represent both the electrostatics types implemented
 * by the kernels (cut-off, RF, and Ewald - a subset of what's defined in
 * enums.h) as well as encode implementation details analytical/tabulated
 * and single or twin cut-off (for Ewald kernels).
 * Note that the cut-off and RF kernels have only analytical flavor and unlike
 * in the CPU kernels, the tabulated kernels are ATM Ewald-only.
 *
 * The row-order of pointers to different electrostatic kernels defined in
 * nbnxn_cuda.cu by the nb_*_kfunc_ptr function pointer table
 * should match the order of enumerated types below.
 */
84 eelTypeEWALD_TAB_TWIN,
86 eelTypeEWALD_ANA_TWIN,
/*! \brief Nbnxm VdW GPU kernel flavors.
 *
 * The enumerated values correspond to the LJ implementations in the GPU non-bonded
 * force kernels.
 *
 * The column-order of pointers to different electrostatic kernels defined in
 * nbnxn_cuda_ocl.cpp/.cu by the nb_*_kfunc_ptr function pointer table
 * should match the order of enumerated types below.
 */
116 /*! \brief Returns true if LJ combination rules are used in the non-bonded kernels.
118 * \param[in] vdwType The VdW interaction/implementation type as defined by evdwType
121 * \returns Whether combination rules are used by the run.
123 static inline bool useLjCombRule(const int vdwType)
125 return (vdwType == evdwTypeCUTCOMBGEOM || vdwType == evdwTypeCUTCOMBLB);
129 * Launch asynchronously the xq buffer host to device copy.
131 * The nonlocal copy is skipped if there is no dependent work to do,
132 * neither non-local nonbonded interactions nor bonded GPU work.
134 * \param [in] nb GPU nonbonded data.
135 * \param [in] nbdata Host-side atom data structure.
136 * \param [in] aloc Atom locality flag.
139 void gpu_copy_xq_to_gpu(NbnxmGpu gmx_unused* nb,
140 const struct nbnxn_atomdata_t gmx_unused* nbdata,
141 gmx::AtomLocality gmx_unused aloc) GPU_FUNC_TERM;
144 * Launch asynchronously the nonbonded force calculations.
146 * Also launches the initial pruning of a fresh list after search.
148 * The local and non-local interaction calculations are launched in two
149 * separate streams. If there is no work (i.e. empty pair list), the
150 * force kernel launch is omitted.
154 void gpu_launch_kernel(NbnxmGpu gmx_unused* nb,
155 const gmx::StepWorkload gmx_unused& stepWork,
156 gmx::InteractionLocality gmx_unused iloc) GPU_FUNC_TERM;
159 * Launch asynchronously the nonbonded prune-only kernel.
161 * The local and non-local list pruning are launched in their separate streams.
163 * Notes for future scheduling tuning:
164 * Currently we schedule the dynamic pruning between two MD steps *after* both local and
165 * nonlocal force D2H transfers completed. We could launch already after the cpyback
166 * is launched, but we want to avoid prune kernels (especially in the non-local
167 * high prio-stream) competing with nonbonded work.
169 * However, this is not ideal as this schedule does not expose the available
170 * concurrency. The dynamic pruning kernel:
171 * - should be allowed to overlap with any task other than force compute, including
172 * transfers (F D2H and the next step's x H2D as well as force clearing).
173 * - we'd prefer to avoid competition with non-bonded force kernels belonging
174 * to the same rank and ideally other ranks too.
176 * In the most general case, the former would require scheduling pruning in a separate
177 * stream and adding additional event sync points to ensure that force kernels read
178 * consistent pair list data. This would lead to some overhead (due to extra
179 * cudaStreamWaitEvent calls, 3-5 us/call) which we might be able to live with.
180 * The gains from additional overlap might not be significant as long as
181 * update+constraints anyway takes longer than pruning, but there will still
182 * be use-cases where more overlap may help (e.g. multiple ranks per GPU,
183 * no/hbonds only constraints).
184 * The above second point is harder to address given that multiple ranks will often
185 * share a GPU. Ranks that complete their nonbondeds sooner can schedule pruning earlier
186 * and without a third priority level it is difficult to avoid some interference of
187 * prune kernels with force tasks (in particular preemption of low-prio local force task).
189 * \param [inout] nb GPU nonbonded data.
190 * \param [in] iloc Interaction locality flag.
191 * \param [in] numParts Number of parts the pair list is split into in the rolling kernel.
194 void gpu_launch_kernel_pruneonly(NbnxmGpu gmx_unused* nb,
195 gmx::InteractionLocality gmx_unused iloc,
196 int gmx_unused numParts) GPU_FUNC_TERM;
199 * Launch asynchronously the download of short-range forces from the GPU
200 * (and energies/shift forces if required).
203 void gpu_launch_cpyback(NbnxmGpu gmx_unused* nb,
204 nbnxn_atomdata_t gmx_unused* nbatom,
205 const gmx::StepWorkload gmx_unused& stepWork,
206 gmx::AtomLocality gmx_unused aloc) GPU_FUNC_TERM;
208 /*! \brief Attempts to complete nonbonded GPU task.
210 * This function attempts to complete the nonbonded task (both GPU and CPU auxiliary work).
211 * Success, i.e. that the tasks completed and results are ready to be consumed, is signaled
212 * by the return value (always true if blocking wait mode requested).
214 * The \p completionKind parameter controls whether the behavior is non-blocking
215 * (achieved by passing GpuTaskCompletion::Check) or blocking wait until the results
216 * are ready (when GpuTaskCompletion::Wait is passed).
217 * As the "Check" mode the function will return immediately if the GPU stream
218 * still contain tasks that have not completed, it allows more flexible overlapping
219 * of work on the CPU with GPU execution.
221 * Note that it is only safe to use the results, and to continue to the next MD
222 * step when this function has returned true which indicates successful completion of
223 * - All nonbonded GPU tasks: both compute and device transfer(s)
224 * - auxiliary tasks: updating the internal module state (timing accumulation, list pruning states) and
225 * - internal staging reduction of (\p fshift, \p e_el, \p e_lj).
227 * In GpuTaskCompletion::Check mode this function does the timing and keeps correct count
228 * for the nonbonded task (incrementing only once per taks), in the GpuTaskCompletion::Wait mode
229 * timing is expected to be done in the caller.
231 * TODO: improve the handling of outputs e.g. by ensuring that this function explcitly returns the
232 * force buffer (instead of that being passed only to nbnxn_gpu_launch_cpyback()) and by returning
233 * the energy and Fshift contributions for some external/centralized reduction.
235 * \param[in] nb The nonbonded data GPU structure
236 * \param[in] stepWork Step schedule flags
237 * \param[in] aloc Atom locality identifier
238 * \param[out] e_lj Pointer to the LJ energy output to accumulate into
239 * \param[out] e_el Pointer to the electrostatics energy output to accumulate into
240 * \param[out] shiftForces Shift forces buffer to accumulate into
241 * \param[in] completionKind Indicates whether nnbonded task completion should only be checked rather than waited for
242 * \param[out] wcycle Pointer to wallcycle data structure
243 * \returns True if the nonbonded tasks associated with \p aloc locality have completed
246 bool gpu_try_finish_task(NbnxmGpu gmx_unused* nb,
247 const gmx::StepWorkload gmx_unused& stepWork,
248 gmx::AtomLocality gmx_unused aloc,
249 real gmx_unused* e_lj,
250 real gmx_unused* e_el,
251 gmx::ArrayRef<gmx::RVec> gmx_unused shiftForces,
252 GpuTaskCompletion gmx_unused completionKind,
253 gmx_wallcycle gmx_unused* wcycle) GPU_FUNC_TERM_WITH_RETURN(false);
255 /*! \brief Completes the nonbonded GPU task blocking until GPU tasks and data
256 * transfers to finish.
258 * Also does timing accounting and reduction of the internal staging buffers.
259 * As this is called at the end of the step, it also resets the pair list and
262 * \param[in] nb The nonbonded data GPU structure
263 * \param[in] stepWork Step schedule flags
264 * \param[in] aloc Atom locality identifier
265 * \param[out] e_lj Pointer to the LJ energy output to accumulate into
266 * \param[out] e_el Pointer to the electrostatics energy output to accumulate into
267 * \param[out] shiftForces Shift forces buffer to accumulate into
268 * \param[out] wcycle Pointer to wallcycle data structure */
270 float gpu_wait_finish_task(NbnxmGpu gmx_unused* nb,
271 const gmx::StepWorkload gmx_unused& stepWork,
272 gmx::AtomLocality gmx_unused aloc,
273 real gmx_unused* e_lj,
274 real gmx_unused* e_el,
275 gmx::ArrayRef<gmx::RVec> gmx_unused shiftForces,
276 gmx_wallcycle gmx_unused* wcycle) GPU_FUNC_TERM_WITH_RETURN(0.0);
278 /*! \brief Initialization for X buffer operations on GPU.
279 * Called on the NS step and performs (re-)allocations and memory copies. !*/
281 void nbnxn_gpu_init_x_to_nbat_x(const Nbnxm::GridSet gmx_unused& gridSet,
282 NbnxmGpu gmx_unused* gpu_nbv) CUDA_FUNC_TERM;
284 /*! \brief X buffer operations on GPU: performs conversion from rvec to nb format.
286 * \param[in] grid Grid to be converted.
287 * \param[in] setFillerCoords If the filler coordinates are used.
288 * \param[in,out] gpu_nbv The nonbonded data GPU structure.
289 * \param[in] d_x Device-side coordinates in plain rvec format.
290 * \param[in] xReadyOnDevice Event synchronizer indicating that the coordinates are ready in
291 * the device memory. \param[in] locality Copy coordinates for local or non-local atoms.
292 * \param[in] gridId Index of the grid being converted.
293 * \param[in] numColumnsMax Maximum number of columns in the grid.
296 void nbnxn_gpu_x_to_nbat_x(const Nbnxm::Grid gmx_unused& grid,
297 bool gmx_unused setFillerCoords,
298 NbnxmGpu gmx_unused* gpu_nbv,
299 DeviceBuffer<gmx::RVec> gmx_unused d_x,
300 GpuEventSynchronizer gmx_unused* xReadyOnDevice,
301 gmx::AtomLocality gmx_unused locality,
302 int gmx_unused gridId,
303 int gmx_unused numColumnsMax) CUDA_FUNC_TERM;
305 /*! \brief Sync the nonlocal stream with dependent tasks in the local queue.
306 * \param[in] nb The nonbonded data GPU structure
307 * \param[in] interactionLocality Local or NonLocal sync point
310 void nbnxnInsertNonlocalGpuDependency(const NbnxmGpu gmx_unused* nb,
311 gmx::InteractionLocality gmx_unused interactionLocality) CUDA_FUNC_TERM;
313 /*! \brief Set up internal flags that indicate what type of short-range work there is.
315 * As nonbondeds and bondeds share input/output buffers and GPU queues,
316 * both are considered when checking for work in the current domain.
318 * This function is expected to be called every time the work-distribution
319 * can change (i.e. at search/domain decomposition steps).
321 * \param[inout] nb Pointer to the nonbonded GPU data structure
322 * \param[in] gpuBonded Pointer to the GPU bonded data structure
323 * \param[in] iLocality Interaction locality identifier
326 void setupGpuShortRangeWork(NbnxmGpu gmx_unused* nb,
327 const gmx::GpuBonded gmx_unused* gpuBonded,
328 gmx::InteractionLocality gmx_unused iLocality) GPU_FUNC_TERM;
330 /*! \brief Returns true if there is GPU short-range work for the given atom locality.
332 * Note that as, unlike nonbonded tasks, bonded tasks are not split into local/nonlocal,
333 * and therefore if there are GPU offloaded bonded interactions, this function will return
334 * true for both local and nonlocal atom range.
336 * \param[inout] nb Pointer to the nonbonded GPU data structure
337 * \param[in] aLocality Atom locality identifier
340 bool haveGpuShortRangeWork(const NbnxmGpu gmx_unused* nb, gmx::AtomLocality gmx_unused aLocality)
341 GPU_FUNC_TERM_WITH_RETURN(false);
343 /*! \brief sync CPU thread on coordinate copy to device
344 * \param[in] nb The nonbonded data GPU structure
347 void nbnxn_wait_x_on_device(NbnxmGpu gmx_unused* nb) CUDA_FUNC_TERM;
349 /*! \brief Get the pointer to the GPU nonbonded force buffer
351 * \param[in] nb The nonbonded data GPU structure
352 * \returns A pointer to the force buffer in GPU memory
355 void* getGpuForces(NbnxmGpu gmx_unused* nb) CUDA_FUNC_TERM_WITH_RETURN(nullptr);