/*
 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014,2015,2017,2018,2019, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \libinternal \file
 * \brief Declare interface for GPU execution for NBNXN module
 *
 * \author Szilard Pall <pall.szilard@gmail.com>
 * \author Mark Abraham <mark.j.abraham@gmail.com>
 * \ingroup module_nbnxm
 */
43 #ifndef GMX_NBNXM_NBNXM_GPU_H
44 #define GMX_NBNXM_NBNXM_GPU_H
46 #include "gromacs/gpu_utils/gpu_macros.h"
47 #include "gromacs/math/vectypes.h"
48 #include "gromacs/nbnxm/atomdata.h"
49 #include "gromacs/utility/basedefinitions.h"
50 #include "gromacs/utility/real.h"
52 #include "gpu_types.h"
55 struct nbnxn_atomdata_t;
56 enum class GpuTaskCompletion;
/*! \brief
 * Launch asynchronously the xq buffer host to device copy.
 *
 * The nonlocal copy is skipped if there is no dependent work to do,
 * neither non-local nonbonded interactions nor bonded GPU work.
 *
 * \param [in] nb            GPU nonbonded data.
 * \param [in] nbdata        Host-side atom data structure.
 * \param [in] aloc          Atom locality flag.
 * \param [in] haveOtherWork True if there are other tasks that require the nbnxn coordinate input.
 */
void gpu_copy_xq_to_gpu(gmx_nbnxn_gpu_t gmx_unused               *nb,
                        const struct nbnxn_atomdata_t gmx_unused *nbdata,
                        AtomLocality gmx_unused                   aloc,
                        bool gmx_unused                           haveOtherWork) GPU_FUNC_TERM
/*! \brief
 * Launch asynchronously the nonbonded force calculations.
 *
 * Also launches the initial pruning of a fresh list after search.
 *
 * The local and non-local interaction calculations are launched in two
 * separate streams. If there is no work (i.e. empty pair list), the
 * force kernel launch is omitted.
 *
 * \param [inout] nb   GPU nonbonded data.
 * \param [in]    iloc Interaction locality flag.
 */
void gpu_launch_kernel(gmx_nbnxn_gpu_t gmx_unused    *nb,
                       InteractionLocality gmx_unused iloc) GPU_FUNC_TERM
/*! \brief
 * Launch asynchronously the nonbonded prune-only kernel.
 *
 * The local and non-local list pruning are launched in their separate streams.
 *
 * Notes for future scheduling tuning:
 * Currently we schedule the dynamic pruning between two MD steps *after* both local and
 * nonlocal force D2H transfers completed. We could launch already after the cpyback
 * is launched, but we want to avoid prune kernels (especially in the non-local
 * high prio-stream) competing with nonbonded work.
 *
 * However, this is not ideal as this schedule does not expose the available
 * concurrency. The dynamic pruning kernel:
 *   - should be allowed to overlap with any task other than force compute, including
 *     transfers (F D2H and the next step's x H2D as well as force clearing).
 *   - we'd prefer to avoid competition with non-bonded force kernels belonging
 *     to the same rank and ideally other ranks too.
 *
 * In the most general case, the former would require scheduling pruning in a separate
 * stream and adding additional event sync points to ensure that force kernels read
 * consistent pair list data. This would lead to some overhead (due to extra
 * cudaStreamWaitEvent calls, 3-5 us/call) which we might be able to live with.
 * The gains from additional overlap might not be significant as long as
 * update+constraints anyway takes longer than pruning, but there will still
 * be use-cases where more overlap may help (e.g. multiple ranks per GPU,
 * no/hbonds only constraints).
 * The above second point is harder to address given that multiple ranks will often
 * share a GPU. Ranks that complete their nonbondeds sooner can schedule pruning earlier
 * and without a third priority level it is difficult to avoid some interference of
 * prune kernels with force tasks (in particular preemption of low-prio local force task).
 *
 * \param [inout] nb       GPU nonbonded data.
 * \param [in]    iloc     Interaction locality flag.
 * \param [in]    numParts Number of parts the pair list is split into in the rolling kernel.
 */
void gpu_launch_kernel_pruneonly(gmx_nbnxn_gpu_t gmx_unused    *nb,
                                 InteractionLocality gmx_unused iloc,
                                 int gmx_unused                 numParts) GPU_FUNC_TERM
/*! \brief
 * Launch asynchronously the download of nonbonded forces from the GPU
 * (and energies/shift forces if required).
 *
 * When haveOtherWork=true, the copy-back is done even when there was
 * no non-bonded work.
 *
 * \param [inout] nb            GPU nonbonded data.
 * \param [inout] nbatom        Host-side atom data structure the results are copied back into.
 * \param [in]    flags         Force flags.
 * \param [in]    aloc          Atom locality flag.
 * \param [in]    haveOtherWork If true, the copy-back is issued even when there was no non-bonded work.
 */
void gpu_launch_cpyback(gmx_nbnxn_gpu_t gmx_unused  *nb,
                        nbnxn_atomdata_t gmx_unused *nbatom,
                        int gmx_unused               flags,
                        AtomLocality gmx_unused      aloc,
                        bool gmx_unused              haveOtherWork) GPU_FUNC_TERM
/*! \brief Attempts to complete nonbonded GPU task.
 *
 * This function attempts to complete the nonbonded task (both GPU and CPU auxiliary work).
 * Success, i.e. that the tasks completed and results are ready to be consumed, is signaled
 * by the return value (always true if blocking wait mode requested).
 *
 * The \p completionKind parameter controls whether the behavior is non-blocking
 * (achieved by passing GpuTaskCompletion::Check) or blocking wait until the results
 * are ready (when GpuTaskCompletion::Wait is passed).
 * As in the "Check" mode the function will return immediately if the GPU stream
 * still contains tasks that have not completed, it allows more flexible overlapping
 * of work on the CPU with GPU execution.
 *
 * Note that it is only safe to use the results, and to continue to the next MD
 * step when this function has returned true which indicates successful completion of
 * - All nonbonded GPU tasks: both compute and device transfer(s)
 * - auxiliary tasks: updating the internal module state (timing accumulation, list pruning states) and
 * - internal staging reduction of (\p fshift, \p e_el, \p e_lj).
 *
 * TODO: improve the handling of outputs e.g. by ensuring that this function explicitly returns the
 * force buffer (instead of that being passed only to nbnxn_gpu_launch_cpyback()) and by returning
 * the energy and Fshift contributions for some external/centralized reduction.
 *
 * \param[in]  nb             The nonbonded data GPU structure
 * \param[in]  flags          Force flags
 * \param[in]  aloc           Atom locality identifier
 * \param[in]  haveOtherWork  Tells whether there is other work than non-bonded work in the nbnxn stream(s)
 * \param[out] e_lj           Pointer to the LJ energy output to accumulate into
 * \param[out] e_el           Pointer to the electrostatics energy output to accumulate into
 * \param[out] fshift         Pointer to the shift force buffer to accumulate into
 * \param[in]  completionKind Indicates whether nonbonded task completion should only be checked rather than waited for
 * \returns                   True if the nonbonded tasks associated with \p aloc locality have completed
 */
bool gpu_try_finish_task(gmx_nbnxn_gpu_t gmx_unused  *nb,
                         int gmx_unused               flags,
                         AtomLocality gmx_unused      aloc,
                         bool gmx_unused              haveOtherWork,
                         real gmx_unused             *e_lj,
                         real gmx_unused             *e_el,
                         rvec gmx_unused             *fshift,
                         GpuTaskCompletion gmx_unused completionKind) GPU_FUNC_TERM_WITH_RETURN(false)
/*! \brief Completes the nonbonded GPU task blocking until GPU tasks and data
 * transfers have finished.
 *
 * Also does timing accounting and reduction of the internal staging buffers.
 * As this is called at the end of the step, it also resets the pair list and
 * related internal state for the next step (NOTE(review): the original sentence
 * is truncated here — confirm exactly what gets reset against the implementation).
 *
 * \param[in]  nb            The nonbonded data GPU structure
 * \param[in]  flags         Force flags
 * \param[in]  aloc          Atom locality identifier
 * \param[in]  haveOtherWork Tells whether there is other work than non-bonded work in the nbnxn stream(s)
 * \param[out] e_lj          Pointer to the LJ energy output to accumulate into
 * \param[out] e_el          Pointer to the electrostatics energy output to accumulate into
 * \param[out] fshift        Pointer to the shift force buffer to accumulate into
 */
void gpu_wait_finish_task(gmx_nbnxn_gpu_t gmx_unused *nb,
                          int gmx_unused              flags,
                          AtomLocality gmx_unused     aloc,
                          bool gmx_unused             haveOtherWork,
                          real gmx_unused            *e_lj,
                          real gmx_unused            *e_el,
                          rvec gmx_unused            *fshift) GPU_FUNC_TERM
/*! \brief Selects the Ewald kernel type, analytical or tabulated, single or twin cut-off.
 *
 * \param [in] bTwinCut Whether a second (twin) cut-off is in use.
 * \returns    Integer identifier of the selected Ewald kernel flavor (-1 in the non-GPU stub).
 */
int gpu_pick_ewald_kernel_type(bool gmx_unused bTwinCut) GPU_FUNC_TERM_WITH_RETURN(-1)
/*! \brief Initialization for X buffer operations on GPU.
 *
 * Called on the NS step and performs (re-)allocations and memory copies.
 *
 * \param [in]    gridSet  The pair-search grid set.
 * \param [inout] gpu_nbv  GPU nonbonded data.
 * \param [in]    locality Atom locality flag.
 */
void nbnxn_gpu_init_x_to_nbat_x(const Nbnxm::GridSet gmx_unused &gridSet,
                                gmx_nbnxn_gpu_t gmx_unused      *gpu_nbv,
                                Nbnxm::AtomLocality gmx_unused   locality) CUDA_FUNC_TERM
/*! \brief X buffer operations on GPU: performs conversion from rvec to nb format.
 *
 * \param [in]    gridSet       The pair-search grid set.
 * \param [in]    FillLocal     Presumably controls whether local filler coordinates are also
 *                              written — confirm against the implementation.
 * \param [inout] gpu_nbv       GPU nonbonded data.
 * \param [in]    xPmeDevicePtr Device pointer to a coordinate buffer (assumed PME-owned — confirm).
 * \param [in]    na_round_max  Maximum rounded atom count (assumption based on name — confirm).
 * \param [in]    locality      Atom locality flag.
 * \param [in]    x             Host-side coordinates in rvec format.
 */
void nbnxn_gpu_x_to_nbat_x(const Nbnxm::GridSet gmx_unused &gridSet,
                           bool gmx_unused                  FillLocal,
                           gmx_nbnxn_gpu_t gmx_unused      *gpu_nbv,
                           void gmx_unused                 *xPmeDevicePtr,
                           int gmx_unused                   na_round_max,
                           Nbnxm::AtomLocality gmx_unused   locality,
                           const rvec gmx_unused           *x) CUDA_FUNC_TERM