 * This file is part of the GROMACS molecular simulation package.
 * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
 * Copyright (c) 2017,2018,2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 * \brief Define CUDA implementation of nbnxn_gpu.h
 * \author Szilard Pall <pall.szilard@gmail.com>
#include "gromacs/nbnxm/nbnxm_gpu.h"
#include "nbnxm_cuda.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
#include "gromacs/gpu_utils/typecasts.cuh"
#include "gromacs/gpu_utils/vectype_ops.cuh"
#include "gromacs/hardware/device_information.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/nbnxm/atomdata.h"
#include "gromacs/nbnxm/gpu_common.h"
#include "gromacs/nbnxm/gpu_common_utils.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/grid.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/pairlist.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/gmxassert.h"
#include "nbnxm_buffer_ops_kernels.cuh"
#include "nbnxm_cuda_types.h"
/***** The kernel declarations/definitions come here *****/
/* Top-level kernel declaration generation: will generate through multiple
 * inclusion the following flavors for all kernel declarations:
 * - force-only output;
 * - force and energy output;
 * - force-only with pair list pruning;
 * - force and energy output with pair list pruning.
#define FUNCTION_DECLARATION_ONLY
#include "nbnxm_cuda_kernels.cuh"
/** Force & energy **/
#include "nbnxm_cuda_kernels.cuh"
/*** Pair-list pruning kernels ***/
#include "nbnxm_cuda_kernels.cuh"
/** Force & energy **/
#include "nbnxm_cuda_kernels.cuh"
/* Prune-only kernels */
#include "nbnxm_cuda_kernel_pruneonly.cuh"
#undef FUNCTION_DECLARATION_ONLY
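/* Each pass through "nbnxm_cuda_kernels.cuh" above generates one kernel per
 * electrostatics/VdW flavor combination; the generated names follow the pattern
 * nbnxn_kernel_<ElecType>_<VdwType>_{F,VF}[_prune]_cuda, as used in the function
 * pointer tables further below. */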
/* Now generate the function definitions if we are using a single compilation unit. */
#if GMX_CUDA_NB_SINGLE_COMPILATION_UNIT
#    include "nbnxm_cuda_kernel_F_noprune.cu"
#    include "nbnxm_cuda_kernel_F_prune.cu"
#    include "nbnxm_cuda_kernel_VF_noprune.cu"
#    include "nbnxm_cuda_kernel_VF_prune.cu"
#    include "nbnxm_cuda_kernel_pruneonly.cu"
#endif /* GMX_CUDA_NB_SINGLE_COMPILATION_UNIT */
//! Number of CUDA threads in a block
// TODO Optimize this through experimentation
constexpr static int c_bufOpsThreadsPerBlock = 128;
/*! Nonbonded kernel function pointer type */
typedef void (*nbnxn_cu_kfunc_ptr_t)(const cu_atomdata_t, const NBParamGpu, const gpu_plist, bool);
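/* Kernel dispatch goes through the 2-dimensional tables below, indexed by the
 * ElecType and VdwType enum values, e.g.
 *   kernel = nb_kfunc_ener_prune_ptr[static_cast<int>(elecType)][static_cast<int>(vdwType)];
 * (see select_nbnxn_kernel()). */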
/*********************************/
/*! Returns the number of blocks to be used for the nonbonded GPU kernel. */
static inline int calc_nb_kernel_nblock(int nwork_units, const DeviceInformation* deviceInfo)
/* CUDA does not accept grid dimension of 0 (which can happen e.g. with an
   empty domain) and that case should be handled before this point. */
assert(nwork_units > 0);
max_grid_x_size = deviceInfo->prop.maxGridSize[0];
/* do we exceed the grid x dimension limit? */
if (nwork_units > max_grid_x_size)
"Watch out, the input system is too large to simulate!\n"
146 "The number of nonbonded work units (=number of super-clusters) exceeds the"
147 "maximum grid size in x dimension (%d > %d)!",
/* Constant arrays listing all kernel function pointers and enabling selection
   of a kernel in an elegant manner. */
/*! Pointers to the non-bonded kernels organized in 2-dim arrays by:
 * electrostatics and VDW type.
 * Note that the row- and column-order of function pointers has to match the
 * order of corresponding enumerated electrostatics and vdw types, resp.,
 * defined in nbnxm_cuda_types.h.
 */
/*! Force-only kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_noprune_ptr[c_numElecTypes][c_numVdwTypes] = {
{ nbnxn_kernel_ElecCut_VdwLJ_F_cuda,
nbnxn_kernel_ElecCut_VdwLJCombGeom_F_cuda,
nbnxn_kernel_ElecCut_VdwLJCombLB_F_cuda,
nbnxn_kernel_ElecCut_VdwLJFsw_F_cuda,
nbnxn_kernel_ElecCut_VdwLJPsw_F_cuda,
nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_cuda,
nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_cuda },
{ nbnxn_kernel_ElecRF_VdwLJ_F_cuda,
nbnxn_kernel_ElecRF_VdwLJCombGeom_F_cuda,
nbnxn_kernel_ElecRF_VdwLJCombLB_F_cuda,
nbnxn_kernel_ElecRF_VdwLJFsw_F_cuda,
nbnxn_kernel_ElecRF_VdwLJPsw_F_cuda,
nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_cuda,
nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_cuda },
{ nbnxn_kernel_ElecEwQSTab_VdwLJ_F_cuda,
nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_cuda,
nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_cuda,
nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_cuda,
nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_cuda,
nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_cuda,
nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_cuda },
{ nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_cuda,
nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_cuda,
nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_cuda,
nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_cuda,
nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_cuda,
nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_cuda,
nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_cuda },
{ nbnxn_kernel_ElecEw_VdwLJ_F_cuda,
nbnxn_kernel_ElecEw_VdwLJCombGeom_F_cuda,
nbnxn_kernel_ElecEw_VdwLJCombLB_F_cuda,
nbnxn_kernel_ElecEw_VdwLJFsw_F_cuda,
nbnxn_kernel_ElecEw_VdwLJPsw_F_cuda,
nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_cuda,
nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_cuda },
{ nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_cuda,
nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_cuda,
nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_cuda,
nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_cuda,
nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_cuda,
nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_cuda,
nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_cuda }
/*! Force + energy kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_noprune_ptr[c_numElecTypes][c_numVdwTypes] = {
{ nbnxn_kernel_ElecCut_VdwLJ_VF_cuda,
nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_cuda,
nbnxn_kernel_ElecCut_VdwLJCombLB_VF_cuda,
nbnxn_kernel_ElecCut_VdwLJFsw_VF_cuda,
nbnxn_kernel_ElecCut_VdwLJPsw_VF_cuda,
nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_cuda,
nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_cuda },
{ nbnxn_kernel_ElecRF_VdwLJ_VF_cuda,
nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_cuda,
nbnxn_kernel_ElecRF_VdwLJCombLB_VF_cuda,
nbnxn_kernel_ElecRF_VdwLJFsw_VF_cuda,
nbnxn_kernel_ElecRF_VdwLJPsw_VF_cuda,
nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_cuda,
nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_cuda },
{ nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_cuda,
nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_cuda,
nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_cuda,
nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_cuda,
nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_cuda,
nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_cuda,
nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_cuda },
{ nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_cuda,
nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_cuda,
nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_cuda,
nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_cuda,
nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_cuda,
nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_cuda,
nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_cuda },
{ nbnxn_kernel_ElecEw_VdwLJ_VF_cuda,
nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_cuda,
nbnxn_kernel_ElecEw_VdwLJCombLB_VF_cuda,
nbnxn_kernel_ElecEw_VdwLJFsw_VF_cuda,
nbnxn_kernel_ElecEw_VdwLJPsw_VF_cuda,
nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_cuda,
nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_cuda },
{ nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_cuda,
nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_cuda,
nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_cuda,
nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_cuda,
nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_cuda,
nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_cuda,
nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_cuda }
/*! Force + pruning kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_prune_ptr[c_numElecTypes][c_numVdwTypes] = {
{ nbnxn_kernel_ElecCut_VdwLJ_F_prune_cuda,
nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_cuda,
nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_cuda,
nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_cuda,
nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_cuda,
nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_cuda,
nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_cuda },
{ nbnxn_kernel_ElecRF_VdwLJ_F_prune_cuda,
nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_cuda,
nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_cuda,
nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_cuda,
nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_cuda,
nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_cuda,
nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_cuda },
{ nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_cuda,
nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_cuda,
nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_cuda,
nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_cuda,
nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_cuda,
nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_cuda,
nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_cuda },
{ nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_cuda,
nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_cuda,
nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_cuda,
nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_cuda,
nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_cuda,
nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_cuda,
nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_cuda },
{ nbnxn_kernel_ElecEw_VdwLJ_F_prune_cuda,
nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_cuda,
nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_cuda,
nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_cuda,
nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_cuda,
nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_cuda,
nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_cuda },
{ nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_cuda,
nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_cuda,
nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_cuda,
nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_cuda,
nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_cuda,
nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_cuda,
nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_cuda }
/*! Force + energy + pruning kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_prune_ptr[c_numElecTypes][c_numVdwTypes] = {
{ nbnxn_kernel_ElecCut_VdwLJ_VF_prune_cuda,
nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_cuda,
nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_cuda,
nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_cuda,
nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_cuda,
nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_cuda,
nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_cuda },
{ nbnxn_kernel_ElecRF_VdwLJ_VF_prune_cuda,
nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_cuda,
nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_cuda,
nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_cuda,
nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_cuda,
nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_cuda,
nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_cuda },
{ nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_cuda,
nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_cuda,
nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_cuda,
nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_cuda,
nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_cuda,
nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_cuda,
nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_cuda },
{ nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_cuda,
nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_cuda,
nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_cuda,
nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_cuda,
nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_cuda,
nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_cuda,
nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_cuda },
{ nbnxn_kernel_ElecEw_VdwLJ_VF_prune_cuda,
nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_cuda,
nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_cuda,
nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_cuda,
nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_cuda,
nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_cuda,
nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_cuda },
{ nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_cuda,
nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_cuda,
nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_cuda,
nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_cuda,
nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_cuda,
nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_cuda,
nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_cuda }
/*! Return a pointer to the kernel version to be executed at the current step. */
static inline nbnxn_cu_kfunc_ptr_t select_nbnxn_kernel(enum ElecType elecType,
enum VdwType vdwType,
const DeviceInformation gmx_unused* deviceInfo)
const int elecTypeIdx = static_cast<int>(elecType);
const int vdwTypeIdx = static_cast<int>(vdwType);
GMX_ASSERT(elecTypeIdx < c_numElecTypes,
"The electrostatics type requested is not implemented in the CUDA kernels.");
GMX_ASSERT(vdwTypeIdx < c_numVdwTypes,
"The VdW type requested is not implemented in the CUDA kernels.");
/* assert assumptions made by the kernels */
GMX_ASSERT(c_nbnxnGpuClusterSize * c_nbnxnGpuClusterSize / c_nbnxnGpuClusterpairSplit
== deviceInfo->prop.warpSize,
"The CUDA kernels require the "
"cluster_size_i*cluster_size_j/nbnxn_gpu_clusterpair_split to match the warp size "
"of the architecture targeted.");
return nb_kfunc_ener_prune_ptr[elecTypeIdx][vdwTypeIdx];
return nb_kfunc_ener_noprune_ptr[elecTypeIdx][vdwTypeIdx];
return nb_kfunc_noener_prune_ptr[elecTypeIdx][vdwTypeIdx];
return nb_kfunc_noener_noprune_ptr[elecTypeIdx][vdwTypeIdx];
/*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use. */
static inline int calc_shmem_required_nonbonded(const int num_threads_z,
const DeviceInformation gmx_unused* deviceInfo,
const NBParamGpu* nbp)
/* size of shmem (force-buffers/xq/atom type preloading) */
/* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
/* i-atom x+q in shared memory */
shmem = c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(float4);
/* cj in shared memory, for each warp separately */
shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);
if (nbp->vdwType == VdwType::CutCombGeom || nbp->vdwType == VdwType::CutCombLB)
/* i-atom LJ combination parameters in shared memory */
shmem += c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(float2);
/* i-atom types in shared memory */
shmem += c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(int);
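/* Worked example (a sketch only, assuming the usual CUDA cluster setup of
 * c_clSize = 8, c_nbnxnGpuNumClusterPerSupercluster = 8, c_nbnxnGpuJgroupSize = 4,
 * c_nbnxnGpuClusterpairSplit = 2, and num_threads_z = 1):
 *   xq preload:      8*8*sizeof(float4) = 1024 bytes
 *   cj preload:      1*2*4*sizeof(int)  =   32 bytes
 *   LJ comb. params: 8*8*sizeof(float2) =  512 bytes (geometric/LB combination rules)
 *   or atom types:   8*8*sizeof(int)    =  256 bytes (all other VdW types)
 * i.e. roughly 1.3-1.5 KiB of shared memory per thread block. */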
void nbnxnInsertNonlocalGpuDependency(NbnxmGpu* nb, const InteractionLocality interactionLocality)
const DeviceStream& deviceStream = *nb->deviceStreams[interactionLocality];
/* When we get here all misc operations issued in the local stream as well as
   the local xq H2D are done,
   so we record that in the local stream and wait for it in the nonlocal one.
   This wait needs to precede any PP tasks, bonded or nonbonded, that may
   compute on interactions between local and nonlocal atoms.
if (nb->bUseTwoStreams)
if (interactionLocality == InteractionLocality::Local)
nb->misc_ops_and_local_H2D_done.markEvent(deviceStream);
nb->misc_ops_and_local_H2D_done.enqueueWaitEvent(deviceStream);
/*! \brief Asynchronously launch the host-to-device copy of the xq buffer. */
void gpu_copy_xq_to_gpu(NbnxmGpu* nb, const nbnxn_atomdata_t* nbatom, const AtomLocality atomLocality)
GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");
const InteractionLocality iloc = gpuAtomToInteractionLocality(atomLocality);
int adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */
cu_atomdata_t* adat = nb->atdat;
gpu_plist* plist = nb->plist[iloc];
cu_timers_t* t = nb->timers;
const DeviceStream& deviceStream = *nb->deviceStreams[iloc];
bool bDoTime = nb->bDoTime;
/* Don't launch the non-local H2D copy if there is no dependent
   work to do: neither non-local nor other (e.g. bonded) work
   that takes the nbnxn coordinates as input.
   Doing the same for the local kernel is more complicated, since the
   local part of the force array also depends on the non-local kernel.
   So to avoid complicating the code and to reduce the risk of bugs,
   we always call the local x+q copy (and the rest of the local
   work in nbnxn_gpu_launch_kernel()). */
if ((iloc == InteractionLocality::NonLocal) && !haveGpuShortRangeWork(*nb, iloc))
plist->haveFreshList = false;
// The event is marked for Local interactions unconditionally,
// so it has to be released here because of the early return
// for NonLocal interactions.
nb->misc_ops_and_local_H2D_done.reset();
/* calculate the atom data index range based on locality */
if (atomLocality == AtomLocality::Local)
adat_len = adat->natoms_local;
adat_begin = adat->natoms_local;
adat_len = adat->natoms - adat->natoms_local;
/* beginning of timed HtoD section */
t->xf[atomLocality].nb_h2d.openTimingRegion(deviceStream);
static_assert(sizeof(adat->xq[0]) == sizeof(Float4),
"The size of the xyzq buffer element should be equal to the size of float4.");
copyToDeviceBuffer(&adat->xq,
reinterpret_cast<const Float4*>(nbatom->x().data()) + adat_begin,
GpuApiCallBehavior::Async,
t->xf[atomLocality].nb_h2d.closeTimingRegion(deviceStream);
/* Record the local xq H2D (and all preceding misc work) in the local stream and
   make the non-local stream wait for it; see nbnxnInsertNonlocalGpuDependency(). */
nbnxnInsertNonlocalGpuDependency(nb, iloc);
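/* Illustrative per-step call order on the CPU side (a sketch only; the actual
 * scheduling lives in the caller, not in this file):
 *   gpu_copy_xq_to_gpu(nb, nbatom, AtomLocality::Local);           // x+q H2D (this function)
 *   gpu_launch_kernel(nb, stepWork, InteractionLocality::Local);   // nonbonded kernel
 *   gpu_launch_cpyback(nb, nbatom, stepWork, AtomLocality::Local); // f (+fshift/energies) D2H
 * With domain decomposition the NonLocal variants are interleaved between the
 * Local calls, relying on the stream dependencies set up above. */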
/*! As we execute nonbonded workload in separate streams, before launching
   the kernel we need to make sure that the following operations have completed:
   - atomdata allocation and related H2D transfers (every nstlist step);
   - pair list H2D transfer (every nstlist step);
   - shift vector H2D transfer (every nstlist step);
   - force (+shift force and energy) output clearing (every step).
   These operations are issued in the local stream at the beginning of the step
   and therefore always complete before the local kernel launch. The non-local
   kernel is launched after the local one on the same device/context, hence it is
   inherently scheduled after the operations in the local stream (including the
   above "misc_ops") on pre-GK110 devices with a single hardware queue, but on later
   devices with multiple hardware queues the dependency needs to be enforced.
   We use the misc_ops_and_local_H2D_done event to record the point where
   the local x+q H2D (and all preceding) tasks are complete and synchronize
   with this event in the non-local stream before launching the non-bonded kernel.
 */
void gpu_launch_kernel(NbnxmGpu* nb, const gmx::StepWorkload& stepWork, const InteractionLocality iloc)
cu_atomdata_t* adat = nb->atdat;
NBParamGpu* nbp = nb->nbparam;
gpu_plist* plist = nb->plist[iloc];
cu_timers_t* t = nb->timers;
const DeviceStream& deviceStream = *nb->deviceStreams[iloc];
bool bDoTime = nb->bDoTime;
/* Don't launch the non-local kernel if there is no work to do.
   Doing the same for the local kernel is more complicated, since the
   local part of the force array also depends on the non-local kernel.
   So to avoid complicating the code and to reduce the risk of bugs,
   we always call the local kernel, and later (not in
   this function) the stream wait, local f copyback and the f buffer
   clearing. All these operations, except for the local interaction kernel,
   are needed for the non-local interactions. The skip of the local kernel
   call is taken care of later in this function. */
if (canSkipNonbondedWork(*nb, iloc))
plist->haveFreshList = false;
if (nbp->useDynamicPruning && plist->haveFreshList)
/* Prunes for rlistOuter and rlistInner, sets plist->haveFreshList=false
   (TODO: ATM that's the way the timing accounting can distinguish between
   separate prune kernel and combined force+prune, maybe we need a better way?).
gpu_launch_kernel_pruneonly(nb, iloc, 1);
if (plist->nsci == 0)
/* Don't launch an empty local kernel (not allowed with CUDA) */
/* beginning of timed nonbonded calculation section */
t->interaction[iloc].nb_k.openTimingRegion(deviceStream);
/* Kernel launch config:
 * - The thread block dimensions match the size of i-clusters, j-clusters,
 *   and j-cluster concurrency, in x, y, and z, respectively.
 * - The 1D block-grid contains as many blocks as super-clusters.
int num_threads_z = 1;
if (nb->deviceContext_->deviceInfo().prop.major == 3 && nb->deviceContext_->deviceInfo().prop.minor == 7)
int nblock = calc_nb_kernel_nblock(plist->nsci, &nb->deviceContext_->deviceInfo());
KernelLaunchConfig config;
config.blockSize[0] = c_clSize;
config.blockSize[1] = c_clSize;
config.blockSize[2] = num_threads_z;
config.gridSize[0] = nblock;
config.sharedMemorySize =
calc_shmem_required_nonbonded(num_threads_z, &nb->deviceContext_->deviceInfo(), nbp);
"Non-bonded GPU launch configuration:\n\tThread block: %zux%zux%zu\n\t"
"\tGrid: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n"
plist->nsci * c_nbnxnGpuNumClusterPerSupercluster,
c_nbnxnGpuNumClusterPerSupercluster,
config.sharedMemorySize);
auto* timingEvent = bDoTime ? t->interaction[iloc].nb_k.fetchNextEvent() : nullptr;
select_nbnxn_kernel(nbp->elecType,
stepWork.computeEnergy,
(plist->haveFreshList && !nb->timers->interaction[iloc].didPrune),
&nb->deviceContext_->deviceInfo());
const auto kernelArgs =
prepareGpuKernelArguments(kernel, config, adat, nbp, plist, &stepWork.computeVirial);
launchGpuKernel(kernel, config, deviceStream, timingEvent, "k_calc_nb", kernelArgs);
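/* Note: the argument pack prepared above has to match nbnxn_cu_kfunc_ptr_t,
 * i.e. (const cu_atomdata_t, const NBParamGpu, const gpu_plist, bool); the trailing
 * bool (stepWork.computeVirial here) tells the kernel whether to also accumulate
 * the shift forces needed for the virial. */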
t->interaction[iloc].nb_k.closeTimingRegion(deviceStream);
if (GMX_NATIVE_WINDOWS)
/* Windows: force flushing WDDM queue */
cudaStreamQuery(deviceStream.stream());
/*! Calculates the amount of shared memory required by the prune-only CUDA kernel. */
static inline int calc_shmem_required_prune(const int num_threads_z)
/* i-atom x in shared memory */
shmem = c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(float4);
/* cj in shared memory, for each warp separately */
shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);
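/* Unlike calc_shmem_required_nonbonded() above, no LJ combination parameters or
 * atom types are preloaded here: the prune-only kernel reads just the i-atom
 * coordinates and the cj list. */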
void gpu_launch_kernel_pruneonly(NbnxmGpu* nb, const InteractionLocality iloc, const int numParts)
cu_atomdata_t* adat = nb->atdat;
NBParamGpu* nbp = nb->nbparam;
gpu_plist* plist = nb->plist[iloc];
cu_timers_t* t = nb->timers;
const DeviceStream& deviceStream = *nb->deviceStreams[iloc];
bool bDoTime = nb->bDoTime;
if (plist->haveFreshList)
GMX_ASSERT(numParts == 1, "With first pruning we expect 1 part");
/* Set rollingPruningNumParts to signal that it is not set */
plist->rollingPruningNumParts = 0;
plist->rollingPruningPart = 0;
if (plist->rollingPruningNumParts == 0)
plist->rollingPruningNumParts = numParts;
GMX_ASSERT(numParts == plist->rollingPruningNumParts,
"It is not allowed to change numParts in between list generation steps");
/* Use a local variable for part and update it in plist right away, so we can
 * return early without duplicating the part increment code. */
int part = plist->rollingPruningPart;
plist->rollingPruningPart++;
if (plist->rollingPruningPart >= plist->rollingPruningNumParts)
plist->rollingPruningPart = 0;
/* Compute the number of list entries to prune in this pass */
int numSciInPart = (plist->nsci - part) / numParts;
/* Don't launch the kernel if there is no work to do (not allowed with CUDA) */
if (numSciInPart <= 0)
plist->haveFreshList = false;
GpuRegionTimer* timer = nullptr;
timer = &(plist->haveFreshList ? t->interaction[iloc].prune_k : t->interaction[iloc].rollingPrune_k);
/* beginning of timed prune calculation section */
timer->openTimingRegion(deviceStream);
/* Kernel launch config:
 * - The thread block dimensions match the size of i-clusters, j-clusters,
 *   and j-cluster concurrency, in x, y, and z, respectively.
 * - The 1D block-grid contains as many blocks as super-clusters.
int num_threads_z = c_pruneKernelJ4Concurrency;
int nblock = calc_nb_kernel_nblock(numSciInPart, &nb->deviceContext_->deviceInfo());
KernelLaunchConfig config;
config.blockSize[0] = c_clSize;
config.blockSize[1] = c_clSize;
config.blockSize[2] = num_threads_z;
config.gridSize[0] = nblock;
config.sharedMemorySize = calc_shmem_required_prune(num_threads_z);
"Pruning GPU kernel launch configuration:\n\tThread block: %zux%zux%zu\n\t"
"\tGrid: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n"
numSciInPart * c_nbnxnGpuNumClusterPerSupercluster,
c_nbnxnGpuNumClusterPerSupercluster,
config.sharedMemorySize);
auto* timingEvent = bDoTime ? timer->fetchNextEvent() : nullptr;
constexpr char kernelName[] = "k_pruneonly";
plist->haveFreshList ? nbnxn_kernel_prune_cuda<true> : nbnxn_kernel_prune_cuda<false>;
const auto kernelArgs = prepareGpuKernelArguments(kernel, config, adat, nbp, plist, &numParts, &part);
launchGpuKernel(kernel, config, deviceStream, timingEvent, kernelName, kernelArgs);
/* TODO: consider a more elegant way to track which kernel has been called
   (combined or separate 1st pass prune, rolling prune). */
if (plist->haveFreshList)
plist->haveFreshList = false;
/* Mark that pruning has been done */
nb->timers->interaction[iloc].didPrune = true;
/* Mark that rolling pruning has been done */
nb->timers->interaction[iloc].didRollingPrune = true;
timer->closeTimingRegion(deviceStream);
if (GMX_NATIVE_WINDOWS)
/* Windows: force flushing WDDM queue */
cudaStreamQuery(deviceStream.stream());
void gpu_launch_cpyback(NbnxmGpu* nb,
nbnxn_atomdata_t* nbatom,
const gmx::StepWorkload& stepWork,
const AtomLocality atomLocality)
GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");
int adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */
/* determine interaction locality from atom locality */
const InteractionLocality iloc = gpuAtomToInteractionLocality(atomLocality);
GMX_ASSERT(iloc == InteractionLocality::Local
|| (iloc == InteractionLocality::NonLocal && nb->bNonLocalStreamDoneMarked == false),
"For non-local interactions, the non-local stream's done event must not yet be "
"marked when the copy back is launched.");
/* extract the data */
cu_atomdata_t* adat = nb->atdat;
cu_timers_t* t = nb->timers;
bool bDoTime = nb->bDoTime;
const DeviceStream& deviceStream = *nb->deviceStreams[iloc];
/* don't launch non-local copy-back if there was no non-local work to do */
if ((iloc == InteractionLocality::NonLocal) && !haveGpuShortRangeWork(*nb, iloc))
nb->bNonLocalStreamDoneMarked = false;
getGpuAtomRange(adat, atomLocality, &adat_begin, &adat_len);
/* beginning of timed D2H section */
t->xf[atomLocality].nb_d2h.openTimingRegion(deviceStream);
/* With DD the local D2H transfer can only start after the non-local
   kernel has finished. */
if (iloc == InteractionLocality::Local && nb->bNonLocalStreamDoneMarked)
nb->nonlocal_done.enqueueWaitEvent(deviceStream);
nb->bNonLocalStreamDoneMarked = false;
 * Skip if buffer ops / reduction is offloaded to the GPU.
if (!stepWork.useGpuFBufferOps)
sizeof(adat->f[0]) == sizeof(Float3),
"The size of the force buffer element should be equal to the size of float3.");
copyFromDeviceBuffer(reinterpret_cast<Float3*>(nbatom->out[0].f.data()) + adat_begin,
GpuApiCallBehavior::Async,
/* After the non-local D2H is launched the nonlocal_done event can be
   recorded which signals that the local D2H can proceed. This event is not
   placed after the non-local kernel because we want the non-local data
   back first. */
if (iloc == InteractionLocality::NonLocal)
nb->nonlocal_done.markEvent(deviceStream);
nb->bNonLocalStreamDoneMarked = true;
/* only transfer energies in the local stream */
if (iloc == InteractionLocality::Local)
/* DtoH fshift when virial is needed */
if (stepWork.computeVirial)
static_assert(sizeof(nb->nbst.fshift[0]) == sizeof(adat->fshift[0]),
"Sizes of host- and device-side shift vectors should be the same.");
copyFromDeviceBuffer(
nb->nbst.fshift, &adat->fshift, 0, SHIFTS, deviceStream, GpuApiCallBehavior::Async, nullptr);
if (stepWork.computeEnergy)
static_assert(sizeof(nb->nbst.e_lj[0]) == sizeof(adat->e_lj[0]),
"Sizes of host- and device-side LJ energy terms should be the same.");
copyFromDeviceBuffer(
nb->nbst.e_lj, &adat->e_lj, 0, 1, deviceStream, GpuApiCallBehavior::Async, nullptr);
static_assert(sizeof(nb->nbst.e_el[0]) == sizeof(adat->e_el[0]),
"Sizes of host- and device-side electrostatic energy terms should be the "
"same.");
copyFromDeviceBuffer(
nb->nbst.e_el, &adat->e_el, 0, 1, deviceStream, GpuApiCallBehavior::Async, nullptr);
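/* Both energy terms are accumulated on the device into a single value each,
 * hence the 1-element copies above. */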
t->xf[atomLocality].nb_d2h.closeTimingRegion(deviceStream);
void cuda_set_cacheconfig()
for (int i = 0; i < c_numElecTypes; i++)
for (int j = 0; j < c_numVdwTypes; j++)
/* Default kernel 32/32 kB Shared/L1 */
cudaFuncSetCacheConfig(nb_kfunc_ener_prune_ptr[i][j], cudaFuncCachePreferEqual);
cudaFuncSetCacheConfig(nb_kfunc_ener_noprune_ptr[i][j], cudaFuncCachePreferEqual);
cudaFuncSetCacheConfig(nb_kfunc_noener_prune_ptr[i][j], cudaFuncCachePreferEqual);
stat = cudaFuncSetCacheConfig(nb_kfunc_noener_noprune_ptr[i][j], cudaFuncCachePreferEqual);
CU_RET_ERR(stat, "cudaFuncSetCacheConfig failed");
/* X buffer operations on GPU: performs conversion from rvec to nb format. */
void nbnxn_gpu_x_to_nbat_x(const Nbnxm::Grid& grid,
bool setFillerCoords,
DeviceBuffer<gmx::RVec> d_x,
GpuEventSynchronizer* xReadyOnDevice,
const Nbnxm::AtomLocality locality,
GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");
cu_atomdata_t* adat = nb->atdat;
const int numColumns = grid.numColumns();
const int cellOffset = grid.cellOffset();
const int numAtomsPerCell = grid.numAtomsPerCell();
Nbnxm::InteractionLocality interactionLoc = gpuAtomToInteractionLocality(locality);
const DeviceStream& deviceStream = *nb->deviceStreams[interactionLoc];
int numAtoms = grid.srcAtomEnd() - grid.srcAtomBegin();
// avoid empty kernel launch, skip to inserting stream dependency
// TODO: This will only work with CUDA
GMX_ASSERT(d_x, "Need a valid device pointer");
// ensure that coordinates are ready on the device before launching the kernel
GMX_ASSERT(xReadyOnDevice, "Need a valid GpuEventSynchronizer object");
xReadyOnDevice->enqueueWaitEvent(deviceStream);
KernelLaunchConfig config;
config.blockSize[0] = c_bufOpsThreadsPerBlock;
config.blockSize[1] = 1;
config.blockSize[2] = 1;
config.gridSize[0] = (grid.numCellsColumnMax() * numAtomsPerCell + c_bufOpsThreadsPerBlock - 1)
/ c_bufOpsThreadsPerBlock;
config.gridSize[1] = numColumns;
config.gridSize[2] = 1;
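/* Grid layout: one block row (y) per nbnxm grid column; the x dimension is sized so
 * that the most populated column (numCellsColumnMax() cells of numAtomsPerCell atoms
 * each) is covered by blocks of c_bufOpsThreadsPerBlock threads. */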
GMX_ASSERT(config.gridSize[0] > 0,
"Cannot have an empty grid; the early return above avoids this");
config.sharedMemorySize = 0;
auto kernelFn = setFillerCoords ? nbnxn_gpu_x_to_nbat_x_kernel<true>
: nbnxn_gpu_x_to_nbat_x_kernel<false>;
float4* d_xq = adat->xq;
float3* d_xFloat3 = asFloat3(d_x);
const int* d_atomIndices = nb->atomIndices;
const int* d_cxy_na = &nb->cxy_na[numColumnsMax * gridId];
const int* d_cxy_ind = &nb->cxy_ind[numColumnsMax * gridId];
const auto kernelArgs = prepareGpuKernelArguments(kernelFn,
launchGpuKernel(kernelFn, config, deviceStream, nullptr, "XbufferOps", kernelArgs);
// TODO: note that this is not necessary when there are no local atoms, that is:
// (numAtoms == 0 && interactionLoc == InteractionLocality::Local)
// but for now we avoid that optimization
nbnxnInsertNonlocalGpuDependency(nb, interactionLoc);
void* getGpuForces(NbnxmGpu* nb)