/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
 * Copyright (c) 2017,2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *  \brief Define CUDA implementation of nbnxn_gpu.h
 *
 *  \author Szilard Pall <pall.szilard@gmail.com>
 */
#include "gromacs/nbnxm/nbnxm_gpu.h"

#include "nbnxm_cuda.h"

#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
#include "gromacs/gpu_utils/typecasts.cuh"
#include "gromacs/gpu_utils/vectype_ops.cuh"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/nbnxm/atomdata.h"
#include "gromacs/nbnxm/gpu_common.h"
#include "gromacs/nbnxm/gpu_common_utils.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/grid.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/pairlist.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/gmxassert.h"

#include "nbnxm_buffer_ops_kernels.cuh"
#include "nbnxm_cuda_types.h"
/***** The kernel declarations/definitions come here *****/

/* Top-level kernel declaration generation: will generate through multiple
 * inclusion the following flavors for all kernel declarations:
 * - force-only output;
 * - force and energy output;
 * - force-only with pair list pruning;
 * - force and energy output with pair list pruning.
 */
#define FUNCTION_DECLARATION_ONLY
/** Force only **/
#include "nbnxm_cuda_kernels.cuh"
/** Force & energy **/
#define CALC_ENERGIES
#include "nbnxm_cuda_kernels.cuh"
#undef CALC_ENERGIES

/*** Pair-list pruning kernels ***/
/** Force only **/
#define PRUNE_NBL
#include "nbnxm_cuda_kernels.cuh"
/** Force & energy **/
#define CALC_ENERGIES
#include "nbnxm_cuda_kernels.cuh"
#undef CALC_ENERGIES
#undef PRUNE_NBL

/* Prune-only kernels */
#include "nbnxm_cuda_kernel_pruneonly.cuh"
#undef FUNCTION_DECLARATION_ONLY

/* Now generate the function definitions if we are using a single compilation unit. */
#if GMX_CUDA_NB_SINGLE_COMPILATION_UNIT
#    include "nbnxm_cuda_kernel_F_noprune.cu"
#    include "nbnxm_cuda_kernel_F_prune.cu"
#    include "nbnxm_cuda_kernel_VF_noprune.cu"
#    include "nbnxm_cuda_kernel_VF_prune.cu"
#    include "nbnxm_cuda_kernel_pruneonly.cu"
#endif /* GMX_CUDA_NB_SINGLE_COMPILATION_UNIT */
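
/* Illustrative note (hedged): each pass over nbnxm_cuda_kernels.cuh above generates one
 * kernel per electrostatics/VdW combination, with the flavor encoded in the kernel name,
 * e.g.
 *     nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_cuda
 * is the Ewald-electrostatics, force-switched-LJ, force+energy (VF), pruning flavor. */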
//! Number of CUDA threads in a block
// TODO Optimize this through experimentation
constexpr static int c_bufOpsThreadsPerBlock = 128;

/*! Nonbonded kernel function pointer type */
typedef void (*nbnxn_cu_kfunc_ptr_t)(const cu_atomdata_t, const cu_nbparam_t, const cu_plist_t, bool);
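
/* Illustrative only: a kernel compatible with nbnxn_cu_kfunc_ptr_t is declared with a
 * signature of this form (the final bool toggling shift-force output is an assumption
 * based on the kernel flavors declared above):
 *     __global__ void nbnxn_kernel_ElecCut_VdwLJ_F_cuda(const cu_atomdata_t atdat,
 *                                                        const cu_nbparam_t  nbparam,
 *                                                        const cu_plist_t    plist,
 *                                                        bool                bCalcFshift);
 */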
/*********************************/

/*! Returns the number of blocks to be used for the nonbonded GPU kernel. */
static inline int calc_nb_kernel_nblock(int nwork_units, const DeviceInformation* deviceInfo)
{
    int max_grid_x_size;

    /* CUDA does not accept grid dimension of 0 (which can happen e.g. with an
       empty domain) and that case should be handled before this point. */
    assert(nwork_units > 0);

    max_grid_x_size = deviceInfo->prop.maxGridSize[0];

    /* do we exceed the grid x dimension limit? */
    if (nwork_units > max_grid_x_size)
    {
        gmx_fatal(FARGS,
                  "Watch out, the input system is too large to simulate!\n"
                  "The number of nonbonded work units (=number of super-clusters) exceeds the "
                  "maximum grid size in x dimension (%d > %d)!",
                  nwork_units, max_grid_x_size);
    }

    return nwork_units;
}
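
/* Illustrative note (hedged): callers pass the number of pair-list super-cluster (sci)
 * entries as nwork_units, so the nonbonded launch uses one thread block per super-cluster;
 * the check above only guards the device's 1D grid x-dimension limit. */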
/* Constant arrays listing all kernel function pointers and enabling selection
   of a kernel in an elegant manner. */

/*! Pointers to the non-bonded kernels organized in 2-dim arrays by:
 *  electrostatics and VDW type.
 *
 *  Note that the row- and column-order of function pointers has to match the
 *  order of corresponding enumerated electrostatics and vdw types, resp.,
 *  defined in nbnxm_cuda_types.h.
 */
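
/* Example (illustrative; eelCuRF and evdwCuCUT are assumed enum values from
 * nbnxm_cuda_types.h): the force-only, no-prune kernel for reaction-field
 * electrostatics with plain cut-off LJ is looked up as
 *     nbnxn_cu_kfunc_ptr_t kernel = nb_kfunc_noener_noprune_ptr[eelCuRF][evdwCuCUT];
 * see select_nbnxn_kernel() below for the actual selection logic.
 */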
/*! Force-only kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_noprune_ptr[eelCuNR][evdwCuNR] = {
    { nbnxn_kernel_ElecCut_VdwLJ_F_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_F_cuda,
      nbnxn_kernel_ElecCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_F_cuda,
      nbnxn_kernel_ElecCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_cuda,
      nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_F_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_F_cuda,
      nbnxn_kernel_ElecRF_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_F_cuda,
      nbnxn_kernel_ElecRF_VdwLJPsw_F_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_cuda,
      nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_F_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_F_cuda,
      nbnxn_kernel_ElecEw_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_F_cuda,
      nbnxn_kernel_ElecEw_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_cuda,
      nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_cuda }
};

/*! Force + energy kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_noprune_ptr[eelCuNR][evdwCuNR] = {
    { nbnxn_kernel_ElecCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_cuda,
      nbnxn_kernel_ElecCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_VF_cuda,
      nbnxn_kernel_ElecCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_cuda,
      nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_VF_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_cuda,
      nbnxn_kernel_ElecRF_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_VF_cuda,
      nbnxn_kernel_ElecRF_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_cuda,
      nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_VF_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_cuda,
      nbnxn_kernel_ElecEw_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_VF_cuda,
      nbnxn_kernel_ElecEw_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_cuda,
      nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_cuda }
};

/*! Force + pruning kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_prune_ptr[eelCuNR][evdwCuNR] = {
    { nbnxn_kernel_ElecCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_cuda,
      nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_cuda,
      nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_cuda,
      nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_cuda }
};

/*! Force + energy + pruning kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_prune_ptr[eelCuNR][evdwCuNR] = {
    { nbnxn_kernel_ElecCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_cuda,
      nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_cuda,
      nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_cuda,
      nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_cuda }
};

/*! Return a pointer to the kernel version to be executed at the current step. */
static inline nbnxn_cu_kfunc_ptr_t select_nbnxn_kernel(int  eeltype,
                                                       int  evdwtype,
                                                       bool bDoEne,
                                                       bool bDoPrune,
                                                       const DeviceInformation gmx_unused* deviceInfo)
{
    nbnxn_cu_kfunc_ptr_t res;

    GMX_ASSERT(eeltype < eelCuNR,
               "The electrostatics type requested is not implemented in the CUDA kernels.");
    GMX_ASSERT(evdwtype < evdwCuNR,
               "The VdW type requested is not implemented in the CUDA kernels.");

    /* assert assumptions made by the kernels */
    GMX_ASSERT(c_nbnxnGpuClusterSize * c_nbnxnGpuClusterSize / c_nbnxnGpuClusterpairSplit
                       == deviceInfo->prop.warpSize,
               "The CUDA kernels require the "
               "cluster_size_i*cluster_size_j/nbnxn_gpu_clusterpair_split to match the warp size "
               "of the architecture targeted.");

    if (bDoEne)
    {
        res = bDoPrune ? nb_kfunc_ener_prune_ptr[eeltype][evdwtype]
                       : nb_kfunc_ener_noprune_ptr[eeltype][evdwtype];
    }
    else
    {
        res = bDoPrune ? nb_kfunc_noener_prune_ptr[eeltype][evdwtype]
                       : nb_kfunc_noener_noprune_ptr[eeltype][evdwtype];
    }

    return res;
}
/*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use. */
static inline int calc_shmem_required_nonbonded(const int               num_threads_z,
                                                const DeviceInformation gmx_unused* deviceInfo,
                                                const cu_nbparam_t*     nbp)
{
    int shmem;

    /* size of shmem (force-buffers/xq/atom type preloading) */
    /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
    /* i-atom x+q in shared memory */
    shmem = c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(float4);
    /* cj in shared memory, for each warp separately */
    shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);

    if (nbp->vdwtype == evdwCuCUTCOMBGEOM || nbp->vdwtype == evdwCuCUTCOMBLB)
    {
        /* i-atom LJ combination parameters in shared memory */
        shmem += c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(float2);
    }
    else
    {
        /* i-atom types in shared memory */
        shmem += c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(int);
    }

    return shmem;
}
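
/* Worked example (hedged; assumes the usual cluster-setup constants c_clSize = 8,
 * c_nbnxnGpuNumClusterPerSupercluster = 8, c_nbnxnGpuClusterpairSplit = 2,
 * c_nbnxnGpuJgroupSize = 4, and num_threads_z = 1):
 *     xq preload : 8 * 8 * sizeof(float4) = 1024 bytes
 *     cj buffer  : 1 * 2 * 4 * sizeof(int) =   32 bytes
 *     atom types : 8 * 8 * sizeof(int)     =  256 bytes
 * i.e. roughly 1.3 kB of shared memory per thread block for the default kernel. */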
/*! \brief Sync the nonlocal stream with dependent tasks in the local queue.
 *
 *  As the point where the local stream tasks can be considered complete coincides
 *  with the call point where the nonlocal stream should be synced with it, this
 *  function records an event when called with the local stream as argument and
 *  enqueues a wait on that event in the GPU stream when called with the nonlocal one.
 */
void nbnxnInsertNonlocalGpuDependency(const NbnxmGpu* nb, const InteractionLocality interactionLocality)
{
    cudaStream_t stream = nb->stream[interactionLocality];

    /* When we get here all misc operations issued in the local stream as well as
       the local xq H2D are done,
       so we record that in the local stream and wait for it in the nonlocal one.
       This wait needs to precede any PP tasks, bonded or nonbonded, that may
       compute on interactions between local and nonlocal atoms.
     */
    if (nb->bUseTwoStreams)
    {
        if (interactionLocality == InteractionLocality::Local)
        {
            cudaError_t stat = cudaEventRecord(nb->misc_ops_and_local_H2D_done, stream);
            CU_RET_ERR(stat, "cudaEventRecord on misc_ops_and_local_H2D_done failed");
        }
        else
        {
            cudaError_t stat = cudaStreamWaitEvent(stream, nb->misc_ops_and_local_H2D_done, 0);
            CU_RET_ERR(stat, "cudaStreamWaitEvent on misc_ops_and_local_H2D_done failed");
        }
    }
}
/*! \brief Launch asynchronously the xq buffer host to device copy. */
void gpu_copy_xq_to_gpu(NbnxmGpu* nb, const nbnxn_atomdata_t* nbatom, const AtomLocality atomLocality)
{
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");

    GMX_ASSERT(atomLocality == AtomLocality::Local || atomLocality == AtomLocality::NonLocal,
               "Only local and non-local xq transfers are supported");

    const InteractionLocality iloc = gpuAtomToInteractionLocality(atomLocality);

    int adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */

    cu_atomdata_t* adat   = nb->atdat;
    cu_plist_t*    plist  = nb->plist[iloc];
    cu_timers_t*   t      = nb->timers;
    cudaStream_t   stream = nb->stream[iloc];

    bool bDoTime = nb->bDoTime;

    /* Don't launch the non-local H2D copy if there is no dependent
       work to do: neither non-local nor other (e.g. bonded) work
       to do that has as input the nbnxn coordinates.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local x+q copy (and the rest of the local
       work in nbnxn_gpu_launch_kernel()).
     */
    if ((iloc == InteractionLocality::NonLocal) && !haveGpuShortRangeWork(*nb, iloc))
    {
        plist->haveFreshList = false;

        return;
    }

    /* calculate the atom data index range based on locality */
    if (atomLocality == AtomLocality::Local)
    {
        adat_begin = 0;
        adat_len   = adat->natoms_local;
    }
    else
    {
        adat_begin = adat->natoms_local;
        adat_len   = adat->natoms - adat->natoms_local;
    }

    /* beginning of timed HtoD section */
    if (bDoTime)
    {
        t->xf[atomLocality].nb_h2d.openTimingRegion(stream);
    }

    /* HtoD x, q */
    cu_copy_H2D_async(adat->xq + adat_begin,
                      static_cast<const void*>(nbatom->x().data() + adat_begin * 4),
                      adat_len * sizeof(*adat->xq), stream);

    if (bDoTime)
    {
        t->xf[atomLocality].nb_h2d.closeTimingRegion(stream);
    }

    /* When we get here all misc operations issued in the local stream as well as
       the local xq H2D are done,
       so we record that in the local stream and wait for it in the nonlocal one.
       This wait needs to precede any PP tasks, bonded or nonbonded, that may
       compute on interactions between local and nonlocal atoms.
     */
    nbnxnInsertNonlocalGpuDependency(nb, iloc);
}
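
/* Illustrative sketch (hedged): cu_copy_H2D_async() used above is a thin wrapper, and the
 * transfer is equivalent to an asynchronous CUDA host-to-device copy of roughly this form:
 *     cudaMemcpyAsync(adat->xq + adat_begin, nbatom->x().data() + adat_begin * 4,
 *                     adat_len * sizeof(*adat->xq), cudaMemcpyHostToDevice, stream);
 */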
/*! As we execute nonbonded workload in separate streams, before launching
   the kernel we need to make sure that the following operations have completed:
   - atomdata allocation and related H2D transfers (every nstlist step);
   - pair list H2D transfer (every nstlist step);
   - shift vector H2D transfer (every nstlist step);
   - force (+shift force and energy) output clearing (every step).

   These operations are issued in the local stream at the beginning of the step
   and therefore always complete before the local kernel launch. The non-local
   kernel is launched after the local one on the same device/context, hence it is
   inherently scheduled after the operations in the local stream (including the
   above "misc_ops") on pre-GK110 devices with a single hardware queue, but on later
   devices with multiple hardware queues the dependency needs to be enforced.
   We use the misc_ops_and_local_H2D_done event to record the point where
   the local x+q H2D (and all preceding) tasks are complete and synchronize
   with this event in the non-local stream before launching the non-bonded kernel.
 */
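/* Minimal sketch of the two-stream dependency described above (illustrative only; the
 * actual logic lives in nbnxnInsertNonlocalGpuDependency()):
 *     cudaEventRecord(nb->misc_ops_and_local_H2D_done, localStream);
 *     cudaStreamWaitEvent(nonLocalStream, nb->misc_ops_and_local_H2D_done, 0);
 */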
void gpu_launch_kernel(NbnxmGpu* nb, const gmx::StepWorkload& stepWork, const InteractionLocality iloc)
{
    cu_atomdata_t* adat   = nb->atdat;
    cu_nbparam_t*  nbp    = nb->nbparam;
    cu_plist_t*    plist  = nb->plist[iloc];
    cu_timers_t*   t      = nb->timers;
    cudaStream_t   stream = nb->stream[iloc];

    bool bDoTime = nb->bDoTime;

    /* Don't launch the non-local kernel if there is no work to do.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local kernel, and later (not in
       this function) the stream wait, local f copyback and the f buffer
       clearing. All these operations, except for the local interaction kernel,
       are needed for the non-local interactions. The skip of the local kernel
       call is taken care of later in this function. */
    if (canSkipNonbondedWork(*nb, iloc))
    {
        plist->haveFreshList = false;

        return;
    }

    if (nbp->useDynamicPruning && plist->haveFreshList)
    {
        /* Prunes for rlistOuter and rlistInner, sets plist->haveFreshList=false
           (TODO: ATM that's the way the timing accounting can distinguish between
           separate prune kernel and combined force+prune, maybe we need a better way?).
         */
        gpu_launch_kernel_pruneonly(nb, iloc, 1);
    }

    if (plist->nsci == 0)
    {
        /* Don't launch an empty local kernel (not allowed with CUDA) */
        return;
    }

    /* beginning of timed nonbonded calculation section */
    if (bDoTime)
    {
        t->interaction[iloc].nb_k.openTimingRegion(stream);
    }

    /* Kernel launch config:
     * - The thread block dimensions match the size of i-clusters, j-clusters,
     *   and j-cluster concurrency, in x, y, and z, respectively.
     * - The 1D block-grid contains as many blocks as super-clusters.
     */
    int num_threads_z = 1;
    if (nb->deviceInfo->prop.major == 3 && nb->deviceInfo->prop.minor == 7)
    {
        num_threads_z = 2;
    }
    int nblock = calc_nb_kernel_nblock(plist->nsci, nb->deviceInfo);

    KernelLaunchConfig config;
    config.blockSize[0]     = c_clSize;
    config.blockSize[1]     = c_clSize;
    config.blockSize[2]     = num_threads_z;
    config.gridSize[0]      = nblock;
    config.sharedMemorySize = calc_shmem_required_nonbonded(num_threads_z, nb->deviceInfo, nbp);
    config.stream           = stream;

    if (debug)
    {
        fprintf(debug,
                "Non-bonded GPU launch configuration:\n\tThread block: %zux%zux%zu\n\t"
                "\tGrid: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n"
                "\tShMem: %zu\n",
                config.blockSize[0], config.blockSize[1], config.blockSize[2], config.gridSize[0],
                config.gridSize[1], plist->nsci * c_nbnxnGpuNumClusterPerSupercluster,
                c_nbnxnGpuNumClusterPerSupercluster, plist->na_c, config.sharedMemorySize);
    }

    auto*      timingEvent = bDoTime ? t->interaction[iloc].nb_k.fetchNextEvent() : nullptr;
    const auto kernel      = select_nbnxn_kernel(
            nbp->eeltype, nbp->vdwtype, stepWork.computeEnergy,
            (plist->haveFreshList && !nb->timers->interaction[iloc].didPrune), nb->deviceInfo);
    const auto kernelArgs =
            prepareGpuKernelArguments(kernel, config, adat, nbp, plist, &stepWork.computeVirial);
    launchGpuKernel(kernel, config, timingEvent, "k_calc_nb", kernelArgs);

    if (bDoTime)
    {
        t->interaction[iloc].nb_k.closeTimingRegion(stream);
    }

    if (GMX_NATIVE_WINDOWS)
    {
        /* Windows: force flushing WDDM queue */
        cudaStreamQuery(stream);
    }
}
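
/* Illustrative only: with the configuration built above, launchGpuKernel() amounts to a
 * CUDA launch of roughly this form (kernel parameters are passed by value):
 *     kernel<<<dim3(config.gridSize[0], 1, 1), dim3(c_clSize, c_clSize, num_threads_z),
 *              config.sharedMemorySize, stream>>>(*adat, *nbp, *plist, stepWork.computeVirial);
 */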
/*! Calculates the amount of shared memory required by the CUDA prune kernel in use. */
static inline int calc_shmem_required_prune(const int num_threads_z)
{
    int shmem;

    /* i-atom x in shared memory */
    shmem = c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(float4);
    /* cj in shared memory, for each warp separately */
    shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);

    return shmem;
}
void gpu_launch_kernel_pruneonly(NbnxmGpu* nb, const InteractionLocality iloc, const int numParts)
{
    cu_atomdata_t* adat   = nb->atdat;
    cu_nbparam_t*  nbp    = nb->nbparam;
    cu_plist_t*    plist  = nb->plist[iloc];
    cu_timers_t*   t      = nb->timers;
    cudaStream_t   stream = nb->stream[iloc];

    bool bDoTime = nb->bDoTime;

    if (plist->haveFreshList)
    {
        GMX_ASSERT(numParts == 1, "With first pruning we expect 1 part");

        /* Set rollingPruningNumParts to signal that it is not set */
        plist->rollingPruningNumParts = 0;
        plist->rollingPruningPart     = 0;
    }
    else
    {
        if (plist->rollingPruningNumParts == 0)
        {
            plist->rollingPruningNumParts = numParts;
        }
        else
        {
            GMX_ASSERT(numParts == plist->rollingPruningNumParts,
                       "It is not allowed to change numParts in between list generation steps");
        }
    }

    /* Use a local variable for part and update in plist, so we can return here
     * without duplicating the part increment code.
     */
    int part = plist->rollingPruningPart;

    plist->rollingPruningPart++;
    if (plist->rollingPruningPart >= plist->rollingPruningNumParts)
    {
        plist->rollingPruningPart = 0;
    }

    /* Compute the number of list entries to prune in this pass */
    int numSciInPart = (plist->nsci - part) / numParts;

    /* Don't launch the kernel if there is no work to do (not allowed with CUDA) */
    if (numSciInPart <= 0)
    {
        plist->haveFreshList = false;

        return;
    }

    GpuRegionTimer* timer = nullptr;
    if (bDoTime)
    {
        timer = &(plist->haveFreshList ? t->interaction[iloc].prune_k : t->interaction[iloc].rollingPrune_k);
    }

    /* beginning of timed prune calculation section */
    if (bDoTime)
    {
        timer->openTimingRegion(stream);
    }

    /* Kernel launch config:
     * - The thread block dimensions match the size of i-clusters, j-clusters,
     *   and j-cluster concurrency, in x, y, and z, respectively.
     * - The 1D block-grid contains as many blocks as super-clusters.
     */
    int                num_threads_z = c_cudaPruneKernelJ4Concurrency;
    int                nblock        = calc_nb_kernel_nblock(numSciInPart, nb->deviceInfo);
    KernelLaunchConfig config;
    config.blockSize[0]     = c_clSize;
    config.blockSize[1]     = c_clSize;
    config.blockSize[2]     = num_threads_z;
    config.gridSize[0]      = nblock;
    config.sharedMemorySize = calc_shmem_required_prune(num_threads_z);
    config.stream           = stream;

    if (debug)
    {
        fprintf(debug,
                "Pruning GPU kernel launch configuration:\n\tThread block: %zux%zux%zu\n\t"
                "\tGrid: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n"
                "\tShMem: %zu\n",
                config.blockSize[0], config.blockSize[1], config.blockSize[2], config.gridSize[0],
                config.gridSize[1], numSciInPart * c_nbnxnGpuNumClusterPerSupercluster,
                c_nbnxnGpuNumClusterPerSupercluster, plist->na_c, config.sharedMemorySize);
    }

    auto*          timingEvent  = bDoTime ? timer->fetchNextEvent() : nullptr;
    constexpr char kernelName[] = "k_pruneonly";
    const auto     kernel =
            plist->haveFreshList ? nbnxn_kernel_prune_cuda<true> : nbnxn_kernel_prune_cuda<false>;
    const auto kernelArgs = prepareGpuKernelArguments(kernel, config, adat, nbp, plist, &numParts, &part);
    launchGpuKernel(kernel, config, timingEvent, kernelName, kernelArgs);

    /* TODO: consider a more elegant way to track which kernel has been called
       (combined or separate 1st pass prune, rolling prune). */
    if (plist->haveFreshList)
    {
        plist->haveFreshList = false;
        /* Mark that pruning has been done */
        nb->timers->interaction[iloc].didPrune = true;
    }
    else
    {
        /* Mark that rolling pruning has been done */
        nb->timers->interaction[iloc].didRollingPrune = true;
    }

    if (bDoTime)
    {
        timer->closeTimingRegion(stream);
    }

    if (GMX_NATIVE_WINDOWS)
    {
        /* Windows: force flushing WDDM queue */
        cudaStreamQuery(stream);
    }
}
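
/* Illustrative usage (hedged): a rolling prune of the local list split over four steps
 * between two list generations would be issued once per step as
 *     gpu_launch_kernel_pruneonly(nb, InteractionLocality::Local, 4);
 * with plist->rollingPruningPart advancing 0 -> 1 -> 2 -> 3 internally between calls. */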
void gpu_launch_cpyback(NbnxmGpu*                nb,
                        nbnxn_atomdata_t*        nbatom,
                        const gmx::StepWorkload& stepWork,
                        const AtomLocality       atomLocality)
{
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");

    cudaError_t stat;
    int         adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */

    /* determine interaction locality from atom locality */
    const InteractionLocality iloc = gpuAtomToInteractionLocality(atomLocality);

    /* extract the data */
    cu_atomdata_t* adat    = nb->atdat;
    cu_timers_t*   t       = nb->timers;
    bool           bDoTime = nb->bDoTime;
    cudaStream_t   stream  = nb->stream[iloc];

    /* don't launch non-local copy-back if there was no non-local work to do */
    if ((iloc == InteractionLocality::NonLocal) && !haveGpuShortRangeWork(*nb, iloc))
    {
        return;
    }

    getGpuAtomRange(adat, atomLocality, &adat_begin, &adat_len);

    /* beginning of timed D2H section */
    if (bDoTime)
    {
        t->xf[atomLocality].nb_d2h.openTimingRegion(stream);
    }

    /* With DD the local D2H transfer can only start after the non-local
       kernel has finished. */
    if (iloc == InteractionLocality::Local && nb->bUseTwoStreams)
    {
        stat = cudaStreamWaitEvent(stream, nb->nonlocal_done, 0);
        CU_RET_ERR(stat, "cudaStreamWaitEvent on nonlocal_done failed");
    }

    /* DtoH f
     * Skip if buffer ops / reduction is offloaded to the GPU.
     */
    if (!stepWork.useGpuFBufferOps)
    {
        cu_copy_D2H_async(nbatom->out[0].f.data() + adat_begin * 3, adat->f + adat_begin,
                          (adat_len) * sizeof(*adat->f), stream);
    }

    /* After the non-local D2H is launched the nonlocal_done event can be
       recorded which signals that the local D2H can proceed. This event is not
       placed after the non-local kernel because we want the non-local data
       back first. */
    if (iloc == InteractionLocality::NonLocal)
    {
        stat = cudaEventRecord(nb->nonlocal_done, stream);
        CU_RET_ERR(stat, "cudaEventRecord on nonlocal_done failed");
    }

    /* only transfer energies in the local stream */
    if (iloc == InteractionLocality::Local)
    {
        /* DtoH fshift when virial is needed */
        if (stepWork.computeVirial)
        {
            cu_copy_D2H_async(nb->nbst.fshift, adat->fshift, SHIFTS * sizeof(*nb->nbst.fshift), stream);
        }

        /* DtoH energies */
        if (stepWork.computeEnergy)
        {
            cu_copy_D2H_async(nb->nbst.e_lj, adat->e_lj, sizeof(*nb->nbst.e_lj), stream);
            cu_copy_D2H_async(nb->nbst.e_el, adat->e_el, sizeof(*nb->nbst.e_el), stream);
        }
    }

    if (bDoTime)
    {
        t->xf[atomLocality].nb_d2h.closeTimingRegion(stream);
    }
}
void cuda_set_cacheconfig()
{
    cudaError_t stat;

    for (int i = 0; i < eelCuNR; i++)
    {
        for (int j = 0; j < evdwCuNR; j++)
        {
            /* Default kernel 32/32 kB Shared/L1 */
            cudaFuncSetCacheConfig(nb_kfunc_ener_prune_ptr[i][j], cudaFuncCachePreferEqual);
            cudaFuncSetCacheConfig(nb_kfunc_ener_noprune_ptr[i][j], cudaFuncCachePreferEqual);
            cudaFuncSetCacheConfig(nb_kfunc_noener_prune_ptr[i][j], cudaFuncCachePreferEqual);
            stat = cudaFuncSetCacheConfig(nb_kfunc_noener_noprune_ptr[i][j], cudaFuncCachePreferEqual);
            CU_RET_ERR(stat, "cudaFuncSetCacheConfig failed");
        }
    }
}
/* X buffer operations on GPU: performs conversion from rvec to nb format. */
void nbnxn_gpu_x_to_nbat_x(const Nbnxm::Grid&        grid,
                           bool                      setFillerCoords,
                           NbnxmGpu*                 nb,
                           DeviceBuffer<gmx::RVec>   d_x,
                           GpuEventSynchronizer*     xReadyOnDevice,
                           const Nbnxm::AtomLocality locality,
                           int                       gridId,
                           int                       numColumnsMax)
{
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");

    cu_atomdata_t* adat = nb->atdat;

    const int                  numColumns      = grid.numColumns();
    const int                  cellOffset      = grid.cellOffset();
    const int                  numAtomsPerCell = grid.numAtomsPerCell();
    Nbnxm::InteractionLocality interactionLoc  = gpuAtomToInteractionLocality(locality);

    cudaStream_t stream = nb->stream[interactionLoc];

    int numAtoms = grid.srcAtomEnd() - grid.srcAtomBegin();
    // avoid empty kernel launch, skip to inserting stream dependency
    if (numAtoms != 0)
    {
        // TODO: This will only work with CUDA
        GMX_ASSERT(d_x, "Need a valid device pointer");

        // ensure that coordinates are ready on the device before launching the kernel
        GMX_ASSERT(xReadyOnDevice, "Need a valid GpuEventSynchronizer object");
        xReadyOnDevice->enqueueWaitEvent(stream);

        KernelLaunchConfig config;
        config.blockSize[0] = c_bufOpsThreadsPerBlock;
        config.blockSize[1] = 1;
        config.blockSize[2] = 1;
        config.gridSize[0]  = (grid.numCellsColumnMax() * numAtomsPerCell + c_bufOpsThreadsPerBlock - 1)
                              / c_bufOpsThreadsPerBlock;
        config.gridSize[1] = numColumns;
        config.gridSize[2] = 1;
        GMX_ASSERT(config.gridSize[0] > 0,
                   "Can not have empty grid, early return above avoids this");
        config.sharedMemorySize = 0;
        config.stream           = stream;

        auto       kernelFn      = setFillerCoords ? nbnxn_gpu_x_to_nbat_x_kernel<true>
                                                   : nbnxn_gpu_x_to_nbat_x_kernel<false>;
        float4*    d_xq          = adat->xq;
        float3*    d_xFloat3     = asFloat3(d_x);
        const int* d_atomIndices = nb->atomIndices;
        const int* d_cxy_na      = &nb->cxy_na[numColumnsMax * gridId];
        const int* d_cxy_ind     = &nb->cxy_ind[numColumnsMax * gridId];
        const auto kernelArgs    = prepareGpuKernelArguments(kernelFn, config, &numColumns, &d_xq,
                                                             &d_xFloat3, &d_atomIndices, &d_cxy_na,
                                                             &d_cxy_ind, &cellOffset, &numAtomsPerCell);
        launchGpuKernel(kernelFn, config, nullptr, "XbufferOps", kernelArgs);
    }

    // TODO: note that this is not necessary when there are no local atoms, that is:
    // (numAtoms == 0 && interactionLoc == InteractionLocality::Local)
    // but for now we avoid that optimization
    nbnxnInsertNonlocalGpuDependency(nb, interactionLoc);
}
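
/* Worked example (hedged; illustration-only numbers): with numAtomsPerCell = 64,
 * grid.numCellsColumnMax() = 9 and c_bufOpsThreadsPerBlock = 128, each grid column is
 * covered by (9 * 64 + 127) / 128 = 5 thread blocks in x, and config.gridSize[1] = numColumns
 * launches one such set of blocks per column. */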
/* F buffer operations on GPU: performs force summations and conversion from nb to rvec format.
 *
 * NOTE: When the total force device buffer is reallocated and its size increases, it is cleared in
 *       the Local stream. Hence, if accumulateForce is true, the NonLocal stream should start
 *       accumulating forces only after the Local stream has done so.
 */
void nbnxn_gpu_add_nbat_f_to_f(const AtomLocality                         atomLocality,
                               DeviceBuffer<gmx::RVec>                    totalForcesDevice,
                               NbnxmGpu*                                  nb,
                               void*                                      pmeForcesDevice,
                               gmx::ArrayRef<GpuEventSynchronizer* const> dependencyList,
                               int                                        atomStart,
                               int                                        numAtoms,
                               bool                                       useGpuFPmeReduction,
                               bool                                       accumulateForce)
{
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");
    GMX_ASSERT(numAtoms != 0, "Cannot call function with no atoms");
    GMX_ASSERT(totalForcesDevice, "Need a valid totalForcesDevice pointer");

    const InteractionLocality iLocality = gpuAtomToInteractionLocality(atomLocality);
    cudaStream_t              stream    = nb->stream[iLocality];
    cu_atomdata_t*            adat      = nb->atdat;

    size_t gmx_used_in_debug numDependency = static_cast<size_t>((useGpuFPmeReduction == true))
                                             + static_cast<size_t>((accumulateForce == true));
    GMX_ASSERT(numDependency >= dependencyList.size(),
               "Mismatching number of dependencies and call signature");

    // Enqueue wait on all dependencies passed
    for (auto const synchronizer : dependencyList)
    {
        synchronizer->enqueueWaitEvent(stream);
    }

    /* launch kernel */
    KernelLaunchConfig config;
    config.blockSize[0]     = c_bufOpsThreadsPerBlock;
    config.blockSize[1]     = 1;
    config.blockSize[2]     = 1;
    config.gridSize[0]      = ((numAtoms + 1) + c_bufOpsThreadsPerBlock - 1) / c_bufOpsThreadsPerBlock;
    config.gridSize[1]      = 1;
    config.gridSize[2]      = 1;
    config.sharedMemorySize = 0;
    config.stream           = stream;

    auto kernelFn = accumulateForce ? nbnxn_gpu_add_nbat_f_to_f_kernel<true, false>
                                    : nbnxn_gpu_add_nbat_f_to_f_kernel<false, false>;

    if (useGpuFPmeReduction)
    {
        GMX_ASSERT(pmeForcesDevice, "Need a valid pmeForcesDevice pointer");
        kernelFn = accumulateForce ? nbnxn_gpu_add_nbat_f_to_f_kernel<true, true>
                                   : nbnxn_gpu_add_nbat_f_to_f_kernel<false, true>;
    }

    const float3* d_fNB    = adat->f;
    const float3* d_fPme   = static_cast<float3*>(pmeForcesDevice);
    float3*       d_fTotal = asFloat3(totalForcesDevice);
    const int*    d_cell   = nb->cell;

    const auto kernelArgs = prepareGpuKernelArguments(kernelFn, config, &d_fNB, &d_fPme, &d_fTotal,
                                                      &d_cell, &atomStart, &numAtoms);

    launchGpuKernel(kernelFn, config, nullptr, "FbufferOps", kernelArgs);

    if (atomLocality == AtomLocality::Local)
    {
        GMX_ASSERT(nb->localFReductionDone != nullptr,
                   "localFReductionDone has to be a valid pointer");
        nb->localFReductionDone->markEvent(stream);
    }
}
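
/* Illustrative only (hypothetical consumer stream): code that consumes the reduced local
 * forces in another stream would typically block that stream on the event marked above:
 *     nb->localFReductionDone->enqueueWaitEvent(consumerStream);
 */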