/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \file
 *  \brief Define CUDA implementation of nbnxn_gpu.h
 *
 *  \author Szilard Pall <pall.szilard@gmail.com>
 */
#include "gmxpre.h"

#include "config.h"

#include <assert.h>
#include <stdlib.h>

#include "gromacs/nbnxm/nbnxm_gpu.h"

#include "nbnxm_cuda.h"

#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/mdlib/force_flags.h"
#include "gromacs/nbnxm/gpu_common.h"
#include "gromacs/nbnxm/gpu_common_utils.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/pairlist.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/gmxassert.h"

#include "nbnxm_cuda_types.h"
/***** The kernel declarations/definitions come here *****/
/* Top-level kernel declaration generation: will generate through multiple
 * inclusion the following flavors for all kernel declarations:
 * - force-only output;
 * - force and energy output;
 * - force-only with pair list pruning;
 * - force and energy output with pair list pruning.
 */
#define FUNCTION_DECLARATION_ONLY
/** Force only **/
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernels.cuh"
/** Force & energy **/
#define CALC_ENERGIES
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernels.cuh"
#undef CALC_ENERGIES

/*** Pair-list pruning kernels ***/
/** Force only **/
#define PRUNE_NBL
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernels.cuh"
/** Force & energy **/
#define CALC_ENERGIES
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernels.cuh"
#undef CALC_ENERGIES
#undef PRUNE_NBL

/* Prune-only kernels */
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_pruneonly.cuh"
#undef FUNCTION_DECLARATION_ONLY
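/* For example (illustrative, based on the kernel names used in the pointer
 * tables below): each inclusion above generates one output flavor per
 * electrostatics/VdW combination, with the flavor encoded in the name:
 *   nbnxn_kernel_ElecEw_VdwLJ_F_cuda         force only
 *   nbnxn_kernel_ElecEw_VdwLJ_VF_cuda        force + energy
 *   nbnxn_kernel_ElecEw_VdwLJ_F_prune_cuda   force + pruning
 *   nbnxn_kernel_ElecEw_VdwLJ_VF_prune_cuda  force + energy + pruning
 */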
/* Now generate the function definitions if we are using a single compilation unit. */
#if GMX_CUDA_NB_SINGLE_COMPILATION_UNIT
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_F_noprune.cu"
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_F_prune.cu"
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_VF_noprune.cu"
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_VF_prune.cu"
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_pruneonly.cu"
#endif /* GMX_CUDA_NB_SINGLE_COMPILATION_UNIT */
/*! Nonbonded kernel function pointer type */
typedef void (*nbnxn_cu_kfunc_ptr_t)(const cu_atomdata_t,
                                     const cu_nbparam_t,
                                     const cu_plist_t,
                                     bool);
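/* Dispatch sketch (illustrative; mirrors the calls made in gpu_launch_kernel()
 * below): a kernel flavor is looked up in the pointer tables and launched
 * through the common GPU launch helper:
 *
 *   nbnxn_cu_kfunc_ptr_t kernel = select_nbnxn_kernel(eeltype, evdwtype,
 *                                                     bCalcEner, bDoPrune, devInfo);
 *   const auto kernelArgs = prepareGpuKernelArguments(kernel, config, adat, nbp, plist, &bCalcFshift);
 *   launchGpuKernel(kernel, config, timingEvent, "k_calc_nb", kernelArgs);
 */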
/*********************************/
/*! Returns the number of blocks to be used for the nonbonded GPU kernel. */
static inline int calc_nb_kernel_nblock(int nwork_units, const gmx_device_info_t *dinfo)
{
    int max_grid_x_size;

    assert(dinfo);
    /* CUDA does not accept grid dimension of 0 (which can happen e.g. with an
       empty domain) and that case should be handled before this point. */
    assert(nwork_units > 0);

    max_grid_x_size = dinfo->prop.maxGridSize[0];

    /* do we exceed the grid x dimension limit? */
    if (nwork_units > max_grid_x_size)
    {
        gmx_fatal(FARGS, "Watch out, the input system is too large to simulate!\n"
                  "The number of nonbonded work units (=number of super-clusters) exceeds the "
                  "maximum grid size in x dimension (%d > %d)!", nwork_units, max_grid_x_size);
    }

    return nwork_units;
}
/* Constant arrays listing all kernel function pointers and enabling selection
   of a kernel in an elegant manner. */

/*! Pointers to the non-bonded kernels organized in 2-dim arrays by:
 *  electrostatics and VDW type.
 *
 *  Note that the row- and column-order of function pointers has to match the
 *  order of the corresponding enumerated electrostatics and VdW types, resp.,
 *  defined in nbnxn_cuda_types.h.
 */
/*! Force-only kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_noprune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_F_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_F_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_F_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_F_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_F_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_cuda }
};
/*! Force + energy kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_noprune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_VF_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_VF_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_cuda }
};
/*! Force + pruning kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_prune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_cuda }
};
/*! Force + energy + pruning kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_prune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_cuda }
};
/*! Return a pointer to the kernel version to be executed at the current step. */
static inline nbnxn_cu_kfunc_ptr_t select_nbnxn_kernel(int                                  eeltype,
                                                       int                                  evdwtype,
                                                       bool                                 bDoEne,
                                                       bool                                 bDoPrune,
                                                       const gmx_device_info_t gmx_unused  *devInfo)
{
    nbnxn_cu_kfunc_ptr_t res;

    GMX_ASSERT(eeltype < eelCuNR,
               "The electrostatics type requested is not implemented in the CUDA kernels.");
    GMX_ASSERT(evdwtype < evdwCuNR,
               "The VdW type requested is not implemented in the CUDA kernels.");

    /* assert assumptions made by the kernels */
    GMX_ASSERT(c_nbnxnGpuClusterSize*c_nbnxnGpuClusterSize/c_nbnxnGpuClusterpairSplit == devInfo->prop.warpSize,
               "The CUDA kernels require cluster_size_i*cluster_size_j/nbnxn_gpu_clusterpair_split to match the warp size of the targeted architecture.");
    res = bDoEne ? (bDoPrune ? nb_kfunc_ener_prune_ptr[eeltype][evdwtype]
                             : nb_kfunc_ener_noprune_ptr[eeltype][evdwtype])
                 : (bDoPrune ? nb_kfunc_noener_prune_ptr[eeltype][evdwtype]
                             : nb_kfunc_noener_noprune_ptr[eeltype][evdwtype]);

    return res;
}
/*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use. */
static inline int calc_shmem_required_nonbonded(const int                           num_threads_z,
                                                const gmx_device_info_t gmx_unused *dinfo,
                                                const cu_nbparam_t                 *nbp)
{
    int shmem;

    assert(dinfo);

    /* size of shmem (force-buffers/xq/atom type preloading) */
    /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
    /* i-atom x+q in shared memory */
    shmem = c_numClPerSupercl * c_clSize * sizeof(float4);
    /* cj in shared memory, for each warp separately */
    shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);

    if (nbp->vdwtype == evdwCuCUTCOMBGEOM ||
        nbp->vdwtype == evdwCuCUTCOMBLB)
    {
        /* i-atom LJ combination parameters in shared memory */
        shmem += c_numClPerSupercl * c_clSize * sizeof(float2);
    }
    else
    {
        /* i-atom types in shared memory */
        shmem += c_numClPerSupercl * c_clSize * sizeof(int);
    }

    return shmem;
}
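/* Worked example (assuming the default sizes c_numClPerSupercl = 8, c_clSize = 8,
 * c_nbnxnGpuClusterpairSplit = 2, c_nbnxnGpuJgroupSize = 4, num_threads_z = 1
 * and no LJ combination-rule parameters):
 *   xq preload:  8*8*sizeof(float4) = 1024 bytes
 *   cj preload:  1*2*4*sizeof(int)  =   32 bytes
 *   atom types:  8*8*sizeof(int)    =  256 bytes
 * i.e. ~1.3 kB per thread block, far below the CUDA per-block shared memory limit. */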
/*! \brief Asynchronously launch the host-to-device copy of the xq buffer. */
void gpu_copy_xq_to_gpu(gmx_nbnxn_cuda_t       *nb,
                        const nbnxn_atomdata_t *nbatom,
                        const AtomLocality      atomLocality,
                        const bool              haveOtherWork)
{
    GMX_ASSERT(atomLocality == AtomLocality::Local || atomLocality == AtomLocality::NonLocal,
               "Only local and non-local xq transfers are supported");

    const InteractionLocality iloc = gpuAtomToInteractionLocality(atomLocality);

    int            adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */

    cu_atomdata_t *adat   = nb->atdat;
    cu_plist_t    *plist  = nb->plist[iloc];
    cu_timers_t   *t      = nb->timers;
    cudaStream_t   stream = nb->stream[iloc];

    bool           bDoTime = nb->bDoTime;
    /* Don't launch the non-local H2D copy if there is no dependent
       work to do: neither non-local nor other (e.g. bonded) work
       that takes the nbnxn coordinates as input.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local x+q copy (and the rest of the local
       work in nbnxn_gpu_launch_kernel()).
     */
    if (!haveOtherWork && canSkipWork(*nb, iloc))
    {
        plist->haveFreshList = false;

        return;
    }
    /* calculate the atom data index range based on locality */
    if (atomLocality == AtomLocality::Local)
    {
        adat_begin = 0;
        adat_len   = adat->natoms_local;
    }
    else
    {
        adat_begin = adat->natoms_local;
        adat_len   = adat->natoms - adat->natoms_local;
    }
    /* beginning of timed HtoD section */
    if (bDoTime)
    {
        t->xf[atomLocality].nb_h2d.openTimingRegion(stream);
    }

    /* HtoD x, q */
    cu_copy_H2D_async(adat->xq + adat_begin,
                      static_cast<const void *>(nbatom->x().data() + adat_begin * 4),
                      adat_len * sizeof(*adat->xq), stream);

    if (bDoTime)
    {
        t->xf[atomLocality].nb_h2d.closeTimingRegion(stream);
    }
    /* When we get here all misc operations issued in the local stream as well
       as the local xq H2D are done, so we record that in the local stream and
       wait for it in the non-local one. This wait needs to precede any PP
       tasks, bonded or nonbonded, that may compute on interactions between
       local and non-local atoms.
     */
    if (nb->bUseTwoStreams)
    {
        if (iloc == InteractionLocality::Local)
        {
            cudaError_t stat = cudaEventRecord(nb->misc_ops_and_local_H2D_done, stream);
            CU_RET_ERR(stat, "cudaEventRecord on misc_ops_and_local_H2D_done failed");
        }
        else
        {
            cudaError_t stat = cudaStreamWaitEvent(stream, nb->misc_ops_and_local_H2D_done, 0);
            CU_RET_ERR(stat, "cudaStreamWaitEvent on misc_ops_and_local_H2D_done failed");
        }
    }
}
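/* The two-stream ordering above is the standard CUDA event pattern; a minimal
 * self-contained sketch (hypothetical stream/event names, error checks omitted):
 *
 *   cudaEvent_t done;
 *   cudaEventCreateWithFlags(&done, cudaEventDisableTiming);
 *   cudaEventRecord(done, producerStream);         // after the local H2D copy
 *   cudaStreamWaitEvent(consumerStream, done, 0);  // gates later non-local launches
 */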
/*! As we execute nonbonded workload in separate streams, before launching
    the kernel we need to make sure that the following operations have completed:
    - atomdata allocation and related H2D transfers (every nstlist step);
    - pair list H2D transfer (every nstlist step);
    - shift vector H2D transfer (every nstlist step);
    - force (+shift force and energy) output clearing (every step).

    These operations are issued in the local stream at the beginning of the step
    and therefore always complete before the local kernel launch. The non-local
    kernel is launched after the local one on the same device/context, hence it is
    inherently scheduled after the operations in the local stream (including the
    above "misc_ops") on pre-GK110 devices with a single hardware queue; but on
    later devices with multiple hardware queues the dependency needs to be enforced.
    We use the misc_ops_and_local_H2D_done event to record the point where
    the local x+q H2D (and all preceding) tasks are complete and synchronize
    with this event in the non-local stream before launching the non-bonded kernel.
 */
void gpu_launch_kernel(gmx_nbnxn_cuda_t          *nb,
                       const int                  flags,
                       const InteractionLocality  iloc)
{
    /* CUDA kernel launch-related stuff */
    int                  nblock;
    dim3                 dim_block, dim_grid;
    nbnxn_cu_kfunc_ptr_t nb_kernel = nullptr; /* fn pointer to the nonbonded kernel */

    cu_atomdata_t       *adat   = nb->atdat;
    cu_nbparam_t        *nbp    = nb->nbparam;
    cu_plist_t          *plist  = nb->plist[iloc];
    cu_timers_t         *t      = nb->timers;
    cudaStream_t         stream = nb->stream[iloc];

    bool                 bCalcEner   = flags & GMX_FORCE_ENERGY;
    bool                 bCalcFshift = flags & GMX_FORCE_VIRIAL;
    bool                 bDoTime     = nb->bDoTime;
    /* Don't launch the non-local kernel if there is no work to do.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local kernel, and later (not in this function)
       the stream wait, local f copyback and the f buffer clearing. All
       these operations, except for the local interaction kernel, are
       needed for the non-local interactions. The skip of the local kernel
       call is taken care of later in this function. */
    if (canSkipWork(*nb, iloc))
    {
        plist->haveFreshList = false;

        return;
    }
    if (nbp->useDynamicPruning && plist->haveFreshList)
    {
        /* Prunes for rlistOuter and rlistInner, sets plist->haveFreshList=false
           (TODO: ATM that's the way the timing accounting can distinguish between
           separate prune kernel and combined force+prune, maybe we need a better way?).
         */
        gpu_launch_kernel_pruneonly(nb, iloc, 1);
    }
    /* Don't launch an empty local kernel (not allowed with CUDA) */
    if (plist->nsci == 0)
    {
        return;
    }
    /* beginning of timed nonbonded calculation section */
    if (bDoTime)
    {
        t->interaction[iloc].nb_k.openTimingRegion(stream);
    }
    /* get the pointer to the kernel flavor we need to use */
    nb_kernel = select_nbnxn_kernel(nbp->eeltype,
                                    nbp->vdwtype,
                                    bCalcEner,
                                    (plist->haveFreshList && !nb->timers->interaction[iloc].didPrune),
                                    nb->dev_info);
    /* Kernel launch config:
     * - The thread block dimensions match the size of i-clusters, j-clusters,
     *   and j-cluster concurrency, in x, y, and z, respectively.
     * - The 1D block-grid contains as many blocks as super-clusters.
     */
    int num_threads_z = 1;
    if (nb->dev_info->prop.major == 3 && nb->dev_info->prop.minor == 7)
    {
        num_threads_z = 2;
    }
    nblock = calc_nb_kernel_nblock(plist->nsci, nb->dev_info);
    KernelLaunchConfig config;
    config.blockSize[0]     = c_clSize;
    config.blockSize[1]     = c_clSize;
    config.blockSize[2]     = num_threads_z;
    config.gridSize[0]      = nblock;
    config.sharedMemorySize = calc_shmem_required_nonbonded(num_threads_z, nb->dev_info, nbp);
    config.stream           = stream;
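    /* Example (assuming the default c_clSize = 8 and num_threads_z = 1): the
       launch uses 8x8x1 = 64-thread blocks, i.e. two warps per block, on a
       1-D grid with one block per pair-list super-cluster entry (nsci blocks). */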
    if (debug)
    {
        fprintf(debug, "Non-bonded GPU launch configuration:\n\tThread block: %zux%zux%zu\n\t"
                "\tGrid: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n"
                "\tShMem: %zu\n",
                config.blockSize[0], config.blockSize[1], config.blockSize[2],
                config.gridSize[0], config.gridSize[1], plist->nsci*c_numClPerSupercl,
                c_numClPerSupercl, plist->na_c,
                config.sharedMemorySize);
    }
    auto      *timingEvent = bDoTime ? t->interaction[iloc].nb_k.fetchNextEvent() : nullptr;
    const auto kernelArgs  = prepareGpuKernelArguments(nb_kernel, config, adat, nbp, plist, &bCalcFshift);
    launchGpuKernel(nb_kernel, config, timingEvent, "k_calc_nb", kernelArgs);

    if (bDoTime)
    {
        t->interaction[iloc].nb_k.closeTimingRegion(stream);
    }
    if (GMX_NATIVE_WINDOWS)
    {
        /* Windows: force flushing WDDM queue */
        cudaStreamQuery(stream);
    }
}
/*! Calculates the amount of shared memory required by the CUDA prune kernel in use. */
static inline int calc_shmem_required_prune(const int num_threads_z)
{
    int shmem;

    /* i-atom x in shared memory */
    shmem = c_numClPerSupercl * c_clSize * sizeof(float4);
    /* cj in shared memory, for each warp separately */
    shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);

    return shmem;
}
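/* Worked example (assuming c_numClPerSupercl = 8, c_clSize = 8,
 * c_nbnxnGpuClusterpairSplit = 2, c_nbnxnGpuJgroupSize = 4 and
 * num_threads_z = c_cudaPruneKernelJ4Concurrency = 4):
 *   8*8*sizeof(float4) + 4*2*4*sizeof(int) = 1024 + 128 = 1152 bytes. */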
void gpu_launch_kernel_pruneonly(gmx_nbnxn_cuda_t          *nb,
                                 const InteractionLocality  iloc,
                                 const int                  numParts)
{
    cu_atomdata_t *adat   = nb->atdat;
    cu_nbparam_t  *nbp    = nb->nbparam;
    cu_plist_t    *plist  = nb->plist[iloc];
    cu_timers_t   *t      = nb->timers;
    cudaStream_t   stream = nb->stream[iloc];

    bool           bDoTime = nb->bDoTime;
    if (plist->haveFreshList)
    {
        GMX_ASSERT(numParts == 1, "With first pruning we expect 1 part");
        /* Set rollingPruningNumParts to signal that it is not set */
        plist->rollingPruningNumParts = 0;
        plist->rollingPruningPart     = 0;
    }
    else if (plist->rollingPruningNumParts == 0)
    {
        plist->rollingPruningNumParts = numParts;
    }
    else
    {
        GMX_ASSERT(numParts == plist->rollingPruningNumParts,
                   "It is not allowed to change numParts in between list generation steps");
    }
    /* Use a local variable for part and update in plist, so we can return here
     * without duplicating the part increment code.
     */
    int part = plist->rollingPruningPart;

    plist->rollingPruningPart++;
    if (plist->rollingPruningPart >= plist->rollingPruningNumParts)
    {
        plist->rollingPruningPart = 0;
    }

    /* Compute the number of list entries to prune in this pass */
    int numSciInPart = (plist->nsci - part)/numParts;
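    /* Example: with numParts = 2, consecutive rolling-prune calls cycle
       part = 0, 1, 0, 1, ... so the whole list is covered every two calls;
       for nsci = 1000 a pass handles (1000 - part)/2 entries, i.e. 500 or
       499 (integer division). */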
    /* Don't launch the kernel if there is no work to do (not allowed with CUDA) */
    if (numSciInPart <= 0)
    {
        plist->haveFreshList = false;

        return;
    }
    GpuRegionTimer *timer = nullptr;
    if (bDoTime)
    {
        timer = &(plist->haveFreshList ? t->interaction[iloc].prune_k : t->interaction[iloc].rollingPrune_k);
    }

    /* beginning of timed prune calculation section */
    if (bDoTime)
    {
        timer->openTimingRegion(stream);
    }
    /* Kernel launch config:
     * - The thread block dimensions match the size of i-clusters, j-clusters,
     *   and j-cluster concurrency, in x, y, and z, respectively.
     * - The 1D block-grid contains as many blocks as super-clusters.
     */
    int num_threads_z = c_cudaPruneKernelJ4Concurrency;
    int nblock        = calc_nb_kernel_nblock(numSciInPart, nb->dev_info);
    KernelLaunchConfig config;
    config.blockSize[0]     = c_clSize;
    config.blockSize[1]     = c_clSize;
    config.blockSize[2]     = num_threads_z;
    config.gridSize[0]      = nblock;
    config.sharedMemorySize = calc_shmem_required_prune(num_threads_z);
    config.stream           = stream;
    if (debug)
    {
        fprintf(debug, "Pruning GPU kernel launch configuration:\n\tThread block: %zux%zux%zu\n\t"
                "\tGrid: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n"
                "\tShMem: %zu\n",
                config.blockSize[0], config.blockSize[1], config.blockSize[2],
                config.gridSize[0], config.gridSize[1], numSciInPart*c_numClPerSupercl,
                c_numClPerSupercl, plist->na_c,
                config.sharedMemorySize);
    }
    auto          *timingEvent  = bDoTime ? timer->fetchNextEvent() : nullptr;
    constexpr char kernelName[] = "k_pruneonly";
    const auto    &kernel       = plist->haveFreshList ? nbnxn_kernel_prune_cuda<true> : nbnxn_kernel_prune_cuda<false>;
    const auto     kernelArgs   = prepareGpuKernelArguments(kernel, config, adat, nbp, plist, &numParts, &part);
    launchGpuKernel(kernel, config, timingEvent, kernelName, kernelArgs);
    /* TODO: consider a more elegant way to track which kernel has been called
       (combined or separate 1st pass prune, rolling prune). */
    if (plist->haveFreshList)
    {
        plist->haveFreshList = false;
        /* Mark that pruning has been done */
        nb->timers->interaction[iloc].didPrune = true;
    }
    else
    {
        /* Mark that rolling pruning has been done */
        nb->timers->interaction[iloc].didRollingPrune = true;
    }
    if (bDoTime)
    {
        timer->closeTimingRegion(stream);
    }
    if (GMX_NATIVE_WINDOWS)
    {
        /* Windows: force flushing WDDM queue */
        cudaStreamQuery(stream);
    }
}
void gpu_launch_cpyback(gmx_nbnxn_cuda_t   *nb,
                        nbnxn_atomdata_t   *nbatom,
                        const int           flags,
                        const AtomLocality  atomLocality,
                        const bool          haveOtherWork)
{
    cudaError_t stat;
    int         adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */

    /* determine interaction locality from atom locality */
    const InteractionLocality iloc = gpuAtomToInteractionLocality(atomLocality);

    /* extract the data */
    cu_atomdata_t *adat    = nb->atdat;
    cu_timers_t   *t       = nb->timers;
    bool           bDoTime = nb->bDoTime;
    cudaStream_t   stream  = nb->stream[iloc];
    bool bCalcEner   = flags & GMX_FORCE_ENERGY;
    bool bCalcFshift = flags & GMX_FORCE_VIRIAL;
    /* don't launch non-local copy-back if there was no non-local work to do */
    if (!haveOtherWork && canSkipWork(*nb, iloc))
    {
        return;
    }

    getGpuAtomRange(adat, atomLocality, &adat_begin, &adat_len);
    /* beginning of timed D2H section */
    if (bDoTime)
    {
        t->xf[atomLocality].nb_d2h.openTimingRegion(stream);
    }
    /* With DD the local D2H transfer can only start after the non-local
       kernel has finished. */
    if (iloc == InteractionLocality::Local && nb->bUseTwoStreams)
    {
        stat = cudaStreamWaitEvent(stream, nb->nonlocal_done, 0);
        CU_RET_ERR(stat, "cudaStreamWaitEvent on nonlocal_done failed");
    }
    /* DtoH f */
    cu_copy_D2H_async(nbatom->out[0].f.data() + adat_begin * 3, adat->f + adat_begin,
                      (adat_len)*sizeof(*adat->f), stream);
    /* After the non-local D2H is launched the nonlocal_done event can be
       recorded which signals that the local D2H can proceed. This event is not
       placed after the non-local kernel because we want the non-local data
       back first. */
    if (iloc == InteractionLocality::NonLocal)
    {
        stat = cudaEventRecord(nb->nonlocal_done, stream);
        CU_RET_ERR(stat, "cudaEventRecord on nonlocal_done failed");
    }
    /* only transfer energies in the local stream */
    if (iloc == InteractionLocality::Local)
    {
        if (bCalcFshift) /* DtoH fshift */
        {
            cu_copy_D2H_async(nb->nbst.fshift, adat->fshift,
                              SHIFTS * sizeof(*nb->nbst.fshift), stream);
        }
        if (bCalcEner) /* DtoH energies */
        {
            cu_copy_D2H_async(nb->nbst.e_lj, adat->e_lj, sizeof(*nb->nbst.e_lj), stream);
            cu_copy_D2H_async(nb->nbst.e_el, adat->e_el, sizeof(*nb->nbst.e_el), stream);
        }
    }
    if (bDoTime)
    {
        t->xf[atomLocality].nb_d2h.closeTimingRegion(stream);
    }
}
void cuda_set_cacheconfig()
{
    cudaError_t stat;

    for (int i = 0; i < eelCuNR; i++)
    {
        for (int j = 0; j < evdwCuNR; j++)
        {
            /* Default kernel 32/32 kB Shared/L1 */
            cudaFuncSetCacheConfig(nb_kfunc_ener_prune_ptr[i][j], cudaFuncCachePreferEqual);
            cudaFuncSetCacheConfig(nb_kfunc_ener_noprune_ptr[i][j], cudaFuncCachePreferEqual);
            cudaFuncSetCacheConfig(nb_kfunc_noener_prune_ptr[i][j], cudaFuncCachePreferEqual);
            stat = cudaFuncSetCacheConfig(nb_kfunc_noener_noprune_ptr[i][j], cudaFuncCachePreferEqual);
            CU_RET_ERR(stat, "cudaFuncSetCacheConfig failed");
        }
    }
}