/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *  \brief Define CUDA implementation of nbnxn_gpu.h
 *
 *  \author Szilard Pall <pall.szilard@gmail.com>
 */
#include "gromacs/nbnxm/nbnxm_gpu.h"

#include <assert.h>

#include "nbnxm_cuda.h"

#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/mdlib/force_flags.h"
#include "gromacs/nbnxm/atomdata.h"
#include "gromacs/nbnxm/gpu_common.h"
#include "gromacs/nbnxm/gpu_common_utils.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/grid.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/pairlist.h"
#include "gromacs/nbnxm/cuda/nbnxm_buffer_ops_kernels.cuh"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"

#include "nbnxm_cuda_types.h"
/***** The kernel declarations/definitions come here *****/

/* Top-level kernel declaration generation: will generate through multiple
 * inclusion the following flavors for all kernel declarations:
 * - force-only output;
 * - force and energy output;
 * - force-only with pair list pruning;
 * - force and energy output with pair list pruning.
 */
#define FUNCTION_DECLARATION_ONLY
/** Force only **/
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernels.cuh"
/** Force & energy **/
#define CALC_ENERGIES
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernels.cuh"
#undef CALC_ENERGIES

/*** Pair-list pruning kernels ***/
/** Force only **/
#define PRUNE_NBL
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernels.cuh"
/** Force & energy **/
#define CALC_ENERGIES
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernels.cuh"
#undef CALC_ENERGIES
#undef PRUNE_NBL

/* Prune-only kernels */
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_pruneonly.cuh"
#undef FUNCTION_DECLARATION_ONLY
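
/* Illustrative sketch (not part of this file; a minimal analogue of the
 * multiple-inclusion technique above, with hypothetical names): the included
 * header expands to a different declaration depending on which flavor macros
 * are defined at the point of inclusion, so including it repeatedly under
 * different macro settings generates all kernel flavors.
 *
 *   // flavor_template.h (hypothetical)
 *   #if defined(CALC_ENERGIES)
 *   void kernel_VF();   // force + energy flavor
 *   #else
 *   void kernel_F();    // force-only flavor
 *   #endif
 *
 *   // consumer, mirroring the includes above
 *   #include "flavor_template.h"   // declares kernel_F
 *   #define CALC_ENERGIES
 *   #include "flavor_template.h"   // declares kernel_VF
 *   #undef CALC_ENERGIES
 */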
/* Now generate the function definitions if we are using a single compilation unit. */
#if GMX_CUDA_NB_SINGLE_COMPILATION_UNIT
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_F_noprune.cu"
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_F_prune.cu"
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_VF_noprune.cu"
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_VF_prune.cu"
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_pruneonly.cu"
#endif /* GMX_CUDA_NB_SINGLE_COMPILATION_UNIT */
/*! Nonbonded kernel function pointer type */
typedef void (*nbnxn_cu_kfunc_ptr_t)(const cu_atomdata_t,
                                     const cu_nbparam_t,
                                     const cu_plist_t,
                                     bool);

/*********************************/
/*! Returns the number of blocks to be used for the nonbonded GPU kernel. */
static inline int calc_nb_kernel_nblock(int nwork_units, const gmx_device_info_t *dinfo)
{
    int max_grid_x_size;

    assert(dinfo);
    /* CUDA does not accept grid dimension of 0 (which can happen e.g. with an
       empty domain) and that case should be handled before this point. */
    assert(nwork_units > 0);

    max_grid_x_size = dinfo->prop.maxGridSize[0];

    /* do we exceed the grid x dimension limit? */
    if (nwork_units > max_grid_x_size)
    {
        gmx_fatal(FARGS, "Watch out, the input system is too large to simulate!\n"
                  "The number of nonbonded work units (=number of super-clusters) exceeds the "
                  "maximum grid size in x dimension (%d > %d)!", nwork_units, max_grid_x_size);
    }

    return nwork_units;
}
/* Constant arrays listing all kernel function pointers and enabling selection
   of a kernel in an elegant manner. */

/*! Pointers to the non-bonded kernels organized in 2-dim arrays by:
 *  electrostatics and VDW type.
 *
 *  Note that the row- and column-order of function pointers has to match the
 *  order of corresponding enumerated electrostatics and vdw types, resp.,
 *  defined in nbnxn_cuda_types.h.
 */
/*! Force-only kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_noprune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_F_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_F_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_F_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_F_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_F_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_cuda }
};

/*! Force + energy kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_noprune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_VF_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_VF_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_cuda }
};

/*! Force + pruning kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_prune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_cuda }
};

/*! Force + energy + pruning kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_prune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_cuda }
};
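
/* Usage sketch (illustrative; the enum value names follow the eelCu*/
/* evdwCu enumerations in nbnxn_cuda_types.h and are assumptions here):
 * the tables are indexed by the electrostatics and VdW enum values, so e.g.
 * a reaction-field + plain LJ cut-off force+energy kernel would be fetched as
 *
 *   nbnxn_cu_kfunc_ptr_t k = nb_kfunc_ener_noprune_ptr[eelCuRF][evdwCuCUT];
 */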
/*! Return a pointer to the kernel version to be executed at the current step. */
static inline nbnxn_cu_kfunc_ptr_t select_nbnxn_kernel(int                                  eeltype,
                                                       int                                  evdwtype,
                                                       bool                                 bDoEne,
                                                       bool                                 bDoPrune,
                                                       const gmx_device_info_t gmx_unused  *devInfo)
{
    nbnxn_cu_kfunc_ptr_t res;

    GMX_ASSERT(eeltype < eelCuNR,
               "The electrostatics type requested is not implemented in the CUDA kernels.");
    GMX_ASSERT(evdwtype < evdwCuNR,
               "The VdW type requested is not implemented in the CUDA kernels.");

    /* assert assumptions made by the kernels */
    GMX_ASSERT(c_nbnxnGpuClusterSize*c_nbnxnGpuClusterSize/c_nbnxnGpuClusterpairSplit == devInfo->prop.warpSize,
               "The CUDA kernels require the cluster_size_i*cluster_size_j/nbnxn_gpu_clusterpair_split to match the warp size of the architecture targeted.");

    if (bDoEne)
    {
        if (bDoPrune)
        {
            res = nb_kfunc_ener_prune_ptr[eeltype][evdwtype];
        }
        else
        {
            res = nb_kfunc_ener_noprune_ptr[eeltype][evdwtype];
        }
    }
    else
    {
        if (bDoPrune)
        {
            res = nb_kfunc_noener_prune_ptr[eeltype][evdwtype];
        }
        else
        {
            res = nb_kfunc_noener_noprune_ptr[eeltype][evdwtype];
        }
    }

    return res;
}
/*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use. */
static inline int calc_shmem_required_nonbonded(const int num_threads_z, const gmx_device_info_t gmx_unused *dinfo, const cu_nbparam_t *nbp)
{
    int shmem;

    assert(dinfo);

    /* size of shmem (force-buffers/xq/atom type preloading) */
    /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
    /* i-atom x+q in shared memory */
    shmem  = c_numClPerSupercl * c_clSize * sizeof(float4);
    /* cj in shared memory, for each warp separately */
    shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);

    if (nbp->vdwtype == evdwCuCUTCOMBGEOM ||
        nbp->vdwtype == evdwCuCUTCOMBLB)
    {
        /* i-atom LJ combination parameters in shared memory */
        shmem += c_numClPerSupercl * c_clSize * sizeof(float2);
    }
    else
    {
        /* i-atom types in shared memory */
        shmem += c_numClPerSupercl * c_clSize * sizeof(int);
    }

    return shmem;
}
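
/* Worked example (a sketch; the constant values below are assumptions stated
 * for illustration, matching the CUDA kernel setup at the time of writing):
 * with c_numClPerSupercl = 8, c_clSize = 8, c_nbnxnGpuClusterpairSplit = 2,
 * c_nbnxnGpuJgroupSize = 4 and num_threads_z = 1, for a plain LJ cut-off:
 *
 *   xq preload:   8 * 8 * sizeof(float4)     = 1024 bytes
 *   cj preload:   1 * 2 * 4 * sizeof(int)    =   32 bytes
 *   i-atom types: 8 * 8 * sizeof(int)        =  256 bytes
 *   total                                    = 1312 bytes per thread block
 */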
/*! \brief Sync the nonlocal stream with dependent tasks in the local queue.
 *
 *  As the point where the local stream tasks can be considered complete happens
 *  at the same call point where the nonlocal stream should be synced with the
 *  local, this function records the event if called with the local stream as
 *  argument and inserts in the GPU stream a wait on the event on the nonlocal.
 */
void nbnxnInsertNonlocalGpuDependency(const gmx_nbnxn_cuda_t   *nb,
                                      const InteractionLocality interactionLocality)
{
    cudaStream_t stream = nb->stream[interactionLocality];

    /* When we get here all misc operations issued in the local stream as well as
       the local xq H2D are done,
       so we record that in the local stream and wait for it in the nonlocal one.
       This wait needs to precede any PP tasks, bonded or nonbonded, that may
       compute on interactions between local and nonlocal atoms.
     */
    if (nb->bUseTwoStreams)
    {
        if (interactionLocality == InteractionLocality::Local)
        {
            cudaError_t stat = cudaEventRecord(nb->misc_ops_and_local_H2D_done, stream);
            CU_RET_ERR(stat, "cudaEventRecord on misc_ops_and_local_H2D_done failed");
        }
        else
        {
            cudaError_t stat = cudaStreamWaitEvent(stream, nb->misc_ops_and_local_H2D_done, 0);
            CU_RET_ERR(stat, "cudaStreamWaitEvent on misc_ops_and_local_H2D_done failed");
        }
    }
}
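
/* Standalone sketch (illustrative, not part of this file) of the CUDA
 * event-based inter-stream ordering used above: an event recorded in the
 * producer stream orders work in the consumer stream without blocking the
 * host. The kernel names are hypothetical.
 *
 *   cudaEvent_t done;
 *   cudaEventCreateWithFlags(&done, cudaEventDisableTiming);
 *   producer_kernel<<<grid, block, 0, localStream>>>();     // work to wait on
 *   cudaEventRecord(done, localStream);                     // mark completion point
 *   cudaStreamWaitEvent(nonlocalStream, done, 0);           // GPU-side wait, host continues
 *   consumer_kernel<<<grid, block, 0, nonlocalStream>>>();  // ordered after producer
 */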
/*! \brief Launch asynchronously the xq buffer host to device copy. */
void gpu_copy_xq_to_gpu(gmx_nbnxn_cuda_t       *nb,
                        const nbnxn_atomdata_t *nbatom,
                        const AtomLocality      atomLocality)
{
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");

    GMX_ASSERT(atomLocality == AtomLocality::Local || atomLocality == AtomLocality::NonLocal,
               "Only local and non-local xq transfers are supported");

    const InteractionLocality iloc = gpuAtomToInteractionLocality(atomLocality);

    int                       adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */

    cu_atomdata_t            *adat    = nb->atdat;
    cu_plist_t               *plist   = nb->plist[iloc];
    cu_timers_t              *t       = nb->timers;
    cudaStream_t              stream  = nb->stream[iloc];

    bool                      bDoTime = nb->bDoTime;

    /* Don't launch the non-local H2D copy if there is no dependent
       work to do: neither non-local nor other (e.g. bonded) work
       that has as input the nbnxn coordinates.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local x+q copy (and the rest of the local
       work in nbnxn_gpu_launch_kernel()).
     */
    if ((iloc == InteractionLocality::NonLocal) && !haveGpuShortRangeWork(*nb, iloc))
    {
        plist->haveFreshList = false;

        return;
    }

    /* calculate the atom data index range based on locality */
    if (atomLocality == AtomLocality::Local)
    {
        adat_begin = 0;
        adat_len   = adat->natoms_local;
    }
    else
    {
        adat_begin = adat->natoms_local;
        adat_len   = adat->natoms - adat->natoms_local;
    }

    /* beginning of timed HtoD section */
    if (bDoTime)
    {
        t->xf[atomLocality].nb_h2d.openTimingRegion(stream);
    }

    /* HtoD x, q */
    cu_copy_H2D_async(adat->xq + adat_begin, static_cast<const void *>(nbatom->x().data() + adat_begin * 4),
                      adat_len * sizeof(*adat->xq), stream);

    if (bDoTime)
    {
        t->xf[atomLocality].nb_h2d.closeTimingRegion(stream);
    }

    /* When we get here all misc operations issued in the local stream as well as
       the local xq H2D are done, so we record that in the local stream and wait
       for it in the nonlocal one. This wait needs to precede any PP tasks,
       bonded or nonbonded, that may compute on interactions between local and
       nonlocal atoms.
     */
    nbnxnInsertNonlocalGpuDependency(nb, iloc);
}
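
/* Layout note (stated as an assumption about the nbat XYZQ format used for
 * GPU runs): the host buffer packs 4 floats per atom (x, y, z, q) and the
 * device buffer is the matching float4 array, which is why the offsets above
 * advance by adat_begin * 4 host floats and adat_begin float4 elements:
 *
 *   float4      *d_dst = adat->xq + adat_begin;                // 16 bytes/atom
 *   const float *h_src = nbatom->x().data() + adat_begin * 4;  //  4 floats/atom
 */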
/*! As we execute nonbonded workload in separate streams, before launching
   the kernel we need to make sure that the following operations have completed:
   - atomdata allocation and related H2D transfers (every nstlist step);
   - pair list H2D transfer (every nstlist step);
   - shift vector H2D transfer (every nstlist step);
   - force (+shift force and energy) output clearing (every step).

   These operations are issued in the local stream at the beginning of the step
   and therefore always complete before the local kernel launch. The non-local
   kernel is launched after the local one on the same device/context, hence it is
   inherently scheduled after the operations in the local stream (including the
   above "misc_ops") on pre-GK110 devices with a single hardware queue, but on later
   devices with multiple hardware queues the dependency needs to be enforced.
   We use the misc_ops_and_local_H2D_done event to record the point where
   the local x+q H2D (and all preceding) tasks are complete and synchronize
   with this event in the non-local stream before launching the non-bonded kernel.
 */
void gpu_launch_kernel(gmx_nbnxn_cuda_t          *nb,
                       const int                  flags,
                       const InteractionLocality  iloc)
{
    cu_atomdata_t *adat   = nb->atdat;
    cu_nbparam_t  *nbp    = nb->nbparam;
    cu_plist_t    *plist  = nb->plist[iloc];
    cu_timers_t   *t      = nb->timers;
    cudaStream_t   stream = nb->stream[iloc];

    bool           bCalcEner   = flags & GMX_FORCE_ENERGY;
    bool           bCalcFshift = flags & GMX_FORCE_VIRIAL;
    bool           bDoTime     = nb->bDoTime;

    /* Don't launch the non-local kernel if there is no work to do.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local kernel, and later (not in
       this function) the stream wait, local f copyback and the f buffer
       clearing. All these operations, except for the local interaction kernel,
       are needed for the non-local interactions. The skip of the local kernel
       call is taken care of later in this function. */
    if (canSkipNonbondedWork(*nb, iloc))
    {
        plist->haveFreshList = false;

        return;
    }

    if (nbp->useDynamicPruning && plist->haveFreshList)
    {
        /* Prunes for rlistOuter and rlistInner, sets plist->haveFreshList=false
           (TODO: ATM that's the way the timing accounting can distinguish between
           separate prune kernel and combined force+prune, maybe we need a better way?).
         */
        gpu_launch_kernel_pruneonly(nb, iloc, 1);
    }

    if (plist->nsci == 0)
    {
        /* Don't launch an empty local kernel (not allowed with CUDA) */
        return;
    }

    /* beginning of timed nonbonded calculation section */
    if (bDoTime)
    {
        t->interaction[iloc].nb_k.openTimingRegion(stream);
    }

    /* Kernel launch config:
     * - The thread block dimensions match the size of i-clusters, j-clusters,
     *   and j-cluster concurrency, in x, y, and z, respectively.
     * - The 1D block-grid contains as many blocks as super-clusters.
     */
    int num_threads_z = 1;
    if (nb->dev_info->prop.major == 3 && nb->dev_info->prop.minor == 7)
    {
        num_threads_z = 2;
    }
    int nblock = calc_nb_kernel_nblock(plist->nsci, nb->dev_info);

    KernelLaunchConfig config;
    config.blockSize[0]     = c_clSize;
    config.blockSize[1]     = c_clSize;
    config.blockSize[2]     = num_threads_z;
    config.gridSize[0]      = nblock;
    config.sharedMemorySize = calc_shmem_required_nonbonded(num_threads_z, nb->dev_info, nbp);
    config.stream           = stream;

    if (debug)
    {
        fprintf(debug, "Non-bonded GPU launch configuration:\n\tThread block: %zux%zux%zu\n\t"
                "\tGrid: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n"
                "\tShMem: %zu\n",
                config.blockSize[0], config.blockSize[1], config.blockSize[2],
                config.gridSize[0], config.gridSize[1], plist->nsci*c_numClPerSupercl,
                c_numClPerSupercl, plist->na_c,
                config.sharedMemorySize);
    }

    auto       *timingEvent = bDoTime ? t->interaction[iloc].nb_k.fetchNextEvent() : nullptr;
    const auto  kernel      = select_nbnxn_kernel(nbp->eeltype,
                                                  nbp->vdwtype,
                                                  bCalcEner,
                                                  (plist->haveFreshList && !nb->timers->interaction[iloc].didPrune),
                                                  nb->dev_info);
    const auto  kernelArgs  = prepareGpuKernelArguments(kernel, config, adat, nbp, plist, &bCalcFshift);
    launchGpuKernel(kernel, config, timingEvent, "k_calc_nb", kernelArgs);
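
    /* For reference (a sketch; this assumes the gpu_utils wrappers reduce to
     * a plain CUDA triple-chevron launch, which is an assumption here):
     *
     *   dim3 blockSize(c_clSize, c_clSize, num_threads_z);
     *   dim3 gridSize(nblock, 1, 1);
     *   (*kernel)<<<gridSize, blockSize, config.sharedMemorySize, stream>>>
     *       (*adat, *nbp, *plist, bCalcFshift);
     */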
    if (bDoTime)
    {
        t->interaction[iloc].nb_k.closeTimingRegion(stream);
    }

    if (GMX_NATIVE_WINDOWS)
    {
        /* Windows: force flushing WDDM queue */
        cudaStreamQuery(stream);
    }
}
/*! Calculates the amount of shared memory required by the CUDA prune kernel in use. */
static inline int calc_shmem_required_prune(const int num_threads_z)
{
    int shmem;

    /* i-atom x in shared memory */
    shmem  = c_numClPerSupercl * c_clSize * sizeof(float4);
    /* cj in shared memory, for each warp separately */
    shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);

    return shmem;
}
void gpu_launch_kernel_pruneonly(gmx_nbnxn_cuda_t          *nb,
                                 const InteractionLocality  iloc,
                                 const int                  numParts)
{
    cu_atomdata_t *adat   = nb->atdat;
    cu_nbparam_t  *nbp    = nb->nbparam;
    cu_plist_t    *plist  = nb->plist[iloc];
    cu_timers_t   *t      = nb->timers;
    cudaStream_t   stream = nb->stream[iloc];

    bool           bDoTime = nb->bDoTime;

    if (plist->haveFreshList)
    {
        GMX_ASSERT(numParts == 1, "With first pruning we expect 1 part");

        /* Set rollingPruningNumParts to signal that it is not set */
        plist->rollingPruningNumParts = 0;
        plist->rollingPruningPart     = 0;
    }
    else
    {
        if (plist->rollingPruningNumParts == 0)
        {
            plist->rollingPruningNumParts = numParts;
        }
        else
        {
            GMX_ASSERT(numParts == plist->rollingPruningNumParts, "It is not allowed to change numParts in between list generation steps");
        }
    }

    /* Use a local variable for part and update in plist, so we can return here
     * without duplicating the part increment code.
     */
    int part = plist->rollingPruningPart;

    plist->rollingPruningPart++;
    if (plist->rollingPruningPart >= plist->rollingPruningNumParts)
    {
        plist->rollingPruningPart = 0;
    }

    /* Compute the number of list entries to prune in this pass */
    int numSciInPart = (plist->nsci - part)/numParts;
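
    /* Worked example (illustrative numbers, not from the code): with
     * plist->nsci = 1000 and numParts = 2, the two rolling passes each handle
     * roughly half the list:
     *
     *   part = 0:  numSciInPart = (1000 - 0)/2 = 500
     *   part = 1:  numSciInPart = (1000 - 1)/2 = 499
     */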
    /* Don't launch the kernel if there is no work to do (not allowed with CUDA) */
    if (numSciInPart <= 0)
    {
        plist->haveFreshList = false;

        return;
    }

    GpuRegionTimer *timer = nullptr;
    if (bDoTime)
    {
        timer = &(plist->haveFreshList ? t->interaction[iloc].prune_k : t->interaction[iloc].rollingPrune_k);
    }

    /* beginning of timed prune calculation section */
    if (bDoTime)
    {
        timer->openTimingRegion(stream);
    }

    /* Kernel launch config:
     * - The thread block dimensions match the size of i-clusters, j-clusters,
     *   and j-cluster concurrency, in x, y, and z, respectively.
     * - The 1D block-grid contains as many blocks as super-clusters.
     */
    int num_threads_z = c_cudaPruneKernelJ4Concurrency;
    int nblock        = calc_nb_kernel_nblock(numSciInPart, nb->dev_info);
    KernelLaunchConfig config;
    config.blockSize[0]     = c_clSize;
    config.blockSize[1]     = c_clSize;
    config.blockSize[2]     = num_threads_z;
    config.gridSize[0]      = nblock;
    config.sharedMemorySize = calc_shmem_required_prune(num_threads_z);
    config.stream           = stream;

    if (debug)
    {
        fprintf(debug, "Pruning GPU kernel launch configuration:\n\tThread block: %zux%zux%zu\n\t"
                "\tGrid: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n"
                "\tShMem: %zu\n",
                config.blockSize[0], config.blockSize[1], config.blockSize[2],
                config.gridSize[0], config.gridSize[1], numSciInPart*c_numClPerSupercl,
                c_numClPerSupercl, plist->na_c,
                config.sharedMemorySize);
    }

    auto          *timingEvent  = bDoTime ? timer->fetchNextEvent() : nullptr;
    constexpr char kernelName[] = "k_pruneonly";
    const auto     kernel       = plist->haveFreshList ? nbnxn_kernel_prune_cuda<true> : nbnxn_kernel_prune_cuda<false>;
    const auto     kernelArgs   = prepareGpuKernelArguments(kernel, config, adat, nbp, plist, &numParts, &part);
    launchGpuKernel(kernel, config, timingEvent, kernelName, kernelArgs);
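
    /* Sketch (illustrative, not from this file): the boolean template
     * parameter bakes the fresh-list/rolling-prune branch into two separate
     * kernel instantiations at compile time, avoiding a per-thread runtime
     * branch; roughly:
     *
     *   template<bool haveFreshList>
     *   __global__ void pruneKernelSketch(...)
     *   {
     *       if (haveFreshList) { ... first-pass prune ... }    // resolved at compile time
     *       else               { ... rolling prune ... }
     *   }
     */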
    /* TODO: consider a more elegant way to track which kernel has been called
       (combined or separate 1st pass prune, rolling prune). */
    if (plist->haveFreshList)
    {
        plist->haveFreshList = false;
        /* Mark that pruning has been done */
        nb->timers->interaction[iloc].didPrune = true;
    }
    else
    {
        /* Mark that rolling pruning has been done */
        nb->timers->interaction[iloc].didRollingPrune = true;
    }

    if (bDoTime)
    {
        timer->closeTimingRegion(stream);
    }

    if (GMX_NATIVE_WINDOWS)
    {
        /* Windows: force flushing WDDM queue */
        cudaStreamQuery(stream);
    }
}
void gpu_launch_cpyback(gmx_nbnxn_cuda_t   *nb,
                        nbnxn_atomdata_t   *nbatom,
                        const int           flags,
                        const AtomLocality  atomLocality)
{
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");

    cudaError_t stat;
    int         adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */

    /* determine interaction locality from atom locality */
    const InteractionLocality iloc = gpuAtomToInteractionLocality(atomLocality);

    /* extract the data */
    cu_atomdata_t *adat    = nb->atdat;
    cu_timers_t   *t       = nb->timers;
    bool           bDoTime = nb->bDoTime;
    cudaStream_t   stream  = nb->stream[iloc];

    bool           bCalcEner   = flags & GMX_FORCE_ENERGY;
    bool           bCalcFshift = flags & GMX_FORCE_VIRIAL;

    /* don't launch non-local copy-back if there was no non-local work to do */
    if ((iloc == InteractionLocality::NonLocal) && !haveGpuShortRangeWork(*nb, iloc))
    {
        return;
    }

    getGpuAtomRange(adat, atomLocality, &adat_begin, &adat_len);

    /* beginning of timed D2H section */
    if (bDoTime)
    {
        t->xf[atomLocality].nb_d2h.openTimingRegion(stream);
    }

    /* With DD the local D2H transfer can only start after the non-local
       kernel has finished. */
    if (iloc == InteractionLocality::Local && nb->bUseTwoStreams)
    {
        stat = cudaStreamWaitEvent(stream, nb->nonlocal_done, 0);
        CU_RET_ERR(stat, "cudaStreamWaitEvent on nonlocal_done failed");
    }

    /* DtoH f */
    cu_copy_D2H_async(nbatom->out[0].f.data() + adat_begin * 3, adat->f + adat_begin,
                      (adat_len)*sizeof(*adat->f), stream);

    /* After the non-local D2H is launched the nonlocal_done event can be
       recorded which signals that the local D2H can proceed. This event is not
       placed after the non-local kernel because we want the non-local data
       back first. */
    if (iloc == InteractionLocality::NonLocal)
    {
        stat = cudaEventRecord(nb->nonlocal_done, stream);
        CU_RET_ERR(stat, "cudaEventRecord on nonlocal_done failed");
    }

    /* only transfer energies in the local stream */
    if (iloc == InteractionLocality::Local)
    {
        /* DtoH fshift when virial is needed */
        if (bCalcFshift)
        {
            cu_copy_D2H_async(nb->nbst.fshift, adat->fshift,
                              SHIFTS * sizeof(*nb->nbst.fshift), stream);
        }

        /* DtoH energies */
        if (bCalcEner)
        {
            cu_copy_D2H_async(nb->nbst.e_lj, adat->e_lj,
                              sizeof(*nb->nbst.e_lj), stream);
            cu_copy_D2H_async(nb->nbst.e_el, adat->e_el,
                              sizeof(*nb->nbst.e_el), stream);
        }
    }

    if (bDoTime)
    {
        t->xf[atomLocality].nb_d2h.closeTimingRegion(stream);
    }
}
void cuda_set_cacheconfig()
{
    cudaError_t stat;

    for (int i = 0; i < eelCuNR; i++)
    {
        for (int j = 0; j < evdwCuNR; j++)
        {
            /* Default kernel 32/32 kB Shared/L1 */
            cudaFuncSetCacheConfig(nb_kfunc_ener_prune_ptr[i][j], cudaFuncCachePreferEqual);
            cudaFuncSetCacheConfig(nb_kfunc_ener_noprune_ptr[i][j], cudaFuncCachePreferEqual);
            cudaFuncSetCacheConfig(nb_kfunc_noener_prune_ptr[i][j], cudaFuncCachePreferEqual);
            stat = cudaFuncSetCacheConfig(nb_kfunc_noener_noprune_ptr[i][j], cudaFuncCachePreferEqual);
            CU_RET_ERR(stat, "cudaFuncSetCacheConfig failed");
        }
    }
}
/* X buffer operations on GPU: performs conversion from rvec to nb format. */
void nbnxn_gpu_x_to_nbat_x(const Nbnxm::Grid         &grid,
                           bool                       setFillerCoords,
                           gmx_nbnxn_gpu_t           *nb,
                           void                      *xPmeDevicePtr,
                           const Nbnxm::AtomLocality  locality,
                           const rvec                *x,
                           int                        gridId,
                           int                        numColumnsMax)
{
    cu_atomdata_t             *adat    = nb->atdat;
    bool                       bDoTime = nb->bDoTime;

    const int                  numColumns      = grid.numColumns();
    const int                  cellOffset      = grid.cellOffset();
    const int                  numAtomsPerCell = grid.numAtomsPerCell();
    Nbnxm::InteractionLocality interactionLoc  = gpuAtomToInteractionLocality(locality);
    int                        nCopyAtoms      = grid.srcAtomEnd() - grid.srcAtomBegin();
    int                        copyAtomStart   = grid.srcAtomBegin();

    cudaStream_t               stream          = nb->stream[interactionLoc];

    // FIXME: need to either let the local stream get to the
    // insertNonlocalGpuDependency call or call it separately here
    if (nCopyAtoms == 0) // empty domain
    {
        if (interactionLoc == Nbnxm::InteractionLocality::Local)
        {
            nbnxnInsertNonlocalGpuDependency(nb, interactionLoc);
        }
        return;
    }

    rvec *d_x;

    // a copy of the coordinates is required if a null pointer has been
    // passed to the function
    // TODO improve this mechanism
    bool  copyCoord = (xPmeDevicePtr == nullptr);

    // copy X-coordinate data to device
    if (copyCoord)
    {
        if (bDoTime)
        {
            nb->timers->xf[locality].nb_h2d.openTimingRegion(stream);
        }

        rvec       *devicePtrDest = reinterpret_cast<rvec *> (nb->xrvec[copyAtomStart]);
        const rvec *devicePtrSrc  = reinterpret_cast<const rvec *> (x[copyAtomStart]);
        copyToDeviceBuffer(&devicePtrDest, devicePtrSrc, 0, nCopyAtoms,
                           stream, GpuApiCallBehavior::Async, nullptr);

        if (bDoTime)
        {
            nb->timers->xf[locality].nb_h2d.closeTimingRegion(stream);
        }

        d_x = nb->xrvec;
    }
    else //coordinates have already been copied by PME stream
    {
        d_x = (rvec*) xPmeDevicePtr;
    }

    /* launch kernel on GPU */
    const int threadsPerBlock = 128;

    KernelLaunchConfig config;
    config.blockSize[0]     = threadsPerBlock;
    config.blockSize[1]     = 1;
    config.blockSize[2]     = 1;
    config.gridSize[0]      = (grid.numCellsColumnMax()*numAtomsPerCell + threadsPerBlock - 1)/threadsPerBlock;
    config.gridSize[1]      = numColumns;
    config.gridSize[2]      = 1;
    GMX_ASSERT(config.gridSize[0] > 0, "Can not have empty grid, early return above avoids this");
    config.sharedMemorySize = 0;
    config.stream           = stream;
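
    /* Worked example (illustrative numbers, not from the code): with
     * numAtomsPerCell = 64 and grid.numCellsColumnMax() = 4, each grid column
     * spans at most 256 atoms, so gridSize[0] = (256 + 127)/128 = 2 blocks
     * cover a column, and gridSize[1] = numColumns launches that pair of
     * blocks once per column.
     */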
    auto       kernelFn      = nbnxn_gpu_x_to_nbat_x_kernel;
    float     *xqPtr         = &(adat->xq->x);
    const int *d_atomIndices = nb->atomIndices;
    const int *d_cxy_na      = &nb->cxy_na[numColumnsMax*gridId];
    const int *d_cxy_ind     = &nb->cxy_ind[numColumnsMax*gridId];
    const auto kernelArgs    = prepareGpuKernelArguments(kernelFn, config,
                                                         &numColumns,
                                                         &xqPtr,
                                                         &setFillerCoords,
                                                         &d_x,
                                                         &d_atomIndices,
                                                         &d_cxy_na,
                                                         &d_cxy_ind,
                                                         &cellOffset,
                                                         &numAtomsPerCell);
    launchGpuKernel(kernelFn, config, nullptr, "XbufferOps", kernelArgs);

    nbnxnInsertNonlocalGpuDependency(nb, interactionLoc);
}