/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \internal \file
 *  \brief Define CUDA implementation of nbnxn_gpu.h
 *
 *  \author Szilard Pall <pall.szilard@gmail.com>
 */
#include "gromacs/mdlib/nbnxn_gpu.h"

#include "nbnxn_cuda.h"

#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/mdlib/force_flags.h"
#include "gromacs/mdlib/nb_verlet.h"
#include "gromacs/mdlib/nbnxn_gpu_common.h"
#include "gromacs/mdlib/nbnxn_gpu_common_utils.h"
#include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
#include "gromacs/mdlib/nbnxn_pairlist.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/gmxassert.h"

#include "nbnxn_cuda_types.h"
/*
 * Texture references are created at compile time and need to be declared
 * at file scope as global variables (see http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#texture-reference-api).
 * The texture references below are used in two translation units;
 * we declare them here along with the kernels that use them (when compiling legacy Fermi kernels),
 * and provide getters (see below) used by the data_mgmt module where the
 * textures are bound/unbound.
 * (In principle we could do it the other way around, but that would likely require
 * device linking, and we'd rather avoid technical hurdles.)
 */
/*! Texture reference for LJ C6/C12 parameters; bound to cu_nbparam_t.nbfp */
texture<float, 1, cudaReadModeElementType> nbfp_texref;

/*! Texture reference for LJ-PME parameters; bound to cu_nbparam_t.nbfp_comb */
texture<float, 1, cudaReadModeElementType> nbfp_comb_texref;

/*! Texture reference for Ewald coulomb force table; bound to cu_nbparam_t.coulomb_tab */
texture<float, 1, cudaReadModeElementType> coulomb_tab_texref;
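
/* Illustrative sketch, not part of this translation unit: the data_mgmt module
 * retrieves these references through the getters defined at the bottom of this
 * file and binds/unbinds the device buffers to them. Assuming a device buffer
 * devNbfp of nbfpSizeInBytes bytes (both names hypothetical), the binding uses
 * the standard CUDA texture-reference API roughly as follows:
 *
 *   cudaBindTexture(nullptr, nbnxn_cuda_get_nbfp_texref(), devNbfp, nbfpSizeInBytes);
 *   ...
 *   cudaUnbindTexture(nbnxn_cuda_get_nbfp_texref());
 */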
/***** The kernel declarations/definitions come here *****/

/* Top-level kernel declaration generation: will generate through multiple
 * inclusion the following flavors for all kernel declarations:
 * - force-only output;
 * - force and energy output;
 * - force-only with pair-list pruning;
 * - force and energy output with pair-list pruning.
 */
#define FUNCTION_DECLARATION_ONLY
/** Force only **/
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernels.cuh"
/** Force & energy **/
#define CALC_ENERGIES
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernels.cuh"
#undef CALC_ENERGIES

/*** Pair-list pruning kernels ***/
/** Force only **/
#define PRUNE_NBL
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernels.cuh"
/** Force & energy **/
#define CALC_ENERGIES
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernels.cuh"
#undef CALC_ENERGIES
#undef PRUNE_NBL

/* Prune-only kernels */
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel_pruneonly.cuh"
#undef FUNCTION_DECLARATION_ONLY
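
/* Illustrative sketch of what the multiple inclusion above generates: one
 * declaration per electrostatics/VdW combination and flavor, where the pass
 * macros select the suffix (CALC_ENERGIES adds the "_VF" variants, the prune
 * macro the "_prune" variants). A representative generated declaration,
 * assuming the signature used for the launches below:
 *
 *   __global__ void nbnxn_kernel_ElecEw_VdwLJ_F_cuda(const cu_atomdata_t,
 *                                                    const cu_nbparam_t,
 *                                                    const cu_plist_t,
 *                                                    bool);
 */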
/* Now generate the function definitions if we are using a single compilation unit. */
#if GMX_CUDA_NB_SINGLE_COMPILATION_UNIT
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel_F_noprune.cu"
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel_F_prune.cu"
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel_VF_noprune.cu"
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel_VF_prune.cu"
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel_pruneonly.cu"
#else
/* Prevent compilation in multiple compilation unit mode for CC 2.x. Although we have
 * build-time checks to prevent this, the user could manually tweak the nvcc flags,
 * which would lead to buggy kernels getting compiled.
 */
#if GMX_PTX_ARCH > 0 && GMX_PTX_ARCH <= 210 && !defined(__clang__)
#error Due to a CUDA nvcc compiler bug, the CUDA non-bonded module can not be compiled with multiple compilation units for CC 2.x devices. If you have changed the nvcc flags manually, either use the GMX_CUDA_TARGET_* variables instead or set the GMX_CUDA_NB_SINGLE_COMPILATION_UNIT=ON CMake option.
#endif
#endif /* GMX_CUDA_NB_SINGLE_COMPILATION_UNIT */
/*! Nonbonded kernel function pointer type */
typedef void (*nbnxn_cu_kfunc_ptr_t)(const cu_atomdata_t,
                                     const cu_nbparam_t,
                                     const cu_plist_t,
                                     bool);
/*********************************/

/* XXX switch between chevron and cudaLaunchKernel (supported only in CUDA >=7.0)
   -- only for benchmarking purposes */
static const bool bUseCudaLaunchKernel =
    (GMX_CUDA_VERSION >= 7000) && (getenv("GMX_DISABLE_CUDALAUNCH") == NULL);
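
/* Illustrative sketch of the two launch paths toggled above; the kernel k and
 * the arguments a, b are hypothetical. For a kernel k(a, b) the two forms are
 * equivalent:
 *
 *   k<<< dim_grid, dim_block, shmem, stream>>> (a, b);
 *
 *   void *args[] = { &a, &b };
 *   cudaLaunchKernel((void *)k, dim_grid, dim_block, args, shmem, stream);
 */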
/*! Returns the number of blocks to be used for the nonbonded GPU kernel. */
static inline int calc_nb_kernel_nblock(int nwork_units, const gmx_device_info_t *dinfo)
{
    int max_grid_x_size;

    /* CUDA does not accept a grid dimension of 0 (which can happen e.g. with an
       empty domain), and that case should be handled before this point. */
    assert(nwork_units > 0);

    max_grid_x_size = dinfo->prop.maxGridSize[0];

    /* do we exceed the grid x dimension limit? */
    if (nwork_units > max_grid_x_size)
    {
        gmx_fatal(FARGS, "Watch out, the input system is too large to simulate!\n"
                  "The number of nonbonded work units (=number of super-clusters) exceeds the "
                  "maximum grid size in the x dimension (%d > %d)!", nwork_units, max_grid_x_size);
    }

    return nwork_units;
}
/* Constant arrays listing all kernel function pointers and enabling selection
   of a kernel in an elegant manner. */

/*! Pointers to the nonbonded kernels organized in 2-dim arrays by:
 *  electrostatics and VDW type.
 *
 *  Note that the row- and column-order of function pointers has to match the
 *  order of the corresponding enumerated electrostatics and vdw types, respectively,
 *  defined in nbnxn_cuda_types.h.
 */
/*! Force-only kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_noprune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_F_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_F_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_F_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_F_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_F_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_cuda }
};

/*! Force + energy kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_noprune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_VF_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_VF_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_cuda }
};

/*! Force + pruning kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_prune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_cuda }
};

/*! Force + energy + pruning kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_prune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_cuda }
};
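
/* Illustrative lookup (sketch; the enum value names are assumed from
 * nbnxn_cuda_types.h): a reaction-field electrostatics + plain LJ cut-off,
 * force-only, no-prune kernel would be selected as
 *
 *   nbnxn_cu_kfunc_ptr_t k = nb_kfunc_noener_noprune_ptr[eelCuRF][evdwCuCUT];
 */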
/*! Return a pointer to the kernel version to be executed at the current step. */
static inline nbnxn_cu_kfunc_ptr_t select_nbnxn_kernel(int                                 eeltype,
                                                       int                                 evdwtype,
                                                       bool                                bDoEne,
                                                       bool                                bDoPrune,
                                                       const gmx_device_info_t gmx_unused *devInfo)
{
    nbnxn_cu_kfunc_ptr_t res;

    GMX_ASSERT(eeltype < eelCuNR,
               "The electrostatics type requested is not implemented in the CUDA kernels.");
    GMX_ASSERT(evdwtype < evdwCuNR,
               "The VdW type requested is not implemented in the CUDA kernels.");

    /* assert assumptions made by the kernels */
    GMX_ASSERT(c_nbnxnGpuClusterSize*c_nbnxnGpuClusterSize/c_nbnxnGpuClusterpairSplit == devInfo->prop.warpSize,
               "The CUDA kernels require cluster_size_i*cluster_size_j/nbnxn_gpu_clusterpair_split to match the warp size of the targeted architecture.");

    if (bDoEne)
    {
        res = bDoPrune ? nb_kfunc_ener_prune_ptr[eeltype][evdwtype] :
                         nb_kfunc_ener_noprune_ptr[eeltype][evdwtype];
    }
    else
    {
        res = bDoPrune ? nb_kfunc_noener_prune_ptr[eeltype][evdwtype] :
                         nb_kfunc_noener_noprune_ptr[eeltype][evdwtype];
    }

    return res;
}
/*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use. */
static inline int calc_shmem_required_nonbonded(const int num_threads_z, const gmx_device_info_t gmx_unused *dinfo, const cu_nbparam_t *nbp)
{
    int shmem;

    /* size of shmem (force-buffers/xq/atom type preloading) */
    /* NOTE: with the default kernel on sm3.0 and later we need shmem only for pre-loading */
    /* i-atom x+q in shared memory */
    shmem  = c_numClPerSupercl * c_clSize * sizeof(float4);
    /* cj in shared memory, for each warp separately */
    shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);
    if (dinfo->prop.major >= 3)
    {
        if (nbp->vdwtype == evdwCuCUTCOMBGEOM ||
            nbp->vdwtype == evdwCuCUTCOMBLB)
        {
            /* i-atom LJ combination parameters in shared memory */
            shmem += c_numClPerSupercl * c_clSize * sizeof(float2);
        }
        else
        {
            /* i-atom types in shared memory */
            shmem += c_numClPerSupercl * c_clSize * sizeof(int);
        }
    }
    if (dinfo->prop.major < 3)
    {
        /* force reduction buffers in shared memory */
        shmem += c_clSize * c_clSize * 3 * sizeof(float);
    }

    return shmem;
}
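
/* Worked example (sketch; the constant values are assumed, matching the usual
 * 8x8 cluster setup: c_numClPerSupercl = 8, c_clSize = 8,
 * c_nbnxnGpuClusterpairSplit = 2, c_nbnxnGpuJgroupSize = 4):
 *   xq preloading:  8 * 8 * sizeof(float4) = 1024 bytes
 *   cj preloading:  num_threads_z * 2 * 4 * sizeof(int) = 32 * num_threads_z bytes
 *   i-atom types:   8 * 8 * sizeof(int) = 256 bytes (sm >= 3.0, no combination rule)
 * so the default sm >= 3.0 kernel with num_threads_z = 1 would need 1312 bytes.
 */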
/*! As we execute nonbonded workload in separate streams, before launching
   the kernel we need to make sure that the following operations have completed:
   - atomdata allocation and related H2D transfers (every nstlist step);
   - pair list H2D transfer (every nstlist step);
   - shift vector H2D transfer (every nstlist step);
   - force (+shift force and energy) output clearing (every step).

   These operations are issued in the local stream at the beginning of the step
   and therefore always complete before the local kernel launch. The non-local
   kernel is launched after the local one, on the same device/context, and hence
   is implicitly scheduled after the operations in the local stream (including the
   above "misc_ops") on pre-GK110 devices with a single hardware queue; but on later
   devices with multiple hardware queues the dependency needs to be enforced.
   We use the misc_ops_and_local_H2D_done event to record the point where
   the local x+q H2D (and all preceding) tasks are complete and synchronize
   with this event in the non-local stream before launching the non-bonded kernel.
 */
void nbnxn_gpu_launch_kernel(gmx_nbnxn_cuda_t       *nb,
                             const nbnxn_atomdata_t *nbatom,
                             int                     flags,
                             int                     iloc)
{
    cudaError_t          stat;
    int                  adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */
    /* CUDA kernel launch-related stuff */
    int                  nblock, shmem;
    dim3                 dim_block, dim_grid;
    nbnxn_cu_kfunc_ptr_t nb_kernel = NULL; /* fn pointer to the nonbonded kernel */

    cu_atomdata_t       *adat   = nb->atdat;
    cu_nbparam_t        *nbp    = nb->nbparam;
    cu_plist_t          *plist  = nb->plist[iloc];
    cu_timers_t         *t      = nb->timers;
    cudaStream_t         stream = nb->stream[iloc];

    bool                 bCalcEner   = flags & GMX_FORCE_ENERGY;
    bool                 bCalcFshift = flags & GMX_FORCE_VIRIAL;
    bool                 bDoTime     = nb->bDoTime;
    /* Don't launch the non-local kernel if there is no work to do.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local kernel, the local x+q copy, and later (not in
       this function) the stream wait, local f copyback and the f buffer
       clearing. All these operations, except for the local interaction kernel,
       are needed for the non-local interactions. The skip of the local kernel
       call is taken care of later in this function. */
    if (canSkipWork(nb, iloc))
    {
        plist->haveFreshList = false;

        return;
    }
    /* calculate the atom data index range based on locality */
    if (LOCAL_I(iloc))
    {
        adat_begin = 0;
        adat_len   = adat->natoms_local;
    }
    else
    {
        adat_begin = adat->natoms_local;
        adat_len   = adat->natoms - adat->natoms_local;
    }

    /* beginning of timed HtoD section */
    if (bDoTime)
    {
        t->nb_h2d[iloc].openTimingRegion(stream);
    }

    /* HtoD x, q */
    cu_copy_H2D_async(adat->xq + adat_begin, nbatom->x + adat_begin * 4,
                      adat_len * sizeof(*adat->xq), stream);

    if (bDoTime)
    {
        t->nb_h2d[iloc].closeTimingRegion(stream);
    }

    /* When we get here all misc operations issued in the local stream as well as
       the local xq H2D are done, so we record that in the local stream and wait
       for it in the nonlocal one. */
    if (nb->bUseTwoStreams)
    {
        if (iloc == eintLocal)
        {
            stat = cudaEventRecord(nb->misc_ops_and_local_H2D_done, stream);
            CU_RET_ERR(stat, "cudaEventRecord on misc_ops_and_local_H2D_done failed");
        }
        else
        {
            stat = cudaStreamWaitEvent(stream, nb->misc_ops_and_local_H2D_done, 0);
            CU_RET_ERR(stat, "cudaStreamWaitEvent on misc_ops_and_local_H2D_done failed");
        }
    }
    if (nbp->useDynamicPruning && plist->haveFreshList)
    {
        /* Prunes for rlistOuter and rlistInner, sets plist->haveFreshList=false
           (TODO: this is how the timing accounting can distinguish between the
           separate prune kernel and the combined force+prune kernel; maybe we need a better way?).
         */
        nbnxn_gpu_launch_kernel_pruneonly(nb, iloc, 1);
    }

    if (plist->nsci == 0)
    {
        /* Don't launch an empty local kernel (not allowed with CUDA) */
        return;
    }

    /* beginning of timed nonbonded calculation section */
    if (bDoTime)
    {
        t->nb_k[iloc].openTimingRegion(stream);
    }

    /* get the pointer to the kernel flavor we need to use */
    nb_kernel = select_nbnxn_kernel(nbp->eeltype,
                                    nbp->vdwtype,
                                    bCalcEner,
                                    (plist->haveFreshList && !nb->timers->didPrune[iloc]),
                                    nb->dev_info);
    /* Kernel launch config:
     * - The thread block dimensions match the size of i-clusters, j-clusters,
     *   and j-cluster concurrency, in x, y, and z, respectively.
     * - The 1D block-grid contains as many blocks as super-clusters.
     */
    int num_threads_z = 1;
    if (nb->dev_info->prop.major == 3 && nb->dev_info->prop.minor == 7)
    {
        num_threads_z = 2;
    }
    nblock    = calc_nb_kernel_nblock(plist->nsci, nb->dev_info);
    dim_block = dim3(c_clSize, c_clSize, num_threads_z);
    dim_grid  = dim3(nblock, 1, 1);
    shmem     = calc_shmem_required_nonbonded(num_threads_z, nb->dev_info, nbp);

    if (debug)
    {
        fprintf(debug, "Non-bonded GPU launch configuration:\n\tThread block: %ux%ux%u\n"
                "\tGrid: %ux%u\n\t#Super-clusters/clusters: %d/%d (%d)\n"
                "\tShMem: %d\n",
                dim_block.x, dim_block.y, dim_block.z,
                dim_grid.x, dim_grid.y, plist->nsci*c_numClPerSupercl,
                c_numClPerSupercl, plist->na_c,
                shmem);
    }
    if (bUseCudaLaunchKernel)
    {
        gmx_unused void* kernel_args[4];
        kernel_args[0] = adat;
        kernel_args[1] = nbp;
        kernel_args[2] = plist;
        kernel_args[3] = &bCalcFshift;

#if GMX_CUDA_VERSION >= 7000
        cudaLaunchKernel((void *)nb_kernel, dim_grid, dim_block, kernel_args, shmem, stream);
#endif
    }
    else
    {
        nb_kernel<<< dim_grid, dim_block, shmem, stream>>> (*adat, *nbp, *plist, bCalcFshift);
    }
    CU_LAUNCH_ERR("k_calc_nb");
    if (bDoTime)
    {
        t->nb_k[iloc].closeTimingRegion(stream);
    }

#if (defined(WIN32) || defined( _WIN32 ))
    /* Windows: force flushing WDDM queue */
    stat = cudaStreamQuery(stream);
#endif
}
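
/* Illustrative call sequence (sketch, simplified; the atom-locality values
 * eatLocal/eatNonlocal are assumed counterparts of eintLocal/eintNonlocal):
 * with domain decomposition one step would issue
 *
 *   nbnxn_gpu_launch_kernel(nb, nbatom, flags, eintLocal);
 *   nbnxn_gpu_launch_kernel(nb, nbatom, flags, eintNonlocal);
 *   nbnxn_gpu_launch_cpyback(nb, nbatom, flags, eatNonlocal);
 *   nbnxn_gpu_launch_cpyback(nb, nbatom, flags, eatLocal);
 */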
/*! Calculates the amount of shared memory required by the CUDA prune kernel. */
static inline int calc_shmem_required_prune(const int num_threads_z)
{
    int shmem;

    /* i-atom x in shared memory */
    shmem  = c_numClPerSupercl * c_clSize * sizeof(float4);
    /* cj in shared memory, for each warp separately */
    shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);

    return shmem;
}
void nbnxn_gpu_launch_kernel_pruneonly(gmx_nbnxn_cuda_t *nb,
                                       int               iloc,
                                       int               numParts)
{
    cu_atomdata_t *adat   = nb->atdat;
    cu_nbparam_t  *nbp    = nb->nbparam;
    cu_plist_t    *plist  = nb->plist[iloc];
    cu_timers_t   *t      = nb->timers;
    cudaStream_t   stream = nb->stream[iloc];

    bool           bDoTime = nb->bDoTime;
    if (plist->haveFreshList)
    {
        GMX_ASSERT(numParts == 1, "With first pruning we expect 1 part");

        /* Set rollingPruningNumParts to signal that it is not set */
        plist->rollingPruningNumParts = 0;
        plist->rollingPruningPart     = 0;
    }
    else
    {
        if (plist->rollingPruningNumParts == 0)
        {
            plist->rollingPruningNumParts = numParts;
        }
        else
        {
            GMX_ASSERT(numParts == plist->rollingPruningNumParts, "It is not allowed to change numParts in between list generation steps");
        }
    }

    /* Use a local variable for part and update in plist, so we can return here
     * without duplicating the part increment code.
     */
    int part = plist->rollingPruningPart;

    plist->rollingPruningPart++;
    if (plist->rollingPruningPart >= plist->rollingPruningNumParts)
    {
        plist->rollingPruningPart = 0;
    }

    /* Compute the number of list entries to prune in this pass */
    int numSciInPart = (plist->nsci - part)/numParts;
    /* Don't launch the kernel if there is no work to do (not allowed with CUDA) */
    if (numSciInPart <= 0)
    {
        plist->haveFreshList = false;

        return;
    }

    GpuRegionTimer *timer = nullptr;
    if (bDoTime)
    {
        timer = &(plist->haveFreshList ? t->prune_k[iloc] : t->rollingPrune_k[iloc]);
    }

    /* beginning of timed prune calculation section */
    if (bDoTime)
    {
        timer->openTimingRegion(stream);
    }

    /* Kernel launch config:
     * - The thread block dimensions match the size of i-clusters, j-clusters,
     *   and j-cluster concurrency, in x, y, and z, respectively.
     * - The 1D block-grid contains as many blocks as super-clusters.
     */
    int  num_threads_z = c_cudaPruneKernelJ4Concurrency;
    int  nblock        = calc_nb_kernel_nblock(numSciInPart, nb->dev_info);
    dim3 dim_block     = dim3(c_clSize, c_clSize, num_threads_z);
    dim3 dim_grid      = dim3(nblock, 1, 1);
    int  shmem         = calc_shmem_required_prune(num_threads_z);
    if (debug)
    {
        fprintf(debug, "Pruning GPU kernel launch configuration:\n\tThread block: %ux%ux%u\n"
                "\tGrid: %ux%u\n\t#Super-clusters/clusters: %d/%d (%d)\n"
                "\tShMem: %d\n",
                dim_block.x, dim_block.y, dim_block.z,
                dim_grid.x, dim_grid.y, numSciInPart*c_numClPerSupercl,
                c_numClPerSupercl, plist->na_c,
                shmem);
    }
    if (bUseCudaLaunchKernel)
    {
        gmx_unused void* kernel_args[5];
        kernel_args[0] = adat;
        kernel_args[1] = nbp;
        kernel_args[2] = plist;
        kernel_args[3] = &numParts;
        kernel_args[4] = &part;

#if GMX_CUDA_VERSION >= 7000
        if (plist->haveFreshList)
        {
            cudaLaunchKernel((void *)nbnxn_kernel_prune_cuda<true>, dim_grid, dim_block, kernel_args, shmem, stream);
        }
        else
        {
            cudaLaunchKernel((void *)nbnxn_kernel_prune_cuda<false>, dim_grid, dim_block, kernel_args, shmem, stream);
        }
#endif
    }
    else
    {
        if (plist->haveFreshList)
        {
            nbnxn_kernel_prune_cuda<true><<< dim_grid, dim_block, shmem, stream>>> (*adat, *nbp, *plist, numParts, part);
        }
        else
        {
            nbnxn_kernel_prune_cuda<false><<< dim_grid, dim_block, shmem, stream>>> (*adat, *nbp, *plist, numParts, part);
        }
    }
    CU_LAUNCH_ERR("k_pruneonly");
    /* TODO: consider a more elegant way to track which kernel has been called
       (combined or separate 1st pass prune, rolling prune). */
    if (plist->haveFreshList)
    {
        plist->haveFreshList = false;
        /* Mark that pruning has been done */
        nb->timers->didPrune[iloc] = true;
    }
    else
    {
        /* Mark that rolling pruning has been done */
        nb->timers->didRollingPrune[iloc] = true;
    }

    if (bDoTime)
    {
        timer->closeTimingRegion(stream);
    }

#if (defined(WIN32) || defined( _WIN32 ))
    /* Windows: force flushing WDDM queue */
    cudaError_t gmx_unused stat = cudaStreamQuery(stream);
#endif
}
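
/* Illustrative usage of rolling pruning (sketch): between two pair-list
 * regeneration steps the list can be pruned in, e.g., two parts on
 * consecutive steps; the part counter kept in the pair list advances
 * automatically:
 *
 *   nbnxn_gpu_launch_kernel_pruneonly(nb, eintLocal, 2);   // prunes part 0
 *   nbnxn_gpu_launch_kernel_pruneonly(nb, eintLocal, 2);   // prunes part 1
 */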
void nbnxn_gpu_launch_cpyback(gmx_nbnxn_cuda_t       *nb,
                              const nbnxn_atomdata_t *nbatom,
                              int                     flags,
                              int                     aloc)
{
    cudaError_t stat;
    int         adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */

    /* determine interaction locality from atom locality */
    int iloc = gpuAtomToInteractionLocality(aloc);

    cu_atomdata_t *adat    = nb->atdat;
    cu_timers_t   *t       = nb->timers;
    bool           bDoTime = nb->bDoTime;
    cudaStream_t   stream  = nb->stream[iloc];

    bool bCalcEner   = flags & GMX_FORCE_ENERGY;
    bool bCalcFshift = flags & GMX_FORCE_VIRIAL;

    /* don't launch non-local copy-back if there was no non-local work to do */
    if (canSkipWork(nb, iloc))
    {
        return;
    }

    getGpuAtomRange(adat, aloc, adat_begin, adat_len);
    /* beginning of timed D2H section */
    if (bDoTime)
    {
        t->nb_d2h[iloc].openTimingRegion(stream);
    }

    /* With DD the local D2H transfer can only start after the non-local
       kernel has finished. */
    if (iloc == eintLocal && nb->bUseTwoStreams)
    {
        stat = cudaStreamWaitEvent(stream, nb->nonlocal_done, 0);
        CU_RET_ERR(stat, "cudaStreamWaitEvent on nonlocal_done failed");
    }

    /* DtoH f */
    cu_copy_D2H_async(nbatom->out[0].f + adat_begin * 3, adat->f + adat_begin,
                      (adat_len)*sizeof(*adat->f), stream);
    /* After the non-local D2H is launched the nonlocal_done event can be
       recorded which signals that the local D2H can proceed. This event is not
       placed after the non-local kernel because we want the non-local data
       back first. */
    if (iloc == eintNonlocal)
    {
        stat = cudaEventRecord(nb->nonlocal_done, stream);
        CU_RET_ERR(stat, "cudaEventRecord on nonlocal_done failed");
    }
    /* only transfer energies in the local stream */
    if (iloc == eintLocal)
    {
        /* DtoH fshift */
        if (bCalcFshift)
        {
            cu_copy_D2H_async(nb->nbst.fshift, adat->fshift,
                              SHIFTS * sizeof(*nb->nbst.fshift), stream);
        }

        /* DtoH energies */
        if (bCalcEner)
        {
            cu_copy_D2H_async(nb->nbst.e_lj, adat->e_lj,
                              sizeof(*nb->nbst.e_lj), stream);
            cu_copy_D2H_async(nb->nbst.e_el, adat->e_el,
                              sizeof(*nb->nbst.e_el), stream);
        }
    }

    if (bDoTime)
    {
        t->nb_d2h[iloc].closeTimingRegion(stream);
    }
}
const struct texture<float, 1, cudaReadModeElementType> &nbnxn_cuda_get_nbfp_texref()
{
    return nbfp_texref;
}

const struct texture<float, 1, cudaReadModeElementType> &nbnxn_cuda_get_nbfp_comb_texref()
{
    return nbfp_comb_texref;
}

const struct texture<float, 1, cudaReadModeElementType> &nbnxn_cuda_get_coulomb_tab_texref()
{
    return coulomb_tab_texref;
}
void nbnxn_cuda_set_cacheconfig(const gmx_device_info_t *devinfo)
{
    cudaError_t stat;

    for (int i = 0; i < eelCuNR; i++)
    {
        for (int j = 0; j < evdwCuNR; j++)
        {
            if (devinfo->prop.major >= 3)
            {
                /* Default kernel on sm_3.x and later: 32/32 kB Shared/L1 */
                cudaFuncSetCacheConfig(nb_kfunc_ener_prune_ptr[i][j], cudaFuncCachePreferEqual);
                cudaFuncSetCacheConfig(nb_kfunc_ener_noprune_ptr[i][j], cudaFuncCachePreferEqual);
                cudaFuncSetCacheConfig(nb_kfunc_noener_prune_ptr[i][j], cudaFuncCachePreferEqual);
                stat = cudaFuncSetCacheConfig(nb_kfunc_noener_noprune_ptr[i][j], cudaFuncCachePreferEqual);
            }
            else
            {
                /* On Fermi, preferring L1 gives 2% higher performance */
                /* Default kernel on sm_2.x: 16/48 kB Shared/L1 */
                cudaFuncSetCacheConfig(nb_kfunc_ener_prune_ptr[i][j], cudaFuncCachePreferL1);
                cudaFuncSetCacheConfig(nb_kfunc_ener_noprune_ptr[i][j], cudaFuncCachePreferL1);
                cudaFuncSetCacheConfig(nb_kfunc_noener_prune_ptr[i][j], cudaFuncCachePreferL1);
                stat = cudaFuncSetCacheConfig(nb_kfunc_noener_noprune_ptr[i][j], cudaFuncCachePreferL1);
            }
            CU_RET_ERR(stat, "cudaFuncSetCacheConfig failed");
        }
    }
}