/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \file
 *  \brief Define CUDA implementation of nbnxn_gpu.h
 *
 *  \author Szilard Pall <pall.szilard@gmail.com>
 */
#include "gmxpre.h"

#include "config.h"

#include <assert.h>
#include <stdlib.h>

#include "gromacs/mdlib/nbnxn_gpu.h"

#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/mdlib/force_flags.h"
#include "gromacs/mdlib/nb_verlet.h"
#include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
#include "gromacs/mdlib/nbnxn_pairlist.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"

#include "nbnxn_cuda_types.h"
/*! Texture references are created at compile-time and need to be declared
 * at file scope as global variables (see http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#texture-reference-api).
 * The texture references below are used in two translation units;
 * we declare them here alongside the kernels that use them (when compiling legacy Fermi kernels),
 * and provide getters (see below) used by the data_mgmt module where the
 * textures are bound/unbound.
 * (In principle we could do it the other way around, but that would likely require
 * device linking and we'd rather avoid technical hurdles.)
 */
/*! Texture reference for LJ C6/C12 parameters; bound to cu_nbparam_t.nbfp */
texture<float, 1, cudaReadModeElementType> nbfp_texref;

/*! Texture reference for LJ-PME parameters; bound to cu_nbparam_t.nbfp_comb */
texture<float, 1, cudaReadModeElementType> nbfp_comb_texref;

/*! Texture reference for Ewald coulomb force table; bound to cu_nbparam_t.coulomb_tab */
texture<float, 1, cudaReadModeElementType> coulomb_tab_texref;
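/* Illustrative sketch only (not part of this file): the data_mgmt module is
 * expected to bind a texture reference through the getters defined at the end
 * of this file, roughly as follows; devPtr and sizeInBytes are placeholder
 * names for the device buffer and its size in bytes.
 *
 *   cudaChannelFormatDesc cd   = cudaCreateChannelDesc<float>();
 *   cudaError_t           stat = cudaBindTexture(NULL, &nbnxn_cuda_get_nbfp_texref(),
 *                                                devPtr, &cd, sizeInBytes);
 *   CU_RET_ERR(stat, "cudaBindTexture on nbfp_texref failed");
 */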
/***** The kernel declarations/definitions come here *****/

/* Top-level kernel declaration generation: will generate through multiple
 * inclusion the following flavors for all kernel declarations:
 * - force-only output;
 * - force and energy output;
 * - force-only with pair list pruning;
 * - force and energy output with pair list pruning.
 */
#define FUNCTION_DECLARATION_ONLY
/** Force only **/
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernels.cuh"
/** Force & energy **/
#define CALC_ENERGIES
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernels.cuh"
#undef CALC_ENERGIES

/*** Pair-list pruning kernels ***/
/** Force only **/
#define PRUNE_NBL
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernels.cuh"
/** Force & energy **/
#define CALC_ENERGIES
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernels.cuh"
#undef CALC_ENERGIES
#undef PRUNE_NBL

/* Prune-only kernels */
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel_pruneonly.cuh"
#undef FUNCTION_DECLARATION_ONLY
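/* For reference, the multiple inclusion above generates kernel names of the form
 *   nbnxn_kernel_<Elec><Vdw>_F_cuda          force only
 *   nbnxn_kernel_<Elec><Vdw>_VF_cuda         force + energy
 *   nbnxn_kernel_<Elec><Vdw>_F_prune_cuda    force + pruning
 *   nbnxn_kernel_<Elec><Vdw>_VF_prune_cuda   force + energy + pruning
 * which are exactly the entries of the function pointer tables further down.
 */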
/* Now generate the function definitions if we are using a single compilation unit. */
#if GMX_CUDA_NB_SINGLE_COMPILATION_UNIT
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel_F_noprune.cu"
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel_F_prune.cu"
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel_VF_noprune.cu"
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel_VF_prune.cu"
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel_pruneonly.cu"
#else
/* Prevent compilation in multiple compilation unit mode for CC 2.x. Although we have
 * build-time checks to prevent this, the user could manually tweak the nvcc flags,
 * which would lead to buggy kernels getting compiled.
 */
#if GMX_PTX_ARCH > 0 && GMX_PTX_ARCH <= 210
#error Due to a CUDA compiler bug, the CUDA non-bonded module cannot be compiled with multiple compilation units for CC 2.x devices. If you have changed the nvcc flags manually, either use the GMX_CUDA_TARGET_* variables instead or set the GMX_CUDA_NB_SINGLE_COMPILATION_UNIT=ON CMake option.
#endif
#endif /* GMX_CUDA_NB_SINGLE_COMPILATION_UNIT */
/*! Nonbonded kernel function pointer type */
typedef void (*nbnxn_cu_kfunc_ptr_t)(const cu_atomdata_t,
                                     const cu_nbparam_t,
                                     const cu_plist_t,
                                     bool);

/*********************************/
/* XXX switch between chevron and cudaLaunchKernel (supported only in CUDA >=7.0)
   -- only for benchmarking purposes */
static const bool bUseCudaLaunchKernel =
    (GMX_CUDA_VERSION >= 7000) && (getenv("GMX_DISABLE_CUDALAUNCH") == NULL);
/* XXX always/never run the energy/pruning kernels -- only for benchmarking purposes */
static bool always_ener  = (getenv("GMX_GPU_ALWAYS_ENER") != NULL);
static bool never_ener   = (getenv("GMX_GPU_NEVER_ENER") != NULL);
static bool always_prune = (getenv("GMX_GPU_ALWAYS_PRUNE") != NULL);
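/* Example (debugging/benchmarking only): running e.g.
 *   GMX_GPU_ALWAYS_ENER=1 gmx mdrun ...
 * forces the energy kernel flavor on every step, while GMX_GPU_NEVER_ENER
 * takes precedence and disables it; see the bCalcEner expression in
 * nbnxn_gpu_launch_kernel() below. */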
/*! Returns the number of blocks to be used for the nonbonded GPU kernel. */
static inline int calc_nb_kernel_nblock(int nwork_units, const gmx_device_info_t *dinfo)
{
    int max_grid_x_size;

    assert(dinfo);
    /* CUDA does not accept grid dimension of 0 (which can happen e.g. with an
       empty domain) and that case should be handled before this point. */
    assert(nwork_units > 0);

    max_grid_x_size = dinfo->prop.maxGridSize[0];

    /* do we exceed the grid x dimension limit? */
    if (nwork_units > max_grid_x_size)
    {
        gmx_fatal(FARGS, "Watch out, the input system is too large to simulate!\n"
                  "The number of nonbonded work units (=number of super-clusters) exceeds the "
                  "maximum grid size in the x dimension (%d > %d)!", nwork_units, max_grid_x_size);
    }

    return nwork_units;
}
/* Constant arrays listing all kernel function pointers and enabling selection
   of a kernel in an elegant manner. */

/*! Pointers to the non-bonded kernels organized in 2-dim arrays by:
 *  electrostatics and VDW type.
 *
 *  Note that the row- and column-order of function pointers has to match the
 *  order of corresponding enumerated electrostatics and vdw types, resp.,
 *  defined in nbnxn_cuda_types.h.
 */
/*! Force-only kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_noprune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_F_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_F_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_F_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_F_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_F_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_cuda }
};

/*! Force + energy kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_noprune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_VF_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_VF_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_cuda }
};

/*! Force + pruning kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_prune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_cuda }
};

/*! Force + energy + pruning kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_prune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_cuda }
};
/*! Return a pointer to the kernel version to be executed at the current step. */
static inline nbnxn_cu_kfunc_ptr_t select_nbnxn_kernel(int                                 eeltype,
                                                       int                                 evdwtype,
                                                       bool                                bDoEne,
                                                       bool                                bDoPrune,
                                                       const gmx_device_info_t gmx_unused *devInfo)
{
    nbnxn_cu_kfunc_ptr_t res;

    GMX_ASSERT(eeltype < eelCuNR,
               "The electrostatics type requested is not implemented in the CUDA kernels.");
    GMX_ASSERT(evdwtype < evdwCuNR,
               "The VdW type requested is not implemented in the CUDA kernels.");

    /* assert assumptions made by the kernels */
    GMX_ASSERT(c_nbnxnGpuClusterSize*c_nbnxnGpuClusterSize/c_nbnxnGpuClusterpairSplit == devInfo->prop.warpSize,
               "The CUDA kernels require cluster_size_i*cluster_size_j/nbnxn_gpu_clusterpair_split to match the warp size of the targeted architecture.");
    if (bDoEne)
    {
        res = bDoPrune ? nb_kfunc_ener_prune_ptr[eeltype][evdwtype] :
            nb_kfunc_ener_noprune_ptr[eeltype][evdwtype];
    }
    else
    {
        res = bDoPrune ? nb_kfunc_noener_prune_ptr[eeltype][evdwtype] :
            nb_kfunc_noener_noprune_ptr[eeltype][evdwtype];
    }

    return res;
}
/*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use. */
static inline int calc_shmem_required_nonbonded(const int num_threads_z, const gmx_device_info_t gmx_unused *dinfo, const cu_nbparam_t *nbp)
{
    int shmem;

    assert(dinfo);

    /* size of shmem (force-buffers/xq/atom type preloading) */
    /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
    /* i-atom x+q in shared memory */
    shmem  = c_numClPerSupercl * c_clSize * sizeof(float4);
    /* cj in shared memory, for each warp separately */
    shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);

    if (dinfo->prop.major >= 3)
    {
        if (nbp->vdwtype == evdwCuCUTCOMBGEOM ||
            nbp->vdwtype == evdwCuCUTCOMBLB)
        {
            /* i-atom LJ combination parameters in shared memory */
            shmem += c_numClPerSupercl * c_clSize * sizeof(float2);
        }
        else
        {
            /* i-atom types in shared memory */
            shmem += c_numClPerSupercl * c_clSize * sizeof(int);
        }
    }

    if (dinfo->prop.major < 3)
    {
        /* force reduction buffers in shared memory */
        shmem += c_clSize * c_clSize * 3 * sizeof(float);
    }

    return shmem;
}
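/* Worked example, assuming the standard CUDA cluster setup
 * (c_numClPerSupercl = 8, c_clSize = 8, c_nbnxnGpuClusterpairSplit = 2,
 * c_nbnxnGpuJgroupSize = 4), num_threads_z = 1, and the per-atom-type branch
 * on a CC >= 3.0 device:
 *   8*8*16 B (xq) + 1*2*4*4 B (cj) + 8*8*4 B (types)
 *   = 1024 + 32 + 256 = 1312 bytes of shared memory per block. */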
/*! As we execute nonbonded workload in separate streams, before launching
   the kernel we need to make sure that the following operations have completed:
   - atomdata allocation and related H2D transfers (every nstlist step);
   - pair list H2D transfer (every nstlist step);
   - shift vector H2D transfer (every nstlist step);
   - force (+shift force and energy) output clearing (every step).

   These operations are issued in the local stream at the beginning of the step
   and therefore always complete before the local kernel launch. The non-local
   kernel is launched after the local one on the same device/context, hence it is
   inherently scheduled after the operations in the local stream (including the
   above "misc_ops") on pre-GK110 devices with a single hardware queue; but on later
   devices with multiple hardware queues the dependency needs to be enforced.
   We use the misc_ops_and_local_H2D_done event to record the point where
   the local x+q H2D (and all preceding) tasks are complete and synchronize
   with this event in the non-local stream before launching the non-bonded kernel.
 */
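/* The resulting cross-stream dependency, distilled into a sketch (localStream
 * and nonLocalStream stand for nb->stream[eintLocal] and
 * nb->stream[eintNonlocal], respectively):
 *
 *   // local stream: misc. ops and local x+q H2D already issued
 *   cudaEventRecord(nb->misc_ops_and_local_H2D_done, localStream);
 *   // non-local stream: enforce ordering before the non-local kernel launch
 *   cudaStreamWaitEvent(nonLocalStream, nb->misc_ops_and_local_H2D_done, 0);
 */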
void nbnxn_gpu_launch_kernel(gmx_nbnxn_cuda_t       *nb,
                             const nbnxn_atomdata_t *nbatom,
                             int                     flags,
                             int                     iloc)
{
    cudaError_t          stat;
    int                  adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */
    /* CUDA kernel launch-related stuff */
    int                  shmem, nblock;
    dim3                 dim_block, dim_grid;
    nbnxn_cu_kfunc_ptr_t nb_kernel = NULL; /* fn pointer to the nonbonded kernel */

    cu_atomdata_t       *adat   = nb->atdat;
    cu_nbparam_t        *nbp    = nb->nbparam;
    cu_plist_t          *plist  = nb->plist[iloc];
    cu_timers_t         *t      = nb->timers;
    cudaStream_t         stream = nb->stream[iloc];

    bool                 bCalcEner   = flags & GMX_FORCE_ENERGY;
    bool                 bCalcFshift = flags & GMX_FORCE_VIRIAL;
    bool                 bDoTime     = nb->bDoTime;

    /* turn energy calculation always on/off (for debugging/testing only) */
    bCalcEner = (bCalcEner || always_ener) && !never_ener;
    /* Don't launch the non-local kernel if there is no work to do.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local kernel, the local x+q copy and later (not in
       this function) the stream wait, local f copyback and the f buffer
       clearing. All these operations, except for the local interaction kernel,
       are needed for the non-local interactions. The skip of the local kernel
       call is taken care of later in this function. */
    if (iloc == eintNonlocal && plist->nsci == 0)
    {
        plist->haveFreshList = false;

        return;
    }
    /* calculate the atom data index range based on locality */
    if (LOCAL_I(iloc))
    {
        adat_begin = 0;
        adat_len   = adat->natoms_local;
    }
    else
    {
        adat_begin = adat->natoms_local;
        adat_len   = adat->natoms - adat->natoms_local;
    }
    /* beginning of timed HtoD section */
    if (bDoTime)
    {
        stat = cudaEventRecord(t->start_nb_h2d[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

    /* HtoD x, q */
    cu_copy_H2D_async(adat->xq + adat_begin, nbatom->x + adat_begin * 4,
                      adat_len * sizeof(*adat->xq), stream);
    /* When we get here all misc operations issued in the local stream as well as
       the local xq H2D are done,
       so we record that in the local stream and wait for it in the nonlocal one. */
    if (nb->bUseTwoStreams)
    {
        if (iloc == eintLocal)
        {
            stat = cudaEventRecord(nb->misc_ops_and_local_H2D_done, stream);
            CU_RET_ERR(stat, "cudaEventRecord on misc_ops_and_local_H2D_done failed");
        }
        else
        {
            stat = cudaStreamWaitEvent(stream, nb->misc_ops_and_local_H2D_done, 0);
            CU_RET_ERR(stat, "cudaStreamWaitEvent on misc_ops_and_local_H2D_done failed");
        }
    }

    if (bDoTime)
    {
        stat = cudaEventRecord(t->stop_nb_h2d[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }
    if (nbp->useDynamicPruning && plist->haveFreshList)
    {
        /* Prunes for rlistOuter and rlistInner, sets plist->haveFreshList=false
           (TODO: ATM that's the way the timing accounting can distinguish between
           separate prune kernel and combined force+prune, maybe we need a better way?).
         */
        nbnxn_gpu_launch_kernel_pruneonly(nb, iloc, 1);
    }

    if (plist->nsci == 0)
    {
        /* Don't launch an empty local kernel (not allowed with CUDA) */
        return;
    }
    /* beginning of timed nonbonded calculation section */
    if (bDoTime)
    {
        stat = cudaEventRecord(t->start_nb_k[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

    /* get the pointer to the kernel flavor we need to use */
    nb_kernel = select_nbnxn_kernel(nbp->eeltype,
                                    nbp->vdwtype,
                                    bCalcEner,
                                    (plist->haveFreshList && !nb->timers->didPrune[iloc]) || always_prune,
                                    nb->dev_info);
    /* Kernel launch config:
     * - The thread block dimensions match the size of i-clusters, j-clusters,
     *   and j-cluster concurrency, in x, y, and z, respectively.
     * - The 1D block-grid contains as many blocks as super-clusters.
     */
    int num_threads_z = 1;
    if (nb->dev_info->prop.major == 3 && nb->dev_info->prop.minor == 7)
    {
        num_threads_z = 2;
    }
    nblock    = calc_nb_kernel_nblock(plist->nsci, nb->dev_info);
    dim_block = dim3(c_clSize, c_clSize, num_threads_z);
    dim_grid  = dim3(nblock, 1, 1);
    shmem     = calc_shmem_required_nonbonded(num_threads_z, nb->dev_info, nbp);
    if (debug)
    {
        fprintf(debug, "Non-bonded GPU launch configuration:\n\tThread block: %ux%ux%u\n\t"
                "Grid: %ux%u\n\t#Super-clusters/clusters: %d/%d (%d)\n"
                "\tShMem: %d\n",
                dim_block.x, dim_block.y, dim_block.z,
                dim_grid.x, dim_grid.y, plist->nsci*c_numClPerSupercl,
                c_numClPerSupercl, plist->na_c,
                shmem);
    }
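    /* E.g. with c_clSize = 8 the block is 8x8xnum_threads_z = 64*num_threads_z
     * threads, i.e. two warps per j-cluster group with the
     * c_nbnxnGpuClusterpairSplit = 2 split, and the grid is plist->nsci blocks
     * wide in x. */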
    if (bUseCudaLaunchKernel)
    {
        gmx_unused void* kernel_args[4];
        kernel_args[0] = adat;
        kernel_args[1] = nbp;
        kernel_args[2] = plist;
        kernel_args[3] = &bCalcFshift;

#if GMX_CUDA_VERSION >= 7000
        cudaLaunchKernel((void *)nb_kernel, dim_grid, dim_block, kernel_args, shmem, stream);
#endif
    }
    else
    {
        nb_kernel<<< dim_grid, dim_block, shmem, stream>>> (*adat, *nbp, *plist, bCalcFshift);
    }
    CU_LAUNCH_ERR("k_calc_nb");
    if (bDoTime)
    {
        stat = cudaEventRecord(t->stop_nb_k[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

#if (defined(WIN32) || defined( _WIN32 ))
    /* Windows: force flushing WDDM queue */
    stat = cudaStreamQuery(stream);
#endif
}
/*! Calculates the amount of shared memory required by the prune kernel in use. */
static inline int calc_shmem_required_prune(const int num_threads_z)
{
    int shmem;

    /* i-atom x in shared memory */
    shmem  = c_numClPerSupercl * c_clSize * sizeof(float4);
    /* cj in shared memory, for each warp separately */
    shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);

    return shmem;
}
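/* Worked example, assuming the same cluster setup as above and the default
 * prune-kernel j4 concurrency of c_cudaPruneKernelJ4Concurrency = 4:
 *   8*8*16 B (x) + 4*2*4*4 B (cj) = 1024 + 128 = 1152 bytes per block. */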
void nbnxn_gpu_launch_kernel_pruneonly(gmx_nbnxn_cuda_t *nb,
                                       int               iloc,
                                       int               numParts)
{
    cudaError_t    stat;

    cu_atomdata_t *adat   = nb->atdat;
    cu_nbparam_t  *nbp    = nb->nbparam;
    cu_plist_t    *plist  = nb->plist[iloc];
    cu_timers_t   *t      = nb->timers;
    cudaStream_t   stream = nb->stream[iloc];

    bool           bDoTime = nb->bDoTime;
    if (plist->haveFreshList)
    {
        GMX_ASSERT(numParts == 1, "With first pruning we expect 1 part");

        /* Set rollingPruningNumParts to signal that it is not set */
        plist->rollingPruningNumParts = 0;
        plist->rollingPruningPart     = 0;
    }
    else
    {
        if (plist->rollingPruningNumParts == 0)
        {
            plist->rollingPruningNumParts = numParts;
        }
        else
        {
            GMX_ASSERT(numParts == plist->rollingPruningNumParts, "It is not allowed to change numParts in between list generation steps");
        }
    }
    /* Use a local variable for part and update in plist, so we can return here
     * without duplicating the part increment code.
     */
    int part = plist->rollingPruningPart;

    plist->rollingPruningPart++;
    if (plist->rollingPruningPart >= plist->rollingPruningNumParts)
    {
        plist->rollingPruningPart = 0;
    }

    /* Compute the number of list entries to prune in this pass */
    int numSciInPart = (plist->nsci - part)/numParts;
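    /* E.g. plist->nsci = 1000 with numParts = 4 yields 250 list entries for
     * part 0 and 249 for parts 1-3 by this integer division. */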
    /* Don't launch the kernel if there is no work to do (not allowed with CUDA) */
    if (numSciInPart <= 0)
    {
        plist->haveFreshList = false;

        return;
    }
    cudaEvent_t startEvent, stopEvent;
    if (bDoTime)
    {
        startEvent = (plist->haveFreshList ? t->start_prune_k[iloc] : t->start_rollingPrune_k[iloc]);
        stopEvent  = (plist->haveFreshList ? t->stop_prune_k[iloc]  : t->stop_rollingPrune_k[iloc]);
    }

    /* beginning of timed prune calculation section */
    if (bDoTime)
    {
        stat = cudaEventRecord(startEvent, stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }
    /* Kernel launch config:
     * - The thread block dimensions match the size of i-clusters, j-clusters,
     *   and j-cluster concurrency, in x, y, and z, respectively.
     * - The 1D block-grid contains as many blocks as super-clusters.
     */
    int  num_threads_z = c_cudaPruneKernelJ4Concurrency;
    int  nblock        = calc_nb_kernel_nblock(numSciInPart, nb->dev_info);
    dim3 dim_block     = dim3(c_clSize, c_clSize, num_threads_z);
    dim3 dim_grid      = dim3(nblock, 1, 1);
    int  shmem         = calc_shmem_required_prune(num_threads_z);
    if (debug)
    {
        fprintf(debug, "Pruning GPU kernel launch configuration:\n\tThread block: %dx%dx%d\n\t"
                "Grid: %dx%d\n\t#Super-clusters/clusters: %d/%d (%d)\n"
                "\tShMem: %d\n",
                dim_block.x, dim_block.y, dim_block.z,
                dim_grid.x, dim_grid.y, numSciInPart*c_numClPerSupercl,
                c_numClPerSupercl, plist->na_c,
                shmem);
    }
    if (bUseCudaLaunchKernel)
    {
        gmx_unused void* kernel_args[5];
        kernel_args[0] = adat;
        kernel_args[1] = nbp;
        kernel_args[2] = plist;
        kernel_args[3] = &numParts;
        kernel_args[4] = &part;

#if GMX_CUDA_VERSION >= 7000
        if (plist->haveFreshList)
        {
            cudaLaunchKernel((void *)nbnxn_kernel_prune_cuda<true>, dim_grid, dim_block, kernel_args, shmem, stream);
        }
        else
        {
            cudaLaunchKernel((void *)nbnxn_kernel_prune_cuda<false>, dim_grid, dim_block, kernel_args, shmem, stream);
        }
#endif
    }
    else
    {
        if (plist->haveFreshList)
        {
            nbnxn_kernel_prune_cuda<true><<< dim_grid, dim_block, shmem, stream>>> (*adat, *nbp, *plist, numParts, part);
        }
        else
        {
            nbnxn_kernel_prune_cuda<false><<< dim_grid, dim_block, shmem, stream>>> (*adat, *nbp, *plist, numParts, part);
        }
    }
    CU_LAUNCH_ERR("k_pruneonly");
    /* TODO: consider a more elegant way to track which kernel has been called
       (combined or separate 1st pass prune, rolling prune). */
    if (plist->haveFreshList)
    {
        plist->haveFreshList       = false;
        /* Mark that pruning has been done */
        nb->timers->didPrune[iloc] = true;
    }
    else
    {
        /* Mark that rolling pruning has been done */
        nb->timers->didRollingPrune[iloc] = true;
    }
    if (bDoTime)
    {
        stat = cudaEventRecord(stopEvent, stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

#if (defined(WIN32) || defined( _WIN32 ))
    /* Windows: force flushing WDDM queue */
    stat = cudaStreamQuery(stream);
#endif
}
void nbnxn_gpu_launch_cpyback(gmx_nbnxn_cuda_t       *nb,
                              const nbnxn_atomdata_t *nbatom,
                              int                     flags,
                              int                     aloc)
{
    cudaError_t stat;
    int         adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */
    int         iloc = -1;

    /* determine interaction locality from atom locality */
    if (LOCAL_A(aloc))
    {
        iloc = eintLocal;
    }
    else if (NONLOCAL_A(aloc))
    {
        iloc = eintNonlocal;
    }
    else
    {
        char stmp[STRLEN];
        sprintf(stmp, "Invalid atom locality passed (%d); valid here is only "
                "local (%d) or nonlocal (%d)", aloc, eatLocal, eatNonlocal);
        gmx_incons(stmp);
    }

    cu_atomdata_t *adat    = nb->atdat;
    cu_timers_t   *t       = nb->timers;
    bool           bDoTime = nb->bDoTime;
    cudaStream_t   stream  = nb->stream[iloc];

    bool           bCalcEner   = flags & GMX_FORCE_ENERGY;
    bool           bCalcFshift = flags & GMX_FORCE_VIRIAL;

    /* don't launch non-local copy-back if there was no non-local work to do */
    if (iloc == eintNonlocal && nb->plist[iloc]->nsci == 0)
    {
        return;
    }
    /* calculate the atom data index range based on locality */
    if (LOCAL_A(aloc))
    {
        adat_begin = 0;
        adat_len   = adat->natoms_local;
    }
    else
    {
        adat_begin = adat->natoms_local;
        adat_len   = adat->natoms - adat->natoms_local;
    }
    /* beginning of timed D2H section */
    if (bDoTime)
    {
        stat = cudaEventRecord(t->start_nb_d2h[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }
    /* With DD the local D2H transfer can only start after the non-local
       kernel has finished. */
    if (iloc == eintLocal && nb->bUseTwoStreams)
    {
        stat = cudaStreamWaitEvent(stream, nb->nonlocal_done, 0);
        CU_RET_ERR(stat, "cudaStreamWaitEvent on nonlocal_done failed");
    }

    /* DtoH f */
    cu_copy_D2H_async(nbatom->out[0].f + adat_begin * 3, adat->f + adat_begin,
                      (adat_len)*sizeof(*adat->f), stream);
    /* After the non-local D2H is launched the nonlocal_done event can be
       recorded which signals that the local D2H can proceed. This event is not
       placed after the non-local kernel because we want the non-local data
       back first. */
    if (iloc == eintNonlocal)
    {
        stat = cudaEventRecord(nb->nonlocal_done, stream);
        CU_RET_ERR(stat, "cudaEventRecord on nonlocal_done failed");
    }

    /* only transfer energies in the local stream */
    if (LOCAL_I(iloc))
    {
        /* DtoH fshift */
        if (bCalcFshift)
        {
            cu_copy_D2H_async(nb->nbst.fshift, adat->fshift,
                              SHIFTS * sizeof(*nb->nbst.fshift), stream);
        }

        /* DtoH energies */
        if (bCalcEner)
        {
            cu_copy_D2H_async(nb->nbst.e_lj, adat->e_lj,
                              sizeof(*nb->nbst.e_lj), stream);
            cu_copy_D2H_async(nb->nbst.e_el, adat->e_el,
                              sizeof(*nb->nbst.e_el), stream);
        }
    }

    if (bDoTime)
    {
        stat = cudaEventRecord(t->stop_nb_d2h[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }
}
/*! \brief Count pruning kernel time if either kernel has been triggered.
 *
 *  We do the accounting for either of the two pruning kernel flavors:
 *  - 1st pass prune: ran during the current step (prior to the force kernel);
 *  - rolling prune: ran at the end of the previous step (prior to the current step H2D xq).
 *
 *  Note that the resetting of cu_timers_t::didPrune and cu_timers_t::didRollingPrune should happen
 *  after calling this function.
 *
 *  \param[in]    timers   structs with CUDA timer objects
 *  \param[inout] timings  GPU task timing data
 *  \param[in]    iloc     interaction locality
 */
static void countPruneKernelTime(const cu_timers_t   *timers,
                                 gmx_wallclock_gpu_t *timings,
                                 const int            iloc)
{
    // We might not have done any pruning (e.g. if we skipped with empty domains).
    if (!timers->didPrune[iloc] && !timers->didRollingPrune[iloc])
    {
        return;
    }

    if (timers->didPrune[iloc])
    {
        timings->pruneTime.c++;
        timings->pruneTime.t += cu_event_elapsed(timers->start_prune_k[iloc],
                                                 timers->stop_prune_k[iloc]);
    }
    if (timers->didRollingPrune[iloc])
    {
        timings->dynamicPruneTime.c++;
        timings->dynamicPruneTime.t += cu_event_elapsed(timers->start_rollingPrune_k[iloc],
                                                        timers->stop_rollingPrune_k[iloc]);
    }
}
void nbnxn_gpu_wait_for_gpu(gmx_nbnxn_cuda_t *nb,
                            int flags, int aloc,
                            real *e_lj, real *e_el, rvec *fshift)
{
    /* NOTE: only implemented for single-precision at this time */
    cudaError_t stat;

    int         iloc = -1;

    /* determine interaction locality from atom locality */
    if (LOCAL_A(aloc))
    {
        iloc = eintLocal;
    }
    else if (NONLOCAL_A(aloc))
    {
        iloc = eintNonlocal;
    }
    else
    {
        char stmp[STRLEN];
        sprintf(stmp, "Invalid atom locality passed (%d); valid here is only "
                "local (%d) or nonlocal (%d)", aloc, eatLocal, eatNonlocal);
        gmx_incons(stmp);
    }

    cu_plist_t                 *plist   = nb->plist[iloc];
    cu_timers_t                *timers  = nb->timers;
    struct gmx_wallclock_gpu_t *timings = nb->timings;
    nb_staging                  nbst    = nb->nbst;

    bool                        bCalcEner   = flags & GMX_FORCE_ENERGY;
    bool                        bCalcFshift = flags & GMX_FORCE_VIRIAL;

    /* turn energy calculation always on/off (for debugging/testing only) */
    bCalcEner = (bCalcEner || always_ener) && !never_ener;
    /* Launch wait/update timers & counters and do reduction into staging buffers,
       but skip it if during the non-local phase there was actually no work to do.
       This is consistent with nbnxn_gpu_launch_kernel.

       NOTE: if timing with multiple GPUs (streams) becomes possible, the
       counters could end up being inconsistent due to not being incremented
       on some of the nodes! */
    if (!(iloc == eintNonlocal && nb->plist[iloc]->nsci == 0))
    {
        stat = cudaStreamSynchronize(nb->stream[iloc]);
        CU_RET_ERR(stat, "cudaStreamSynchronize failed in cu_blockwait_nb");

        /* timing data accumulation */
        if (nb->bDoTime)
        {
            /* only increase counter once (at local F wait) */
            if (LOCAL_I(iloc))
            {
                timings->nb_c++;
                timings->ktime[plist->haveFreshList ? 1 : 0][bCalcEner ? 1 : 0].c += 1;
            }

            /* kernel timings */
            timings->ktime[plist->haveFreshList ? 1 : 0][bCalcEner ? 1 : 0].t +=
                cu_event_elapsed(timers->start_nb_k[iloc], timers->stop_nb_k[iloc]);

            /* X/q H2D and F D2H timings */
            timings->nb_h2d_t += cu_event_elapsed(timers->start_nb_h2d[iloc],
                                                  timers->stop_nb_h2d[iloc]);
            timings->nb_d2h_t += cu_event_elapsed(timers->start_nb_d2h[iloc],
                                                  timers->stop_nb_d2h[iloc]);

            /* Count the pruning kernel times for both cases: 1st pass (at search step)
               and rolling pruning (if called at the previous step).
               We do the accounting here as this is the only sync point where we
               know (without checking or additional sync-ing) that prune tasks
               in the current stream have completed (having just blocking-waited
               for the force D2H). */
            countPruneKernelTime(timers, timings, iloc);

            /* only count atdat and pair-list H2D at pair-search step */
            if (timers->didPairlistH2D[iloc])
            {
                /* atdat transfer timing (add only once, at local F wait) */
                if (LOCAL_A(aloc))
                {
                    timings->pl_h2d_c++;
                    timings->pl_h2d_t += cu_event_elapsed(timers->start_atdat,
                                                          timers->stop_atdat);
                }

                timings->pl_h2d_t += cu_event_elapsed(timers->start_pl_h2d[iloc],
                                                      timers->stop_pl_h2d[iloc]);

                /* Clear the timing flag for the next step */
                timers->didPairlistH2D[iloc] = false;
            }
        }

        /* add up energies and shift forces (only once at local F wait) */
        if (LOCAL_I(iloc))
        {
            if (bCalcEner)
            {
                *e_lj += *nbst.e_lj;
                *e_el += *nbst.e_el;
            }

            if (bCalcFshift)
            {
                for (int i = 0; i < SHIFTS; i++)
                {
                    fshift[i][0] += nbst.fshift[i].x;
                    fshift[i][1] += nbst.fshift[i].y;
                    fshift[i][2] += nbst.fshift[i].z;
                }
            }
        }
    }
    /* Always reset both pruning flags (doesn't hurt doing it even when timing is off). */
    timers->didPrune[iloc] = timers->didRollingPrune[iloc] = false;

    /* Turn off initial list pruning (doesn't hurt if this is not a pair-search step). */
    plist->haveFreshList = false;
}
/*! Return the reference to the nbfp texture. */
const struct texture<float, 1, cudaReadModeElementType> &nbnxn_cuda_get_nbfp_texref()
{
    assert(!c_disableCudaTextures);
    return nbfp_texref;
}

/*! Return the reference to the nbfp_comb texture. */
const struct texture<float, 1, cudaReadModeElementType> &nbnxn_cuda_get_nbfp_comb_texref()
{
    assert(!c_disableCudaTextures);
    return nbfp_comb_texref;
}

/*! Return the reference to the coulomb_tab texture. */
const struct texture<float, 1, cudaReadModeElementType> &nbnxn_cuda_get_coulomb_tab_texref()
{
    assert(!c_disableCudaTextures);
    return coulomb_tab_texref;
}
/*! Set up the cache configuration for the non-bonded kernels.
 */
void nbnxn_cuda_set_cacheconfig(const gmx_device_info_t *devinfo)
{
    cudaError_t stat;

    for (int i = 0; i < eelCuNR; i++)
    {
        for (int j = 0; j < evdwCuNR; j++)
        {
            if (devinfo->prop.major >= 3)
            {
                /* Default kernel: 32/32 kB Shared/L1 on sm 3.x and later */
                cudaFuncSetCacheConfig(nb_kfunc_ener_prune_ptr[i][j], cudaFuncCachePreferEqual);
                cudaFuncSetCacheConfig(nb_kfunc_ener_noprune_ptr[i][j], cudaFuncCachePreferEqual);
                cudaFuncSetCacheConfig(nb_kfunc_noener_prune_ptr[i][j], cudaFuncCachePreferEqual);
                stat = cudaFuncSetCacheConfig(nb_kfunc_noener_noprune_ptr[i][j], cudaFuncCachePreferEqual);
            }
            else
            {
                /* On Fermi, preferring L1 gives 2% higher performance */
                /* Default kernel: 16/48 kB Shared/L1 on sm_2.x */
                cudaFuncSetCacheConfig(nb_kfunc_ener_prune_ptr[i][j], cudaFuncCachePreferL1);
                cudaFuncSetCacheConfig(nb_kfunc_ener_noprune_ptr[i][j], cudaFuncCachePreferL1);
                cudaFuncSetCacheConfig(nb_kfunc_noener_prune_ptr[i][j], cudaFuncCachePreferL1);
                stat = cudaFuncSetCacheConfig(nb_kfunc_noener_noprune_ptr[i][j], cudaFuncCachePreferL1);
            }
            CU_RET_ERR(stat, "cudaFuncSetCacheConfig failed");
        }
    }
}