/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \file
 *  \brief Define CUDA implementation of nbnxn_gpu.h
 *
 *  \author Szilard Pall <pall.szilard@gmail.com>
 */
#include "gmxpre.h"

#include "gromacs/mdlib/nbnxn_gpu.h"

#include "config.h"

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/mdlib/force_flags.h"
#include "gromacs/mdlib/nb_verlet.h"
#include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
#include "gromacs/mdlib/nbnxn_pairlist.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/gmxassert.h"

#include "nbnxn_cuda_types.h"

/*! Texture reference for LJ C6/C12 parameters; bound to cu_nbparam_t.nbfp */
texture<float, 1, cudaReadModeElementType> nbfp_texref;

/*! Texture reference for LJ-PME parameters; bound to cu_nbparam_t.nbfp_comb */
texture<float, 1, cudaReadModeElementType> nbfp_comb_texref;

/*! Texture reference for Ewald coulomb force table; bound to cu_nbparam_t.coulomb_tab */
texture<float, 1, cudaReadModeElementType> coulomb_tab_texref;
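
/* Illustration (not the actual GROMACS data-management code, which lives
 * elsewhere): binding one of the above texture references to a device buffer
 * uses the legacy texture-reference runtime API. The buffer name and size
 * below are hypothetical.
 *
 *   float      *d_nbfp;           // device copy of the LJ parameter table
 *   size_t      nbfp_bytes;       // its size in bytes
 *   cudaError_t stat = cudaBindTexture(NULL, nbfp_texref, d_nbfp, nbfp_bytes);
 *   CU_RET_ERR(stat, "cudaBindTexture on nbfp_texref failed");
 */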

/***** The kernel declarations/definitions come here *****/
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel_utils.cuh"

/* Top-level kernel declaration generation: will generate through multiple
 * inclusion the following flavors for all kernel declarations:
 * - force-only output;
 * - force and energy output;
 * - force-only with pair list pruning;
 * - force and energy output with pair list pruning.
 */
#define FUNCTION_DECLARATION_ONLY
/** Force only **/
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernels.cuh"
/** Force & energy **/
#define CALC_ENERGIES
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernels.cuh"
#undef CALC_ENERGIES

/*** Pair-list pruning kernels ***/
/** Force only **/
#define PRUNE_NBL
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernels.cuh"
/** Force & energy **/
#define CALC_ENERGIES
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernels.cuh"
#undef CALC_ENERGIES
#undef PRUNE_NBL
#undef FUNCTION_DECLARATION_ONLY
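
/* A minimal, self-contained sketch of the multiple-inclusion technique used
 * above (hypothetical names, not GROMACS code): the included header derives
 * the kernel name from the flavor macros each time it is pulled in, e.g.
 *
 *   // flavor_kernel.cuh
 *   #ifdef CALC_ENERGIES
 *   #define KERNEL_NAME my_kernel_VF
 *   #else
 *   #define KERNEL_NAME my_kernel_F
 *   #endif
 *   __global__ void KERNEL_NAME(const params_t p);
 *   #undef KERNEL_NAME
 *
 * so including it once plainly and once with CALC_ENERGIES defined generates
 * both flavors of the declaration from a single template.
 */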

/* Now generate the function definitions if we are using a single compilation unit. */
#if GMX_CUDA_NB_SINGLE_COMPILATION_UNIT
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel_F_noprune.cu"
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel_F_prune.cu"
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel_VF_noprune.cu"
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel_VF_prune.cu"
#else
/* Prevent compilation in multiple compilation unit mode for CC 2.x. Although we have
 * build-time checks to prevent this, the user could manually tweak the nvcc flags,
 * which would lead to buggy kernels getting compiled.
 */
#if GMX_PTX_ARCH > 0 && GMX_PTX_ARCH <= 210
#error Due to a CUDA compiler bug, the CUDA non-bonded module cannot be compiled with multiple compilation units for CC 2.x devices. If you have changed the nvcc flags manually, either use the GMX_CUDA_TARGET_* variables instead or set the GMX_CUDA_NB_SINGLE_COMPILATION_UNIT=ON CMake option.
#endif
#endif /* GMX_CUDA_NB_SINGLE_COMPILATION_UNIT */

/*! Nonbonded kernel function pointer type */
typedef void (*nbnxn_cu_kfunc_ptr_t)(const cu_atomdata_t,
                                     const cu_nbparam_t,
                                     const cu_plist_t,
                                     bool);

/*********************************/

/* XXX switch between chevron and cudaLaunchKernel (supported only in CUDA >=7.0)
   -- only for benchmarking purposes */
static const bool bUseCudaLaunchKernel =
    (GMX_CUDA_VERSION >= 7000) && (getenv("GMX_DISABLE_CUDALAUNCH") == NULL);

/* XXX always/never run the energy/pruning kernels -- only for benchmarking purposes */
static bool always_ener  = (getenv("GMX_GPU_ALWAYS_ENER") != NULL);
static bool never_ener   = (getenv("GMX_GPU_NEVER_ENER") != NULL);
static bool always_prune = (getenv("GMX_GPU_ALWAYS_PRUNE") != NULL);

/*! Returns the number of blocks to be used for the nonbonded GPU kernel. */
static inline int calc_nb_kernel_nblock(int nwork_units, gmx_device_info_t *dinfo)
{
    int max_grid_x_size;

    assert(dinfo);
    /* CUDA does not accept grid dimension of 0 (which can happen e.g. with an
       empty domain) and that case should be handled before this point. */
    assert(nwork_units > 0);

    max_grid_x_size = dinfo->prop.maxGridSize[0];

    /* do we exceed the grid x dimension limit? */
    if (nwork_units > max_grid_x_size)
    {
        gmx_fatal(FARGS, "Watch out, the input system is too large to simulate!\n"
                  "The number of nonbonded work units (=number of super-clusters) exceeds the "
                  "maximum grid size in x dimension (%d > %d)!", nwork_units, max_grid_x_size);
    }

    return nwork_units;
}

/* Constant arrays listing all kernel function pointers and enabling selection
   of a kernel in an elegant manner. */

/*! Pointers to the non-bonded kernels, organized in 2-dim arrays by
 *  electrostatics and VDW type.
 *
 *  Note that the row- and column-order of the function pointers has to match the
 *  order of the corresponding enumerated electrostatics and vdw types, resp.,
 *  defined in nbnxn_cuda_types.h.
 */

/*! Force-only kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_noprune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_F_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_F_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_F_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_F_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_F_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_cuda }
};

/*! Force + energy kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_noprune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_VF_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_VF_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_cuda }
};

/*! Force + pruning kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_prune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_cuda }
};

/*! Force + energy + pruning kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_prune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_cuda }
};
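
/* Example lookup (enum value names assumed from nbnxn_cuda_types.h):
 * reaction-field electrostatics with plain LJ cut-off, force-only, no pruning:
 *
 *   nbnxn_cu_kfunc_ptr_t k = nb_kfunc_noener_noprune_ptr[eelCuRF][evdwCuCUT];
 *
 * which resolves to nbnxn_kernel_ElecRF_VdwLJ_F_cuda in the first table above.
 */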

/*! Return a pointer to the kernel version to be executed at the current step. */
static inline nbnxn_cu_kfunc_ptr_t select_nbnxn_kernel(int                                  eeltype,
                                                       int                                  evdwtype,
                                                       bool                                 bDoEne,
                                                       bool                                 bDoPrune,
                                                       struct gmx_device_info_t gmx_unused *devInfo)
{
    nbnxn_cu_kfunc_ptr_t res;

    GMX_ASSERT(eeltype < eelCuNR,
               "The electrostatics type requested is not implemented in the CUDA kernels.");
    GMX_ASSERT(evdwtype < evdwCuNR,
               "The VdW type requested is not implemented in the CUDA kernels.");

    /* assert assumptions made by the kernels */
    GMX_ASSERT(c_nbnxnGpuClusterSize*c_nbnxnGpuClusterSize/c_nbnxnGpuClusterpairSplit == devInfo->prop.warpSize,
               "The CUDA kernels require cluster_size_i*cluster_size_j/nbnxn_gpu_clusterpair_split to match the warp size of the targeted architecture.");

    if (bDoEne)
    {
        res = bDoPrune ? nb_kfunc_ener_prune_ptr[eeltype][evdwtype]
                       : nb_kfunc_ener_noprune_ptr[eeltype][evdwtype];
    }
    else
    {
        res = bDoPrune ? nb_kfunc_noener_prune_ptr[eeltype][evdwtype]
                       : nb_kfunc_noener_noprune_ptr[eeltype][evdwtype];
    }

    return res;
}

/*! Calculates the amount of shared memory required by the CUDA kernel in use. */
static inline int calc_shmem_required(const int num_threads_z, gmx_device_info_t gmx_unused *dinfo, const cu_nbparam_t *nbp)
{
    int shmem;

    assert(dinfo);

    /* size of shmem (force-buffers/xq/atom type preloading) */
    /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
    /* i-atom x+q in shared memory */
    shmem  = c_numClPerSupercl * c_clSize * sizeof(float4);
    /* cj in shared memory, for each warp separately */
    shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);
    if (dinfo->prop.major >= 3)
    {
        if (nbp->vdwtype == evdwCuCUTCOMBGEOM ||
            nbp->vdwtype == evdwCuCUTCOMBLB)
        {
            /* i-atom LJ combination parameters in shared memory */
            shmem += c_numClPerSupercl * c_clSize * sizeof(float2);
        }
        else
        {
            /* i-atom types in shared memory */
            shmem += c_numClPerSupercl * c_clSize * sizeof(int);
        }
    }
    if (dinfo->prop.major < 3)
    {
        /* force reduction buffers in shared memory */
        shmem += c_clSize * c_clSize * 3 * sizeof(float);
    }

    return shmem;
}
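
/* Worked example for the above, assuming the common values
 * c_numClPerSupercl = 8, c_clSize = 8, c_nbnxnGpuClusterpairSplit = 2 and
 * c_nbnxnGpuJgroupSize = 4, with num_threads_z = 1 on an sm_3.0+ device
 * using geometric LJ combination rules:
 *   xq preload:     8*8*sizeof(float4) = 1024 B
 *   cj preload:     1*2*4*sizeof(int)  =   32 B
 *   LJ comb params: 8*8*sizeof(float2) =  512 B
 * i.e. roughly 1.5 kB of shared memory per thread block. */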

/*! As we execute nonbonded workload in separate streams, before launching
   the kernel we need to make sure that the following operations have completed:
   - atomdata allocation and related H2D transfers (every nstlist step);
   - pair list H2D transfer (every nstlist step);
   - shift vector H2D transfer (every nstlist step);
   - force (+shift force and energy) output clearing (every step).

   These operations are issued in the local stream at the beginning of the step
   and therefore always complete before the local kernel launch. The non-local
   kernel is launched after the local one, on the same device/context, and hence
   is inherently scheduled after the operations in the local stream (including
   the above "misc_ops") on pre-GK110 devices with a single hardware queue; but
   on later devices with multiple hardware queues the dependency needs to be
   enforced. We use the misc_ops_and_local_H2D_done event to record the point
   where the local x+q H2D (and all preceding) tasks are complete and synchronize
   with this event in the non-local stream before launching the non-bonded kernel.
 */
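
/* A stripped-down sketch of the event-based cross-stream ordering described
 * above (hypothetical variable names, not the GROMACS code itself):
 *
 *   cudaEvent_t  done;
 *   cudaStream_t local, nonlocal;
 *   cudaEventCreateWithFlags(&done, cudaEventDisableTiming);
 *   // ... enqueue misc ops and the local x+q H2D in the local stream ...
 *   cudaEventRecord(done, local);             // mark the completion point
 *   cudaStreamWaitEvent(nonlocal, done, 0);   // non-local stream waits on it
 *   // ... only now launch the non-local kernel in the nonlocal stream ...
 */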

void nbnxn_gpu_launch_kernel(gmx_nbnxn_cuda_t       *nb,
                             const nbnxn_atomdata_t *nbatom,
                             int                     flags,
                             int                     iloc)
{
    cudaError_t          stat;
    int                  adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */
    /* CUDA kernel launch-related stuff */
    int                  shmem, nblock;
    dim3                 dim_block, dim_grid;
    nbnxn_cu_kfunc_ptr_t nb_kernel = NULL;     /* fn pointer to the nonbonded kernel */

    cu_atomdata_t       *adat   = nb->atdat;
    cu_nbparam_t        *nbp    = nb->nbparam;
    cu_plist_t          *plist  = nb->plist[iloc];
    cu_timers_t         *t      = nb->timers;
    cudaStream_t         stream = nb->stream[iloc];

    bool                 bCalcEner   = flags & GMX_FORCE_ENERGY;
    bool                 bCalcFshift = flags & GMX_FORCE_VIRIAL;
    bool                 bDoTime     = nb->bDoTime;

    /* turn energy calculation always on/off (for debugging/testing only) */
    bCalcEner = (bCalcEner || always_ener) && !never_ener;

    /* Don't launch the non-local kernel if there is no work to do.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local kernel, the local x+q copy, and later (not in
       this function) the stream wait, local f copyback and the f buffer
       clearing. All these operations, except for the local interaction kernel,
       are needed for the non-local interactions. The skip of the local kernel
       call is taken care of later in this function. */
    if (iloc == eintNonlocal && plist->nsci == 0)
    {
        return;
    }

    /* calculate the atom data index range based on locality */
    if (LOCAL_I(iloc))
    {
        adat_begin = 0;
        adat_len   = adat->natoms_local;
    }
    else
    {
        adat_begin = adat->natoms_local;
        adat_len   = adat->natoms - adat->natoms_local;
    }

    /* beginning of timed HtoD section */
    if (bDoTime)
    {
        stat = cudaEventRecord(t->start_nb_h2d[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

    /* HtoD x, q */
    cu_copy_H2D_async(adat->xq + adat_begin, nbatom->x + adat_begin * 4,
                      adat_len * sizeof(*adat->xq), stream);

    /* When we get here, all misc operations issued in the local stream as well
       as the local xq H2D are done, so we record that in the local stream and
       wait for it in the nonlocal one. */
    if (nb->bUseTwoStreams)
    {
        if (iloc == eintLocal)
        {
            stat = cudaEventRecord(nb->misc_ops_and_local_H2D_done, stream);
            CU_RET_ERR(stat, "cudaEventRecord on misc_ops_and_local_H2D_done failed");
        }
        else
        {
            stat = cudaStreamWaitEvent(stream, nb->misc_ops_and_local_H2D_done, 0);
            CU_RET_ERR(stat, "cudaStreamWaitEvent on misc_ops_and_local_H2D_done failed");
        }
    }

    if (bDoTime)
    {
        stat = cudaEventRecord(t->stop_nb_h2d[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

    if (plist->nsci == 0)
    {
        /* Don't launch an empty local kernel (not allowed with CUDA) */
        return;
    }

    /* beginning of timed nonbonded calculation section */
    if (bDoTime)
    {
        stat = cudaEventRecord(t->start_nb_k[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

    /* get the pointer to the kernel flavor we need to use */
    nb_kernel = select_nbnxn_kernel(nbp->eeltype,
                                    nbp->vdwtype,
                                    bCalcEner,
                                    plist->bDoPrune || always_prune,
                                    nb->dev_info);

    /* Kernel launch config:
     * - The thread block dimensions match the size of i-clusters, j-clusters,
     *   and j-cluster concurrency, in x, y, and z, respectively.
     * - The 1D block-grid contains as many blocks as super-clusters.
     */
    int num_threads_z = 1;
    if ((nb->dev_info->prop.major == 3 && nb->dev_info->prop.minor == 7) ||
        (nb->dev_info->prop.major == 6 && nb->dev_info->prop.minor == 0))
    {
        num_threads_z = 2;
    }
    nblock    = calc_nb_kernel_nblock(plist->nsci, nb->dev_info);
    dim_block = dim3(c_clSize, c_clSize, num_threads_z);
    dim_grid  = dim3(nblock, 1, 1);
    shmem     = calc_shmem_required(num_threads_z, nb->dev_info, nbp);
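
    /* Example, assuming c_clSize = 8: dim_block is 8x8x1 (64 threads/block)
     * on most devices, or 8x8x2 (128 threads/block) where num_threads_z = 2
     * (CC 3.7 and 6.0, see above). */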

    if (debug)
    {
        fprintf(debug, "GPU launch configuration:\n\tThread block: %dx%dx%d\n"
                "\tGrid: %dx%d\n\t#Super-clusters/clusters: %d/%d (%d)\n"
                "\tShMem: %d\n",
                dim_block.x, dim_block.y, dim_block.z,
                dim_grid.x, dim_grid.y, plist->nsci*c_numClPerSupercl,
                c_numClPerSupercl, plist->na_c,
                shmem);
    }

    if (bUseCudaLaunchKernel)
    {
        gmx_unused void* kernel_args[4];
        kernel_args[0] = adat;
        kernel_args[1] = nbp;
        kernel_args[2] = plist;
        kernel_args[3] = &bCalcFshift;

#if GMX_CUDA_VERSION >= 7000
        cudaLaunchKernel((void *)nb_kernel, dim_grid, dim_block, kernel_args, shmem, stream);
#endif
    }
    else
    {
        /* launch the kernel with the chevron syntax */
        nb_kernel<<< dim_grid, dim_block, shmem, stream>>> (*adat, *nbp, *plist, bCalcFshift);
    }
    CU_LAUNCH_ERR("k_calc_nb");

    if (bDoTime)
    {
        stat = cudaEventRecord(t->stop_nb_k[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

#if (defined(WIN32) || defined( _WIN32 ))
    /* Windows: force flushing WDDM queue */
    stat = cudaStreamQuery(stream);
#endif
}

void nbnxn_gpu_launch_cpyback(gmx_nbnxn_cuda_t       *nb,
                              const nbnxn_atomdata_t *nbatom,
                              int                     flags,
                              int                     aloc)
{
    cudaError_t stat;
    int         adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */
    int         iloc = -1;

    /* determine interaction locality from atom locality */
    if (LOCAL_A(aloc))
    {
        iloc = eintLocal;
    }
    else if (NONLOCAL_A(aloc))
    {
        iloc = eintNonlocal;
    }
    else
    {
        char stmp[STRLEN];
        sprintf(stmp, "Invalid atom locality passed (%d); valid here is only "
                "local (%d) or nonlocal (%d)", aloc, eatLocal, eatNonlocal);
        gmx_incons(stmp);
    }

    cu_atomdata_t *adat    = nb->atdat;
    cu_timers_t   *t       = nb->timers;
    bool           bDoTime = nb->bDoTime;
    cudaStream_t   stream  = nb->stream[iloc];

    bool           bCalcEner   = flags & GMX_FORCE_ENERGY;
    bool           bCalcFshift = flags & GMX_FORCE_VIRIAL;

    /* don't launch non-local copy-back if there was no non-local work to do */
    if (iloc == eintNonlocal && nb->plist[iloc]->nsci == 0)
    {
        return;
    }

    /* calculate the atom data index range based on locality */
    if (LOCAL_A(aloc))
    {
        adat_begin = 0;
        adat_len   = adat->natoms_local;
    }
    else
    {
        adat_begin = adat->natoms_local;
        adat_len   = adat->natoms - adat->natoms_local;
    }

    /* beginning of timed D2H section */
    if (bDoTime)
    {
        stat = cudaEventRecord(t->start_nb_d2h[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

    /* With DD the local D2H transfer can only start after the non-local
       kernel has finished. */
    if (iloc == eintLocal && nb->bUseTwoStreams)
    {
        stat = cudaStreamWaitEvent(stream, nb->nonlocal_done, 0);
        CU_RET_ERR(stat, "cudaStreamWaitEvent on nonlocal_done failed");
    }

    /* DtoH f */
    cu_copy_D2H_async(nbatom->out[0].f + adat_begin * 3, adat->f + adat_begin,
                      adat_len * sizeof(*adat->f), stream);

    /* After the non-local D2H is launched the nonlocal_done event can be
       recorded which signals that the local D2H can proceed. This event is not
       placed after the non-local kernel because we want the non-local data
       back first. */
    if (iloc == eintNonlocal)
    {
        stat = cudaEventRecord(nb->nonlocal_done, stream);
        CU_RET_ERR(stat, "cudaEventRecord on nonlocal_done failed");
    }

    /* only transfer energies in the local stream */
    if (LOCAL_I(iloc))
    {
        /* DtoH fshift */
        if (bCalcFshift)
        {
            cu_copy_D2H_async(nb->nbst.fshift, adat->fshift,
                              SHIFTS * sizeof(*nb->nbst.fshift), stream);
        }

        /* DtoH energies */
        if (bCalcEner)
        {
            cu_copy_D2H_async(nb->nbst.e_lj, adat->e_lj,
                              sizeof(*nb->nbst.e_lj), stream);
            cu_copy_D2H_async(nb->nbst.e_el, adat->e_el,
                              sizeof(*nb->nbst.e_el), stream);
        }
    }

    if (bDoTime)
    {
        stat = cudaEventRecord(t->stop_nb_d2h[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }
}

void nbnxn_gpu_wait_for_gpu(gmx_nbnxn_cuda_t *nb,
                            int flags, int aloc,
                            real *e_lj, real *e_el, rvec *fshift)
{
    /* NOTE: only implemented for single-precision at this time */
    cudaError_t stat;
    int         iloc = -1;

    /* determine interaction locality from atom locality */
    if (LOCAL_A(aloc))
    {
        iloc = eintLocal;
    }
    else if (NONLOCAL_A(aloc))
    {
        iloc = eintNonlocal;
    }
    else
    {
        char stmp[STRLEN];
        sprintf(stmp, "Invalid atom locality passed (%d); valid here is only "
                "local (%d) or nonlocal (%d)", aloc, eatLocal, eatNonlocal);
        gmx_incons(stmp);
    }

    cu_plist_t                 *plist   = nb->plist[iloc];
    cu_timers_t                *timers  = nb->timers;
    struct gmx_wallclock_gpu_t *timings = nb->timings;
    nb_staging                  nbst    = nb->nbst;
    bool                        bDoTime = nb->bDoTime;

    bool                        bCalcEner   = flags & GMX_FORCE_ENERGY;
    bool                        bCalcFshift = flags & GMX_FORCE_VIRIAL;

    /* turn energy calculation always on/off (for debugging/testing only) */
    bCalcEner = (bCalcEner || always_ener) && !never_ener;

    /* Launch wait/update timers & counters, unless doing the non-local phase
       when there is not actually work to do. This is consistent with
       nbnxn_gpu_launch_kernel.

       NOTE: if timing with multiple GPUs (streams) becomes possible, the
       counters could end up being inconsistent due to not being incremented
       on some of the nodes! */
    if (iloc == eintNonlocal && nb->plist[iloc]->nsci == 0)
    {
        return;
    }

    stat = cudaStreamSynchronize(nb->stream[iloc]);
    CU_RET_ERR(stat, "cudaStreamSynchronize failed in cu_blockwait_nb");

    /* timing data accumulation */
    if (bDoTime)
    {
        /* only increase counter once (at local F wait) */
        if (LOCAL_I(iloc))
        {
            timings->nb_c++;
            timings->ktime[plist->bDoPrune ? 1 : 0][bCalcEner ? 1 : 0].c += 1;
        }

        /* kernel timings */
        timings->ktime[plist->bDoPrune ? 1 : 0][bCalcEner ? 1 : 0].t +=
            cu_event_elapsed(timers->start_nb_k[iloc], timers->stop_nb_k[iloc]);

        /* X/q H2D and F D2H timings */
        timings->nb_h2d_t += cu_event_elapsed(timers->start_nb_h2d[iloc],
                                              timers->stop_nb_h2d[iloc]);
        timings->nb_d2h_t += cu_event_elapsed(timers->start_nb_d2h[iloc],
                                              timers->stop_nb_d2h[iloc]);

        /* only count atdat and pair-list H2D at pair-search step */
        if (plist->bDoPrune)
        {
            /* atdat transfer timing (add only once, at local F wait) */
            if (LOCAL_A(aloc))
            {
                timings->pl_h2d_c++;
                timings->pl_h2d_t += cu_event_elapsed(timers->start_atdat,
                                                      timers->stop_atdat);
            }

            timings->pl_h2d_t += cu_event_elapsed(timers->start_pl_h2d[iloc],
                                                  timers->stop_pl_h2d[iloc]);
        }
    }

    /* add up energies and shift forces (only once at local F wait) */
    if (LOCAL_I(iloc))
    {
        if (bCalcEner)
        {
            *e_lj += *nbst.e_lj;
            *e_el += *nbst.e_el;
        }

        if (bCalcFshift)
        {
            for (int i = 0; i < SHIFTS; i++)
            {
                fshift[i][0] += nbst.fshift[i].x;
                fshift[i][1] += nbst.fshift[i].y;
                fshift[i][2] += nbst.fshift[i].z;
            }
        }
    }

    /* turn off pruning (doesn't matter if this is pair-search step or not) */
    plist->bDoPrune = false;
}

/*! Return the reference to the nbfp texture. */
const struct texture<float, 1, cudaReadModeElementType> &nbnxn_cuda_get_nbfp_texref()
{
    return nbfp_texref;
}

/*! Return the reference to the nbfp_comb texture. */
const struct texture<float, 1, cudaReadModeElementType> &nbnxn_cuda_get_nbfp_comb_texref()
{
    return nbfp_comb_texref;
}

/*! Return the reference to the coulomb_tab texture. */
const struct texture<float, 1, cudaReadModeElementType> &nbnxn_cuda_get_coulomb_tab_texref()
{
    return coulomb_tab_texref;
}

/*! Set up the cache configuration for the non-bonded kernels.
 */
void nbnxn_cuda_set_cacheconfig(gmx_device_info_t *devinfo)
{
    cudaError_t stat;

    for (int i = 0; i < eelCuNR; i++)
    {
        for (int j = 0; j < evdwCuNR; j++)
        {
            if (devinfo->prop.major >= 3)
            {
                /* Default kernel: 32/32 kB shared/L1 on sm_3.x and later */
                cudaFuncSetCacheConfig(nb_kfunc_ener_prune_ptr[i][j], cudaFuncCachePreferEqual);
                cudaFuncSetCacheConfig(nb_kfunc_ener_noprune_ptr[i][j], cudaFuncCachePreferEqual);
                cudaFuncSetCacheConfig(nb_kfunc_noener_prune_ptr[i][j], cudaFuncCachePreferEqual);
                stat = cudaFuncSetCacheConfig(nb_kfunc_noener_noprune_ptr[i][j], cudaFuncCachePreferEqual);
            }
            else
            {
                /* On Fermi, preferring L1 gives ~2% higher performance */
                /* Default kernel: 16/48 kB shared/L1 on sm_2.x */
                cudaFuncSetCacheConfig(nb_kfunc_ener_prune_ptr[i][j], cudaFuncCachePreferL1);
                cudaFuncSetCacheConfig(nb_kfunc_ener_noprune_ptr[i][j], cudaFuncCachePreferL1);
                cudaFuncSetCacheConfig(nb_kfunc_noener_prune_ptr[i][j], cudaFuncCachePreferL1);
                stat = cudaFuncSetCacheConfig(nb_kfunc_noener_noprune_ptr[i][j], cudaFuncCachePreferL1);
            }
            CU_RET_ERR(stat, "cudaFuncSetCacheConfig failed");
        }
    }
}