/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2012, The GROMACS development team.
 * Copyright (c) 2012,2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief
 * Data types used internally in the nbnxn_cuda module.
 *
 * \author Szilárd Páll <pall.szilard@gmail.com>
 * \ingroup module_nbnxm
 */
#ifndef NBNXM_CUDA_TYPES_H
#define NBNXM_CUDA_TYPES_H
#include "gromacs/gpu_utils/cuda_arch_utils.cuh"
#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/devicebuffer.h"
#include "gromacs/gpu_utils/gputraits.cuh"
#include "gromacs/mdtypes/interaction_const.h"
#include "gromacs/nbnxm/gpu_types_common.h"
#include "gromacs/nbnxm/pairlist.h"
#include "gromacs/timing/gpu_timing.h"
/*! \brief Macro defining the default for the prune kernel's j4 processing concurrency.
 *
 * The GMX_NBNXN_PRUNE_KERNEL_J4_CONCURRENCY macro allows compile-time override.
 */
#ifndef GMX_NBNXN_PRUNE_KERNEL_J4_CONCURRENCY
#define GMX_NBNXN_PRUNE_KERNEL_J4_CONCURRENCY 4
#endif
/*! \brief Default for the prune kernel's j4 processing concurrency.
 *
 * Initialized using the #GMX_NBNXN_PRUNE_KERNEL_J4_CONCURRENCY macro which allows compile-time override.
 */
const int c_cudaPruneKernelJ4Concurrency = GMX_NBNXN_PRUNE_KERNEL_J4_CONCURRENCY;
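/* For illustration (an assumed build invocation, not verbatim GROMACS usage):
 * the default above can be overridden when compiling, e.g.
 *
 *   // nvcc -DGMX_NBNXN_PRUNE_KERNEL_J4_CONCURRENCY=2 ...
 *   // => c_cudaPruneKernelJ4Concurrency evaluates to 2 instead of 4
 */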
/* TODO: consider moving this to kernel_utils */
/* Convenience defines */
/*! \brief number of clusters per supercluster. */
static const int c_numClPerSupercl = c_nbnxnGpuNumClusterPerSupercluster;

/*! \brief cluster size = number of atoms per cluster. */
static const int c_clSize = c_nbnxnGpuClusterSize;
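/* For illustration (the concrete values are an assumption here; the constants
 * come from pairlist.h): with c_numClPerSupercl = 8 and c_clSize = 8, one
 * supercluster spans c_numClPerSupercl * c_clSize = 64 atoms.
 */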
/*! \brief Electrostatic CUDA kernel flavors.
 *
 * Types of electrostatics implementations available in the CUDA non-bonded
 * force kernels. These represent both the electrostatics types implemented
 * by the kernels (cut-off, RF, and Ewald - a subset of what's defined in
 * enums.h) as well as encode implementation details: analytical/tabulated
 * and single or twin cut-off (for Ewald kernels).
 * Note that the cut-off and RF kernels have only an analytical flavor and,
 * unlike the CPU kernels, the tabulated kernels are currently Ewald-only.
 *
 * The row order of pointers to the different electrostatic kernels defined in
 * nbnxn_cuda.cu by the nb_*_kfunc_ptr function pointer table
 * should match the order of the enumerated types below.
 */
enum eelCu
{
    eelCuCUT, eelCuRF, eelCuEWALD_TAB, eelCuEWALD_TAB_TWIN, eelCuEWALD_ANA, eelCuEWALD_ANA_TWIN, eelCuNR
};
/*! \brief VdW CUDA kernel flavors.
 *
 * The enumerated values correspond to the LJ implementations in the CUDA non-bonded
 * kernels.
 *
 * The column order of pointers to the different electrostatic kernels defined in
 * nbnxn_cuda.cu by the nb_*_kfunc_ptr function pointer table
 * should match the order of the enumerated types below.
 */
enum evdwCu
{
    evdwCuCUT, evdwCuCUTCOMBGEOM, evdwCuCUTCOMBLB, evdwCuFSWITCH, evdwCuPSWITCH, evdwCuEWALDGEOM, evdwCuEWALDLB, evdwCuNR
};
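/* A hypothetical sketch (table and variable names are illustrative, not
 * verbatim) of how the two enums above index the kernel function-pointer
 * table in nbnxn_cuda.cu: rows follow eelCu, columns follow evdwCu.
 *
 *   // auto kernel = nb_kfunc_ptr[nbparam->eeltype][nbparam->vdwtype];
 *   // kernel<<<gridDim, blockDim, shmemSize, stream>>>(atdat, nbp, plist, bCalcFshift);
 */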
/* All structs prefixed with "cu_" hold data used in GPU calculations and
 * are passed to the kernels, except cu_timers_t. */
typedef struct cu_atomdata cu_atomdata_t;
typedef struct cu_nbparam  cu_nbparam_t;
typedef struct nb_staging  nb_staging_t;
/** \internal
 * \brief Staging area for temporary data downloaded from the GPU.
 *
 * The energies/shift forces get downloaded here first, before getting added
 * to the CPU-side aggregate values.
 */
struct nb_staging
{
    float  *e_lj;   /**< LJ energy            */
    float  *e_el;   /**< electrostatic energy */
    float3 *fshift; /**< shift forces         */
};
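/* For illustration only (assumed host-side usage, not the verbatim GROMACS
 * code): energies are staged here after a device-to-host copy, then folded
 * into the CPU-side totals.
 *
 *   // cudaMemcpyAsync(nbst.e_lj, d_e_lj, sizeof(float), cudaMemcpyDeviceToHost, stream);
 *   // cudaStreamSynchronize(stream);
 *   // *enerLjTotal += *nbst.e_lj;
 */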
/** \internal
 * \brief Nonbonded atom data - both inputs and outputs.
 */
struct cu_atomdata
{
    int natoms;       /**< number of atoms                           */
    int natoms_local; /**< number of local atoms                     */
    int nalloc;       /**< allocation size for the atom data (xq, f) */

    float4 *xq; /**< atom coordinates + charges, size natoms */
    float3 *f;  /**< force output array, size natoms          */

    float *e_lj; /**< LJ energy output, size 1             */
    float *e_el; /**< Electrostatics energy output, size 1 */

    float3 *fshift; /**< shift forces */

    int     ntypes;     /**< number of atom types            */
    int    *atom_types; /**< atom type indices, size natoms  */
    float2 *lj_comb;    /**< sqrt(c6),sqrt(c12), size natoms */

    float3 *shift_vec;         /**< shifts                                     */
    bool    bShiftVecUploaded; /**< true if the shift vector has been uploaded */
};
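/* For illustration (layout implied by the field comments above): each xq
 * element packs a position and a charge into a single float4, so one load
 * fetches both.
 *
 *   // const float4 xqi = atdat->xq[i];
 *   // xqi.x, xqi.y, xqi.z : coordinates of atom i
 *   // xqi.w               : charge of atom i
 */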
/** \internal
 * \brief Parameters required for the CUDA nonbonded calculations.
 */
struct cu_nbparam
{
    int eeltype; /**< type of electrostatics, takes values from #eelCu */
    int vdwtype; /**< type of VdW impl., takes values from #evdwCu     */

    float epsfac;        /**< charge multiplication factor                                          */
    float c_rf;          /**< Reaction-field/plain cutoff electrostatics const.                     */
    float two_k_rf;      /**< Reaction-field electrostatics constant                                */
    float ewald_beta;    /**< Ewald/PME parameter                                                   */
    float sh_ewald;      /**< Ewald/PME correction term subtracted from the direct-space potential  */
    float sh_lj_ewald;   /**< LJ-Ewald/PME correction term added to the correction potential        */
    float ewaldcoeff_lj; /**< LJ-Ewald/PME coefficient                                              */

    float rcoulomb_sq; /**< Coulomb cut-off squared */

    float rvdw_sq;           /**< VdW cut-off squared                             */
    float rvdw_switch;       /**< VdW switched cut-off                            */
    float rlistOuter_sq;     /**< Full, outer pair-list cut-off squared           */
    float rlistInner_sq;     /**< Inner, dynamic pruned pair-list cut-off squared */
    bool  useDynamicPruning; /**< True if we use dynamic pair-list pruning        */

    shift_consts_t  dispersion_shift; /**< VdW shift dispersion constants */
    shift_consts_t  repulsion_shift;  /**< VdW shift repulsion constants  */
    switch_consts_t vdw_switch;       /**< VdW switch constants           */

    /* LJ non-bonded parameters - accessed through texture memory */
    float              *nbfp;             /**< nonbonded parameter table with C6/C12 pairs per atom type-pair, 2*ntype^2 elements */
    cudaTextureObject_t nbfp_texobj;      /**< texture object bound to nbfp      */
    float              *nbfp_comb;        /**< nonbonded parameter table per atom type, 2*ntype elements */
    cudaTextureObject_t nbfp_comb_texobj; /**< texture object bound to nbfp_comb */

    /* Ewald Coulomb force table data - accessed through texture memory */
    float               coulomb_tab_scale;  /**< table scale/spacing                   */
    float              *coulomb_tab;        /**< pointer to the table in device memory */
    cudaTextureObject_t coulomb_tab_texobj; /**< texture object bound to coulomb_tab   */
};
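/* For illustration (assumed device-side access pattern; tex1Dfetch<float>()
 * is the standard CUDA texture-object read): the Ewald force table is indexed
 * with a distance scaled by coulomb_tab_scale.
 *
 *   // const int   idx = static_cast<int>(r * nbparam.coulomb_tab_scale);
 *   // const float F   = tex1Dfetch<float>(nbparam.coulomb_tab_texobj, idx);
 */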
/** \internal
 * \brief Pair list data.
 */
using cu_plist_t = gpu_plist;
/** \internal
 * \brief Typedef of actual timer type.
 */
typedef struct nbnxn_gpu_timers_t cu_timers_t;
/** \internal
 * \brief Main data structure for CUDA nonbonded force calculations.
 */
struct gmx_nbnxn_cuda_t
{
    const gmx_device_info_t *dev_info;       /**< CUDA device information                           */
    bool                     bUseTwoStreams; /**< true if doing both local/non-local NB work on GPU */
    cu_atomdata_t           *atdat;          /**< atom data                                         */
    cu_nbparam_t            *nbparam;        /**< parameters required for the non-bonded calc.      */
    cu_plist_t              *plist[2];       /**< pair-list data structures (local and non-local)   */
    nb_staging_t             nbst;           /**< staging area where fshift/energies get downloaded */

    cudaStream_t stream[2]; /**< local and non-local GPU streams */

    /** events used for synchronization */
    cudaEvent_t nonlocal_done;               /**< event triggered when the non-local non-bonded kernel
                                                  is done (and the local transfer can proceed)      */
    cudaEvent_t misc_ops_and_local_H2D_done; /**< event triggered when the tasks issued in
                                                  the local stream that need to precede the
                                                  non-local force calculations are done
                                                  (e.g. f buffer 0-ing, local x/q H2D)              */

    /* NOTE: With current CUDA versions (<=5.0) timing doesn't work with multiple
     * concurrent streams, so we won't time if both l/nl work is done on GPUs.
     * Timer init/uninit is still done even with timing off so only the condition
     * setting bDoTime needs to be changed if this CUDA "feature" gets fixed. */
    bool                       bDoTime; /**< True if event-based timing is enabled. */
    cu_timers_t               *timers;  /**< CUDA event-based timers.               */
    gmx_wallclock_gpu_nbnxn_t *timings; /**< Timing data. TODO: deprecate this and query timers for accumulated data instead */
};
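/* An illustrative sketch (assumed host-side scheduling, not the verbatim
 * GROMACS code) of how the events above order the two streams: work issued in
 * the local stream waits until the non-local kernel has finished.
 *
 *   // cudaEventRecord(nb->nonlocal_done, nb->stream[1]);        // non-local stream
 *   // cudaStreamWaitEvent(nb->stream[0], nb->nonlocal_done, 0); // local stream waits
 */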
#endif /* NBNXM_CUDA_TYPES_H */