/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2012, The GROMACS development team.
 * Copyright (c) 2012,2013, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef NBNXN_CUDA_TYPES_H
#define NBNXN_CUDA_TYPES_H

#include "types/nbnxn_pairlist.h"
#include "types/nbnxn_cuda_types_ext.h"
#include "../../gmxlib/cuda_tools/cudautils.cuh"
/* CUDA versions 5.0 and later support texture objects. */
#if CUDA_VERSION >= 5000
#define TEXOBJ_SUPPORTED
#else  /* CUDA_VERSION */
/* This typedef allows us to define only one version of struct cu_nbparam */
typedef int cudaTextureObject_t;
#endif /* CUDA_VERSION */
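/* Illustrative sketch (not part of this header): with TEXOBJ_SUPPORTED the
 * nbfp/coulomb_tab arrays can be accessed through the texture-object API
 * instead of the legacy texture-reference API. The buffer and size names
 * below are hypothetical; only the CUDA runtime calls are real.
 *
 *   cudaResourceDesc rd;
 *   cudaTextureDesc  td;
 *   memset(&rd, 0, sizeof(rd));
 *   memset(&td, 0, sizeof(td));
 *   rd.resType                = cudaResourceTypeLinear;
 *   rd.res.linear.devPtr      = d_table;                  // hypothetical device buffer
 *   rd.res.linear.desc        = cudaCreateChannelDesc<float>();
 *   rd.res.linear.sizeInBytes = table_size * sizeof(float);
 *   td.readMode               = cudaReadModeElementType;
 *   cudaTextureObject_t texobj;
 *   cudaCreateTextureObject(&texobj, &rd, &td, NULL);
 */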
/** Types of electrostatics implementations available in the CUDA non-bonded
 *  force kernels. These represent the electrostatics types implemented by the
 *  kernels (cut-off, RF, and Ewald - a subset of what's defined in enums.h)
 *  and also encode implementation details: analytical vs. tabulated, and
 *  single vs. twin cut-off (for the Ewald kernels).
 *  Note that the cut-off and RF kernels have only an analytical flavor and,
 *  unlike in the CPU kernels, the tabulated kernels are currently Ewald-only.
 *
 *  The order of pointers to the different electrostatics kernels defined in
 *  nbnxn_cuda.cu by the nb_default_kfunc_ptr and nb_legacy_kfunc_ptr arrays
 *  should match the order of the enumerated types below. */
enum {
    eelCuCUT, eelCuRF, eelCuEWALD_TAB, eelCuEWALD_TAB_TWIN,
    eelCuEWALD_ANA, eelCuEWALD_ANA_TWIN, eelCuNR
};
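/* Illustrative sketch (not part of this header): the enum values above are
 * meant to be usable directly as indices when selecting a kernel from the
 * pointer tables in nbnxn_cuda.cu. The table layout and kernel names below
 * are hypothetical; only the indexing-by-eeltype idea comes from the comment
 * above.
 *
 *   typedef void (*nbnxn_cu_kfunc_ptr_t)(void);
 *   static const nbnxn_cu_kfunc_ptr_t kfunc_ptr[eelCuNR] = {
 *       k_cutoff, k_rf, k_ewald_tab, k_ewald_tab_twin, k_ewald_ana, k_ewald_ana_twin
 *   };
 *   nbnxn_cu_kfunc_ptr_t kernel = kfunc_ptr[nbp->eeltype];
 */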
/** Kernel flavors with different sets of optimizations: default for CUDA >=v4.1
 *  compilers and legacy for the earlier, 3.2 and 4.0 CUDA compilers. */
enum {
    eNbnxnCuKDefault, eNbnxnCuKLegacy, eNbnxnCuKNR
};

#define NBNXN_KVER_OLD(k)      (k == eNbnxnCuKOld)
#define NBNXN_KVER_LEGACY(k)   (k == eNbnxnCuKLegacy)
#define NBNXN_KVER_DEFAULT(k)  (k == eNbnxnCuKDefault)

/* Non-bonded kernel versions. */
/* All structs prefixed with "cu_" hold data used in GPU calculations and
 * are passed to the kernels, except cu_timers_t. */
typedef struct cu_plist     cu_plist_t;
typedef struct cu_atomdata  cu_atomdata_t;
typedef struct cu_nbparam   cu_nbparam_t;
typedef struct cu_timers    cu_timers_t;
typedef struct nb_staging   nb_staging_t;
/** Staging area for temporary data. The energies get downloaded here first,
 *  before getting added to the CPU-side aggregate values.
 */
struct nb_staging
{
    float   *e_lj;      /**< LJ energy            */
    float   *e_el;      /**< electrostatic energy */
    float3  *fshift;    /**< shift forces         */
};
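/* Illustrative sketch (not part of this header): after the non-bonded kernel
 * has run, the energies are copied from the device into this staging area and
 * only then folded into the CPU-side totals. The device pointers and totals
 * below are hypothetical.
 *
 *   cudaMemcpyAsync(nbst.e_lj, d_e_lj, sizeof(float),
 *                   cudaMemcpyDeviceToHost, stream);
 *   cudaMemcpyAsync(nbst.e_el, d_e_el, sizeof(float),
 *                   cudaMemcpyDeviceToHost, stream);
 *   cudaStreamSynchronize(stream);
 *   *e_lj_total += *nbst.e_lj;
 *   *e_el_total += *nbst.e_el;
 */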
/** Nonbonded atom data -- both inputs and outputs. */
struct cu_atomdata
{
    int      natoms;            /**< number of atoms                            */
    int      natoms_local;      /**< number of local atoms                      */
    int      nalloc;            /**< allocation size for the atom data (xq, f)  */

    float4  *xq;                /**< atom coordinates + charges, size natoms    */
    float3  *f;                 /**< force output array, size natoms            */
    /* TODO: try float2 for the energies */
    float   *e_lj,              /**< LJ energy output, size 1                   */
            *e_el;              /**< Electrostatics energy output, size 1       */

    float3  *fshift;            /**< shift forces                               */

    int      ntypes;            /**< number of atom types                       */
    int     *atom_types;        /**< atom type indices, size natoms             */

    float3  *shift_vec;         /**< shifts                                     */
    bool     bShiftVecUploaded; /**< true if the shift vector has been uploaded */
};
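/* Illustrative sketch (not part of this header): xq packs position and charge
 * into one float4 per atom (x, y, z in .x/.y/.z, charge in .w), so a kernel
 * can fetch both with a single load. The host-side array names below are
 * hypothetical.
 *
 *   for (int i = 0; i < natoms; i++)
 *   {
 *       h_xq[i] = make_float4(x[i][0], x[i][1], x[i][2], charge[i]);
 *   }
 *   cudaMemcpyAsync(adat->xq, h_xq, natoms*sizeof(float4),
 *                   cudaMemcpyHostToDevice, stream);
 */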
/** Parameters required for the CUDA nonbonded calculations. */
struct cu_nbparam
{
    int      eeltype;       /**< type of electrostatics                            */

    float    epsfac;        /**< charge multiplication factor                      */
    float    c_rf;          /**< Reaction-field/plain cutoff electrostatics const. */
    float    two_k_rf;      /**< Reaction-field electrostatics constant            */
    float    ewald_beta;    /**< Ewald/PME parameter                               */
    float    sh_ewald;      /**< Ewald/PME correction term                         */
    float    rvdw_sq;       /**< squared VdW cut-off                               */
    float    rcoulomb_sq;   /**< squared Coulomb cut-off                           */
    float    rlist_sq;      /**< squared pair-list cut-off                         */
    float    sh_invrc6;     /**< LJ potential correction term                      */

    /* Non-bonded parameters - accessed through texture memory */
    float               *nbfp;        /**< nonbonded parameter table with C6/C12 pairs */
    cudaTextureObject_t  nbfp_texobj; /**< texture object bound to nbfp                */

    /* Ewald Coulomb force table data - accessed through texture memory */
    int                  coulomb_tab_size;   /**< table size (such that it fits in texture cache) */
    float                coulomb_tab_scale;  /**< table scale/spacing                             */
    float               *coulomb_tab;        /**< pointer to the table in the device memory       */
    cudaTextureObject_t  coulomb_tab_texobj; /**< texture object bound to coulomb_tab             */
};
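/* Illustrative sketch (not part of this header): how a tabulated Ewald kernel
 * might turn an interatomic distance into a force-table lookup using
 * coulomb_tab_scale. This is a generic linear-interpolation sketch, not the
 * actual kernel code (which reads the table through the texture machinery).
 *
 *   float  rt    = r * nbp->coulomb_tab_scale;   // table coordinate
 *   int    index = (int)rt;
 *   float  frac  = rt - index;
 *   float  F     = (1.0f - frac) * nbp->coulomb_tab[index]
 *                  + frac * nbp->coulomb_tab[index + 1];
 */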
/** Pair list data. */
struct cu_plist
{
    int              na_c;        /**< number of atoms per cluster                  */

    int              nsci;        /**< size of sci, # of i clusters in the list     */
    int              sci_nalloc;  /**< allocation size of sci                       */
    nbnxn_sci_t     *sci;         /**< list of i-clusters ("super-clusters")        */

    int              ncj4;        /**< total # of 4*j clusters                      */
    int              cj4_nalloc;  /**< allocation size of cj4                       */
    nbnxn_cj4_t     *cj4;         /**< 4*j cluster list, contains j cluster number
                                       and index into the i cluster list            */
    nbnxn_excl_t    *excl;        /**< atom interaction bits                        */
    int              nexcl;       /**< count for excl                               */
    int              excl_nalloc; /**< allocation size of excl                      */

    bool             bDoPrune;    /**< true if pair-list pruning needs to be
                                       done during the current step                 */
};
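/* Illustrative sketch (not part of this header): the pair list is consumed by
 * letting each thread block pick one i super-cluster (sci) and loop over its
 * range of 4-wide j-cluster groups (cj4). The sci/cj4 element types come from
 * types/nbnxn_pairlist.h; the loop itself is a simplified, hypothetical view
 * of the kernel's outer loop.
 *
 *   nbnxn_sci_t nb_sci     = plist->sci[blockIdx.x];
 *   int         cj4_start  = nb_sci.cj4_ind_start;
 *   int         cj4_end    = nb_sci.cj4_ind_end;
 *   for (int j4 = cj4_start; j4 < cj4_end; j4++)
 *   {
 *       nbnxn_cj4_t cj4 = plist->cj4[j4];
 *       // process the (up to) 4 j clusters in cj4 against the i super-cluster
 *   }
 */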
/** CUDA events used for timing GPU kernels and H2D/D2H transfers.
 *  The two-sized arrays hold the local and non-local values and should always
 *  be indexed with eintLocal/eintNonlocal.
 */
struct cu_timers
{
    cudaEvent_t start_atdat;     /**< start event for atom data transfer (every pair-search step)             */
    cudaEvent_t stop_atdat;      /**< stop event for atom data transfer (every pair-search step)              */
    cudaEvent_t start_nb_h2d[2]; /**< start events for x/q H2D transfers (l/nl, every step)                   */
    cudaEvent_t stop_nb_h2d[2];  /**< stop events for x/q H2D transfers (l/nl, every step)                    */
    cudaEvent_t start_nb_d2h[2]; /**< start events for f D2H transfer (l/nl, every step)                      */
    cudaEvent_t stop_nb_d2h[2];  /**< stop events for f D2H transfer (l/nl, every step)                       */
    cudaEvent_t start_pl_h2d[2]; /**< start events for pair-list H2D transfers (l/nl, every pair-search step) */
    cudaEvent_t stop_pl_h2d[2];  /**< stop events for pair-list H2D transfers (l/nl, every pair-search step)  */
    cudaEvent_t start_nb_k[2];   /**< start events for non-bonded kernels (l/nl, every step)                  */
    cudaEvent_t stop_nb_k[2];    /**< stop events for non-bonded kernels (l/nl, every step)                   */
};
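/* Illustrative sketch (not part of this header): how one of these event pairs
 * is typically used to time work queued in a stream; the kernel launch is a
 * placeholder and the timer pointer name is hypothetical.
 *
 *   cudaEventRecord(t->start_nb_k[eintLocal], stream);
 *   // ... launch the non-bonded kernel in `stream` ...
 *   cudaEventRecord(t->stop_nb_k[eintLocal], stream);
 *
 *   // later, once the stream has completed:
 *   float ms;
 *   cudaEventElapsedTime(&ms, t->start_nb_k[eintLocal], t->stop_nb_k[eintLocal]);
 */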
/** Main data structure for CUDA nonbonded force calculations. */
struct nbnxn_cuda
{
    cuda_dev_info_t *dev_info;       /**< CUDA device information                             */
    int              kernel_ver;     /**< The version of the kernel to be executed on the
                                          device in use, possible values: eNbnxnCuK*           */
    bool             bUseTwoStreams; /**< true if doing both local/non-local NB work on GPU    */
    bool             bUseStreamSync; /**< true if the standard cudaStreamSynchronize is used
                                          and not memory polling-based waiting                 */
    cu_atomdata_t   *atdat;          /**< atom data                                            */
    cu_nbparam_t    *nbparam;        /**< parameters required for the non-bonded calc.         */
    cu_plist_t      *plist[2];       /**< pair-list data structures (local and non-local)      */
    nb_staging_t     nbst;           /**< staging area where fshift/energies get downloaded    */

    cudaStream_t     stream[2];      /**< local and non-local GPU streams                      */

    /** events used for synchronization */
    cudaEvent_t      nonlocal_done;  /**< event triggered when the non-local non-bonded kernel
                                          is done (and the local transfer can proceed)         */
    cudaEvent_t      misc_ops_done;  /**< event triggered when the operations that precede the
                                          main force calculations are done (e.g. buffer zeroing) */

    /* NOTE: With current CUDA versions (<=5.0) timing doesn't work with multiple
     * concurrent streams, so we won't time if both local and non-local work is
     * done on the GPU. Timer init/uninit is still done even with timing off, so
     * only the condition setting bDoTime needs to be changed if this CUDA
     * "feature" gets fixed. */
    bool             bDoTime;        /**< True if event-based timing is enabled.               */
    cu_timers_t     *timers;         /**< CUDA event-based timers.                             */
    wallclock_gpu_t *timings;        /**< Timing data.                                         */
};
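/* Illustrative sketch (not part of this header): how nonlocal_done can be used
 * to make the local stream wait for the non-local kernel without blocking the
 * host; the pointer name nb is hypothetical.
 *
 *   cudaEventRecord(nb->nonlocal_done, nb->stream[eintNonlocal]);
 *   cudaStreamWaitEvent(nb->stream[eintLocal], nb->nonlocal_done, 0);
 *   // work queued in the local stream after this point starts only once
 *   // the non-local kernel has completed
 */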
#endif  /* NBNXN_CUDA_TYPES_H */