/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2012, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 * Copyright (c) 2012,2013, by the GROMACS development team, led by
 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
 * others, as listed in the AUTHORS file in the top-level source
 * directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/* Note that floating-point constants in CUDA code should be suffixed
 * with f (e.g. 0.5f), to stop the compiler producing intermediate
 * code that is in double precision.
 */
#if __CUDA_ARCH__ >= 300
#define REDUCE_SHUFFLE
/* On Kepler pre-loading i-atom types to shmem gives a few %,
   but on Fermi it does not */
#define IATYPE_SHMEM
#endif
#if defined EL_EWALD_ANA || defined EL_EWALD_TAB
/* Note: convenience macro, needs to be undef-ed at the end of the file. */
#define EL_EWALD_ANY
#endif
/*
   Kernel launch parameters:
    - #blocks   = #pair lists, blockId = pair list Id
    - #threads  = CL_SIZE^2
    - shmem     = CL_SIZE^2 * sizeof(float)

    Each thread calculates an i force-component taking one pair of i-j atoms.
 */
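/* Added illustration (not part of the original source): a host-side launch
 * consistent with the parameters above could look roughly like the sketch
 * below; nsci, shmem_size and stream are placeholders, and the actual kernel
 * name depends on which flavor macros are defined when this file is included.
 *
 *     dim3 dim_block(CL_SIZE, CL_SIZE, 1);   // CL_SIZE^2 threads per block
 *     dim3 dim_grid(nsci, 1, 1);             // one block per pair-list entry
 *     k_nbnxn_ewald<<<dim_grid, dim_block, shmem_size, stream>>>
 *         (adat, nbp, plist, bCalcFshift);
 */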
#if __CUDA_ARCH__ >= 350
__launch_bounds__(64, 16)
#endif
#ifdef PRUNE_NBL
#ifdef CALC_ENERGIES
__global__ void NB_KERNEL_FUNC_NAME(k_nbnxn, _ener_prune)
#else
__global__ void NB_KERNEL_FUNC_NAME(k_nbnxn, _prune)
#endif
#else
#ifdef CALC_ENERGIES
__global__ void NB_KERNEL_FUNC_NAME(k_nbnxn, _ener)
#else
__global__ void NB_KERNEL_FUNC_NAME(k_nbnxn)
#endif
#endif
            (const cu_atomdata_t atdat,
             const cu_nbparam_t nbparam,
             const cu_plist_t plist,
             bool bCalcFshift)
{
    /* convenience variables */
    const nbnxn_sci_t *pl_sci   = plist.sci;
    nbnxn_cj4_t *pl_cj4         = plist.cj4;
    const nbnxn_excl_t *excl    = plist.excl;
    const int *atom_types       = atdat.atom_types;
    int ntypes                  = atdat.ntypes;
    const float4 *xq            = atdat.xq;
    float3 *f                   = atdat.f;
    const float3 *shift_vec     = atdat.shift_vec;
    float rcoulomb_sq           = nbparam.rcoulomb_sq;
#ifdef VDW_CUTOFF_CHECK
    float rvdw_sq               = nbparam.rvdw_sq;
    float vdw_in_range;
#endif
#ifdef EL_RF
    float two_k_rf              = nbparam.two_k_rf;
#endif
#ifdef EL_EWALD_TAB
    float coulomb_tab_scale     = nbparam.coulomb_tab_scale;
#endif
#ifdef EL_EWALD_ANA
    float beta2                 = nbparam.ewald_beta*nbparam.ewald_beta;
    float beta3                 = nbparam.ewald_beta*nbparam.ewald_beta*nbparam.ewald_beta;
#endif
#ifdef PRUNE_NBL
    float rlist_sq              = nbparam.rlist_sq;
#endif

#ifdef CALC_ENERGIES
    float lj_shift    = nbparam.sh_invrc6;
#ifdef EL_EWALD_ANY
    float beta        = nbparam.ewald_beta;
    float ewald_shift = nbparam.sh_ewald;
#else
    float c_rf        = nbparam.c_rf;
#endif
    float *e_lj       = atdat.e_lj;
    float *e_el       = atdat.e_el;
#endif /* CALC_ENERGIES */
    /* thread/block/warp id-s */
    unsigned int tidxi = threadIdx.x;
    unsigned int tidxj = threadIdx.y;
    unsigned int tidx  = threadIdx.y * blockDim.x + threadIdx.x;
    unsigned int bidx  = blockIdx.x;
    unsigned int widx  = tidx / WARP_SIZE; /* warp index */
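    /* Added note: with the usual CL_SIZE of 8 (an assumption, not defined in
     * this fragment), each block has 8x8 = 64 threads, i.e. two warps of
     * WARP_SIZE = 32, so widx is 0 for tidxj 0-3 and 1 for tidxj 4-7; the
     * tidxj == 0 || tidxj == 4 tests further down rely on this layout. */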
    int          sci, ci, cj, ci_offset, ai, aj,
                 cij4_start, cij4_end, typei, typej,
                 i, jm, j4, wexcl_idx;
    float        qi, qj_f, r2, inv_r, inv_r2, inv_r6,
                 c6, c12, int_bit, F_invr;
#ifdef CALC_ENERGIES
    float        E_lj, E_el, E_lj_p;
#endif
    unsigned int wexcl, imask, mask_ji;
    float4       xqbuf;
    float3       xi, xj, rv, f_ij, fcj_buf, fshift_buf;
    float3       fci_buf[NCL_PER_SUPERCL]; /* i force buffer */
    nbnxn_sci_t  nb_sci;
    /* shmem buffer for i x+q pre-loading */
    extern __shared__ float4 xqib[];
    /* shmem buffer for cj, for both warps separately */
    int *cjs     = (int *)(xqib + NCL_PER_SUPERCL * CL_SIZE);
#ifdef IATYPE_SHMEM
    /* shmem buffer for i atom-type pre-loading */
    int *atib    = (int *)(cjs + 2 * NBNXN_GPU_JGROUP_SIZE);
#endif
#ifndef REDUCE_SHUFFLE
    /* shmem j force buffer */
#ifdef IATYPE_SHMEM
    float *f_buf = (float *)(atib + NCL_PER_SUPERCL * CL_SIZE);
#else
    float *f_buf = (float *)(cjs + 2 * NBNXN_GPU_JGROUP_SIZE);
#endif
#endif
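    /* Added note: with the pointer arithmetic above, the dynamically allocated
     * shared memory is laid out as
     *     xqib  : NCL_PER_SUPERCL*CL_SIZE float4  (i-atom x+q)
     *     cjs   : 2*NBNXN_GPU_JGROUP_SIZE int     (one cj group per warp)
     *     atib  : NCL_PER_SUPERCL*CL_SIZE int     (only with IATYPE_SHMEM)
     *     f_buf : 3*FBUF_STRIDE float             (only without REDUCE_SHUFFLE)
     * so the launch has to request at least the sum of these sizes in bytes;
     * the exact amount is set by the host-side launch code, not in this file. */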
    nb_sci      = pl_sci[bidx];         /* my i super-cluster's index = current bidx */
    sci         = nb_sci.sci;           /* super-cluster */
    cij4_start  = nb_sci.cj4_ind_start; /* first ... */
    cij4_end    = nb_sci.cj4_ind_end;   /* ... and last index of j clusters */
    /* Store the i-atom x and q in shared memory */
    /* Note: the thread indexing here is inverted with respect to the
       inner-loop as this results in slightly higher performance */
    ci = sci * NCL_PER_SUPERCL + tidxi;
    ai = ci * CL_SIZE + tidxj;
    xqib[tidxi * CL_SIZE + tidxj] = xq[ai] + shift_vec[nb_sci.shift];

#ifdef IATYPE_SHMEM
    ci = sci * NCL_PER_SUPERCL + tidxj;
    ai = ci * CL_SIZE + tidxi;
    atib[tidxj * CL_SIZE + tidxi] = atom_types[ai];
#endif
    __syncthreads();
    for (ci_offset = 0; ci_offset < NCL_PER_SUPERCL; ci_offset++)
        fci_buf[ci_offset] = make_float3(0.0f);
#ifdef CALC_ENERGIES
    E_lj = 0.0f;
    E_el = 0.0f;
#if defined EL_EWALD_ANY || defined EL_RF
    if (nb_sci.shift == CENTRAL && pl_cj4[cij4_start].cj[0] == sci*NCL_PER_SUPERCL)
    {
        /* we have the diagonal: add the charge self interaction energy term */
        for (i = 0; i < NCL_PER_SUPERCL; i++)
        {
            qi    = xqib[i * CL_SIZE + tidxi].w;
            E_el += qi*qi;
        }
        /* divide the self term equally over the j-threads */
        E_el /= CL_SIZE;
#ifdef EL_RF
        E_el *= -nbparam.epsfac*0.5f*c_rf;
#else
        E_el *= -nbparam.epsfac*beta*M_FLOAT_1_SQRTPI; /* last factor 1/sqrt(pi) */
#endif
    }
#endif
#endif /* CALC_ENERGIES */
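    /* Added note: the block above is the usual charge self-interaction
     * (neutralizing) term, added once per i atom of the diagonal super-cluster:
     *     Ewald:  E_self = -epsfac * beta/sqrt(pi) * sum_i q_i^2
     *     RF:     E_self = -epsfac * 0.5*c_rf      * sum_i q_i^2
     * split evenly over the CL_SIZE j-threads that share the same i atom. */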
    /* skip central shifts when summing shift forces */
    if (nb_sci.shift == CENTRAL)
        bCalcFshift = false;

    fshift_buf = make_float3(0.0f);
    /* loop over the j clusters = seen by any of the atoms in the current super-cluster */
    for (j4 = cij4_start; j4 < cij4_end; j4++)
    {
        wexcl_idx = pl_cj4[j4].imei[widx].excl_ind;
        imask     = pl_cj4[j4].imei[widx].imask;
        wexcl     = excl[wexcl_idx].pair[(tidx) & (WARP_SIZE - 1)];

#ifndef PRUNE_NBL
        if (imask)
#endif
        {
            /* Pre-load cj into shared memory on both warps separately */
            if ((tidxj == 0 || tidxj == 4) && tidxi < NBNXN_GPU_JGROUP_SIZE)
            {
                cjs[tidxi + tidxj * NBNXN_GPU_JGROUP_SIZE / 4] = pl_cj4[j4].cj[tidxi];
            }
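            /* Added note: assuming CL_SIZE = 8 and NBNXN_GPU_JGROUP_SIZE = 4
             * (neither is defined in this fragment), tidxj == 0 and tidxj == 4
             * are the first thread rows of warp 0 and warp 1, so each warp stores
             * its own copy of the four j-cluster indices in cjs[0..3] and cjs[4..7]. */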
            /* Unrolling this loop
               - with pruning leads to register spilling;
               - on Kepler is much slower;
               - doesn't work on CUDA <v4.1
               Tested with nvcc 3.2 - 5.0.7 */
#if !defined PRUNE_NBL && __CUDA_ARCH__ < 300 && CUDA_VERSION >= 4010
#pragma unroll 4
#endif
            for (jm = 0; jm < NBNXN_GPU_JGROUP_SIZE; jm++)
            {
                if (imask & (supercl_interaction_mask << (jm * NCL_PER_SUPERCL)))
                {
                    mask_ji = (1U << (jm * NCL_PER_SUPERCL));

                    cj      = cjs[jm + (tidxj & 4) * NBNXN_GPU_JGROUP_SIZE / 4];
                    aj      = cj * CL_SIZE + tidxj;

                    /* load j atom data */
                    xqbuf   = xq[aj];
                    xj      = make_float3(xqbuf.x, xqbuf.y, xqbuf.z);
                    qj_f    = nbparam.epsfac * xqbuf.w;
                    typej   = atom_types[aj];

                    fcj_buf = make_float3(0.0f);
                    /* The PME and RF kernels don't unroll with CUDA <v4.1. */
#if !defined PRUNE_NBL && !(CUDA_VERSION < 4010 && (defined EL_EWALD_ANY || defined EL_RF))
#pragma unroll 8
#endif
                    for (i = 0; i < NCL_PER_SUPERCL; i++)
                    {
                        if (imask & mask_ji)
                        {
                            ci_offset = i;                         /* i force buffer offset */
                            ci        = sci * NCL_PER_SUPERCL + i; /* i cluster index */
                            ai        = ci * CL_SIZE + tidxi;      /* i atom index */

                            /* all threads load an atom from i cluster ci into shmem! */
                            xqbuf = xqib[i * CL_SIZE + tidxi];
                            xi    = make_float3(xqbuf.x, xqbuf.y, xqbuf.z);

                            /* distance between i and j atoms */
                            rv = xi - xj;
                            r2 = norm2(rv);
#ifdef PRUNE_NBL
                            /* If _none_ of the atoms pairs are in cutoff range,
                               the bit corresponding to the current
                               cluster-pair in imask gets set to 0. */
                            if (!__any(r2 < rlist_sq))
                            {
                                imask &= ~mask_ji;
                            }
#endif
                            int_bit = (wexcl & mask_ji) ? 1.0f : 0.0f;
                            /* cutoff & exclusion check */
#if defined EL_EWALD_ANY || defined EL_RF
                            if (r2 < rcoulomb_sq *
                                (nb_sci.shift != CENTRAL || ci != cj || tidxj > tidxi))
#else
                            if (r2 < rcoulomb_sq * int_bit)
#endif
                            {
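                                /* Added rationale (not from the original source): with Ewald or RF
                                 * the excluded pairs are not skipped here, because their real-space
                                 * correction term still has to be computed; only the self pair and
                                 * the double-counted pairs of the diagonal cluster pair are masked
                                 * out by the (shift != CENTRAL || ci != cj || tidxj > tidxi) factor.
                                 * With a plain cutoff, int_bit removes excluded pairs entirely. */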
                                /* load the rest of the i-atom parameters */
                                qi      = xqbuf.w;
#ifdef IATYPE_SHMEM
                                typei   = atib[i * CL_SIZE + tidxi];
#else
                                typei   = atom_types[ai];
#endif
                                /* LJ 6*C6 and 12*C12 */
#ifdef USE_TEXOBJ
                                c6      = tex1Dfetch<float>(nbparam.nbfp_texobj, 2 * (ntypes * typei + typej));
                                c12     = tex1Dfetch<float>(nbparam.nbfp_texobj, 2 * (ntypes * typei + typej) + 1);
#else
                                c6      = tex1Dfetch(nbfp_texref, 2 * (ntypes * typei + typej));
                                c12     = tex1Dfetch(nbfp_texref, 2 * (ntypes * typei + typej) + 1);
#endif /* USE_TEXOBJ */
                                /* avoid NaN for excluded pairs at r=0 */
                                r2      += (1.0f - int_bit) * NBNXN_AVOID_SING_R2_INC;

                                inv_r   = rsqrt(r2);
                                inv_r2  = inv_r * inv_r;
                                inv_r6  = inv_r2 * inv_r2 * inv_r2;
#if defined EL_EWALD_ANY || defined EL_RF
                                /* We could mask inv_r2, but with Ewald
                                 * masking both inv_r6 and F_invr is faster */
                                inv_r6  *= int_bit;
#endif
                                F_invr  = inv_r6 * (c12 * inv_r6 - c6) * inv_r2;
#ifdef CALC_ENERGIES
                                E_lj_p  = int_bit * (c12 * (inv_r6 * inv_r6 - lj_shift * lj_shift) * 0.08333333f -
                                                     c6 * (inv_r6 - lj_shift) * 0.16666667f);
#endif
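                                /* Added note: c6 and c12 are stored pre-multiplied by 6 and 12
                                 * (see the "LJ 6*C6 and 12*C12" load above), so the 0.0833... and
                                 * 0.1666... literals are 1/12 and 1/6 and E_lj_p is the usual
                                 * potential-shifted Lennard-Jones energy
                                 *     E = C12*(1/r^12 - 1/rc^12) - C6*(1/r^6 - 1/rc^6),
                                 * with lj_shift = sh_invrc6 = 1/rc^6; F_invr above is
                                 * |F|/r = (12*C12/r^12 - 6*C6/r^6)/r^2. */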
#ifdef VDW_CUTOFF_CHECK
                                /* this enables twin-range cut-offs (rvdw < rcoulomb <= rlist) */
                                vdw_in_range = (r2 < rvdw_sq) ? 1.0f : 0.0f;
                                F_invr  *= vdw_in_range;
#ifdef CALC_ENERGIES
                                E_lj_p  *= vdw_in_range;
#endif
#endif /* VDW_CUTOFF_CHECK */

#ifdef CALC_ENERGIES
                                E_lj    += E_lj_p;
#endif
#ifdef EL_CUTOFF
                                F_invr  += qi * qj_f * inv_r2 * inv_r;
#endif
#ifdef EL_RF
                                F_invr  += qi * qj_f * (int_bit*inv_r2 * inv_r - two_k_rf);
#endif
#if defined EL_EWALD_ANA
                                F_invr  += qi * qj_f * (int_bit*inv_r2*inv_r + pmecorrF(beta2*r2)*beta3);
#elif defined EL_EWALD_TAB
                                F_invr  += qi * qj_f * (int_bit*inv_r2 -
#ifdef USE_TEXOBJ
                                                        interpolate_coulomb_force_r(nbparam.coulomb_tab_texobj, r2 * inv_r, coulomb_tab_scale)
#else
                                                        interpolate_coulomb_force_r(r2 * inv_r, coulomb_tab_scale)
#endif /* USE_TEXOBJ */
                                                        ) * inv_r;
#endif /* EL_EWALD_ANA/TAB */

#ifdef CALC_ENERGIES
#ifdef EL_CUTOFF
                                E_el    += qi * qj_f * (inv_r - c_rf);
#endif
#ifdef EL_RF
                                E_el    += qi * qj_f * (int_bit*inv_r + 0.5f * two_k_rf * r2 - c_rf);
#endif
#ifdef EL_EWALD_ANY
                                /* 1.0f - erff is faster than erfcf */
                                E_el    += qi * qj_f * (inv_r * (int_bit - erff(r2 * inv_r * beta)) - int_bit * ewald_shift);
#endif /* EL_EWALD_ANY */
#endif /* CALC_ENERGIES */
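                                /* Added note: the terms above correspond to
                                 *     cutoff: E = qq*(1/r - c_rf),             F/r = qq/r^3
                                 *     RF:     E = qq*(1/r + k_rf*r^2 - c_rf),  F/r = qq*(1/r^3 - 2*k_rf)
                                 *     Ewald:  E = qq*(erfc(beta*r)/r - sh_ewald)
                                 * with qq = epsfac*q_i*q_j and two_k_rf = 2*k_rf; the analytical
                                 * Ewald kernel gets the real-space force correction from pmecorrF,
                                 * the tabulated one from texture interpolation, and erfc is computed
                                 * as 1 - erf because erff is faster than erfcf here. */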
                                f_ij    = rv * F_invr;
                                /* accumulate j forces in registers */
                                fcj_buf -= f_ij;
                                /* accumulate i forces in registers */
                                fci_buf[ci_offset] += f_ij;
                            }
                        }

                        /* shift the mask bit by 1 */
                        mask_ji += mask_ji;
                    }
                    /* reduce j forces */
#ifdef REDUCE_SHUFFLE
                    reduce_force_j_warp_shfl(fcj_buf, f, tidxi, aj);
#else
                    /* store j forces in shmem */
                    f_buf[                  tidx] = fcj_buf.x;
                    f_buf[    FBUF_STRIDE + tidx] = fcj_buf.y;
                    f_buf[2 * FBUF_STRIDE + tidx] = fcj_buf.z;

                    reduce_force_j_generic(f_buf, f, tidxi, tidxj, aj);
#endif
                }
            }
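            /* Added note: two reduction paths coexist here. With REDUCE_SHUFFLE
             * (CC >= 3.0) the per-thread j forces are reduced inside the warp with
             * shuffle intrinsics; otherwise they are staged in the f_buf shared-memory
             * buffer (x/y/z at stride FBUF_STRIDE) and reduced by the generic path. */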
#ifdef PRUNE_NBL
            /* Update the imask with the new one which does not contain the
               out of range clusters anymore. */
            pl_cj4[j4].imei[widx].imask = imask;
#endif
        }
    }
    /* reduce i forces */
    for (ci_offset = 0; ci_offset < NCL_PER_SUPERCL; ci_offset++)
    {
        ai = (sci * NCL_PER_SUPERCL + ci_offset) * CL_SIZE + tidxi;
#ifdef REDUCE_SHUFFLE
        reduce_force_i_warp_shfl(fci_buf[ci_offset], f,
                                 &fshift_buf, bCalcFshift,
                                 tidxj, ai);
#else
        f_buf[                  tidx] = fci_buf[ci_offset].x;
        f_buf[    FBUF_STRIDE + tidx] = fci_buf[ci_offset].y;
        f_buf[2 * FBUF_STRIDE + tidx] = fci_buf[ci_offset].z;
        reduce_force_i(f_buf, f,
                       &fshift_buf, bCalcFshift,
                       tidxi, tidxj, ai);
#endif
    }
    /* add up local shift forces into global mem */
#ifdef REDUCE_SHUFFLE
    if (bCalcFshift && (tidxj == 0 || tidxj == 4))
#else
    if (bCalcFshift && tidxj == 0)
#endif
    {
        atomicAdd(&atdat.fshift[nb_sci.shift].x, fshift_buf.x);
        atomicAdd(&atdat.fshift[nb_sci.shift].y, fshift_buf.y);
        atomicAdd(&atdat.fshift[nb_sci.shift].z, fshift_buf.z);
    }
#ifdef CALC_ENERGIES
#ifdef REDUCE_SHUFFLE
    /* reduce the energies over warps and store into global memory */
    reduce_energy_warp_shfl(E_lj, E_el, e_lj, e_el, tidx);
#else
    /* flush the energies to shmem and reduce them */
    f_buf[              tidx] = E_lj;
    f_buf[FBUF_STRIDE + tidx] = E_el;
    reduce_energy_pow2(f_buf + (tidx & WARP_SIZE), e_lj, e_el, tidx & ~WARP_SIZE);
#endif
#endif /* CALC_ENERGIES */
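    /* Added note: in the shared-memory path each of the two warps reduces its
     * own 32 contributions: tidx & WARP_SIZE (0 or 32) selects the warp's half
     * of f_buf, while tidx & ~WARP_SIZE is the lane index passed on to
     * reduce_energy_pow2; the warp sums end up accumulated in e_lj and e_el. */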