/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 *
 *
 *                This source code is part of
 *
 *                 G   R   O   M   A   C   S
 *
 *          GROningen MAchine for Chemical Simulations
 *
 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2012, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * If you want to redistribute modifications, please consider that
 * scientific software is very special. Version control is crucial -
 * bugs must be traceable. We will be happy to consider code for
 * inclusion in the official distribution, but derived work must not
 * be called official GROMACS. Details are found in the README & COPYING
 * files - if they are missing, get the official version at www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the papers on the package - you can find them in the top README file.
 *
 * For more info, check our website at http://www.gromacs.org
 *
 * And Hey:
 * Gallium Rubidium Oxygen Manganese Argon Carbon Silicon
 */

/* Note that floating-point constants in CUDA code should be suffixed
 * with f (e.g. 0.5f), to stop the compiler producing intermediate
 * code that is in double precision.
 */
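
/* For illustration only (not part of the original kernel): an unsuffixed
   constant forces the arithmetic through double precision, while the
   f-suffixed form stays in single precision throughout:

       float x = 1.0f;
       float a = x * 0.5;    // x promoted to double, result truncated to float
       float b = x * 0.5f;   // single-precision multiply
 */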

/* Analytical Ewald is not implemented for the legacy kernels (as it is anyway
   slower than the tabulated kernel on Fermi). */
#ifdef EL_EWALD_ANA
#error Trying to generate analytical Ewald kernels, which are not implemented in the legacy kernels!
#endif

/*
   Kernel launch parameters:
    - #blocks   = #pair lists, blockId = pair list Id
    - #threads  = CL_SIZE^2
    - shmem     = CL_SIZE^2 * sizeof(float)

    Each thread calculates an i-force component, processing one i-j atom pair
    at a time.
 */
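
/* A minimal host-side launch sketch consistent with the parameters above.
   The grid/stream variable names are illustrative only, and k_nbnxn_legacy
   stands in for whatever name NB_KERNEL_FUNC_NAME expands to:

       dim3 dim_block(CL_SIZE, CL_SIZE, 1);               // CL_SIZE^2 threads
       dim3 dim_grid(nsci, 1, 1);                         // one block per pair list entry
       size_t shmem = CL_SIZE * CL_SIZE * sizeof(float);  // dynamic shmem, sized as above
       k_nbnxn_legacy<<<dim_grid, dim_block, shmem, stream>>>
           (atdat, nbparam, plist, bCalcFshift);
 */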
#if __CUDA_ARCH__ >= 350
__launch_bounds__(64, 16)
#endif
#ifdef PRUNE_NBL
#ifdef CALC_ENERGIES
__global__ void NB_KERNEL_FUNC_NAME(k_nbnxn, _ener_prune_legacy)
#else
__global__ void NB_KERNEL_FUNC_NAME(k_nbnxn, _prune_legacy)
#endif
#else
#ifdef CALC_ENERGIES
__global__ void NB_KERNEL_FUNC_NAME(k_nbnxn, _ener_legacy)
#else
__global__ void NB_KERNEL_FUNC_NAME(k_nbnxn, _legacy)
#endif
#endif
            (const cu_atomdata_t atdat,
             const cu_nbparam_t nbparam,
             const cu_plist_t plist,
             bool bCalcFshift)
{
    /* convenience variables */
    const nbnxn_sci_t *pl_sci   = plist.sci;
#ifndef PRUNE_NBL
    const
#endif
    nbnxn_cj4_t *pl_cj4         = plist.cj4;
    const nbnxn_excl_t *excl    = plist.excl;
    const int *atom_types       = atdat.atom_types;
    int ntypes                  = atdat.ntypes;
    const float4 *xq            = atdat.xq;
    float3 *f                   = atdat.f;
    const float3 *shift_vec     = atdat.shift_vec;
    float rcoulomb_sq           = nbparam.rcoulomb_sq;
#ifdef VDW_CUTOFF_CHECK
    float rvdw_sq               = nbparam.rvdw_sq;
    float vdw_in_range;
#endif
#ifdef EL_RF
    float two_k_rf              = nbparam.two_k_rf;
#endif
#ifdef EL_EWALD_TAB
    float coulomb_tab_scale     = nbparam.coulomb_tab_scale;
#endif
#ifdef PRUNE_NBL
    float rlist_sq              = nbparam.rlist_sq;
#endif

#ifdef CALC_ENERGIES
    float lj_shift    = nbparam.sh_invrc6;
#ifdef EL_EWALD_TAB
    float beta        = nbparam.ewald_beta;
    float ewald_shift = nbparam.sh_ewald;
#else
    float c_rf        = nbparam.c_rf;
#endif
    float *e_lj       = atdat.e_lj;
    float *e_el       = atdat.e_el;
#endif

    /* thread/block/warp id-s */
    unsigned int tidxi = threadIdx.x;
    unsigned int tidxj = threadIdx.y;
    unsigned int tidx  = threadIdx.y * blockDim.x + threadIdx.x;
    unsigned int bidx  = blockIdx.x;
    unsigned int widx  = tidx / WARP_SIZE; /* warp index */

    int sci, ci, cj, ci_offset,
        ai, aj,
        cij4_start, cij4_end,
        typei, typej,
        i, cii, jm, j4, nsubi, wexcl_idx;
    float qi, qj_f,
          r2, inv_r, inv_r2, inv_r6,
          c6, c12,
#ifdef CALC_ENERGIES
          E_lj, E_el, E_lj_p,
#endif
          int_bit,
          F_invr;
    unsigned int wexcl, imask, mask_ji;
    float4 xqbuf;
    float3 xi, xj, rv, f_ij, fcj_buf, fshift_buf;
    float3 fci_buf[NCL_PER_SUPERCL]; /* i force buffer */
    nbnxn_sci_t nb_sci;

    /* shmem buffer for i x+q pre-loading */
    extern __shared__ float4 xqib[];
    /* shmem j force buffer */
    float *f_buf = (float *)(xqib + NCL_PER_SUPERCL * CL_SIZE);

    nb_sci     = pl_sci[bidx];         /* my i super-cluster's index = current bidx */
    sci        = nb_sci.sci;           /* super-cluster index */
    cij4_start = nb_sci.cj4_ind_start; /* first ... */
    cij4_end   = nb_sci.cj4_ind_end;   /* ... and last index of j clusters */

    /* Store the i-atom x and q in shared memory */
    /* Note: the thread indexing here is inverted with respect to the
       inner loop, as this results in slightly higher performance. */
    ci = sci * NCL_PER_SUPERCL + tidxi;
    ai = ci * CL_SIZE + tidxj;
    xqib[tidxi * CL_SIZE + tidxj] = xq[ai] + shift_vec[nb_sci.shift];
    __syncthreads();

    for (ci_offset = 0; ci_offset < NCL_PER_SUPERCL; ci_offset++)
    {
        fci_buf[ci_offset] = make_float3(0.0f);
    }

#ifdef CALC_ENERGIES
    E_lj = 0.0f;
    E_el = 0.0f;

#if defined EL_EWALD_TAB || defined EL_RF
    if (nb_sci.shift == CENTRAL && pl_cj4[cij4_start].cj[0] == sci*NCL_PER_SUPERCL)
    {
        /* we have the diagonal: add the charge self-interaction energy term */
        for (i = 0; i < NCL_PER_SUPERCL; i++)
        {
            qi    = xqib[i * CL_SIZE + tidxi].w;
            E_el += qi*qi;
        }
        /* divide the self term equally over the j-threads */
        E_el /= CL_SIZE;
#ifdef EL_RF
        E_el *= -nbparam.epsfac*0.5f*c_rf;
#else
        E_el *= -nbparam.epsfac*beta*M_FLOAT_1_SQRTPI; /* last factor 1/sqrt(pi) */
#endif
    }
#endif /* EL_EWALD_TAB || EL_RF */
#endif /* CALC_ENERGIES */

    /* skip central shifts when summing shift forces */
    if (nb_sci.shift == CENTRAL)
    {
        bCalcFshift = false;
    }

    fshift_buf = make_float3(0.0f);

    /* loop over the j clusters seen by any of the atoms in the current super-cluster */
    for (j4 = cij4_start; j4 < cij4_end; j4++)
    {
        wexcl_idx = pl_cj4[j4].imei[widx].excl_ind;
        imask     = pl_cj4[j4].imei[widx].imask;
        wexcl     = excl[wexcl_idx].pair[(tidx) & (WARP_SIZE - 1)];
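
        /* Each of the warp's 32 lanes holds one 32-bit exclusion mask: bit
           (jm * NCL_PER_SUPERCL + i) of wexcl marks whether this thread's
           atom pair in cluster pair (i, jm) is a real, non-excluded
           interaction. */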

#ifndef PRUNE_NBL
        if (imask)
#endif
        {
            /* nvcc >v4.1 doesn't like this loop, it refuses to unroll it */
#if CUDA_VERSION >= 4010
#pragma unroll 4
#endif
            for (jm = 0; jm < NBNXN_GPU_JGROUP_SIZE; jm++)
            {
                mask_ji = (imask >> (jm * CL_SIZE)) & supercl_interaction_mask;
                if (mask_ji)
                {
                    nsubi = __popc(mask_ji); /* number of i-clusters to process */

                    cj = pl_cj4[j4].cj[jm];
                    aj = cj * CL_SIZE + tidxj;

                    /* load j atom data */
                    xqbuf = xq[aj];
                    xj    = make_float3(xqbuf.x, xqbuf.y, xqbuf.z);
                    qj_f  = nbparam.epsfac * xqbuf.w;
                    typej = atom_types[aj];

                    fcj_buf = make_float3(0.0f);

                    /* loop over the i-clusters in sci */
                    /* #pragma unroll 8/4
                       -- nvcc doesn't like my code, it refuses to unroll it,
                       which is a pity because here unrolling could help. */
                    for (cii = 0; cii < nsubi; cii++)
                    {
                        i        = __ffs(mask_ji) - 1;
                        mask_ji &= ~(1U << i);
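
                        /* The two statements above walk the set bits of
                           mask_ji: __ffs returns the 1-based position of the
                           lowest set bit, which is then cleared, so after
                           nsubi iterations each flagged i-cluster has been
                           visited exactly once. */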

                        ci_offset = i;                  /* i force buffer offset */

                        ci = sci * NCL_PER_SUPERCL + i; /* i cluster index */
                        ai = ci * CL_SIZE + tidxi;      /* i atom index */

                        /* all threads load an atom from i cluster ci into shmem! */
                        xqbuf = xqib[i * CL_SIZE + tidxi];
                        xi    = make_float3(xqbuf.x, xqbuf.y, xqbuf.z);

                        /* distance between i and j atoms */
                        rv = xi - xj;
                        r2 = norm2(rv);

#ifdef PRUNE_NBL
                        /* If _none_ of the atom pairs is in cutoff range,
                           the bit corresponding to the current
                           cluster pair in imask gets set to 0. */
                        if (!__any(r2 < rlist_sq))
                        {
                            imask &= ~(1U << (jm * NCL_PER_SUPERCL + i));
                        }
#endif

                        int_bit = ((wexcl >> (jm * NCL_PER_SUPERCL + i)) & 1) ? 1.0f : 0.0f;

                        /* cutoff & exclusion check */
#if defined EL_EWALD_TAB || defined EL_RF
                        if (r2 < rcoulomb_sq *
                            (nb_sci.shift != CENTRAL || ci != cj || tidxj > tidxi))
#else
                        if (r2 < rcoulomb_sq * int_bit)
#endif
                        {
                            /* load the rest of the i-atom parameters */
                            qi    = xqbuf.w;
                            typei = atom_types[ai];

                            /* LJ 6*C6 and 12*C12 */
                            c6  = tex1Dfetch(nbfp_texref, 2 * (ntypes * typei + typej));
                            c12 = tex1Dfetch(nbfp_texref, 2 * (ntypes * typei + typej) + 1);

                            /* avoid NaN for excluded pairs at r=0 */
                            r2 += (1.0f - int_bit) * NBNXN_AVOID_SING_R2_INC;

                            inv_r  = rsqrt(r2);
                            inv_r2 = inv_r * inv_r;
                            inv_r6 = inv_r2 * inv_r2 * inv_r2;
#if defined EL_EWALD_TAB || defined EL_RF
                            /* We could mask inv_r2, but with Ewald
                             * masking both inv_r6 and F_invr is faster */
                            inv_r6 *= int_bit;
#endif

                            F_invr = inv_r6 * (c12 * inv_r6 - c6) * inv_r2;

#ifdef CALC_ENERGIES
                            /* shifted LJ energy; 0.08333333f = 1/12, 0.16666667f = 1/6 */
                            E_lj_p = int_bit * (c12 * (inv_r6 * inv_r6 - lj_shift * lj_shift) * 0.08333333f - c6 * (inv_r6 - lj_shift) * 0.16666667f);
#endif

#ifdef VDW_CUTOFF_CHECK
                            /* this enables twin-range cut-offs (rvdw < rcoulomb <= rlist) */
                            vdw_in_range = (r2 < rvdw_sq) ? 1.0f : 0.0f;
                            F_invr *= vdw_in_range;
#ifdef CALC_ENERGIES
                            E_lj_p *= vdw_in_range;
#endif
#endif
#ifdef CALC_ENERGIES
                            E_lj += E_lj_p;
#endif

#ifdef EL_CUTOFF
                            F_invr += qi * qj_f * inv_r2 * inv_r;
#endif
#ifdef EL_RF
                            F_invr += qi * qj_f * (int_bit*inv_r2 * inv_r - two_k_rf);
#endif
#ifdef EL_EWALD_TAB
                            F_invr += qi * qj_f * (int_bit*inv_r2 - interpolate_coulomb_force_r(r2 * inv_r, coulomb_tab_scale)) * inv_r;
#endif /* EL_EWALD_TAB */

#ifdef CALC_ENERGIES
#ifdef EL_CUTOFF
                            E_el += qi * qj_f * (inv_r - c_rf);
#endif
#ifdef EL_RF
                            E_el += qi * qj_f * (int_bit*inv_r + 0.5f * two_k_rf * r2 - c_rf);
#endif
#ifdef EL_EWALD_TAB
                            /* 1.0f - erff is faster than erfcf */
                            E_el += qi * qj_f * (inv_r * (int_bit - erff(r2 * inv_r * beta)) - int_bit * ewald_shift);
#endif
#endif

                            f_ij = rv * F_invr;

                            /* accumulate j forces in registers */
                            fcj_buf -= f_ij;

                            /* accumulate i forces in registers */
                            fci_buf[ci_offset] += f_ij;
                        }
                    }

                    /* store j forces in shmem */
                    f_buf[                  tidx] = fcj_buf.x;
                    f_buf[    FBUF_STRIDE + tidx] = fcj_buf.y;
                    f_buf[2 * FBUF_STRIDE + tidx] = fcj_buf.z;

                    /* reduce j forces */
                    reduce_force_j_generic(f_buf, f, tidxi, tidxj, aj);
                }
            }
#ifdef PRUNE_NBL
            /* Update the imask with the new one which does not contain the
               out-of-range clusters anymore. */
            pl_cj4[j4].imei[widx].imask = imask;
#endif
        }
    }

    /* reduce i forces */
    for (ci_offset = 0; ci_offset < NCL_PER_SUPERCL; ci_offset++)
    {
        ai = (sci * NCL_PER_SUPERCL + ci_offset) * CL_SIZE + tidxi;
        f_buf[                  tidx] = fci_buf[ci_offset].x;
        f_buf[    FBUF_STRIDE + tidx] = fci_buf[ci_offset].y;
        f_buf[2 * FBUF_STRIDE + tidx] = fci_buf[ci_offset].z;
        __syncthreads();
        reduce_force_i(f_buf, f,
                       &fshift_buf, bCalcFshift,
                       tidxi, tidxj, ai);
        __syncthreads();
    }

    /* add up local shift forces into global mem */
    if (bCalcFshift && tidxj == 0)
    {
        atomicAdd(&atdat.fshift[nb_sci.shift].x, fshift_buf.x);
        atomicAdd(&atdat.fshift[nb_sci.shift].y, fshift_buf.y);
        atomicAdd(&atdat.fshift[nb_sci.shift].z, fshift_buf.z);
    }

#ifdef CALC_ENERGIES
    /* flush the energies to shmem and reduce them */
    f_buf[              tidx] = E_lj;
    f_buf[FBUF_STRIDE + tidx] = E_el;
    reduce_energy_pow2(f_buf + (tidx & WARP_SIZE), e_lj, e_el, tidx & ~WARP_SIZE);
#endif
}