/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *  \brief
 *  CUDA non-bonded kernel used through preprocessor-based code generation
 *  of multiple kernel flavors, see nbnxn_cuda_kernels.cuh.
 *
 *  NOTE: No include fence as it is meant to be included multiple times.
 *
 *  \author Szilárd Páll <pall.szilard@gmail.com>
 *  \ingroup module_mdlib
 */
#include "gromacs/math/utilities.h"
#include "gromacs/pbcutil/ishift.h"
/* Note that floating-point constants in CUDA code should be suffixed
 * with f (e.g. 0.5f), to stop the compiler producing intermediate
 * code that is in double precision.
 */
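/* For illustration only (hypothetical snippet, not part of this kernel):
 *
 *     float r2a = 0.5  * x;   // 0.5 is double: x is promoted, multiply done in FP64
 *     float r2b = 0.5f * x;   // stays in single precision
 */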
#if __CUDA_ARCH__ >= 300
/* Note: convenience macros, need to be undef-ed at the end of the file. */
#define REDUCE_SHUFFLE
/* On Kepler pre-loading i-atom types to shmem gives a few %,
   but on Fermi it does not */
#define IATYPE_SHMEM
#ifdef HAVE_CUDA_TEXOBJ_SUPPORT
#define USE_TEXOBJ
#endif
#endif /* __CUDA_ARCH__ >= 300 */
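/* A minimal sketch of the warp-shuffle reduction that REDUCE_SHUFFLE selects
 * (the actual reduce_*_warp_shfl helpers live in nbnxn_cuda_kernel_utils.cuh;
 * this simplified butterfly sum is for illustration only):
 *
 *     for (int offset = WARP_SIZE/2; offset > 0; offset >>= 1)
 *     {
 *         val += __shfl_xor(val, offset);  // sum val across the warp, no shmem traffic
 *     }
 */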
#if defined EL_EWALD_ANA || defined EL_EWALD_TAB
/* Note: convenience macro, needs to be undef-ed at the end of the file. */
#define EL_EWALD_ANY
#endif
#if defined EL_EWALD_ANY || defined EL_RF || defined LJ_EWALD || (defined EL_CUTOFF && defined CALC_ENERGIES)
/* Macro to control the calculation of exclusion forces in the kernel.
 * We do that with Ewald (elec/vdw) and RF. Cut-off only has exclusion
 * energy terms.
 *
 * Note: convenience macro, needs to be undef-ed at the end of the file.
 */
#define EXCLUSION_FORCES
#endif
#if defined LJ_EWALD_COMB_GEOM || defined LJ_EWALD_COMB_LB
/* Note: convenience macro, needs to be undef-ed at the end of the file. */
#define LJ_EWALD
#endif
/*
   Kernel launch parameters:
    - #blocks  = #pair lists, blockId = pair list Id
    - #threads = NTHREAD_Z * CL_SIZE^2
    - shmem    = see nbnxn_cuda.cu:calc_shmem_required()

    Each thread calculates an i force-component taking one pair of i-j atoms.
 */
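/* Illustrative host-side launch consistent with the parameters above (a
 * sketch of the call site in nbnxn_cuda.cu; names approximate, for
 * illustration only):
 *
 *     dim3 dim_block(CL_SIZE, CL_SIZE, NTHREAD_Z);  // CL_SIZE^2 * NTHREAD_Z threads
 *     dim3 dim_grid(plist->nsci, 1, 1);             // one block per pair list entry
 *     int  shmem = calc_shmem_required();           // xqib + cjs (+ atib or f_buf)
 *     nb_kernel<<<dim_grid, dim_block, shmem, stream>>>(*adat, *nbp, *plist, bCalcFshift);
 */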
/* NTHREAD_Z controls the number of j-clusters processed concurrently on NTHREAD_Z
 * warp-pairs per block.
 *
 * - On CC 2.0-3.5, 5.0, and 5.2, NTHREAD_Z == 1, translating to 64 th/block with 16
 * blocks/multiproc, is the fastest even though this setup gives low occupancy.
 * NTHREAD_Z > 1 results in excessive register spilling unless the minimum blocks
 * per multiprocessor is reduced proportionally to get the original number of max
 * threads in flight (and slightly lower performance).
 * - On CC 3.7 and 6.0 there are enough registers to double the number of threads; using
 * NTHREAD_Z == 2 is fastest with 16 blocks (TODO: test with RF and other kernels
 * with low-register use).
 *
 * Note that the current kernel implementation only supports NTHREAD_Z > 1 with
 * shuffle-based reduction, hence CC >= 3.0.
 */
/* Kernel launch bounds as function of NTHREAD_Z.
 * - CC 3.0/3.5/5.x, >=6.1: NTHREAD_Z=1, (64, 16) bounds
 * - CC 3.7, 6.0:           NTHREAD_Z=2, (128, 16) bounds
 *
 * Note: convenience macros, need to be undef-ed at the end of the file.
 */
#if __CUDA_ARCH__ == 370 || __CUDA_ARCH__ == 600
#define NTHREAD_Z           (2)
#define MIN_BLOCKS_PER_MP   (16)
#else
#define NTHREAD_Z           (1)
#define MIN_BLOCKS_PER_MP   (16)
#endif /* __CUDA_ARCH__ == 370 || __CUDA_ARCH__ == 600 */
#define THREADS_PER_BLOCK   (CL_SIZE*CL_SIZE*NTHREAD_Z)
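/* With the CL_SIZE == 8 cluster size used by these kernels this evaluates to
 * 8*8*1 = 64 threads/block (NTHREAD_Z == 1) or 8*8*2 = 128 threads/block
 * (NTHREAD_Z == 2), matching the (64, 16) and (128, 16) bounds listed above. */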
#if __CUDA_ARCH__ >= 350
__launch_bounds__(THREADS_PER_BLOCK, MIN_BLOCKS_PER_MP)
#else
__launch_bounds__(THREADS_PER_BLOCK)
#endif /* __CUDA_ARCH__ >= 350 */
#ifdef PRUNE_NBL
#ifdef CALC_ENERGIES
__global__ void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _VF_prune_cuda)
#else
__global__ void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _F_prune_cuda)
#endif /* CALC_ENERGIES */
#else
#ifdef CALC_ENERGIES
__global__ void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _VF_cuda)
#else
__global__ void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _F_cuda)
#endif /* CALC_ENERGIES */
#endif /* PRUNE_NBL */
(const cu_atomdata_t atdat,
 const cu_nbparam_t nbparam,
 const cu_plist_t plist,
 bool bCalcFshift)
{
    /* convenience variables */
    const nbnxn_sci_t *pl_sci       = plist.sci;
#ifndef PRUNE_NBL
    const
#endif
    nbnxn_cj4_t        *pl_cj4      = plist.cj4;
    const nbnxn_excl_t *excl        = plist.excl;
    const int          *atom_types  = atdat.atom_types;
    int                 ntypes      = atdat.ntypes;
    const float4       *xq          = atdat.xq;
    float3             *f           = atdat.f;
    const float3       *shift_vec   = atdat.shift_vec;
    float               rcoulomb_sq = nbparam.rcoulomb_sq;
#ifdef VDW_CUTOFF_CHECK
    float               rvdw_sq     = nbparam.rvdw_sq;
    float               vdw_in_range;
#endif
#ifdef LJ_EWALD
    float               lje_coeff2, lje_coeff6_6;
#endif
#ifdef EL_RF
    float two_k_rf              = nbparam.two_k_rf;
#endif
#ifdef EL_EWALD_TAB
    float coulomb_tab_scale     = nbparam.coulomb_tab_scale;
#endif
#ifdef EL_EWALD_ANA
    float beta2                 = nbparam.ewald_beta*nbparam.ewald_beta;
    float beta3                 = nbparam.ewald_beta*nbparam.ewald_beta*nbparam.ewald_beta;
#endif
#ifdef PRUNE_NBL
    float rlist_sq              = nbparam.rlist_sq;
#endif
#ifdef CALC_ENERGIES
#ifdef EL_EWALD_ANY
    float  beta        = nbparam.ewald_beta;
    float  ewald_shift = nbparam.sh_ewald;
#else
    float  c_rf        = nbparam.c_rf;
#endif /* EL_EWALD_ANY */
    float *e_lj        = atdat.e_lj;
    float *e_el        = atdat.e_el;
#endif /* CALC_ENERGIES */
    /* thread/block/warp id-s */
    unsigned int tidxi = threadIdx.x;
    unsigned int tidxj = threadIdx.y;
    unsigned int tidx  = threadIdx.y * blockDim.x + threadIdx.x;
#if NTHREAD_Z == 1
    unsigned int tidxz = 0;
#else
    unsigned int tidxz = threadIdx.z;
#endif
    unsigned int bidx  = blockIdx.x;
    unsigned int widx  = tidx / WARP_SIZE; /* warp index */
    int          sci, ci, cj, ci_offset,
                 ai, aj,
                 cij4_start, cij4_end,
                 typei, typej,
                 i, jm, j4, wexcl_idx;
    float        qi, qj_f,
                 r2, inv_r, inv_r2, inv_r6,
                 c6, c12,
                 int_bit,
                 F_invr;
#ifdef CALC_ENERGIES
    float        E_lj, E_el;
#endif
#if defined CALC_ENERGIES || defined LJ_POT_SWITCH
    float        E_lj_p;
#endif
    unsigned int wexcl, imask, mask_ji;
    float4       xqbuf;
    float3       xi, xj, rv, f_ij, fcj_buf;
    float3       fci_buf[NCL_PER_SUPERCL]; /* i force buffer */
    nbnxn_sci_t  nb_sci;
    /* shmem buffer for i x+q pre-loading */
    extern __shared__  float4 xqib[];
    /* shmem buffer for cj, for each warp separately */
    int *cjs     = ((int *)(xqib + NCL_PER_SUPERCL * CL_SIZE)) + tidxz * 2 * NBNXN_GPU_JGROUP_SIZE;
#ifdef IATYPE_SHMEM
    /* shmem buffer for i atom-type pre-loading */
    int *atib    = ((int *)(xqib + NCL_PER_SUPERCL * CL_SIZE)) + NTHREAD_Z * 2 * NBNXN_GPU_JGROUP_SIZE;
#endif

#ifndef REDUCE_SHUFFLE
    /* shmem j force buffer */
#ifdef IATYPE_SHMEM
    float *f_buf = (float *)(atib + NCL_PER_SUPERCL * CL_SIZE);
#else
    float *f_buf = (float *)(cjs + NTHREAD_Z * 2 * NBNXN_GPU_JGROUP_SIZE);
#endif
#endif
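/* Resulting shared-memory layout (a sketch; sizes in elements, and
 * nbnxn_cuda.cu:calc_shmem_required() must stay consistent with it):
 *
 *     xqib  : NCL_PER_SUPERCL * CL_SIZE float4           i-atom x+q
 *     cjs   : NTHREAD_Z * 2 * NBNXN_GPU_JGROUP_SIZE int  per-warp j-cluster indices
 *     atib  : NCL_PER_SUPERCL * CL_SIZE int              i-atom types (IATYPE_SHMEM)
 *     f_buf : 3 * FBUF_STRIDE float                      reduction scratch (no shuffle)
 *
 * atib (CC >= 3.0) and f_buf (generic reduction) occupy the space after cjs;
 * f_buf is placed after atib if both happen to be enabled. */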
    nb_sci     = pl_sci[bidx];         /* my i super-cluster's index = current bidx */
    sci        = nb_sci.sci;           /* super-cluster                             */
    cij4_start = nb_sci.cj4_ind_start; /* first ...                                 */
    cij4_end   = nb_sci.cj4_ind_end;   /* ... and last index of j clusters          */
    if (tidxz == 0)
    {
        /* Pre-load i-atom x and q into shared memory */
        ci = sci * NCL_PER_SUPERCL + tidxj;
        ai = ci * CL_SIZE + tidxi;
        xqib[tidxj * CL_SIZE + tidxi] = xq[ai] + shift_vec[nb_sci.shift];
#ifdef IATYPE_SHMEM
        /* Pre-load the i-atom types into shared memory */
        atib[tidxj * CL_SIZE + tidxi] = atom_types[ai];
#endif
    }
    __syncthreads();
    for (ci_offset = 0; ci_offset < NCL_PER_SUPERCL; ci_offset++)
    {
        fci_buf[ci_offset] = make_float3(0.0f);
    }
#ifdef LJ_EWALD
    /* TODO: we are trading registers with flops by keeping the lje_coeff-s, try re-calculating them later */
    lje_coeff2   = nbparam.ewaldcoeff_lj*nbparam.ewaldcoeff_lj;
    lje_coeff6_6 = lje_coeff2*lje_coeff2*lje_coeff2*ONE_SIXTH_F;
#endif /* LJ_EWALD */
#ifdef CALC_ENERGIES
    E_lj = 0.0f;
    E_el = 0.0f;

#if defined EXCLUSION_FORCES /* Ewald or RF */
    if (nb_sci.shift == CENTRAL && pl_cj4[cij4_start].cj[0] == sci*NCL_PER_SUPERCL)
    {
        /* we have the diagonal: add the charge and LJ self interaction energy term */
        for (i = 0; i < NCL_PER_SUPERCL; i++)
        {
#if defined EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF
            qi    = xqib[i * CL_SIZE + tidxi].w;
            E_el += qi*qi;
#endif

#if defined LJ_EWALD
#ifdef USE_TEXOBJ
            E_lj += tex1Dfetch<float>(nbparam.nbfp_texobj, atom_types[(sci*NCL_PER_SUPERCL + i)*CL_SIZE + tidxi]*(ntypes + 1)*2);
#else
            E_lj += tex1Dfetch(nbfp_texref, atom_types[(sci*NCL_PER_SUPERCL + i)*CL_SIZE + tidxi]*(ntypes + 1)*2);
#endif /* USE_TEXOBJ */
#endif /* LJ_EWALD */
        }

        /* divide the self term(s) equally over the j-threads, then multiply with the coefficients. */
#ifdef LJ_EWALD
        E_lj /= CL_SIZE*NTHREAD_Z;
        E_lj *= 0.5f*ONE_SIXTH_F*lje_coeff6_6;
#endif /* LJ_EWALD */

#if defined EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF
        E_el /= CL_SIZE*NTHREAD_Z;
#if defined EL_RF || defined EL_CUTOFF
        E_el *= -nbparam.epsfac*0.5f*c_rf;
#else
        E_el *= -nbparam.epsfac*beta*M_FLOAT_1_SQRTPI; /* last factor 1/sqrt(pi) */
#endif
#endif /* EL_EWALD_ANY || EL_RF || EL_CUTOFF */
    }
#endif /* EXCLUSION_FORCES */

#endif /* CALC_ENERGIES */
    /* skip central shifts when summing shift forces */
    if (nb_sci.shift == CENTRAL)
    {
        bCalcFshift = false;
    }
    /* loop over the j clusters = seen by any of the atoms in the current super-cluster */
    for (j4 = cij4_start + tidxz; j4 < cij4_end; j4 += NTHREAD_Z)
    {
        wexcl_idx = pl_cj4[j4].imei[widx].excl_ind;
        imask     = pl_cj4[j4].imei[widx].imask;
        wexcl     = excl[wexcl_idx].pair[(tidx) & (WARP_SIZE - 1)];

#ifndef PRUNE_NBL
        /* without pruning, cj4 entries with an empty interaction mask can be skipped */
        if (imask)
#endif
        {
            /* Pre-load cj into shared memory on both warps separately */
            if ((tidxj == 0 || tidxj == 4) && tidxi < NBNXN_GPU_JGROUP_SIZE)
            {
                cjs[tidxi + tidxj * NBNXN_GPU_JGROUP_SIZE / 4] = pl_cj4[j4].cj[tidxi];
            }
            /* Unrolling this loop
               - with pruning leads to register spilling;
               - on Kepler it is much slower;
               - doesn't work on CUDA <v4.1
               Tested with nvcc 3.2 - 5.0.7 */
#if !defined PRUNE_NBL && __CUDA_ARCH__ < 300 && GMX_CUDA_VERSION >= 4010
#pragma unroll 4
#endif
            for (jm = 0; jm < NBNXN_GPU_JGROUP_SIZE; jm++)
            {
                /* ((1U << NCL_PER_SUPERCL) - 1U) is the i-cluster interaction
                 * mask for a super-cluster with all NCL_PER_SUPERCL bits set.
                 */
                if (imask & (((1U << NCL_PER_SUPERCL) - 1U) << (jm * NCL_PER_SUPERCL)))
                {
                    mask_ji = (1U << (jm * NCL_PER_SUPERCL));
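                    /* Example with the usual NCL_PER_SUPERCL == 8: bits
                     * [8*jm, 8*jm+7] of imask flag which of the 8 i-clusters
                     * interact with j-cluster jm; mask_ji starts at the lowest
                     * bit of that group and is shifted up once per i-cluster
                     * in the loop below. */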
                    cj = cjs[jm + (tidxj & 4) * NBNXN_GPU_JGROUP_SIZE / 4];
                    aj = cj * CL_SIZE + tidxj;
                    /* load j atom data */
                    xqbuf   = xq[aj];
                    xj      = make_float3(xqbuf.x, xqbuf.y, xqbuf.z);
                    qj_f    = nbparam.epsfac * xqbuf.w;
                    typej   = atom_types[aj];

                    fcj_buf = make_float3(0.0f);
                    /* The PME and RF kernels don't unroll with CUDA <v4.1. */
#if !defined PRUNE_NBL && !(GMX_CUDA_VERSION < 4010 && defined EXCLUSION_FORCES)
#pragma unroll 8
#endif
                    for (i = 0; i < NCL_PER_SUPERCL; i++)
                    {
                        if (imask & mask_ji)
                        {
                            ci_offset = i;                         /* i force buffer offset */

                            ci        = sci * NCL_PER_SUPERCL + i; /* i cluster index */
                            ai        = ci * CL_SIZE + tidxi;      /* i atom index */
                            /* all threads load an atom of i cluster ci from shmem */
                            xqbuf = xqib[i * CL_SIZE + tidxi];
                            xi    = make_float3(xqbuf.x, xqbuf.y, xqbuf.z);
                            /* distance between i and j atoms */
                            rv = xi - xj;
                            r2 = norm2(rv);
#ifdef PRUNE_NBL
                            /* If _none_ of the atom pairs are in cutoff range,
                               the bit corresponding to the current
                               cluster-pair in imask gets set to 0. */
                            if (!__any(r2 < rlist_sq))
                            {
                                imask &= ~mask_ji;
                            }
#endif
                            int_bit = (wexcl & mask_ji) ? 1.0f : 0.0f;
                            /* cutoff & exclusion check */
#ifdef EXCLUSION_FORCES
                            if (r2 < rcoulomb_sq *
                                (nb_sci.shift != CENTRAL || ci != cj || tidxj > tidxi))
#else
                            if (r2 < rcoulomb_sq * int_bit)
#endif
                            {
                                /* load the rest of the i-atom parameters */
                                qi = xqbuf.w;
#ifdef IATYPE_SHMEM
                                typei = atib[i * CL_SIZE + tidxi];
#else
                                typei = atom_types[ai];
#endif
                                /* LJ 6*C6 and 12*C12 */
#ifdef USE_TEXOBJ
                                c6  = tex1Dfetch<float>(nbparam.nbfp_texobj, 2 * (ntypes * typei + typej));
                                c12 = tex1Dfetch<float>(nbparam.nbfp_texobj, 2 * (ntypes * typei + typej) + 1);
#else
                                c6  = tex1Dfetch(nbfp_texref, 2 * (ntypes * typei + typej));
                                c12 = tex1Dfetch(nbfp_texref, 2 * (ntypes * typei + typej) + 1);
#endif /* USE_TEXOBJ */
                                /* avoid NaN for excluded pairs at r=0 */
                                r2 += (1.0f - int_bit) * NBNXN_AVOID_SING_R2_INC;

                                inv_r  = rsqrt(r2);
                                inv_r2 = inv_r * inv_r;
                                inv_r6 = inv_r2 * inv_r2 * inv_r2;
#if defined EXCLUSION_FORCES
                                /* We could mask inv_r2, but with Ewald
                                 * masking both inv_r6 and F_invr is faster */
                                inv_r6 *= int_bit;
#endif /* EXCLUSION_FORCES */
                                F_invr = inv_r6 * (c12 * inv_r6 - c6) * inv_r2;
#if defined CALC_ENERGIES || defined LJ_POT_SWITCH
                                E_lj_p = int_bit * (c12 * (inv_r6 * inv_r6 + nbparam.repulsion_shift.cpot)*ONE_TWELVETH_F -
                                                    c6 * (inv_r6 + nbparam.dispersion_shift.cpot)*ONE_SIXTH_F);
#endif
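                                /* In terms of the plain LJ parameters (the table stores
                                 * 6*C6 and 12*C12, hence the 1/6 and 1/12 factors above):
                                 *   F/|r| = (12*C12/r^12 - 6*C6/r^6)/r^2
                                 *   E     = C12/r^12 - C6/r^6 (+ cutoff shift terms) */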
#ifdef LJ_FORCE_SWITCH
#ifdef CALC_ENERGIES
                                calculate_force_switch_F_E(nbparam, c6, c12, inv_r, r2, &F_invr, &E_lj_p);
#else
                                calculate_force_switch_F(nbparam, c6, c12, inv_r, r2, &F_invr);
#endif /* CALC_ENERGIES */
#endif /* LJ_FORCE_SWITCH */
#ifdef LJ_EWALD
#ifdef LJ_EWALD_COMB_GEOM
#ifdef CALC_ENERGIES
                                calculate_lj_ewald_comb_geom_F_E(nbparam, typei, typej, r2, inv_r2, lje_coeff2, lje_coeff6_6, int_bit, &F_invr, &E_lj_p);
#else
                                calculate_lj_ewald_comb_geom_F(nbparam, typei, typej, r2, inv_r2, lje_coeff2, lje_coeff6_6, &F_invr);
#endif /* CALC_ENERGIES */
#elif defined LJ_EWALD_COMB_LB
                                calculate_lj_ewald_comb_LB_F_E(nbparam, typei, typej, r2, inv_r2, lje_coeff2, lje_coeff6_6,
#ifdef CALC_ENERGIES
                                                               int_bit, &F_invr, &E_lj_p
#else
                                                               0, &F_invr, NULL
#endif /* CALC_ENERGIES */
                                                               );
#endif /* LJ_EWALD_COMB_GEOM */
#endif /* LJ_EWALD */
#ifdef VDW_CUTOFF_CHECK
                                /* Separate VDW cut-off check to enable twin-range cut-offs
                                 * (rvdw < rcoulomb <= rlist)
                                 */
                                vdw_in_range  = (r2 < rvdw_sq) ? 1.0f : 0.0f;
                                F_invr       *= vdw_in_range;
#ifdef CALC_ENERGIES
                                E_lj_p       *= vdw_in_range;
#endif
#endif /* VDW_CUTOFF_CHECK */
#ifdef LJ_POT_SWITCH
#ifdef CALC_ENERGIES
                                calculate_potential_switch_F_E(nbparam, c6, c12, inv_r, r2, &F_invr, &E_lj_p);
#else
                                calculate_potential_switch_F(nbparam, c6, c12, inv_r, r2, &F_invr, &E_lj_p);
#endif /* CALC_ENERGIES */
#endif /* LJ_POT_SWITCH */
#ifdef CALC_ENERGIES
                                E_lj += E_lj_p;
#endif

#ifdef EL_CUTOFF
#ifdef EXCLUSION_FORCES
                                F_invr += qi * qj_f * int_bit * inv_r2 * inv_r;
#else
                                F_invr += qi * qj_f * inv_r2 * inv_r;
#endif
#endif
#ifdef EL_RF
                                F_invr += qi * qj_f * (int_bit*inv_r2 * inv_r - two_k_rf);
#endif
#if defined EL_EWALD_ANA
                                F_invr += qi * qj_f * (int_bit*inv_r2*inv_r + pmecorrF(beta2*r2)*beta3);
#elif defined EL_EWALD_TAB
                                F_invr += qi * qj_f * (int_bit*inv_r2 -
#ifdef USE_TEXOBJ
                                                       interpolate_coulomb_force_r(nbparam.coulomb_tab_texobj, r2 * inv_r, coulomb_tab_scale)
#else
                                                       interpolate_coulomb_force_r(r2 * inv_r, coulomb_tab_scale)
#endif /* USE_TEXOBJ */
                                                       ) * inv_r;
#endif /* EL_EWALD_ANA/TAB */
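                                /* For reference, the analytical Ewald branch computes the
                                 * real-space force
                                 *   F/|r| = qi*qj*(erfc(beta*r) + 2*beta*r/sqrt(pi)*exp(-beta^2*r^2))/r^3;
                                 * pmecorrF() is a polynomial approximation of the difference
                                 * between this and the plain 1/r^3 Coulomb term, which is
                                 * cheaper than evaluating erfcf() directly. */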
#ifdef CALC_ENERGIES
#ifdef EL_CUTOFF
                                E_el += qi * qj_f * (int_bit*inv_r - c_rf);
#endif
#ifdef EL_RF
                                E_el += qi * qj_f * (int_bit*inv_r + 0.5f * two_k_rf * r2 - c_rf);
#endif
#ifdef EL_EWALD_ANY
                                /* 1.0f - erff is faster than erfcf */
                                E_el += qi * qj_f * (inv_r * (int_bit - erff(r2 * inv_r * beta)) - int_bit * ewald_shift);
#endif /* EL_EWALD_ANY */
#endif /* CALC_ENERGIES */
                                f_ij = rv * F_invr;

                                /* accumulate j forces in registers */
                                fcj_buf -= f_ij;

                                /* accumulate i forces in registers */
                                fci_buf[ci_offset] += f_ij;
                            }
                        }

                        /* shift the mask bit by 1 */
                        mask_ji += mask_ji;
                    }
                    /* reduce j forces */
#ifdef REDUCE_SHUFFLE
                    reduce_force_j_warp_shfl(fcj_buf, f, tidxi, aj);
#else
                    /* store j forces in shmem */
                    f_buf[                  tidx] = fcj_buf.x;
                    f_buf[    FBUF_STRIDE + tidx] = fcj_buf.y;
                    f_buf[2 * FBUF_STRIDE + tidx] = fcj_buf.z;

                    reduce_force_j_generic(f_buf, f, tidxi, tidxj, aj);
#endif
                }
            }
#ifdef PRUNE_NBL
            /* Update the imask with the new one which does not contain the
               out of range clusters anymore. */
            pl_cj4[j4].imei[widx].imask = imask;
#endif
        }
    }
    float fshift_buf = 0.0f;
    /* reduce i forces */
    for (ci_offset = 0; ci_offset < NCL_PER_SUPERCL; ci_offset++)
    {
        ai = (sci * NCL_PER_SUPERCL + ci_offset) * CL_SIZE + tidxi;
#ifdef REDUCE_SHUFFLE
        reduce_force_i_warp_shfl(fci_buf[ci_offset], f,
                                 &fshift_buf, bCalcFshift,
                                 tidxj, ai);
#else
        f_buf[                  tidx] = fci_buf[ci_offset].x;
        f_buf[    FBUF_STRIDE + tidx] = fci_buf[ci_offset].y;
        f_buf[2 * FBUF_STRIDE + tidx] = fci_buf[ci_offset].z;
        __syncthreads();
        reduce_force_i(f_buf, f,
                       &fshift_buf, bCalcFshift,
                       tidxi, tidxj, ai);
        __syncthreads();
#endif
    }
    /* add up local shift forces into global mem, tidxj indexes x,y,z */
#ifdef REDUCE_SHUFFLE
    if (bCalcFshift && (tidxj & 3) < 3)
    {
        atomicAdd(&(atdat.fshift[nb_sci.shift].x) + (tidxj & ~4), fshift_buf);
    }
#else
    if (bCalcFshift && tidxj < 3)
    {
        atomicAdd(&(atdat.fshift[nb_sci.shift].x) + tidxj, fshift_buf);
    }
#endif
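    /* Reading of the shuffle-branch indexing above: with CL_SIZE == 8, rows
     * tidxj = 0..2 and 4..6 each hold one half-warp's partial shift force;
     * (tidxj & ~4) folds both triplets onto the x,y,z components, while rows
     * 3 and 7 contribute nothing. */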
#ifdef CALC_ENERGIES
#ifdef REDUCE_SHUFFLE
    /* reduce the energies over warps and store into global memory */
    reduce_energy_warp_shfl(E_lj, E_el, e_lj, e_el, tidx);
#else
    /* flush the energies to shmem and reduce them */
    f_buf[              tidx] = E_lj;
    f_buf[FBUF_STRIDE + tidx] = E_el;
    reduce_energy_pow2(f_buf + (tidx & WARP_SIZE), e_lj, e_el, tidx & ~WARP_SIZE);
#endif
#endif
}
#undef REDUCE_SHUFFLE
#undef IATYPE_SHMEM
#undef USE_TEXOBJ

#undef NTHREAD_Z
#undef MIN_BLOCKS_PER_MP
#undef THREADS_PER_BLOCK

#undef EL_EWALD_ANY
#undef EXCLUSION_FORCES
#undef LJ_EWALD