/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *  \brief
 *  CUDA non-bonded kernel used through preprocessor-based code generation
 *  of multiple kernel flavors for CC 2.x, see nbnxn_cuda_kernels.cuh.
 *
 *  NOTE: No include fence as it is meant to be included multiple times.
 *
 *  \author Szilárd Páll <pall.szilard@gmail.com>
 *  \author Berk Hess <hess@kth.se>
 *  \ingroup module_mdlib
 */
#include "gromacs/gpu_utils/cuda_arch_utils.cuh"
#include "gromacs/gpu_utils/cuda_kernel_utils.cuh"
#include "gromacs/math/utilities.h"
#include "gromacs/pbcutil/ishift.h"
/* Note that floating-point constants in CUDA code should be suffixed
 * with f (e.g. 0.5f), to stop the compiler producing intermediate
 * code that is in double precision.
 */
#if GMX_PTX_ARCH >= 300
#error "nbnxn_cuda_kernel_fermi.cuh included with GMX_PTX_ARCH >= 300"
#endif
#if defined EL_EWALD_ANA || defined EL_EWALD_TAB
/* Note: convenience macro, needs to be undef-ed at the end of the file. */
#define EL_EWALD_ANY
#endif
#if defined EL_EWALD_ANY || defined EL_RF || defined LJ_EWALD || (defined EL_CUTOFF && defined CALC_ENERGIES)
/* Macro to control the calculation of exclusion forces in the kernel.
 * We do that with Ewald (elec/vdw) and RF. Cut-off only has exclusion
 * energy terms.
 *
 * Note: convenience macro, needs to be undef-ed at the end of the file.
 */
#define EXCLUSION_FORCES
#endif
#if defined LJ_EWALD_COMB_GEOM || defined LJ_EWALD_COMB_LB
/* Note: convenience macro, needs to be undef-ed at the end of the file. */
#define LJ_EWALD
#endif
#if defined LJ_COMB_GEOM || defined LJ_COMB_LB
/* Note: convenience macro, needs to be undef-ed at the end of the file. */
#define LJ_COMB
#endif
/*
   Kernel launch parameters:
    - #blocks   = #pair lists, blockId = pair list Id
    - #threads  = c_clSize^2
    - shmem     = see nbnxn_cuda.cu:calc_shmem_required_nonbonded()

    Each thread calculates an i force-component taking one pair of i-j atoms.
 */
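/* A reading of the mapping above, assuming the usual GROMACS GPU cluster
 * size c_clSize = 8: each block runs c_clSize^2 = 64 threads in two warps,
 * and thread (tidxi, tidxj) handles the single pair (i-atom tidxi, j-atom
 * tidxj) for every i-/j-cluster pair the block loops over below.
 */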
/*! \brief Definition of kernel launch configuration parameters for CC 2.x. */
/* Kernel launch bounds, 16 blocks/multiprocessor can be kept in flight. */
#define THREADS_PER_BLOCK (c_clSize*c_clSize)

__launch_bounds__(THREADS_PER_BLOCK)
#ifdef PRUNE_NBL
#ifdef CALC_ENERGIES
__global__ void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _VF_prune_cuda)
#else
__global__ void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _F_prune_cuda)
#endif /* CALC_ENERGIES */
#else
#ifdef CALC_ENERGIES
__global__ void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _VF_cuda)
#else
__global__ void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _F_cuda)
#endif /* CALC_ENERGIES */
#endif /* PRUNE_NBL */
(const cu_atomdata_t atdat,
 const cu_nbparam_t nbparam,
 const cu_plist_t plist,
 bool bCalcFshift)
#ifdef FUNCTION_DECLARATION_ONLY
;   /* Only do function declaration, omit the function body. */
#else
{
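    /* Of the four kernel flavors generated above, the _VF variants also
     * accumulate energies (CALC_ENERGIES) while the _F variants compute
     * forces only; the _prune variants additionally rewrite
     * pl_cj4[].imei[].imask so that cluster pairs outside the list cut-off
     * are skipped in later invocations.
     */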
    /* convenience variables */
    const nbnxn_sci_t  *pl_sci      = plist.sci;
#ifndef PRUNE_NBL
    const
#endif
    nbnxn_cj4_t        *pl_cj4      = plist.cj4;
    const nbnxn_excl_t *excl        = plist.excl;
#ifndef LJ_COMB
    const int          *atom_types  = atdat.atom_types;
    int                 ntypes      = atdat.ntypes;
#else
    const float2       *lj_comb     = atdat.lj_comb;
    float2              ljcp_i, ljcp_j;
#endif
    const float4       *xq          = atdat.xq;
    float3             *f           = atdat.f;
    const float3       *shift_vec   = atdat.shift_vec;
    float               rcoulomb_sq = nbparam.rcoulomb_sq;
#ifdef VDW_CUTOFF_CHECK
    float               rvdw_sq     = nbparam.rvdw_sq;
    float               vdw_in_range;
#endif
#ifdef LJ_EWALD
    float               lje_coeff2, lje_coeff6_6;
#endif
#ifdef EL_RF
    float               two_k_rf    = nbparam.two_k_rf;
#endif
#ifdef EL_EWALD_ANA
    float               beta2       = nbparam.ewald_beta*nbparam.ewald_beta;
    float               beta3       = nbparam.ewald_beta*nbparam.ewald_beta*nbparam.ewald_beta;
#endif
#ifdef PRUNE_NBL
    float               rlist_sq    = nbparam.rlistOuter_sq;
#endif

#ifdef CALC_ENERGIES
#ifdef EL_EWALD_ANY
    float               beta        = nbparam.ewald_beta;
    float               ewald_shift = nbparam.sh_ewald;
#else
    float               c_rf        = nbparam.c_rf;
#endif /* EL_EWALD_ANY */
    float              *e_lj        = atdat.e_lj;
    float              *e_el        = atdat.e_el;
#endif /* CALC_ENERGIES */
    /* thread/block/warp id-s */
    unsigned int tidxi = threadIdx.x;
    unsigned int tidxj = threadIdx.y;
    unsigned int tidx  = threadIdx.y * blockDim.x + threadIdx.x;
    unsigned int bidx  = blockIdx.x;
    unsigned int widx  = tidx / warp_size; /* warp index */
    int          sci, ci, cj, ai, aj,
                 cij4_start, cij4_end;
#ifndef LJ_COMB
    int          typei, typej;
#endif
    int          i, jm, j4, wexcl_idx;
    float        qi, qj_f,
                 r2, inv_r, inv_r2;
#if !defined LJ_COMB_LB || defined CALC_ENERGIES
    float        inv_r6, c6, c12;
#endif
#ifdef LJ_COMB_LB
    float        sigma, epsilon;
#endif
    float        int_bit,
                 F_invr;
#ifdef CALC_ENERGIES
    float        E_lj, E_el;
#endif
#if defined CALC_ENERGIES || defined LJ_POT_SWITCH
    float        E_lj_p;
#endif
    unsigned int wexcl, imask, mask_ji;
    float4       xqbuf;
    float3       xi, xj, rv, f_ij, fcj_buf;
    float3       fci_buf[c_numClPerSupercl]; /* i force buffer */

    nbnxn_sci_t  nb_sci;

    /*! i-cluster interaction mask for a super-cluster with all c_numClPerSupercl (=8) bits set */
    const unsigned superClInteractionMask = ((1U << c_numClPerSupercl) - 1U);
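    /* Worked example of the mask layout, assuming c_numClPerSupercl = 8:
     * superClInteractionMask = (1U << 8) - 1U = 0xFF. In the 32-bit imask,
     * bits [jm*8 .. jm*8+7] describe j-cluster jm of a cj4 group against
     * the 8 i-clusters of the super-cluster; mask_ji below starts at bit
     * jm*8 and is doubled once per i iteration to walk through these bits.
     */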
    /*********************************************************************
     * Set up shared memory pointers.
     * sm_nextSlotPtr should always be updated to point to the "next slot",
     * that is past the last point where data has been stored.
     */
    extern __shared__  char sm_dynamicShmem[];
    char                   *sm_nextSlotPtr = sm_dynamicShmem;
    static_assert(sizeof(char) == 1, "The shared memory offset calculation assumes that char is 1 byte");

    /* shmem buffer for i x+q pre-loading */
    float4 *xqib = (float4 *)sm_nextSlotPtr;
    sm_nextSlotPtr += (c_numClPerSupercl * c_clSize * sizeof(*xqib));

    /* shmem buffer for cj, for each warp separately */
    int *cjs = (int *)(sm_nextSlotPtr);
    sm_nextSlotPtr += (c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(*cjs));

    /* shmem j force buffer */
    float *f_buf = (float *)(sm_nextSlotPtr);
    sm_nextSlotPtr += (c_clSize * c_clSize * 3 * sizeof(*f_buf));
    /*********************************************************************/
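    /* Rough size sketch of the layout above, assuming the usual values
     * c_numClPerSupercl = 8, c_clSize = 8, c_nbnxnGpuClusterpairSplit = 2,
     * c_nbnxnGpuJgroupSize = 4:
     *   xqib : 8*8*sizeof(float4)   = 1024 B
     *   cjs  : 2*4*sizeof(int)      =   32 B
     *   f_buf: 3*8*8*sizeof(float)  =  768 B
     * The host-side calc_shmem_required_nonbonded() must request at least
     * the sum of these slots as dynamic shared memory.
     */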
    nb_sci     = pl_sci[bidx];         /* my i super-cluster's index = current bidx */
    sci        = nb_sci.sci;           /* super-cluster */
    cij4_start = nb_sci.cj4_ind_start; /* first ... */
    cij4_end   = nb_sci.cj4_ind_end;   /* and last index of j clusters */
    /* Pre-load i-atom x and q into shared memory */
    ci = sci * c_numClPerSupercl + tidxj;
    ai = ci * c_clSize + tidxi;

    xqbuf    = xq[ai] + shift_vec[nb_sci.shift];
    xqbuf.w *= nbparam.epsfac;
    xqib[tidxj * c_clSize + tidxi] = xqbuf;

    __syncthreads();
    for (i = 0; i < c_numClPerSupercl; i++)
    {
        fci_buf[i] = make_float3(0.0f);
    }
#ifdef LJ_EWALD
    /* TODO: we are trading registers for flops by keeping the lje_coeff-s; try re-calculating them later */
    lje_coeff2   = nbparam.ewaldcoeff_lj*nbparam.ewaldcoeff_lj;
    lje_coeff6_6 = lje_coeff2*lje_coeff2*lje_coeff2*c_oneSixth;
#endif /* LJ_EWALD */
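/* In formula form: with eta = nbparam.ewaldcoeff_lj,
 *   lje_coeff2   = eta^2
 *   lje_coeff6_6 = eta^6/6,
 * the factors needed by the LJ-PME (LJ Ewald) real-space terms below.
 */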
#ifdef CALC_ENERGIES
    E_lj = 0.0f;
    E_el = 0.0f;

#ifdef EXCLUSION_FORCES /* Ewald or RF */
    if (nb_sci.shift == CENTRAL && pl_cj4[cij4_start].cj[0] == sci*c_numClPerSupercl)
    {
        /* we have the diagonal: add the charge and LJ self interaction energy term */
        for (i = 0; i < c_numClPerSupercl; i++)
        {
#if defined EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF
            qi    = xqib[i * c_clSize + tidxi].w;
            E_el += qi*qi;
#endif
#ifdef LJ_EWALD
            E_lj += LDG(&nbparam.nbfp[atom_types[(sci*c_numClPerSupercl + i)*c_clSize + tidxi]*(ntypes + 1)*2]);
#endif
        }

        /* divide the self term(s) equally over the j-threads, then multiply with the coefficients. */
#ifdef LJ_EWALD
        E_lj /= c_clSize;
        E_lj *= 0.5f*c_oneSixth*lje_coeff6_6;
#endif

#if defined EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF
        /* Correct for epsfac^2 due to adding qi^2 */
        E_el /= nbparam.epsfac*c_clSize;
#if defined EL_RF || defined EL_CUTOFF
        E_el *= -0.5f*c_rf;
#else
        E_el *= -beta*M_FLOAT_1_SQRTPI; /* last factor 1/sqrt(pi) */
#endif
#endif /* EL_EWALD_ANY || EL_RF || EL_CUTOFF */
    }
#endif /* EXCLUSION_FORCES */
#endif /* CALC_ENERGIES */
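/* In formula form, the self-interaction terms computed above are
 *   Ewald:           E_el_self = -beta/sqrt(pi)   * Sum_i q_i^2
 *   RF and cut-off:  E_el_self = -0.5*c_rf        * Sum_i q_i^2
 *   LJ-PME:          E_lj_self =  0.5*(eta^6/6)   * Sum_i C6_ii
 * (the c_oneSixth factor cancels the 6*C6 storage convention of nbfp).
 * The divisions by c_clSize and nbparam.epsfac only remove the replication
 * over j-threads and one power of the epsfac prefactor folded into xqib.
 */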
#ifdef EXCLUSION_FORCES
    const int nonSelfInteraction = !(nb_sci.shift == CENTRAL & tidxj <= tidxi);
#endif
    /* loop over the j clusters = seen by any of the atoms in the current super-cluster */
    for (j4 = cij4_start; j4 < cij4_end; j4++)
    {
        wexcl_idx = pl_cj4[j4].imei[widx].excl_ind;
        imask     = pl_cj4[j4].imei[widx].imask;
        wexcl     = excl[wexcl_idx].pair[(tidx) & (warp_size - 1)];
#ifndef PRUNE_NBL
        if (imask)
#endif
        {
            /* Pre-load cj into shared memory on both warps separately */
            if ((tidxj == 0 | tidxj == 4) & (tidxi < c_nbnxnGpuJgroupSize))
            {
                cjs[tidxi + tidxj * c_nbnxnGpuJgroupSize/c_splitClSize] = pl_cj4[j4].cj[tidxi];
            }
            /* Unrolling this loop with pruning leads to register spilling;
               tested with up to nvcc 7.5 */
#if !defined PRUNE_NBL
#pragma unroll 4
#endif
            for (jm = 0; jm < c_nbnxnGpuJgroupSize; jm++)
            {
                if (imask & (superClInteractionMask << (jm * c_numClPerSupercl)))
                {
                    mask_ji = (1U << (jm * c_numClPerSupercl));

                    cj = cjs[jm + (tidxj & 4) * c_nbnxnGpuJgroupSize/c_splitClSize];
                    aj = cj * c_clSize + tidxj;
                    /* load j atom data */
                    xqbuf = xq[aj];
                    xj    = make_float3(xqbuf.x, xqbuf.y, xqbuf.z);
                    qj_f  = xqbuf.w;
#ifndef LJ_COMB
                    typej = atom_types[aj];
#else
                    ljcp_j = lj_comb[aj];
#endif

                    fcj_buf = make_float3(0.0f);
#if !defined PRUNE_NBL
#pragma unroll 8
#endif
                    for (i = 0; i < c_numClPerSupercl; i++)
                    {
                        if (imask & mask_ji)
                        {
                            ci = sci * c_numClPerSupercl + i; /* i cluster index */
                            ai = ci * c_clSize + tidxi;       /* i atom index */

                            /* all threads load the i-atom x+q of cluster ci from shmem */
                            xqbuf = xqib[i * c_clSize + tidxi];
                            xi    = make_float3(xqbuf.x, xqbuf.y, xqbuf.z);
                            /* distance between i and j atoms */
                            rv = xi - xj;
                            r2 = norm2(rv);

#ifdef PRUNE_NBL
                            /* If _none_ of the atom pairs is in cutoff range,
                               the bit corresponding to the current
                               cluster-pair in imask gets set to 0. */
                            if (!__any(r2 < rlist_sq))
                            {
                                imask &= ~mask_ji;
                            }
#endif
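                            /* Note on the vote above: with the usual 8x8 pair
                             * tile split over two 32-thread warps, each warp
                             * covers 4 j-atoms x 8 i-atoms, so
                             * __any(r2 < rlist_sq) keeps the cluster-pair bit
                             * as soon as any of the 32 pairs seen by this
                             * warp lies within the outer list cut-off.
                             */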
                            int_bit = (wexcl & mask_ji) ? 1.0f : 0.0f;
                            /* cutoff & exclusion check */
#ifdef EXCLUSION_FORCES
                            if ((r2 < rcoulomb_sq) * (nonSelfInteraction | (ci != cj)))
#else
                            if ((r2 < rcoulomb_sq) * int_bit)
#endif
                            {
                                /* load the rest of the i-atom parameters */
                                qi = xqbuf.w;

#ifndef LJ_COMB
                                /* LJ 6*C6 and 12*C12 */
                                typei = atom_types[ai];
                                fetch_nbfp_c6_c12(c6, c12, nbparam, ntypes * typei + typej);
#else
                                ljcp_i = lj_comb[ai];
#ifdef LJ_COMB_GEOM
                                c6  = ljcp_i.x * ljcp_j.x;
                                c12 = ljcp_i.y * ljcp_j.y;
#else
                                /* LJ 2^(1/6)*sigma and 12*epsilon */
                                sigma   = ljcp_i.x + ljcp_j.x;
                                epsilon = ljcp_i.y * ljcp_j.y;
#if defined CALC_ENERGIES || defined LJ_FORCE_SWITCH || defined LJ_POT_SWITCH
                                convert_sigma_epsilon_to_c6_c12(sigma, epsilon, &c6, &c12);
#endif
#endif /* LJ_COMB_GEOM */
#endif /* LJ_COMB */
                                // Ensure distances do not become so small that r^-12 overflows
                                r2 = max(r2, NBNXN_MIN_RSQ);

                                inv_r  = rsqrt(r2);
                                inv_r2 = inv_r * inv_r;
#if !defined LJ_COMB_LB || defined CALC_ENERGIES
                                inv_r6 = inv_r2 * inv_r2 * inv_r2;
#ifdef EXCLUSION_FORCES
                                /* We could mask inv_r2, but with Ewald
                                 * masking both inv_r6 and F_invr is faster */
                                inv_r6 *= int_bit;
#endif /* EXCLUSION_FORCES */

                                F_invr = inv_r6 * (c12 * inv_r6 - c6) * inv_r2;
#if defined CALC_ENERGIES || defined LJ_POT_SWITCH
                                E_lj_p = int_bit * (c12 * (inv_r6 * inv_r6 + nbparam.repulsion_shift.cpot)*c_oneTwelveth -
                                                    c6 * (inv_r6 + nbparam.dispersion_shift.cpot)*c_oneSixth);
#endif
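                                /* Worked form of the expressions above, using
                                 * the storage convention noted earlier
                                 * (c6 = 6*C6, c12 = 12*C12):
                                 *   F_invr = (12*C12/r^12 - 6*C6/r^6)/r^2
                                 *          = -(1/r) dV/dr
                                 *   E_lj_p = C12/r^12 - C6/r^6 (+ shifts),
                                 * recovered via the c_oneTwelveth and
                                 * c_oneSixth prefactors.
                                 */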
#else /* !LJ_COMB_LB || CALC_ENERGIES */
                                float sig_r  = sigma*inv_r;
                                float sig_r2 = sig_r*sig_r;
                                float sig_r6 = sig_r2*sig_r2*sig_r2;
#ifdef EXCLUSION_FORCES
                                sig_r6 *= int_bit;
#endif /* EXCLUSION_FORCES */

                                F_invr = epsilon * sig_r6 * (sig_r6 - 1.0f) * inv_r2;
#endif /* !LJ_COMB_LB || CALC_ENERGIES */
#ifdef LJ_FORCE_SWITCH
#ifdef CALC_ENERGIES
                                calculate_force_switch_F_E(nbparam, c6, c12, inv_r, r2, &F_invr, &E_lj_p);
#else
                                calculate_force_switch_F(nbparam, c6, c12, inv_r, r2, &F_invr);
#endif /* CALC_ENERGIES */
#endif /* LJ_FORCE_SWITCH */
#ifdef LJ_EWALD
#ifdef LJ_EWALD_COMB_GEOM
#ifdef CALC_ENERGIES
                                calculate_lj_ewald_comb_geom_F_E(nbparam, typei, typej, r2, inv_r2, lje_coeff2, lje_coeff6_6, int_bit, &F_invr, &E_lj_p);
#else
                                calculate_lj_ewald_comb_geom_F(nbparam, typei, typej, r2, inv_r2, lje_coeff2, lje_coeff6_6, &F_invr);
#endif /* CALC_ENERGIES */
#elif defined LJ_EWALD_COMB_LB
                                calculate_lj_ewald_comb_LB_F_E(nbparam, typei, typej, r2, inv_r2, lje_coeff2, lje_coeff6_6,
#ifdef CALC_ENERGIES
                                                               int_bit, &F_invr, &E_lj_p
#else
                                                               0, &F_invr, nullptr
#endif /* CALC_ENERGIES */
                                                               );
#endif /* LJ_EWALD_COMB_GEOM */
#endif /* LJ_EWALD */
#ifdef LJ_POT_SWITCH
#ifdef CALC_ENERGIES
                                calculate_potential_switch_F_E(nbparam, inv_r, r2, &F_invr, &E_lj_p);
#else
                                calculate_potential_switch_F(nbparam, inv_r, r2, &F_invr, &E_lj_p);
#endif /* CALC_ENERGIES */
#endif /* LJ_POT_SWITCH */
#ifdef VDW_CUTOFF_CHECK
                                /* Separate VDW cut-off check to enable twin-range cut-offs
                                 * (rvdw < rcoulomb <= rlist)
                                 */
                                vdw_in_range  = (r2 < rvdw_sq) ? 1.0f : 0.0f;
                                F_invr       *= vdw_in_range;
#ifdef CALC_ENERGIES
                                E_lj_p       *= vdw_in_range;
#endif
#endif /* VDW_CUTOFF_CHECK */

#ifdef CALC_ENERGIES
                                E_lj += E_lj_p;
#endif /* CALC_ENERGIES */
#ifdef EL_CUTOFF
#ifdef EXCLUSION_FORCES
                                F_invr += qi * qj_f * int_bit * inv_r2 * inv_r;
#else
                                F_invr += qi * qj_f * inv_r2 * inv_r;
#endif
#endif
#ifdef EL_RF
                                F_invr += qi * qj_f * (int_bit*inv_r2 * inv_r - two_k_rf);
#endif
#if defined EL_EWALD_ANA
                                F_invr += qi * qj_f * (int_bit*inv_r2*inv_r + pmecorrF(beta2*r2)*beta3);
#elif defined EL_EWALD_TAB
                                F_invr += qi * qj_f * (int_bit*inv_r2 -
                                                       interpolate_coulomb_force_r(nbparam, r2 * inv_r)) * inv_r;
#endif /* EL_EWALD_ANA/TAB */
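                                /* The analytical-Ewald branch above evaluates
                                 * the real-space force
                                 *   F(r)/r = qi*qj*[ erfc(beta*r)/r^3
                                 *          + (2*beta/sqrt(pi))*exp(-beta^2*r^2)/r^2 ],
                                 * written as 1/r^3 plus a correction:
                                 * beta^3*pmecorrF(beta^2*r^2) supplies the
                                 * erf/exp part, which stays finite for
                                 * excluded pairs at small r (only the 1/r^3
                                 * piece is gated by int_bit). The tabulated
                                 * branch interpolates the same correction
                                 * from a table instead.
                                 */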
#ifdef CALC_ENERGIES
#ifdef EL_CUTOFF
                                E_el += qi * qj_f * (int_bit*inv_r - c_rf);
#endif
#ifdef EL_RF
                                E_el += qi * qj_f * (int_bit*inv_r + 0.5f * two_k_rf * r2 - c_rf);
#endif
#ifdef EL_EWALD_ANY
                                /* 1.0f - erff is faster than erfcf */
                                E_el += qi * qj_f * (inv_r * (int_bit - erff(r2 * inv_r * beta)) - int_bit * ewald_shift);
#endif /* EL_EWALD_ANY */
#endif /* CALC_ENERGIES */
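                                /* For an included pair (int_bit = 1) the
                                 * Ewald energy term above reduces, via
                                 * erfc(x) = 1 - erf(x), to
                                 *   E_el = qi*qj*(erfc(beta*r)/r - sh_ewald);
                                 * for an excluded pair it leaves only the
                                 * -qi*qj*erf(beta*r)/r correction that
                                 * cancels the reciprocal-space contribution.
                                 */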
                                f_ij = rv * F_invr;

                                /* accumulate j forces in registers */
                                fcj_buf -= f_ij;

                                /* accumulate i forces in registers */
                                fci_buf[i] += f_ij;
                            }
                        }

                        /* shift the mask bit by 1 */
                        mask_ji += mask_ji;
                    }
                    /* reduce j forces */
                    /* store j forces in shmem */
                    f_buf[                   tidx] = fcj_buf.x;
                    f_buf[    c_fbufStride + tidx] = fcj_buf.y;
                    f_buf[2 * c_fbufStride + tidx] = fcj_buf.z;

                    reduce_force_j_generic(f_buf, f, tidxi, tidxj, aj);
                }
            }
#ifdef PRUNE_NBL
            /* Update the imask with the new one which does not contain the
               out-of-range clusters anymore. */
            pl_cj4[j4].imei[widx].imask = imask;
#endif
        }
    }
    /* skip central shifts when summing shift forces */
    if (nb_sci.shift == CENTRAL)
    {
        bCalcFshift = false;
    }

    float fshift_buf = 0.0f;
    /* reduce i forces */
    for (i = 0; i < c_numClPerSupercl; i++)
    {
        ai = (sci * c_numClPerSupercl + i) * c_clSize + tidxi;
        f_buf[                   tidx] = fci_buf[i].x;
        f_buf[    c_fbufStride + tidx] = fci_buf[i].y;
        f_buf[2 * c_fbufStride + tidx] = fci_buf[i].z;
        __syncthreads();
        reduce_force_i(f_buf, f,
                       &fshift_buf, bCalcFshift,
                       tidxi, tidxj, ai);
        __syncthreads();
    }
    /* add up local shift forces into global mem, tidxj indexes x,y,z */
    if (bCalcFshift && tidxj < 3)
    {
        atomicAdd(&(atdat.fshift[nb_sci.shift].x) + tidxj, fshift_buf);
    }
#ifdef CALC_ENERGIES
    /* flush the energies to shmem and reduce them */
    f_buf[               tidx] = E_lj;
    f_buf[c_fbufStride + tidx] = E_el;
    reduce_energy_pow2(f_buf + (tidx & warp_size), e_lj, e_el, tidx & ~warp_size);
#endif /* CALC_ENERGIES */
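    /* Sketch of the reduction above: E_lj/E_el live in two c_fbufStride-
     * strided slices of f_buf; f_buf + (tidx & warp_size) selects this
     * warp's half and tidx & ~warp_size is the thread's index within it,
     * so the two warps run their power-of-two reductions independently
     * before the final accumulation into the global e_lj/e_el buffers.
     */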
}
#endif /* FUNCTION_DECLARATION_ONLY */

#undef THREADS_PER_BLOCK

#undef EL_EWALD_ANY
#undef EXCLUSION_FORCES
#undef LJ_EWALD

#undef LJ_COMB