/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
 * Copyright (c) 2017,2018,2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief
 * CUDA non-bonded kernel used through preprocessor-based code generation
 * of multiple kernel flavors, see nbnxn_cuda_kernels.cuh.
 *
 * NOTE: No include fence as it is meant to be included multiple times.
 *
 * \author Szilárd Páll <pall.szilard@gmail.com>
 * \author Berk Hess <hess@kth.se>
 * \ingroup module_nbnxm
 */
#include "gromacs/gpu_utils/cuda_arch_utils.cuh"
#include "gromacs/gpu_utils/cuda_kernel_utils.cuh"
#include "gromacs/gpu_utils/typecasts.cuh"
#include "gromacs/math/units.h"
#include "gromacs/math/utilities.h"
#include "gromacs/pbcutil/ishift.h"
/* Note that floating-point constants in CUDA code should be suffixed
 * with f (e.g. 0.5f), to stop the compiler producing intermediate
 * code that is in double precision.
 */
#if defined EL_EWALD_ANA || defined EL_EWALD_TAB
/* Note: convenience macro, needs to be undef-ed at the end of the file. */
# define EL_EWALD_ANY
#endif
#if defined LJ_EWALD_COMB_GEOM || defined LJ_EWALD_COMB_LB
/* Note: convenience macro, needs to be undef-ed at the end of the file. */
# define LJ_EWALD
#endif
#if defined EL_EWALD_ANY || defined EL_RF || defined LJ_EWALD \
        || (defined EL_CUTOFF && defined CALC_ENERGIES)
/* Macro to control the calculation of exclusion forces in the kernel.
 * We do that with Ewald (elec/vdw) and RF. Cut-off only has exclusion
 * energy terms.
 *
 * Note: convenience macro, needs to be undef-ed at the end of the file.
 */
# define EXCLUSION_FORCES
#endif
#if defined LJ_COMB_GEOM || defined LJ_COMB_LB
# define LJ_COMB
#endif
/*
   Kernel launch parameters:
    - #blocks   = #pair lists, blockId = pair list Id
    - #threads  = NTHREAD_Z * c_clSize^2
    - shmem     = see nbnxn_cuda.cu:calc_shmem_required_nonbonded()

    Each thread calculates an i force-component taking one pair of i-j atoms.
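
    As an illustration (added note, not part of the original sources), a host-side
    launch for this configuration would look roughly as follows; numSci, shmemSize,
    stream and kernelVariant are placeholders for values set up elsewhere:

        dim3 blockSize(c_clSize, c_clSize, NTHREAD_Z);
        dim3 gridSize(numSci); // one block per pair list / i super-cluster
        kernelVariant<<<gridSize, blockSize, shmemSize, stream>>>(atdat, nbparam, plist, bCalcFshift);
 */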
/*! \brief Compute capability dependent definition of kernel launch configuration parameters.
 *
 * NTHREAD_Z controls the number of j-clusters processed concurrently on NTHREAD_Z
 * warp-pairs per block.
 *
 * - On CC 3.0-3.5, and >=5.0: NTHREAD_Z == 1, translating to 64 th/block with 16
 *   blocks/multiproc, is the fastest even though this setup gives low occupancy;
 *   NTHREAD_Z > 1 results in excessive register spilling unless the minimum blocks
 *   per multiprocessor is reduced proportionally to get the original number of max
 *   threads in flight (and slightly lower performance).
 * - On CC 3.7 there are enough registers to double the number of threads; using
 *   NTHREAD_Z == 2 is fastest with 16 blocks (TODO: test with RF and other kernels
 *   with low-register use).
 *
 * Note that the current kernel implementation only supports NTHREAD_Z > 1 with
 * shuffle-based reduction, hence CC >= 3.0.
 *
 * NOTEs on Volta / CUDA 9 extensions:
 *
 * - While active thread masks are required for the warp collectives
 *   (we use any and shfl), the kernel is designed such that all conditions
 *   (other than the inner-most distance check) including loop trip counts
 *   are warp-synchronous. Therefore, we don't need ballot to compute the
 *   active masks as these are all full-warp masks.
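 *
 * As a concrete illustration (added note, not from the original documentation): the
 * inner-most distance check in the loop body below is the only warp collective that
 * depends on per-thread data, and it is issued with the full-warp mask, schematically
 *
 *     if (!__any_sync(c_fullWarpMask, r2 < rlist_sq)) { imask &= ~mask_ji; }
 *
 * all other branches and loop trip counts are identical across the warp, so no ballot
 * is needed to build the mask.
 */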
/* Kernel launch bounds for different compute capabilities. The value of NTHREAD_Z
 * determines the number of threads per block and it is chosen such that
 * 16 blocks/multiprocessor can be kept in flight.
 * - CC 3.0,3.5, and >=5.0: NTHREAD_Z=1, (64, 16) bounds
 * - CC 3.7:                NTHREAD_Z=2, (128, 16) bounds
 *
 * Note: convenience macros, need to be undef-ed at the end of the file.
 */
#if GMX_PTX_ARCH == 370
# define NTHREAD_Z (2)
# define MIN_BLOCKS_PER_MP (16)
#else
# define NTHREAD_Z (1)
# define MIN_BLOCKS_PER_MP (16)
#endif /* GMX_PTX_ARCH == 370 */
#define THREADS_PER_BLOCK (c_clSize * c_clSize * NTHREAD_Z)
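/* Added note: with the usual c_clSize == 8 this gives 8 * 8 * 1 = 64 threads per block on
 * most architectures and 8 * 8 * 2 = 128 on CC 3.7, matching the (64, 16) and (128, 16)
 * launch bounds listed above. */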
#if GMX_PTX_ARCH >= 350
__launch_bounds__(THREADS_PER_BLOCK, MIN_BLOCKS_PER_MP)
#else
__launch_bounds__(THREADS_PER_BLOCK)
#endif /* GMX_PTX_ARCH >= 350 */
#ifdef PRUNE_NBL
# ifdef CALC_ENERGIES
__global__ void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _VF_prune_cuda)
# else
__global__ void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _F_prune_cuda)
# endif /* CALC_ENERGIES */
#else
# ifdef CALC_ENERGIES
__global__ void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _VF_cuda)
# else
__global__ void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _F_cuda)
# endif /* CALC_ENERGIES */
#endif /* PRUNE_NBL */
        (NBAtomDataGpu atdat, NBParamGpu nbparam, Nbnxm::gpu_plist plist, bool bCalcFshift)
#ifdef FUNCTION_DECLARATION_ONLY
; /* Only do function declaration, omit the function body. */
#else
    /* convenience variables */
    const nbnxn_sci_t*  pl_sci = plist.sci;
    nbnxn_cj4_t*        pl_cj4 = plist.cj4;
    const nbnxn_excl_t* excl   = plist.excl;
    const int* atom_types = atdat.atomTypes;
    int        ntypes     = atdat.numTypes;
    const float2* lj_comb = atdat.ljComb;
    float2        ljcp_i, ljcp_j;
    const float4* xq          = atdat.xq;
    float3*       f           = asFloat3(atdat.f);
    const float3* shift_vec   = asFloat3(atdat.shiftVec);
    float         rcoulomb_sq = nbparam.rcoulomb_sq;
# ifdef VDW_CUTOFF_CHECK
    float         rvdw_sq     = nbparam.rvdw_sq;
# endif /* VDW_CUTOFF_CHECK */
    float lje_coeff2, lje_coeff6_6;
    float two_k_rf = nbparam.two_k_rf;
    float beta2    = nbparam.ewald_beta * nbparam.ewald_beta;
    float beta3    = nbparam.ewald_beta * nbparam.ewald_beta * nbparam.ewald_beta;
    float rlist_sq = nbparam.rlistOuter_sq;

# ifdef CALC_ENERGIES
#  ifdef EL_EWALD_ANY
    float beta        = nbparam.ewald_beta;
    float ewald_shift = nbparam.sh_ewald;
#  else
    float reactionFieldShift = nbparam.c_rf;
#  endif /* EL_EWALD_ANY */
    float* e_lj = atdat.eLJ;
    float* e_el = atdat.eElec;
# endif /* CALC_ENERGIES */
    /* thread/block/warp id-s */
    unsigned int tidxi = threadIdx.x;
    unsigned int tidxj = threadIdx.y;
    unsigned int tidx  = threadIdx.y * blockDim.x + threadIdx.x;
# if NTHREAD_Z == 1
    unsigned int tidxz = 0;
# else
    unsigned int tidxz = threadIdx.z;
# endif
    unsigned int bidx = blockIdx.x;
    unsigned int widx = tidx / warp_size; /* warp index */
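    /* Added note: assuming the usual c_clSize == 8, each NTHREAD_Z slice of the block is
     * 8 * 8 = 64 threads, i.e. two warps for warp_size == 32; widx then selects the warp's
     * half of the cluster-pair work and the matching interaction/exclusion data
     * (imei[widx], loaded from the pair list below). */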
    int          sci, ci, cj, ai, aj, cij4_start, cij4_end;
    int          i, jm, j4, wexcl_idx;
    float        qi, qj_f, r2, inv_r, inv_r2;
# if !defined LJ_COMB_LB || defined CALC_ENERGIES
    float        inv_r6, c6, c12;
# endif
    float        sigma, epsilon;
    float        int_bit, F_invr;
# ifdef CALC_ENERGIES
    float        E_lj, E_el;
# endif
# if defined CALC_ENERGIES || defined LJ_POT_SWITCH
    float        E_lj_p;
# endif
    unsigned int wexcl, imask, mask_ji;
    float3       xi, xj, rv, f_ij, fcj_buf;
    float3       fci_buf[c_nbnxnGpuNumClusterPerSupercluster]; /* i force buffer */

    /*! i-cluster interaction mask for a super-cluster with all c_nbnxnGpuNumClusterPerSupercluster=8 bits set */
    const unsigned superClInteractionMask = ((1U << c_nbnxnGpuNumClusterPerSupercluster) - 1U);
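    /* Added note: with the 8 bits mentioned above this mask is 0xFF; within imask every
     * j-cluster owns one such 8-bit group, one bit per i-cluster of the super-cluster. */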
    // cj preload is off in the following cases:
    // - sm_70 (V100), sm_80 (A100), sm_86 (GA102)
    // - for future arch (> 8.6 at the time of writing) we assume it is better to keep it off
    // cj preload is left on for:
    // - sm_75: improvements +/- very small
    // - sm_61: tested and slower without preload
    // - sm_6x and earlier: not tested, preload kept on
    constexpr bool c_preloadCj = (GMX_PTX_ARCH < 700 || GMX_PTX_ARCH == 750);
    /*********************************************************************
     * Set up shared memory pointers.
     * sm_nextSlotPtr should always be updated to point to the "next slot",
     * that is past the last point where data has been stored.
     */
    // NOLINTNEXTLINE(readability-redundant-declaration)
    extern __shared__ char sm_dynamicShmem[];
    char*                  sm_nextSlotPtr = sm_dynamicShmem;
    static_assert(sizeof(char) == 1,
                  "The shared memory offset calculation assumes that char is 1 byte");
    /* shmem buffer for i x+q pre-loading */
    float4* xqib = reinterpret_cast<float4*>(sm_nextSlotPtr);
    sm_nextSlotPtr += (c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(*xqib));

    /* shmem buffer for cj, for each warp separately */
    int* cjs = reinterpret_cast<int*>(sm_nextSlotPtr);
    /* the cjs buffer's use expects a base pointer offset for pairs of warps in the j-concurrent execution */
    cjs += tidxz * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize;
    sm_nextSlotPtr += (NTHREAD_Z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(*cjs));

    /* shmem buffer for i atom-type pre-loading */
    int* atib = reinterpret_cast<int*>(sm_nextSlotPtr);
    sm_nextSlotPtr += (c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(*atib));

    /* shmem buffer for i-atom LJ combination rule parameters */
    float2* ljcpib = reinterpret_cast<float2*>(sm_nextSlotPtr);
    sm_nextSlotPtr += (c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(*ljcpib));
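    /* Added note: a worked size example, assuming the usual values c_clSize == 8,
     * c_nbnxnGpuNumClusterPerSupercluster == 8, c_nbnxnGpuJgroupSize == 4,
     * c_nbnxnGpuClusterpairSplit == 2 and NTHREAD_Z == 1:
     *   xqib:   8 * 8 * sizeof(float4) = 1024 bytes
     *   cjs:    1 * 2 * 4 * sizeof(int)  =   32 bytes
     *   atib:   8 * 8 * sizeof(int)      =  256 bytes
     *   ljcpib: 8 * 8 * sizeof(float2)   =  512 bytes
     * cf. the calc_shmem_required_nonbonded() helper referenced in the launch-parameter
     * notes near the top of this file. */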
    /*********************************************************************/
    nb_sci     = pl_sci[bidx];         /* my i super-cluster's index = current bidx */
    sci        = nb_sci.sci;           /* super-cluster */
    cij4_start = nb_sci.cj4_ind_start; /* first ...*/
    cij4_end   = nb_sci.cj4_ind_end;   /* and last index of j clusters */
    /* Pre-load i-atom x and q into shared memory */
    ci = sci * c_nbnxnGpuNumClusterPerSupercluster + tidxj;
    ai = ci * c_clSize + tidxi;

    const float* shiftptr = reinterpret_cast<const float*>(&shift_vec[nb_sci.shift]);
    xqbuf = xq[ai] + make_float4(LDG(shiftptr), LDG(shiftptr + 1), LDG(shiftptr + 2), 0.0F);
    xqbuf.w *= nbparam.epsfac;
    xqib[tidxj * c_clSize + tidxi] = xqbuf;

    /* Pre-load the i-atom types into shared memory */
    atib[tidxj * c_clSize + tidxi] = atom_types[ai];

    /* Pre-load the LJ combination parameters into shared memory */
    ljcpib[tidxj * c_clSize + tidxi] = lj_comb[ai];

    for (i = 0; i < c_nbnxnGpuNumClusterPerSupercluster; i++)
        fci_buf[i] = make_float3(0.0F);
    /* TODO: we are trading registers with flops by keeping lje_coeff-s, try re-calculating it later */
    lje_coeff2   = nbparam.ewaldcoeff_lj * nbparam.ewaldcoeff_lj;
    lje_coeff6_6 = lje_coeff2 * lje_coeff2 * lje_coeff2 * c_oneSixth;
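    /* Added note: lje_coeff6_6 therefore equals ewaldcoeff_lj^6 / 6, the prefactor used by
     * the LJ-Ewald (LJ-PME) correction terms applied in the inner loop below. */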
# ifdef CALC_ENERGIES
#  ifdef EXCLUSION_FORCES /* Ewald or RF */
    if (nb_sci.shift == gmx::c_centralShiftIndex
        && pl_cj4[cij4_start].cj[0] == sci * c_nbnxnGpuNumClusterPerSupercluster)

        /* we have the diagonal: add the charge and LJ self interaction energy term */
        for (i = 0; i < c_nbnxnGpuNumClusterPerSupercluster; i++)

#  if defined EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF
            qi = xqib[i * c_clSize + tidxi].w;
#  endif

            // load only the first 4 bytes of the parameter pair (equivalent with nbfp[idx].x)
            E_lj += LDG(reinterpret_cast<float*>(
                    &nbparam.nbfp[atom_types[(sci * c_nbnxnGpuNumClusterPerSupercluster + i) * c_clSize + tidxi]
                                  * (ntypes + 1)]));

        /* divide the self term(s) equally over the j-threads, then multiply with the coefficients. */
        E_lj /= c_clSize * NTHREAD_Z;
        E_lj *= 0.5F * c_oneSixth * lje_coeff6_6;

#  if defined EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF
        /* Correct for epsfac^2 due to adding qi^2 */
        E_el /= nbparam.epsfac * c_clSize * NTHREAD_Z;
#   if defined EL_RF || defined EL_CUTOFF
        E_el *= -0.5F * reactionFieldShift;
#   else
        E_el *= -beta * M_FLOAT_1_SQRTPI; /* last factor 1/sqrt(pi) */
#   endif
#  endif /* EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF */

#  endif /* EXCLUSION_FORCES */

# endif /* CALC_ENERGIES */
# ifdef EXCLUSION_FORCES
    const int nonSelfInteraction = !(nb_sci.shift == gmx::c_centralShiftIndex & tidxj <= tidxi);
# endif
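    /* Added note: on the central shift, nonSelfInteraction is 0 for threads with
     * tidxj <= tidxi; together with the ci != cj test in the inner loop this skips the
     * self atom pair (r = 0) and the lower triangle of the diagonal i==j cluster pair,
     * which is needed because with exclusion forces int_bit cannot be used to mask them
     * out. The bitwise & on the bool-valued comparisons is intentional (branch-free). */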
    /* loop over the j clusters = seen by any of the atoms in the current super-cluster;
     * The loop stride NTHREAD_Z ensures that consecutive warp-pairs are assigned
     * consecutive j4's entries.
     */
    for (j4 = cij4_start + tidxz; j4 < cij4_end; j4 += NTHREAD_Z)

        wexcl_idx = pl_cj4[j4].imei[widx].excl_ind;
        imask     = pl_cj4[j4].imei[widx].imask;
        wexcl     = excl[wexcl_idx].pair[(tidx) & (warp_size - 1)];
            /* Pre-load cj into shared memory on both warps separately */
            if ((tidxj == 0 | tidxj == 4) & (tidxi < c_nbnxnGpuJgroupSize))
                cjs[tidxi + tidxj * c_nbnxnGpuJgroupSize / c_splitClSize] = pl_cj4[j4].cj[tidxi];
            __syncwarp(c_fullWarpMask);
        /* Unrolling this loop
           - with pruning leads to register spilling;
           - on Kepler and later it is much slower;
           Tested with up to nvcc 7.5 */
        for (jm = 0; jm < c_nbnxnGpuJgroupSize; jm++)

            if (imask & (superClInteractionMask << (jm * c_nbnxnGpuNumClusterPerSupercluster)))

                mask_ji = (1U << (jm * c_nbnxnGpuNumClusterPerSupercluster));

                cj = c_preloadCj ? cjs[jm + (tidxj & 4) * c_nbnxnGpuJgroupSize / c_splitClSize]
                                 : pl_cj4[j4].cj[jm];

                aj = cj * c_clSize + tidxj;
                /* load j atom data */
                xj     = make_float3(xqbuf.x, xqbuf.y, xqbuf.z);
                typej  = atom_types[aj];
                ljcp_j = lj_comb[aj];

                fcj_buf = make_float3(0.0F);
# if !defined PRUNE_NBL
#  pragma unroll 8
# endif
                for (i = 0; i < c_nbnxnGpuNumClusterPerSupercluster; i++)

                    ci = sci * c_nbnxnGpuNumClusterPerSupercluster + i; /* i cluster index */

                    /* all threads load an atom from i cluster ci into shmem! */
                    xqbuf = xqib[i * c_clSize + tidxi];
                    xi    = make_float3(xqbuf.x, xqbuf.y, xqbuf.z);

                    /* distance between i and j atoms */

                    /* If _none_ of the atom pairs are in cutoff range,
                       the bit corresponding to the current
                       cluster-pair in imask gets set to 0. */
                    if (!__any_sync(c_fullWarpMask, r2 < rlist_sq))
                    int_bit = (wexcl & mask_ji) ? 1.0F : 0.0F;

                    /* cutoff & exclusion check */
# ifdef EXCLUSION_FORCES
                    if ((r2 < rcoulomb_sq) * (nonSelfInteraction | (ci != cj)))
# else
                    if ((r2 < rcoulomb_sq) * int_bit)
# endif
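                        /* Added note: the comparisons in the checks above are 0/1-valued and
                         * are combined by multiplication and bitwise | instead of && and ||;
                         * this is an intentional branch-free way of ANDing the cut-off test
                         * with the exclusion or self-interaction mask. */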
                        /* load the rest of the i-atom parameters */

# ifndef LJ_COMB
                        /* LJ 6*C6 and 12*C12 */
                        typei = atib[i * c_clSize + tidxi];
                        fetch_nbfp_c6_c12(c6, c12, nbparam, ntypes * typei + typej);
# else
                        ljcp_i = ljcpib[i * c_clSize + tidxi];
#  ifdef LJ_COMB_GEOM
                        c6  = ljcp_i.x * ljcp_j.x;
                        c12 = ljcp_i.y * ljcp_j.y;
#  else
                        /* LJ 2^(1/6)*sigma and 12*epsilon */
                        sigma   = ljcp_i.x + ljcp_j.x;
                        epsilon = ljcp_i.y * ljcp_j.y;
#   if defined CALC_ENERGIES || defined LJ_FORCE_SWITCH || defined LJ_POT_SWITCH
                        convert_sigma_epsilon_to_c6_c12(sigma, epsilon, &c6, &c12);
#   endif
#  endif /* LJ_COMB_GEOM */
# endif  /* LJ_COMB */
                        // Ensure the distance does not become so small that r^-12 overflows
                        r2 = max(r2, c_nbnxnMinDistanceSquared);

                        inv_r  = rsqrt(r2);
                        inv_r2 = inv_r * inv_r;
# if !defined LJ_COMB_LB || defined CALC_ENERGIES
                        inv_r6 = inv_r2 * inv_r2 * inv_r2;
#  ifdef EXCLUSION_FORCES
                        /* We could mask inv_r2, but with Ewald
                         * masking both inv_r6 and F_invr is faster */
                        inv_r6 *= int_bit;
#  endif /* EXCLUSION_FORCES */

                        F_invr = inv_r6 * (c12 * inv_r6 - c6) * inv_r2;
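                        /* Added note: c6 and c12 carry the 6*C6 and 12*C12 prefactors (see the
                         * "LJ 6*C6 and 12*C12" load above), so F_invr is the scalar force over
                         * distance, F/r = (12*C12/r^12 - 6*C6/r^6)/r^2, ready to be multiplied
                         * by the interatomic vector. */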
# if defined CALC_ENERGIES || defined LJ_POT_SWITCH
                        E_lj_p = int_bit
                                 * (c12 * (inv_r6 * inv_r6 + nbparam.repulsion_shift.cpot) * c_oneTwelveth
                                    - c6 * (inv_r6 + nbparam.dispersion_shift.cpot) * c_oneSixth);
# endif
# else /* !LJ_COMB_LB || CALC_ENERGIES */
                        float sig_r  = sigma * inv_r;
                        float sig_r2 = sig_r * sig_r;
                        float sig_r6 = sig_r2 * sig_r2 * sig_r2;
#  ifdef EXCLUSION_FORCES
                        sig_r6 *= int_bit;
#  endif /* EXCLUSION_FORCES */

                        F_invr = epsilon * sig_r6 * (sig_r6 - 1.0F) * inv_r2;
# endif /* !LJ_COMB_LB || CALC_ENERGIES */
# ifdef LJ_FORCE_SWITCH
#  ifdef CALC_ENERGIES
                        calculate_force_switch_F_E(nbparam, c6, c12, inv_r, r2, &F_invr, &E_lj_p);
#  else
                        calculate_force_switch_F(nbparam, c6, c12, inv_r, r2, &F_invr);
#  endif /* CALC_ENERGIES */
# endif  /* LJ_FORCE_SWITCH */
# ifdef LJ_EWALD
#  ifdef LJ_EWALD_COMB_GEOM
#   ifdef CALC_ENERGIES
                        calculate_lj_ewald_comb_geom_F_E(
                                nbparam, typei, typej, r2, inv_r2, lje_coeff2, lje_coeff6_6, int_bit, &F_invr, &E_lj_p);
#   else
                        calculate_lj_ewald_comb_geom_F(
                                nbparam, typei, typej, r2, inv_r2, lje_coeff2, lje_coeff6_6, &F_invr);
#   endif /* CALC_ENERGIES */
#  elif defined LJ_EWALD_COMB_LB
                        calculate_lj_ewald_comb_LB_F_E(nbparam,
#   ifdef CALC_ENERGIES
#   endif /* CALC_ENERGIES */
#  endif /* LJ_EWALD_COMB_GEOM */
# endif  /* LJ_EWALD */
# ifdef LJ_POT_SWITCH
#  ifdef CALC_ENERGIES
                        calculate_potential_switch_F_E(nbparam, inv_r, r2, &F_invr, &E_lj_p);
#  else
                        calculate_potential_switch_F(nbparam, inv_r, r2, &F_invr, &E_lj_p);
#  endif /* CALC_ENERGIES */
# endif  /* LJ_POT_SWITCH */
# ifdef VDW_CUTOFF_CHECK
                        /* Separate VDW cut-off check to enable twin-range cut-offs
                         * (rvdw < rcoulomb <= rlist)
                         */
                        vdw_in_range = (r2 < rvdw_sq) ? 1.0F : 0.0F;
                        F_invr *= vdw_in_range;
#  ifdef CALC_ENERGIES
                        E_lj_p *= vdw_in_range;
#  endif
# endif /* VDW_CUTOFF_CHECK */
# ifdef CALC_ENERGIES
                        E_lj += E_lj_p;
# endif

# ifdef EL_CUTOFF
#  ifdef EXCLUSION_FORCES
                        F_invr += qi * qj_f * int_bit * inv_r2 * inv_r;
#  else
                        F_invr += qi * qj_f * inv_r2 * inv_r;
#  endif
# endif
# ifdef EL_RF
                        F_invr += qi * qj_f * (int_bit * inv_r2 * inv_r - two_k_rf);
# endif
# if defined EL_EWALD_ANA
                        F_invr += qi * qj_f
                                  * (int_bit * inv_r2 * inv_r + pmecorrF(beta2 * r2) * beta3);
# elif defined EL_EWALD_TAB
                        F_invr += qi * qj_f
                                  * (int_bit * inv_r2
                                     - interpolate_coulomb_force_r(nbparam, r2 * inv_r))
                                  * inv_r;
# endif /* EL_EWALD_ANA/TAB */
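                        /* Added note: each branch above accumulates the electrostatic F/r into
                         * F_invr: plain cut-off q_i*q_j/r^3, reaction field q_i*q_j*(1/r^3 - 2*k_rf),
                         * or Ewald q_i*q_j*(1/r^3 + correction term); int_bit zeroes only the
                         * real-space 1/r^3 part for excluded pairs, so the RF/Ewald correction
                         * still acts on exclusions as required. */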
# ifdef CALC_ENERGIES
#  ifdef EL_CUTOFF
                        E_el += qi * qj_f * (int_bit * inv_r - reactionFieldShift);
#  endif
#  ifdef EL_RF
                        E_el += qi * qj_f
                                * (int_bit * inv_r + 0.5F * two_k_rf * r2 - reactionFieldShift);
#  endif
#  ifdef EL_EWALD_ANY
                        /* 1.0F - erff is faster than erfcf */
                        E_el += qi * qj_f
                                * (inv_r * (int_bit - erff(r2 * inv_r * beta)) - int_bit * ewald_shift);
#  endif /* EL_EWALD_ANY */
# endif
                        /* accumulate j forces in registers */

                        /* accumulate i forces in registers */

                    /* shift the mask bit by 1 */

                /* reduce j forces */
                reduce_force_j_warp_shfl(fcj_buf, f, tidxi, aj, c_fullWarpMask);
        /* Update the imask with the new one which does not contain the
           out of range clusters anymore. */
        pl_cj4[j4].imei[widx].imask = imask;

        // avoid shared memory WAR hazards on sm_cjs between loop iterations
        __syncwarp(c_fullWarpMask);
    /* skip central shifts when summing shift forces */
    if (nb_sci.shift == gmx::c_centralShiftIndex)
        bCalcFshift = false;

    float fshift_buf = 0.0F;

    /* reduce i forces */
    for (i = 0; i < c_nbnxnGpuNumClusterPerSupercluster; i++)
    {
        ai = (sci * c_nbnxnGpuNumClusterPerSupercluster + i) * c_clSize + tidxi;
        reduce_force_i_warp_shfl(fci_buf[i], f, &fshift_buf, bCalcFshift, tidxj, ai, c_fullWarpMask);
    }

    /* add up local shift forces into global mem, tidxj indexes x,y,z */
    if (bCalcFshift && (tidxj & 3) < 3)
    {
        float3* fShift = asFloat3(atdat.fShift);
        atomicAdd(&(fShift[nb_sci.shift].x) + (tidxj & 3), fshift_buf);
    }
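    /* Added note: threads for which tidxj & 3 is 0, 1 or 2 add their partial shift-force
     * sums to the x, y and z components respectively; the pointer arithmetic
     * &fShift[..].x + (tidxj & 3) selects the float3 member, which is what
     * "tidxj indexes x,y,z" refers to above. */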
# ifdef CALC_ENERGIES
    /* reduce the energies over warps and store into global memory */
    reduce_energy_warp_shfl(E_lj, E_el, e_lj, e_el, tidx, c_fullWarpMask);
# endif /* CALC_ENERGIES */
#endif /* FUNCTION_DECLARATION_ONLY */

#undef NTHREAD_Z
#undef MIN_BLOCKS_PER_MP
#undef THREADS_PER_BLOCK

#undef EL_EWALD_ANY
#undef EXCLUSION_FORCES
#undef LJ_EWALD

#undef LJ_COMB