/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
 * Copyright (c) 2017,2018,2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \internal \file
 *  \brief
 *  CUDA non-bonded kernel used through preprocessor-based code generation
 *  of multiple kernel flavors, see nbnxn_cuda_kernels.cuh.
 *
 *  NOTE: No include fence as it is meant to be included multiple times.
 *
 *  \author Szilárd Páll <pall.szilard@gmail.com>
 *  \author Berk Hess <hess@kth.se>
 *  \ingroup module_nbnxm
 */

#include "gromacs/gpu_utils/cuda_arch_utils.cuh"
#include "gromacs/gpu_utils/cuda_kernel_utils.cuh"
#include "gromacs/gpu_utils/typecasts.cuh"
#include "gromacs/math/utilities.h"
#include "gromacs/pbcutil/ishift.h"
/* Note that floating-point constants in CUDA code should be suffixed
 * with f (e.g. 0.5f), to stop the compiler producing intermediate
 * code that is in double precision.
 */

#if defined EL_EWALD_ANA || defined EL_EWALD_TAB
/* Note: convenience macro, needs to be undef-ed at the end of the file. */
#    define EL_EWALD_ANY
#endif

#if defined LJ_EWALD_COMB_GEOM || defined LJ_EWALD_COMB_LB
/* Note: convenience macro, needs to be undef-ed at the end of the file. */
#    define LJ_EWALD
#endif

#if defined EL_EWALD_ANY || defined EL_RF || defined LJ_EWALD \
        || (defined EL_CUTOFF && defined CALC_ENERGIES)
/* Macro to control the calculation of exclusion forces in the kernel
 * We do that with Ewald (elec/vdw) and RF. Cut-off only has exclusion
 * energy terms.
 *
 * Note: convenience macro, needs to be undef-ed at the end of the file.
 */
#    define EXCLUSION_FORCES
#endif

#if defined LJ_COMB_GEOM || defined LJ_COMB_LB
#    define LJ_COMB
#endif

/*
   Kernel launch parameters:
    - #blocks   = #pair lists, blockId = pair list Id
    - #threads  = NTHREAD_Z * c_clSize^2
    - shmem     = see nbnxn_cuda.cu:calc_shmem_required_nonbonded()

    Each thread calculates an i force-component taking one pair of i-j atoms.
 */
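
/* Illustrative note, not part of the build: with the default c_clSize == 8 the block is
 * laid out as (c_clSize, c_clSize, NTHREAD_Z), i.e. 8 x 8 x NTHREAD_Z threads, and the grid
 * has one block per pair-list entry. A host-side launch would therefore look roughly like
 * the hypothetical sketch below; the actual launch code lives in nbnxm_cuda.cu.
 *
 *     dim3 blockSize(c_clSize, c_clSize, NTHREAD_Z);  // 64 or 128 threads per block
 *     dim3 gridSize(plist.nsci, 1, 1);                // one block per i super-cluster
 *     nbnxn_kernel_..._cuda<<<gridSize, blockSize, shmemSize, stream>>>(atdat, nbparam, plist, bCalcFshift);
 */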

/**@{*/
/*! \brief Compute capability dependent definition of kernel launch configuration parameters.
 *
 * NTHREAD_Z controls the number of j-clusters processed concurrently on NTHREAD_Z
 * warp-pairs per block.
 *
 * - On CC 3.0-3.5, and >=5.0, NTHREAD_Z == 1, translating to 64 th/block with 16
 * blocks/multiproc, is the fastest even though this setup gives low occupancy
 * (except on 6.0).
 * NTHREAD_Z > 1 results in excessive register spilling unless the minimum blocks
 * per multiprocessor is reduced proportionally to get the original number of max
 * threads in flight (and slightly lower performance).
 * - On CC 3.7 there are enough registers to double the number of threads; using
 * NTHREAD_Z == 2 is fastest with 16 blocks (TODO: test with RF and other kernels
 * with low-register use).
 *
 * Note that the current kernel implementation only supports NTHREAD_Z > 1 with
 * shuffle-based reduction, hence CC >= 3.0.
 *
 *
 * NOTEs on Volta / CUDA 9 extensions:
 *
 * - While active thread masks are required for the warp collectives
 *   (we use any and shfl), the kernel is designed such that all conditions
 *   (other than the inner-most distance check) including loop trip counts
 *   are warp-synchronous. Therefore, we don't need ballot to compute the
 *   active masks as these are all full-warp masks.
 *
 * - TODO: reconsider the use of __syncwarp(): its only role is currently to prevent
 *   WAR hazard due to the cj preload; we should try to replace it with direct
 *   loads (which may be faster given the improved L1 on Volta).
 */

/* Kernel launch bounds for different compute capabilities. The value of NTHREAD_Z
 * determines the number of threads per block and it is chosen such that
 * 16 blocks/multiprocessor can be kept in flight.
 * - CC 3.0,3.5, and >=5.0: NTHREAD_Z=1, (64, 16) bounds
 * - CC 3.7:                NTHREAD_Z=2, (128, 16) bounds
 *
 * Note: convenience macros, need to be undef-ed at the end of the file.
 */
#if GMX_PTX_ARCH == 370
#    define NTHREAD_Z (2)
#    define MIN_BLOCKS_PER_MP (16)
#else
#    define NTHREAD_Z (1)
#    define MIN_BLOCKS_PER_MP (16)
#endif /* GMX_PTX_ARCH == 370 */
#define THREADS_PER_BLOCK (c_clSize * c_clSize * NTHREAD_Z)
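/* Illustrative note (assuming the default c_clSize == 8): THREADS_PER_BLOCK evaluates to
 * 8 * 8 * 1 = 64 on most architectures and 8 * 8 * 2 = 128 on CC 3.7, so the launch bounds
 * request (64, 16) or (128, 16) threads/blocks per multiprocessor, matching the table above.
 */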

#if GMX_PTX_ARCH >= 350
/**@}*/
__launch_bounds__(THREADS_PER_BLOCK, MIN_BLOCKS_PER_MP)
#else
__launch_bounds__(THREADS_PER_BLOCK)
#endif /* GMX_PTX_ARCH >= 350 */
#ifdef PRUNE_NBL
#    ifdef CALC_ENERGIES
        __global__ void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _VF_prune_cuda)
#    else
        __global__ void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _F_prune_cuda)
#    endif /* CALC_ENERGIES */
#else
#    ifdef CALC_ENERGIES
        __global__ void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _VF_cuda)
#    else
        __global__ void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _F_cuda)
#    endif /* CALC_ENERGIES */
#endif     /* PRUNE_NBL */
                (const NBAtomData atdat, const NBParamGpu nbparam, const Nbnxm::gpu_plist plist, bool bCalcFshift)
#ifdef FUNCTION_DECLARATION_ONLY
                        ; /* Only do function declaration, omit the function body. */
#else
{
    /* convenience variables */
    const nbnxn_sci_t* pl_sci = plist.sci;
#    ifndef PRUNE_NBL
    const
#    endif
            nbnxn_cj4_t* pl_cj4      = plist.cj4;
    const nbnxn_excl_t*  excl        = plist.excl;
#    ifndef LJ_COMB
    const int*           atom_types  = atdat.atomTypes;
    int                  ntypes      = atdat.numTypes;
#    else
    const float2* lj_comb = atdat.ljComb;
    float2        ljcp_i, ljcp_j;
#    endif
    const float4*        xq          = atdat.xq;
    float3*              f           = asFloat3(atdat.f);
    const float3*        shift_vec   = asFloat3(atdat.shiftVec);
    float                rcoulomb_sq = nbparam.rcoulomb_sq;
#    ifdef VDW_CUTOFF_CHECK
    float                rvdw_sq     = nbparam.rvdw_sq;
    float                vdw_in_range;
#    endif
#    ifdef LJ_EWALD
    float                lje_coeff2, lje_coeff6_6;
#    endif
#    ifdef EL_RF
    float                two_k_rf    = nbparam.two_k_rf;
#    endif
#    ifdef EL_EWALD_ANA
    float                beta2       = nbparam.ewald_beta * nbparam.ewald_beta;
    float                beta3       = nbparam.ewald_beta * nbparam.ewald_beta * nbparam.ewald_beta;
#    endif
#    ifdef PRUNE_NBL
    float                rlist_sq    = nbparam.rlistOuter_sq;
#    endif

#    ifdef CALC_ENERGIES
#        ifdef EL_EWALD_ANY
    float                beta        = nbparam.ewald_beta;
    float                ewald_shift = nbparam.sh_ewald;
#        else
    float reactionFieldShift = nbparam.c_rf;
#        endif /* EL_EWALD_ANY */
    float*               e_lj        = atdat.eLJ;
    float*               e_el        = atdat.eElec;
#    endif     /* CALC_ENERGIES */

    /* thread/block/warp id-s */
    unsigned int tidxi = threadIdx.x;
    unsigned int tidxj = threadIdx.y;
    unsigned int tidx  = threadIdx.y * blockDim.x + threadIdx.x;
#    if NTHREAD_Z == 1
    unsigned int tidxz = 0;
#    else
    unsigned int  tidxz = threadIdx.z;
#    endif
    unsigned int bidx  = blockIdx.x;
    unsigned int widx  = tidx / warp_size; /* warp index */
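    /* Illustrative note (assuming c_clSize == 8 and a warp_size of 32): each NTHREAD_Z slice
     * of the block holds 8 x 8 = 64 threads, i.e. a pair of warps, and widx selects which of
     * the two warps of that pair (the c_nbnxnGpuClusterpairSplit halves) the thread belongs to.
     */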

    int          sci, ci, cj, ai, aj, cij4_start, cij4_end;
#    ifndef LJ_COMB
    int          typei, typej;
#    endif
    int          i, jm, j4, wexcl_idx;
    float        qi, qj_f, r2, inv_r, inv_r2;
#    if !defined LJ_COMB_LB || defined CALC_ENERGIES
    float        inv_r6, c6, c12;
#    endif
#    ifdef LJ_COMB_LB
    float        sigma, epsilon;
#    endif
    float        int_bit, F_invr;
#    ifdef CALC_ENERGIES
    float        E_lj, E_el;
#    endif
#    if defined CALC_ENERGIES || defined LJ_POT_SWITCH
    float        E_lj_p;
#    endif
    unsigned int wexcl, imask, mask_ji;
    float4       xqbuf;
    float3       xi, xj, rv, f_ij, fcj_buf;
    float3       fci_buf[c_nbnxnGpuNumClusterPerSupercluster]; /* i force buffer */
    nbnxn_sci_t  nb_sci;

    /*! i-cluster interaction mask for a super-cluster with all c_nbnxnGpuNumClusterPerSupercluster=8 bits set */
    const unsigned superClInteractionMask = ((1U << c_nbnxnGpuNumClusterPerSupercluster) - 1U);
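    /* Illustrative note: with c_nbnxnGpuNumClusterPerSupercluster == 8, superClInteractionMask
     * is 0xFF. imask packs one such 8-bit group per j-cluster of the current cj4 (jm = 0..3);
     * bit i of group jm flags a possible interaction between j-cluster jm and i-cluster i of
     * the super-cluster, and mask_ji below walks these bits one i-cluster at a time.
     */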

    /*********************************************************************
     * Set up shared memory pointers.
     * sm_nextSlotPtr should always be updated to point to the "next slot",
     * that is past the last point where data has been stored.
     */
    extern __shared__ char sm_dynamicShmem[];
    char*                  sm_nextSlotPtr = sm_dynamicShmem;
    static_assert(sizeof(char) == 1,
                  "The shared memory offset calculation assumes that char is 1 byte");

    /* shmem buffer for i x+q pre-loading */
    float4* xqib = (float4*)sm_nextSlotPtr;
    sm_nextSlotPtr += (c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(*xqib));

    /* shmem buffer for cj, for each warp separately */
    int* cjs = (int*)(sm_nextSlotPtr);
    /* the cjs buffer's use expects a base pointer offset for pairs of warps in the j-concurrent execution */
    cjs += tidxz * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize;
    sm_nextSlotPtr += (NTHREAD_Z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(*cjs));

#    ifndef LJ_COMB
    /* shmem buffer for i atom-type pre-loading */
    int* atib = (int*)sm_nextSlotPtr;
    sm_nextSlotPtr += (c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(*atib));
#    else
    /* shmem buffer for i-atom LJ combination rule parameters */
    float2* ljcpib = (float2*)sm_nextSlotPtr;
    sm_nextSlotPtr += (c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(*ljcpib));
#    endif
    /*********************************************************************/
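    /* Illustrative note (assuming c_clSize == 8, c_nbnxnGpuNumClusterPerSupercluster == 8,
     * c_nbnxnGpuClusterpairSplit == 2 and c_nbnxnGpuJgroupSize == 4): xqib takes 64 float4
     * (1 KiB), cjs takes NTHREAD_Z * 8 ints (32 or 64 B), and atib/ljcpib take 64 ints/float2
     * (256/512 B). The total must agree with what the host requests in
     * calc_shmem_required_nonbonded() when launching the kernel.
     */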

    nb_sci     = pl_sci[bidx];         /* my i super-cluster's index = current bidx */
    sci        = nb_sci.sci;           /* super-cluster */
    cij4_start = nb_sci.cj4_ind_start; /* first ...*/
    cij4_end   = nb_sci.cj4_ind_end;   /* and last index of j clusters */

    if (tidxz == 0)
    {
        /* Pre-load i-atom x and q into shared memory */
        ci = sci * c_nbnxnGpuNumClusterPerSupercluster + tidxj;
        ai = ci * c_clSize + tidxi;

        float* shiftptr = (float*)&shift_vec[nb_sci.shift];
        xqbuf = xq[ai] + make_float4(LDG(shiftptr), LDG(shiftptr + 1), LDG(shiftptr + 2), 0.0f);
        xqbuf.w *= nbparam.epsfac;
        xqib[tidxj * c_clSize + tidxi] = xqbuf;

#    ifndef LJ_COMB
        /* Pre-load the i-atom types into shared memory */
        atib[tidxj * c_clSize + tidxi] = atom_types[ai];
#    else
        /* Pre-load the LJ combination parameters into shared memory */
        ljcpib[tidxj * c_clSize + tidxi] = lj_comb[ai];
#    endif
    }
    __syncthreads();

    for (i = 0; i < c_nbnxnGpuNumClusterPerSupercluster; i++)
    {
        fci_buf[i] = make_float3(0.0f);
    }

#    ifdef LJ_EWALD
    /* TODO: we are trading registers with flops by keeping lje_coeff-s, try re-calculating it later */
    lje_coeff2   = nbparam.ewaldcoeff_lj * nbparam.ewaldcoeff_lj;
    lje_coeff6_6 = lje_coeff2 * lje_coeff2 * lje_coeff2 * c_oneSixth;
#    endif


#    ifdef CALC_ENERGIES
    E_lj         = 0.0f;
    E_el         = 0.0f;

#        ifdef EXCLUSION_FORCES /* Ewald or RF */
    if (nb_sci.shift == CENTRAL && pl_cj4[cij4_start].cj[0] == sci * c_nbnxnGpuNumClusterPerSupercluster)
    {
        /* we have the diagonal: add the charge and LJ self interaction energy term */
        for (i = 0; i < c_nbnxnGpuNumClusterPerSupercluster; i++)
        {
#            if defined EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF
            qi = xqib[i * c_clSize + tidxi].w;
            E_el += qi * qi;
#            endif

#            ifdef LJ_EWALD
            // load only the first 4 bytes of the parameter pair (equivalent to nbfp[idx].x)
            E_lj += LDG((float*)&nbparam.nbfp[atom_types[(sci * c_nbnxnGpuNumClusterPerSupercluster + i) * c_clSize + tidxi]
                                              * (ntypes + 1)]);
#            endif
        }

        /* divide the self term(s) equally over the j-threads, then multiply with the coefficients. */
#            ifdef LJ_EWALD
        E_lj /= c_clSize * NTHREAD_Z;
        E_lj *= 0.5f * c_oneSixth * lje_coeff6_6;
#            endif

#            if defined EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF
        /* Correct for epsfac^2 due to adding qi^2 */
        E_el /= nbparam.epsfac * c_clSize * NTHREAD_Z;
#                if defined EL_RF || defined EL_CUTOFF
        E_el *= -0.5f * reactionFieldShift;
#                else
        E_el *= -beta * M_FLOAT_1_SQRTPI; /* last factor 1/sqrt(pi) */
#                endif
#            endif /* EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF */
    }
#        endif     /* EXCLUSION_FORCES */

#    endif /* CALC_ENERGIES */

#    ifdef EXCLUSION_FORCES
    const int nonSelfInteraction = !(nb_sci.shift == CENTRAL & tidxj <= tidxi);
#    endif

    /* loop over the j clusters seen by any of the atoms in the current super-cluster;
     * the loop stride NTHREAD_Z ensures that consecutive warp-pairs are assigned
     * consecutive j4 entries.
     */
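    /* Illustrative note: with NTHREAD_Z == 2 the tidxz == 0 warp-pair processes
     * j4 = cij4_start, cij4_start + 2, ... while the tidxz == 1 warp-pair processes
     * j4 = cij4_start + 1, cij4_start + 3, ..., so the two slices interleave over the list.
     */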
    for (j4 = cij4_start + tidxz; j4 < cij4_end; j4 += NTHREAD_Z)
    {
        wexcl_idx = pl_cj4[j4].imei[widx].excl_ind;
        imask     = pl_cj4[j4].imei[widx].imask;
        wexcl     = excl[wexcl_idx].pair[(tidx) & (warp_size - 1)];

#    ifndef PRUNE_NBL
        if (imask)
#    endif
        {
            /* Pre-load cj into shared memory on both warps separately */
            if ((tidxj == 0 | tidxj == 4) & (tidxi < c_nbnxnGpuJgroupSize))
            {
                cjs[tidxi + tidxj * c_nbnxnGpuJgroupSize / c_splitClSize] = pl_cj4[j4].cj[tidxi];
            }
            __syncwarp(c_fullWarpMask);

            /* Unrolling this loop
               - with pruning leads to register spilling;
               - on Kepler and later it is much slower;
               Tested with up to nvcc 7.5 */
            for (jm = 0; jm < c_nbnxnGpuJgroupSize; jm++)
            {
                if (imask & (superClInteractionMask << (jm * c_nbnxnGpuNumClusterPerSupercluster)))
                {
                    mask_ji = (1U << (jm * c_nbnxnGpuNumClusterPerSupercluster));

                    cj = cjs[jm + (tidxj & 4) * c_nbnxnGpuJgroupSize / c_splitClSize];
                    aj = cj * c_clSize + tidxj;

                    /* load j atom data */
                    xqbuf = xq[aj];
                    xj    = make_float3(xqbuf.x, xqbuf.y, xqbuf.z);
                    qj_f  = xqbuf.w;
#    ifndef LJ_COMB
                    typej = atom_types[aj];
#    else
                    ljcp_j = lj_comb[aj];
#    endif

                    fcj_buf = make_float3(0.0f);

#    if !defined PRUNE_NBL
#        pragma unroll 8
#    endif
                    for (i = 0; i < c_nbnxnGpuNumClusterPerSupercluster; i++)
                    {
                        if (imask & mask_ji)
                        {
                            ci = sci * c_nbnxnGpuNumClusterPerSupercluster + i; /* i cluster index */

                            /* all threads load an atom from i cluster ci into shmem! */
                            xqbuf = xqib[i * c_clSize + tidxi];
                            xi    = make_float3(xqbuf.x, xqbuf.y, xqbuf.z);

                            /* distance between i and j atoms */
                            rv = xi - xj;
                            r2 = norm2(rv);

#    ifdef PRUNE_NBL
                            /* If _none_ of the atom pairs are in cutoff range,
                               the bit corresponding to the current
                               cluster-pair in imask gets set to 0. */
                            if (!__any_sync(c_fullWarpMask, r2 < rlist_sq))
                            {
                                imask &= ~mask_ji;
                            }
#    endif

                            int_bit = (wexcl & mask_ji) ? 1.0f : 0.0f;

                            /* cutoff & exclusion check */
#    ifdef EXCLUSION_FORCES
                            if ((r2 < rcoulomb_sq) * (nonSelfInteraction | (ci != cj)))
#    else
                            if ((r2 < rcoulomb_sq) * int_bit)
#    endif
                            {
                                /* load the rest of the i-atom parameters */
                                qi = xqbuf.w;

#    ifndef LJ_COMB
                                /* LJ 6*C6 and 12*C12 */
                                typei = atib[i * c_clSize + tidxi];
                                fetch_nbfp_c6_c12(c6, c12, nbparam, ntypes * typei + typej);
#    else
                                ljcp_i       = ljcpib[i * c_clSize + tidxi];
#        ifdef LJ_COMB_GEOM
                                c6           = ljcp_i.x * ljcp_j.x;
                                c12          = ljcp_i.y * ljcp_j.y;
#        else
                                /* LJ 2^(1/6)*sigma and 12*epsilon */
                                sigma   = ljcp_i.x + ljcp_j.x;
                                epsilon = ljcp_i.y * ljcp_j.y;
#            if defined CALC_ENERGIES || defined LJ_FORCE_SWITCH || defined LJ_POT_SWITCH
                                convert_sigma_epsilon_to_c6_c12(sigma, epsilon, &c6, &c12);
#            endif
#        endif /* LJ_COMB_GEOM */
#    endif     /* LJ_COMB */

                                // Ensure distances do not become so small that r^-12 overflows
                                r2 = max(r2, c_nbnxnMinDistanceSquared);

                                inv_r  = rsqrt(r2);
                                inv_r2 = inv_r * inv_r;
#    if !defined LJ_COMB_LB || defined CALC_ENERGIES
                                inv_r6 = inv_r2 * inv_r2 * inv_r2;
#        ifdef EXCLUSION_FORCES
                                /* We could mask inv_r2, but with Ewald
                                 * masking both inv_r6 and F_invr is faster */
                                inv_r6 *= int_bit;
#        endif /* EXCLUSION_FORCES */

                                F_invr = inv_r6 * (c12 * inv_r6 - c6) * inv_r2;
#        if defined CALC_ENERGIES || defined LJ_POT_SWITCH
                                E_lj_p = int_bit
                                         * (c12 * (inv_r6 * inv_r6 + nbparam.repulsion_shift.cpot) * c_oneTwelveth
                                            - c6 * (inv_r6 + nbparam.dispersion_shift.cpot) * c_oneSixth);
#        endif
#    else /* !LJ_COMB_LB || CALC_ENERGIES */
                                float sig_r  = sigma * inv_r;
                                float sig_r2 = sig_r * sig_r;
                                float sig_r6 = sig_r2 * sig_r2 * sig_r2;
#        ifdef EXCLUSION_FORCES
                                sig_r6 *= int_bit;
#        endif /* EXCLUSION_FORCES */

                                F_invr = epsilon * sig_r6 * (sig_r6 - 1.0f) * inv_r2;
#    endif     /* !LJ_COMB_LB || CALC_ENERGIES */

#    ifdef LJ_FORCE_SWITCH
#        ifdef CALC_ENERGIES
                                calculate_force_switch_F_E(nbparam, c6, c12, inv_r, r2, &F_invr, &E_lj_p);
#        else
                                calculate_force_switch_F(nbparam, c6, c12, inv_r, r2, &F_invr);
#        endif /* CALC_ENERGIES */
#    endif     /* LJ_FORCE_SWITCH */


#    ifdef LJ_EWALD
#        ifdef LJ_EWALD_COMB_GEOM
#            ifdef CALC_ENERGIES
                                calculate_lj_ewald_comb_geom_F_E(
                                        nbparam, typei, typej, r2, inv_r2, lje_coeff2, lje_coeff6_6, int_bit, &F_invr, &E_lj_p);
#            else
                                calculate_lj_ewald_comb_geom_F(
                                        nbparam, typei, typej, r2, inv_r2, lje_coeff2, lje_coeff6_6, &F_invr);
#            endif /* CALC_ENERGIES */
#        elif defined LJ_EWALD_COMB_LB
                                calculate_lj_ewald_comb_LB_F_E(nbparam,
                                                               typei,
                                                               typej,
                                                               r2,
                                                               inv_r2,
                                                               lje_coeff2,
                                                               lje_coeff6_6,
#            ifdef CALC_ENERGIES
                                                               int_bit,
                                                               &F_invr,
                                                               &E_lj_p
#            else
                                                               0,
                                                               &F_invr,
                                                               nullptr
#            endif /* CALC_ENERGIES */
                                );
#        endif     /* LJ_EWALD_COMB_GEOM */
#    endif         /* LJ_EWALD */

#    ifdef LJ_POT_SWITCH
#        ifdef CALC_ENERGIES
                                calculate_potential_switch_F_E(nbparam, inv_r, r2, &F_invr, &E_lj_p);
#        else
                                calculate_potential_switch_F(nbparam, inv_r, r2, &F_invr, &E_lj_p);
#        endif /* CALC_ENERGIES */
#    endif     /* LJ_POT_SWITCH */

#    ifdef VDW_CUTOFF_CHECK
                                /* Separate VDW cut-off check to enable twin-range cut-offs
                                 * (rvdw < rcoulomb <= rlist)
                                 */
                                vdw_in_range = (r2 < rvdw_sq) ? 1.0f : 0.0f;
                                F_invr *= vdw_in_range;
#        ifdef CALC_ENERGIES
                                E_lj_p *= vdw_in_range;
#        endif
#    endif /* VDW_CUTOFF_CHECK */

#    ifdef CALC_ENERGIES
                                E_lj += E_lj_p;
#    endif


#    ifdef EL_CUTOFF
#        ifdef EXCLUSION_FORCES
                                F_invr += qi * qj_f * int_bit * inv_r2 * inv_r;
#        else
                                F_invr += qi * qj_f * inv_r2 * inv_r;
#        endif
#    endif
#    ifdef EL_RF
                                F_invr += qi * qj_f * (int_bit * inv_r2 * inv_r - two_k_rf);
#    endif
#    if defined   EL_EWALD_ANA
                                F_invr += qi * qj_f
                                          * (int_bit * inv_r2 * inv_r + pmecorrF(beta2 * r2) * beta3);
#    elif defined EL_EWALD_TAB
                                F_invr += qi * qj_f
                                          * (int_bit * inv_r2
                                             - interpolate_coulomb_force_r(nbparam, r2 * inv_r))
                                          * inv_r;
#    endif /* EL_EWALD_ANA/TAB */

#    ifdef CALC_ENERGIES
#        ifdef EL_CUTOFF
                                E_el += qi * qj_f * (int_bit * inv_r - reactionFieldShift);
#        endif
#        ifdef EL_RF
                                E_el += qi * qj_f
                                        * (int_bit * inv_r + 0.5f * two_k_rf * r2 - reactionFieldShift);
#        endif
#        ifdef EL_EWALD_ANY
                                /* 1.0f - erff is faster than erfcf */
                                E_el += qi * qj_f
                                        * (inv_r * (int_bit - erff(r2 * inv_r * beta)) - int_bit * ewald_shift);
#        endif /* EL_EWALD_ANY */
#    endif
                                f_ij = rv * F_invr;

                                /* accumulate j forces in registers */
                                fcj_buf -= f_ij;

                                /* accumulate i forces in registers */
                                fci_buf[i] += f_ij;
                            }
                        }

                        /* shift the mask bit by 1 */
                        mask_ji += mask_ji;
                    }

                    /* reduce j forces */
                    reduce_force_j_warp_shfl(fcj_buf, f, tidxi, aj, c_fullWarpMask);
                }
            }
#    ifdef PRUNE_NBL
            /* Update the imask with the new one which does not contain the
               out of range clusters anymore. */
            pl_cj4[j4].imei[widx].imask = imask;
#    endif
        }
        // avoid shared memory WAR hazards between loop iterations
        __syncwarp(c_fullWarpMask);
    }

    /* skip central shifts when summing shift forces */
    if (nb_sci.shift == CENTRAL)
    {
        bCalcFshift = false;
    }

    float fshift_buf = 0.0f;

    /* reduce i forces */
    for (i = 0; i < c_nbnxnGpuNumClusterPerSupercluster; i++)
    {
        ai = (sci * c_nbnxnGpuNumClusterPerSupercluster + i) * c_clSize + tidxi;
        reduce_force_i_warp_shfl(fci_buf[i], f, &fshift_buf, bCalcFshift, tidxj, ai, c_fullWarpMask);
    }

    /* add up local shift forces into global mem, tidxj indexes x,y,z */
    if (bCalcFshift && (tidxj & 3) < 3)
    {
        float3* fShift = asFloat3(atdat.fShift);
        atomicAdd(&(fShift[nb_sci.shift].x) + (tidxj & 3), fshift_buf);
    }

#    ifdef CALC_ENERGIES
    /* reduce the energies over warps and store into global memory */
    reduce_energy_warp_shfl(E_lj, E_el, e_lj, e_el, tidx, c_fullWarpMask);
#    endif
}
#endif /* FUNCTION_DECLARATION_ONLY */

#undef NTHREAD_Z
#undef MIN_BLOCKS_PER_MP
#undef THREADS_PER_BLOCK

#undef EL_EWALD_ANY
#undef EXCLUSION_FORCES
#undef LJ_EWALD

#undef LJ_COMB