/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012-2018, The GROMACS development team.
 * Copyright (c) 2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \internal \file
 *  \brief OpenCL non-bonded kernel.
 *
 *  OpenCL 1.2 support is expected.
 *
 *  \author Anca Hamuraru <anca@streamcomputing.eu>
 *  \author Szilárd Páll <pall.szilard@gmail.com>
 *  \ingroup module_nbnxm
 */

/* Currently we enable CJ prefetch for AMD/NVIDIA and disable it for the "nowarp" kernel.
 * Note that this setting must precede the kernel_utils include.
 */
#include "nbnxm_ocl_kernel_utils.clh"

/////////////////////////////////////////////////////////////////////////////////////////////////

#if defined EL_EWALD_ANA || defined EL_EWALD_TAB
/* Note: convenience macro, needs to be undef-ed at the end of the file. */
#    define EL_EWALD_ANY
#endif

#if defined LJ_EWALD_COMB_GEOM || defined LJ_EWALD_COMB_LB
/* Note: convenience macro, needs to be undef-ed at the end of the file. */
#    define LJ_EWALD
#endif

#if defined EL_EWALD_ANY || defined EL_RF || defined LJ_EWALD \
        || (defined EL_CUTOFF && defined CALC_ENERGIES)
/* Macro to control the calculation of exclusion forces in the kernel.
 * We need them with Ewald (elec/vdw) and RF; with plain cut-off only
 * the exclusion energy terms are needed.
 *
 * Note: convenience macro, needs to be undef-ed at the end of the file.
 */
#    define EXCLUSION_FORCES
#endif

#if defined LJ_COMB_GEOM || defined LJ_COMB_LB
/* Note: convenience macro, needs to be undef-ed at the end of the file. */
#    define LJ_COMB
#endif
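
/* All of the convenience macros above are derived from the kernel flavor
 * defines (EL_*, LJ_*, CALC_ENERGIES, PRUNE_NBL) passed to the OpenCL
 * compiler when this file is built, so the flavor is fixed entirely at
 * compile time. */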

/*
   Kernel launch parameters:
    - #blocks   = #pair lists, blockId = pair list Id
    - #threads  = CL_SIZE^2
    - shmem     = CL_SIZE^2 * sizeof(float)

    Each thread calculates an i-force component, taking one pair of i-j atoms.

   TODO: implement 128 threads/wavefront by porting over the NTHREAD_Z/j4 loop
   "horizontal splitting" over threads.
 */

/* NOTE:
   NB_KERNEL_FUNC_NAME differs from the CUDA equivalent: since OpenCL does not
   support variadic macros, this version takes exactly 2 arguments. Thus, if more
   strings need to be appended, a new macro must be written, or they must be
   appended directly here.
 */
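/* Depending on PRUNE_NBL and CALC_ENERGIES, this file generates one of four
 * kernel flavors: *_VF_prune_opencl, *_F_prune_opencl, *_VF_opencl or
 * *_F_opencl (V = energies, F = forces, prune = pair-list pruning). */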
__attribute__((reqd_work_group_size(CL_SIZE, CL_SIZE, 1)))
#ifdef cl_intel_required_subgroup_size
__attribute__((intel_reqd_sub_group_size(SUBGROUP_SIZE)))
#endif
#ifdef PRUNE_NBL
#    ifdef CALC_ENERGIES
__kernel void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _VF_prune_opencl)
#    else
__kernel void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _F_prune_opencl)
#    endif
#else
#    ifdef CALC_ENERGIES
__kernel void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _VF_opencl)
#    else
__kernel void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _F_opencl)
#    endif
#endif
        (
#ifndef LJ_COMB
                int ntypes, /* IN  */
#endif
                cl_nbparam_params_t nbparam_params,       /* IN  */
                const __global float4* restrict xq,       /* IN  */
                __global float* restrict f,               /* OUT stores float3 values */
                __global float* restrict gmx_unused e_lj, /* OUT */
                __global float* restrict gmx_unused e_el, /* OUT */
                __global float* restrict fshift,          /* OUT stores float3 values */
#ifdef LJ_COMB
                const __global float2* restrict lj_comb, /* IN stores float2 values */
#else
                const __global int* restrict atom_types, /* IN  */
#endif
                const __global float* restrict shift_vec,          /* IN stores float3 values */
                __constant const float2* restrict gmx_unused nbfp, /* IN  */
                __constant const float2* restrict gmx_unused nbfp_comb,  /* IN  */
                __constant const float* restrict gmx_unused coulomb_tab, /* IN  */
                const __global nbnxn_sci_t* pl_sci,                      /* IN  */
#ifndef PRUNE_NBL
                const
#endif
                __global nbnxn_cj4_t* pl_cj4,             /* OUT / IN */
                const __global nbnxn_excl_t* excl,        /* IN  */
                int                          bCalcFshift, /* IN  */
                __local float4* xqib                      /* Pointer to dyn alloc'ed shmem */
        )
{
    /* convenience variables */
    const cl_nbparam_params_t* const nbparam = &nbparam_params;

    const float rcoulomb_sq = nbparam->rcoulomb_sq;
#ifdef VDW_CUTOFF_CHECK
    const float rvdw_sq = nbparam->rvdw_sq;
#endif
#ifdef EL_RF
    const float two_k_rf = nbparam->two_k_rf;
#endif
#ifdef EL_EWALD_TAB
    const float coulomb_tab_scale = nbparam->coulomb_tab_scale;
#endif
#ifdef EL_EWALD_ANA
    const float beta2 = nbparam->ewald_beta * nbparam->ewald_beta;
    const float beta3 = nbparam->ewald_beta * nbparam->ewald_beta * nbparam->ewald_beta;
#endif
#ifdef PRUNE_NBL
    const float rlist_sq = nbparam->rlistOuter_sq;
#endif

#ifdef CALC_ENERGIES
#    ifdef EL_EWALD_ANY
    const float beta        = nbparam->ewald_beta;
    const float ewald_shift = nbparam->sh_ewald;
#    else
    const float gmx_unused c_rf = nbparam->c_rf;
#    endif /* EL_EWALD_ANY */
#endif     /* CALC_ENERGIES */

    /* thread/block/warp id-s */
    const int tidxi = get_local_id(0);
    const int tidxj = get_local_id(1);
    const int tidx  = (int)(get_local_id(1) * get_local_size(0) + get_local_id(0));
    const int bidx  = get_group_id(0);
    const int widx  = tidx / WARP_SIZE; /* warp index */

    /*! i-cluster interaction mask for a super-cluster with all c_nbnxnGpuNumClusterPerSupercluster=8 bits set */
    const unsigned superClInteractionMask = ((1U << c_nbnxnGpuNumClusterPerSupercluster) - 1U);

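    /* LOCAL_OFFSET carves consecutive sub-buffers out of the single dynamically
     * allocated local-memory block passed in as xqib: after each allocation the
     * macro is redefined to point just past the buffer carved out so far. */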
#define LOCAL_OFFSET (xqib + c_nbnxnGpuNumClusterPerSupercluster * CL_SIZE)
    CjType cjs = 0;
#if USE_CJ_PREFETCH
    /* shmem buffer for cj, for both warps separately */
    cjs = (__local int*)(LOCAL_OFFSET);
#    undef LOCAL_OFFSET
#    define LOCAL_OFFSET (cjs + 2 * c_nbnxnGpuJgroupSize)
#endif // USE_CJ_PREFETCH

#ifdef IATYPE_SHMEM
#    ifndef LJ_COMB
    /* shmem buffer for i atom-type pre-loading */
    __local int* atib = (__local int*)(LOCAL_OFFSET); //NOLINT(google-readability-casting)
#        undef LOCAL_OFFSET
#        define LOCAL_OFFSET (atib + c_nbnxnGpuNumClusterPerSupercluster * CL_SIZE)
#    else
    __local float2* ljcpib      = (__local float2*)(LOCAL_OFFSET);
#        undef LOCAL_OFFSET
#        define LOCAL_OFFSET (ljcpib + c_nbnxnGpuNumClusterPerSupercluster * CL_SIZE)
#    endif
#endif

#if !REDUCE_SHUFFLE
    /* shmem j force buffer */
    __local float* f_buf = (__local float*)(LOCAL_OFFSET);
#    undef LOCAL_OFFSET
#    define LOCAL_OFFSET (f_buf + CL_SIZE * CL_SIZE * 3)
#else
    __local float* f_buf             = 0;
#endif
#if !USE_SUBGROUP_ANY
    /* Local buffer used to implement the __any warp vote function from CUDA.
       volatile is used to avoid compiler optimizations for AMD builds. */
    //NOLINTNEXTLINE(google-readability-casting)
    volatile __local int* warp_any = (__local int*)(LOCAL_OFFSET);
#else
    __local int gmx_unused* warp_any = 0;
#endif
#undef LOCAL_OFFSET

    const nbnxn_sci_t nb_sci     = pl_sci[bidx]; /* my i super-cluster's index = current bidx */
    const int         sci        = nb_sci.sci;   /* super-cluster */
    const int         cij4_start = nb_sci.cj4_ind_start; /* first... */
    const int         cij4_end   = nb_sci.cj4_ind_end;   /* ...and last index of j clusters */

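    /* Each of the CL_SIZE*CL_SIZE threads loads one i atom; with the default
     * CL_SIZE == c_nbnxnGpuNumClusterPerSupercluster == 8 the loop below
     * executes exactly once, covering all 64 i atoms of the super-cluster. */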
    for (int i = 0; i < c_nbnxnGpuNumClusterPerSupercluster; i += CL_SIZE)
    {
        /* Pre-load i-atom x and q into shared memory */
        const int ci = sci * c_nbnxnGpuNumClusterPerSupercluster + tidxj + i;
        const int ai = ci * CL_SIZE + tidxi;

        float4 xqbuf = xq[ai]
                       + (float4)(shift_vec[3 * nb_sci.shift],
                                  shift_vec[3 * nb_sci.shift + 1],
                                  shift_vec[3 * nb_sci.shift + 2],
                                  0.0F);
        xqbuf.w *= nbparam->epsfac;
        xqib[(tidxj + i) * CL_SIZE + tidxi] = xqbuf;
#ifdef IATYPE_SHMEM
#    ifndef LJ_COMB
        /* Pre-load the i-atom types into shared memory */
        atib[(tidxj + i) * CL_SIZE + tidxi] = atom_types[ai];
#    else
        ljcpib[(tidxj + i) * CL_SIZE + tidxi] = lj_comb[ai];
#    endif
#endif
    }
#if !USE_SUBGROUP_ANY
    /* Initialise the warp vote: with an 8x8 block there are 2 warps on NVIDIA */
    if (tidx == 0 || tidx == WARP_SIZE)
    {
        warp_any[widx] = 0;
    }
#endif
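    /* wait until all i-atom data (and the warp-vote flags) are in local memory
     * before any thread reads them */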
    barrier(CLK_LOCAL_MEM_FENCE);

    fvec fci_buf[c_nbnxnGpuNumClusterPerSupercluster]; /* i force buffer */
    for (int ci_offset = 0; ci_offset < c_nbnxnGpuNumClusterPerSupercluster; ci_offset++)
    {
        fci_buf[ci_offset][0] = 0.0F;
        fci_buf[ci_offset][1] = 0.0F;
        fci_buf[ci_offset][2] = 0.0F;
    }

#ifdef LJ_EWALD
    /* TODO: we are trading registers for flops by keeping the lje_coeff-s; try re-calculating them later */
    const float lje_coeff2   = nbparam->ewaldcoeff_lj * nbparam->ewaldcoeff_lj;
    const float lje_coeff6_6 = lje_coeff2 * lje_coeff2 * lje_coeff2 * ONE_SIXTH_F;
#endif /* LJ_EWALD */


#ifdef CALC_ENERGIES
    float E_lj = 0.0F;
    float E_el = 0.0F;

#    if defined EXCLUSION_FORCES /* Ewald or RF */
    if (nb_sci.shift == c_centralShiftIndex
        && pl_cj4[cij4_start].cj[0] == sci * c_nbnxnGpuNumClusterPerSupercluster)
    {
        /* we have the diagonal: add the charge and LJ self-interaction energy term */
        for (int i = 0; i < c_nbnxnGpuNumClusterPerSupercluster; i++)
        {
#        if defined EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF
            const float qi = xqib[i * CL_SIZE + tidxi].w;
            E_el += qi * qi;
#        endif
#        if defined LJ_EWALD
            E_lj += nbfp[atom_types[(sci * c_nbnxnGpuNumClusterPerSupercluster + i) * CL_SIZE + tidxi] * (ntypes + 1)]
                            .x;
#        endif /* LJ_EWALD */
        }

        /* divide the self term(s) equally over the j-threads, then multiply by the coefficients */
#        ifdef LJ_EWALD
        E_lj /= CL_SIZE;
        E_lj *= HALF_F * ONE_SIXTH_F * lje_coeff6_6;
#        endif /* LJ_EWALD */

#        if defined EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF
        /* Correct for epsfac^2 due to adding qi^2 */
        E_el /= nbparam->epsfac * CL_SIZE;
#            if defined EL_RF || defined EL_CUTOFF
        E_el *= -HALF_F * c_rf;
#            else
        E_el *= -beta * M_FLOAT_1_SQRTPI; /* last factor 1/sqrt(pi) */
#            endif
#        endif /* EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF */
    }
#    endif /* EXCLUSION_FORCES */

#endif /* CALC_ENERGIES */

#ifdef EXCLUSION_FORCES
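    /* Zero for threads with tidxj <= tidxi in the central cell; combined with
     * (ci != cj) below this skips the divergent i == j self term (r2 == 0) on
     * the cluster self-pair, where the exclusion mask cannot be used. */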
    const int nonSelfInteraction = !(nb_sci.shift == c_centralShiftIndex & tidxj <= tidxi);
#endif

    /* loop over the j clusters seen by any of the atoms in the current super-cluster */
    for (int j4 = cij4_start; j4 < cij4_end; j4++)
    {
        const int          wexcl_idx = pl_cj4[j4].imei[widx].excl_ind;
        unsigned int       imask     = pl_cj4[j4].imei[widx].imask;
        const unsigned int wexcl     = excl[wexcl_idx].pair[(tidx) & (WARP_SIZE - 1)];

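        /* prefetch the cj indices of this j4 group into local memory; a no-op
         * when cj prefetching is compiled out (USE_CJ_PREFETCH == 0) */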
        preloadCj4(&cjs, pl_cj4[j4].cj, tidxi, tidxj, imask != 0U);

#ifndef PRUNE_NBL
        if (imask)
#endif
        {
            /* Unrolling this loop improves performance without pruning, but
             * with pruning it leads to a slowdown.
             *
             * Tested with driver 1800.5
             *
             * TODO: check loop unrolling with NVIDIA OpenCL
             */
#if !defined PRUNE_NBL && !defined _NVIDIA_SOURCE_
#    pragma unroll 4
#endif
            for (int jm = 0; jm < c_nbnxnGpuJgroupSize; jm++)
            {
                if (imask & (superClInteractionMask << (jm * c_nbnxnGpuNumClusterPerSupercluster)))
                {
                    unsigned int mask_ji = (1U << (jm * c_nbnxnGpuNumClusterPerSupercluster));

                    const int cj = loadCj(cjs, pl_cj4[j4].cj, jm, tidxi, tidxj);
                    const int aj = cj * CL_SIZE + tidxj;

                    /* load j atom data */
                    const float4 xjqbuf = xq[aj];
                    const float3 xj     = (float3)(xjqbuf.xyz);
                    const float  qj_f   = xjqbuf.w;
#ifndef LJ_COMB
                    const int typej = atom_types[aj];
#else
                    const float2 ljcp_j = lj_comb[aj];
#endif

                    float3 fcj_buf = (float3)(0.0F);

#if !defined PRUNE_NBL
#    pragma unroll 8
#endif
                    for (int i = 0; i < c_nbnxnGpuNumClusterPerSupercluster; i++)
                    {
                        if (imask & mask_ji)
                        {
                            const int gmx_unused ci = sci * c_nbnxnGpuNumClusterPerSupercluster + i; /* i cluster index */

                            /* each thread loads an i atom of cluster ci from shmem (preloaded above) */
                            const float4 xiqbuf = xqib[i * CL_SIZE + tidxi];
                            const float3 xi     = (float3)(xiqbuf.xyz);

                            /* distance between i and j atoms */
                            const float3 rv = xi - xj;
                            float        r2 = norm2(rv);

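                            /* list pruning: if no pair in this warp is within
                             * the outer cut-off, drop this i-cluster's bit
                             * from imask */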
#ifdef PRUNE_NBL
                            if (!gmx_sub_group_any(warp_any, widx, r2 < rlist_sq))
                            {
                                imask &= ~mask_ji;
                            }
#endif

                            const float int_bit = (wexcl & mask_ji) ? 1.0F : 0.0F;

                            /* cutoff & exclusion check */
#ifdef EXCLUSION_FORCES
                            if ((r2 < rcoulomb_sq) * (nonSelfInteraction | (ci != cj)))
#else
                            if ((float)(r2 < rcoulomb_sq) * int_bit != 0.0F)
#endif
                            {
                                /* load the rest of the i-atom parameters */
                                const float qi = xiqbuf.w;
#ifdef IATYPE_SHMEM
#    ifndef LJ_COMB
                                const int typei = atib[i * CL_SIZE + tidxi];
#    else
                                const float2 ljcp_i = ljcpib[i * CL_SIZE + tidxi];
#    endif
#else /* IATYPE_SHMEM */
                                const int   ai     = ci * CL_SIZE + tidxi; /* i atom index */

#    ifndef LJ_COMB
                                const int   typei  = atom_types[ai];
#    else
                                const float2 ljcp_i = lj_comb[ai];
#    endif
#endif                          /* IATYPE_SHMEM */
                                /* LJ 6*C6 and 12*C12 */
#ifndef LJ_COMB
                                const float2 c6c12 = nbfp[ntypes * typei + typej];

                                const float c6  = c6c12.x;
                                const float c12 = c6c12.y;
#else /* LJ_COMB */
#    ifdef LJ_COMB_GEOM
                                const float c6     = ljcp_i.x * ljcp_j.x;
                                const float c12    = ljcp_i.y * ljcp_j.y;
#    else
                                /* LJ 2^(1/6)*sigma and 12*epsilon */
                                const float  sigma   = ljcp_i.x + ljcp_j.x;
                                const float  epsilon = ljcp_i.y * ljcp_j.y;
#        if defined CALC_ENERGIES || defined LJ_FORCE_SWITCH || defined LJ_POT_SWITCH
                                const float2 c6c12 = convert_sigma_epsilon_to_c6_c12(sigma, epsilon);
                                const float  c6    = c6c12.x;
                                const float  c12   = c6c12.y;
#        endif
#    endif /* LJ_COMB_GEOM */
#endif     /* LJ_COMB */

                                // Ensure the distance does not become so small that r^-12 overflows.
                                r2 = max(r2, c_nbnxnMinDistanceSquared);

                                const float inv_r  = rsqrt(r2);
                                const float inv_r2 = inv_r * inv_r;
#if !defined LJ_COMB_LB || defined CALC_ENERGIES
                                float inv_r6 = inv_r2 * inv_r2 * inv_r2;
#    if defined EXCLUSION_FORCES
                                /* We could mask inv_r2, but with Ewald
                                 * masking both inv_r6 and F_invr is faster */
                                inv_r6 *= int_bit;
#    endif /* EXCLUSION_FORCES */

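                                /* F_invr accumulates the scalar force magnitude
                                 * divided by r, so the Cartesian force is later
                                 * obtained as f_ij = rv * F_invr */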
                                float F_invr = inv_r6 * (c12 * inv_r6 - c6) * inv_r2;
#    if defined CALC_ENERGIES || defined LJ_POT_SWITCH
                                float E_lj_p =
                                        int_bit
                                        * (c12 * (inv_r6 * inv_r6 + nbparam->repulsion_shift.cpot) * ONE_TWELVETH_F
                                           - c6 * (inv_r6 + nbparam->dispersion_shift.cpot) * ONE_SIXTH_F);

#    endif
#else /* ! LJ_COMB_LB || CALC_ENERGIES */
                                const float sig_r  = sigma * inv_r;
                                const float sig_r2 = sig_r * sig_r;
                                float       sig_r6 = sig_r2 * sig_r2 * sig_r2;
#    if defined EXCLUSION_FORCES
                                sig_r6 *= int_bit;
#    endif /* EXCLUSION_FORCES */

                                float F_invr = epsilon * sig_r6 * (sig_r6 - 1.0F) * inv_r2;
#endif     /* ! LJ_COMB_LB || CALC_ENERGIES */


#ifdef LJ_FORCE_SWITCH
#    ifdef CALC_ENERGIES
                                calculate_force_switch_F_E(nbparam, c6, c12, inv_r, r2, &F_invr, &E_lj_p);
#    else
                                calculate_force_switch_F(nbparam, c6, c12, inv_r, r2, &F_invr);
#    endif /* CALC_ENERGIES */
#endif     /* LJ_FORCE_SWITCH */


#ifdef LJ_EWALD
#    ifdef LJ_EWALD_COMB_GEOM
#        ifdef CALC_ENERGIES
                                calculate_lj_ewald_comb_geom_F_E(nbfp_comb,
                                                                 nbparam,
                                                                 typei,
                                                                 typej,
                                                                 r2,
                                                                 inv_r2,
                                                                 lje_coeff2,
                                                                 lje_coeff6_6,
                                                                 int_bit,
                                                                 &F_invr,
                                                                 &E_lj_p);
#        else
                                calculate_lj_ewald_comb_geom_F(
                                        nbfp_comb, typei, typej, r2, inv_r2, lje_coeff2, lje_coeff6_6, &F_invr);
#        endif /* CALC_ENERGIES */
#    elif defined LJ_EWALD_COMB_LB
                                calculate_lj_ewald_comb_LB_F_E(nbfp_comb,
                                                               nbparam,
                                                               typei,
                                                               typej,
                                                               r2,
                                                               inv_r2,
                                                               lje_coeff2,
                                                               lje_coeff6_6,
#        ifdef CALC_ENERGIES
                                                               int_bit,
                                                               true,
                                                               &F_invr,
                                                               &E_lj_p
#        else
                                                               0,
                                                               false,
                                                               &F_invr,
                                                               0
#        endif /* CALC_ENERGIES */
                                );
#    endif     /* LJ_EWALD_COMB_GEOM */
#endif         /* LJ_EWALD */

#ifdef LJ_POT_SWITCH
#    ifdef CALC_ENERGIES
                                calculate_potential_switch_F_E(nbparam, inv_r, r2, &F_invr, &E_lj_p);
#    else
                                calculate_potential_switch_F(nbparam, inv_r, r2, &F_invr, &E_lj_p);
#    endif /* CALC_ENERGIES */
#endif     /* LJ_POT_SWITCH */

#ifdef VDW_CUTOFF_CHECK
                                /* Separate VDW cut-off check to enable twin-range cut-offs
                                 * (rvdw < rcoulomb <= rlist)
                                 */
                                const float vdw_in_range = (r2 < rvdw_sq) ? 1.0F : 0.0F;
                                F_invr *= vdw_in_range;
#    ifdef CALC_ENERGIES
                                E_lj_p *= vdw_in_range;
#    endif
#endif /* VDW_CUTOFF_CHECK */

#ifdef CALC_ENERGIES
                                E_lj += E_lj_p;

#endif


#ifdef EL_CUTOFF
#    ifdef EXCLUSION_FORCES
                                F_invr += qi * qj_f * int_bit * inv_r2 * inv_r;
#    else
                                F_invr += qi * qj_f * inv_r2 * inv_r;
#    endif
#endif
#ifdef EL_RF
                                F_invr += qi * qj_f * (int_bit * inv_r2 * inv_r - two_k_rf);
#endif
#if defined EL_EWALD_ANA
                                F_invr += qi * qj_f
                                          * (int_bit * inv_r2 * inv_r + pmecorrF(beta2 * r2) * beta3);
#elif defined EL_EWALD_TAB
                                F_invr += qi * qj_f
                                          * (int_bit * inv_r2
                                             - interpolate_coulomb_force_r(
                                                       coulomb_tab, r2 * inv_r, coulomb_tab_scale))
                                          * inv_r;
#endif /* EL_EWALD_ANA/TAB */

#ifdef CALC_ENERGIES
#    ifdef EL_CUTOFF
                                E_el += qi * qj_f * (int_bit * inv_r - c_rf);
#    endif
#    ifdef EL_RF
                                E_el += qi * qj_f * (int_bit * inv_r + HALF_F * two_k_rf * r2 - c_rf);
#    endif
#    ifdef EL_EWALD_ANY
                                /* 1.0F - erff is faster than erfcf */
                                E_el += qi * qj_f
                                        * (inv_r * (int_bit - erf(r2 * inv_r * beta)) - int_bit * ewald_shift);
#    endif /* EL_EWALD_ANY */
#endif
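                                /* Newton's third law: the same f_ij is added
                                 * to the i-atom force and subtracted from the
                                 * j-atom force below */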
                                const float3 f_ij = rv * F_invr;

                                /* accumulate j forces in registers */
                                fcj_buf -= f_ij;

                                /* accumulate i forces in registers */
                                fci_buf[i][0] += f_ij.x;
                                fci_buf[i][1] += f_ij.y;
                                fci_buf[i][2] += f_ij.z;
                            }
                        }

                        /* shift the mask bit by 1 */
                        mask_ji += mask_ji;
                    }

                    /* reduce j forces */
                    reduce_force_j(f_buf, fcj_buf, f, tidxi, tidxj, aj);
                }
            }
#ifdef PRUNE_NBL
            /* Update imask with the new mask, which no longer contains the
               out-of-range clusters. */
            pl_cj4[j4].imei[widx].imask = imask;
#endif
        }
    }

    /* skip central shifts when summing shift forces */
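    /* (note: the central-box shift vector is zero, so its shift force
     * presumably cannot contribute to the virial) */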
    if (nb_sci.shift == c_centralShiftIndex)
    {
        bCalcFshift = 0;
    }
    /* reduce i forces */
    reduce_force_i_and_shift(f_buf, fci_buf, f, bCalcFshift != 0, tidxi, tidxj, sci, nb_sci.shift, fshift);

#ifdef CALC_ENERGIES
    reduce_energy(f_buf, E_lj, E_el, e_lj, e_el, tidx);
#endif
}

#undef EL_EWALD_ANY
#undef EXCLUSION_FORCES
#undef LJ_EWALD

#undef LJ_COMB

#undef USE_CJ_PREFETCH