/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012-2018, The GROMACS development team.
 * Copyright (c) 2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \internal \file
 *  \brief OpenCL non-bonded kernel.
 *
 *  OpenCL 1.2 support is expected.
 *
 *  \author Anca Hamuraru <anca@streamcomputing.eu>
 *  \author Szilárd Páll <pall.szilard@gmail.com>
 *  \ingroup module_nbnxm
 */

/* Currently we enable CJ prefetch for AMD/NVIDIA and disable it for the "nowarp" kernel.
 * Note that this should precede the kernel_utils include.
 */
#include "nbnxm_ocl_kernel_utils.clh"

/////////////////////////////////////////////////////////////////////////////////////////////////

#if defined EL_EWALD_ANA || defined EL_EWALD_TAB
/* Note: convenience macro, needs to be undef-ed at the end of the file. */
#    define EL_EWALD_ANY
#endif

#if defined LJ_EWALD_COMB_GEOM || defined LJ_EWALD_COMB_LB
/* Note: convenience macro, needs to be undef-ed at the end of the file. */
#    define LJ_EWALD
#endif

#if defined EL_EWALD_ANY || defined EL_RF || defined LJ_EWALD \
        || (defined EL_CUTOFF && defined CALC_ENERGIES)
/* Macro to control the calculation of exclusion forces in the kernel.
 * We do that with Ewald (elec/vdw) and RF. Cut-off only has exclusion
 * energy terms.
 *
 * Note: convenience macro, needs to be undef-ed at the end of the file.
 */
#    define EXCLUSION_FORCES
#endif

#if defined LJ_COMB_GEOM || defined LJ_COMB_LB
/* Note: convenience macro, needs to be undef-ed at the end of the file. */
#    define LJ_COMB
#endif

/*
   Kernel launch parameters:
    - #blocks   = #pair lists, blockId = pair list Id
    - #threads  = CL_SIZE^2
    - shmem     = CL_SIZE^2 * sizeof(float)

    Each thread calculates an i-force component, processing one i-j atom pair at a time.

   TODO: implement 128 threads/wavefront by porting over the NTHREAD_Z/j4 loop
   "horizontal splitting" over threads.
 */
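
/* For reference, a minimal host-side launch consistent with the parameters above
 * (an illustrative sketch, not the actual GROMACS host code; numPairLists, queue,
 * kernel, xqibArgIndex, and shmemBytes are hypothetical names):
 *
 *   size_t local[2]  = { CL_SIZE, CL_SIZE };                // CL_SIZE^2 threads per block
 *   size_t global[2] = { CL_SIZE * numPairLists, CL_SIZE }; // one block per pair list
 *   clSetKernelArg(kernel, xqibArgIndex, shmemBytes, NULL); // dyn. alloc'ed __local xqib
 *   clEnqueueNDRangeKernel(queue, kernel, 2, NULL, global, local, 0, NULL, NULL);
 */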

/* NOTE:
   NB_KERNEL_FUNC_NAME differs from the CUDA equivalent in that it is not a variadic macro,
   since OpenCL does not support variadic macros; this version takes exactly 2 arguments.
   Thus, if more strings need to be appended, a new macro must be written or they must be
   appended directly here.
 */
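/* For example, if the build defines something like
 *     #define NB_KERNEL_FUNC_NAME(x, y) x##_ElecEw_VdwLJ##y
 * (the electrostatics/VdW infix is set per kernel flavor at build time; the one
 * shown here is illustrative), then NB_KERNEL_FUNC_NAME(nbnxn_kernel, _VF_opencl)
 * below expands to nbnxn_kernel_ElecEw_VdwLJ_VF_opencl. */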
__attribute__((reqd_work_group_size(CL_SIZE, CL_SIZE, 1)))
#ifdef cl_intel_required_subgroup_size
__attribute__((intel_reqd_sub_group_size(SUBGROUP_SIZE)))
#endif
#ifdef PRUNE_NBL
#    ifdef CALC_ENERGIES
__kernel void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _VF_prune_opencl)
#    else
__kernel void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _F_prune_opencl)
#    endif
#else
#    ifdef CALC_ENERGIES
__kernel void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _VF_opencl)
#    else
__kernel void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _F_opencl)
#    endif
#endif
        (
#ifndef LJ_COMB
                int ntypes, /* IN  */
#endif
                cl_nbparam_params_t nbparam_params,       /* IN  */
                const __global float4* restrict xq,       /* IN  */
                __global float* restrict f,               /* OUT stores float3 values */
                __global float* restrict gmx_unused e_lj, /* OUT */
                __global float* restrict gmx_unused e_el, /* OUT */
                __global float* restrict fshift,          /* OUT stores float3 values */
#ifdef LJ_COMB
                const __global float2* restrict lj_comb, /* IN stores float2 values */
#else
                const __global int* restrict atom_types, /* IN  */
#endif
                const __global float* restrict shift_vec,          /* IN stores float3 values */
                __constant const float2* restrict gmx_unused nbfp, /* IN  */
                __constant const float2* restrict gmx_unused nbfp_comb,  /* IN  */
                __constant const float* restrict gmx_unused coulomb_tab, /* IN  */
                const __global nbnxn_sci_t* pl_sci,                      /* IN  */
#ifndef PRUNE_NBL
                const
#endif
                __global nbnxn_cj4_t* pl_cj4,             /* OUT / IN */
                const __global nbnxn_excl_t* excl,        /* IN  */
                int                          bCalcFshift, /* IN  */
                __local float4* xqib                      /* Pointer to dyn alloc'ed shmem */
        )
{
    /* convenience variables */
    const cl_nbparam_params_t* const nbparam = &nbparam_params;

    const float rcoulomb_sq = nbparam->rcoulomb_sq;
#ifdef VDW_CUTOFF_CHECK
    const float rvdw_sq = nbparam_params.rvdw_sq;
#endif
#ifdef EL_RF
    const float two_k_rf = nbparam->two_k_rf;
#endif
#ifdef EL_EWALD_TAB
    const float coulomb_tab_scale = nbparam->coulomb_tab_scale;
#endif
#ifdef EL_EWALD_ANA
    const float beta2 = nbparam->ewald_beta * nbparam->ewald_beta;
    const float beta3 = nbparam->ewald_beta * nbparam->ewald_beta * nbparam->ewald_beta;
#endif
#ifdef PRUNE_NBL
    const float rlist_sq = nbparam->rlistOuter_sq;
#endif

#ifdef CALC_ENERGIES
#    ifdef EL_EWALD_ANY
    const float beta        = nbparam->ewald_beta;
    const float ewald_shift = nbparam->sh_ewald;
#    else
    const float gmx_unused c_rf = nbparam->c_rf;
#    endif /* EL_EWALD_ANY */
#endif     /* CALC_ENERGIES */

    /* thread/block/warp IDs */
    const int tidxi = get_local_id(0);
    const int tidxj = get_local_id(1);
    const int tidx  = (int)(get_local_id(1) * get_local_size(0) + get_local_id(0));
    const int bidx  = get_group_id(0);
    const int widx  = tidx / WARP_SIZE; /* warp index */

    /*! i-cluster interaction mask for a super-cluster with all c_nbnxnGpuNumClusterPerSupercluster=8 bits set */
    const unsigned superClInteractionMask = ((1U << c_nbnxnGpuNumClusterPerSupercluster) - 1U);

#define LOCAL_OFFSET (xqib + c_nbnxnGpuNumClusterPerSupercluster * CL_SIZE)
    CjType cjs = 0;
#if USE_CJ_PREFETCH
    /* shmem buffer for cj, for both warps separately */
    cjs = (__local int*)(LOCAL_OFFSET);
#    undef LOCAL_OFFSET
#    define LOCAL_OFFSET (cjs + 2 * c_nbnxnGpuJgroupSize)
#endif // USE_CJ_PREFETCH

#ifdef IATYPE_SHMEM
#    ifndef LJ_COMB
    /* shmem buffer for i atom-type pre-loading */
    __local int* atib = (__local int*)(LOCAL_OFFSET); //NOLINT(google-readability-casting)
#        undef LOCAL_OFFSET
#        define LOCAL_OFFSET (atib + c_nbnxnGpuNumClusterPerSupercluster * CL_SIZE)
#    else
    __local float2* ljcpib      = (__local float2*)(LOCAL_OFFSET);
#        undef LOCAL_OFFSET
#        define LOCAL_OFFSET (ljcpib + c_nbnxnGpuNumClusterPerSupercluster * CL_SIZE)
#    endif
#endif

#if !REDUCE_SHUFFLE
    /* shmem j force buffer */
    __local float* f_buf = (__local float*)(LOCAL_OFFSET);
#    undef LOCAL_OFFSET
#    define LOCAL_OFFSET (f_buf + CL_SIZE * CL_SIZE * 3)
#else
    __local float* f_buf             = 0;
#endif
#if !USE_SUBGROUP_ANY
    /* Local buffer used to implement __any warp vote function from CUDA.
       volatile is used to avoid compiler optimizations for AMD builds. */
    //NOLINTNEXTLINE(google-readability-casting)
    volatile __local int* warp_any = (__local int*)(LOCAL_OFFSET);
#else
    __local int gmx_unused* warp_any = 0;
#endif
#undef LOCAL_OFFSET
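
    /* At this point the single dynamically allocated __local buffer (passed in as
       xqib) has been carved up as follows; each region exists only when the
       corresponding define is active:
         xqib          - c_nbnxnGpuNumClusterPerSupercluster * CL_SIZE float4: i-atom x,q
         cjs           - 2 * c_nbnxnGpuJgroupSize int: prefetched j-clusters (USE_CJ_PREFETCH)
         atib / ljcpib - c_nbnxnGpuNumClusterPerSupercluster * CL_SIZE int/float2:
                         i-atom types or LJ combination parameters (IATYPE_SHMEM)
         f_buf         - CL_SIZE * CL_SIZE * 3 float: j-force reduction (!REDUCE_SHUFFLE)
         warp_any      - per-warp int flags for the warp vote (!USE_SUBGROUP_ANY) */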

    const nbnxn_sci_t nb_sci     = pl_sci[bidx]; /* my i super-cluster's index = current bidx */
    const int         sci        = nb_sci.sci;   /* super-cluster */
    const int         cij4_start = nb_sci.cj4_ind_start; /* first ...*/
    const int         cij4_end   = nb_sci.cj4_ind_end;   /* and last index of j clusters */

    for (int i = 0; i < c_nbnxnGpuNumClusterPerSupercluster; i += CL_SIZE)
    {
        /* Pre-load i-atom x and q into shared memory */
        const int ci = sci * c_nbnxnGpuNumClusterPerSupercluster + tidxj + i;
        const int ai = ci * CL_SIZE + tidxi;

        float4 xqbuf = xq[ai]
                       + (float4)(shift_vec[3 * nb_sci.shift],
                                  shift_vec[3 * nb_sci.shift + 1],
                                  shift_vec[3 * nb_sci.shift + 2],
                                  0.0F);
        xqbuf.w *= nbparam->epsfac;
        xqib[(tidxj + i) * CL_SIZE + tidxi] = xqbuf;
#ifdef IATYPE_SHMEM
#    ifndef LJ_COMB
        /* Pre-load the i-atom types into shared memory */
        atib[(tidxj + i) * CL_SIZE + tidxi] = atom_types[ai];
#    else
        ljcpib[(tidxj + i) * CL_SIZE + tidxi] = lj_comb[ai];
#    endif
#endif
    }
#if !USE_SUBGROUP_ANY
    /* Initialise the warp vote; an 8x8 block has 2 warps on NVIDIA */
    if (tidx == 0 || tidx == WARP_SIZE)
    {
        warp_any[widx] = 0;
    }
#endif
    barrier(CLK_LOCAL_MEM_FENCE);

    fvec fci_buf[c_nbnxnGpuNumClusterPerSupercluster]; /* i force buffer */
    for (int ci_offset = 0; ci_offset < c_nbnxnGpuNumClusterPerSupercluster; ci_offset++)
    {
        fci_buf[ci_offset][0] = 0.0F;
        fci_buf[ci_offset][1] = 0.0F;
        fci_buf[ci_offset][2] = 0.0F;
    }

#ifdef LJ_EWALD
    /* TODO: we are trading registers for flops by keeping the lje_coeff-s; try re-calculating them later */
    const float lje_coeff2   = nbparam->ewaldcoeff_lj * nbparam->ewaldcoeff_lj;
    const float lje_coeff6_6 = lje_coeff2 * lje_coeff2 * lje_coeff2 * ONE_SIXTH_F;
#endif /* LJ_EWALD */


#ifdef CALC_ENERGIES
    float E_lj = 0.0F;
    float E_el = 0.0F;

#    if defined EXCLUSION_FORCES /* Ewald or RF */
    if (nb_sci.shift == CENTRAL && pl_cj4[cij4_start].cj[0] == sci * c_nbnxnGpuNumClusterPerSupercluster)
    {
        /* we have the diagonal: add the charge and LJ self-interaction energy terms */
        for (int i = 0; i < c_nbnxnGpuNumClusterPerSupercluster; i++)
        {
#        if defined EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF
            const float qi = xqib[i * CL_SIZE + tidxi].w;
            E_el += qi * qi;
#        endif
#        if defined LJ_EWALD
            E_lj += nbfp[atom_types[(sci * c_nbnxnGpuNumClusterPerSupercluster + i) * CL_SIZE + tidxi] * (ntypes + 1)]
                            .x;
#        endif /* LJ_EWALD */
        }

        /* divide the self term(s) equally over the j-threads, then multiply by the coefficients. */
#        ifdef LJ_EWALD
        E_lj /= CL_SIZE;
        E_lj *= HALF_F * ONE_SIXTH_F * lje_coeff6_6;
#        endif /* LJ_EWALD */

#        if defined EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF
        /* Correct for epsfac^2 due to adding qi^2 */
        E_el /= nbparam->epsfac * CL_SIZE;
#            if defined EL_RF || defined EL_CUTOFF
        E_el *= -HALF_F * c_rf;
#            else
        E_el *= -beta * M_FLOAT_1_SQRTPI; /* last factor 1/sqrt(pi) */
#            endif
#        endif /* EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF */
    }
#    endif /* EXCLUSION_FORCES */

#endif /* CALC_ENERGIES */
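
    /* A sketch of the self-energy math implemented above: for the diagonal super-cluster,
         Ewald:        E_el_self = -beta/sqrt(pi) * sum_i q_i^2
         RF / cut-off: E_el_self = -1/2 * c_rf    * sum_i q_i^2
       The charges stored in xqib were pre-scaled by epsfac, so q_i^2 carries epsfac^2;
       dividing by epsfac once leaves the single factor that energies must carry. The
       additional division by CL_SIZE compensates for all CL_SIZE j-threads accumulating
       the same per-i-atom term. */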

#ifdef EXCLUSION_FORCES
    const int nonSelfInteraction = !(nb_sci.shift == CENTRAL & tidxj <= tidxi);
#endif
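    /* Note: the bitwise & above (rather than &&) is deliberate: == and <= bind more
       tightly than &, both comparisons yield 0/1 ints, and avoiding short-circuit
       evaluation keeps the expression branch-free. */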

    /* loop over the j clusters seen by any of the atoms in the current super-cluster */
    for (int j4 = cij4_start; j4 < cij4_end; j4++)
    {
        const int          wexcl_idx = pl_cj4[j4].imei[widx].excl_ind;
        unsigned int       imask     = pl_cj4[j4].imei[widx].imask;
        const unsigned int wexcl     = excl[wexcl_idx].pair[(tidx) & (WARP_SIZE - 1)];
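        /* (tidx) & (WARP_SIZE - 1) is the lane index within the warp: each of the
           WARP_SIZE threads of the warp loads its own exclusion bit mask. */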

        preloadCj4(&cjs, pl_cj4[j4].cj, tidxi, tidxj, imask != 0U);

#ifndef PRUNE_NBL
        if (imask)
#endif
        {
            /* Unrolling this loop improves performance without pruning, but
             * with pruning it leads to a slowdown.
             *
             * Tested with driver 1800.5
             *
             * TODO: check loop unrolling with NVIDIA OpenCL
             */
#if !defined PRUNE_NBL && !defined _NVIDIA_SOURCE_
#    pragma unroll 4
#endif
            for (int jm = 0; jm < c_nbnxnGpuJgroupSize; jm++)
            {
                if (imask & (superClInteractionMask << (jm * c_nbnxnGpuNumClusterPerSupercluster)))
                {
                    unsigned int mask_ji = (1U << (jm * c_nbnxnGpuNumClusterPerSupercluster));

                    const int cj = loadCj(cjs, pl_cj4[j4].cj, jm, tidxi, tidxj);
                    const int aj = cj * CL_SIZE + tidxj;

                    /* load j atom data */
                    const float4 xjqbuf = xq[aj];
                    const float3 xj     = (float3)(xjqbuf.xyz);
                    const float  qj_f   = xjqbuf.w;
#ifndef LJ_COMB
                    const int typej = atom_types[aj];
#else
                    const float2 ljcp_j = lj_comb[aj];
#endif

                    float3 fcj_buf = (float3)(0.0F);

#if !defined PRUNE_NBL
#    pragma unroll 8
#endif
                    for (int i = 0; i < c_nbnxnGpuNumClusterPerSupercluster; i++)
                    {
                        if (imask & mask_ji)
                        {
                            const int gmx_unused ci = sci * c_nbnxnGpuNumClusterPerSupercluster + i; /* i cluster index */

                            /* all threads load an atom of i-cluster ci from shmem */
                            const float4 xiqbuf = xqib[i * CL_SIZE + tidxi];
                            const float3 xi     = (float3)(xiqbuf.xyz);

                            /* distance between i and j atoms */
                            const float3 rv = xi - xj;
                            float        r2 = norm2(rv);

#ifdef PRUNE_NBL
                            if (!gmx_sub_group_any(warp_any, widx, r2 < rlist_sq))
                            {
                                imask &= ~mask_ji;
                            }
#endif

                            const float int_bit = (wexcl & mask_ji) ? 1.0F : 0.0F;

                            /* cutoff & exclusion check */
#ifdef EXCLUSION_FORCES
                            if ((r2 < rcoulomb_sq) * (nonSelfInteraction | (ci != cj)))
#else
                            if ((float)(r2 < rcoulomb_sq) * int_bit != 0.0F)
#endif
                            {
                                /* load the rest of the i-atom parameters */
                                const float qi = xiqbuf.w;
#ifdef IATYPE_SHMEM
#    ifndef LJ_COMB
                                const int typei = atib[i * CL_SIZE + tidxi];
#    else
                                const float2 ljcp_i = ljcpib[i * CL_SIZE + tidxi];
#    endif
#else /* IATYPE_SHMEM */
                                const int   ai     = ci * CL_SIZE + tidxi; /* i atom index */

#    ifndef LJ_COMB
                                const int   typei  = atom_types[ai];
#    else
                                const float2 ljcp_i = lj_comb[ai];
#    endif
#endif                          /* IATYPE_SHMEM */
                                /* LJ 6*C6 and 12*C12 */
#ifndef LJ_COMB
                                const float2 c6c12 = nbfp[ntypes * typei + typej];

                                const float c6  = c6c12.x;
                                const float c12 = c6c12.y;
#else /* LJ_COMB */
#    ifdef LJ_COMB_GEOM
                                const float c6     = ljcp_i.x * ljcp_j.x;
                                const float c12    = ljcp_i.y * ljcp_j.y;
#    else
                                /* LJ 2^(1/6)*sigma and 12*epsilon */
                                const float  sigma   = ljcp_i.x + ljcp_j.x;
                                const float  epsilon = ljcp_i.y * ljcp_j.y;
#        if defined CALC_ENERGIES || defined LJ_FORCE_SWITCH || defined LJ_POT_SWITCH
                                const float2 c6c12 = convert_sigma_epsilon_to_c6_c12(sigma, epsilon);
                                const float  c6    = c6c12.x;
                                const float  c12   = c6c12.y;
#        endif
#    endif /* LJ_COMB_GEOM */
#endif     /* LJ_COMB */

                                // Ensure the distance does not become so small that r^-12 overflows.
                                r2 = max(r2, c_nbnxnMinDistanceSquared);

                                const float inv_r  = rsqrt(r2);
                                const float inv_r2 = inv_r * inv_r;
#if !defined LJ_COMB_LB || defined CALC_ENERGIES
                                float inv_r6 = inv_r2 * inv_r2 * inv_r2;
#    if defined EXCLUSION_FORCES
                                /* We could mask inv_r2, but with Ewald
                                 * masking both inv_r6 and F_invr is faster */
                                inv_r6 *= int_bit;
#    endif /* EXCLUSION_FORCES */

                                float F_invr = inv_r6 * (c12 * inv_r6 - c6) * inv_r2;
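                                /* With c6 = 6*C6 and c12 = 12*C12 (see the parameter
                                   load above), this is the standard LJ force over r:
                                   F/r = 12*C12/r^14 - 6*C6/r^8. */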
#    if defined CALC_ENERGIES || defined LJ_POT_SWITCH
                                float E_lj_p =
                                        int_bit
                                        * (c12 * (inv_r6 * inv_r6 + nbparam->repulsion_shift.cpot) * ONE_TWELVETH_F
                                           - c6 * (inv_r6 + nbparam->dispersion_shift.cpot) * ONE_SIXTH_F);

#    endif
#else /* ! LJ_COMB_LB || CALC_ENERGIES */
                                const float sig_r  = sigma * inv_r;
                                const float sig_r2 = sig_r * sig_r;
                                float       sig_r6 = sig_r2 * sig_r2 * sig_r2;
#    if defined EXCLUSION_FORCES
                                sig_r6 *= int_bit;
#    endif /* EXCLUSION_FORCES */

                                float F_invr = epsilon * sig_r6 * (sig_r6 - 1.0F) * inv_r2;
#endif     /* ! LJ_COMB_LB || CALC_ENERGIES */


#ifdef LJ_FORCE_SWITCH
#    ifdef CALC_ENERGIES
                                calculate_force_switch_F_E(nbparam, c6, c12, inv_r, r2, &F_invr, &E_lj_p);
#    else
                                calculate_force_switch_F(nbparam, c6, c12, inv_r, r2, &F_invr);
#    endif /* CALC_ENERGIES */
#endif     /* LJ_FORCE_SWITCH */


#ifdef LJ_EWALD
#    ifdef LJ_EWALD_COMB_GEOM
#        ifdef CALC_ENERGIES
                                calculate_lj_ewald_comb_geom_F_E(nbfp_comb,
                                                                 nbparam,
                                                                 typei,
                                                                 typej,
                                                                 r2,
                                                                 inv_r2,
                                                                 lje_coeff2,
                                                                 lje_coeff6_6,
                                                                 int_bit,
                                                                 &F_invr,
                                                                 &E_lj_p);
#        else
                                calculate_lj_ewald_comb_geom_F(
                                        nbfp_comb, typei, typej, r2, inv_r2, lje_coeff2, lje_coeff6_6, &F_invr);
#        endif /* CALC_ENERGIES */
#    elif defined LJ_EWALD_COMB_LB
                                calculate_lj_ewald_comb_LB_F_E(nbfp_comb,
                                                               nbparam,
                                                               typei,
                                                               typej,
                                                               r2,
                                                               inv_r2,
                                                               lje_coeff2,
                                                               lje_coeff6_6,
#        ifdef CALC_ENERGIES
                                                               int_bit,
                                                               true,
                                                               &F_invr,
                                                               &E_lj_p
#        else
                                                               0,
                                                               false,
                                                               &F_invr,
                                                               0
#        endif /* CALC_ENERGIES */
                                );
#    endif     /* LJ_EWALD_COMB_GEOM */
#endif         /* LJ_EWALD */

#ifdef LJ_POT_SWITCH
#    ifdef CALC_ENERGIES
                                calculate_potential_switch_F_E(nbparam, inv_r, r2, &F_invr, &E_lj_p);
#    else
                                calculate_potential_switch_F(nbparam, inv_r, r2, &F_invr, &E_lj_p);
#    endif /* CALC_ENERGIES */
#endif     /* LJ_POT_SWITCH */

#ifdef VDW_CUTOFF_CHECK
                                /* Separate VDW cut-off check to enable twin-range cut-offs
                                 * (rvdw < rcoulomb <= rlist)
                                 */
                                const float vdw_in_range = (r2 < rvdw_sq) ? 1.0F : 0.0F;
                                F_invr *= vdw_in_range;
#    ifdef CALC_ENERGIES
                                E_lj_p *= vdw_in_range;
#    endif
#endif /* VDW_CUTOFF_CHECK */

#ifdef CALC_ENERGIES
                                E_lj += E_lj_p;

#endif


#ifdef EL_CUTOFF
#    ifdef EXCLUSION_FORCES
                                F_invr += qi * qj_f * int_bit * inv_r2 * inv_r;
#    else
                                F_invr += qi * qj_f * inv_r2 * inv_r;
#    endif
#endif
#ifdef EL_RF
                                F_invr += qi * qj_f * (int_bit * inv_r2 * inv_r - two_k_rf);
#endif
#if defined EL_EWALD_ANA
                                F_invr += qi * qj_f
                                          * (int_bit * inv_r2 * inv_r + pmecorrF(beta2 * r2) * beta3);
#elif defined EL_EWALD_TAB
                                F_invr += qi * qj_f
                                          * (int_bit * inv_r2
                                             - interpolate_coulomb_force_r(
                                                       coulomb_tab, r2 * inv_r, coulomb_tab_scale))
                                          * inv_r;
#endif /* EL_EWALD_ANA/TAB */
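
                                /* For the analytical Ewald path above: the real-space pair
                                   force over r is q_i*q_j*(erfc(beta*r)/r^3 + 2*beta/sqrt(pi)
                                   * exp(-beta^2*r^2)/r^2). Writing erfc = 1 - erf splits this
                                   into the plain Coulomb term int_bit/r^3 (masked by the
                                   exclusion bit) plus a smooth correction, which is what
                                   pmecorrF(beta^2*r^2)*beta^3 evaluates. */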

#ifdef CALC_ENERGIES
#    ifdef EL_CUTOFF
                                E_el += qi * qj_f * (int_bit * inv_r - c_rf);
#    endif
#    ifdef EL_RF
                                E_el += qi * qj_f * (int_bit * inv_r + HALF_F * two_k_rf * r2 - c_rf);
#    endif
#    ifdef EL_EWALD_ANY
                                /* 1.0F - erff is faster than erfcf */
                                E_el += qi * qj_f
                                        * (inv_r * (int_bit - erf(r2 * inv_r * beta)) - int_bit * ewald_shift);
#    endif /* EL_EWALD_ANY */
#endif
                                const float3 f_ij = rv * F_invr;

                                /* accumulate j forces in registers */
                                fcj_buf -= f_ij;

                                /* accumulate i forces in registers */
                                fci_buf[i][0] += f_ij.x;
                                fci_buf[i][1] += f_ij.y;
                                fci_buf[i][2] += f_ij.z;
                            }
                        }

                        /* shift the mask bit by 1 */
                        mask_ji += mask_ji;
                    }

                    /* reduce j forces */
                    reduce_force_j(f_buf, fcj_buf, f, tidxi, tidxj, aj);
                }
            }
#ifdef PRUNE_NBL
            /* Update the imask with the new one, which no longer contains the
               out-of-range clusters. */

            pl_cj4[j4].imei[widx].imask = imask;
#endif
        }
    }

    /* skip central shifts when summing shift forces */
    if (nb_sci.shift == CENTRAL)
    {
        bCalcFshift = 0;
    }
    /* reduce i forces */
    reduce_force_i_and_shift(f_buf, fci_buf, f, bCalcFshift != 0, tidxi, tidxj, sci, nb_sci.shift, fshift);

#ifdef CALC_ENERGIES
    reduce_energy(f_buf, E_lj, E_el, e_lj, e_el, tidx);
#endif
}

#undef EL_EWALD_ANY
#undef EXCLUSION_FORCES
#undef LJ_EWALD

#undef LJ_COMB

#undef USE_CJ_PREFETCH