/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2016,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \internal \file
 *  \brief OpenCL non-bonded kernel for NVIDIA GPUs.
 *
 *  OpenCL 1.2 support is expected (CUDA driver API 7.5 and later).
 *
 *  \author Anca Hamuraru <anca@streamcomputing.eu>
 *  \author Szilárd Páll <pall.szilard@gmail.com>
 *  \ingroup module_mdlib
 */


#include "nbnxn_ocl_kernel_utils.clh"

/////////////////////////////////////////////////////////////////////////////////////////////////

#if defined EL_EWALD_ANA || defined EL_EWALD_TAB
/* Note: convenience macro, needs to be undef-ed at the end of the file. */
#define EL_EWALD_ANY
#endif

#if defined EL_EWALD_ANY || defined EL_RF || defined LJ_EWALD || (defined EL_CUTOFF && defined CALC_ENERGIES)
/* Macro to control the calculation of exclusion forces in the kernel.
 * We calculate them with Ewald (elec/vdw) and RF; with plain cut-off only
 * the exclusion energy terms are needed.
 *
 * Note: convenience macro, needs to be undef-ed at the end of the file.
 */
#define EXCLUSION_FORCES
#endif

#if defined LJ_EWALD_COMB_GEOM || defined LJ_EWALD_COMB_LB
/* Note: convenience macro, needs to be undef-ed at the end of the file. */
#define LJ_EWALD
#endif

#if defined LJ_COMB_GEOM || defined LJ_COMB_LB
/* Note: convenience macro, needs to be undef-ed at the end of the file. */
#define LJ_COMB
#endif

/*
   Kernel launch parameters:
    - #blocks   = #pair lists, blockId = pair list Id
    - #threads  = CL_SIZE^2
    - shmem     = CL_SIZE^2 * sizeof(float)

    Each thread calculates an i-force component for one pair of i-j atoms.
 */

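/* For illustration only: a minimal, hypothetical host-side launch sketch matching the
   launch parameters above. The names queue, kernel, num_pair_lists, xqib_arg_index and
   shmem_bytes are placeholders and are not part of this file or of the GROMACS host code:

     size_t local_ws[2]  = { CL_SIZE, CL_SIZE };                   // matches reqd_work_group_size below
     size_t global_ws[2] = { CL_SIZE * num_pair_lists, CL_SIZE };  // one work-group per pair list
     clSetKernelArg(kernel, xqib_arg_index, shmem_bytes, NULL);    // dynamically sized __local buffer
     cl_int err = clEnqueueNDRangeKernel(queue, kernel, 2, NULL,
                                         global_ws, local_ws, 0, NULL, NULL);
 */
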
/* NOTE:
 NB_KERNEL_FUNC_NAME differs from the CUDA equivalent: since OpenCL does not support variadic macros, this version takes exactly 2 arguments.
 Thus, if more strings need to be appended, a new macro must be written or the strings must be appended directly here.
*/
__attribute__((reqd_work_group_size(CL_SIZE, CL_SIZE, 1)))
#ifdef PRUNE_NBL
    #ifdef CALC_ENERGIES
        __kernel void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _VF_prune_opencl)
    #else
        __kernel void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _F_prune_opencl)
    #endif
#else
    #ifdef CALC_ENERGIES
        __kernel void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _VF_opencl)
    #else
        __kernel void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _F_opencl)
    #endif
#endif
(
#ifndef LJ_COMB
 int ntypes,                                                               /* IN  */
#endif
 cl_nbparam_params_t nbparam_params,                                       /* IN  */
 const __global float4 *restrict xq,                                       /* IN  */
 __global float *restrict f,                /* stores float3 values */     /* OUT */
 __global float *restrict e_lj,                                            /* OUT */
 __global float *restrict e_el,                                            /* OUT */
 __global float *restrict fshift,           /* stores float3 values */     /* OUT */
#ifdef LJ_COMB
 const __global float2 *restrict lj_comb,      /* stores float2 values */  /* IN  */
#else
 const __global int *restrict atom_types,                                  /* IN  */
#endif
 const __global float *restrict shift_vec,  /* stores float3 values */     /* IN  */
 __constant float* nbfp_climg2d,                                           /* IN  */
 __constant float* nbfp_comb_climg2d,                                      /* IN  */
 __constant float* coulomb_tab_climg2d,                                    /* IN  */
 const __global nbnxn_sci_t* pl_sci,                                       /* IN  */
#ifndef PRUNE_NBL
    const
#endif
 __global nbnxn_cj4_t* pl_cj4,                                             /* OUT / IN */
 const __global nbnxn_excl_t* excl,                                        /* IN  */
 int bCalcFshift,                                                          /* IN  */
 __local  float4   *xqib,                                                  /* Pointer to dyn alloc'ed shmem */
 __global float *debug_buffer                                              /* Debug buffer, can be used with print_to_debug_buffer_f */
 )
{
    /* convenience variables */
    cl_nbparam_params_t *nbparam = &nbparam_params;

    float               rcoulomb_sq = nbparam->rcoulomb_sq;
#ifdef LJ_COMB
    float2              ljcp_i, ljcp_j;
#endif
#ifdef VDW_CUTOFF_CHECK
    float               rvdw_sq     = nbparam_params.rvdw_sq;
    float               vdw_in_range;
#endif
#ifdef LJ_EWALD
    float               lje_coeff2, lje_coeff6_6;
#endif
#ifdef EL_RF
    float two_k_rf              = nbparam->two_k_rf;
#endif
#ifdef EL_EWALD_TAB
    float coulomb_tab_scale     = nbparam->coulomb_tab_scale;
#endif
#ifdef EL_EWALD_ANA
    float beta2                 = nbparam->ewald_beta*nbparam->ewald_beta;
    float beta3                 = nbparam->ewald_beta*nbparam->ewald_beta*nbparam->ewald_beta;
#endif
#ifdef PRUNE_NBL
    float rlist_sq              = nbparam->rlistOuter_sq;
#endif

#ifdef CALC_ENERGIES
#ifdef EL_EWALD_ANY
    float  beta        = nbparam->ewald_beta;
    float  ewald_shift = nbparam->sh_ewald;
#else
    float  c_rf        = nbparam->c_rf;
#endif /* EL_EWALD_ANY */
#endif /* CALC_ENERGIES */

    /* thread/block/warp IDs */
    unsigned int tidxi  = get_local_id(0);
    unsigned int tidxj  = get_local_id(1);
    unsigned int tidx   = get_local_id(1) * get_local_size(0) + get_local_id(0);
    unsigned int bidx   = get_group_id(0);
    unsigned int widx   = tidx / WARP_SIZE; /* warp index */
    int          sci, ci, cj, ci_offset,
                 ai, aj,
                 cij4_start, cij4_end;
#ifndef LJ_COMB
    int          typei, typej;
#endif
    int          i, jm, j4, wexcl_idx;
    float        qi, qj_f,
                 r2, inv_r, inv_r2;
#if !defined LJ_COMB_LB || defined CALC_ENERGIES
    float        inv_r6, c6, c12;
#endif
#if defined LJ_COMB_LB
    float        sigma, epsilon;
#endif
    float        int_bit,
                 F_invr;

#ifdef CALC_ENERGIES
    float        E_lj, E_el;
#endif
#if defined CALC_ENERGIES || defined LJ_POT_SWITCH
    float        E_lj_p;
#endif
    unsigned int wexcl, imask, mask_ji;
    float4       xqbuf;
    float3       xi, xj, rv, f_ij, fcj_buf/*, fshift_buf*/;
    float        fshift_buf;
    float3       fci_buf[NCL_PER_SUPERCL]; /* i force buffer */
    nbnxn_sci_t  nb_sci;

    /*! i-cluster interaction mask for a super-cluster with all NCL_PER_SUPERCL=8 bits set */
    const unsigned superClInteractionMask = ((1U << NCL_PER_SUPERCL) - 1U);

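    /* Layout of the dynamically allocated local memory: the xqib kernel argument holds the
       NCL_PER_SUPERCL * CL_SIZE i-atom x+q values; the buffers below (cj indices, optionally
       i-atom types or LJ combination parameters, the j-force reduction buffer and the warp-vote
       flags) are carved out of the same allocation via the LOCAL_OFFSET running pointer. */
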
    /* shmem buffer for cj, for both warps separately */
    __local int *cjs       = (__local int *)(xqib + NCL_PER_SUPERCL * CL_SIZE);
    #define LOCAL_OFFSET cjs + 2 * NBNXN_GPU_JGROUP_SIZE

#ifdef IATYPE_SHMEM
#ifndef LJ_COMB
    /* shmem buffer for i atom-type pre-loading */
    __local int *atib      = (__local int *)(LOCAL_OFFSET);
    #undef LOCAL_OFFSET
    #define LOCAL_OFFSET atib + NCL_PER_SUPERCL * CL_SIZE
#else
    __local float2 *ljcpib = (__local float2 *)(LOCAL_OFFSET);
    #undef LOCAL_OFFSET
    #define LOCAL_OFFSET ljcpib + NCL_PER_SUPERCL * CL_SIZE
#endif
#endif

#ifndef REDUCE_SHUFFLE
    /* shmem j force buffer */
    __local float *f_buf   = (__local float *)(LOCAL_OFFSET);
    #undef LOCAL_OFFSET
    #define LOCAL_OFFSET f_buf + CL_SIZE * CL_SIZE * 3
#endif
    /* Local buffer used to implement the __any() warp vote function from CUDA;
       volatile is used to avoid compiler optimizations on AMD builds. */
    volatile __local uint *warp_any = (__local uint*)(LOCAL_OFFSET);
#undef LOCAL_OFFSET

    nb_sci      = pl_sci[bidx];         /* my i super-cluster's index = current bidx */
    sci         = nb_sci.sci;           /* super-cluster */
    cij4_start  = nb_sci.cj4_ind_start; /* first ...*/
    cij4_end    = nb_sci.cj4_ind_end;   /* and last index of j clusters */

    /* Pre-load i-atom x and q into shared memory */
    ci = sci * NCL_PER_SUPERCL + tidxj;
    ai = ci * CL_SIZE + tidxi;

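    /* Shift the i-atom coordinates by the periodic shift vector of this sci entry and pre-scale
       the charge by epsfac, so the Coulomb prefactor does not have to be applied per pair. */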
    xqbuf    = xq[ai] + (float4)(shift_vec[3 * nb_sci.shift], shift_vec[3 * nb_sci.shift + 1], shift_vec[3 * nb_sci.shift + 2], 0.0f);
    xqbuf.w *= nbparam->epsfac;
    xqib[tidxj * CL_SIZE + tidxi] = xqbuf;

#ifdef IATYPE_SHMEM
#ifndef LJ_COMB
    /* Pre-load the i-atom types into shared memory */
    atib[tidxj * CL_SIZE + tidxi]   = atom_types[ai];
#else
    ljcpib[tidxj * CL_SIZE + tidxi] = lj_comb[ai];
#endif
#endif
    /* Initialise the warp vote flags: the 8x8 thread block comprises 2 warps on NVIDIA. */
    if (tidx == 0 || tidx == 32)
        warp_any[widx] = 0;

    barrier(CLK_LOCAL_MEM_FENCE);

    for (ci_offset = 0; ci_offset < NCL_PER_SUPERCL; ci_offset++)
    {
        fci_buf[ci_offset] = (float3)(0.0f);
    }

#ifdef LJ_EWALD
    /* TODO: we are trading registers for flops by keeping the lje_coeff-s; try re-calculating them later */
    lje_coeff2   = nbparam->ewaldcoeff_lj*nbparam->ewaldcoeff_lj;
    lje_coeff6_6 = lje_coeff2*lje_coeff2*lje_coeff2*ONE_SIXTH_F;
#endif /* LJ_EWALD */


#ifdef CALC_ENERGIES
    E_lj = 0.0f;
    E_el = 0.0f;

#if defined EXCLUSION_FORCES /* Ewald or RF */
    if (nb_sci.shift == CENTRAL && pl_cj4[cij4_start].cj[0] == sci*NCL_PER_SUPERCL)
    {
        /* we have the diagonal: add the charge and LJ self interaction energy term */
        for (i = 0; i < NCL_PER_SUPERCL; i++)
        {
#if defined EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF
            qi    = xqib[i * CL_SIZE + tidxi].w;
            E_el += qi*qi;
#endif
#if defined LJ_EWALD
            E_lj += nbfp_climg2d[atom_types[(sci*NCL_PER_SUPERCL + i)*CL_SIZE + tidxi]*(ntypes + 1)*2];
#endif /* LJ_EWALD */
        }

        /* divide the self term(s) equally over the j-threads, then multiply with the coefficients. */
#ifdef LJ_EWALD
        E_lj /= CL_SIZE;
        E_lj *= 0.5f*ONE_SIXTH_F*lje_coeff6_6;
#endif  /* LJ_EWALD */

#if defined EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF
        /* Correct for epsfac^2 due to adding qi^2 */
        E_el /= nbparam->epsfac*CL_SIZE;
#if defined EL_RF || defined EL_CUTOFF
        E_el *= -0.5f*c_rf;
#else
        E_el *= -beta*M_FLOAT_1_SQRTPI; /* last factor 1/sqrt(pi) */
#endif
#endif                                  /* EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF */
    }
#endif                                  /* EXCLUSION_FORCES */

#endif                                  /* CALC_ENERGIES */

#ifdef EXCLUSION_FORCES
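    /* Flag that is 0 only for threads with tidxj <= tidxi on the central shift (note the
       intentional bitwise operators, which keep the evaluation branch-free). Combined with
       (ci != cj) in the cut-off check below, it skips the self- and doubly-counted atom pairs
       of the diagonal (ci == cj) cluster pairs. */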
    const int nonSelfInteraction  = !(nb_sci.shift == CENTRAL & tidxj <= tidxi);
#endif

    /* loop over the j clusters seen by any of the atoms in the current super-cluster */
    for (j4 = cij4_start; j4 < cij4_end; j4++)
    {
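        /* Load this warp's interaction mask and the index of its exclusion bits for the
           current group of 4 j-clusters; each of the two warps has its own imei entry. */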
        wexcl_idx   = pl_cj4[j4].imei[widx].excl_ind;
        imask       = pl_cj4[j4].imei[widx].imask;
        wexcl       = excl[wexcl_idx].pair[(tidx) & (WARP_SIZE - 1)];

#ifndef PRUNE_NBL
        if (imask)
#endif
        {
            /* Pre-load cj into shared memory on both warps separately */
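            /* Only the first row of each of the two warps (tidxj == 0 or tidxj == 4) participates;
               the bitwise | and & below avoid divergent branching. */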
            if ((tidxj == 0 | tidxj == 4) & (tidxi < NBNXN_GPU_JGROUP_SIZE))
            {
                cjs[tidxi + tidxj * NBNXN_GPU_JGROUP_SIZE / 4] = pl_cj4[j4].cj[tidxi];
            }

            /* TODO: check loop unrolling with NVIDIA OpenCL */
#if !defined PRUNE_NBL
//#pragma unroll 4
#endif

            for (jm = 0; jm < NBNXN_GPU_JGROUP_SIZE; jm++)
            {
                if (imask & (superClInteractionMask << (jm * NCL_PER_SUPERCL)))
                {
                    mask_ji = (1U << (jm * NCL_PER_SUPERCL));

                    cj      = cjs[jm + (tidxj & 4) * NBNXN_GPU_JGROUP_SIZE / 4];
                    aj      = cj * CL_SIZE + tidxj;

                    /* load j atom data */
                    xqbuf   = xq[aj];
                    xj      = (float3)(xqbuf.xyz);
                    qj_f    = xqbuf.w;
#ifndef LJ_COMB
                    typej   = atom_types[aj];
#else
                    ljcp_j  = lj_comb[aj];
#endif

                    fcj_buf = (float3)(0.0f);

#if !defined PRUNE_NBL
#pragma unroll 8
#endif
                    for (i = 0; i < NCL_PER_SUPERCL; i++)
                    {
                        if (imask & mask_ji)
                        {
                            ci_offset   = i;                     /* i force buffer offset */

                            ci      = sci * NCL_PER_SUPERCL + i; /* i cluster index */
                            ai      = ci * CL_SIZE + tidxi;      /* i atom index */

                            /* load this thread's i atom of cluster ci from shared memory */
                            xqbuf   = xqib[i * CL_SIZE + tidxi];
                            xi      = (float3)(xqbuf.xyz);

                            /* distance between i and j atoms */
                            rv      = xi - xj;
                            r2      = norm2(rv);

#ifdef PRUNE_NBL
                            /* Emulated warp vote: any thread with a pair inside the list cut-off
                               sets its warp's flag (TODO: the cost of the shared-memory
                               serialisation is unclear). */
                            if (r2 < rlist_sq)
                                warp_any[widx] = 1;

                            /* If _none_ of the atom pairs are in cutoff range,
                               the bit corresponding to the current
                               cluster-pair in imask gets set to 0. */
                            if (!warp_any[widx])
                                imask &= ~mask_ji;

                            warp_any[widx] = 0;
#endif

                            int_bit = (wexcl & mask_ji) ? 1.0f : 0.0f;

                            /* cutoff & exclusion check */
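                            /* Note: the conditions below are combined with * and | rather than
                               && and ||, which evaluates them without branching. */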
#ifdef EXCLUSION_FORCES
                            if ((r2 < rcoulomb_sq) * (nonSelfInteraction | (ci != cj)))
#else
                            if ((r2 < rcoulomb_sq) * int_bit)
#endif
                            {
                                /* load the rest of the i-atom parameters */
                                qi      = xqbuf.w;
#ifdef IATYPE_SHMEM
#ifndef LJ_COMB
                                typei   = atib[i * CL_SIZE + tidxi];
#else
                                ljcp_i  = ljcpib[i * CL_SIZE + tidxi];
#endif
#else /* IATYPE_SHMEM */
#ifndef LJ_COMB
                                typei   = atom_types[ai];
#else
                                ljcp_i  = lj_comb[ai];
#endif
#endif /* IATYPE_SHMEM */
                                /* LJ 6*C6 and 12*C12 */
#ifndef LJ_COMB
                                c6      = nbfp_climg2d[2 * (ntypes * typei + typej)];
                                c12     = nbfp_climg2d[2 * (ntypes * typei + typej)+1];
#else   /* LJ_COMB */
#ifdef LJ_COMB_GEOM
                                c6      = ljcp_i.x * ljcp_j.x;
                                c12     = ljcp_i.y * ljcp_j.y;
#else
                                /* LJ 2^(1/6)*sigma and 12*epsilon */
                                sigma   = ljcp_i.x + ljcp_j.x;
                                epsilon = ljcp_i.y * ljcp_j.y;
#if defined CALC_ENERGIES || defined LJ_FORCE_SWITCH || defined LJ_POT_SWITCH
                                convert_sigma_epsilon_to_c6_c12(sigma, epsilon, &c6, &c12);
#endif
#endif                          /* LJ_COMB_GEOM */
#endif                          /* LJ_COMB */

                                // Ensure the distance does not become so small that r^-12 overflows
                                r2      = max(r2, NBNXN_MIN_RSQ);

                                inv_r   = rsqrt(r2);
                                inv_r2  = inv_r * inv_r;
#if !defined LJ_COMB_LB || defined CALC_ENERGIES
                                inv_r6  = inv_r2 * inv_r2 * inv_r2;
#if defined EXCLUSION_FORCES
                                /* We could mask inv_r2, but with Ewald
                                 * masking both inv_r6 and F_invr is faster */
                                inv_r6  *= int_bit;
#endif                          /* EXCLUSION_FORCES */

                                F_invr  = inv_r6 * (c12 * inv_r6 - c6) * inv_r2;
#if defined CALC_ENERGIES || defined LJ_POT_SWITCH
                                E_lj_p  = int_bit * (c12 * (inv_r6 * inv_r6 + nbparam->repulsion_shift.cpot)*ONE_TWELVETH_F -
                                                     c6 * (inv_r6 + nbparam->dispersion_shift.cpot)*ONE_SIXTH_F);

#endif
#else                           /* ! LJ_COMB_LB || CALC_ENERGIES */
                                float sig_r  = sigma*inv_r;
                                float sig_r2 = sig_r*sig_r;
                                float sig_r6 = sig_r2*sig_r2*sig_r2;
#if defined EXCLUSION_FORCES
                                sig_r6 *= int_bit;
#endif                          /* EXCLUSION_FORCES */

                                F_invr  = epsilon * sig_r6 * (sig_r6 - 1.0f) * inv_r2;
#endif                          /* ! LJ_COMB_LB || CALC_ENERGIES */


#ifdef LJ_FORCE_SWITCH
#ifdef CALC_ENERGIES
                                calculate_force_switch_F_E(nbparam, c6, c12, inv_r, r2, &F_invr, &E_lj_p);
#else
                                calculate_force_switch_F(nbparam, c6, c12, inv_r, r2, &F_invr);
#endif /* CALC_ENERGIES */
#endif /* LJ_FORCE_SWITCH */


#ifdef LJ_EWALD
#ifdef LJ_EWALD_COMB_GEOM
#ifdef CALC_ENERGIES
                                calculate_lj_ewald_comb_geom_F_E(nbfp_comb_climg2d, nbparam, typei, typej, r2, inv_r2, lje_coeff2, lje_coeff6_6, int_bit, &F_invr, &E_lj_p);
#else
                                calculate_lj_ewald_comb_geom_F(nbfp_comb_climg2d, typei, typej, r2, inv_r2, lje_coeff2, lje_coeff6_6, &F_invr);
#endif                          /* CALC_ENERGIES */
#elif defined LJ_EWALD_COMB_LB
                                calculate_lj_ewald_comb_LB_F_E(nbfp_comb_climg2d, nbparam, typei, typej, r2, inv_r2, lje_coeff2, lje_coeff6_6,
#ifdef CALC_ENERGIES
                                                               int_bit, true, &F_invr, &E_lj_p
#else
                                                               0, false, &F_invr, 0
#endif /* CALC_ENERGIES */
                                                               );
#endif /* LJ_EWALD_COMB_GEOM */
#endif /* LJ_EWALD */

#ifdef LJ_POT_SWITCH
#ifdef CALC_ENERGIES
                                calculate_potential_switch_F_E(nbparam, inv_r, r2, &F_invr, &E_lj_p);
#else
                                calculate_potential_switch_F(nbparam, inv_r, r2, &F_invr, &E_lj_p);
#endif /* CALC_ENERGIES */
#endif /* LJ_POT_SWITCH */

#ifdef VDW_CUTOFF_CHECK
                                /* Separate VDW cut-off check to enable twin-range cut-offs
                                 * (rvdw < rcoulomb <= rlist)
                                 */
                                vdw_in_range  = (r2 < rvdw_sq) ? 1.0f : 0.0f;
                                F_invr       *= vdw_in_range;
#ifdef CALC_ENERGIES
                                E_lj_p       *= vdw_in_range;
#endif
#endif                          /* VDW_CUTOFF_CHECK */

#ifdef CALC_ENERGIES
                                E_lj    += E_lj_p;

#endif


#ifdef EL_CUTOFF
#ifdef EXCLUSION_FORCES
                                F_invr  += qi * qj_f * int_bit * inv_r2 * inv_r;
#else
                                F_invr  += qi * qj_f * inv_r2 * inv_r;
#endif
#endif
#ifdef EL_RF
                                F_invr  += qi * qj_f * (int_bit*inv_r2 * inv_r - two_k_rf);
#endif
#if defined EL_EWALD_ANA
                                F_invr  += qi * qj_f * (int_bit*inv_r2*inv_r + pmecorrF(beta2*r2)*beta3);
#elif defined EL_EWALD_TAB
                                F_invr  += qi * qj_f * (int_bit*inv_r2 -
#ifdef USE_TEXOBJ
                                                        interpolate_coulomb_force_r(nbparam->coulomb_tab_texobj, r2 * inv_r, coulomb_tab_scale)
#else
                                                        interpolate_coulomb_force_r(coulomb_tab_climg2d, r2 * inv_r, coulomb_tab_scale)
#endif /* USE_TEXOBJ */
                                                        ) * inv_r;
#endif /* EL_EWALD_ANA/TAB */

#ifdef CALC_ENERGIES
#ifdef EL_CUTOFF
                                E_el    += qi * qj_f * (int_bit*inv_r - c_rf);
#endif
#ifdef EL_RF
                                E_el    += qi * qj_f * (int_bit*inv_r + 0.5f * two_k_rf * r2 - c_rf);
#endif
#ifdef EL_EWALD_ANY
                                /* 1.0f - erf is faster than erfc */
                                E_el    += qi * qj_f * (inv_r * (int_bit - erf(r2 * inv_r * beta)) - int_bit * ewald_shift);
#endif                          /* EL_EWALD_ANY */
#endif
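                                /* F_invr is the accumulated scalar force divided by r, so
                                   multiplying by the distance vector rv gives the force on i
                                   (and its opposite the force on j). */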
                                f_ij    = rv * F_invr;

                                /* accumulate j forces in registers */
                                fcj_buf -= f_ij;

                                /* accumulate i forces in registers */
                                fci_buf[ci_offset] += f_ij;
                            }
                        }

                        /* shift the mask bit by 1 */
                        mask_ji += mask_ji;
                    }

                    /* reduce j forces */

                    /* store j forces in shmem */
                    f_buf[                  tidx] = fcj_buf.x;
                    f_buf[    FBUF_STRIDE + tidx] = fcj_buf.y;
                    f_buf[2 * FBUF_STRIDE + tidx] = fcj_buf.z;

                    reduce_force_j_generic(f_buf, f, tidxi, tidxj, aj);
                }
            }
#ifdef PRUNE_NBL
            /* Update the imask with the new one which no longer contains the
               out-of-range cluster pairs. */

            pl_cj4[j4].imei[widx].imask = imask;
#endif
        }
    }

    /* skip central shifts when summing shift forces */
    if (nb_sci.shift == CENTRAL)
    {
        bCalcFshift = false;
    }

    fshift_buf = 0.0f;

    /* reduce i forces */
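    /* The partial forces of each i-cluster are staged in the f_buf local buffer and summed by
       reduce_force_i(); the barriers ensure all components are written before the reduction
       starts and that the reduction has finished before f_buf is reused for the next cluster. */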
    for (ci_offset = 0; ci_offset < NCL_PER_SUPERCL; ci_offset++)
    {
        ai  = (sci * NCL_PER_SUPERCL + ci_offset) * CL_SIZE + tidxi;

        f_buf[                  tidx] = fci_buf[ci_offset].x;
        f_buf[    FBUF_STRIDE + tidx] = fci_buf[ci_offset].y;
        f_buf[2 * FBUF_STRIDE + tidx] = fci_buf[ci_offset].z;
        barrier(CLK_LOCAL_MEM_FENCE);
        reduce_force_i(f_buf, f,
                       &fshift_buf, bCalcFshift,
                       tidxi, tidxj, ai);
        barrier(CLK_LOCAL_MEM_FENCE);
    }

    /* add up local shift forces into global mem */
    if (bCalcFshift)
    {
        /* Only threads with tidxj < 3 will update fshift.
           The threads performing the update must be the same as the threads
           that stored the reduction result in the reduce_force_i function.
        */
        if (tidxj < 3)
            atomicAdd_g_f(&(fshift[3 * nb_sci.shift + tidxj]), fshift_buf);
    }

#ifdef CALC_ENERGIES
    /* flush the energies to shmem and reduce them */
    f_buf[              tidx] = E_lj;
    f_buf[FBUF_STRIDE + tidx] = E_el;
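    /* tidx & WARP_SIZE selects the buffer half belonging to this thread's warp (0 or WARP_SIZE,
       with WARP_SIZE = 32 on NVIDIA), while tidx & ~WARP_SIZE is the lane index within the warp. */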
    reduce_energy_pow2(f_buf + (tidx & WARP_SIZE), e_lj, e_el, tidx & ~WARP_SIZE);

#endif
}

#undef EL_EWALD_ANY
#undef EXCLUSION_FORCES
#undef LJ_EWALD

#undef LJ_COMB