/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "nbnxn_ocl_kernel_utils.clh"

/////////////////////////////////////////////////////////////////////////////////////////////////
#if defined EL_EWALD_ANA || defined EL_EWALD_TAB
/* Note: convenience macro, needs to be undef-ed at the end of the file. */
#define EL_EWALD_ANY
#endif

#if defined EL_EWALD_ANY || defined EL_RF || defined LJ_EWALD || (defined EL_CUTOFF && defined CALC_ENERGIES)
/* Macro to control the calculation of exclusion forces in the kernel.
 * We do that with Ewald (elec/vdw) and RF. Cut-off only has exclusion
 * in the energy case, while switch LJ doesn't require it (as it doesn't
 * modify the potential within the cut-off).
 *
 * Note: convenience macro, needs to be undef-ed at the end of the file.
 */
#define EXCLUSION_FORCES
#endif
#if defined LJ_EWALD_COMB_GEOM || defined LJ_EWALD_COMB_LB
/* Note: convenience macro, needs to be undef-ed at the end of the file. */
#define LJ_EWALD
#endif
/*
   Kernel launch parameters:
    - #blocks  = #pair lists, blockId = pair list Id
    - #threads = CL_SIZE^2
    - shmem    = CL_SIZE^2 * sizeof(float)

   Each thread calculates an i force-component taking one pair of i-j atoms.
 */
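/*
   For illustration only, a host-side launch consistent with the parameters
   above might look like the sketch below (hypothetical names: `kernel`,
   `queue`, `nsci`, `shmem_bytes`, `arg_idx`; error checking omitted; the
   real GROMACS host code differs):

       size_t local_dim[2]  = {CL_SIZE, CL_SIZE};
       size_t global_dim[2] = {nsci * CL_SIZE, CL_SIZE};  // one work-group per pair-list entry
       // pass NULL to allocate the dynamic local memory backing xqib (and the
       // buffers carved out of it below)
       clSetKernelArg(kernel, arg_idx, shmem_bytes, NULL);
       clEnqueueNDRangeKernel(queue, kernel, 2, NULL, global_dim, local_dim, 0, NULL, NULL);
 */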
//#if __CUDA_ARCH__ >= 350
//__launch_bounds__(64, 16)
/*
   NB_KERNEL_FUNC_NAME differs from its CUDA equivalent: it is not a variadic
   macro, because OpenCL does not support variadic macros, so this version
   takes exactly two arguments. If more strings need to be appended, a new
   macro must be written, or the strings must be appended directly here.
 */
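/*
   For illustration only (the actual definition is provided elsewhere in the
   build and also splices in the electrostatics/VdW flavor): a two-argument
   concatenation macro of the kind described above could look like

       #define NB_KERNEL_FUNC_NAME(x, y) x ## y

   so that NB_KERNEL_FUNC_NAME(nbnxn_kernel, _VF_opencl) would expand to
   nbnxn_kernel_VF_opencl.
 */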
__attribute__((reqd_work_group_size(CL_SIZE, CL_SIZE, 1)))
#ifdef PRUNE_NBL
    #ifdef CALC_ENERGIES
__kernel void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _VF_prune_opencl)
    #else
__kernel void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _F_prune_opencl)
    #endif
#else
    #ifdef CALC_ENERGIES
__kernel void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _VF_opencl)
    #else
__kernel void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _F_opencl)
    #endif
#endif
(
    int ntypes,                                /* IN  */
    cl_nbparam_params_t nbparam_params,        /* IN  */
    const __global float4 *restrict xq,        /* IN  */
    __global float *restrict f,                /* stores float3 values */ /* OUT */
    __global float *restrict e_lj,             /* OUT */
    __global float *restrict e_el,             /* OUT */
    __global float *restrict fshift,           /* stores float3 values */ /* OUT */
    const __global int *restrict atom_types,   /* IN  */
    const __global float *restrict shift_vec,  /* stores float3 values */ /* IN  */
    __constant float* nbfp_climg2d,            /* IN  */
    __constant float* nbfp_comb_climg2d,       /* IN  */
    __constant float* coulomb_tab_climg2d,     /* IN  */
    const __global nbnxn_sci_t* pl_sci,        /* IN  */
#ifndef PRUNE_NBL
    const
#endif
    __global nbnxn_cj4_t* pl_cj4,              /* OUT / IN */
    const __global nbnxn_excl_t* excl,         /* IN  */
    int bCalcFshift,                           /* IN  */
    __local float4 *xqib,                      /* Pointer to dyn alloc'ed shmem */
    __global float *debug_buffer               /* Debug buffer, can be used with print_to_debug_buffer_f */
)
{
    /* convenience variables */
    cl_nbparam_params_t *nbparam = &nbparam_params;

    float rcoulomb_sq = nbparam->rcoulomb_sq;

#ifdef VDW_CUTOFF_CHECK
    float rvdw_sq = nbparam_params.rvdw_sq;
    float vdw_in_range;
#endif
#ifdef LJ_EWALD
    float lje_coeff2, lje_coeff6_6;
#endif
#ifdef EL_RF
    float two_k_rf = nbparam->two_k_rf;
#endif
#ifdef EL_EWALD_TAB
    float coulomb_tab_scale = nbparam->coulomb_tab_scale;
#endif
#ifdef EL_EWALD_ANA
    float beta2 = nbparam->ewald_beta*nbparam->ewald_beta;
    float beta3 = nbparam->ewald_beta*nbparam->ewald_beta*nbparam->ewald_beta;
#endif
#ifdef PRUNE_NBL
    float rlist_sq = nbparam->rlist_sq;
#endif

#ifdef CALC_ENERGIES
#ifdef EL_EWALD_ANY
    float beta        = nbparam->ewald_beta;
    float ewald_shift = nbparam->sh_ewald;
#else
    float c_rf        = nbparam->c_rf;
#endif /* EL_EWALD_ANY */
#endif /* CALC_ENERGIES */
    /* thread/block/warp id-s */
    unsigned int tidxi = get_local_id(0);
    unsigned int tidxj = get_local_id(1);
    unsigned int tidx  = get_local_id(1) * get_local_size(0) + get_local_id(0);
    unsigned int bidx  = get_group_id(0);
    unsigned int widx  = tidx / WARP_SIZE; /* warp index */
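    /* Worked example (a sketch; actual values depend on the build): with
       CL_SIZE = 8 the work-group is 8x8 = 64 work-items, so with
       WARP_SIZE = 32 (NVIDIA) tidx 0..31 map to widx 0 and tidx 32..63
       map to widx 1, i.e. rows tidxj 0-3 form warp 0 and rows 4-7 warp 1.
       This is why the cj pre-load below tests tidxj == 0 || tidxj == 4. */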
    int          sci, ci, cj, ci_offset,
                 ai, aj,
                 cij4_start, cij4_end,
                 typei, typej,
                 i, jm, j4, wexcl_idx;
    float        qi, qj_f,
                 r2, inv_r, inv_r2, inv_r6,
                 c6, c12,
                 int_bit,
                 F_invr;
#ifdef CALC_ENERGIES
    float        E_lj, E_el;
#endif
#if defined CALC_ENERGIES || defined LJ_POT_SWITCH
    float        E_lj_p;
#endif
    unsigned int wexcl, imask, mask_ji;
    float4       xqbuf;
    float3       xi, xj, rv, f_ij, fcj_buf;
    float        fshift_buf;
    float3       fci_buf[NCL_PER_SUPERCL]; /* i force buffer */

    nbnxn_sci_t  nb_sci;
    /* shmem buffer for cj, for both warps separately */
    __local int *cjs = (__local int *)(xqib + NCL_PER_SUPERCL * CL_SIZE);
#define LOCAL_OFFSET cjs + 2 * NBNXN_GPU_JGROUP_SIZE

#ifdef IATYPE_SHMEM /* should not be defined: CUDA arch >= 300 specific */
    /* shmem buffer for i atom-type pre-loading */
    __local int *atib = (__local int *)(LOCAL_OFFSET);
#undef LOCAL_OFFSET
#define LOCAL_OFFSET atib + NCL_PER_SUPERCL * CL_SIZE
#endif

#ifndef REDUCE_SHUFFLE
    /* shmem j force buffer */
    __local float *f_buf = (__local float *)(LOCAL_OFFSET);
#undef LOCAL_OFFSET
#define LOCAL_OFFSET f_buf + CL_SIZE * CL_SIZE * 3
#endif

    /* Local buffer used to implement the __any warp vote function from CUDA.
       volatile is used to avoid compiler optimizations for AMD builds. */
    volatile __local uint *warp_any = (__local uint*)(LOCAL_OFFSET);
#undef LOCAL_OFFSET
    nb_sci     = pl_sci[bidx];         /* my i super-cluster's index = current bidx */
    sci        = nb_sci.sci;           /* super-cluster */
    cij4_start = nb_sci.cj4_ind_start; /* first ...                                 */
    cij4_end   = nb_sci.cj4_ind_end;   /* and last index of j clusters              */

    /* Pre-load i-atom x and q into shared memory */
    ci = sci * NCL_PER_SUPERCL + tidxj;
    ai = ci * CL_SIZE + tidxi;
    xqib[tidxj * CL_SIZE + tidxi] = xq[ai] + (float4)(shift_vec[3 * nb_sci.shift], shift_vec[3 * nb_sci.shift + 1], shift_vec[3 * nb_sci.shift + 2], 0.0f);
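    /* shift_vec stores float3 values flattened as 3 consecutive floats, so
       shift index s occupies elements 3*s, 3*s + 1 and 3*s + 2; e.g. (indices
       purely illustrative) the y component of shift 5 is shift_vec[16]. */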
#ifdef IATYPE_SHMEM /* should not be defined: CUDA arch >= 300 specific */
    /* Pre-load the i-atom types into shared memory */
    atib[tidxj * CL_SIZE + tidxi] = atom_types[ai];
#endif

    /* Initialise warp vote. (8x8 block) 2 warps for nvidia */
    if (tidx == 0 || tidx == 32)
    {
        warp_any[widx] = 0;
    }

    barrier(CLK_LOCAL_MEM_FENCE);

    for (ci_offset = 0; ci_offset < NCL_PER_SUPERCL; ci_offset++)
    {
        fci_buf[ci_offset] = (float3)(0.0f);
    }
#ifdef LJ_EWALD
    /* TODO: we are trading registers with flops by keeping lje_coeff-s, try re-calculating it later */
    lje_coeff2   = nbparam->ewaldcoeff_lj*nbparam->ewaldcoeff_lj;
    lje_coeff6_6 = lje_coeff2*lje_coeff2*lje_coeff2*ONE_SIXTH_F;
#endif /* LJ_EWALD */
#ifdef CALC_ENERGIES
    E_lj = 0.0f;
    E_el = 0.0f;

#if defined EXCLUSION_FORCES /* Ewald or RF */
    if (nb_sci.shift == CENTRAL && pl_cj4[cij4_start].cj[0] == sci*NCL_PER_SUPERCL)
    {
        /* we have the diagonal: add the charge and LJ self interaction energy term */
        for (i = 0; i < NCL_PER_SUPERCL; i++)
        {
#if defined EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF
            qi    = xqib[i * CL_SIZE + tidxi].w;
            E_el += qi*qi;
#endif
#if defined LJ_EWALD
            E_lj += nbfp_climg2d[atom_types[(sci*NCL_PER_SUPERCL + i)*CL_SIZE + tidxi]*(ntypes + 1)*2];
#endif /* LJ_EWALD */
        }

        /* divide the self term(s) equally over the j-threads, then multiply with the coefficients. */
#ifdef LJ_EWALD
        E_lj /= CL_SIZE;
        E_lj *= 0.5f*ONE_SIXTH_F*lje_coeff6_6;
#endif /* LJ_EWALD */

#if defined EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF
        E_el /= CL_SIZE;
#if defined EL_RF || defined EL_CUTOFF
        E_el *= -nbparam->epsfac*0.5f*c_rf;
#else
        E_el *= -nbparam->epsfac*beta*M_FLOAT_1_SQRTPI; /* last factor 1/sqrt(pi) */
#endif
#endif /* EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF */
    }
#endif /* EXCLUSION_FORCES */

#endif /* CALC_ENERGIES */
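    /* A sketch of the Ewald branch of the self term above (assuming the
       standard Ewald decomposition): each atom carries a spurious
       self-interaction of -beta/sqrt(pi) * q_i^2 from the reciprocal-space
       Gaussians, so the kernel accumulates sum(q_i^2) over the diagonal
       i clusters, divides by CL_SIZE because every j-thread of a row adds
       the same term, and scales by -epsfac*beta/sqrt(pi). The RF/cut-off
       branch is analogous with the -epsfac*0.5*c_rf coefficient. */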
    /* skip central shifts when summing shift forces */
    if (nb_sci.shift == CENTRAL)
    {
        bCalcFshift = false;
    }

    fshift_buf = 0.0f;
    /* loop over the j clusters = seen by any of the atoms in the current super-cluster */
    for (j4 = cij4_start; j4 < cij4_end; j4++)
    {
        wexcl_idx = pl_cj4[j4].imei[widx].excl_ind;
        imask     = pl_cj4[j4].imei[widx].imask;
        wexcl     = excl[wexcl_idx].pair[(tidx) & (WARP_SIZE - 1)];

#ifndef PRUNE_NBL
        if (imask)
#endif
        {
            /* Pre-load cj into shared memory on both warps separately */
            if ((tidxj == 0 || tidxj == 4) && tidxi < NBNXN_GPU_JGROUP_SIZE)
            {
                cjs[tidxi + tidxj * NBNXN_GPU_JGROUP_SIZE / 4] = pl_cj4[j4].cj[tidxi];
            }
            /* Unrolling this loop
               - with pruning leads to register spilling;
               - on Kepler is much slower;
               - doesn't work on CUDA < v4.1.
               Tested with nvcc 3.2 - 5.0.7 */
#if !defined PRUNE_NBL //&& __CUDA_ARCH__ < 300 && CUDA_VERSION >= 4010
#pragma unroll 4
#endif
            for (jm = 0; jm < NBNXN_GPU_JGROUP_SIZE; jm++)
            {
                if (imask & (supercl_interaction_mask << (jm * NCL_PER_SUPERCL)))
                {
                    mask_ji = (1U << (jm * NCL_PER_SUPERCL));

                    cj      = cjs[jm + (tidxj & 4) * NBNXN_GPU_JGROUP_SIZE / 4];
                    aj      = cj * CL_SIZE + tidxj;

                    /* load j atom data */
                    xqbuf   = xq[aj];
                    xj      = (float3)(xqbuf.xyz);
                    qj_f    = nbparam->epsfac * xqbuf.w;
                    typej   = atom_types[aj];

                    fcj_buf = (float3)(0.0f);
                    /* The PME and RF kernels don't unroll with CUDA < v4.1. */
#if !defined PRUNE_NBL //&& !(CUDA_VERSION < 4010 && defined EXCLUSION_FORCES)
#pragma unroll 8
#endif
                    for (i = 0; i < NCL_PER_SUPERCL; i++)
                    {
                        if (imask & mask_ji)
                        {
                            ci_offset = i;                         /* i force buffer offset */

                            ci        = sci * NCL_PER_SUPERCL + i; /* i cluster index */
                            ai        = ci * CL_SIZE + tidxi;      /* i atom index */

                            /* all threads load an atom from i cluster ci into shmem! */
                            xqbuf     = xqib[i * CL_SIZE + tidxi];
                            xi        = (float3)(xqbuf.xyz);

                            /* distance between i and j atoms */
                            rv        = xi - xj;
                            r2        = norm2(rv);
#ifdef PRUNE_NBL
                            /* vote.. should code shmem serialisation, wonder what the hit will be */
                            if (r2 < rlist_sq)
                            {
                                warp_any[widx] = 1;
                            }

                            /* If _none_ of the atom pairs are in cutoff range,
                               the bit corresponding to the current
                               cluster-pair in imask gets set to 0. */
                            if (!warp_any[widx])
                            {
                                imask &= ~mask_ji;
                            }

                            warp_any[widx] = 0;
#endif

                            int_bit = (wexcl & mask_ji) ? 1.0f : 0.0f;
                            /* cutoff & exclusion check */
#ifdef EXCLUSION_FORCES
                            if (r2 < rcoulomb_sq *
                                (nb_sci.shift != CENTRAL || ci != cj || tidxj > tidxi))
#else
                            if (r2 < rcoulomb_sq * int_bit)
#endif
                            {
                                /* load the rest of the i-atom parameters */
                                qi    = xqbuf.w;
#ifdef IATYPE_SHMEM /* should not be defined: CUDA arch >= 300 specific */
                                typei = atib[i * CL_SIZE + tidxi];
#else
                                typei = atom_types[ai];
#endif
                                /* LJ 6*C6 and 12*C12 */
                                c6  = nbfp_climg2d[2 * (ntypes * typei + typej)];
                                c12 = nbfp_climg2d[2 * (ntypes * typei + typej) + 1];
                                /* avoid NaN for excluded pairs at r=0 */
                                r2 += (1.0f - int_bit) * NBNXN_AVOID_SING_R2_INC;
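                                /* Sketch of why this works: for an excluded pair
                                   (int_bit == 0) sitting at r2 == 0, rsqrt(0) would
                                   give inf and the masked force 0 * inf = NaN; the
                                   small increment keeps inv_r finite, while the
                                   int_bit masking below still zeroes the result. */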
                                inv_r   = rsqrt(r2);
                                inv_r2  = inv_r * inv_r;
                                inv_r6  = inv_r2 * inv_r2 * inv_r2;
#if defined EXCLUSION_FORCES
                                /* We could mask inv_r2, but with Ewald
                                 * masking both inv_r6 and F_invr is faster */
                                inv_r6 *= int_bit;
#endif /* EXCLUSION_FORCES */
                                F_invr = inv_r6 * (c12 * inv_r6 - c6) * inv_r2;
#if defined CALC_ENERGIES || defined LJ_POT_SWITCH
                                E_lj_p = int_bit * (c12 * (inv_r6 * inv_r6 + nbparam->repulsion_shift.cpot)*ONE_TWELVETH_F -
                                                    c6 * (inv_r6 + nbparam->dispersion_shift.cpot)*ONE_SIXTH_F);
#endif
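                                /* Worked form of the two expressions above (recall
                                   c6 = 6*C6 and c12 = 12*C12 as loaded from nbfp):
                                     F_invr = (12*C12/r^12 - 6*C6/r^6) / r^2 = -(1/r)(dV/dr)
                                     E_lj_p = C12/r^12 - C6/r^6 (+ potential-shift terms),
                                   where the 1/12 and 1/6 factors undo the premultiplied
                                   coefficients for the energy; multiplying F_invr by the
                                   distance vector rv later yields the force vector. */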
#ifdef LJ_FORCE_SWITCH
#ifdef CALC_ENERGIES
                                calculate_force_switch_F_E(nbparam, c6, c12, inv_r, r2, &F_invr, &E_lj_p);
#else
                                calculate_force_switch_F(nbparam, c6, c12, inv_r, r2, &F_invr);
#endif /* CALC_ENERGIES */
#endif /* LJ_FORCE_SWITCH */
#ifdef LJ_EWALD
#ifdef LJ_EWALD_COMB_GEOM
#ifdef CALC_ENERGIES
                                calculate_lj_ewald_comb_geom_F_E(nbfp_comb_climg2d, nbparam, typei, typej, r2, inv_r2, lje_coeff2, lje_coeff6_6, int_bit, &F_invr, &E_lj_p);
#else
                                calculate_lj_ewald_comb_geom_F(nbfp_comb_climg2d, typei, typej, r2, inv_r2, lje_coeff2, lje_coeff6_6, &F_invr);
#endif /* CALC_ENERGIES */
#elif defined LJ_EWALD_COMB_LB
                                calculate_lj_ewald_comb_LB_F_E(nbfp_comb_climg2d, nbparam, typei, typej, r2, inv_r2, lje_coeff2, lje_coeff6_6,
#ifdef CALC_ENERGIES
                                                               int_bit, true, &F_invr, &E_lj_p
#else
                                                               0, false, &F_invr, 0
#endif /* CALC_ENERGIES */
                                                               );
#endif /* LJ_EWALD_COMB_GEOM */
#endif /* LJ_EWALD */
#ifdef VDW_CUTOFF_CHECK
                                /* Separate VDW cut-off check to enable twin-range cut-offs
                                 * (rvdw < rcoulomb <= rlist)
                                 */
                                vdw_in_range  = (r2 < rvdw_sq) ? 1.0f : 0.0f;
                                F_invr       *= vdw_in_range;
#ifdef CALC_ENERGIES
                                E_lj_p       *= vdw_in_range;
#endif
#endif /* VDW_CUTOFF_CHECK */
#ifdef LJ_POT_SWITCH
#ifdef CALC_ENERGIES
                                calculate_potential_switch_F_E(nbparam, c6, c12, inv_r, r2, &F_invr, &E_lj_p);
#else
                                calculate_potential_switch_F(nbparam, c6, c12, inv_r, r2, &F_invr, &E_lj_p);
#endif /* CALC_ENERGIES */
#endif /* LJ_POT_SWITCH */

#ifdef CALC_ENERGIES
                                E_lj += E_lj_p;
#endif /* CALC_ENERGIES */
#ifdef EL_CUTOFF
#ifdef EXCLUSION_FORCES
                                F_invr += qi * qj_f * int_bit * inv_r2 * inv_r;
#else
                                F_invr += qi * qj_f * inv_r2 * inv_r;
#endif
#endif /* EL_CUTOFF */
#ifdef EL_RF
                                F_invr += qi * qj_f * (int_bit*inv_r2 * inv_r - two_k_rf);
#endif /* EL_RF */
#if defined EL_EWALD_ANA
                                F_invr += qi * qj_f * (int_bit*inv_r2*inv_r + pmecorrF(beta2*r2)*beta3);
#elif defined EL_EWALD_TAB
                                F_invr += qi * qj_f * (int_bit*inv_r2 -
#ifdef USE_TEXOBJ
                                                       interpolate_coulomb_force_r(nbparam->coulomb_tab_texobj, r2 * inv_r, coulomb_tab_scale)
#else
                                                       interpolate_coulomb_force_r(coulomb_tab_climg2d, r2 * inv_r, coulomb_tab_scale)
#endif /* USE_TEXOBJ */
                                                       ) * inv_r;
#endif /* EL_EWALD_ANA/TAB */
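                                /* Sketch of the Ewald short-range force above: the
                                   real-space force of the Ewald decomposition is
                                     F(r)/r = q_i*q_j * (erfc(beta*r)/r + (2*beta/sqrt(pi)) * exp(-beta^2*r^2)) / r^2,
                                   which the kernel expresses as the plain 1/r^3
                                   Coulomb term (masked by int_bit) plus a smooth
                                   correction, evaluated either analytically
                                   (pmecorrF) or from the tabulated force. */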
#ifdef CALC_ENERGIES
#ifdef EL_CUTOFF
                                E_el += qi * qj_f * (int_bit*inv_r - c_rf);
#endif
#ifdef EL_RF
                                E_el += qi * qj_f * (int_bit*inv_r + 0.5f * two_k_rf * r2 - c_rf);
#endif
#ifdef EL_EWALD_ANY
                                /* 1.0f - erff is faster than erfcf */
                                E_el += qi * qj_f * (inv_r * (int_bit - erf(r2 * inv_r * beta)) - int_bit * ewald_shift);
#endif /* EL_EWALD_ANY */
#endif /* CALC_ENERGIES */
                                f_ij = rv * F_invr;

                                /* accumulate j forces in registers */
                                fcj_buf -= f_ij;

                                /* accumulate i forces in registers */
                                fci_buf[ci_offset] += f_ij;
                            }
                        }

                        /* shift the mask bit by 1 */
                        mask_ji += mask_ji;
                    }
                    /* reduce j forces */
                    /* store j forces in shmem */
                    f_buf[                  tidx] = fcj_buf.x;
                    f_buf[    FBUF_STRIDE + tidx] = fcj_buf.y;
                    f_buf[2 * FBUF_STRIDE + tidx] = fcj_buf.z;

                    reduce_force_j_generic(f_buf, f, tidxi, tidxj, aj);
                }
            }
#ifdef PRUNE_NBL
            /* Update the imask with the new one which does not contain the
               out-of-range clusters anymore. */
            pl_cj4[j4].imei[widx].imask = imask;
#endif
        }
    }
    /* reduce i forces */
    for (ci_offset = 0; ci_offset < NCL_PER_SUPERCL; ci_offset++)
    {
        ai = (sci * NCL_PER_SUPERCL + ci_offset) * CL_SIZE + tidxi;

        f_buf[                  tidx] = fci_buf[ci_offset].x;
        f_buf[    FBUF_STRIDE + tidx] = fci_buf[ci_offset].y;
        f_buf[2 * FBUF_STRIDE + tidx] = fci_buf[ci_offset].z;
        barrier(CLK_LOCAL_MEM_FENCE);
        reduce_force_i(f_buf, f,
                       &fshift_buf, bCalcFshift,
                       tidxi, tidxj, ai);
        barrier(CLK_LOCAL_MEM_FENCE);
    }
    /* add up local shift forces into global mem */
    //if (bCalcFshift && tidxj == 0)
    //    atomicAdd_g_f3(&(fshift[3 * nb_sci.shift]), fshift_buf);
    if (bCalcFshift)
    {
        /* Only threads with tidxj < 3 will update fshift.
           The threads performing the update must be the same as the threads
           which stored the reduction result in the reduce_force_i function. */
        if (tidxj < 3)
        {
            atomicAdd_g_f(&(fshift[3 * nb_sci.shift + tidxj]), fshift_buf);
        }
    }
#ifdef CALC_ENERGIES
    /* flush the energies to shmem and reduce them */
    f_buf[              tidx] = E_lj;
    f_buf[FBUF_STRIDE + tidx] = E_el;
    reduce_energy_pow2(f_buf + (tidx & WARP_SIZE), e_lj, e_el, tidx & ~WARP_SIZE);
#endif /* CALC_ENERGIES */
}
#undef EL_EWALD_ANY
#undef EXCLUSION_FORCES
#undef LJ_EWALD