/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012-2018, The GROMACS development team.
 * Copyright (c) 2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *  \brief OpenCL non-bonded kernel.
 *
 *  OpenCL 1.2 support is expected.
 *
 *  \author Anca Hamuraru <anca@streamcomputing.eu>
 *  \author Szilárd Páll <pall.szilard@gmail.com>
 *  \ingroup module_nbnxm
 */
/* Currently we enable CJ prefetch for AMD/NVIDIA and disable it for the "nowarp" kernel.
 * Note that this should precede the kernel_utils include.
 */

#include "nbnxm_ocl_kernel_utils.clh"
/////////////////////////////////////////////////////////////////////////////////////////////////
#if defined EL_EWALD_ANA || defined EL_EWALD_TAB
/* Note: convenience macro, needs to be undef-ed at the end of the file. */
#    define EL_EWALD_ANY
#endif
#if defined LJ_EWALD_COMB_GEOM || defined LJ_EWALD_COMB_LB
/* Note: convenience macro, needs to be undef-ed at the end of the file. */
#    define LJ_EWALD
#endif
#if defined EL_EWALD_ANY || defined EL_RF || defined LJ_EWALD \
        || (defined EL_CUTOFF && defined CALC_ENERGIES)
/* Macro to control the calculation of exclusion forces in the kernel.
 * We do that with Ewald (elec/vdw) and RF. Cut-off only has exclusion
 * energy terms.
 *
 * Note: convenience macro, needs to be undef-ed at the end of the file.
 */
#    define EXCLUSION_FORCES
#endif
#if defined LJ_COMB_GEOM || defined LJ_COMB_LB
/* Note: convenience macro, needs to be undef-ed at the end of the file. */
#    define LJ_COMB
#endif
/*
   Kernel launch parameters:
    - #blocks   = #pair lists, blockId = pair list Id
    - #threads  = CL_SIZE^2
    - shmem     = CL_SIZE^2 * sizeof(float)

   Each thread calculates an i-force component, processing one i-j atom pair at a time.

   TODO: implement 128 threads/wavefront by porting over the NTHREAD_Z/j4 loop
   "horizontal splitting" over threads.
 */
/* NOTE:
   NB_KERNEL_FUNC_NAME differs from the CUDA equivalent as it is not a variadic macro, since OpenCL
   does not support variadic macros; this version takes exactly two arguments. Thus, if more strings
   need to be appended, a new macro must be written, or they must be appended directly here.
*/
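
/* For illustration: the per-flavor macro is defined by the file that includes this
 * kernel (e.g. nbnxm_ocl_kernels.clh). Assuming a definition along the lines of
 *     #define NB_KERNEL_FUNC_NAME(x, y) x##_ElecEwQSTab_VdwLJ##y
 * the declaration below expands to
 *     __kernel void nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_opencl(...)
 */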
__attribute__((reqd_work_group_size(CL_SIZE, CL_SIZE, 1)))
#ifdef cl_intel_required_subgroup_size
__attribute__((intel_reqd_sub_group_size(SUBGROUP_SIZE)))
#endif
#ifdef PRUNE_NBL
#    ifdef CALC_ENERGIES
__kernel void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _VF_prune_opencl)
#    else
__kernel void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _F_prune_opencl)
#    endif
#else
#    ifdef CALC_ENERGIES
__kernel void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _VF_opencl)
#    else
__kernel void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _F_opencl)
#    endif
#endif
        (
#ifndef LJ_COMB
                const int ntypes, /* IN */
#endif
                cl_nbparam_params_t nbparam_params,                      /* IN */
                const __global float4* restrict xq,                      /* IN */
                __global float* restrict f,                              /* OUT stores float3 values */
                __global float* restrict gmx_unused e_lj,                /* OUT */
                __global float* restrict gmx_unused e_el,                /* OUT */
                __global float* restrict fshift,                         /* OUT stores float3 values */
#ifdef LJ_COMB
                const __global float2* restrict lj_comb,                 /* IN stores float2 values */
#else
                const __global int* restrict atom_types,                 /* IN */
#endif
                const __global float* restrict shift_vec,                /* IN stores float3 values */
                __constant const float2* restrict gmx_unused nbfp,       /* IN */
                __constant const float2* restrict gmx_unused nbfp_comb,  /* IN */
                __constant const float* restrict gmx_unused coulomb_tab, /* IN */
                const __global nbnxn_sci_t* pl_sci,                      /* IN */
#ifndef PRUNE_NBL
                const
#endif
                __global nbnxn_cj4_t* pl_cj4,                            /* OUT / IN */
                const __global nbnxn_excl_t* excl,                       /* IN */
                int bCalcFshift,                                         /* IN */
                __local float4* xqib                                     /* Pointer to dyn alloc'ed shmem */
        )
{
    /* convenience variables */
    const cl_nbparam_params_t* const nbparam = &nbparam_params;

    const float rcoulomb_sq = nbparam->rcoulomb_sq;
#ifdef VDW_CUTOFF_CHECK
    const float rvdw_sq = nbparam_params.rvdw_sq;
#endif
#ifdef EL_RF
    const float two_k_rf = nbparam->two_k_rf;
#endif
#ifdef EL_EWALD_TAB
    const float coulomb_tab_scale = nbparam->coulomb_tab_scale;
#endif
#ifdef EL_EWALD_ANA
    const float beta2 = nbparam->ewald_beta * nbparam->ewald_beta;
    const float beta3 = nbparam->ewald_beta * nbparam->ewald_beta * nbparam->ewald_beta;
#endif
#ifdef PRUNE_NBL
    const float rlist_sq = nbparam->rlistOuter_sq;
#endif

#ifdef CALC_ENERGIES
#    ifdef EL_EWALD_ANY
    const float beta        = nbparam->ewald_beta;
    const float ewald_shift = nbparam->sh_ewald;
#    else
    const float gmx_unused c_rf = nbparam->c_rf;
#    endif /* EL_EWALD_ANY */
#endif     /* CALC_ENERGIES */
    /* thread/block/warp id-s */
    const int tidxi = get_local_id(0);
    const int tidxj = get_local_id(1);
    const int tidx  = (int)(get_local_id(1) * get_local_size(0) + get_local_id(0));
    const int bidx  = get_group_id(0);
    const int widx  = tidx / WARP_SIZE; /* warp index */
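
    /* Worked example (assuming CL_SIZE == 8 and WARP_SIZE == 32, as on NVIDIA):
     * tidx runs over 0..63 within the 8x8 work-group, so widx is 0 for
     * tidx 0..31 and 1 for tidx 32..63, i.e. two warps per work-group. */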
    /*! i-cluster interaction mask for a super-cluster with all c_nbnxnGpuNumClusterPerSupercluster=8 bits set */
    const unsigned superClInteractionMask = ((1U << c_nbnxnGpuNumClusterPerSupercluster) - 1U);
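
    /* E.g. with c_nbnxnGpuNumClusterPerSupercluster == 8 this is 0xFF: one bit per
     * i-cluster, one such 8-bit field per j-cluster within an imask (see the pair
     * loop below). */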
#define LOCAL_OFFSET (xqib + c_nbnxnGpuNumClusterPerSupercluster * CL_SIZE)
    __local int* cjs = (__local int*)0;
#if USE_CJ_PREFETCH
    /* shmem buffer for cj, for both warps separately */
    cjs = (__local int*)(LOCAL_OFFSET);
#    undef LOCAL_OFFSET
#    define LOCAL_OFFSET (cjs + 2 * c_nbnxnGpuJgroupSize)
#endif // USE_CJ_PREFETCH

#ifdef IATYPE_SHMEM
#    ifndef LJ_COMB
    /* shmem buffer for i atom-type pre-loading */
    __local int* atib = (__local int*)(LOCAL_OFFSET); //NOLINT(google-readability-casting)
#        undef LOCAL_OFFSET
#        define LOCAL_OFFSET (atib + c_nbnxnGpuNumClusterPerSupercluster * CL_SIZE)
#    else
    /* shmem buffer for i-atom LJ combination-rule parameter pre-loading */
    __local float2* ljcpib = (__local float2*)(LOCAL_OFFSET);
#        undef LOCAL_OFFSET
#        define LOCAL_OFFSET (ljcpib + c_nbnxnGpuNumClusterPerSupercluster * CL_SIZE)
#    endif
#endif

#if !REDUCE_SHUFFLE
    /* shmem j force buffer */
    __local float* f_buf = (__local float*)(LOCAL_OFFSET);
#    undef LOCAL_OFFSET
#    define LOCAL_OFFSET (f_buf + CL_SIZE * CL_SIZE * 3)
#else
    __local float* f_buf = 0;
#endif
#if !USE_SUBGROUP_ANY
    /* Local buffer used to implement __any warp vote function from CUDA.
       volatile is used to avoid compiler optimizations for AMD builds. */
    //NOLINTNEXTLINE(google-readability-casting)
    volatile __local int* warp_any = (__local int*)(LOCAL_OFFSET);
#else
    __local int gmx_unused* warp_any = 0;
#endif
#undef LOCAL_OFFSET
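
    /* The LOCAL_OFFSET bookkeeping above carves consecutive sub-buffers out of the
     * single dynamically allocated __local block passed in as xqib. Sizes in
     * elements (each buffer present only when its feature macro is active):
     *
     *   xqib     c_nbnxnGpuNumClusterPerSupercluster * CL_SIZE   float4
     *   cjs      2 * c_nbnxnGpuJgroupSize                        int          (USE_CJ_PREFETCH)
     *   atib /
     *   ljcpib   c_nbnxnGpuNumClusterPerSupercluster * CL_SIZE   int / float2 (IATYPE_SHMEM)
     *   f_buf    CL_SIZE * CL_SIZE * 3                           float        (!REDUCE_SHUFFLE)
     *   warp_any 2                                               int          (!USE_SUBGROUP_ANY)
     *
     * The host-side __local allocation must cover the sum for the compiled flavor. */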
    const nbnxn_sci_t nb_sci     = pl_sci[bidx]; /* my i super-cluster's index = current bidx */
    const int         sci        = nb_sci.sci;           /* super-cluster */
    const int         cij4_start = nb_sci.cj4_ind_start; /* first ... */
    const int         cij4_end   = nb_sci.cj4_ind_end;   /* and last index of j clusters */
    for (int i = 0; i < c_nbnxnGpuNumClusterPerSupercluster; i += CL_SIZE)
    {
        /* Pre-load i-atom x and q into shared memory */
        const int ci = sci * c_nbnxnGpuNumClusterPerSupercluster + tidxj + i;
        const int ai = ci * CL_SIZE + tidxi;

        float4 xqbuf = xq[ai]
                       + (float4)(shift_vec[3 * nb_sci.shift],
                                  shift_vec[3 * nb_sci.shift + 1],
                                  shift_vec[3 * nb_sci.shift + 2],
                                  0.0F);
        xqbuf.w *= nbparam->epsfac;
        xqib[(tidxj + i) * CL_SIZE + tidxi] = xqbuf;

#ifdef IATYPE_SHMEM
#    ifndef LJ_COMB
        /* Pre-load the i-atom types into shared memory */
        atib[(tidxj + i) * CL_SIZE + tidxi] = atom_types[ai];
#    else
        ljcpib[(tidxj + i) * CL_SIZE + tidxi] = lj_comb[ai];
#    endif
#endif
    }
#if !USE_SUBGROUP_ANY
    /* Initialise warp vote: one flag per warp (an 8x8 block holds 2 warps on NVIDIA) */
    if (tidx == 0 || tidx == WARP_SIZE)
    {
        warp_any[widx] = 0;
    }
#endif

    barrier(CLK_LOCAL_MEM_FENCE);
    fvec fci_buf[c_nbnxnGpuNumClusterPerSupercluster]; /* i force buffer */
    for (int ci_offset = 0; ci_offset < c_nbnxnGpuNumClusterPerSupercluster; ci_offset++)
    {
        fci_buf[ci_offset][0] = 0.0F;
        fci_buf[ci_offset][1] = 0.0F;
        fci_buf[ci_offset][2] = 0.0F;
    }
#ifdef LJ_EWALD
    /* TODO: we are trading registers for flops by keeping the lje_coeff-s; try re-calculating them later */
    const float lje_coeff2   = nbparam->ewaldcoeff_lj * nbparam->ewaldcoeff_lj;
    const float lje_coeff6_6 = lje_coeff2 * lje_coeff2 * lje_coeff2 * ONE_SIXTH_F;
#endif /* LJ_EWALD */
#ifdef CALC_ENERGIES
    float E_lj = 0.0F;
    float E_el = 0.0F;

#    if defined EXCLUSION_FORCES /* Ewald or RF */
    if (nb_sci.shift == CENTRAL && pl_cj4[cij4_start].cj[0] == sci * c_nbnxnGpuNumClusterPerSupercluster)
    {
        /* we have the diagonal: add the charge and LJ self interaction energy term */
        for (int i = 0; i < c_nbnxnGpuNumClusterPerSupercluster; i++)
        {
#        if defined EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF
            const float qi = xqib[i * CL_SIZE + tidxi].w;
            E_el += qi * qi;
#        endif
#        if defined LJ_EWALD
            E_lj += nbfp[atom_types[(sci * c_nbnxnGpuNumClusterPerSupercluster + i) * CL_SIZE + tidxi] * (ntypes + 1)]
                            .x;
#        endif /* LJ_EWALD */
        }

        /* divide the self term(s) equally over the j-threads, then multiply with the coefficients. */
#        if defined LJ_EWALD
        E_lj /= CL_SIZE;
        E_lj *= HALF_F * ONE_SIXTH_F * lje_coeff6_6;
#        endif /* LJ_EWALD */

#        if defined EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF
        /* Correct for epsfac^2 due to adding qi^2 */
        E_el /= nbparam->epsfac * CL_SIZE;
#            if defined EL_RF || defined EL_CUTOFF
        E_el *= -HALF_F * c_rf;
#            else
        E_el *= -beta * M_FLOAT_1_SQRTPI; /* last factor 1/sqrt(pi) */
#            endif
#        endif /* EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF */
    }
#    endif /* EXCLUSION_FORCES */

#endif /* CALC_ENERGIES */
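
    /* The block above removes the j == i self-contribution that the pair loop would
     * otherwise mistreat: per atom, E_el accumulates q_i^2 scaled by -c_rf/2 (RF and
     * plain cut-off) or by -beta/sqrt(pi) (the standard Ewald self-energy term), and
     * with LJ-PME E_lj accumulates the C6_ii r^-6 self term. Dividing by epsfac
     * corrects for the charges having been pre-scaled by epsfac when loaded into
     * xqib, and dividing by CL_SIZE spreads the term over the j-threads. */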
#ifdef EXCLUSION_FORCES
    const int nonSelfInteraction = !(nb_sci.shift == CENTRAL & tidxj <= tidxi);
#endif
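
/* Illustration: for a same-cluster pair on the central image (ci == cj, zero shift),
 * atom pair (a,b) would be processed by both thread (tidxi=a, tidxj=b) and
 * (tidxi=b, tidxj=a); keeping only the strict upper triangle (tidxj > tidxi) counts
 * each pair once and drops the singular r = 0 self pair. The plain path handles this
 * through the wexcl exclusion bits, but with EXCLUSION_FORCES excluded pairs still
 * receive correction forces, so the check must be explicit. The bitwise & and | are
 * intentional (branchless). */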
    /* loop over the j clusters = seen by any of the atoms in the current super-cluster */
    for (int j4 = cij4_start; j4 < cij4_end; j4++)
    {
        const int          wexcl_idx = pl_cj4[j4].imei[widx].excl_ind;
        unsigned int       imask     = pl_cj4[j4].imei[widx].imask;
        const unsigned int wexcl     = excl[wexcl_idx].pair[(tidx) & (WARP_SIZE - 1)];

        preloadCj4(&cjs, pl_cj4[j4].cj, tidxi, tidxj, imask != 0U);
#ifndef PRUNE_NBL
        if (imask)
#endif
        {
            /* Unrolling this loop improves performance without pruning, but
             * with pruning it leads to slowdown.
             *
             * Tested with driver 1800.5
             *
             * TODO: check loop unrolling with NVIDIA OpenCL
             */
#if !defined PRUNE_NBL && !defined _NVIDIA_SOURCE_
#    pragma unroll 4
#endif
            for (int jm = 0; jm < c_nbnxnGpuJgroupSize; jm++)
            {
                if (imask & (superClInteractionMask << (jm * c_nbnxnGpuNumClusterPerSupercluster)))
                {
                    unsigned int mask_ji = (1U << (jm * c_nbnxnGpuNumClusterPerSupercluster));

                    const int cj = loadCj(cjs, pl_cj4[j4].cj, jm, tidxi, tidxj);
                    const int aj = cj * CL_SIZE + tidxj;
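
                    /* imask layout (assuming 8 i-clusters per super-cluster): bit
                     * (jm * 8 + i) flags "j-cluster jm interacts with i-cluster i".
                     * For jm == 1 the field tested above is bits 8..15 and mask_ji
                     * starts at bit 8, doubling once per i iteration below. */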
                    /* load j atom data */
                    const float4 xjqbuf = xq[aj];
                    const float3 xj     = (float3)(xjqbuf.xyz);
                    const float  qj_f   = xjqbuf.w;
#ifndef LJ_COMB
                    const int typej = atom_types[aj];
#else
                    const float2 ljcp_j = lj_comb[aj];
#endif

                    float3 fcj_buf = (float3)(0.0F);
#if !defined PRUNE_NBL
#    pragma unroll 8
#endif
                    for (int i = 0; i < c_nbnxnGpuNumClusterPerSupercluster; i++)
                    {
                        if (imask & mask_ji)
                        {
                            const int gmx_unused ci = sci * c_nbnxnGpuNumClusterPerSupercluster + i; /* i cluster index */

                            /* all threads load an atom from i cluster ci into shmem! */
                            const float4 xiqbuf = xqib[i * CL_SIZE + tidxi];
                            const float3 xi     = (float3)(xiqbuf.xyz);

                            /* distance between i and j atoms */
                            const float3 rv = xi - xj;
                            float        r2 = norm2(rv);

#ifdef PRUNE_NBL
                            /* If no atom pair is within range, prune this cluster pair from the list. */
                            if (!gmx_sub_group_any(warp_any, widx, r2 < rlist_sq))
                            {
                                imask &= ~mask_ji;
                            }
#endif
                            const float int_bit = (wexcl & mask_ji) ? 1.0F : 0.0F;

                            /* cutoff & exclusion check */
#ifdef EXCLUSION_FORCES
                            if ((r2 < rcoulomb_sq) * (nonSelfInteraction | (ci != cj)))
#else
                            if ((float)(r2 < rcoulomb_sq) * int_bit != 0.0F)
#endif
                            {
                                /* load the rest of the i-atom parameters */
                                const float qi = xiqbuf.w;

#ifdef IATYPE_SHMEM
#    ifndef LJ_COMB
                                const int typei = atib[i * CL_SIZE + tidxi];
#    else
                                const float2 ljcp_i = ljcpib[i * CL_SIZE + tidxi];
#    endif
#else /* IATYPE_SHMEM */
                                const int ai = ci * CL_SIZE + tidxi; /* i atom index */

#    ifndef LJ_COMB
                                const int typei = atom_types[ai];
#    else
                                const float2 ljcp_i = lj_comb[ai];
#    endif
#endif /* IATYPE_SHMEM */
                                /* LJ 6*C6 and 12*C12 */
#ifndef LJ_COMB
                                const float2 c6c12 = nbfp[ntypes * typei + typej];

                                const float c6  = c6c12.x;
                                const float c12 = c6c12.y;
#else /* LJ_COMB */
#    ifdef LJ_COMB_GEOM
                                const float c6  = ljcp_i.x * ljcp_j.x;
                                const float c12 = ljcp_i.y * ljcp_j.y;
#    else
                                /* LJ 2^(1/6)*sigma and 12*epsilon */
                                const float sigma   = ljcp_i.x + ljcp_j.x;
                                const float epsilon = ljcp_i.y * ljcp_j.y;
#        if defined CALC_ENERGIES || defined LJ_FORCE_SWITCH || defined LJ_POT_SWITCH
                                const float2 c6c12 = convert_sigma_epsilon_to_c6_c12(sigma, epsilon);
                                const float  c6    = c6c12.x;
                                const float  c12   = c6c12.y;
#        endif
#    endif /* LJ_COMB_GEOM */
#endif     /* LJ_COMB */
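
                                /* Note on the combination rules above (inferred from the
                                 * loads and this kernel's pre-scaled 6*C6 / 12*C12
                                 * convention): with LJ_COMB_GEOM the per-atom float2
                                 * evidently holds (sqrt(6*C6_ii), sqrt(12*C12_ii)), so one
                                 * multiply combines them; with LJ_COMB_LB it holds
                                 * (2^(1/6)*sigma_ii/2, sqrt(12*epsilon_ii)), so an add and
                                 * a multiply yield the combined sigma and epsilon. */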
                                // Ensure the distance does not become so small that r^-12 overflows.
                                r2 = max(r2, c_nbnxnMinDistanceSquared);

                                const float inv_r  = rsqrt(r2);
                                const float inv_r2 = inv_r * inv_r;
#if !defined LJ_COMB_LB || defined CALC_ENERGIES
                                float inv_r6 = inv_r2 * inv_r2 * inv_r2;
#    if defined EXCLUSION_FORCES
                                /* We could mask inv_r2, but with Ewald
                                 * masking both inv_r6 and F_invr is faster */
                                inv_r6 *= int_bit;
#    endif /* EXCLUSION_FORCES */

                                float F_invr = inv_r6 * (c12 * inv_r6 - c6) * inv_r2;
#    if defined CALC_ENERGIES || defined LJ_POT_SWITCH
                                float E_lj_p =
                                        int_bit
                                        * (c12 * (inv_r6 * inv_r6 + nbparam->repulsion_shift.cpot) * ONE_TWELVETH_F
                                           - c6 * (inv_r6 + nbparam->dispersion_shift.cpot) * ONE_SIXTH_F);
#    endif
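
                                /* With the 6*C6 / 12*C12 pre-scaling, F_invr is the scalar
                                 * F/r of the LJ pair force:
                                 *     F/r = (12*C12*r^-12 - 6*C6*r^-6) / r^2
                                 * and E_lj_p is the shifted pair potential
                                 * C12*r^-12 - C6*r^-6, the ONE_TWELVETH_F / ONE_SIXTH_F
                                 * factors undoing the pre-scaling. */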
#else /* ! LJ_COMB_LB || CALC_ENERGIES */
                                const float sig_r  = sigma * inv_r;
                                const float sig_r2 = sig_r * sig_r;
                                float       sig_r6 = sig_r2 * sig_r2 * sig_r2;
#    if defined EXCLUSION_FORCES
                                sig_r6 *= int_bit;
#    endif /* EXCLUSION_FORCES */

                                float F_invr = epsilon * sig_r6 * (sig_r6 - 1.0F) * inv_r2;
#endif /* ! LJ_COMB_LB || CALC_ENERGIES */
#ifdef LJ_FORCE_SWITCH
#    ifdef CALC_ENERGIES
                                calculate_force_switch_F_E(nbparam, c6, c12, inv_r, r2, &F_invr, &E_lj_p);
#    else
                                calculate_force_switch_F(nbparam, c6, c12, inv_r, r2, &F_invr);
#    endif /* CALC_ENERGIES */
#endif     /* LJ_FORCE_SWITCH */
#ifdef LJ_EWALD
#    ifdef LJ_EWALD_COMB_GEOM
#        ifdef CALC_ENERGIES
                                calculate_lj_ewald_comb_geom_F_E(
                                        nbfp_comb, nbparam, typei, typej, r2, inv_r2, lje_coeff2, lje_coeff6_6, int_bit, &F_invr, &E_lj_p);
#        else
                                calculate_lj_ewald_comb_geom_F(
                                        nbfp_comb, typei, typej, r2, inv_r2, lje_coeff2, lje_coeff6_6, &F_invr);
#        endif /* CALC_ENERGIES */
#    elif defined LJ_EWALD_COMB_LB
                                calculate_lj_ewald_comb_LB_F_E(nbfp_comb, nbparam, typei, typej, r2, inv_r2, lje_coeff2, lje_coeff6_6,
#        ifdef CALC_ENERGIES
                                                               int_bit, &F_invr, &E_lj_p
#        else
                                                               0, &F_invr, 0
#        endif /* CALC_ENERGIES */
                                );
#    endif /* LJ_EWALD_COMB_GEOM */
#endif     /* LJ_EWALD */
#ifdef LJ_POT_SWITCH
#    ifdef CALC_ENERGIES
                                calculate_potential_switch_F_E(nbparam, inv_r, r2, &F_invr, &E_lj_p);
#    else
                                calculate_potential_switch_F(nbparam, inv_r, r2, &F_invr, &E_lj_p);
#    endif /* CALC_ENERGIES */
#endif     /* LJ_POT_SWITCH */
#ifdef VDW_CUTOFF_CHECK
                                /* Separate VDW cut-off check to enable twin-range cut-offs
                                 * (rvdw < rcoulomb <= rlist)
                                 */
                                const float vdw_in_range = (r2 < rvdw_sq) ? 1.0F : 0.0F;
                                F_invr *= vdw_in_range;
#    ifdef CALC_ENERGIES
                                E_lj_p *= vdw_in_range;
#    endif
#endif /* VDW_CUTOFF_CHECK */
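
                                /* E.g. a hypothetical twin-range setup with rvdw = 1.0 and
                                 * rcoulomb = 1.2 (nm): pairs between the two radii keep only
                                 * their electrostatic contribution. Multiplying by
                                 * vdw_in_range instead of branching keeps the warp convergent. */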
#ifdef CALC_ENERGIES
                                E_lj += E_lj_p;
#endif /* CALC_ENERGIES */

#ifdef EL_CUTOFF
#    ifdef EXCLUSION_FORCES
                                F_invr += qi * qj_f * int_bit * inv_r2 * inv_r;
#    else
                                F_invr += qi * qj_f * inv_r2 * inv_r;
#    endif
#endif
#ifdef EL_RF
                                F_invr += qi * qj_f * (int_bit * inv_r2 * inv_r - two_k_rf);
#endif
#if defined EL_EWALD_ANA
                                F_invr += qi * qj_f * (int_bit * inv_r2 * inv_r + pmecorrF(beta2 * r2) * beta3);
#elif defined EL_EWALD_TAB
                                F_invr += qi * qj_f
                                          * (int_bit * inv_r2
                                             - interpolate_coulomb_force_r(coulomb_tab, r2 * inv_r, coulomb_tab_scale))
                                          * inv_r;
#endif /* EL_EWALD_ANA/TAB */

#ifdef CALC_ENERGIES
#    ifdef EL_CUTOFF
                                E_el += qi * qj_f * (int_bit * inv_r - c_rf);
#    endif
#    ifdef EL_RF
                                E_el += qi * qj_f * (int_bit * inv_r + HALF_F * two_k_rf * r2 - c_rf);
#    endif
#    ifdef EL_EWALD_ANY
                                /* 1.0F - erf is faster than erfc */
                                E_el += qi * qj_f
                                        * (inv_r * (int_bit - erf(r2 * inv_r * beta)) - int_bit * ewald_shift);
#    endif /* EL_EWALD_ANY */
#endif /* CALC_ENERGIES */
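
                                /* Summary of the flavors above (int_bit zeroes the real-space
                                 * 1/r part for excluded pairs while keeping correction terms):
                                 *   RF:    F/r = q_i q_j (1/r^3 - 2*k_rf)
                                 *          E   = q_i q_j (1/r + k_rf*r^2 - c_rf)
                                 *   Ewald: E   = q_i q_j (erfc(beta*r)/r - shift), with
                                 *          erfc(beta*r)/r computed as (1 - erf(beta*r))/r and the
                                 *          force correction from pmecorrF() (analytical) or the
                                 *          tabulated force (EL_EWALD_TAB). */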
                                const float3 f_ij = rv * F_invr;

                                /* accumulate j forces in registers */
                                fcj_buf -= f_ij;

                                /* accumulate i forces in registers */
                                fci_buf[i][0] += f_ij.x;
                                fci_buf[i][1] += f_ij.y;
                                fci_buf[i][2] += f_ij.z;
                            }
                        }
                        /* shift the mask bit by 1 */
                        mask_ji += mask_ji;
                    }

                    /* reduce j forces */
                    reduce_force_j(f_buf, fcj_buf, f, tidxi, tidxj, aj);
                }
            }
        }
#ifdef PRUNE_NBL
        /* Update the imask with the new one which does not contain the
           out-of-range clusters anymore. */
        pl_cj4[j4].imei[widx].imask = imask;
#endif
    }
    /* skip central shifts when summing shift forces */
    if (nb_sci.shift == CENTRAL)
    {
        bCalcFshift = 0;
    }

    /* reduce i forces */
    reduce_force_i_and_shift(f_buf, fci_buf, f, bCalcFshift != 0, tidxi, tidxj, sci, nb_sci.shift, fshift);

#ifdef CALC_ENERGIES
    reduce_energy(f_buf, E_lj, E_el, e_lj, e_el, tidx);
#endif
}
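
/* Host-side sizing of the __local xqib argument must mirror the LOCAL_OFFSET carving
 * above. A sketch (hypothetical helper; the flags mirror the compiled flavor):
 *
 *     size_t shmem = c_nbnxnGpuNumClusterPerSupercluster * CL_SIZE * sizeof(cl_float4);
 *     if (useCjPrefetch)   shmem += 2 * c_nbnxnGpuJgroupSize * sizeof(cl_int);
 *     if (iatypeShmem)     shmem += c_nbnxnGpuNumClusterPerSupercluster * CL_SIZE
 *                                   * (ljComb ? sizeof(cl_float2) : sizeof(cl_int));
 *     if (!reduceShuffle)  shmem += CL_SIZE * CL_SIZE * 3 * sizeof(cl_float);
 *     if (!useSubgroupAny) shmem += 2 * sizeof(cl_int);
 *     clSetKernelArg(kernel, xqibArgIndex, shmem, NULL);
 */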
/* Undefine the convenience macros defined at the top of the file. */
#undef EXCLUSION_FORCES
#undef EL_EWALD_ANY
#undef LJ_EWALD
#undef LJ_COMB

#undef USE_CJ_PREFETCH