/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012-2018, The GROMACS development team.
 * Copyright (c) 2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \internal \file
 *  \brief OpenCL non-bonded kernel.
 *
 *  OpenCL 1.2 support is expected.
 *
 *  \author Anca Hamuraru
 *  \author Szilárd Páll
 *  \ingroup module_nbnxm
 */

/* Currently we enable CJ prefetch for AMD/NVIDIA and disable it for the "nowarp" kernel.
 * Note that this should precede the kernel_utils include.
 */
#include "nbnxm_ocl_kernel_utils.clh"

/////////////////////////////////////////////////////////////////////////////////////////////////

#if defined EL_EWALD_ANA || defined EL_EWALD_TAB
/* Note: convenience macro, needs to be undef-ed at the end of the file. */
#    define EL_EWALD_ANY
#endif

#if defined LJ_EWALD_COMB_GEOM || defined LJ_EWALD_COMB_LB
/* Note: convenience macro, needs to be undef-ed at the end of the file. */
#    define LJ_EWALD
#endif

#if defined EL_EWALD_ANY || defined EL_RF || defined LJ_EWALD \
        || (defined EL_CUTOFF && defined CALC_ENERGIES)
/* Macro to control the calculation of exclusion forces in the kernel.
 * We do that with Ewald (elec/vdw) and RF. Cut-off only has exclusion
 * energy terms.
 *
 * Note: convenience macro, needs to be undef-ed at the end of the file.
 */
#    define EXCLUSION_FORCES
#endif

#if defined LJ_COMB_GEOM || defined LJ_COMB_LB
/* Note: convenience macro, needs to be undef-ed at the end of the file. */
#    define LJ_COMB
#endif

/*
   Kernel launch parameters:
    - #blocks  = #pair lists, blockId = pair list Id
    - #threads = CL_SIZE^2
    - shmem    = CL_SIZE^2 * sizeof(float)

   Each thread calculates an i-force component, taking one pair of i-j atoms.

   TODO: implement 128 threads/wavefront by porting over the NTHREAD_Z/j4 loop
   "horizontal splitting" over threads.
 */

/* NOTE: NB_KERNEL_FUNC_NAME differs from the CUDA equivalent as it is not a
 * variadic macro, because OpenCL does not support them; this version takes
 * exactly 2 arguments. Thus if more strings need to be appended a new macro
 * must be written or it must be directly appended here.
 */
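/* Illustration (an assumption, not part of this file): NB_KERNEL_FUNC_NAME is
 * typically defined at compile time to splice an electrostatics/VdW flavor
 * infix between its two arguments, e.g. roughly
 *
 *     #define NB_KERNEL_FUNC_NAME(x, y) x##_ElecCut_VdwLJ##y
 *
 * so that NB_KERNEL_FUNC_NAME(nbnxn_kernel, _F_opencl) would expand to
 * nbnxn_kernel_ElecCut_VdwLJ_F_opencl. The exact infix is set where the
 * kernel flavors are instantiated, not here.
 */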
__attribute__((reqd_work_group_size(CL_SIZE, CL_SIZE, 1)))
#ifdef cl_intel_required_subgroup_size
__attribute__((intel_reqd_sub_group_size(SUBGROUP_SIZE)))
#endif
#ifdef PRUNE_NBL
#    ifdef CALC_ENERGIES
__kernel void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _VF_prune_opencl)
#    else
__kernel void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _F_prune_opencl)
#    endif
#else
#    ifdef CALC_ENERGIES
__kernel void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _VF_opencl)
#    else
__kernel void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _F_opencl)
#    endif
#endif
        (
#ifndef LJ_COMB
                int ntypes, /* IN */
#endif
                cl_nbparam_params_t nbparam_params,       /* IN */
                const __global float4* restrict xq,       /* IN */
                __global float* restrict f,               /* OUT stores float3 values */
                __global float* restrict gmx_unused e_lj, /* OUT */
                __global float* restrict gmx_unused e_el, /* OUT */
                __global float* restrict fshift,          /* OUT stores float3 values */
#ifdef LJ_COMB
                const __global float2* restrict lj_comb, /* IN stores float2 values */
#else
                const __global int* restrict atom_types, /* IN */
#endif
                const __global float* restrict shift_vec,                /* IN stores float3 values */
                __constant const float2* restrict gmx_unused nbfp,       /* IN */
                __constant const float2* restrict gmx_unused nbfp_comb,  /* IN */
                __constant const float* restrict gmx_unused coulomb_tab, /* IN */
                const __global nbnxn_sci_t* pl_sci,                      /* IN */
#ifndef PRUNE_NBL
                const
#endif
                __global nbnxn_cj4_t* pl_cj4,      /* OUT / IN */
                const __global nbnxn_excl_t* excl, /* IN */
                int bCalcFshift,                   /* IN */
                __local float4* xqib               /* Pointer to dyn alloc'ed shmem */
)
{
    /* convenience variables */
    const cl_nbparam_params_t* const nbparam = &nbparam_params;

    const float rcoulomb_sq = nbparam->rcoulomb_sq;
#ifdef VDW_CUTOFF_CHECK
    const float rvdw_sq = nbparam_params.rvdw_sq;
#endif
#ifdef EL_RF
    const float two_k_rf = nbparam->two_k_rf;
#endif
#ifdef EL_EWALD_TAB
    const float coulomb_tab_scale = nbparam->coulomb_tab_scale;
#endif
#ifdef EL_EWALD_ANA
    const float beta2 = nbparam->ewald_beta * nbparam->ewald_beta;
    const float beta3 = nbparam->ewald_beta * nbparam->ewald_beta * nbparam->ewald_beta;
#endif
#ifdef PRUNE_NBL
    const float rlist_sq = nbparam->rlistOuter_sq;
#endif

#ifdef CALC_ENERGIES
#    ifdef EL_EWALD_ANY
    const float beta        = nbparam->ewald_beta;
    const float ewald_shift = nbparam->sh_ewald;
#    else
    const float gmx_unused c_rf = nbparam->c_rf;
#    endif /* EL_EWALD_ANY */
#endif     /* CALC_ENERGIES */

    /* thread/block/warp id-s */
    const int tidxi = get_local_id(0);
    const int tidxj = get_local_id(1);
    const int tidx  = (int)(get_local_id(1) * get_local_size(0) + get_local_id(0));
    const int bidx  = get_group_id(0);
    const int widx  = tidx / WARP_SIZE; /* warp index */
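    /* Index layout note (assuming the usual CL_SIZE = 8, WARP_SIZE = 32 build
     * configuration): the work-group is an 8x8 block of 64 work-items, where
     * tidxi/tidxj address the i/j atoms within a cluster pair, the flattened
     * tidx runs 0..63, and widx selects one of the two 32-wide warps that
     * process their halves of the block independently (see the exclusion-mask
     * indexing with tidx & (WARP_SIZE - 1) below).
     */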
    /*! i-cluster interaction mask for a super-cluster with all
     *  c_nbnxnGpuNumClusterPerSupercluster = 8 bits set.
     */
    const unsigned superClInteractionMask = ((1U << c_nbnxnGpuNumClusterPerSupercluster) - 1U);

#define LOCAL_OFFSET (xqib + c_nbnxnGpuNumClusterPerSupercluster * CL_SIZE)
    CjType cjs = 0;
#if USE_CJ_PREFETCH
    /* shmem buffer for cj, for both warps separately */
    cjs = (__local int*)(LOCAL_OFFSET);
#    undef LOCAL_OFFSET
#    define LOCAL_OFFSET (cjs + 2 * c_nbnxnGpuJgroupSize)
#endif // USE_CJ_PREFETCH

#ifdef IATYPE_SHMEM
#    ifndef LJ_COMB
    /* shmem buffer for i atom-type pre-loading */
    __local int* atib = (__local int*)(LOCAL_OFFSET); // NOLINT(google-readability-casting)
#        undef LOCAL_OFFSET
#        define LOCAL_OFFSET (atib + c_nbnxnGpuNumClusterPerSupercluster * CL_SIZE)
#    else
    __local float2* ljcpib = (__local float2*)(LOCAL_OFFSET);
#        undef LOCAL_OFFSET
#        define LOCAL_OFFSET (ljcpib + c_nbnxnGpuNumClusterPerSupercluster * CL_SIZE)
#    endif
#endif

#if !REDUCE_SHUFFLE
    /* shmem j force buffer */
    __local float* f_buf = (__local float*)(LOCAL_OFFSET);
#    undef LOCAL_OFFSET
#    define LOCAL_OFFSET (f_buf + CL_SIZE * CL_SIZE * 3)
#else
    __local float* f_buf = 0;
#endif
#if !USE_SUBGROUP_ANY
    /* Local buffer used to implement the __any warp vote function from CUDA.
       volatile is used to avoid compiler optimizations for AMD builds. */
    // NOLINTNEXTLINE(google-readability-casting)
    volatile __local int* warp_any = (__local int*)(LOCAL_OFFSET);
#else
    __local int gmx_unused* warp_any = 0;
#endif
#undef LOCAL_OFFSET
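    /* Local-memory layout note (derived from the LOCAL_OFFSET bookkeeping
     * above; the host-side sizing is an assumption, as it is not shown in
     * this file): the single __local allocation passed in as xqib is carved
     * up, in order, into
     *   - xqib:        c_nbnxnGpuNumClusterPerSupercluster * CL_SIZE float4,
     *                  the shifted i-atom x/y/z coordinates and epsfac-scaled
     *                  charge;
     *   - cjs:         2 * c_nbnxnGpuJgroupSize int, when USE_CJ_PREFETCH;
     *   - atib/ljcpib: i atom types or LJ combination parameters, when
     *                  IATYPE_SHMEM;
     *   - f_buf:       CL_SIZE * CL_SIZE * 3 float, when !REDUCE_SHUFFLE;
     *   - warp_any:    one int per warp, when !USE_SUBGROUP_ANY.
     * The host is therefore expected to size this dynamic local buffer as the
     * sum of the segments enabled for the kernel flavor being launched.
     */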
    const nbnxn_sci_t nb_sci     = pl_sci[bidx]; /* my i super-cluster's index = current bidx */
    const int         sci        = nb_sci.sci;           /* super-cluster */
    const int         cij4_start = nb_sci.cj4_ind_start; /* first ...*/
    const int         cij4_end   = nb_sci.cj4_ind_end;   /* and last index of j clusters */

    for (int i = 0; i < c_nbnxnGpuNumClusterPerSupercluster; i += CL_SIZE)
    {
        /* Pre-load i-atom x and q into shared memory */
        const int ci = sci * c_nbnxnGpuNumClusterPerSupercluster + tidxj + i;
        const int ai = ci * CL_SIZE + tidxi;

        float4 xqbuf = xq[ai]
                       + (float4)(shift_vec[3 * nb_sci.shift], shift_vec[3 * nb_sci.shift + 1],
                                  shift_vec[3 * nb_sci.shift + 2], 0.0F);
        xqbuf.w *= nbparam->epsfac;
        xqib[(tidxj + i) * CL_SIZE + tidxi] = xqbuf;

#ifdef IATYPE_SHMEM
#    ifndef LJ_COMB
        /* Pre-load the i-atom types into shared memory */
        atib[(tidxj + i) * CL_SIZE + tidxi] = atom_types[ai];
#    else
        ljcpib[(tidxj + i) * CL_SIZE + tidxi] = lj_comb[ai];
#    endif
#endif
    }
#if !USE_SUBGROUP_ANY
    /* Initialise warp vote. (8x8 block) 2 warps for nvidia */
    if (tidx == 0 || tidx == WARP_SIZE)
    {
        warp_any[widx] = 0;
    }
#endif
    barrier(CLK_LOCAL_MEM_FENCE);

    fvec fci_buf[c_nbnxnGpuNumClusterPerSupercluster]; /* i force buffer */
    for (int ci_offset = 0; ci_offset < c_nbnxnGpuNumClusterPerSupercluster; ci_offset++)
    {
        fci_buf[ci_offset][0] = 0.0F;
        fci_buf[ci_offset][1] = 0.0F;
        fci_buf[ci_offset][2] = 0.0F;
    }

#ifdef LJ_EWALD
    /* TODO: we are trading registers with flops by keeping lje_coeff-s, try re-calculating it later */
    const float lje_coeff2   = nbparam->ewaldcoeff_lj * nbparam->ewaldcoeff_lj;
    const float lje_coeff6_6 = lje_coeff2 * lje_coeff2 * lje_coeff2 * ONE_SIXTH_F;
#endif /* LJ_EWALD */

#ifdef CALC_ENERGIES
    float E_lj = 0.0F;
    float E_el = 0.0F;

#    if defined EXCLUSION_FORCES /* Ewald or RF */
    if (nb_sci.shift == c_centralShiftIndex
        && pl_cj4[cij4_start].cj[0] == sci * c_nbnxnGpuNumClusterPerSupercluster)
    {
        /* we have the diagonal: add the charge and LJ self interaction energy term */
        for (int i = 0; i < c_nbnxnGpuNumClusterPerSupercluster; i++)
        {
#        if defined EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF
            const float qi = xqib[i * CL_SIZE + tidxi].w;
            E_el += qi * qi;
#        endif
#        if defined LJ_EWALD
            E_lj += nbfp[atom_types[(sci * c_nbnxnGpuNumClusterPerSupercluster + i) * CL_SIZE + tidxi]
                         * (ntypes + 1)]
                            .x;
#        endif /* LJ_EWALD */
        }

        /* divide the self term(s) equally over the j-threads, then multiply with the coefficients. */
#        ifdef LJ_EWALD
        E_lj /= CL_SIZE;
        E_lj *= HALF_F * ONE_SIXTH_F * lje_coeff6_6;
#        endif /* LJ_EWALD */

#        if defined EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF
        /* Correct for epsfac^2 due to adding qi^2 */
        E_el /= nbparam->epsfac * CL_SIZE;
#            if defined EL_RF || defined EL_CUTOFF
        E_el *= -HALF_F * c_rf;
#            else
        E_el *= -beta * M_FLOAT_1_SQRTPI; /* last factor 1/sqrt(pi) */
#            endif
#        endif /* EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF */
    }
#    endif /* EXCLUSION_FORCES */

#endif /* CALC_ENERGIES */

#ifdef EXCLUSION_FORCES
    const int nonSelfInteraction = !(nb_sci.shift == c_centralShiftIndex & tidxj <= tidxi);
#endif

    /* loop over the j clusters = seen by any of the atoms in the current super-cluster */
    for (int j4 = cij4_start; j4 < cij4_end; j4++)
    {
        const int          wexcl_idx = pl_cj4[j4].imei[widx].excl_ind;
        unsigned int       imask     = pl_cj4[j4].imei[widx].imask;
        const unsigned int wexcl     = excl[wexcl_idx].pair[(tidx) & (WARP_SIZE - 1)];

        preloadCj4(&cjs, pl_cj4[j4].cj, tidxi, tidxj, imask != 0U);

#ifndef PRUNE_NBL
        if (imask)
#endif
        {
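            /* Note on the interaction mask consumed below (restating what the
             * constants imply): each cj4 entry packs c_nbnxnGpuJgroupSize = 4
             * j-clusters, and imask carries one bit per (j-cluster, i-cluster)
             * pair, i.e. 4 * 8 = 32 bits. The jm loop below peels off
             * c_nbnxnGpuNumClusterPerSupercluster = 8 bits at a time
             * (superClInteractionMask); an all-zero 8-bit group means that
             * j-cluster interacts with no i-cluster of this super-cluster and
             * is skipped entirely.
             */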
            /* Unrolling this loop improves performance without pruning, but
             * with pruning it leads to slowdown.
             *
             * Tested with driver 1800.5.
             *
             * TODO: check loop unrolling with NVIDIA OpenCL
             */
#if !defined PRUNE_NBL && !defined _NVIDIA_SOURCE_
#    pragma unroll 4
#endif
            for (int jm = 0; jm < c_nbnxnGpuJgroupSize; jm++)
            {
                if (imask & (superClInteractionMask << (jm * c_nbnxnGpuNumClusterPerSupercluster)))
                {
                    unsigned int mask_ji = (1U << (jm * c_nbnxnGpuNumClusterPerSupercluster));

                    const int cj = loadCj(cjs, pl_cj4[j4].cj, jm, tidxi, tidxj);
                    const int aj = cj * CL_SIZE + tidxj;

                    /* load j atom data */
                    const float4 xjqbuf = xq[aj];
                    const float3 xj     = (float3)(xjqbuf.xyz);
                    const float  qj_f   = xjqbuf.w;
#ifndef LJ_COMB
                    const int typej = atom_types[aj];
#else
                    const float2 ljcp_j = lj_comb[aj];
#endif

                    float3 fcj_buf = (float3)(0.0F);

#if !defined PRUNE_NBL
#    pragma unroll 8
#endif
                    for (int i = 0; i < c_nbnxnGpuNumClusterPerSupercluster; i++)
                    {
                        if (imask & mask_ji)
                        {
                            const int gmx_unused ci =
                                    sci * c_nbnxnGpuNumClusterPerSupercluster + i; /* i cluster index */

                            /* all threads load an atom from i cluster ci into shmem! */
                            const float4 xiqbuf = xqib[i * CL_SIZE + tidxi];
                            const float3 xi     = (float3)(xiqbuf.xyz);

                            /* distance between i and j atoms */
                            const float3 rv = xi - xj;
                            float        r2 = norm2(rv);

#ifdef PRUNE_NBL
                            if (!gmx_sub_group_any(warp_any, widx, r2 < rlist_sq))
                            {
                                imask &= ~mask_ji;
                            }
#endif

                            const float int_bit = (wexcl & mask_ji) ? 1.0F : 0.0F;

                            /* cutoff & exclusion check */
#ifdef EXCLUSION_FORCES
                            if ((r2 < rcoulomb_sq) * (nonSelfInteraction | (ci != cj)))
#else
                            if ((float)(r2 < rcoulomb_sq) * int_bit != 0.0F)
#endif
                            {
                                /* load the rest of the i-atom parameters */
                                const float qi = xiqbuf.w;
#ifdef IATYPE_SHMEM
#    ifndef LJ_COMB
                                const int typei = atib[i * CL_SIZE + tidxi];
#    else
                                const float2 ljcp_i = ljcpib[i * CL_SIZE + tidxi];
#    endif
#else /* IATYPE_SHMEM */
                                const int ai = ci * CL_SIZE + tidxi; /* i atom index */

#    ifndef LJ_COMB
                                const int typei = atom_types[ai];
#    else
                                const float2 ljcp_i = lj_comb[ai];
#    endif
#endif /* IATYPE_SHMEM */

                                /* LJ 6*C6 and 12*C12 */
#ifndef LJ_COMB
                                const float2 c6c12 = nbfp[ntypes * typei + typej];
                                const float  c6    = c6c12.x;
                                const float  c12   = c6c12.y;
#else /* LJ_COMB */
#    ifdef LJ_COMB_GEOM
                                const float c6  = ljcp_i.x * ljcp_j.x;
                                const float c12 = ljcp_i.y * ljcp_j.y;
#    else
                                /* LJ 2^(1/6)*sigma and 12*epsilon */
                                const float sigma   = ljcp_i.x + ljcp_j.x;
                                const float epsilon = ljcp_i.y * ljcp_j.y;
#        if defined CALC_ENERGIES || defined LJ_FORCE_SWITCH || defined LJ_POT_SWITCH
                                const float2 c6c12 = convert_sigma_epsilon_to_c6_c12(sigma, epsilon);
                                const float  c6    = c6c12.x;
                                const float  c12   = c6c12.y;
#        endif
#    endif /* LJ_COMB_GEOM */
#endif     /* LJ_COMB */
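                                /* Combination-rule note (the storage convention
                                 * stated here is an assumption inferred from the
                                 * expressions above): with LJ_COMB_GEOM each atom
                                 * stores sqrt-factored c6/c12, so plain products
                                 * reconstruct the geometric-rule pair parameters;
                                 * with LJ_COMB_LB the per-atom values combine as
                                 * sigma_ij = ljcp_i.x + ljcp_j.x and
                                 * epsilon_ij = ljcp_i.y * ljcp_j.y, and
                                 * convert_sigma_epsilon_to_c6_c12() is only needed
                                 * when explicit c6/c12 are required (energies or
                                 * switched interactions).
                                 */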
                                // Ensure distance does not become so small that r^-12 overflows.
                                r2 = max(r2, c_nbnxnMinDistanceSquared);

                                const float inv_r  = rsqrt(r2);
                                const float inv_r2 = inv_r * inv_r;
#if !defined LJ_COMB_LB || defined CALC_ENERGIES
                                float inv_r6 = inv_r2 * inv_r2 * inv_r2;
#    if defined EXCLUSION_FORCES
                                /* We could mask inv_r2, but with Ewald
                                 * masking both inv_r6 and F_invr is faster */
                                inv_r6 *= int_bit;
#    endif /* EXCLUSION_FORCES */

                                float F_invr = inv_r6 * (c12 * inv_r6 - c6) * inv_r2;
#    if defined CALC_ENERGIES || defined LJ_POT_SWITCH
                                float E_lj_p =
                                        int_bit
                                        * (c12 * (inv_r6 * inv_r6 + nbparam->repulsion_shift.cpot) * ONE_TWELVETH_F
                                           - c6 * (inv_r6 + nbparam->dispersion_shift.cpot) * ONE_SIXTH_F);
#    endif
#else /* ! LJ_COMB_LB || CALC_ENERGIES */
                                const float sig_r  = sigma * inv_r;
                                const float sig_r2 = sig_r * sig_r;
                                float       sig_r6 = sig_r2 * sig_r2 * sig_r2;
#    if defined EXCLUSION_FORCES
                                sig_r6 *= int_bit;
#    endif /* EXCLUSION_FORCES */

                                float F_invr = epsilon * sig_r6 * (sig_r6 - 1.0F) * inv_r2;
#endif /* ! LJ_COMB_LB || CALC_ENERGIES */

#ifdef LJ_FORCE_SWITCH
#    ifdef CALC_ENERGIES
                                calculate_force_switch_F_E(nbparam, c6, c12, inv_r, r2, &F_invr, &E_lj_p);
#    else
                                calculate_force_switch_F(nbparam, c6, c12, inv_r, r2, &F_invr);
#    endif /* CALC_ENERGIES */
#endif     /* LJ_FORCE_SWITCH */

#ifdef LJ_EWALD
#    ifdef LJ_EWALD_COMB_GEOM
#        ifdef CALC_ENERGIES
                                calculate_lj_ewald_comb_geom_F_E(nbfp_comb, nbparam, typei, typej,
                                                                 r2, inv_r2, lje_coeff2, lje_coeff6_6,
                                                                 int_bit, &F_invr, &E_lj_p);
#        else
                                calculate_lj_ewald_comb_geom_F(
                                        nbfp_comb, typei, typej, r2, inv_r2, lje_coeff2, lje_coeff6_6, &F_invr);
#        endif /* CALC_ENERGIES */
#    elif defined LJ_EWALD_COMB_LB
                                calculate_lj_ewald_comb_LB_F_E(nbfp_comb, nbparam, typei, typej, r2,
                                                               inv_r2, lje_coeff2, lje_coeff6_6,
#        ifdef CALC_ENERGIES
                                                               int_bit, true, &F_invr, &E_lj_p
#        else
                                                               0, false, &F_invr, 0
#        endif /* CALC_ENERGIES */
                                );
#    endif /* LJ_EWALD_COMB_GEOM */
#endif     /* LJ_EWALD */

#ifdef LJ_POT_SWITCH
#    ifdef CALC_ENERGIES
                                calculate_potential_switch_F_E(nbparam, inv_r, r2, &F_invr, &E_lj_p);
#    else
                                calculate_potential_switch_F(nbparam, inv_r, r2, &F_invr, &E_lj_p);
#    endif /* CALC_ENERGIES */
#endif     /* LJ_POT_SWITCH */

#ifdef VDW_CUTOFF_CHECK
                                /* Separate VDW cut-off check to enable twin-range cut-offs
                                 * (rvdw < rcoulomb <= rlist)
                                 */
                                const float vdw_in_range = (r2 < rvdw_sq) ? 1.0F : 0.0F;
                                F_invr *= vdw_in_range;
#    ifdef CALC_ENERGIES
                                E_lj_p *= vdw_in_range;
#    endif
#endif /* VDW_CUTOFF_CHECK */

#ifdef CALC_ENERGIES
                                E_lj += E_lj_p;
#endif

#ifdef EL_CUTOFF
#    ifdef EXCLUSION_FORCES
                                F_invr += qi * qj_f * int_bit * inv_r2 * inv_r;
#    else
                                F_invr += qi * qj_f * inv_r2 * inv_r;
#    endif
#endif
#ifdef EL_RF
                                F_invr += qi * qj_f * (int_bit * inv_r2 * inv_r - two_k_rf);
#endif
#if defined EL_EWALD_ANA
                                F_invr += qi * qj_f
                                          * (int_bit * inv_r2 * inv_r + pmecorrF(beta2 * r2) * beta3);
#elif defined EL_EWALD_TAB
                                F_invr += qi * qj_f
                                          * (int_bit * inv_r2
                                             - interpolate_coulomb_force_r(coulomb_tab, r2 * inv_r,
                                                                           coulomb_tab_scale))
                                          * inv_r;
#endif /* EL_EWALD_ANA/TAB */

#ifdef CALC_ENERGIES
#    ifdef EL_CUTOFF
                                E_el += qi * qj_f * (int_bit * inv_r - c_rf);
#    endif
#    ifdef EL_RF
                                E_el += qi * qj_f * (int_bit * inv_r + HALF_F * two_k_rf * r2 - c_rf);
#    endif
#    ifdef EL_EWALD_ANY
                                /* 1.0F - erf is faster than erfc */
                                E_el += qi * qj_f
                                        * (inv_r * (int_bit - erf(r2 * inv_r * beta)) - int_bit * ewald_shift);
#    endif /* EL_EWALD_ANY */
#endif

                                const float3 f_ij = rv * F_invr;

                                /* accumulate j forces in registers */
                                fcj_buf -= f_ij;

                                /* accumulate i forces in registers */
                                fci_buf[i][0] += f_ij.x;
                                fci_buf[i][1] += f_ij.y;
                                fci_buf[i][2] += f_ij.z;
                            }
                        }

                        /* shift the mask bit by 1 */
                        mask_ji += mask_ji;
                    }

                    /* reduce j forces */
                    reduce_force_j(f_buf, fcj_buf, f, tidxi, tidxj, aj);
                }
            }
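            /* Reduction note (summarizing the two code paths compiled above):
             * with REDUCE_SHUFFLE the j-force reduction runs entirely in
             * registers via sub-group shuffles and f_buf stays unused (0);
             * otherwise the CL_SIZE * CL_SIZE * 3 float f_buf buffer carved
             * out of local memory earlier serves as scratch for the tree
             * reduction.
             */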
#ifdef PRUNE_NBL
            /* Update the imask with the new one which does not contain the
             * out-of-range clusters anymore.
             */
            pl_cj4[j4].imei[widx].imask = imask;
#endif
        }
    }

    /* skip central shifts when summing shift forces */
    if (nb_sci.shift == c_centralShiftIndex)
    {
        bCalcFshift = 0;
    }
    /* reduce i forces */
    reduce_force_i_and_shift(f_buf, fci_buf, f, bCalcFshift != 0, tidxi, tidxj, sci, nb_sci.shift, fshift);

#ifdef CALC_ENERGIES
    reduce_energy(f_buf, E_lj, E_el, e_lj, e_el, tidx);
#endif
}

#undef EL_EWALD_ANY
#undef EXCLUSION_FORCES
#undef LJ_EWALD

#undef LJ_COMB

#undef USE_CJ_PREFETCH
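/* Output note (an assumption about the helpers defined in
 * nbnxm_ocl_kernel_utils.clh, which are not shown in this file):
 * reduce_force_i_and_shift() accumulates the per-thread i-force buffers into
 * the global f array and, when bCalcFshift is set, into the fshift slot of
 * nb_sci.shift, while reduce_energy() accumulates the per-thread E_lj/E_el
 * partial sums into the e_lj/e_el buffers. Since several work-groups may
 * update the same global locations, these reductions are expected to use
 * atomic adds.
 */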