* NOTE: No include fence as it is meant to be included multiple times.
*
* \author Szilárd Páll <pall.szilard@gmail.com>
+ * \author Berk Hess <hess@kth.se>
* \ingroup module_mdlib
*/
-#include "config.h"
+#include "gromacs/gpu_utils/cuda_arch_utils.cuh"
#include "gromacs/math/utilities.h"
#include "gromacs/pbcutil/ishift.h"
/* Note that floating-point constants in CUDA code should be suffixed
 * with f (e.g. 0.5f), to stop the compiler producing intermediate
 * code that is in double precision.
 */
-#if __CUDA_ARCH__ >= 300
-/* Note: convenience macros, need to be undef-ed at the end of the file. */
-#define REDUCE_SHUFFLE
-/* On Kepler pre-loading i-atom types to shmem gives a few %,
- but on Fermi it does not */
-#define IATYPE_SHMEM
-#ifdef HAVE_CUDA_TEXOBJ_SUPPORT
-#define USE_TEXOBJ
-#endif
+#if GMX_PTX_ARCH < 300
+#error "nbnxn_cuda_kernel.cuh included with GMX_PTX_ARCH < 300"
#endif
#if defined EL_EWALD_ANA || defined EL_EWALD_TAB
/* Note: convenience macro, needs to be undef-ed at the end of the file. */
#define EL_EWALD_ANY
#endif

#if defined LJ_EWALD_COMB_GEOM || defined LJ_EWALD_COMB_LB
/* Note: convenience macro, needs to be undef-ed at the end of the file. */
#define LJ_EWALD
#endif

#if defined EL_EWALD_ANY || defined EL_RF || defined LJ_EWALD || (defined EL_CUTOFF && defined CALC_ENERGIES)
/* Macro to control the calculation of exclusion forces in the kernel.
 * We do that with Ewald (elec/vdw) and RF; plain cut-off only has
 * exclusion energy terms.
 *
 * Note: convenience macro, needs to be undef-ed at the end of the file.
 */
#define EXCLUSION_FORCES
#endif
+#if defined LJ_COMB_GEOM || defined LJ_COMB_LB
+#define LJ_COMB
+#endif
/*
Kernel launch parameters:
- #blocks = #pair lists, blockId = pair list Id
- - #threads = NTHREAD_Z * CL_SIZE^2
+ - #threads = NTHREAD_Z * c_clSize^2
- shmem = see nbnxn_cuda.cu:calc_shmem_required()
Each thread calculates an i force-component taking one pair of i-j atoms.
*/
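+/* For orientation, a sketch of the corresponding host-side launch (names
+ * illustrative, see nbnxn_cuda.cu for the actual code):
+ *
+ *   dim3 blockSize(c_clSize, c_clSize, NTHREAD_Z);
+ *   dim3 gridSize(numPairLists);
+ *   kernel<<<gridSize, blockSize, shmemSize, stream>>>(adat, nbp, plist, bCalcFshift);
+ */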
-/* NTHREAD_Z controls the number of j-clusters processed concurrently on NTHREAD_Z
+/**@{*/
+/*! \brief Compute capability dependent definition of kernel launch configuration parameters.
+ *
+ * NTHREAD_Z controls the number of j-clusters processed concurrently on NTHREAD_Z
* warp-pairs per block.
*
 * - On CC 3.0-3.5, 5.x, and >=6.1, NTHREAD_Z == 1, translating to 64 th/block with 16
 *   blocks/multiproc, is fastest even though this setup gives low occupancy.
 * - On CC 3.7 and 6.0 there are enough registers to double the number of threads,
 *   so NTHREAD_Z == 2 (128 th/block) with 16 blocks/multiproc is used instead.
 *
 * Note that the current kernel implementation only supports NTHREAD_Z > 1 with
 * shuffle-based reduction, hence CC >= 3.0.
*/
-/* Kernel launch bounds as function of NTHREAD_Z.
+/* Kernel launch bounds for different compute capabilities. The value of NTHREAD_Z
+ * determines the number of threads per block and it is chosen such that
+ * 16 blocks/multiprocessor can be kept in flight.
* - CC 3.0/3.5/5.x, >=6.1: NTHREAD_Z=1, (64, 16) bounds
* - CC 3.7, 6.0: NTHREAD_Z=2, (128, 16) bounds
*
* Note: convenience macros, need to be undef-ed at the end of the file.
*/
-#if __CUDA_ARCH__ == 370 || __CUDA_ARCH__ == 600
-#define NTHREAD_Z (2)
-#define MIN_BLOCKS_PER_MP (16)
+#if GMX_PTX_ARCH == 370 || GMX_PTX_ARCH == 600
+ #define NTHREAD_Z (2)
+ #define MIN_BLOCKS_PER_MP (16)
#else
-#define NTHREAD_Z (1)
-#define MIN_BLOCKS_PER_MP (16)
-#endif
-#define THREADS_PER_BLOCK (CL_SIZE*CL_SIZE*NTHREAD_Z)
+ #define NTHREAD_Z (1)
+ #define MIN_BLOCKS_PER_MP (16)
+#endif /* GMX_PTX_ARCH == 370 || GMX_PTX_ARCH == 600 */
+#define THREADS_PER_BLOCK (c_clSize*c_clSize*NTHREAD_Z)
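+/* With c_clSize == 8, as used by the CUDA kernels, this evaluates to
+ * 8*8*1 = 64 threads/block on most architectures and to 8*8*2 = 128
+ * threads/block on CC 3.7 and 6.0, matching the bounds listed above. */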
+#if (GMX_PTX_ARCH <= 210) && (NTHREAD_Z > 1)
+    #error NTHREAD_Z > 1 will give incorrect results on CC 2.x
+#endif
+/**@}*/
-#if __CUDA_ARCH__ >= 350
+#if GMX_PTX_ARCH >= 350
__launch_bounds__(THREADS_PER_BLOCK, MIN_BLOCKS_PER_MP)
#else
__launch_bounds__(THREADS_PER_BLOCK)
-#endif
+#endif /* GMX_PTX_ARCH >= 350 */
#ifdef PRUNE_NBL
#ifdef CALC_ENERGIES
__global__ void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _VF_prune_cuda)
#else
__global__ void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _F_prune_cuda)
-#endif
+#endif /* CALC_ENERGIES */
#else
#ifdef CALC_ENERGIES
__global__ void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _VF_cuda)
#else
__global__ void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _F_cuda)
-#endif
-#endif
+#endif /* CALC_ENERGIES */
+#endif /* PRUNE_NBL */
(const cu_atomdata_t atdat,
const cu_nbparam_t nbparam,
const cu_plist_t plist,
bool bCalcFshift)
+#ifdef FUNCTION_DECLARATION_ONLY
+; /* Only do function declaration, omit the function body. */
+#else
{
/* convenience variables */
    const nbnxn_sci_t *pl_sci = plist.sci;
#ifndef PRUNE_NBL
    const
#endif
    nbnxn_cj4_t *pl_cj4 = plist.cj4;
const nbnxn_excl_t *excl = plist.excl;
+#ifndef LJ_COMB
const int *atom_types = atdat.atom_types;
int ntypes = atdat.ntypes;
+#else
+ const float2 *lj_comb = atdat.lj_comb;
+ float2 ljcp_i, ljcp_j;
+#endif
const float4 *xq = atdat.xq;
float3 *f = atdat.f;
const float3 *shift_vec = atdat.shift_vec;
    /* thread/block/warp id-s */
    unsigned int tidxi = threadIdx.x;
    unsigned int tidxj = threadIdx.y;
    unsigned int tidx  = threadIdx.y * blockDim.x + threadIdx.x;
#if NTHREAD_Z == 1
    unsigned int tidxz = 0;
#else
    unsigned int tidxz = threadIdx.z;
#endif
unsigned int bidx = blockIdx.x;
- unsigned int widx = tidx / WARP_SIZE; /* warp index */
+ unsigned int widx = tidx / warp_size; /* warp index */
- int sci, ci, cj, ci_offset,
+ int sci, ci, cj,
ai, aj,
- cij4_start, cij4_end,
- typei, typej,
- i, jm, j4, wexcl_idx;
+ cij4_start, cij4_end;
+#ifndef LJ_COMB
+ int typei, typej;
+#endif
+ int i, jm, j4, wexcl_idx;
float qi, qj_f,
- r2, inv_r, inv_r2, inv_r6,
- c6, c12,
- int_bit,
+ r2, inv_r, inv_r2;
+#if !defined LJ_COMB_LB || defined CALC_ENERGIES
+ float inv_r6, c6, c12;
+#endif
+#ifdef LJ_COMB_LB
+    float sigma, epsilon;
+#endif
#ifdef LJ_EWALD
    float lje_coeff2, lje_coeff6_6;
#endif /* LJ_EWALD */
+ float int_bit,
F_invr;
#ifdef CALC_ENERGIES
    float E_lj, E_el;
#endif /* CALC_ENERGIES */

    unsigned int wexcl, imask, mask_ji;
float4 xqbuf;
float3 xi, xj, rv, f_ij, fcj_buf;
- float3 fci_buf[NCL_PER_SUPERCL]; /* i force buffer */
+ float3 fci_buf[c_numClPerSupercl]; /* i force buffer */
nbnxn_sci_t nb_sci;
+    /*! i-cluster interaction mask for a super-cluster with all c_numClPerSupercl bits set */
+ const unsigned superClInteractionMask = ((1U << c_numClPerSupercl) - 1U);
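+    /* E.g. with c_numClPerSupercl == 8 this evaluates to 0xff, the byte of
+       imask that flags the interactions of all 8 i-clusters with j-cluster 0. */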
+
/* shmem buffer for i x+q pre-loading */
extern __shared__ float4 xqib[];
+
/* shmem buffer for cj, for each warp separately */
- int *cjs = ((int *)(xqib + NCL_PER_SUPERCL * CL_SIZE)) + tidxz * 2 * NBNXN_GPU_JGROUP_SIZE;
-#ifdef IATYPE_SHMEM
+ int *cjs = ((int *)(xqib + c_numClPerSupercl * c_clSize)) + tidxz * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize;
+ int *cjs_end = ((int *)(xqib + c_numClPerSupercl * c_clSize)) + NTHREAD_Z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize;
+#ifndef LJ_COMB
/* shmem buffer for i atom-type pre-loading */
- int *atib = ((int *)(xqib + NCL_PER_SUPERCL * CL_SIZE)) + NTHREAD_Z * 2 * NBNXN_GPU_JGROUP_SIZE;
-#endif
-
-#ifndef REDUCE_SHUFFLE
- /* shmem j force buffer */
-#ifdef IATYPE_SHMEM
- float *f_buf = (float *)(atib + NCL_PER_SUPERCL * CL_SIZE);
+ int *atib = cjs_end;
#else
- float *f_buf = (float *)(cjs + NTHREAD_Z * 2 * NBNXN_GPU_JGROUP_SIZE);
-#endif
+ /* shmem buffer for i-atom LJ combination rule parameters */
+ float2 *ljcpib = (float2 *)cjs_end;
#endif
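+    /* The pointer arithmetic above implies the following shared-memory layout:
+       [ xqib:           c_numClPerSupercl * c_clSize                          float4 ]
+       [ cjs:            NTHREAD_Z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize int ]
+       [ atib or ljcpib: c_numClPerSupercl * c_clSize                   int or float2 ] */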
    nb_sci     = pl_sci[bidx];         /* my i super-cluster's index = current bidx */
    sci        = nb_sci.sci;           /* super-cluster */
    cij4_start = nb_sci.cj4_ind_start; /* first ...*/
    cij4_end   = nb_sci.cj4_ind_end;   /* last ...*/
if (tidxz == 0)
{
/* Pre-load i-atom x and q into shared memory */
- ci = sci * NCL_PER_SUPERCL + tidxj;
- ai = ci * CL_SIZE + tidxi;
- xqib[tidxj * CL_SIZE + tidxi] = xq[ai] + shift_vec[nb_sci.shift];
-#ifdef IATYPE_SHMEM
+ ci = sci * c_numClPerSupercl + tidxj;
+ ai = ci * c_clSize + tidxi;
+
+ xqbuf = xq[ai] + shift_vec[nb_sci.shift];
+ xqbuf.w *= nbparam.epsfac;
+ xqib[tidxj * c_clSize + tidxi] = xqbuf;
+
+#ifndef LJ_COMB
/* Pre-load the i-atom types into shared memory */
- atib[tidxj * CL_SIZE + tidxi] = atom_types[ai];
+ atib[tidxj * c_clSize + tidxi] = atom_types[ai];
+#else
+ /* Pre-load the LJ combination parameters into shared memory */
+ ljcpib[tidxj * c_clSize + tidxi] = lj_comb[ai];
#endif
}
__syncthreads();
- for (ci_offset = 0; ci_offset < NCL_PER_SUPERCL; ci_offset++)
+ for (i = 0; i < c_numClPerSupercl; i++)
{
- fci_buf[ci_offset] = make_float3(0.0f);
+ fci_buf[i] = make_float3(0.0f);
}
#ifdef LJ_EWALD
/* TODO: we are trading registers with flops by keeping lje_coeff-s, try re-calculating it later */
lje_coeff2 = nbparam.ewaldcoeff_lj*nbparam.ewaldcoeff_lj;
- lje_coeff6_6 = lje_coeff2*lje_coeff2*lje_coeff2*ONE_SIXTH_F;
-#endif /* LJ_EWALD */
+ lje_coeff6_6 = lje_coeff2*lje_coeff2*lje_coeff2*c_oneSixth;
+#endif
#ifdef CALC_ENERGIES
E_lj = 0.0f;
E_el = 0.0f;
-#if defined EXCLUSION_FORCES /* Ewald or RF */
- if (nb_sci.shift == CENTRAL && pl_cj4[cij4_start].cj[0] == sci*NCL_PER_SUPERCL)
+#ifdef EXCLUSION_FORCES /* Ewald or RF */
+ if (nb_sci.shift == CENTRAL && pl_cj4[cij4_start].cj[0] == sci*c_numClPerSupercl)
{
/* we have the diagonal: add the charge and LJ self interaction energy term */
- for (i = 0; i < NCL_PER_SUPERCL; i++)
+ for (i = 0; i < c_numClPerSupercl; i++)
{
#if defined EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF
- qi = xqib[i * CL_SIZE + tidxi].w;
+ qi = xqib[i * c_clSize + tidxi].w;
E_el += qi*qi;
#endif
-#if defined LJ_EWALD
-#ifdef USE_TEXOBJ
- E_lj += tex1Dfetch<float>(nbparam.nbfp_texobj, atom_types[(sci*NCL_PER_SUPERCL + i)*CL_SIZE + tidxi]*(ntypes + 1)*2);
-#else
- E_lj += tex1Dfetch(nbfp_texref, atom_types[(sci*NCL_PER_SUPERCL + i)*CL_SIZE + tidxi]*(ntypes + 1)*2);
-#endif /* USE_TEXOBJ */
-#endif /* LJ_EWALD */
-
+#ifdef LJ_EWALD
+ E_lj += tex1Dfetch<float>(nbparam.nbfp_texobj, atom_types[(sci*c_numClPerSupercl + i)*c_clSize + tidxi]*(ntypes + 1)*2);
+#endif
}
/* divide the self term(s) equally over the j-threads, then multiply with the coefficients. */
#ifdef LJ_EWALD
- E_lj /= CL_SIZE*NTHREAD_Z;
- E_lj *= 0.5f*ONE_SIXTH_F*lje_coeff6_6;
-#endif /* LJ_EWALD */
+ E_lj /= c_clSize*NTHREAD_Z;
+ E_lj *= 0.5f*c_oneSixth*lje_coeff6_6;
+#endif
#if defined EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF
- E_el /= CL_SIZE*NTHREAD_Z;
+ /* Correct for epsfac^2 due to adding qi^2 */
+ E_el /= nbparam.epsfac*c_clSize*NTHREAD_Z;
#if defined EL_RF || defined EL_CUTOFF
- E_el *= -nbparam.epsfac*0.5f*c_rf;
+ E_el *= -0.5f*c_rf;
#else
- E_el *= -nbparam.epsfac*beta*M_FLOAT_1_SQRTPI; /* last factor 1/sqrt(pi) */
+ E_el *= -beta*M_FLOAT_1_SQRTPI; /* last factor 1/sqrt(pi) */
#endif
-#endif /* EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF */
+#endif /* EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF */
}
-#endif /* EXCLUSION_FORCES */
-
-#endif /* CALC_ENERGIES */
+#endif /* EXCLUSION_FORCES */
- /* skip central shifts when summing shift forces */
- if (nb_sci.shift == CENTRAL)
- {
- bCalcFshift = false;
- }
+#endif /* CALC_ENERGIES */
/* loop over the j clusters = seen by any of the atoms in the current super-cluster */
for (j4 = cij4_start + tidxz; j4 < cij4_end; j4 += NTHREAD_Z)
{
wexcl_idx = pl_cj4[j4].imei[widx].excl_ind;
imask = pl_cj4[j4].imei[widx].imask;
- wexcl = excl[wexcl_idx].pair[(tidx) & (WARP_SIZE - 1)];
+ wexcl = excl[wexcl_idx].pair[(tidx) & (warp_size - 1)];
#ifndef PRUNE_NBL
if (imask)
#endif
{
/* Pre-load cj into shared memory on both warps separately */
- if ((tidxj == 0 || tidxj == 4) && tidxi < NBNXN_GPU_JGROUP_SIZE)
+ if ((tidxj == 0 || tidxj == 4) && tidxi < c_nbnxnGpuJgroupSize)
{
- cjs[tidxi + tidxj * NBNXN_GPU_JGROUP_SIZE / 4] = pl_cj4[j4].cj[tidxi];
+ cjs[tidxi + tidxj * c_nbnxnGpuJgroupSize/c_splitClSize] = pl_cj4[j4].cj[tidxi];
}
/* Unrolling this loop
- with pruning leads to register spilling;
- - on Kepler is much slower;
- - doesn't work on CUDA <v4.1
- Tested with nvcc 3.2 - 5.0.7 */
-#if !defined PRUNE_NBL && __CUDA_ARCH__ < 300 && GMX_CUDA_VERSION >= 4010
-#pragma unroll 4
-#endif
- for (jm = 0; jm < NBNXN_GPU_JGROUP_SIZE; jm++)
+ - on Kepler and later it is much slower;
+ Tested with up to nvcc 7.5 */
+ for (jm = 0; jm < c_nbnxnGpuJgroupSize; jm++)
{
- /* ((1U << NCL_PER_SUPERCL) - 1U) is the i-cluster interaction
- * mask for a super-cluster with all NCL_PER_SUPERCL bits set.
- */
- if (imask & (((1U << NCL_PER_SUPERCL) - 1U) << (jm * NCL_PER_SUPERCL)))
+ if (imask & (superClInteractionMask << (jm * c_numClPerSupercl)))
{
- mask_ji = (1U << (jm * NCL_PER_SUPERCL));
+ mask_ji = (1U << (jm * c_numClPerSupercl));
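+                    /* Bit (jm * c_numClPerSupercl + i) of imask flags the
+                       interaction of i-cluster i with j-cluster jm; mask_ji
+                       starts at the low bit of this range and is advanced by
+                       one bit per iteration of the i-loop below. */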
- cj = cjs[jm + (tidxj & 4) * NBNXN_GPU_JGROUP_SIZE / 4];
- aj = cj * CL_SIZE + tidxj;
+ cj = cjs[jm + (tidxj & 4) * c_nbnxnGpuJgroupSize/c_splitClSize];
+ aj = cj * c_clSize + tidxj;
/* load j atom data */
xqbuf = xq[aj];
xj = make_float3(xqbuf.x, xqbuf.y, xqbuf.z);
- qj_f = nbparam.epsfac * xqbuf.w;
+ qj_f = xqbuf.w;
+#ifndef LJ_COMB
typej = atom_types[aj];
+#else
+ ljcp_j = lj_comb[aj];
+#endif
fcj_buf = make_float3(0.0f);
- /* The PME and RF kernels don't unroll with CUDA <v4.1. */
-#if !defined PRUNE_NBL && !(GMX_CUDA_VERSION < 4010 && defined EXCLUSION_FORCES)
+#if !defined PRUNE_NBL
#pragma unroll 8
#endif
- for (i = 0; i < NCL_PER_SUPERCL; i++)
+ for (i = 0; i < c_numClPerSupercl; i++)
{
if (imask & mask_ji)
{
- ci_offset = i; /* i force buffer offset */
-
- ci = sci * NCL_PER_SUPERCL + i; /* i cluster index */
- ai = ci * CL_SIZE + tidxi; /* i atom index */
+ ci = sci * c_numClPerSupercl + i; /* i cluster index */
/* all threads load an atom from i cluster ci into shmem! */
- xqbuf = xqib[i * CL_SIZE + tidxi];
+ xqbuf = xqib[i * c_clSize + tidxi];
xi = make_float3(xqbuf.x, xqbuf.y, xqbuf.z);
                        /* distance between i and j atoms */
                        rv      = xi - xj;
                        r2      = norm2(rv);

#ifdef PRUNE_NBL
                        /* If _none_ of the atom pairs are in cutoff range,
                           the bit corresponding to the current
                           cluster-pair in imask gets set to 0. */
                        if (!__any(r2 < rlist_sq))
                        {
                            imask &= ~mask_ji;
                        }
#endif

                        int_bit = (wexcl & mask_ji) ? 1.0f : 0.0f;

                        /* cutoff & exclusion check */
#ifdef EXCLUSION_FORCES
                        if (r2 < rcoulomb_sq *
                            (nb_sci.shift != CENTRAL || ci != cj || tidxj > tidxi))
#else
                        if (r2 < rcoulomb_sq * int_bit)
#endif
                        {
/* load the rest of the i-atom parameters */
qi = xqbuf.w;
-#ifdef IATYPE_SHMEM
- typei = atib[i * CL_SIZE + tidxi];
-#else
- typei = atom_types[ai];
-#endif
+#ifndef LJ_COMB
/* LJ 6*C6 and 12*C12 */
-#ifdef USE_TEXOBJ
+ typei = atib[i * c_clSize + tidxi];
c6 = tex1Dfetch<float>(nbparam.nbfp_texobj, 2 * (ntypes * typei + typej));
c12 = tex1Dfetch<float>(nbparam.nbfp_texobj, 2 * (ntypes * typei + typej) + 1);
#else
- c6 = tex1Dfetch(nbfp_texref, 2 * (ntypes * typei + typej));
- c12 = tex1Dfetch(nbfp_texref, 2 * (ntypes * typei + typej) + 1);
-#endif /* USE_TEXOBJ */
-
+ ljcp_i = ljcpib[i * c_clSize + tidxi];
+#ifdef LJ_COMB_GEOM
+ c6 = ljcp_i.x * ljcp_j.x;
+ c12 = ljcp_i.y * ljcp_j.y;
+#else
+ /* LJ 2^(1/6)*sigma and 12*epsilon */
+ sigma = ljcp_i.x + ljcp_j.x;
+ epsilon = ljcp_i.y * ljcp_j.y;
+#if defined CALC_ENERGIES || defined LJ_FORCE_SWITCH || defined LJ_POT_SWITCH
+ convert_sigma_epsilon_to_c6_c12(sigma, epsilon, &c6, &c12);
+#endif
+#endif /* LJ_COMB_GEOM */
+#endif /* LJ_COMB */
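+                        /* Given the scaled inputs above, the conversion amounts to
+                           c6 = epsilon*sigma^6 and c12 = c6*sigma^6, keeping the LB
+                           path consistent with the c6/c12 force expression below. */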
-                        /* avoid NaN for excluded pairs at r=0 */
-                        r2 += (1.0f - int_bit) * NBNXN_AVOID_SING_R2_INC;
+                        /* Ensure distances do not become so small that r^-12 overflows */
+                        r2 = max(r2, NBNXN_MIN_RSQ);
inv_r = rsqrt(r2);
inv_r2 = inv_r * inv_r;
+#if !defined LJ_COMB_LB || defined CALC_ENERGIES
inv_r6 = inv_r2 * inv_r2 * inv_r2;
-#if defined EXCLUSION_FORCES
+#ifdef EXCLUSION_FORCES
/* We could mask inv_r2, but with Ewald
* masking both inv_r6 and F_invr is faster */
inv_r6 *= int_bit;
F_invr = inv_r6 * (c12 * inv_r6 - c6) * inv_r2;
#if defined CALC_ENERGIES || defined LJ_POT_SWITCH
- E_lj_p = int_bit * (c12 * (inv_r6 * inv_r6 + nbparam.repulsion_shift.cpot)*ONE_TWELVETH_F -
- c6 * (inv_r6 + nbparam.dispersion_shift.cpot)*ONE_SIXTH_F);
+ E_lj_p = int_bit * (c12 * (inv_r6 * inv_r6 + nbparam.repulsion_shift.cpot)*c_oneTwelveth -
+ c6 * (inv_r6 + nbparam.dispersion_shift.cpot)*c_oneSixth);
#endif
+#else /* !LJ_COMB_LB || CALC_ENERGIES */
+ float sig_r = sigma*inv_r;
+ float sig_r2 = sig_r*sig_r;
+ float sig_r6 = sig_r2*sig_r2*sig_r2;
+#ifdef EXCLUSION_FORCES
+ sig_r6 *= int_bit;
+#endif /* EXCLUSION_FORCES */
+
+ F_invr = epsilon * sig_r6 * (sig_r6 - 1.0f) * inv_r2;
+#endif /* !LJ_COMB_LB || CALC_ENERGIES */
#ifdef LJ_FORCE_SWITCH
#ifdef CALC_ENERGIES
                            calculate_force_switch_F_E(nbparam, c6, c12, inv_r, r2, &F_invr, &E_lj_p);
#else
                            calculate_force_switch_F(nbparam, c6, c12, inv_r, r2, &F_invr);
#endif /* CALC_ENERGIES */
#endif /* LJ_FORCE_SWITCH */

#ifdef EL_EWALD_ANA
F_invr += qi * qj_f * (int_bit*inv_r2*inv_r + pmecorrF(beta2*r2)*beta3);
#elif defined EL_EWALD_TAB
F_invr += qi * qj_f * (int_bit*inv_r2 -
-#ifdef USE_TEXOBJ
- interpolate_coulomb_force_r(nbparam.coulomb_tab_texobj, r2 * inv_r, coulomb_tab_scale)
-#else
- interpolate_coulomb_force_r(r2 * inv_r, coulomb_tab_scale)
-#endif /* USE_TEXOBJ */
- ) * inv_r;
-#endif /* EL_EWALD_ANA/TAB */
+ interpolate_coulomb_force_r(nbparam.coulomb_tab_texobj, r2 * inv_r, coulomb_tab_scale)) * inv_r;
+#endif /* EL_EWALD_ANA/TAB */
#ifdef CALC_ENERGIES
#ifdef EL_CUTOFF
                            E_el    += qi * qj_f * (int_bit*inv_r - c_rf);
#endif /* EL_CUTOFF */
#endif /* CALC_ENERGIES */

                            f_ij    = rv * F_invr;

                            /* accumulate j forces in registers */
                            fcj_buf -= f_ij;
/* accumulate i forces in registers */
- fci_buf[ci_offset] += f_ij;
+ fci_buf[i] += f_ij;
}
}
}
/* reduce j forces */
-#ifdef REDUCE_SHUFFLE
reduce_force_j_warp_shfl(fcj_buf, f, tidxi, aj);
-#else
- /* store j forces in shmem */
- f_buf[ tidx] = fcj_buf.x;
- f_buf[ FBUF_STRIDE + tidx] = fcj_buf.y;
- f_buf[2 * FBUF_STRIDE + tidx] = fcj_buf.z;
-
- reduce_force_j_generic(f_buf, f, tidxi, tidxj, aj);
-#endif
}
}
#ifdef PRUNE_NBL
            /* Update the imask with the new one which does not contain the
               out-of-range clusters anymore. */
            pl_cj4[j4].imei[widx].imask = imask;
#endif
}
}
+ /* skip central shifts when summing shift forces */
+ if (nb_sci.shift == CENTRAL)
+ {
+ bCalcFshift = false;
+ }
+
float fshift_buf = 0.0f;
/* reduce i forces */
- for (ci_offset = 0; ci_offset < NCL_PER_SUPERCL; ci_offset++)
+ for (i = 0; i < c_numClPerSupercl; i++)
{
- ai = (sci * NCL_PER_SUPERCL + ci_offset) * CL_SIZE + tidxi;
-#ifdef REDUCE_SHUFFLE
- reduce_force_i_warp_shfl(fci_buf[ci_offset], f,
+ ai = (sci * c_numClPerSupercl + i) * c_clSize + tidxi;
+ reduce_force_i_warp_shfl(fci_buf[i], f,
&fshift_buf, bCalcFshift,
tidxj, ai);
-#else
- f_buf[ tidx] = fci_buf[ci_offset].x;
- f_buf[ FBUF_STRIDE + tidx] = fci_buf[ci_offset].y;
- f_buf[2 * FBUF_STRIDE + tidx] = fci_buf[ci_offset].z;
- __syncthreads();
- reduce_force_i(f_buf, f,
- &fshift_buf, bCalcFshift,
- tidxi, tidxj, ai);
- __syncthreads();
-#endif
}
/* add up local shift forces into global mem, tidxj indexes x,y,z */
-#ifdef REDUCE_SHUFFLE
if (bCalcFshift && (tidxj & 3) < 3)
{
- atomicAdd(&(atdat.fshift[nb_sci.shift].x) + (tidxj & ~4), fshift_buf);
+ atomicAdd(&(atdat.fshift[nb_sci.shift].x) + (tidxj & 3), fshift_buf);
}
-#else
- if (bCalcFshift && tidxj < 3)
- {
- atomicAdd(&(atdat.fshift[nb_sci.shift].x) + tidxj, fshift_buf);
- }
-#endif
#ifdef CALC_ENERGIES
-#ifdef REDUCE_SHUFFLE
/* reduce the energies over warps and store into global memory */
reduce_energy_warp_shfl(E_lj, E_el, e_lj, e_el, tidx);
-#else
- /* flush the energies to shmem and reduce them */
- f_buf[ tidx] = E_lj;
- f_buf[FBUF_STRIDE + tidx] = E_el;
- reduce_energy_pow2(f_buf + (tidx & WARP_SIZE), e_lj, e_el, tidx & ~WARP_SIZE);
-#endif
#endif
}
-
-#undef REDUCE_SHUFFLE
-#undef IATYPE_SHMEM
-#undef USE_TEXOBJ
+#endif /* FUNCTION_DECLARATION_ONLY */
#undef NTHREAD_Z
#undef MIN_BLOCKS_PER_MP
#undef EL_EWALD_ANY
#undef EXCLUSION_FORCES
#undef LJ_EWALD
+
+#undef LJ_COMB