/*
* This define affects the spline calculation behaviour in the kernel.
- * 0: a single GPU thread handles a single dimension of a single particle (calculating and storing (order) spline values and derivatives).
- * 1: (order) threads do redundant work on this same task, each one stores only a single theta and single dtheta into global arrays.
- * The only efficiency difference is less global store operations, countered by more redundant spline computation.
+ * 0: a single GPU thread handles a single dimension of a single particle (calculating and storing
+ * (order) spline values and derivatives). 1: (order) threads do redundant work on this same task,
+ * each one stores only a single theta and single dtheta into global arrays. The only efficiency
+ * difference is fewer global store operations, countered by more redundant spline computation.
*
* TODO: estimate if this should be a boolean parameter (and add it to the unit test if so).
*/
#define PME_GPU_PARALLEL_SPLINE 0
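+/* A sketch of the tradeoff, assuming order == 4: with PME_GPU_PARALLEL_SPLINE == 0 one thread
+ * runs the full recursion for its (atom, dimension) pair and issues 2 * order = 8 global stores
+ * (theta[0..3] and dtheta[0..3]); with PME_GPU_PARALLEL_SPLINE == 1 four threads each repeat the
+ * recursion but issue only two stores apiece (theta[o] and dtheta[o]).
+ */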
#ifndef COMPILE_SPREAD_HELPERS_ONCE
-#define COMPILE_SPREAD_HELPERS_ONCE
+# define COMPILE_SPREAD_HELPERS_ONCE
/*! \brief
* General purpose function for loading atom-related data from global to shared memory.
* \param[in] dataCountPerAtom Number of data elements per single atom (e.g. DIM for an rvec coordinates array).
*
*/
-inline void pme_gpu_stage_atom_data(const struct PmeOpenCLKernelParams kernelParams,
- __local float * __restrict__ sm_destination,
- __global const float * __restrict__ gm_source,
- const int dataCountPerAtom)
+inline void pme_gpu_stage_atom_data(const struct PmeOpenCLKernelParams kernelParams,
+ __local float* __restrict__ sm_destination,
+ __global const float* __restrict__ gm_source,
+ const int dataCountPerAtom)
{
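+    /* Each thread stages at most one float; e.g. for rvec coordinates dataCountPerAtom == DIM,
+     * so a block stages atomsPerBlock * DIM contiguous floats (bounds-checked below). */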
- const size_t threadLocalIndex = ((get_local_id(2) * get_local_size(1) + get_local_id(1)) * get_local_size(0) + get_local_id(0));
- const size_t localIndex = threadLocalIndex;
- const size_t globalIndexBase = get_group_id(XX) * atomsPerBlock * dataCountPerAtom;
- const size_t globalIndex = globalIndexBase + localIndex;
- const int globalCheck = pme_gpu_check_atom_data_index(globalIndex, kernelParams.atoms.nAtoms * dataCountPerAtom);
+ const size_t threadLocalIndex =
+ ((get_local_id(2) * get_local_size(1) + get_local_id(1)) * get_local_size(0) + get_local_id(0));
+ const size_t localIndex = threadLocalIndex;
+ const size_t globalIndexBase = get_group_id(XX) * atomsPerBlock * dataCountPerAtom;
+ const size_t globalIndex = globalIndexBase + localIndex;
+ const int globalCheck =
+ pme_gpu_check_atom_data_index(globalIndex, kernelParams.atoms.nAtoms * dataCountPerAtom);
if ((localIndex < atomsPerBlock * dataCountPerAtom) & globalCheck)
{
assert(isfinite(float(gm_source[globalIndex])));
* \param[in] gm_fractShiftsTable Atom fractional coordinates correction table
 * \param[in] gm_gridlineIndicesTable Atom gridline indices lookup table (indexed like gm_fractShiftsTable)
*/
-inline void calculate_splines(const struct PmeOpenCLKernelParams kernelParams,
- const int atomIndexOffset,
- __local const float * __restrict__ sm_coordinates,
- __local const float * __restrict__ sm_coefficients,
- __local float * __restrict__ sm_theta,
- __local int * __restrict__ sm_gridlineIndices,
- __local float * __restrict__ sm_fractCoords,
- __global float * __restrict__ gm_theta,
- __global float * __restrict__ gm_dtheta,
- __global int * __restrict__ gm_gridlineIndices,
- __global const float * __restrict__ gm_fractShiftsTable,
- __global const int * __restrict__ gm_gridlineIndicesTable)
+inline void calculate_splines(const struct PmeOpenCLKernelParams kernelParams,
+ const int atomIndexOffset,
+ __local const float* __restrict__ sm_coordinates,
+ __local const float* __restrict__ sm_coefficients,
+ __local float* __restrict__ sm_theta,
+ __local int* __restrict__ sm_gridlineIndices,
+ __local float* __restrict__ sm_fractCoords,
+ __global float* __restrict__ gm_theta,
+ __global float* __restrict__ gm_dtheta,
+ __global int* __restrict__ gm_gridlineIndices,
+ __global const float* __restrict__ gm_fractShiftsTable,
+ __global const int* __restrict__ gm_gridlineIndicesTable)
{
/* Thread index w.r.t. block */
- const int threadLocalIndex = ((get_local_id(2) * get_local_size(1) + get_local_id(1)) * get_local_size(0) + get_local_id(0));
+ const int threadLocalIndex =
+ ((get_local_id(2) * get_local_size(1) + get_local_id(1)) * get_local_size(0) + get_local_id(0));
/* Warp index w.r.t. block - could probably be obtained easier? */
const int warpIndex = threadLocalIndex / warp_size;
/* Thread index w.r.t. warp */
* The buffer's size, striding and indexing are adjusted accordingly.
* The buffer is accessed with SPLINE_DATA_PTR and SPLINE_DATA macros.
*/
-#if PME_GPU_PARALLEL_SPLINE
- #define splineDataStride (atomsPerBlock * DIM)
- const int splineDataIndex = sharedMemoryIndex;
- __local float sm_splineData[splineDataStride * order];
- __local float *splineDataPtr = sm_splineData;
-#else
- #define splineDataStride 1
- const int splineDataIndex = 0;
- float splineData[splineDataStride * order];
- float *splineDataPtr = splineData;
-#endif
-
-#define SPLINE_DATA_PTR(i) (splineDataPtr + ((i) * splineDataStride + splineDataIndex))
-#define SPLINE_DATA(i) (*SPLINE_DATA_PTR(i))
-
- const int localCheck = (dimIndex < DIM) && (orderIndex < (PME_GPU_PARALLEL_SPLINE ? order : 1));
+# if PME_GPU_PARALLEL_SPLINE
+# define splineDataStride (atomsPerBlock * DIM)
+ const int splineDataIndex = sharedMemoryIndex;
+ __local float sm_splineData[splineDataStride * order];
+ __local float* splineDataPtr = sm_splineData;
+# else
+# define splineDataStride 1
+ const int splineDataIndex = 0;
+ float splineData[splineDataStride * order];
+ float* splineDataPtr = splineData;
+# endif
+
+# define SPLINE_DATA_PTR(i) (splineDataPtr + ((i)*splineDataStride + splineDataIndex))
+# define SPLINE_DATA(i) (*SPLINE_DATA_PTR(i))
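+
+/* Expansion sketch: with PME_GPU_PARALLEL_SPLINE == 0, SPLINE_DATA(i) is simply the private
+ * splineData[i]; with PME_GPU_PARALLEL_SPLINE == 1 it is
+ * sm_splineData[(i) * atomsPerBlock * DIM + sharedMemoryIndex], so the values for one spline
+ * index i across all (atom, dimension) pairs of the block lie contiguously in shared memory.
+ */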
+
+ const int localCheck = (dimIndex < DIM) && (orderIndex < (PME_GPU_PARALLEL_SPLINE ? order : 1));
const int globalCheck = pme_gpu_check_atom_data_index(atomIndexGlobal, kernelParams.atoms.nAtoms);
if (localCheck && globalCheck)
/* Indices interpolation */
if (orderIndex == 0)
{
- int tableIndex, tInt;
- float n, t;
- const float3 x = vload3(atomIndexLocal, sm_coordinates);
+ int tableIndex, tInt;
+ float n, t;
+ const float3 x = vload3(atomIndexLocal, sm_coordinates);
/* Accessing fields in fshOffset/nXYZ/recipbox/... with dimIndex offset
* puts them into local memory(!) instead of accessing the constant memory directly.
switch (dimIndex)
{
case XX:
- tableIndex = kernelParams.grid.tablesOffsets[XX];
- n = kernelParams.grid.realGridSizeFP[XX];
- t = x.x * kernelParams.current.recipBox[dimIndex][XX] + x.y * kernelParams.current.recipBox[dimIndex][YY] + x.z * kernelParams.current.recipBox[dimIndex][ZZ];
+ tableIndex = kernelParams.grid.tablesOffsets[XX];
+ n = kernelParams.grid.realGridSizeFP[XX];
+ t = x.x * kernelParams.current.recipBox[dimIndex][XX]
+ + x.y * kernelParams.current.recipBox[dimIndex][YY]
+ + x.z * kernelParams.current.recipBox[dimIndex][ZZ];
break;
case YY:
- tableIndex = kernelParams.grid.tablesOffsets[YY];
- n = kernelParams.grid.realGridSizeFP[YY];
- t = /*x.x * kernelParams.current.recipbox[dimIndex][XX] + */ x.y * kernelParams.current.recipBox[dimIndex][YY] + x.z * kernelParams.current.recipBox[dimIndex][ZZ];
+ tableIndex = kernelParams.grid.tablesOffsets[YY];
+ n = kernelParams.grid.realGridSizeFP[YY];
+                t = /*x.x * kernelParams.current.recipbox[dimIndex][XX] + */
+                        x.y * kernelParams.current.recipBox[dimIndex][YY]
+                    + x.z * kernelParams.current.recipBox[dimIndex][ZZ];
break;
case ZZ:
- tableIndex = kernelParams.grid.tablesOffsets[ZZ];
- n = kernelParams.grid.realGridSizeFP[ZZ];
- t = /*x.x * kernelParams.current.recipbox[dimIndex][XX] + x.y * kernelParams.current.recipbox[dimIndex][YY] + */ x.z * kernelParams.current.recipBox[dimIndex][ZZ];
+ tableIndex = kernelParams.grid.tablesOffsets[ZZ];
+ n = kernelParams.grid.realGridSizeFP[ZZ];
+                t = /*x.x * kernelParams.current.recipbox[dimIndex][XX] + x.y * kernelParams.current.recipbox[dimIndex][YY] + */
+                        x.z * kernelParams.current.recipBox[dimIndex][ZZ];
break;
}
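+        /* The recipBox terms commented out in the YY and ZZ cases above are zero: with the
+         * GROMACS triclinic box convention (lower-triangular box matrix), the reciprocal box
+         * kernelParams.current.recipBox is upper triangular. */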
const float shift = c_pmeMaxUnitcellShift;
/* Fractional coordinates along box vectors, adding a positive shift to ensure t is positive for triclinic boxes */
- t = (t + shift) * n;
- tInt = (int)t;
+ t = (t + shift) * n;
+ tInt = (int)t;
sm_fractCoords[sharedMemoryIndex] = t - tInt;
- tableIndex += tInt;
+ tableIndex += tInt;
assert(tInt >= 0);
assert(tInt < c_pmeNeighborUnitcellCount * n);
- sm_fractCoords[sharedMemoryIndex] += gm_fractShiftsTable[tableIndex];
+ sm_fractCoords[sharedMemoryIndex] += gm_fractShiftsTable[tableIndex];
sm_gridlineIndices[sharedMemoryIndex] = gm_gridlineIndicesTable[tableIndex];
- gm_gridlineIndices[atomIndexOffset * DIM + sharedMemoryIndex] = sm_gridlineIndices[sharedMemoryIndex];
+ gm_gridlineIndices[atomIndexOffset * DIM + sharedMemoryIndex] =
+ sm_gridlineIndices[sharedMemoryIndex];
}
/* B-spline calculation */
const int chargeCheck = pme_gpu_check_atom_charge(sm_coefficients[atomIndexLocal]);
if (chargeCheck)
{
- float div;
- int o = orderIndex; // This is an index that is set once for PME_GPU_PARALLEL_SPLINE == 1
+ float div;
+ int o = orderIndex; // This is an index that is set once for PME_GPU_PARALLEL_SPLINE == 1
const float dr = sm_fractCoords[sharedMemoryIndex];
assert(isfinite(dr));
*SPLINE_DATA_PTR(1) = dr;
*SPLINE_DATA_PTR(0) = 1.0f - dr;
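+        /* The loop below raises the spline order using the B-spline recursion of
+         * Essmann et al. (1995), eq. 4.1:
+         * M_n(u) = u / (n - 1) * M_{n-1}(u) + (n - u) / (n - 1) * M_{n-1}(u - 1). */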
-#pragma unroll order
+# pragma unroll order
for (int k = 3; k < order; k++)
{
div = 1.0f / (k - 1.0f);
*SPLINE_DATA_PTR(k - 1) = div * dr * SPLINE_DATA(k - 2);
-#pragma unroll
+# pragma unroll
for (int l = 1; l < (k - 1); l++)
{
- *SPLINE_DATA_PTR(k - l - 1) = div * ((dr + l) * SPLINE_DATA(k - l - 2) + (k - l - dr) * SPLINE_DATA(k - l - 1));
+ *SPLINE_DATA_PTR(k - l - 1) = div
+ * ((dr + l) * SPLINE_DATA(k - l - 2)
+ + (k - l - dr) * SPLINE_DATA(k - l - 1));
}
*SPLINE_DATA_PTR(0) = div * (1.0f - dr) * SPLINE_DATA(0);
}
const int thetaGlobalOffsetBase = atomIndexOffset * DIM * order;
/* Differentiation and storing the spline derivatives (dtheta) */
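+        /* This relies on the B-spline derivative identity (Essmann et al. 1995, eq. 4.2)
+         * dM_n(u)/du = M_{n-1}(u) - M_{n-1}(u - 1), which is why dtheta is formed from the
+         * (order - 1) spline values before the final recursion step below. */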
-#if !PME_GPU_PARALLEL_SPLINE
+# if !PME_GPU_PARALLEL_SPLINE
// With PME_GPU_PARALLEL_SPLINE == 1, o is already set to orderIndex;
// With PME_GPU_PARALLEL_SPLINE == 0, we loop o over range(order).
-#pragma unroll order
+# pragma unroll order
for (o = 0; o < order; o++)
-#endif
+# endif
{
- const int thetaIndex = getSplineParamIndex(thetaIndexBase, dimIndex, o);
- const int thetaGlobalIndex = thetaGlobalOffsetBase + thetaIndex;
+ const int thetaIndex = getSplineParamIndex(thetaIndexBase, dimIndex, o);
+ const int thetaGlobalIndex = thetaGlobalOffsetBase + thetaIndex;
const float dtheta = ((o > 0) ? SPLINE_DATA(o - 1) : 0.0f) - SPLINE_DATA(o);
assert(isfinite(dtheta));
gm_dtheta[thetaGlobalIndex] = dtheta;
}
- div = 1.0f / (order - 1.0f);
+ div = 1.0f / (order - 1.0f);
*SPLINE_DATA_PTR(order - 1) = div * dr * SPLINE_DATA(order - 2);
-#pragma unroll
+# pragma unroll
for (int k = 1; k < (order - 1); k++)
{
- *SPLINE_DATA_PTR(order - k - 1) = div * ((dr + k) * SPLINE_DATA(order - k - 2) + (order - k - dr) * SPLINE_DATA(order - k - 1));
+ *SPLINE_DATA_PTR(order - k - 1) = div
+ * ((dr + k) * SPLINE_DATA(order - k - 2)
+ + (order - k - dr) * SPLINE_DATA(order - k - 1));
}
*SPLINE_DATA_PTR(0) = div * (1.0f - dr) * SPLINE_DATA(0);
/* Storing the spline values (theta) */
-#if !PME_GPU_PARALLEL_SPLINE
+# if !PME_GPU_PARALLEL_SPLINE
// See comment for the loop above
-#pragma unroll order
+# pragma unroll order
for (o = 0; o < order; o++)
-#endif
+# endif
{
const int thetaIndex = getSplineParamIndex(thetaIndexBase, dimIndex, o);
const int thetaGlobalIndex = thetaGlobalOffsetBase + thetaIndex;
- sm_theta[thetaIndex] = SPLINE_DATA(o);
+ sm_theta[thetaIndex] = SPLINE_DATA(o);
assert(isfinite(sm_theta[thetaIndex]));
gm_theta[thetaGlobalIndex] = SPLINE_DATA(o);
}
* Second stage of the whole kernel.
*
* \param[in] kernelParams Input PME GPU data in constant memory.
- * \param[in] atomIndexOffset Starting atom index for the execution block w.r.t. global memory.
- * \param[in] sm_coefficients Atom coefficents/charges in the shared memory.
- * \param[in] sm_gridlineIndices Atom gridline indices in the shared memory.
- * \param[in] sm_theta Atom spline values in the shared memory.
- * \param[out] gm_grid Global 3D grid for spreading.
+ * \param[in]  atomIndexOffset    Starting atom index for the execution block w.r.t. global memory.
+ * \param[in]  sm_coefficients    Atom coefficients/charges in the shared memory.
+ * \param[in]  sm_gridlineIndices Atom gridline indices in the shared memory.
+ * \param[in]  sm_theta           Atom spline values in the shared memory.
+ * \param[out] gm_grid            Global 3D grid for spreading.
*/
-inline void spread_charges(const struct PmeOpenCLKernelParams kernelParams,
- int atomIndexOffset,
- __local const float * __restrict__ sm_coefficients,
- __local const int * __restrict__ sm_gridlineIndices,
- __local const float * __restrict__ sm_theta,
- __global float * __restrict__ gm_grid)
+inline void spread_charges(const struct PmeOpenCLKernelParams kernelParams,
+ int atomIndexOffset,
+ __local const float* __restrict__ sm_coefficients,
+ __local const int* __restrict__ sm_gridlineIndices,
+ __local const float* __restrict__ sm_theta,
+ __global float* __restrict__ gm_grid)
{
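+    /* Spreading scatters each atom's charge over an order^3 stencil of grid points:
+     * constVal below carries the theta_z * theta_y * charge product for one (iy, iz) grid
+     * line, and the ithx loop applies the remaining theta_x factors along x. */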
const int nx = kernelParams.grid.realGridSize[XX];
const int ny = kernelParams.grid.realGridSize[YY];
{
iy -= ny;
}
- int iz = sm_gridlineIndices[atomIndexLocal * DIM + ZZ] - offz + ithz;
+ int iz = sm_gridlineIndices[atomIndexLocal * DIM + ZZ] - offz + ithz;
if (iz >= nz)
{
iz -= nz;
}
/* Atom index w.r.t. warp - alternating 0 1 0 1 .. */
- const int atomWarpIndex = atomIndexLocal % atomsPerWarp;
+ const int atomWarpIndex = atomIndexLocal % atomsPerWarp;
/* Warp index w.r.t. block - could probably be obtained easier? */
- const int warpIndex = atomIndexLocal / atomsPerWarp;
-
- const int splineIndexBase = getSplineParamIndexBase(warpIndex, atomWarpIndex);
- const int splineIndexZ = getSplineParamIndex(splineIndexBase, ZZ, ithz);
- const float thetaZ = sm_theta[splineIndexZ];
- const int splineIndexY = getSplineParamIndex(splineIndexBase, YY, ithy);
- const float thetaY = sm_theta[splineIndexY];
- const float constVal = thetaZ * thetaY * sm_coefficients[atomIndexLocal];
+ const int warpIndex = atomIndexLocal / atomsPerWarp;
+
+ const int splineIndexBase = getSplineParamIndexBase(warpIndex, atomWarpIndex);
+ const int splineIndexZ = getSplineParamIndex(splineIndexBase, ZZ, ithz);
+ const float thetaZ = sm_theta[splineIndexZ];
+ const int splineIndexY = getSplineParamIndex(splineIndexBase, YY, ithy);
+ const float thetaY = sm_theta[splineIndexY];
+ const float constVal = thetaZ * thetaY * sm_coefficients[atomIndexLocal];
assert(isfinite(constVal));
- const int constOffset = iy * pnz + iz;
+ const int constOffset = iy * pnz + iz;
-#pragma unroll order
+# pragma unroll order
for (int ithx = 0; (ithx < order); ithx++)
{
int ix = ixBase + ithx;
}
}
-#endif //COMPILE_SPREAD_HELPERS_ONCE
+#endif // COMPILE_SPREAD_HELPERS_ONCE
/*! \brief
* A spline computation and/or charge spreading kernel function.
* \param[in] gm_coefficients Atom charges/coefficients.
* \param[in] gm_coordinates Atom coordinates (rvec)
*/
-__attribute__((reqd_work_group_size(order, order, atomsPerBlock)))
-__kernel void CUSTOMIZED_KERNEL_NAME(pme_spline_and_spread_kernel)(const struct PmeOpenCLKernelParams kernelParams,
- __global float * __restrict__ gm_theta,
- __global float * __restrict__ gm_dtheta,
- __global int * __restrict__ gm_gridlineIndices,
- __global float *__restrict__ gm_grid,
- __global const float * __restrict__ gm_fractShiftsTable,
- __global const int * __restrict__ gm_gridlineIndicesTable,
- __global const float * __restrict__ gm_coefficients,
- __global const float * __restrict__ gm_coordinates)
+__attribute__((reqd_work_group_size(order, order, atomsPerBlock))) __kernel void CUSTOMIZED_KERNEL_NAME(
+ pme_spline_and_spread_kernel)(const struct PmeOpenCLKernelParams kernelParams,
+ __global float* __restrict__ gm_theta,
+ __global float* __restrict__ gm_dtheta,
+ __global int* __restrict__ gm_gridlineIndices,
+ __global float* __restrict__ gm_grid,
+ __global const float* __restrict__ gm_fractShiftsTable,
+ __global const int* __restrict__ gm_gridlineIndicesTable,
+ __global const float* __restrict__ gm_coefficients,
+ __global const float* __restrict__ gm_coordinates)
{
// Gridline indices, ivec
- __local int sm_gridlineIndices[atomsPerBlock * DIM];
+ __local int sm_gridlineIndices[atomsPerBlock * DIM];
// Charges
- __local float sm_coefficients[atomsPerBlock];
+ __local float sm_coefficients[atomsPerBlock];
// Spline values
- __local float sm_theta[atomsPerBlock * DIM * order];
+ __local float sm_theta[atomsPerBlock * DIM * order];
// Fractional coordinates - only for spline computation
- __local float sm_fractCoords[atomsPerBlock * DIM];
+ __local float sm_fractCoords[atomsPerBlock * DIM];
// Staging coordinates - only for spline computation
- __local float sm_coordinates[DIM * atomsPerBlock];
+ __local float sm_coordinates[DIM * atomsPerBlock];
- const int atomIndexOffset = get_group_id(XX) * atomsPerBlock;
+ const int atomIndexOffset = get_group_id(XX) * atomsPerBlock;
/* Staging coefficients/charges for both spline and spread */
pme_gpu_stage_atom_data(kernelParams, sm_coefficients, gm_coefficients, 1);
pme_gpu_stage_atom_data(kernelParams, sm_coordinates, gm_coordinates, DIM);
barrier(CLK_LOCAL_MEM_FENCE);
- calculate_splines(kernelParams, atomIndexOffset, sm_coordinates,
- sm_coefficients, sm_theta, sm_gridlineIndices,
- sm_fractCoords, gm_theta, gm_dtheta, gm_gridlineIndices,
- gm_fractShiftsTable, gm_gridlineIndicesTable);
+ calculate_splines(kernelParams, atomIndexOffset, sm_coordinates, sm_coefficients, sm_theta,
+ sm_gridlineIndices, sm_fractCoords, gm_theta, gm_dtheta,
+ gm_gridlineIndices, gm_fractShiftsTable, gm_gridlineIndicesTable);
#if !defined(_AMD_SOURCE_) && !defined(_NVIDIA_SOURCE_)
- /* This is only here for execution of e.g. 32-sized warps on 16-wide hardware; this was __syncwarp() in CUDA.
- * #2519
+ /* This is only here for execution of e.g. 32-sized warps on 16-wide hardware; this was
+ * __syncwarp() in CUDA. #2519
*/
barrier(CLK_LOCAL_MEM_FENCE);
#endif
/* Spline data - only thetas (dthetas will only be needed in gather) */
pme_gpu_stage_atom_data(kernelParams, sm_theta, gm_theta, DIM * order);
/* Gridline indices - they're actually int and not float, but C99 is angry about overloads */
- pme_gpu_stage_atom_data(kernelParams, (__local float *)sm_gridlineIndices, (__global const float *)gm_gridlineIndices, DIM);
+ pme_gpu_stage_atom_data(kernelParams, (__local float*)sm_gridlineIndices,
+ (__global const float*)gm_gridlineIndices, DIM);
barrier(CLK_LOCAL_MEM_FENCE);
}