/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2018,2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \internal \file
 * \brief Implements PME OpenCL spline parameter computation and charge spread kernels.
 *
 * When including this and other PME OpenCL kernel files, plenty of common
 * constants/macros are expected to be defined (such as "order", which is the PME interpolation order).
 * For details, please see how pme_program.cl is compiled in pme_gpu_program_impl_ocl.cpp.
 *
 * This file's kernels specifically expect the following definitions:
 *
 * - atomsPerBlock which expresses how many atoms are processed by a single work group
 * - order which is a PME interpolation order
 * - computeSplines and spreadCharges must evaluate to either true or false to specify which
 *   kernel flavor is being compiled
 * - wrapX and wrapY must evaluate to either true or false to specify whether the grid overlap
 *   in dimension X/Y is to be used
 *
 * \author Aleksei Iupinov
 */

#include "gromacs/gpu_utils/vectype_ops.clh"

#include "pme_gpu_calculate_splines.clh"
#include "pme_gpu_types.h"

/*
 * This define affects the spline calculation behaviour in the kernel.
 * 0: a single GPU thread handles a single dimension of a single particle (calculating and storing
 *    (order) spline values and derivatives).
 * 1: (order) threads do redundant work on this same task, each one storing only a single theta and
 *    a single dtheta into the global arrays.
 * The only efficiency difference is fewer global store operations, countered by more redundant
 * spline computation.
 *
 * TODO: estimate if this should be a boolean parameter (and add it to the unit test if so).
 */
#define PME_GPU_PARALLEL_SPLINE 0
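
/* Illustrative sketch of the two modes above (comment only, not compiled), assuming order == 4:
 *
 * PME_GPU_PARALLEL_SPLINE == 0:
 *   the thread assigned to (atom a, dimension d) computes all 4 spline values and derivatives of
 *   atom a along d, and performs 4 global stores of theta plus 4 of dtheta.
 *
 * PME_GPU_PARALLEL_SPLINE == 1:
 *   the threads assigned to (atom a, dimension d, orderIndex 0..3) each recompute the same 4
 *   spline values, but each stores only the single theta/dtheta entry selected by its orderIndex,
 *   trading redundant arithmetic for fewer global stores per thread.
 */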

#ifndef COMPILE_SPREAD_HELPERS_ONCE
#    define COMPILE_SPREAD_HELPERS_ONCE

/*! \brief
 * General purpose function for loading atom-related data from global to shared memory.
 *
 * \param[out] sm_destination    Local memory array for output.
 * \param[in]  gm_source         Global memory array for input.
 * \param[in]  dataCountPerAtom  Number of data elements per single atom (e.g. DIM for an rvec coordinates array).
 */
inline void pme_gpu_stage_atom_data(__local float* __restrict__ sm_destination,
                                    __global const float* __restrict__ gm_source,
                                    const int dataCountPerAtom)
{
    assert((get_local_id(2) * get_local_size(1) + get_local_id(1)) * get_local_size(0)
                   + get_local_id(0)
           < MAX_INT);
    const int threadLocalIndex =
            (int)((get_local_id(2) * get_local_size(1) + get_local_id(1)) * get_local_size(0)
                  + get_local_id(0));
    const int localIndex      = threadLocalIndex;
    const int globalIndexBase = (int)get_group_id(XX) * atomsPerBlock * dataCountPerAtom;
    const int globalIndex     = globalIndexBase + localIndex;
    if (localIndex < atomsPerBlock * dataCountPerAtom)
    {
        assert(isfinite(float(gm_source[globalIndex])));
        sm_destination[localIndex] = gm_source[globalIndex];
    }
}

/*! \brief
 * PME GPU spline parameter and gridline indices calculation.
 * This corresponds to the CPU functions calc_interpolation_idx() and make_bsplines().
 * First stage of the whole kernel.
 *
 * \param[in]  kernelParams             Input PME GPU data in constant memory.
 * \param[in]  atomIndexOffset          Starting atom index for the execution block w.r.t. global memory.
 * \param[in]  sm_coordinates           Atom coordinates in the shared memory (rvec).
 * \param[in]  sm_coefficients          Atom charges/coefficients in the shared memory.
 * \param[out] sm_theta                 Atom spline values in the shared memory.
 * \param[out] sm_gridlineIndices       Atom gridline indices in the shared memory.
 * \param[out] sm_fractCoords           Atom fractional coordinates (rvec).
 * \param[out] gm_theta                 Atom spline parameter values.
 * \param[out] gm_dtheta                Atom spline parameter derivatives.
 * \param[out] gm_gridlineIndices       Same as \p sm_gridlineIndices but in global memory.
 * \param[in]  gm_fractShiftsTable      Atom fractional coordinates correction table.
 * \param[in]  gm_gridlineIndicesTable  Table with atom gridline indices.
 */
gmx_opencl_inline void calculate_splines(const struct PmeOpenCLKernelParams kernelParams,
                                         const int                          atomIndexOffset,
                                         __local const float* __restrict__ sm_coordinates,
                                         __local const float* __restrict__ sm_coefficients,
                                         __local float* __restrict__ sm_theta,
                                         __local int* __restrict__ sm_gridlineIndices,
                                         __local float* __restrict__ sm_fractCoords,
                                         __global float* __restrict__ gm_theta,
                                         __global float* __restrict__ gm_dtheta,
                                         __global int* __restrict__ gm_gridlineIndices,
                                         __global const float* __restrict__ gm_fractShiftsTable,
                                         __global const int* __restrict__ gm_gridlineIndicesTable)
{
    /* Thread index w.r.t. block */
    assert((get_local_id(2) * get_local_size(1) + get_local_id(1)) * get_local_size(0)
                   + get_local_id(0)
           < MAX_INT);
    assert(numGrids == 1 || numGrids == 2);
    assert(numGrids == 1 || c_skipNeutralAtoms == false);
    const int threadLocalIndex =
            (int)((get_local_id(2) * get_local_size(1) + get_local_id(1)) * get_local_size(0)
                  + get_local_id(0));
    /* Warp index w.r.t. block - could probably be obtained easier? */
    const int warpIndex = threadLocalIndex / warp_size;
    /* Thread index w.r.t. warp */
    const int threadWarpIndex = threadLocalIndex % warp_size;
    /* Atom index w.r.t. warp - alternating 0 1 0 1 .. */
    const int atomWarpIndex = threadWarpIndex % atomsPerWarp;
    /* Atom index w.r.t. block/shared memory */
    const int atomIndexLocal = warpIndex * atomsPerWarp + atomWarpIndex;
    /* Spline contribution index in one dimension */
    const int orderIndex = threadWarpIndex / (atomsPerWarp * DIM);
    /* Dimension index */
    const int dimIndex = (threadWarpIndex - orderIndex * (atomsPerWarp * DIM)) / atomsPerWarp;
    /* Multi-purpose index of rvec/ivec atom data */
    const int sharedMemoryIndex = atomIndexLocal * DIM + dimIndex;
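
    /* Worked example of the decomposition above (comment only; the actual atomsPerWarp and
     * warp_size values come from pme_gpu_calculate_splines.clh and the compile-time defines).
     * Assuming, for illustration, atomsPerWarp == 2 and DIM == 3:
     *   threadWarpIndex == 7  ->  atomWarpIndex == 1, orderIndex == 7 / 6 == 1,
     *   dimIndex == (7 - 1 * 6) / 2 == 0,
     * i.e. this thread works on the X dimension of the second atom of its warp and handles
     * spline contribution 1 (only relevant when PME_GPU_PARALLEL_SPLINE == 1, since otherwise
     * threads with orderIndex > 0 are masked out by localCheck below).
     */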

    /* Spline parameters need a working buffer.
     * With PME_GPU_PARALLEL_SPLINE == 0 it is just a local array of (order) values for some of
     * the threads, which is fine;
     * With PME_GPU_PARALLEL_SPLINE == 1 (order) times more threads are involved, so the shared
     * memory is used to avoid overhead.
     * The buffer's size, striding and indexing are adjusted accordingly.
     * The buffer is accessed with SPLINE_DATA_PTR and SPLINE_DATA macros.
     */
#    if PME_GPU_PARALLEL_SPLINE
#        define splineDataStride (atomsPerBlock * DIM)
    const int      splineDataIndex = sharedMemoryIndex;
    __local float  sm_splineData[splineDataStride * order];
    __local float* splineDataPtr = sm_splineData;
#    else
#        define splineDataStride 1
    const int splineDataIndex = 0;
    float     splineData[splineDataStride * order];
    float*    splineDataPtr = splineData;
#    endif

#    define SPLINE_DATA_PTR(i) (splineDataPtr + ((i)*splineDataStride + splineDataIndex))
#    define SPLINE_DATA(i) (*SPLINE_DATA_PTR(i))

    const int localCheck = (dimIndex < DIM) && (orderIndex < (PME_GPU_PARALLEL_SPLINE ? order : 1));

    if (localCheck)
    {
        /* Indices interpolation */
        if (orderIndex == 0)
        {
            int          tableIndex = 0;
            float        n          = 0.0F;
            float        t          = 0.0F;
            const float3 x          = vload3(atomIndexLocal, sm_coordinates);

            /* Accessing fields in fshOffset/nXYZ/recipbox/... with dimIndex offset
             * puts them into local memory(!) instead of accessing the constant memory directly.
             * That's the reason for the switch, to unroll explicitly.
             * The commented parts correspond to the 0 components of the recipbox.
             */
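            /* What the switch below computes, written out (comment only):
             *   t_d = sum over e >= d of recipBox[d][e] * x_e
             *         (the commented-out terms are the components of the reciprocal box that are
             *          always zero, as noted above)
             *   u   = (t_d + c_pmeMaxUnitcellShift) * realGridSizeFP[d]
             * The integer part of u (plus the per-dimension table offset) is then used to look up
             * the wrapped gridline index and a fractional-shift correction, and the fractional
             * part of u becomes the atom's fractional coordinate along dimension d.
             */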
            switch (dimIndex)
            {
                case XX:
                    tableIndex = kernelParams.grid.tablesOffsets[XX];
                    n          = kernelParams.grid.realGridSizeFP[XX];
                    t          = x.x * kernelParams.current.recipBox[dimIndex][XX]
                        + x.y * kernelParams.current.recipBox[dimIndex][YY]
                        + x.z * kernelParams.current.recipBox[dimIndex][ZZ];
                    break;

                case YY:
                    tableIndex = kernelParams.grid.tablesOffsets[YY];
                    n          = kernelParams.grid.realGridSizeFP[YY];
                    t = /*x.x * kernelParams.current.recipbox[dimIndex][XX] + */ x.y
                                * kernelParams.current.recipBox[dimIndex][YY]
                        + x.z * kernelParams.current.recipBox[dimIndex][ZZ];
                    break;

                case ZZ:
                    tableIndex = kernelParams.grid.tablesOffsets[ZZ];
                    n          = kernelParams.grid.realGridSizeFP[ZZ];
                    t = /*x.x * kernelParams.current.recipbox[dimIndex][XX] +
                           x.y * kernelParams.current.recipbox[dimIndex][YY] + */
                            x.z * kernelParams.current.recipBox[dimIndex][ZZ];
                    break;

                default: assert(false); return;
            }

            const float shift = c_pmeMaxUnitcellShift;

            /* Fractional coordinates along box vectors, adding a positive shift to ensure t is positive for triclinic boxes */
            t = (t + shift) * n;
            const int tInt = (int)t;
            sm_fractCoords[sharedMemoryIndex] = t - (float)tInt;
            tableIndex += tInt;
            assert(tInt >= 0);
            assert(tInt < c_pmeNeighborUnitcellCount * n);
            sm_fractCoords[sharedMemoryIndex] += gm_fractShiftsTable[tableIndex];
            sm_gridlineIndices[sharedMemoryIndex] = gm_gridlineIndicesTable[tableIndex];
            gm_gridlineIndices[atomIndexOffset * DIM + sharedMemoryIndex] =
                    sm_gridlineIndices[sharedMemoryIndex];
        }
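
        /* The B-spline construction below follows the usual cardinal B-spline recursion
         * (as in the smooth-PME formulation), given here for reference only:
         *     M_n(u)     = [ u * M_{n-1}(u) + (n - u) * M_{n-1}(u - 1) ] / (n - 1)
         *     dM_n(u)/du = M_{n-1}(u) - M_{n-1}(u - 1)
         * The loops first build the splines up to order-1, store the derivatives (dtheta) as
         * differences of consecutive order-(order-1) spline values, and only then take the final
         * recursion step to the full order for the values themselves (theta).
         */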
        /* B-spline calculation */

        const int chargeCheck = pme_gpu_check_atom_charge(sm_coefficients[atomIndexLocal]);
        /* With FEP (numGrids == 2), we might have 0 charge in state A, but !=0 in state B, so we always calculate splines */
        if (numGrids == 2 || chargeCheck)
        {
            int o = orderIndex; // This is an index that is set once for PME_GPU_PARALLEL_SPLINE == 1

            const float dr = sm_fractCoords[sharedMemoryIndex];
            assert(isfinite(dr));

            /* dr is relative offset from lower cell limit */
            *SPLINE_DATA_PTR(order - 1) = 0.0F;
            *SPLINE_DATA_PTR(1)         = dr;
            *SPLINE_DATA_PTR(0)         = 1.0F - dr;

#    pragma unroll order
            for (int k = 3; k < order; k++)
            {
                const float div         = 1.0F / ((float)k - 1.0F);
                *SPLINE_DATA_PTR(k - 1) = div * dr * SPLINE_DATA(k - 2);
#    pragma unroll
                for (int l = 1; l < (k - 1); l++)
                {
                    *SPLINE_DATA_PTR(k - l - 1) =
                            div
                            * ((dr + (float)l) * SPLINE_DATA(k - l - 2)
                               + ((float)k - (float)l - dr) * SPLINE_DATA(k - l - 1));
                }
                *SPLINE_DATA_PTR(0) = div * (1.0F - dr) * SPLINE_DATA(0);
            }

            const int thetaIndexBase        = getSplineParamIndexBase(warpIndex, atomWarpIndex);
            const int thetaGlobalOffsetBase = atomIndexOffset * DIM * order;

            /* Differentiation and storing the spline derivatives (dtheta) */
#    if !PME_GPU_PARALLEL_SPLINE
            // With PME_GPU_PARALLEL_SPLINE == 1, o is already set to orderIndex;
            // With PME_GPU_PARALLEL_SPLINE == 0, we loop o over range(order).
#        pragma unroll order
            for (o = 0; o < order; o++)
#    endif
            {
                const int thetaIndex       = getSplineParamIndex(thetaIndexBase, dimIndex, o);
                const int thetaGlobalIndex = thetaGlobalOffsetBase + thetaIndex;

                const float dtheta = ((o > 0) ? SPLINE_DATA(o - 1) : 0.0F) - SPLINE_DATA(o);
                assert(isfinite(dtheta));
                gm_dtheta[thetaGlobalIndex] = dtheta;
            }

            const float div             = 1.0F / (order - 1.0F);
            *SPLINE_DATA_PTR(order - 1) = div * dr * SPLINE_DATA(order - 2);
#    pragma unroll
            for (int k = 1; k < (order - 1); k++)
            {
                *SPLINE_DATA_PTR(order - k - 1) =
                        div
                        * ((dr + (float)k) * SPLINE_DATA(order - k - 2)
                           + (order - k - dr) * SPLINE_DATA(order - k - 1));
            }
            *SPLINE_DATA_PTR(0) = div * (1.0F - dr) * SPLINE_DATA(0);

            /* Storing the spline values (theta) */
#    if !PME_GPU_PARALLEL_SPLINE
            // See comment for the loop above
#        pragma unroll order
            for (o = 0; o < order; o++)
#    endif
            {
                const int thetaIndex       = getSplineParamIndex(thetaIndexBase, dimIndex, o);
                const int thetaGlobalIndex = thetaGlobalOffsetBase + thetaIndex;

                sm_theta[thetaIndex] = SPLINE_DATA(o);
                assert(isfinite(sm_theta[thetaIndex]));
                gm_theta[thetaGlobalIndex] = SPLINE_DATA(o);
            }
        }
    }
}
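
/* Note on the charge spreading below (descriptive comment only): the real-space grid is stored
 * row-major with padded Y/Z extents, so grid cell (ix, iy, iz) lives at
 * ix * pny * pnz + iy * pnz + iz. Because neighbouring atoms (and different work groups) spread
 * onto overlapping grid regions, the accumulation has to use atomic adds.
 */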

/*! \brief
 * Charge spreading onto the grid.
 * This corresponds to the CPU function spread_coefficients_bsplines_thread().
 * Second stage of the whole kernel.
 *
 * \param[in]  kernelParams        Input PME GPU data in constant memory.
 * \param[in]  sm_coefficients     Atom coefficients/charges in the shared memory.
 * \param[in]  sm_gridlineIndices  Atom gridline indices in the shared memory.
 * \param[in]  sm_theta            Atom spline values in the shared memory.
 * \param[out] gm_grid             Global 3D grid for spreading.
 */
gmx_opencl_inline void spread_charges(const struct PmeOpenCLKernelParams kernelParams,
                                      __local const float* __restrict__ sm_coefficients,
                                      __local const int* __restrict__ sm_gridlineIndices,
                                      __local const float* __restrict__ sm_theta,
                                      __global float* __restrict__ gm_grid)
{
    const int nx  = kernelParams.grid.realGridSize[XX];
    const int ny  = kernelParams.grid.realGridSize[YY];
    const int nz  = kernelParams.grid.realGridSize[ZZ];
    const int pny = kernelParams.grid.realGridSizePadded[YY];
    const int pnz = kernelParams.grid.realGridSizePadded[ZZ];

    const int offx = 0;
    const int offy = 0;
    const int offz = 0;

    const int atomIndexLocal = get_local_id(ZZ);

    const int chargeCheck = pme_gpu_check_atom_charge(sm_coefficients[atomIndexLocal]);
    if (chargeCheck)
    {
        // Spline Y/Z coordinates
        const int ithy   = get_local_id(YY);
        const int ithz   = get_local_id(XX);
        const int ixBase = sm_gridlineIndices[atomIndexLocal * DIM + XX] - offx;
        int       iy     = sm_gridlineIndices[atomIndexLocal * DIM + YY] - offy + ithy;
        if (wrapY & (iy >= ny))
        {
            iy -= ny;
        }
        int iz = sm_gridlineIndices[atomIndexLocal * DIM + ZZ] - offz + ithz;
        if (iz >= nz)
        {
            iz -= nz;
        }

        /* Atom index w.r.t. warp - alternating 0 1 0 1 .. */
        const int atomWarpIndex = atomIndexLocal % atomsPerWarp;
        /* Warp index w.r.t. block - could probably be obtained easier? */
        const int warpIndex = atomIndexLocal / atomsPerWarp;

        const int   splineIndexBase = getSplineParamIndexBase(warpIndex, atomWarpIndex);
        const int   splineIndexZ    = getSplineParamIndex(splineIndexBase, ZZ, ithz);
        const float thetaZ          = sm_theta[splineIndexZ];
        const int   splineIndexY    = getSplineParamIndex(splineIndexBase, YY, ithy);
        const float thetaY          = sm_theta[splineIndexY];
        const float constVal        = thetaZ * thetaY * sm_coefficients[atomIndexLocal];
        assert(isfinite(constVal));
        const int constOffset = iy * pnz + iz;

#    pragma unroll order
        for (int ithx = 0; (ithx < order); ithx++)
        {
            int ix = ixBase + ithx;
            if (wrapX & (ix >= nx))
            {
                ix -= nx;
            }
            const int   gridIndexGlobal = ix * pny * pnz + constOffset;
            const int   splineIndexX    = getSplineParamIndex(splineIndexBase, XX, ithx);
            const float thetaX          = sm_theta[splineIndexX];
            assert(isfinite(thetaX));
            assert(isfinite(gm_grid[gridIndexGlobal]));
            atomicAdd_g_f(gm_grid + gridIndexGlobal, thetaX * constVal);
        }
    }
}

#endif // COMPILE_SPREAD_HELPERS_ONCE
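
/* As described in the file header, the kernel below is compiled from pme_program.cl with
 * different combinations of the computeSplines/spreadCharges defines, yielding separate kernel
 * flavors (e.g. spline-only, spread-only, and fused spline-and-spread); see
 * pme_gpu_program_impl_ocl.cpp for the actual set of compiled variants.
 */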
/*! \brief
 * A spline computation and/or charge spreading kernel function.
 * Please see the file description for additional defines which this kernel expects.
 *
 * \param[in]     kernelParams             Input PME GPU data in constant memory.
 * \param[in,out] gm_theta                 Atom spline parameter values
 * \param[out]    gm_dtheta                Atom spline parameter derivatives
 * \param[in,out] gm_gridlineIndices       Atom gridline indices (ivec)
 * \param[out]    gm_gridA                 Global 3D grid for charge spreading.
 *                                         Grid for the unperturbed state, FEP state A or the single grid
 *                                         used for interpolated coefficients on one grid in FEP A/B.
 * \param[out]    gm_gridB                 Global 3D grid for charge spreading.
 *                                         FEP state B when using dual grids (when calculating energy and virials).
 * \param[in]     gm_fractShiftsTable      Atom fractional coordinates correction table
 * \param[in]     gm_gridlineIndicesTable  Table with atom gridline indices
 * \param[in]     gm_coefficientsA         Atom charges/coefficients of the unperturbed state or FEP state A.
 * \param[in]     gm_coefficientsB         Atom charges/coefficients in FEP state B. Only used
 *                                         when spreading interpolated coefficients on one grid.
 * \param[in]     gm_coordinates           Atom coordinates (rvec)
 */
__attribute__((reqd_work_group_size(order, order, atomsPerBlock))) __kernel void CUSTOMIZED_KERNEL_NAME(
        pme_spline_and_spread_kernel)(const struct PmeOpenCLKernelParams kernelParams,
                                      __global float* __restrict__ gm_theta,
                                      __global float* __restrict__ gm_dtheta,
                                      __global int* __restrict__ gm_gridlineIndices,
                                      __global float* __restrict__ gm_gridA,
                                      __global float* __restrict__ gm_gridB,
                                      __global const float* __restrict__ gm_fractShiftsTable,
                                      __global const int* __restrict__ gm_gridlineIndicesTable,
                                      __global const float* __restrict__ gm_coefficientsA,
                                      __global const float* __restrict__ gm_coefficientsB,
                                      __global const float* __restrict__ gm_coordinates)
{
    // Gridline indices, ivec
    __local int sm_gridlineIndices[atomsPerBlock * DIM];
    // Charges
    __local float sm_coefficients[atomsPerBlock];
    // Spline values
    __local float sm_theta[atomsPerBlock * DIM * order];
    // Fractional coordinates - only for spline computation
    __local float sm_fractCoords[atomsPerBlock * DIM];
    // Staging coordinates - only for spline computation
    __local float sm_coordinates[DIM * atomsPerBlock];

    const int atomIndexOffset = (int)get_group_id(XX) * atomsPerBlock;
    int       gridIndex       = 0;

    /* Staging coefficients/charges for both spline and spread */
    pme_gpu_stage_atom_data(sm_coefficients, gm_coefficientsA, 1);

    if (computeSplines)
    {
        /* Staging coordinates */
        pme_gpu_stage_atom_data(sm_coordinates, gm_coordinates, DIM);

        barrier(CLK_LOCAL_MEM_FENCE);
        calculate_splines(kernelParams, atomIndexOffset, sm_coordinates, sm_coefficients, sm_theta,
                          sm_gridlineIndices, sm_fractCoords, gm_theta, gm_dtheta,
                          gm_gridlineIndices, gm_fractShiftsTable, gm_gridlineIndicesTable);
#if !defined(_AMD_SOURCE_) && !defined(_NVIDIA_SOURCE_)
        /* This is only here for execution of e.g. 32-sized warps on 16-wide hardware; this was
         * __syncwarp() in CUDA. #2519
         */
        barrier(CLK_LOCAL_MEM_FENCE);
#endif
    }
    else
    {
        /* Staging the data for spread
         * (the data is assumed to be in GPU global memory with proper layout already,
         * as in after running the spline kernel)
         */
        /* Spline data - only thetas (dthetas will only be needed in gather) */
        pme_gpu_stage_atom_data(sm_theta, gm_theta, DIM * order);
        /* Gridline indices - they're actually int and not float, but C99 is angry about overloads */
        pme_gpu_stage_atom_data(
                (__local float*)sm_gridlineIndices, (__global const float*)gm_gridlineIndices, DIM);

        barrier(CLK_LOCAL_MEM_FENCE);
    }

    /* Spreading */
    if (spreadCharges)
    {
        spread_charges(kernelParams, sm_coefficients, sm_gridlineIndices, sm_theta, gm_gridA);
    }
    if (numGrids == 2)
    {
        barrier(CLK_LOCAL_MEM_FENCE);
        gridIndex = 1;
        pme_gpu_stage_atom_data(sm_coefficients, gm_coefficientsB, 1);
        barrier(CLK_LOCAL_MEM_FENCE);
        if (spreadCharges)
        {
            spread_charges(kernelParams, sm_coefficients, sm_gridlineIndices, sm_theta, gm_gridB);
        }
    }
}
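
/* Host-side launch sketch (illustrative comment only, not GROMACS code; the real build and launch
 * logic lives in pme_gpu_program_impl_ocl.cpp and the PME GPU internals). Assuming the program was
 * built from pme_program.cl with -D definitions for order, atomsPerBlock, computeSplines,
 * spreadCharges, wrapX, wrapY, etc., a launch could look roughly like the following, with
 * numBlocks and the buffer names being purely illustrative:
 *
 *     size_t localSize[3]  = { order, order, atomsPerBlock };
 *     size_t globalSize[3] = { numBlocks * order, order, atomsPerBlock };
 *     clSetKernelArg(kernel, 0, sizeof(kernelParams), &kernelParams);
 *     clSetKernelArg(kernel, 1, sizeof(cl_mem), &d_theta);
 *     ... remaining buffer arguments in the order of the kernel signature ...
 *     clEnqueueNDRangeKernel(queue, kernel, 3, NULL, globalSize, localSize, 0, NULL, NULL);
 *
 * The work-group shape must match reqd_work_group_size(order, order, atomsPerBlock) above, and
 * work groups are indexed along dimension X (see get_group_id(XX) in the kernel).
 */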