/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2016,2017,2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Implements helper routines for PME gather and spline routines.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 */
#include "gmxpre.h"

#include <cassert>

#include "gromacs/gpu_utils/cuda_kernel_utils.cuh"

#include "pme.cuh"
/*! \internal \brief
 * Gets a base of the unique index to an element in a spline parameter buffer (theta/dtheta),
 * which is laid out for GPU spread/gather kernels. The base only corresponds to the atom index
 * within the execution block.
 *
 * Feed the result into getSplineParamIndex() to get a full index.
 * TODO: it's likely that both parameters can be just replaced with a single atom index, as they
 * are derived from it. Do that, verifying that the generated code is not bloated, and/or revise
 * the spline indexing scheme. Removing warp dependency would also be nice (and would probably
 * coincide with removing c_pmeSpreadGatherAtomsPerWarp).
 *
 * \tparam order            PME order
 * \tparam atomsPerWarp     Number of atoms processed by a warp
 * \param[in] warpIndex     Warp index wrt the block.
 * \param[in] atomWarpIndex Atom index wrt the warp (from 0 to atomsPerWarp - 1).
 *
 * \returns Index into theta or dtheta array using GPU layout.
 */
template<int order, int atomsPerWarp>
int __device__ __forceinline__ getSplineParamIndexBase(int warpIndex, int atomWarpIndex)
{
    assert((atomWarpIndex >= 0) && (atomWarpIndex < atomsPerWarp));
    const int dimIndex    = 0;
    const int splineIndex = 0;
    // The zeroes are here to preserve the full index formula for reference
    return (((splineIndex + order * warpIndex) * DIM + dimIndex) * atomsPerWarp + atomWarpIndex);
}
/*! \internal \brief
 * Gets a unique index to an element in a spline parameter buffer (theta/dtheta),
 * which is laid out for GPU spread/gather kernels. The index is wrt the execution block,
 * in range(0, atomsPerBlock * order * DIM).
 *
 * This function consumes the result of getSplineParamIndexBase() and adjusts it for
 * \p dimIndex and \p splineIndex.
 *
 * \tparam order              PME order
 * \tparam atomsPerWarp       Number of atoms processed by a warp
 * \param[in] paramIndexBase  Must be the result of getSplineParamIndexBase().
 * \param[in] dimIndex        Dimension index (from 0 to 2)
 * \param[in] splineIndex     Spline contribution index (from 0 to \p order - 1)
 *
 * \returns Index into theta or dtheta array using GPU layout.
 */
template<int order, int atomsPerWarp>
int __device__ __forceinline__ getSplineParamIndex(int paramIndexBase, int dimIndex, int splineIndex)
{
    assert((dimIndex >= XX) && (dimIndex < DIM));
    assert((splineIndex >= 0) && (splineIndex < order));
    return (paramIndexBase + (splineIndex * DIM + dimIndex) * atomsPerWarp);
}
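
/* Illustrative sketch (not from the original file): how a spread/gather kernel thread
 * addresses one spline parameter with the two-step scheme above. The template values
 * (order 4, 2 atoms per warp) and the surrounding index variables are assumptions.
 *
 *   const int base    = getSplineParamIndexBase<4, 2>(warpIndex, atomWarpIndex);
 *   const int index   = getSplineParamIndex<4, 2>(base, ZZ, splineIndex);
 *   const float theta = sm_theta[index];
 */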
/*! \internal \brief
 * An inline CUDA function for skipping the zero-charge atoms.
 *
 * \returns                Non-0 if atom should be processed, 0 otherwise.
 * \param[in] coefficient  The atom charge.
 *
 * This is called from the spline_and_spread and gather PME kernels.
 */
int __device__ __forceinline__ pme_gpu_check_atom_charge(const float coefficient)
{
    assert(isfinite(coefficient));
    return c_skipNeutralAtoms ? (coefficient != 0.0f) : 1;
}
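
/* Illustrative sketch (not from this file): kernels typically use the check to mask out
 * all work for neutral atoms, e.g.
 *
 *   if (pme_gpu_check_atom_charge(atomCharge))
 *   {
 *       // spread/gather contributions for this atom
 *   }
 */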
//! Controls if the atom and charge data is prefetched into shared memory or loaded per thread from global
static const bool c_useAtomDataPrefetch = true;
/*! \brief Asserts if the argument is finite.
 *
 * The function works for any data type that can be cast to float. Note that there is also
 * a specialized implementation for the float3 data type.
 *
 * \param[in] arg  Argument to check.
 */
template<typename T>
__device__ inline void assertIsFinite(T arg);
template<>
__device__ inline void assertIsFinite(float3 arg)
{
    assert(isfinite(float(arg.x)));
    assert(isfinite(float(arg.y)));
    assert(isfinite(float(arg.z)));
}

template<typename T>
__device__ inline void assertIsFinite(T arg)
{
    assert(isfinite(float(arg)));
}
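
/* Usage sketch (illustrative; the variable names are assumptions): the generic template
 * plus the float3 specialization let callers check any staged value uniformly, e.g.
 *
 *   assertIsFinite(atomCharge); // float, handled by the generic template
 *   assertIsFinite(atomX);      // float3, handled by the specialization above
 */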
/*! \brief
 * General purpose function for loading atom-related data from global to shared memory.
 *
 * \tparam[in] T                 Data type (float/int/...)
 * \tparam[in] atomsPerBlock     Number of atoms processed by a block - should be
 *                               accounted for in the size of the shared memory array.
 * \tparam[in] dataCountPerAtom  Number of data elements per single atom (e.g. DIM for
 *                               an rvec coordinates array).
 * \param[out] sm_destination    Shared memory array for output.
 * \param[in]  gm_source         Global memory array for input.
 */
template<typename T, const int atomsPerBlock, const int dataCountPerAtom>
__device__ __forceinline__ void pme_gpu_stage_atom_data(T* __restrict__ sm_destination,
                                                        const T* __restrict__ gm_source)
{
    const int blockIndex       = blockIdx.y * gridDim.x + blockIdx.x;
    const int threadLocalIndex = ((threadIdx.z * blockDim.y + threadIdx.y) * blockDim.x) + threadIdx.x;
    const int localIndex       = threadLocalIndex;
    const int globalIndexBase  = blockIndex * atomsPerBlock * dataCountPerAtom;
    const int globalIndex      = globalIndexBase + localIndex;
    if (localIndex < atomsPerBlock * dataCountPerAtom)
    {
        assertIsFinite(gm_source[globalIndex]);
        sm_destination[localIndex] = gm_source[globalIndex];
    }
}
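
/* Illustrative usage sketch (not from this file): staging per-atom coordinates (DIM floats
 * per atom) at the start of a kernel. The array names are assumptions, and a block-wide
 * barrier is required before the staged data is read.
 *
 *   __shared__ float sm_coordinates[atomsPerBlock * DIM];
 *   pme_gpu_stage_atom_data<float, atomsPerBlock, DIM>(sm_coordinates, gm_coordinates);
 *   __syncthreads();
 */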
/*! \brief
 * PME GPU spline parameter and gridline indices calculation.
 * This corresponds to the CPU functions calc_interpolation_idx() and make_bsplines().
 * First stage of the whole kernel.
 *
 * \tparam[in] order               PME interpolation order.
 * \tparam[in] atomsPerBlock       Number of atoms processed by a block - should be accounted
 *                                 for in the sizes of the shared memory arrays.
 * \tparam[in] atomsPerWarp        Number of atoms processed by a warp
 * \tparam[in] writeSmDtheta       Bool controlling if the theta derivative should be written
 *                                 to shared memory. Enables calculation of dtheta if set.
 * \tparam[in] writeGlobal         A boolean which tells if the theta values and gridlines
 *                                 should be written to global memory. Enables calculation of
 *                                 dtheta if set.
 * \param[in]  kernelParams        Input PME CUDA data in constant memory.
 * \param[in]  atomIndexOffset     Starting atom index for the execution block w.r.t. global memory.
 * \param[in]  atomX               Atom coordinate of atom processed by thread.
 * \param[in]  atomCharge          Atom charge/coefficient of atom processed by thread.
 * \param[out] sm_theta            Atom spline values in the shared memory.
 * \param[out] sm_dtheta           Derivative of atom spline values in shared memory.
 * \param[out] sm_gridlineIndices  Atom gridline indices in the shared memory.
 */
template<const int order, const int atomsPerBlock, const int atomsPerWarp, const bool writeSmDtheta, const bool writeGlobal>
__device__ __forceinline__ void calculate_splines(const PmeGpuCudaKernelParams kernelParams,
                                                  const int                    atomIndexOffset,
                                                  const float3                 atomX,
                                                  const float                  atomCharge,
                                                  float* __restrict__ sm_theta,
                                                  float* __restrict__ sm_dtheta,
                                                  int* __restrict__ sm_gridlineIndices)
{
    /* Global memory pointers for output */
    float* __restrict__ gm_theta         = kernelParams.atoms.d_theta;
    float* __restrict__ gm_dtheta        = kernelParams.atoms.d_dtheta;
    int* __restrict__ gm_gridlineIndices = kernelParams.atoms.d_gridlineIndices;

    /* Fractional coordinates */
    __shared__ float sm_fractCoords[atomsPerBlock * DIM];

    /* Thread index w.r.t. block */
    const int threadLocalId =
            (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x;
    /* Warp index w.r.t. block - could probably be obtained easier? */
    const int warpIndex = threadLocalId / warp_size;
    /* Atom index w.r.t. warp - alternating 0 1 0 1 .. */
    const int atomWarpIndex = threadIdx.z % atomsPerWarp;
    /* Atom index w.r.t. block/shared memory */
    const int atomIndexLocal = warpIndex * atomsPerWarp + atomWarpIndex;

    /* Spline contribution index in one dimension */
    const int threadLocalIdXY = (threadIdx.y * blockDim.x) + threadIdx.x;
    const int orderIndex      = threadLocalIdXY / DIM;
    /* Dimension index */
    const int dimIndex = threadLocalIdXY % DIM;

    /* Multi-purpose index of rvec/ivec atom data */
    const int sharedMemoryIndex = atomIndexLocal * DIM + dimIndex;

    float splineData[order];

    const int localCheck = (dimIndex < DIM) && (orderIndex < 1);
    /* we have 4 threads per atom, but can only use 3 here for the dimensions */
    if (localCheck)
    {
        /* Indices interpolation */

        if (orderIndex == 0)
        {
            int   tableIndex, tInt;
            float n, t;
            assert(atomIndexLocal < DIM * atomsPerBlock);
            /* Accessing fields in fshOffset/nXYZ/recipbox/... with dimIndex offset
             * puts them into local memory(!) instead of accessing the constant memory directly.
             * That's the reason for the switch, to unroll explicitly.
             * The commented parts correspond to the 0 components of the recipbox.
             */
            switch (dimIndex)
            {
                case XX:
                    tableIndex = kernelParams.grid.tablesOffsets[XX];
                    n          = kernelParams.grid.realGridSizeFP[XX];
                    t          = atomX.x * kernelParams.current.recipBox[dimIndex][XX]
                        + atomX.y * kernelParams.current.recipBox[dimIndex][YY]
                        + atomX.z * kernelParams.current.recipBox[dimIndex][ZZ];
                    break;

                case YY:
                    tableIndex = kernelParams.grid.tablesOffsets[YY];
                    n          = kernelParams.grid.realGridSizeFP[YY];
                    t = /*atomX.x * kernelParams.current.recipBox[dimIndex][XX] + */ atomX.y
                                * kernelParams.current.recipBox[dimIndex][YY]
                        + atomX.z * kernelParams.current.recipBox[dimIndex][ZZ];
                    break;

                case ZZ:
                    tableIndex = kernelParams.grid.tablesOffsets[ZZ];
                    n          = kernelParams.grid.realGridSizeFP[ZZ];
                    t = /*atomX.x * kernelParams.current.recipBox[dimIndex][XX] + atomX.y * kernelParams.current.recipBox[dimIndex][YY] + */ atomX.z
                        * kernelParams.current.recipBox[dimIndex][ZZ];
                    break;
            }
            const float shift = c_pmeMaxUnitcellShift;
            /* Fractional coordinates along box vectors, adding a positive shift to ensure t is positive for triclinic boxes */
            t    = (t + shift) * n;
            tInt = (int)t;
            assert(sharedMemoryIndex < atomsPerBlock * DIM);
            sm_fractCoords[sharedMemoryIndex] = t - tInt;
            tableIndex += tInt;
            assert(tInt >= 0);
            assert(tInt < c_pmeNeighborUnitcellCount * n);
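
            /* Worked example with illustrative values: for a fractional coordinate t = -0.3,
             * shift = c_pmeMaxUnitcellShift = 2 and grid size n = 64, the scaled value is
             * (-0.3 + 2) * 64 = 108.8, so tInt = 108 and the stored fraction is 0.8;
             * tInt then offsets tableIndex into the shift/gridline lookup tables below.
             */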
            // TODO have shared table for both parameters to share the fetch, as index is always same?
            // TODO compare texture/LDG performance
            sm_fractCoords[sharedMemoryIndex] +=
                    fetchFromParamLookupTable(kernelParams.grid.d_fractShiftsTable,
                                              kernelParams.fractShiftsTableTexture, tableIndex);
            sm_gridlineIndices[sharedMemoryIndex] =
                    fetchFromParamLookupTable(kernelParams.grid.d_gridlineIndicesTable,
                                              kernelParams.gridlineIndicesTableTexture, tableIndex);
            if (writeGlobal)
            {
                gm_gridlineIndices[atomIndexOffset * DIM + sharedMemoryIndex] =
                        sm_gridlineIndices[sharedMemoryIndex];
            }
        }
        /* B-spline calculation */

        const int chargeCheck = pme_gpu_check_atom_charge(atomCharge);
        if (chargeCheck)
        {
            float div;
            int   o = orderIndex; // This is an index that is set once for PME_GPU_PARALLEL_SPLINE == 1

            const float dr = sm_fractCoords[sharedMemoryIndex];
            assert(isfinite(dr));

            /* dr is relative offset from lower cell limit */
            splineData[order - 1] = 0.0f;
            splineData[1]         = dr;
            splineData[0]         = 1.0f - dr;
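
            /* The loop below implements the standard B-spline order-raising recursion
             * (cf. Essmann et al., J. Chem. Phys. 103, 8577 (1995)):
             *
             *   M_k(u) = [ u * M_{k-1}(u) + (k - u) * M_{k-1}(u - 1) ] / (k - 1)
             *
             * starting from the order-2 values set above; after iteration k, splineData[0..k-1]
             * holds the k spline contributions of order k evaluated around dr.
             */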
#pragma unroll
            for (int k = 3; k < order; k++)
            {
                div               = 1.0f / (k - 1.0f);
                splineData[k - 1] = div * dr * splineData[k - 2];
#pragma unroll
                for (int l = 1; l < (k - 1); l++)
                {
                    splineData[k - l - 1] = div
                                            * ((dr + l) * splineData[k - l - 2]
                                               + (k - l - dr) * splineData[k - l - 1]);
                }
                splineData[0] = div * (1.0f - dr) * splineData[0];
            }
            const int thetaIndexBase =
                    getSplineParamIndexBase<order, atomsPerWarp>(warpIndex, atomWarpIndex);
            const int thetaGlobalOffsetBase = atomIndexOffset * DIM * order;
            /* only calculate dtheta if we are saving it to shared or global memory */
            if (writeSmDtheta || writeGlobal)
            {
                /* Differentiation and storing the spline derivatives (dtheta) */
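                /* The dtheta formula below is the B-spline derivative identity
                 *   M_n'(u) = M_{n-1}(u) - M_{n-1}(u - 1),
                 * applied to the (order - 1) spline values still held in splineData[];
                 * the final order-raising step follows only after this block.
                 */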
#pragma unroll
                for (o = 0; o < order; o++)
                {
                    const int thetaIndex =
                            getSplineParamIndex<order, atomsPerWarp>(thetaIndexBase, dimIndex, o);

                    const float dtheta = ((o > 0) ? splineData[o - 1] : 0.0f) - splineData[o];
                    assert(isfinite(dtheta));
                    assert(thetaIndex < order * DIM * atomsPerBlock);
                    if (writeSmDtheta)
                    {
                        sm_dtheta[thetaIndex] = dtheta;
                    }
                    if (writeGlobal)
                    {
                        const int thetaGlobalIndex  = thetaGlobalOffsetBase + thetaIndex;
                        gm_dtheta[thetaGlobalIndex] = dtheta;
                    }
                }
            }
            div                   = 1.0f / (order - 1.0f);
            splineData[order - 1] = div * dr * splineData[order - 2];
#pragma unroll
            for (int k = 1; k < (order - 1); k++)
            {
                splineData[order - k - 1] = div
                                            * ((dr + k) * splineData[order - k - 2]
                                               + (order - k - dr) * splineData[order - k - 1]);
            }
            splineData[0] = div * (1.0f - dr) * splineData[0];
            /* Storing the spline values (theta) */
#pragma unroll
            for (o = 0; o < order; o++)
            {
                const int thetaIndex =
                        getSplineParamIndex<order, atomsPerWarp>(thetaIndexBase, dimIndex, o);
                assert(thetaIndex < order * DIM * atomsPerBlock);
                sm_theta[thetaIndex] = splineData[o];
                assert(isfinite(sm_theta[thetaIndex]));
                if (writeGlobal)
                {
                    const int thetaGlobalIndex = thetaGlobalOffsetBase + thetaIndex;
                    gm_theta[thetaGlobalIndex] = splineData[o];
                }
            }
        }
    }
}
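
/* Illustrative call sketch (an assumption, not part of this file): a spline-and-spread
 * kernel would invoke this stage roughly as
 *
 *   calculate_splines<order, atomsPerBlock, atomsPerWarp, false, writeGlobal>(
 *           kernelParams, atomIndexOffset, atomX, atomCharge, sm_theta, sm_dtheta, sm_gridlineIndices);
 *   __syncthreads();
 *
 * with the barrier ensuring the shared-memory spline data is visible to all threads before
 * the spreading stage reads it.
 */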