/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013-2016,2017,2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief Implements PME GPU spline calculation and charge spreading in CUDA.
 * TODO: consider always pre-sorting particles (as in DD case).
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 */
#include "gmxpre.h"

#include <cassert>

#include "gromacs/gpu_utils/cuda_kernel_utils.cuh"
#include "gromacs/gpu_utils/typecasts.cuh"

#include "pme.cuh"
#include "pme_gpu_calculate_splines.cuh"
#include "pme_grid.h"
/*! \brief
 * Charge spreading onto the grid.
 * This corresponds to the CPU function spread_coefficients_bsplines_thread().
 * Optional second stage of the spline_and_spread_kernel.
 *
 * \tparam[in] order                PME interpolation order.
 * \tparam[in] wrapX                Whether the grid overlap in dimension X should be wrapped.
 * \tparam[in] wrapY                Whether the grid overlap in dimension Y should be wrapped.
 * \tparam[in] useOrderThreads      Whether we should use order threads per atom (order*order used if false).
 *
 * \param[in]  kernelParams         Input PME CUDA data in constant memory.
 * \param[in]  atomIndexOffset      Starting atom index for the execution block w.r.t. global memory.
 * \param[in]  atomCharge           Atom charge/coefficient of the atom processed by the thread.
 * \param[in]  sm_gridlineIndices   Atom gridline indices in shared memory.
 * \param[in]  sm_theta             Atom spline values in shared memory.
 */
template<const int order, const bool wrapX, const bool wrapY, const bool useOrderThreads>
__device__ __forceinline__ void spread_charges(const PmeGpuCudaKernelParams kernelParams,
                                               int                          atomIndexOffset,
                                               const float*                 atomCharge,
                                               const int* __restrict__ sm_gridlineIndices,
                                               const float* __restrict__ sm_theta)
{
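    /* A note on the assumed thread mapping (inferred from the index arithmetic
     * below, not stated anywhere authoritative in this file): threadIdx.x
     * enumerates the order spline contributions along Z (ithz); threadIdx.y
     * enumerates those along Y when order*order threads per atom are used, and
     * is effectively unused when useOrderThreads is true (the Y loop below then
     * covers all order values); threadIdx.z enumerates the atoms of the block.
     */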
    /* Global memory pointer to the output grid */
    float* __restrict__ gm_grid = kernelParams.grid.d_realGrid;

    const int atomsPerWarp = useOrderThreads ? c_pmeSpreadGatherAtomsPerWarp4ThPerAtom
                                             : c_pmeSpreadGatherAtomsPerWarp;

    const int nx  = kernelParams.grid.realGridSize[XX];
    const int ny  = kernelParams.grid.realGridSize[YY];
    const int nz  = kernelParams.grid.realGridSize[ZZ];
    const int pny = kernelParams.grid.realGridSizePadded[YY];
    const int pnz = kernelParams.grid.realGridSizePadded[ZZ];
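    /* The real grid is stored row-major with padding in Y and Z, so a grid
     * point (ix, iy, iz) lives at ix * pny * pnz + iy * pnz + iz; this is
     * exactly how gridIndexGlobal is assembled below.
     */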
    const int offx = 0, offy = 0, offz = 0; // unused for now

    const int atomIndexLocal  = threadIdx.z;
    const int atomIndexGlobal = atomIndexOffset + atomIndexLocal;

    const int globalCheck = pme_gpu_check_atom_data_index(atomIndexGlobal, kernelParams.atoms.nAtoms);
    const int chargeCheck = pme_gpu_check_atom_charge(*atomCharge);
    if (chargeCheck & globalCheck)
    {
        // Spline Z coordinates
        const int ithz = threadIdx.x;

        const int ixBase = sm_gridlineIndices[atomIndexLocal * DIM + XX] - offx;
        const int iyBase = sm_gridlineIndices[atomIndexLocal * DIM + YY] - offy;
        int       iz     = sm_gridlineIndices[atomIndexLocal * DIM + ZZ] - offz + ithz;
        if (iz >= nz)
        {
            iz -= nz;
        }
        /* Atom index w.r.t. warp - alternating 0 1 0 1 ... */
        const int atomWarpIndex = atomIndexLocal % atomsPerWarp;
        /* Warp index w.r.t. block - could probably be obtained easier? */
        const int warpIndex = atomIndexLocal / atomsPerWarp;

        const int splineIndexBase = getSplineParamIndexBase<order, atomsPerWarp>(warpIndex, atomWarpIndex);
        const int   splineIndexZ = getSplineParamIndex<order, atomsPerWarp>(splineIndexBase, ZZ, ithz);
        const float thetaZ       = sm_theta[splineIndexZ];
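        /* The packing of spline parameters in sm_theta (per warp, per atom in
         * warp, per dimension) is owned by the index helpers declared in
         * pme_gpu_calculate_splines.cuh; this code relies only on those
         * helpers, not on the layout itself.
         */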
        /* loop not used if order*order threads per atom */
        const int ithyMin = useOrderThreads ? 0 : threadIdx.y;
        const int ithyMax = useOrderThreads ? order : threadIdx.y + 1;
        for (int ithy = ithyMin; ithy < ithyMax; ithy++)
        {
            int iy = iyBase + ithy;
            if (wrapY & (iy >= ny))
            {
                iy -= ny;
            }
            const int splineIndexY = getSplineParamIndex<order, atomsPerWarp>(splineIndexBase, YY, ithy);
            float       thetaY = sm_theta[splineIndexY];
            const float Val    = thetaZ * thetaY * (*atomCharge);
            assert(isfinite(Val));
            const int offset = iy * pnz + iz;

#pragma unroll
            for (int ithx = 0; (ithx < order); ithx++)
            {
                int ix = ixBase + ithx;
                if (wrapX & (ix >= nx))
                {
                    ix -= nx;
                }
                const int gridIndexGlobal = ix * pny * pnz + offset;
                const int splineIndexX =
                        getSplineParamIndex<order, atomsPerWarp>(splineIndexBase, XX, ithx);
                const float thetaX = sm_theta[splineIndexX];
                assert(isfinite(thetaX));
                assert(isfinite(gm_grid[gridIndexGlobal]));
                atomicAdd(gm_grid + gridIndexGlobal, thetaX * Val);
            }
        }
    }
}
/*! \brief
 * A spline computation and charge spreading kernel function.
 *
 * Two tuning parameters can be used for additional performance. For small systems and for
 * debugging, writeGlobal should be used, removing the need to recalculate the theta values in the
 * gather kernel. Similarly, useOrderThreads (order threads per atom) gives higher performance than
 * order*order threads on large systems.
 *
 * \tparam[in] order                PME interpolation order.
 * \tparam[in] computeSplines       A boolean which tells if the spline parameter and
 *                                  gridline indices' computation should be performed.
 * \tparam[in] spreadCharges        A boolean which tells if the charge spreading should be performed.
 * \tparam[in] wrapX                A boolean which tells if the grid overlap in dimension X should be wrapped.
 * \tparam[in] wrapY                A boolean which tells if the grid overlap in dimension Y should be wrapped.
 * \tparam[in] writeGlobal          A boolean which tells if the theta values and gridlines should be written to global memory.
 * \tparam[in] useOrderThreads      A boolean which tells if we should use order threads per atom (order*order used if false).
 * \param[in]  kernelParams         Input PME CUDA data in constant memory.
 */
template<const int order, const bool computeSplines, const bool spreadCharges, const bool wrapX, const bool wrapY, const bool writeGlobal, const bool useOrderThreads>
__launch_bounds__(c_spreadMaxThreadsPerBlock) CLANG_DISABLE_OPTIMIZATION_ATTRIBUTE __global__
        void pme_spline_and_spread_kernel(const PmeGpuCudaKernelParams kernelParams)
{
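    /* A sketch of the expected launch configuration, inferred from the indexing
     * below rather than stated here: blockDim.x spans the spline order in Z,
     * blockDim.y spans it in Y (1 when useOrderThreads is true), and blockDim.z
     * spans the atoms of the block, for c_spreadMaxThreadsPerBlock threads in
     * total.
     */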
    const int atomsPerBlock =
            useOrderThreads ? c_spreadMaxThreadsPerBlock / c_pmeSpreadGatherThreadsPerAtom4ThPerAtom
                            : c_spreadMaxThreadsPerBlock / c_pmeSpreadGatherThreadsPerAtom;
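    // For example, assuming order 4 and a 256-thread block: order*order = 16
    // threads per atom yields 16 atoms per block, while order = 4 threads per
    // atom (useOrderThreads) yields 64 atoms per block.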
    // Gridline indices, ivec
    __shared__ int sm_gridlineIndices[atomsPerBlock * DIM];
    // Spline values
    __shared__ float sm_theta[atomsPerBlock * DIM * order];
    float            dtheta;

    const int atomsPerWarp = useOrderThreads ? c_pmeSpreadGatherAtomsPerWarp4ThPerAtom
                                             : c_pmeSpreadGatherAtomsPerWarp;

    float3 atomX;
    float  atomCharge;

    const int blockIndex      = blockIdx.y * gridDim.x + blockIdx.x;
    const int atomIndexOffset = blockIndex * atomsPerBlock;
    /* Thread index w.r.t. block */
    const int threadLocalId =
            (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x;
    /* Warp index w.r.t. block - could probably be obtained easier? */
    const int warpIndex = threadLocalId / warp_size;

    /* Atom index w.r.t. warp */
    const int atomWarpIndex = threadIdx.z % atomsPerWarp;
    /* Atom index w.r.t. block/shared memory */
    const int atomIndexLocal = warpIndex * atomsPerWarp + atomWarpIndex;
    /* Atom index w.r.t. global memory */
    const int atomIndexGlobal = atomIndexOffset + atomIndexLocal;
    /* Early return for fully empty blocks at the end
     * (should only happen for billions of input atoms)
     */
    if (atomIndexOffset >= kernelParams.atoms.nAtoms)
    {
        return;
    }
    /* Charges, required for both spline and spread */
    if (c_useAtomDataPrefetch)
    {
        __shared__ float sm_coefficients[atomsPerBlock];
        pme_gpu_stage_atom_data<float, atomsPerBlock, 1>(kernelParams, sm_coefficients,
                                                         kernelParams.atoms.d_coefficients);
        __syncthreads();
        atomCharge = sm_coefficients[atomIndexLocal];
    }
    else
    {
        atomCharge = kernelParams.atoms.d_coefficients[atomIndexGlobal];
    }
    if (computeSplines)
    {
        const float3* __restrict__ gm_coordinates = asFloat3(kernelParams.atoms.d_coordinates);
        if (c_useAtomDataPrefetch)
        {
            // Coordinates
            __shared__ float3 sm_coordinates[atomsPerBlock];

            /* Staging coordinates */
            pme_gpu_stage_atom_data<float3, atomsPerBlock, 1>(kernelParams, sm_coordinates, gm_coordinates);
            __syncthreads();
            atomX = sm_coordinates[atomIndexLocal];
        }
        else
        {
            atomX = gm_coordinates[atomIndexGlobal];
        }
        calculate_splines<order, atomsPerBlock, atomsPerWarp, false, writeGlobal>(
                kernelParams, atomIndexOffset, atomX, atomCharge, sm_theta, &dtheta, sm_gridlineIndices);
        __syncwarp();
    }
    else
    {
        /* Staging the data for spread
         * (the data is assumed to be in GPU global memory with proper layout already,
         * as in after running the spline kernel)
         */
        /* Spline data - only thetas (dthetas will only be needed in gather) */
        pme_gpu_stage_atom_data<float, atomsPerBlock, DIM * order>(kernelParams, sm_theta,
                                                                   kernelParams.atoms.d_theta);
        /* Gridline indices */
        pme_gpu_stage_atom_data<int, atomsPerBlock, DIM>(kernelParams, sm_gridlineIndices,
                                                         kernelParams.atoms.d_gridlineIndices);

        __syncthreads();
    }
    /* Spreading */
    if (spreadCharges)
    {
        spread_charges<order, wrapX, wrapY, useOrderThreads>(
                kernelParams, atomIndexOffset, &atomCharge, sm_gridlineIndices, sm_theta);
    }
}
//! Kernel instantiations
template __global__ void
pme_spline_and_spread_kernel<4, true, true, true, true, true, true>(const PmeGpuCudaKernelParams);
template __global__ void
pme_spline_and_spread_kernel<4, true, false, true, true, true, true>(const PmeGpuCudaKernelParams);
template __global__ void
pme_spline_and_spread_kernel<4, false, true, true, true, true, true>(const PmeGpuCudaKernelParams);

template __global__ void
pme_spline_and_spread_kernel<4, true, true, true, true, false, true>(const PmeGpuCudaKernelParams);

template __global__ void
pme_spline_and_spread_kernel<4, true, true, true, true, true, false>(const PmeGpuCudaKernelParams);
template __global__ void
pme_spline_and_spread_kernel<4, true, false, true, true, true, false>(const PmeGpuCudaKernelParams);
template __global__ void
pme_spline_and_spread_kernel<4, false, true, true, true, true, false>(const PmeGpuCudaKernelParams);

template __global__ void
pme_spline_and_spread_kernel<4, true, true, true, true, false, false>(const PmeGpuCudaKernelParams);
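/* The instantiations above cover PME order 4 with wrapX and wrapY enabled: the
 * used combinations of computeSplines/spreadCharges/writeGlobal, each once with
 * order threads per atom (last argument true) and once with order*order
 * threads (false). The spline-only and spread-only variants appear only with
 * writeGlobal = true, presumably because separate kernels have to communicate
 * spline data through global memory.
 */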