/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013-2016,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \internal \file
 *  \brief Implements PME GPU spline calculation and charge spreading in CUDA.
 *  TODO: consider always pre-sorting particles (as in DD case).
 *
 *  \author Aleksei Iupinov <a.yupinov@gmail.com>
 */

#include "gmxpre.h"

#include <cassert>

#include "gromacs/ewald/pme.h"
#include "gromacs/gpu_utils/cuda_kernel_utils.cuh"
#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/gmxassert.h"

#include "pme.cuh"
#include "pme-grid.h"
#include "pme-timings.cuh"

/*
 * This define affects the spline calculation behaviour in the kernel.
 * 0: a single GPU thread handles a single dimension of a single particle (calculating and storing (order) spline values and derivatives).
 * 1: (order) threads do redundant work on this same task, each one stores only a single theta and single dtheta into global arrays.
 * The only efficiency difference is fewer global store operations, countered by more redundant spline computation.
 *
 * TODO: estimate if this should be a boolean parameter (and add it to the unit test if so).
 */
#define PME_GPU_PARALLEL_SPLINE 0
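
/* Illustrative sketch of the two modes above (not additional code; assumes order == 4):
 * with PME_GPU_PARALLEL_SPLINE == 0, one thread owns one (atom, dimension) pair, runs the full
 * B-spline recursion and writes all four theta and dtheta values for that pair (4 global stores each);
 * with PME_GPU_PARALLEL_SPLINE == 1, four threads each repeat the same recursion for that pair,
 * but thread o writes only theta[o] and dtheta[o] (1 global store each).
 */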

//! Spreading max block width in warps picked among powers of 2 (2, 4, 8, 16) for max. occupancy and min. runtime in most cases
constexpr int c_spreadMaxWarpsPerBlock = 8;
/* TODO: it has been observed that the kernel can be faster with smaller block sizes (2 or 4 warps)
 * only on some GPUs (660Ti) with large enough grid (>= 48^3) due to load/store units being overloaded
 * (ldst_fu_utilization metric maxed out in nvprof). Runtime block size choice might be nice to have.
 * This has been tried on architectures up to Maxwell (GTX 750) and it would be good to revisit this.
 */
//! Spreading max block size in threads
constexpr int c_spreadMaxThreadsPerBlock = c_spreadMaxWarpsPerBlock * warp_size;

//! Texture references for CC 2.x
texture<int, 1, cudaReadModeElementType>   gridlineIndicesTableTextureRef;
texture<float, 1, cudaReadModeElementType> fractShiftsTableTextureRef;

/*! Returns the reference to the gridlineIndices texture. */
const struct texture<int, 1, cudaReadModeElementType> &pme_gpu_get_gridline_texref()
{
    return gridlineIndicesTableTextureRef;
}

/*! Returns the reference to the fractShifts texture. */
const struct texture<float, 1, cudaReadModeElementType> &pme_gpu_get_fract_shifts_texref()
{
    return fractShiftsTableTextureRef;
}

/*! \brief
 * General purpose function for loading atom-related data from global to shared memory.
 *
 * \tparam[in] T                 Data type (float/int/...)
 * \tparam[in] atomsPerBlock     Number of atoms processed by a block - should be accounted for in the size of the shared memory array.
 * \tparam[in] dataCountPerAtom  Number of data elements per single atom (e.g. DIM for an rvec coordinates array).
 * \param[in]  kernelParams      Input PME CUDA data in constant memory.
 * \param[out] sm_destination    Shared memory array for output.
 * \param[in]  gm_source         Global memory array for input.
 */
template <typename T,
          const int atomsPerBlock,
          const int dataCountPerAtom>
__device__ __forceinline__
void pme_gpu_stage_atom_data(const PmeGpuCudaKernelParams kernelParams,
                             T * __restrict__             sm_destination,
                             const T * __restrict__       gm_source)
{
    static_assert(c_usePadding, "With padding disabled, index checking should be fixed to account for spline theta/dtheta per-warp alignment");
    const int threadLocalIndex = ((threadIdx.z * blockDim.y + threadIdx.y) * blockDim.x) + threadIdx.x;
    const int localIndex       = threadLocalIndex;
    const int globalIndexBase  = blockIdx.x * atomsPerBlock * dataCountPerAtom;
    const int globalIndex      = globalIndexBase + localIndex;
    const int globalCheck      = pme_gpu_check_atom_data_index(globalIndex, kernelParams.atoms.nAtoms * dataCountPerAtom);
    if ((localIndex < atomsPerBlock * dataCountPerAtom) & globalCheck)
    {
        assert(isfinite(float(gm_source[globalIndex])));
        sm_destination[localIndex] = gm_source[globalIndex];
    }
}
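
/* Usage sketch for the staging helper above (mirrors the calls in pme_spline_and_spread_kernel
 * further below; names are those used there). Staging DIM floats per atom copies
 * atomsPerBlock * DIM consecutive floats, one element per thread:
 *     pme_gpu_stage_atom_data<float, atomsPerBlock, DIM>(kernelParams, sm_coordinates,
 *                                                        kernelParams.atoms.d_coordinates);
 * after which block b holds the coordinates of atoms [b * atomsPerBlock, (b + 1) * atomsPerBlock).
 */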

/*! \brief
 * PME GPU spline parameter and gridline indices calculation.
 * This corresponds to the CPU functions calc_interpolation_idx() and make_bsplines().
 * First stage of the whole kernel.
 *
 * \tparam[in] order                PME interpolation order.
 * \tparam[in] atomsPerBlock        Number of atoms processed by a block - should be accounted for
 *                                  in the sizes of the shared memory arrays.
 * \param[in]  kernelParams         Input PME CUDA data in constant memory.
 * \param[in]  atomIndexOffset      Starting atom index for the execution block w.r.t. global memory.
 * \param[in]  sm_coordinates       Atom coordinates in the shared memory.
 * \param[in]  sm_coefficients      Atom charges/coefficients in the shared memory.
 * \param[out] sm_theta             Atom spline values in the shared memory.
 * \param[out] sm_gridlineIndices   Atom gridline indices in the shared memory.
 */
template <const int order,
          const int atomsPerBlock>
__device__ __forceinline__ void calculate_splines(const PmeGpuCudaKernelParams  kernelParams,
                                                  const int                     atomIndexOffset,
                                                  const float3 * __restrict__   sm_coordinates,
                                                  const float * __restrict__    sm_coefficients,
                                                  float * __restrict__          sm_theta,
                                                  int * __restrict__            sm_gridlineIndices)
{
    /* Global memory pointers for output */
    float * __restrict__ gm_theta           = kernelParams.atoms.d_theta;
    float * __restrict__ gm_dtheta          = kernelParams.atoms.d_dtheta;
    int * __restrict__   gm_gridlineIndices = kernelParams.atoms.d_gridlineIndices;

    /* Fractional coordinates */
    __shared__ float sm_fractCoords[atomsPerBlock * DIM];

    /* Thread index w.r.t. block */
    const int threadLocalId = (threadIdx.z * (blockDim.x * blockDim.y))
        + (threadIdx.y * blockDim.x) + threadIdx.x;
    /* Warp index w.r.t. block - could probably be obtained easier? */
    const int warpIndex = threadLocalId / warp_size;
    /* Thread index w.r.t. warp */
    const int threadWarpIndex = threadLocalId % warp_size;
    /* Atom index w.r.t. warp - alternating 0 1 0 1 .. */
    const int atomWarpIndex = threadWarpIndex % PME_SPREADGATHER_ATOMS_PER_WARP;
    /* Atom index w.r.t. block/shared memory */
    const int atomIndexLocal = warpIndex * PME_SPREADGATHER_ATOMS_PER_WARP + atomWarpIndex;

    /* Atom index w.r.t. global memory */
    const int atomIndexGlobal = atomIndexOffset + atomIndexLocal;
    /* Spline contribution index in one dimension */
    const int orderIndex = threadWarpIndex / (PME_SPREADGATHER_ATOMS_PER_WARP * DIM);
    /* Dimension index */
    const int dimIndex = (threadWarpIndex - orderIndex * (PME_SPREADGATHER_ATOMS_PER_WARP * DIM)) / PME_SPREADGATHER_ATOMS_PER_WARP;

    /* Multi-purpose index of rvec/ivec atom data */
    const int sharedMemoryIndex = atomIndexLocal * DIM + dimIndex;
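
    /* Worked example of the decomposition above (illustrative only; assumes warp_size == 32,
     * PME_SPREADGATHER_ATOMS_PER_WARP == 2, DIM == 3 and order == 4): for threadWarpIndex 0..31,
     * atomWarpIndex == threadWarpIndex % 2, dimIndex == (threadWarpIndex % 6) / 2 and
     * orderIndex == threadWarpIndex / 6. With PME_GPU_PARALLEL_SPLINE == 0 only threads 0..5 of
     * each warp pass the localCheck below (2 atoms x 3 dimensions); with
     * PME_GPU_PARALLEL_SPLINE == 1 threads 0..23 do (additionally x 4 spline contributions).
     */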

    /* Spline parameters need a working buffer.
     * With PME_GPU_PARALLEL_SPLINE == 0 it is just a local array of (order) values for some of the threads, which is fine;
     * With PME_GPU_PARALLEL_SPLINE == 1 (order) times more threads are involved, so the shared memory is used to avoid overhead.
     * The buffer's size, striding and indexing are adjusted accordingly.
     * The buffer is accessed with SPLINE_DATA_PTR and SPLINE_DATA macros.
     */
#if PME_GPU_PARALLEL_SPLINE
    const int        splineDataStride = atomsPerBlock * DIM;
    const int        splineDataIndex  = sharedMemoryIndex;
    __shared__ float sm_splineData[splineDataStride * order];
    float           *splineDataPtr = sm_splineData;
#else
    const int        splineDataStride = 1;
    const int        splineDataIndex  = 0;
    float            splineData[splineDataStride * order];
    float           *splineDataPtr = splineData;
#endif

#define SPLINE_DATA_PTR(i) (splineDataPtr + ((i) * splineDataStride + splineDataIndex))
#define SPLINE_DATA(i) (*SPLINE_DATA_PTR(i))
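
    /* Accessor sketch (illustrative): with PME_GPU_PARALLEL_SPLINE == 0, SPLINE_DATA(i) is simply
     * the per-thread splineData[i]; with PME_GPU_PARALLEL_SPLINE == 1 it expands to
     * sm_splineData[i * atomsPerBlock * DIM + sharedMemoryIndex], i.e. value i of this thread's
     * (atom, dimension) pair in the shared buffer.
     */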

    const int localCheck  = (dimIndex < DIM) && (orderIndex < (PME_GPU_PARALLEL_SPLINE ? order : 1));
    const int globalCheck = pme_gpu_check_atom_data_index(atomIndexGlobal, kernelParams.atoms.nAtoms);

    if (localCheck && globalCheck)
    {
        /* Indices interpolation */

        if (orderIndex == 0)
        {
            int          tableIndex, tInt;
            float        n, t;
            const float3 x = sm_coordinates[atomIndexLocal];
            /* Accessing fields in fshOffset/nXYZ/recipbox/... with dimIndex offset
             * puts them into local memory(!) instead of accessing the constant memory directly.
             * That's the reason for the switch, to unroll explicitly.
             * The commented parts correspond to the 0 components of the recipbox.
             */
            switch (dimIndex)
            {
                case XX:
                    tableIndex = kernelParams.grid.tablesOffsets[XX];
                    n          = kernelParams.grid.realGridSizeFP[XX];
                    t          = x.x * kernelParams.current.recipBox[dimIndex][XX] + x.y * kernelParams.current.recipBox[dimIndex][YY] + x.z * kernelParams.current.recipBox[dimIndex][ZZ];
                    break;

                case YY:
                    tableIndex = kernelParams.grid.tablesOffsets[YY];
                    n          = kernelParams.grid.realGridSizeFP[YY];
                    t          = /*x.x * kernelParams.current.recipBox[dimIndex][XX] + */ x.y * kernelParams.current.recipBox[dimIndex][YY] + x.z * kernelParams.current.recipBox[dimIndex][ZZ];
                    break;

                case ZZ:
                    tableIndex = kernelParams.grid.tablesOffsets[ZZ];
                    n          = kernelParams.grid.realGridSizeFP[ZZ];
                    t          = /*x.x * kernelParams.current.recipBox[dimIndex][XX] + x.y * kernelParams.current.recipBox[dimIndex][YY] + */ x.z * kernelParams.current.recipBox[dimIndex][ZZ];
                    break;
            }
            const float shift = c_pmeMaxUnitcellShift;
            /* Fractional coordinates along box vectors, adding a positive shift to ensure t is positive for triclinic boxes */
            t    = (t + shift) * n;
            tInt = (int)t;
            sm_fractCoords[sharedMemoryIndex] = t - tInt;
            tableIndex                       += tInt;
            assert(tInt >= 0);
            assert(tInt < c_pmeNeighborUnitcellCount * n);
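
            /* Recap of the arithmetic above (illustrative, not additional logic): for dimension d,
             * t = (x . recipBox[d] + shift) * n_d and tInt = (int)t, so t - tInt is the fractional
             * offset within a grid cell, and tablesOffsets[d] + tInt indexes the per-dimension
             * fractional-shift and gridline-index tables fetched below.
             */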

            // TODO have shared table for both parameters to share the fetch, as index is always same?
            // TODO compare texture/LDG performance
            sm_fractCoords[sharedMemoryIndex] +=
                fetchFromParamLookupTable(kernelParams.grid.d_fractShiftsTable,
                                          kernelParams.fractShiftsTableTexture,
#if DISABLE_CUDA_TEXTURES == 0
                                          fractShiftsTableTextureRef,
#endif
                                          tableIndex);
            sm_gridlineIndices[sharedMemoryIndex] =
                fetchFromParamLookupTable(kernelParams.grid.d_gridlineIndicesTable,
                                          kernelParams.gridlineIndicesTableTexture,
#if DISABLE_CUDA_TEXTURES == 0
                                          gridlineIndicesTableTextureRef,
#endif
                                          tableIndex);
            gm_gridlineIndices[atomIndexOffset * DIM + sharedMemoryIndex] = sm_gridlineIndices[sharedMemoryIndex];
        }

        /* B-spline calculation */

        const int chargeCheck = pme_gpu_check_atom_charge(sm_coefficients[atomIndexLocal]);
        if (chargeCheck)
        {
            float       div;
            int         o = orderIndex; // This is an index that is set once for PME_GPU_PARALLEL_SPLINE == 1

            const float dr = sm_fractCoords[sharedMemoryIndex];
            assert(isfinite(dr));

            /* dr is relative offset from lower cell limit */
            *SPLINE_DATA_PTR(order - 1) = 0.0f;
            *SPLINE_DATA_PTR(1)         = dr;
            *SPLINE_DATA_PTR(0)         = 1.0f - dr;
#pragma unroll
            for (int k = 3; k < order; k++)
            {
                div                     = 1.0f / (k - 1.0f);
                *SPLINE_DATA_PTR(k - 1) = div * dr * SPLINE_DATA(k - 2);
#pragma unroll
                for (int l = 1; l < (k - 1); l++)
                {
                    *SPLINE_DATA_PTR(k - l - 1) = div * ((dr + l) * SPLINE_DATA(k - l - 2) + (k - l - dr) * SPLINE_DATA(k - l - 1));
                }
                *SPLINE_DATA_PTR(0) = div * (1.0f - dr) * SPLINE_DATA(0);
            }
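
            /* Worked instance of the recursion above (illustrative; for order == 4 only the k == 3
             * pass runs): starting from {1 - dr, dr} it produces the three order-3 spline values
             *     SPLINE_DATA(0) == 0.5f * (1.0f - dr) * (1.0f - dr)
             *     SPLINE_DATA(1) == 0.5f * ((dr + 1.0f) * (1.0f - dr) + (2.0f - dr) * dr)
             *     SPLINE_DATA(2) == 0.5f * dr * dr
             * The final pass further below (after the dtheta stores) lifts these to order 4.
             */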

            const int thetaGlobalOffsetBase = atomIndexOffset * DIM * order;

            /* Differentiation and storing the spline derivatives (dtheta) */
#if !PME_GPU_PARALLEL_SPLINE
            // With PME_GPU_PARALLEL_SPLINE == 1, o is already set to orderIndex;
            // With PME_GPU_PARALLEL_SPLINE == 0, we loop o over range(order).
#pragma unroll
            for (o = 0; o < order; o++)
#endif
            {
                const int   thetaIndex       = PME_SPLINE_THETA_STRIDE * (((o + order * warpIndex) * DIM + dimIndex) * PME_SPREADGATHER_ATOMS_PER_WARP + atomWarpIndex);
                const int   thetaGlobalIndex = thetaGlobalOffsetBase + thetaIndex;

                const float dtheta = ((o > 0) ? SPLINE_DATA(o - 1) : 0.0f) - SPLINE_DATA(o);
                assert(isfinite(dtheta));
                gm_dtheta[thetaGlobalIndex] = dtheta;
            }

            div = 1.0f / (order - 1.0f);
            *SPLINE_DATA_PTR(order - 1) = div * dr * SPLINE_DATA(order - 2);
#pragma unroll
            for (int k = 1; k < (order - 1); k++)
            {
                *SPLINE_DATA_PTR(order - k - 1) = div * ((dr + k) * SPLINE_DATA(order - k - 2) + (order - k - dr) * SPLINE_DATA(order - k - 1));
            }
            *SPLINE_DATA_PTR(0) = div * (1.0f - dr) * SPLINE_DATA(0);

            /* Storing the spline values (theta) */
#if !PME_GPU_PARALLEL_SPLINE
            // See comment for the loop above
#pragma unroll
            for (o = 0; o < order; o++)
#endif
            {
                const int thetaIndex       = PME_SPLINE_THETA_STRIDE * (((o + order * warpIndex) * DIM + dimIndex) * PME_SPREADGATHER_ATOMS_PER_WARP + atomWarpIndex);
                const int thetaGlobalIndex = thetaGlobalOffsetBase + thetaIndex;

                sm_theta[thetaIndex] = SPLINE_DATA(o);
                assert(isfinite(sm_theta[thetaIndex]));
                gm_theta[thetaGlobalIndex] = SPLINE_DATA(o);
            }
        }
    }
}

/*! \brief
 * Charge spreading onto the grid.
 * This corresponds to the CPU function spread_coefficients_bsplines_thread().
 * Second stage of the whole kernel.
 *
 * \tparam[in] order                PME interpolation order.
 * \tparam[in] wrapX                A boolean which tells if the grid overlap in dimension X should be wrapped.
 * \tparam[in] wrapY                A boolean which tells if the grid overlap in dimension Y should be wrapped.
 * \param[in]  kernelParams         Input PME CUDA data in constant memory.
 * \param[in]  atomIndexOffset      Starting atom index for the execution block w.r.t. global memory.
 * \param[in]  sm_coefficients      Atom coefficients/charges in the shared memory.
 * \param[in]  sm_gridlineIndices   Atom gridline indices in the shared memory.
 * \param[in]  sm_theta             Atom spline values in the shared memory.
 */
template <
    const int order, const bool wrapX, const bool wrapY>
__device__ __forceinline__ void spread_charges(const PmeGpuCudaKernelParams  kernelParams,
                                               int                           atomIndexOffset,
                                               const float * __restrict__    sm_coefficients,
                                               const int * __restrict__      sm_gridlineIndices,
                                               const float * __restrict__    sm_theta)
{
    /* Global memory pointer to the output grid */
    float * __restrict__ gm_grid = kernelParams.grid.d_realGrid;

    const int nx  = kernelParams.grid.realGridSize[XX];
    const int ny  = kernelParams.grid.realGridSize[YY];
    const int nz  = kernelParams.grid.realGridSize[ZZ];
    const int pny = kernelParams.grid.realGridSizePadded[YY];
    const int pnz = kernelParams.grid.realGridSizePadded[ZZ];

    const int offx = 0, offy = 0, offz = 0; // unused for now

    const int atomIndexLocal  = threadIdx.z;
    const int atomIndexGlobal = atomIndexOffset + atomIndexLocal;

    const int globalCheck = pme_gpu_check_atom_data_index(atomIndexGlobal, kernelParams.atoms.nAtoms);
    const int chargeCheck = pme_gpu_check_atom_charge(sm_coefficients[atomIndexLocal]);
    if (chargeCheck & globalCheck)
    {
        // Spline Y/Z coordinates
        const int ithy   = threadIdx.y;
        const int ithz   = threadIdx.x;
        const int ixBase = sm_gridlineIndices[atomIndexLocal * DIM + XX] - offx;
        int       iy     = sm_gridlineIndices[atomIndexLocal * DIM + YY] - offy + ithy;
        if (wrapY & (iy >= ny))
        {
            iy -= ny;
        }
        int iz = sm_gridlineIndices[atomIndexLocal * DIM + ZZ] - offz + ithz;
        if (iz >= nz)
        {
            iz -= nz;
        }

        /* Atom index w.r.t. warp - alternating 0 1 0 1 .. */
        const int atomWarpIndex = atomIndexLocal % PME_SPREADGATHER_ATOMS_PER_WARP;
        /* Warp index w.r.t. block - could probably be obtained easier? */
        const int warpIndex       = atomIndexLocal / PME_SPREADGATHER_ATOMS_PER_WARP;
        const int dimStride       = PME_SPLINE_THETA_STRIDE * PME_SPREADGATHER_ATOMS_PER_WARP;
        const int orderStride     = dimStride * DIM;
        const int thetaOffsetBase = orderStride * order * warpIndex + atomWarpIndex;
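
        /* Layout illustration (assumes order == 4, PME_SPLINE_THETA_STRIDE == 1 and
         * PME_SPREADGATHER_ATOMS_PER_WARP == 2, the values in pme.cuh at the time of writing):
         * dimStride == 2 and orderStride == 6, so the theta value for spline index o, dimension d,
         * atom a of warp w sits at sm_theta[24 * w + a + 6 * o + 2 * d] - the same indexing
         * calculate_splines() used when writing sm_theta above.
         */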

        const float  thetaZ      = sm_theta[thetaOffsetBase + ithz * orderStride + ZZ * dimStride];
        const float  thetaY      = sm_theta[thetaOffsetBase + ithy * orderStride + YY * dimStride];
        const float  constVal    = thetaZ * thetaY * sm_coefficients[atomIndexLocal];
        assert(isfinite(constVal));
        const int    constOffset = iy * pnz + iz;
        const float *sm_thetaX   = sm_theta + (thetaOffsetBase + XX * dimStride);
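
        /* Grid indexing recap (illustrative): the real grid is stored row-major with padded Y/Z
         * extents, so cell (ix, iy, iz) maps to gm_grid[(ix * pny + iy) * pnz + iz];
         * constOffset pre-computes the iy * pnz + iz part shared by all order iterations over X below.
         */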
#pragma unroll
        for (int ithx = 0; (ithx < order); ithx++)
        {
            int ix = ixBase + ithx;
            if (wrapX & (ix >= nx))
            {
                ix -= nx;
            }
            const int gridIndexGlobal = ix * pny * pnz + constOffset;
            assert(isfinite(sm_thetaX[ithx * orderStride]));
            assert(isfinite(gm_grid[gridIndexGlobal]));
            atomicAdd(gm_grid + gridIndexGlobal, sm_thetaX[ithx * orderStride] * constVal);
        }
    }
}

/*! \brief
 * A spline computation and charge spreading kernel function.
 *
 * \tparam[in] order                PME interpolation order.
 * \tparam[in] computeSplines       A boolean which tells if the spline parameter and
 *                                  gridline indices' computation should be performed.
 * \tparam[in] spreadCharges        A boolean which tells if the charge spreading should be performed.
 * \tparam[in] wrapX                A boolean which tells if the grid overlap in dimension X should be wrapped.
 * \tparam[in] wrapY                A boolean which tells if the grid overlap in dimension Y should be wrapped.
 * \param[in]  kernelParams         Input PME CUDA data in constant memory.
 */
template <
    const int order,
    const bool computeSplines,
    const bool spreadCharges,
    const bool wrapX,
    const bool wrapY
    >
__launch_bounds__(c_spreadMaxThreadsPerBlock)
__global__ void pme_spline_and_spread_kernel(const PmeGpuCudaKernelParams kernelParams)
{
    const int        atomsPerBlock = c_spreadMaxThreadsPerBlock / PME_SPREADGATHER_THREADS_PER_ATOM;
    // Gridline indices, ivec
    __shared__ int   sm_gridlineIndices[atomsPerBlock * DIM];
    // Charges
    __shared__ float sm_coefficients[atomsPerBlock];
    // Spline values
    __shared__ float sm_theta[atomsPerBlock * DIM * order];

    const int        atomIndexOffset = blockIdx.x * atomsPerBlock;

    /* Staging coefficients/charges for both spline and spread */
    pme_gpu_stage_atom_data<float, atomsPerBlock, 1>(kernelParams, sm_coefficients, kernelParams.atoms.d_coefficients);

    if (computeSplines)
    {
        /* Staging coordinates */
        __shared__ float sm_coordinates[DIM * atomsPerBlock];
        pme_gpu_stage_atom_data<float, atomsPerBlock, DIM>(kernelParams, sm_coordinates, kernelParams.atoms.d_coordinates);

        __syncthreads();
        calculate_splines<order, atomsPerBlock>(kernelParams, atomIndexOffset, (const float3 *)sm_coordinates,
                                                sm_coefficients, sm_theta, sm_gridlineIndices);
    }
    else
    {
        /* Staging the data for spread
         * (the data is assumed to be in GPU global memory with proper layout already,
         * as in after running the spline kernel)
         */
        /* Spline data - only thetas (dthetas will only be needed in gather) */
        pme_gpu_stage_atom_data<float, atomsPerBlock, DIM * order>(kernelParams, sm_theta, kernelParams.atoms.d_theta);
        /* Gridline indices */
        pme_gpu_stage_atom_data<int, atomsPerBlock, DIM>(kernelParams, sm_gridlineIndices, kernelParams.atoms.d_gridlineIndices);

        __syncthreads();
    }

    /* Spreading */
    if (spreadCharges)
    {
        spread_charges<order, wrapX, wrapY>(kernelParams, atomIndexOffset, sm_coefficients, sm_gridlineIndices, sm_theta);
    }
}

void pme_gpu_spread(const PmeGpu    *pmeGpu,
                    int gmx_unused   gridIndex,
                    real            *h_grid,
                    bool             computeSplines,
                    bool             spreadCharges)
{
    GMX_ASSERT(computeSplines || spreadCharges, "PME spline/spread kernel has invalid input (nothing to do)");
    cudaStream_t stream          = pmeGpu->archSpecific->pmeStream;
    const auto  *kernelParamsPtr = pmeGpu->kernelParams.get();
    GMX_ASSERT(kernelParamsPtr->atoms.nAtoms > 0, "No atom data in PME GPU spread");

    const int order         = pmeGpu->common->pme_order;
    const int atomsPerBlock = c_spreadMaxThreadsPerBlock / PME_SPREADGATHER_THREADS_PER_ATOM;
    // TODO: pick smaller block size in runtime if needed
    // (e.g. on 660 Ti where 50% occupancy is ~25% faster than 100% occupancy with RNAse (~17.8k atoms))
    // If doing so, change atomsPerBlock in the kernels as well.
    // TODO: test varying block sizes on modern architectures as well
    // TODO: also consider using cudaFuncSetCacheConfig() for preferring shared memory on older architectures
    // (for spline data mostly, together with varying PME_GPU_PARALLEL_SPLINE define)
    GMX_ASSERT(!c_usePadding || !(PME_ATOM_DATA_ALIGNMENT % atomsPerBlock), "inconsistent atom data padding vs. spreading block size");

    dim3 nBlocks(pmeGpu->nAtomsPadded / atomsPerBlock);
    dim3 dimBlock(order, order, atomsPerBlock);
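
    /* Launch-configuration sketch (illustrative numbers only; assumes warp_size == 32 and
     * PME_SPREADGATHER_THREADS_PER_ATOM == order * order == 16 for the order-4 case below):
     * c_spreadMaxThreadsPerBlock == 8 * 32 == 256, so atomsPerBlock == 16,
     * dimBlock == (4, 4, 16) == 256 threads per block, and nBlocks == nAtomsPadded / 16.
     */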

    // These should later check for PME decomposition
    const bool wrapX = true;
    const bool wrapY = true;
    GMX_UNUSED_VALUE(wrapX);
    GMX_UNUSED_VALUE(wrapY);

    switch (order)
    {
        case 4:
            // TODO: cleaner unroll with some template trick?
            if (computeSplines)
            {
                if (spreadCharges)
                {
                    pme_gpu_start_timing(pmeGpu, gtPME_SPLINEANDSPREAD);
                    pme_spline_and_spread_kernel<4, true, true, wrapX, wrapY> <<< nBlocks, dimBlock, 0, stream>>> (*kernelParamsPtr);
                    CU_LAUNCH_ERR("pme_spline_and_spread_kernel");
                    pme_gpu_stop_timing(pmeGpu, gtPME_SPLINEANDSPREAD);
                }
                else
                {
                    pme_gpu_start_timing(pmeGpu, gtPME_SPLINE);
                    pme_spline_and_spread_kernel<4, true, false, wrapX, wrapY> <<< nBlocks, dimBlock, 0, stream>>> (*kernelParamsPtr);
                    CU_LAUNCH_ERR("pme_spline_and_spread_kernel");
                    pme_gpu_stop_timing(pmeGpu, gtPME_SPLINE);
                }
            }
            else
            {
                pme_gpu_start_timing(pmeGpu, gtPME_SPREAD);
                pme_spline_and_spread_kernel<4, false, true, wrapX, wrapY> <<< nBlocks, dimBlock, 0, stream>>> (*kernelParamsPtr);
                CU_LAUNCH_ERR("pme_spline_and_spread_kernel");
                pme_gpu_stop_timing(pmeGpu, gtPME_SPREAD);
            }
            break;

        default:
            GMX_THROW(gmx::NotImplementedError("The code for pme_order != 4 was not tested!"));
    }

    const bool copyBackGrid = spreadCharges && (pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_FFT(pmeGpu));
    if (copyBackGrid)
    {
        pme_gpu_copy_output_spread_grid(pmeGpu, h_grid);
    }
    const bool copyBackAtomData = computeSplines && (pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_gather(pmeGpu));
    if (copyBackAtomData)
    {
        pme_gpu_copy_output_spread_atom_data(pmeGpu);
    }
}