/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013-2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \internal \file
 * \brief Implements PME GPU spline calculation and charge spreading in CUDA.
 * TODO: consider always pre-sorting particles (as in DD case).
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 */
#include "gmxpre.h"

#include <cassert>

#include "gromacs/ewald/pme.h"
#include "gromacs/gpu_utils/cuda_kernel_utils.cuh"
#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/gmxassert.h"

#include "pme.cuh"
#include "pme-grid.h"
#include "pme-timings.cuh"
/*
 * This define affects the spline calculation behaviour in the kernel.
 * 0: a single GPU thread handles a single dimension of a single particle (calculating and storing (order) spline values and derivatives).
 * 1: (order) threads do redundant work on this same task, each one stores only a single theta and single dtheta into global arrays.
 * The only efficiency difference is fewer global store operations, countered by more redundant spline computation.
 *
 * TODO: estimate if this should be a boolean parameter (and add it to the unit test if so).
 */
#define PME_GPU_PARALLEL_SPLINE 0
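/* An illustrative sketch (not compiled) of the two work distributions described above, with store()
 * standing in for the global-memory writes done further below; assuming order = 4:
 *
 *   #if PME_GPU_PARALLEL_SPLINE == 0          // one thread per (atom, dimension)
 *       for (int o = 0; o < order; o++) { store(theta[o]); store(dtheta[o]); }
 *   #else                                     // (order) threads per (atom, dimension)
 *       store(theta[orderIndex]); store(dtheta[orderIndex]);   // each thread stores one value
 *   #endif
 */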
//! Spreading max block width in warps picked among powers of 2 (2, 4, 8, 16) for max. occupancy and min. runtime in most cases
constexpr int c_spreadMaxWarpsPerBlock = 8;
/* TODO: it has been observed that the kernel can be faster with smaller block sizes (2 or 4 warps)
 * only on some GPUs (660Ti) with large enough grid (>= 48^3) due to load/store units being overloaded
 * (ldst_fu_utilization metric maxed out in nvprof). Runtime block size choice might be nice to have.
 * This has been tried on architectures up to Maxwell (GTX 750) and it would be good to revisit this.
 */
//! Spreading max block size in threads
constexpr int c_spreadMaxThreadsPerBlock = c_spreadMaxWarpsPerBlock * warp_size;
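/* A worked example of the sizes above, assuming warp_size is 32 and PME_SPREADGATHER_THREADS_PER_ATOM
 * is order^2 (= 16 for the default pme_order of 4): c_spreadMaxThreadsPerBlock = 8 * 32 = 256 threads,
 * so a spreading block covers 256 / 16 = 16 atoms.
 */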
/*! \brief
 * General purpose function for loading atom-related data from global to shared memory.
 *
 * \tparam[in] T                 Data type (float/int/...)
 * \tparam[in] atomsPerBlock     Number of atoms processed by a block - should be accounted for in the size of the shared memory array.
 * \tparam[in] dataCountPerAtom  Number of data elements per single atom (e.g. DIM for an rvec coordinates array).
 * \param[in]  kernelParams      Input PME CUDA data in constant memory.
 * \param[out] sm_destination    Shared memory array for output.
 * \param[in]  gm_source         Global memory array for input.
 */
template <typename T,
          const int atomsPerBlock,
          const int dataCountPerAtom>
__device__ __forceinline__
void pme_gpu_stage_atom_data(const PmeGpuCudaKernelParams kernelParams,
                             T * __restrict__             sm_destination,
                             const T * __restrict__       gm_source)
{
    static_assert(c_usePadding, "With padding disabled, index checking should be fixed to account for spline theta/dtheta per-warp alignment");
    const int threadLocalIndex = ((threadIdx.z * blockDim.y + threadIdx.y) * blockDim.x) + threadIdx.x;
    const int localIndex       = threadLocalIndex;
    const int globalIndexBase  = blockIdx.x * atomsPerBlock * dataCountPerAtom;
    const int globalIndex      = globalIndexBase + localIndex;
    const int globalCheck      = pme_gpu_check_atom_data_index(globalIndex, kernelParams.atoms.nAtoms * dataCountPerAtom);
    if ((localIndex < atomsPerBlock * dataCountPerAtom) & globalCheck)
    {
        assert(isfinite(float(gm_source[globalIndex])));
        sm_destination[localIndex] = gm_source[globalIndex];
    }
}
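/* A usage sketch (taken from the spline/spread kernel further below): staging the per-atom charges,
 * one float per atom, into a shared-memory array sized for the whole block. This is illustrative only;
 * the actual calls live in pme_spline_and_spread_kernel().
 *
 *   __shared__ float sm_coefficients[atomsPerBlock];
 *   pme_gpu_stage_atom_data<float, atomsPerBlock, 1>(kernelParams, sm_coefficients,
 *                                                    kernelParams.atoms.d_coefficients);
 */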
/*! \brief
 * PME GPU spline parameter and gridline indices calculation.
 * This corresponds to the CPU functions calc_interpolation_idx() and make_bsplines().
 * First stage of the whole kernel.
 *
 * \tparam[in] order                PME interpolation order.
 * \tparam[in] atomsPerBlock        Number of atoms processed by a block - should be accounted for
 *                                  in the sizes of the shared memory arrays.
 * \param[in]  kernelParams         Input PME CUDA data in constant memory.
 * \param[in]  atomIndexOffset      Starting atom index for the execution block w.r.t. global memory.
 * \param[in]  sm_coordinates       Atom coordinates in the shared memory.
 * \param[in]  sm_coefficients      Atom charges/coefficients in the shared memory.
 * \param[out] sm_theta             Atom spline values in the shared memory.
 * \param[out] sm_gridlineIndices   Atom gridline indices in the shared memory.
 */
template <const int order,
          const int atomsPerBlock>
__device__ __forceinline__ void calculate_splines(const PmeGpuCudaKernelParams kernelParams,
                                                   const int                    atomIndexOffset,
                                                   const float3 * __restrict__  sm_coordinates,
                                                   const float * __restrict__   sm_coefficients,
                                                   float * __restrict__         sm_theta,
                                                   int * __restrict__           sm_gridlineIndices)
{
    /* Global memory pointers for output */
    float * __restrict__ gm_theta           = kernelParams.atoms.d_theta;
    float * __restrict__ gm_dtheta          = kernelParams.atoms.d_dtheta;
    int * __restrict__   gm_gridlineIndices = kernelParams.atoms.d_gridlineIndices;

    /* Fractional coordinates */
    __shared__ float sm_fractCoords[atomsPerBlock * DIM];

    /* Thread index w.r.t. block */
    const int threadLocalId = (threadIdx.z * (blockDim.x * blockDim.y))
        + (threadIdx.y * blockDim.x) + threadIdx.x;
    /* Warp index w.r.t. block - could probably be obtained easier? */
    const int warpIndex = threadLocalId / warp_size;
    /* Thread index w.r.t. warp */
    const int threadWarpIndex = threadLocalId % warp_size;
    /* Atom index w.r.t. warp - alternating 0 1 0 1 .. */
    const int atomWarpIndex = threadWarpIndex % PME_SPREADGATHER_ATOMS_PER_WARP;
    /* Atom index w.r.t. block/shared memory */
    const int atomIndexLocal = warpIndex * PME_SPREADGATHER_ATOMS_PER_WARP + atomWarpIndex;

    /* Atom index w.r.t. global memory */
    const int atomIndexGlobal = atomIndexOffset + atomIndexLocal;
    /* Spline contribution index in one dimension */
    const int orderIndex = threadWarpIndex / (PME_SPREADGATHER_ATOMS_PER_WARP * DIM);
    /* Dimension index */
    const int dimIndex = (threadWarpIndex - orderIndex * (PME_SPREADGATHER_ATOMS_PER_WARP * DIM)) / PME_SPREADGATHER_ATOMS_PER_WARP;

    /* Multi-purpose index of rvec/ivec atom data */
    const int sharedMemoryIndex = atomIndexLocal * DIM + dimIndex;
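    /* A worked mapping example for the indices above (assuming a 32-thread warp, order = 4 and thus
     * PME_SPREADGATHER_ATOMS_PER_WARP = 2): threadWarpIndex = 7 gives atomWarpIndex = 1, orderIndex = 1,
     * dimIndex = 0, i.e. spline index 1 of the x dimension of the warp's second atom. With
     * PME_GPU_PARALLEL_SPLINE == 1 threads 0..23 of each warp pass the localCheck below; with 0,
     * only the six threads having orderIndex == 0 do.
     */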
    /* Spline parameters need a working buffer.
     * With PME_GPU_PARALLEL_SPLINE == 0 it is just a local array of (order) values for some of the threads, which is fine;
     * With PME_GPU_PARALLEL_SPLINE == 1 (order) times more threads are involved, so the shared memory is used to avoid overhead.
     * The buffer's size, striding and indexing are adjusted accordingly.
     * The buffer is accessed with SPLINE_DATA_PTR and SPLINE_DATA macros.
     */
#if PME_GPU_PARALLEL_SPLINE
    const int        splineDataStride = atomsPerBlock * DIM;
    const int        splineDataIndex  = sharedMemoryIndex;
    __shared__ float sm_splineData[splineDataStride * order];
    float           *splineDataPtr    = sm_splineData;
#else
    const int        splineDataStride = 1;
    const int        splineDataIndex  = 0;
    float            splineData[splineDataStride * order];
    float           *splineDataPtr    = splineData;
#endif

#define SPLINE_DATA_PTR(i) (splineDataPtr + ((i) * splineDataStride + splineDataIndex))
#define SPLINE_DATA(i) (*SPLINE_DATA_PTR(i))
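    /* Illustrative expansion of the macros above (not part of the build): with PME_GPU_PARALLEL_SPLINE == 0
     * the stride is 1 and the index is 0, so SPLINE_DATA(i) is simply splineData[i] in the thread-local
     * array; with PME_GPU_PARALLEL_SPLINE == 1 it becomes sm_splineData[i * atomsPerBlock * DIM + sharedMemoryIndex],
     * i.e. each (atom, dimension) slot owns one float per spline order index in the shared buffer.
     */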
    const int localCheck  = (dimIndex < DIM) && (orderIndex < (PME_GPU_PARALLEL_SPLINE ? order : 1));
    const int globalCheck = pme_gpu_check_atom_data_index(atomIndexGlobal, kernelParams.atoms.nAtoms);

    if (localCheck && globalCheck)
    {
        /* Indices interpolation */

        if (orderIndex == 0)
        {
            int          tableIndex, tInt;
            float        n, t;
            const float3 x = sm_coordinates[atomIndexLocal];
            /* Accessing fields in fshOffset/nXYZ/recipbox/... with dimIndex offset
             * puts them into local memory(!) instead of accessing the constant memory directly.
             * That's the reason for the switch, to unroll explicitly.
             * The commented parts correspond to the 0 components of the recipbox.
             */
            switch (dimIndex)
            {
                case XX:
                    tableIndex = kernelParams.grid.tablesOffsets[XX];
                    n          = kernelParams.grid.realGridSizeFP[XX];
                    t          = x.x * kernelParams.current.recipBox[dimIndex][XX] + x.y * kernelParams.current.recipBox[dimIndex][YY] + x.z * kernelParams.current.recipBox[dimIndex][ZZ];
                    break;

                case YY:
                    tableIndex = kernelParams.grid.tablesOffsets[YY];
                    n          = kernelParams.grid.realGridSizeFP[YY];
                    t          = /*x.x * kernelParams.current.recipBox[dimIndex][XX] + */ x.y * kernelParams.current.recipBox[dimIndex][YY] + x.z * kernelParams.current.recipBox[dimIndex][ZZ];
                    break;

                case ZZ:
                    tableIndex = kernelParams.grid.tablesOffsets[ZZ];
                    n          = kernelParams.grid.realGridSizeFP[ZZ];
                    t          = /*x.x * kernelParams.current.recipBox[dimIndex][XX] + x.y * kernelParams.current.recipBox[dimIndex][YY] + */ x.z * kernelParams.current.recipBox[dimIndex][ZZ];
                    break;
            }

            const float shift = c_pmeMaxUnitcellShift;
            /* Fractional coordinates along box vectors, adding a positive shift to ensure t is positive for triclinic boxes */
            t    = (t + shift) * n;
            tInt = (int)t;
            sm_fractCoords[sharedMemoryIndex] = t - tInt;
            tableIndex                       += tInt;
            assert(tInt >= 0);
            assert(tInt < c_pmeNeighborUnitcellCount * n);

            // TODO have shared table for both parameters to share the fetch, as index is always same?
            // TODO compare texture/LDG performance
            sm_fractCoords[sharedMemoryIndex] +=
                fetchFromParamLookupTable(kernelParams.grid.d_fractShiftsTable,
                                          kernelParams.fractShiftsTableTexture,
                                          tableIndex);
            sm_gridlineIndices[sharedMemoryIndex] =
                fetchFromParamLookupTable(kernelParams.grid.d_gridlineIndicesTable,
                                          kernelParams.gridlineIndicesTableTexture,
                                          tableIndex);
            gm_gridlineIndices[atomIndexOffset * DIM + sharedMemoryIndex] = sm_gridlineIndices[sharedMemoryIndex];
        }
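        /* A small numeric illustration of the index interpolation above (made-up values): for a fractional
         * coordinate along x of -0.30 on a 48-point grid and a shift of, say, 2, t = (-0.30 + 2) * 48 = 81.6,
         * so tInt = 81 and the stored fraction is 0.6; the two lookup tables then turn tInt into the
         * fractional-shift correction and the wrapped gridline index in [0, 48).
         */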
        /* B-spline calculation */

        const int chargeCheck = pme_gpu_check_atom_charge(sm_coefficients[atomIndexLocal]);
        if (chargeCheck)
        {
            float div;
            int   o = orderIndex; // This is an index that is set once for PME_GPU_PARALLEL_SPLINE == 1

            const float dr = sm_fractCoords[sharedMemoryIndex];
            assert(isfinite(dr));

            /* dr is relative offset from lower cell limit */
            *SPLINE_DATA_PTR(order - 1) = 0.0f;
            *SPLINE_DATA_PTR(1)         = dr;
            *SPLINE_DATA_PTR(0)         = 1.0f - dr;

#pragma unroll
            for (int k = 3; k < order; k++)
            {
                div                     = 1.0f / (k - 1.0f);
                *SPLINE_DATA_PTR(k - 1) = div * dr * SPLINE_DATA(k - 2);

#pragma unroll
                for (int l = 1; l < (k - 1); l++)
                {
                    *SPLINE_DATA_PTR(k - l - 1) = div * ((dr + l) * SPLINE_DATA(k - l - 2) + (k - l - dr) * SPLINE_DATA(k - l - 1));
                }
                *SPLINE_DATA_PTR(0) = div * (1.0f - dr) * SPLINE_DATA(0);
            }
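            /* For reference (this mirrors the CPU make_bsplines()): the loop above raises the cardinal
             * B-spline values through the standard recurrence
             *     M_k(u) = ( u * M_{k-1}(u) + (k - u) * M_{k-1}(u - 1) ) / (k - 1),
             * starting from the linear hat function. For the common order = 4 the loop body runs once
             * (k = 3); the derivatives below are then simple differences of the order-3 values, and the
             * last recursion step to order 4 is applied just before storing theta.
             */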
            const int thetaGlobalOffsetBase = atomIndexOffset * DIM * order;

            /* Differentiation and storing the spline derivatives (dtheta) */
#if !PME_GPU_PARALLEL_SPLINE
            // With PME_GPU_PARALLEL_SPLINE == 1, o is already set to orderIndex;
            // with PME_GPU_PARALLEL_SPLINE == 0, we loop o over the whole range(order).
#pragma unroll
            for (o = 0; o < order; o++)
#endif
            {
                const int thetaIndex       = PME_SPLINE_THETA_STRIDE * (((o + order * warpIndex) * DIM + dimIndex) * PME_SPREADGATHER_ATOMS_PER_WARP + atomWarpIndex);
                const int thetaGlobalIndex = thetaGlobalOffsetBase + thetaIndex;

                const float dtheta = ((o > 0) ? SPLINE_DATA(o - 1) : 0.0f) - SPLINE_DATA(o);
                assert(isfinite(dtheta));
                gm_dtheta[thetaGlobalIndex] = dtheta;
            }

            /* The final recursion step, raising the spline values from (order - 1) to (order) */
            div                         = 1.0f / (order - 1.0f);
            *SPLINE_DATA_PTR(order - 1) = div * dr * SPLINE_DATA(order - 2);
#pragma unroll
            for (int k = 1; k < (order - 1); k++)
            {
                *SPLINE_DATA_PTR(order - k - 1) = div * ((dr + k) * SPLINE_DATA(order - k - 2) + (order - k - dr) * SPLINE_DATA(order - k - 1));
            }
            *SPLINE_DATA_PTR(0) = div * (1.0f - dr) * SPLINE_DATA(0);

            /* Storing the spline values (theta) */
#if !PME_GPU_PARALLEL_SPLINE
            // See comment for the loop above
#pragma unroll
            for (o = 0; o < order; o++)
#endif
            {
                const int thetaIndex       = PME_SPLINE_THETA_STRIDE * (((o + order * warpIndex) * DIM + dimIndex) * PME_SPREADGATHER_ATOMS_PER_WARP + atomWarpIndex);
                const int thetaGlobalIndex = thetaGlobalOffsetBase + thetaIndex;

                sm_theta[thetaIndex] = SPLINE_DATA(o);
                assert(isfinite(sm_theta[thetaIndex]));
                gm_theta[thetaGlobalIndex] = SPLINE_DATA(o);
            }
        }
    }
}
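/* A worked example of the theta/dtheta indexing used above, assuming order = 4, PME_SPLINE_THETA_STRIDE = 1
 * and PME_SPREADGATHER_ATOMS_PER_WARP = 2: for the second atom of warp 0 (atomWarpIndex = 1), dimension YY
 * (dimIndex = 1) and spline index o = 2, thetaIndex = ((2 + 0) * 3 + 1) * 2 + 1 = 15. The per-warp layout
 * is therefore [o][dim][atomInWarp], repeated warp after warp both in shared and in global memory.
 */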
/*! \brief
 * Charge spreading onto the grid.
 * This corresponds to the CPU function spread_coefficients_bsplines_thread().
 * Second stage of the whole kernel.
 *
 * \tparam[in] order                PME interpolation order.
 * \tparam[in] wrapX                A boolean which tells if the grid overlap in dimension X should be wrapped.
 * \tparam[in] wrapY                A boolean which tells if the grid overlap in dimension Y should be wrapped.
 * \param[in]  kernelParams         Input PME CUDA data in constant memory.
 * \param[in]  atomIndexOffset      Starting atom index for the execution block w.r.t. global memory.
 * \param[in]  sm_coefficients      Atom coefficients/charges in the shared memory.
 * \param[in]  sm_gridlineIndices   Atom gridline indices in the shared memory.
 * \param[in]  sm_theta             Atom spline values in the shared memory.
 */
template <
    const int order, const bool wrapX, const bool wrapY>
__device__ __forceinline__ void spread_charges(const PmeGpuCudaKernelParams kernelParams,
                                               int                          atomIndexOffset,
                                               const float * __restrict__   sm_coefficients,
                                               const int * __restrict__     sm_gridlineIndices,
                                               const float * __restrict__   sm_theta)
{
    /* Global memory pointer to the output grid */
    float * __restrict__ gm_grid = kernelParams.grid.d_realGrid;

    const int nx  = kernelParams.grid.realGridSize[XX];
    const int ny  = kernelParams.grid.realGridSize[YY];
    const int nz  = kernelParams.grid.realGridSize[ZZ];
    const int pny = kernelParams.grid.realGridSizePadded[YY];
    const int pnz = kernelParams.grid.realGridSizePadded[ZZ];

    const int offx = 0, offy = 0, offz = 0; // unused for now

    const int atomIndexLocal  = threadIdx.z;
    const int atomIndexGlobal = atomIndexOffset + atomIndexLocal;

    const int globalCheck = pme_gpu_check_atom_data_index(atomIndexGlobal, kernelParams.atoms.nAtoms);
    const int chargeCheck = pme_gpu_check_atom_charge(sm_coefficients[atomIndexLocal]);
    if (chargeCheck & globalCheck)
    {
        // Spline Y/Z coordinates
        const int ithy   = threadIdx.y;
        const int ithz   = threadIdx.x;
        const int ixBase = sm_gridlineIndices[atomIndexLocal * DIM + XX] - offx;
        int       iy     = sm_gridlineIndices[atomIndexLocal * DIM + YY] - offy + ithy;
        if (wrapY & (iy >= ny))
        {
            iy -= ny;
        }
        int iz = sm_gridlineIndices[atomIndexLocal * DIM + ZZ] - offz + ithz;
        if (iz >= nz)
        {
            iz -= nz;
        }

        /* Atom index w.r.t. warp - alternating 0 1 0 1 .. */
        const int atomWarpIndex = atomIndexLocal % PME_SPREADGATHER_ATOMS_PER_WARP;
        /* Warp index w.r.t. block - could probably be obtained easier? */
        const int warpIndex       = atomIndexLocal / PME_SPREADGATHER_ATOMS_PER_WARP;
        const int dimStride       = PME_SPLINE_THETA_STRIDE * PME_SPREADGATHER_ATOMS_PER_WARP;
        const int orderStride     = dimStride * DIM;
        const int thetaOffsetBase = orderStride * order * warpIndex + atomWarpIndex;

        const float thetaZ   = sm_theta[thetaOffsetBase + ithz * orderStride + ZZ * dimStride];
        const float thetaY   = sm_theta[thetaOffsetBase + ithy * orderStride + YY * dimStride];
        const float constVal = thetaZ * thetaY * sm_coefficients[atomIndexLocal];
        assert(isfinite(constVal));
        const int    constOffset = iy * pnz + iz;
        const float *sm_thetaX   = sm_theta + (thetaOffsetBase + XX * dimStride);

#pragma unroll
        for (int ithx = 0; (ithx < order); ithx++)
        {
            int ix = ixBase + ithx;
            if (wrapX & (ix >= nx))
            {
                ix -= nx;
            }
            const int gridIndexGlobal = ix * pny * pnz + constOffset;
            assert(isfinite(sm_thetaX[ithx * orderStride]));
            assert(isfinite(gm_grid[gridIndexGlobal]));
            atomicAdd(gm_grid + gridIndexGlobal, sm_thetaX[ithx * orderStride] * constVal);
        }
    }
}
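/* A sketch of the global grid addressing used in spread_charges() (illustrative numbers): with a 48^3 real
 * grid padded along z, e.g. realGridSizePadded = {48, 48, 50}, grid point (ix, iy, iz) lives at
 * gm_grid[ix * 48 * 50 + iy * 50 + iz]. Each of the order^2 = 16 threads of an atom owns one (iy, iz) pair
 * and loops over the order x-planes, so spreading one atom issues 4 atomicAdd()s per thread, 64 in total.
 */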
/*! \brief
 * A spline computation and charge spreading kernel function.
 *
 * \tparam[in] order                PME interpolation order.
 * \tparam[in] computeSplines       A boolean which tells if the spline parameter and
 *                                  gridline indices' computation should be performed.
 * \tparam[in] spreadCharges        A boolean which tells if the charge spreading should be performed.
 * \tparam[in] wrapX                A boolean which tells if the grid overlap in dimension X should be wrapped.
 * \tparam[in] wrapY                A boolean which tells if the grid overlap in dimension Y should be wrapped.
 * \param[in]  kernelParams         Input PME CUDA data in constant memory.
 */
template <const int order,
          const bool computeSplines,
          const bool spreadCharges,
          const bool wrapX,
          const bool wrapY>
__launch_bounds__(c_spreadMaxThreadsPerBlock)
__global__ void pme_spline_and_spread_kernel(const PmeGpuCudaKernelParams kernelParams)
{
    const int        atomsPerBlock = c_spreadMaxThreadsPerBlock / PME_SPREADGATHER_THREADS_PER_ATOM;
    // Gridline indices, ivec
    __shared__ int   sm_gridlineIndices[atomsPerBlock * DIM];
    // Charges
    __shared__ float sm_coefficients[atomsPerBlock];
    // Spline values
    __shared__ float sm_theta[atomsPerBlock * DIM * order];

    const int atomIndexOffset = blockIdx.x * atomsPerBlock;

    /* Staging coefficients/charges for both spline and spread */
    pme_gpu_stage_atom_data<float, atomsPerBlock, 1>(kernelParams, sm_coefficients, kernelParams.atoms.d_coefficients);

    if (computeSplines)
    {
        /* Staging coordinates */
        __shared__ float sm_coordinates[DIM * atomsPerBlock];
        pme_gpu_stage_atom_data<float, atomsPerBlock, DIM>(kernelParams, sm_coordinates, kernelParams.atoms.d_coordinates);

        __syncthreads();
        calculate_splines<order, atomsPerBlock>(kernelParams, atomIndexOffset, (const float3 *)sm_coordinates,
                                                sm_coefficients, sm_theta, sm_gridlineIndices);
    }
    else
    {
        /* Staging the data for spread
         * (the data is assumed to be in GPU global memory with proper layout already,
         * as in after running the spline kernel)
         */
        /* Spline data - only thetas (dthetas will only be needed in gather) */
        pme_gpu_stage_atom_data<float, atomsPerBlock, DIM * order>(kernelParams, sm_theta, kernelParams.atoms.d_theta);
        /* Gridline indices */
        pme_gpu_stage_atom_data<int, atomsPerBlock, DIM>(kernelParams, sm_gridlineIndices, kernelParams.atoms.d_gridlineIndices);
    }
    /* Make sure the staged/computed shared-memory data is visible to the whole block before spreading */
    __syncthreads();

    /* Spreading */
    if (spreadCharges)
    {
        spread_charges<order, wrapX, wrapY>(kernelParams, atomIndexOffset, sm_coefficients, sm_gridlineIndices, sm_theta);
    }
}
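/* Shared memory budget sketch for this kernel, assuming order = 4 and a 256-thread block (16 atoms):
 * sm_gridlineIndices 16 * 3 ints = 192 B, sm_coefficients 16 floats = 64 B, sm_theta 16 * 3 * 4 floats = 768 B,
 * sm_coordinates 3 * 16 floats = 192 B (spline path only), plus sm_fractCoords (and optionally sm_splineData)
 * inside calculate_splines() - comfortably below the usual 48 KiB per-block limit.
 */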
void pme_gpu_spread(const PmeGpu   *pmeGpu,
                    int gmx_unused  gridIndex,
                    real           *h_grid,
                    bool            computeSplines,
                    bool            spreadCharges)
{
    GMX_ASSERT(computeSplines || spreadCharges, "PME spline/spread kernel has invalid input (nothing to do)");
    cudaStream_t stream           = pmeGpu->archSpecific->pmeStream;
    const auto  *kernelParamsPtr  = pmeGpu->kernelParams.get();
    GMX_ASSERT(kernelParamsPtr->atoms.nAtoms > 0, "No atom data in PME GPU spread");

    const int order         = pmeGpu->common->pme_order;
    const int atomsPerBlock = c_spreadMaxThreadsPerBlock / PME_SPREADGATHER_THREADS_PER_ATOM;
    // TODO: pick smaller block size in runtime if needed
    // (e.g. on 660 Ti where 50% occupancy is ~25% faster than 100% occupancy with RNAse (~17.8k atoms))
    // If doing so, change atomsPerBlock in the kernels as well.
    // TODO: test varying block sizes on modern architectures as well
    // TODO: also consider using cudaFuncSetCacheConfig() for preferring shared memory on older architectures
    // (for spline data mostly, together with varying the PME_GPU_PARALLEL_SPLINE define)
    GMX_ASSERT(!c_usePadding || !(PME_ATOM_DATA_ALIGNMENT % atomsPerBlock), "inconsistent atom data padding vs. spreading block size");

    dim3 nBlocks(pmeGpu->nAtomsPadded / atomsPerBlock);
    dim3 dimBlock(order, order, atomsPerBlock);

    // These should later check for PME decomposition
    const bool wrapX = true;
    const bool wrapY = true;
    GMX_UNUSED_VALUE(wrapX);
    GMX_UNUSED_VALUE(wrapY);
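    /* A concrete picture of the launch configuration (assuming the default pme_order of 4 and a 256-thread
     * block, i.e. 16 atoms per block): a system padded to, say, 24000 atoms launches nBlocks = 24000 / 16 = 1500
     * one-dimensional blocks with dimBlock = (4, 4, 16), where threadIdx.x/y index the z/y spline
     * contributions during spreading and threadIdx.z indexes the atom within the block.
     */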
    switch (order)
    {
        case 4:
        {
            // TODO: cleaner unroll with some template trick?
            if (computeSplines && spreadCharges)
            {
                pme_gpu_start_timing(pmeGpu, gtPME_SPLINEANDSPREAD);
                pme_spline_and_spread_kernel<4, true, true, wrapX, wrapY> <<< nBlocks, dimBlock, 0, stream>>> (*kernelParamsPtr);
                CU_LAUNCH_ERR("pme_spline_and_spread_kernel");
                pme_gpu_stop_timing(pmeGpu, gtPME_SPLINEANDSPREAD);
            }
            else if (computeSplines)
            {
                pme_gpu_start_timing(pmeGpu, gtPME_SPLINE);
                pme_spline_and_spread_kernel<4, true, false, wrapX, wrapY> <<< nBlocks, dimBlock, 0, stream>>> (*kernelParamsPtr);
                CU_LAUNCH_ERR("pme_spline_and_spread_kernel");
                pme_gpu_stop_timing(pmeGpu, gtPME_SPLINE);
            }
            else if (spreadCharges)
            {
                pme_gpu_start_timing(pmeGpu, gtPME_SPREAD);
                pme_spline_and_spread_kernel<4, false, true, wrapX, wrapY> <<< nBlocks, dimBlock, 0, stream>>> (*kernelParamsPtr);
                CU_LAUNCH_ERR("pme_spline_and_spread_kernel");
                pme_gpu_stop_timing(pmeGpu, gtPME_SPREAD);
            }
        }
        break;

        default:
            GMX_THROW(gmx::NotImplementedError("The code for pme_order != 4 was not tested!"));
    }
    const bool copyBackGrid = spreadCharges && (pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_FFT(pmeGpu));
    if (copyBackGrid)
    {
        pme_gpu_copy_output_spread_grid(pmeGpu, h_grid);
    }
    const bool copyBackAtomData = computeSplines && (pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_gather(pmeGpu));
    if (copyBackAtomData)
    {
        pme_gpu_copy_output_spread_atom_data(pmeGpu);
    }
}