/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2016,2017,2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \internal \file
 *  \brief Implements PME force gathering in CUDA.
 *
 *  \author Aleksei Iupinov <a.yupinov@gmail.com>
 */

#include "gmxpre.h"

#include <cassert>

#include "gromacs/gpu_utils/cuda_kernel_utils.cuh"
#include "gromacs/gpu_utils/typecasts.cuh"

#include "pme.cuh"
#include "pme_gpu_calculate_splines.cuh"
#include "pme_grid.h"

/*! \brief
 * An inline CUDA function: unroll the dynamic index accesses to the constant grid sizes to avoid local memory operations.
 */
__device__ __forceinline__ float read_grid_size(const float* realGridSizeFP, const int dimIndex)
{
    switch (dimIndex)
    {
        case XX: return realGridSizeFP[XX];
        case YY: return realGridSizeFP[YY];
        case ZZ: return realGridSizeFP[ZZ];
    }
    assert(false);
    return 0.0f;
}

/*! \brief Reduce the partial force contributions.
 *
 * \tparam[in] order              The PME order (must be 4).
 * \tparam[in] atomDataSize       The number of partial force contributions for each atom (currently
 *                                order^2 == 16)
 * \tparam[in] blockSize          The CUDA block size
 *
 * \param[out] sm_forces          Shared memory array with the output forces (number of elements
 *                                is number of atoms per block)
 * \param[in]  atomIndexLocal     Local atom index
 * \param[in]  splineIndex        Spline index
 * \param[in]  lineIndex          Line index (same as threadLocalId)
 * \param[in]  realGridSizeFP     Local grid size constant
 * \param[in]  fx                 Input force partial component X
 * \param[in]  fy                 Input force partial component Y
 * \param[in]  fz                 Input force partial component Z
 */
template<int order, int atomDataSize, int blockSize>
__device__ __forceinline__ void reduce_atom_forces(float3* __restrict__ sm_forces,
                                                   const int    atomIndexLocal,
                                                   const int    splineIndex,
                                                   const int    lineIndex,
                                                   const float* realGridSizeFP,
                                                   float&       fx,
                                                   float&       fy,
                                                   float&       fz)
{
    if (!(order & (order - 1))) // Only for orders that are powers of 2
    {
        const unsigned int activeMask = c_fullWarpMask;

        // A tricky shuffle reduction inspired by reduce_force_j_warp_shfl
        // TODO: find out if this is the best in terms of transactions count
        static_assert(order == 4, "Only order of 4 is implemented");
        static_assert(atomDataSize <= warp_size,
                      "TODO: rework for atomDataSize > warp_size (order 8 or larger)");
        const int width = atomDataSize;

        fx += __shfl_down_sync(activeMask, fx, 1, width);
        fy += __shfl_up_sync(activeMask, fy, 1, width);
        fz += __shfl_down_sync(activeMask, fz, 1, width);

        if (splineIndex & 1)
        {
            fx = fy;
        }

        fx += __shfl_down_sync(activeMask, fx, 2, width);
        fz += __shfl_up_sync(activeMask, fz, 2, width);

        if (splineIndex & 2)
        {
            fx = fz;
        }

        // By now fx contains intermediate quad sums of all 3 components:
        // splineIndex    0            1            2 and 3      4            5            6 and 7 8...
        // sum of...      fx0 to fx3   fy0 to fy3   fz0 to fz3   fx4 to fx7   fy4 to fy7   fz4 to fz7 etc.

        // We just have to further reduce those groups of 4
        for (int delta = 4; delta < atomDataSize; delta <<= 1)
        {
            fx += __shfl_down_sync(activeMask, fx, delta, width);
        }
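
        // With order == 4 and atomDataSize == 16, the loop above runs with delta = 4 and 8, so
        // within each atom's group of 16 lanes, splineIndex 0 ends up holding the full x sum,
        // splineIndex 1 the full y sum, and splineIndex 2 the full z sum; these are exactly the
        // values that the dimIndex < DIM branch below writes out.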

        const int dimIndex = splineIndex;
        if (dimIndex < DIM)
        {
            const float n = read_grid_size(realGridSizeFP, dimIndex);
            *((float*)(&sm_forces[atomIndexLocal]) + dimIndex) = fx * n;
        }
    }
    else
    {
        // We use blockSize shared memory elements to read fx, or fy, or fz, and then reduce them to
        // fit into smemPerDim elements which are stored separately (first 2 dimensions only)
        const int         smemPerDim   = warp_size;
        const int         smemReserved = (DIM)*smemPerDim;
        __shared__ float  sm_forceReduction[smemReserved + blockSize];
        __shared__ float* sm_forceTemp[DIM];
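
        // Layout: the first DIM * warp_size floats of sm_forceReduction form the packed region
        // that sm_forceTemp[dimIndex] points into; the remaining blockSize floats are scratch for
        // the raw per-thread contributions, indexed as smemReserved + lineIndex below.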

        const int numWarps = blockSize / smemPerDim;
        const int minStride =
                max(1, atomDataSize / numWarps); // order 4: 128 threads => 4, 256 threads => 2, etc

#pragma unroll
        for (int dimIndex = 0; dimIndex < DIM; dimIndex++)
        {
            int elementIndex = smemReserved + lineIndex;
            // Store input force contributions
            sm_forceReduction[elementIndex] = (dimIndex == XX) ? fx : (dimIndex == YY) ? fy : fz;
            // sync here because two warps write data that the first one consumes below
            __syncthreads();
            // Reduce to fit into smemPerDim (warp size)
#pragma unroll
            for (int redStride = atomDataSize / 2; redStride > minStride; redStride >>= 1)
            {
                if (splineIndex < redStride)
                {
                    sm_forceReduction[elementIndex] += sm_forceReduction[elementIndex + redStride];
                }
            }
            __syncthreads();
            // Last iteration - packing everything to be nearby, storing convenience pointer
            sm_forceTemp[dimIndex] = sm_forceReduction + dimIndex * smemPerDim;
            int redStride          = minStride;
            if (splineIndex < redStride)
            {
                const int packedIndex = atomIndexLocal * redStride + splineIndex;
                sm_forceTemp[dimIndex][packedIndex] =
                        sm_forceReduction[elementIndex] + sm_forceReduction[elementIndex + redStride];
            }
            __syncthreads();
        }

        assert((blockSize / warp_size) >= DIM);
        // assert (atomsPerBlock <= warp_size);

        const int warpIndex = lineIndex / warp_size;
        const int dimIndex  = warpIndex;

        // First 3 warps can now process 1 dimension each
        if (dimIndex < DIM)
        {
            int sourceIndex = lineIndex % warp_size;
#pragma unroll
            for (int redStride = minStride / 2; redStride > 1; redStride >>= 1)
            {
                if (!(splineIndex & redStride))
                {
                    sm_forceTemp[dimIndex][sourceIndex] += sm_forceTemp[dimIndex][sourceIndex + redStride];
                }
            }

            __syncwarp();

            const float n         = read_grid_size(realGridSizeFP, dimIndex);
            const int   atomIndex = sourceIndex / minStride;

            if (sourceIndex == minStride * atomIndex)
            {
                *((float*)(&sm_forces[atomIndex]) + dimIndex) =
                        (sm_forceTemp[dimIndex][sourceIndex] + sm_forceTemp[dimIndex][sourceIndex + 1]) * n;
            }
        }
    }
}

/*! \brief Calculate the sum of the force partial components (in X, Y and Z)
 *
 * \tparam[in] order              The PME order (must be 4).
 * \tparam[in] atomsPerWarp       The number of atoms per GPU warp.
 * \tparam[in] wrapX              Tells if the grid is wrapped in the X dimension.
 * \tparam[in] wrapY              Tells if the grid is wrapped in the Y dimension.
 * \param[out] fx                 The force partial component in the X dimension.
 * \param[out] fy                 The force partial component in the Y dimension.
 * \param[out] fz                 The force partial component in the Z dimension.
 * \param[in] ithyMin             The thread minimum index in the Y dimension.
 * \param[in] ithyMax             The thread maximum index in the Y dimension.
 * \param[in] ixBase              The grid line index base value in the X dimension.
 * \param[in] iz                  The grid line index in the Z dimension.
 * \param[in] nx                  The grid real size in the X dimension.
 * \param[in] ny                  The grid real size in the Y dimension.
 * \param[in] pny                 The padded grid real size in the Y dimension.
 * \param[in] pnz                 The padded grid real size in the Z dimension.
 * \param[in] atomIndexLocal      The atom index for this thread.
 * \param[in] splineIndexBase     The base value of the spline parameter index.
 * \param[in] tdz                 The theta and dtheta in the Z dimension.
 * \param[in] sm_gridlineIndices  Shared memory array of grid line indices.
 * \param[in] sm_theta            Shared memory array of atom theta values.
 * \param[in] sm_dtheta           Shared memory array of atom dtheta values.
 * \param[in] gm_grid             Global memory array of the grid to use.
 */
template<int order, int atomsPerWarp, bool wrapX, bool wrapY>
__device__ __forceinline__ void sumForceComponents(float* __restrict__ fx,
                                                   float* __restrict__ fy,
                                                   float* __restrict__ fz,
                                                   const int    ithyMin,
                                                   const int    ithyMax,
                                                   const int    ixBase,
                                                   const int    iz,
                                                   const int    nx,
                                                   const int    ny,
                                                   const int    pny,
                                                   const int    pnz,
                                                   const int    atomIndexLocal,
                                                   const int    splineIndexBase,
                                                   const float2 tdz,
                                                   const int* __restrict__ sm_gridlineIndices,
                                                   const float* __restrict__ sm_theta,
                                                   const float* __restrict__ sm_dtheta,
                                                   const float* __restrict__ gm_grid)
{
#pragma unroll
    for (int ithy = ithyMin; ithy < ithyMax; ithy++)
    {
        const int splineIndexY = getSplineParamIndex<order, atomsPerWarp>(splineIndexBase, YY, ithy);
        const float2 tdy       = make_float2(sm_theta[splineIndexY], sm_dtheta[splineIndexY]);

        int iy = sm_gridlineIndices[atomIndexLocal * DIM + YY] + ithy;
        if (wrapY & (iy >= ny))
        {
            iy -= ny;
        }
        const int constOffset = iy * pnz + iz;

#pragma unroll
        for (int ithx = 0; (ithx < order); ithx++)
        {
            int ix = ixBase + ithx;
            if (wrapX & (ix >= nx))
            {
                ix -= nx;
            }
            const int gridIndexGlobal = ix * pny * pnz + constOffset;
            assert(gridIndexGlobal >= 0);
            const float gridValue = gm_grid[gridIndexGlobal];
            assert(isfinite(gridValue));
            const int splineIndexX = getSplineParamIndex<order, atomsPerWarp>(splineIndexBase, XX, ithx);
            const float2 tdx       = make_float2(sm_theta[splineIndexX], sm_dtheta[splineIndexX]);
            const float  fxy1      = tdz.x * gridValue;
            const float  fz1       = tdz.y * gridValue;
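            // Each partial force component accumulated below is the grid value weighted by the
            // spline value (theta, the .x members) in two dimensions and the spline derivative
            // (dtheta, the .y members) in the dimension of that component.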
            *fx += tdx.y * tdy.x * fxy1;
            *fy += tdx.x * tdy.y * fxy1;
            *fz += tdx.x * tdy.x * fz1;
        }
    }
}

/*! \brief Calculate the grid forces and store them in shared memory.
 *
 * \param[in,out] sm_forces       Shared memory array with the output forces.
 * \param[in] forceIndexLocal     The local (per thread) index in the sm_forces array.
 * \param[in] forceIndexGlobal    The index of the thread in the gm_coefficients array.
 * \param[in] recipBox            The reciprocal box.
 * \param[in] scale               The scale to use when calculating the forces. For gm_coefficientsB
 * (when using multiple coefficients on a single grid) the scale will be (1.0 - scale).
 * \param[in] gm_coefficients     Global memory array of the coefficients to use for an unperturbed
 * or FEP in state A if a single grid is used (\p multiCoefficientsSingleGrid == true). If two
 * separate grids are used this should be the coefficients of the grid in question.
 */
__device__ __forceinline__ void calculateAndStoreGridForces(float3* __restrict__ sm_forces,
                                                            const int   forceIndexLocal,
                                                            const int   forceIndexGlobal,
                                                            const float recipBox[DIM][DIM],
                                                            const float scale,
                                                            const float* __restrict__ gm_coefficients)
{
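    /* The reduced force in sm_forces is accumulated in grid (fractional) coordinates; multiplying
     * by the reciprocal box converts it to Cartesian coordinates, and the negated, scaled
     * coefficient (charge) gives the final value. Only the components of recipBox referenced
     * below are assumed to be non-zero.
     */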
    const float3 atomForces     = sm_forces[forceIndexLocal];
    float        negCoefficient = -scale * gm_coefficients[forceIndexGlobal];
    float3       result;
    result.x = negCoefficient * recipBox[XX][XX] * atomForces.x;
    result.y = negCoefficient * (recipBox[XX][YY] * atomForces.x + recipBox[YY][YY] * atomForces.y);
    result.z = negCoefficient
               * (recipBox[XX][ZZ] * atomForces.x + recipBox[YY][ZZ] * atomForces.y
                  + recipBox[ZZ][ZZ] * atomForces.z);
    sm_forces[forceIndexLocal] = result;
}

/*! \brief
 * A CUDA kernel which gathers the atom forces from the grid.
 * The grid is assumed to be wrapped in dimension Z.
 *
 * \tparam[in] order                The PME order (must be 4 currently).
 * \tparam[in] wrapX                Tells if the grid is wrapped in the X dimension.
 * \tparam[in] wrapY                Tells if the grid is wrapped in the Y dimension.
 * \tparam[in] numGrids             The number of grids to use in the kernel. Can be 1 or 2.
 * \tparam[in] readGlobal           Tells if we should read spline values from global memory
 * \tparam[in] threadsPerAtom       How many threads work on each atom
 *
 * \param[in]  kernelParams         All the PME GPU data.
 */
template<int order, bool wrapX, bool wrapY, int numGrids, bool readGlobal, ThreadsPerAtom threadsPerAtom>
__launch_bounds__(c_gatherMaxThreadsPerBlock, c_gatherMinBlocksPerMP) __global__
        void pme_gather_kernel(const PmeGpuCudaKernelParams kernelParams)
{
    assert(numGrids == 1 || numGrids == 2);

    /* Global memory pointers */
    const float* __restrict__ gm_coefficientsA = kernelParams.atoms.d_coefficients[0];
    const float* __restrict__ gm_coefficientsB = kernelParams.atoms.d_coefficients[1];
    const float* __restrict__ gm_gridA         = kernelParams.grid.d_realGrid[0];
    const float* __restrict__ gm_gridB         = kernelParams.grid.d_realGrid[1];
    float* __restrict__ gm_forces              = kernelParams.atoms.d_forces;

    /* Global memory pointers for readGlobal */
    const float* __restrict__ gm_theta         = kernelParams.atoms.d_theta;
    const float* __restrict__ gm_dtheta        = kernelParams.atoms.d_dtheta;
    const int* __restrict__ gm_gridlineIndices = kernelParams.atoms.d_gridlineIndices;

    float3 atomX;
    float  atomCharge;

    const int blockIndex = blockIdx.y * gridDim.x + blockIdx.x;

    /* Number of data components and threads for a single atom */
    const int threadsPerAtomValue = (threadsPerAtom == ThreadsPerAtom::Order) ? order : order * order;
    const int atomDataSize        = threadsPerAtomValue;
    const int atomsPerBlock       = c_gatherMaxThreadsPerBlock / atomDataSize;
    // Number of atoms processed by a single warp in spread and gather
    const int atomsPerWarp = warp_size / atomDataSize;

    const int blockSize = atomsPerBlock * atomDataSize;
    assert(blockSize == blockDim.x * blockDim.y * blockDim.z);

    /* These are the atom indices - for the shared and global memory */
    const int atomIndexLocal  = threadIdx.z;
    const int atomIndexOffset = blockIndex * atomsPerBlock;
    const int atomIndexGlobal = atomIndexOffset + atomIndexLocal;

    /* Early return for fully empty blocks at the end
     * (should only happen for billions of input atoms)
     */
    if (atomIndexOffset >= kernelParams.atoms.nAtoms)
    {
        return;
    }
    // 4 warps per block, 8 atoms per warp *3 *4
    const int        splineParamsSize    = atomsPerBlock * DIM * order;
    const int        gridlineIndicesSize = atomsPerBlock * DIM;
    __shared__ int   sm_gridlineIndices[gridlineIndicesSize];
    __shared__ float sm_theta[splineParamsSize];
    __shared__ float sm_dtheta[splineParamsSize];

    /* Spline Z coordinates */
    const int ithz = threadIdx.x;

    /* These are the spline contribution indices in shared memory */
    const int splineIndex = threadIdx.y * blockDim.x + threadIdx.x;
    const int lineIndex   = (threadIdx.z * (blockDim.x * blockDim.y))
                          + splineIndex; /* And to all the block's particles */

    const int threadLocalId =
            (threadIdx.z * (blockDim.x * blockDim.y)) + blockDim.x * threadIdx.y + threadIdx.x;
    const int threadLocalIdMax = blockDim.x * blockDim.y * blockDim.z;

    if (readGlobal)
    {
        /* Read splines */
        const int localGridlineIndicesIndex = threadLocalId;
        const int globalGridlineIndicesIndex = blockIndex * gridlineIndicesSize + localGridlineIndicesIndex;
        if (localGridlineIndicesIndex < gridlineIndicesSize)
        {
            sm_gridlineIndices[localGridlineIndicesIndex] = gm_gridlineIndices[globalGridlineIndicesIndex];
            assert(sm_gridlineIndices[localGridlineIndicesIndex] >= 0);
        }
        /* The loop is needed for order threads per atom, to make sure each thread loads all the
           data values it is responsible for; with order*order threads per atom, each thread only
           needs to load a single value. */

        const int iMin = 0;
        const int iMax = (threadsPerAtom == ThreadsPerAtom::Order) ? 3 : 1;

        for (int i = iMin; i < iMax; i++)
        {
            int localSplineParamsIndex =
                    threadLocalId
                    + i * threadLocalIdMax; /* i will always be zero for order*order threads per atom */
            int globalSplineParamsIndex = blockIndex * splineParamsSize + localSplineParamsIndex;
            if (localSplineParamsIndex < splineParamsSize)
            {
                sm_theta[localSplineParamsIndex]  = gm_theta[globalSplineParamsIndex];
                sm_dtheta[localSplineParamsIndex] = gm_dtheta[globalSplineParamsIndex];
                assert(isfinite(sm_theta[localSplineParamsIndex]));
                assert(isfinite(sm_dtheta[localSplineParamsIndex]));
            }
        }
        __syncthreads();
    }
    else
    {
        const float3* __restrict__ gm_coordinates = asFloat3(kernelParams.atoms.d_coordinates);
        /* Recalculate splines */
        if (c_useAtomDataPrefetch)
        {
            // Charges
            __shared__ float sm_coefficients[atomsPerBlock];
            // Coordinates
            __shared__ float3 sm_coordinates[atomsPerBlock];
            /* Staging coefficients/charges */
            pme_gpu_stage_atom_data<float, atomsPerBlock, 1>(sm_coefficients, gm_coefficientsA);

            /* Staging coordinates */
            pme_gpu_stage_atom_data<float3, atomsPerBlock, 1>(sm_coordinates, gm_coordinates);
            __syncthreads();
            atomX      = sm_coordinates[atomIndexLocal];
            atomCharge = sm_coefficients[atomIndexLocal];
        }
        else
        {
            atomX      = gm_coordinates[atomIndexGlobal];
            atomCharge = gm_coefficientsA[atomIndexGlobal];
        }
        calculate_splines<order, atomsPerBlock, atomsPerWarp, true, false>(
                kernelParams, atomIndexOffset, atomX, atomCharge, sm_theta, sm_dtheta, sm_gridlineIndices);
        __syncwarp();
    }
    float fx = 0.0f;
    float fy = 0.0f;
    float fz = 0.0f;

    const int chargeCheck = pme_gpu_check_atom_charge(gm_coefficientsA[atomIndexGlobal]);

    const int nx  = kernelParams.grid.realGridSize[XX];
    const int ny  = kernelParams.grid.realGridSize[YY];
    const int nz  = kernelParams.grid.realGridSize[ZZ];
    const int pny = kernelParams.grid.realGridSizePadded[YY];
    const int pnz = kernelParams.grid.realGridSizePadded[ZZ];

    const int atomWarpIndex = atomIndexLocal % atomsPerWarp;
    const int warpIndex     = atomIndexLocal / atomsPerWarp;

    const int splineIndexBase = getSplineParamIndexBase<order, atomsPerWarp>(warpIndex, atomWarpIndex);
    const int    splineIndexZ = getSplineParamIndex<order, atomsPerWarp>(splineIndexBase, ZZ, ithz);
    const float2 tdz          = make_float2(sm_theta[splineIndexZ], sm_dtheta[splineIndexZ]);

    int       iz     = sm_gridlineIndices[atomIndexLocal * DIM + ZZ] + ithz;
    const int ixBase = sm_gridlineIndices[atomIndexLocal * DIM + XX];

    if (iz >= nz)
    {
        iz -= nz;
    }

    const int ithyMin = (threadsPerAtom == ThreadsPerAtom::Order) ? 0 : threadIdx.y;
    const int ithyMax = (threadsPerAtom == ThreadsPerAtom::Order) ? order : threadIdx.y + 1;
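    /* With ThreadsPerAtom::Order each thread handles a single Z spline index and loops over all
     * order Y indices in sumForceComponents; with ThreadsPerAtom::OrderSquared each thread handles
     * a single (Y, Z) pair, so that loop collapses to one iteration.
     */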
    if (chargeCheck)
    {
        sumForceComponents<order, atomsPerWarp, wrapX, wrapY>(&fx,
                                                              &fy,
                                                              &fz,
                                                              ithyMin,
                                                              ithyMax,
                                                              ixBase,
                                                              iz,
                                                              nx,
                                                              ny,
                                                              pny,
                                                              pnz,
                                                              atomIndexLocal,
                                                              splineIndexBase,
                                                              tdz,
                                                              sm_gridlineIndices,
                                                              sm_theta,
                                                              sm_dtheta,
                                                              gm_gridA);
    }
    // Reduction of partial force contributions
    __shared__ float3 sm_forces[atomsPerBlock];
    reduce_atom_forces<order, atomDataSize, blockSize>(
            sm_forces, atomIndexLocal, splineIndex, lineIndex, kernelParams.grid.realGridSizeFP, fx, fy, fz);
    __syncthreads();

    /* Calculating the final forces with no component branching, atomsPerBlock threads */
    const int   forceIndexLocal  = threadLocalId;
    const int   forceIndexGlobal = atomIndexOffset + forceIndexLocal;
    const float scale            = kernelParams.current.scale;
    if (forceIndexLocal < atomsPerBlock)
    {
        calculateAndStoreGridForces(
                sm_forces, forceIndexLocal, forceIndexGlobal, kernelParams.current.recipBox, scale, gm_coefficientsA);
    }

    __syncwarp();
    assert(atomsPerBlock <= warp_size);

    /* Writing or adding the final forces component-wise, single warp */
    const int blockForcesSize = atomsPerBlock * DIM;
    const int numIter         = (blockForcesSize + warp_size - 1) / warp_size;
    const int iterThreads     = blockForcesSize / numIter;
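    /* Illustrative numbers only: with 8 atoms per block, blockForcesSize is 24 components, so
     * numIter == 1 and iterThreads == 24, and one pass of a single warp writes everything; with
     * 16 atoms per block, numIter == 2 and each of the 24 iterThreads writes two components.
     */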
    if (threadLocalId < iterThreads)
    {
#pragma unroll
        for (int i = 0; i < numIter; i++)
        {
            int   outputIndexLocal       = i * iterThreads + threadLocalId;
            int   outputIndexGlobal      = blockIndex * blockForcesSize + outputIndexLocal;
            float outputForceComponent   = ((float*)sm_forces)[outputIndexLocal];
            gm_forces[outputIndexGlobal] = outputForceComponent;
        }
    }

    if (numGrids == 2)
    {
        /* We must sync here since the same shared memory is used as above. */
        __syncthreads();
        fx                    = 0.0f;
        fy                    = 0.0f;
        fz                    = 0.0f;
        const int chargeCheck = pme_gpu_check_atom_charge(gm_coefficientsB[atomIndexGlobal]);
        if (chargeCheck)
        {
            sumForceComponents<order, atomsPerWarp, wrapX, wrapY>(&fx,
                                                                  &fy,
                                                                  &fz,
                                                                  ithyMin,
                                                                  ithyMax,
                                                                  ixBase,
                                                                  iz,
                                                                  nx,
                                                                  ny,
                                                                  pny,
                                                                  pnz,
                                                                  atomIndexLocal,
                                                                  splineIndexBase,
                                                                  tdz,
                                                                  sm_gridlineIndices,
                                                                  sm_theta,
                                                                  sm_dtheta,
                                                                  gm_gridB);
        }
        // Reduction of partial force contributions
        reduce_atom_forces<order, atomDataSize, blockSize>(
                sm_forces, atomIndexLocal, splineIndex, lineIndex, kernelParams.grid.realGridSizeFP, fx, fy, fz);
        __syncthreads();

        /* Calculating the final forces with no component branching, atomsPerBlock threads */
        if (forceIndexLocal < atomsPerBlock)
        {
            calculateAndStoreGridForces(sm_forces,
                                        forceIndexLocal,
                                        forceIndexGlobal,
                                        kernelParams.current.recipBox,
                                        1.0F - scale,
                                        gm_coefficientsB);
        }

        __syncwarp();

        /* Writing or adding the final forces component-wise, single warp */
        if (threadLocalId < iterThreads)
        {
#pragma unroll
            for (int i = 0; i < numIter; i++)
            {
                int   outputIndexLocal     = i * iterThreads + threadLocalId;
                int   outputIndexGlobal    = blockIndex * blockForcesSize + outputIndexLocal;
                float outputForceComponent = ((float*)sm_forces)[outputIndexLocal];
                gm_forces[outputIndexGlobal] += outputForceComponent;
            }
        }
    }
}

//! Kernel instantiations
// clang-format off
template __global__ void pme_gather_kernel<4, true, true, 1, true, ThreadsPerAtom::Order>        (const PmeGpuCudaKernelParams);
template __global__ void pme_gather_kernel<4, true, true, 1, true, ThreadsPerAtom::OrderSquared> (const PmeGpuCudaKernelParams);
template __global__ void pme_gather_kernel<4, true, true, 1, false, ThreadsPerAtom::Order>       (const PmeGpuCudaKernelParams);
template __global__ void pme_gather_kernel<4, true, true, 1, false, ThreadsPerAtom::OrderSquared>(const PmeGpuCudaKernelParams);
template __global__ void pme_gather_kernel<4, true, true, 2, true, ThreadsPerAtom::Order>        (const PmeGpuCudaKernelParams);
template __global__ void pme_gather_kernel<4, true, true, 2, true, ThreadsPerAtom::OrderSquared> (const PmeGpuCudaKernelParams);
template __global__ void pme_gather_kernel<4, true, true, 2, false, ThreadsPerAtom::Order>       (const PmeGpuCudaKernelParams);
template __global__ void pme_gather_kernel<4, true, true, 2, false, ThreadsPerAtom::OrderSquared>(const PmeGpuCudaKernelParams);
// clang-format on