Fix random doxygen warnings and typos
[alexxy/gromacs.git] / src / gromacs / ewald / pme_gpu_internal.cpp
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2016,2017,2018,2019,2020, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35
36 /*! \internal \file
37  *
38  * \brief This file contains internal function implementations
39  * for performing the PME calculations on GPU.
40  *
41  * Note that this file is compiled as regular C++ source in OpenCL builds, but
42  * it is treated as CUDA source in CUDA-enabled GPU builds.
43  *
44  * \author Aleksei Iupinov <a.yupinov@gmail.com>
45  * \ingroup module_ewald
46  */
47
48 #include "gmxpre.h"
49
50 #include "pme_gpu_internal.h"
51
52 #include "config.h"
53
54 #include <list>
55 #include <memory>
56 #include <string>
57
58 #include "gromacs/ewald/ewald_utils.h"
59 #include "gromacs/gpu_utils/device_context.h"
60 #include "gromacs/gpu_utils/device_stream.h"
61 #include "gromacs/gpu_utils/gpu_utils.h"
62 #include "gromacs/hardware/device_information.h"
63 #include "gromacs/math/invertmatrix.h"
64 #include "gromacs/math/units.h"
65 #include "gromacs/timing/gpu_timing.h"
66 #include "gromacs/utility/exceptions.h"
67 #include "gromacs/utility/fatalerror.h"
68 #include "gromacs/utility/gmxassert.h"
69 #include "gromacs/utility/logger.h"
70 #include "gromacs/utility/stringutil.h"
71
72 #if GMX_GPU_CUDA
73 #    include "gromacs/gpu_utils/pmalloc_cuda.h"
74
75 #    include "pme.cuh"
76 #elif GMX_GPU_OPENCL
77 #    include "gromacs/gpu_utils/gmxopencl.h"
78 #endif
79
80 #include "gromacs/ewald/pme.h"
81
82 #include "pme_gpu_3dfft.h"
83 #include "pme_gpu_calculate_splines.h"
84 #include "pme_gpu_constants.h"
85 #include "pme_gpu_program_impl.h"
86 #include "pme_gpu_timings.h"
87 #include "pme_gpu_types.h"
88 #include "pme_gpu_types_host.h"
89 #include "pme_gpu_types_host_impl.h"
90 #include "pme_grid.h"
91 #include "pme_internal.h"
92 #include "pme_solve.h"
93
94 /*! \brief
95  * CUDA only.
96  * Atom limit above which it is advantageous to recalculate the splines in the gather kernel
97  * and to use fewer threads per atom in the spline and spread kernels.
98  */
99 constexpr int c_pmeGpuPerformanceAtomLimit = 23000;
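/* As a worked illustration of this heuristic: on CUDA, a system of 30000 atoms exceeds the
 * limit and therefore uses ThreadsPerAtom::Order with spline recalculation, while a system of
 * 10000 atoms uses ThreadsPerAtom::OrderSquared with stored splines
 * (see pme_gpu_select_best_performing_pme_spreadgather_kernels() below). */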
100
101 /*! \internal \brief
102  * Wrapper for getting a pointer to the plain C++ part of the GPU kernel parameters structure.
103  *
104  * \param[in] pmeGpu  The PME GPU structure.
105  * \returns The pointer to the kernel parameters.
106  */
107 static PmeGpuKernelParamsBase* pme_gpu_get_kernel_params_base_ptr(const PmeGpu* pmeGpu)
108 {
109     // reinterpret_cast is needed because the derived CUDA structure is not known in this file
110     auto* kernelParamsPtr = reinterpret_cast<PmeGpuKernelParamsBase*>(pmeGpu->kernelParams.get());
111     return kernelParamsPtr;
112 }
113
114 /*! \brief
115  * Atom data block size (in terms of number of atoms).
116  * This is the least common multiple of number of atoms processed by
117  * a single block/workgroup of the spread and gather kernels.
118  * The GPU atom data buffers must be padded, which means that
119  * the numbers of atoms used for determining the size of the memory
120  * allocation must be divisible by this.
121  */
122 constexpr int c_pmeAtomDataBlockSize = 64;
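/* For example, with this block size an input of 100 atoms is padded up to
 * ((100 + 64 - 1) / 64) * 64 = 128 atoms when sizing the device-side atom buffers,
 * matching the rounding done in pme_gpu_reinit_atoms(). */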
123
124 int pme_gpu_get_atom_data_block_size()
125 {
126     return c_pmeAtomDataBlockSize;
127 }
128
129 void pme_gpu_synchronize(const PmeGpu* pmeGpu)
130 {
131     pmeGpu->archSpecific->pmeStream_.synchronize();
132 }
133
134 void pme_gpu_alloc_energy_virial(PmeGpu* pmeGpu)
135 {
136     const size_t energyAndVirialSize = c_virialAndEnergyCount * sizeof(float);
137
138     GMX_ASSERT(
139             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
140             "Only one (normal Coulomb PME) or two (FEP Coulomb PME) PME grids can be used on GPU");
141
142     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
143     {
144         allocateDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy[gridIndex],
145                              c_virialAndEnergyCount, pmeGpu->archSpecific->deviceContext_);
146         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_virialAndEnergy[gridIndex]), energyAndVirialSize);
147     }
148 }
149
150 void pme_gpu_free_energy_virial(PmeGpu* pmeGpu)
151 {
152     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
153     {
154         freeDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy[gridIndex]);
155         pfree(pmeGpu->staging.h_virialAndEnergy[gridIndex]);
156         pmeGpu->staging.h_virialAndEnergy[gridIndex] = nullptr;
157     }
158 }
159
160 void pme_gpu_clear_energy_virial(const PmeGpu* pmeGpu)
161 {
162     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
163     {
164         clearDeviceBufferAsync(&pmeGpu->kernelParams->constants.d_virialAndEnergy[gridIndex], 0,
165                                c_virialAndEnergyCount, pmeGpu->archSpecific->pmeStream_);
166     }
167 }
168
169 void pme_gpu_realloc_and_copy_bspline_values(PmeGpu* pmeGpu, const int gridIndex)
170 {
171     GMX_ASSERT(
172             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
173             "Only one (normal Coulomb PME) or two (FEP Coulomb PME) PME grids can be used on GPU");
174     GMX_ASSERT(gridIndex < pmeGpu->common->ngrids,
175                "Invalid combination of gridIndex and number of grids");
176
177     const int splineValuesOffset[DIM] = { 0, pmeGpu->kernelParams->grid.realGridSize[XX],
178                                           pmeGpu->kernelParams->grid.realGridSize[XX]
179                                                   + pmeGpu->kernelParams->grid.realGridSize[YY] };
180     memcpy(&pmeGpu->kernelParams->grid.splineValuesOffset, &splineValuesOffset, sizeof(splineValuesOffset));
181
182     const int newSplineValuesSize = pmeGpu->kernelParams->grid.realGridSize[XX]
183                                     + pmeGpu->kernelParams->grid.realGridSize[YY]
184                                     + pmeGpu->kernelParams->grid.realGridSize[ZZ];
185     const bool shouldRealloc = (newSplineValuesSize > pmeGpu->archSpecific->splineValuesSize[gridIndex]);
186     reallocateDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli[gridIndex],
187                            newSplineValuesSize, &pmeGpu->archSpecific->splineValuesSize[gridIndex],
188                            &pmeGpu->archSpecific->splineValuesCapacity[gridIndex],
189                            pmeGpu->archSpecific->deviceContext_);
190     if (shouldRealloc)
191     {
192         /* Reallocate the host buffer */
193         pfree(pmeGpu->staging.h_splineModuli[gridIndex]);
194         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_splineModuli[gridIndex]),
195                 newSplineValuesSize * sizeof(float));
196     }
197     for (int i = 0; i < DIM; i++)
198     {
199         memcpy(pmeGpu->staging.h_splineModuli[gridIndex] + splineValuesOffset[i],
200                pmeGpu->common->bsp_mod[i].data(), pmeGpu->common->bsp_mod[i].size() * sizeof(float));
201     }
202     /* TODO: pin original buffer instead! */
203     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli[gridIndex],
204                        pmeGpu->staging.h_splineModuli[gridIndex], 0, newSplineValuesSize,
205                        pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
206 }
207
208 void pme_gpu_free_bspline_values(const PmeGpu* pmeGpu)
209 {
210     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
211     {
212         pfree(pmeGpu->staging.h_splineModuli[gridIndex]);
213         freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli[gridIndex]);
214     }
215 }
216
217 void pme_gpu_realloc_forces(PmeGpu* pmeGpu)
218 {
219     const size_t newForcesSize = pmeGpu->nAtomsAlloc * DIM;
220     GMX_ASSERT(newForcesSize > 0, "Bad number of atoms in PME GPU");
221     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, newForcesSize,
222                            &pmeGpu->archSpecific->forcesSize, &pmeGpu->archSpecific->forcesSizeAlloc,
223                            pmeGpu->archSpecific->deviceContext_);
224     pmeGpu->staging.h_forces.reserveWithPadding(pmeGpu->nAtomsAlloc);
225     pmeGpu->staging.h_forces.resizeWithPadding(pmeGpu->kernelParams->atoms.nAtoms);
226 }
227
228 void pme_gpu_free_forces(const PmeGpu* pmeGpu)
229 {
230     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces);
231 }
232
233 void pme_gpu_copy_input_forces(PmeGpu* pmeGpu)
234 {
235     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
236     float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
237     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, h_forcesFloat, 0,
238                        DIM * pmeGpu->kernelParams->atoms.nAtoms, pmeGpu->archSpecific->pmeStream_,
239                        pmeGpu->settings.transferKind, nullptr);
240 }
241
242 void pme_gpu_copy_output_forces(PmeGpu* pmeGpu)
243 {
244     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
245     float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
246     copyFromDeviceBuffer(h_forcesFloat, &pmeGpu->kernelParams->atoms.d_forces, 0,
247                          DIM * pmeGpu->kernelParams->atoms.nAtoms, pmeGpu->archSpecific->pmeStream_,
248                          pmeGpu->settings.transferKind, nullptr);
249 }
250
251 void pme_gpu_realloc_and_copy_input_coefficients(const PmeGpu* pmeGpu,
252                                                  const float*  h_coefficients,
253                                                  const int     gridIndex)
254 {
255     GMX_ASSERT(h_coefficients, "Bad host-side charge buffer in PME GPU");
256     const size_t newCoefficientsSize = pmeGpu->nAtomsAlloc;
257     GMX_ASSERT(newCoefficientsSize > 0, "Bad number of atoms in PME GPU");
258     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex],
259                            newCoefficientsSize, &pmeGpu->archSpecific->coefficientsSize[gridIndex],
260                            &pmeGpu->archSpecific->coefficientsCapacity[gridIndex],
261                            pmeGpu->archSpecific->deviceContext_);
262     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex],
263                        const_cast<float*>(h_coefficients), 0, pmeGpu->kernelParams->atoms.nAtoms,
264                        pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
265
266     const size_t paddingIndex = pmeGpu->kernelParams->atoms.nAtoms;
267     const size_t paddingCount = pmeGpu->nAtomsAlloc - paddingIndex;
268     if (paddingCount > 0)
269     {
270         clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex], paddingIndex,
271                                paddingCount, pmeGpu->archSpecific->pmeStream_);
272     }
273 }
274
275 void pme_gpu_free_coefficients(const PmeGpu* pmeGpu)
276 {
277     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
278     {
279         freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex]);
280     }
281 }
282
283 void pme_gpu_realloc_spline_data(PmeGpu* pmeGpu)
284 {
285     const int order             = pmeGpu->common->pme_order;
286     const int newSplineDataSize = DIM * order * pmeGpu->nAtomsAlloc;
287     GMX_ASSERT(newSplineDataSize > 0, "Bad number of atoms in PME GPU");
288     /* Two arrays of the same size */
289     const bool shouldRealloc        = (newSplineDataSize > pmeGpu->archSpecific->splineDataSize);
290     int        currentSizeTemp      = pmeGpu->archSpecific->splineDataSize;
291     int        currentSizeTempAlloc = pmeGpu->archSpecific->splineDataSizeAlloc;
292     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta, newSplineDataSize, &currentSizeTemp,
293                            &currentSizeTempAlloc, pmeGpu->archSpecific->deviceContext_);
294     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta, newSplineDataSize,
295                            &pmeGpu->archSpecific->splineDataSize, &pmeGpu->archSpecific->splineDataSizeAlloc,
296                            pmeGpu->archSpecific->deviceContext_);
297     // the host side reallocation
298     if (shouldRealloc)
299     {
300         pfree(pmeGpu->staging.h_theta);
301         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_theta), newSplineDataSize * sizeof(float));
302         pfree(pmeGpu->staging.h_dtheta);
303         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_dtheta), newSplineDataSize * sizeof(float));
304     }
305 }
306
307 void pme_gpu_free_spline_data(const PmeGpu* pmeGpu)
308 {
309     /* Two arrays of the same size */
310     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta);
311     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta);
312     pfree(pmeGpu->staging.h_theta);
313     pfree(pmeGpu->staging.h_dtheta);
314 }
315
316 void pme_gpu_realloc_grid_indices(PmeGpu* pmeGpu)
317 {
318     const size_t newIndicesSize = DIM * pmeGpu->nAtomsAlloc;
319     GMX_ASSERT(newIndicesSize > 0, "Bad number of atoms in PME GPU");
320     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices, newIndicesSize,
321                            &pmeGpu->archSpecific->gridlineIndicesSize,
322                            &pmeGpu->archSpecific->gridlineIndicesSizeAlloc,
323                            pmeGpu->archSpecific->deviceContext_);
324     pfree(pmeGpu->staging.h_gridlineIndices);
325     pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_gridlineIndices), newIndicesSize * sizeof(int));
326 }
327
328 void pme_gpu_free_grid_indices(const PmeGpu* pmeGpu)
329 {
330     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices);
331     pfree(pmeGpu->staging.h_gridlineIndices);
332 }
333
334 void pme_gpu_realloc_grids(PmeGpu* pmeGpu)
335 {
336     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
337
338     const int newRealGridSize = kernelParamsPtr->grid.realGridSizePadded[XX]
339                                 * kernelParamsPtr->grid.realGridSizePadded[YY]
340                                 * kernelParamsPtr->grid.realGridSizePadded[ZZ];
341     const int newComplexGridSize = kernelParamsPtr->grid.complexGridSizePadded[XX]
342                                    * kernelParamsPtr->grid.complexGridSizePadded[YY]
343                                    * kernelParamsPtr->grid.complexGridSizePadded[ZZ] * 2;
344     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
345     {
346         // The complex grid size above is multiplied by 2 because it is counted in complex numbers, while all allocations/pointers are float
347         if (pmeGpu->archSpecific->performOutOfPlaceFFT)
348         {
349             /* 2 separate grids */
350             reallocateDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid[gridIndex], newComplexGridSize,
351                                    &pmeGpu->archSpecific->complexGridSize[gridIndex],
352                                    &pmeGpu->archSpecific->complexGridCapacity[gridIndex],
353                                    pmeGpu->archSpecific->deviceContext_);
354             reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid[gridIndex], newRealGridSize,
355                                    &pmeGpu->archSpecific->realGridSize[gridIndex],
356                                    &pmeGpu->archSpecific->realGridCapacity[gridIndex],
357                                    pmeGpu->archSpecific->deviceContext_);
358         }
359         else
360         {
361             /* A single buffer so that any grid will fit */
362             const int newGridsSize = std::max(newRealGridSize, newComplexGridSize);
363             reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid[gridIndex], newGridsSize,
364                                    &pmeGpu->archSpecific->realGridSize[gridIndex],
365                                    &pmeGpu->archSpecific->realGridCapacity[gridIndex],
366                                    pmeGpu->archSpecific->deviceContext_);
367             kernelParamsPtr->grid.d_fourierGrid[gridIndex] = kernelParamsPtr->grid.d_realGrid[gridIndex];
368             pmeGpu->archSpecific->complexGridSize[gridIndex] =
369                     pmeGpu->archSpecific->realGridSize[gridIndex];
370             // the size might get used later for copying the grid
371         }
372     }
373 }
374
375 void pme_gpu_free_grids(const PmeGpu* pmeGpu)
376 {
377     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
378     {
379         if (pmeGpu->archSpecific->performOutOfPlaceFFT)
380         {
381             freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_fourierGrid[gridIndex]);
382         }
383         freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid[gridIndex]);
384     }
385 }
386
387 void pme_gpu_clear_grids(const PmeGpu* pmeGpu)
388 {
389     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
390     {
391         clearDeviceBufferAsync(&pmeGpu->kernelParams->grid.d_realGrid[gridIndex], 0,
392                                pmeGpu->archSpecific->realGridSize[gridIndex],
393                                pmeGpu->archSpecific->pmeStream_);
394     }
395 }
396
397 void pme_gpu_realloc_and_copy_fract_shifts(PmeGpu* pmeGpu)
398 {
399     pme_gpu_free_fract_shifts(pmeGpu);
400
401     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
402
403     const int nx                  = kernelParamsPtr->grid.realGridSize[XX];
404     const int ny                  = kernelParamsPtr->grid.realGridSize[YY];
405     const int nz                  = kernelParamsPtr->grid.realGridSize[ZZ];
406     const int cellCount           = c_pmeNeighborUnitcellCount;
407     const int gridDataOffset[DIM] = { 0, cellCount * nx, cellCount * (nx + ny) };
408
409     memcpy(kernelParamsPtr->grid.tablesOffsets, &gridDataOffset, sizeof(gridDataOffset));
410
411     const int newFractShiftsSize = cellCount * (nx + ny + nz);
412
413     initParamLookupTable(&kernelParamsPtr->grid.d_fractShiftsTable,
414                          &kernelParamsPtr->fractShiftsTableTexture, pmeGpu->common->fsh.data(),
415                          newFractShiftsSize, pmeGpu->archSpecific->deviceContext_);
416
417     initParamLookupTable(&kernelParamsPtr->grid.d_gridlineIndicesTable,
418                          &kernelParamsPtr->gridlineIndicesTableTexture, pmeGpu->common->nn.data(),
419                          newFractShiftsSize, pmeGpu->archSpecific->deviceContext_);
420 }
421
422 void pme_gpu_free_fract_shifts(const PmeGpu* pmeGpu)
423 {
424     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
425 #if GMX_GPU_CUDA
426     destroyParamLookupTable(&kernelParamsPtr->grid.d_fractShiftsTable,
427                             kernelParamsPtr->fractShiftsTableTexture);
428     destroyParamLookupTable(&kernelParamsPtr->grid.d_gridlineIndicesTable,
429                             kernelParamsPtr->gridlineIndicesTableTexture);
430 #elif GMX_GPU_OPENCL
431     freeDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable);
432     freeDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable);
433 #endif
434 }
435
436 bool pme_gpu_stream_query(const PmeGpu* pmeGpu)
437 {
438     return haveStreamTasksCompleted(pmeGpu->archSpecific->pmeStream_);
439 }
440
441 void pme_gpu_copy_input_gather_grid(const PmeGpu* pmeGpu, const float* h_grid, const int gridIndex)
442 {
443     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid[gridIndex], h_grid, 0,
444                        pmeGpu->archSpecific->realGridSize[gridIndex],
445                        pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
446 }
447
448 void pme_gpu_copy_output_spread_grid(const PmeGpu* pmeGpu, float* h_grid, const int gridIndex)
449 {
450     copyFromDeviceBuffer(h_grid, &pmeGpu->kernelParams->grid.d_realGrid[gridIndex], 0,
451                          pmeGpu->archSpecific->realGridSize[gridIndex],
452                          pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
453     pmeGpu->archSpecific->syncSpreadGridD2H.markEvent(pmeGpu->archSpecific->pmeStream_);
454 }
455
456 void pme_gpu_copy_output_spread_atom_data(const PmeGpu* pmeGpu)
457 {
458     const size_t splinesCount    = DIM * pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order;
459     auto*        kernelParamsPtr = pmeGpu->kernelParams.get();
460     copyFromDeviceBuffer(pmeGpu->staging.h_dtheta, &kernelParamsPtr->atoms.d_dtheta, 0, splinesCount,
461                          pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
462     copyFromDeviceBuffer(pmeGpu->staging.h_theta, &kernelParamsPtr->atoms.d_theta, 0, splinesCount,
463                          pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
464     copyFromDeviceBuffer(pmeGpu->staging.h_gridlineIndices, &kernelParamsPtr->atoms.d_gridlineIndices,
465                          0, kernelParamsPtr->atoms.nAtoms * DIM, pmeGpu->archSpecific->pmeStream_,
466                          pmeGpu->settings.transferKind, nullptr);
467 }
468
469 void pme_gpu_copy_input_gather_atom_data(const PmeGpu* pmeGpu)
470 {
471     const size_t splinesCount    = DIM * pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order;
472     auto*        kernelParamsPtr = pmeGpu->kernelParams.get();
473
474     // TODO: could clear only the padding and not the whole thing, but this is test-exclusive code anyway
475     clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_gridlineIndices, 0, pmeGpu->nAtomsAlloc * DIM,
476                            pmeGpu->archSpecific->pmeStream_);
477     clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_dtheta, 0,
478                            pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
479                            pmeGpu->archSpecific->pmeStream_);
480     clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_theta, 0,
481                            pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
482                            pmeGpu->archSpecific->pmeStream_);
483
484     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_dtheta, pmeGpu->staging.h_dtheta, 0, splinesCount,
485                        pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
486     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_theta, pmeGpu->staging.h_theta, 0, splinesCount,
487                        pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
488     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_gridlineIndices, pmeGpu->staging.h_gridlineIndices,
489                        0, kernelParamsPtr->atoms.nAtoms * DIM, pmeGpu->archSpecific->pmeStream_,
490                        pmeGpu->settings.transferKind, nullptr);
491 }
492
493 void pme_gpu_sync_spread_grid(const PmeGpu* pmeGpu)
494 {
495     pmeGpu->archSpecific->syncSpreadGridD2H.waitForEvent();
496 }
497
498 /*! \brief Internal GPU initialization for PME.
499  *
500  * \param[in]  pmeGpu         GPU PME data.
501  * \param[in]  deviceContext  GPU context.
502  * \param[in]  deviceStream   GPU stream.
503  */
504 static void pme_gpu_init_internal(PmeGpu* pmeGpu, const DeviceContext& deviceContext, const DeviceStream& deviceStream)
505 {
506     /* Allocate the target-specific structures */
507     pmeGpu->archSpecific.reset(new PmeGpuSpecific(deviceContext, deviceStream));
508     pmeGpu->kernelParams.reset(new PmeGpuKernelParams());
509
510     pmeGpu->archSpecific->performOutOfPlaceFFT = true;
511     /* This should give better performance, according to the cuFFT documentation.
512      * The performance seems to be the same though.
513      * TODO: PME could also try to pick up nice grid sizes (with factors of 2, 3, 5, 7).
514      */
515
516 #if GMX_GPU_CUDA
517     pmeGpu->maxGridWidthX = deviceContext.deviceInfo().prop.maxGridSize[0];
518 #else
519     // Use this path for any non-CUDA GPU acceleration
520     // TODO: is there really no global work size limit in OpenCL?
521     pmeGpu->maxGridWidthX = INT32_MAX / 2;
522 #endif
523 }
524
525 void pme_gpu_reinit_3dfft(const PmeGpu* pmeGpu)
526 {
527     if (pme_gpu_settings(pmeGpu).performGPUFFT)
528     {
529         pmeGpu->archSpecific->fftSetup.resize(0);
530         for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
531         {
532             pmeGpu->archSpecific->fftSetup.push_back(std::make_unique<GpuParallel3dFft>(pmeGpu, gridIndex));
533         }
534     }
535 }
536
537 void pme_gpu_destroy_3dfft(const PmeGpu* pmeGpu)
538 {
539     pmeGpu->archSpecific->fftSetup.resize(0);
540 }
541
542 void pme_gpu_getEnergyAndVirial(const gmx_pme_t& pme, const float lambda, PmeOutput* output)
543 {
544     const PmeGpu* pmeGpu = pme.gpu;
545
546     GMX_ASSERT(lambda == 1.0 || pmeGpu->common->ngrids == 2,
547                "Invalid combination of lambda and number of grids");
548
549     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
550     {
551         for (int j = 0; j < c_virialAndEnergyCount; j++)
552         {
553             GMX_ASSERT(std::isfinite(pmeGpu->staging.h_virialAndEnergy[gridIndex][j]),
554                        "PME GPU produces incorrect energy/virial.");
555         }
556     }
557     for (int dim1 = 0; dim1 < DIM; dim1++)
558     {
559         for (int dim2 = 0; dim2 < DIM; dim2++)
560         {
561             output->coulombVirial_[dim1][dim2] = 0;
562         }
563     }
564     output->coulombEnergy_ = 0;
565     float scale            = 1.0;
566     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
567     {
568         if (pmeGpu->common->ngrids == 2)
569         {
570             scale = gridIndex == 0 ? (1.0 - lambda) : lambda;
571         }
572         output->coulombVirial_[XX][XX] +=
573                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][0];
574         output->coulombVirial_[YY][YY] +=
575                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][1];
576         output->coulombVirial_[ZZ][ZZ] +=
577                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][2];
578         output->coulombVirial_[XX][YY] +=
579                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][3];
580         output->coulombVirial_[YY][XX] +=
581                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][3];
582         output->coulombVirial_[XX][ZZ] +=
583                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][4];
584         output->coulombVirial_[ZZ][XX] +=
585                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][4];
586         output->coulombVirial_[YY][ZZ] +=
587                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][5];
588         output->coulombVirial_[ZZ][YY] +=
589                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][5];
590         output->coulombEnergy_ += scale * 0.5F * pmeGpu->staging.h_virialAndEnergy[gridIndex][6];
591     }
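    /* With FEP, the energy accumulated above is E(lambda) = (1 - lambda) * E_A + lambda * E_B
     * (see the per-grid scale factors), so dE/dlambda below is simply E_B - E_A, with the same
     * 0.5 factor as in the per-grid energy term. */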
592     if (pmeGpu->common->ngrids > 1)
593     {
594         output->coulombDvdl_ = 0.5F
595                                * (pmeGpu->staging.h_virialAndEnergy[FEP_STATE_B][6]
596                                   - pmeGpu->staging.h_virialAndEnergy[FEP_STATE_A][6]);
597     }
598 }
599
600 /*! \brief Sets the force-related members in \p output
601  *
602  * \param[in]   pmeGpu      PME GPU data structure
603  * \param[out]  output      Pointer to PME output data structure
604  */
605 static void pme_gpu_getForceOutput(PmeGpu* pmeGpu, PmeOutput* output)
606 {
607     output->haveForceOutput_ = !pmeGpu->settings.useGpuForceReduction;
608     if (output->haveForceOutput_)
609     {
610         output->forces_ = pmeGpu->staging.h_forces;
611     }
612 }
613
614 PmeOutput pme_gpu_getOutput(const gmx_pme_t& pme, const bool computeEnergyAndVirial, const real lambdaQ)
615 {
616     PmeGpu* pmeGpu = pme.gpu;
617
618     PmeOutput output;
619
620     pme_gpu_getForceOutput(pmeGpu, &output);
621
622     if (computeEnergyAndVirial)
623     {
624         if (pme_gpu_settings(pmeGpu).performGPUSolve)
625         {
626             pme_gpu_getEnergyAndVirial(pme, lambdaQ, &output);
627         }
628         else
629         {
630             get_pme_ener_vir_q(pme.solve_work, pme.nthread, &output);
631         }
632     }
633     return output;
634 }
635
636 void pme_gpu_update_input_box(PmeGpu gmx_unused* pmeGpu, const matrix gmx_unused box)
637 {
638 #if GMX_DOUBLE
639     GMX_THROW(gmx::NotImplementedError("PME is implemented for single-precision only on GPU"));
640 #else
641     matrix scaledBox;
642     pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
643     auto* kernelParamsPtr              = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
644     kernelParamsPtr->current.boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
645     GMX_ASSERT(kernelParamsPtr->current.boxVolume != 0.0F, "Zero volume of the unit cell");
646     matrix recipBox;
647     gmx::invertBoxMatrix(scaledBox, recipBox);
648
649     /* The GPU recipBox is transposed as compared to the CPU recipBox.
650      * Spread uses matrix columns (while solve and gather use rows).
651      * There is no particular reason for this; it might be further rethought/optimized for better access patterns.
652      */
653     const real newRecipBox[DIM][DIM] = { { recipBox[XX][XX], recipBox[YY][XX], recipBox[ZZ][XX] },
654                                          { 0.0, recipBox[YY][YY], recipBox[ZZ][YY] },
655                                          { 0.0, 0.0, recipBox[ZZ][ZZ] } };
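    /* For an orthorhombic box diag(Lx, Ly, Lz), for example, recipBox is simply
     * diag(1/Lx, 1/Ly, 1/Lz) and the transposition above has no effect; it only matters
     * for triclinic boxes, where it moves the off-diagonal elements across the diagonal. */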
656     memcpy(kernelParamsPtr->current.recipBox, newRecipBox, sizeof(matrix));
657 #endif
658 }
659
660 /*! \libinternal \brief
661  * (Re-)initializes all the PME GPU data related to the grid size and cut-off.
662  *
663  * \param[in] pmeGpu            The PME GPU structure.
664  */
665 static void pme_gpu_reinit_grids(PmeGpu* pmeGpu)
666 {
667     auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
668
669     GMX_ASSERT(
670             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
671             "Only one (normal Coulomb PME) or two (FEP Coulomb PME) PME grids can be used on GPU");
672
673     kernelParamsPtr->grid.ewaldFactor =
674             (M_PI * M_PI) / (pmeGpu->common->ewaldcoeff_q * pmeGpu->common->ewaldcoeff_q);
675     /* The grid size variants */
676     for (int i = 0; i < DIM; i++)
677     {
678         kernelParamsPtr->grid.realGridSize[i] = pmeGpu->common->nk[i];
679         kernelParamsPtr->grid.realGridSizeFP[i] =
680                 static_cast<float>(kernelParamsPtr->grid.realGridSize[i]);
681         kernelParamsPtr->grid.realGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
682
683         // The complex grid currently uses no padding;
684         // if it starts to do so, then another test should be added for that
685         kernelParamsPtr->grid.complexGridSize[i]       = kernelParamsPtr->grid.realGridSize[i];
686         kernelParamsPtr->grid.complexGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
687     }
688     /* FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
689     if (!pme_gpu_settings(pmeGpu).performGPUFFT)
690     {
691         // This allows the GPU spreading grid and the CPU fftgrid to have the same layout, so that we can copy the data directly
692         kernelParamsPtr->grid.realGridSizePadded[ZZ] =
693                 (kernelParamsPtr->grid.realGridSize[ZZ] / 2 + 1) * 2;
694     }
695     /* GPU FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
696     kernelParamsPtr->grid.complexGridSize[ZZ] /= 2;
697     kernelParamsPtr->grid.complexGridSize[ZZ]++;
698     kernelParamsPtr->grid.complexGridSizePadded[ZZ] = kernelParamsPtr->grid.complexGridSize[ZZ];
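    /* For example, a real grid of 60 points along Z gives a complex grid of 60 / 2 + 1 = 31
     * points; when the FFT is instead done on the CPU (the branch above), the real Z dimension
     * is padded to (60 / 2 + 1) * 2 = 62 floats so that the two layouts match. */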
699
700     pme_gpu_realloc_and_copy_fract_shifts(pmeGpu);
701     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
702     {
703         pme_gpu_realloc_and_copy_bspline_values(pmeGpu, gridIndex);
704     }
705
706     pme_gpu_realloc_grids(pmeGpu);
707     pme_gpu_reinit_3dfft(pmeGpu);
708 }
709
710 /* Several GPU functions that refer to the CPU PME data live here.
711  * We would like to keep these away from the GPU-framework-specific code for clarity,
712  * and also to avoid compilation issues with MPI.
713  */
714
715 /*! \libinternal \brief
716  * Copies everything useful from the PME CPU to the PME GPU structure.
717  * The goal is to minimize interaction with the PME CPU structure in the GPU code.
718  *
719  * \param[in] pme         The PME structure.
720  */
721 static void pme_gpu_copy_common_data_from(const gmx_pme_t* pme)
722 {
723     /* TODO: Consider refactoring the CPU PME code to use the same structure,
724      * so that this function becomes 2 lines */
725     PmeGpu* pmeGpu               = pme->gpu;
726     pmeGpu->common->ngrids       = pme->bFEP_q ? 2 : 1;
727     pmeGpu->common->epsilon_r    = pme->epsilon_r;
728     pmeGpu->common->ewaldcoeff_q = pme->ewaldcoeff_q;
729     pmeGpu->common->nk[XX]       = pme->nkx;
730     pmeGpu->common->nk[YY]       = pme->nky;
731     pmeGpu->common->nk[ZZ]       = pme->nkz;
732     pmeGpu->common->pme_order    = pme->pme_order;
733     if (pmeGpu->common->pme_order != c_pmeGpuOrder)
734     {
735         GMX_THROW(gmx::NotImplementedError("pme_order != 4 is not implemented!"));
736     }
737     for (int i = 0; i < DIM; i++)
738     {
739         pmeGpu->common->bsp_mod[i].assign(pme->bsp_mod[i], pme->bsp_mod[i] + pmeGpu->common->nk[i]);
740     }
741     const int cellCount = c_pmeNeighborUnitcellCount;
742     pmeGpu->common->fsh.resize(0);
743     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshx, pme->fshx + cellCount * pme->nkx);
744     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshy, pme->fshy + cellCount * pme->nky);
745     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshz, pme->fshz + cellCount * pme->nkz);
746     pmeGpu->common->nn.resize(0);
747     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnx, pme->nnx + cellCount * pme->nkx);
748     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nny, pme->nny + cellCount * pme->nky);
749     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnz, pme->nnz + cellCount * pme->nkz);
750     pmeGpu->common->runMode       = pme->runMode;
751     pmeGpu->common->isRankPmeOnly = !pme->bPPnode;
752     pmeGpu->common->boxScaler     = pme->boxScaler;
753 }
754
755 /*! \libinternal \brief
756  * Uses heuristics to select the best-performing PME spread and gather kernels.
757  *
758  * \param[in,out] pmeGpu         The PME GPU structure.
759  */
760 static void pme_gpu_select_best_performing_pme_spreadgather_kernels(PmeGpu* pmeGpu)
761 {
762     if (GMX_GPU_CUDA && pmeGpu->kernelParams->atoms.nAtoms > c_pmeGpuPerformanceAtomLimit)
763     {
764         pmeGpu->settings.threadsPerAtom     = ThreadsPerAtom::Order;
765         pmeGpu->settings.recalculateSplines = true;
766     }
767     else
768     {
769         pmeGpu->settings.threadsPerAtom     = ThreadsPerAtom::OrderSquared;
770         pmeGpu->settings.recalculateSplines = false;
771     }
772 }
773
774
775 /*! \libinternal \brief
776  * Initializes the PME GPU data at the beginning of the run.
777  * TODO: this should become PmeGpu::PmeGpu()
778  *
779  * \param[in,out] pme            The PME structure.
780  * \param[in]     deviceContext  The GPU context.
781  * \param[in]     deviceStream   The GPU stream.
782  * \param[in,out] pmeGpuProgram  The handle to the program/kernel data created outside (e.g. in unit tests/runner)
783  */
784 static void pme_gpu_init(gmx_pme_t*           pme,
785                          const DeviceContext& deviceContext,
786                          const DeviceStream&  deviceStream,
787                          const PmeGpuProgram* pmeGpuProgram)
788 {
789     pme->gpu       = new PmeGpu();
790     PmeGpu* pmeGpu = pme->gpu;
791     changePinningPolicy(&pmeGpu->staging.h_forces, pme_get_pinning_policy());
792     pmeGpu->common = std::make_shared<PmeShared>();
793
794     /* These settings are set here for the whole run; dynamic ones are set in pme_gpu_reinit() */
795     /* A convenience variable. */
796     pmeGpu->settings.useDecomposition = (pme->nnodes != 1);
797     /* TODO: CPU gather with GPU spread is broken due to different theta/dtheta layout. */
798     pmeGpu->settings.performGPUGather = true;
799     // By default GPU-side reduction is off (explicitly set here for tests, otherwise reset per-step)
800     pmeGpu->settings.useGpuForceReduction = false;
801
802     pme_gpu_set_testing(pmeGpu, false);
803
804     GMX_ASSERT(pmeGpuProgram != nullptr, "GPU kernels must be already compiled");
805     pmeGpu->programHandle_ = pmeGpuProgram;
806
807     pmeGpu->initializedClfftLibrary_ = std::make_unique<gmx::ClfftInitializer>();
808
809     pme_gpu_init_internal(pmeGpu, deviceContext, deviceStream);
810
811     pme_gpu_copy_common_data_from(pme);
812     pme_gpu_alloc_energy_virial(pmeGpu);
813
814     GMX_ASSERT(pmeGpu->common->epsilon_r != 0.0F, "PME GPU: bad electrostatic coefficient");
815
816     auto* kernelParamsPtr               = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
817     kernelParamsPtr->constants.elFactor = ONE_4PI_EPS0 / pmeGpu->common->epsilon_r;
818 }
819
820 void pme_gpu_get_real_grid_sizes(const PmeGpu* pmeGpu, gmx::IVec* gridSize, gmx::IVec* paddedGridSize)
821 {
822     GMX_ASSERT(gridSize != nullptr, "");
823     GMX_ASSERT(paddedGridSize != nullptr, "");
824     GMX_ASSERT(pmeGpu != nullptr, "");
825     auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
826     for (int i = 0; i < DIM; i++)
827     {
828         (*gridSize)[i]       = kernelParamsPtr->grid.realGridSize[i];
829         (*paddedGridSize)[i] = kernelParamsPtr->grid.realGridSizePadded[i];
830     }
831 }
832
833 void pme_gpu_reinit(gmx_pme_t*           pme,
834                     const DeviceContext* deviceContext,
835                     const DeviceStream*  deviceStream,
836                     const PmeGpuProgram* pmeGpuProgram)
837 {
838     GMX_ASSERT(pme != nullptr, "Need valid PME object");
839
840     if (!pme->gpu)
841     {
842         GMX_RELEASE_ASSERT(deviceContext != nullptr,
843                            "Device context can not be nullptr when setting up PME on GPU.");
844         GMX_RELEASE_ASSERT(deviceStream != nullptr,
845                            "Device stream can not be nullptr when setting up PME on GPU.");
846         /* First-time initialization */
847         pme_gpu_init(pme, *deviceContext, *deviceStream, pmeGpuProgram);
848     }
849     else
850     {
851         /* After this call nothing in the GPU code should refer to the gmx_pme_t *pme itself - until the next pme_gpu_reinit */
852         pme_gpu_copy_common_data_from(pme);
853     }
854     /* GPU FFT will only be used with a single rank. */
855     pme->gpu->settings.performGPUFFT =
856             (pme->gpu->common->runMode == PmeRunMode::GPU) && !pme->gpu->settings.useDecomposition;
857     pme->gpu->settings.performGPUSolve = (pme->gpu->common->runMode == PmeRunMode::GPU);
858
859     /* Reinit active timers */
860     pme_gpu_reinit_timings(pme->gpu);
861
862     pme_gpu_reinit_grids(pme->gpu);
863     // Note: if timing the reinit launch overhead becomes more relevant
864     // (e.g. with regular PP-PME re-balancing), we should pass wcycle here.
865     pme_gpu_reinit_computation(pme, nullptr);
866     /* Clear the previous box - doesn't hurt, and forces the PME CPU recipbox
867      * update for mixed mode on grid switch. TODO: use shared recipbox field.
868      */
869     std::memset(pme->gpu->common->previousBox, 0, sizeof(pme->gpu->common->previousBox));
870 }
871
872 void pme_gpu_destroy(PmeGpu* pmeGpu)
873 {
874     /* Free lots of data */
875     pme_gpu_free_energy_virial(pmeGpu);
876     pme_gpu_free_bspline_values(pmeGpu);
877     pme_gpu_free_forces(pmeGpu);
878     pme_gpu_free_coefficients(pmeGpu);
879     pme_gpu_free_spline_data(pmeGpu);
880     pme_gpu_free_grid_indices(pmeGpu);
881     pme_gpu_free_fract_shifts(pmeGpu);
882     pme_gpu_free_grids(pmeGpu);
883
884     pme_gpu_destroy_3dfft(pmeGpu);
885
886     delete pmeGpu;
887 }
888
889 void pme_gpu_reinit_atoms(PmeGpu* pmeGpu, const int nAtoms, const real* chargesA, const real* chargesB)
890 {
891     auto* kernelParamsPtr         = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
892     kernelParamsPtr->atoms.nAtoms = nAtoms;
893     const int  block_size         = pme_gpu_get_atom_data_block_size();
894     const int  nAtomsNewPadded    = ((nAtoms + block_size - 1) / block_size) * block_size;
895     const bool haveToRealloc      = (pmeGpu->nAtomsAlloc < nAtomsNewPadded);
896     pmeGpu->nAtomsAlloc           = nAtomsNewPadded;
897
898 #if GMX_DOUBLE
899     GMX_RELEASE_ASSERT(false, "Only single precision supported");
900     GMX_UNUSED_VALUE(chargesA);
        GMX_UNUSED_VALUE(chargesB);
901 #else
902     int gridIndex = 0;
903     /* This could also be gated on haveToRealloc, but the copy always needs to be performed */
904     pme_gpu_realloc_and_copy_input_coefficients(pmeGpu, reinterpret_cast<const float*>(chargesA), gridIndex);
905     gridIndex++;
906     if (chargesB != nullptr)
907     {
908         pme_gpu_realloc_and_copy_input_coefficients(
909                 pmeGpu, reinterpret_cast<const float*>(chargesB), gridIndex);
910     }
911     else
912     {
913         /* Fill the second set of coefficients with chargesA as well to be able to avoid
914          * conditionals in the GPU kernels */
915         /* FIXME: This should be avoided by making a separate templated version of the
916          * relevant kernel(s) (probably only pme_gather_kernel). That would require a
917          * reduction of the current number of templated parameters of that kernel. */
918         pme_gpu_realloc_and_copy_input_coefficients(
919                 pmeGpu, reinterpret_cast<const float*>(chargesA), gridIndex);
920     }
921 #endif
922
923     if (haveToRealloc)
924     {
925         pme_gpu_realloc_forces(pmeGpu);
926         pme_gpu_realloc_spline_data(pmeGpu);
927         pme_gpu_realloc_grid_indices(pmeGpu);
928     }
929     pme_gpu_select_best_performing_pme_spreadgather_kernels(pmeGpu);
930 }
931
932 /*! \internal \brief
933  * Returns raw timing event from the corresponding GpuRegionTimer (if timings are enabled).
934  * In CUDA, the result can be a nullptr stub, per the GpuRegionTimer implementation.
935  *
936  * \param[in] pmeGpu         The PME GPU data structure.
937  * \param[in] PMEStageId     The PME GPU stage gtPME_ index from the enum in src/gromacs/timing/gpu_timing.h
938  */
939 static CommandEvent* pme_gpu_fetch_timing_event(const PmeGpu* pmeGpu, size_t PMEStageId)
940 {
941     CommandEvent* timingEvent = nullptr;
942     if (pme_gpu_timings_enabled(pmeGpu))
943     {
944         GMX_ASSERT(PMEStageId < pmeGpu->archSpecific->timingEvents.size(),
945                    "Wrong PME GPU timing event index");
946         timingEvent = pmeGpu->archSpecific->timingEvents[PMEStageId].fetchNextEvent();
947     }
948     return timingEvent;
949 }
950
951 void pme_gpu_3dfft(const PmeGpu* pmeGpu, gmx_fft_direction dir, const int grid_index)
952 {
953     int timerId = (dir == GMX_FFT_REAL_TO_COMPLEX) ? gtPME_FFT_R2C : gtPME_FFT_C2R;
954
955     pme_gpu_start_timing(pmeGpu, timerId);
956     pmeGpu->archSpecific->fftSetup[grid_index]->perform3dFft(
957             dir, pme_gpu_fetch_timing_event(pmeGpu, timerId));
958     pme_gpu_stop_timing(pmeGpu, timerId);
959 }
960
961 /*! \brief
962  * Given possibly large \p blockCount, returns a compact 1D or 2D grid for kernel scheduling,
963  * to minimize number of unused blocks.
964  */
965 std::pair<int, int> inline pmeGpuCreateGrid(const PmeGpu* pmeGpu, int blockCount)
966 {
967     // How many maximum widths in X do we need (hopefully just one)
968     const int minRowCount = (blockCount + pmeGpu->maxGridWidthX - 1) / pmeGpu->maxGridWidthX;
969     // Trying to make things even
970     const int colCount = (blockCount + minRowCount - 1) / minRowCount;
971     GMX_ASSERT((colCount * minRowCount - blockCount) >= 0, "pmeGpuCreateGrid: totally wrong");
972     GMX_ASSERT((colCount * minRowCount - blockCount) < minRowCount,
973                "pmeGpuCreateGrid: excessive blocks");
974     return std::pair<int, int>(colCount, minRowCount);
975 }
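/* A worked example of the rounding above: if maxGridWidthX were 65535 and blockCount were
 * 100000, then minRowCount = 2 and colCount = 50000, i.e. a 50000 x 2 launch grid with no
 * unused blocks (50000 * 2 - 100000 = 0). */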
976
977 /*! \brief
978  * Returns a pointer to the appropriate spline and spread kernel based on the input bool values
979  *
980  * \param[in]  pmeGpu                   The PME GPU structure.
981  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
982  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
983  * \param[in]  numGrids                 Number of grids to use. numGrids == 2 if Coulomb is perturbed.
984  *
985  * \return Pointer to CUDA kernel
986  */
987 static auto selectSplineAndSpreadKernelPtr(const PmeGpu*  pmeGpu,
988                                            ThreadsPerAtom threadsPerAtom,
989                                            bool           writeSplinesToGlobal,
990                                            const int      numGrids)
991 {
992     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
993     if (writeSplinesToGlobal)
994     {
995         if (threadsPerAtom == ThreadsPerAtom::Order)
996         {
997             if (numGrids == 2)
998             {
999                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesThPerAtom4Dual;
1000             }
1001             else
1002             {
1003                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesThPerAtom4Single;
1004             }
1005         }
1006         else
1007         {
1008             if (numGrids == 2)
1009             {
1010                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesDual;
1011             }
1012             else
1013             {
1014                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesSingle;
1015             }
1016         }
1017     }
1018     else
1019     {
1020         if (threadsPerAtom == ThreadsPerAtom::Order)
1021         {
1022             if (numGrids == 2)
1023             {
1024                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Dual;
1025             }
1026             else
1027             {
1028                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Single;
1029             }
1030         }
1031         else
1032         {
1033             if (numGrids == 2)
1034             {
1035                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelDual;
1036             }
1037             else
1038             {
1039                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelSingle;
1040             }
1041         }
1042     }
1043
1044     return kernelPtr;
1045 }
1046
1047 /*! \brief
1048  * Returns a pointer to the appropriate spline kernel based on the input bool values
1049  *
1050  * \param[in]  pmeGpu                   The PME GPU structure.
1051  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
1052  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
1053  * \param[in]  numGrids                 Number of grids to use. numGrids == 2 if Coulomb is perturbed.
1054  *
1055  * \return Pointer to CUDA kernel
1056  */
1057 static auto selectSplineKernelPtr(const PmeGpu*  pmeGpu,
1058                                   ThreadsPerAtom threadsPerAtom,
1059                                   bool gmx_unused writeSplinesToGlobal,
1060                                   const int       numGrids)
1061 {
1062     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1063     GMX_ASSERT(
1064             writeSplinesToGlobal,
1065             "Spline data should always be written to global memory when just calculating splines");
1066
1067     if (threadsPerAtom == ThreadsPerAtom::Order)
1068     {
1069         if (numGrids == 2)
1070         {
1071             kernelPtr = pmeGpu->programHandle_->impl_->splineKernelThPerAtom4Dual;
1072         }
1073         else
1074         {
1075             kernelPtr = pmeGpu->programHandle_->impl_->splineKernelThPerAtom4Single;
1076         }
1077     }
1078     else
1079     {
1080         if (numGrids == 2)
1081         {
1082             kernelPtr = pmeGpu->programHandle_->impl_->splineKernelDual;
1083         }
1084         else
1085         {
1086             kernelPtr = pmeGpu->programHandle_->impl_->splineKernelSingle;
1087         }
1088     }
1089     return kernelPtr;
1090 }
1091
1092 /*! \brief
1093  * Returns a pointer to the appropriate spread kernel based on the input bool values
1094  *
1095  * \param[in]  pmeGpu                   The PME GPU structure.
1096  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
1097  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
1098  * \param[in]  numGrids                 Number of grids to use. numGrids == 2 if Coulomb is perturbed.
1099  *
1100  * \return Pointer to CUDA kernel
1101  */
1102 static auto selectSpreadKernelPtr(const PmeGpu*  pmeGpu,
1103                                   ThreadsPerAtom threadsPerAtom,
1104                                   bool           writeSplinesToGlobal,
1105                                   const int      numGrids)
1106 {
1107     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1108     if (writeSplinesToGlobal)
1109     {
1110         if (threadsPerAtom == ThreadsPerAtom::Order)
1111         {
1112             if (numGrids == 2)
1113             {
1114                 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelThPerAtom4Dual;
1115             }
1116             else
            {
1117                 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelThPerAtom4Single;
1118             }
1119         }
1120         else
1121         {
1122             if (numGrids == 2)
1123             {
1124                 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelDual;
1125             }
1126             else
1127             {
1128                 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelSingle;
1129             }
1130         }
1131     }
1132     else
1133     {
1134         /* If we are not saving the spline data, we need to recalculate it
1135            using the spline and spread kernel */
1136         if (threadsPerAtom == ThreadsPerAtom::Order)
1137         {
1138             if (numGrids == 2)
1139             {
1140                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Dual;
1141             }
1142             else
1143             {
1144                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Single;
1145             }
1146         }
1147         else
1148         {
1149             if (numGrids == 2)
1150             {
1151                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelDual;
1152             }
1153             else
1154             {
1155                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelSingle;
1156             }
1157         }
1158     }
1159     return kernelPtr;
1160 }
1161
1162 void pme_gpu_spread(const PmeGpu*         pmeGpu,
1163                     GpuEventSynchronizer* xReadyOnDevice,
1164                     real**                h_grids,
1165                     bool                  computeSplines,
1166                     bool                  spreadCharges,
1167                     const real            lambda)
1168 {
1169     GMX_ASSERT(
1170             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
1171             "Only one (normal Coulomb PME) or two (FEP Coulomb PME) PME grids can be used on GPU");
1172
1173     GMX_ASSERT(computeSplines || spreadCharges,
1174                "PME spline/spread kernel has invalid input (nothing to do)");
1175     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1176     GMX_ASSERT(kernelParamsPtr->atoms.nAtoms > 0, "No atom data in PME GPU spread");
1177
1178     const size_t blockSize = pmeGpu->programHandle_->impl_->spreadWorkGroupSize;
1179
1180     const int order = pmeGpu->common->pme_order;
1181     GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1182     const bool writeGlobal = pmeGpu->settings.copyAllOutputs;
1183     const int  threadsPerAtom =
1184             (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
1185     const bool recalculateSplines = pmeGpu->settings.recalculateSplines;
1186
1187     GMX_ASSERT(!GMX_GPU_OPENCL || pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::OrderSquared,
1188                "Only 16 threads per atom supported in OpenCL");
1189     GMX_ASSERT(!GMX_GPU_OPENCL || !recalculateSplines,
1190                "Recalculating splines not supported in OpenCL");
1191
1192     const int atomsPerBlock = blockSize / threadsPerAtom;
1193
1194     // TODO: pick smaller block size in runtime if needed
1195     // (e.g. on 660 Ti where 50% occupancy is ~25% faster than 100% occupancy with RNAse (~17.8k atoms))
1196     // If doing so, change atomsPerBlock in the kernels as well.
1197     // TODO: test varying block sizes on modern architectures as well
1198     // TODO: also consider using cudaFuncSetCacheConfig() for preferring shared memory on older architectures
1199     // (for spline data mostly)
1200     GMX_ASSERT(!(c_pmeAtomDataBlockSize % atomsPerBlock),
1201                "inconsistent atom data padding vs. spreading block size");
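    // As an illustration (assuming, for instance, a spread work-group size of 256 threads):
    // with order = 4 and order * order = 16 threads per atom, atomsPerBlock = 256 / 16 = 16,
    // and c_pmeAtomDataBlockSize = 64 is divisible by it, satisfying the assertion above.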
1202
1203     // Ensure that coordinates are ready on the device before launching spread;
1204     // only needed with CUDA on PP+PME ranks, not on separate PME ranks, in unit tests,
1205     // or in OpenCL, as these cases use a single stream (hence xReadyOnDevice == nullptr).
1206     GMX_ASSERT(!GMX_GPU_CUDA || xReadyOnDevice != nullptr || pmeGpu->common->isRankPmeOnly
1207                        || pme_gpu_settings(pmeGpu).copyAllOutputs,
1208                "Need a valid coordinate synchronizer on PP+PME ranks with CUDA.");
1209
1210     if (xReadyOnDevice)
1211     {
1212         xReadyOnDevice->enqueueWaitEvent(pmeGpu->archSpecific->pmeStream_);
1213     }
1214
1215     const int blockCount = pmeGpu->nAtomsAlloc / atomsPerBlock;
1216     auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1217
1218     if (pmeGpu->common->ngrids == 1)
1219     {
1220         kernelParamsPtr->current.scale = 1.0;
1221     }
1222     else
1223     {
1224         kernelParamsPtr->current.scale = 1.0 - lambda;
1225     }
1226
1227     KernelLaunchConfig config;
1228     config.blockSize[0] = order;
1229     config.blockSize[1] = (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? 1 : order);
1230     config.blockSize[2] = atomsPerBlock;
1231     config.gridSize[0]  = dimGrid.first;
1232     config.gridSize[1]  = dimGrid.second;
1233
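    // Select the kernel flavor matching the requested work (splines, spread, or both), the
    // threads-per-atom layout, whether spline data is stored in global memory, and the grid count.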
1234     int                                timingId;
1235     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1236     if (computeSplines)
1237     {
1238         if (spreadCharges)
1239         {
1240             timingId  = gtPME_SPLINEANDSPREAD;
1241             kernelPtr = selectSplineAndSpreadKernelPtr(pmeGpu, pmeGpu->settings.threadsPerAtom,
1242                                                        writeGlobal || (!recalculateSplines),
1243                                                        pmeGpu->common->ngrids);
1244         }
1245         else
1246         {
1247             timingId  = gtPME_SPLINE;
1248             kernelPtr = selectSplineKernelPtr(pmeGpu, pmeGpu->settings.threadsPerAtom,
1249                                               writeGlobal || (!recalculateSplines),
1250                                               pmeGpu->common->ngrids);
1251         }
1252     }
1253     else
1254     {
1255         timingId  = gtPME_SPREAD;
1256         kernelPtr = selectSpreadKernelPtr(pmeGpu, pmeGpu->settings.threadsPerAtom,
1257                                           writeGlobal || (!recalculateSplines), pmeGpu->common->ngrids);
1258     }
1259
1260
1261     pme_gpu_start_timing(pmeGpu, timingId);
1262     auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
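    // When device buffers can be embedded into the kernel parameter structure, passing the
    // structure alone is sufficient; otherwise each device buffer is passed as an explicit
    // kernel argument.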
1263 #if c_canEmbedBuffers
1264     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1265 #else
1266     const auto kernelArgs = prepareGpuKernelArguments(
1267             kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->atoms.d_theta,
1268             &kernelParamsPtr->atoms.d_dtheta, &kernelParamsPtr->atoms.d_gridlineIndices,
1269             &kernelParamsPtr->grid.d_realGrid[FEP_STATE_A], &kernelParamsPtr->grid.d_realGrid[FEP_STATE_B],
1270             &kernelParamsPtr->grid.d_fractShiftsTable, &kernelParamsPtr->grid.d_gridlineIndicesTable,
1271             &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_A],
1272             &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_B], &kernelParamsPtr->atoms.d_coordinates);
1273 #endif
1274
1275     launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent,
1276                     "PME spline/spread", kernelArgs);
1277     pme_gpu_stop_timing(pmeGpu, timingId);
1278
1279     const auto& settings    = pmeGpu->settings;
1280     const bool copyBackGrid = spreadCharges && (!settings.performGPUFFT || settings.copyAllOutputs);
1281     if (copyBackGrid)
1282     {
1283         for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
1284         {
1285             float* h_grid = h_grids[gridIndex];
1286             pme_gpu_copy_output_spread_grid(pmeGpu, h_grid, gridIndex);
1287         }
1288     }
1289     const bool copyBackAtomData =
1290             computeSplines && (!settings.performGPUGather || settings.copyAllOutputs);
1291     if (copyBackAtomData)
1292     {
1293         pme_gpu_copy_output_spread_atom_data(pmeGpu);
1294     }
1295 }
1296
1297 void pme_gpu_solve(const PmeGpu* pmeGpu,
1298                    const int     gridIndex,
1299                    t_complex*    h_grid,
1300                    GridOrdering  gridOrdering,
1301                    bool          computeEnergyAndVirial)
1302 {
1303     GMX_ASSERT(
1304             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
1305             "Only one (normal Coulomb PME) or two (FEP Coulomb PME) PME grids can be used on GPU");
1306     GMX_ASSERT(gridIndex < pmeGpu->common->ngrids,
1307                "Invalid combination of gridIndex and number of grids");
1308
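    // The complex grid is staged through the host when the FFT runs on the CPU, or when all
    // outputs are copied back (e.g. for tests).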
1309     const auto& settings               = pmeGpu->settings;
1310     const bool  copyInputAndOutputGrid = !settings.performGPUFFT || settings.copyAllOutputs;
1311
1312     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1313
1314     float* h_gridFloat = reinterpret_cast<float*>(h_grid);
1315     if (copyInputAndOutputGrid)
1316     {
1317         copyToDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid[gridIndex], h_gridFloat, 0,
1318                            pmeGpu->archSpecific->complexGridSize[gridIndex],
1319                            pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
1320     }
1321
1322     int majorDim = -1, middleDim = -1, minorDim = -1;
1323     switch (gridOrdering)
1324     {
1325         case GridOrdering::YZX:
1326             majorDim  = YY;
1327             middleDim = ZZ;
1328             minorDim  = XX;
1329             break;
1330
1331         case GridOrdering::XYZ:
1332             majorDim  = XX;
1333             middleDim = YY;
1334             minorDim  = ZZ;
1335             break;
1336
1337         default: GMX_ASSERT(false, "Implement grid ordering here and below for the kernel launch");
1338     }
1339
1340     const int maxBlockSize = pmeGpu->programHandle_->impl_->solveMaxWorkGroupSize;
1341
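    // A block processes either several whole grid lines along the minor dimension (when a grid
    // line fits into a block) or a fraction of a single grid line (when it does not).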
1342     const int gridLineSize      = pmeGpu->kernelParams->grid.complexGridSize[minorDim];
1343     const int gridLinesPerBlock = std::max(maxBlockSize / gridLineSize, 1);
1344     const int blocksPerGridLine = (gridLineSize + maxBlockSize - 1) / maxBlockSize;
1345     int       cellsPerBlock;
1346     if (blocksPerGridLine == 1)
1347     {
1348         cellsPerBlock = gridLineSize * gridLinesPerBlock;
1349     }
1350     else
1351     {
1352         cellsPerBlock = (gridLineSize + blocksPerGridLine - 1) / blocksPerGridLine;
1353     }
1354     const int warpSize  = pmeGpu->programHandle_->warpSize();
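    // Round the block size up to a full warp so that shuffle operations produce defined results.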
1355     const int blockSize = (cellsPerBlock + warpSize - 1) / warpSize * warpSize;
1356
1357     static_assert(!GMX_GPU_CUDA || c_solveMaxWarpsPerBlock / 2 >= 4,
1358                   "The CUDA solve energy kernels need at least 4 warps. "
1359                   "Here we launch at least half of the max warps.");
1360
1361     KernelLaunchConfig config;
1362     config.blockSize[0] = blockSize;
1363     config.gridSize[0]  = blocksPerGridLine;
1364     // rounding up the block count so that all grid lines along the middle dimension are covered
1365     config.gridSize[1] = (pmeGpu->kernelParams->grid.complexGridSize[middleDim] + gridLinesPerBlock - 1)
1366                          / gridLinesPerBlock;
1367     config.gridSize[2] = pmeGpu->kernelParams->grid.complexGridSize[majorDim];
1368
1369     int                                timingId  = gtPME_SOLVE;
1370     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1371     if (gridOrdering == GridOrdering::YZX)
1372     {
1373         if (gridIndex == 0)
1374         {
1375             kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveYZXEnergyKernelA
1376                                                : pmeGpu->programHandle_->impl_->solveYZXKernelA;
1377         }
1378         else
1379         {
1380             kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveYZXEnergyKernelB
1381                                                : pmeGpu->programHandle_->impl_->solveYZXKernelB;
1382         }
1383     }
1384     else if (gridOrdering == GridOrdering::XYZ)
1385     {
1386         if (gridIndex == 0)
1387         {
1388             kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveXYZEnergyKernelA
1389                                                : pmeGpu->programHandle_->impl_->solveXYZKernelA;
1390         }
1391         else
1392         {
1393             kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveXYZEnergyKernelB
1394                                                : pmeGpu->programHandle_->impl_->solveXYZKernelB;
1395         }
1396     }
1397
1398     pme_gpu_start_timing(pmeGpu, timingId);
1399     auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1400 #if c_canEmbedBuffers
1401     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1402 #else
1403     const auto kernelArgs = prepareGpuKernelArguments(
1404             kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->grid.d_splineModuli[gridIndex],
1405             &kernelParamsPtr->constants.d_virialAndEnergy[gridIndex],
1406             &kernelParamsPtr->grid.d_fourierGrid[gridIndex]);
1407 #endif
1408     launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME solve",
1409                     kernelArgs);
1410     pme_gpu_stop_timing(pmeGpu, timingId);
1411
1412     if (computeEnergyAndVirial)
1413     {
1414         copyFromDeviceBuffer(pmeGpu->staging.h_virialAndEnergy[gridIndex],
1415                              &kernelParamsPtr->constants.d_virialAndEnergy[gridIndex], 0,
1416                              c_virialAndEnergyCount, pmeGpu->archSpecific->pmeStream_,
1417                              pmeGpu->settings.transferKind, nullptr);
1418     }
1419
1420     if (copyInputAndOutputGrid)
1421     {
1422         copyFromDeviceBuffer(h_gridFloat, &kernelParamsPtr->grid.d_fourierGrid[gridIndex], 0,
1423                              pmeGpu->archSpecific->complexGridSize[gridIndex],
1424                              pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
1425     }
1426 }
1427
1428 /*! \brief
1429  * Returns a pointer to the appropriate gather kernel based on the input values.
1430  *
1431  * \param[in]  pmeGpu                   The PME GPU structure.
1432  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom.
1433  * \param[in]  readSplinesFromGlobal    Bool controlling if we should read spline data from global memory.
1434  * \param[in]  numGrids                 Number of grids to use. numGrids == 2 if Coulomb is perturbed.
1435  *
1436  * \return Pointer to CUDA kernel
1437  */
1438 inline auto selectGatherKernelPtr(const PmeGpu*  pmeGpu,
1439                                   ThreadsPerAtom threadsPerAtom,
1440                                   bool           readSplinesFromGlobal,
1441                                   const int      numGrids)
1443 {
1444     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1445
1446     if (readSplinesFromGlobal)
1447     {
1448         if (threadsPerAtom == ThreadsPerAtom::Order)
1449         {
1450             if (numGrids == 2)
1451             {
1452                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesThPerAtom4Dual;
1453             }
1454             else
1455             {
1456                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesThPerAtom4Single;
1457             }
1458         }
1459         else
1460         {
1461             if (numGrids == 2)
1462             {
1463                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesDual;
1464             }
1465             else
1466             {
1467                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesSingle;
1468             }
1469         }
1470     }
1471     else
1472     {
1473         if (threadsPerAtom == ThreadsPerAtom::Order)
1474         {
1475             if (numGrids == 2)
1476             {
1477                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelThPerAtom4Dual;
1478             }
1479             else
1480             {
1481                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelThPerAtom4Single;
1482             }
1483         }
1484         else
1485         {
1486             if (numGrids == 2)
1487             {
1488                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelDual;
1489             }
1490             else
1491             {
1492                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelSingle;
1493             }
1494         }
1495     }
1496     return kernelPtr;
1497 }
1498
1499 void pme_gpu_gather(PmeGpu* pmeGpu, real** h_grids, const float lambda)
1500 {
1501     GMX_ASSERT(
1502             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
1503             "Only one (normal Coulomb PME) or two (FEP Coulomb PME) PME grids can be used on GPU");
1504
1505     const auto& settings = pmeGpu->settings;
1506
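    // If the FFT and solve ran on the host (or all outputs are staged for tests), the real-space
    // grids have to be uploaded to the device before gathering.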
1507     if (!settings.performGPUFFT || settings.copyAllOutputs)
1508     {
1509         for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
1510         {
1511             float* h_grid = h_grids[gridIndex];
1512             pme_gpu_copy_input_gather_grid(pmeGpu, h_grid, gridIndex);
1513         }
1514     }
1515
1516     if (settings.copyAllOutputs)
1517     {
1518         pme_gpu_copy_input_gather_atom_data(pmeGpu);
1519     }
1520
1521     /* copyAllOutputs is set in unit tests; it makes the gather kernel read spline data from global memory */
1522     const bool   readGlobal = pmeGpu->settings.copyAllOutputs;
1523     const size_t blockSize  = pmeGpu->programHandle_->impl_->gatherWorkGroupSize;
1524     const int    order      = pmeGpu->common->pme_order;
1525     GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1526     const int threadsPerAtom =
1527             (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
1528     const bool recalculateSplines = pmeGpu->settings.recalculateSplines;
1529
1530     GMX_ASSERT(!GMX_GPU_OPENCL || pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::OrderSquared,
1531                "Only 16 threads per atom are supported in OpenCL");
1532     GMX_ASSERT(!GMX_GPU_OPENCL || !recalculateSplines,
1533                "Recalculating splines not supported in OpenCL");
1534
1535     const int atomsPerBlock = blockSize / threadsPerAtom;
1536
1537     GMX_ASSERT(!(c_pmeAtomDataBlockSize % atomsPerBlock),
1538                "inconsistent atom data padding vs. gathering block size");
1539
1540     const int blockCount = pmeGpu->nAtomsAlloc / atomsPerBlock;
1541     auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1542
1543     KernelLaunchConfig config;
1544     config.blockSize[0] = order;
1545     config.blockSize[1] = (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? 1 : order);
1546     config.blockSize[2] = atomsPerBlock;
1547     config.gridSize[0]  = dimGrid.first;
1548     config.gridSize[1]  = dimGrid.second;
1549
1550     // TODO test different cache configs
1551
1552     int                                timingId = gtPME_GATHER;
1553     PmeGpuProgramImpl::PmeKernelHandle kernelPtr =
1554             selectGatherKernelPtr(pmeGpu, pmeGpu->settings.threadsPerAtom,
1555                                   readGlobal || (!recalculateSplines), pmeGpu->common->ngrids);
1556     // TODO design kernel selection getters and make PmeGpu a friend of PmeGpuProgramImpl
1557
1558     pme_gpu_start_timing(pmeGpu, timingId);
1559     auto* timingEvent     = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1560     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1561     if (pmeGpu->common->ngrids == 1)
1562     {
1563         kernelParamsPtr->current.scale = 1.0;
1564     }
1565     else
1566     {
1567         kernelParamsPtr->current.scale = 1.0 - lambda;
1568     }
1569
1570 #if c_canEmbedBuffers
1571     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1572 #else
1573     const auto kernelArgs = prepareGpuKernelArguments(
1574             kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_A],
1575             &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_B],
1576             &kernelParamsPtr->grid.d_realGrid[FEP_STATE_A], &kernelParamsPtr->grid.d_realGrid[FEP_STATE_B],
1577             &kernelParamsPtr->atoms.d_theta, &kernelParamsPtr->atoms.d_dtheta,
1578             &kernelParamsPtr->atoms.d_gridlineIndices, &kernelParamsPtr->atoms.d_forces);
1579 #endif
1580     launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME gather",
1581                     kernelArgs);
1582     pme_gpu_stop_timing(pmeGpu, timingId);
1583
1584     if (pmeGpu->settings.useGpuForceReduction)
1585     {
1586         pmeGpu->archSpecific->pmeForcesReady.markEvent(pmeGpu->archSpecific->pmeStream_);
1587     }
1588     else
1589     {
1590         pme_gpu_copy_output_forces(pmeGpu);
1591     }
1592 }
1593
1594 void* pme_gpu_get_kernelparam_forces(const PmeGpu* pmeGpu)
1595 {
1596     if (pmeGpu && pmeGpu->kernelParams)
1597     {
1598         return pmeGpu->kernelParams->atoms.d_forces;
1599     }
1600     else
1601     {
1602         return nullptr;
1603     }
1604 }
1605
1606 void pme_gpu_set_kernelparam_coordinates(const PmeGpu* pmeGpu, DeviceBuffer<gmx::RVec> d_x)
1607 {
1608     GMX_ASSERT(pmeGpu && pmeGpu->kernelParams,
1609                "PME GPU device buffer cannot be set in non-GPU builds or before the GPU PME was "
1610                "initialized.");
1611
1612     GMX_ASSERT(checkDeviceBuffer(d_x, pmeGpu->kernelParams->atoms.nAtoms),
1613                "The device-side coordinate buffer cannot be set.");
1614
1615     pmeGpu->kernelParams->atoms.d_coordinates = d_x;
1616 }
1617
1618 GpuEventSynchronizer* pme_gpu_get_forces_ready_synchronizer(const PmeGpu* pmeGpu)
1619 {
1620     if (pmeGpu && pmeGpu->kernelParams)
1621     {
1622         return &pmeGpu->archSpecific->pmeForcesReady;
1623     }
1624     else
1625     {
1626         return nullptr;
1627     }
1628 }