src/gromacs/ewald/pme_gpu_internal.cpp (alexxy/gromacs.git @ 96dc1f4db95a1b1953fca13a89f6c664a50e5e54)
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2016,2017,2018,2019,2020 by the GROMACS development team.
5  * Copyright (c) 2021, by the GROMACS development team, led by
6  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
7  * and including many others, as listed in the AUTHORS file in the
8  * top-level source directory and at http://www.gromacs.org.
9  *
10  * GROMACS is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public License
12  * as published by the Free Software Foundation; either version 2.1
13  * of the License, or (at your option) any later version.
14  *
15  * GROMACS is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with GROMACS; if not, see
22  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
23  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
24  *
25  * If you want to redistribute modifications to GROMACS, please
26  * consider that scientific software is very special. Version
27  * control is crucial - bugs must be traceable. We will be happy to
28  * consider code for inclusion in the official distribution, but
29  * derived work must not be called official GROMACS. Details are found
30  * in the README & COPYING files - if they are missing, get the
31  * official version at http://www.gromacs.org.
32  *
33  * To help us fund GROMACS development, we humbly ask that you cite
34  * the research papers on the package. Check out http://www.gromacs.org.
35  */
36
37 /*! \internal \file
38  *
39  * \brief This file contains internal function implementations
40  * for performing the PME calculations on GPU.
41  *
42  * Note that this file is compiled as regular C++ source in OpenCL builds, but
43  * it is treated as CUDA source in CUDA-enabled GPU builds.
44  *
45  * \author Aleksei Iupinov <a.yupinov@gmail.com>
46  * \ingroup module_ewald
47  */
48
49 #include "gmxpre.h"
50
51 #include "pme_gpu_internal.h"
52
53 #include "config.h"
54
55 #include <list>
56 #include <memory>
57 #include <string>
58
59 #include "gromacs/ewald/ewald_utils.h"
60 #include "gromacs/fft/gpu_3dfft.h"
61 #include "gromacs/gpu_utils/device_context.h"
62 #include "gromacs/gpu_utils/device_stream.h"
63 #include "gromacs/gpu_utils/gpu_utils.h"
64 #include "gromacs/gpu_utils/pmalloc.h"
65 #if GMX_GPU_SYCL
66 #    include "gromacs/gpu_utils/syclutils.h"
67 #endif
68 #include "gromacs/hardware/device_information.h"
69 #include "gromacs/math/invertmatrix.h"
70 #include "gromacs/math/units.h"
71 #include "gromacs/timing/gpu_timing.h"
72 #include "gromacs/utility/exceptions.h"
73 #include "gromacs/utility/fatalerror.h"
74 #include "gromacs/utility/gmxassert.h"
75 #include "gromacs/utility/logger.h"
76 #include "gromacs/utility/stringutil.h"
77 #include "gromacs/ewald/pme.h"
78
79 #if GMX_GPU_CUDA
80 #    include "pme.cuh"
81 #endif
82
83 #include "pme_gpu_calculate_splines.h"
84 #include "pme_gpu_constants.h"
85 #include "pme_gpu_program_impl.h"
86 #include "pme_gpu_timings.h"
87 #include "pme_gpu_types.h"
88 #include "pme_gpu_types_host.h"
89 #include "pme_gpu_types_host_impl.h"
90 #include "pme_grid.h"
91 #include "pme_internal.h"
92 #include "pme_solve.h"
93
94 /*! \brief
95  * CUDA only.
96  * Atom limit above which it is advantageous to recalculate the splines in the gather kernel
97  * and to use fewer threads per atom in the spline and spread kernels.
98  */
99 constexpr int c_pmeGpuPerformanceAtomLimit = 23000;
100
101 /*! \internal \brief
102  * Wrapper for getting a pointer to the plain C++ part of the GPU kernel parameters structure.
103  *
104  * \param[in] pmeGpu  The PME GPU structure.
105  * \returns The pointer to the kernel parameters.
106  */
107 static PmeGpuKernelParamsBase* pme_gpu_get_kernel_params_base_ptr(const PmeGpu* pmeGpu)
108 {
109     // reinterpret_cast is needed because the derived CUDA structure is not known in this file
110     auto* kernelParamsPtr = reinterpret_cast<PmeGpuKernelParamsBase*>(pmeGpu->kernelParams.get());
111     return kernelParamsPtr;
112 }
113
114 /*! \brief
115  * Atom data block size (in terms of number of atoms).
116  * This is the least common multiple of the numbers of atoms processed by
117  * a single block/workgroup of the spread and gather kernels.
118  * The GPU atom data buffers must be padded, which means that
119  * the numbers of atoms used for determining the size of the memory
120  * allocation must be divisible by this.
121  */
122 constexpr int c_pmeAtomDataBlockSize = 64;
123
124 int pme_gpu_get_atom_data_block_size()
125 {
126     return c_pmeAtomDataBlockSize;
127 }
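/* Editorial illustration (not part of the original source): in practice the padding contract above
 * means callers round the atom count up to a multiple of c_pmeAtomDataBlockSize before sizing
 * device buffers, e.g. in a hypothetical caller-side sketch:
 *
 *   const int blockSize       = pme_gpu_get_atom_data_block_size();
 *   const int paddedAtomCount = ((nAtoms + blockSize - 1) / blockSize) * blockSize;
 *
 * which is the same rounding that pme_gpu_reinit_atoms() applies below when computing nAtomsNewPadded.
 */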
128
129 void pme_gpu_synchronize(const PmeGpu* pmeGpu)
130 {
131     pmeGpu->archSpecific->pmeStream_.synchronize();
132 }
133
134 void pme_gpu_alloc_energy_virial(PmeGpu* pmeGpu)
135 {
136     const size_t energyAndVirialSize = c_virialAndEnergyCount * sizeof(float);
137
138     GMX_ASSERT(
139             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
140             "Only one (normal Coulomb PME) or two (FEP Coulomb PME) PME grids can be used on GPU");
141
142     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
143     {
144         allocateDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy[gridIndex],
145                              c_virialAndEnergyCount,
146                              pmeGpu->archSpecific->deviceContext_);
147         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_virialAndEnergy[gridIndex]), energyAndVirialSize);
148     }
149 }
150
151 void pme_gpu_free_energy_virial(PmeGpu* pmeGpu)
152 {
153     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
154     {
155         freeDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy[gridIndex]);
156         pfree(pmeGpu->staging.h_virialAndEnergy[gridIndex]);
157         pmeGpu->staging.h_virialAndEnergy[gridIndex] = nullptr;
158     }
159 }
160
161 void pme_gpu_clear_energy_virial(const PmeGpu* pmeGpu)
162 {
163     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
164     {
165         clearDeviceBufferAsync(&pmeGpu->kernelParams->constants.d_virialAndEnergy[gridIndex],
166                                0,
167                                c_virialAndEnergyCount,
168                                pmeGpu->archSpecific->pmeStream_);
169     }
170 }
171
172 void pme_gpu_realloc_and_copy_bspline_values(PmeGpu* pmeGpu, const int gridIndex)
173 {
174     GMX_ASSERT(
175             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
176             "Only one (normal Coulomb PME) or two (FEP Coulomb PME) PME grids can be used on GPU");
177     GMX_ASSERT(gridIndex < pmeGpu->common->ngrids,
178                "Invalid combination of gridIndex and number of grids");
179
180     const int splineValuesOffset[DIM] = { 0,
181                                           pmeGpu->kernelParams->grid.realGridSize[XX],
182                                           pmeGpu->kernelParams->grid.realGridSize[XX]
183                                                   + pmeGpu->kernelParams->grid.realGridSize[YY] };
184     memcpy(&pmeGpu->kernelParams->grid.splineValuesOffset, &splineValuesOffset, sizeof(splineValuesOffset));
185
186     const int newSplineValuesSize = pmeGpu->kernelParams->grid.realGridSize[XX]
187                                     + pmeGpu->kernelParams->grid.realGridSize[YY]
188                                     + pmeGpu->kernelParams->grid.realGridSize[ZZ];
189     const bool shouldRealloc = (newSplineValuesSize > pmeGpu->archSpecific->splineValuesSize[gridIndex]);
190     reallocateDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli[gridIndex],
191                            newSplineValuesSize,
192                            &pmeGpu->archSpecific->splineValuesSize[gridIndex],
193                            &pmeGpu->archSpecific->splineValuesCapacity[gridIndex],
194                            pmeGpu->archSpecific->deviceContext_);
195     if (shouldRealloc)
196     {
197         /* Reallocate the host buffer */
198         pfree(pmeGpu->staging.h_splineModuli[gridIndex]);
199         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_splineModuli[gridIndex]),
200                 newSplineValuesSize * sizeof(float));
201     }
202     for (int i = 0; i < DIM; i++)
203     {
204         memcpy(pmeGpu->staging.h_splineModuli[gridIndex] + splineValuesOffset[i],
205                pmeGpu->common->bsp_mod[i].data(),
206                pmeGpu->common->bsp_mod[i].size() * sizeof(float));
207     }
208     /* TODO: pin original buffer instead! */
209     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli[gridIndex],
210                        pmeGpu->staging.h_splineModuli[gridIndex],
211                        0,
212                        newSplineValuesSize,
213                        pmeGpu->archSpecific->pmeStream_,
214                        pmeGpu->settings.transferKind,
215                        nullptr);
216 }
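/* Editorial note (a sketch, inferred from the copies above): h_splineModuli/d_splineModuli hold the
 * three per-dimension B-spline moduli concatenated into one flat buffer, so the moduli for
 * dimension d start at splineValuesOffset[d]. For a hypothetical 48 x 48 x 40 real grid:
 *
 *   splineValuesOffset == { 0, 48, 96 };  // XX moduli at [0, 48), YY at [48, 96), ZZ at [96, 136)
 *   newSplineValuesSize == 48 + 48 + 40;  // == 136 floats per grid
 */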
217
218 void pme_gpu_free_bspline_values(const PmeGpu* pmeGpu)
219 {
220     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
221     {
222         pfree(pmeGpu->staging.h_splineModuli[gridIndex]);
223         freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli[gridIndex]);
224     }
225 }
226
227 void pme_gpu_realloc_forces(PmeGpu* pmeGpu)
228 {
229     const size_t newForcesSize = pmeGpu->nAtomsAlloc;
230     GMX_ASSERT(newForcesSize > 0, "Bad number of atoms in PME GPU");
231     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces,
232                            newForcesSize,
233                            &pmeGpu->archSpecific->forcesSize,
234                            &pmeGpu->archSpecific->forcesSizeAlloc,
235                            pmeGpu->archSpecific->deviceContext_);
236     pmeGpu->staging.h_forces.reserveWithPadding(pmeGpu->nAtomsAlloc);
237     pmeGpu->staging.h_forces.resizeWithPadding(pmeGpu->kernelParams->atoms.nAtoms);
238 }
239
240 void pme_gpu_free_forces(const PmeGpu* pmeGpu)
241 {
242     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces);
243 }
244
245 void pme_gpu_copy_input_forces(PmeGpu* pmeGpu)
246 {
247     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
248     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces,
249                        pmeGpu->staging.h_forces.data(),
250                        0,
251                        pmeGpu->kernelParams->atoms.nAtoms,
252                        pmeGpu->archSpecific->pmeStream_,
253                        pmeGpu->settings.transferKind,
254                        nullptr);
255 }
256
257 void pme_gpu_copy_output_forces(PmeGpu* pmeGpu)
258 {
259     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
260     copyFromDeviceBuffer(pmeGpu->staging.h_forces.data(),
261                          &pmeGpu->kernelParams->atoms.d_forces,
262                          0,
263                          pmeGpu->kernelParams->atoms.nAtoms,
264                          pmeGpu->archSpecific->pmeStream_,
265                          pmeGpu->settings.transferKind,
266                          nullptr);
267 }
268
269 void pme_gpu_realloc_and_copy_input_coefficients(const PmeGpu* pmeGpu,
270                                                  const float*  h_coefficients,
271                                                  const int     gridIndex)
272 {
273     GMX_ASSERT(h_coefficients, "Bad host-side charge buffer in PME GPU");
274     const size_t newCoefficientsSize = pmeGpu->nAtomsAlloc;
275     GMX_ASSERT(newCoefficientsSize > 0, "Bad number of atoms in PME GPU");
276     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex],
277                            newCoefficientsSize,
278                            &pmeGpu->archSpecific->coefficientsSize[gridIndex],
279                            &pmeGpu->archSpecific->coefficientsCapacity[gridIndex],
280                            pmeGpu->archSpecific->deviceContext_);
281     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex],
282                        const_cast<float*>(h_coefficients),
283                        0,
284                        pmeGpu->kernelParams->atoms.nAtoms,
285                        pmeGpu->archSpecific->pmeStream_,
286                        pmeGpu->settings.transferKind,
287                        nullptr);
288
289     const size_t paddingIndex = pmeGpu->kernelParams->atoms.nAtoms;
290     const size_t paddingCount = pmeGpu->nAtomsAlloc - paddingIndex;
291     if (paddingCount > 0)
292     {
293         clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex],
294                                paddingIndex,
295                                paddingCount,
296                                pmeGpu->archSpecific->pmeStream_);
297     }
298 }
299
300 void pme_gpu_free_coefficients(const PmeGpu* pmeGpu)
301 {
302     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
303     {
304         freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex]);
305     }
306 }
307
308 void pme_gpu_realloc_spline_data(PmeGpu* pmeGpu)
309 {
310     const int order             = pmeGpu->common->pme_order;
311     const int newSplineDataSize = DIM * order * pmeGpu->nAtomsAlloc;
312     GMX_ASSERT(newSplineDataSize > 0, "Bad number of atoms in PME GPU");
313     /* Two arrays of the same size */
314     const bool shouldRealloc        = (newSplineDataSize > pmeGpu->archSpecific->splineDataSize);
315     int        currentSizeTemp      = pmeGpu->archSpecific->splineDataSize;
316     int        currentSizeTempAlloc = pmeGpu->archSpecific->splineDataSizeAlloc;
317     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta,
318                            newSplineDataSize,
319                            &currentSizeTemp,
320                            &currentSizeTempAlloc,
321                            pmeGpu->archSpecific->deviceContext_);
322     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta,
323                            newSplineDataSize,
324                            &pmeGpu->archSpecific->splineDataSize,
325                            &pmeGpu->archSpecific->splineDataSizeAlloc,
326                            pmeGpu->archSpecific->deviceContext_);
327     // the host side reallocation
328     if (shouldRealloc)
329     {
330         pfree(pmeGpu->staging.h_theta);
331         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_theta), newSplineDataSize * sizeof(float));
332         pfree(pmeGpu->staging.h_dtheta);
333         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_dtheta), newSplineDataSize * sizeof(float));
334     }
335 }
336
337 void pme_gpu_free_spline_data(const PmeGpu* pmeGpu)
338 {
339     /* Two arrays of the same size */
340     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta);
341     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta);
342     pfree(pmeGpu->staging.h_theta);
343     pfree(pmeGpu->staging.h_dtheta);
344 }
345
346 void pme_gpu_realloc_grid_indices(PmeGpu* pmeGpu)
347 {
348     const size_t newIndicesSize = DIM * pmeGpu->nAtomsAlloc;
349     GMX_ASSERT(newIndicesSize > 0, "Bad number of atoms in PME GPU");
350     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices,
351                            newIndicesSize,
352                            &pmeGpu->archSpecific->gridlineIndicesSize,
353                            &pmeGpu->archSpecific->gridlineIndicesSizeAlloc,
354                            pmeGpu->archSpecific->deviceContext_);
355     pfree(pmeGpu->staging.h_gridlineIndices);
356     pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_gridlineIndices), newIndicesSize * sizeof(int));
357 }
358
359 void pme_gpu_free_grid_indices(const PmeGpu* pmeGpu)
360 {
361     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices);
362     pfree(pmeGpu->staging.h_gridlineIndices);
363 }
364
365 void pme_gpu_realloc_grids(PmeGpu* pmeGpu)
366 {
367     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
368
369     const int newRealGridSize = kernelParamsPtr->grid.realGridSizePadded[XX]
370                                 * kernelParamsPtr->grid.realGridSizePadded[YY]
371                                 * kernelParamsPtr->grid.realGridSizePadded[ZZ];
372     const int newComplexGridSize = kernelParamsPtr->grid.complexGridSizePadded[XX]
373                                    * kernelParamsPtr->grid.complexGridSizePadded[YY]
374                                    * kernelParamsPtr->grid.complexGridSizePadded[ZZ] * 2;
375     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
376     {
377         // The complex grid size above is doubled because the grid holds complex numbers, while all allocations/pointers are in floats
378         if (pmeGpu->archSpecific->performOutOfPlaceFFT)
379         {
380             /* 2 separate grids */
381             reallocateDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid[gridIndex],
382                                    newComplexGridSize,
383                                    &pmeGpu->archSpecific->complexGridSize[gridIndex],
384                                    &pmeGpu->archSpecific->complexGridCapacity[gridIndex],
385                                    pmeGpu->archSpecific->deviceContext_);
386             reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid[gridIndex],
387                                    newRealGridSize,
388                                    &pmeGpu->archSpecific->realGridSize[gridIndex],
389                                    &pmeGpu->archSpecific->realGridCapacity[gridIndex],
390                                    pmeGpu->archSpecific->deviceContext_);
391         }
392         else
393         {
394             /* A single buffer so that any grid will fit */
395             const int newGridsSize = std::max(newRealGridSize, newComplexGridSize);
396             reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid[gridIndex],
397                                    newGridsSize,
398                                    &pmeGpu->archSpecific->realGridSize[gridIndex],
399                                    &pmeGpu->archSpecific->realGridCapacity[gridIndex],
400                                    pmeGpu->archSpecific->deviceContext_);
401             kernelParamsPtr->grid.d_fourierGrid[gridIndex] = kernelParamsPtr->grid.d_realGrid[gridIndex];
402             pmeGpu->archSpecific->complexGridSize[gridIndex] =
403                     pmeGpu->archSpecific->realGridSize[gridIndex];
404             // the size might get used later for copying the grid
405         }
406     }
407 }
408
409 void pme_gpu_free_grids(const PmeGpu* pmeGpu)
410 {
411     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
412     {
413         if (pmeGpu->archSpecific->performOutOfPlaceFFT)
414         {
415             freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_fourierGrid[gridIndex]);
416         }
417         freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid[gridIndex]);
418     }
419 }
420
421 void pme_gpu_clear_grids(const PmeGpu* pmeGpu)
422 {
423     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
424     {
425         clearDeviceBufferAsync(&pmeGpu->kernelParams->grid.d_realGrid[gridIndex],
426                                0,
427                                pmeGpu->archSpecific->realGridSize[gridIndex],
428                                pmeGpu->archSpecific->pmeStream_);
429     }
430 }
431
432 void pme_gpu_realloc_and_copy_fract_shifts(PmeGpu* pmeGpu)
433 {
434     pme_gpu_free_fract_shifts(pmeGpu);
435
436     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
437
438     const int nx                  = kernelParamsPtr->grid.realGridSize[XX];
439     const int ny                  = kernelParamsPtr->grid.realGridSize[YY];
440     const int nz                  = kernelParamsPtr->grid.realGridSize[ZZ];
441     const int cellCount           = c_pmeNeighborUnitcellCount;
442     const int gridDataOffset[DIM] = { 0, cellCount * nx, cellCount * (nx + ny) };
443
444     memcpy(kernelParamsPtr->grid.tablesOffsets, &gridDataOffset, sizeof(gridDataOffset));
445
446     const int newFractShiftsSize = cellCount * (nx + ny + nz);
447
448     initParamLookupTable(&kernelParamsPtr->grid.d_fractShiftsTable,
449                          &kernelParamsPtr->fractShiftsTableTexture,
450                          pmeGpu->common->fsh.data(),
451                          newFractShiftsSize,
452                          pmeGpu->archSpecific->deviceContext_);
453
454     initParamLookupTable(&kernelParamsPtr->grid.d_gridlineIndicesTable,
455                          &kernelParamsPtr->gridlineIndicesTableTexture,
456                          pmeGpu->common->nn.data(),
457                          newFractShiftsSize,
458                          pmeGpu->archSpecific->deviceContext_);
459 }
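/* Editorial note (a sketch, inferred from the offsets above and from how fsh/nn are filled in
 * pme_gpu_copy_common_data_from()): the fractional-shift and gridline-index tables are the three
 * per-dimension tables, each with cellCount entries per grid line, concatenated into one buffer.
 * An entry for dimension d and flat index i therefore lives at tablesOffsets[d] + i; e.g. the ZZ
 * table starts at cellCount * (nx + ny).
 */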
460
461 void pme_gpu_free_fract_shifts(const PmeGpu* pmeGpu)
462 {
463     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
464 #if GMX_GPU_CUDA
465     destroyParamLookupTable(&kernelParamsPtr->grid.d_fractShiftsTable,
466                             &kernelParamsPtr->fractShiftsTableTexture);
467     destroyParamLookupTable(&kernelParamsPtr->grid.d_gridlineIndicesTable,
468                             &kernelParamsPtr->gridlineIndicesTableTexture);
469 #elif GMX_GPU_OPENCL || GMX_GPU_SYCL
470     freeDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable);
471     freeDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable);
472 #endif
473 }
474
475 bool pme_gpu_stream_query(const PmeGpu* pmeGpu)
476 {
477     return haveStreamTasksCompleted(pmeGpu->archSpecific->pmeStream_);
478 }
479
480 void pme_gpu_copy_input_gather_grid(const PmeGpu* pmeGpu, const float* h_grid, const int gridIndex)
481 {
482     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid[gridIndex],
483                        h_grid,
484                        0,
485                        pmeGpu->archSpecific->realGridSize[gridIndex],
486                        pmeGpu->archSpecific->pmeStream_,
487                        pmeGpu->settings.transferKind,
488                        nullptr);
489 }
490
491 void pme_gpu_copy_output_spread_grid(const PmeGpu* pmeGpu, float* h_grid, const int gridIndex)
492 {
493     copyFromDeviceBuffer(h_grid,
494                          &pmeGpu->kernelParams->grid.d_realGrid[gridIndex],
495                          0,
496                          pmeGpu->archSpecific->realGridSize[gridIndex],
497                          pmeGpu->archSpecific->pmeStream_,
498                          pmeGpu->settings.transferKind,
499                          nullptr);
500     pmeGpu->archSpecific->syncSpreadGridD2H.markEvent(pmeGpu->archSpecific->pmeStream_);
501 }
502
503 void pme_gpu_copy_output_spread_atom_data(const PmeGpu* pmeGpu)
504 {
505     const size_t splinesCount    = DIM * pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order;
506     auto*        kernelParamsPtr = pmeGpu->kernelParams.get();
507     copyFromDeviceBuffer(pmeGpu->staging.h_dtheta,
508                          &kernelParamsPtr->atoms.d_dtheta,
509                          0,
510                          splinesCount,
511                          pmeGpu->archSpecific->pmeStream_,
512                          pmeGpu->settings.transferKind,
513                          nullptr);
514     copyFromDeviceBuffer(pmeGpu->staging.h_theta,
515                          &kernelParamsPtr->atoms.d_theta,
516                          0,
517                          splinesCount,
518                          pmeGpu->archSpecific->pmeStream_,
519                          pmeGpu->settings.transferKind,
520                          nullptr);
521     copyFromDeviceBuffer(pmeGpu->staging.h_gridlineIndices,
522                          &kernelParamsPtr->atoms.d_gridlineIndices,
523                          0,
524                          kernelParamsPtr->atoms.nAtoms * DIM,
525                          pmeGpu->archSpecific->pmeStream_,
526                          pmeGpu->settings.transferKind,
527                          nullptr);
528 }
529
530 void pme_gpu_copy_input_gather_atom_data(const PmeGpu* pmeGpu)
531 {
532     const size_t splinesCount    = DIM * pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order;
533     auto*        kernelParamsPtr = pmeGpu->kernelParams.get();
534
535     // TODO: could clear only the padding and not the whole thing, but this is test-exclusive code anyway
536     clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_gridlineIndices,
537                            0,
538                            pmeGpu->nAtomsAlloc * DIM,
539                            pmeGpu->archSpecific->pmeStream_);
540     clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_dtheta,
541                            0,
542                            pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
543                            pmeGpu->archSpecific->pmeStream_);
544     clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_theta,
545                            0,
546                            pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
547                            pmeGpu->archSpecific->pmeStream_);
548
549     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_dtheta,
550                        pmeGpu->staging.h_dtheta,
551                        0,
552                        splinesCount,
553                        pmeGpu->archSpecific->pmeStream_,
554                        pmeGpu->settings.transferKind,
555                        nullptr);
556     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_theta,
557                        pmeGpu->staging.h_theta,
558                        0,
559                        splinesCount,
560                        pmeGpu->archSpecific->pmeStream_,
561                        pmeGpu->settings.transferKind,
562                        nullptr);
563     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_gridlineIndices,
564                        pmeGpu->staging.h_gridlineIndices,
565                        0,
566                        kernelParamsPtr->atoms.nAtoms * DIM,
567                        pmeGpu->archSpecific->pmeStream_,
568                        pmeGpu->settings.transferKind,
569                        nullptr);
570 }
571
572 void pme_gpu_sync_spread_grid(const PmeGpu* pmeGpu)
573 {
574     pmeGpu->archSpecific->syncSpreadGridD2H.waitForEvent();
575 }
576
577 /*! \brief Internal GPU initialization for PME.
578  *
579  * \param[in]  pmeGpu         GPU PME data.
580  * \param[in]  deviceContext  GPU context.
581  * \param[in]  deviceStream   GPU stream.
582  */
583 static void pme_gpu_init_internal(PmeGpu* pmeGpu, const DeviceContext& deviceContext, const DeviceStream& deviceStream)
584 {
585     /* Allocate the target-specific structures */
586     pmeGpu->archSpecific.reset(new PmeGpuSpecific(deviceContext, deviceStream));
587     pmeGpu->kernelParams.reset(new PmeGpuKernelParams());
588
589     pmeGpu->archSpecific->performOutOfPlaceFFT = true;
590     /* This should give better performance, according to the cuFFT documentation.
591      * The performance seems to be the same though.
592      * TODO: PME could also try to pick up nice grid sizes (with factors of 2, 3, 5, 7).
593      */
594
595 #if GMX_GPU_CUDA
596     pmeGpu->maxGridWidthX = deviceContext.deviceInfo().prop.maxGridSize[0];
597 #else
598     // Use this path for any non-CUDA GPU acceleration
599     // TODO: is there really no global work-size limit in OpenCL?
600     pmeGpu->maxGridWidthX = INT32_MAX / 2;
601 #endif
602 }
603
604 void pme_gpu_reinit_3dfft(const PmeGpu* pmeGpu)
605 {
606     if (pme_gpu_settings(pmeGpu).performGPUFFT)
607     {
608         pmeGpu->archSpecific->fftSetup.resize(0);
609         const bool         performOutOfPlaceFFT      = pmeGpu->archSpecific->performOutOfPlaceFFT;
610         const bool         allocateGrid              = false;
611         MPI_Comm           comm                      = MPI_COMM_NULL;
612         std::array<int, 1> gridOffsetsInXForEachRank = { 0 };
613         std::array<int, 1> gridOffsetsInYForEachRank = { 0 };
614 #if GMX_GPU_CUDA
615         const gmx::FftBackend backend = gmx::FftBackend::Cufft;
616 #elif GMX_GPU_OPENCL
617         const gmx::FftBackend backend = gmx::FftBackend::Ocl;
618 #elif GMX_GPU_SYCL
619 #    if GMX_SYCL_DPCPP && GMX_FFT_MKL
620         const gmx::FftBackend backend = gmx::FftBackend::SyclMkl;
621 #    elif GMX_SYCL_HIPSYCL
622         const gmx::FftBackend backend = gmx::FftBackend::SyclRocfft;
623 #    else
624         const gmx::FftBackend backend = gmx::FftBackend::Sycl;
625 #    endif
626 #else
627         GMX_RELEASE_ASSERT(false, "Unknown GPU backend");
628         const gmx::FftBackend backend = gmx::FftBackend::Count;
629 #endif
630
631         PmeGpuGridParams& grid = pme_gpu_get_kernel_params_base_ptr(pmeGpu)->grid;
632         for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
633         {
634             pmeGpu->archSpecific->fftSetup.push_back(
635                     std::make_unique<gmx::Gpu3dFft>(backend,
636                                                     allocateGrid,
637                                                     comm,
638                                                     gridOffsetsInXForEachRank,
639                                                     gridOffsetsInYForEachRank,
640                                                     grid.realGridSize[ZZ],
641                                                     performOutOfPlaceFFT,
642                                                     pmeGpu->archSpecific->deviceContext_,
643                                                     pmeGpu->archSpecific->pmeStream_,
644                                                     grid.realGridSize,
645                                                     grid.realGridSizePadded,
646                                                     grid.complexGridSizePadded,
647                                                     &(grid.d_realGrid[gridIndex]),
648                                                     &(grid.d_fourierGrid[gridIndex])));
649         }
650     }
651 }
652
653 void pme_gpu_destroy_3dfft(const PmeGpu* pmeGpu)
654 {
655     pmeGpu->archSpecific->fftSetup.resize(0);
656 }
657
658 void pme_gpu_getEnergyAndVirial(const gmx_pme_t& pme, const float lambda, PmeOutput* output)
659 {
660     const PmeGpu* pmeGpu = pme.gpu;
661
662     GMX_ASSERT(lambda == 1.0 || pmeGpu->common->ngrids == 2,
663                "Invalid combination of lambda and number of grids");
664
665     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
666     {
667         for (int j = 0; j < c_virialAndEnergyCount; j++)
668         {
669             GMX_ASSERT(std::isfinite(pmeGpu->staging.h_virialAndEnergy[gridIndex][j]),
670                        "PME GPU produces incorrect energy/virial.");
671         }
672     }
673     for (int dim1 = 0; dim1 < DIM; dim1++)
674     {
675         for (int dim2 = 0; dim2 < DIM; dim2++)
676         {
677             output->coulombVirial_[dim1][dim2] = 0;
678         }
679     }
680     output->coulombEnergy_ = 0;
681     float scale            = 1.0;
682     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
683     {
684         if (pmeGpu->common->ngrids == 2)
685         {
686             scale = gridIndex == 0 ? (1.0 - lambda) : lambda;
687         }
688         output->coulombVirial_[XX][XX] +=
689                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][0];
690         output->coulombVirial_[YY][YY] +=
691                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][1];
692         output->coulombVirial_[ZZ][ZZ] +=
693                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][2];
694         output->coulombVirial_[XX][YY] +=
695                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][3];
696         output->coulombVirial_[YY][XX] +=
697                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][3];
698         output->coulombVirial_[XX][ZZ] +=
699                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][4];
700         output->coulombVirial_[ZZ][XX] +=
701                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][4];
702         output->coulombVirial_[YY][ZZ] +=
703                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][5];
704         output->coulombVirial_[ZZ][YY] +=
705                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][5];
706         output->coulombEnergy_ += scale * 0.5F * pmeGpu->staging.h_virialAndEnergy[gridIndex][6];
707     }
708     if (pmeGpu->common->ngrids > 1)
709     {
710         output->coulombDvdl_ = 0.5F
711                                * (pmeGpu->staging.h_virialAndEnergy[FEP_STATE_B][6]
712                                   - pmeGpu->staging.h_virialAndEnergy[FEP_STATE_A][6]);
713     }
714 }
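/* Editorial recap (inferred from the reduction above, not additional behaviour):
 * h_virialAndEnergy[gridIndex] is laid out as { XX, YY, ZZ, XY, XZ, YZ, energy }; the 0.25F and
 * 0.5F factors above convert the raw reduction results to the virial tensor and the energy, and
 * with two grids (FEP) the contributions are mixed as (1 - lambda) * gridA + lambda * gridB.
 */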
715
716 /*! \brief Sets the force-related members in \p output
717  *
718  * \param[in]   pmeGpu      PME GPU data structure
719  * \param[out]  output      Pointer to PME output data structure
720  */
721 static void pme_gpu_getForceOutput(PmeGpu* pmeGpu, PmeOutput* output)
722 {
723     output->haveForceOutput_ = !pmeGpu->settings.useGpuForceReduction;
724     if (output->haveForceOutput_)
725     {
726         output->forces_ = pmeGpu->staging.h_forces;
727     }
728 }
729
730 PmeOutput pme_gpu_getOutput(const gmx_pme_t& pme, const bool computeEnergyAndVirial, const real lambdaQ)
731 {
732     PmeGpu* pmeGpu = pme.gpu;
733
734     PmeOutput output;
735
736     pme_gpu_getForceOutput(pmeGpu, &output);
737
738     if (computeEnergyAndVirial)
739     {
740         if (pme_gpu_settings(pmeGpu).performGPUSolve)
741         {
742             pme_gpu_getEnergyAndVirial(pme, lambdaQ, &output);
743         }
744         else
745         {
746             get_pme_ener_vir_q(pme.solve_work, pme.nthread, &output);
747         }
748     }
749     return output;
750 }
751
752 void pme_gpu_update_input_box(PmeGpu gmx_unused* pmeGpu, const matrix gmx_unused box)
753 {
754 #if GMX_DOUBLE
755     GMX_THROW(gmx::NotImplementedError("PME is implemented for single-precision only on GPU"));
756 #else
757     matrix scaledBox;
758     pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
759     auto* kernelParamsPtr              = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
760     kernelParamsPtr->current.boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
761     GMX_ASSERT(kernelParamsPtr->current.boxVolume != 0.0F, "Zero volume of the unit cell");
762     matrix recipBox;
763     gmx::invertBoxMatrix(scaledBox, recipBox);
764
765     /* The GPU recipBox is transposed as compared to the CPU recipBox.
766      * Spread uses matrix columns (while solve and gather use rows).
767      * There is no particular reason for this; it might be further rethought/optimized for better access patterns.
768      */
769     const real newRecipBox[DIM][DIM] = { { recipBox[XX][XX], recipBox[YY][XX], recipBox[ZZ][XX] },
770                                          { 0.0, recipBox[YY][YY], recipBox[ZZ][YY] },
771                                          { 0.0, 0.0, recipBox[ZZ][ZZ] } };
772     memcpy(kernelParamsPtr->current.recipBox, newRecipBox, sizeof(matrix));
773 #endif
774 }
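/* Editorial illustration (not part of the original source): the GPU box above is simply the
 * transpose of the CPU recipBox, with the lower triangle zeroed, i.e.
 *
 *   newRecipBox[d1][d2] == (d2 >= d1 ? recipBox[d2][d1] : 0.0);
 *
 * so, for example, newRecipBox[XX][YY] stores recipBox[YY][XX].
 */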
775
776 /*! \brief \libinternal
777  * (Re-)initializes all the PME GPU data related to the grid size and cut-off.
778  *
779  * \param[in] pmeGpu            The PME GPU structure.
780  */
781 static void pme_gpu_reinit_grids(PmeGpu* pmeGpu)
782 {
783     auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
784
785     GMX_ASSERT(
786             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
787             "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
788
789     kernelParamsPtr->grid.ewaldFactor =
790             (M_PI * M_PI) / (pmeGpu->common->ewaldcoeff_q * pmeGpu->common->ewaldcoeff_q);
791     /* The grid size variants */
792     for (int i = 0; i < DIM; i++)
793     {
794         kernelParamsPtr->grid.realGridSize[i] = pmeGpu->common->nk[i];
795         kernelParamsPtr->grid.realGridSizeFP[i] =
796                 static_cast<float>(kernelParamsPtr->grid.realGridSize[i]);
797         kernelParamsPtr->grid.realGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
798
799         // The complex grid currently uses no padding;
800         // if it starts to do so, then another test should be added for that
801         kernelParamsPtr->grid.complexGridSize[i]       = kernelParamsPtr->grid.realGridSize[i];
802         kernelParamsPtr->grid.complexGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
803     }
804     /* FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
805     if (!pme_gpu_settings(pmeGpu).performGPUFFT)
806     {
807         // This allows the GPU spreading grid and the CPU fftgrid to have the same layout, so that we can copy the data directly
808         kernelParamsPtr->grid.realGridSizePadded[ZZ] =
809                 (kernelParamsPtr->grid.realGridSize[ZZ] / 2 + 1) * 2;
810     }
811     /* GPU FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
812     kernelParamsPtr->grid.complexGridSize[ZZ] /= 2;
813     kernelParamsPtr->grid.complexGridSize[ZZ]++;
814     kernelParamsPtr->grid.complexGridSizePadded[ZZ] = kernelParamsPtr->grid.complexGridSize[ZZ];
815
816     pme_gpu_realloc_and_copy_fract_shifts(pmeGpu);
817     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
818     {
819         pme_gpu_realloc_and_copy_bspline_values(pmeGpu, gridIndex);
820     }
821
822     pme_gpu_realloc_grids(pmeGpu);
823     pme_gpu_reinit_3dfft(pmeGpu);
824 }
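/* Editorial illustration (hypothetical numbers, not from the original source): for a 48-point
 * minor (ZZ) dimension the code above yields
 *
 *   complexGridSize[ZZ]    == 48 / 2 + 1 == 25   // length of the r2c FFT output
 *   realGridSizePadded[ZZ] == 25 * 2     == 50   // only when the FFT runs on the CPU
 *
 * so that in mixed mode the GPU spread grid can share the CPU fftgrid layout.
 */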
825
826 /* Several GPU functions that refer to the CPU PME data live here.
827  * We would like to keep these away from the GPU-framework specific code for clarity,
828  * as well as to avoid compilation issues with MPI.
829  */
830
831 /*! \brief \libinternal
832  * Copies everything useful from the PME CPU to the PME GPU structure.
833  * The goal is to minimize interaction with the PME CPU structure in the GPU code.
834  *
835  * \param[in] pme         The PME structure.
836  */
837 static void pme_gpu_copy_common_data_from(const gmx_pme_t* pme)
838 {
839     /* TODO: Consider refactoring the CPU PME code to use the same structure,
840      * so that this function becomes 2 lines */
841     PmeGpu* pmeGpu               = pme->gpu;
842     pmeGpu->common->ngrids       = pme->bFEP_q ? 2 : 1;
843     pmeGpu->common->epsilon_r    = pme->epsilon_r;
844     pmeGpu->common->ewaldcoeff_q = pme->ewaldcoeff_q;
845     pmeGpu->common->nk[XX]       = pme->nkx;
846     pmeGpu->common->nk[YY]       = pme->nky;
847     pmeGpu->common->nk[ZZ]       = pme->nkz;
848     pmeGpu->common->pme_order    = pme->pme_order;
849     if (pmeGpu->common->pme_order != c_pmeGpuOrder)
850     {
851         GMX_THROW(gmx::NotImplementedError("pme_order != 4 is not implemented!"));
852     }
853     for (int i = 0; i < DIM; i++)
854     {
855         pmeGpu->common->bsp_mod[i].assign(pme->bsp_mod[i], pme->bsp_mod[i] + pmeGpu->common->nk[i]);
856     }
857     const int cellCount = c_pmeNeighborUnitcellCount;
858     pmeGpu->common->fsh.resize(0);
859     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshx, pme->fshx + cellCount * pme->nkx);
860     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshy, pme->fshy + cellCount * pme->nky);
861     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshz, pme->fshz + cellCount * pme->nkz);
862     pmeGpu->common->nn.resize(0);
863     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnx, pme->nnx + cellCount * pme->nkx);
864     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nny, pme->nny + cellCount * pme->nky);
865     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnz, pme->nnz + cellCount * pme->nkz);
866     pmeGpu->common->runMode       = pme->runMode;
867     pmeGpu->common->isRankPmeOnly = !pme->bPPnode;
868     pmeGpu->common->boxScaler     = pme->boxScaler.get();
869 }
870
871 /*! \libinternal \brief
872  * Uses heuristics to select the best-performing PME spread and gather kernels.
873  *
874  * \param[in,out] pmeGpu         The PME GPU structure.
875  */
876 static void pme_gpu_select_best_performing_pme_spreadgather_kernels(PmeGpu* pmeGpu)
877 {
878     if (GMX_GPU_CUDA && pmeGpu->kernelParams->atoms.nAtoms > c_pmeGpuPerformanceAtomLimit)
879     {
880         pmeGpu->settings.threadsPerAtom     = ThreadsPerAtom::Order;
881         pmeGpu->settings.recalculateSplines = true;
882     }
883     else
884     {
885         pmeGpu->settings.threadsPerAtom     = ThreadsPerAtom::OrderSquared;
886         pmeGpu->settings.recalculateSplines = false;
887     }
888 }
889
890
891 /*! \libinternal \brief
892  * Initializes the PME GPU data at the beginning of the run.
893  * TODO: this should become PmeGpu::PmeGpu()
894  *
895  * \param[in,out] pme            The PME structure.
896  * \param[in]     deviceContext  The GPU context.
897  * \param[in]     deviceStream   The GPU stream.
898  * \param[in,out] pmeGpuProgram  The handle to the program/kernel data created outside (e.g. in unit tests/runner)
899  */
900 static void pme_gpu_init(gmx_pme_t*           pme,
901                          const DeviceContext& deviceContext,
902                          const DeviceStream&  deviceStream,
903                          const PmeGpuProgram* pmeGpuProgram)
904 {
905     pme->gpu       = new PmeGpu();
906     PmeGpu* pmeGpu = pme->gpu;
907     changePinningPolicy(&pmeGpu->staging.h_forces, pme_get_pinning_policy());
908     pmeGpu->common = std::make_shared<PmeShared>();
909
910     /* These settings are set here for the whole run; dynamic ones are set in pme_gpu_reinit() */
911     /* A convenience variable. */
912     pmeGpu->settings.useDecomposition = (pme->nnodes != 1);
913     /* TODO: CPU gather with GPU spread is broken due to different theta/dtheta layout. */
914     pmeGpu->settings.performGPUGather = true;
915     // By default GPU-side reduction is off (explicitly set here for tests, otherwise reset per-step)
916     pmeGpu->settings.useGpuForceReduction = false;
917
918     pme_gpu_set_testing(pmeGpu, false);
919
920     GMX_ASSERT(pmeGpuProgram != nullptr, "GPU kernels must be already compiled");
921     pmeGpu->programHandle_ = pmeGpuProgram;
922
923     pmeGpu->initializedClfftLibrary_ = std::make_unique<gmx::ClfftInitializer>();
924
925     pme_gpu_init_internal(pmeGpu, deviceContext, deviceStream);
926
927     pme_gpu_copy_common_data_from(pme);
928     pme_gpu_alloc_energy_virial(pmeGpu);
929
930     GMX_ASSERT(pmeGpu->common->epsilon_r != 0.0F, "PME GPU: bad electrostatic coefficient");
931
932     auto* kernelParamsPtr               = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
933     kernelParamsPtr->constants.elFactor = gmx::c_one4PiEps0 / pmeGpu->common->epsilon_r;
934 }
935
936 void pme_gpu_get_real_grid_sizes(const PmeGpu* pmeGpu, gmx::IVec* gridSize, gmx::IVec* paddedGridSize)
937 {
938     GMX_ASSERT(gridSize != nullptr, "");
939     GMX_ASSERT(paddedGridSize != nullptr, "");
940     GMX_ASSERT(pmeGpu != nullptr, "");
941     auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
942     for (int i = 0; i < DIM; i++)
943     {
944         (*gridSize)[i]       = kernelParamsPtr->grid.realGridSize[i];
945         (*paddedGridSize)[i] = kernelParamsPtr->grid.realGridSizePadded[i];
946     }
947 }
948
949 void pme_gpu_reinit(gmx_pme_t*           pme,
950                     const DeviceContext* deviceContext,
951                     const DeviceStream*  deviceStream,
952                     const PmeGpuProgram* pmeGpuProgram)
953 {
954     GMX_ASSERT(pme != nullptr, "Need valid PME object");
955
956     if (!pme->gpu)
957     {
958         GMX_RELEASE_ASSERT(deviceContext != nullptr,
959                            "Device context can not be nullptr when setting up PME on GPU.");
960         GMX_RELEASE_ASSERT(deviceStream != nullptr,
961                            "Device stream can not be nullptr when setting up PME on GPU.");
962         /* First-time initialization */
963         pme_gpu_init(pme, *deviceContext, *deviceStream, pmeGpuProgram);
964     }
965     else
966     {
967         /* After this call nothing in the GPU code should refer to the gmx_pme_t *pme itself - until the next pme_gpu_reinit */
968         pme_gpu_copy_common_data_from(pme);
969     }
970     /* GPU FFT will only get used for a single rank. */
971     pme->gpu->settings.performGPUFFT =
972             (pme->gpu->common->runMode == PmeRunMode::GPU) && !pme->gpu->settings.useDecomposition;
973     pme->gpu->settings.performGPUSolve = (pme->gpu->common->runMode == PmeRunMode::GPU);
974
975     /* Reinit active timers */
976     pme_gpu_reinit_timings(pme->gpu);
977
978     pme_gpu_reinit_grids(pme->gpu);
979     // Note: if timing the reinit launch overhead becomes more relevant
980     // (e.g. with regular PP-PME re-balancing), we should pass wcycle here.
981     pme_gpu_reinit_computation(pme, nullptr);
982     /* Clear the previous box - doesn't hurt, and forces the PME CPU recipbox
983      * update for mixed mode on grid switch. TODO: use shared recipbox field.
984      */
985     std::memset(pme->gpu->common->previousBox, 0, sizeof(pme->gpu->common->previousBox));
986 }
987
988 void pme_gpu_destroy(PmeGpu* pmeGpu)
989 {
990     /* Free lots of data */
991     pme_gpu_free_energy_virial(pmeGpu);
992     pme_gpu_free_bspline_values(pmeGpu);
993     pme_gpu_free_forces(pmeGpu);
994     pme_gpu_free_coefficients(pmeGpu);
995     pme_gpu_free_spline_data(pmeGpu);
996     pme_gpu_free_grid_indices(pmeGpu);
997     pme_gpu_free_fract_shifts(pmeGpu);
998     pme_gpu_free_grids(pmeGpu);
999
1000     pme_gpu_destroy_3dfft(pmeGpu);
1001
1002     delete pmeGpu;
1003 }
1004
1005 void pme_gpu_reinit_atoms(PmeGpu* pmeGpu, const int nAtoms, const real* chargesA, const real* chargesB)
1006 {
1007     auto* kernelParamsPtr         = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
1008     kernelParamsPtr->atoms.nAtoms = nAtoms;
1009     const int  block_size         = pme_gpu_get_atom_data_block_size();
1010     const int  nAtomsNewPadded    = ((nAtoms + block_size - 1) / block_size) * block_size;
1011     const bool haveToRealloc      = (pmeGpu->nAtomsAlloc < nAtomsNewPadded);
1012     pmeGpu->nAtomsAlloc           = nAtomsNewPadded;
1013
1014 #if GMX_DOUBLE
1015     GMX_RELEASE_ASSERT(false, "Only single precision supported");
1016     GMX_UNUSED_VALUE(chargesA);
         GMX_UNUSED_VALUE(chargesB);
1017 #else
1018     int gridIndex = 0;
1019     /* This could also be conditional on haveToRealloc, but the copy always needs to be performed */
1020     pme_gpu_realloc_and_copy_input_coefficients(pmeGpu, reinterpret_cast<const float*>(chargesA), gridIndex);
1021     gridIndex++;
1022     if (chargesB != nullptr)
1023     {
1024         pme_gpu_realloc_and_copy_input_coefficients(
1025                 pmeGpu, reinterpret_cast<const float*>(chargesB), gridIndex);
1026     }
1027     else
1028     {
1029         /* Fill the second set of coefficients with chargesA as well to be able to avoid
1030          * conditionals in the GPU kernels */
1031         /* FIXME: This should be avoided by making a separate templated version of the
1032          * relevant kernel(s) (probably only pme_gather_kernel). That would require a
1033          * reduction of the current number of templated parameters of that kernel. */
1034         pme_gpu_realloc_and_copy_input_coefficients(
1035                 pmeGpu, reinterpret_cast<const float*>(chargesA), gridIndex);
1036     }
1037 #endif
1038
1039     if (haveToRealloc)
1040     {
1041         pme_gpu_realloc_forces(pmeGpu);
1042         pme_gpu_realloc_spline_data(pmeGpu);
1043         pme_gpu_realloc_grid_indices(pmeGpu);
1044     }
1045     pme_gpu_select_best_performing_pme_spreadgather_kernels(pmeGpu);
1046 }
1047
1048 /*! \internal \brief
1049  * Returns the raw timing event from the corresponding GpuRegionTimer (if timings are enabled).
1050  * In CUDA, the result can be a nullptr stub, per the GpuRegionTimer implementation.
1051  *
1052  * \param[in] pmeGpu         The PME GPU data structure.
1053  * \param[in] pmeStageId     The PME GPU stage gtPME_ index from the enum in src/gromacs/timing/gpu_timing.h
1054  */
1055 static CommandEvent* pme_gpu_fetch_timing_event(const PmeGpu* pmeGpu, PmeStage pmeStageId)
1056 {
1057     CommandEvent* timingEvent = nullptr;
1058     if (pme_gpu_timings_enabled(pmeGpu))
1059     {
1060         GMX_ASSERT(pmeStageId < PmeStage::Count, "Wrong PME GPU timing event index");
1061         timingEvent = pmeGpu->archSpecific->timingEvents[pmeStageId].fetchNextEvent();
1062     }
1063     return timingEvent;
1064 }
1065
1066 void pme_gpu_3dfft(const PmeGpu* pmeGpu, gmx_fft_direction dir, const int grid_index)
1067 {
1068     PmeStage timerId = (dir == GMX_FFT_REAL_TO_COMPLEX) ? PmeStage::FftTransformR2C
1069                                                         : PmeStage::FftTransformC2R;
1070
1071     pme_gpu_start_timing(pmeGpu, timerId);
1072     pmeGpu->archSpecific->fftSetup[grid_index]->perform3dFft(
1073             dir, pme_gpu_fetch_timing_event(pmeGpu, timerId));
1074     pme_gpu_stop_timing(pmeGpu, timerId);
1075 }
1076
1077 /*! \brief
1078  * Given possibly large \p blockCount, returns a compact 1D or 2D grid for kernel scheduling,
1079  * to minimize the number of unused blocks.
1080  */
1081 std::pair<int, int> inline pmeGpuCreateGrid(const PmeGpu* pmeGpu, int blockCount)
1082 {
1083     // How many maximum widths in X do we need (hopefully just one)
1084     const int minRowCount = (blockCount + pmeGpu->maxGridWidthX - 1) / pmeGpu->maxGridWidthX;
1085     // Trying to make things even
1086     const int colCount = (blockCount + minRowCount - 1) / minRowCount;
1087     GMX_ASSERT((colCount * minRowCount - blockCount) >= 0, "pmeGpuCreateGrid: totally wrong");
1088     GMX_ASSERT((colCount * minRowCount - blockCount) < minRowCount,
1089                "pmeGpuCreateGrid: excessive blocks");
1090     return std::pair<int, int>(colCount, minRowCount);
1091 }
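/* Editorial illustration (hypothetical numbers, not from the original source): with
 * maxGridWidthX == 65535 and blockCount == 100000, minRowCount == 2 and colCount == 50000,
 * giving a 50000 x 2 launch grid with no unused blocks; when blockCount fits within one row,
 * the result degenerates to (blockCount, 1).
 */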
1092
1093 /*! \brief
1094  * Returns a pointer to appropriate spline and spread kernel based on the input bool values
1095  *
1096  * \param[in]  pmeGpu                   The PME GPU structure.
1097  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
1098  * \param[in]  writeSplinesToGlobal     Whether spline data should be written to global memory
1099  * \param[in]  numGrids                 Number of grids to use. numGrids == 2 if Coulomb is perturbed.
1100  *
1101  * \return Pointer to the selected spline-and-spread GPU kernel
1102  */
1103 static auto selectSplineAndSpreadKernelPtr(const PmeGpu*  pmeGpu,
1104                                            ThreadsPerAtom threadsPerAtom,
1105                                            bool           writeSplinesToGlobal,
1106                                            const int      numGrids)
1107 {
1108     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1109     if (writeSplinesToGlobal)
1110     {
1111         if (threadsPerAtom == ThreadsPerAtom::Order)
1112         {
1113             if (numGrids == 2)
1114             {
1115                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesThPerAtom4Dual;
1116             }
1117             else
1118             {
1119                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesThPerAtom4Single;
1120             }
1121         }
1122         else
1123         {
1124             if (numGrids == 2)
1125             {
1126                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesDual;
1127             }
1128             else
1129             {
1130                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesSingle;
1131             }
1132         }
1133     }
1134     else
1135     {
1136         if (threadsPerAtom == ThreadsPerAtom::Order)
1137         {
1138             if (numGrids == 2)
1139             {
1140                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Dual;
1141             }
1142             else
1143             {
1144                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Single;
1145             }
1146         }
1147         else
1148         {
1149             if (numGrids == 2)
1150             {
1151                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelDual;
1152             }
1153             else
1154             {
1155                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelSingle;
1156             }
1157         }
1158     }
1159
1160     return kernelPtr;
1161 }
1162
1163 /*! \brief
1164  * Returns a pointer to appropriate spline kernel based on the input bool values
1165  *
1166  * \param[in]  pmeGpu                   The PME GPU structure.
1167  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
1168  * \param[in]  writeSplinesToGlobal     Whether spline data should be written to global memory
1169  * \param[in]  numGrids                 Number of grids to use. numGrids == 2 if Coulomb is perturbed.
1170  *
1171  * \return Pointer to the selected spline GPU kernel
1172  */
1173 static auto selectSplineKernelPtr(const PmeGpu*   pmeGpu,
1174                                   ThreadsPerAtom  threadsPerAtom,
1175                                   bool gmx_unused writeSplinesToGlobal,
1176                                   const int       numGrids)
1177 {
1178     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1179     GMX_ASSERT(
1180             writeSplinesToGlobal,
1181             "Spline data should always be written to global memory when just calculating splines");
1182
1183     if (threadsPerAtom == ThreadsPerAtom::Order)
1184     {
1185         if (numGrids == 2)
1186         {
1187             kernelPtr = pmeGpu->programHandle_->impl_->splineKernelThPerAtom4Dual;
1188         }
1189         else
1190         {
1191             kernelPtr = pmeGpu->programHandle_->impl_->splineKernelThPerAtom4Single;
1192         }
1193     }
1194     else
1195     {
1196         if (numGrids == 2)
1197         {
1198             kernelPtr = pmeGpu->programHandle_->impl_->splineKernelDual;
1199         }
1200         else
1201         {
1202             kernelPtr = pmeGpu->programHandle_->impl_->splineKernelSingle;
1203         }
1204     }
1205     return kernelPtr;
1206 }
1207
1208 /*! \brief
1209  * Returns a pointer to appropriate spread kernel based on the input bool values
1210  *
1211  * \param[in]  pmeGpu                   The PME GPU structure.
1212  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
1213  * \param[in]  writeSplinesToGlobal     Whether spline data should be written to global memory
1214  * \param[in]  numGrids                 Number of grids to use. numGrids == 2 if Coulomb is perturbed.
1215  *
1216  * \return Pointer to the selected spread GPU kernel
1217  */
1218 static auto selectSpreadKernelPtr(const PmeGpu*  pmeGpu,
1219                                   ThreadsPerAtom threadsPerAtom,
1220                                   bool           writeSplinesToGlobal,
1221                                   const int      numGrids)
1222 {
1223     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1224     if (writeSplinesToGlobal)
1225     {
1226         if (threadsPerAtom == ThreadsPerAtom::Order)
1227         {
1228             if (numGrids == 2)
1229             {
1230                 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelThPerAtom4Dual;
1231             }
1232             else
1233             {
1234                 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelThPerAtom4Single;
1235             }
1236         }
1237         else
1238         {
1239             if (numGrids == 2)
1240             {
1241                 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelDual;
1242             }
1243             else
1244             {
1245                 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelSingle;
1246             }
1247         }
1248     }
1249     else
1250     {
1251         /* If we are not saving the spline data, we need to recalculate it
1252            using the combined spline-and-spread kernel. */
1253         if (threadsPerAtom == ThreadsPerAtom::Order)
1254         {
1255             if (numGrids == 2)
1256             {
1257                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Dual;
1258             }
1259             else
1260             {
1261                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Single;
1262             }
1263         }
1264         else
1265         {
1266             if (numGrids == 2)
1267             {
1268                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelDual;
1269             }
1270             else
1271             {
1272                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelSingle;
1273             }
1274         }
1275     }
1276     return kernelPtr;
1277 }
1278
1279 void pme_gpu_spread(const PmeGpu*         pmeGpu,
1280                     GpuEventSynchronizer* xReadyOnDevice,
1281                     real**                h_grids,
1282                     bool                  computeSplines,
1283                     bool                  spreadCharges,
1284                     const real            lambda)
1285 {
1286     GMX_ASSERT(
1287             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
1288             "Only one (normal Coulomb PME) or two (FEP Coulomb PME) PME grids can be used on GPU");
1289
1290     GMX_ASSERT(computeSplines || spreadCharges,
1291                "PME spline/spread kernel has invalid input (nothing to do)");
1292     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1293     GMX_ASSERT(kernelParamsPtr->atoms.nAtoms > 0, "No atom data in PME GPU spread");
1294
1295     const size_t blockSize = pmeGpu->programHandle_->impl_->spreadWorkGroupSize;
1296
1297     const int order = pmeGpu->common->pme_order;
1298     GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1299     const bool writeGlobal = pmeGpu->settings.copyAllOutputs;
1300     const int  threadsPerAtom =
1301             (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
1302     const bool recalculateSplines = pmeGpu->settings.recalculateSplines;
1303
1304     GMX_ASSERT(!GMX_GPU_OPENCL || pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::OrderSquared,
1305                "Only 16 threads per atom supported in OpenCL");
1306     GMX_ASSERT(!GMX_GPU_OPENCL || !recalculateSplines,
1307                "Recalculating splines not supported in OpenCL");
1308
1309     const int atomsPerBlock = blockSize / threadsPerAtom;
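    // Illustrative arithmetic only (hypothetical sizes): with a spread work-group of 256 threads
    // and order*order = 16 threads per atom, each block handles 256 / 16 = 16 atoms, and
    // c_pmeAtomDataBlockSize must then be a multiple of 16 (asserted below).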
1310
1311     // TODO: pick smaller block size in runtime if needed
1312     // (e.g. on 660 Ti where 50% occupancy is ~25% faster than 100% occupancy with RNAse (~17.8k atoms))
1313     // If doing so, change atomsPerBlock in the kernels as well.
1314     // TODO: test varying block sizes on modern arch-s as well
1315     // TODO: also consider using cudaFuncSetCacheConfig() for preferring shared memory on older architectures
1316     // (for spline data, mostly)
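    // Hedged sketch only, not enabled: on CUDA the cache-config TODO above could look roughly like
    //     cudaFuncSetCacheConfig(reinterpret_cast<const void*>(kernelPtr), cudaFuncCachePreferShared);
    // applied after kernelPtr has been selected further down; any benefit is architecture-dependent
    // and has not been measured here.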
1317     GMX_ASSERT(!(c_pmeAtomDataBlockSize % atomsPerBlock),
1318                "inconsistent atom data padding vs. spreading block size");
1319
1320     // Ensure that coordinates are ready on the device before launching spread;
1321     // this is only needed with CUDA on PP+PME ranks. Separate PME ranks, unit tests,
1322     // and OpenCL all use a single stream and therefore pass xReadyOnDevice == nullptr.
1323     GMX_ASSERT(!GMX_GPU_CUDA || xReadyOnDevice != nullptr || pmeGpu->common->isRankPmeOnly
1324                        || pme_gpu_settings(pmeGpu).copyAllOutputs,
1325                "Need a valid coordinate synchronizer on PP+PME ranks with CUDA.");
1326
1327     if (xReadyOnDevice)
1328     {
1329         xReadyOnDevice->enqueueWaitEvent(pmeGpu->archSpecific->pmeStream_);
1330     }
1331
1332     const int blockCount = pmeGpu->nAtomsAlloc / atomsPerBlock;
1333     auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1334
1335     if (pmeGpu->common->ngrids == 1)
1336     {
1337         kernelParamsPtr->current.scale = 1.0;
1338     }
1339     else
1340     {
1341         kernelParamsPtr->current.scale = 1.0 - lambda;
1342     }
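    // Editorial note (presumed semantics, not verified against the kernels here): with two grids the
    // A and B contributions are weighted by scale = 1 - lambda and 1 - scale = lambda respectively,
    // so lambda = 0 reduces to a pure state-A calculation.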
1343
1344     KernelLaunchConfig config;
1345     config.blockSize[0] = order;
1346     config.blockSize[1] = (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? 1 : order);
1347     config.blockSize[2] = atomsPerBlock;
1348     config.gridSize[0]  = dimGrid.first;
1349     config.gridSize[1]  = dimGrid.second;
1350
1351     PmeStage                           timingId;
1352     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1353     if (computeSplines)
1354     {
1355         if (spreadCharges)
1356         {
1357             timingId  = PmeStage::SplineAndSpread;
1358             kernelPtr = selectSplineAndSpreadKernelPtr(pmeGpu,
1359                                                        pmeGpu->settings.threadsPerAtom,
1360                                                        writeGlobal || (!recalculateSplines),
1361                                                        pmeGpu->common->ngrids);
1362         }
1363         else
1364         {
1365             timingId  = PmeStage::Spline;
1366             kernelPtr = selectSplineKernelPtr(pmeGpu,
1367                                               pmeGpu->settings.threadsPerAtom,
1368                                               writeGlobal || (!recalculateSplines),
1369                                               pmeGpu->common->ngrids);
1370         }
1371     }
1372     else
1373     {
1374         timingId  = PmeStage::Spread;
1375         kernelPtr = selectSpreadKernelPtr(pmeGpu,
1376                                           pmeGpu->settings.threadsPerAtom,
1377                                           writeGlobal || (!recalculateSplines),
1378                                           pmeGpu->common->ngrids);
1379     }
1380
1381
1382     pme_gpu_start_timing(pmeGpu, timingId);
1383     auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1384 #if c_canEmbedBuffers
1385     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1386 #else
1387     const auto kernelArgs =
1388             prepareGpuKernelArguments(kernelPtr,
1389                                       config,
1390                                       kernelParamsPtr,
1391                                       &kernelParamsPtr->atoms.d_theta,
1392                                       &kernelParamsPtr->atoms.d_dtheta,
1393                                       &kernelParamsPtr->atoms.d_gridlineIndices,
1394                                       &kernelParamsPtr->grid.d_realGrid[FEP_STATE_A],
1395                                       &kernelParamsPtr->grid.d_realGrid[FEP_STATE_B],
1396                                       &kernelParamsPtr->grid.d_fractShiftsTable,
1397                                       &kernelParamsPtr->grid.d_gridlineIndicesTable,
1398                                       &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_A],
1399                                       &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_B],
1400                                       &kernelParamsPtr->atoms.d_coordinates);
1401 #endif
1402
1403     launchGpuKernel(
1404             kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME spline/spread", kernelArgs);
1405     pme_gpu_stop_timing(pmeGpu, timingId);
1406
1407     const auto& settings    = pmeGpu->settings;
1408     const bool copyBackGrid = spreadCharges && (!settings.performGPUFFT || settings.copyAllOutputs);
1409     if (copyBackGrid)
1410     {
1411         for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
1412         {
1413             float* h_grid = h_grids[gridIndex];
1414             pme_gpu_copy_output_spread_grid(pmeGpu, h_grid, gridIndex);
1415         }
1416     }
1417     const bool copyBackAtomData =
1418             computeSplines && (!settings.performGPUGather || settings.copyAllOutputs);
1419     if (copyBackAtomData)
1420     {
1421         pme_gpu_copy_output_spread_atom_data(pmeGpu);
1422     }
1423 }
1424
1425 void pme_gpu_solve(const PmeGpu* pmeGpu,
1426                    const int     gridIndex,
1427                    t_complex*    h_grid,
1428                    GridOrdering  gridOrdering,
1429                    bool          computeEnergyAndVirial)
1430 {
1431     GMX_ASSERT(
1432             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
1433             "Only one (normal Coulomb PME) or two (FEP Coulomb PME) PME grids can be used on GPU");
1434     GMX_ASSERT(gridIndex < pmeGpu->common->ngrids,
1435                "Invalid combination of gridIndex and number of grids");
1436
1437     const auto& settings               = pmeGpu->settings;
1438     const bool  copyInputAndOutputGrid = !settings.performGPUFFT || settings.copyAllOutputs;
1439
1440     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1441
1442     float* h_gridFloat = reinterpret_cast<float*>(h_grid);
1443     if (copyInputAndOutputGrid)
1444     {
1445         copyToDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid[gridIndex],
1446                            h_gridFloat,
1447                            0,
1448                            pmeGpu->archSpecific->complexGridSize[gridIndex],
1449                            pmeGpu->archSpecific->pmeStream_,
1450                            pmeGpu->settings.transferKind,
1451                            nullptr);
1452     }
1453
1454     int majorDim = -1, middleDim = -1, minorDim = -1;
1455     switch (gridOrdering)
1456     {
1457         case GridOrdering::YZX:
1458             majorDim  = YY;
1459             middleDim = ZZ;
1460             minorDim  = XX;
1461             break;
1462
1463         case GridOrdering::XYZ:
1464             majorDim  = XX;
1465             middleDim = YY;
1466             minorDim  = ZZ;
1467             break;
1468
1469         default: GMX_ASSERT(false, "Implement grid ordering here and below for the kernel launch");
1470     }
1471
1472     const int maxBlockSize = pmeGpu->programHandle_->impl_->solveMaxWorkGroupSize;
1473
1474     const int gridLineSize      = pmeGpu->kernelParams->grid.complexGridSize[minorDim];
1475     const int gridLinesPerBlock = std::max(maxBlockSize / gridLineSize, 1);
1476     const int blocksPerGridLine = (gridLineSize + maxBlockSize - 1) / maxBlockSize;
1477     int       cellsPerBlock;
1478     if (blocksPerGridLine == 1)
1479     {
1480         cellsPerBlock = gridLineSize * gridLinesPerBlock;
1481     }
1482     else
1483     {
1484         cellsPerBlock = (gridLineSize + blocksPerGridLine - 1) / blocksPerGridLine;
1485     }
1486     const int warpSize  = pmeGpu->programHandle_->warpSize();
1487     const int blockSize = (cellsPerBlock + warpSize - 1) / warpSize * warpSize;
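    // Worked example with hypothetical numbers: for maxBlockSize = 256, warpSize = 32 and a
    // minor-dimension grid line of 129 complex cells, gridLinesPerBlock = 1 and blocksPerGridLine = 1,
    // so cellsPerBlock = 129 and blockSize is rounded up to 5 * 32 = 160 threads.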
1488
1489     static_assert(!GMX_GPU_CUDA || c_solveMaxWarpsPerBlock / 2 >= 4,
1490                   "The CUDA solve energy kernels need at least 4 warps. "
1491                   "Here we launch at least half of the max warps.");
1492
1493     KernelLaunchConfig config;
1494     config.blockSize[0] = blockSize;
1495     config.gridSize[0]  = blocksPerGridLine;
1496     // rounding up to full warps so that shuffle operations produce defined results
1497     config.gridSize[1] = (pmeGpu->kernelParams->grid.complexGridSize[middleDim] + gridLinesPerBlock - 1)
1498                          / gridLinesPerBlock;
1499     config.gridSize[2] = pmeGpu->kernelParams->grid.complexGridSize[majorDim];
1500
1501     PmeStage                           timingId  = PmeStage::Solve;
1502     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1503     if (gridOrdering == GridOrdering::YZX)
1504     {
1505         if (gridIndex == 0)
1506         {
1507             kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveYZXEnergyKernelA
1508                                                : pmeGpu->programHandle_->impl_->solveYZXKernelA;
1509         }
1510         else
1511         {
1512             kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveYZXEnergyKernelB
1513                                                : pmeGpu->programHandle_->impl_->solveYZXKernelB;
1514         }
1515     }
1516     else if (gridOrdering == GridOrdering::XYZ)
1517     {
1518         if (gridIndex == 0)
1519         {
1520             kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveXYZEnergyKernelA
1521                                                : pmeGpu->programHandle_->impl_->solveXYZKernelA;
1522         }
1523         else
1524         {
1525             kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveXYZEnergyKernelB
1526                                                : pmeGpu->programHandle_->impl_->solveXYZKernelB;
1527         }
1528     }
1529
1530     pme_gpu_start_timing(pmeGpu, timingId);
1531     auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1532 #if c_canEmbedBuffers
1533     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1534 #else
1535     const auto kernelArgs =
1536             prepareGpuKernelArguments(kernelPtr,
1537                                       config,
1538                                       kernelParamsPtr,
1539                                       &kernelParamsPtr->grid.d_splineModuli[gridIndex],
1540                                       &kernelParamsPtr->constants.d_virialAndEnergy[gridIndex],
1541                                       &kernelParamsPtr->grid.d_fourierGrid[gridIndex]);
1542 #endif
1543     launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME solve", kernelArgs);
1544     pme_gpu_stop_timing(pmeGpu, timingId);
1545
1546     if (computeEnergyAndVirial)
1547     {
1548         copyFromDeviceBuffer(pmeGpu->staging.h_virialAndEnergy[gridIndex],
1549                              &kernelParamsPtr->constants.d_virialAndEnergy[gridIndex],
1550                              0,
1551                              c_virialAndEnergyCount,
1552                              pmeGpu->archSpecific->pmeStream_,
1553                              pmeGpu->settings.transferKind,
1554                              nullptr);
1555     }
1556
1557     if (copyInputAndOutputGrid)
1558     {
1559         copyFromDeviceBuffer(h_gridFloat,
1560                              &kernelParamsPtr->grid.d_fourierGrid[gridIndex],
1561                              0,
1562                              pmeGpu->archSpecific->complexGridSize[gridIndex],
1563                              pmeGpu->archSpecific->pmeStream_,
1564                              pmeGpu->settings.transferKind,
1565                              nullptr);
1566     }
1567 }
1568
1569 /*! \brief
1570  * Returns a pointer to the appropriate gather kernel based on the input parameters
1571  *
1572  * \param[in]  pmeGpu                   The PME GPU structure.
1573  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
1574  * \param[in]  readSplinesFromGlobal    bool controlling if we should read spline data from global memory
1575  * \param[in]  numGrids                 Number of grids to use. numGrids == 2 if Coulomb is perturbed.
1576  *
1577  * \return Pointer to CUDA kernel
1578  */
1579 inline auto selectGatherKernelPtr(const PmeGpu*  pmeGpu,
1580                                   ThreadsPerAtom threadsPerAtom,
1581                                   bool           readSplinesFromGlobal,
1582                                   const int      numGrids)
1583
1584 {
1585     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1586
1587     if (readSplinesFromGlobal)
1588     {
1589         if (threadsPerAtom == ThreadsPerAtom::Order)
1590         {
1591             if (numGrids == 2)
1592             {
1593                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesThPerAtom4Dual;
1594             }
1595             else
1596             {
1597                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesThPerAtom4Single;
1598             }
1599         }
1600         else
1601         {
1602             if (numGrids == 2)
1603             {
1604                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesDual;
1605             }
1606             else
1607             {
1608                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesSingle;
1609             }
1610         }
1611     }
1612     else
1613     {
1614         if (threadsPerAtom == ThreadsPerAtom::Order)
1615         {
1616             if (numGrids == 2)
1617             {
1618                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelThPerAtom4Dual;
1619             }
1620             else
1621             {
1622                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelThPerAtom4Single;
1623             }
1624         }
1625         else
1626         {
1627             if (numGrids == 2)
1628             {
1629                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelDual;
1630             }
1631             else
1632             {
1633                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelSingle;
1634             }
1635         }
1636     }
1637     return kernelPtr;
1638 }
1639
1640 void pme_gpu_gather(PmeGpu* pmeGpu, real** h_grids, const float lambda)
1641 {
1642     GMX_ASSERT(
1643             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
1644             "Only one (normal Coulomb PME) or two (FEP Coulomb PME) PME grids can be used on GPU");
1645
1646     const auto& settings = pmeGpu->settings;
1647
1648     if (!settings.performGPUFFT || settings.copyAllOutputs)
1649     {
1650         for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
1651         {
1652             float* h_grid = const_cast<float*>(h_grids[gridIndex]);
1653             pme_gpu_copy_input_gather_grid(pmeGpu, h_grid, gridIndex);
1654         }
1655     }
1656
1657     if (settings.copyAllOutputs)
1658     {
1659         pme_gpu_copy_input_gather_atom_data(pmeGpu);
1660     }
1661
1662     /* readGlobal is set when all outputs are copied back to the host (as in unit tests) */
1663     const bool   readGlobal = pmeGpu->settings.copyAllOutputs;
1664     const size_t blockSize  = pmeGpu->programHandle_->impl_->gatherWorkGroupSize;
1665     const int    order      = pmeGpu->common->pme_order;
1666     GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1667     const int threadsPerAtom =
1668             (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
1669     const bool recalculateSplines = pmeGpu->settings.recalculateSplines;
1670
1671     GMX_ASSERT(!GMX_GPU_OPENCL || pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::OrderSquared,
1672                "Only 16 threads per atom supported in OpenCL");
1673     GMX_ASSERT(!GMX_GPU_OPENCL || !recalculateSplines,
1674                "Recalculating splines not supported in OpenCL");
1675
1676     const int atomsPerBlock = blockSize / threadsPerAtom;
1677
1678     GMX_ASSERT(!(c_pmeAtomDataBlockSize % atomsPerBlock),
1679                "inconsistent atom data padding vs. gathering block size");
1680
1681     const int blockCount = pmeGpu->nAtomsAlloc / atomsPerBlock;
1682     auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1683
1684     KernelLaunchConfig config;
1685     config.blockSize[0] = order;
1686     config.blockSize[1] = (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? 1 : order);
1687     config.blockSize[2] = atomsPerBlock;
1688     config.gridSize[0]  = dimGrid.first;
1689     config.gridSize[1]  = dimGrid.second;
1690
1691     // TODO test different cache configs
1692
1693     PmeStage                           timingId = PmeStage::Gather;
1694     PmeGpuProgramImpl::PmeKernelHandle kernelPtr =
1695             selectGatherKernelPtr(pmeGpu,
1696                                   pmeGpu->settings.threadsPerAtom,
1697                                   readGlobal || (!recalculateSplines),
1698                                   pmeGpu->common->ngrids);
1699     // TODO design kernel selection getters and make PmeGpu a friend of PmeGpuProgramImpl
1700
1701     pme_gpu_start_timing(pmeGpu, timingId);
1702     auto* timingEvent     = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1703     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1704     if (pmeGpu->common->ngrids == 1)
1705     {
1706         kernelParamsPtr->current.scale = 1.0;
1707     }
1708     else
1709     {
1710         kernelParamsPtr->current.scale = 1.0 - lambda;
1711     }
1712
1713 #if c_canEmbedBuffers
1714     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1715 #else
1716     const auto kernelArgs =
1717             prepareGpuKernelArguments(kernelPtr,
1718                                       config,
1719                                       kernelParamsPtr,
1720                                       &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_A],
1721                                       &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_B],
1722                                       &kernelParamsPtr->grid.d_realGrid[FEP_STATE_A],
1723                                       &kernelParamsPtr->grid.d_realGrid[FEP_STATE_B],
1724                                       &kernelParamsPtr->atoms.d_theta,
1725                                       &kernelParamsPtr->atoms.d_dtheta,
1726                                       &kernelParamsPtr->atoms.d_gridlineIndices,
1727                                       &kernelParamsPtr->atoms.d_forces);
1728 #endif
1729     launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME gather", kernelArgs);
1730     pme_gpu_stop_timing(pmeGpu, timingId);
1731
1732     if (pmeGpu->settings.useGpuForceReduction)
1733     {
1734         pmeGpu->archSpecific->pmeForcesReady.markEvent(pmeGpu->archSpecific->pmeStream_);
1735     }
1736     else
1737     {
1738         pme_gpu_copy_output_forces(pmeGpu);
1739     }
1740 }
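
/* Illustrative call order only (editorial sketch, not code invoked in this file): a typical GPU PME
 * step is expected to run spread -> forward 3D FFT -> solve -> inverse 3D FFT -> gather, e.g.
 *     pme_gpu_spread(pmeGpu, xReadyOnDevice, h_grids, true, true, lambda);
 *     // ... forward R2C FFT of each grid (on the GPU when settings.performGPUFFT is set) ...
 *     pme_gpu_solve(pmeGpu, gridIndex, h_grid, GridOrdering::YZX, computeEnergyAndVirial);
 *     // ... inverse C2R FFT ...
 *     pme_gpu_gather(pmeGpu, h_grids, lambda);
 * The actual driver logic lives outside this file; the variable names above are placeholders.
 */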
1741
1742 DeviceBuffer<gmx::RVec> pme_gpu_get_kernelparam_forces(const PmeGpu* pmeGpu)
1743 {
1744     if (pmeGpu && pmeGpu->kernelParams)
1745     {
1746         return pmeGpu->kernelParams->atoms.d_forces;
1747     }
1748     else
1749     {
1750         return DeviceBuffer<gmx::RVec>{};
1751     }
1752 }
1753
1754 void pme_gpu_set_kernelparam_coordinates(const PmeGpu* pmeGpu, DeviceBuffer<gmx::RVec> d_x)
1755 {
1756     GMX_ASSERT(pmeGpu && pmeGpu->kernelParams,
1757                "PME GPU device buffer cannot be set in non-GPU builds or before the GPU PME was "
1758                "initialized.");
1759
1760     GMX_ASSERT(checkDeviceBuffer(d_x, pmeGpu->kernelParams->atoms.nAtoms),
1761                "The device-side buffer cannot be set.");
1762
1763     pmeGpu->kernelParams->atoms.d_coordinates = d_x;
1764 }
1765
1766 GpuEventSynchronizer* pme_gpu_get_forces_ready_synchronizer(const PmeGpu* pmeGpu)
1767 {
1768     if (pmeGpu && pmeGpu->kernelParams)
1769     {
1770         return &pmeGpu->archSpecific->pmeForcesReady;
1771     }
1772     else
1773     {
1774         return nullptr;
1775     }
1776 }