Unify coordinate copy handling across GPU platforms
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2016,2017,2018,2019,2020 by the GROMACS development team.
5  * Copyright (c) 2021, by the GROMACS development team, led by
6  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
7  * and including many others, as listed in the AUTHORS file in the
8  * top-level source directory and at http://www.gromacs.org.
9  *
10  * GROMACS is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public License
12  * as published by the Free Software Foundation; either version 2.1
13  * of the License, or (at your option) any later version.
14  *
15  * GROMACS is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with GROMACS; if not, see
22  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
23  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
24  *
25  * If you want to redistribute modifications to GROMACS, please
26  * consider that scientific software is very special. Version
27  * control is crucial - bugs must be traceable. We will be happy to
28  * consider code for inclusion in the official distribution, but
29  * derived work must not be called official GROMACS. Details are found
30  * in the README & COPYING files - if they are missing, get the
31  * official version at http://www.gromacs.org.
32  *
33  * To help us fund GROMACS development, we humbly ask that you cite
34  * the research papers on the package. Check out http://www.gromacs.org.
35  */
36
37 /*! \internal \file
38  *
39  * \brief This file contains internal function implementations
40  * for performing the PME calculations on GPU.
41  *
42  * Note that this file is compiled as regular C++ source in OpenCL builds, but
43  * it is treated as CUDA source in CUDA-enabled GPU builds.
44  *
45  * \author Aleksei Iupinov <a.yupinov@gmail.com>
46  * \ingroup module_ewald
47  */
48
49 #include "gmxpre.h"
50
51 #include "pme_gpu_internal.h"
52
53 #include "config.h"
54
55 #include <list>
56 #include <memory>
57 #include <string>
58
59 #include "gromacs/ewald/ewald_utils.h"
60 #include "gromacs/fft/gpu_3dfft.h"
61 #include "gromacs/gpu_utils/device_context.h"
62 #include "gromacs/gpu_utils/device_stream.h"
63 #include "gromacs/gpu_utils/gpu_utils.h"
64 #include "gromacs/gpu_utils/pmalloc.h"
65 #if GMX_GPU_SYCL
66 #    include "gromacs/gpu_utils/syclutils.h"
67 #endif
68 #include "gromacs/hardware/device_information.h"
69 #include "gromacs/math/invertmatrix.h"
70 #include "gromacs/math/units.h"
71 #include "gromacs/timing/gpu_timing.h"
72 #include "gromacs/utility/exceptions.h"
73 #include "gromacs/utility/fatalerror.h"
74 #include "gromacs/utility/gmxassert.h"
75 #include "gromacs/utility/logger.h"
76 #include "gromacs/utility/stringutil.h"
77 #include "gromacs/ewald/pme.h"
78 #include "gromacs/ewald/pme_coordinate_receiver_gpu.h"
79
80 #if GMX_GPU_CUDA
81 #    include "pme.cuh"
82 #endif
83
84 #include "pme_gpu_calculate_splines.h"
85 #include "pme_gpu_constants.h"
86 #include "pme_gpu_program_impl.h"
87 #include "pme_gpu_timings.h"
88 #include "pme_gpu_types.h"
89 #include "pme_gpu_types_host.h"
90 #include "pme_gpu_types_host_impl.h"
91 #include "pme_grid.h"
92 #include "pme_internal.h"
93 #include "pme_solve.h"
94
95 /*! \brief
96  * CUDA only.
97  * Atom limit above which it is advantageous to recalculate the splines in the gather kernel
98  * and to use fewer threads per atom in the spline and spread kernels.
99  */
100 constexpr int c_pmeGpuPerformanceAtomLimit = 23000;
101
102 /*! \internal \brief
103  * Wrapper for getting a pointer to the plain C++ part of the GPU kernel parameters structure.
104  *
105  * \param[in] pmeGpu  The PME GPU structure.
106  * \returns The pointer to the kernel parameters.
107  */
108 static PmeGpuKernelParamsBase* pme_gpu_get_kernel_params_base_ptr(const PmeGpu* pmeGpu)
109 {
110     // reinterpret_cast is needed because the derived CUDA structure is not known in this file
111     auto* kernelParamsPtr = reinterpret_cast<PmeGpuKernelParamsBase*>(pmeGpu->kernelParams.get());
112     return kernelParamsPtr;
113 }
114
115 /*! \brief
116  * Atom data block size (in terms of number of atoms).
117  * This is the least common multiple of number of atoms processed by
118  * a single block/workgroup of the spread and gather kernels.
119  * The GPU atom data buffers must be padded, which means that
120  * the numbers of atoms used for determining the size of the memory
121  * allocation must be divisible by this.
122  */
123 constexpr int c_pmeAtomDataBlockSize = 64;
124
125 int pme_gpu_get_atom_data_block_size()
126 {
127     return c_pmeAtomDataBlockSize;
128 }
129
130 void pme_gpu_synchronize(const PmeGpu* pmeGpu)
131 {
132     pmeGpu->archSpecific->pmeStream_.synchronize();
133 }
134
135 void pme_gpu_alloc_energy_virial(PmeGpu* pmeGpu)
136 {
137     const size_t energyAndVirialSize = c_virialAndEnergyCount * sizeof(float);
138
139     GMX_ASSERT(
140             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
141             "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
142
143     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
144     {
145         allocateDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy[gridIndex],
146                              c_virialAndEnergyCount,
147                              pmeGpu->archSpecific->deviceContext_);
148         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_virialAndEnergy[gridIndex]), energyAndVirialSize);
149     }
150 }
151
152 void pme_gpu_free_energy_virial(PmeGpu* pmeGpu)
153 {
154     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
155     {
156         freeDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy[gridIndex]);
157         pfree(pmeGpu->staging.h_virialAndEnergy[gridIndex]);
158         pmeGpu->staging.h_virialAndEnergy[gridIndex] = nullptr;
159     }
160 }
161
162 void pme_gpu_clear_energy_virial(const PmeGpu* pmeGpu)
163 {
164     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
165     {
166         clearDeviceBufferAsync(&pmeGpu->kernelParams->constants.d_virialAndEnergy[gridIndex],
167                                0,
168                                c_virialAndEnergyCount,
169                                pmeGpu->archSpecific->pmeStream_);
170     }
171 }
172
173 void pme_gpu_realloc_and_copy_bspline_values(PmeGpu* pmeGpu, const int gridIndex)
174 {
175     GMX_ASSERT(
176             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
177             "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
178     GMX_ASSERT(gridIndex < pmeGpu->common->ngrids,
179                "Invalid combination of gridIndex and number of grids");
180
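        // The B-spline moduli for X, Y and Z are packed back-to-back into one buffer;
        // splineValuesOffset records where each dimension starts.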
181     const int splineValuesOffset[DIM] = { 0,
182                                           pmeGpu->kernelParams->grid.realGridSize[XX],
183                                           pmeGpu->kernelParams->grid.realGridSize[XX]
184                                                   + pmeGpu->kernelParams->grid.realGridSize[YY] };
185     memcpy(&pmeGpu->kernelParams->grid.splineValuesOffset, &splineValuesOffset, sizeof(splineValuesOffset));
186
187     const int newSplineValuesSize = pmeGpu->kernelParams->grid.realGridSize[XX]
188                                     + pmeGpu->kernelParams->grid.realGridSize[YY]
189                                     + pmeGpu->kernelParams->grid.realGridSize[ZZ];
190     const bool shouldRealloc = (newSplineValuesSize > pmeGpu->archSpecific->splineValuesSize[gridIndex]);
191     reallocateDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli[gridIndex],
192                            newSplineValuesSize,
193                            &pmeGpu->archSpecific->splineValuesSize[gridIndex],
194                            &pmeGpu->archSpecific->splineValuesCapacity[gridIndex],
195                            pmeGpu->archSpecific->deviceContext_);
196     if (shouldRealloc)
197     {
198         /* Reallocate the host buffer */
199         pfree(pmeGpu->staging.h_splineModuli[gridIndex]);
200         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_splineModuli[gridIndex]),
201                 newSplineValuesSize * sizeof(float));
202     }
203     for (int i = 0; i < DIM; i++)
204     {
205         memcpy(pmeGpu->staging.h_splineModuli[gridIndex] + splineValuesOffset[i],
206                pmeGpu->common->bsp_mod[i].data(),
207                pmeGpu->common->bsp_mod[i].size() * sizeof(float));
208     }
209     /* TODO: pin original buffer instead! */
210     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli[gridIndex],
211                        pmeGpu->staging.h_splineModuli[gridIndex],
212                        0,
213                        newSplineValuesSize,
214                        pmeGpu->archSpecific->pmeStream_,
215                        pmeGpu->settings.transferKind,
216                        nullptr);
217 }
218
219 void pme_gpu_free_bspline_values(const PmeGpu* pmeGpu)
220 {
221     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
222     {
223         pfree(pmeGpu->staging.h_splineModuli[gridIndex]);
224         freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli[gridIndex]);
225     }
226 }
227
228 void pme_gpu_realloc_forces(PmeGpu* pmeGpu)
229 {
230     const size_t newForcesSize = pmeGpu->nAtomsAlloc;
231     GMX_ASSERT(newForcesSize > 0, "Bad number of atoms in PME GPU");
232     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces,
233                            newForcesSize,
234                            &pmeGpu->archSpecific->forcesSize,
235                            &pmeGpu->archSpecific->forcesSizeAlloc,
236                            pmeGpu->archSpecific->deviceContext_);
237     pmeGpu->staging.h_forces.reserveWithPadding(pmeGpu->nAtomsAlloc);
238     pmeGpu->staging.h_forces.resizeWithPadding(pmeGpu->kernelParams->atoms.nAtoms);
239 }
240
241 void pme_gpu_free_forces(const PmeGpu* pmeGpu)
242 {
243     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces);
244 }
245
246 void pme_gpu_copy_input_forces(PmeGpu* pmeGpu)
247 {
248     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
249     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces,
250                        pmeGpu->staging.h_forces.data(),
251                        0,
252                        pmeGpu->kernelParams->atoms.nAtoms,
253                        pmeGpu->archSpecific->pmeStream_,
254                        pmeGpu->settings.transferKind,
255                        nullptr);
256 }
257
258 void pme_gpu_copy_output_forces(PmeGpu* pmeGpu)
259 {
260     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
261     copyFromDeviceBuffer(pmeGpu->staging.h_forces.data(),
262                          &pmeGpu->kernelParams->atoms.d_forces,
263                          0,
264                          pmeGpu->kernelParams->atoms.nAtoms,
265                          pmeGpu->archSpecific->pmeStream_,
266                          pmeGpu->settings.transferKind,
267                          nullptr);
268 }
269
270 void pme_gpu_realloc_and_copy_input_coefficients(const PmeGpu* pmeGpu,
271                                                  const float*  h_coefficients,
272                                                  const int     gridIndex)
273 {
274     GMX_ASSERT(h_coefficients, "Bad host-side charge buffer in PME GPU");
275     const size_t newCoefficientsSize = pmeGpu->nAtomsAlloc;
276     GMX_ASSERT(newCoefficientsSize > 0, "Bad number of atoms in PME GPU");
277     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex],
278                            newCoefficientsSize,
279                            &pmeGpu->archSpecific->coefficientsSize[gridIndex],
280                            &pmeGpu->archSpecific->coefficientsCapacity[gridIndex],
281                            pmeGpu->archSpecific->deviceContext_);
282     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex],
283                        const_cast<float*>(h_coefficients),
284                        0,
285                        pmeGpu->kernelParams->atoms.nAtoms,
286                        pmeGpu->archSpecific->pmeStream_,
287                        pmeGpu->settings.transferKind,
288                        nullptr);
289
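        // Clear the padded tail of the coefficient buffer so that the padding atoms
        // (indices nAtoms to nAtomsAlloc - 1) carry zero charge.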
290     const size_t paddingIndex = pmeGpu->kernelParams->atoms.nAtoms;
291     const size_t paddingCount = pmeGpu->nAtomsAlloc - paddingIndex;
292     if (paddingCount > 0)
293     {
294         clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex],
295                                paddingIndex,
296                                paddingCount,
297                                pmeGpu->archSpecific->pmeStream_);
298     }
299 }
300
301 void pme_gpu_free_coefficients(const PmeGpu* pmeGpu)
302 {
303     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
304     {
305         freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex]);
306     }
307 }
308
309 void pme_gpu_realloc_spline_data(PmeGpu* pmeGpu)
310 {
311     const int order             = pmeGpu->common->pme_order;
312     const int newSplineDataSize = DIM * order * pmeGpu->nAtomsAlloc;
313     GMX_ASSERT(newSplineDataSize > 0, "Bad number of atoms in PME GPU");
314     /* Two arrays of the same size */
315     const bool shouldRealloc        = (newSplineDataSize > pmeGpu->archSpecific->splineDataSize);
316     int        currentSizeTemp      = pmeGpu->archSpecific->splineDataSize;
317     int        currentSizeTempAlloc = pmeGpu->archSpecific->splineDataSizeAlloc;
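        // d_theta is reallocated against temporary copies of the size bookkeeping so that d_dtheta
        // below is resized from the same original values; the stored sizes are updated only once.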
318     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta,
319                            newSplineDataSize,
320                            &currentSizeTemp,
321                            &currentSizeTempAlloc,
322                            pmeGpu->archSpecific->deviceContext_);
323     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta,
324                            newSplineDataSize,
325                            &pmeGpu->archSpecific->splineDataSize,
326                            &pmeGpu->archSpecific->splineDataSizeAlloc,
327                            pmeGpu->archSpecific->deviceContext_);
328     // the host side reallocation
329     if (shouldRealloc)
330     {
331         pfree(pmeGpu->staging.h_theta);
332         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_theta), newSplineDataSize * sizeof(float));
333         pfree(pmeGpu->staging.h_dtheta);
334         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_dtheta), newSplineDataSize * sizeof(float));
335     }
336 }
337
338 void pme_gpu_free_spline_data(const PmeGpu* pmeGpu)
339 {
340     /* Two arrays of the same size */
341     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta);
342     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta);
343     pfree(pmeGpu->staging.h_theta);
344     pfree(pmeGpu->staging.h_dtheta);
345 }
346
347 void pme_gpu_realloc_grid_indices(PmeGpu* pmeGpu)
348 {
349     const size_t newIndicesSize = DIM * pmeGpu->nAtomsAlloc;
350     GMX_ASSERT(newIndicesSize > 0, "Bad number of atoms in PME GPU");
351     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices,
352                            newIndicesSize,
353                            &pmeGpu->archSpecific->gridlineIndicesSize,
354                            &pmeGpu->archSpecific->gridlineIndicesSizeAlloc,
355                            pmeGpu->archSpecific->deviceContext_);
356     pfree(pmeGpu->staging.h_gridlineIndices);
357     pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_gridlineIndices), newIndicesSize * sizeof(int));
358 }
359
360 void pme_gpu_free_grid_indices(const PmeGpu* pmeGpu)
361 {
362     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices);
363     pfree(pmeGpu->staging.h_gridlineIndices);
364 }
365
366 void pme_gpu_realloc_grids(PmeGpu* pmeGpu)
367 {
368     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
369
370     const int newRealGridSize = kernelParamsPtr->grid.realGridSizePadded[XX]
371                                 * kernelParamsPtr->grid.realGridSizePadded[YY]
372                                 * kernelParamsPtr->grid.realGridSizePadded[ZZ];
373     const int newComplexGridSize = kernelParamsPtr->grid.complexGridSizePadded[XX]
374                                    * kernelParamsPtr->grid.complexGridSizePadded[YY]
375                                    * kernelParamsPtr->grid.complexGridSizePadded[ZZ] * 2;
376     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
377     {
378         // newComplexGridSize is multiplied by 2 because it counts complex numbers, while all allocations/pointers are float
379         if (pmeGpu->archSpecific->performOutOfPlaceFFT)
380         {
381             /* 2 separate grids */
382             reallocateDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid[gridIndex],
383                                    newComplexGridSize,
384                                    &pmeGpu->archSpecific->complexGridSize[gridIndex],
385                                    &pmeGpu->archSpecific->complexGridCapacity[gridIndex],
386                                    pmeGpu->archSpecific->deviceContext_);
387             reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid[gridIndex],
388                                    newRealGridSize,
389                                    &pmeGpu->archSpecific->realGridSize[gridIndex],
390                                    &pmeGpu->archSpecific->realGridCapacity[gridIndex],
391                                    pmeGpu->archSpecific->deviceContext_);
392         }
393         else
394         {
395             /* A single buffer so that any grid will fit */
396             const int newGridsSize = std::max(newRealGridSize, newComplexGridSize);
397             reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid[gridIndex],
398                                    newGridsSize,
399                                    &pmeGpu->archSpecific->realGridSize[gridIndex],
400                                    &pmeGpu->archSpecific->realGridCapacity[gridIndex],
401                                    pmeGpu->archSpecific->deviceContext_);
402             kernelParamsPtr->grid.d_fourierGrid[gridIndex] = kernelParamsPtr->grid.d_realGrid[gridIndex];
403             pmeGpu->archSpecific->complexGridSize[gridIndex] =
404                     pmeGpu->archSpecific->realGridSize[gridIndex];
405             // the size might get used later for copying the grid
406         }
407     }
408 }
409
410 void pme_gpu_free_grids(const PmeGpu* pmeGpu)
411 {
412     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
413     {
414         if (pmeGpu->archSpecific->performOutOfPlaceFFT)
415         {
416             freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_fourierGrid[gridIndex]);
417         }
418         freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid[gridIndex]);
419     }
420 }
421
422 void pme_gpu_clear_grids(const PmeGpu* pmeGpu)
423 {
424     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
425     {
426         clearDeviceBufferAsync(&pmeGpu->kernelParams->grid.d_realGrid[gridIndex],
427                                0,
428                                pmeGpu->archSpecific->realGridSize[gridIndex],
429                                pmeGpu->archSpecific->pmeStream_);
430     }
431 }
432
433 void pme_gpu_realloc_and_copy_fract_shifts(PmeGpu* pmeGpu)
434 {
435     pme_gpu_free_fract_shifts(pmeGpu);
436
437     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
438
439     const int nx                  = kernelParamsPtr->grid.realGridSize[XX];
440     const int ny                  = kernelParamsPtr->grid.realGridSize[YY];
441     const int nz                  = kernelParamsPtr->grid.realGridSize[ZZ];
442     const int cellCount           = c_pmeNeighborUnitcellCount;
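        // The fractional-shift and gridline-index tables for X, Y and Z are packed into single
        // arrays of cellCount * (nx + ny + nz) elements; these offsets mark where each dimension starts.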
443     const int gridDataOffset[DIM] = { 0, cellCount * nx, cellCount * (nx + ny) };
444
445     memcpy(kernelParamsPtr->grid.tablesOffsets, &gridDataOffset, sizeof(gridDataOffset));
446
447     const int newFractShiftsSize = cellCount * (nx + ny + nz);
448
449     initParamLookupTable(&kernelParamsPtr->grid.d_fractShiftsTable,
450                          &kernelParamsPtr->fractShiftsTableTexture,
451                          pmeGpu->common->fsh.data(),
452                          newFractShiftsSize,
453                          pmeGpu->archSpecific->deviceContext_);
454
455     initParamLookupTable(&kernelParamsPtr->grid.d_gridlineIndicesTable,
456                          &kernelParamsPtr->gridlineIndicesTableTexture,
457                          pmeGpu->common->nn.data(),
458                          newFractShiftsSize,
459                          pmeGpu->archSpecific->deviceContext_);
460 }
461
462 void pme_gpu_free_fract_shifts(const PmeGpu* pmeGpu)
463 {
464     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
465 #if GMX_GPU_CUDA
466     destroyParamLookupTable(&kernelParamsPtr->grid.d_fractShiftsTable,
467                             &kernelParamsPtr->fractShiftsTableTexture);
468     destroyParamLookupTable(&kernelParamsPtr->grid.d_gridlineIndicesTable,
469                             &kernelParamsPtr->gridlineIndicesTableTexture);
470 #elif GMX_GPU_OPENCL || GMX_GPU_SYCL
471     freeDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable);
472     freeDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable);
473 #endif
474 }
475
476 bool pme_gpu_stream_query(const PmeGpu* pmeGpu)
477 {
478     return haveStreamTasksCompleted(pmeGpu->archSpecific->pmeStream_);
479 }
480
481 void pme_gpu_copy_input_gather_grid(const PmeGpu* pmeGpu, const float* h_grid, const int gridIndex)
482 {
483     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid[gridIndex],
484                        h_grid,
485                        0,
486                        pmeGpu->archSpecific->realGridSize[gridIndex],
487                        pmeGpu->archSpecific->pmeStream_,
488                        pmeGpu->settings.transferKind,
489                        nullptr);
490 }
491
492 void pme_gpu_copy_output_spread_grid(const PmeGpu* pmeGpu, float* h_grid, const int gridIndex)
493 {
494     copyFromDeviceBuffer(h_grid,
495                          &pmeGpu->kernelParams->grid.d_realGrid[gridIndex],
496                          0,
497                          pmeGpu->archSpecific->realGridSize[gridIndex],
498                          pmeGpu->archSpecific->pmeStream_,
499                          pmeGpu->settings.transferKind,
500                          nullptr);
501     pmeGpu->archSpecific->syncSpreadGridD2H.markEvent(pmeGpu->archSpecific->pmeStream_);
502 }
503
504 void pme_gpu_copy_output_spread_atom_data(const PmeGpu* pmeGpu)
505 {
506     const size_t splinesCount    = DIM * pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order;
507     auto*        kernelParamsPtr = pmeGpu->kernelParams.get();
508     copyFromDeviceBuffer(pmeGpu->staging.h_dtheta,
509                          &kernelParamsPtr->atoms.d_dtheta,
510                          0,
511                          splinesCount,
512                          pmeGpu->archSpecific->pmeStream_,
513                          pmeGpu->settings.transferKind,
514                          nullptr);
515     copyFromDeviceBuffer(pmeGpu->staging.h_theta,
516                          &kernelParamsPtr->atoms.d_theta,
517                          0,
518                          splinesCount,
519                          pmeGpu->archSpecific->pmeStream_,
520                          pmeGpu->settings.transferKind,
521                          nullptr);
522     copyFromDeviceBuffer(pmeGpu->staging.h_gridlineIndices,
523                          &kernelParamsPtr->atoms.d_gridlineIndices,
524                          0,
525                          kernelParamsPtr->atoms.nAtoms * DIM,
526                          pmeGpu->archSpecific->pmeStream_,
527                          pmeGpu->settings.transferKind,
528                          nullptr);
529 }
530
531 void pme_gpu_copy_input_gather_atom_data(const PmeGpu* pmeGpu)
532 {
533     const size_t splinesCount    = DIM * pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order;
534     auto*        kernelParamsPtr = pmeGpu->kernelParams.get();
535
536     // TODO: could clear only the padding and not the whole thing, but this is test-exclusive code anyway
537     clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_gridlineIndices,
538                            0,
539                            pmeGpu->nAtomsAlloc * DIM,
540                            pmeGpu->archSpecific->pmeStream_);
541     clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_dtheta,
542                            0,
543                            pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
544                            pmeGpu->archSpecific->pmeStream_);
545     clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_theta,
546                            0,
547                            pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
548                            pmeGpu->archSpecific->pmeStream_);
549
550     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_dtheta,
551                        pmeGpu->staging.h_dtheta,
552                        0,
553                        splinesCount,
554                        pmeGpu->archSpecific->pmeStream_,
555                        pmeGpu->settings.transferKind,
556                        nullptr);
557     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_theta,
558                        pmeGpu->staging.h_theta,
559                        0,
560                        splinesCount,
561                        pmeGpu->archSpecific->pmeStream_,
562                        pmeGpu->settings.transferKind,
563                        nullptr);
564     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_gridlineIndices,
565                        pmeGpu->staging.h_gridlineIndices,
566                        0,
567                        kernelParamsPtr->atoms.nAtoms * DIM,
568                        pmeGpu->archSpecific->pmeStream_,
569                        pmeGpu->settings.transferKind,
570                        nullptr);
571 }
572
573 void pme_gpu_sync_spread_grid(const PmeGpu* pmeGpu)
574 {
575     pmeGpu->archSpecific->syncSpreadGridD2H.waitForEvent();
576 }
577
578 /*! \brief Internal GPU initialization for PME.
579  *
580  * \param[in]  pmeGpu         GPU PME data.
581  * \param[in]  deviceContext  GPU context.
582  * \param[in]  deviceStream   GPU stream.
583  */
584 static void pme_gpu_init_internal(PmeGpu* pmeGpu, const DeviceContext& deviceContext, const DeviceStream& deviceStream)
585 {
586     /* Allocate the target-specific structures */
587     pmeGpu->archSpecific.reset(new PmeGpuSpecific(deviceContext, deviceStream));
588     pmeGpu->kernelParams.reset(new PmeGpuKernelParams());
589
590     pmeGpu->archSpecific->performOutOfPlaceFFT = true;
591     /* This should give better performance, according to the cuFFT documentation.
592      * The performance seems to be the same though.
593      * TODO: PME could also try to pick up nice grid sizes (with factors of 2, 3, 5, 7).
594      */
595
596 #if GMX_GPU_CUDA
597     pmeGpu->kernelParams->usePipeline       = char(false);
598     pmeGpu->kernelParams->pipelineAtomStart = 0;
599     pmeGpu->kernelParams->pipelineAtomEnd   = 0;
600     pmeGpu->maxGridWidthX                   = deviceContext.deviceInfo().prop.maxGridSize[0];
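        // maxGridWidthX is later used by pmeGpuCreateGrid() to fold large block counts into a 2D launch grid.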
601 #else
602     // Use this path for any non-CUDA GPU acceleration
603     // TODO: is there really no global work-size limit in OpenCL?
604     pmeGpu->maxGridWidthX = INT32_MAX / 2;
605 #endif
606 }
607
608 void pme_gpu_reinit_3dfft(const PmeGpu* pmeGpu)
609 {
610     if (pme_gpu_settings(pmeGpu).performGPUFFT)
611     {
612         pmeGpu->archSpecific->fftSetup.resize(0);
613         const bool         performOutOfPlaceFFT      = pmeGpu->archSpecific->performOutOfPlaceFFT;
614         const bool         allocateGrid              = false;
615         MPI_Comm           comm                      = MPI_COMM_NULL;
616         std::array<int, 1> gridOffsetsInXForEachRank = { 0 };
617         std::array<int, 1> gridOffsetsInYForEachRank = { 0 };
618 #if GMX_GPU_CUDA
619         const gmx::FftBackend backend = gmx::FftBackend::Cufft;
620 #elif GMX_GPU_OPENCL
621         const gmx::FftBackend backend = gmx::FftBackend::Ocl;
622 #elif GMX_GPU_SYCL
623 #    if GMX_SYCL_DPCPP && GMX_FFT_MKL
624         const gmx::FftBackend backend = gmx::FftBackend::SyclMkl;
625 #    elif GMX_SYCL_HIPSYCL
626         const gmx::FftBackend backend = gmx::FftBackend::SyclRocfft;
627 #    else
628         const gmx::FftBackend backend = gmx::FftBackend::Sycl;
629 #    endif
630 #else
631         GMX_RELEASE_ASSERT(false, "Unknown GPU backend");
632         const gmx::FftBackend backend = gmx::FftBackend::Count;
633 #endif
634
635         PmeGpuGridParams& grid = pme_gpu_get_kernel_params_base_ptr(pmeGpu)->grid;
636         for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
637         {
638             pmeGpu->archSpecific->fftSetup.push_back(
639                     std::make_unique<gmx::Gpu3dFft>(backend,
640                                                     allocateGrid,
641                                                     comm,
642                                                     gridOffsetsInXForEachRank,
643                                                     gridOffsetsInYForEachRank,
644                                                     grid.realGridSize[ZZ],
645                                                     performOutOfPlaceFFT,
646                                                     pmeGpu->archSpecific->deviceContext_,
647                                                     pmeGpu->archSpecific->pmeStream_,
648                                                     grid.realGridSize,
649                                                     grid.realGridSizePadded,
650                                                     grid.complexGridSizePadded,
651                                                     &(grid.d_realGrid[gridIndex]),
652                                                     &(grid.d_fourierGrid[gridIndex])));
653         }
654     }
655 }
656
657 void pme_gpu_destroy_3dfft(const PmeGpu* pmeGpu)
658 {
659     pmeGpu->archSpecific->fftSetup.resize(0);
660 }
661
662 void pme_gpu_getEnergyAndVirial(const gmx_pme_t& pme, const float lambda, PmeOutput* output)
663 {
664     const PmeGpu* pmeGpu = pme.gpu;
665
666     GMX_ASSERT(lambda == 1.0 || pmeGpu->common->ngrids == 2,
667                "Invalid combination of lambda and number of grids");
668
669     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
670     {
671         for (int j = 0; j < c_virialAndEnergyCount; j++)
672         {
673             GMX_ASSERT(std::isfinite(pmeGpu->staging.h_virialAndEnergy[gridIndex][j]),
674                        "PME GPU produces incorrect energy/virial.");
675         }
676     }
677     for (int dim1 = 0; dim1 < DIM; dim1++)
678     {
679         for (int dim2 = 0; dim2 < DIM; dim2++)
680         {
681             output->coulombVirial_[dim1][dim2] = 0;
682         }
683     }
684     output->coulombEnergy_ = 0;
685     float scale            = 1.0;
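        // Per-grid staging layout: elements 0-5 feed the virial components (xx, yy, zz, xy, xz, yz)
        // and element 6 feeds the energy, with the 0.25 and 0.5 scale factors applied below.
        // With two grids (FEP), grid 0 (state A) is weighted by (1 - lambda) and grid 1 (state B) by lambda.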
686     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
687     {
688         if (pmeGpu->common->ngrids == 2)
689         {
690             scale = gridIndex == 0 ? (1.0 - lambda) : lambda;
691         }
692         output->coulombVirial_[XX][XX] +=
693                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][0];
694         output->coulombVirial_[YY][YY] +=
695                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][1];
696         output->coulombVirial_[ZZ][ZZ] +=
697                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][2];
698         output->coulombVirial_[XX][YY] +=
699                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][3];
700         output->coulombVirial_[YY][XX] +=
701                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][3];
702         output->coulombVirial_[XX][ZZ] +=
703                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][4];
704         output->coulombVirial_[ZZ][XX] +=
705                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][4];
706         output->coulombVirial_[YY][ZZ] +=
707                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][5];
708         output->coulombVirial_[ZZ][YY] +=
709                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][5];
710         output->coulombEnergy_ += scale * 0.5F * pmeGpu->staging.h_virialAndEnergy[gridIndex][6];
711     }
712     if (pmeGpu->common->ngrids > 1)
713     {
714         output->coulombDvdl_ = 0.5F
715                                * (pmeGpu->staging.h_virialAndEnergy[FEP_STATE_B][6]
716                                   - pmeGpu->staging.h_virialAndEnergy[FEP_STATE_A][6]);
717     }
718 }
719
720 /*! \brief Sets the force-related members in \p output
721  *
722  * \param[in]   pmeGpu      PME GPU data structure
723  * \param[out]  output      Pointer to PME output data structure
724  */
725 static void pme_gpu_getForceOutput(PmeGpu* pmeGpu, PmeOutput* output)
726 {
727     output->haveForceOutput_ = !pmeGpu->settings.useGpuForceReduction;
728     if (output->haveForceOutput_)
729     {
730         output->forces_ = pmeGpu->staging.h_forces;
731     }
732 }
733
734 PmeOutput pme_gpu_getOutput(const gmx_pme_t& pme, const bool computeEnergyAndVirial, const real lambdaQ)
735 {
736     PmeGpu* pmeGpu = pme.gpu;
737
738     PmeOutput output;
739
740     pme_gpu_getForceOutput(pmeGpu, &output);
741
742     if (computeEnergyAndVirial)
743     {
744         if (pme_gpu_settings(pmeGpu).performGPUSolve)
745         {
746             pme_gpu_getEnergyAndVirial(pme, lambdaQ, &output);
747         }
748         else
749         {
750             get_pme_ener_vir_q(pme.solve_work, pme.nthread, &output);
751         }
752     }
753     return output;
754 }
755
756 void pme_gpu_update_input_box(PmeGpu gmx_unused* pmeGpu, const matrix gmx_unused box)
757 {
758 #if GMX_DOUBLE
759     GMX_THROW(gmx::NotImplementedError("PME is implemented for single-precision only on GPU"));
760 #else
761     matrix scaledBox;
762     pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
763     auto* kernelParamsPtr              = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
764     kernelParamsPtr->current.boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
765     GMX_ASSERT(kernelParamsPtr->current.boxVolume != 0.0F, "Zero volume of the unit cell");
766     matrix recipBox;
767     gmx::invertBoxMatrix(scaledBox, recipBox);
768
769     /* The GPU recipBox is transposed as compared to the CPU recipBox.
770      * Spread uses matrix columns (while solve and gather use rows).
771      * There is no particular reason for this; it might be further rethought/optimized for better access patterns.
772      */
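        // The zero entries below rely on the GROMACS convention that the box, and therefore recipBox,
        // is a triangular matrix.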
773     const real newRecipBox[DIM][DIM] = { { recipBox[XX][XX], recipBox[YY][XX], recipBox[ZZ][XX] },
774                                          { 0.0, recipBox[YY][YY], recipBox[ZZ][YY] },
775                                          { 0.0, 0.0, recipBox[ZZ][ZZ] } };
776     memcpy(kernelParamsPtr->current.recipBox, newRecipBox, sizeof(matrix));
777 #endif
778 }
779
780 /*! \brief \libinternal
781  * (Re-)initializes all the PME GPU data related to the grid size and cut-off.
782  *
783  * \param[in] pmeGpu            The PME GPU structure.
784  */
785 static void pme_gpu_reinit_grids(PmeGpu* pmeGpu)
786 {
787     auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
788
789     GMX_ASSERT(
790             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
791             "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
792
793     kernelParamsPtr->grid.ewaldFactor =
794             (M_PI * M_PI) / (pmeGpu->common->ewaldcoeff_q * pmeGpu->common->ewaldcoeff_q);
795     /* The grid size variants */
796     for (int i = 0; i < DIM; i++)
797     {
798         kernelParamsPtr->grid.realGridSize[i] = pmeGpu->common->nk[i];
799         kernelParamsPtr->grid.realGridSizeFP[i] =
800                 static_cast<float>(kernelParamsPtr->grid.realGridSize[i]);
801         kernelParamsPtr->grid.realGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
802
803         // The complex grid currently uses no padding;
804         // if it starts to do so, then another test should be added for that
805         kernelParamsPtr->grid.complexGridSize[i]       = kernelParamsPtr->grid.realGridSize[i];
806         kernelParamsPtr->grid.complexGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
807     }
808     /* FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
809     if (!pme_gpu_settings(pmeGpu).performGPUFFT)
810     {
811         // This allows the GPU spread grid and the CPU fftgrid to have the same layout, so that the data can be copied directly
812         kernelParamsPtr->grid.realGridSizePadded[ZZ] =
813                 (kernelParamsPtr->grid.realGridSize[ZZ] / 2 + 1) * 2;
814     }
815     /* GPU FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
816     kernelParamsPtr->grid.complexGridSize[ZZ] /= 2;
817     kernelParamsPtr->grid.complexGridSize[ZZ]++;
818     kernelParamsPtr->grid.complexGridSizePadded[ZZ] = kernelParamsPtr->grid.complexGridSize[ZZ];
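        // For example, with nz = 64 the complex minor dimension is 64/2 + 1 = 33 and, when the FFT
        // runs on the CPU, the padded real minor dimension is (64/2 + 1) * 2 = 66.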
819
820     pme_gpu_realloc_and_copy_fract_shifts(pmeGpu);
821     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
822     {
823         pme_gpu_realloc_and_copy_bspline_values(pmeGpu, gridIndex);
824     }
825
826     pme_gpu_realloc_grids(pmeGpu);
827     pme_gpu_reinit_3dfft(pmeGpu);
828 }
829
830 /* Several GPU functions that refer to the CPU PME data live here.
831  * We would like to keep these away from the GPU-framework specific code for clarity,
832  * as well as compilation issues with MPI.
833  */
834
835 /*! \brief \libinternal
836  * Copies everything useful from the PME CPU to the PME GPU structure.
837  * The goal is to minimize interaction with the PME CPU structure in the GPU code.
838  *
839  * \param[in] pme         The PME structure.
840  */
841 static void pme_gpu_copy_common_data_from(const gmx_pme_t* pme)
842 {
843     /* TODO: Consider refactoring the CPU PME code to use the same structure,
844      * so that this function becomes 2 lines */
845     PmeGpu* pmeGpu               = pme->gpu;
846     pmeGpu->common->ngrids       = pme->bFEP_q ? 2 : 1;
847     pmeGpu->common->epsilon_r    = pme->epsilon_r;
848     pmeGpu->common->ewaldcoeff_q = pme->ewaldcoeff_q;
849     pmeGpu->common->nk[XX]       = pme->nkx;
850     pmeGpu->common->nk[YY]       = pme->nky;
851     pmeGpu->common->nk[ZZ]       = pme->nkz;
852     pmeGpu->common->pme_order    = pme->pme_order;
853     if (pmeGpu->common->pme_order != c_pmeGpuOrder)
854     {
855         GMX_THROW(gmx::NotImplementedError("pme_order != 4 is not implemented!"));
856     }
857     for (int i = 0; i < DIM; i++)
858     {
859         pmeGpu->common->bsp_mod[i].assign(pme->bsp_mod[i], pme->bsp_mod[i] + pmeGpu->common->nk[i]);
860     }
861     const int cellCount = c_pmeNeighborUnitcellCount;
862     pmeGpu->common->fsh.resize(0);
863     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshx, pme->fshx + cellCount * pme->nkx);
864     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshy, pme->fshy + cellCount * pme->nky);
865     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshz, pme->fshz + cellCount * pme->nkz);
866     pmeGpu->common->nn.resize(0);
867     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnx, pme->nnx + cellCount * pme->nkx);
868     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nny, pme->nny + cellCount * pme->nky);
869     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnz, pme->nnz + cellCount * pme->nkz);
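        // The fractional shift (fsh) and grid index (nn) tables for all three dimensions are
        // concatenated here in the same packed layout that pme_gpu_realloc_and_copy_fract_shifts()
        // uploads to the device.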
870     pmeGpu->common->runMode       = pme->runMode;
871     pmeGpu->common->isRankPmeOnly = !pme->bPPnode;
872     pmeGpu->common->boxScaler     = pme->boxScaler.get();
873 }
874
875 /*! \libinternal \brief
876  * Uses heuristics to select the best-performing PME spread and gather kernels.
877  *
878  * \param[in,out] pmeGpu         The PME GPU structure.
879  */
880 static void pme_gpu_select_best_performing_pme_spreadgather_kernels(PmeGpu* pmeGpu)
881 {
882     if (GMX_GPU_CUDA && pmeGpu->kernelParams->atoms.nAtoms > c_pmeGpuPerformanceAtomLimit)
883     {
884         pmeGpu->settings.threadsPerAtom     = ThreadsPerAtom::Order;
885         pmeGpu->settings.recalculateSplines = true;
886     }
887     else
888     {
889         pmeGpu->settings.threadsPerAtom     = ThreadsPerAtom::OrderSquared;
890         pmeGpu->settings.recalculateSplines = false;
891     }
892 }
893
894
895 /*! \libinternal \brief
896  * Initializes the PME GPU data at the beginning of the run.
897  * TODO: this should become PmeGpu::PmeGpu()
898  *
899  * \param[in,out] pme            The PME structure.
900  * \param[in]     deviceContext  The GPU context.
901  * \param[in]     deviceStream   The GPU stream.
902  * \param[in,out] pmeGpuProgram  The handle to the program/kernel data created outside (e.g. in unit tests/runner)
903  */
904 static void pme_gpu_init(gmx_pme_t*           pme,
905                          const DeviceContext& deviceContext,
906                          const DeviceStream&  deviceStream,
907                          const PmeGpuProgram* pmeGpuProgram)
908 {
909     pme->gpu       = new PmeGpu();
910     PmeGpu* pmeGpu = pme->gpu;
911     changePinningPolicy(&pmeGpu->staging.h_forces, pme_get_pinning_policy());
912     pmeGpu->common = std::make_shared<PmeShared>();
913
914     /* These settings are set here for the whole run; dynamic ones are set in pme_gpu_reinit() */
915     /* A convenience variable. */
916     pmeGpu->settings.useDecomposition = (pme->nnodes != 1);
917     /* TODO: CPU gather with GPU spread is broken due to different theta/dtheta layout. */
918     pmeGpu->settings.performGPUGather = true;
919     // By default GPU-side reduction is off (explicitly set here for tests, otherwise reset per-step)
920     pmeGpu->settings.useGpuForceReduction = false;
921
922     pme_gpu_set_testing(pmeGpu, false);
923
924     GMX_ASSERT(pmeGpuProgram != nullptr, "GPU kernels must be already compiled");
925     pmeGpu->programHandle_ = pmeGpuProgram;
926
927     pmeGpu->initializedClfftLibrary_ = std::make_unique<gmx::ClfftInitializer>();
928
929     pme_gpu_init_internal(pmeGpu, deviceContext, deviceStream);
930
931     pme_gpu_copy_common_data_from(pme);
932     pme_gpu_alloc_energy_virial(pmeGpu);
933
934     GMX_ASSERT(pmeGpu->common->epsilon_r != 0.0F, "PME GPU: bad electrostatic coefficient");
935
936     auto* kernelParamsPtr               = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
937     kernelParamsPtr->constants.elFactor = gmx::c_one4PiEps0 / pmeGpu->common->epsilon_r;
938 }
939
940 void pme_gpu_get_real_grid_sizes(const PmeGpu* pmeGpu, gmx::IVec* gridSize, gmx::IVec* paddedGridSize)
941 {
942     GMX_ASSERT(gridSize != nullptr, "");
943     GMX_ASSERT(paddedGridSize != nullptr, "");
944     GMX_ASSERT(pmeGpu != nullptr, "");
945     auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
946     for (int i = 0; i < DIM; i++)
947     {
948         (*gridSize)[i]       = kernelParamsPtr->grid.realGridSize[i];
949         (*paddedGridSize)[i] = kernelParamsPtr->grid.realGridSizePadded[i];
950     }
951 }
952
953 void pme_gpu_reinit(gmx_pme_t*           pme,
954                     const DeviceContext* deviceContext,
955                     const DeviceStream*  deviceStream,
956                     const PmeGpuProgram* pmeGpuProgram)
957 {
958     GMX_ASSERT(pme != nullptr, "Need valid PME object");
959
960     if (!pme->gpu)
961     {
962         GMX_RELEASE_ASSERT(deviceContext != nullptr,
963                            "Device context can not be nullptr when setting up PME on GPU.");
964         GMX_RELEASE_ASSERT(deviceStream != nullptr,
965                            "Device stream can not be nullptr when setting up PME on GPU.");
966         /* First-time initialization */
967         pme_gpu_init(pme, *deviceContext, *deviceStream, pmeGpuProgram);
968     }
969     else
970     {
971         /* After this call nothing in the GPU code should refer to the gmx_pme_t *pme itself - until the next pme_gpu_reinit */
972         pme_gpu_copy_common_data_from(pme);
973     }
974     /* GPU FFT is only used when running on a single rank. */
975     pme->gpu->settings.performGPUFFT =
976             (pme->gpu->common->runMode == PmeRunMode::GPU) && !pme->gpu->settings.useDecomposition;
977     pme->gpu->settings.performGPUSolve = (pme->gpu->common->runMode == PmeRunMode::GPU);
978
979     /* Reinit active timers */
980     pme_gpu_reinit_timings(pme->gpu);
981
982     pme_gpu_reinit_grids(pme->gpu);
983     // Note: if timing the reinit launch overhead becomes more relevant
984     // (e.g. with regular PP-PME re-balancing), we should pass wcycle here.
985     pme_gpu_reinit_computation(pme, nullptr);
986     /* Clear the previous box - doesn't hurt, and forces the PME CPU recipbox
987      * update for mixed mode on grid switch. TODO: use shared recipbox field.
988      */
989     std::memset(pme->gpu->common->previousBox, 0, sizeof(pme->gpu->common->previousBox));
990 }
991
992 void pme_gpu_destroy(PmeGpu* pmeGpu)
993 {
994     /* Free lots of data */
995     pme_gpu_free_energy_virial(pmeGpu);
996     pme_gpu_free_bspline_values(pmeGpu);
997     pme_gpu_free_forces(pmeGpu);
998     pme_gpu_free_coefficients(pmeGpu);
999     pme_gpu_free_spline_data(pmeGpu);
1000     pme_gpu_free_grid_indices(pmeGpu);
1001     pme_gpu_free_fract_shifts(pmeGpu);
1002     pme_gpu_free_grids(pmeGpu);
1003
1004     pme_gpu_destroy_3dfft(pmeGpu);
1005
1006     delete pmeGpu;
1007 }
1008
1009 void pme_gpu_reinit_atoms(PmeGpu* pmeGpu, const int nAtoms, const real* chargesA, const real* chargesB)
1010 {
1011     auto* kernelParamsPtr         = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
1012     kernelParamsPtr->atoms.nAtoms = nAtoms;
1013     const int  block_size         = pme_gpu_get_atom_data_block_size();
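         // Round the atom count up to a whole number of data blocks; e.g. 100 atoms with a block
         // size of 64 are padded to 128.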
1014     const int  nAtomsNewPadded    = ((nAtoms + block_size - 1) / block_size) * block_size;
1015     const bool haveToRealloc      = (pmeGpu->nAtomsAlloc < nAtomsNewPadded);
1016     pmeGpu->nAtomsAlloc           = nAtomsNewPadded;
1017
1018 #if GMX_DOUBLE
1019     GMX_RELEASE_ASSERT(false, "Only single precision supported");
1020     GMX_UNUSED_VALUE(chargesA);
         GMX_UNUSED_VALUE(chargesB);
1021 #else
1022     int gridIndex = 0;
1023     /* This could also be made conditional on haveToRealloc, but the copy always needs to be performed */
1024     pme_gpu_realloc_and_copy_input_coefficients(pmeGpu, reinterpret_cast<const float*>(chargesA), gridIndex);
1025     gridIndex++;
1026     if (chargesB != nullptr)
1027     {
1028         pme_gpu_realloc_and_copy_input_coefficients(
1029                 pmeGpu, reinterpret_cast<const float*>(chargesB), gridIndex);
1030     }
1031     else
1032     {
1033         /* Fill the second set of coefficients with chargesA as well to be able to avoid
1034          * conditionals in the GPU kernels */
1035         /* FIXME: This should be avoided by making a separate templated version of the
1036          * relevant kernel(s) (probably only pme_gather_kernel). That would require a
1037          * reduction of the current number of templated parameters of that kernel. */
1038         pme_gpu_realloc_and_copy_input_coefficients(
1039                 pmeGpu, reinterpret_cast<const float*>(chargesA), gridIndex);
1040     }
1041 #endif
1042
1043     if (haveToRealloc)
1044     {
1045         pme_gpu_realloc_forces(pmeGpu);
1046         pme_gpu_realloc_spline_data(pmeGpu);
1047         pme_gpu_realloc_grid_indices(pmeGpu);
1048     }
1049     pme_gpu_select_best_performing_pme_spreadgather_kernels(pmeGpu);
1050 }
1051
1052 /*! \internal \brief
1053  * Returns the raw timing event from the corresponding GpuRegionTimer (if timings are enabled).
1054  * In CUDA the result can be a nullptr stub, per the GpuRegionTimer implementation.
1055  *
1056  * \param[in] pmeGpu         The PME GPU data structure.
1057  * \param[in] pmeStageId     The PME GPU stage gtPME_ index from the enum in src/gromacs/timing/gpu_timing.h
1058  */
1059 static CommandEvent* pme_gpu_fetch_timing_event(const PmeGpu* pmeGpu, PmeStage pmeStageId)
1060 {
1061     CommandEvent* timingEvent = nullptr;
1062     if (pme_gpu_timings_enabled(pmeGpu))
1063     {
1064         GMX_ASSERT(pmeStageId < PmeStage::Count, "Wrong PME GPU timing event index");
1065         timingEvent = pmeGpu->archSpecific->timingEvents[pmeStageId].fetchNextEvent();
1066     }
1067     return timingEvent;
1068 }
1069
1070 void pme_gpu_3dfft(const PmeGpu* pmeGpu, gmx_fft_direction dir, const int grid_index)
1071 {
1072     PmeStage timerId = (dir == GMX_FFT_REAL_TO_COMPLEX) ? PmeStage::FftTransformR2C
1073                                                         : PmeStage::FftTransformC2R;
1074
1075     pme_gpu_start_timing(pmeGpu, timerId);
1076     pmeGpu->archSpecific->fftSetup[grid_index]->perform3dFft(
1077             dir, pme_gpu_fetch_timing_event(pmeGpu, timerId));
1078     pme_gpu_stop_timing(pmeGpu, timerId);
1079 }
1080
1081 /*! \brief
1082  * Given possibly large \p blockCount, returns a compact 1D or 2D grid for kernel scheduling,
1083  * to minimize the number of unused blocks.
1084  */
1085 std::pair<int, int> inline pmeGpuCreateGrid(const PmeGpu* pmeGpu, int blockCount)
1086 {
1087     // How many maximum widths in X do we need (hopefully just one)
1088     // How many maximum-width rows in X do we need (hopefully just one)?
1089     // Trying to make things even
1090     const int colCount = (blockCount + minRowCount - 1) / minRowCount;
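         // For example, blockCount = 100000 with maxGridWidthX = 65535 gives minRowCount = 2
         // and colCount = 50000.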
1091     GMX_ASSERT((colCount * minRowCount - blockCount) >= 0, "pmeGpuCreateGrid: totally wrong");
1092     GMX_ASSERT((colCount * minRowCount - blockCount) < minRowCount,
1093                "pmeGpuCreateGrid: excessive blocks");
1094     return std::pair<int, int>(colCount, minRowCount);
1095 }
1096
1097 /*! \brief
1098  * Returns a pointer to appropriate spline and spread kernel based on the input bool values
1099  *
1100  * \param[in]  pmeGpu                   The PME GPU structure.
1101  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
1102  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
1103  * \param[in]  numGrids                 Number of grids to use. numGrids == 2 if Coulomb is perturbed.
1104  *
1105  * \return Pointer to CUDA kernel
1106  */
1107 static auto selectSplineAndSpreadKernelPtr(const PmeGpu*  pmeGpu,
1108                                            ThreadsPerAtom threadsPerAtom,
1109                                            bool           writeSplinesToGlobal,
1110                                            const int      numGrids)
1111 {
1112     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1113     if (writeSplinesToGlobal)
1114     {
1115         if (threadsPerAtom == ThreadsPerAtom::Order)
1116         {
1117             if (numGrids == 2)
1118             {
1119                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesThPerAtom4Dual;
1120             }
1121             else
1122             {
1123                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesThPerAtom4Single;
1124             }
1125         }
1126         else
1127         {
1128             if (numGrids == 2)
1129             {
1130                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesDual;
1131             }
1132             else
1133             {
1134                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesSingle;
1135             }
1136         }
1137     }
1138     else
1139     {
1140         if (threadsPerAtom == ThreadsPerAtom::Order)
1141         {
1142             if (numGrids == 2)
1143             {
1144                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Dual;
1145             }
1146             else
1147             {
1148                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Single;
1149             }
1150         }
1151         else
1152         {
1153             if (numGrids == 2)
1154             {
1155                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelDual;
1156             }
1157             else
1158             {
1159                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelSingle;
1160             }
1161         }
1162     }
1163
1164     return kernelPtr;
1165 }
1166
1167 /*! \brief
1168  * Returns a pointer to appropriate spline kernel based on the input bool values
1169  *
1170  * \param[in]  pmeGpu                   The PME GPU structure.
1171  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
1172  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
1173  * \param[in]  numGrids                 Number of grids to use. numGrids == 2 if Coulomb is perturbed.
1174  *
1175  * \return Pointer to CUDA kernel
1176  */
1177 static auto selectSplineKernelPtr(const PmeGpu*   pmeGpu,
1178                                   ThreadsPerAtom  threadsPerAtom,
1179                                   bool gmx_unused writeSplinesToGlobal,
1180                                   const int       numGrids)
1181 {
1182     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1183     GMX_ASSERT(
1184             writeSplinesToGlobal,
1185             "Spline data should always be written to global memory when just calculating splines");
1186
1187     if (threadsPerAtom == ThreadsPerAtom::Order)
1188     {
1189         if (numGrids == 2)
1190         {
1191             kernelPtr = pmeGpu->programHandle_->impl_->splineKernelThPerAtom4Dual;
1192         }
1193         else
1194         {
1195             kernelPtr = pmeGpu->programHandle_->impl_->splineKernelThPerAtom4Single;
1196         }
1197     }
1198     else
1199     {
1200         if (numGrids == 2)
1201         {
1202             kernelPtr = pmeGpu->programHandle_->impl_->splineKernelDual;
1203         }
1204         else
1205         {
1206             kernelPtr = pmeGpu->programHandle_->impl_->splineKernelSingle;
1207         }
1208     }
1209     return kernelPtr;
1210 }
1211
1212 /*! \brief
1213  * Returns a pointer to appropriate spread kernel based on the input bool values
1214  *
1215  * \param[in]  pmeGpu                   The PME GPU structure.
1216  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
1217  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
1218  * \param[in]  numGrids                 Number of grids to use. numGrids == 2 if Coulomb is perturbed.
1219  *
1220  * \return Pointer to CUDA kernel
1221  */
1222 static auto selectSpreadKernelPtr(const PmeGpu*  pmeGpu,
1223                                   ThreadsPerAtom threadsPerAtom,
1224                                   bool           writeSplinesToGlobal,
1225                                   const int      numGrids)
1226 {
1227     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1228     if (writeSplinesToGlobal)
1229     {
1230         if (threadsPerAtom == ThreadsPerAtom::Order)
1231         {
1232             if (numGrids == 2)
1233             {
1234                 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelThPerAtom4Dual;
1235             }
1236             else
1237             {
1238                 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelThPerAtom4Single;
1239             }
1240         }
1241         else
1242         {
1243             if (numGrids == 2)
1244             {
1245                 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelDual;
1246             }
1247             else
1248             {
1249                 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelSingle;
1250             }
1251         }
1252     }
1253     else
1254     {
1255         /* If we are not saving the spline data, we need to recalculate it
1256            using the combined spline-and-spread kernel */
1257         if (threadsPerAtom == ThreadsPerAtom::Order)
1258         {
1259             if (numGrids == 2)
1260             {
1261                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Dual;
1262             }
1263             else
1264             {
1265                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Single;
1266             }
1267         }
1268         else
1269         {
1270             if (numGrids == 2)
1271             {
1272                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelDual;
1273             }
1274             else
1275             {
1276                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelSingle;
1277             }
1278         }
1279     }
1280     return kernelPtr;
1281 }
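/* Illustrative sketch (not compiled): the selection helpers above all follow the same pattern,
 * mapping (threadsPerAtom, writeSplinesToGlobal, numGrids) onto one of the precompiled kernel
 * variants held by PmeGpuProgramImpl. For the spread case the dispatch could be condensed to:
 *
 *   const bool  perAtom4 = (threadsPerAtom == ThreadsPerAtom::Order);
 *   const bool  dual     = (numGrids == 2);
 *   const auto& k        = *pmeGpu->programHandle_->impl_;
 *   kernelPtr = writeSplinesToGlobal
 *                       ? (perAtom4 ? (dual ? k.spreadKernelThPerAtom4Dual
 *                                           : k.spreadKernelThPerAtom4Single)
 *                                   : (dual ? k.spreadKernelDual : k.spreadKernelSingle))
 *                       : (perAtom4 ? (dual ? k.splineAndSpreadKernelThPerAtom4Dual
 *                                           : k.splineAndSpreadKernelThPerAtom4Single)
 *                                   : (dual ? k.splineAndSpreadKernelDual
 *                                           : k.splineAndSpreadKernelSingle));
 *
 * The explicit if/else form used here keeps each variant on its own line, which is easier to
 * read and to set per-variant breakpoints on.
 */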
1282
1283 void pme_gpu_spread(const PmeGpu*                  pmeGpu,
1284                     GpuEventSynchronizer*          xReadyOnDevice,
1285                     real**                         h_grids,
1286                     bool                           computeSplines,
1287                     bool                           spreadCharges,
1288                     const real                     lambda,
1289                     const bool                     useGpuDirectComm,
1290                     gmx::PmeCoordinateReceiverGpu* pmeCoordinateReceiverGpu)
1291 {
1292     GMX_ASSERT(
1293             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
1294             "Only one (normal Coulomb PME) or two (FEP Coulomb PME) PME grids can be used on GPU");
1295
1296     GMX_ASSERT(computeSplines || spreadCharges,
1297                "PME spline/spread kernel has invalid input (nothing to do)");
1298     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1299     GMX_ASSERT(kernelParamsPtr->atoms.nAtoms > 0, "No atom data in PME GPU spread");
1300
1301     const size_t blockSize = pmeGpu->programHandle_->impl_->spreadWorkGroupSize;
1302
1303     const int order = pmeGpu->common->pme_order;
1304     GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1305     const bool writeGlobal = pmeGpu->settings.copyAllOutputs;
1306     const int  threadsPerAtom =
1307             (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
1308     const bool recalculateSplines = pmeGpu->settings.recalculateSplines;
1309
1310     GMX_ASSERT(!GMX_GPU_OPENCL || pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::OrderSquared,
1311                "Only 16 threads per atom supported in OpenCL");
1312     GMX_ASSERT(!GMX_GPU_OPENCL || !recalculateSplines,
1313                "Recalculating splines not supported in OpenCL");
1314
1315     const int atomsPerBlock = blockSize / threadsPerAtom;
1316
1317     // TODO: pick smaller block size in runtime if needed
1318     // (e.g. on 660 Ti where 50% occupancy is ~25% faster than 100% occupancy with RNAse (~17.8k atoms))
1319     // If doing so, change atomsPerBlock in the kernels as well.
1320     // TODO: test varying block sizes on modern architectures as well
1321     // TODO: also consider using cudaFuncSetCacheConfig() for preferring shared memory on older architectures
1322     // (for spline data mostly)
1323     GMX_ASSERT(!(c_pmeAtomDataBlockSize % atomsPerBlock),
1324                "inconsistent atom data padding vs. spreading block size");
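    /* Worked example with illustrative values (not taken from this build): with order = 4 and
     * ThreadsPerAtom::OrderSquared, threadsPerAtom = 16; if spreadWorkGroupSize were 256, then
     * atomsPerBlock = 256 / 16 = 16, and the assert above requires 16 to divide
     * c_pmeAtomDataBlockSize (e.g. 64), so that the padded atom count splits into whole blocks. */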
1325
1326     // Ensure that coordinates are ready on the device before launching spread;
1327     // this is only needed on PP+PME ranks, not on separate PME ranks or in unit tests,
1328     // as those cases use a single stream (hence xReadyOnDevice == nullptr).
1329     GMX_ASSERT(xReadyOnDevice != nullptr || pmeGpu->common->isRankPmeOnly
1330                        || pme_gpu_settings(pmeGpu).copyAllOutputs,
1331                "Need a valid coordinate synchronizer on PP+PME ranks with CUDA.");
1332
1333     if (xReadyOnDevice)
1334     {
1335         xReadyOnDevice->enqueueWaitEvent(pmeGpu->archSpecific->pmeStream_);
1336     }
1337
1338     const int blockCount = pmeGpu->nAtomsAlloc / atomsPerBlock;
1339     auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1340
1341     if (pmeGpu->common->ngrids == 1)
1342     {
1343         kernelParamsPtr->current.scale = 1.0;
1344     }
1345     else
1346     {
1347         kernelParamsPtr->current.scale = 1.0 - lambda;
1348     }
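    /* For example, an FEP run with lambda = 0.25 gives scale = 0.75, i.e. the weight of the
     * state-A grid; the dual-grid kernels are expected to weight state B by 1 - scale. */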
1349
1350     KernelLaunchConfig config;
1351     config.blockSize[0] = order;
1352     config.blockSize[1] = (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? 1 : order);
1353     config.blockSize[2] = atomsPerBlock;
1354     config.gridSize[0]  = dimGrid.first;
1355     config.gridSize[1]  = dimGrid.second;
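    /* Launch-geometry example with illustrative numbers: for order = 4,
     * ThreadsPerAtom::OrderSquared and atomsPerBlock = 16 the block is 4 x 4 x 16 = 256 threads,
     * while ThreadsPerAtom::Order with atomsPerBlock = 64 gives 4 x 1 x 64 = 256 threads.
     * gridSize[0] and gridSize[1] come from pmeGpuCreateGrid(), which decomposes blockCount
     * into a 2D grid. */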
1356
1357     PmeStage                           timingId;
1358     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1359     const bool writeGlobalOrSaveSplines          = writeGlobal || (!recalculateSplines);
1360     if (computeSplines)
1361     {
1362         if (spreadCharges)
1363         {
1364             timingId  = PmeStage::SplineAndSpread;
1365             kernelPtr = selectSplineAndSpreadKernelPtr(pmeGpu,
1366                                                        pmeGpu->settings.threadsPerAtom,
1367                                                        writeGlobalOrSaveSplines,
1368                                                        pmeGpu->common->ngrids);
1369         }
1370         else
1371         {
1372             timingId  = PmeStage::Spline;
1373             kernelPtr = selectSplineKernelPtr(pmeGpu,
1374                                               pmeGpu->settings.threadsPerAtom,
1375                                               writeGlobalOrSaveSplines,
1376                                               pmeGpu->common->ngrids);
1377         }
1378     }
1379     else
1380     {
1381         timingId  = PmeStage::Spread;
1382         kernelPtr = selectSpreadKernelPtr(
1383                 pmeGpu, pmeGpu->settings.threadsPerAtom, writeGlobalOrSaveSplines, pmeGpu->common->ngrids);
1384     }
1385
1386
1387     pme_gpu_start_timing(pmeGpu, timingId);
1388     auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1389
1390     kernelParamsPtr->usePipeline = char(computeSplines && spreadCharges && useGpuDirectComm
1391                                         && (pmeCoordinateReceiverGpu->ppCommNumSenderRanks() > 1)
1392                                         && !writeGlobalOrSaveSplines);
1393     if (kernelParamsPtr->usePipeline != 0)
1394     {
1395         int numStagesInPipeline = pmeCoordinateReceiverGpu->ppCommNumSenderRanks();
1396
1397         for (int i = 0; i < numStagesInPipeline; i++)
1398         {
1399             int senderRank;
1400             if (useGpuDirectComm)
1401             {
1402                 senderRank = pmeCoordinateReceiverGpu->synchronizeOnCoordinatesFromPpRank(
1403                         i, *(pmeCoordinateReceiverGpu->ppCommStream(i)));
1404             }
1405             else
1406             {
1407                 senderRank = i;
1408             }
1409
1410             // set kernel configuration options specific to this stage of the pipeline
1411             std::tie(kernelParamsPtr->pipelineAtomStart, kernelParamsPtr->pipelineAtomEnd) =
1412                     pmeCoordinateReceiverGpu->ppCommAtomRange(senderRank);
1413             const int blockCount       = static_cast<int>(std::ceil(
1414                     static_cast<float>(kernelParamsPtr->pipelineAtomEnd - kernelParamsPtr->pipelineAtomStart)
1415                     / atomsPerBlock));
1416             auto      dimGrid          = pmeGpuCreateGrid(pmeGpu, blockCount);
1417             config.gridSize[0]         = dimGrid.first;
1418             config.gridSize[1]         = dimGrid.second;
1419             DeviceStream* launchStream = pmeCoordinateReceiverGpu->ppCommStream(senderRank);
1420
1421
1422 #if c_canEmbedBuffers
1423             const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1424 #else
1425             const auto kernelArgs =
1426                     prepareGpuKernelArguments(kernelPtr,
1427                                               config,
1428                                               kernelParamsPtr,
1429                                               &kernelParamsPtr->atoms.d_theta,
1430                                               &kernelParamsPtr->atoms.d_dtheta,
1431                                               &kernelParamsPtr->atoms.d_gridlineIndices,
1432                                               &kernelParamsPtr->grid.d_realGrid[FEP_STATE_A],
1433                                               &kernelParamsPtr->grid.d_realGrid[FEP_STATE_B],
1434                                               &kernelParamsPtr->grid.d_fractShiftsTable,
1435                                               &kernelParamsPtr->grid.d_gridlineIndicesTable,
1436                                               &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_A],
1437                                               &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_B],
1438                                               &kernelParamsPtr->atoms.d_coordinates);
1439 #endif
1440
1441             launchGpuKernel(kernelPtr, config, *launchStream, timingEvent, "PME spline/spread", kernelArgs);
1442         }
1443         // Set dependencies for PME stream on all pipeline streams
1444         for (int i = 0; i < pmeCoordinateReceiverGpu->ppCommNumSenderRanks(); i++)
1445         {
1446             GpuEventSynchronizer event;
1447             event.markEvent(*(pmeCoordinateReceiverGpu->ppCommStream(i)));
1448             event.enqueueWaitEvent(pmeGpu->archSpecific->pmeStream_);
1449         }
1450     }
1451     else // pipelining is not in use
1452     {
1453         if (useGpuDirectComm) // Sync all PME-PP communications to PME stream
1454         {
1455             pmeCoordinateReceiverGpu->synchronizeOnCoordinatesFromAllPpRanks(pmeGpu->archSpecific->pmeStream_);
1456         }
1457
1458 #if c_canEmbedBuffers
1459         const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1460 #else
1461         const auto kernelArgs =
1462                 prepareGpuKernelArguments(kernelPtr,
1463                                           config,
1464                                           kernelParamsPtr,
1465                                           &kernelParamsPtr->atoms.d_theta,
1466                                           &kernelParamsPtr->atoms.d_dtheta,
1467                                           &kernelParamsPtr->atoms.d_gridlineIndices,
1468                                           &kernelParamsPtr->grid.d_realGrid[FEP_STATE_A],
1469                                           &kernelParamsPtr->grid.d_realGrid[FEP_STATE_B],
1470                                           &kernelParamsPtr->grid.d_fractShiftsTable,
1471                                           &kernelParamsPtr->grid.d_gridlineIndicesTable,
1472                                           &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_A],
1473                                           &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_B],
1474                                           &kernelParamsPtr->atoms.d_coordinates);
1475 #endif
1476
1477         launchGpuKernel(kernelPtr,
1478                         config,
1479                         pmeGpu->archSpecific->pmeStream_,
1480                         timingEvent,
1481                         "PME spline/spread",
1482                         kernelArgs);
1483     }
1484
1485     pme_gpu_stop_timing(pmeGpu, timingId);
1486
1487     const auto& settings    = pmeGpu->settings;
1488     const bool copyBackGrid = spreadCharges && (!settings.performGPUFFT || settings.copyAllOutputs);
1489     if (copyBackGrid)
1490     {
1491         for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
1492         {
1493             float* h_grid = h_grids[gridIndex];
1494             pme_gpu_copy_output_spread_grid(pmeGpu, h_grid, gridIndex);
1495         }
1496     }
1497     const bool copyBackAtomData =
1498             computeSplines && (!settings.performGPUGather || settings.copyAllOutputs);
1499     if (copyBackAtomData)
1500     {
1501         pme_gpu_copy_output_spread_atom_data(pmeGpu);
1502     }
1503 }
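/* Sketch of the pipelined branch above with hypothetical numbers: with 4 PP sender ranks and
 * atomsPerBlock = 16, a stage whose ppCommAtomRange() spans [0, 1000) launches
 * ceil(1000 / 16) = 63 blocks on that sender's ppCommStream, while the other stages process
 * their own atom ranges concurrently; the trailing loop then records an event in each pipeline
 * stream and makes the main PME stream wait on it, so later work in the PME stream (FFT, solve)
 * only starts once every stage has spread its atoms. */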
1504
1505 void pme_gpu_solve(const PmeGpu* pmeGpu,
1506                    const int     gridIndex,
1507                    t_complex*    h_grid,
1508                    GridOrdering  gridOrdering,
1509                    bool          computeEnergyAndVirial)
1510 {
1511     GMX_ASSERT(
1512             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
1513             "Only one (normal Coulomb PME) or two (FEP Coulomb PME) PME grids can be used on GPU");
1514     GMX_ASSERT(gridIndex < pmeGpu->common->ngrids,
1515                "Invalid combination of gridIndex and number of grids");
1516
1517     const auto& settings               = pmeGpu->settings;
1518     const bool  copyInputAndOutputGrid = !settings.performGPUFFT || settings.copyAllOutputs;
1519
1520     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1521
1522     float* h_gridFloat = reinterpret_cast<float*>(h_grid);
1523     if (copyInputAndOutputGrid)
1524     {
1525         copyToDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid[gridIndex],
1526                            h_gridFloat,
1527                            0,
1528                            pmeGpu->archSpecific->complexGridSize[gridIndex],
1529                            pmeGpu->archSpecific->pmeStream_,
1530                            pmeGpu->settings.transferKind,
1531                            nullptr);
1532     }
1533
1534     int majorDim = -1, middleDim = -1, minorDim = -1;
1535     switch (gridOrdering)
1536     {
1537         case GridOrdering::YZX:
1538             majorDim  = YY;
1539             middleDim = ZZ;
1540             minorDim  = XX;
1541             break;
1542
1543         case GridOrdering::XYZ:
1544             majorDim  = XX;
1545             middleDim = YY;
1546             minorDim  = ZZ;
1547             break;
1548
1549         default: GMX_ASSERT(false, "Implement grid ordering here and below for the kernel launch");
1550     }
1551
1552     const int maxBlockSize = pmeGpu->programHandle_->impl_->solveMaxWorkGroupSize;
1553
1554     const int gridLineSize      = pmeGpu->kernelParams->grid.complexGridSize[minorDim];
1555     const int gridLinesPerBlock = std::max(maxBlockSize / gridLineSize, 1);
1556     const int blocksPerGridLine = (gridLineSize + maxBlockSize - 1) / maxBlockSize;
1557     int       cellsPerBlock;
1558     if (blocksPerGridLine == 1)
1559     {
1560         cellsPerBlock = gridLineSize * gridLinesPerBlock;
1561     }
1562     else
1563     {
1564         cellsPerBlock = (gridLineSize + blocksPerGridLine - 1) / blocksPerGridLine;
1565     }
1566     const int warpSize  = pmeGpu->programHandle_->warpSize();
1567     const int blockSize = (cellsPerBlock + warpSize - 1) / warpSize * warpSize;
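    /* Worked example with illustrative sizes: for a complex-grid minor dimension of 65 cells and
     * maxBlockSize = 256, gridLinesPerBlock = max(256 / 65, 1) = 3 and blocksPerGridLine = 1, so
     * cellsPerBlock = 65 * 3 = 195; with warpSize = 32 this rounds up to
     * blockSize = ((195 + 31) / 32) * 32 = 224 threads per block. */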
1568
1569     static_assert(!GMX_GPU_CUDA || c_solveMaxWarpsPerBlock / 2 >= 4,
1570                   "The CUDA solve energy kernels need at least 4 warps. "
1571                   "Here we launch at least half of the max warps.");
1572
1573     KernelLaunchConfig config;
1574     config.blockSize[0] = blockSize;
1575     config.gridSize[0]  = blocksPerGridLine;
1576     // rounding up to full warps so that shuffle operations produce defined results
1577     config.gridSize[1] = (pmeGpu->kernelParams->grid.complexGridSize[middleDim] + gridLinesPerBlock - 1)
1578                          / gridLinesPerBlock;
1579     config.gridSize[2] = pmeGpu->kernelParams->grid.complexGridSize[majorDim];
1580
1581     PmeStage                           timingId  = PmeStage::Solve;
1582     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1583     if (gridOrdering == GridOrdering::YZX)
1584     {
1585         if (gridIndex == 0)
1586         {
1587             kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveYZXEnergyKernelA
1588                                                : pmeGpu->programHandle_->impl_->solveYZXKernelA;
1589         }
1590         else
1591         {
1592             kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveYZXEnergyKernelB
1593                                                : pmeGpu->programHandle_->impl_->solveYZXKernelB;
1594         }
1595     }
1596     else if (gridOrdering == GridOrdering::XYZ)
1597     {
1598         if (gridIndex == 0)
1599         {
1600             kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveXYZEnergyKernelA
1601                                                : pmeGpu->programHandle_->impl_->solveXYZKernelA;
1602         }
1603         else
1604         {
1605             kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveXYZEnergyKernelB
1606                                                : pmeGpu->programHandle_->impl_->solveXYZKernelB;
1607         }
1608     }
1609
1610     pme_gpu_start_timing(pmeGpu, timingId);
1611     auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1612 #if c_canEmbedBuffers
1613     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1614 #else
1615     const auto kernelArgs =
1616             prepareGpuKernelArguments(kernelPtr,
1617                                       config,
1618                                       kernelParamsPtr,
1619                                       &kernelParamsPtr->grid.d_splineModuli[gridIndex],
1620                                       &kernelParamsPtr->constants.d_virialAndEnergy[gridIndex],
1621                                       &kernelParamsPtr->grid.d_fourierGrid[gridIndex]);
1622 #endif
1623     launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME solve", kernelArgs);
1624     pme_gpu_stop_timing(pmeGpu, timingId);
1625
1626     if (computeEnergyAndVirial)
1627     {
1628         copyFromDeviceBuffer(pmeGpu->staging.h_virialAndEnergy[gridIndex],
1629                              &kernelParamsPtr->constants.d_virialAndEnergy[gridIndex],
1630                              0,
1631                              c_virialAndEnergyCount,
1632                              pmeGpu->archSpecific->pmeStream_,
1633                              pmeGpu->settings.transferKind,
1634                              nullptr);
1635     }
1636
1637     if (copyInputAndOutputGrid)
1638     {
1639         copyFromDeviceBuffer(h_gridFloat,
1640                              &kernelParamsPtr->grid.d_fourierGrid[gridIndex],
1641                              0,
1642                              pmeGpu->archSpecific->complexGridSize[gridIndex],
1643                              pmeGpu->archSpecific->pmeStream_,
1644                              pmeGpu->settings.transferKind,
1645                              nullptr);
1646     }
1647 }
1648
1649 /*! \brief
1650  * Returns a pointer to the appropriate gather kernel based on the input parameters
1651  *
1652  * \param[in]  pmeGpu                   The PME GPU structure.
1653  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
1654  * \param[in]  readSplinesFromGlobal    Whether spline data should be read from global memory
1655  * \param[in]  numGrids                 Number of grids to use. numGrids == 2 if Coulomb is perturbed.
1656  *
1657  * \return Pointer to CUDA kernel
1658  */
1659 inline auto selectGatherKernelPtr(const PmeGpu*  pmeGpu,
1660                                   ThreadsPerAtom threadsPerAtom,
1661                                   bool           readSplinesFromGlobal,
1662                                   const int      numGrids)
1663
1664 {
1665     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1666
1667     if (readSplinesFromGlobal)
1668     {
1669         if (threadsPerAtom == ThreadsPerAtom::Order)
1670         {
1671             if (numGrids == 2)
1672             {
1673                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesThPerAtom4Dual;
1674             }
1675             else
1676             {
1677                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesThPerAtom4Single;
1678             }
1679         }
1680         else
1681         {
1682             if (numGrids == 2)
1683             {
1684                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesDual;
1685             }
1686             else
1687             {
1688                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesSingle;
1689             }
1690         }
1691     }
1692     else
1693     {
1694         if (threadsPerAtom == ThreadsPerAtom::Order)
1695         {
1696             if (numGrids == 2)
1697             {
1698                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelThPerAtom4Dual;
1699             }
1700             else
1701             {
1702                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelThPerAtom4Single;
1703             }
1704         }
1705         else
1706         {
1707             if (numGrids == 2)
1708             {
1709                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelDual;
1710             }
1711             else
1712             {
1713                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelSingle;
1714             }
1715         }
1716     }
1717     return kernelPtr;
1718 }
1719
1720 void pme_gpu_gather(PmeGpu* pmeGpu, real** h_grids, const float lambda)
1721 {
1722     GMX_ASSERT(
1723             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
1724             "Only one (normal Coulomb PME) or two (FEP Coulomb PME) PME grids can be used on GPU");
1725
1726     const auto& settings = pmeGpu->settings;
1727
1728     if (!settings.performGPUFFT || settings.copyAllOutputs)
1729     {
1730         for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
1731         {
1732             float* h_grid = const_cast<float*>(h_grids[gridIndex]);
1733             pme_gpu_copy_input_gather_grid(pmeGpu, h_grid, gridIndex);
1734         }
1735     }
1736
1737     if (settings.copyAllOutputs)
1738     {
1739         pme_gpu_copy_input_gather_atom_data(pmeGpu);
1740     }
1741
1742     /* True when all outputs are copied back to the host, as in unit tests */
1743     const bool   readGlobal = pmeGpu->settings.copyAllOutputs;
1744     const size_t blockSize  = pmeGpu->programHandle_->impl_->gatherWorkGroupSize;
1745     const int    order      = pmeGpu->common->pme_order;
1746     GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1747     const int threadsPerAtom =
1748             (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
1749     const bool recalculateSplines = pmeGpu->settings.recalculateSplines;
1750
1751     GMX_ASSERT(!GMX_GPU_OPENCL || pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::OrderSquared,
1752                "Only 16 threads per atom supported in OpenCL");
1753     GMX_ASSERT(!GMX_GPU_OPENCL || !recalculateSplines,
1754                "Recalculating splines not supported in OpenCL");
1755
1756     const int atomsPerBlock = blockSize / threadsPerAtom;
1757
1758     GMX_ASSERT(!(c_pmeAtomDataBlockSize % atomsPerBlock),
1759                "inconsistent atom data padding vs. gathering block size");
1760
1761     const int blockCount = pmeGpu->nAtomsAlloc / atomsPerBlock;
1762     auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1763
1764     KernelLaunchConfig config;
1765     config.blockSize[0] = order;
1766     config.blockSize[1] = (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? 1 : order);
1767     config.blockSize[2] = atomsPerBlock;
1768     config.gridSize[0]  = dimGrid.first;
1769     config.gridSize[1]  = dimGrid.second;
1770
1771     // TODO test different cache configs
1772
1773     PmeStage                           timingId = PmeStage::Gather;
1774     PmeGpuProgramImpl::PmeKernelHandle kernelPtr =
1775             selectGatherKernelPtr(pmeGpu,
1776                                   pmeGpu->settings.threadsPerAtom,
1777                                   readGlobal || (!recalculateSplines),
1778                                   pmeGpu->common->ngrids);
1779     // TODO design kernel selection getters and make PmeGpu a friend of PmeGpuProgramImpl
1780
1781     pme_gpu_start_timing(pmeGpu, timingId);
1782     auto* timingEvent     = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1783     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1784     if (pmeGpu->common->ngrids == 1)
1785     {
1786         kernelParamsPtr->current.scale = 1.0;
1787     }
1788     else
1789     {
1790         kernelParamsPtr->current.scale = 1.0 - lambda;
1791     }
1792
1793 #if c_canEmbedBuffers
1794     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1795 #else
1796     const auto kernelArgs =
1797             prepareGpuKernelArguments(kernelPtr,
1798                                       config,
1799                                       kernelParamsPtr,
1800                                       &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_A],
1801                                       &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_B],
1802                                       &kernelParamsPtr->grid.d_realGrid[FEP_STATE_A],
1803                                       &kernelParamsPtr->grid.d_realGrid[FEP_STATE_B],
1804                                       &kernelParamsPtr->atoms.d_theta,
1805                                       &kernelParamsPtr->atoms.d_dtheta,
1806                                       &kernelParamsPtr->atoms.d_gridlineIndices,
1807                                       &kernelParamsPtr->atoms.d_forces);
1808 #endif
1809     launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME gather", kernelArgs);
1810     pme_gpu_stop_timing(pmeGpu, timingId);
1811
1812     if (pmeGpu->settings.useGpuForceReduction)
1813     {
1814         pmeGpu->archSpecific->pmeForcesReady.markEvent(pmeGpu->archSpecific->pmeStream_);
1815     }
1816     else
1817     {
1818         pme_gpu_copy_output_forces(pmeGpu);
1819     }
1820 }
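/* Usage sketch (simplified; grid pointers, grid ordering, lambda and the energy/virial flag are
 * placeholders): a single GPU PME step on this rank calls the entry points in this file roughly
 * in the order spread -> 3D FFT R2C -> solve -> 3D FFT C2R -> gather; the FFT itself is driven
 * elsewhere (see gromacs/fft/gpu_3dfft.h).
 *
 *   pme_gpu_spread(pmeGpu, xReadyOnDevice, h_grids, true, true, lambda, false, nullptr);
 *   // ... 3D FFT real->complex on each grid ...
 *   for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
 *   {
 *       pme_gpu_solve(pmeGpu, gridIndex, h_complexGrids[gridIndex], GridOrdering::YZX, computeEnergyAndVirial);
 *   }
 *   // ... 3D FFT complex->real on each grid ...
 *   pme_gpu_gather(pmeGpu, h_grids, lambda);
 */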
1821
1822 DeviceBuffer<gmx::RVec> pme_gpu_get_kernelparam_forces(const PmeGpu* pmeGpu)
1823 {
1824     if (pmeGpu && pmeGpu->kernelParams)
1825     {
1826         return pmeGpu->kernelParams->atoms.d_forces;
1827     }
1828     else
1829     {
1830         return DeviceBuffer<gmx::RVec>{};
1831     }
1832 }
1833
1834 void pme_gpu_set_kernelparam_coordinates(const PmeGpu* pmeGpu, DeviceBuffer<gmx::RVec> d_x)
1835 {
1836     GMX_ASSERT(pmeGpu && pmeGpu->kernelParams,
1837                "PME GPU device buffer cannot be set in non-GPU builds or before the GPU PME was "
1838                "initialized.");
1839
1840     GMX_ASSERT(checkDeviceBuffer(d_x, pmeGpu->kernelParams->atoms.nAtoms),
1841                "The device-side buffer cannot be set.");
1842
1843     pmeGpu->kernelParams->atoms.d_coordinates = d_x;
1844 }
1845
1846 GpuEventSynchronizer* pme_gpu_get_forces_ready_synchronizer(const PmeGpu* pmeGpu)
1847 {
1848     if (pmeGpu && pmeGpu->kernelParams)
1849     {
1850         return &pmeGpu->archSpecific->pmeForcesReady;
1851     }
1852     else
1853     {
1854         return nullptr;
1855     }
1856 }