Apply re-formatting to C++ in src/ tree.
src/gromacs/ewald/pme_gpu_internal.cpp
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2016,2017,2018,2019,2020, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35
36 /*! \internal \file
37  *
38  * \brief This file contains internal function implementations
39  * for performing the PME calculations on GPU.
40  *
41  * Note that this file is compiled as regular C++ source in OpenCL builds, but
42  * it is treated as CUDA source in CUDA-enabled GPU builds.
43  *
44  * \author Aleksei Iupinov <a.yupinov@gmail.com>
45  * \ingroup module_ewald
46  */
47
48 #include "gmxpre.h"
49
50 #include "pme_gpu_internal.h"
51
52 #include "config.h"
53
54 #include <list>
55 #include <memory>
56 #include <string>
57
58 #include "gromacs/ewald/ewald_utils.h"
59 #include "gromacs/gpu_utils/device_context.h"
60 #include "gromacs/gpu_utils/device_stream.h"
61 #include "gromacs/gpu_utils/gpu_utils.h"
62 #include "gromacs/hardware/device_information.h"
63 #include "gromacs/math/invertmatrix.h"
64 #include "gromacs/math/units.h"
65 #include "gromacs/timing/gpu_timing.h"
66 #include "gromacs/utility/exceptions.h"
67 #include "gromacs/utility/fatalerror.h"
68 #include "gromacs/utility/gmxassert.h"
69 #include "gromacs/utility/logger.h"
70 #include "gromacs/utility/stringutil.h"
71
72 #if GMX_GPU_CUDA
73 #    include "gromacs/gpu_utils/pmalloc_cuda.h"
74
75 #    include "pme.cuh"
76 #elif GMX_GPU_OPENCL
77 #    include "gromacs/gpu_utils/gmxopencl.h"
78 #endif
79
80 #include "gromacs/ewald/pme.h"
81
82 #include "pme_gpu_3dfft.h"
83 #include "pme_gpu_calculate_splines.h"
84 #include "pme_gpu_constants.h"
85 #include "pme_gpu_program_impl.h"
86 #include "pme_gpu_timings.h"
87 #include "pme_gpu_types.h"
88 #include "pme_gpu_types_host.h"
89 #include "pme_gpu_types_host_impl.h"
90 #include "pme_grid.h"
91 #include "pme_internal.h"
92 #include "pme_solve.h"
93
94 /*! \brief
95  * CUDA only.
96  * Atom limit above which it is advantageous to turn on spline recalculation
97  * in the gather kernel and to use fewer threads per atom in the spline and spread kernels.
98  */
99 constexpr int c_pmeGpuPerformanceAtomLimit = 23000;
100
101 /*! \internal \brief
102  * Wrapper for getting a pointer to the plain C++ part of the GPU kernel parameters structure.
103  *
104  * \param[in] pmeGpu  The PME GPU structure.
105  * \returns The pointer to the kernel parameters.
106  */
107 static PmeGpuKernelParamsBase* pme_gpu_get_kernel_params_base_ptr(const PmeGpu* pmeGpu)
108 {
109     // reinterpret_cast is needed because the derived CUDA structure is not known in this file
110     auto* kernelParamsPtr = reinterpret_cast<PmeGpuKernelParamsBase*>(pmeGpu->kernelParams.get());
111     return kernelParamsPtr;
112 }
113
114 /*! \brief
115  * Atom data block size (in terms of number of atoms).
116  * This is the least common multiple of the numbers of atoms processed by
117  * a single block/workgroup of the spread and gather kernels.
118  * The GPU atom data buffers must be padded, which means that
119  * the numbers of atoms used for determining the size of the memory
120  * allocation must be divisible by this.
121  */
122 constexpr int c_pmeAtomDataBlockSize = 64;
123
124 int pme_gpu_get_atom_data_block_size()
125 {
126     return c_pmeAtomDataBlockSize;
127 }
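// Illustrative compile-time check (added for documentation, not part of the original code):
// callers round the atom count up to a multiple of c_pmeAtomDataBlockSize before allocating,
// as pme_gpu_reinit_atoms() does below. E.g. 100 atoms pad to ((100 + 64 - 1) / 64) * 64 = 128 entries.
static_assert(((100 + c_pmeAtomDataBlockSize - 1) / c_pmeAtomDataBlockSize) * c_pmeAtomDataBlockSize == 128,
              "Padding example: 100 atoms round up to two 64-atom blocks");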
128
129 void pme_gpu_synchronize(const PmeGpu* pmeGpu)
130 {
131     pmeGpu->archSpecific->pmeStream_.synchronize();
132 }
133
134 void pme_gpu_alloc_energy_virial(PmeGpu* pmeGpu)
135 {
136     const size_t energyAndVirialSize = c_virialAndEnergyCount * sizeof(float);
137
138     GMX_ASSERT(
139             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
140             "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
141
142     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
143     {
144         allocateDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy[gridIndex],
145                              c_virialAndEnergyCount,
146                              pmeGpu->archSpecific->deviceContext_);
147         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_virialAndEnergy[gridIndex]), energyAndVirialSize);
148     }
149 }
150
151 void pme_gpu_free_energy_virial(PmeGpu* pmeGpu)
152 {
153     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
154     {
155         freeDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy[gridIndex]);
156         pfree(pmeGpu->staging.h_virialAndEnergy[gridIndex]);
157         pmeGpu->staging.h_virialAndEnergy[gridIndex] = nullptr;
158     }
159 }
160
161 void pme_gpu_clear_energy_virial(const PmeGpu* pmeGpu)
162 {
163     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
164     {
165         clearDeviceBufferAsync(&pmeGpu->kernelParams->constants.d_virialAndEnergy[gridIndex],
166                                0,
167                                c_virialAndEnergyCount,
168                                pmeGpu->archSpecific->pmeStream_);
169     }
170 }
171
172 void pme_gpu_realloc_and_copy_bspline_values(PmeGpu* pmeGpu, const int gridIndex)
173 {
174     GMX_ASSERT(
175             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
176             "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
177     GMX_ASSERT(gridIndex < pmeGpu->common->ngrids,
178                "Invalid combination of gridIndex and number of grids");
179
180     const int splineValuesOffset[DIM] = { 0,
181                                           pmeGpu->kernelParams->grid.realGridSize[XX],
182                                           pmeGpu->kernelParams->grid.realGridSize[XX]
183                                                   + pmeGpu->kernelParams->grid.realGridSize[YY] };
184     memcpy(&pmeGpu->kernelParams->grid.splineValuesOffset, &splineValuesOffset, sizeof(splineValuesOffset));
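    // Layout note: d_splineModuli concatenates the three per-dimension B-spline moduli arrays,
    // with splineValuesOffset[] marking where each dimension starts. For example, a 60 x 64 x 72
    // grid gives offsets { 0, 60, 124 } and a total of 196 floats.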
185
186     const int newSplineValuesSize = pmeGpu->kernelParams->grid.realGridSize[XX]
187                                     + pmeGpu->kernelParams->grid.realGridSize[YY]
188                                     + pmeGpu->kernelParams->grid.realGridSize[ZZ];
189     const bool shouldRealloc = (newSplineValuesSize > pmeGpu->archSpecific->splineValuesSize[gridIndex]);
190     reallocateDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli[gridIndex],
191                            newSplineValuesSize,
192                            &pmeGpu->archSpecific->splineValuesSize[gridIndex],
193                            &pmeGpu->archSpecific->splineValuesCapacity[gridIndex],
194                            pmeGpu->archSpecific->deviceContext_);
195     if (shouldRealloc)
196     {
197         /* Reallocate the host buffer */
198         pfree(pmeGpu->staging.h_splineModuli[gridIndex]);
199         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_splineModuli[gridIndex]),
200                 newSplineValuesSize * sizeof(float));
201     }
202     for (int i = 0; i < DIM; i++)
203     {
204         memcpy(pmeGpu->staging.h_splineModuli[gridIndex] + splineValuesOffset[i],
205                pmeGpu->common->bsp_mod[i].data(),
206                pmeGpu->common->bsp_mod[i].size() * sizeof(float));
207     }
208     /* TODO: pin original buffer instead! */
209     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli[gridIndex],
210                        pmeGpu->staging.h_splineModuli[gridIndex],
211                        0,
212                        newSplineValuesSize,
213                        pmeGpu->archSpecific->pmeStream_,
214                        pmeGpu->settings.transferKind,
215                        nullptr);
216 }
217
218 void pme_gpu_free_bspline_values(const PmeGpu* pmeGpu)
219 {
220     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
221     {
222         pfree(pmeGpu->staging.h_splineModuli[gridIndex]);
223         freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli[gridIndex]);
224     }
225 }
226
227 void pme_gpu_realloc_forces(PmeGpu* pmeGpu)
228 {
229     const size_t newForcesSize = pmeGpu->nAtomsAlloc * DIM;
230     GMX_ASSERT(newForcesSize > 0, "Bad number of atoms in PME GPU");
231     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces,
232                            newForcesSize,
233                            &pmeGpu->archSpecific->forcesSize,
234                            &pmeGpu->archSpecific->forcesSizeAlloc,
235                            pmeGpu->archSpecific->deviceContext_);
236     pmeGpu->staging.h_forces.reserveWithPadding(pmeGpu->nAtomsAlloc);
237     pmeGpu->staging.h_forces.resizeWithPadding(pmeGpu->kernelParams->atoms.nAtoms);
238 }
239
240 void pme_gpu_free_forces(const PmeGpu* pmeGpu)
241 {
242     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces);
243 }
244
245 void pme_gpu_copy_input_forces(PmeGpu* pmeGpu)
246 {
247     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
248     float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
249     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces,
250                        h_forcesFloat,
251                        0,
252                        DIM * pmeGpu->kernelParams->atoms.nAtoms,
253                        pmeGpu->archSpecific->pmeStream_,
254                        pmeGpu->settings.transferKind,
255                        nullptr);
256 }
257
258 void pme_gpu_copy_output_forces(PmeGpu* pmeGpu)
259 {
260     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
261     float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
262     copyFromDeviceBuffer(h_forcesFloat,
263                          &pmeGpu->kernelParams->atoms.d_forces,
264                          0,
265                          DIM * pmeGpu->kernelParams->atoms.nAtoms,
266                          pmeGpu->archSpecific->pmeStream_,
267                          pmeGpu->settings.transferKind,
268                          nullptr);
269 }
270
271 void pme_gpu_realloc_and_copy_input_coefficients(const PmeGpu* pmeGpu,
272                                                  const float*  h_coefficients,
273                                                  const int     gridIndex)
274 {
275     GMX_ASSERT(h_coefficients, "Bad host-side charge buffer in PME GPU");
276     const size_t newCoefficientsSize = pmeGpu->nAtomsAlloc;
277     GMX_ASSERT(newCoefficientsSize > 0, "Bad number of atoms in PME GPU");
278     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex],
279                            newCoefficientsSize,
280                            &pmeGpu->archSpecific->coefficientsSize[gridIndex],
281                            &pmeGpu->archSpecific->coefficientsCapacity[gridIndex],
282                            pmeGpu->archSpecific->deviceContext_);
283     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex],
284                        const_cast<float*>(h_coefficients),
285                        0,
286                        pmeGpu->kernelParams->atoms.nAtoms,
287                        pmeGpu->archSpecific->pmeStream_,
288                        pmeGpu->settings.transferKind,
289                        nullptr);
290
291     const size_t paddingIndex = pmeGpu->kernelParams->atoms.nAtoms;
292     const size_t paddingCount = pmeGpu->nAtomsAlloc - paddingIndex;
293     if (paddingCount > 0)
294     {
295         clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex],
296                                paddingIndex,
297                                paddingCount,
298                                pmeGpu->archSpecific->pmeStream_);
299     }
300 }
301
302 void pme_gpu_free_coefficients(const PmeGpu* pmeGpu)
303 {
304     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
305     {
306         freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex]);
307     }
308 }
309
310 void pme_gpu_realloc_spline_data(PmeGpu* pmeGpu)
311 {
312     const int order             = pmeGpu->common->pme_order;
313     const int newSplineDataSize = DIM * order * pmeGpu->nAtomsAlloc;
314     GMX_ASSERT(newSplineDataSize > 0, "Bad number of atoms in PME GPU");
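    // Both theta and dtheta store order spline values per atom per dimension.
    // For example, with pme_order = 4 and nAtomsAlloc = 128, each array holds 3 * 4 * 128 = 1536 floats.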
315     /* Two arrays of the same size */
316     const bool shouldRealloc        = (newSplineDataSize > pmeGpu->archSpecific->splineDataSize);
317     int        currentSizeTemp      = pmeGpu->archSpecific->splineDataSize;
318     int        currentSizeTempAlloc = pmeGpu->archSpecific->splineDataSizeAlloc;
319     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta,
320                            newSplineDataSize,
321                            &currentSizeTemp,
322                            &currentSizeTempAlloc,
323                            pmeGpu->archSpecific->deviceContext_);
324     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta,
325                            newSplineDataSize,
326                            &pmeGpu->archSpecific->splineDataSize,
327                            &pmeGpu->archSpecific->splineDataSizeAlloc,
328                            pmeGpu->archSpecific->deviceContext_);
329     // the host side reallocation
330     if (shouldRealloc)
331     {
332         pfree(pmeGpu->staging.h_theta);
333         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_theta), newSplineDataSize * sizeof(float));
334         pfree(pmeGpu->staging.h_dtheta);
335         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_dtheta), newSplineDataSize * sizeof(float));
336     }
337 }
338
339 void pme_gpu_free_spline_data(const PmeGpu* pmeGpu)
340 {
341     /* Two arrays of the same size */
342     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta);
343     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta);
344     pfree(pmeGpu->staging.h_theta);
345     pfree(pmeGpu->staging.h_dtheta);
346 }
347
348 void pme_gpu_realloc_grid_indices(PmeGpu* pmeGpu)
349 {
350     const size_t newIndicesSize = DIM * pmeGpu->nAtomsAlloc;
351     GMX_ASSERT(newIndicesSize > 0, "Bad number of atoms in PME GPU");
352     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices,
353                            newIndicesSize,
354                            &pmeGpu->archSpecific->gridlineIndicesSize,
355                            &pmeGpu->archSpecific->gridlineIndicesSizeAlloc,
356                            pmeGpu->archSpecific->deviceContext_);
357     pfree(pmeGpu->staging.h_gridlineIndices);
358     pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_gridlineIndices), newIndicesSize * sizeof(int));
359 }
360
361 void pme_gpu_free_grid_indices(const PmeGpu* pmeGpu)
362 {
363     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices);
364     pfree(pmeGpu->staging.h_gridlineIndices);
365 }
366
367 void pme_gpu_realloc_grids(PmeGpu* pmeGpu)
368 {
369     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
370
371     const int newRealGridSize = kernelParamsPtr->grid.realGridSizePadded[XX]
372                                 * kernelParamsPtr->grid.realGridSizePadded[YY]
373                                 * kernelParamsPtr->grid.realGridSizePadded[ZZ];
374     const int newComplexGridSize = kernelParamsPtr->grid.complexGridSizePadded[XX]
375                                    * kernelParamsPtr->grid.complexGridSizePadded[YY]
376                                    * kernelParamsPtr->grid.complexGridSizePadded[ZZ] * 2;
377     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
378     {
379         // Multiplied by 2 because we count complex grid size for complex numbers, but all allocations/pointers are float
380         if (pmeGpu->archSpecific->performOutOfPlaceFFT)
381         {
382             /* 2 separate grids */
383             reallocateDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid[gridIndex],
384                                    newComplexGridSize,
385                                    &pmeGpu->archSpecific->complexGridSize[gridIndex],
386                                    &pmeGpu->archSpecific->complexGridCapacity[gridIndex],
387                                    pmeGpu->archSpecific->deviceContext_);
388             reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid[gridIndex],
389                                    newRealGridSize,
390                                    &pmeGpu->archSpecific->realGridSize[gridIndex],
391                                    &pmeGpu->archSpecific->realGridCapacity[gridIndex],
392                                    pmeGpu->archSpecific->deviceContext_);
393         }
394         else
395         {
396             /* A single buffer so that any grid will fit */
397             const int newGridsSize = std::max(newRealGridSize, newComplexGridSize);
398             reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid[gridIndex],
399                                    newGridsSize,
400                                    &pmeGpu->archSpecific->realGridSize[gridIndex],
401                                    &pmeGpu->archSpecific->realGridCapacity[gridIndex],
402                                    pmeGpu->archSpecific->deviceContext_);
403             kernelParamsPtr->grid.d_fourierGrid[gridIndex] = kernelParamsPtr->grid.d_realGrid[gridIndex];
404             pmeGpu->archSpecific->complexGridSize[gridIndex] =
405                     pmeGpu->archSpecific->realGridSize[gridIndex];
406             // the size might get used later for copying the grid
407         }
408     }
409 }
410
411 void pme_gpu_free_grids(const PmeGpu* pmeGpu)
412 {
413     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
414     {
415         if (pmeGpu->archSpecific->performOutOfPlaceFFT)
416         {
417             freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_fourierGrid[gridIndex]);
418         }
419         freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid[gridIndex]);
420     }
421 }
422
423 void pme_gpu_clear_grids(const PmeGpu* pmeGpu)
424 {
425     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
426     {
427         clearDeviceBufferAsync(&pmeGpu->kernelParams->grid.d_realGrid[gridIndex],
428                                0,
429                                pmeGpu->archSpecific->realGridSize[gridIndex],
430                                pmeGpu->archSpecific->pmeStream_);
431     }
432 }
433
434 void pme_gpu_realloc_and_copy_fract_shifts(PmeGpu* pmeGpu)
435 {
436     pme_gpu_free_fract_shifts(pmeGpu);
437
438     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
439
440     const int nx                  = kernelParamsPtr->grid.realGridSize[XX];
441     const int ny                  = kernelParamsPtr->grid.realGridSize[YY];
442     const int nz                  = kernelParamsPtr->grid.realGridSize[ZZ];
443     const int cellCount           = c_pmeNeighborUnitcellCount;
444     const int gridDataOffset[DIM] = { 0, cellCount * nx, cellCount * (nx + ny) };
445
446     memcpy(kernelParamsPtr->grid.tablesOffsets, &gridDataOffset, sizeof(gridDataOffset));
447
448     const int newFractShiftsSize = cellCount * (nx + ny + nz);
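    // Both lookup tables below pack the three dimensions back to back: the fractional-shift table
    // (from pmeGpu->common->fsh) and the gridline-index table (from pmeGpu->common->nn) each hold
    // cellCount entries per grid line, with tablesOffsets marking the start of each dimension.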
449
450     initParamLookupTable(&kernelParamsPtr->grid.d_fractShiftsTable,
451                          &kernelParamsPtr->fractShiftsTableTexture,
452                          pmeGpu->common->fsh.data(),
453                          newFractShiftsSize,
454                          pmeGpu->archSpecific->deviceContext_);
455
456     initParamLookupTable(&kernelParamsPtr->grid.d_gridlineIndicesTable,
457                          &kernelParamsPtr->gridlineIndicesTableTexture,
458                          pmeGpu->common->nn.data(),
459                          newFractShiftsSize,
460                          pmeGpu->archSpecific->deviceContext_);
461 }
462
463 void pme_gpu_free_fract_shifts(const PmeGpu* pmeGpu)
464 {
465     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
466 #if GMX_GPU_CUDA
467     destroyParamLookupTable(&kernelParamsPtr->grid.d_fractShiftsTable,
468                             kernelParamsPtr->fractShiftsTableTexture);
469     destroyParamLookupTable(&kernelParamsPtr->grid.d_gridlineIndicesTable,
470                             kernelParamsPtr->gridlineIndicesTableTexture);
471 #elif GMX_GPU_OPENCL
472     freeDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable);
473     freeDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable);
474 #endif
475 }
476
477 bool pme_gpu_stream_query(const PmeGpu* pmeGpu)
478 {
479     return haveStreamTasksCompleted(pmeGpu->archSpecific->pmeStream_);
480 }
481
482 void pme_gpu_copy_input_gather_grid(const PmeGpu* pmeGpu, const float* h_grid, const int gridIndex)
483 {
484     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid[gridIndex],
485                        h_grid,
486                        0,
487                        pmeGpu->archSpecific->realGridSize[gridIndex],
488                        pmeGpu->archSpecific->pmeStream_,
489                        pmeGpu->settings.transferKind,
490                        nullptr);
491 }
492
493 void pme_gpu_copy_output_spread_grid(const PmeGpu* pmeGpu, float* h_grid, const int gridIndex)
494 {
495     copyFromDeviceBuffer(h_grid,
496                          &pmeGpu->kernelParams->grid.d_realGrid[gridIndex],
497                          0,
498                          pmeGpu->archSpecific->realGridSize[gridIndex],
499                          pmeGpu->archSpecific->pmeStream_,
500                          pmeGpu->settings.transferKind,
501                          nullptr);
502     pmeGpu->archSpecific->syncSpreadGridD2H.markEvent(pmeGpu->archSpecific->pmeStream_);
503 }
504
505 void pme_gpu_copy_output_spread_atom_data(const PmeGpu* pmeGpu)
506 {
507     const size_t splinesCount    = DIM * pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order;
508     auto*        kernelParamsPtr = pmeGpu->kernelParams.get();
509     copyFromDeviceBuffer(pmeGpu->staging.h_dtheta,
510                          &kernelParamsPtr->atoms.d_dtheta,
511                          0,
512                          splinesCount,
513                          pmeGpu->archSpecific->pmeStream_,
514                          pmeGpu->settings.transferKind,
515                          nullptr);
516     copyFromDeviceBuffer(pmeGpu->staging.h_theta,
517                          &kernelParamsPtr->atoms.d_theta,
518                          0,
519                          splinesCount,
520                          pmeGpu->archSpecific->pmeStream_,
521                          pmeGpu->settings.transferKind,
522                          nullptr);
523     copyFromDeviceBuffer(pmeGpu->staging.h_gridlineIndices,
524                          &kernelParamsPtr->atoms.d_gridlineIndices,
525                          0,
526                          kernelParamsPtr->atoms.nAtoms * DIM,
527                          pmeGpu->archSpecific->pmeStream_,
528                          pmeGpu->settings.transferKind,
529                          nullptr);
530 }
531
532 void pme_gpu_copy_input_gather_atom_data(const PmeGpu* pmeGpu)
533 {
534     const size_t splinesCount    = DIM * pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order;
535     auto*        kernelParamsPtr = pmeGpu->kernelParams.get();
536
537     // TODO: could clear only the padding and not the whole thing, but this is test-exclusive code anyway
538     clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_gridlineIndices,
539                            0,
540                            pmeGpu->nAtomsAlloc * DIM,
541                            pmeGpu->archSpecific->pmeStream_);
542     clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_dtheta,
543                            0,
544                            pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
545                            pmeGpu->archSpecific->pmeStream_);
546     clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_theta,
547                            0,
548                            pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
549                            pmeGpu->archSpecific->pmeStream_);
550
551     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_dtheta,
552                        pmeGpu->staging.h_dtheta,
553                        0,
554                        splinesCount,
555                        pmeGpu->archSpecific->pmeStream_,
556                        pmeGpu->settings.transferKind,
557                        nullptr);
558     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_theta,
559                        pmeGpu->staging.h_theta,
560                        0,
561                        splinesCount,
562                        pmeGpu->archSpecific->pmeStream_,
563                        pmeGpu->settings.transferKind,
564                        nullptr);
565     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_gridlineIndices,
566                        pmeGpu->staging.h_gridlineIndices,
567                        0,
568                        kernelParamsPtr->atoms.nAtoms * DIM,
569                        pmeGpu->archSpecific->pmeStream_,
570                        pmeGpu->settings.transferKind,
571                        nullptr);
572 }
573
574 void pme_gpu_sync_spread_grid(const PmeGpu* pmeGpu)
575 {
576     pmeGpu->archSpecific->syncSpreadGridD2H.waitForEvent();
577 }
578
579 /*! \brief Internal GPU initialization for PME.
580  *
581  * \param[in]  pmeGpu         GPU PME data.
582  * \param[in]  deviceContext  GPU context.
583  * \param[in]  deviceStream   GPU stream.
584  */
585 static void pme_gpu_init_internal(PmeGpu* pmeGpu, const DeviceContext& deviceContext, const DeviceStream& deviceStream)
586 {
587     /* Allocate the target-specific structures */
588     pmeGpu->archSpecific.reset(new PmeGpuSpecific(deviceContext, deviceStream));
589     pmeGpu->kernelParams.reset(new PmeGpuKernelParams());
590
591     pmeGpu->archSpecific->performOutOfPlaceFFT = true;
592     /* This should give better performance, according to the cuFFT documentation.
593      * The performance seems to be the same though.
594      * TODO: PME could also try to pick up nice grid sizes (with factors of 2, 3, 5, 7).
595      */
596
597 #if GMX_GPU_CUDA
598     pmeGpu->maxGridWidthX = deviceContext.deviceInfo().prop.maxGridSize[0];
599 #else
600     // Use this path for any non-CUDA GPU acceleration
601     // TODO: is there really no global work size limit in OpenCL?
602     pmeGpu->maxGridWidthX = INT32_MAX / 2;
603 #endif
604 }
605
606 void pme_gpu_reinit_3dfft(const PmeGpu* pmeGpu)
607 {
608     if (pme_gpu_settings(pmeGpu).performGPUFFT)
609     {
610         pmeGpu->archSpecific->fftSetup.resize(0);
611         for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
612         {
613             pmeGpu->archSpecific->fftSetup.push_back(std::make_unique<GpuParallel3dFft>(pmeGpu, gridIndex));
614         }
615     }
616 }
617
618 void pme_gpu_destroy_3dfft(const PmeGpu* pmeGpu)
619 {
620     pmeGpu->archSpecific->fftSetup.resize(0);
621 }
622
623 void pme_gpu_getEnergyAndVirial(const gmx_pme_t& pme, const float lambda, PmeOutput* output)
624 {
625     const PmeGpu* pmeGpu = pme.gpu;
626
627     GMX_ASSERT(lambda == 1.0 || pmeGpu->common->ngrids == 2,
628                "Invalid combination of lambda and number of grids");
629
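    // Per-grid staging layout used below: h_virialAndEnergy[grid][0..5] hold the virial components
    // (xx, yy, zz, xy, xz, yz; each added with a 0.25 factor) and [6] holds the energy (0.5 factor).
    // With two FEP grids, the contributions are mixed with weights (1 - lambda) and lambda.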
630     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
631     {
632         for (int j = 0; j < c_virialAndEnergyCount; j++)
633         {
634             GMX_ASSERT(std::isfinite(pmeGpu->staging.h_virialAndEnergy[gridIndex][j]),
635                        "PME GPU produces incorrect energy/virial.");
636         }
637     }
638     for (int dim1 = 0; dim1 < DIM; dim1++)
639     {
640         for (int dim2 = 0; dim2 < DIM; dim2++)
641         {
642             output->coulombVirial_[dim1][dim2] = 0;
643         }
644     }
645     output->coulombEnergy_ = 0;
646     float scale            = 1.0;
647     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
648     {
649         if (pmeGpu->common->ngrids == 2)
650         {
651             scale = gridIndex == 0 ? (1.0 - lambda) : lambda;
652         }
653         output->coulombVirial_[XX][XX] +=
654                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][0];
655         output->coulombVirial_[YY][YY] +=
656                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][1];
657         output->coulombVirial_[ZZ][ZZ] +=
658                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][2];
659         output->coulombVirial_[XX][YY] +=
660                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][3];
661         output->coulombVirial_[YY][XX] +=
662                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][3];
663         output->coulombVirial_[XX][ZZ] +=
664                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][4];
665         output->coulombVirial_[ZZ][XX] +=
666                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][4];
667         output->coulombVirial_[YY][ZZ] +=
668                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][5];
669         output->coulombVirial_[ZZ][YY] +=
670                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][5];
671         output->coulombEnergy_ += scale * 0.5F * pmeGpu->staging.h_virialAndEnergy[gridIndex][6];
672     }
673     if (pmeGpu->common->ngrids > 1)
674     {
675         output->coulombDvdl_ = 0.5F
676                                * (pmeGpu->staging.h_virialAndEnergy[FEP_STATE_B][6]
677                                   - pmeGpu->staging.h_virialAndEnergy[FEP_STATE_A][6]);
678     }
679 }
680
681 /*! \brief Sets the force-related members in \p output
682  *
683  * \param[in]   pmeGpu      PME GPU data structure
684  * \param[out]  output      Pointer to PME output data structure
685  */
686 static void pme_gpu_getForceOutput(PmeGpu* pmeGpu, PmeOutput* output)
687 {
688     output->haveForceOutput_ = !pmeGpu->settings.useGpuForceReduction;
689     if (output->haveForceOutput_)
690     {
691         output->forces_ = pmeGpu->staging.h_forces;
692     }
693 }
694
695 PmeOutput pme_gpu_getOutput(const gmx_pme_t& pme, const bool computeEnergyAndVirial, const real lambdaQ)
696 {
697     PmeGpu* pmeGpu = pme.gpu;
698
699     PmeOutput output;
700
701     pme_gpu_getForceOutput(pmeGpu, &output);
702
703     if (computeEnergyAndVirial)
704     {
705         if (pme_gpu_settings(pmeGpu).performGPUSolve)
706         {
707             pme_gpu_getEnergyAndVirial(pme, lambdaQ, &output);
708         }
709         else
710         {
711             get_pme_ener_vir_q(pme.solve_work, pme.nthread, &output);
712         }
713     }
714     return output;
715 }
716
717 void pme_gpu_update_input_box(PmeGpu gmx_unused* pmeGpu, const matrix gmx_unused box)
718 {
719 #if GMX_DOUBLE
720     GMX_THROW(gmx::NotImplementedError("PME is implemented for single-precision only on GPU"));
721 #else
722     matrix scaledBox;
723     pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
724     auto* kernelParamsPtr              = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
725     kernelParamsPtr->current.boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
726     GMX_ASSERT(kernelParamsPtr->current.boxVolume != 0.0F, "Zero volume of the unit cell");
727     matrix recipBox;
728     gmx::invertBoxMatrix(scaledBox, recipBox);
729
730     /* The GPU recipBox is transposed as compared to the CPU recipBox.
731      * Spread uses matrix columns (while solve and gather use rows).
732      * There is no particular reason for this; it might be further rethought/optimized for better access patterns.
733      */
734     const real newRecipBox[DIM][DIM] = { { recipBox[XX][XX], recipBox[YY][XX], recipBox[ZZ][XX] },
735                                          { 0.0, recipBox[YY][YY], recipBox[ZZ][YY] },
736                                          { 0.0, 0.0, recipBox[ZZ][ZZ] } };
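    // For a rectangular (diagonal) box the transposed copy is identical to recipBox itself;
    // the transposition only makes a difference for triclinic boxes.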
737     memcpy(kernelParamsPtr->current.recipBox, newRecipBox, sizeof(matrix));
738 #endif
739 }
740
741 /*! \brief \libinternal
742  * (Re-)initializes all the PME GPU data related to the grid size and cut-off.
743  *
744  * \param[in] pmeGpu            The PME GPU structure.
745  */
746 static void pme_gpu_reinit_grids(PmeGpu* pmeGpu)
747 {
748     auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
749
750     GMX_ASSERT(
751             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
752             "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
753
754     kernelParamsPtr->grid.ewaldFactor =
755             (M_PI * M_PI) / (pmeGpu->common->ewaldcoeff_q * pmeGpu->common->ewaldcoeff_q);
756     /* The grid size variants */
757     for (int i = 0; i < DIM; i++)
758     {
759         kernelParamsPtr->grid.realGridSize[i] = pmeGpu->common->nk[i];
760         kernelParamsPtr->grid.realGridSizeFP[i] =
761                 static_cast<float>(kernelParamsPtr->grid.realGridSize[i]);
762         kernelParamsPtr->grid.realGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
763
764         // The complex grid currently uses no padding;
765         // if it starts to do so, then another test should be added for that
766         kernelParamsPtr->grid.complexGridSize[i]       = kernelParamsPtr->grid.realGridSize[i];
767         kernelParamsPtr->grid.complexGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
768     }
769     /* FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
770     if (!pme_gpu_settings(pmeGpu).performGPUFFT)
771     {
772         // This allows the GPU spreading grid and the CPU fftgrid to have the same layout, so that we can copy the data directly
773         kernelParamsPtr->grid.realGridSizePadded[ZZ] =
774                 (kernelParamsPtr->grid.realGridSize[ZZ] / 2 + 1) * 2;
775     }
776     /* GPU FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
777     kernelParamsPtr->grid.complexGridSize[ZZ] /= 2;
778     kernelParamsPtr->grid.complexGridSize[ZZ]++;
779     kernelParamsPtr->grid.complexGridSizePadded[ZZ] = kernelParamsPtr->grid.complexGridSize[ZZ];
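    // For example, realGridSize[ZZ] = 72 yields complexGridSize[ZZ] = 72 / 2 + 1 = 37 complex elements;
    // with CPU FFT, realGridSizePadded[ZZ] becomes (72 / 2 + 1) * 2 = 74 floats so that the GPU and
    // CPU grids share the same layout.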
780
781     pme_gpu_realloc_and_copy_fract_shifts(pmeGpu);
782     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
783     {
784         pme_gpu_realloc_and_copy_bspline_values(pmeGpu, gridIndex);
785     }
786
787     pme_gpu_realloc_grids(pmeGpu);
788     pme_gpu_reinit_3dfft(pmeGpu);
789 }
790
791 /* Several GPU functions that refer to the CPU PME data live here.
792  * We would like to keep these away from the GPU-framework specific code for clarity,
793  * as well as to avoid compilation issues with MPI.
794  */
795
796 /*! \brief \libinternal
797  * Copies everything useful from the PME CPU to the PME GPU structure.
798  * The goal is to minimize interaction with the PME CPU structure in the GPU code.
799  *
800  * \param[in] pme         The PME structure.
801  */
802 static void pme_gpu_copy_common_data_from(const gmx_pme_t* pme)
803 {
804     /* TODO: Consider refactoring the CPU PME code to use the same structure,
805      * so that this function becomes 2 lines */
806     PmeGpu* pmeGpu               = pme->gpu;
807     pmeGpu->common->ngrids       = pme->bFEP_q ? 2 : 1;
808     pmeGpu->common->epsilon_r    = pme->epsilon_r;
809     pmeGpu->common->ewaldcoeff_q = pme->ewaldcoeff_q;
810     pmeGpu->common->nk[XX]       = pme->nkx;
811     pmeGpu->common->nk[YY]       = pme->nky;
812     pmeGpu->common->nk[ZZ]       = pme->nkz;
813     pmeGpu->common->pme_order    = pme->pme_order;
814     if (pmeGpu->common->pme_order != c_pmeGpuOrder)
815     {
816         GMX_THROW(gmx::NotImplementedError("pme_order != 4 is not implemented!"));
817     }
818     for (int i = 0; i < DIM; i++)
819     {
820         pmeGpu->common->bsp_mod[i].assign(pme->bsp_mod[i], pme->bsp_mod[i] + pmeGpu->common->nk[i]);
821     }
822     const int cellCount = c_pmeNeighborUnitcellCount;
823     pmeGpu->common->fsh.resize(0);
824     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshx, pme->fshx + cellCount * pme->nkx);
825     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshy, pme->fshy + cellCount * pme->nky);
826     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshz, pme->fshz + cellCount * pme->nkz);
827     pmeGpu->common->nn.resize(0);
828     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnx, pme->nnx + cellCount * pme->nkx);
829     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nny, pme->nny + cellCount * pme->nky);
830     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnz, pme->nnz + cellCount * pme->nkz);
831     pmeGpu->common->runMode       = pme->runMode;
832     pmeGpu->common->isRankPmeOnly = !pme->bPPnode;
833     pmeGpu->common->boxScaler     = pme->boxScaler;
834 }
835
836 /*! \libinternal \brief
837  * Uses heuristics to select the best-performing PME spread and gather kernels.
838  *
839  * \param[in,out] pmeGpu         The PME GPU structure.
840  */
841 static void pme_gpu_select_best_performing_pme_spreadgather_kernels(PmeGpu* pmeGpu)
842 {
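    // Heuristic: on CUDA, systems above c_pmeGpuPerformanceAtomLimit atoms are faster with order (4)
    // threads per atom and with splines recalculated in the gather kernel; smaller systems, and all
    // non-CUDA builds, use order^2 (16) threads per atom and reuse the splines stored during spread.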
843     if (GMX_GPU_CUDA && pmeGpu->kernelParams->atoms.nAtoms > c_pmeGpuPerformanceAtomLimit)
844     {
845         pmeGpu->settings.threadsPerAtom     = ThreadsPerAtom::Order;
846         pmeGpu->settings.recalculateSplines = true;
847     }
848     else
849     {
850         pmeGpu->settings.threadsPerAtom     = ThreadsPerAtom::OrderSquared;
851         pmeGpu->settings.recalculateSplines = false;
852     }
853 }
854
855
856 /*! \libinternal \brief
857  * Initializes the PME GPU data at the beginning of the run.
858  * TODO: this should become PmeGpu::PmeGpu()
859  *
860  * \param[in,out] pme            The PME structure.
861  * \param[in]     deviceContext  The GPU context.
862  * \param[in]     deviceStream   The GPU stream.
863  * \param[in,out] pmeGpuProgram  The handle to the program/kernel data created outside (e.g. in unit tests/runner)
864  */
865 static void pme_gpu_init(gmx_pme_t*           pme,
866                          const DeviceContext& deviceContext,
867                          const DeviceStream&  deviceStream,
868                          const PmeGpuProgram* pmeGpuProgram)
869 {
870     pme->gpu       = new PmeGpu();
871     PmeGpu* pmeGpu = pme->gpu;
872     changePinningPolicy(&pmeGpu->staging.h_forces, pme_get_pinning_policy());
873     pmeGpu->common = std::make_shared<PmeShared>();
874
875     /* These settings are set here for the whole run; dynamic ones are set in pme_gpu_reinit() */
876     /* A convenience variable. */
877     pmeGpu->settings.useDecomposition = (pme->nnodes != 1);
878     /* TODO: CPU gather with GPU spread is broken due to different theta/dtheta layout. */
879     pmeGpu->settings.performGPUGather = true;
880     // By default GPU-side reduction is off (explicitly set here for tests, otherwise reset per-step)
881     pmeGpu->settings.useGpuForceReduction = false;
882
883     pme_gpu_set_testing(pmeGpu, false);
884
885     GMX_ASSERT(pmeGpuProgram != nullptr, "GPU kernels must be already compiled");
886     pmeGpu->programHandle_ = pmeGpuProgram;
887
888     pmeGpu->initializedClfftLibrary_ = std::make_unique<gmx::ClfftInitializer>();
889
890     pme_gpu_init_internal(pmeGpu, deviceContext, deviceStream);
891
892     pme_gpu_copy_common_data_from(pme);
893     pme_gpu_alloc_energy_virial(pmeGpu);
894
895     GMX_ASSERT(pmeGpu->common->epsilon_r != 0.0F, "PME GPU: bad electrostatic coefficient");
896
897     auto* kernelParamsPtr               = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
898     kernelParamsPtr->constants.elFactor = ONE_4PI_EPS0 / pmeGpu->common->epsilon_r;
899 }
900
901 void pme_gpu_get_real_grid_sizes(const PmeGpu* pmeGpu, gmx::IVec* gridSize, gmx::IVec* paddedGridSize)
902 {
903     GMX_ASSERT(gridSize != nullptr, "");
904     GMX_ASSERT(paddedGridSize != nullptr, "");
905     GMX_ASSERT(pmeGpu != nullptr, "");
906     auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
907     for (int i = 0; i < DIM; i++)
908     {
909         (*gridSize)[i]       = kernelParamsPtr->grid.realGridSize[i];
910         (*paddedGridSize)[i] = kernelParamsPtr->grid.realGridSizePadded[i];
911     }
912 }
913
914 void pme_gpu_reinit(gmx_pme_t*           pme,
915                     const DeviceContext* deviceContext,
916                     const DeviceStream*  deviceStream,
917                     const PmeGpuProgram* pmeGpuProgram)
918 {
919     GMX_ASSERT(pme != nullptr, "Need valid PME object");
920
921     if (!pme->gpu)
922     {
923         GMX_RELEASE_ASSERT(deviceContext != nullptr,
924                            "Device context can not be nullptr when setting up PME on GPU.");
925         GMX_RELEASE_ASSERT(deviceStream != nullptr,
926                            "Device stream can not be nullptr when setting up PME on GPU.");
927         /* First-time initialization */
928         pme_gpu_init(pme, *deviceContext, *deviceStream, pmeGpuProgram);
929     }
930     else
931     {
932         /* After this call nothing in the GPU code should refer to the gmx_pme_t *pme itself - until the next pme_gpu_reinit */
933         pme_gpu_copy_common_data_from(pme);
934     }
935     /* GPU FFT will only get used for a single rank. */
936     pme->gpu->settings.performGPUFFT =
937             (pme->gpu->common->runMode == PmeRunMode::GPU) && !pme->gpu->settings.useDecomposition;
938     pme->gpu->settings.performGPUSolve = (pme->gpu->common->runMode == PmeRunMode::GPU);
939
940     /* Reinit active timers */
941     pme_gpu_reinit_timings(pme->gpu);
942
943     pme_gpu_reinit_grids(pme->gpu);
944     // Note: if timing the reinit launch overhead becomes more relevant
945     // (e.g. with regular PP-PME re-balancing), we should pass wcycle here.
946     pme_gpu_reinit_computation(pme, nullptr);
947     /* Clear the previous box - doesn't hurt, and forces the PME CPU recipbox
948      * update for mixed mode on grid switch. TODO: use shared recipbox field.
949      */
950     std::memset(pme->gpu->common->previousBox, 0, sizeof(pme->gpu->common->previousBox));
951 }
952
953 void pme_gpu_destroy(PmeGpu* pmeGpu)
954 {
955     /* Free lots of data */
956     pme_gpu_free_energy_virial(pmeGpu);
957     pme_gpu_free_bspline_values(pmeGpu);
958     pme_gpu_free_forces(pmeGpu);
959     pme_gpu_free_coefficients(pmeGpu);
960     pme_gpu_free_spline_data(pmeGpu);
961     pme_gpu_free_grid_indices(pmeGpu);
962     pme_gpu_free_fract_shifts(pmeGpu);
963     pme_gpu_free_grids(pmeGpu);
964
965     pme_gpu_destroy_3dfft(pmeGpu);
966
967     delete pmeGpu;
968 }
969
970 void pme_gpu_reinit_atoms(PmeGpu* pmeGpu, const int nAtoms, const real* chargesA, const real* chargesB)
971 {
972     auto* kernelParamsPtr         = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
973     kernelParamsPtr->atoms.nAtoms = nAtoms;
974     const int  block_size         = pme_gpu_get_atom_data_block_size();
975     const int  nAtomsNewPadded    = ((nAtoms + block_size - 1) / block_size) * block_size;
976     const bool haveToRealloc      = (pmeGpu->nAtomsAlloc < nAtomsNewPadded);
977     pmeGpu->nAtomsAlloc           = nAtomsNewPadded;
978
979 #if GMX_DOUBLE
980     GMX_RELEASE_ASSERT(false, "Only single precision supported");
981     GMX_UNUSED_VALUE(chargesA);
        GMX_UNUSED_VALUE(chargesB);
982 #else
983     int gridIndex = 0;
984     /* Could also be checked for haveToRealloc, but the copy always needs to be performed */
985     pme_gpu_realloc_and_copy_input_coefficients(pmeGpu, reinterpret_cast<const float*>(chargesA), gridIndex);
986     gridIndex++;
987     if (chargesB != nullptr)
988     {
989         pme_gpu_realloc_and_copy_input_coefficients(
990                 pmeGpu, reinterpret_cast<const float*>(chargesB), gridIndex);
991     }
992     else
993     {
994         /* Fill the second set of coefficients with chargesA as well to be able to avoid
995          * conditionals in the GPU kernels */
996         /* FIXME: This should be avoided by making a separate templated version of the
997          * relevant kernel(s) (probably only pme_gather_kernel). That would require a
998          * reduction of the current number of templated parameters of that kernel. */
999         pme_gpu_realloc_and_copy_input_coefficients(
1000                 pmeGpu, reinterpret_cast<const float*>(chargesA), gridIndex);
1001     }
1002 #endif
1003
1004     if (haveToRealloc)
1005     {
1006         pme_gpu_realloc_forces(pmeGpu);
1007         pme_gpu_realloc_spline_data(pmeGpu);
1008         pme_gpu_realloc_grid_indices(pmeGpu);
1009     }
1010     pme_gpu_select_best_performing_pme_spreadgather_kernels(pmeGpu);
1011 }
1012
1013 /*! \internal \brief
1014  * Returns raw timing event from the corresponding GpuRegionTimer (if timings are enabled).
1015  * In CUDA, the result can be a nullptr stub, per the GpuRegionTimer implementation.
1016  *
1017  * \param[in] pmeGpu         The PME GPU data structure.
1018  * \param[in] PMEStageId     The PME GPU stage gtPME_ index from the enum in src/gromacs/timing/gpu_timing.h
1019  */
1020 static CommandEvent* pme_gpu_fetch_timing_event(const PmeGpu* pmeGpu, size_t PMEStageId)
1021 {
1022     CommandEvent* timingEvent = nullptr;
1023     if (pme_gpu_timings_enabled(pmeGpu))
1024     {
1025         GMX_ASSERT(PMEStageId < pmeGpu->archSpecific->timingEvents.size(),
1026                    "Wrong PME GPU timing event index");
1027         timingEvent = pmeGpu->archSpecific->timingEvents[PMEStageId].fetchNextEvent();
1028     }
1029     return timingEvent;
1030 }
1031
1032 void pme_gpu_3dfft(const PmeGpu* pmeGpu, gmx_fft_direction dir, const int grid_index)
1033 {
1034     int timerId = (dir == GMX_FFT_REAL_TO_COMPLEX) ? gtPME_FFT_R2C : gtPME_FFT_C2R;
1035
1036     pme_gpu_start_timing(pmeGpu, timerId);
1037     pmeGpu->archSpecific->fftSetup[grid_index]->perform3dFft(
1038             dir, pme_gpu_fetch_timing_event(pmeGpu, timerId));
1039     pme_gpu_stop_timing(pmeGpu, timerId);
1040 }
1041
1042 /*! \brief
1043  * Given possibly large \p blockCount, returns a compact 1D or 2D grid for kernel scheduling,
1044  * to minimize number of unused blocks.
1045  */
1046 std::pair<int, int> inline pmeGpuCreateGrid(const PmeGpu* pmeGpu, int blockCount)
1047 {
1048     // How many maximum widths in X do we need (hopefully just one)
1049     const int minRowCount = (blockCount + pmeGpu->maxGridWidthX - 1) / pmeGpu->maxGridWidthX;
1050     // Trying to make things even
1051     const int colCount = (blockCount + minRowCount - 1) / minRowCount;
1052     GMX_ASSERT((colCount * minRowCount - blockCount) >= 0, "pmeGpuCreateGrid: totally wrong");
1053     GMX_ASSERT((colCount * minRowCount - blockCount) < minRowCount,
1054                "pmeGpuCreateGrid: excessive blocks");
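    // Worked example (with an artificially small limit): blockCount = 10 and maxGridWidthX = 4
    // give minRowCount = 3 and colCount = 4, i.e. a 4 x 3 grid of 12 blocks with 2 left unused.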
1055     return std::pair<int, int>(colCount, minRowCount);
1056 }
1057
1058 /*! \brief
1059  * Returns a pointer to the appropriate spline and spread kernel based on the input bool values.
1060  *
1061  * \param[in]  pmeGpu                   The PME GPU structure.
1062  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
1063  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
1064  * \param[in]  numGrids                 Number of grids to use. numGrids == 2 if Coulomb is perturbed.
1065  *
1066  * \return Pointer to CUDA kernel
1067  */
1068 static auto selectSplineAndSpreadKernelPtr(const PmeGpu*  pmeGpu,
1069                                            ThreadsPerAtom threadsPerAtom,
1070                                            bool           writeSplinesToGlobal,
1071                                            const int      numGrids)
1072 {
1073     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1074     if (writeSplinesToGlobal)
1075     {
1076         if (threadsPerAtom == ThreadsPerAtom::Order)
1077         {
1078             if (numGrids == 2)
1079             {
1080                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesThPerAtom4Dual;
1081             }
1082             else
1083             {
1084                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesThPerAtom4Single;
1085             }
1086         }
1087         else
1088         {
1089             if (numGrids == 2)
1090             {
1091                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesDual;
1092             }
1093             else
1094             {
1095                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesSingle;
1096             }
1097         }
1098     }
1099     else
1100     {
1101         if (threadsPerAtom == ThreadsPerAtom::Order)
1102         {
1103             if (numGrids == 2)
1104             {
1105                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Dual;
1106             }
1107             else
1108             {
1109                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Single;
1110             }
1111         }
1112         else
1113         {
1114             if (numGrids == 2)
1115             {
1116                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelDual;
1117             }
1118             else
1119             {
1120                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelSingle;
1121             }
1122         }
1123     }
1124
1125     return kernelPtr;
1126 }
1127
1128 /*! \brief
1129  * Returns a pointer to the appropriate spline kernel based on the input bool values.
1130  *
1131  * \param[in]  pmeGpu                   The PME GPU structure.
1132  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
1133  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
1134  * \param[in]  numGrids                 Number of grids to use. numGrids == 2 if Coulomb is perturbed.
1135  *
1136  * \return Pointer to CUDA kernel
1137  */
1138 static auto selectSplineKernelPtr(const PmeGpu*  pmeGpu,
1139                                   ThreadsPerAtom threadsPerAtom,
1140                                   bool gmx_unused writeSplinesToGlobal,
1141                                   const int       numGrids)
1142 {
1143     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1144     GMX_ASSERT(
1145             writeSplinesToGlobal,
1146             "Spline data should always be written to global memory when just calculating splines");
1147
1148     if (threadsPerAtom == ThreadsPerAtom::Order)
1149     {
1150         if (numGrids == 2)
1151         {
1152             kernelPtr = pmeGpu->programHandle_->impl_->splineKernelThPerAtom4Dual;
1153         }
1154         else
1155         {
1156             kernelPtr = pmeGpu->programHandle_->impl_->splineKernelThPerAtom4Single;
1157         }
1158     }
1159     else
1160     {
1161         if (numGrids == 2)
1162         {
1163             kernelPtr = pmeGpu->programHandle_->impl_->splineKernelDual;
1164         }
1165         else
1166         {
1167             kernelPtr = pmeGpu->programHandle_->impl_->splineKernelSingle;
1168         }
1169     }
1170     return kernelPtr;
1171 }
1172
1173 /*! \brief
1174  * Returns a pointer to the appropriate spread kernel based on the input bool values.
1175  *
1176  * \param[in]  pmeGpu                   The PME GPU structure.
1177  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
1178  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
1179  * \param[in]  numGrids                 Number of grids to use. numGrids == 2 if Coulomb is perturbed.
1180  *
1181  * \return Pointer to CUDA kernel
1182  */
1183 static auto selectSpreadKernelPtr(const PmeGpu*  pmeGpu,
1184                                   ThreadsPerAtom threadsPerAtom,
1185                                   bool           writeSplinesToGlobal,
1186                                   const int      numGrids)
1187 {
1188     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1189     if (writeSplinesToGlobal)
1190     {
1191         if (threadsPerAtom == ThreadsPerAtom::Order)
1192         {
1193             if (numGrids == 2)
1194             {
1195                 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelThPerAtom4Dual;
1196             }
                  else
1197             {
1198                 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelThPerAtom4Single;
1199             }
1200         }
1201         else
1202         {
1203             if (numGrids == 2)
1204             {
1205                 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelDual;
1206             }
1207             else
1208             {
1209                 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelSingle;
1210             }
1211         }
1212     }
1213     else
1214     {
1215         /* If we are not saving the spline data, we need to recalculate it
1216            using the spline and spread kernel */
1217         if (threadsPerAtom == ThreadsPerAtom::Order)
1218         {
1219             if (numGrids == 2)
1220             {
1221                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Dual;
1222             }
1223             else
1224             {
1225                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Single;
1226             }
1227         }
1228         else
1229         {
1230             if (numGrids == 2)
1231             {
1232                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelDual;
1233             }
1234             else
1235             {
1236                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelSingle;
1237             }
1238         }
1239     }
1240     return kernelPtr;
1241 }
1242
1243 void pme_gpu_spread(const PmeGpu*         pmeGpu,
1244                     GpuEventSynchronizer* xReadyOnDevice,
1245                     real**                h_grids,
1246                     bool                  computeSplines,
1247                     bool                  spreadCharges,
1248                     const real            lambda)
1249 {
1250     GMX_ASSERT(
1251             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
1252             "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
1253
1254     GMX_ASSERT(computeSplines || spreadCharges,
1255                "PME spline/spread kernel has invalid input (nothing to do)");
1256     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1257     GMX_ASSERT(kernelParamsPtr->atoms.nAtoms > 0, "No atom data in PME GPU spread");
1258
1259     const size_t blockSize = pmeGpu->programHandle_->impl_->spreadWorkGroupSize;
1260
1261     const int order = pmeGpu->common->pme_order;
1262     GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1263     const bool writeGlobal = pmeGpu->settings.copyAllOutputs;
1264     const int  threadsPerAtom =
1265             (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
1266     const bool recalculateSplines = pmeGpu->settings.recalculateSplines;
1267
1268     GMX_ASSERT(!GMX_GPU_OPENCL || pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::OrderSquared,
1269                "Only 16 threads per atom supported in OpenCL");
1270     GMX_ASSERT(!GMX_GPU_OPENCL || !recalculateSplines,
1271                "Recalculating splines not supported in OpenCL");
1272
1273     const int atomsPerBlock = blockSize / threadsPerAtom;
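         // For example, with order 4 and a spread work-group size of 256 (a typical CUDA value; the
         // actual value comes from spreadWorkGroupSize above), this gives 256/16 = 16 atoms per block
         // in ThreadsPerAtom::OrderSquared mode and 256/4 = 64 atoms per block in ThreadsPerAtom::Order mode.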
1274
1275     // TODO: pick smaller block size in runtime if needed
1276     // (e.g. on 660 Ti where 50% occupancy is ~25% faster than 100% occupancy with RNAse (~17.8k atoms))
1277     // If doing so, change atomsPerBlock in the kernels as well.
1278     // TODO: test varying block sizes on modern arch-s as well
1279     // TODO: also consider using cudaFuncSetCacheConfig() for preferring shared memory on older architectures
1280     // (for spline data mostly)
1281     GMX_ASSERT(!(c_pmeAtomDataBlockSize % atomsPerBlock),
1282                "inconsistent atom data padding vs. spreading block size");
1283
1284     // Ensure that coordinates are ready on the device before launching spread;
1285     // only needed with CUDA on PP+PME ranks; separate PME ranks, unit tests and OpenCL builds
1286     // use a single stream, so xReadyOnDevice == nullptr there and no wait needs to be enqueued.
1287     GMX_ASSERT(!GMX_GPU_CUDA || xReadyOnDevice != nullptr || pmeGpu->common->isRankPmeOnly
1288                        || pme_gpu_settings(pmeGpu).copyAllOutputs,
1289                "Need a valid coordinate synchronizer on PP+PME ranks with CUDA.");
1290
1291     if (xReadyOnDevice)
1292     {
1293         xReadyOnDevice->enqueueWaitEvent(pmeGpu->archSpecific->pmeStream_);
1294     }
1295
1296     const int blockCount = pmeGpu->nAtomsAlloc / atomsPerBlock;
1297     auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
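         // nAtomsAlloc is padded up to a multiple of the atom data block size (see the assertion above),
         // so this division is exact; pmeGpuCreateGrid() packs the resulting block count into a 1D or
         // 2D launch grid (dimGrid.first/dimGrid.second) used below.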
1298
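         // current.scale is the weight of FEP state A: 1 - lambda when Coulomb is perturbed
         // (two grids), and simply 1 when a single grid is used.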
1299     if (pmeGpu->common->ngrids == 1)
1300     {
1301         kernelParamsPtr->current.scale = 1.0;
1302     }
1303     else
1304     {
1305         kernelParamsPtr->current.scale = 1.0 - lambda;
1306     }
1307
1308     KernelLaunchConfig config;
1309     config.blockSize[0] = order;
1310     config.blockSize[1] = (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? 1 : order);
1311     config.blockSize[2] = atomsPerBlock;
1312     config.gridSize[0]  = dimGrid.first;
1313     config.gridSize[1]  = dimGrid.second;
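         // The block is order x (1 or order) x atomsPerBlock threads, i.e. threadsPerAtom x atomsPerBlock
         // threads in total, matching the work-group size the kernel was compiled for.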
1314
1315     int                                timingId;
1316     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1317     if (computeSplines)
1318     {
1319         if (spreadCharges)
1320         {
1321             timingId  = gtPME_SPLINEANDSPREAD;
1322             kernelPtr = selectSplineAndSpreadKernelPtr(pmeGpu,
1323                                                        pmeGpu->settings.threadsPerAtom,
1324                                                        writeGlobal || (!recalculateSplines),
1325                                                        pmeGpu->common->ngrids);
1326         }
1327         else
1328         {
1329             timingId  = gtPME_SPLINE;
1330             kernelPtr = selectSplineKernelPtr(pmeGpu,
1331                                               pmeGpu->settings.threadsPerAtom,
1332                                               writeGlobal || (!recalculateSplines),
1333                                               pmeGpu->common->ngrids);
1334         }
1335     }
1336     else
1337     {
1338         timingId  = gtPME_SPREAD;
1339         kernelPtr = selectSpreadKernelPtr(pmeGpu,
1340                                           pmeGpu->settings.threadsPerAtom,
1341                                           writeGlobal || (!recalculateSplines),
1342                                           pmeGpu->common->ngrids);
1343     }
1344
1346     pme_gpu_start_timing(pmeGpu, timingId);
1347     auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
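         // When the build can embed device buffers in the kernel-parameters structure (c_canEmbedBuffers,
         // true for CUDA), that structure is passed as a single kernel argument; otherwise (OpenCL) every
         // device buffer must additionally be passed as an explicit argument.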
1348 #if c_canEmbedBuffers
1349     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1350 #else
1351     const auto kernelArgs =
1352             prepareGpuKernelArguments(kernelPtr,
1353                                       config,
1354                                       kernelParamsPtr,
1355                                       &kernelParamsPtr->atoms.d_theta,
1356                                       &kernelParamsPtr->atoms.d_dtheta,
1357                                       &kernelParamsPtr->atoms.d_gridlineIndices,
1358                                       &kernelParamsPtr->grid.d_realGrid[FEP_STATE_A],
1359                                       &kernelParamsPtr->grid.d_realGrid[FEP_STATE_B],
1360                                       &kernelParamsPtr->grid.d_fractShiftsTable,
1361                                       &kernelParamsPtr->grid.d_gridlineIndicesTable,
1362                                       &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_A],
1363                                       &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_B],
1364                                       &kernelParamsPtr->atoms.d_coordinates);
1365 #endif
1366
1367     launchGpuKernel(
1368             kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME spline/spread", kernelArgs);
1369     pme_gpu_stop_timing(pmeGpu, timingId);
1370
1371     const auto& settings    = pmeGpu->settings;
1372     const bool copyBackGrid = spreadCharges && (!settings.performGPUFFT || settings.copyAllOutputs);
1373     if (copyBackGrid)
1374     {
1375         for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
1376         {
1377             float* h_grid = h_grids[gridIndex];
1378             pme_gpu_copy_output_spread_grid(pmeGpu, h_grid, gridIndex);
1379         }
1380     }
1381     const bool copyBackAtomData =
1382             computeSplines && (!settings.performGPUGather || settings.copyAllOutputs);
1383     if (copyBackAtomData)
1384     {
1385         pme_gpu_copy_output_spread_atom_data(pmeGpu);
1386     }
1387 }
1388
1389 void pme_gpu_solve(const PmeGpu* pmeGpu,
1390                    const int     gridIndex,
1391                    t_complex*    h_grid,
1392                    GridOrdering  gridOrdering,
1393                    bool          computeEnergyAndVirial)
1394 {
1395     GMX_ASSERT(
1396             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
1397             "Only one (normal Coulomb PME) or two (FEP Coulomb PME) PME grids can be used on GPU");
1398     GMX_ASSERT(gridIndex < pmeGpu->common->ngrids,
1399                "Invalid combination of gridIndex and number of grids");
1400
1401     const auto& settings               = pmeGpu->settings;
1402     const bool  copyInputAndOutputGrid = !settings.performGPUFFT || settings.copyAllOutputs;
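         // When the FFT is not done on the GPU (mixed mode), or all outputs are requested (e.g. in tests),
         // the complex grid is staged to the device here and copied back after the solve kernel.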
1403
1404     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1405
1406     float* h_gridFloat = reinterpret_cast<float*>(h_grid);
1407     if (copyInputAndOutputGrid)
1408     {
1409         copyToDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid[gridIndex],
1410                            h_gridFloat,
1411                            0,
1412                            pmeGpu->archSpecific->complexGridSize[gridIndex],
1413                            pmeGpu->archSpecific->pmeStream_,
1414                            pmeGpu->settings.transferKind,
1415                            nullptr);
1416     }
1417
1418     int majorDim = -1, middleDim = -1, minorDim = -1;
1419     switch (gridOrdering)
1420     {
1421         case GridOrdering::YZX:
1422             majorDim  = YY;
1423             middleDim = ZZ;
1424             minorDim  = XX;
1425             break;
1426
1427         case GridOrdering::XYZ:
1428             majorDim  = XX;
1429             middleDim = YY;
1430             minorDim  = ZZ;
1431             break;
1432
1433         default: GMX_ASSERT(false, "Implement grid ordering here and below for the kernel launch");
1434     }
1435
1436     const int maxBlockSize = pmeGpu->programHandle_->impl_->solveMaxWorkGroupSize;
1437
1438     const int gridLineSize      = pmeGpu->kernelParams->grid.complexGridSize[minorDim];
1439     const int gridLinesPerBlock = std::max(maxBlockSize / gridLineSize, 1);
1440     const int blocksPerGridLine = (gridLineSize + maxBlockSize - 1) / maxBlockSize;
1441     int       cellsPerBlock;
1442     if (blocksPerGridLine == 1)
1443     {
1444         cellsPerBlock = gridLineSize * gridLinesPerBlock;
1445     }
1446     else
1447     {
1448         cellsPerBlock = (gridLineSize + blocksPerGridLine - 1) / blocksPerGridLine;
1449     }
1450     const int warpSize  = pmeGpu->programHandle_->warpSize();
1451     const int blockSize = (cellsPerBlock + warpSize - 1) / warpSize * warpSize;
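         // Illustrative example (numbers only show the arithmetic, not real grid sizes): a 65-cell grid
         // line with maxBlockSize 256 and warpSize 32 gives gridLinesPerBlock = 3, blocksPerGridLine = 1,
         // cellsPerBlock = 195 and a blockSize rounded up to 224 (7 full warps).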
1452
1453     static_assert(!GMX_GPU_CUDA || c_solveMaxWarpsPerBlock / 2 >= 4,
1454                   "The CUDA solve energy kernels need at least 4 warps. "
1455                   "Here we launch at least half of the max warps.");
1456
1457     KernelLaunchConfig config;
1458     config.blockSize[0] = blockSize;
1459     config.gridSize[0]  = blocksPerGridLine;
1460     // rounding up to full warps so that shuffle operations produce defined results
1461     config.gridSize[1] = (pmeGpu->kernelParams->grid.complexGridSize[middleDim] + gridLinesPerBlock - 1)
1462                          / gridLinesPerBlock;
1463     config.gridSize[2] = pmeGpu->kernelParams->grid.complexGridSize[majorDim];
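         // Each block thus processes cellsPerBlock cells: gridSize[0] splits long grid lines over several
         // blocks, gridSize[1] covers the middle dimension in chunks of gridLinesPerBlock lines, and
         // gridSize[2] assigns one block row per major-dimension plane.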
1464
1465     int                                timingId  = gtPME_SOLVE;
1466     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1467     if (gridOrdering == GridOrdering::YZX)
1468     {
1469         if (gridIndex == 0)
1470         {
1471             kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveYZXEnergyKernelA
1472                                                : pmeGpu->programHandle_->impl_->solveYZXKernelA;
1473         }
1474         else
1475         {
1476             kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveYZXEnergyKernelB
1477                                                : pmeGpu->programHandle_->impl_->solveYZXKernelB;
1478         }
1479     }
1480     else if (gridOrdering == GridOrdering::XYZ)
1481     {
1482         if (gridIndex == 0)
1483         {
1484             kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveXYZEnergyKernelA
1485                                                : pmeGpu->programHandle_->impl_->solveXYZKernelA;
1486         }
1487         else
1488         {
1489             kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveXYZEnergyKernelB
1490                                                : pmeGpu->programHandle_->impl_->solveXYZKernelB;
1491         }
1492     }
1493
1494     pme_gpu_start_timing(pmeGpu, timingId);
1495     auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1496 #if c_canEmbedBuffers
1497     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1498 #else
1499     const auto kernelArgs =
1500             prepareGpuKernelArguments(kernelPtr,
1501                                       config,
1502                                       kernelParamsPtr,
1503                                       &kernelParamsPtr->grid.d_splineModuli[gridIndex],
1504                                       &kernelParamsPtr->constants.d_virialAndEnergy[gridIndex],
1505                                       &kernelParamsPtr->grid.d_fourierGrid[gridIndex]);
1506 #endif
1507     launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME solve", kernelArgs);
1508     pme_gpu_stop_timing(pmeGpu, timingId);
1509
1510     if (computeEnergyAndVirial)
1511     {
1512         copyFromDeviceBuffer(pmeGpu->staging.h_virialAndEnergy[gridIndex],
1513                              &kernelParamsPtr->constants.d_virialAndEnergy[gridIndex],
1514                              0,
1515                              c_virialAndEnergyCount,
1516                              pmeGpu->archSpecific->pmeStream_,
1517                              pmeGpu->settings.transferKind,
1518                              nullptr);
1519     }
1520
1521     if (copyInputAndOutputGrid)
1522     {
1523         copyFromDeviceBuffer(h_gridFloat,
1524                              &kernelParamsPtr->grid.d_fourierGrid[gridIndex],
1525                              0,
1526                              pmeGpu->archSpecific->complexGridSize[gridIndex],
1527                              pmeGpu->archSpecific->pmeStream_,
1528                              pmeGpu->settings.transferKind,
1529                              nullptr);
1530     }
1531 }
1532
1533 /*! \brief
1534  * Returns a pointer to the appropriate gather kernel based on the input values
1535  *
1536  * \param[in]  pmeGpu                   The PME GPU structure.
1537  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
1538  * \param[in]  readSplinesFromGlobal    bool controlling if we should read spline data from global memory
1539  * \param[in]  numGrids                 Number of grids to use. numGrids == 2 if Coulomb is perturbed.
1540  *
1541  * \return Pointer to the selected GPU kernel
1542  */
1543 inline auto selectGatherKernelPtr(const PmeGpu*  pmeGpu,
1544                                   ThreadsPerAtom threadsPerAtom,
1545                                   bool           readSplinesFromGlobal,
1546                                   const int      numGrids)
1548 {
1549     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1550
1551     if (readSplinesFromGlobal)
1552     {
1553         if (threadsPerAtom == ThreadsPerAtom::Order)
1554         {
1555             if (numGrids == 2)
1556             {
1557                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesThPerAtom4Dual;
1558             }
1559             else
1560             {
1561                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesThPerAtom4Single;
1562             }
1563         }
1564         else
1565         {
1566             if (numGrids == 2)
1567             {
1568                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesDual;
1569             }
1570             else
1571             {
1572                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesSingle;
1573             }
1574         }
1575     }
1576     else
1577     {
1578         if (threadsPerAtom == ThreadsPerAtom::Order)
1579         {
1580             if (numGrids == 2)
1581             {
1582                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelThPerAtom4Dual;
1583             }
1584             else
1585             {
1586                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelThPerAtom4Single;
1587             }
1588         }
1589         else
1590         {
1591             if (numGrids == 2)
1592             {
1593                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelDual;
1594             }
1595             else
1596             {
1597                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelSingle;
1598             }
1599         }
1600     }
1601     return kernelPtr;
1602 }
1603
1604 void pme_gpu_gather(PmeGpu* pmeGpu, real** h_grids, const float lambda)
1605 {
1606     GMX_ASSERT(
1607             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
1608             "Only one (normal Coulomb PME) or two (FEP Coulomb PME) PME grids can be used on GPU");
1609
1610     const auto& settings = pmeGpu->settings;
1611
1612     if (!settings.performGPUFFT || settings.copyAllOutputs)
1613     {
1614         for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
1615         {
1616             float* h_grid = const_cast<float*>(h_grids[gridIndex]);
1617             pme_gpu_copy_input_gather_grid(pmeGpu, h_grid, gridIndex);
1618         }
1619     }
1620
1621     if (settings.copyAllOutputs)
1622     {
1623         pme_gpu_copy_input_gather_atom_data(pmeGpu);
1624     }
1625
1626     /* copyAllOutputs (and thus readGlobal) is set when running unit tests */
1627     const bool   readGlobal = pmeGpu->settings.copyAllOutputs;
1628     const size_t blockSize  = pmeGpu->programHandle_->impl_->gatherWorkGroupSize;
1629     const int    order      = pmeGpu->common->pme_order;
1630     GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1631     const int threadsPerAtom =
1632             (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
1633     const bool recalculateSplines = pmeGpu->settings.recalculateSplines;
1634
1635     GMX_ASSERT(!GMX_GPU_OPENCL || pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::OrderSquared,
1636                "Only 16 threads per atom supported in OpenCL");
1637     GMX_ASSERT(!GMX_GPU_OPENCL || !recalculateSplines,
1638                "Recalculating splines not supported in OpenCL");
1639
1640     const int atomsPerBlock = blockSize / threadsPerAtom;
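         // As in pme_gpu_spread(): e.g. a gather work-group size of 256 (the actual value comes from
         // gatherWorkGroupSize above) gives 16 atoms per block with 16 threads per atom, or 64 atoms
         // per block with 4 threads per atom.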
1641
1642     GMX_ASSERT(!(c_pmeAtomDataBlockSize % atomsPerBlock),
1643                "inconsistent atom data padding vs. gathering block size");
1644
1645     const int blockCount = pmeGpu->nAtomsAlloc / atomsPerBlock;
1646     auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1647
1648     KernelLaunchConfig config;
1649     config.blockSize[0] = order;
1650     config.blockSize[1] = (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? 1 : order);
1651     config.blockSize[2] = atomsPerBlock;
1652     config.gridSize[0]  = dimGrid.first;
1653     config.gridSize[1]  = dimGrid.second;
1654
1655     // TODO test different cache configs
1656
1657     int                                timingId = gtPME_GATHER;
1658     PmeGpuProgramImpl::PmeKernelHandle kernelPtr =
1659             selectGatherKernelPtr(pmeGpu,
1660                                   pmeGpu->settings.threadsPerAtom,
1661                                   readGlobal || (!recalculateSplines),
1662                                   pmeGpu->common->ngrids);
1663     // TODO design kernel selection getters and make PmeGpu a friend of PmeGpuProgramImpl
1664
1665     pme_gpu_start_timing(pmeGpu, timingId);
1666     auto* timingEvent     = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1667     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1668     if (pmeGpu->common->ngrids == 1)
1669     {
1670         kernelParamsPtr->current.scale = 1.0;
1671     }
1672     else
1673     {
1674         kernelParamsPtr->current.scale = 1.0 - lambda;
1675     }
1676
1677 #if c_canEmbedBuffers
1678     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1679 #else
1680     const auto kernelArgs =
1681             prepareGpuKernelArguments(kernelPtr,
1682                                       config,
1683                                       kernelParamsPtr,
1684                                       &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_A],
1685                                       &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_B],
1686                                       &kernelParamsPtr->grid.d_realGrid[FEP_STATE_A],
1687                                       &kernelParamsPtr->grid.d_realGrid[FEP_STATE_B],
1688                                       &kernelParamsPtr->atoms.d_theta,
1689                                       &kernelParamsPtr->atoms.d_dtheta,
1690                                       &kernelParamsPtr->atoms.d_gridlineIndices,
1691                                       &kernelParamsPtr->atoms.d_forces);
1692 #endif
1693     launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME gather", kernelArgs);
1694     pme_gpu_stop_timing(pmeGpu, timingId);
1695
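         // With GPU force reduction the forces stay on the device and only an event is recorded for the
         // consumer to synchronize on; otherwise the forces are copied back to the host here.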
1696     if (pmeGpu->settings.useGpuForceReduction)
1697     {
1698         pmeGpu->archSpecific->pmeForcesReady.markEvent(pmeGpu->archSpecific->pmeStream_);
1699     }
1700     else
1701     {
1702         pme_gpu_copy_output_forces(pmeGpu);
1703     }
1704 }
1705
1706 void* pme_gpu_get_kernelparam_forces(const PmeGpu* pmeGpu)
1707 {
1708     if (pmeGpu && pmeGpu->kernelParams)
1709     {
1710         return pmeGpu->kernelParams->atoms.d_forces;
1711     }
1712     else
1713     {
1714         return nullptr;
1715     }
1716 }
1717
1718 void pme_gpu_set_kernelparam_coordinates(const PmeGpu* pmeGpu, DeviceBuffer<gmx::RVec> d_x)
1719 {
1720     GMX_ASSERT(pmeGpu && pmeGpu->kernelParams,
1721                "PME GPU device buffer can not be set in non-GPU builds or before the GPU PME was "
1722                "initialized.");
1723
1724     GMX_ASSERT(checkDeviceBuffer(d_x, pmeGpu->kernelParams->atoms.nAtoms),
1725                "The device-side buffer can not be set.");
1726
1727     pmeGpu->kernelParams->atoms.d_coordinates = d_x;
1728 }
1729
1730 GpuEventSynchronizer* pme_gpu_get_forces_ready_synchronizer(const PmeGpu* pmeGpu)
1731 {
1732     if (pmeGpu && pmeGpu->kernelParams)
1733     {
1734         return &pmeGpu->archSpecific->pmeForcesReady;
1735     }
1736     else
1737     {
1738         return nullptr;
1739     }
1740 }