Modernize PME GPU timing enums
[alexxy/gromacs.git] src/gromacs/ewald/pme_gpu_internal.cpp
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2016,2017,2018,2019,2020, by the GROMACS development team.
5  * Copyright (c) 2021, by the GROMACS development team, led by
6  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
7  * and including many others, as listed in the AUTHORS file in the
8  * top-level source directory and at http://www.gromacs.org.
9  *
10  * GROMACS is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public License
12  * as published by the Free Software Foundation; either version 2.1
13  * of the License, or (at your option) any later version.
14  *
15  * GROMACS is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with GROMACS; if not, see
22  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
23  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
24  *
25  * If you want to redistribute modifications to GROMACS, please
26  * consider that scientific software is very special. Version
27  * control is crucial - bugs must be traceable. We will be happy to
28  * consider code for inclusion in the official distribution, but
29  * derived work must not be called official GROMACS. Details are found
30  * in the README & COPYING files - if they are missing, get the
31  * official version at http://www.gromacs.org.
32  *
33  * To help us fund GROMACS development, we humbly ask that you cite
34  * the research papers on the package. Check out http://www.gromacs.org.
35  */
36
37 /*! \internal \file
38  *
39  * \brief This file contains internal function implementations
40  * for performing the PME calculations on GPU.
41  *
42  * Note that this file is compiled as regular C++ source in OpenCL builds, but
43  * it is treated as CUDA source in CUDA-enabled GPU builds.
44  *
45  * \author Aleksei Iupinov <a.yupinov@gmail.com>
46  * \ingroup module_ewald
47  */
48
49 #include "gmxpre.h"
50
51 #include "pme_gpu_internal.h"
52
53 #include "config.h"
54
55 #include <list>
56 #include <memory>
57 #include <string>
58
59 #include "gromacs/ewald/ewald_utils.h"
60 #include "gromacs/gpu_utils/device_context.h"
61 #include "gromacs/gpu_utils/device_stream.h"
62 #include "gromacs/gpu_utils/gpu_utils.h"
63 #include "gromacs/hardware/device_information.h"
64 #include "gromacs/math/invertmatrix.h"
65 #include "gromacs/math/units.h"
66 #include "gromacs/timing/gpu_timing.h"
67 #include "gromacs/utility/exceptions.h"
68 #include "gromacs/utility/fatalerror.h"
69 #include "gromacs/utility/gmxassert.h"
70 #include "gromacs/utility/logger.h"
71 #include "gromacs/utility/stringutil.h"
72
73 #if GMX_GPU_CUDA
74 #    include "gromacs/gpu_utils/pmalloc_cuda.h"
75
76 #    include "pme.cuh"
77 #elif GMX_GPU_OPENCL
78 #    include "gromacs/gpu_utils/gmxopencl.h"
79 #elif GMX_GPU_SYCL
80 #    include "gromacs/gpu_utils/syclutils.h"
81 #endif
82
83 #include "gromacs/ewald/pme.h"
84
85 #include "pme_gpu_3dfft.h"
86 #include "pme_gpu_calculate_splines.h"
87 #include "pme_gpu_constants.h"
88 #include "pme_gpu_program_impl.h"
89 #include "pme_gpu_timings.h"
90 #include "pme_gpu_types.h"
91 #include "pme_gpu_types_host.h"
92 #include "pme_gpu_types_host_impl.h"
93 #include "pme_grid.h"
94 #include "pme_internal.h"
95 #include "pme_solve.h"
96
97 /*! \brief
98  * CUDA only.
99  * Atom count above which it is advantageous to recalculate the splines in the gather kernel
100  * and to use fewer threads per atom in the spline and spread kernels.
101  */
102 constexpr int c_pmeGpuPerformanceAtomLimit = 23000;
103
104 /*! \internal \brief
105  * Wrapper for getting a pointer to the plain C++ part of the GPU kernel parameters structure.
106  *
107  * \param[in] pmeGpu  The PME GPU structure.
108  * \returns The pointer to the kernel parameters.
109  */
110 static PmeGpuKernelParamsBase* pme_gpu_get_kernel_params_base_ptr(const PmeGpu* pmeGpu)
111 {
112     // reinterpret_cast is needed because the derived CUDA structure is not known in this file
113     auto* kernelParamsPtr = reinterpret_cast<PmeGpuKernelParamsBase*>(pmeGpu->kernelParams.get());
114     return kernelParamsPtr;
115 }
116
117 /*! \brief
118  * Atom data block size (in terms of number of atoms).
119  * This is the least common multiple of the numbers of atoms processed by
120  * a single block/workgroup of the spread and gather kernels.
121  * The GPU atom data buffers must be padded, which means that
122  * the numbers of atoms used for determining the size of the memory
123  * allocation must be divisible by this.
124  */
125 constexpr int c_pmeAtomDataBlockSize = 64;
126
127 int pme_gpu_get_atom_data_block_size()
128 {
129     return c_pmeAtomDataBlockSize;
130 }
131
132 void pme_gpu_synchronize(const PmeGpu* pmeGpu)
133 {
134     pmeGpu->archSpecific->pmeStream_.synchronize();
135 }
136
137 void pme_gpu_alloc_energy_virial(PmeGpu* pmeGpu)
138 {
139     const size_t energyAndVirialSize = c_virialAndEnergyCount * sizeof(float);
140
141     GMX_ASSERT(
142             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
143             "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
144
145     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
146     {
147         allocateDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy[gridIndex],
148                              c_virialAndEnergyCount,
149                              pmeGpu->archSpecific->deviceContext_);
150         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_virialAndEnergy[gridIndex]), energyAndVirialSize);
151     }
152 }
153
154 void pme_gpu_free_energy_virial(PmeGpu* pmeGpu)
155 {
156     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
157     {
158         freeDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy[gridIndex]);
159         pfree(pmeGpu->staging.h_virialAndEnergy[gridIndex]);
160         pmeGpu->staging.h_virialAndEnergy[gridIndex] = nullptr;
161     }
162 }
163
164 void pme_gpu_clear_energy_virial(const PmeGpu* pmeGpu)
165 {
166     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
167     {
168         clearDeviceBufferAsync(&pmeGpu->kernelParams->constants.d_virialAndEnergy[gridIndex],
169                                0,
170                                c_virialAndEnergyCount,
171                                pmeGpu->archSpecific->pmeStream_);
172     }
173 }
174
175 void pme_gpu_realloc_and_copy_bspline_values(PmeGpu* pmeGpu, const int gridIndex)
176 {
177     GMX_ASSERT(
178             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
179             "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
180     GMX_ASSERT(gridIndex < pmeGpu->common->ngrids,
181                "Invalid combination of gridIndex and number of grids");
182
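    // The B-spline moduli for X, Y and Z are packed into a single device array;
    // these are the starting offsets of the three per-dimension sub-arrays.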
183     const int splineValuesOffset[DIM] = { 0,
184                                           pmeGpu->kernelParams->grid.realGridSize[XX],
185                                           pmeGpu->kernelParams->grid.realGridSize[XX]
186                                                   + pmeGpu->kernelParams->grid.realGridSize[YY] };
187     memcpy(&pmeGpu->kernelParams->grid.splineValuesOffset, &splineValuesOffset, sizeof(splineValuesOffset));
188
189     const int newSplineValuesSize = pmeGpu->kernelParams->grid.realGridSize[XX]
190                                     + pmeGpu->kernelParams->grid.realGridSize[YY]
191                                     + pmeGpu->kernelParams->grid.realGridSize[ZZ];
192     const bool shouldRealloc = (newSplineValuesSize > pmeGpu->archSpecific->splineValuesSize[gridIndex]);
193     reallocateDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli[gridIndex],
194                            newSplineValuesSize,
195                            &pmeGpu->archSpecific->splineValuesSize[gridIndex],
196                            &pmeGpu->archSpecific->splineValuesCapacity[gridIndex],
197                            pmeGpu->archSpecific->deviceContext_);
198     if (shouldRealloc)
199     {
200         /* Reallocate the host buffer */
201         pfree(pmeGpu->staging.h_splineModuli[gridIndex]);
202         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_splineModuli[gridIndex]),
203                 newSplineValuesSize * sizeof(float));
204     }
205     for (int i = 0; i < DIM; i++)
206     {
207         memcpy(pmeGpu->staging.h_splineModuli[gridIndex] + splineValuesOffset[i],
208                pmeGpu->common->bsp_mod[i].data(),
209                pmeGpu->common->bsp_mod[i].size() * sizeof(float));
210     }
211     /* TODO: pin original buffer instead! */
212     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli[gridIndex],
213                        pmeGpu->staging.h_splineModuli[gridIndex],
214                        0,
215                        newSplineValuesSize,
216                        pmeGpu->archSpecific->pmeStream_,
217                        pmeGpu->settings.transferKind,
218                        nullptr);
219 }
220
221 void pme_gpu_free_bspline_values(const PmeGpu* pmeGpu)
222 {
223     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
224     {
225         pfree(pmeGpu->staging.h_splineModuli[gridIndex]);
226         freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli[gridIndex]);
227     }
228 }
229
230 void pme_gpu_realloc_forces(PmeGpu* pmeGpu)
231 {
232     const size_t newForcesSize = pmeGpu->nAtomsAlloc;
233     GMX_ASSERT(newForcesSize > 0, "Bad number of atoms in PME GPU");
234     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces,
235                            newForcesSize,
236                            &pmeGpu->archSpecific->forcesSize,
237                            &pmeGpu->archSpecific->forcesSizeAlloc,
238                            pmeGpu->archSpecific->deviceContext_);
239     pmeGpu->staging.h_forces.reserveWithPadding(pmeGpu->nAtomsAlloc);
240     pmeGpu->staging.h_forces.resizeWithPadding(pmeGpu->kernelParams->atoms.nAtoms);
241 }
242
243 void pme_gpu_free_forces(const PmeGpu* pmeGpu)
244 {
245     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces);
246 }
247
248 void pme_gpu_copy_input_forces(PmeGpu* pmeGpu)
249 {
250     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
251     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces,
252                        pmeGpu->staging.h_forces.data(),
253                        0,
254                        pmeGpu->kernelParams->atoms.nAtoms,
255                        pmeGpu->archSpecific->pmeStream_,
256                        pmeGpu->settings.transferKind,
257                        nullptr);
258 }
259
260 void pme_gpu_copy_output_forces(PmeGpu* pmeGpu)
261 {
262     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
263     copyFromDeviceBuffer(pmeGpu->staging.h_forces.data(),
264                          &pmeGpu->kernelParams->atoms.d_forces,
265                          0,
266                          pmeGpu->kernelParams->atoms.nAtoms,
267                          pmeGpu->archSpecific->pmeStream_,
268                          pmeGpu->settings.transferKind,
269                          nullptr);
270 }
271
272 void pme_gpu_realloc_and_copy_input_coefficients(const PmeGpu* pmeGpu,
273                                                  const float*  h_coefficients,
274                                                  const int     gridIndex)
275 {
276     GMX_ASSERT(h_coefficients, "Bad host-side charge buffer in PME GPU");
277     const size_t newCoefficientsSize = pmeGpu->nAtomsAlloc;
278     GMX_ASSERT(newCoefficientsSize > 0, "Bad number of atoms in PME GPU");
279     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex],
280                            newCoefficientsSize,
281                            &pmeGpu->archSpecific->coefficientsSize[gridIndex],
282                            &pmeGpu->archSpecific->coefficientsCapacity[gridIndex],
283                            pmeGpu->archSpecific->deviceContext_);
284     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex],
285                        const_cast<float*>(h_coefficients),
286                        0,
287                        pmeGpu->kernelParams->atoms.nAtoms,
288                        pmeGpu->archSpecific->pmeStream_,
289                        pmeGpu->settings.transferKind,
290                        nullptr);
291
292     const size_t paddingIndex = pmeGpu->kernelParams->atoms.nAtoms;
293     const size_t paddingCount = pmeGpu->nAtomsAlloc - paddingIndex;
294     if (paddingCount > 0)
295     {
296         clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex],
297                                paddingIndex,
298                                paddingCount,
299                                pmeGpu->archSpecific->pmeStream_);
300     }
301 }
302
303 void pme_gpu_free_coefficients(const PmeGpu* pmeGpu)
304 {
305     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
306     {
307         freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex]);
308     }
309 }
310
311 void pme_gpu_realloc_spline_data(PmeGpu* pmeGpu)
312 {
313     const int order             = pmeGpu->common->pme_order;
314     const int newSplineDataSize = DIM * order * pmeGpu->nAtomsAlloc;
315     GMX_ASSERT(newSplineDataSize > 0, "Bad number of atoms in PME GPU");
316     /* Two arrays of the same size */
317     const bool shouldRealloc        = (newSplineDataSize > pmeGpu->archSpecific->splineDataSize);
318     int        currentSizeTemp      = pmeGpu->archSpecific->splineDataSize;
319     int        currentSizeTempAlloc = pmeGpu->archSpecific->splineDataSizeAlloc;
320     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta,
321                            newSplineDataSize,
322                            &currentSizeTemp,
323                            &currentSizeTempAlloc,
324                            pmeGpu->archSpecific->deviceContext_);
325     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta,
326                            newSplineDataSize,
327                            &pmeGpu->archSpecific->splineDataSize,
328                            &pmeGpu->archSpecific->splineDataSizeAlloc,
329                            pmeGpu->archSpecific->deviceContext_);
330     // the host side reallocation
331     if (shouldRealloc)
332     {
333         pfree(pmeGpu->staging.h_theta);
334         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_theta), newSplineDataSize * sizeof(float));
335         pfree(pmeGpu->staging.h_dtheta);
336         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_dtheta), newSplineDataSize * sizeof(float));
337     }
338 }
339
340 void pme_gpu_free_spline_data(const PmeGpu* pmeGpu)
341 {
342     /* Two arrays of the same size */
343     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta);
344     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta);
345     pfree(pmeGpu->staging.h_theta);
346     pfree(pmeGpu->staging.h_dtheta);
347 }
348
349 void pme_gpu_realloc_grid_indices(PmeGpu* pmeGpu)
350 {
351     const size_t newIndicesSize = DIM * pmeGpu->nAtomsAlloc;
352     GMX_ASSERT(newIndicesSize > 0, "Bad number of atoms in PME GPU");
353     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices,
354                            newIndicesSize,
355                            &pmeGpu->archSpecific->gridlineIndicesSize,
356                            &pmeGpu->archSpecific->gridlineIndicesSizeAlloc,
357                            pmeGpu->archSpecific->deviceContext_);
358     pfree(pmeGpu->staging.h_gridlineIndices);
359     pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_gridlineIndices), newIndicesSize * sizeof(int));
360 }
361
362 void pme_gpu_free_grid_indices(const PmeGpu* pmeGpu)
363 {
364     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices);
365     pfree(pmeGpu->staging.h_gridlineIndices);
366 }
367
368 void pme_gpu_realloc_grids(PmeGpu* pmeGpu)
369 {
370     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
371
372     const int newRealGridSize = kernelParamsPtr->grid.realGridSizePadded[XX]
373                                 * kernelParamsPtr->grid.realGridSizePadded[YY]
374                                 * kernelParamsPtr->grid.realGridSizePadded[ZZ];
375     const int newComplexGridSize = kernelParamsPtr->grid.complexGridSizePadded[XX]
376                                    * kernelParamsPtr->grid.complexGridSizePadded[YY]
377                                    * kernelParamsPtr->grid.complexGridSizePadded[ZZ] * 2;
378     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
379     {
380         // Multiplied by 2 because we count complex grid size for complex numbers, but all allocations/pointers are float
381         if (pmeGpu->archSpecific->performOutOfPlaceFFT)
382         {
383             /* 2 separate grids */
384             reallocateDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid[gridIndex],
385                                    newComplexGridSize,
386                                    &pmeGpu->archSpecific->complexGridSize[gridIndex],
387                                    &pmeGpu->archSpecific->complexGridCapacity[gridIndex],
388                                    pmeGpu->archSpecific->deviceContext_);
389             reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid[gridIndex],
390                                    newRealGridSize,
391                                    &pmeGpu->archSpecific->realGridSize[gridIndex],
392                                    &pmeGpu->archSpecific->realGridCapacity[gridIndex],
393                                    pmeGpu->archSpecific->deviceContext_);
394         }
395         else
396         {
397             /* A single buffer so that any grid will fit */
398             const int newGridsSize = std::max(newRealGridSize, newComplexGridSize);
399             reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid[gridIndex],
400                                    newGridsSize,
401                                    &pmeGpu->archSpecific->realGridSize[gridIndex],
402                                    &pmeGpu->archSpecific->realGridCapacity[gridIndex],
403                                    pmeGpu->archSpecific->deviceContext_);
404             kernelParamsPtr->grid.d_fourierGrid[gridIndex] = kernelParamsPtr->grid.d_realGrid[gridIndex];
405             pmeGpu->archSpecific->complexGridSize[gridIndex] =
406                     pmeGpu->archSpecific->realGridSize[gridIndex];
407             // the size might get used later for copying the grid
408         }
409     }
410 }
411
412 void pme_gpu_free_grids(const PmeGpu* pmeGpu)
413 {
414     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
415     {
416         if (pmeGpu->archSpecific->performOutOfPlaceFFT)
417         {
418             freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_fourierGrid[gridIndex]);
419         }
420         freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid[gridIndex]);
421     }
422 }
423
424 void pme_gpu_clear_grids(const PmeGpu* pmeGpu)
425 {
426     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
427     {
428         clearDeviceBufferAsync(&pmeGpu->kernelParams->grid.d_realGrid[gridIndex],
429                                0,
430                                pmeGpu->archSpecific->realGridSize[gridIndex],
431                                pmeGpu->archSpecific->pmeStream_);
432     }
433 }
434
435 void pme_gpu_realloc_and_copy_fract_shifts(PmeGpu* pmeGpu)
436 {
437     pme_gpu_free_fract_shifts(pmeGpu);
438
439     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
440
441     const int nx                  = kernelParamsPtr->grid.realGridSize[XX];
442     const int ny                  = kernelParamsPtr->grid.realGridSize[YY];
443     const int nz                  = kernelParamsPtr->grid.realGridSize[ZZ];
444     const int cellCount           = c_pmeNeighborUnitcellCount;
445     const int gridDataOffset[DIM] = { 0, cellCount * nx, cellCount * (nx + ny) };
446
447     memcpy(kernelParamsPtr->grid.tablesOffsets, &gridDataOffset, sizeof(gridDataOffset));
448
449     const int newFractShiftsSize = cellCount * (nx + ny + nz);
450
451     initParamLookupTable(&kernelParamsPtr->grid.d_fractShiftsTable,
452                          &kernelParamsPtr->fractShiftsTableTexture,
453                          pmeGpu->common->fsh.data(),
454                          newFractShiftsSize,
455                          pmeGpu->archSpecific->deviceContext_);
456
457     initParamLookupTable(&kernelParamsPtr->grid.d_gridlineIndicesTable,
458                          &kernelParamsPtr->gridlineIndicesTableTexture,
459                          pmeGpu->common->nn.data(),
460                          newFractShiftsSize,
461                          pmeGpu->archSpecific->deviceContext_);
462 }
463
464 void pme_gpu_free_fract_shifts(const PmeGpu* pmeGpu)
465 {
466     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
467 #if GMX_GPU_CUDA
468     destroyParamLookupTable(&kernelParamsPtr->grid.d_fractShiftsTable,
469                             kernelParamsPtr->fractShiftsTableTexture);
470     destroyParamLookupTable(&kernelParamsPtr->grid.d_gridlineIndicesTable,
471                             kernelParamsPtr->gridlineIndicesTableTexture);
472 #elif GMX_GPU_OPENCL || GMX_GPU_SYCL
473     freeDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable);
474     freeDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable);
475 #endif
476 }
477
478 bool pme_gpu_stream_query(const PmeGpu* pmeGpu)
479 {
480     return haveStreamTasksCompleted(pmeGpu->archSpecific->pmeStream_);
481 }
482
483 void pme_gpu_copy_input_gather_grid(const PmeGpu* pmeGpu, const float* h_grid, const int gridIndex)
484 {
485     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid[gridIndex],
486                        h_grid,
487                        0,
488                        pmeGpu->archSpecific->realGridSize[gridIndex],
489                        pmeGpu->archSpecific->pmeStream_,
490                        pmeGpu->settings.transferKind,
491                        nullptr);
492 }
493
494 void pme_gpu_copy_output_spread_grid(const PmeGpu* pmeGpu, float* h_grid, const int gridIndex)
495 {
496     copyFromDeviceBuffer(h_grid,
497                          &pmeGpu->kernelParams->grid.d_realGrid[gridIndex],
498                          0,
499                          pmeGpu->archSpecific->realGridSize[gridIndex],
500                          pmeGpu->archSpecific->pmeStream_,
501                          pmeGpu->settings.transferKind,
502                          nullptr);
503     pmeGpu->archSpecific->syncSpreadGridD2H.markEvent(pmeGpu->archSpecific->pmeStream_);
504 }
505
506 void pme_gpu_copy_output_spread_atom_data(const PmeGpu* pmeGpu)
507 {
508     const size_t splinesCount    = DIM * pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order;
509     auto*        kernelParamsPtr = pmeGpu->kernelParams.get();
510     copyFromDeviceBuffer(pmeGpu->staging.h_dtheta,
511                          &kernelParamsPtr->atoms.d_dtheta,
512                          0,
513                          splinesCount,
514                          pmeGpu->archSpecific->pmeStream_,
515                          pmeGpu->settings.transferKind,
516                          nullptr);
517     copyFromDeviceBuffer(pmeGpu->staging.h_theta,
518                          &kernelParamsPtr->atoms.d_theta,
519                          0,
520                          splinesCount,
521                          pmeGpu->archSpecific->pmeStream_,
522                          pmeGpu->settings.transferKind,
523                          nullptr);
524     copyFromDeviceBuffer(pmeGpu->staging.h_gridlineIndices,
525                          &kernelParamsPtr->atoms.d_gridlineIndices,
526                          0,
527                          kernelParamsPtr->atoms.nAtoms * DIM,
528                          pmeGpu->archSpecific->pmeStream_,
529                          pmeGpu->settings.transferKind,
530                          nullptr);
531 }
532
533 void pme_gpu_copy_input_gather_atom_data(const PmeGpu* pmeGpu)
534 {
535     const size_t splinesCount    = DIM * pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order;
536     auto*        kernelParamsPtr = pmeGpu->kernelParams.get();
537
538     // TODO: could clear only the padding and not the whole thing, but this is test-exclusive code anyway
539     clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_gridlineIndices,
540                            0,
541                            pmeGpu->nAtomsAlloc * DIM,
542                            pmeGpu->archSpecific->pmeStream_);
543     clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_dtheta,
544                            0,
545                            pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
546                            pmeGpu->archSpecific->pmeStream_);
547     clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_theta,
548                            0,
549                            pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
550                            pmeGpu->archSpecific->pmeStream_);
551
552     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_dtheta,
553                        pmeGpu->staging.h_dtheta,
554                        0,
555                        splinesCount,
556                        pmeGpu->archSpecific->pmeStream_,
557                        pmeGpu->settings.transferKind,
558                        nullptr);
559     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_theta,
560                        pmeGpu->staging.h_theta,
561                        0,
562                        splinesCount,
563                        pmeGpu->archSpecific->pmeStream_,
564                        pmeGpu->settings.transferKind,
565                        nullptr);
566     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_gridlineIndices,
567                        pmeGpu->staging.h_gridlineIndices,
568                        0,
569                        kernelParamsPtr->atoms.nAtoms * DIM,
570                        pmeGpu->archSpecific->pmeStream_,
571                        pmeGpu->settings.transferKind,
572                        nullptr);
573 }
574
575 void pme_gpu_sync_spread_grid(const PmeGpu* pmeGpu)
576 {
577     pmeGpu->archSpecific->syncSpreadGridD2H.waitForEvent();
578 }
579
580 /*! \brief Internal GPU initialization for PME.
581  *
582  * \param[in]  pmeGpu         GPU PME data.
583  * \param[in]  deviceContext  GPU context.
584  * \param[in]  deviceStream   GPU stream.
585  */
586 static void pme_gpu_init_internal(PmeGpu* pmeGpu, const DeviceContext& deviceContext, const DeviceStream& deviceStream)
587 {
588     /* Allocate the target-specific structures */
589     pmeGpu->archSpecific.reset(new PmeGpuSpecific(deviceContext, deviceStream));
590     pmeGpu->kernelParams.reset(new PmeGpuKernelParams());
591
592     pmeGpu->archSpecific->performOutOfPlaceFFT = true;
593     /* This should give better performance, according to the cuFFT documentation.
594      * The performance seems to be the same though.
595      * TODO: PME could also try to pick up nice grid sizes (with factors of 2, 3, 5, 7).
596      */
597
598 #if GMX_GPU_CUDA
599     pmeGpu->maxGridWidthX = deviceContext.deviceInfo().prop.maxGridSize[0];
600 #else
601     // Use this path for any non-CUDA GPU acceleration
602     // TODO: is there really no global work-size limit in OpenCL?
603     pmeGpu->maxGridWidthX = INT32_MAX / 2;
604 #endif
605 }
606
607 void pme_gpu_reinit_3dfft(const PmeGpu* pmeGpu)
608 {
609     if (pme_gpu_settings(pmeGpu).performGPUFFT)
610     {
611         pmeGpu->archSpecific->fftSetup.resize(0);
612         for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
613         {
614             pmeGpu->archSpecific->fftSetup.push_back(std::make_unique<GpuParallel3dFft>(pmeGpu, gridIndex));
615         }
616     }
617 }
618
619 void pme_gpu_destroy_3dfft(const PmeGpu* pmeGpu)
620 {
621     pmeGpu->archSpecific->fftSetup.resize(0);
622 }
623
624 void pme_gpu_getEnergyAndVirial(const gmx_pme_t& pme, const float lambda, PmeOutput* output)
625 {
626     const PmeGpu* pmeGpu = pme.gpu;
627
628     GMX_ASSERT(lambda == 1.0 || pmeGpu->common->ngrids == 2,
629                "Invalid combination of lambda and number of grids");
630
631     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
632     {
633         for (int j = 0; j < c_virialAndEnergyCount; j++)
634         {
635             GMX_ASSERT(std::isfinite(pmeGpu->staging.h_virialAndEnergy[gridIndex][j]),
636                        "PME GPU produces incorrect energy/virial.");
637         }
638     }
639     for (int dim1 = 0; dim1 < DIM; dim1++)
640     {
641         for (int dim2 = 0; dim2 < DIM; dim2++)
642         {
643             output->coulombVirial_[dim1][dim2] = 0;
644         }
645     }
646     output->coulombEnergy_ = 0;
647     float scale            = 1.0;
648     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
649     {
650         if (pmeGpu->common->ngrids == 2)
651         {
652             scale = gridIndex == 0 ? (1.0 - lambda) : lambda;
653         }
654         output->coulombVirial_[XX][XX] +=
655                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][0];
656         output->coulombVirial_[YY][YY] +=
657                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][1];
658         output->coulombVirial_[ZZ][ZZ] +=
659                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][2];
660         output->coulombVirial_[XX][YY] +=
661                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][3];
662         output->coulombVirial_[YY][XX] +=
663                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][3];
664         output->coulombVirial_[XX][ZZ] +=
665                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][4];
666         output->coulombVirial_[ZZ][XX] +=
667                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][4];
668         output->coulombVirial_[YY][ZZ] +=
669                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][5];
670         output->coulombVirial_[ZZ][YY] +=
671                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][5];
672         output->coulombEnergy_ += scale * 0.5F * pmeGpu->staging.h_virialAndEnergy[gridIndex][6];
673     }
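    // With two grids the Coulomb energy is interpolated linearly in lambda,
    // E(lambda) = (1 - lambda) * E_A + lambda * E_B, so dE/dlambda reduces to E_B - E_A
    // (with the same 0.5 prefactor as the energy accumulation above).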
674     if (pmeGpu->common->ngrids > 1)
675     {
676         output->coulombDvdl_ = 0.5F
677                                * (pmeGpu->staging.h_virialAndEnergy[FEP_STATE_B][6]
678                                   - pmeGpu->staging.h_virialAndEnergy[FEP_STATE_A][6]);
679     }
680 }
681
682 /*! \brief Sets the force-related members in \p output
683  *
684  * \param[in]   pmeGpu      PME GPU data structure
685  * \param[out]  output      Pointer to PME output data structure
686  */
687 static void pme_gpu_getForceOutput(PmeGpu* pmeGpu, PmeOutput* output)
688 {
689     output->haveForceOutput_ = !pmeGpu->settings.useGpuForceReduction;
690     if (output->haveForceOutput_)
691     {
692         output->forces_ = pmeGpu->staging.h_forces;
693     }
694 }
695
696 PmeOutput pme_gpu_getOutput(const gmx_pme_t& pme, const bool computeEnergyAndVirial, const real lambdaQ)
697 {
698     PmeGpu* pmeGpu = pme.gpu;
699
700     PmeOutput output;
701
702     pme_gpu_getForceOutput(pmeGpu, &output);
703
704     if (computeEnergyAndVirial)
705     {
706         if (pme_gpu_settings(pmeGpu).performGPUSolve)
707         {
708             pme_gpu_getEnergyAndVirial(pme, lambdaQ, &output);
709         }
710         else
711         {
712             get_pme_ener_vir_q(pme.solve_work, pme.nthread, &output);
713         }
714     }
715     return output;
716 }
717
718 void pme_gpu_update_input_box(PmeGpu gmx_unused* pmeGpu, const matrix gmx_unused box)
719 {
720 #if GMX_DOUBLE
721     GMX_THROW(gmx::NotImplementedError("PME is implemented for single-precision only on GPU"));
722 #else
723     matrix scaledBox;
724     pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
725     auto* kernelParamsPtr              = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
726     kernelParamsPtr->current.boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
727     GMX_ASSERT(kernelParamsPtr->current.boxVolume != 0.0F, "Zero volume of the unit cell");
728     matrix recipBox;
729     gmx::invertBoxMatrix(scaledBox, recipBox);
730
731     /* The GPU recipBox is transposed as compared to the CPU recipBox.
732      * Spread uses matrix columns (while solve and gather use rows).
733      * There is no particular reason for this; it might be further rethought/optimized for better access patterns.
734      */
735     const real newRecipBox[DIM][DIM] = { { recipBox[XX][XX], recipBox[YY][XX], recipBox[ZZ][XX] },
736                                          { 0.0, recipBox[YY][YY], recipBox[ZZ][YY] },
737                                          { 0.0, 0.0, recipBox[ZZ][ZZ] } };
738     memcpy(kernelParamsPtr->current.recipBox, newRecipBox, sizeof(matrix));
739 #endif
740 }
741
742 /*! \brief \libinternal
743  * (Re-)initializes all the PME GPU data related to the grid size and cut-off.
744  *
745  * \param[in] pmeGpu            The PME GPU structure.
746  */
747 static void pme_gpu_reinit_grids(PmeGpu* pmeGpu)
748 {
749     auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
750
751     GMX_ASSERT(
752             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
753             "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
754
755     kernelParamsPtr->grid.ewaldFactor =
756             (M_PI * M_PI) / (pmeGpu->common->ewaldcoeff_q * pmeGpu->common->ewaldcoeff_q);
757     /* The grid size variants */
758     for (int i = 0; i < DIM; i++)
759     {
760         kernelParamsPtr->grid.realGridSize[i] = pmeGpu->common->nk[i];
761         kernelParamsPtr->grid.realGridSizeFP[i] =
762                 static_cast<float>(kernelParamsPtr->grid.realGridSize[i]);
763         kernelParamsPtr->grid.realGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
764
765         // The complex grid currently uses no padding;
766         // if it starts to do so, then another test should be added for that
767         kernelParamsPtr->grid.complexGridSize[i]       = kernelParamsPtr->grid.realGridSize[i];
768         kernelParamsPtr->grid.complexGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
769     }
770     /* FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
771     if (!pme_gpu_settings(pmeGpu).performGPUFFT)
772     {
773         // This allows the GPU spread grid and the CPU fftgrid to have the same layout, so that we can copy the data directly
774         kernelParamsPtr->grid.realGridSizePadded[ZZ] =
775                 (kernelParamsPtr->grid.realGridSize[ZZ] / 2 + 1) * 2;
776     }
777     /* GPU FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
778     kernelParamsPtr->grid.complexGridSize[ZZ] /= 2;
779     kernelParamsPtr->grid.complexGridSize[ZZ]++;
780     kernelParamsPtr->grid.complexGridSizePadded[ZZ] = kernelParamsPtr->grid.complexGridSize[ZZ];
781
782     pme_gpu_realloc_and_copy_fract_shifts(pmeGpu);
783     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
784     {
785         pme_gpu_realloc_and_copy_bspline_values(pmeGpu, gridIndex);
786     }
787
788     pme_gpu_realloc_grids(pmeGpu);
789     pme_gpu_reinit_3dfft(pmeGpu);
790 }
791
792 /* Several GPU functions that refer to the CPU PME data live here.
793  * We would like to keep these away from the GPU-framework specific code for clarity,
794  * and to avoid compilation issues with MPI.
795  */
796
797 /*! \brief \libinternal
798  * Copies everything useful from the PME CPU to the PME GPU structure.
799  * The goal is to minimize interaction with the PME CPU structure in the GPU code.
800  *
801  * \param[in] pme         The PME structure.
802  */
803 static void pme_gpu_copy_common_data_from(const gmx_pme_t* pme)
804 {
805     /* TODO: Consider refactoring the CPU PME code to use the same structure,
806      * so that this function becomes 2 lines */
807     PmeGpu* pmeGpu               = pme->gpu;
808     pmeGpu->common->ngrids       = pme->bFEP_q ? 2 : 1;
809     pmeGpu->common->epsilon_r    = pme->epsilon_r;
810     pmeGpu->common->ewaldcoeff_q = pme->ewaldcoeff_q;
811     pmeGpu->common->nk[XX]       = pme->nkx;
812     pmeGpu->common->nk[YY]       = pme->nky;
813     pmeGpu->common->nk[ZZ]       = pme->nkz;
814     pmeGpu->common->pme_order    = pme->pme_order;
815     if (pmeGpu->common->pme_order != c_pmeGpuOrder)
816     {
817         GMX_THROW(gmx::NotImplementedError("pme_order != 4 is not implemented!"));
818     }
819     for (int i = 0; i < DIM; i++)
820     {
821         pmeGpu->common->bsp_mod[i].assign(pme->bsp_mod[i], pme->bsp_mod[i] + pmeGpu->common->nk[i]);
822     }
823     const int cellCount = c_pmeNeighborUnitcellCount;
824     pmeGpu->common->fsh.resize(0);
825     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshx, pme->fshx + cellCount * pme->nkx);
826     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshy, pme->fshy + cellCount * pme->nky);
827     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshz, pme->fshz + cellCount * pme->nkz);
828     pmeGpu->common->nn.resize(0);
829     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnx, pme->nnx + cellCount * pme->nkx);
830     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nny, pme->nny + cellCount * pme->nky);
831     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnz, pme->nnz + cellCount * pme->nkz);
832     pmeGpu->common->runMode       = pme->runMode;
833     pmeGpu->common->isRankPmeOnly = !pme->bPPnode;
834     pmeGpu->common->boxScaler     = pme->boxScaler;
835 }
836
837 /*! \libinternal \brief
838  * Uses heuristics to select the best-performing PME spread and gather kernels.
839  *
840  * \param[in,out] pmeGpu         The PME GPU structure.
841  */
842 static void pme_gpu_select_best_performing_pme_spreadgather_kernels(PmeGpu* pmeGpu)
843 {
844     if (GMX_GPU_CUDA && pmeGpu->kernelParams->atoms.nAtoms > c_pmeGpuPerformanceAtomLimit)
845     {
846         pmeGpu->settings.threadsPerAtom     = ThreadsPerAtom::Order;
847         pmeGpu->settings.recalculateSplines = true;
848     }
849     else
850     {
851         pmeGpu->settings.threadsPerAtom     = ThreadsPerAtom::OrderSquared;
852         pmeGpu->settings.recalculateSplines = false;
853     }
854 }
855
856
857 /*! \libinternal \brief
858  * Initializes the PME GPU data at the beginning of the run.
859  * TODO: this should become PmeGpu::PmeGpu()
860  *
861  * \param[in,out] pme            The PME structure.
862  * \param[in]     deviceContext  The GPU context.
863  * \param[in]     deviceStream   The GPU stream.
864  * \param[in,out] pmeGpuProgram  The handle to the program/kernel data created outside (e.g. in unit tests/runner)
865  */
866 static void pme_gpu_init(gmx_pme_t*           pme,
867                          const DeviceContext& deviceContext,
868                          const DeviceStream&  deviceStream,
869                          const PmeGpuProgram* pmeGpuProgram)
870 {
871     pme->gpu       = new PmeGpu();
872     PmeGpu* pmeGpu = pme->gpu;
873     changePinningPolicy(&pmeGpu->staging.h_forces, pme_get_pinning_policy());
874     pmeGpu->common = std::make_shared<PmeShared>();
875
876     /* These settings are set here for the whole run; dynamic ones are set in pme_gpu_reinit() */
877     /* A convenience variable. */
878     pmeGpu->settings.useDecomposition = (pme->nnodes != 1);
879     /* TODO: CPU gather with GPU spread is broken due to different theta/dtheta layout. */
880     pmeGpu->settings.performGPUGather = true;
881     // By default GPU-side reduction is off (explicitly set here for tests, otherwise reset per-step)
882     pmeGpu->settings.useGpuForceReduction = false;
883
884     pme_gpu_set_testing(pmeGpu, false);
885
886     GMX_ASSERT(pmeGpuProgram != nullptr, "GPU kernels must be already compiled");
887     pmeGpu->programHandle_ = pmeGpuProgram;
888
889     pmeGpu->initializedClfftLibrary_ = std::make_unique<gmx::ClfftInitializer>();
890
891     pme_gpu_init_internal(pmeGpu, deviceContext, deviceStream);
892
893     pme_gpu_copy_common_data_from(pme);
894     pme_gpu_alloc_energy_virial(pmeGpu);
895
896     GMX_ASSERT(pmeGpu->common->epsilon_r != 0.0F, "PME GPU: bad electrostatic coefficient");
897
898     auto* kernelParamsPtr               = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
899     kernelParamsPtr->constants.elFactor = gmx::c_one4PiEps0 / pmeGpu->common->epsilon_r;
900 }
901
902 void pme_gpu_get_real_grid_sizes(const PmeGpu* pmeGpu, gmx::IVec* gridSize, gmx::IVec* paddedGridSize)
903 {
904     GMX_ASSERT(gridSize != nullptr, "");
905     GMX_ASSERT(paddedGridSize != nullptr, "");
906     GMX_ASSERT(pmeGpu != nullptr, "");
907     auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
908     for (int i = 0; i < DIM; i++)
909     {
910         (*gridSize)[i]       = kernelParamsPtr->grid.realGridSize[i];
911         (*paddedGridSize)[i] = kernelParamsPtr->grid.realGridSizePadded[i];
912     }
913 }
914
915 void pme_gpu_reinit(gmx_pme_t*           pme,
916                     const DeviceContext* deviceContext,
917                     const DeviceStream*  deviceStream,
918                     const PmeGpuProgram* pmeGpuProgram)
919 {
920     GMX_ASSERT(pme != nullptr, "Need valid PME object");
921
922     if (!pme->gpu)
923     {
924         GMX_RELEASE_ASSERT(deviceContext != nullptr,
925                            "Device context can not be nullptr when setting up PME on GPU.");
926         GMX_RELEASE_ASSERT(deviceStream != nullptr,
927                            "Device stream can not be nullptr when setting up PME on GPU.");
928         /* First-time initialization */
929         pme_gpu_init(pme, *deviceContext, *deviceStream, pmeGpuProgram);
930     }
931     else
932     {
933         /* After this call nothing in the GPU code should refer to the gmx_pme_t *pme itself - until the next pme_gpu_reinit */
934         pme_gpu_copy_common_data_from(pme);
935     }
936     /* GPU FFT will only get used for a single rank. */
937     pme->gpu->settings.performGPUFFT =
938             (pme->gpu->common->runMode == PmeRunMode::GPU) && !pme->gpu->settings.useDecomposition;
939     pme->gpu->settings.performGPUSolve = (pme->gpu->common->runMode == PmeRunMode::GPU);
940
941     /* Reinit active timers */
942     pme_gpu_reinit_timings(pme->gpu);
943
944     pme_gpu_reinit_grids(pme->gpu);
945     // Note: if timing the reinit launch overhead becomes more relevant
946     // (e.g. with regular PP-PME re-balancing), we should pass wcycle here.
947     pme_gpu_reinit_computation(pme, nullptr);
948     /* Clear the previous box - doesn't hurt, and forces the PME CPU recipbox
949      * update for mixed mode on grid switch. TODO: use shared recipbox field.
950      */
951     std::memset(pme->gpu->common->previousBox, 0, sizeof(pme->gpu->common->previousBox));
952 }
953
954 void pme_gpu_destroy(PmeGpu* pmeGpu)
955 {
956     /* Free lots of data */
957     pme_gpu_free_energy_virial(pmeGpu);
958     pme_gpu_free_bspline_values(pmeGpu);
959     pme_gpu_free_forces(pmeGpu);
960     pme_gpu_free_coefficients(pmeGpu);
961     pme_gpu_free_spline_data(pmeGpu);
962     pme_gpu_free_grid_indices(pmeGpu);
963     pme_gpu_free_fract_shifts(pmeGpu);
964     pme_gpu_free_grids(pmeGpu);
965
966     pme_gpu_destroy_3dfft(pmeGpu);
967
968     delete pmeGpu;
969 }
970
971 void pme_gpu_reinit_atoms(PmeGpu* pmeGpu, const int nAtoms, const real* chargesA, const real* chargesB)
972 {
973     auto* kernelParamsPtr         = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
974     kernelParamsPtr->atoms.nAtoms = nAtoms;
975     const int  block_size         = pme_gpu_get_atom_data_block_size();
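    // Round the atom count up to the next multiple of the atom data block size,
    // so that the GPU atom buffers stay padded (see c_pmeAtomDataBlockSize).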
976     const int  nAtomsNewPadded    = ((nAtoms + block_size - 1) / block_size) * block_size;
977     const bool haveToRealloc      = (pmeGpu->nAtomsAlloc < nAtomsNewPadded);
978     pmeGpu->nAtomsAlloc           = nAtomsNewPadded;
979
980 #if GMX_DOUBLE
981     GMX_RELEASE_ASSERT(false, "Only single precision supported");
982     GMX_UNUSED_VALUE(chargesA);
        GMX_UNUSED_VALUE(chargesB);
983 #else
984     int gridIndex = 0;
985     /* This could also be gated on haveToRealloc, but the copy always needs to be performed */
986     pme_gpu_realloc_and_copy_input_coefficients(pmeGpu, reinterpret_cast<const float*>(chargesA), gridIndex);
987     gridIndex++;
988     if (chargesB != nullptr)
989     {
990         pme_gpu_realloc_and_copy_input_coefficients(
991                 pmeGpu, reinterpret_cast<const float*>(chargesB), gridIndex);
992     }
993     else
994     {
995         /* Fill the second set of coefficients with chargesA as well to be able to avoid
996          * conditionals in the GPU kernels */
997         /* FIXME: This should be avoided by making a separate templated version of the
998          * relevant kernel(s) (probably only pme_gather_kernel). That would require a
999          * reduction of the current number of templated parameters of that kernel. */
1000         pme_gpu_realloc_and_copy_input_coefficients(
1001                 pmeGpu, reinterpret_cast<const float*>(chargesA), gridIndex);
1002     }
1003 #endif
1004
1005     if (haveToRealloc)
1006     {
1007         pme_gpu_realloc_forces(pmeGpu);
1008         pme_gpu_realloc_spline_data(pmeGpu);
1009         pme_gpu_realloc_grid_indices(pmeGpu);
1010     }
1011     pme_gpu_select_best_performing_pme_spreadgather_kernels(pmeGpu);
1012 }
1013
1014 /*! \internal \brief
1015  * Returns raw timing event from the corresponding GpuRegionTimer (if timings are enabled).
1016  * In CUDA, the result can be a nullptr stub, per the GpuRegionTimer implementation.
1017  *
1018  * \param[in] pmeGpu         The PME GPU data structure.
1019  * \param[in] pmeStageId     The PME GPU stage, an index into the PmeStage enum (see src/gromacs/timing/gpu_timing.h)
1020  */
1021 static CommandEvent* pme_gpu_fetch_timing_event(const PmeGpu* pmeGpu, PmeStage pmeStageId)
1022 {
1023     CommandEvent* timingEvent = nullptr;
1024     if (pme_gpu_timings_enabled(pmeGpu))
1025     {
1026         GMX_ASSERT(pmeStageId < PmeStage::Count, "Wrong PME GPU timing event index");
1027         timingEvent = pmeGpu->archSpecific->timingEvents[pmeStageId].fetchNextEvent();
1028     }
1029     return timingEvent;
1030 }
1031
1032 void pme_gpu_3dfft(const PmeGpu* pmeGpu, gmx_fft_direction dir, const int grid_index)
1033 {
1034     PmeStage timerId = (dir == GMX_FFT_REAL_TO_COMPLEX) ? PmeStage::FftTransformR2C
1035                                                         : PmeStage::FftTransformC2R;
1036
1037     pme_gpu_start_timing(pmeGpu, timerId);
1038     pmeGpu->archSpecific->fftSetup[grid_index]->perform3dFft(
1039             dir, pme_gpu_fetch_timing_event(pmeGpu, timerId));
1040     pme_gpu_stop_timing(pmeGpu, timerId);
1041 }
1042
1043 /*! \brief
1044  * Given possibly large \p blockCount, returns a compact 1D or 2D grid for kernel scheduling,
1045  * to minimize number of unused blocks.
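 * For example, with maxGridWidthX = 65535 and blockCount = 100000 this gives
 * minRowCount = 2 and colCount = 50000, i.e. a 50000 x 2 grid with no unused blocks.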
1046  */
1047 std::pair<int, int> inline pmeGpuCreateGrid(const PmeGpu* pmeGpu, int blockCount)
1048 {
1049     // How many maximum widths in X do we need (hopefully just one)
1050     const int minRowCount = (blockCount + pmeGpu->maxGridWidthX - 1) / pmeGpu->maxGridWidthX;
1051     // Trying to make things even
1052     const int colCount = (blockCount + minRowCount - 1) / minRowCount;
1053     GMX_ASSERT((colCount * minRowCount - blockCount) >= 0, "pmeGpuCreateGrid: totally wrong");
1054     GMX_ASSERT((colCount * minRowCount - blockCount) < minRowCount,
1055                "pmeGpuCreateGrid: excessive blocks");
1056     return std::pair<int, int>(colCount, minRowCount);
1057 }
1058
1059 /*! \brief
1060  * Returns a pointer to appropriate spline and spread kernel based on the input bool values
1061  *
1062  * \param[in]  pmeGpu                   The PME GPU structure.
1063  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
1064  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
1065  * \param[in]  numGrids                 Number of grids to use. numGrids == 2 if Coulomb is perturbed.
1066  *
1067  * \return Pointer to CUDA kernel
1068  */
1069 static auto selectSplineAndSpreadKernelPtr(const PmeGpu*  pmeGpu,
1070                                            ThreadsPerAtom threadsPerAtom,
1071                                            bool           writeSplinesToGlobal,
1072                                            const int      numGrids)
1073 {
1074     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1075     if (writeSplinesToGlobal)
1076     {
1077         if (threadsPerAtom == ThreadsPerAtom::Order)
1078         {
1079             if (numGrids == 2)
1080             {
1081                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesThPerAtom4Dual;
1082             }
1083             else
1084             {
1085                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesThPerAtom4Single;
1086             }
1087         }
1088         else
1089         {
1090             if (numGrids == 2)
1091             {
1092                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesDual;
1093             }
1094             else
1095             {
1096                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesSingle;
1097             }
1098         }
1099     }
1100     else
1101     {
1102         if (threadsPerAtom == ThreadsPerAtom::Order)
1103         {
1104             if (numGrids == 2)
1105             {
1106                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Dual;
1107             }
1108             else
1109             {
1110                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Single;
1111             }
1112         }
1113         else
1114         {
1115             if (numGrids == 2)
1116             {
1117                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelDual;
1118             }
1119             else
1120             {
1121                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelSingle;
1122             }
1123         }
1124     }
1125
1126     return kernelPtr;
1127 }
1128
1129 /*! \brief
1130  * Returns a pointer to appropriate spline kernel based on the input bool values
1131  *
1132  * \param[in]  pmeGpu                   The PME GPU structure.
1133  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
1134  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
1135  * \param[in]  numGrids                 Number of grids to use. numGrids == 2 if Coulomb is perturbed.
1136  *
1137  * \return Pointer to CUDA kernel
1138  */
1139 static auto selectSplineKernelPtr(const PmeGpu*  pmeGpu,
1140                                   ThreadsPerAtom threadsPerAtom,
1141                                   bool gmx_unused writeSplinesToGlobal,
1142                                   const int       numGrids)
1143 {
1144     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1145     GMX_ASSERT(
1146             writeSplinesToGlobal,
1147             "Spline data should always be written to global memory when just calculating splines");
1148
1149     if (threadsPerAtom == ThreadsPerAtom::Order)
1150     {
1151         if (numGrids == 2)
1152         {
1153             kernelPtr = pmeGpu->programHandle_->impl_->splineKernelThPerAtom4Dual;
1154         }
1155         else
1156         {
1157             kernelPtr = pmeGpu->programHandle_->impl_->splineKernelThPerAtom4Single;
1158         }
1159     }
1160     else
1161     {
1162         if (numGrids == 2)
1163         {
1164             kernelPtr = pmeGpu->programHandle_->impl_->splineKernelDual;
1165         }
1166         else
1167         {
1168             kernelPtr = pmeGpu->programHandle_->impl_->splineKernelSingle;
1169         }
1170     }
1171     return kernelPtr;
1172 }
1173
1174 /*! \brief
1175  * Returns a pointer to appropriate spread kernel based on the input bool values
1176  *
1177  * \param[in]  pmeGpu                   The PME GPU structure.
1178  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
1179  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
1180  * \param[in]  numGrids                 Number of grids to use. numGrids == 2 if Coulomb is perturbed.
1181  *
1182  * \return Pointer to CUDA kernel
1183  */
1184 static auto selectSpreadKernelPtr(const PmeGpu*  pmeGpu,
1185                                   ThreadsPerAtom threadsPerAtom,
1186                                   bool           writeSplinesToGlobal,
1187                                   const int      numGrids)
1188 {
1189     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1190     if (writeSplinesToGlobal)
1191     {
1192         if (threadsPerAtom == ThreadsPerAtom::Order)
1193         {
1194             if (numGrids == 2)
1195             {
1196                 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelThPerAtom4Dual;
1197             }
1198             else
                 {
1199                 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelThPerAtom4Single;
1200             }
1201         }
1202         else
1203         {
1204             if (numGrids == 2)
1205             {
1206                 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelDual;
1207             }
1208             else
1209             {
1210                 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelSingle;
1211             }
1212         }
1213     }
1214     else
1215     {
1216         /* If we are not saving the spline data, we need to recalculate it
1217            using the spline-and-spread kernel */
1218         if (threadsPerAtom == ThreadsPerAtom::Order)
1219         {
1220             if (numGrids == 2)
1221             {
1222                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Dual;
1223             }
1224             else
1225             {
1226                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Single;
1227             }
1228         }
1229         else
1230         {
1231             if (numGrids == 2)
1232             {
1233                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelDual;
1234             }
1235             else
1236             {
1237                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelSingle;
1238             }
1239         }
1240     }
1241     return kernelPtr;
1242 }
1243
1244 void pme_gpu_spread(const PmeGpu*         pmeGpu,
1245                     GpuEventSynchronizer* xReadyOnDevice,
1246                     real**                h_grids,
1247                     bool                  computeSplines,
1248                     bool                  spreadCharges,
1249                     const real            lambda)
1250 {
1251     GMX_ASSERT(
1252             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
1253             "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
1254
1255     GMX_ASSERT(computeSplines || spreadCharges,
1256                "PME spline/spread kernel has invalid input (nothing to do)");
1257     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1258     GMX_ASSERT(kernelParamsPtr->atoms.nAtoms > 0, "No atom data in PME GPU spread");
1259
1260     const size_t blockSize = pmeGpu->programHandle_->impl_->spreadWorkGroupSize;
1261
1262     const int order = pmeGpu->common->pme_order;
1263     GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1264     const bool writeGlobal = pmeGpu->settings.copyAllOutputs;
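         // Each atom is handled by either order (4) or order*order (16) threads, matching the kernel layout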
1265     const int  threadsPerAtom =
1266             (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
1267     const bool recalculateSplines = pmeGpu->settings.recalculateSplines;
1268
1269     GMX_ASSERT(!GMX_GPU_OPENCL || pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::OrderSquared,
1270                "Only 16 threads per atom supported in OpenCL");
1271     GMX_ASSERT(!GMX_GPU_OPENCL || !recalculateSplines,
1272                "Recalculating splines not supported in OpenCL");
1273
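         // With a fixed work-group size, fewer threads per atom means more atoms packed into each block
         // (a hypothetical illustration: a 256-thread work group gives 256/16 = 16 or 256/4 = 64 atoms per block)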
1274     const int atomsPerBlock = blockSize / threadsPerAtom;
1275
1276     // TODO: pick smaller block size in runtime if needed
1277     // (e.g. on 660 Ti where 50% occupancy is ~25% faster than 100% occupancy with RNAse (~17.8k atoms))
1278     // If doing so, change atomsPerBlock in the kernels as well.
1279     // TODO: test varying block sizes on modern architectures as well
1280     // TODO: also consider using cudaFuncSetCacheConfig() to prefer shared memory on older architectures
1281     // (mostly for the spline data)
1282     GMX_ASSERT(!(c_pmeAtomDataBlockSize % atomsPerBlock),
1283                "inconsistent atom data padding vs. spreading block size");
1284
1285     // Ensure that coordinates are ready on the device before launching spread;
1286     // only needed with CUDA on PP+PME ranks; separate PME ranks, unit tests and OpenCL
1287     // all use a single stream (hence xReadyOnDevice == nullptr).
1288     GMX_ASSERT(!GMX_GPU_CUDA || xReadyOnDevice != nullptr || pmeGpu->common->isRankPmeOnly
1289                        || pme_gpu_settings(pmeGpu).copyAllOutputs,
1290                "Need a valid coordinate synchronizer on PP+PME ranks with CUDA.");
1291
1292     if (xReadyOnDevice)
1293     {
1294         xReadyOnDevice->enqueueWaitEvent(pmeGpu->archSpecific->pmeStream_);
1295     }
1296
1297     const int blockCount = pmeGpu->nAtomsAlloc / atomsPerBlock;
1298     auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
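         // pmeGpuCreateGrid may split the block count over two launch-grid dimensions to respect per-dimension limits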
1299
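         // The charge scaling factor is 1 for plain electrostatics; with FEP (two grids) the kernels
         // receive (1 - lambda), the weight of end state A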
1300     if (pmeGpu->common->ngrids == 1)
1301     {
1302         kernelParamsPtr->current.scale = 1.0;
1303     }
1304     else
1305     {
1306         kernelParamsPtr->current.scale = 1.0 - lambda;
1307     }
1308
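         // Block layout: x spans the spline order, y is 1 (order threads per atom) or order
         // (order^2 threads per atom), and z indexes the atoms within the block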
1309     KernelLaunchConfig config;
1310     config.blockSize[0] = order;
1311     config.blockSize[1] = (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? 1 : order);
1312     config.blockSize[2] = atomsPerBlock;
1313     config.gridSize[0]  = dimGrid.first;
1314     config.gridSize[1]  = dimGrid.second;
1315
1316     PmeStage                           timingId;
1317     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1318     if (computeSplines)
1319     {
1320         if (spreadCharges)
1321         {
1322             timingId  = PmeStage::SplineAndSpread;
1323             kernelPtr = selectSplineAndSpreadKernelPtr(pmeGpu,
1324                                                        pmeGpu->settings.threadsPerAtom,
1325                                                        writeGlobal || (!recalculateSplines),
1326                                                        pmeGpu->common->ngrids);
1327         }
1328         else
1329         {
1330             timingId  = PmeStage::Spline;
1331             kernelPtr = selectSplineKernelPtr(pmeGpu,
1332                                               pmeGpu->settings.threadsPerAtom,
1333                                               writeGlobal || (!recalculateSplines),
1334                                               pmeGpu->common->ngrids);
1335         }
1336     }
1337     else
1338     {
1339         timingId  = PmeStage::Spread;
1340         kernelPtr = selectSpreadKernelPtr(pmeGpu,
1341                                           pmeGpu->settings.threadsPerAtom,
1342                                           writeGlobal || (!recalculateSplines),
1343                                           pmeGpu->common->ngrids);
1344     }
1345
1346
1347     pme_gpu_start_timing(pmeGpu, timingId);
1348     auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
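         // When the device buffers can be embedded in the kernel parameter structure, passing the structure
         // is sufficient; otherwise every buffer has to be supplied as an explicit kernel argument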
1349 #if c_canEmbedBuffers
1350     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1351 #else
1352     const auto kernelArgs =
1353             prepareGpuKernelArguments(kernelPtr,
1354                                       config,
1355                                       kernelParamsPtr,
1356                                       &kernelParamsPtr->atoms.d_theta,
1357                                       &kernelParamsPtr->atoms.d_dtheta,
1358                                       &kernelParamsPtr->atoms.d_gridlineIndices,
1359                                       &kernelParamsPtr->grid.d_realGrid[FEP_STATE_A],
1360                                       &kernelParamsPtr->grid.d_realGrid[FEP_STATE_B],
1361                                       &kernelParamsPtr->grid.d_fractShiftsTable,
1362                                       &kernelParamsPtr->grid.d_gridlineIndicesTable,
1363                                       &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_A],
1364                                       &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_B],
1365                                       &kernelParamsPtr->atoms.d_coordinates);
1366 #endif
1367
1368     launchGpuKernel(
1369             kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME spline/spread", kernelArgs);
1370     pme_gpu_stop_timing(pmeGpu, timingId);
1371
1372     const auto& settings    = pmeGpu->settings;
1373     const bool copyBackGrid = spreadCharges && (!settings.performGPUFFT || settings.copyAllOutputs);
1374     if (copyBackGrid)
1375     {
1376         for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
1377         {
1378             float* h_grid = h_grids[gridIndex];
1379             pme_gpu_copy_output_spread_grid(pmeGpu, h_grid, gridIndex);
1380         }
1381     }
1382     const bool copyBackAtomData =
1383             computeSplines && (!settings.performGPUGather || settings.copyAllOutputs);
1384     if (copyBackAtomData)
1385     {
1386         pme_gpu_copy_output_spread_atom_data(pmeGpu);
1387     }
1388 }
1389
1390 void pme_gpu_solve(const PmeGpu* pmeGpu,
1391                    const int     gridIndex,
1392                    t_complex*    h_grid,
1393                    GridOrdering  gridOrdering,
1394                    bool          computeEnergyAndVirial)
1395 {
1396     GMX_ASSERT(
1397             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
1398             "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
1399     GMX_ASSERT(gridIndex < pmeGpu->common->ngrids,
1400                "Invalid combination of gridIndex and number of grids");
1401
1402     const auto& settings               = pmeGpu->settings;
1403     const bool  copyInputAndOutputGrid = !settings.performGPUFFT || settings.copyAllOutputs;
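         // If the FFT stage does not run on the GPU (or all outputs are copied for testing), the complex
         // grid has to be staged to the device before solving and copied back afterwards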
1404
1405     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1406
1407     float* h_gridFloat = reinterpret_cast<float*>(h_grid);
1408     if (copyInputAndOutputGrid)
1409     {
1410         copyToDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid[gridIndex],
1411                            h_gridFloat,
1412                            0,
1413                            pmeGpu->archSpecific->complexGridSize[gridIndex],
1414                            pmeGpu->archSpecific->pmeStream_,
1415                            pmeGpu->settings.transferKind,
1416                            nullptr);
1417     }
1418
1419     int majorDim = -1, middleDim = -1, minorDim = -1;
1420     switch (gridOrdering)
1421     {
1422         case GridOrdering::YZX:
1423             majorDim  = YY;
1424             middleDim = ZZ;
1425             minorDim  = XX;
1426             break;
1427
1428         case GridOrdering::XYZ:
1429             majorDim  = XX;
1430             middleDim = YY;
1431             minorDim  = ZZ;
1432             break;
1433
1434         default: GMX_ASSERT(false, "Implement grid ordering here and below for the kernel launch");
1435     }
1436
1437     const int maxBlockSize = pmeGpu->programHandle_->impl_->solveMaxWorkGroupSize;
1438
1439     const int gridLineSize      = pmeGpu->kernelParams->grid.complexGridSize[minorDim];
1440     const int gridLinesPerBlock = std::max(maxBlockSize / gridLineSize, 1);
1441     const int blocksPerGridLine = (gridLineSize + maxBlockSize - 1) / maxBlockSize;
1442     int       cellsPerBlock;
1443     if (blocksPerGridLine == 1)
1444     {
1445         cellsPerBlock = gridLineSize * gridLinesPerBlock;
1446     }
1447     else
1448     {
1449         cellsPerBlock = (gridLineSize + blocksPerGridLine - 1) / blocksPerGridLine;
1450     }
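         // Thus a block covers either several whole (short) grid lines or an equal share of one long line;
         // a hypothetical example: with a 256-thread limit, a 48-cell line gives 5 lines per block, while a
         // 1000-cell line is split into 4 blocks of 250 cells each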
1451     const int warpSize  = pmeGpu->programHandle_->warpSize();
1452     const int blockSize = (cellsPerBlock + warpSize - 1) / warpSize * warpSize;
1453
1454     static_assert(!GMX_GPU_CUDA || c_solveMaxWarpsPerBlock / 2 >= 4,
1455                   "The CUDA solve energy kernels needs at least 4 warps. "
1456                   "Here we launch at least half of the max warps.");
1457
1458     KernelLaunchConfig config;
1459     config.blockSize[0] = blockSize;
1460     config.gridSize[0]  = blocksPerGridLine;
1461     // the block size was rounded up to full warps above so that shuffle operations produce defined results
1462     config.gridSize[1] = (pmeGpu->kernelParams->grid.complexGridSize[middleDim] + gridLinesPerBlock - 1)
1463                          / gridLinesPerBlock;
1464     config.gridSize[2] = pmeGpu->kernelParams->grid.complexGridSize[majorDim];
1465
1466     PmeStage                           timingId  = PmeStage::Solve;
1467     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
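         // Pick the kernel instantiation matching the grid ordering, the FEP grid index (A/B) and whether
         // energy and virial are requested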
1468     if (gridOrdering == GridOrdering::YZX)
1469     {
1470         if (gridIndex == 0)
1471         {
1472             kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveYZXEnergyKernelA
1473                                                : pmeGpu->programHandle_->impl_->solveYZXKernelA;
1474         }
1475         else
1476         {
1477             kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveYZXEnergyKernelB
1478                                                : pmeGpu->programHandle_->impl_->solveYZXKernelB;
1479         }
1480     }
1481     else if (gridOrdering == GridOrdering::XYZ)
1482     {
1483         if (gridIndex == 0)
1484         {
1485             kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveXYZEnergyKernelA
1486                                                : pmeGpu->programHandle_->impl_->solveXYZKernelA;
1487         }
1488         else
1489         {
1490             kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveXYZEnergyKernelB
1491                                                : pmeGpu->programHandle_->impl_->solveXYZKernelB;
1492         }
1493     }
1494
1495     pme_gpu_start_timing(pmeGpu, timingId);
1496     auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1497 #if c_canEmbedBuffers
1498     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1499 #else
1500     const auto kernelArgs =
1501             prepareGpuKernelArguments(kernelPtr,
1502                                       config,
1503                                       kernelParamsPtr,
1504                                       &kernelParamsPtr->grid.d_splineModuli[gridIndex],
1505                                       &kernelParamsPtr->constants.d_virialAndEnergy[gridIndex],
1506                                       &kernelParamsPtr->grid.d_fourierGrid[gridIndex]);
1507 #endif
1508     launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME solve", kernelArgs);
1509     pme_gpu_stop_timing(pmeGpu, timingId);
1510
1511     if (computeEnergyAndVirial)
1512     {
1513         copyFromDeviceBuffer(pmeGpu->staging.h_virialAndEnergy[gridIndex],
1514                              &kernelParamsPtr->constants.d_virialAndEnergy[gridIndex],
1515                              0,
1516                              c_virialAndEnergyCount,
1517                              pmeGpu->archSpecific->pmeStream_,
1518                              pmeGpu->settings.transferKind,
1519                              nullptr);
1520     }
1521
1522     if (copyInputAndOutputGrid)
1523     {
1524         copyFromDeviceBuffer(h_gridFloat,
1525                              &kernelParamsPtr->grid.d_fourierGrid[gridIndex],
1526                              0,
1527                              pmeGpu->archSpecific->complexGridSize[gridIndex],
1528                              pmeGpu->archSpecific->pmeStream_,
1529                              pmeGpu->settings.transferKind,
1530                              nullptr);
1531     }
1532 }
1533
1534 /*! \brief
1535  * Returns a pointer to the appropriate gather kernel based on the input values
1536  *
1537  * \param[in]  pmeGpu                   The PME GPU structure.
1538  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
1539  * \param[in]  readSplinesFromGlobal    Whether the spline data should be read from global memory
1540  * \param[in]  numGrids                 Number of grids to use. numGrids == 2 if Coulomb is perturbed.
1541  *
1542  * \return Pointer to the selected kernel
1543  */
1544 inline auto selectGatherKernelPtr(const PmeGpu*  pmeGpu,
1545                                   ThreadsPerAtom threadsPerAtom,
1546                                   bool           readSplinesFromGlobal,
1547                                   const int      numGrids)
1548
1549 {
1550     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1551
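         // The spline data is either re-read from global memory (as written by the spread stage) or
         // recalculated on the fly by the gather kernel itself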
1552     if (readSplinesFromGlobal)
1553     {
1554         if (threadsPerAtom == ThreadsPerAtom::Order)
1555         {
1556             if (numGrids == 2)
1557             {
1558                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesThPerAtom4Dual;
1559             }
1560             else
1561             {
1562                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesThPerAtom4Single;
1563             }
1564         }
1565         else
1566         {
1567             if (numGrids == 2)
1568             {
1569                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesDual;
1570             }
1571             else
1572             {
1573                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesSingle;
1574             }
1575         }
1576     }
1577     else
1578     {
1579         if (threadsPerAtom == ThreadsPerAtom::Order)
1580         {
1581             if (numGrids == 2)
1582             {
1583                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelThPerAtom4Dual;
1584             }
1585             else
1586             {
1587                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelThPerAtom4Single;
1588             }
1589         }
1590         else
1591         {
1592             if (numGrids == 2)
1593             {
1594                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelDual;
1595             }
1596             else
1597             {
1598                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelSingle;
1599             }
1600         }
1601     }
1602     return kernelPtr;
1603 }
1604
1605 void pme_gpu_gather(PmeGpu* pmeGpu, real** h_grids, const float lambda)
1606 {
1607     GMX_ASSERT(
1608             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
1609             "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
1610
1611     const auto& settings = pmeGpu->settings;
1612
1613     if (!settings.performGPUFFT || settings.copyAllOutputs)
1614     {
1615         for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
1616         {
1617             float* h_grid = const_cast<float*>(h_grids[gridIndex]);
1618             pme_gpu_copy_input_gather_grid(pmeGpu, h_grid, gridIndex);
1619         }
1620     }
1621
1622     if (settings.copyAllOutputs)
1623     {
1624         pme_gpu_copy_input_gather_atom_data(pmeGpu);
1625     }
1626
1627     /* Only set when running unit tests */
1628     const bool   readGlobal = pmeGpu->settings.copyAllOutputs;
1629     const size_t blockSize  = pmeGpu->programHandle_->impl_->gatherWorkGroupSize;
1630     const int    order      = pmeGpu->common->pme_order;
1631     GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1632     const int threadsPerAtom =
1633             (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
1634     const bool recalculateSplines = pmeGpu->settings.recalculateSplines;
1635
1636     GMX_ASSERT(!GMX_GPU_OPENCL || pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::OrderSquared,
1637                "Only 16 threads per atom supported in OpenCL");
1638     GMX_ASSERT(!GMX_GPU_OPENCL || !recalculateSplines,
1639                "Recalculating splines not supported in OpenCL");
1640
1641     const int atomsPerBlock = blockSize / threadsPerAtom;
1642
1643     GMX_ASSERT(!(c_pmeAtomDataBlockSize % atomsPerBlock),
1644                "inconsistent atom data padding vs. gathering block size");
1645
1646     const int blockCount = pmeGpu->nAtomsAlloc / atomsPerBlock;
1647     auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1648
1649     KernelLaunchConfig config;
1650     config.blockSize[0] = order;
1651     config.blockSize[1] = (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? 1 : order);
1652     config.blockSize[2] = atomsPerBlock;
1653     config.gridSize[0]  = dimGrid.first;
1654     config.gridSize[1]  = dimGrid.second;
1655
1656     // TODO test different cache configs
1657
1658     PmeStage                           timingId = PmeStage::Gather;
1659     PmeGpuProgramImpl::PmeKernelHandle kernelPtr =
1660             selectGatherKernelPtr(pmeGpu,
1661                                   pmeGpu->settings.threadsPerAtom,
1662                                   readGlobal || (!recalculateSplines),
1663                                   pmeGpu->common->ngrids);
1664     // TODO design kernel selection getters and make PmeGpu a friend of PmeGpuProgramImpl
1665
1666     pme_gpu_start_timing(pmeGpu, timingId);
1667     auto* timingEvent     = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1668     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1669     if (pmeGpu->common->ngrids == 1)
1670     {
1671         kernelParamsPtr->current.scale = 1.0;
1672     }
1673     else
1674     {
1675         kernelParamsPtr->current.scale = 1.0 - lambda;
1676     }
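         // Same (1 - lambda) scaling convention as in pme_gpu_spread()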
1677
1678 #if c_canEmbedBuffers
1679     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1680 #else
1681     const auto kernelArgs =
1682             prepareGpuKernelArguments(kernelPtr,
1683                                       config,
1684                                       kernelParamsPtr,
1685                                       &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_A],
1686                                       &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_B],
1687                                       &kernelParamsPtr->grid.d_realGrid[FEP_STATE_A],
1688                                       &kernelParamsPtr->grid.d_realGrid[FEP_STATE_B],
1689                                       &kernelParamsPtr->atoms.d_theta,
1690                                       &kernelParamsPtr->atoms.d_dtheta,
1691                                       &kernelParamsPtr->atoms.d_gridlineIndices,
1692                                       &kernelParamsPtr->atoms.d_forces);
1693 #endif
1694     launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME gather", kernelArgs);
1695     pme_gpu_stop_timing(pmeGpu, timingId);
1696
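         // With GPU-side force reduction the forces stay on the device and only a readiness event is
         // recorded; otherwise they are copied back to the host here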
1697     if (pmeGpu->settings.useGpuForceReduction)
1698     {
1699         pmeGpu->archSpecific->pmeForcesReady.markEvent(pmeGpu->archSpecific->pmeStream_);
1700     }
1701     else
1702     {
1703         pme_gpu_copy_output_forces(pmeGpu);
1704     }
1705 }
1706
1707 void* pme_gpu_get_kernelparam_forces(const PmeGpu* pmeGpu)
1708 {
1709     if (pmeGpu && pmeGpu->kernelParams)
1710     {
1711         return pmeGpu->kernelParams->atoms.d_forces;
1712     }
1713     else
1714     {
1715         return nullptr;
1716     }
1717 }
1718
1719 void pme_gpu_set_kernelparam_coordinates(const PmeGpu* pmeGpu, DeviceBuffer<gmx::RVec> d_x)
1720 {
1721     GMX_ASSERT(pmeGpu && pmeGpu->kernelParams,
1722                "PME GPU device buffer can not be set in non-GPU builds or before the GPU PME was "
1723                "initialized.");
1724
1725     GMX_ASSERT(checkDeviceBuffer(d_x, pmeGpu->kernelParams->atoms.nAtoms),
1726                "The device-side buffer can not be set.");
1727
1728     pmeGpu->kernelParams->atoms.d_coordinates = d_x;
1729 }
1730
1731 GpuEventSynchronizer* pme_gpu_get_forces_ready_synchronizer(const PmeGpu* pmeGpu)
1732 {
1733     if (pmeGpu && pmeGpu->kernelParams)
1734     {
1735         return &pmeGpu->archSpecific->pmeForcesReady;
1736     }
1737     else
1738     {
1739         return nullptr;
1740     }
1741 }