[alexxy/gromacs.git] / src / gromacs / ewald / pme_gpu_internal.cpp
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2016,2017,2018,2019,2020, by the GROMACS development team.
5  * Copyright (c) 2021, by the GROMACS development team, led by
6  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
7  * and including many others, as listed in the AUTHORS file in the
8  * top-level source directory and at http://www.gromacs.org.
9  *
10  * GROMACS is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public License
12  * as published by the Free Software Foundation; either version 2.1
13  * of the License, or (at your option) any later version.
14  *
15  * GROMACS is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with GROMACS; if not, see
22  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
23  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
24  *
25  * If you want to redistribute modifications to GROMACS, please
26  * consider that scientific software is very special. Version
27  * control is crucial - bugs must be traceable. We will be happy to
28  * consider code for inclusion in the official distribution, but
29  * derived work must not be called official GROMACS. Details are found
30  * in the README & COPYING files - if they are missing, get the
31  * official version at http://www.gromacs.org.
32  *
33  * To help us fund GROMACS development, we humbly ask that you cite
34  * the research papers on the package. Check out http://www.gromacs.org.
35  */
36
37 /*! \internal \file
38  *
39  * \brief This file contains internal function implementations
40  * for performing the PME calculations on GPU.
41  *
42  * Note that this file is compiled as regular C++ source in OpenCL builds, but
43  * it is treated as CUDA source in CUDA-enabled GPU builds.
44  *
45  * \author Aleksei Iupinov <a.yupinov@gmail.com>
46  * \ingroup module_ewald
47  */
48
49 #include "gmxpre.h"
50
51 #include "pme_gpu_internal.h"
52
53 #include "config.h"
54
55 #include <list>
56 #include <memory>
57 #include <string>
58
59 #include "gromacs/ewald/ewald_utils.h"
60 #include "gromacs/gpu_utils/device_context.h"
61 #include "gromacs/gpu_utils/device_stream.h"
62 #include "gromacs/gpu_utils/gpu_utils.h"
63 #include "gromacs/hardware/device_information.h"
64 #include "gromacs/math/invertmatrix.h"
65 #include "gromacs/math/units.h"
66 #include "gromacs/timing/gpu_timing.h"
67 #include "gromacs/utility/exceptions.h"
68 #include "gromacs/utility/fatalerror.h"
69 #include "gromacs/utility/gmxassert.h"
70 #include "gromacs/utility/logger.h"
71 #include "gromacs/utility/stringutil.h"
72
73 #if GMX_GPU_CUDA
74 #    include "gromacs/gpu_utils/pmalloc_cuda.h"
75
76 #    include "pme.cuh"
77 #elif GMX_GPU_OPENCL
78 #    include "gromacs/gpu_utils/gmxopencl.h"
79 #elif GMX_GPU_SYCL
80 #    include "gromacs/gpu_utils/syclutils.h"
81 #endif
82
83 #include "gromacs/ewald/pme.h"
84
85 #include "pme_gpu_3dfft.h"
86 #include "pme_gpu_calculate_splines.h"
87 #include "pme_gpu_constants.h"
88 #include "pme_gpu_program_impl.h"
89 #include "pme_gpu_timings.h"
90 #include "pme_gpu_types.h"
91 #include "pme_gpu_types_host.h"
92 #include "pme_gpu_types_host_impl.h"
93 #include "pme_grid.h"
94 #include "pme_internal.h"
95 #include "pme_solve.h"
96
97 /*! \brief
98  * CUDA only.
99  * Atom count above which it is advantageous to recalculate the splines in the gather kernel
100  * and to use fewer threads per atom in the spline and spread kernels.
101  */
102 constexpr int c_pmeGpuPerformanceAtomLimit = 23000;
103
104 /*! \internal \brief
105  * Wrapper for getting a pointer to the plain C++ part of the GPU kernel parameters structure.
106  *
107  * \param[in] pmeGpu  The PME GPU structure.
108  * \returns The pointer to the kernel parameters.
109  */
110 static PmeGpuKernelParamsBase* pme_gpu_get_kernel_params_base_ptr(const PmeGpu* pmeGpu)
111 {
112     // reinterpret_cast is needed because the derived CUDA structure is not known in this file
113     auto* kernelParamsPtr = reinterpret_cast<PmeGpuKernelParamsBase*>(pmeGpu->kernelParams.get());
114     return kernelParamsPtr;
115 }
116
117 /*! \brief
118  * Atom data block size (in terms of number of atoms).
119  * This is the least common multiple of the numbers of atoms processed by
120  * a single block/workgroup of the spread and gather kernels.
121  * The GPU atom data buffers must be padded, which means that
122  * the numbers of atoms used for determining the size of the memory
123  * allocation must be divisible by this.
124  */
125 constexpr int c_pmeAtomDataBlockSize = 64;
126
127 int pme_gpu_get_atom_data_block_size()
128 {
129     return c_pmeAtomDataBlockSize;
130 }
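/* Illustrative sketch (not part of the original file): how a caller is expected to round an
 * atom count up to a multiple of the block size before sizing the GPU atom buffers;
 * pme_gpu_reinit_atoms() below performs exactly this computation when setting
 * pmeGpu->nAtomsAlloc. The helper name is hypothetical.
 *
 *     static int examplePaddedAtomCount(int nAtoms)
 *     {
 *         const int blockSize = pme_gpu_get_atom_data_block_size();
 *         // Round up so that the allocation size is divisible by the spread/gather block size
 *         return ((nAtoms + blockSize - 1) / blockSize) * blockSize;
 *     }
 */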
131
132 void pme_gpu_synchronize(const PmeGpu* pmeGpu)
133 {
134     pmeGpu->archSpecific->pmeStream_.synchronize();
135 }
136
137 void pme_gpu_alloc_energy_virial(PmeGpu* pmeGpu)
138 {
139     const size_t energyAndVirialSize = c_virialAndEnergyCount * sizeof(float);
140
141     GMX_ASSERT(
142             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
143             "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
144
145     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
146     {
147         allocateDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy[gridIndex],
148                              c_virialAndEnergyCount,
149                              pmeGpu->archSpecific->deviceContext_);
150         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_virialAndEnergy[gridIndex]), energyAndVirialSize);
151     }
152 }
153
154 void pme_gpu_free_energy_virial(PmeGpu* pmeGpu)
155 {
156     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
157     {
158         freeDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy[gridIndex]);
159         pfree(pmeGpu->staging.h_virialAndEnergy[gridIndex]);
160         pmeGpu->staging.h_virialAndEnergy[gridIndex] = nullptr;
161     }
162 }
163
164 void pme_gpu_clear_energy_virial(const PmeGpu* pmeGpu)
165 {
166     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
167     {
168         clearDeviceBufferAsync(&pmeGpu->kernelParams->constants.d_virialAndEnergy[gridIndex],
169                                0,
170                                c_virialAndEnergyCount,
171                                pmeGpu->archSpecific->pmeStream_);
172     }
173 }
174
175 void pme_gpu_realloc_and_copy_bspline_values(PmeGpu* pmeGpu, const int gridIndex)
176 {
177     GMX_ASSERT(
178             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
179             "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
180     GMX_ASSERT(gridIndex < pmeGpu->common->ngrids,
181                "Invalid combination of gridIndex and number of grids");
182
183     const int splineValuesOffset[DIM] = { 0,
184                                           pmeGpu->kernelParams->grid.realGridSize[XX],
185                                           pmeGpu->kernelParams->grid.realGridSize[XX]
186                                                   + pmeGpu->kernelParams->grid.realGridSize[YY] };
187     memcpy(&pmeGpu->kernelParams->grid.splineValuesOffset, &splineValuesOffset, sizeof(splineValuesOffset));
188
189     const int newSplineValuesSize = pmeGpu->kernelParams->grid.realGridSize[XX]
190                                     + pmeGpu->kernelParams->grid.realGridSize[YY]
191                                     + pmeGpu->kernelParams->grid.realGridSize[ZZ];
192     const bool shouldRealloc = (newSplineValuesSize > pmeGpu->archSpecific->splineValuesSize[gridIndex]);
193     reallocateDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli[gridIndex],
194                            newSplineValuesSize,
195                            &pmeGpu->archSpecific->splineValuesSize[gridIndex],
196                            &pmeGpu->archSpecific->splineValuesCapacity[gridIndex],
197                            pmeGpu->archSpecific->deviceContext_);
198     if (shouldRealloc)
199     {
200         /* Reallocate the host buffer */
201         pfree(pmeGpu->staging.h_splineModuli[gridIndex]);
202         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_splineModuli[gridIndex]),
203                 newSplineValuesSize * sizeof(float));
204     }
205     for (int i = 0; i < DIM; i++)
206     {
207         memcpy(pmeGpu->staging.h_splineModuli[gridIndex] + splineValuesOffset[i],
208                pmeGpu->common->bsp_mod[i].data(),
209                pmeGpu->common->bsp_mod[i].size() * sizeof(float));
210     }
211     /* TODO: pin original buffer instead! */
212     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli[gridIndex],
213                        pmeGpu->staging.h_splineModuli[gridIndex],
214                        0,
215                        newSplineValuesSize,
216                        pmeGpu->archSpecific->pmeStream_,
217                        pmeGpu->settings.transferKind,
218                        nullptr);
219 }
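/* Layout sketch (an addition for clarity, not original code): the three per-dimension B-spline
 * moduli arrays are packed back-to-back into a single buffer, and splineValuesOffset[] holds the
 * start of each sub-array. Reading modulus k along dimension dim thus looks like this
 * hypothetical helper:
 *
 *     static float exampleSplineModulus(const PmeGpu* pmeGpu, int gridIndex, int dim, int k)
 *     {
 *         const int offset = pmeGpu->kernelParams->grid.splineValuesOffset[dim];
 *         return pmeGpu->staging.h_splineModuli[gridIndex][offset + k];
 *     }
 */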
220
221 void pme_gpu_free_bspline_values(const PmeGpu* pmeGpu)
222 {
223     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
224     {
225         pfree(pmeGpu->staging.h_splineModuli[gridIndex]);
226         freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli[gridIndex]);
227     }
228 }
229
230 void pme_gpu_realloc_forces(PmeGpu* pmeGpu)
231 {
232     const size_t newForcesSize = pmeGpu->nAtomsAlloc * DIM;
233     GMX_ASSERT(newForcesSize > 0, "Bad number of atoms in PME GPU");
234     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces,
235                            newForcesSize,
236                            &pmeGpu->archSpecific->forcesSize,
237                            &pmeGpu->archSpecific->forcesSizeAlloc,
238                            pmeGpu->archSpecific->deviceContext_);
239     pmeGpu->staging.h_forces.reserveWithPadding(pmeGpu->nAtomsAlloc);
240     pmeGpu->staging.h_forces.resizeWithPadding(pmeGpu->kernelParams->atoms.nAtoms);
241 }
242
243 void pme_gpu_free_forces(const PmeGpu* pmeGpu)
244 {
245     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces);
246 }
247
248 void pme_gpu_copy_input_forces(PmeGpu* pmeGpu)
249 {
250     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
251     float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
252     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces,
253                        h_forcesFloat,
254                        0,
255                        DIM * pmeGpu->kernelParams->atoms.nAtoms,
256                        pmeGpu->archSpecific->pmeStream_,
257                        pmeGpu->settings.transferKind,
258                        nullptr);
259 }
260
261 void pme_gpu_copy_output_forces(PmeGpu* pmeGpu)
262 {
263     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
264     float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
265     copyFromDeviceBuffer(h_forcesFloat,
266                          &pmeGpu->kernelParams->atoms.d_forces,
267                          0,
268                          DIM * pmeGpu->kernelParams->atoms.nAtoms,
269                          pmeGpu->archSpecific->pmeStream_,
270                          pmeGpu->settings.transferKind,
271                          nullptr);
272 }
273
274 void pme_gpu_realloc_and_copy_input_coefficients(const PmeGpu* pmeGpu,
275                                                  const float*  h_coefficients,
276                                                  const int     gridIndex)
277 {
278     GMX_ASSERT(h_coefficients, "Bad host-side charge buffer in PME GPU");
279     const size_t newCoefficientsSize = pmeGpu->nAtomsAlloc;
280     GMX_ASSERT(newCoefficientsSize > 0, "Bad number of atoms in PME GPU");
281     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex],
282                            newCoefficientsSize,
283                            &pmeGpu->archSpecific->coefficientsSize[gridIndex],
284                            &pmeGpu->archSpecific->coefficientsCapacity[gridIndex],
285                            pmeGpu->archSpecific->deviceContext_);
286     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex],
287                        const_cast<float*>(h_coefficients),
288                        0,
289                        pmeGpu->kernelParams->atoms.nAtoms,
290                        pmeGpu->archSpecific->pmeStream_,
291                        pmeGpu->settings.transferKind,
292                        nullptr);
293
294     const size_t paddingIndex = pmeGpu->kernelParams->atoms.nAtoms;
295     const size_t paddingCount = pmeGpu->nAtomsAlloc - paddingIndex;
296     if (paddingCount > 0)
297     {
298         clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex],
299                                paddingIndex,
300                                paddingCount,
301                                pmeGpu->archSpecific->pmeStream_);
302     }
303 }
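/* Note (added for clarity): the padding region is cleared so that the "extra" atoms introduced by
 * rounding nAtoms up to nAtomsAlloc carry zero charge and therefore contribute nothing when the
 * spread and gather kernels process full blocks of atoms.
 */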
304
305 void pme_gpu_free_coefficients(const PmeGpu* pmeGpu)
306 {
307     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
308     {
309         freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex]);
310     }
311 }
312
313 void pme_gpu_realloc_spline_data(PmeGpu* pmeGpu)
314 {
315     const int order             = pmeGpu->common->pme_order;
316     const int newSplineDataSize = DIM * order * pmeGpu->nAtomsAlloc;
317     GMX_ASSERT(newSplineDataSize > 0, "Bad number of atoms in PME GPU");
318     /* Two arrays of the same size */
319     const bool shouldRealloc        = (newSplineDataSize > pmeGpu->archSpecific->splineDataSize);
320     int        currentSizeTemp      = pmeGpu->archSpecific->splineDataSize;
321     int        currentSizeTempAlloc = pmeGpu->archSpecific->splineDataSizeAlloc;
322     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta,
323                            newSplineDataSize,
324                            &currentSizeTemp,
325                            &currentSizeTempAlloc,
326                            pmeGpu->archSpecific->deviceContext_);
327     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta,
328                            newSplineDataSize,
329                            &pmeGpu->archSpecific->splineDataSize,
330                            &pmeGpu->archSpecific->splineDataSizeAlloc,
331                            pmeGpu->archSpecific->deviceContext_);
332     // the host side reallocation
333     if (shouldRealloc)
334     {
335         pfree(pmeGpu->staging.h_theta);
336         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_theta), newSplineDataSize * sizeof(float));
337         pfree(pmeGpu->staging.h_dtheta);
338         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_dtheta), newSplineDataSize * sizeof(float));
339     }
340 }
341
342 void pme_gpu_free_spline_data(const PmeGpu* pmeGpu)
343 {
344     /* Two arrays of the same size */
345     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta);
346     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta);
347     pfree(pmeGpu->staging.h_theta);
348     pfree(pmeGpu->staging.h_dtheta);
349 }
350
351 void pme_gpu_realloc_grid_indices(PmeGpu* pmeGpu)
352 {
353     const size_t newIndicesSize = DIM * pmeGpu->nAtomsAlloc;
354     GMX_ASSERT(newIndicesSize > 0, "Bad number of atoms in PME GPU");
355     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices,
356                            newIndicesSize,
357                            &pmeGpu->archSpecific->gridlineIndicesSize,
358                            &pmeGpu->archSpecific->gridlineIndicesSizeAlloc,
359                            pmeGpu->archSpecific->deviceContext_);
360     pfree(pmeGpu->staging.h_gridlineIndices);
361     pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_gridlineIndices), newIndicesSize * sizeof(int));
362 }
363
364 void pme_gpu_free_grid_indices(const PmeGpu* pmeGpu)
365 {
366     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices);
367     pfree(pmeGpu->staging.h_gridlineIndices);
368 }
369
370 void pme_gpu_realloc_grids(PmeGpu* pmeGpu)
371 {
372     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
373
374     const int newRealGridSize = kernelParamsPtr->grid.realGridSizePadded[XX]
375                                 * kernelParamsPtr->grid.realGridSizePadded[YY]
376                                 * kernelParamsPtr->grid.realGridSizePadded[ZZ];
377     // Multiplied by 2 because the complex grid size is counted in complex numbers, while all allocations/pointers are float
378     const int newComplexGridSize = kernelParamsPtr->grid.complexGridSizePadded[XX]
379                                    * kernelParamsPtr->grid.complexGridSizePadded[YY]
380                                    * kernelParamsPtr->grid.complexGridSizePadded[ZZ] * 2;
381     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
382     {
383         if (pmeGpu->archSpecific->performOutOfPlaceFFT)
384         {
385             /* 2 separate grids */
386             reallocateDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid[gridIndex],
387                                    newComplexGridSize,
388                                    &pmeGpu->archSpecific->complexGridSize[gridIndex],
389                                    &pmeGpu->archSpecific->complexGridCapacity[gridIndex],
390                                    pmeGpu->archSpecific->deviceContext_);
391             reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid[gridIndex],
392                                    newRealGridSize,
393                                    &pmeGpu->archSpecific->realGridSize[gridIndex],
394                                    &pmeGpu->archSpecific->realGridCapacity[gridIndex],
395                                    pmeGpu->archSpecific->deviceContext_);
396         }
397         else
398         {
399             /* A single buffer so that any grid will fit */
400             const int newGridsSize = std::max(newRealGridSize, newComplexGridSize);
401             reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid[gridIndex],
402                                    newGridsSize,
403                                    &pmeGpu->archSpecific->realGridSize[gridIndex],
404                                    &pmeGpu->archSpecific->realGridCapacity[gridIndex],
405                                    pmeGpu->archSpecific->deviceContext_);
406             kernelParamsPtr->grid.d_fourierGrid[gridIndex] = kernelParamsPtr->grid.d_realGrid[gridIndex];
407             pmeGpu->archSpecific->complexGridSize[gridIndex] =
408                     pmeGpu->archSpecific->realGridSize[gridIndex];
409             // the size might get used later for copying the grid
410         }
411     }
412 }
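/* Sizing sketch (illustrative, not part of the original file): both grid sizes above are counted
 * in floats. With hypothetical padded dimensions of 48 x 48 x 48 for the real grid and
 * 48 x 48 x 25 for the complex grid, the real grid needs 48*48*48 = 110592 floats and the complex
 * grid needs 48*48*25*2 = 115200 floats (the factor 2 covers the real and imaginary parts). For
 * in-place FFT, a single buffer of max(110592, 115200) = 115200 floats is therefore enough, which
 * is what the else-branch above allocates.
 */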
413
414 void pme_gpu_free_grids(const PmeGpu* pmeGpu)
415 {
416     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
417     {
418         if (pmeGpu->archSpecific->performOutOfPlaceFFT)
419         {
420             freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_fourierGrid[gridIndex]);
421         }
422         freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid[gridIndex]);
423     }
424 }
425
426 void pme_gpu_clear_grids(const PmeGpu* pmeGpu)
427 {
428     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
429     {
430         clearDeviceBufferAsync(&pmeGpu->kernelParams->grid.d_realGrid[gridIndex],
431                                0,
432                                pmeGpu->archSpecific->realGridSize[gridIndex],
433                                pmeGpu->archSpecific->pmeStream_);
434     }
435 }
436
437 void pme_gpu_realloc_and_copy_fract_shifts(PmeGpu* pmeGpu)
438 {
439     pme_gpu_free_fract_shifts(pmeGpu);
440
441     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
442
443     const int nx                  = kernelParamsPtr->grid.realGridSize[XX];
444     const int ny                  = kernelParamsPtr->grid.realGridSize[YY];
445     const int nz                  = kernelParamsPtr->grid.realGridSize[ZZ];
446     const int cellCount           = c_pmeNeighborUnitcellCount;
447     const int gridDataOffset[DIM] = { 0, cellCount * nx, cellCount * (nx + ny) };
448
449     memcpy(kernelParamsPtr->grid.tablesOffsets, &gridDataOffset, sizeof(gridDataOffset));
450
451     const int newFractShiftsSize = cellCount * (nx + ny + nz);
452
453     initParamLookupTable(&kernelParamsPtr->grid.d_fractShiftsTable,
454                          &kernelParamsPtr->fractShiftsTableTexture,
455                          pmeGpu->common->fsh.data(),
456                          newFractShiftsSize,
457                          pmeGpu->archSpecific->deviceContext_);
458
459     initParamLookupTable(&kernelParamsPtr->grid.d_gridlineIndicesTable,
460                          &kernelParamsPtr->gridlineIndicesTableTexture,
461                          pmeGpu->common->nn.data(),
462                          newFractShiftsSize,
463                          pmeGpu->archSpecific->deviceContext_);
464 }
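/* Layout sketch (an illustration, not original code): the fractional-shift and gridline-index
 * tables are each a concatenation of three per-dimension sub-tables of lengths cellCount * nx,
 * cellCount * ny and cellCount * nz, with tablesOffsets[] marking where each sub-table starts.
 * Equivalently, in a hypothetical helper:
 *
 *     static int exampleTableOffset(const int realGridSize[DIM], int dim)
 *     {
 *         int offset = 0;
 *         for (int d = 0; d < dim; d++)
 *         {
 *             offset += c_pmeNeighborUnitcellCount * realGridSize[d];
 *         }
 *         return offset; // {0, cellCount*nx, cellCount*(nx+ny)} for dim = XX, YY, ZZ
 *     }
 */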
465
466 void pme_gpu_free_fract_shifts(const PmeGpu* pmeGpu)
467 {
468     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
469 #if GMX_GPU_CUDA
470     destroyParamLookupTable(&kernelParamsPtr->grid.d_fractShiftsTable,
471                             kernelParamsPtr->fractShiftsTableTexture);
472     destroyParamLookupTable(&kernelParamsPtr->grid.d_gridlineIndicesTable,
473                             kernelParamsPtr->gridlineIndicesTableTexture);
474 #elif GMX_GPU_OPENCL || GMX_GPU_SYCL
475     freeDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable);
476     freeDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable);
477 #endif
478 }
479
480 bool pme_gpu_stream_query(const PmeGpu* pmeGpu)
481 {
482     return haveStreamTasksCompleted(pmeGpu->archSpecific->pmeStream_);
483 }
484
485 void pme_gpu_copy_input_gather_grid(const PmeGpu* pmeGpu, const float* h_grid, const int gridIndex)
486 {
487     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid[gridIndex],
488                        h_grid,
489                        0,
490                        pmeGpu->archSpecific->realGridSize[gridIndex],
491                        pmeGpu->archSpecific->pmeStream_,
492                        pmeGpu->settings.transferKind,
493                        nullptr);
494 }
495
496 void pme_gpu_copy_output_spread_grid(const PmeGpu* pmeGpu, float* h_grid, const int gridIndex)
497 {
498     copyFromDeviceBuffer(h_grid,
499                          &pmeGpu->kernelParams->grid.d_realGrid[gridIndex],
500                          0,
501                          pmeGpu->archSpecific->realGridSize[gridIndex],
502                          pmeGpu->archSpecific->pmeStream_,
503                          pmeGpu->settings.transferKind,
504                          nullptr);
505     pmeGpu->archSpecific->syncSpreadGridD2H.markEvent(pmeGpu->archSpecific->pmeStream_);
506 }
507
508 void pme_gpu_copy_output_spread_atom_data(const PmeGpu* pmeGpu)
509 {
510     const size_t splinesCount    = DIM * pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order;
511     auto*        kernelParamsPtr = pmeGpu->kernelParams.get();
512     copyFromDeviceBuffer(pmeGpu->staging.h_dtheta,
513                          &kernelParamsPtr->atoms.d_dtheta,
514                          0,
515                          splinesCount,
516                          pmeGpu->archSpecific->pmeStream_,
517                          pmeGpu->settings.transferKind,
518                          nullptr);
519     copyFromDeviceBuffer(pmeGpu->staging.h_theta,
520                          &kernelParamsPtr->atoms.d_theta,
521                          0,
522                          splinesCount,
523                          pmeGpu->archSpecific->pmeStream_,
524                          pmeGpu->settings.transferKind,
525                          nullptr);
526     copyFromDeviceBuffer(pmeGpu->staging.h_gridlineIndices,
527                          &kernelParamsPtr->atoms.d_gridlineIndices,
528                          0,
529                          kernelParamsPtr->atoms.nAtoms * DIM,
530                          pmeGpu->archSpecific->pmeStream_,
531                          pmeGpu->settings.transferKind,
532                          nullptr);
533 }
534
535 void pme_gpu_copy_input_gather_atom_data(const PmeGpu* pmeGpu)
536 {
537     const size_t splinesCount    = DIM * pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order;
538     auto*        kernelParamsPtr = pmeGpu->kernelParams.get();
539
540     // TODO: could clear only the padding and not the whole buffer, but this is test-exclusive code anyway
541     clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_gridlineIndices,
542                            0,
543                            pmeGpu->nAtomsAlloc * DIM,
544                            pmeGpu->archSpecific->pmeStream_);
545     clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_dtheta,
546                            0,
547                            pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
548                            pmeGpu->archSpecific->pmeStream_);
549     clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_theta,
550                            0,
551                            pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
552                            pmeGpu->archSpecific->pmeStream_);
553
554     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_dtheta,
555                        pmeGpu->staging.h_dtheta,
556                        0,
557                        splinesCount,
558                        pmeGpu->archSpecific->pmeStream_,
559                        pmeGpu->settings.transferKind,
560                        nullptr);
561     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_theta,
562                        pmeGpu->staging.h_theta,
563                        0,
564                        splinesCount,
565                        pmeGpu->archSpecific->pmeStream_,
566                        pmeGpu->settings.transferKind,
567                        nullptr);
568     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_gridlineIndices,
569                        pmeGpu->staging.h_gridlineIndices,
570                        0,
571                        kernelParamsPtr->atoms.nAtoms * DIM,
572                        pmeGpu->archSpecific->pmeStream_,
573                        pmeGpu->settings.transferKind,
574                        nullptr);
575 }
576
577 void pme_gpu_sync_spread_grid(const PmeGpu* pmeGpu)
578 {
579     pmeGpu->archSpecific->syncSpreadGridD2H.waitForEvent();
580 }
581
582 /*! \brief Internal GPU initialization for PME.
583  *
584  * \param[in]  pmeGpu         GPU PME data.
585  * \param[in]  deviceContext  GPU context.
586  * \param[in]  deviceStream   GPU stream.
587  */
588 static void pme_gpu_init_internal(PmeGpu* pmeGpu, const DeviceContext& deviceContext, const DeviceStream& deviceStream)
589 {
590     /* Allocate the target-specific structures */
591     pmeGpu->archSpecific.reset(new PmeGpuSpecific(deviceContext, deviceStream));
592     pmeGpu->kernelParams.reset(new PmeGpuKernelParams());
593
594     pmeGpu->archSpecific->performOutOfPlaceFFT = true;
595     /* This should give better performance, according to the cuFFT documentation.
596      * The performance seems to be the same though.
597      * TODO: PME could also try to pick up nice grid sizes (with factors of 2, 3, 5, 7).
598      */
599
600 #if GMX_GPU_CUDA
601     pmeGpu->maxGridWidthX = deviceContext.deviceInfo().prop.maxGridSize[0];
602 #else
603     // Use this path for any non-CUDA GPU acceleration
604     // TODO: is there really no global work size limit in OpenCL?
605     pmeGpu->maxGridWidthX = INT32_MAX / 2;
606 #endif
607 }
608
609 void pme_gpu_reinit_3dfft(const PmeGpu* pmeGpu)
610 {
611     if (pme_gpu_settings(pmeGpu).performGPUFFT)
612     {
613         pmeGpu->archSpecific->fftSetup.resize(0);
614         for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
615         {
616             pmeGpu->archSpecific->fftSetup.push_back(std::make_unique<GpuParallel3dFft>(pmeGpu, gridIndex));
617         }
618     }
619 }
620
621 void pme_gpu_destroy_3dfft(const PmeGpu* pmeGpu)
622 {
623     pmeGpu->archSpecific->fftSetup.resize(0);
624 }
625
626 void pme_gpu_getEnergyAndVirial(const gmx_pme_t& pme, const float lambda, PmeOutput* output)
627 {
628     const PmeGpu* pmeGpu = pme.gpu;
629
630     GMX_ASSERT(lambda == 1.0 || pmeGpu->common->ngrids == 2,
631                "Invalid combination of lambda and number of grids");
632
633     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
634     {
635         for (int j = 0; j < c_virialAndEnergyCount; j++)
636         {
637             GMX_ASSERT(std::isfinite(pmeGpu->staging.h_virialAndEnergy[gridIndex][j]),
638                        "PME GPU produces incorrect energy/virial.");
639         }
640     }
641     for (int dim1 = 0; dim1 < DIM; dim1++)
642     {
643         for (int dim2 = 0; dim2 < DIM; dim2++)
644         {
645             output->coulombVirial_[dim1][dim2] = 0;
646         }
647     }
648     output->coulombEnergy_ = 0;
649     float scale            = 1.0;
650     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
651     {
652         if (pmeGpu->common->ngrids == 2)
653         {
654             scale = gridIndex == 0 ? (1.0 - lambda) : lambda;
655         }
656         output->coulombVirial_[XX][XX] +=
657                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][0];
658         output->coulombVirial_[YY][YY] +=
659                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][1];
660         output->coulombVirial_[ZZ][ZZ] +=
661                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][2];
662         output->coulombVirial_[XX][YY] +=
663                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][3];
664         output->coulombVirial_[YY][XX] +=
665                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][3];
666         output->coulombVirial_[XX][ZZ] +=
667                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][4];
668         output->coulombVirial_[ZZ][XX] +=
669                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][4];
670         output->coulombVirial_[YY][ZZ] +=
671                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][5];
672         output->coulombVirial_[ZZ][YY] +=
673                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][5];
674         output->coulombEnergy_ += scale * 0.5F * pmeGpu->staging.h_virialAndEnergy[gridIndex][6];
675     }
676     if (pmeGpu->common->ngrids > 1)
677     {
678         output->coulombDvdl_ = 0.5F
679                                * (pmeGpu->staging.h_virialAndEnergy[FEP_STATE_B][6]
680                                   - pmeGpu->staging.h_virialAndEnergy[FEP_STATE_A][6]);
681     }
682 }
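/* Reduction sketch (added for clarity, not part of the original source): with the raw per-grid
 * device outputs h[g][0..6], the loop above computes
 *
 *     E_recip      = sum_g scale_g * 0.5 * h[g][6]    (scale_A = 1 - lambda, scale_B = lambda)
 *     virial[i][j] = sum_g scale_g * 0.25 * h[g][k]   (k maps {xx,yy,zz,xy,xz,yz} to 0..5)
 *     dE/dlambda   = 0.5 * (h[B][6] - h[A][6])        (only when two grids are used)
 *
 * so in the non-FEP case (one grid, scale = 1) this reduces to E_recip = 0.5 * h[0][6].
 */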
683
684 /*! \brief Sets the force-related members in \p output
685  *
686  * \param[in]   pmeGpu      PME GPU data structure
687  * \param[out]  output      Pointer to PME output data structure
688  */
689 static void pme_gpu_getForceOutput(PmeGpu* pmeGpu, PmeOutput* output)
690 {
691     output->haveForceOutput_ = !pmeGpu->settings.useGpuForceReduction;
692     if (output->haveForceOutput_)
693     {
694         output->forces_ = pmeGpu->staging.h_forces;
695     }
696 }
697
698 PmeOutput pme_gpu_getOutput(const gmx_pme_t& pme, const bool computeEnergyAndVirial, const real lambdaQ)
699 {
700     PmeGpu* pmeGpu = pme.gpu;
701
702     PmeOutput output;
703
704     pme_gpu_getForceOutput(pmeGpu, &output);
705
706     if (computeEnergyAndVirial)
707     {
708         if (pme_gpu_settings(pmeGpu).performGPUSolve)
709         {
710             pme_gpu_getEnergyAndVirial(pme, lambdaQ, &output);
711         }
712         else
713         {
714             get_pme_ener_vir_q(pme.solve_work, pme.nthread, &output);
715         }
716     }
717     return output;
718 }
719
720 void pme_gpu_update_input_box(PmeGpu gmx_unused* pmeGpu, const matrix gmx_unused box)
721 {
722 #if GMX_DOUBLE
723     GMX_THROW(gmx::NotImplementedError("PME is implemented for single-precision only on GPU"));
724 #else
725     matrix scaledBox;
726     pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
727     auto* kernelParamsPtr              = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
728     kernelParamsPtr->current.boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
729     GMX_ASSERT(kernelParamsPtr->current.boxVolume != 0.0F, "Zero volume of the unit cell");
730     matrix recipBox;
731     gmx::invertBoxMatrix(scaledBox, recipBox);
732
733     /* The GPU recipBox is transposed as compared to the CPU recipBox.
734      * Spread uses matrix columns (while solve and gather use rows).
735      * There is no particular reason for this; it might be further rethought/optimized for better access patterns.
736      */
737     const real newRecipBox[DIM][DIM] = { { recipBox[XX][XX], recipBox[YY][XX], recipBox[ZZ][XX] },
738                                          { 0.0, recipBox[YY][YY], recipBox[ZZ][YY] },
739                                          { 0.0, 0.0, recipBox[ZZ][ZZ] } };
740     memcpy(kernelParamsPtr->current.recipBox, newRecipBox, sizeof(matrix));
741 #endif
742 }
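/* Transposition sketch (illustrative only): the assignment above is equivalent to copying the
 * transpose of the (triangular) CPU reciprocal box into the GPU layout, i.e.
 *
 *     for (int i = 0; i < DIM; i++)
 *     {
 *         for (int j = i; j < DIM; j++)
 *         {
 *             gpuRecipBox[i][j] = cpuRecipBox[j][i]; // elements below the diagonal stay zero
 *         }
 *     }
 *
 * where gpuRecipBox stands for kernelParamsPtr->current.recipBox and cpuRecipBox for recipBox.
 */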
743
744 /*! \brief \libinternal
745  * (Re-)initializes all the PME GPU data related to the grid size and cut-off.
746  *
747  * \param[in] pmeGpu            The PME GPU structure.
748  */
749 static void pme_gpu_reinit_grids(PmeGpu* pmeGpu)
750 {
751     auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
752
753     GMX_ASSERT(
754             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
755             "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
756
757     kernelParamsPtr->grid.ewaldFactor =
758             (M_PI * M_PI) / (pmeGpu->common->ewaldcoeff_q * pmeGpu->common->ewaldcoeff_q);
759     /* The grid size variants */
760     for (int i = 0; i < DIM; i++)
761     {
762         kernelParamsPtr->grid.realGridSize[i] = pmeGpu->common->nk[i];
763         kernelParamsPtr->grid.realGridSizeFP[i] =
764                 static_cast<float>(kernelParamsPtr->grid.realGridSize[i]);
765         kernelParamsPtr->grid.realGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
766
767         // The complex grid currently uses no padding;
768         // if it starts to do so, then another test should be added for that
769         kernelParamsPtr->grid.complexGridSize[i]       = kernelParamsPtr->grid.realGridSize[i];
770         kernelParamsPtr->grid.complexGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
771     }
772     /* FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
773     if (!pme_gpu_settings(pmeGpu).performGPUFFT)
774     {
775         // This lets the GPU spread grid and the CPU fftgrid share the same layout, so that the data can be copied directly
776         kernelParamsPtr->grid.realGridSizePadded[ZZ] =
777                 (kernelParamsPtr->grid.realGridSize[ZZ] / 2 + 1) * 2;
778     }
779     /* GPU FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
780     kernelParamsPtr->grid.complexGridSize[ZZ] /= 2;
781     kernelParamsPtr->grid.complexGridSize[ZZ]++;
782     kernelParamsPtr->grid.complexGridSizePadded[ZZ] = kernelParamsPtr->grid.complexGridSize[ZZ];
783
784     pme_gpu_realloc_and_copy_fract_shifts(pmeGpu);
785     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
786     {
787         pme_gpu_realloc_and_copy_bspline_values(pmeGpu, gridIndex);
788     }
789
790     pme_gpu_realloc_grids(pmeGpu);
791     pme_gpu_reinit_3dfft(pmeGpu);
792 }
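/* Worked example (an illustration, not original code): for a hypothetical real grid of
 * 64 x 64 x 48 points, the complex grid along the minor (ZZ) dimension becomes 48 / 2 + 1 = 25
 * points, i.e. a 64 x 64 x 25 complex grid. If the FFT runs on the CPU (performGPUFFT == false),
 * the padded real ZZ size is set to (48 / 2 + 1) * 2 = 50 floats, so that the GPU spread grid
 * matches the CPU fftgrid layout and can be copied directly.
 */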
793
794 /* Several GPU functions that refer to the CPU PME data live here.
795  * We would like to keep these away from the GPU-framework specific code for clarity,
796  * as well as to avoid compilation issues with MPI.
797  */
798
799 /*! \brief \libinternal
800  * Copies everything useful from the PME CPU to the PME GPU structure.
801  * The goal is to minimize interaction with the PME CPU structure in the GPU code.
802  *
803  * \param[in] pme         The PME structure.
804  */
805 static void pme_gpu_copy_common_data_from(const gmx_pme_t* pme)
806 {
807     /* TODO: Consider refactoring the CPU PME code to use the same structure,
808      * so that this function becomes 2 lines */
809     PmeGpu* pmeGpu               = pme->gpu;
810     pmeGpu->common->ngrids       = pme->bFEP_q ? 2 : 1;
811     pmeGpu->common->epsilon_r    = pme->epsilon_r;
812     pmeGpu->common->ewaldcoeff_q = pme->ewaldcoeff_q;
813     pmeGpu->common->nk[XX]       = pme->nkx;
814     pmeGpu->common->nk[YY]       = pme->nky;
815     pmeGpu->common->nk[ZZ]       = pme->nkz;
816     pmeGpu->common->pme_order    = pme->pme_order;
817     if (pmeGpu->common->pme_order != c_pmeGpuOrder)
818     {
819         GMX_THROW(gmx::NotImplementedError("pme_order != 4 is not implemented!"));
820     }
821     for (int i = 0; i < DIM; i++)
822     {
823         pmeGpu->common->bsp_mod[i].assign(pme->bsp_mod[i], pme->bsp_mod[i] + pmeGpu->common->nk[i]);
824     }
825     const int cellCount = c_pmeNeighborUnitcellCount;
826     pmeGpu->common->fsh.resize(0);
827     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshx, pme->fshx + cellCount * pme->nkx);
828     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshy, pme->fshy + cellCount * pme->nky);
829     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshz, pme->fshz + cellCount * pme->nkz);
830     pmeGpu->common->nn.resize(0);
831     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnx, pme->nnx + cellCount * pme->nkx);
832     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nny, pme->nny + cellCount * pme->nky);
833     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnz, pme->nnz + cellCount * pme->nkz);
834     pmeGpu->common->runMode       = pme->runMode;
835     pmeGpu->common->isRankPmeOnly = !pme->bPPnode;
836     pmeGpu->common->boxScaler     = pme->boxScaler;
837 }
838
839 /*! \libinternal \brief
840  * Uses heuristics to select the best-performing PME spread and gather kernels.
841  *
842  * \param[in,out] pmeGpu         The PME GPU structure.
843  */
844 static void pme_gpu_select_best_performing_pme_spreadgather_kernels(PmeGpu* pmeGpu)
845 {
846     if (GMX_GPU_CUDA && pmeGpu->kernelParams->atoms.nAtoms > c_pmeGpuPerformanceAtomLimit)
847     {
848         pmeGpu->settings.threadsPerAtom     = ThreadsPerAtom::Order;
849         pmeGpu->settings.recalculateSplines = true;
850     }
851     else
852     {
853         pmeGpu->settings.threadsPerAtom     = ThreadsPerAtom::OrderSquared;
854         pmeGpu->settings.recalculateSplines = false;
855     }
856 }
857
858
859 /*! \libinternal \brief
860  * Initializes the PME GPU data at the beginning of the run.
861  * TODO: this should become PmeGpu::PmeGpu()
862  *
863  * \param[in,out] pme            The PME structure.
864  * \param[in]     deviceContext  The GPU context.
865  * \param[in]     deviceStream   The GPU stream.
866  * \param[in,out] pmeGpuProgram  The handle to the program/kernel data created outside (e.g. in unit tests/runner)
867  */
868 static void pme_gpu_init(gmx_pme_t*           pme,
869                          const DeviceContext& deviceContext,
870                          const DeviceStream&  deviceStream,
871                          const PmeGpuProgram* pmeGpuProgram)
872 {
873     pme->gpu       = new PmeGpu();
874     PmeGpu* pmeGpu = pme->gpu;
875     changePinningPolicy(&pmeGpu->staging.h_forces, pme_get_pinning_policy());
876     pmeGpu->common = std::make_shared<PmeShared>();
877
878     /* These settings are set here for the whole run; dynamic ones are set in pme_gpu_reinit() */
879     /* A convenience variable. */
880     pmeGpu->settings.useDecomposition = (pme->nnodes != 1);
881     /* TODO: CPU gather with GPU spread is broken due to different theta/dtheta layout. */
882     pmeGpu->settings.performGPUGather = true;
883     // By default GPU-side reduction is off (explicitly set here for tests, otherwise reset per-step)
884     pmeGpu->settings.useGpuForceReduction = false;
885
886     pme_gpu_set_testing(pmeGpu, false);
887
888     GMX_ASSERT(pmeGpuProgram != nullptr, "GPU kernels must be already compiled");
889     pmeGpu->programHandle_ = pmeGpuProgram;
890
891     pmeGpu->initializedClfftLibrary_ = std::make_unique<gmx::ClfftInitializer>();
892
893     pme_gpu_init_internal(pmeGpu, deviceContext, deviceStream);
894
895     pme_gpu_copy_common_data_from(pme);
896     pme_gpu_alloc_energy_virial(pmeGpu);
897
898     GMX_ASSERT(pmeGpu->common->epsilon_r != 0.0F, "PME GPU: bad electrostatic coefficient");
899
900     auto* kernelParamsPtr               = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
901     kernelParamsPtr->constants.elFactor = gmx::c_one4PiEps0 / pmeGpu->common->epsilon_r;
902 }
903
904 void pme_gpu_get_real_grid_sizes(const PmeGpu* pmeGpu, gmx::IVec* gridSize, gmx::IVec* paddedGridSize)
905 {
906     GMX_ASSERT(gridSize != nullptr, "");
907     GMX_ASSERT(paddedGridSize != nullptr, "");
908     GMX_ASSERT(pmeGpu != nullptr, "");
909     auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
910     for (int i = 0; i < DIM; i++)
911     {
912         (*gridSize)[i]       = kernelParamsPtr->grid.realGridSize[i];
913         (*paddedGridSize)[i] = kernelParamsPtr->grid.realGridSizePadded[i];
914     }
915 }
916
917 void pme_gpu_reinit(gmx_pme_t*           pme,
918                     const DeviceContext* deviceContext,
919                     const DeviceStream*  deviceStream,
920                     const PmeGpuProgram* pmeGpuProgram)
921 {
922     GMX_ASSERT(pme != nullptr, "Need valid PME object");
923
924     if (!pme->gpu)
925     {
926         GMX_RELEASE_ASSERT(deviceContext != nullptr,
927                            "Device context can not be nullptr when setting up PME on GPU.");
928         GMX_RELEASE_ASSERT(deviceStream != nullptr,
929                            "Device stream can not be nullptr when setting up PME on GPU.");
930         /* First-time initialization */
931         pme_gpu_init(pme, *deviceContext, *deviceStream, pmeGpuProgram);
932     }
933     else
934     {
935         /* After this call nothing in the GPU code should refer to the gmx_pme_t *pme itself - until the next pme_gpu_reinit */
936         pme_gpu_copy_common_data_from(pme);
937     }
938     /* GPU FFT will only get used for a single rank.*/
939     pme->gpu->settings.performGPUFFT =
940             (pme->gpu->common->runMode == PmeRunMode::GPU) && !pme->gpu->settings.useDecomposition;
941     pme->gpu->settings.performGPUSolve = (pme->gpu->common->runMode == PmeRunMode::GPU);
942
943     /* Reinit active timers */
944     pme_gpu_reinit_timings(pme->gpu);
945
946     pme_gpu_reinit_grids(pme->gpu);
947     // Note: if timing the reinit launch overhead becomes more relevant
948     // (e.g. with regular PP-PME re-balancing), we should pass wcycle here.
949     pme_gpu_reinit_computation(pme, nullptr);
950     /* Clear the previous box - doesn't hurt, and forces the PME CPU recipbox
951      * update for mixed mode on grid switch. TODO: use shared recipbox field.
952      */
953     std::memset(pme->gpu->common->previousBox, 0, sizeof(pme->gpu->common->previousBox));
954 }
955
956 void pme_gpu_destroy(PmeGpu* pmeGpu)
957 {
958     /* Free lots of data */
959     pme_gpu_free_energy_virial(pmeGpu);
960     pme_gpu_free_bspline_values(pmeGpu);
961     pme_gpu_free_forces(pmeGpu);
962     pme_gpu_free_coefficients(pmeGpu);
963     pme_gpu_free_spline_data(pmeGpu);
964     pme_gpu_free_grid_indices(pmeGpu);
965     pme_gpu_free_fract_shifts(pmeGpu);
966     pme_gpu_free_grids(pmeGpu);
967
968     pme_gpu_destroy_3dfft(pmeGpu);
969
970     delete pmeGpu;
971 }
972
973 void pme_gpu_reinit_atoms(PmeGpu* pmeGpu, const int nAtoms, const real* chargesA, const real* chargesB)
974 {
975     auto* kernelParamsPtr         = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
976     kernelParamsPtr->atoms.nAtoms = nAtoms;
977     const int  block_size         = pme_gpu_get_atom_data_block_size();
978     const int  nAtomsNewPadded    = ((nAtoms + block_size - 1) / block_size) * block_size;
979     const bool haveToRealloc      = (pmeGpu->nAtomsAlloc < nAtomsNewPadded);
980     pmeGpu->nAtomsAlloc           = nAtomsNewPadded;
981
982 #if GMX_DOUBLE
983     GMX_RELEASE_ASSERT(false, "Only single precision supported");
984     GMX_UNUSED_VALUE(chargesA);
        GMX_UNUSED_VALUE(chargesB);
985 #else
986     int gridIndex = 0;
987     /* This could also be made conditional on haveToRealloc, but the copy always needs to be performed */
988     pme_gpu_realloc_and_copy_input_coefficients(pmeGpu, reinterpret_cast<const float*>(chargesA), gridIndex);
989     gridIndex++;
990     if (chargesB != nullptr)
991     {
992         pme_gpu_realloc_and_copy_input_coefficients(
993                 pmeGpu, reinterpret_cast<const float*>(chargesB), gridIndex);
994     }
995     else
996     {
997         /* Fill the second set of coefficients with chargesA as well to be able to avoid
998          * conditionals in the GPU kernels */
999         /* FIXME: This should be avoided by making a separate templated version of the
1000          * relevant kernel(s) (probably only pme_gather_kernel). That would require a
1001          * reduction of the current number of templated parameters of that kernel. */
1002         pme_gpu_realloc_and_copy_input_coefficients(
1003                 pmeGpu, reinterpret_cast<const float*>(chargesA), gridIndex);
1004     }
1005 #endif
1006
1007     if (haveToRealloc)
1008     {
1009         pme_gpu_realloc_forces(pmeGpu);
1010         pme_gpu_realloc_spline_data(pmeGpu);
1011         pme_gpu_realloc_grid_indices(pmeGpu);
1012     }
1013     pme_gpu_select_best_performing_pme_spreadgather_kernels(pmeGpu);
1014 }
1015
1016 /*! \internal \brief
1017  * Returns raw timing event from the corresponding GpuRegionTimer (if timings are enabled).
1018  * In CUDA, the result can be a nullptr stub, per the GpuRegionTimer implementation.
1019  *
1020  * \param[in] pmeGpu         The PME GPU data structure.
1021  * \param[in] PMEStageId     The PME GPU stage gtPME_ index from the enum in src/gromacs/timing/gpu_timing.h
1022  */
1023 static CommandEvent* pme_gpu_fetch_timing_event(const PmeGpu* pmeGpu, size_t PMEStageId)
1024 {
1025     CommandEvent* timingEvent = nullptr;
1026     if (pme_gpu_timings_enabled(pmeGpu))
1027     {
1028         GMX_ASSERT(PMEStageId < pmeGpu->archSpecific->timingEvents.size(),
1029                    "Wrong PME GPU timing event index");
1030         timingEvent = pmeGpu->archSpecific->timingEvents[PMEStageId].fetchNextEvent();
1031     }
1032     return timingEvent;
1033 }
1034
1035 void pme_gpu_3dfft(const PmeGpu* pmeGpu, gmx_fft_direction dir, const int grid_index)
1036 {
1037     int timerId = (dir == GMX_FFT_REAL_TO_COMPLEX) ? gtPME_FFT_R2C : gtPME_FFT_C2R;
1038
1039     pme_gpu_start_timing(pmeGpu, timerId);
1040     pmeGpu->archSpecific->fftSetup[grid_index]->perform3dFft(
1041             dir, pme_gpu_fetch_timing_event(pmeGpu, timerId));
1042     pme_gpu_stop_timing(pmeGpu, timerId);
1043 }
1044
1045 /*! \brief
1046  * Given possibly large \p blockCount, returns a compact 1D or 2D grid for kernel scheduling,
1047  * to minimize the number of unused blocks.
1048  */
1049 std::pair<int, int> inline pmeGpuCreateGrid(const PmeGpu* pmeGpu, int blockCount)
1050 {
1051     // How many maximum widths in X do we need (hopefully just one)
1052     const int minRowCount = (blockCount + pmeGpu->maxGridWidthX - 1) / pmeGpu->maxGridWidthX;
1053     // Trying to make things even
1054     const int colCount = (blockCount + minRowCount - 1) / minRowCount;
1055     GMX_ASSERT((colCount * minRowCount - blockCount) >= 0, "pmeGpuCreateGrid: totally wrong");
1056     GMX_ASSERT((colCount * minRowCount - blockCount) < minRowCount,
1057                "pmeGpuCreateGrid: excessive blocks");
1058     return std::pair<int, int>(colCount, minRowCount);
1059 }
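/* Worked example (illustrative, with made-up numbers): if maxGridWidthX were 65535 and blockCount
 * were 100000, then minRowCount = ceil(100000 / 65535) = 2 and colCount = ceil(100000 / 2) = 50000,
 * giving a 50000 x 2 launch grid with no wasted blocks. On devices whose X-dimension limit exceeds
 * blockCount, minRowCount is 1 and the grid stays one-dimensional.
 */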
1060
1061 /*! \brief
1062  * Returns a pointer to the appropriate spline-and-spread kernel based on the input parameters.
1063  *
1064  * \param[in]  pmeGpu                   The PME GPU structure.
1065  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
1066  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
1067  * \param[in]  numGrids                 Number of grids to use. numGrids == 2 if Coulomb is perturbed.
1068  *
1069  * \return Pointer to CUDA kernel
1070  */
1071 static auto selectSplineAndSpreadKernelPtr(const PmeGpu*  pmeGpu,
1072                                            ThreadsPerAtom threadsPerAtom,
1073                                            bool           writeSplinesToGlobal,
1074                                            const int      numGrids)
1075 {
1076     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1077     if (writeSplinesToGlobal)
1078     {
1079         if (threadsPerAtom == ThreadsPerAtom::Order)
1080         {
1081             if (numGrids == 2)
1082             {
1083                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesThPerAtom4Dual;
1084             }
1085             else
1086             {
1087                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesThPerAtom4Single;
1088             }
1089         }
1090         else
1091         {
1092             if (numGrids == 2)
1093             {
1094                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesDual;
1095             }
1096             else
1097             {
1098                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesSingle;
1099             }
1100         }
1101     }
1102     else
1103     {
1104         if (threadsPerAtom == ThreadsPerAtom::Order)
1105         {
1106             if (numGrids == 2)
1107             {
1108                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Dual;
1109             }
1110             else
1111             {
1112                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Single;
1113             }
1114         }
1115         else
1116         {
1117             if (numGrids == 2)
1118             {
1119                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelDual;
1120             }
1121             else
1122             {
1123                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelSingle;
1124             }
1125         }
1126     }
1127
1128     return kernelPtr;
1129 }
1130
1131 /*! \brief
1132  * Returns a pointer to the appropriate spline kernel based on the input parameters.
1133  *
1134  * \param[in]  pmeGpu                   The PME GPU structure.
1135  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
1136  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
1137  * \param[in]  numGrids                 Number of grids to use. numGrids == 2 if Coulomb is perturbed.
1138  *
1139  * \return Pointer to CUDA kernel
1140  */
1141 static auto selectSplineKernelPtr(const PmeGpu*  pmeGpu,
1142                                   ThreadsPerAtom threadsPerAtom,
1143                                   bool gmx_unused writeSplinesToGlobal,
1144                                   const int       numGrids)
1145 {
1146     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1147     GMX_ASSERT(
1148             writeSplinesToGlobal,
1149             "Spline data should always be written to global memory when just calculating splines");
1150
1151     if (threadsPerAtom == ThreadsPerAtom::Order)
1152     {
1153         if (numGrids == 2)
1154         {
1155             kernelPtr = pmeGpu->programHandle_->impl_->splineKernelThPerAtom4Dual;
1156         }
1157         else
1158         {
1159             kernelPtr = pmeGpu->programHandle_->impl_->splineKernelThPerAtom4Single;
1160         }
1161     }
1162     else
1163     {
1164         if (numGrids == 2)
1165         {
1166             kernelPtr = pmeGpu->programHandle_->impl_->splineKernelDual;
1167         }
1168         else
1169         {
1170             kernelPtr = pmeGpu->programHandle_->impl_->splineKernelSingle;
1171         }
1172     }
1173     return kernelPtr;
1174 }
1175
1176 /*! \brief
1177  * Returns a pointer to the appropriate spread kernel based on the input parameters.
1178  *
1179  * \param[in]  pmeGpu                   The PME GPU structure.
1180  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
1181  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
1182  * \param[in]  numGrids                 Number of grids to use. numGrids == 2 if Coulomb is perturbed.
1183  *
1184  * \return Pointer to CUDA kernel
1185  */
1186 static auto selectSpreadKernelPtr(const PmeGpu*  pmeGpu,
1187                                   ThreadsPerAtom threadsPerAtom,
1188                                   bool           writeSplinesToGlobal,
1189                                   const int      numGrids)
1190 {
1191     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1192     if (writeSplinesToGlobal)
1193     {
1194         if (threadsPerAtom == ThreadsPerAtom::Order)
1195         {
1196             if (numGrids == 2)
1197             {
1198                 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelThPerAtom4Dual;
1199             }
                 else
1200             {
1201                 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelThPerAtom4Single;
1202             }
1203         }
1204         else
1205         {
1206             if (numGrids == 2)
1207             {
1208                 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelDual;
1209             }
1210             else
1211             {
1212                 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelSingle;
1213             }
1214         }
1215     }
1216     else
1217     {
1218         /* If we are not saving the spline data, we need to recalculate it
1219            using the combined spline-and-spread kernel */
1220         if (threadsPerAtom == ThreadsPerAtom::Order)
1221         {
1222             if (numGrids == 2)
1223             {
1224                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Dual;
1225             }
1226             else
1227             {
1228                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Single;
1229             }
1230         }
1231         else
1232         {
1233             if (numGrids == 2)
1234             {
1235                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelDual;
1236             }
1237             else
1238             {
1239                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelSingle;
1240             }
1241         }
1242     }
1243     return kernelPtr;
1244 }
1245
1246 void pme_gpu_spread(const PmeGpu*         pmeGpu,
1247                     GpuEventSynchronizer* xReadyOnDevice,
1248                     real**                h_grids,
1249                     bool                  computeSplines,
1250                     bool                  spreadCharges,
1251                     const real            lambda)
1252 {
1253     GMX_ASSERT(
1254             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
1255             "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
1256
1257     GMX_ASSERT(computeSplines || spreadCharges,
1258                "PME spline/spread kernel has invalid input (nothing to do)");
1259     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1260     GMX_ASSERT(kernelParamsPtr->atoms.nAtoms > 0, "No atom data in PME GPU spread");
1261
1262     const size_t blockSize = pmeGpu->programHandle_->impl_->spreadWorkGroupSize;
1263
1264     const int order = pmeGpu->common->pme_order;
1265     GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1266     const bool writeGlobal = pmeGpu->settings.copyAllOutputs;
1267     const int  threadsPerAtom =
1268             (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
1269     const bool recalculateSplines = pmeGpu->settings.recalculateSplines;
1270
1271     GMX_ASSERT(!GMX_GPU_OPENCL || pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::OrderSquared,
1272                "Only 16 threads per atom supported in OpenCL");
1273     GMX_ASSERT(!GMX_GPU_OPENCL || !recalculateSplines,
1274                "Recalculating splines not supported in OpenCL");
1275
1276     const int atomsPerBlock = blockSize / threadsPerAtom;
1277
1278     // TODO: pick smaller block size in runtime if needed
1279     // (e.g. on 660 Ti where 50% occupancy is ~25% faster than 100% occupancy with RNAse (~17.8k atoms))
1280     // If doing so, change atomsPerBlock in the kernels as well.
1281     // TODO: test varying block sizes on modern arch-s as well
1282     // TODO: also consider using cudaFuncSetCacheConfig() for preferring shared memory on older architectures
1283     // (for spline data mostly)
1284     GMX_ASSERT(!(c_pmeAtomDataBlockSize % atomsPerBlock),
1285                "inconsistent atom data padding vs. spreading block size");
1286
1287     // Ensure that coordinates are ready on the device before launching spread;
1288     // only needed with CUDA on PP+PME ranks; not needed on separate PME ranks, in unit tests,
1289     // or in OpenCL, as these cases use a single stream (hence xReadyOnDevice == nullptr).
1290     GMX_ASSERT(!GMX_GPU_CUDA || xReadyOnDevice != nullptr || pmeGpu->common->isRankPmeOnly
1291                        || pme_gpu_settings(pmeGpu).copyAllOutputs,
1292                "Need a valid coordinate synchronizer on PP+PME ranks with CUDA.");
1293
1294     if (xReadyOnDevice)
1295     {
1296         xReadyOnDevice->enqueueWaitEvent(pmeGpu->archSpecific->pmeStream_);
1297     }
1298
1299     const int blockCount = pmeGpu->nAtomsAlloc / atomsPerBlock;
1300     auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1301
1302     if (pmeGpu->common->ngrids == 1)
1303     {
1304         kernelParamsPtr->current.scale = 1.0;
1305     }
1306     else
1307     {
1308         kernelParamsPtr->current.scale = 1.0 - lambda;
1309     }
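
    // Worked example of the scaling above: in a perturbed system (ngrids == 2) with lambda == 0.25,
    // current.scale is set to 0.75, i.e. the weight of the state-A charges; the kernels can derive
    // the state-B weight (lambda) from it.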
1310
1311     KernelLaunchConfig config;
1312     config.blockSize[0] = order;
1313     config.blockSize[1] = (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? 1 : order);
1314     config.blockSize[2] = atomsPerBlock;
1315     config.gridSize[0]  = dimGrid.first;
1316     config.gridSize[1]  = dimGrid.second;
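
    // Illustrative launch shape under the same assumptions as above (order 4, order*order threads
    // per atom, 16 atoms per block): blockSize == {4, 4, 16}, i.e. 256 threads per block, with
    // gridSize[0] * gridSize[1] blocks covering the nAtomsAlloc padded atoms.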
1317
1318     int                                timingId;
1319     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1320     if (computeSplines)
1321     {
1322         if (spreadCharges)
1323         {
1324             timingId  = gtPME_SPLINEANDSPREAD;
1325             kernelPtr = selectSplineAndSpreadKernelPtr(pmeGpu,
1326                                                        pmeGpu->settings.threadsPerAtom,
1327                                                        writeGlobal || (!recalculateSplines),
1328                                                        pmeGpu->common->ngrids);
1329         }
1330         else
1331         {
1332             timingId  = gtPME_SPLINE;
1333             kernelPtr = selectSplineKernelPtr(pmeGpu,
1334                                               pmeGpu->settings.threadsPerAtom,
1335                                               writeGlobal || (!recalculateSplines),
1336                                               pmeGpu->common->ngrids);
1337         }
1338     }
1339     else
1340     {
1341         timingId  = gtPME_SPREAD;
1342         kernelPtr = selectSpreadKernelPtr(pmeGpu,
1343                                           pmeGpu->settings.threadsPerAtom,
1344                                           writeGlobal || (!recalculateSplines),
1345                                           pmeGpu->common->ngrids);
1346     }
1347
1348
1349     pme_gpu_start_timing(pmeGpu, timingId);
1350     auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1351 #if c_canEmbedBuffers
1352     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1353 #else
1354     const auto kernelArgs =
1355             prepareGpuKernelArguments(kernelPtr,
1356                                       config,
1357                                       kernelParamsPtr,
1358                                       &kernelParamsPtr->atoms.d_theta,
1359                                       &kernelParamsPtr->atoms.d_dtheta,
1360                                       &kernelParamsPtr->atoms.d_gridlineIndices,
1361                                       &kernelParamsPtr->grid.d_realGrid[FEP_STATE_A],
1362                                       &kernelParamsPtr->grid.d_realGrid[FEP_STATE_B],
1363                                       &kernelParamsPtr->grid.d_fractShiftsTable,
1364                                       &kernelParamsPtr->grid.d_gridlineIndicesTable,
1365                                       &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_A],
1366                                       &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_B],
1367                                       &kernelParamsPtr->atoms.d_coordinates);
1368 #endif
1369
1370     launchGpuKernel(
1371             kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME spline/spread", kernelArgs);
1372     pme_gpu_stop_timing(pmeGpu, timingId);
1373
1374     const auto& settings    = pmeGpu->settings;
1375     const bool copyBackGrid = spreadCharges && (!settings.performGPUFFT || settings.copyAllOutputs);
1376     if (copyBackGrid)
1377     {
1378         for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
1379         {
1380             float* h_grid = h_grids[gridIndex];
1381             pme_gpu_copy_output_spread_grid(pmeGpu, h_grid, gridIndex);
1382         }
1383     }
1384     const bool copyBackAtomData =
1385             computeSplines && (!settings.performGPUGather || settings.copyAllOutputs);
1386     if (copyBackAtomData)
1387     {
1388         pme_gpu_copy_output_spread_atom_data(pmeGpu);
1389     }
1390 }
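
// A sketch of where pme_gpu_spread() fits in the per-step PME GPU pipeline (the 3D-FFT stages
// are implemented elsewhere; the ordering below is only for orientation):
//
//   pme_gpu_spread(...);          // spline computation and/or charge spreading onto the real grid
//   // 3D FFT, real -> complex    // on the GPU when settings.performGPUFFT is set
//   pme_gpu_solve(...);           // reciprocal-space solve, optionally energy and virial
//   // 3D FFT, complex -> real
//   pme_gpu_gather(...);          // gather forces from the real grid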
1391
1392 void pme_gpu_solve(const PmeGpu* pmeGpu,
1393                    const int     gridIndex,
1394                    t_complex*    h_grid,
1395                    GridOrdering  gridOrdering,
1396                    bool          computeEnergyAndVirial)
1397 {
1398     GMX_ASSERT(
1399             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
1400             "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
1401     GMX_ASSERT(gridIndex < pmeGpu->common->ngrids,
1402                "Invalid combination of gridIndex and number of grids");
1403
1404     const auto& settings               = pmeGpu->settings;
1405     const bool  copyInputAndOutputGrid = !settings.performGPUFFT || settings.copyAllOutputs;
1406
1407     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1408
1409     float* h_gridFloat = reinterpret_cast<float*>(h_grid);
1410     if (copyInputAndOutputGrid)
1411     {
1412         copyToDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid[gridIndex],
1413                            h_gridFloat,
1414                            0,
1415                            pmeGpu->archSpecific->complexGridSize[gridIndex],
1416                            pmeGpu->archSpecific->pmeStream_,
1417                            pmeGpu->settings.transferKind,
1418                            nullptr);
1419     }
1420
1421     int majorDim = -1, middleDim = -1, minorDim = -1;
1422     switch (gridOrdering)
1423     {
1424         case GridOrdering::YZX:
1425             majorDim  = YY;
1426             middleDim = ZZ;
1427             minorDim  = XX;
1428             break;
1429
1430         case GridOrdering::XYZ:
1431             majorDim  = XX;
1432             middleDim = YY;
1433             minorDim  = ZZ;
1434             break;
1435
1436         default: GMX_ASSERT(false, "Implement grid ordering here and below for the kernel launch");
1437     }
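
    // Under either ordering, minorDim is the fastest-varying (innermost) dimension, so the grid
    // lines processed below run along it: e.g. for YZX the lines run along X, with Y as the
    // slowest-varying (major) and Z as the middle dimension.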
1438
1439     const int maxBlockSize = pmeGpu->programHandle_->impl_->solveMaxWorkGroupSize;
1440
1441     const int gridLineSize      = pmeGpu->kernelParams->grid.complexGridSize[minorDim];
1442     const int gridLinesPerBlock = std::max(maxBlockSize / gridLineSize, 1);
1443     const int blocksPerGridLine = (gridLineSize + maxBlockSize - 1) / maxBlockSize;
1444     int       cellsPerBlock;
1445     if (blocksPerGridLine == 1)
1446     {
1447         cellsPerBlock = gridLineSize * gridLinesPerBlock;
1448     }
1449     else
1450     {
1451         cellsPerBlock = (gridLineSize + blocksPerGridLine - 1) / blocksPerGridLine;
1452     }
1453     const int warpSize  = pmeGpu->programHandle_->warpSize();
1454     // round the block size up to full warps so that shuffle operations produce defined results
1455     const int blockSize = (cellsPerBlock + warpSize - 1) / warpSize * warpSize;
1456
1457     static_assert(!GMX_GPU_CUDA || c_solveMaxWarpsPerBlock / 2 >= 4,
1458                   "The CUDA solve energy kernels need at least 4 warps. "
1459                   "Here we launch at least half of the max warps.");
1460
1461     KernelLaunchConfig config;
1462     config.blockSize[0] = blockSize;
1463     config.gridSize[0]  = blocksPerGridLine;
1464     config.gridSize[1] = (pmeGpu->kernelParams->grid.complexGridSize[middleDim] + gridLinesPerBlock - 1)
1465                          / gridLinesPerBlock;
1466     config.gridSize[2] = pmeGpu->kernelParams->grid.complexGridSize[majorDim];
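
    // Worked example of the decomposition above (grid dimensions are illustrative assumptions):
    // for gridLineSize == 50 complex cells and maxBlockSize == 256, gridLinesPerBlock == 5 and
    // blocksPerGridLine == 1, so cellsPerBlock == 250 and blockSize is rounded up to a whole
    // number of warps, i.e. 256 threads for a warp size of 32.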
1467
1468     int                                timingId  = gtPME_SOLVE;
1469     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1470     if (gridOrdering == GridOrdering::YZX)
1471     {
1472         if (gridIndex == 0)
1473         {
1474             kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveYZXEnergyKernelA
1475                                                : pmeGpu->programHandle_->impl_->solveYZXKernelA;
1476         }
1477         else
1478         {
1479             kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveYZXEnergyKernelB
1480                                                : pmeGpu->programHandle_->impl_->solveYZXKernelB;
1481         }
1482     }
1483     else if (gridOrdering == GridOrdering::XYZ)
1484     {
1485         if (gridIndex == 0)
1486         {
1487             kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveXYZEnergyKernelA
1488                                                : pmeGpu->programHandle_->impl_->solveXYZKernelA;
1489         }
1490         else
1491         {
1492             kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveXYZEnergyKernelB
1493                                                : pmeGpu->programHandle_->impl_->solveXYZKernelB;
1494         }
1495     }
1496
1497     pme_gpu_start_timing(pmeGpu, timingId);
1498     auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1499 #if c_canEmbedBuffers
1500     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1501 #else
1502     const auto kernelArgs =
1503             prepareGpuKernelArguments(kernelPtr,
1504                                       config,
1505                                       kernelParamsPtr,
1506                                       &kernelParamsPtr->grid.d_splineModuli[gridIndex],
1507                                       &kernelParamsPtr->constants.d_virialAndEnergy[gridIndex],
1508                                       &kernelParamsPtr->grid.d_fourierGrid[gridIndex]);
1509 #endif
1510     launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME solve", kernelArgs);
1511     pme_gpu_stop_timing(pmeGpu, timingId);
1512
1513     if (computeEnergyAndVirial)
1514     {
1515         copyFromDeviceBuffer(pmeGpu->staging.h_virialAndEnergy[gridIndex],
1516                              &kernelParamsPtr->constants.d_virialAndEnergy[gridIndex],
1517                              0,
1518                              c_virialAndEnergyCount,
1519                              pmeGpu->archSpecific->pmeStream_,
1520                              pmeGpu->settings.transferKind,
1521                              nullptr);
1522     }
1523
1524     if (copyInputAndOutputGrid)
1525     {
1526         copyFromDeviceBuffer(h_gridFloat,
1527                              &kernelParamsPtr->grid.d_fourierGrid[gridIndex],
1528                              0,
1529                              pmeGpu->archSpecific->complexGridSize[gridIndex],
1530                              pmeGpu->archSpecific->pmeStream_,
1531                              pmeGpu->settings.transferKind,
1532                              nullptr);
1533     }
1534 }
1535
1536 /*! \brief
1537  * Returns a pointer to the appropriate gather kernel based on the input parameters
1538  *
1539  * \param[in]  pmeGpu                   The PME GPU structure.
1540  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
1541  * \param[in]  readSplinesFromGlobal    bool controlling if we should read the spline data from global memory
1542  * \param[in]  numGrids                 Number of grids to use. numGrids == 2 if Coulomb is perturbed.
1543  *
1544  * \return Pointer to CUDA kernel
1545  */
1546 inline auto selectGatherKernelPtr(const PmeGpu*  pmeGpu,
1547                                   ThreadsPerAtom threadsPerAtom,
1548                                   bool           readSplinesFromGlobal,
1549                                   const int      numGrids)
1550
1551 {
1552     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1553
1554     if (readSplinesFromGlobal)
1555     {
1556         if (threadsPerAtom == ThreadsPerAtom::Order)
1557         {
1558             if (numGrids == 2)
1559             {
1560                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesThPerAtom4Dual;
1561             }
1562             else
1563             {
1564                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesThPerAtom4Single;
1565             }
1566         }
1567         else
1568         {
1569             if (numGrids == 2)
1570             {
1571                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesDual;
1572             }
1573             else
1574             {
1575                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesSingle;
1576             }
1577         }
1578     }
1579     else
1580     {
1581         if (threadsPerAtom == ThreadsPerAtom::Order)
1582         {
1583             if (numGrids == 2)
1584             {
1585                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelThPerAtom4Dual;
1586             }
1587             else
1588             {
1589                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelThPerAtom4Single;
1590             }
1591         }
1592         else
1593         {
1594             if (numGrids == 2)
1595             {
1596                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelDual;
1597             }
1598             else
1599             {
1600                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelSingle;
1601             }
1602         }
1603     }
1604     return kernelPtr;
1605 }
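
// As for the spread selection above, a sketch of the mapping (not a call made in this file):
// selectGatherKernelPtr(pmeGpu, ThreadsPerAtom::OrderSquared, true, 1) yields
// gatherKernelReadSplinesSingle; passing false for readSplinesFromGlobal selects the variants
// that recompute the splines, and numGrids == 2 selects the corresponding *Dual variants.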
1606
1607 void pme_gpu_gather(PmeGpu* pmeGpu, real** h_grids, const float lambda)
1608 {
1609     GMX_ASSERT(
1610             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
1611             "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
1612
1613     const auto& settings = pmeGpu->settings;
1614
1615     if (!settings.performGPUFFT || settings.copyAllOutputs)
1616     {
1617         for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
1618         {
1619             float* h_grid = const_cast<float*>(h_grids[gridIndex]);
1620             pme_gpu_copy_input_gather_grid(pmeGpu, h_grid, gridIndex);
1621         }
1622     }
1623
1624     if (settings.copyAllOutputs)
1625     {
1626         pme_gpu_copy_input_gather_atom_data(pmeGpu);
1627     }
1628
1629     /* copyAllOutputs (and hence readGlobal) is only set when running unit tests */
1630     const bool   readGlobal = pmeGpu->settings.copyAllOutputs;
1631     const size_t blockSize  = pmeGpu->programHandle_->impl_->gatherWorkGroupSize;
1632     const int    order      = pmeGpu->common->pme_order;
1633     GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1634     const int threadsPerAtom =
1635             (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
1636     const bool recalculateSplines = pmeGpu->settings.recalculateSplines;
1637
1638     GMX_ASSERT(!GMX_GPU_OPENCL || pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::OrderSquared,
1639                "Only 16 threads per atom supported in OpenCL");
1640     GMX_ASSERT(!GMX_GPU_OPENCL || !recalculateSplines,
1641                "Recalculating splines not supported in OpenCL");
1642
1643     const int atomsPerBlock = blockSize / threadsPerAtom;
1644
1645     GMX_ASSERT(!(c_pmeAtomDataBlockSize % atomsPerBlock),
1646                "inconsistent atom data padding vs. gathering block size");
1647
1648     const int blockCount = pmeGpu->nAtomsAlloc / atomsPerBlock;
1649     auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1650
1651     KernelLaunchConfig config;
1652     config.blockSize[0] = order;
1653     config.blockSize[1] = (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? 1 : order);
1654     config.blockSize[2] = atomsPerBlock;
1655     config.gridSize[0]  = dimGrid.first;
1656     config.gridSize[1]  = dimGrid.second;
1657
1658     // TODO test different cache configs
1659
1660     int                                timingId = gtPME_GATHER;
1661     PmeGpuProgramImpl::PmeKernelHandle kernelPtr =
1662             selectGatherKernelPtr(pmeGpu,
1663                                   pmeGpu->settings.threadsPerAtom,
1664                                   readGlobal || (!recalculateSplines),
1665                                   pmeGpu->common->ngrids);
1666     // TODO design kernel selection getters and make PmeGpu a friend of PmeGpuProgramImpl
1667
1668     pme_gpu_start_timing(pmeGpu, timingId);
1669     auto* timingEvent     = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1670     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1671     if (pmeGpu->common->ngrids == 1)
1672     {
1673         kernelParamsPtr->current.scale = 1.0;
1674     }
1675     else
1676     {
1677         kernelParamsPtr->current.scale = 1.0 - lambda;
1678     }
1679
1680 #if c_canEmbedBuffers
1681     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1682 #else
1683     const auto kernelArgs =
1684             prepareGpuKernelArguments(kernelPtr,
1685                                       config,
1686                                       kernelParamsPtr,
1687                                       &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_A],
1688                                       &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_B],
1689                                       &kernelParamsPtr->grid.d_realGrid[FEP_STATE_A],
1690                                       &kernelParamsPtr->grid.d_realGrid[FEP_STATE_B],
1691                                       &kernelParamsPtr->atoms.d_theta,
1692                                       &kernelParamsPtr->atoms.d_dtheta,
1693                                       &kernelParamsPtr->atoms.d_gridlineIndices,
1694                                       &kernelParamsPtr->atoms.d_forces);
1695 #endif
1696     launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME gather", kernelArgs);
1697     pme_gpu_stop_timing(pmeGpu, timingId);
1698
1699     if (pmeGpu->settings.useGpuForceReduction)
1700     {
1701         pmeGpu->archSpecific->pmeForcesReady.markEvent(pmeGpu->archSpecific->pmeStream_);
1702     }
1703     else
1704     {
1705         pme_gpu_copy_output_forces(pmeGpu);
1706     }
1707 }
1708
1709 void* pme_gpu_get_kernelparam_forces(const PmeGpu* pmeGpu)
1710 {
1711     if (pmeGpu && pmeGpu->kernelParams)
1712     {
1713         return pmeGpu->kernelParams->atoms.d_forces;
1714     }
1715     else
1716     {
1717         return nullptr;
1718     }
1719 }
1720
1721 void pme_gpu_set_kernelparam_coordinates(const PmeGpu* pmeGpu, DeviceBuffer<gmx::RVec> d_x)
1722 {
1723     GMX_ASSERT(pmeGpu && pmeGpu->kernelParams,
1724                "PME GPU device buffer can not be set in non-GPU builds or before the GPU PME was "
1725                "initialized.");
1726
1727     GMX_ASSERT(checkDeviceBuffer(d_x, pmeGpu->kernelParams->atoms.nAtoms),
1728                "The device-side buffer can not be set.");
1729
1730     pmeGpu->kernelParams->atoms.d_coordinates = d_x;
1731 }
1732
1733 GpuEventSynchronizer* pme_gpu_get_forces_ready_synchronizer(const PmeGpu* pmeGpu)
1734 {
1735     if (pmeGpu && pmeGpu->kernelParams)
1736     {
1737         return &pmeGpu->archSpecific->pmeForcesReady;
1738     }
1739     else
1740     {
1741         return nullptr;
1742     }
1743 }
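
// A minimal usage sketch for the two accessors above (consumerStream and reduceForces are
// hypothetical names, not part of this file):
//
//   void*                 d_pmeForces    = pme_gpu_get_kernelparam_forces(pmeGpu);
//   GpuEventSynchronizer* pmeForcesReady = pme_gpu_get_forces_ready_synchronizer(pmeGpu);
//   if (d_pmeForces != nullptr && pmeForcesReady != nullptr)
//   {
//       pmeForcesReady->enqueueWaitEvent(consumerStream); // wait for the gather kernel's forces
//       reduceForces(consumerStream, d_pmeForces);        // e.g. a GPU force-buffer reduction
//   }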