[alexxy/gromacs.git] / src / gromacs / ewald / pme_gpu_internal.cpp
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2016,2017,2018,2019,2020, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35
36 /*! \internal \file
37  *
38  * \brief This file contains internal function implementations
39  * for performing the PME calculations on GPU.
40  *
41  * Note that this file is compiled as regular C++ source in OpenCL builds, but
42  * it is treated as CUDA source in CUDA-enabled GPU builds.
43  *
44  * \author Aleksei Iupinov <a.yupinov@gmail.com>
45  * \ingroup module_ewald
46  */
47
48 #include "gmxpre.h"
49
50 #include "pme_gpu_internal.h"
51
52 #include "config.h"
53
54 #include <list>
55 #include <memory>
56 #include <string>
57
58 #include "gromacs/ewald/ewald_utils.h"
59 #include "gromacs/gpu_utils/gpu_utils.h"
60 #include "gromacs/math/invertmatrix.h"
61 #include "gromacs/math/units.h"
62 #include "gromacs/timing/gpu_timing.h"
63 #include "gromacs/utility/exceptions.h"
64 #include "gromacs/utility/fatalerror.h"
65 #include "gromacs/utility/gmxassert.h"
66 #include "gromacs/utility/logger.h"
67 #include "gromacs/utility/stringutil.h"
68
69 #if GMX_GPU == GMX_GPU_CUDA
70 #    include "gromacs/gpu_utils/pmalloc_cuda.h"
71
72 #    include "pme.cuh"
73 #elif GMX_GPU == GMX_GPU_OPENCL
74 #    include "gromacs/gpu_utils/gmxopencl.h"
75 #endif
76
77 #include "gromacs/ewald/pme.h"
78
79 #include "pme_gpu_3dfft.h"
80 #include "pme_gpu_calculate_splines.h"
81 #include "pme_gpu_constants.h"
82 #include "pme_gpu_program_impl.h"
83 #include "pme_gpu_timings.h"
84 #include "pme_gpu_types.h"
85 #include "pme_gpu_types_host.h"
86 #include "pme_gpu_types_host_impl.h"
87 #include "pme_grid.h"
88 #include "pme_internal.h"
89 #include "pme_solve.h"
90
91 /*! \brief
92  * CUDA only.
93  * Atom limit above which it is advantageous to recalculate the splines in the gather
94  * and to use fewer threads per atom in the spline and spread kernels.
95  */
96 constexpr int c_pmeGpuPerformanceAtomLimit = 23000;
97
98 /*! \internal \brief
99  * Wrapper for getting a pointer to the plain C++ part of the GPU kernel parameters structure.
100  *
101  * \param[in] pmeGpu  The PME GPU structure.
102  * \returns The pointer to the kernel parameters.
103  */
104 static PmeGpuKernelParamsBase* pme_gpu_get_kernel_params_base_ptr(const PmeGpu* pmeGpu)
105 {
106     // reinterpret_cast is needed because the derived CUDA structure is not known in this file
107     auto* kernelParamsPtr = reinterpret_cast<PmeGpuKernelParamsBase*>(pmeGpu->kernelParams.get());
108     return kernelParamsPtr;
109 }
110
111 int pme_gpu_get_atom_data_alignment(const PmeGpu* /*unused*/)
112 {
113     // TODO: this can be simplified, as c_pmeAtomDataAlignment is now constant
114     if (c_usePadding)
115     {
116         return c_pmeAtomDataAlignment;
117     }
118     else
119     {
120         return 0;
121     }
122 }
123
124 int pme_gpu_get_atoms_per_warp(const PmeGpu* pmeGpu)
125 {
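    // Illustrative numbers (assuming a 32-wide warp and the usual order-4 constants:
    // 16 threads per atom by default, 4 in the order-threads-per-atom mode):
    // this returns 32 / 16 = 2 or 32 / 4 = 8 atoms per warp, respectively.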
126     if (pmeGpu->settings.useOrderThreadsPerAtom)
127     {
128         return pmeGpu->programHandle_->impl_->warpSize / c_pmeSpreadGatherThreadsPerAtom4ThPerAtom;
129     }
130     else
131     {
132         return pmeGpu->programHandle_->impl_->warpSize / c_pmeSpreadGatherThreadsPerAtom;
133     }
134 }
135
136 void pme_gpu_synchronize(const PmeGpu* pmeGpu)
137 {
138     gpuStreamSynchronize(pmeGpu->archSpecific->pmeStream);
139 }
140
141 void pme_gpu_alloc_energy_virial(PmeGpu* pmeGpu)
142 {
143     const size_t energyAndVirialSize = c_virialAndEnergyCount * sizeof(float);
144     allocateDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy, c_virialAndEnergyCount,
145                          pmeGpu->archSpecific->deviceContext_);
146     pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_virialAndEnergy), energyAndVirialSize);
147 }
148
149 void pme_gpu_free_energy_virial(PmeGpu* pmeGpu)
150 {
151     freeDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy);
152     pfree(pmeGpu->staging.h_virialAndEnergy);
153     pmeGpu->staging.h_virialAndEnergy = nullptr;
154 }
155
156 void pme_gpu_clear_energy_virial(const PmeGpu* pmeGpu)
157 {
158     clearDeviceBufferAsync(&pmeGpu->kernelParams->constants.d_virialAndEnergy, 0,
159                            c_virialAndEnergyCount, pmeGpu->archSpecific->pmeStream);
160 }
161
162 void pme_gpu_realloc_and_copy_bspline_values(PmeGpu* pmeGpu)
163 {
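    // The B-spline moduli for the three dimensions are packed back-to-back into a single
    // buffer of (nx + ny + nz) floats; splineValuesOffset records where each dimension starts.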
164     const int splineValuesOffset[DIM] = { 0, pmeGpu->kernelParams->grid.realGridSize[XX],
165                                           pmeGpu->kernelParams->grid.realGridSize[XX]
166                                                   + pmeGpu->kernelParams->grid.realGridSize[YY] };
167     memcpy(&pmeGpu->kernelParams->grid.splineValuesOffset, &splineValuesOffset, sizeof(splineValuesOffset));
168
169     const int newSplineValuesSize = pmeGpu->kernelParams->grid.realGridSize[XX]
170                                     + pmeGpu->kernelParams->grid.realGridSize[YY]
171                                     + pmeGpu->kernelParams->grid.realGridSize[ZZ];
172     const bool shouldRealloc = (newSplineValuesSize > pmeGpu->archSpecific->splineValuesSize);
173     reallocateDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, newSplineValuesSize,
174                            &pmeGpu->archSpecific->splineValuesSize,
175                            &pmeGpu->archSpecific->splineValuesSizeAlloc,
176                            pmeGpu->archSpecific->deviceContext_);
177     if (shouldRealloc)
178     {
179         /* Reallocate the host buffer */
180         pfree(pmeGpu->staging.h_splineModuli);
181         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_splineModuli),
182                 newSplineValuesSize * sizeof(float));
183     }
184     for (int i = 0; i < DIM; i++)
185     {
186         memcpy(pmeGpu->staging.h_splineModuli + splineValuesOffset[i],
187                pmeGpu->common->bsp_mod[i].data(), pmeGpu->common->bsp_mod[i].size() * sizeof(float));
188     }
189     /* TODO: pin original buffer instead! */
190     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, pmeGpu->staging.h_splineModuli,
191                        0, newSplineValuesSize, pmeGpu->archSpecific->pmeStream,
192                        pmeGpu->settings.transferKind, nullptr);
193 }
194
195 void pme_gpu_free_bspline_values(const PmeGpu* pmeGpu)
196 {
197     pfree(pmeGpu->staging.h_splineModuli);
198     freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli);
199 }
200
201 void pme_gpu_realloc_forces(PmeGpu* pmeGpu)
202 {
203     const size_t newForcesSize = pmeGpu->nAtomsAlloc * DIM;
204     GMX_ASSERT(newForcesSize > 0, "Bad number of atoms in PME GPU");
205     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, newForcesSize,
206                            &pmeGpu->archSpecific->forcesSize, &pmeGpu->archSpecific->forcesSizeAlloc,
207                            pmeGpu->archSpecific->deviceContext_);
208     pmeGpu->staging.h_forces.reserveWithPadding(pmeGpu->nAtomsAlloc);
209     pmeGpu->staging.h_forces.resizeWithPadding(pmeGpu->kernelParams->atoms.nAtoms);
210 }
211
212 void pme_gpu_free_forces(const PmeGpu* pmeGpu)
213 {
214     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces);
215 }
216
217 void pme_gpu_copy_input_forces(PmeGpu* pmeGpu)
218 {
219     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
220     float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
221     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, h_forcesFloat, 0,
222                        DIM * pmeGpu->kernelParams->atoms.nAtoms, pmeGpu->archSpecific->pmeStream,
223                        pmeGpu->settings.transferKind, nullptr);
224 }
225
226 void pme_gpu_copy_output_forces(PmeGpu* pmeGpu)
227 {
228     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
229     float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
230     copyFromDeviceBuffer(h_forcesFloat, &pmeGpu->kernelParams->atoms.d_forces, 0,
231                          DIM * pmeGpu->kernelParams->atoms.nAtoms, pmeGpu->archSpecific->pmeStream,
232                          pmeGpu->settings.transferKind, nullptr);
233 }
234
235 void pme_gpu_realloc_and_copy_input_coefficients(PmeGpu* pmeGpu, const float* h_coefficients)
236 {
237     GMX_ASSERT(h_coefficients, "Bad host-side charge buffer in PME GPU");
238     const size_t newCoefficientsSize = pmeGpu->nAtomsAlloc;
239     GMX_ASSERT(newCoefficientsSize > 0, "Bad number of atoms in PME GPU");
240     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients, newCoefficientsSize,
241                            &pmeGpu->archSpecific->coefficientsSize,
242                            &pmeGpu->archSpecific->coefficientsSizeAlloc,
243                            pmeGpu->archSpecific->deviceContext_);
244     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients,
245                        const_cast<float*>(h_coefficients), 0, pmeGpu->kernelParams->atoms.nAtoms,
246                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
247     if (c_usePadding)
248     {
249         const size_t paddingIndex = pmeGpu->kernelParams->atoms.nAtoms;
250         const size_t paddingCount = pmeGpu->nAtomsAlloc - paddingIndex;
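        // Zero the padding tail of the charge buffer (indices nAtoms..nAtomsAlloc-1),
        // so that the padded, non-existent atoms carry zero charge.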
251         if (paddingCount > 0)
252         {
253             clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coefficients, paddingIndex,
254                                    paddingCount, pmeGpu->archSpecific->pmeStream);
255         }
256     }
257 }
258
259 void pme_gpu_free_coefficients(const PmeGpu* pmeGpu)
260 {
261     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients);
262 }
263
264 void pme_gpu_realloc_spline_data(PmeGpu* pmeGpu)
265 {
266     const int    order        = pmeGpu->common->pme_order;
267     const int    alignment    = pme_gpu_get_atoms_per_warp(pmeGpu);
268     const size_t nAtomsPadded = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
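    // nAtomsPadded rounds the allocation up to a whole number of atoms-per-warp groups;
    // e.g. (illustrative) 1001 atoms with an alignment of 8 would be padded to 1008.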
269     const int    newSplineDataSize = DIM * order * nAtomsPadded;
270     GMX_ASSERT(newSplineDataSize > 0, "Bad number of atoms in PME GPU");
271     /* Two arrays of the same size */
272     const bool shouldRealloc        = (newSplineDataSize > pmeGpu->archSpecific->splineDataSize);
273     int        currentSizeTemp      = pmeGpu->archSpecific->splineDataSize;
274     int        currentSizeTempAlloc = pmeGpu->archSpecific->splineDataSizeAlloc;
275     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta, newSplineDataSize, &currentSizeTemp,
276                            &currentSizeTempAlloc, pmeGpu->archSpecific->deviceContext_);
277     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta, newSplineDataSize,
278                            &pmeGpu->archSpecific->splineDataSize, &pmeGpu->archSpecific->splineDataSizeAlloc,
279                            pmeGpu->archSpecific->deviceContext_);
280     // the host side reallocation
281     if (shouldRealloc)
282     {
283         pfree(pmeGpu->staging.h_theta);
284         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_theta), newSplineDataSize * sizeof(float));
285         pfree(pmeGpu->staging.h_dtheta);
286         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_dtheta), newSplineDataSize * sizeof(float));
287     }
288 }
289
290 void pme_gpu_free_spline_data(const PmeGpu* pmeGpu)
291 {
292     /* Two arrays of the same size */
293     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta);
294     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta);
295     pfree(pmeGpu->staging.h_theta);
296     pfree(pmeGpu->staging.h_dtheta);
297 }
298
299 void pme_gpu_realloc_grid_indices(PmeGpu* pmeGpu)
300 {
301     const size_t newIndicesSize = DIM * pmeGpu->nAtomsAlloc;
302     GMX_ASSERT(newIndicesSize > 0, "Bad number of atoms in PME GPU");
303     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices, newIndicesSize,
304                            &pmeGpu->archSpecific->gridlineIndicesSize,
305                            &pmeGpu->archSpecific->gridlineIndicesSizeAlloc,
306                            pmeGpu->archSpecific->deviceContext_);
307     pfree(pmeGpu->staging.h_gridlineIndices);
308     pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_gridlineIndices), newIndicesSize * sizeof(int));
309 }
310
311 void pme_gpu_free_grid_indices(const PmeGpu* pmeGpu)
312 {
313     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices);
314     pfree(pmeGpu->staging.h_gridlineIndices);
315 }
316
317 void pme_gpu_realloc_grids(PmeGpu* pmeGpu)
318 {
319     auto*     kernelParamsPtr = pmeGpu->kernelParams.get();
320     const int newRealGridSize = kernelParamsPtr->grid.realGridSizePadded[XX]
321                                 * kernelParamsPtr->grid.realGridSizePadded[YY]
322                                 * kernelParamsPtr->grid.realGridSizePadded[ZZ];
323     const int newComplexGridSize = kernelParamsPtr->grid.complexGridSizePadded[XX]
324                                    * kernelParamsPtr->grid.complexGridSizePadded[YY]
325                                    * kernelParamsPtr->grid.complexGridSizePadded[ZZ] * 2;
326     // Multiplied by 2 because the complex grid size is counted in complex numbers, while all allocations/pointers are float
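    // Illustrative example: a 64x64x64 real grid with the default no-padding complex layout
    // gives a 64x64x33 complex grid, i.e. 64 * 64 * 33 * 2 = 270336 floats.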
327     if (pmeGpu->archSpecific->performOutOfPlaceFFT)
328     {
329         /* 2 separate grids */
330         reallocateDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, newComplexGridSize,
331                                &pmeGpu->archSpecific->complexGridSize,
332                                &pmeGpu->archSpecific->complexGridSizeAlloc,
333                                pmeGpu->archSpecific->deviceContext_);
334         reallocateDeviceBuffer(
335                 &kernelParamsPtr->grid.d_realGrid, newRealGridSize, &pmeGpu->archSpecific->realGridSize,
336                 &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->deviceContext_);
337     }
338     else
339     {
340         /* A single buffer so that any grid will fit */
341         const int newGridsSize = std::max(newRealGridSize, newComplexGridSize);
342         reallocateDeviceBuffer(
343                 &kernelParamsPtr->grid.d_realGrid, newGridsSize, &pmeGpu->archSpecific->realGridSize,
344                 &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->deviceContext_);
345         kernelParamsPtr->grid.d_fourierGrid   = kernelParamsPtr->grid.d_realGrid;
346         pmeGpu->archSpecific->complexGridSize = pmeGpu->archSpecific->realGridSize;
347         // the size might get used later for copying the grid
348     }
349 }
350
351 void pme_gpu_free_grids(const PmeGpu* pmeGpu)
352 {
353     if (pmeGpu->archSpecific->performOutOfPlaceFFT)
354     {
355         freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_fourierGrid);
356     }
357     freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid);
358 }
359
360 void pme_gpu_clear_grids(const PmeGpu* pmeGpu)
361 {
362     clearDeviceBufferAsync(&pmeGpu->kernelParams->grid.d_realGrid, 0,
363                            pmeGpu->archSpecific->realGridSize, pmeGpu->archSpecific->pmeStream);
364 }
365
366 void pme_gpu_realloc_and_copy_fract_shifts(PmeGpu* pmeGpu)
367 {
368     pme_gpu_free_fract_shifts(pmeGpu);
369
370     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
371
372     const int nx                  = kernelParamsPtr->grid.realGridSize[XX];
373     const int ny                  = kernelParamsPtr->grid.realGridSize[YY];
374     const int nz                  = kernelParamsPtr->grid.realGridSize[ZZ];
375     const int cellCount           = c_pmeNeighborUnitcellCount;
376     const int gridDataOffset[DIM] = { 0, cellCount * nx, cellCount * (nx + ny) };
377
378     memcpy(kernelParamsPtr->grid.tablesOffsets, &gridDataOffset, sizeof(gridDataOffset));
379
380     const int newFractShiftsSize = cellCount * (nx + ny + nz);
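    // Like the B-spline moduli, the fractional shift and gridline index tables for the three
    // dimensions are packed into single buffers of cellCount * (nx + ny + nz) entries,
    // with tablesOffsets marking where each dimension starts.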
381
382 #if GMX_GPU == GMX_GPU_CUDA
383     initParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable, kernelParamsPtr->fractShiftsTableTexture,
384                          pmeGpu->common->fsh.data(), newFractShiftsSize);
385
386     initParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
387                          kernelParamsPtr->gridlineIndicesTableTexture, pmeGpu->common->nn.data(),
388                          newFractShiftsSize);
389 #elif GMX_GPU == GMX_GPU_OPENCL
390     // No dedicated texture routines in OpenCL, so plain device buffers are used instead.
391     allocateDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, newFractShiftsSize,
392                          pmeGpu->archSpecific->deviceContext_);
393     allocateDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, newFractShiftsSize,
394                          pmeGpu->archSpecific->deviceContext_);
395     copyToDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, pmeGpu->common->fsh.data(), 0,
396                        newFractShiftsSize, pmeGpu->archSpecific->pmeStream,
397                        GpuApiCallBehavior::Async, nullptr);
398     copyToDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, pmeGpu->common->nn.data(), 0,
399                        newFractShiftsSize, pmeGpu->archSpecific->pmeStream,
400                        GpuApiCallBehavior::Async, nullptr);
401 #endif
402 }
403
404 void pme_gpu_free_fract_shifts(const PmeGpu* pmeGpu)
405 {
406     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
407 #if GMX_GPU == GMX_GPU_CUDA
408     destroyParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable,
409                             kernelParamsPtr->fractShiftsTableTexture);
410     destroyParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
411                             kernelParamsPtr->gridlineIndicesTableTexture);
412 #elif GMX_GPU == GMX_GPU_OPENCL
413     freeDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable);
414     freeDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable);
415 #endif
416 }
417
418 bool pme_gpu_stream_query(const PmeGpu* pmeGpu)
419 {
420     return haveStreamTasksCompleted(pmeGpu->archSpecific->pmeStream);
421 }
422
423 void pme_gpu_copy_input_gather_grid(const PmeGpu* pmeGpu, float* h_grid)
424 {
425     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid, h_grid, 0, pmeGpu->archSpecific->realGridSize,
426                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
427 }
428
429 void pme_gpu_copy_output_spread_grid(const PmeGpu* pmeGpu, float* h_grid)
430 {
431     copyFromDeviceBuffer(h_grid, &pmeGpu->kernelParams->grid.d_realGrid, 0,
432                          pmeGpu->archSpecific->realGridSize, pmeGpu->archSpecific->pmeStream,
433                          pmeGpu->settings.transferKind, nullptr);
434     pmeGpu->archSpecific->syncSpreadGridD2H.markEvent(pmeGpu->archSpecific->pmeStream);
435 }
436
437 void pme_gpu_copy_output_spread_atom_data(const PmeGpu* pmeGpu)
438 {
439     const int    alignment       = pme_gpu_get_atoms_per_warp(pmeGpu);
440     const size_t nAtomsPadded    = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
441     const size_t splinesCount    = DIM * nAtomsPadded * pmeGpu->common->pme_order;
442     auto*        kernelParamsPtr = pmeGpu->kernelParams.get();
443     copyFromDeviceBuffer(pmeGpu->staging.h_dtheta, &kernelParamsPtr->atoms.d_dtheta, 0, splinesCount,
444                          pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
445     copyFromDeviceBuffer(pmeGpu->staging.h_theta, &kernelParamsPtr->atoms.d_theta, 0, splinesCount,
446                          pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
447     copyFromDeviceBuffer(pmeGpu->staging.h_gridlineIndices, &kernelParamsPtr->atoms.d_gridlineIndices,
448                          0, kernelParamsPtr->atoms.nAtoms * DIM, pmeGpu->archSpecific->pmeStream,
449                          pmeGpu->settings.transferKind, nullptr);
450 }
451
452 void pme_gpu_copy_input_gather_atom_data(const PmeGpu* pmeGpu)
453 {
454     const int    alignment       = pme_gpu_get_atoms_per_warp(pmeGpu);
455     const size_t nAtomsPadded    = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
456     const size_t splinesCount    = DIM * nAtomsPadded * pmeGpu->common->pme_order;
457     auto*        kernelParamsPtr = pmeGpu->kernelParams.get();
458     if (c_usePadding)
459     {
460         // TODO: could clear only the padding and not the whole thing, but this is test-exclusive code anyway
461         clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_gridlineIndices, 0,
462                                pmeGpu->nAtomsAlloc * DIM, pmeGpu->archSpecific->pmeStream);
463         clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_dtheta, 0,
464                                pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
465                                pmeGpu->archSpecific->pmeStream);
466         clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_theta, 0,
467                                pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
468                                pmeGpu->archSpecific->pmeStream);
469     }
470     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_dtheta, pmeGpu->staging.h_dtheta, 0, splinesCount,
471                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
472     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_theta, pmeGpu->staging.h_theta, 0, splinesCount,
473                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
474     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_gridlineIndices, pmeGpu->staging.h_gridlineIndices,
475                        0, kernelParamsPtr->atoms.nAtoms * DIM, pmeGpu->archSpecific->pmeStream,
476                        pmeGpu->settings.transferKind, nullptr);
477 }
478
479 void pme_gpu_sync_spread_grid(const PmeGpu* pmeGpu)
480 {
481     pmeGpu->archSpecific->syncSpreadGridD2H.waitForEvent();
482 }
483
484 void pme_gpu_init_internal(PmeGpu* pmeGpu)
485 {
486 #if GMX_GPU == GMX_GPU_CUDA
487     // Prepare to use the device that this PME task was assigned earlier.
488     // Other entities, such as CUDA timing events, are known to implicitly use the device context.
489     CU_RET_ERR(cudaSetDevice(pmeGpu->deviceInfo->id), "Switching to PME CUDA device");
490 #endif
491
492     /* Allocate the target-specific structures */
493     pmeGpu->archSpecific.reset(new PmeGpuSpecific(pmeGpu->programHandle_->impl_->deviceContext_));
494     pmeGpu->kernelParams.reset(new PmeGpuKernelParams());
495
496     pmeGpu->archSpecific->performOutOfPlaceFFT = true;
497     /* Out-of-place FFT should give better performance, according to the cuFFT documentation.
498      * In practice the performance seems to be the same, though.
499      * TODO: PME could also try to pick up nice grid sizes (with factors of 2, 3, 5, 7).
500      */
501
502     // timing enabling - TODO put this in gpu_utils (even though generally this is just option handling?) and reuse in NB
503     if (GMX_GPU == GMX_GPU_CUDA)
504     {
505         /* WARNING: CUDA timings are incorrect with multiple streams.
506          *          This is the main reason why they are disabled by default.
507          */
508         // TODO: Consider turning on by default when we can detect nr of streams.
509         pmeGpu->archSpecific->useTiming = (getenv("GMX_ENABLE_GPU_TIMING") != nullptr);
510     }
511     else if (GMX_GPU == GMX_GPU_OPENCL)
512     {
513         pmeGpu->archSpecific->useTiming = (getenv("GMX_DISABLE_GPU_TIMING") == nullptr);
514     }
515
516 #if GMX_GPU == GMX_GPU_CUDA
517     pmeGpu->maxGridWidthX = pmeGpu->deviceInfo->prop.maxGridSize[0];
518 #elif GMX_GPU == GMX_GPU_OPENCL
519     pmeGpu->maxGridWidthX = INT32_MAX / 2;
520     // TODO: is there really no global work size limit in OpenCL?
521 #endif
522
523     /* Creating a PME GPU stream:
524      * - default high priority with CUDA
525      * - no priorities implemented yet with OpenCL; see #2532
526      */
527 #if GMX_GPU == GMX_GPU_CUDA
528     cudaError_t stat;
529     int         highest_priority, lowest_priority;
530     stat = cudaDeviceGetStreamPriorityRange(&lowest_priority, &highest_priority);
531     CU_RET_ERR(stat, "PME cudaDeviceGetStreamPriorityRange failed");
532     stat = cudaStreamCreateWithPriority(&pmeGpu->archSpecific->pmeStream,
533                                         cudaStreamDefault, // cudaStreamNonBlocking,
534                                         highest_priority);
535     CU_RET_ERR(stat, "cudaStreamCreateWithPriority on the PME stream failed");
536 #elif GMX_GPU == GMX_GPU_OPENCL
537     cl_command_queue_properties queueProperties =
538             pmeGpu->archSpecific->useTiming ? CL_QUEUE_PROFILING_ENABLE : 0;
539     cl_device_id device_id = pmeGpu->deviceInfo->oclDeviceId;
540     cl_int       clError;
541     pmeGpu->archSpecific->pmeStream = clCreateCommandQueue(
542             pmeGpu->archSpecific->deviceContext_.context(), device_id, queueProperties, &clError);
543     if (clError != CL_SUCCESS)
544     {
545         GMX_THROW(gmx::InternalError("Failed to create PME command queue"));
546     }
547 #endif
548 }
549
550 void pme_gpu_destroy_specific(const PmeGpu* pmeGpu)
551 {
552 #if GMX_GPU == GMX_GPU_CUDA
553     /* Destroy the CUDA stream */
554     cudaError_t stat = cudaStreamDestroy(pmeGpu->archSpecific->pmeStream);
555     CU_RET_ERR(stat, "PME cudaStreamDestroy error");
556 #elif GMX_GPU == GMX_GPU_OPENCL
557     cl_int clError = clReleaseCommandQueue(pmeGpu->archSpecific->pmeStream);
558     if (clError != CL_SUCCESS)
559     {
560         gmx_warning("Failed to destroy PME command queue");
561     }
562 #endif
563 }
564
565 void pme_gpu_reinit_3dfft(const PmeGpu* pmeGpu)
566 {
567     if (pme_gpu_settings(pmeGpu).performGPUFFT)
568     {
569         pmeGpu->archSpecific->fftSetup.resize(0);
570         for (int i = 0; i < pmeGpu->common->ngrids; i++)
571         {
572             pmeGpu->archSpecific->fftSetup.push_back(std::make_unique<GpuParallel3dFft>(pmeGpu));
573         }
574     }
575 }
576
577 void pme_gpu_destroy_3dfft(const PmeGpu* pmeGpu)
578 {
579     pmeGpu->archSpecific->fftSetup.resize(0);
580 }
581
582 int getSplineParamFullIndex(int order, int splineIndex, int dimIndex, int atomIndex, int atomsPerWarp)
583 {
584     if (order != c_pmeGpuOrder)
585     {
586         throw order;
587     }
588     constexpr int fixedOrder = c_pmeGpuOrder;
589     GMX_UNUSED_VALUE(fixedOrder);
590
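    // Decompose the linear atom index into the owning warp and the atom's slot within that warp.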
591     const int atomWarpIndex = atomIndex % atomsPerWarp;
592     const int warpIndex     = atomIndex / atomsPerWarp;
593     int       indexBase, result;
594     switch (atomsPerWarp)
595     {
596         case 1:
597             indexBase = getSplineParamIndexBase<fixedOrder, 1>(warpIndex, atomWarpIndex);
598             result    = getSplineParamIndex<fixedOrder, 1>(indexBase, dimIndex, splineIndex);
599             break;
600
601         case 2:
602             indexBase = getSplineParamIndexBase<fixedOrder, 2>(warpIndex, atomWarpIndex);
603             result    = getSplineParamIndex<fixedOrder, 2>(indexBase, dimIndex, splineIndex);
604             break;
605
606         case 4:
607             indexBase = getSplineParamIndexBase<fixedOrder, 4>(warpIndex, atomWarpIndex);
608             result    = getSplineParamIndex<fixedOrder, 4>(indexBase, dimIndex, splineIndex);
609             break;
610
611         case 8:
612             indexBase = getSplineParamIndexBase<fixedOrder, 8>(warpIndex, atomWarpIndex);
613             result    = getSplineParamIndex<fixedOrder, 8>(indexBase, dimIndex, splineIndex);
614             break;
615
616         default:
617             GMX_THROW(gmx::NotImplementedError(
618                     gmx::formatString("Test function call not unrolled for atomsPerWarp = %d in "
619                                       "getSplineParamFullIndex",
620                                       atomsPerWarp)));
621     }
622     return result;
623 }
624
625 void pme_gpu_getEnergyAndVirial(const gmx_pme_t& pme, PmeOutput* output)
626 {
627     const PmeGpu* pmeGpu = pme.gpu;
628     for (int j = 0; j < c_virialAndEnergyCount; j++)
629     {
630         GMX_ASSERT(std::isfinite(pmeGpu->staging.h_virialAndEnergy[j]),
631                    "PME GPU produces incorrect energy/virial.");
632     }
633
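    // Unpack the seven staged values: the three diagonal virial components, the three
    // off-diagonal components (each stored once), and finally the energy. The 0.25/0.5
    // prefactors are assumed to undo the scaling applied by the solve kernel.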
634     unsigned int j                 = 0;
635     output->coulombVirial_[XX][XX] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
636     output->coulombVirial_[YY][YY] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
637     output->coulombVirial_[ZZ][ZZ] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
638     output->coulombVirial_[XX][YY] = output->coulombVirial_[YY][XX] =
639             0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
640     output->coulombVirial_[XX][ZZ] = output->coulombVirial_[ZZ][XX] =
641             0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
642     output->coulombVirial_[YY][ZZ] = output->coulombVirial_[ZZ][YY] =
643             0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
644     output->coulombEnergy_ = 0.5F * pmeGpu->staging.h_virialAndEnergy[j++];
645 }
646
647 /*! \brief Sets the force-related members in \p output
648  *
649  * \param[in]   pmeGpu      PME GPU data structure
650  * \param[out]  output      Pointer to PME output data structure
651  */
652 static void pme_gpu_getForceOutput(PmeGpu* pmeGpu, PmeOutput* output)
653 {
654     output->haveForceOutput_ = !pmeGpu->settings.useGpuForceReduction;
655     if (output->haveForceOutput_)
656     {
657         output->forces_ = pmeGpu->staging.h_forces;
658     }
659 }
660
661 PmeOutput pme_gpu_getOutput(const gmx_pme_t& pme, const bool computeEnergyAndVirial)
662 {
663     PmeGpu* pmeGpu = pme.gpu;
664
665     PmeOutput output;
666
667     pme_gpu_getForceOutput(pmeGpu, &output);
668
669     if (computeEnergyAndVirial)
670     {
671         if (pme_gpu_settings(pmeGpu).performGPUSolve)
672         {
673             pme_gpu_getEnergyAndVirial(pme, &output);
674         }
675         else
676         {
677             get_pme_ener_vir_q(pme.solve_work, pme.nthread, &output);
678         }
679     }
680     return output;
681 }
682
683 void pme_gpu_update_input_box(PmeGpu gmx_unused* pmeGpu, const matrix gmx_unused box)
684 {
685 #if GMX_DOUBLE
686     GMX_THROW(gmx::NotImplementedError("PME is implemented for single-precision only on GPU"));
687 #else
688     matrix scaledBox;
689     pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
690     auto* kernelParamsPtr              = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
691     kernelParamsPtr->current.boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
692     GMX_ASSERT(kernelParamsPtr->current.boxVolume != 0.0F, "Zero volume of the unit cell");
693     matrix recipBox;
694     gmx::invertBoxMatrix(scaledBox, recipBox);
695
696     /* The GPU recipBox is transposed as compared to the CPU recipBox.
697      * Spread uses matrix columns (while solve and gather use rows).
698      * There is no particular reason for this; it might be further rethought/optimized for better access patterns.
699      */
700     const real newRecipBox[DIM][DIM] = { { recipBox[XX][XX], recipBox[YY][XX], recipBox[ZZ][XX] },
701                                          { 0.0, recipBox[YY][YY], recipBox[ZZ][YY] },
702                                          { 0.0, 0.0, recipBox[ZZ][ZZ] } };
703     memcpy(kernelParamsPtr->current.recipBox, newRecipBox, sizeof(matrix));
704 #endif
705 }
706
707 /*! \brief \libinternal
708  * (Re-)initializes all the PME GPU data related to the grid size and cut-off.
709  *
710  * \param[in] pmeGpu            The PME GPU structure.
711  */
712 static void pme_gpu_reinit_grids(PmeGpu* pmeGpu)
713 {
714     auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
715     kernelParamsPtr->grid.ewaldFactor =
716             (M_PI * M_PI) / (pmeGpu->common->ewaldcoeff_q * pmeGpu->common->ewaldcoeff_q);
717
718     /* The grid size variants */
719     for (int i = 0; i < DIM; i++)
720     {
721         kernelParamsPtr->grid.realGridSize[i] = pmeGpu->common->nk[i];
722         kernelParamsPtr->grid.realGridSizeFP[i] =
723                 static_cast<float>(kernelParamsPtr->grid.realGridSize[i]);
724         kernelParamsPtr->grid.realGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
725
726         // The complex grid currently uses no padding;
727         // if it starts to do so, then another test should be added for that
728         kernelParamsPtr->grid.complexGridSize[i]       = kernelParamsPtr->grid.realGridSize[i];
729         kernelParamsPtr->grid.complexGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
730     }
731     /* FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
732     if (!pme_gpu_settings(pmeGpu).performGPUFFT)
733     {
734         // This allows the GPU spread grid and the CPU fftgrid to have the same layout, so that we can copy the data directly
735         kernelParamsPtr->grid.realGridSizePadded[ZZ] =
736                 (kernelParamsPtr->grid.realGridSize[ZZ] / 2 + 1) * 2;
737     }
738
739     /* GPU FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
740     kernelParamsPtr->grid.complexGridSize[ZZ] /= 2;
741     kernelParamsPtr->grid.complexGridSize[ZZ]++;
742     kernelParamsPtr->grid.complexGridSizePadded[ZZ] = kernelParamsPtr->grid.complexGridSize[ZZ];
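    // Illustrative example: a real grid with nz = 64 yields 64 / 2 + 1 = 33 complex points
    // in the minor dimension.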
743
744     pme_gpu_realloc_and_copy_fract_shifts(pmeGpu);
745     pme_gpu_realloc_and_copy_bspline_values(pmeGpu);
746     pme_gpu_realloc_grids(pmeGpu);
747     pme_gpu_reinit_3dfft(pmeGpu);
748 }
749
750 /* Several GPU functions that refer to the CPU PME data live here.
751  * We would like to keep these away from the GPU-framework specific code for clarity,
752  * as well as compilation issues with MPI.
753  */
754
755 /*! \brief \libinternal
756  * Copies everything useful from the PME CPU to the PME GPU structure.
757  * The goal is to minimize interaction with the PME CPU structure in the GPU code.
758  *
759  * \param[in] pme         The PME structure.
760  */
761 static void pme_gpu_copy_common_data_from(const gmx_pme_t* pme)
762 {
763     /* TODO: Consider refactoring the CPU PME code to use the same structure,
764      * so that this function becomes 2 lines */
765     PmeGpu* pmeGpu               = pme->gpu;
766     pmeGpu->common->ngrids       = pme->ngrids;
767     pmeGpu->common->epsilon_r    = pme->epsilon_r;
768     pmeGpu->common->ewaldcoeff_q = pme->ewaldcoeff_q;
769     pmeGpu->common->nk[XX]       = pme->nkx;
770     pmeGpu->common->nk[YY]       = pme->nky;
771     pmeGpu->common->nk[ZZ]       = pme->nkz;
772     pmeGpu->common->pme_order    = pme->pme_order;
773     if (pmeGpu->common->pme_order != c_pmeGpuOrder)
774     {
775         GMX_THROW(gmx::NotImplementedError("pme_order != 4 is not implemented!"));
776     }
777     for (int i = 0; i < DIM; i++)
778     {
779         pmeGpu->common->bsp_mod[i].assign(pme->bsp_mod[i], pme->bsp_mod[i] + pmeGpu->common->nk[i]);
780     }
781     const int cellCount = c_pmeNeighborUnitcellCount;
782     pmeGpu->common->fsh.resize(0);
783     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshx, pme->fshx + cellCount * pme->nkx);
784     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshy, pme->fshy + cellCount * pme->nky);
785     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshz, pme->fshz + cellCount * pme->nkz);
786     pmeGpu->common->nn.resize(0);
787     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnx, pme->nnx + cellCount * pme->nkx);
788     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nny, pme->nny + cellCount * pme->nky);
789     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnz, pme->nnz + cellCount * pme->nkz);
790     pmeGpu->common->runMode       = pme->runMode;
791     pmeGpu->common->isRankPmeOnly = !pme->bPPnode;
792     pmeGpu->common->boxScaler     = pme->boxScaler;
793 }
794
795 /*! \libinternal \brief
796  * Uses heuristics to select the best-performing PME spread and gather kernels.
797  *
798  * \param[in,out] pmeGpu         The PME GPU structure.
799  */
800 static void pme_gpu_select_best_performing_pme_spreadgather_kernels(PmeGpu* pmeGpu)
801 {
802     if (pmeGpu->kernelParams->atoms.nAtoms > c_pmeGpuPerformanceAtomLimit && (GMX_GPU == GMX_GPU_CUDA))
803     {
804         pmeGpu->settings.useOrderThreadsPerAtom = true;
805         pmeGpu->settings.recalculateSplines     = true;
806     }
807     else
808     {
809         pmeGpu->settings.useOrderThreadsPerAtom = false;
810         pmeGpu->settings.recalculateSplines     = false;
811     }
812 }
813
814
815 /*! \libinternal \brief
816  * Initializes the PME GPU data at the beginning of the run.
817  * TODO: this should become PmeGpu::PmeGpu()
818  *
819  * \param[in,out] pme            The PME structure.
820  * \param[in,out] deviceInfo     The GPU device information structure.
821  * \param[in]     pmeGpuProgram  The handle to the program/kernel data created outside (e.g. in unit tests/runner)
822  */
823 static void pme_gpu_init(gmx_pme_t* pme, const DeviceInformation* deviceInfo, const PmeGpuProgram* pmeGpuProgram)
824 {
825     pme->gpu       = new PmeGpu();
826     PmeGpu* pmeGpu = pme->gpu;
827     changePinningPolicy(&pmeGpu->staging.h_forces, pme_get_pinning_policy());
828     pmeGpu->common = std::make_shared<PmeShared>();
829
830     /* These settings are set here for the whole run; dynamic ones are set in pme_gpu_reinit() */
831     /* A convenience variable. */
832     pmeGpu->settings.useDecomposition = (pme->nnodes != 1);
833     /* TODO: CPU gather with GPU spread is broken due to different theta/dtheta layout. */
834     pmeGpu->settings.performGPUGather = true;
835     // By default GPU-side reduction is off (explicitly set here for tests, otherwise reset per-step)
836     pmeGpu->settings.useGpuForceReduction = false;
837
838     pme_gpu_set_testing(pmeGpu, false);
839
840     pmeGpu->deviceInfo = deviceInfo;
841     GMX_ASSERT(pmeGpuProgram != nullptr, "GPU kernels must be already compiled");
842     pmeGpu->programHandle_ = pmeGpuProgram;
843
844     pmeGpu->initializedClfftLibrary_ = std::make_unique<gmx::ClfftInitializer>();
845
846     pme_gpu_init_internal(pmeGpu);
847     pme_gpu_alloc_energy_virial(pmeGpu);
848
849     pme_gpu_copy_common_data_from(pme);
850
851     GMX_ASSERT(pmeGpu->common->epsilon_r != 0.0F, "PME GPU: bad electrostatic coefficient");
852
853     auto* kernelParamsPtr               = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
854     kernelParamsPtr->constants.elFactor = ONE_4PI_EPS0 / pmeGpu->common->epsilon_r;
855 }
856
857 void pme_gpu_transform_spline_atom_data(const PmeGpu*      pmeGpu,
858                                         const PmeAtomComm* atc,
859                                         PmeSplineDataType  type,
860                                         int                dimIndex,
861                                         PmeLayoutTransform transform)
862 {
863     // The GPU atom spline data is laid out in a different way currently than the CPU one.
864     // This function converts the data from GPU to CPU layout (in the host memory).
865     // It is only intended for testing purposes so far.
866     // Ideally we should use similar layouts on CPU and GPU if we care about mixed modes and their
867     // performance (e.g. spreading on GPU, gathering on CPU).
868     GMX_RELEASE_ASSERT(atc->nthread == 1, "Only the serial PME data layout is supported");
869     const uintmax_t threadIndex  = 0;
870     const auto      atomCount    = pme_gpu_get_kernel_params_base_ptr(pmeGpu)->atoms.nAtoms;
871     const auto      atomsPerWarp = pme_gpu_get_atoms_per_warp(pmeGpu);
872     const auto      pmeOrder     = pmeGpu->common->pme_order;
873     GMX_ASSERT(pmeOrder == c_pmeGpuOrder, "Only PME order 4 is implemented");
874
875     real*  cpuSplineBuffer;
876     float* h_splineBuffer;
877     switch (type)
878     {
879         case PmeSplineDataType::Values:
880             cpuSplineBuffer = atc->spline[threadIndex].theta.coefficients[dimIndex];
881             h_splineBuffer  = pmeGpu->staging.h_theta;
882             break;
883
884         case PmeSplineDataType::Derivatives:
885             cpuSplineBuffer = atc->spline[threadIndex].dtheta.coefficients[dimIndex];
886             h_splineBuffer  = pmeGpu->staging.h_dtheta;
887             break;
888
889         default: GMX_THROW(gmx::InternalError("Unknown spline data type"));
890     }
891
892     for (auto atomIndex = 0; atomIndex < atomCount; atomIndex++)
893     {
894         for (auto orderIndex = 0; orderIndex < pmeOrder; orderIndex++)
895         {
896             const auto gpuValueIndex =
897                     getSplineParamFullIndex(pmeOrder, orderIndex, dimIndex, atomIndex, atomsPerWarp);
898             const auto cpuValueIndex = atomIndex * pmeOrder + orderIndex;
899             GMX_ASSERT(cpuValueIndex < atomCount * pmeOrder,
900                        "Atom spline data index out of bounds (while transforming GPU data layout "
901                        "for host)");
902             switch (transform)
903             {
904                 case PmeLayoutTransform::GpuToHost:
905                     cpuSplineBuffer[cpuValueIndex] = h_splineBuffer[gpuValueIndex];
906                     break;
907
908                 case PmeLayoutTransform::HostToGpu:
909                     h_splineBuffer[gpuValueIndex] = cpuSplineBuffer[cpuValueIndex];
910                     break;
911
912                 default: GMX_THROW(gmx::InternalError("Unknown layout transform"));
913             }
914         }
915     }
916 }
917
918 void pme_gpu_get_real_grid_sizes(const PmeGpu* pmeGpu, gmx::IVec* gridSize, gmx::IVec* paddedGridSize)
919 {
920     GMX_ASSERT(gridSize != nullptr, "");
921     GMX_ASSERT(paddedGridSize != nullptr, "");
922     GMX_ASSERT(pmeGpu != nullptr, "");
923     auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
924     for (int i = 0; i < DIM; i++)
925     {
926         (*gridSize)[i]       = kernelParamsPtr->grid.realGridSize[i];
927         (*paddedGridSize)[i] = kernelParamsPtr->grid.realGridSizePadded[i];
928     }
929 }
930
931 void pme_gpu_reinit(gmx_pme_t* pme, const DeviceInformation* deviceInfo, const PmeGpuProgram* pmeGpuProgram)
932 {
933     GMX_ASSERT(pme != nullptr, "Need valid PME object");
934     if (pme->runMode == PmeRunMode::CPU)
935     {
936         GMX_ASSERT(pme->gpu == nullptr, "Should not have PME GPU object");
937         return;
938     }
939
940     if (!pme->gpu)
941     {
942         /* First-time initialization */
943         pme_gpu_init(pme, deviceInfo, pmeGpuProgram);
944     }
945     else
946     {
947         /* After this call nothing in the GPU code should refer to the gmx_pme_t *pme itself - until the next pme_gpu_reinit */
948         pme_gpu_copy_common_data_from(pme);
949     }
950     /* GPU FFT is only used when running on a single rank (no PME decomposition). */
951     pme->gpu->settings.performGPUFFT =
952             (pme->gpu->common->runMode == PmeRunMode::GPU) && !pme->gpu->settings.useDecomposition;
953     pme->gpu->settings.performGPUSolve = (pme->gpu->common->runMode == PmeRunMode::GPU);
954
955     /* Reinit active timers */
956     pme_gpu_reinit_timings(pme->gpu);
957
958     pme_gpu_reinit_grids(pme->gpu);
959     // Note: if timing the reinit launch overhead becomes more relevant
960     // (e.g. with regular PP-PME re-balancing), we should pass wcycle here.
961     pme_gpu_reinit_computation(pme, nullptr);
962     /* Clear the previous box - doesn't hurt, and forces the PME CPU recipbox
963      * update for mixed mode on grid switch. TODO: use shared recipbox field.
964      */
965     std::memset(pme->gpu->common->previousBox, 0, sizeof(pme->gpu->common->previousBox));
966 }
967
968 void pme_gpu_destroy(PmeGpu* pmeGpu)
969 {
970     /* Free lots of data */
971     pme_gpu_free_energy_virial(pmeGpu);
972     pme_gpu_free_bspline_values(pmeGpu);
973     pme_gpu_free_forces(pmeGpu);
974     pme_gpu_free_coefficients(pmeGpu);
975     pme_gpu_free_spline_data(pmeGpu);
976     pme_gpu_free_grid_indices(pmeGpu);
977     pme_gpu_free_fract_shifts(pmeGpu);
978     pme_gpu_free_grids(pmeGpu);
979
980     pme_gpu_destroy_3dfft(pmeGpu);
981
982     /* Free the GPU-framework specific data last */
983     pme_gpu_destroy_specific(pmeGpu);
984
985     delete pmeGpu;
986 }
987
988 void pme_gpu_reinit_atoms(PmeGpu* pmeGpu, const int nAtoms, const real* charges)
989 {
990     auto* kernelParamsPtr         = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
991     kernelParamsPtr->atoms.nAtoms = nAtoms;
992     const int alignment           = pme_gpu_get_atom_data_alignment(pmeGpu);
993     pmeGpu->nAtomsPadded          = ((nAtoms + alignment - 1) / alignment) * alignment;
994     const int  nAtomsAlloc        = c_usePadding ? pmeGpu->nAtomsPadded : nAtoms;
995     const bool haveToRealloc =
996             (pmeGpu->nAtomsAlloc < nAtomsAlloc); /* This check might be redundant, but is logical */
997     pmeGpu->nAtomsAlloc = nAtomsAlloc;
998
999 #if GMX_DOUBLE
1000     GMX_RELEASE_ASSERT(false, "Only single precision supported");
1001     GMX_UNUSED_VALUE(charges);
1002 #else
1003     pme_gpu_realloc_and_copy_input_coefficients(pmeGpu, reinterpret_cast<const float*>(charges));
1004     /* This could also be gated on haveToRealloc, but the copy always needs to be performed */
1005 #endif
1006
1007     if (haveToRealloc)
1008     {
1009         pme_gpu_realloc_forces(pmeGpu);
1010         pme_gpu_realloc_spline_data(pmeGpu);
1011         pme_gpu_realloc_grid_indices(pmeGpu);
1012     }
1013     pme_gpu_select_best_performing_pme_spreadgather_kernels(pmeGpu);
1014 }
1015
1016 /*! \internal \brief
1017  * Returns the raw timing event from the corresponding GpuRegionTimer (if timings are enabled).
1018  * In CUDA the result can be a nullptr stub, per the GpuRegionTimer implementation.
1019  *
1020  * \param[in] pmeGpu         The PME GPU data structure.
1021  * \param[in] PMEStageId     The PME GPU stage gtPME_ index from the enum in src/gromacs/timing/gpu_timing.h
1022  */
1023 static CommandEvent* pme_gpu_fetch_timing_event(const PmeGpu* pmeGpu, size_t PMEStageId)
1024 {
1025     CommandEvent* timingEvent = nullptr;
1026     if (pme_gpu_timings_enabled(pmeGpu))
1027     {
1028         GMX_ASSERT(PMEStageId < pmeGpu->archSpecific->timingEvents.size(),
1029                    "Wrong PME GPU timing event index");
1030         timingEvent = pmeGpu->archSpecific->timingEvents[PMEStageId].fetchNextEvent();
1031     }
1032     return timingEvent;
1033 }
1034
1035 void pme_gpu_3dfft(const PmeGpu* pmeGpu, gmx_fft_direction dir, int grid_index)
1036 {
1037     int timerId = (dir == GMX_FFT_REAL_TO_COMPLEX) ? gtPME_FFT_R2C : gtPME_FFT_C2R;
1038
1039     pme_gpu_start_timing(pmeGpu, timerId);
1040     pmeGpu->archSpecific->fftSetup[grid_index]->perform3dFft(
1041             dir, pme_gpu_fetch_timing_event(pmeGpu, timerId));
1042     pme_gpu_stop_timing(pmeGpu, timerId);
1043 }
1044
1045 /*! \brief
1046  * Given possibly large \p blockCount, returns a compact 1D or 2D grid for kernel scheduling,
1047  * to minimize the number of unused blocks.
1048  */
1049 std::pair<int, int> inline pmeGpuCreateGrid(const PmeGpu* pmeGpu, int blockCount)
1050 {
1051     // How many rows of maximum width in X do we need (hopefully just one)
1052     const int minRowCount = (blockCount + pmeGpu->maxGridWidthX - 1) / pmeGpu->maxGridWidthX;
1053     // Trying to make things even
1054     const int colCount = (blockCount + minRowCount - 1) / minRowCount;
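    // Illustrative toy example: blockCount = 10 with maxGridWidthX = 4 gives minRowCount = 3
    // and colCount = 4, i.e. a 4 x 3 grid with 2 (< minRowCount) unused blocks.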
1055     GMX_ASSERT((colCount * minRowCount - blockCount) >= 0, "pmeGpuCreateGrid: totally wrong");
1056     GMX_ASSERT((colCount * minRowCount - blockCount) < minRowCount,
1057                "pmeGpuCreateGrid: excessive blocks");
1058     return std::pair<int, int>(colCount, minRowCount);
1059 }
1060
1061 /*! \brief
1062  * Returns a pointer to the appropriate spline-and-spread kernel based on the input bool values.
1063  *
1064  * \param[in]  pmeGpu                   The PME GPU structure.
1065  * \param[in]  useOrderThreadsPerAtom   bool controlling if we should use order or order*order threads per atom
1066  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
1067  *
1068  * \return Pointer to CUDA kernel
1069  */
1070 static auto selectSplineAndSpreadKernelPtr(const PmeGpu* pmeGpu, bool useOrderThreadsPerAtom, bool writeSplinesToGlobal)
1071 {
1072     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1073     if (writeSplinesToGlobal)
1074     {
1075         if (useOrderThreadsPerAtom)
1076         {
1077             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesThPerAtom4;
1078         }
1079         else
1080         {
1081             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplines;
1082         }
1083     }
1084     else
1085     {
1086         if (useOrderThreadsPerAtom)
1087         {
1088             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4;
1089         }
1090         else
1091         {
1092             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernel;
1093         }
1094     }
1095
1096     return kernelPtr;
1097 }
1098
1099 /*! \brief
1100  * Returns a pointer to the appropriate spline kernel based on the input bool values.
1101  *
1102  * \param[in]  pmeGpu                   The PME GPU structure.
1103  * \param[in]  useOrderThreadsPerAtom   bool controlling if we should use order or order*order threads per atom
1104  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
1105  *
1106  * \return Pointer to CUDA kernel
1107  */
1108 static auto selectSplineKernelPtr(const PmeGpu* pmeGpu, bool useOrderThreadsPerAtom, bool gmx_unused writeSplinesToGlobal)
1109 {
1110     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1111     GMX_ASSERT(
1112             writeSplinesToGlobal,
1113             "Spline data should always be written to global memory when just calculating splines");
1114
1115     if (useOrderThreadsPerAtom)
1116     {
1117         kernelPtr = pmeGpu->programHandle_->impl_->splineKernelThPerAtom4;
1118     }
1119     else
1120     {
1121         kernelPtr = pmeGpu->programHandle_->impl_->splineKernel;
1122     }
1123     return kernelPtr;
1124 }
1125
1126 /*! \brief
1127  * Returns a pointer to the appropriate spread kernel based on the input bool values.
1128  *
1129  * \param[in]  pmeGpu                   The PME GPU structure.
1130  * \param[in]  useOrderThreadsPerAtom   bool controlling if we should use order or order*order threads per atom
1131  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
1132  *
1133  * \return Pointer to CUDA kernel
1134  */
1135 static auto selectSpreadKernelPtr(const PmeGpu* pmeGpu, bool useOrderThreadsPerAtom, bool writeSplinesToGlobal)
1136 {
1137     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1138     if (writeSplinesToGlobal)
1139     {
1140         if (useOrderThreadsPerAtom)
1141         {
1142             kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelThPerAtom4;
1143         }
1144         else
1145         {
1146             kernelPtr = pmeGpu->programHandle_->impl_->spreadKernel;
1147         }
1148     }
1149     else
1150     {
1151         /* If we are not saving the spline data, we need to recalculate it
1152            using the spline-and-spread kernel */
1153         if (useOrderThreadsPerAtom)
1154         {
1155             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4;
1156         }
1157         else
1158         {
1159             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernel;
1160         }
1161     }
1162     return kernelPtr;
1163 }
1164
1165 void pme_gpu_spread(const PmeGpu*         pmeGpu,
1166                     GpuEventSynchronizer* xReadyOnDevice,
1167                     int gmx_unused gridIndex,
1168                     real*          h_grid,
1169                     bool           computeSplines,
1170                     bool           spreadCharges)
1171 {
1172     GMX_ASSERT(computeSplines || spreadCharges,
1173                "PME spline/spread kernel has invalid input (nothing to do)");
1174     const auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1175     GMX_ASSERT(kernelParamsPtr->atoms.nAtoms > 0, "No atom data in PME GPU spread");
1176
1177     const size_t blockSize = pmeGpu->programHandle_->impl_->spreadWorkGroupSize;
1178
1179     const int order = pmeGpu->common->pme_order;
1180     GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1181     const bool writeGlobal            = pmeGpu->settings.copyAllOutputs;
1182     const bool useOrderThreadsPerAtom = pmeGpu->settings.useOrderThreadsPerAtom;
1183     const bool recalculateSplines     = pmeGpu->settings.recalculateSplines;
1184 #if GMX_GPU == GMX_GPU_OPENCL
1185     GMX_ASSERT(!useOrderThreadsPerAtom, "Only 16 threads per atom supported in OpenCL");
1186     GMX_ASSERT(!recalculateSplines, "Recalculating splines not supported in OpenCL");
1187 #endif
1188     const int atomsPerBlock = useOrderThreadsPerAtom ? blockSize / c_pmeSpreadGatherThreadsPerAtom4ThPerAtom
1189                                                      : blockSize / c_pmeSpreadGatherThreadsPerAtom;
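    // Illustrative example (assuming a hypothetical work-group size of 256): 256 / 16 = 16 atoms
    // per block in the default mode, or 256 / 4 = 64 atoms per block with order threads per atom.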
1190
1191     // TODO: pick a smaller block size at runtime if needed
1192     // (e.g. on 660 Ti where 50% occupancy is ~25% faster than 100% occupancy with RNAse (~17.8k atoms)).
1193     // If doing so, change atomsPerBlock in the kernels as well.
1194     // TODO: test varying block sizes on modern architectures as well
1195     // TODO: also consider using cudaFuncSetCacheConfig() to prefer shared memory on older architectures
1196     // (mostly for the spline data)
1197     GMX_ASSERT(!c_usePadding || !(c_pmeAtomDataAlignment % atomsPerBlock),
1198                "inconsistent atom data padding vs. spreading block size");
1199
1200     // Ensure that coordinates are ready on the device before launching spread;
1201     // this is only needed with CUDA on PP+PME ranks; it is not needed on separate PME ranks,
1202     // in unit tests, or in OpenCL, as these cases use a single stream (hence xReadyOnDevice == nullptr).
1203     GMX_ASSERT(xReadyOnDevice != nullptr || (GMX_GPU != GMX_GPU_CUDA)
1204                        || pmeGpu->common->isRankPmeOnly || pme_gpu_settings(pmeGpu).copyAllOutputs,
1205                "Need a valid coordinate synchronizer on PP+PME ranks with CUDA.");
1206     if (xReadyOnDevice)
1207     {
1208         xReadyOnDevice->enqueueWaitEvent(pmeGpu->archSpecific->pmeStream);
1209     }
1210
1211     const int blockCount = pmeGpu->nAtomsPadded / atomsPerBlock;
1212     auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1213
1214     KernelLaunchConfig config;
1215     config.blockSize[0] = order;
1216     config.blockSize[1] = useOrderThreadsPerAtom ? 1 : order;
1217     config.blockSize[2] = atomsPerBlock;
1218     config.gridSize[0]  = dimGrid.first;
1219     config.gridSize[1]  = dimGrid.second;
1220     config.stream       = pmeGpu->archSpecific->pmeStream;
1221
1222     int                                timingId;
1223     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1224     if (computeSplines)
1225     {
1226         if (spreadCharges)
1227         {
1228             timingId  = gtPME_SPLINEANDSPREAD;
1229             kernelPtr = selectSplineAndSpreadKernelPtr(pmeGpu, useOrderThreadsPerAtom,
1230                                                        writeGlobal || (!recalculateSplines));
1231         }
1232         else
1233         {
1234             timingId  = gtPME_SPLINE;
1235             kernelPtr = selectSplineKernelPtr(pmeGpu, useOrderThreadsPerAtom,
1236                                               writeGlobal || (!recalculateSplines));
1237         }
1238     }
1239     else
1240     {
1241         timingId  = gtPME_SPREAD;
1242         kernelPtr = selectSpreadKernelPtr(pmeGpu, useOrderThreadsPerAtom,
1243                                           writeGlobal || (!recalculateSplines));
1244     }
1245
1246
1247     pme_gpu_start_timing(pmeGpu, timingId);
1248     auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1249 #if c_canEmbedBuffers
1250     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1251 #else
1252     const auto kernelArgs = prepareGpuKernelArguments(
1253             kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->atoms.d_theta,
1254             &kernelParamsPtr->atoms.d_dtheta, &kernelParamsPtr->atoms.d_gridlineIndices,
1255             &kernelParamsPtr->grid.d_realGrid, &kernelParamsPtr->grid.d_fractShiftsTable,
1256             &kernelParamsPtr->grid.d_gridlineIndicesTable, &kernelParamsPtr->atoms.d_coefficients,
1257             &kernelParamsPtr->atoms.d_coordinates);
1258 #endif
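    // Note on the two branches above: when the backend can embed device-buffer handles inside
    // the kernel-parameter structure (c_canEmbedBuffers, the CUDA case), passing kernelParamsPtr
    // alone is sufficient; otherwise (e.g. OpenCL) every device buffer has to be passed as a
    // separate kernel argument.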
1259
1260     launchGpuKernel(kernelPtr, config, timingEvent, "PME spline/spread", kernelArgs);
1261     pme_gpu_stop_timing(pmeGpu, timingId);
1262
1263     const auto& settings    = pmeGpu->settings;
1264     const bool copyBackGrid = spreadCharges && (!settings.performGPUFFT || settings.copyAllOutputs);
1265     if (copyBackGrid)
1266     {
1267         pme_gpu_copy_output_spread_grid(pmeGpu, h_grid);
1268     }
1269     const bool copyBackAtomData =
1270             computeSplines && (!settings.performGPUGather || settings.copyAllOutputs);
1271     if (copyBackAtomData)
1272     {
1273         pme_gpu_copy_output_spread_atom_data(pmeGpu);
1274     }
1275 }
1276
1277 void pme_gpu_solve(const PmeGpu* pmeGpu, t_complex* h_grid, GridOrdering gridOrdering, bool computeEnergyAndVirial)
1278 {
1279     const auto& settings               = pmeGpu->settings;
1280     const bool  copyInputAndOutputGrid = !settings.performGPUFFT || settings.copyAllOutputs;
1281
1282     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1283
1284     float* h_gridFloat = reinterpret_cast<float*>(h_grid);
1285     if (copyInputAndOutputGrid)
1286     {
1287         copyToDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, h_gridFloat, 0,
1288                            pmeGpu->archSpecific->complexGridSize, pmeGpu->archSpecific->pmeStream,
1289                            pmeGpu->settings.transferKind, nullptr);
1290     }
1291
1292     int majorDim = -1, middleDim = -1, minorDim = -1;
1293     switch (gridOrdering)
1294     {
1295         case GridOrdering::YZX:
1296             majorDim  = YY;
1297             middleDim = ZZ;
1298             minorDim  = XX;
1299             break;
1300
1301         case GridOrdering::XYZ:
1302             majorDim  = XX;
1303             middleDim = YY;
1304             minorDim  = ZZ;
1305             break;
1306
1307         default: GMX_ASSERT(false, "Implement grid ordering here and below for the kernel launch");
1308     }
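    // E.g. with YZX ordering the minor (fastest-varying) dimension of the complex grid is X, so
    // the grid lines processed below run along X, while Y and Z index the major and middle
    // dimensions, respectively.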
1309
1310     const int maxBlockSize = pmeGpu->programHandle_->impl_->solveMaxWorkGroupSize;
1311
1312     const int gridLineSize      = pmeGpu->kernelParams->grid.complexGridSize[minorDim];
1313     const int gridLinesPerBlock = std::max(maxBlockSize / gridLineSize, 1);
1314     const int blocksPerGridLine = (gridLineSize + maxBlockSize - 1) / maxBlockSize;
1315     int       cellsPerBlock;
1316     if (blocksPerGridLine == 1)
1317     {
1318         cellsPerBlock = gridLineSize * gridLinesPerBlock;
1319     }
1320     else
1321     {
1322         cellsPerBlock = (gridLineSize + blocksPerGridLine - 1) / blocksPerGridLine;
1323     }
1324     const int warpSize  = pmeGpu->programHandle_->impl_->warpSize;
1325     const int blockSize = (cellsPerBlock + warpSize - 1) / warpSize * warpSize;
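    // Worked example (illustrative numbers only): for a 300-cell grid line and maxBlockSize = 256,
    // gridLinesPerBlock = max(256/300, 1) = 1 and blocksPerGridLine = (300 + 255)/256 = 2, so
    // cellsPerBlock = (300 + 1)/2 = 150 and blockSize is rounded up to the next warp multiple
    // (160 for 32-wide warps). For a 64-cell line, one block instead covers 4 whole lines
    // (cellsPerBlock = blockSize = 256).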
1326
1327     static_assert(GMX_GPU != GMX_GPU_CUDA || c_solveMaxWarpsPerBlock / 2 >= 4,
1328                   "The CUDA solve energy kernels needs at least 4 warps. "
1329                   "Here we launch at least half of the max warps.");
1330
1331     KernelLaunchConfig config;
1332     config.blockSize[0] = blockSize;
1333     config.gridSize[0]  = blocksPerGridLine;
1334     // the block size was rounded up to full warps above so that shuffle operations produce defined results
1335     config.gridSize[1] = (pmeGpu->kernelParams->grid.complexGridSize[middleDim] + gridLinesPerBlock - 1)
1336                          / gridLinesPerBlock;
1337     config.gridSize[2] = pmeGpu->kernelParams->grid.complexGridSize[majorDim];
1338     config.stream      = pmeGpu->archSpecific->pmeStream;
1339
1340     int                                timingId  = gtPME_SOLVE;
1341     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1342     if (gridOrdering == GridOrdering::YZX)
1343     {
1344         kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveYZXEnergyKernel
1345                                            : pmeGpu->programHandle_->impl_->solveYZXKernel;
1346     }
1347     else if (gridOrdering == GridOrdering::XYZ)
1348     {
1349         kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveXYZEnergyKernel
1350                                            : pmeGpu->programHandle_->impl_->solveXYZKernel;
1351     }
1352
1353     pme_gpu_start_timing(pmeGpu, timingId);
1354     auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1355 #if c_canEmbedBuffers
1356     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1357 #else
1358     const auto kernelArgs = prepareGpuKernelArguments(
1359             kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->grid.d_splineModuli,
1360             &kernelParamsPtr->constants.d_virialAndEnergy, &kernelParamsPtr->grid.d_fourierGrid);
1361 #endif
1362     launchGpuKernel(kernelPtr, config, timingEvent, "PME solve", kernelArgs);
1363     pme_gpu_stop_timing(pmeGpu, timingId);
1364
1365     if (computeEnergyAndVirial)
1366     {
1367         copyFromDeviceBuffer(pmeGpu->staging.h_virialAndEnergy,
1368                              &kernelParamsPtr->constants.d_virialAndEnergy, 0, c_virialAndEnergyCount,
1369                              pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
1370     }
1371
1372     if (copyInputAndOutputGrid)
1373     {
1374         copyFromDeviceBuffer(h_gridFloat, &kernelParamsPtr->grid.d_fourierGrid, 0,
1375                              pmeGpu->archSpecific->complexGridSize, pmeGpu->archSpecific->pmeStream,
1376                              pmeGpu->settings.transferKind, nullptr);
1377     }
1378 }
1379
1380 /*! \brief
1381  * Returns a pointer to the appropriate gather kernel based on the input values.
1382  *
1383  * \param[in]  pmeGpu                   The PME GPU structure.
1384  * \param[in]  useOrderThreadsPerAtom   bool controlling if we should use order or order*order threads per atom
1385  * \param[in]  readSplinesFromGlobal    bool controlling if we should read the spline data from global memory
1386  *
1387  * \return Pointer to the selected kernel
1388  */
1389 inline auto selectGatherKernelPtr(const PmeGpu* pmeGpu, bool useOrderThreadsPerAtom, bool readSplinesFromGlobal)
1390
1391 {
1392     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1393
1394     if (readSplinesFromGlobal)
1395     {
1396         if (useOrderThreadsPerAtom)
1397         {
1398             kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesThPerAtom4;
1399         }
1400         else
1401         {
1402             kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplines;
1403         }
1404     }
1405     else
1406     {
1407         if (useOrderThreadsPerAtom)
1408         {
1409             kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelThPerAtom4;
1410         }
1411         else
1412         {
1413             kernelPtr = pmeGpu->programHandle_->impl_->gatherKernel;
1414         }
1415     }
1416     return kernelPtr;
1417 }
1418
1419
1420 void pme_gpu_gather(PmeGpu* pmeGpu, const float* h_grid)
1421 {
1422     const auto& settings = pmeGpu->settings;
1423     if (!settings.performGPUFFT || settings.copyAllOutputs)
1424     {
1425         pme_gpu_copy_input_gather_grid(pmeGpu, const_cast<float*>(h_grid));
1426     }
1427
1428     if (settings.copyAllOutputs)
1429     {
1430         pme_gpu_copy_input_gather_atom_data(pmeGpu);
1431     }
1432
1433     /* copyAllOutputs, and hence readGlobal below, is only set in unit tests */
1434     const bool   readGlobal             = pmeGpu->settings.copyAllOutputs;
1435     const size_t blockSize              = pmeGpu->programHandle_->impl_->gatherWorkGroupSize;
1436     const bool   useOrderThreadsPerAtom = pmeGpu->settings.useOrderThreadsPerAtom;
1437     const bool   recalculateSplines     = pmeGpu->settings.recalculateSplines;
1438 #if GMX_GPU == GMX_GPU_OPENCL
1439     GMX_ASSERT(!useOrderThreadsPerAtom, "Only 16 threads per atom supported in OpenCL");
1440     GMX_ASSERT(!recalculateSplines, "Recalculating splines not supported in OpenCL");
1441 #endif
1442     const int atomsPerBlock = useOrderThreadsPerAtom ? blockSize / c_pmeSpreadGatherThreadsPerAtom4ThPerAtom
1443                                                      : blockSize / c_pmeSpreadGatherThreadsPerAtom;
1444
1445     GMX_ASSERT(!c_usePadding || !(c_pmeAtomDataAlignment % atomsPerBlock),
1446                "inconsistent atom data padding vs. gathering block size");
1447
1448     const int blockCount = pmeGpu->nAtomsPadded / atomsPerBlock;
1449     auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1450
1451     const int order = pmeGpu->common->pme_order;
1452     GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1453
1454     KernelLaunchConfig config;
1455     config.blockSize[0] = order;
1456     config.blockSize[1] = useOrderThreadsPerAtom ? 1 : order;
1457     config.blockSize[2] = atomsPerBlock;
1458     config.gridSize[0]  = dimGrid.first;
1459     config.gridSize[1]  = dimGrid.second;
1460     config.stream       = pmeGpu->archSpecific->pmeStream;
1461
1462     // TODO test different cache configs
1463
1464     int                                timingId = gtPME_GATHER;
1465     PmeGpuProgramImpl::PmeKernelHandle kernelPtr =
1466             selectGatherKernelPtr(pmeGpu, useOrderThreadsPerAtom, readGlobal || (!recalculateSplines));
1467     // TODO design kernel selection getters and make PmeGpu a friend of PmeGpuProgramImpl
1468
1469     pme_gpu_start_timing(pmeGpu, timingId);
1470     auto*       timingEvent     = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1471     const auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1472 #if c_canEmbedBuffers
1473     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1474 #else
1475     const auto kernelArgs = prepareGpuKernelArguments(
1476             kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->atoms.d_coefficients,
1477             &kernelParamsPtr->grid.d_realGrid, &kernelParamsPtr->atoms.d_theta,
1478             &kernelParamsPtr->atoms.d_dtheta, &kernelParamsPtr->atoms.d_gridlineIndices,
1479             &kernelParamsPtr->atoms.d_forces);
1480 #endif
1481     launchGpuKernel(kernelPtr, config, timingEvent, "PME gather", kernelArgs);
1482     pme_gpu_stop_timing(pmeGpu, timingId);
1483
1484     if (pmeGpu->settings.useGpuForceReduction)
1485     {
1486         pmeGpu->archSpecific->pmeForcesReady.markEvent(pmeGpu->archSpecific->pmeStream);
1487     }
1488     else
1489     {
1490         pme_gpu_copy_output_forces(pmeGpu);
1491     }
1492 }
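/* A rough sketch of how the kernels above combine into one GPU PME step on a single rank
 * (the actual call sites live in the higher-level PME code; h_complexGrid, gridOrdering and
 * computeEnergyAndVirial are placeholders here):
 *
 *   pme_gpu_spread(pmeGpu, xReadyOnDevice, gridIndex, h_grid, true, true); // splines + spread
 *   // forward 3D FFT (real to complex), then:
 *   pme_gpu_solve(pmeGpu, h_complexGrid, gridOrdering, computeEnergyAndVirial);
 *   // inverse 3D FFT (complex to real), then:
 *   pme_gpu_gather(pmeGpu, h_grid);   // forces land in d_forces or are copied back to the host
 */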
1493
1494 void* pme_gpu_get_kernelparam_forces(const PmeGpu* pmeGpu)
1495 {
1496     if (pmeGpu && pmeGpu->kernelParams)
1497     {
1498         return pmeGpu->kernelParams->atoms.d_forces;
1499     }
1500     else
1501     {
1502         return nullptr;
1503     }
1504 }
1505
1506 void pme_gpu_set_kernelparam_coordinates(const PmeGpu* pmeGpu, DeviceBuffer<gmx::RVec> d_x)
1507 {
1508     GMX_ASSERT(pmeGpu && pmeGpu->kernelParams,
1509                "PME GPU device buffer can not be set in non-GPU builds or before the GPU PME was "
1510                "initialized.");
1511
1512     GMX_ASSERT(checkDeviceBuffer(d_x, pmeGpu->kernelParams->atoms.nAtoms),
1513                "The device-side buffer can not be set.");
1514
1515     pmeGpu->kernelParams->atoms.d_coordinates = d_x;
1516 }
1517
1518 void* pme_gpu_get_stream(const PmeGpu* pmeGpu)
1519 {
1520     if (pmeGpu)
1521     {
1522         return static_cast<void*>(&pmeGpu->archSpecific->pmeStream);
1523     }
1524     else
1525     {
1526         return nullptr;
1527     }
1528 }
1529
1530 GpuEventSynchronizer* pme_gpu_get_forces_ready_synchronizer(const PmeGpu* pmeGpu)
1531 {
1532     if (pmeGpu && pmeGpu->kernelParams)
1533     {
1534         return &pmeGpu->archSpecific->pmeForcesReady;
1535     }
1536     else
1537     {
1538         return nullptr;
1539     }
1540 }