1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2016,2017,2018,2019,2020, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35
36 /*! \internal \file
37  *
38  * \brief This file contains internal function implementations
39  * for performing the PME calculations on GPU.
40  *
41  * Note that this file is compiled as regular C++ source in OpenCL builds, but
42  * it is treated as CUDA source in CUDA-enabled GPU builds.
43  *
44  * \author Aleksei Iupinov <a.yupinov@gmail.com>
45  * \ingroup module_ewald
46  */
47
48 #include "gmxpre.h"
49
50 #include "pme_gpu_internal.h"
51
52 #include "config.h"
53
54 #include <list>
55 #include <memory>
56 #include <string>
57
58 #include "gromacs/ewald/ewald_utils.h"
59 #include "gromacs/gpu_utils/gpu_utils.h"
60 #include "gromacs/math/invertmatrix.h"
61 #include "gromacs/math/units.h"
62 #include "gromacs/timing/gpu_timing.h"
63 #include "gromacs/utility/exceptions.h"
64 #include "gromacs/utility/fatalerror.h"
65 #include "gromacs/utility/gmxassert.h"
66 #include "gromacs/utility/logger.h"
67 #include "gromacs/utility/stringutil.h"
68
69 #if GMX_GPU == GMX_GPU_CUDA
70 #    include "gromacs/gpu_utils/pmalloc_cuda.h"
71
72 #    include "pme.cuh"
73 #elif GMX_GPU == GMX_GPU_OPENCL
74 #    include "gromacs/gpu_utils/gmxopencl.h"
75 #endif
76
77 #include "gromacs/ewald/pme.h"
78
79 #include "pme_gpu_3dfft.h"
80 #include "pme_gpu_constants.h"
81 #include "pme_gpu_program_impl.h"
82 #include "pme_gpu_timings.h"
83 #include "pme_gpu_types.h"
84 #include "pme_gpu_types_host.h"
85 #include "pme_gpu_types_host_impl.h"
86 #include "pme_gpu_utils.h"
87 #include "pme_grid.h"
88 #include "pme_internal.h"
89 #include "pme_solve.h"
90
91 /*! \brief
92  * CUDA only.
93  * Atom limit above which it is advantageous to turn on recalculating
94  * the splines in the gather and to use fewer threads per atom in the spline and spread kernels.
95  */
96 constexpr int c_pmeGpuPerformanceAtomLimit = 23000;
97
98 /*! \internal \brief
99  * Wrapper for getting a pointer to the plain C++ part of the GPU kernel parameters structure.
100  *
101  * \param[in] pmeGpu  The PME GPU structure.
102  * \returns The pointer to the kernel parameters.
103  */
104 static PmeGpuKernelParamsBase* pme_gpu_get_kernel_params_base_ptr(const PmeGpu* pmeGpu)
105 {
106     // reinterpret_cast is needed because the derived CUDA structure is not known in this file
107     auto* kernelParamsPtr = reinterpret_cast<PmeGpuKernelParamsBase*>(pmeGpu->kernelParams.get());
108     return kernelParamsPtr;
109 }
110
111 int pme_gpu_get_atom_data_alignment(const PmeGpu* /*unused*/)
112 {
113     // TODO: this can be simplified, as c_pmeAtomDataAlignment is now constant
114     if (c_usePadding)
115     {
116         return c_pmeAtomDataAlignment;
117     }
118     else
119     {
120         return 0;
121     }
122 }
123
124 int pme_gpu_get_atoms_per_warp(const PmeGpu* pmeGpu)
125 {
126     if (pmeGpu->settings.useOrderThreadsPerAtom)
127     {
128         return pmeGpu->programHandle_->impl_->warpSize / c_pmeSpreadGatherThreadsPerAtom4ThPerAtom;
129     }
130     else
131     {
132         return pmeGpu->programHandle_->impl_->warpSize / c_pmeSpreadGatherThreadsPerAtom;
133     }
134 }
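/* Illustrative sketch (not part of the GROMACS sources) of how the atoms-per-warp value
 * above works out. The concrete warp size and threads-per-atom constants below are
 * assumptions chosen only to make the arithmetic concrete:
 *
 *     const int warpSize        = 32; // assumed device warp size
 *     const int threadsPerAtom  = 16; // assumed value of c_pmeSpreadGatherThreadsPerAtom
 *     const int threadsPerAtom4 = 4;  // assumed value of c_pmeSpreadGatherThreadsPerAtom4ThPerAtom
 *     const int atomsPerWarp       = warpSize / threadsPerAtom;  // 2 atoms per warp
 *     const int atomsPerWarpOrder4 = warpSize / threadsPerAtom4; // 8 atoms per warp
 */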
135
136 void pme_gpu_synchronize(const PmeGpu* pmeGpu)
137 {
138     gpuStreamSynchronize(pmeGpu->archSpecific->pmeStream);
139 }
140
141 void pme_gpu_alloc_energy_virial(PmeGpu* pmeGpu)
142 {
143     const size_t energyAndVirialSize = c_virialAndEnergyCount * sizeof(float);
144     allocateDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy, c_virialAndEnergyCount,
145                          pmeGpu->archSpecific->context);
146     pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_virialAndEnergy), energyAndVirialSize);
147 }
148
149 void pme_gpu_free_energy_virial(PmeGpu* pmeGpu)
150 {
151     freeDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy);
152     pfree(pmeGpu->staging.h_virialAndEnergy);
153     pmeGpu->staging.h_virialAndEnergy = nullptr;
154 }
155
156 void pme_gpu_clear_energy_virial(const PmeGpu* pmeGpu)
157 {
158     clearDeviceBufferAsync(&pmeGpu->kernelParams->constants.d_virialAndEnergy, 0,
159                            c_virialAndEnergyCount, pmeGpu->archSpecific->pmeStream);
160 }
161
162 void pme_gpu_realloc_and_copy_bspline_values(PmeGpu* pmeGpu)
163 {
164     const int splineValuesOffset[DIM] = { 0, pmeGpu->kernelParams->grid.realGridSize[XX],
165                                           pmeGpu->kernelParams->grid.realGridSize[XX]
166                                                   + pmeGpu->kernelParams->grid.realGridSize[YY] };
167     memcpy(&pmeGpu->kernelParams->grid.splineValuesOffset, &splineValuesOffset, sizeof(splineValuesOffset));
168
169     const int newSplineValuesSize = pmeGpu->kernelParams->grid.realGridSize[XX]
170                                     + pmeGpu->kernelParams->grid.realGridSize[YY]
171                                     + pmeGpu->kernelParams->grid.realGridSize[ZZ];
172     const bool shouldRealloc = (newSplineValuesSize > pmeGpu->archSpecific->splineValuesSize);
173     reallocateDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, newSplineValuesSize,
174                            &pmeGpu->archSpecific->splineValuesSize,
175                            &pmeGpu->archSpecific->splineValuesSizeAlloc, pmeGpu->archSpecific->context);
176     if (shouldRealloc)
177     {
178         /* Reallocate the host buffer */
179         pfree(pmeGpu->staging.h_splineModuli);
180         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_splineModuli),
181                 newSplineValuesSize * sizeof(float));
182     }
183     for (int i = 0; i < DIM; i++)
184     {
185         memcpy(pmeGpu->staging.h_splineModuli + splineValuesOffset[i],
186                pmeGpu->common->bsp_mod[i].data(), pmeGpu->common->bsp_mod[i].size() * sizeof(float));
187     }
188     /* TODO: pin original buffer instead! */
189     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, pmeGpu->staging.h_splineModuli,
190                        0, newSplineValuesSize, pmeGpu->archSpecific->pmeStream,
191                        pmeGpu->settings.transferKind, nullptr);
192 }
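/* Worked example (hypothetical numbers, for illustration only) of the packing done above:
 * for a real grid of 60 x 48 x 40 points, splineValuesOffset = { 0, 60, 108 } and
 * newSplineValuesSize = 148, so the flat buffer holds
 *
 *     h_splineModuli[  0.. 59] = bsp_mod[XX]
 *     h_splineModuli[ 60..107] = bsp_mod[YY]
 *     h_splineModuli[108..147] = bsp_mod[ZZ]
 *
 * and d_splineModuli mirrors the same layout on the device.
 */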
193
194 void pme_gpu_free_bspline_values(const PmeGpu* pmeGpu)
195 {
196     pfree(pmeGpu->staging.h_splineModuli);
197     freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli);
198 }
199
200 void pme_gpu_realloc_forces(PmeGpu* pmeGpu)
201 {
202     const size_t newForcesSize = pmeGpu->nAtomsAlloc * DIM;
203     GMX_ASSERT(newForcesSize > 0, "Bad number of atoms in PME GPU");
204     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, newForcesSize,
205                            &pmeGpu->archSpecific->forcesSize,
206                            &pmeGpu->archSpecific->forcesSizeAlloc, pmeGpu->archSpecific->context);
207     pmeGpu->staging.h_forces.reserveWithPadding(pmeGpu->nAtomsAlloc);
208     pmeGpu->staging.h_forces.resizeWithPadding(pmeGpu->kernelParams->atoms.nAtoms);
209 }
210
211 void pme_gpu_free_forces(const PmeGpu* pmeGpu)
212 {
213     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces);
214 }
215
216 void pme_gpu_copy_input_forces(PmeGpu* pmeGpu)
217 {
218     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
219     float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
220     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, h_forcesFloat, 0,
221                        DIM * pmeGpu->kernelParams->atoms.nAtoms, pmeGpu->archSpecific->pmeStream,
222                        pmeGpu->settings.transferKind, nullptr);
223 }
224
225 void pme_gpu_copy_output_forces(PmeGpu* pmeGpu)
226 {
227     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
228     float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
229     copyFromDeviceBuffer(h_forcesFloat, &pmeGpu->kernelParams->atoms.d_forces, 0,
230                          DIM * pmeGpu->kernelParams->atoms.nAtoms, pmeGpu->archSpecific->pmeStream,
231                          pmeGpu->settings.transferKind, nullptr);
232 }
233
234 void pme_gpu_realloc_coordinates(PmeGpu* pmeGpu)
235 {
236     const size_t newCoordinatesSize = pmeGpu->nAtomsAlloc * DIM;
237     GMX_ASSERT(newCoordinatesSize > 0, "Bad number of atoms in PME GPU");
238     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coordinates, newCoordinatesSize,
239                            &pmeGpu->archSpecific->coordinatesSize,
240                            &pmeGpu->archSpecific->coordinatesSizeAlloc, pmeGpu->archSpecific->context);
241     if (c_usePadding)
242     {
243         const size_t paddingIndex = DIM * pmeGpu->kernelParams->atoms.nAtoms;
244         const size_t paddingCount = DIM * pmeGpu->nAtomsAlloc - paddingIndex;
245         if (paddingCount > 0)
246         {
247             clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coordinates, paddingIndex,
248                                    paddingCount, pmeGpu->archSpecific->pmeStream);
249         }
250     }
251 }
252
253 void pme_gpu_free_coordinates(const PmeGpu* pmeGpu)
254 {
255     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coordinates);
256 }
257
258 void pme_gpu_realloc_and_copy_input_coefficients(PmeGpu* pmeGpu, const float* h_coefficients)
259 {
260     GMX_ASSERT(h_coefficients, "Bad host-side charge buffer in PME GPU");
261     const size_t newCoefficientsSize = pmeGpu->nAtomsAlloc;
262     GMX_ASSERT(newCoefficientsSize > 0, "Bad number of atoms in PME GPU");
263     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients, newCoefficientsSize,
264                            &pmeGpu->archSpecific->coefficientsSize,
265                            &pmeGpu->archSpecific->coefficientsSizeAlloc, pmeGpu->archSpecific->context);
266     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients,
267                        const_cast<float*>(h_coefficients), 0, pmeGpu->kernelParams->atoms.nAtoms,
268                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
269     if (c_usePadding)
270     {
271         const size_t paddingIndex = pmeGpu->kernelParams->atoms.nAtoms;
272         const size_t paddingCount = pmeGpu->nAtomsAlloc - paddingIndex;
273         if (paddingCount > 0)
274         {
275             clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coefficients, paddingIndex,
276                                    paddingCount, pmeGpu->archSpecific->pmeStream);
277         }
278     }
279 }
280
281 void pme_gpu_free_coefficients(const PmeGpu* pmeGpu)
282 {
283     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients);
284 }
285
286 void pme_gpu_realloc_spline_data(PmeGpu* pmeGpu)
287 {
288     const int    order        = pmeGpu->common->pme_order;
289     const int    alignment    = pme_gpu_get_atoms_per_warp(pmeGpu);
290     const size_t nAtomsPadded = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
291     const int    newSplineDataSize = DIM * order * nAtomsPadded;
292     GMX_ASSERT(newSplineDataSize > 0, "Bad number of atoms in PME GPU");
293     /* Two arrays of the same size */
294     const bool shouldRealloc        = (newSplineDataSize > pmeGpu->archSpecific->splineDataSize);
295     int        currentSizeTemp      = pmeGpu->archSpecific->splineDataSize;
296     int        currentSizeTempAlloc = pmeGpu->archSpecific->splineDataSizeAlloc;
297     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta, newSplineDataSize,
298                            &currentSizeTemp, &currentSizeTempAlloc, pmeGpu->archSpecific->context);
299     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta, newSplineDataSize,
300                            &pmeGpu->archSpecific->splineDataSize,
301                            &pmeGpu->archSpecific->splineDataSizeAlloc, pmeGpu->archSpecific->context);
302     // the host side reallocation
303     if (shouldRealloc)
304     {
305         pfree(pmeGpu->staging.h_theta);
306         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_theta), newSplineDataSize * sizeof(float));
307         pfree(pmeGpu->staging.h_dtheta);
308         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_dtheta), newSplineDataSize * sizeof(float));
309     }
310 }
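/* Sketch of the size arithmetic above with assumed numbers (illustration only):
 * with an atoms-per-warp alignment of 2 and nAtomsAlloc = 7,
 *
 *     nAtomsPadded      = ((7 + 2 - 1) / 2) * 2      = 8
 *     newSplineDataSize = DIM * order * nAtomsPadded = 3 * 4 * 8 = 96
 *
 * i.e. 96 floats each for d_theta/h_theta and d_dtheta/h_dtheta.
 */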
311
312 void pme_gpu_free_spline_data(const PmeGpu* pmeGpu)
313 {
314     /* Two arrays of the same size */
315     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta);
316     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta);
317     pfree(pmeGpu->staging.h_theta);
318     pfree(pmeGpu->staging.h_dtheta);
319 }
320
321 void pme_gpu_realloc_grid_indices(PmeGpu* pmeGpu)
322 {
323     const size_t newIndicesSize = DIM * pmeGpu->nAtomsAlloc;
324     GMX_ASSERT(newIndicesSize > 0, "Bad number of atoms in PME GPU");
325     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices, newIndicesSize,
326                            &pmeGpu->archSpecific->gridlineIndicesSize,
327                            &pmeGpu->archSpecific->gridlineIndicesSizeAlloc, pmeGpu->archSpecific->context);
328     pfree(pmeGpu->staging.h_gridlineIndices);
329     pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_gridlineIndices), newIndicesSize * sizeof(int));
330 }
331
332 void pme_gpu_free_grid_indices(const PmeGpu* pmeGpu)
333 {
334     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices);
335     pfree(pmeGpu->staging.h_gridlineIndices);
336 }
337
338 void pme_gpu_realloc_grids(PmeGpu* pmeGpu)
339 {
340     auto*     kernelParamsPtr = pmeGpu->kernelParams.get();
341     const int newRealGridSize = kernelParamsPtr->grid.realGridSizePadded[XX]
342                                 * kernelParamsPtr->grid.realGridSizePadded[YY]
343                                 * kernelParamsPtr->grid.realGridSizePadded[ZZ];
344     const int newComplexGridSize = kernelParamsPtr->grid.complexGridSizePadded[XX]
345                                    * kernelParamsPtr->grid.complexGridSizePadded[YY]
346                                    * kernelParamsPtr->grid.complexGridSizePadded[ZZ] * 2;
347     // The complex grid size is multiplied by 2 because it is counted in complex numbers, while all allocations/pointers are float
348     if (pmeGpu->archSpecific->performOutOfPlaceFFT)
349     {
350         /* 2 separate grids */
351         reallocateDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, newComplexGridSize,
352                                &pmeGpu->archSpecific->complexGridSize,
353                                &pmeGpu->archSpecific->complexGridSizeAlloc, pmeGpu->archSpecific->context);
354         reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid, newRealGridSize,
355                                &pmeGpu->archSpecific->realGridSize,
356                                &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->context);
357     }
358     else
359     {
360         /* A single buffer so that any grid will fit */
361         const int newGridsSize = std::max(newRealGridSize, newComplexGridSize);
362         reallocateDeviceBuffer(
363                 &kernelParamsPtr->grid.d_realGrid, newGridsSize, &pmeGpu->archSpecific->realGridSize,
364                 &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->context);
365         kernelParamsPtr->grid.d_fourierGrid   = kernelParamsPtr->grid.d_realGrid;
366         pmeGpu->archSpecific->complexGridSize = pmeGpu->archSpecific->realGridSize;
367         // the size might get used later for copying the grid
368     }
369 }
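/* Worked example of the grid sizes above (hypothetical 64^3 grid, illustration only):
 *
 *     newRealGridSize    = 64 * 64 * 64     = 262144 floats
 *     newComplexGridSize = 64 * 64 * 33 * 2 = 270336 floats  (33 = 64/2 + 1 complex elements in ZZ)
 *
 * With out-of-place FFT these are two separate buffers; for in-place FFT a single buffer of
 * max(262144, 270336) = 270336 floats is shared by both grids.
 */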
370
371 void pme_gpu_free_grids(const PmeGpu* pmeGpu)
372 {
373     if (pmeGpu->archSpecific->performOutOfPlaceFFT)
374     {
375         freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_fourierGrid);
376     }
377     freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid);
378 }
379
380 void pme_gpu_clear_grids(const PmeGpu* pmeGpu)
381 {
382     clearDeviceBufferAsync(&pmeGpu->kernelParams->grid.d_realGrid, 0,
383                            pmeGpu->archSpecific->realGridSize, pmeGpu->archSpecific->pmeStream);
384 }
385
386 void pme_gpu_realloc_and_copy_fract_shifts(PmeGpu* pmeGpu)
387 {
388     pme_gpu_free_fract_shifts(pmeGpu);
389
390     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
391
392     const int nx                  = kernelParamsPtr->grid.realGridSize[XX];
393     const int ny                  = kernelParamsPtr->grid.realGridSize[YY];
394     const int nz                  = kernelParamsPtr->grid.realGridSize[ZZ];
395     const int cellCount           = c_pmeNeighborUnitcellCount;
396     const int gridDataOffset[DIM] = { 0, cellCount * nx, cellCount * (nx + ny) };
397
398     memcpy(kernelParamsPtr->grid.tablesOffsets, &gridDataOffset, sizeof(gridDataOffset));
399
400     const int newFractShiftsSize = cellCount * (nx + ny + nz);
401
402 #if GMX_GPU == GMX_GPU_CUDA
403     initParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable, kernelParamsPtr->fractShiftsTableTexture,
404                          pmeGpu->common->fsh.data(), newFractShiftsSize);
405
406     initParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
407                          kernelParamsPtr->gridlineIndicesTableTexture, pmeGpu->common->nn.data(),
408                          newFractShiftsSize);
409 #elif GMX_GPU == GMX_GPU_OPENCL
410     // No dedicated texture routines in OpenCL, so plain device buffers are used instead.
411     allocateDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, newFractShiftsSize,
412                          pmeGpu->archSpecific->context);
413     allocateDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, newFractShiftsSize,
414                          pmeGpu->archSpecific->context);
415     copyToDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, pmeGpu->common->fsh.data(), 0,
416                        newFractShiftsSize, pmeGpu->archSpecific->pmeStream,
417                        GpuApiCallBehavior::Async, nullptr);
418     copyToDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, pmeGpu->common->nn.data(), 0,
419                        newFractShiftsSize, pmeGpu->archSpecific->pmeStream,
420                        GpuApiCallBehavior::Async, nullptr);
421 #endif
422 }
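/* Sketch of the table layout set up above (hypothetical numbers, illustration only):
 * assuming cellCount = 5 and a 60 x 48 x 40 real grid,
 *
 *     gridDataOffset     = { 0, 5 * 60, 5 * (60 + 48) } = { 0, 300, 540 }
 *     newFractShiftsSize = 5 * (60 + 48 + 40)           = 740
 *
 * so the fractional-shift and gridline-index tables for XX, YY and ZZ are stored
 * back to back in single flat device buffers.
 */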
423
424 void pme_gpu_free_fract_shifts(const PmeGpu* pmeGpu)
425 {
426     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
427 #if GMX_GPU == GMX_GPU_CUDA
428     destroyParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable,
429                             kernelParamsPtr->fractShiftsTableTexture);
430     destroyParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
431                             kernelParamsPtr->gridlineIndicesTableTexture);
432 #elif GMX_GPU == GMX_GPU_OPENCL
433     freeDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable);
434     freeDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable);
435 #endif
436 }
437
438 bool pme_gpu_stream_query(const PmeGpu* pmeGpu)
439 {
440     return haveStreamTasksCompleted(pmeGpu->archSpecific->pmeStream);
441 }
442
443 void pme_gpu_copy_input_gather_grid(const PmeGpu* pmeGpu, float* h_grid)
444 {
445     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid, h_grid, 0, pmeGpu->archSpecific->realGridSize,
446                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
447 }
448
449 void pme_gpu_copy_output_spread_grid(const PmeGpu* pmeGpu, float* h_grid)
450 {
451     copyFromDeviceBuffer(h_grid, &pmeGpu->kernelParams->grid.d_realGrid, 0,
452                          pmeGpu->archSpecific->realGridSize, pmeGpu->archSpecific->pmeStream,
453                          pmeGpu->settings.transferKind, nullptr);
454     pmeGpu->archSpecific->syncSpreadGridD2H.markEvent(pmeGpu->archSpecific->pmeStream);
455 }
456
457 void pme_gpu_copy_output_spread_atom_data(const PmeGpu* pmeGpu)
458 {
459     const int    alignment       = pme_gpu_get_atoms_per_warp(pmeGpu);
460     const size_t nAtomsPadded    = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
461     const size_t splinesCount    = DIM * nAtomsPadded * pmeGpu->common->pme_order;
462     auto*        kernelParamsPtr = pmeGpu->kernelParams.get();
463     copyFromDeviceBuffer(pmeGpu->staging.h_dtheta, &kernelParamsPtr->atoms.d_dtheta, 0, splinesCount,
464                          pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
465     copyFromDeviceBuffer(pmeGpu->staging.h_theta, &kernelParamsPtr->atoms.d_theta, 0, splinesCount,
466                          pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
467     copyFromDeviceBuffer(pmeGpu->staging.h_gridlineIndices, &kernelParamsPtr->atoms.d_gridlineIndices,
468                          0, kernelParamsPtr->atoms.nAtoms * DIM, pmeGpu->archSpecific->pmeStream,
469                          pmeGpu->settings.transferKind, nullptr);
470 }
471
472 void pme_gpu_copy_input_gather_atom_data(const PmeGpu* pmeGpu)
473 {
474     const int    alignment       = pme_gpu_get_atoms_per_warp(pmeGpu);
475     const size_t nAtomsPadded    = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
476     const size_t splinesCount    = DIM * nAtomsPadded * pmeGpu->common->pme_order;
477     auto*        kernelParamsPtr = pmeGpu->kernelParams.get();
478     if (c_usePadding)
479     {
480         // TODO: could clear only the padding and not the whole thing, but this is test-exclusive code anyway
481         clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_gridlineIndices, 0,
482                                pmeGpu->nAtomsAlloc * DIM, pmeGpu->archSpecific->pmeStream);
483         clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_dtheta, 0,
484                                pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
485                                pmeGpu->archSpecific->pmeStream);
486         clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_theta, 0,
487                                pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
488                                pmeGpu->archSpecific->pmeStream);
489     }
490     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_dtheta, pmeGpu->staging.h_dtheta, 0, splinesCount,
491                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
492     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_theta, pmeGpu->staging.h_theta, 0, splinesCount,
493                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
494     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_gridlineIndices, pmeGpu->staging.h_gridlineIndices,
495                        0, kernelParamsPtr->atoms.nAtoms * DIM, pmeGpu->archSpecific->pmeStream,
496                        pmeGpu->settings.transferKind, nullptr);
497 }
498
499 void pme_gpu_sync_spread_grid(const PmeGpu* pmeGpu)
500 {
501     pmeGpu->archSpecific->syncSpreadGridD2H.waitForEvent();
502 }
503
504 void pme_gpu_init_internal(PmeGpu* pmeGpu)
505 {
506 #if GMX_GPU == GMX_GPU_CUDA
507     // Prepare to use the device that this PME task was assigned earlier.
508     // Other entities, such as CUDA timing events, are known to implicitly use the device context.
509     CU_RET_ERR(cudaSetDevice(pmeGpu->deviceInfo->id), "Switching to PME CUDA device");
510 #endif
511
512     /* Allocate the target-specific structures */
513     pmeGpu->archSpecific.reset(new PmeGpuSpecific());
514     pmeGpu->kernelParams.reset(new PmeGpuKernelParams());
515
516     pmeGpu->archSpecific->performOutOfPlaceFFT = true;
517     /* This should give better performance, according to the cuFFT documentation.
518      * The performance seems to be the same though.
519      * TODO: PME could also try to pick up nice grid sizes (with factors of 2, 3, 5, 7).
520      */
521
522     // TODO: this is just a convenient reuse, because programHandle_ currently is in charge of creating the context
523     pmeGpu->archSpecific->context = pmeGpu->programHandle_->impl_->context;
524
525     // timing enabling - TODO put this in gpu_utils (even though generally this is just option handling?) and reuse in NB
526     if (GMX_GPU == GMX_GPU_CUDA)
527     {
528         /* WARNING: CUDA timings are incorrect with multiple streams.
529          *          This is the main reason why they are disabled by default.
530          */
531         // TODO: Consider turning on by default when we can detect nr of streams.
532         pmeGpu->archSpecific->useTiming = (getenv("GMX_ENABLE_GPU_TIMING") != nullptr);
533     }
534     else if (GMX_GPU == GMX_GPU_OPENCL)
535     {
536         pmeGpu->archSpecific->useTiming = (getenv("GMX_DISABLE_GPU_TIMING") == nullptr);
537     }
538
539 #if GMX_GPU == GMX_GPU_CUDA
540     pmeGpu->maxGridWidthX = pmeGpu->deviceInfo->prop.maxGridSize[0];
541 #elif GMX_GPU == GMX_GPU_OPENCL
542     pmeGpu->maxGridWidthX = INT32_MAX / 2;
543     // TODO: is there really no global work size limit in OpenCL?
544 #endif
545
546     /* Creating a PME GPU stream:
547      * - default high priority with CUDA
548      * - no priorities implemented yet with OpenCL; see #2532
549      */
550 #if GMX_GPU == GMX_GPU_CUDA
551     cudaError_t stat;
552     int         highest_priority, lowest_priority;
553     stat = cudaDeviceGetStreamPriorityRange(&lowest_priority, &highest_priority);
554     CU_RET_ERR(stat, "PME cudaDeviceGetStreamPriorityRange failed");
555     stat = cudaStreamCreateWithPriority(&pmeGpu->archSpecific->pmeStream,
556                                         cudaStreamDefault, // cudaStreamNonBlocking,
557                                         highest_priority);
558     CU_RET_ERR(stat, "cudaStreamCreateWithPriority on the PME stream failed");
559 #elif GMX_GPU == GMX_GPU_OPENCL
560     cl_command_queue_properties queueProperties =
561             pmeGpu->archSpecific->useTiming ? CL_QUEUE_PROFILING_ENABLE : 0;
562     cl_device_id device_id = pmeGpu->deviceInfo->ocl_gpu_id.ocl_device_id;
563     cl_int       clError;
564     pmeGpu->archSpecific->pmeStream =
565             clCreateCommandQueue(pmeGpu->archSpecific->context, device_id, queueProperties, &clError);
566     if (clError != CL_SUCCESS)
567     {
568         GMX_THROW(gmx::InternalError("Failed to create PME command queue"));
569     }
570 #endif
571 }
572
573 void pme_gpu_destroy_specific(const PmeGpu* pmeGpu)
574 {
575 #if GMX_GPU == GMX_GPU_CUDA
576     /* Destroy the CUDA stream */
577     cudaError_t stat = cudaStreamDestroy(pmeGpu->archSpecific->pmeStream);
578     CU_RET_ERR(stat, "PME cudaStreamDestroy error");
579 #elif GMX_GPU == GMX_GPU_OPENCL
580     cl_int clError = clReleaseCommandQueue(pmeGpu->archSpecific->pmeStream);
581     if (clError != CL_SUCCESS)
582     {
583         gmx_warning("Failed to destroy PME command queue");
584     }
585 #endif
586 }
587
588 void pme_gpu_reinit_3dfft(const PmeGpu* pmeGpu)
589 {
590     if (pme_gpu_settings(pmeGpu).performGPUFFT)
591     {
592         pmeGpu->archSpecific->fftSetup.resize(0);
593         for (int i = 0; i < pmeGpu->common->ngrids; i++)
594         {
595             pmeGpu->archSpecific->fftSetup.push_back(std::make_unique<GpuParallel3dFft>(pmeGpu));
596         }
597     }
598 }
599
600 void pme_gpu_destroy_3dfft(const PmeGpu* pmeGpu)
601 {
602     pmeGpu->archSpecific->fftSetup.resize(0);
603 }
604
605 int getSplineParamFullIndex(int order, int splineIndex, int dimIndex, int atomIndex, int atomsPerWarp)
606 {
607     if (order != c_pmeGpuOrder)
608     {
609         throw order;
610     }
611     constexpr int fixedOrder = c_pmeGpuOrder;
612     GMX_UNUSED_VALUE(fixedOrder);
613
614     const int atomWarpIndex = atomIndex % atomsPerWarp;
615     const int warpIndex     = atomIndex / atomsPerWarp;
616     int       indexBase, result;
617     switch (atomsPerWarp)
618     {
619         case 1:
620             indexBase = getSplineParamIndexBase<fixedOrder, 1>(warpIndex, atomWarpIndex);
621             result    = getSplineParamIndex<fixedOrder, 1>(indexBase, dimIndex, splineIndex);
622             break;
623
624         case 2:
625             indexBase = getSplineParamIndexBase<fixedOrder, 2>(warpIndex, atomWarpIndex);
626             result    = getSplineParamIndex<fixedOrder, 2>(indexBase, dimIndex, splineIndex);
627             break;
628
629         case 4:
630             indexBase = getSplineParamIndexBase<fixedOrder, 4>(warpIndex, atomWarpIndex);
631             result    = getSplineParamIndex<fixedOrder, 4>(indexBase, dimIndex, splineIndex);
632             break;
633
634         case 8:
635             indexBase = getSplineParamIndexBase<fixedOrder, 8>(warpIndex, atomWarpIndex);
636             result    = getSplineParamIndex<fixedOrder, 8>(indexBase, dimIndex, splineIndex);
637             break;
638
639         default:
640             GMX_THROW(gmx::NotImplementedError(
641                     gmx::formatString("Test function call not unrolled for atomsPerWarp = %d in "
642                                       "getSplineParamFullIndex",
643                                       atomsPerWarp)));
644     }
645     return result;
646 }
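/* Usage sketch (illustration only) of the index helper above, e.g. from a layout-transform test:
 *
 *     // order 4, third spline value (index 2), YY dimension, atom 10, 2 atoms per warp
 *     const int fullIndex = getSplineParamFullIndex(4, 2, YY, 10, 2);
 *     const float theta   = pmeGpu->staging.h_theta[fullIndex];
 *
 * The returned index addresses the warp-interleaved GPU spline layout in the host staging buffers.
 */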
647
648 void pme_gpu_getEnergyAndVirial(const gmx_pme_t& pme, PmeOutput* output)
649 {
650     const PmeGpu* pmeGpu = pme.gpu;
651     for (int j = 0; j < c_virialAndEnergyCount; j++)
652     {
653         GMX_ASSERT(std::isfinite(pmeGpu->staging.h_virialAndEnergy[j]),
654                    "PME GPU produces incorrect energy/virial.");
655     }
656
657     unsigned int j                 = 0;
658     output->coulombVirial_[XX][XX] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
659     output->coulombVirial_[YY][YY] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
660     output->coulombVirial_[ZZ][ZZ] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
661     output->coulombVirial_[XX][YY] = output->coulombVirial_[YY][XX] =
662             0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
663     output->coulombVirial_[XX][ZZ] = output->coulombVirial_[ZZ][XX] =
664             0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
665     output->coulombVirial_[YY][ZZ] = output->coulombVirial_[ZZ][YY] =
666             0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
667     output->coulombEnergy_ = 0.5F * pmeGpu->staging.h_virialAndEnergy[j++];
668 }
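/* Layout of the staging buffer implied by the unpacking above (derived from this function only):
 *
 *     h_virialAndEnergy[0..2] = 4 * { V_xx, V_yy, V_zz }
 *     h_virialAndEnergy[3..5] = 4 * { V_xy, V_xz, V_yz }
 *     h_virialAndEnergy[6]    = 2 * E
 *
 * i.e. the accumulated values are four times the virial components and twice the energy,
 * hence the 0.25F and 0.5F factors used when copying into PmeOutput.
 */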
669
670 /*! \brief Sets the force-related members in \p output
671  *
672  * \param[in]   pmeGpu      PME GPU data structure
673  * \param[out]  output      Pointer to PME output data structure
674  */
675 static void pme_gpu_getForceOutput(PmeGpu* pmeGpu, PmeOutput* output)
676 {
677     output->haveForceOutput_ = !pmeGpu->settings.useGpuForceReduction;
678     if (output->haveForceOutput_)
679     {
680         output->forces_ = pmeGpu->staging.h_forces;
681     }
682 }
683
684 PmeOutput pme_gpu_getOutput(const gmx_pme_t& pme, const int flags)
685 {
686     PmeGpu*    pmeGpu                      = pme.gpu;
687     const bool haveComputedEnergyAndVirial = (flags & GMX_PME_CALC_ENER_VIR) != 0;
688
689     PmeOutput output;
690
691     pme_gpu_getForceOutput(pmeGpu, &output);
692
693     // The caller knows from the flags that the energy and the virial are not usable
694     // on the else branch
695     if (haveComputedEnergyAndVirial)
696     {
697         if (pme_gpu_settings(pmeGpu).performGPUSolve)
698         {
699             pme_gpu_getEnergyAndVirial(pme, &output);
700         }
701         else
702         {
703             get_pme_ener_vir_q(pme.solve_work, pme.nthread, &output);
704         }
705     }
706     return output;
707 }
708
709 void pme_gpu_update_input_box(PmeGpu gmx_unused* pmeGpu, const matrix gmx_unused box)
710 {
711 #if GMX_DOUBLE
712     GMX_THROW(gmx::NotImplementedError("PME is implemented for single-precision only on GPU"));
713 #else
714     matrix scaledBox;
715     pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
716     auto* kernelParamsPtr              = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
717     kernelParamsPtr->current.boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
718     GMX_ASSERT(kernelParamsPtr->current.boxVolume != 0.0F, "Zero volume of the unit cell");
719     matrix recipBox;
720     gmx::invertBoxMatrix(scaledBox, recipBox);
721
722     /* The GPU recipBox is transposed as compared to the CPU recipBox.
723      * Spread uses matrix columns (while solve and gather use rows).
724      * There is no particular reason for this; it might be further rethought/optimized for better access patterns.
725      */
726     const real newRecipBox[DIM][DIM] = { { recipBox[XX][XX], recipBox[YY][XX], recipBox[ZZ][XX] },
727                                          { 0.0, recipBox[YY][YY], recipBox[ZZ][YY] },
728                                          { 0.0, 0.0, recipBox[ZZ][ZZ] } };
729     memcpy(kernelParamsPtr->current.recipBox, newRecipBox, sizeof(matrix));
730 #endif
731 }
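/* Sketch of the transposition described in the comment above: for a lower-triangular CPU
 * reciprocal box
 *
 *     recipBox = ( r_xx   0     0    )
 *                ( r_yx   r_yy  0    )
 *                ( r_zx   r_zy  r_zz )
 *
 * the GPU stores the upper-triangular transpose
 *
 *     current.recipBox = ( r_xx  r_yx  r_zx )
 *                        ( 0     r_yy  r_zy )
 *                        ( 0     0     r_zz )
 *
 * so that the spread kernel can read whole columns of the CPU layout as contiguous rows.
 */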
732
733 /*! \brief \libinternal
734  * (Re-)initializes all the PME GPU data related to the grid size and cut-off.
735  *
736  * \param[in] pmeGpu            The PME GPU structure.
737  */
738 static void pme_gpu_reinit_grids(PmeGpu* pmeGpu)
739 {
740     auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
741     kernelParamsPtr->grid.ewaldFactor =
742             (M_PI * M_PI) / (pmeGpu->common->ewaldcoeff_q * pmeGpu->common->ewaldcoeff_q);
743
744     /* The grid size variants */
745     for (int i = 0; i < DIM; i++)
746     {
747         kernelParamsPtr->grid.realGridSize[i] = pmeGpu->common->nk[i];
748         kernelParamsPtr->grid.realGridSizeFP[i] =
749                 static_cast<float>(kernelParamsPtr->grid.realGridSize[i]);
750         kernelParamsPtr->grid.realGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
751
752         // The complex grid currently uses no padding;
753         // if it starts to do so, then another test should be added for that
754         kernelParamsPtr->grid.complexGridSize[i]       = kernelParamsPtr->grid.realGridSize[i];
755         kernelParamsPtr->grid.complexGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
756     }
757     /* FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
758     if (!pme_gpu_settings(pmeGpu).performGPUFFT)
759     {
760         // This allows the GPU spreading grid and the CPU fftgrid to have the same layout, so that we can copy the data directly
761         kernelParamsPtr->grid.realGridSizePadded[ZZ] =
762                 (kernelParamsPtr->grid.realGridSize[ZZ] / 2 + 1) * 2;
763     }
764
765     /* GPU FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
766     kernelParamsPtr->grid.complexGridSize[ZZ] /= 2;
767     kernelParamsPtr->grid.complexGridSize[ZZ]++;
768     kernelParamsPtr->grid.complexGridSizePadded[ZZ] = kernelParamsPtr->grid.complexGridSize[ZZ];
769
770     pme_gpu_realloc_and_copy_fract_shifts(pmeGpu);
771     pme_gpu_realloc_and_copy_bspline_values(pmeGpu);
772     pme_gpu_realloc_grids(pmeGpu);
773     pme_gpu_reinit_3dfft(pmeGpu);
774 }
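/* Worked example of the FFT-related sizes above (hypothetical 64^3 grid, illustration only):
 *
 *     complexGridSize[ZZ]    = 64 / 2 + 1       = 33
 *     realGridSizePadded[ZZ] = (64 / 2 + 1) * 2 = 66   (only when the FFT is done on the CPU)
 *
 * With GPU FFT the real grid stays unpadded (64), while the complex grid always has
 * n/2 + 1 elements in the minor dimension.
 */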
775
776 /* Several GPU functions that refer to the CPU PME data live here.
777  * We would like to keep these away from the GPU-framework specific code for clarity,
778  * as well as to avoid compilation issues with MPI.
779  */
780
781 /*! \brief \libinternal
782  * Copies everything useful from the PME CPU to the PME GPU structure.
783  * The goal is to minimize interaction with the PME CPU structure in the GPU code.
784  *
785  * \param[in] pme         The PME structure.
786  */
787 static void pme_gpu_copy_common_data_from(const gmx_pme_t* pme)
788 {
789     /* TODO: Consider refactoring the CPU PME code to use the same structure,
790      * so that this function becomes 2 lines */
791     PmeGpu* pmeGpu               = pme->gpu;
792     pmeGpu->common->ngrids       = pme->ngrids;
793     pmeGpu->common->epsilon_r    = pme->epsilon_r;
794     pmeGpu->common->ewaldcoeff_q = pme->ewaldcoeff_q;
795     pmeGpu->common->nk[XX]       = pme->nkx;
796     pmeGpu->common->nk[YY]       = pme->nky;
797     pmeGpu->common->nk[ZZ]       = pme->nkz;
798     pmeGpu->common->pme_order    = pme->pme_order;
799     if (pmeGpu->common->pme_order != c_pmeGpuOrder)
800     {
801         GMX_THROW(gmx::NotImplementedError("pme_order != 4 is not implemented!"));
802     }
803     for (int i = 0; i < DIM; i++)
804     {
805         pmeGpu->common->bsp_mod[i].assign(pme->bsp_mod[i], pme->bsp_mod[i] + pmeGpu->common->nk[i]);
806     }
807     const int cellCount = c_pmeNeighborUnitcellCount;
808     pmeGpu->common->fsh.resize(0);
809     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshx, pme->fshx + cellCount * pme->nkx);
810     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshy, pme->fshy + cellCount * pme->nky);
811     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshz, pme->fshz + cellCount * pme->nkz);
812     pmeGpu->common->nn.resize(0);
813     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnx, pme->nnx + cellCount * pme->nkx);
814     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nny, pme->nny + cellCount * pme->nky);
815     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnz, pme->nnz + cellCount * pme->nkz);
816     pmeGpu->common->runMode       = pme->runMode;
817     pmeGpu->common->isRankPmeOnly = !pme->bPPnode;
818     pmeGpu->common->boxScaler     = pme->boxScaler;
819 }
820
821 /*! \libinternal \brief
822  * Uses heuristics to select the best-performing PME spread and gather kernels.
823  *
824  * \param[in,out] pmeGpu         The PME GPU structure.
825  */
826 static void pme_gpu_select_best_performing_pme_spreadgather_kernels(PmeGpu* pmeGpu)
827 {
828     if (pmeGpu->kernelParams->atoms.nAtoms > c_pmeGpuPerformanceAtomLimit && (GMX_GPU == GMX_GPU_CUDA))
829     {
830         pmeGpu->settings.useOrderThreadsPerAtom = true;
831         pmeGpu->settings.recalculateSplines     = true;
832     }
833     else
834     {
835         pmeGpu->settings.useOrderThreadsPerAtom = false;
836         pmeGpu->settings.recalculateSplines     = false;
837     }
838 }
839
840
841 /*! \libinternal \brief
842  * Initializes the PME GPU data at the beginning of the run.
843  * TODO: this should become PmeGpu::PmeGpu()
844  *
845  * \param[in,out] pme            The PME structure.
846  * \param[in,out] gpuInfo        The GPU information structure.
847  * \param[in]     pmeGpuProgram  The handle to the program/kernel data created outside (e.g. in unit tests/runner)
848  */
849 static void pme_gpu_init(gmx_pme_t* pme, const gmx_device_info_t* gpuInfo, const PmeGpuProgram* pmeGpuProgram)
850 {
851     pme->gpu       = new PmeGpu();
852     PmeGpu* pmeGpu = pme->gpu;
853     changePinningPolicy(&pmeGpu->staging.h_forces, pme_get_pinning_policy());
854     pmeGpu->common = std::make_shared<PmeShared>();
855
856     /* These settings are set here for the whole run; dynamic ones are set in pme_gpu_reinit() */
857     /* A convenience variable. */
858     pmeGpu->settings.useDecomposition = (pme->nnodes != 1);
859     /* TODO: CPU gather with GPU spread is broken due to different theta/dtheta layout. */
860     pmeGpu->settings.performGPUGather = true;
861     // By default GPU-side reduction is off (explicitly set here for tests, otherwise reset per-step)
862     pmeGpu->settings.useGpuForceReduction = false;
863
864     pme_gpu_set_testing(pmeGpu, false);
865
866     pmeGpu->deviceInfo = gpuInfo;
867     GMX_ASSERT(pmeGpuProgram != nullptr, "GPU kernels must be already compiled");
868     pmeGpu->programHandle_ = pmeGpuProgram;
869
870     pmeGpu->initializedClfftLibrary_ = std::make_unique<gmx::ClfftInitializer>();
871
872     pme_gpu_init_internal(pmeGpu);
873     pme_gpu_alloc_energy_virial(pmeGpu);
874
875     pme_gpu_copy_common_data_from(pme);
876
877     GMX_ASSERT(pmeGpu->common->epsilon_r != 0.0F, "PME GPU: bad electrostatic coefficient");
878
879     auto* kernelParamsPtr               = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
880     kernelParamsPtr->constants.elFactor = ONE_4PI_EPS0 / pmeGpu->common->epsilon_r;
881 }
882
883 void pme_gpu_transform_spline_atom_data(const PmeGpu*      pmeGpu,
884                                         const PmeAtomComm* atc,
885                                         PmeSplineDataType  type,
886                                         int                dimIndex,
887                                         PmeLayoutTransform transform)
888 {
889     // The GPU atom spline data is laid out in a different way currently than the CPU one.
890     // This function converts the data from GPU to CPU layout (in the host memory).
891     // It is only intended for testing purposes so far.
892     // Ideally we should use similar layouts on CPU and GPU if we care about mixed modes and their
893     // performance (e.g. spreading on GPU, gathering on CPU).
894     GMX_RELEASE_ASSERT(atc->nthread == 1, "Only the serial PME data layout is supported");
895     const uintmax_t threadIndex  = 0;
896     const auto      atomCount    = pme_gpu_get_kernel_params_base_ptr(pmeGpu)->atoms.nAtoms;
897     const auto      atomsPerWarp = pme_gpu_get_atoms_per_warp(pmeGpu);
898     const auto      pmeOrder     = pmeGpu->common->pme_order;
899     GMX_ASSERT(pmeOrder == c_pmeGpuOrder, "Only PME order 4 is implemented");
900
901     real*  cpuSplineBuffer;
902     float* h_splineBuffer;
903     switch (type)
904     {
905         case PmeSplineDataType::Values:
906             cpuSplineBuffer = atc->spline[threadIndex].theta.coefficients[dimIndex];
907             h_splineBuffer  = pmeGpu->staging.h_theta;
908             break;
909
910         case PmeSplineDataType::Derivatives:
911             cpuSplineBuffer = atc->spline[threadIndex].dtheta.coefficients[dimIndex];
912             h_splineBuffer  = pmeGpu->staging.h_dtheta;
913             break;
914
915         default: GMX_THROW(gmx::InternalError("Unknown spline data type"));
916     }
917
918     for (auto atomIndex = 0; atomIndex < atomCount; atomIndex++)
919     {
920         for (auto orderIndex = 0; orderIndex < pmeOrder; orderIndex++)
921         {
922             const auto gpuValueIndex =
923                     getSplineParamFullIndex(pmeOrder, orderIndex, dimIndex, atomIndex, atomsPerWarp);
924             const auto cpuValueIndex = atomIndex * pmeOrder + orderIndex;
925             GMX_ASSERT(cpuValueIndex < atomCount * pmeOrder,
926                        "Atom spline data index out of bounds (while transforming GPU data layout "
927                        "for host)");
928             switch (transform)
929             {
930                 case PmeLayoutTransform::GpuToHost:
931                     cpuSplineBuffer[cpuValueIndex] = h_splineBuffer[gpuValueIndex];
932                     break;
933
934                 case PmeLayoutTransform::HostToGpu:
935                     h_splineBuffer[gpuValueIndex] = cpuSplineBuffer[cpuValueIndex];
936                     break;
937
938                 default: GMX_THROW(gmx::InternalError("Unknown layout transform"));
939             }
940         }
941     }
942 }
943
944 void pme_gpu_get_real_grid_sizes(const PmeGpu* pmeGpu, gmx::IVec* gridSize, gmx::IVec* paddedGridSize)
945 {
946     GMX_ASSERT(gridSize != nullptr, "");
947     GMX_ASSERT(paddedGridSize != nullptr, "");
948     GMX_ASSERT(pmeGpu != nullptr, "");
949     auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
950     for (int i = 0; i < DIM; i++)
951     {
952         (*gridSize)[i]       = kernelParamsPtr->grid.realGridSize[i];
953         (*paddedGridSize)[i] = kernelParamsPtr->grid.realGridSizePadded[i];
954     }
955 }
956
957 void pme_gpu_reinit(gmx_pme_t* pme, const gmx_device_info_t* gpuInfo, const PmeGpuProgram* pmeGpuProgram)
958 {
959     GMX_ASSERT(pme != nullptr, "Need valid PME object");
960     if (pme->runMode == PmeRunMode::CPU)
961     {
962         GMX_ASSERT(pme->gpu == nullptr, "Should not have PME GPU object");
963         return;
964     }
965
966     if (!pme->gpu)
967     {
968         /* First-time initialization */
969         pme_gpu_init(pme, gpuInfo, pmeGpuProgram);
970     }
971     else
972     {
973         /* After this call nothing in the GPU code should refer to the gmx_pme_t *pme itself - until the next pme_gpu_reinit */
974         pme_gpu_copy_common_data_from(pme);
975     }
976     /* GPU FFT will only get used for a single rank. */
977     pme->gpu->settings.performGPUFFT =
978             (pme->gpu->common->runMode == PmeRunMode::GPU) && !pme->gpu->settings.useDecomposition;
979     pme->gpu->settings.performGPUSolve = (pme->gpu->common->runMode == PmeRunMode::GPU);
980
981     /* Reinit active timers */
982     pme_gpu_reinit_timings(pme->gpu);
983
984     pme_gpu_reinit_grids(pme->gpu);
985     // Note: if timing the reinit launch overhead becomes more relevant
986     // (e.g. with regular PP-PME re-balancing), we should pass wcycle here.
987     pme_gpu_reinit_computation(pme, nullptr);
988     /* Clear the previous box - doesn't hurt, and forces the PME CPU recipbox
989      * update for mixed mode on grid switch. TODO: use shared recipbox field.
990      */
991     std::memset(pme->gpu->common->previousBox, 0, sizeof(pme->gpu->common->previousBox));
992     pme_gpu_select_best_performing_pme_spreadgather_kernels(pme->gpu);
993 }
994
995 void pme_gpu_destroy(PmeGpu* pmeGpu)
996 {
997     /* Free lots of data */
998     pme_gpu_free_energy_virial(pmeGpu);
999     pme_gpu_free_bspline_values(pmeGpu);
1000     pme_gpu_free_forces(pmeGpu);
1001     pme_gpu_free_coefficients(pmeGpu);
1002     pme_gpu_free_spline_data(pmeGpu);
1003     pme_gpu_free_grid_indices(pmeGpu);
1004     pme_gpu_free_fract_shifts(pmeGpu);
1005     pme_gpu_free_grids(pmeGpu);
1006
1007     pme_gpu_destroy_3dfft(pmeGpu);
1008
1009     /* Free the GPU-framework specific data last */
1010     pme_gpu_destroy_specific(pmeGpu);
1011
1012     delete pmeGpu;
1013 }
1014
1015 void pme_gpu_reinit_atoms(PmeGpu* pmeGpu, const int nAtoms, const real* charges)
1016 {
1017     auto* kernelParamsPtr         = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
1018     kernelParamsPtr->atoms.nAtoms = nAtoms;
1019     const int alignment           = pme_gpu_get_atom_data_alignment(pmeGpu);
1020     pmeGpu->nAtomsPadded          = ((nAtoms + alignment - 1) / alignment) * alignment;
1021     const int  nAtomsAlloc        = c_usePadding ? pmeGpu->nAtomsPadded : nAtoms;
1022     const bool haveToRealloc =
1023             (pmeGpu->nAtomsAlloc < nAtomsAlloc); /* This check might be redundant, but is logical */
1024     pmeGpu->nAtomsAlloc = nAtomsAlloc;
1025
1026 #if GMX_DOUBLE
1027     GMX_RELEASE_ASSERT(false, "Only single precision supported");
1028     GMX_UNUSED_VALUE(charges);
1029 #else
1030     pme_gpu_realloc_and_copy_input_coefficients(pmeGpu, reinterpret_cast<const float*>(charges));
1031     /* This could also be made conditional on haveToRealloc, but the copy always needs to be performed */
1032 #endif
1033
1034     if (haveToRealloc)
1035     {
1036         pme_gpu_realloc_forces(pmeGpu);
1037         pme_gpu_realloc_spline_data(pmeGpu);
1038         pme_gpu_realloc_grid_indices(pmeGpu);
1039     }
1040 }
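/* Sketch of the padding arithmetic above with assumed numbers (illustration only):
 * for nAtoms = 1000 and an assumed atom data alignment of 64,
 *
 *     nAtomsPadded = ((1000 + 64 - 1) / 64) * 64 = 1024
 *     nAtomsAlloc  = c_usePadding ? 1024 : 1000
 *
 * and reallocation of forces, spline data and gridline indices only happens when
 * nAtomsAlloc grows beyond the previously allocated size.
 */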
1041
1042 /*! \internal \brief
1043  * Returns the raw timing event from the corresponding GpuRegionTimer (if timings are enabled).
1044  * In CUDA the result can be a nullptr stub, per the GpuRegionTimer implementation.
1045  *
1046  * \param[in] pmeGpu         The PME GPU data structure.
1047  * \param[in] PMEStageId     The PME GPU stage gtPME_ index from the enum in src/gromacs/timing/gpu_timing.h
1048  */
1049 static CommandEvent* pme_gpu_fetch_timing_event(const PmeGpu* pmeGpu, size_t PMEStageId)
1050 {
1051     CommandEvent* timingEvent = nullptr;
1052     if (pme_gpu_timings_enabled(pmeGpu))
1053     {
1054         GMX_ASSERT(PMEStageId < pmeGpu->archSpecific->timingEvents.size(),
1055                    "Wrong PME GPU timing event index");
1056         timingEvent = pmeGpu->archSpecific->timingEvents[PMEStageId].fetchNextEvent();
1057     }
1058     return timingEvent;
1059 }
1060
1061 void pme_gpu_3dfft(const PmeGpu* pmeGpu, gmx_fft_direction dir, int grid_index)
1062 {
1063     int timerId = (dir == GMX_FFT_REAL_TO_COMPLEX) ? gtPME_FFT_R2C : gtPME_FFT_C2R;
1064
1065     pme_gpu_start_timing(pmeGpu, timerId);
1066     pmeGpu->archSpecific->fftSetup[grid_index]->perform3dFft(
1067             dir, pme_gpu_fetch_timing_event(pmeGpu, timerId));
1068     pme_gpu_stop_timing(pmeGpu, timerId);
1069 }
1070
1071 /*! \brief
1072  * Given possibly large \p blockCount, returns a compact 1D or 2D grid for kernel scheduling,
1073  * to minimize the number of unused blocks.
1074  */
1075 std::pair<int, int> inline pmeGpuCreateGrid(const PmeGpu* pmeGpu, int blockCount)
1076 {
1077     // How many maximum widths in X do we need (hopefully just one)
1078     const int minRowCount = (blockCount + pmeGpu->maxGridWidthX - 1) / pmeGpu->maxGridWidthX;
1079     // Trying to make things even
1080     const int colCount = (blockCount + minRowCount - 1) / minRowCount;
1081     GMX_ASSERT((colCount * minRowCount - blockCount) >= 0, "pmeGpuCreateGrid: totally wrong");
1082     GMX_ASSERT((colCount * minRowCount - blockCount) < minRowCount,
1083                "pmeGpuCreateGrid: excessive blocks");
1084     return std::pair<int, int>(colCount, minRowCount);
1085 }
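/* Worked example (hypothetical numbers, illustration only) of the launch-grid construction above:
 * for blockCount = 100000 and maxGridWidthX = 65535,
 *
 *     minRowCount = (100000 + 65535 - 1) / 65535 = 2
 *     colCount    = (100000 + 2 - 1) / 2         = 50000
 *
 * giving a 50000 x 2 grid with no unused blocks; a blockCount that fits into maxGridWidthX
 * simply produces a 1D grid (minRowCount = 1).
 */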
1086
1087 /*! \brief
1088  * Returns a pointer to the appropriate spline-and-spread kernel based on the input bool values.
1089  *
1090  * \param[in]  pmeGpu                   The PME GPU structure.
1091  * \param[in]  useOrderThreadsPerAtom   bool controlling if we should use order or order*order threads per atom
1092  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
1093  *
1094  * \return Pointer to CUDA kernel
1095  */
1096 static auto selectSplineAndSpreadKernelPtr(const PmeGpu* pmeGpu, bool useOrderThreadsPerAtom, bool writeSplinesToGlobal)
1097 {
1098     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1099     if (writeSplinesToGlobal)
1100     {
1101         if (useOrderThreadsPerAtom)
1102         {
1103             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesThPerAtom4;
1104         }
1105         else
1106         {
1107             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplines;
1108         }
1109     }
1110     else
1111     {
1112         if (useOrderThreadsPerAtom)
1113         {
1114             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4;
1115         }
1116         else
1117         {
1118             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernel;
1119         }
1120     }
1121
1122     return kernelPtr;
1123 }
1124
1125 /*! \brief
1126  * Returns a pointer to the appropriate spline kernel based on the input bool values.
1127  *
1128  * \param[in]  pmeGpu                   The PME GPU structure.
1129  * \param[in]  useOrderThreadsPerAtom   bool controlling if we should use order or order*order threads per atom
1130  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
1131  *
1132  * \return Pointer to CUDA kernel
1133  */
1134 static auto selectSplineKernelPtr(const PmeGpu* pmeGpu, bool useOrderThreadsPerAtom, bool gmx_unused writeSplinesToGlobal)
1135 {
1136     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1137     GMX_ASSERT(
1138             writeSplinesToGlobal,
1139             "Spline data should always be written to global memory when just calculating splines");
1140
1141     if (useOrderThreadsPerAtom)
1142     {
1143         kernelPtr = pmeGpu->programHandle_->impl_->splineKernelThPerAtom4;
1144     }
1145     else
1146     {
1147         kernelPtr = pmeGpu->programHandle_->impl_->splineKernel;
1148     }
1149     return kernelPtr;
1150 }
1151
1152 /*! \brief
1153  * Returns a pointer to the appropriate spread kernel based on the input bool values.
1154  *
1155  * \param[in]  pmeGpu                   The PME GPU structure.
1156  * \param[in]  useOrderThreadsPerAtom   bool controlling if we should use order or order*order threads per atom
1157  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
1158  *
1159  * \return Pointer to CUDA kernel
1160  */
1161 static auto selectSpreadKernelPtr(const PmeGpu* pmeGpu, bool useOrderThreadsPerAtom, bool writeSplinesToGlobal)
1162 {
1163     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1164     if (writeSplinesToGlobal)
1165     {
1166         if (useOrderThreadsPerAtom)
1167         {
1168             kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelThPerAtom4;
1169         }
1170         else
1171         {
1172             kernelPtr = pmeGpu->programHandle_->impl_->spreadKernel;
1173         }
1174     }
1175     else
1176     {
1177         /* If we are not saving the spline data, we need to recalculate it
1178            using the spline-and-spread kernel */
1179         if (useOrderThreadsPerAtom)
1180         {
1181             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4;
1182         }
1183         else
1184         {
1185             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernel;
1186         }
1187     }
1188     return kernelPtr;
1189 }
1190
1191 void pme_gpu_spread(const PmeGpu*         pmeGpu,
1192                     GpuEventSynchronizer* xReadyOnDevice,
1193                     int gmx_unused gridIndex,
1194                     real*          h_grid,
1195                     bool           computeSplines,
1196                     bool           spreadCharges)
1197 {
1198     GMX_ASSERT(computeSplines || spreadCharges,
1199                "PME spline/spread kernel has invalid input (nothing to do)");
1200     const auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1201     GMX_ASSERT(kernelParamsPtr->atoms.nAtoms > 0, "No atom data in PME GPU spread");
1202
1203     const size_t blockSize = pmeGpu->programHandle_->impl_->spreadWorkGroupSize;
1204
1205     const int order = pmeGpu->common->pme_order;
1206     GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1207     const bool writeGlobal            = pmeGpu->settings.copyAllOutputs;
1208     const bool useOrderThreadsPerAtom = pmeGpu->settings.useOrderThreadsPerAtom;
1209     const bool recalculateSplines     = pmeGpu->settings.recalculateSplines;
1210 #if GMX_GPU == GMX_GPU_OPENCL
1211     GMX_ASSERT(!useOrderThreadsPerAtom, "Only 16 threads per atom supported in OpenCL");
1212     GMX_ASSERT(!recalculateSplines, "Recalculating splines not supported in OpenCL");
1213 #endif
1214     const int atomsPerBlock = useOrderThreadsPerAtom ? blockSize / c_pmeSpreadGatherThreadsPerAtom4ThPerAtom
1215                                                      : blockSize / c_pmeSpreadGatherThreadsPerAtom;
1216
1217     // TODO: pick smaller block size in runtime if needed
1218     // (e.g. on 660 Ti where 50% occupancy is ~25% faster than 100% occupancy with RNAse (~17.8k atoms))
1219     // If doing so, change atomsPerBlock in the kernels as well.
1220     // TODO: test varying block sizes on modern architectures as well
1221     // TODO: also consider using cudaFuncSetCacheConfig() for preferring shared memory on older architectures
1222     // (for spline data mostly)
1223     GMX_ASSERT(!c_usePadding || !(c_pmeAtomDataAlignment % atomsPerBlock),
1224                "inconsistent atom data padding vs. spreading block size");
1225
1226     // Ensure that coordinates are ready on the device before launching spread;
1227     // only needed with CUDA on PP+PME ranks; not needed on separate PME ranks, in unit tests,
1228     // or in OpenCL, as these cases use a single stream (hence xReadyOnDevice == nullptr).
1229     GMX_ASSERT(xReadyOnDevice != nullptr || (GMX_GPU != GMX_GPU_CUDA)
1230                        || pmeGpu->common->isRankPmeOnly || pme_gpu_settings(pmeGpu).copyAllOutputs,
1231                "Need a valid coordinate synchronizer on PP+PME ranks with CUDA.");
1232     if (xReadyOnDevice)
1233     {
1234         xReadyOnDevice->enqueueWaitEvent(pmeGpu->archSpecific->pmeStream);
1235     }
1236
1237     const int blockCount = pmeGpu->nAtomsPadded / atomsPerBlock;
1238     auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1239
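         // Thread-block layout of the spline/spread kernels: X spans the interpolation order,
         // Y is 1 with order threads per atom (order otherwise), and Z indexes the atoms within
         // the block; pmeGpuCreateGrid() lays the block count out as a (possibly 2D) block grid.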
1240     KernelLaunchConfig config;
1241     config.blockSize[0] = order;
1242     config.blockSize[1] = useOrderThreadsPerAtom ? 1 : order;
1243     config.blockSize[2] = atomsPerBlock;
1244     config.gridSize[0]  = dimGrid.first;
1245     config.gridSize[1]  = dimGrid.second;
1246     config.stream       = pmeGpu->archSpecific->pmeStream;
1247
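         // Kernel variant selection: spline data is written to global memory either when all outputs
         // are copied back (writeGlobal) or when the gather will reuse it rather than recalculate it
         // (!recalculateSplines); otherwise the fused spline-and-spread kernel keeps the splines local.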
1248     int                                timingId;
1249     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1250     if (computeSplines)
1251     {
1252         if (spreadCharges)
1253         {
1254             timingId  = gtPME_SPLINEANDSPREAD;
1255             kernelPtr = selectSplineAndSpreadKernelPtr(pmeGpu, useOrderThreadsPerAtom,
1256                                                        writeGlobal || (!recalculateSplines));
1257         }
1258         else
1259         {
1260             timingId  = gtPME_SPLINE;
1261             kernelPtr = selectSplineKernelPtr(pmeGpu, useOrderThreadsPerAtom,
1262                                               writeGlobal || (!recalculateSplines));
1263         }
1264     }
1265     else
1266     {
1267         timingId  = gtPME_SPREAD;
1268         kernelPtr = selectSpreadKernelPtr(pmeGpu, useOrderThreadsPerAtom,
1269                                           writeGlobal || (!recalculateSplines));
1270     }
1271
1272
1273     pme_gpu_start_timing(pmeGpu, timingId);
1274     auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
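         // With buffer embedding (CUDA) the whole kernel parameter structure is passed as a single
         // argument; without it (OpenCL) every device buffer has to be passed as a separate argument.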
1275 #if c_canEmbedBuffers
1276     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1277 #else
1278     const auto kernelArgs = prepareGpuKernelArguments(
1279             kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->atoms.d_theta,
1280             &kernelParamsPtr->atoms.d_dtheta, &kernelParamsPtr->atoms.d_gridlineIndices,
1281             &kernelParamsPtr->grid.d_realGrid, &kernelParamsPtr->grid.d_fractShiftsTable,
1282             &kernelParamsPtr->grid.d_gridlineIndicesTable, &kernelParamsPtr->atoms.d_coefficients,
1283             &kernelParamsPtr->atoms.d_coordinates);
1284 #endif
1285
1286     launchGpuKernel(kernelPtr, config, timingEvent, "PME spline/spread", kernelArgs);
1287     pme_gpu_stop_timing(pmeGpu, timingId);
1288
1289     const auto& settings    = pmeGpu->settings;
1290     const bool copyBackGrid = spreadCharges && (!settings.performGPUFFT || settings.copyAllOutputs);
1291     if (copyBackGrid)
1292     {
1293         pme_gpu_copy_output_spread_grid(pmeGpu, h_grid);
1294     }
1295     const bool copyBackAtomData =
1296             computeSplines && (!settings.performGPUGather || settings.copyAllOutputs);
1297     if (copyBackAtomData)
1298     {
1299         pme_gpu_copy_output_spread_atom_data(pmeGpu);
1300     }
1301 }
1302
1303 void pme_gpu_solve(const PmeGpu* pmeGpu, t_complex* h_grid, GridOrdering gridOrdering, bool computeEnergyAndVirial)
1304 {
1305     const auto& settings               = pmeGpu->settings;
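         // The complex grid is staged through the host when the FFT runs on the CPU, or when all
         // outputs are requested (e.g. in tests); otherwise it stays on the device between FFT and solve.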
1306     const bool  copyInputAndOutputGrid = !settings.performGPUFFT || settings.copyAllOutputs;
1307
1308     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1309
1310     float* h_gridFloat = reinterpret_cast<float*>(h_grid);
1311     if (copyInputAndOutputGrid)
1312     {
1313         copyToDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, h_gridFloat, 0,
1314                            pmeGpu->archSpecific->complexGridSize, pmeGpu->archSpecific->pmeStream,
1315                            pmeGpu->settings.transferKind, nullptr);
1316     }
1317
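         // Map the grid ordering onto major/middle/minor dimensions; the solve kernel processes grid
         // lines along the minor (fastest-varying) dimension of the complex grid.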
1318     int majorDim = -1, middleDim = -1, minorDim = -1;
1319     switch (gridOrdering)
1320     {
1321         case GridOrdering::YZX:
1322             majorDim  = YY;
1323             middleDim = ZZ;
1324             minorDim  = XX;
1325             break;
1326
1327         case GridOrdering::XYZ:
1328             majorDim  = XX;
1329             middleDim = YY;
1330             minorDim  = ZZ;
1331             break;
1332
1333         default: GMX_ASSERT(false, "Implement grid ordering here and below for the kernel launch");
1334     }
1335
1336     const int maxBlockSize = pmeGpu->programHandle_->impl_->solveMaxWorkGroupSize;
1337
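         // Block decomposition of the solve kernel: several short grid lines (along the minor dimension)
         // are packed into one block, while a grid line longer than the maximum work-group size is split
         // across blocksPerGridLine blocks. E.g., with hypothetical values maxBlockSize = 256,
         // warpSize = 32 and gridLineSize = 36: gridLinesPerBlock = 7, blocksPerGridLine = 1,
         // cellsPerBlock = 252, and the block size below rounds up to 256 (full warps).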
1338     const int gridLineSize      = pmeGpu->kernelParams->grid.complexGridSize[minorDim];
1339     const int gridLinesPerBlock = std::max(maxBlockSize / gridLineSize, 1);
1340     const int blocksPerGridLine = (gridLineSize + maxBlockSize - 1) / maxBlockSize;
1341     int       cellsPerBlock;
1342     if (blocksPerGridLine == 1)
1343     {
1344         cellsPerBlock = gridLineSize * gridLinesPerBlock;
1345     }
1346     else
1347     {
1348         cellsPerBlock = (gridLineSize + blocksPerGridLine - 1) / blocksPerGridLine;
1349     }
1350     const int warpSize  = pmeGpu->programHandle_->impl_->warpSize;
1351     const int blockSize = (cellsPerBlock + warpSize - 1) / warpSize * warpSize;
1352
1353     static_assert(GMX_GPU != GMX_GPU_CUDA || c_solveMaxWarpsPerBlock / 2 >= 4,
1354                   "The CUDA solve energy kernels need at least 4 warps. "
1355                   "Here we launch at least half of the max warps.");
1356
1357     KernelLaunchConfig config;
1358     config.blockSize[0] = blockSize;
1359     config.gridSize[0]  = blocksPerGridLine;
1360     // rounding up to full warps so that shuffle operations produce defined results
1361     config.gridSize[1] = (pmeGpu->kernelParams->grid.complexGridSize[middleDim] + gridLinesPerBlock - 1)
1362                          / gridLinesPerBlock;
1363     config.gridSize[2] = pmeGpu->kernelParams->grid.complexGridSize[majorDim];
1364     config.stream      = pmeGpu->archSpecific->pmeStream;
1365
1366     int                                timingId  = gtPME_SOLVE;
1367     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1368     if (gridOrdering == GridOrdering::YZX)
1369     {
1370         kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveYZXEnergyKernel
1371                                            : pmeGpu->programHandle_->impl_->solveYZXKernel;
1372     }
1373     else if (gridOrdering == GridOrdering::XYZ)
1374     {
1375         kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveXYZEnergyKernel
1376                                            : pmeGpu->programHandle_->impl_->solveXYZKernel;
1377     }
1378
1379     pme_gpu_start_timing(pmeGpu, timingId);
1380     auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1381 #if c_canEmbedBuffers
1382     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1383 #else
1384     const auto kernelArgs = prepareGpuKernelArguments(
1385             kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->grid.d_splineModuli,
1386             &kernelParamsPtr->constants.d_virialAndEnergy, &kernelParamsPtr->grid.d_fourierGrid);
1387 #endif
1388     launchGpuKernel(kernelPtr, config, timingEvent, "PME solve", kernelArgs);
1389     pme_gpu_stop_timing(pmeGpu, timingId);
1390
1391     if (computeEnergyAndVirial)
1392     {
1393         copyFromDeviceBuffer(pmeGpu->staging.h_virialAndEnergy,
1394                              &kernelParamsPtr->constants.d_virialAndEnergy, 0, c_virialAndEnergyCount,
1395                              pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
1396     }
1397
1398     if (copyInputAndOutputGrid)
1399     {
1400         copyFromDeviceBuffer(h_gridFloat, &kernelParamsPtr->grid.d_fourierGrid, 0,
1401                              pmeGpu->archSpecific->complexGridSize, pmeGpu->archSpecific->pmeStream,
1402                              pmeGpu->settings.transferKind, nullptr);
1403     }
1404 }
1405
1406 /*! \brief
1407  * Returns a pointer to the appropriate gather kernel based on the input values.
1408  *
1409  * \param[in]  pmeGpu                   The PME GPU structure.
1410  * \param[in]  useOrderThreadsPerAtom   bool controlling whether order or order*order threads per atom are used
1411  * \param[in]  readSplinesFromGlobal    bool controlling whether spline data should be read from global memory
1412  * \param[in]  forceTreatment           Controls whether the forces from the gather should increment or replace the input forces.
1413  *
1414  * \return Pointer to CUDA kernel
1415  */
1416 inline auto selectGatherKernelPtr(const PmeGpu*          pmeGpu,
1417                                   bool                   useOrderThreadsPerAtom,
1418                                   bool                   readSplinesFromGlobal,
1419                                   PmeForceOutputHandling forceTreatment)
1420
1421 {
1422     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1423
1424     if (readSplinesFromGlobal)
1425     {
1426         if (useOrderThreadsPerAtom)
1427         {
1428             kernelPtr = (forceTreatment == PmeForceOutputHandling::Set)
1429                                 ? pmeGpu->programHandle_->impl_->gatherKernelReadSplinesThPerAtom4
1430                                 : pmeGpu->programHandle_->impl_->gatherReduceWithInputKernelReadSplinesThPerAtom4;
1431         }
1432         else
1433         {
1434             kernelPtr = (forceTreatment == PmeForceOutputHandling::Set)
1435                                 ? pmeGpu->programHandle_->impl_->gatherKernelReadSplines
1436                                 : pmeGpu->programHandle_->impl_->gatherReduceWithInputKernelReadSplines;
1437         }
1438     }
1439     else
1440     {
1441         if (useOrderThreadsPerAtom)
1442         {
1443             kernelPtr = (forceTreatment == PmeForceOutputHandling::Set)
1444                                 ? pmeGpu->programHandle_->impl_->gatherKernelThPerAtom4
1445                                 : pmeGpu->programHandle_->impl_->gatherReduceWithInputKernelThPerAtom4;
1446         }
1447         else
1448         {
1449             kernelPtr = (forceTreatment == PmeForceOutputHandling::Set)
1450                                 ? pmeGpu->programHandle_->impl_->gatherKernel
1451                                 : pmeGpu->programHandle_->impl_->gatherReduceWithInputKernel;
1452         }
1453     }
1454     return kernelPtr;
1455 }
1456
1457
1458 void pme_gpu_gather(PmeGpu* pmeGpu, PmeForceOutputHandling forceTreatment, const float* h_grid)
1459 {
1460     /* Copying the input CPU forces for reduction */
1461     if (forceTreatment != PmeForceOutputHandling::Set)
1462     {
1463         pme_gpu_copy_input_forces(pmeGpu);
1464     }
1465
1466     const auto& settings = pmeGpu->settings;
1467     if (!settings.performGPUFFT || settings.copyAllOutputs)
1468     {
1469         pme_gpu_copy_input_gather_grid(pmeGpu, const_cast<float*>(h_grid));
1470     }
1471
1472     if (settings.copyAllOutputs)
1473     {
1474         pme_gpu_copy_input_gather_atom_data(pmeGpu);
1475     }
1476
1477     /* copyAllOutputs is set when running unit tests; it forces reading the spline data from global memory */
1478     const bool   readGlobal             = pmeGpu->settings.copyAllOutputs;
1479     const size_t blockSize              = pmeGpu->programHandle_->impl_->gatherWorkGroupSize;
1480     const bool   useOrderThreadsPerAtom = pmeGpu->settings.useOrderThreadsPerAtom;
1481     const bool   recalculateSplines     = pmeGpu->settings.recalculateSplines;
1482 #if GMX_GPU == GMX_GPU_OPENCL
1483     GMX_ASSERT(!useOrderThreadsPerAtom, "Only 16 threads per atom supported in OpenCL");
1484     GMX_ASSERT(!recalculateSplines, "Recalculating splines not supported in OpenCL");
1485 #endif
1486     const int atomsPerBlock = useOrderThreadsPerAtom ? blockSize / c_pmeSpreadGatherThreadsPerAtom4ThPerAtom
1487                                                      : blockSize / c_pmeSpreadGatherThreadsPerAtom;
1488
1489     GMX_ASSERT(!c_usePadding || !(c_pmeAtomDataAlignment % atomsPerBlock),
1490                "inconsistent atom data padding vs. gathering block size");
1491
1492     const int blockCount = pmeGpu->nAtomsPadded / atomsPerBlock;
1493     auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1494
1495     const int order = pmeGpu->common->pme_order;
1496     GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1497
1498     KernelLaunchConfig config;
1499     config.blockSize[0] = order;
1500     config.blockSize[1] = useOrderThreadsPerAtom ? 1 : order;
1501     config.blockSize[2] = atomsPerBlock;
1502     config.gridSize[0]  = dimGrid.first;
1503     config.gridSize[1]  = dimGrid.second;
1504     config.stream       = pmeGpu->archSpecific->pmeStream;
1505
1506     // TODO test different cache configs
1507
1508     int                                timingId  = gtPME_GATHER;
1509     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = selectGatherKernelPtr(
1510             pmeGpu, useOrderThreadsPerAtom, readGlobal || (!recalculateSplines), forceTreatment);
1511     // TODO design kernel selection getters and make PmeGpu a friend of PmeGpuProgramImpl
1512
1513     pme_gpu_start_timing(pmeGpu, timingId);
1514     auto*       timingEvent     = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1515     const auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1516 #if c_canEmbedBuffers
1517     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1518 #else
1519     const auto kernelArgs = prepareGpuKernelArguments(
1520             kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->atoms.d_coefficients,
1521             &kernelParamsPtr->grid.d_realGrid, &kernelParamsPtr->atoms.d_theta,
1522             &kernelParamsPtr->atoms.d_dtheta, &kernelParamsPtr->atoms.d_gridlineIndices,
1523             &kernelParamsPtr->atoms.d_forces);
1524 #endif
1525     launchGpuKernel(kernelPtr, config, timingEvent, "PME gather", kernelArgs);
1526     pme_gpu_stop_timing(pmeGpu, timingId);
1527
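         // With GPU force reduction the forces stay on the device and only an event is marked for the
         // consumer to wait on; otherwise the gathered forces are copied back to the host here.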
1528     if (pmeGpu->settings.useGpuForceReduction)
1529     {
1530         pmeGpu->archSpecific->pmeForcesReady.markEvent(pmeGpu->archSpecific->pmeStream);
1531     }
1532     else
1533     {
1534         pme_gpu_copy_output_forces(pmeGpu);
1535     }
1536 }
1537
1538 DeviceBuffer<float> pme_gpu_get_kernelparam_coordinates(const PmeGpu* pmeGpu)
1539 {
1540     GMX_ASSERT(pmeGpu && pmeGpu->kernelParams,
1541                "PME GPU device buffer was requested in non-GPU build or before the GPU PME was "
1542                "initialized.");
1543
1544     return pmeGpu->kernelParams->atoms.d_coordinates;
1545 }
1546
1547 void* pme_gpu_get_kernelparam_forces(const PmeGpu* pmeGpu)
1548 {
1549     if (pmeGpu && pmeGpu->kernelParams)
1550     {
1551         return pmeGpu->kernelParams->atoms.d_forces;
1552     }
1553     else
1554     {
1555         return nullptr;
1556     }
1557 }
1558
1559 /*! \brief Check the validity of the device buffer.
1560  *
1561  * Checks if the buffer is not nullptr and, when possible, if it is big enough.
1562  *
1563  * \todo Split and move this function to gpu_utils.
1564  *
1565  * \param[in] buffer        Device buffer to be checked.
1566  * \param[in] requiredSize  Number of elements that the buffer will have to accommodate.
1567  *
1568  * \returns If the device buffer can be set.
1569  */
1570 template<typename T>
1571 static bool checkDeviceBuffer(gmx_unused DeviceBuffer<T> buffer, gmx_unused int requiredSize)
1572 {
1573 #if GMX_GPU == GMX_GPU_CUDA
1574     GMX_ASSERT(buffer != nullptr, "The device pointer is nullptr");
1575     return buffer != nullptr;
1576 #elif GMX_GPU == GMX_GPU_OPENCL
1577     size_t size;
1578     int    retval = clGetMemObjectInfo(buffer, CL_MEM_SIZE, sizeof(size), &size, nullptr);
1579     GMX_ASSERT(retval == CL_SUCCESS,
1580                gmx::formatString("clGetMemObjectInfo failed with error code #%d", retval).c_str());
1581     GMX_ASSERT(static_cast<int>(size) >= requiredSize,
1582                "Number of atoms in device buffer is smaller then required size.");
1583     return retval == CL_SUCCESS && static_cast<int>(size) >= requiredSize;
1584 #elif GMX_GPU == GMX_GPU_NONE
1585     GMX_ASSERT(false, "Setter for device-side coordinates was called in non-GPU build.");
1586     return false;
1587 #endif
1588 }
1589
1590 void pme_gpu_set_kernelparam_coordinates(const PmeGpu* pmeGpu, DeviceBuffer<float> d_x)
1591 {
1592     GMX_ASSERT(pmeGpu && pmeGpu->kernelParams,
1593                "PME GPU device buffer can not be set in non-GPU builds or before the GPU PME was "
1594                "initialized.");
1595
1596     GMX_ASSERT(checkDeviceBuffer(d_x, pmeGpu->kernelParams->atoms.nAtoms),
1597                "The device-side buffer can not be set.");
1598
1599     pmeGpu->kernelParams->atoms.d_coordinates = d_x;
1600 }
1601
1602 void* pme_gpu_get_stream(const PmeGpu* pmeGpu)
1603 {
1604     if (pmeGpu)
1605     {
1606         return static_cast<void*>(&pmeGpu->archSpecific->pmeStream);
1607     }
1608     else
1609     {
1610         return nullptr;
1611     }
1612 }
1613
1614 void* pme_gpu_get_context(const PmeGpu* pmeGpu)
1615 {
1616     if (pmeGpu)
1617     {
1618         return static_cast<void*>(&pmeGpu->archSpecific->context);
1619     }
1620     else
1621     {
1622         return nullptr;
1623     }
1624 }
1625
1626 GpuEventSynchronizer* pme_gpu_get_forces_ready_synchronizer(const PmeGpu* pmeGpu)
1627 {
1628     if (pmeGpu && pmeGpu->kernelParams)
1629     {
1630         return &pmeGpu->archSpecific->pmeForcesReady;
1631     }
1632     else
1633     {
1634         return nullptr;
1635     }
1636 }