1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2016,2017,2018,2019, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35
36 /*! \internal \file
37  *
38  * \brief This file contains internal function implementations
39  * for performing the PME calculations on GPU.
40  *
41  * Note that this file is compiled as regular C++ source in OpenCL builds, but
42  * it is treated as CUDA source in CUDA-enabled GPU builds.
43  *
44  * \author Aleksei Iupinov <a.yupinov@gmail.com>
45  * \ingroup module_ewald
46  */
47
48 #include "gmxpre.h"
49
50 #include "pme_gpu_internal.h"
51
52 #include "config.h"
53
54 #include <list>
55 #include <memory>
56 #include <string>
57
58 #include "gromacs/ewald/ewald_utils.h"
59 #include "gromacs/gpu_utils/gpu_utils.h"
60 #include "gromacs/math/invertmatrix.h"
61 #include "gromacs/math/units.h"
62 #include "gromacs/timing/gpu_timing.h"
63 #include "gromacs/utility/exceptions.h"
64 #include "gromacs/utility/fatalerror.h"
65 #include "gromacs/utility/gmxassert.h"
66 #include "gromacs/utility/logger.h"
67 #include "gromacs/utility/stringutil.h"
68
69 #if GMX_GPU == GMX_GPU_CUDA
70 #    include "gromacs/gpu_utils/pmalloc_cuda.h"
71
72 #    include "pme.cuh"
73 #elif GMX_GPU == GMX_GPU_OPENCL
74 #    include "gromacs/gpu_utils/gmxopencl.h"
75 #endif
76
77 #include "gromacs/ewald/pme.h"
78
79 #include "pme_gpu_3dfft.h"
80 #include "pme_gpu_constants.h"
81 #include "pme_gpu_program_impl.h"
82 #include "pme_gpu_timings.h"
83 #include "pme_gpu_types.h"
84 #include "pme_gpu_types_host.h"
85 #include "pme_gpu_types_host_impl.h"
86 #include "pme_gpu_utils.h"
87 #include "pme_grid.h"
88 #include "pme_internal.h"
89 #include "pme_solve.h"
90
91
92 /*! \brief
93  *  CUDA only.
94  *  Controls whether we should use order (i.e. 4) or order*order (i.e. 16)
95  *  threads per atom on the GPU.
96  */
97 constexpr bool c_useOrderThreadsPerAtom = false;
98 /*! \brief
99  * CUDA only.
100  * Controls whether we should recalculate the splines in the gather, or
101  * save the values in the spread and reload them in the gather.
102  */
103 constexpr bool c_recalculateSplines = false;
104
105 /*! \internal \brief
106  * Wrapper for getting a pointer to the plain C++ part of the GPU kernel parameters structure.
107  *
108  * \param[in] pmeGpu  The PME GPU structure.
109  * \returns The pointer to the kernel parameters.
110  */
111 static PmeGpuKernelParamsBase* pme_gpu_get_kernel_params_base_ptr(const PmeGpu* pmeGpu)
112 {
113     // reinterpret_cast is needed because the derived CUDA structure is not known in this file
114     auto* kernelParamsPtr = reinterpret_cast<PmeGpuKernelParamsBase*>(pmeGpu->kernelParams.get());
115     return kernelParamsPtr;
116 }
117
118 int pme_gpu_get_atom_data_alignment(const PmeGpu* /*unused*/)
119 {
120     // TODO: this can be simplified, as c_pmeAtomDataAlignment is now constant
121     if (c_usePadding)
122     {
123         return c_pmeAtomDataAlignment;
124     }
125     else
126     {
127         return 0;
128     }
129 }
130
131 int pme_gpu_get_atoms_per_warp(const PmeGpu* pmeGpu)
132 {
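    // For example, with a 32-wide CUDA warp and the default order*order (i.e. 16) threads per atom,
    // this returns 2 atoms per warp; with order (i.e. 4) threads per atom it returns 8.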
133     if (c_useOrderThreadsPerAtom)
134     {
135         return pmeGpu->programHandle_->impl_->warpSize / c_pmeSpreadGatherThreadsPerAtom4ThPerAtom;
136     }
137     else
138     {
139         return pmeGpu->programHandle_->impl_->warpSize / c_pmeSpreadGatherThreadsPerAtom;
140     }
141 }
142
143 void pme_gpu_synchronize(const PmeGpu* pmeGpu)
144 {
145     gpuStreamSynchronize(pmeGpu->archSpecific->pmeStream);
146 }
147
148 void pme_gpu_alloc_energy_virial(PmeGpu* pmeGpu)
149 {
150     const size_t energyAndVirialSize = c_virialAndEnergyCount * sizeof(float);
151     allocateDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy, c_virialAndEnergyCount,
152                          pmeGpu->archSpecific->context);
153     pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_virialAndEnergy), energyAndVirialSize);
154 }
155
156 void pme_gpu_free_energy_virial(PmeGpu* pmeGpu)
157 {
158     freeDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy);
159     pfree(pmeGpu->staging.h_virialAndEnergy);
160     pmeGpu->staging.h_virialAndEnergy = nullptr;
161 }
162
163 void pme_gpu_clear_energy_virial(const PmeGpu* pmeGpu)
164 {
165     clearDeviceBufferAsync(&pmeGpu->kernelParams->constants.d_virialAndEnergy, 0,
166                            c_virialAndEnergyCount, pmeGpu->archSpecific->pmeStream);
167 }
168
169 void pme_gpu_realloc_and_copy_bspline_values(PmeGpu* pmeGpu)
170 {
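    // The 1D B-spline moduli for the XX, YY and ZZ dimensions are packed into a single buffer;
    // the offsets below mark where the YY and ZZ sections start.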
171     const int splineValuesOffset[DIM] = { 0, pmeGpu->kernelParams->grid.realGridSize[XX],
172                                           pmeGpu->kernelParams->grid.realGridSize[XX]
173                                                   + pmeGpu->kernelParams->grid.realGridSize[YY] };
174     memcpy(&pmeGpu->kernelParams->grid.splineValuesOffset, &splineValuesOffset, sizeof(splineValuesOffset));
175
176     const int newSplineValuesSize = pmeGpu->kernelParams->grid.realGridSize[XX]
177                                     + pmeGpu->kernelParams->grid.realGridSize[YY]
178                                     + pmeGpu->kernelParams->grid.realGridSize[ZZ];
179     const bool shouldRealloc = (newSplineValuesSize > pmeGpu->archSpecific->splineValuesSize);
180     reallocateDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, newSplineValuesSize,
181                            &pmeGpu->archSpecific->splineValuesSize,
182                            &pmeGpu->archSpecific->splineValuesSizeAlloc, pmeGpu->archSpecific->context);
183     if (shouldRealloc)
184     {
185         /* Reallocate the host buffer */
186         pfree(pmeGpu->staging.h_splineModuli);
187         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_splineModuli),
188                 newSplineValuesSize * sizeof(float));
189     }
190     for (int i = 0; i < DIM; i++)
191     {
192         memcpy(pmeGpu->staging.h_splineModuli + splineValuesOffset[i],
193                pmeGpu->common->bsp_mod[i].data(), pmeGpu->common->bsp_mod[i].size() * sizeof(float));
194     }
195     /* TODO: pin original buffer instead! */
196     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, pmeGpu->staging.h_splineModuli,
197                        0, newSplineValuesSize, pmeGpu->archSpecific->pmeStream,
198                        pmeGpu->settings.transferKind, nullptr);
199 }
200
201 void pme_gpu_free_bspline_values(const PmeGpu* pmeGpu)
202 {
203     pfree(pmeGpu->staging.h_splineModuli);
204     freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli);
205 }
206
207 void pme_gpu_realloc_forces(PmeGpu* pmeGpu)
208 {
209     const size_t newForcesSize = pmeGpu->nAtomsAlloc * DIM;
210     GMX_ASSERT(newForcesSize > 0, "Bad number of atoms in PME GPU");
211     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, newForcesSize,
212                            &pmeGpu->archSpecific->forcesSize,
213                            &pmeGpu->archSpecific->forcesSizeAlloc, pmeGpu->archSpecific->context);
214     pmeGpu->staging.h_forces.reserveWithPadding(pmeGpu->nAtomsAlloc);
215     pmeGpu->staging.h_forces.resizeWithPadding(pmeGpu->kernelParams->atoms.nAtoms);
216 }
217
218 void pme_gpu_free_forces(const PmeGpu* pmeGpu)
219 {
220     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces);
221 }
222
223 void pme_gpu_copy_input_forces(PmeGpu* pmeGpu)
224 {
225     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
226     float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
227     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, h_forcesFloat, 0,
228                        DIM * pmeGpu->kernelParams->atoms.nAtoms, pmeGpu->archSpecific->pmeStream,
229                        pmeGpu->settings.transferKind, nullptr);
230 }
231
232 void pme_gpu_copy_output_forces(PmeGpu* pmeGpu)
233 {
234     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
235     float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
236     copyFromDeviceBuffer(h_forcesFloat, &pmeGpu->kernelParams->atoms.d_forces, 0,
237                          DIM * pmeGpu->kernelParams->atoms.nAtoms, pmeGpu->archSpecific->pmeStream,
238                          pmeGpu->settings.transferKind, nullptr);
239 }
240
241 void pme_gpu_realloc_coordinates(const PmeGpu* pmeGpu)
242 {
243     const size_t newCoordinatesSize = pmeGpu->nAtomsAlloc * DIM;
244     GMX_ASSERT(newCoordinatesSize > 0, "Bad number of atoms in PME GPU");
245     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coordinates, newCoordinatesSize,
246                            &pmeGpu->archSpecific->coordinatesSize,
247                            &pmeGpu->archSpecific->coordinatesSizeAlloc, pmeGpu->archSpecific->context);
248     if (c_usePadding)
249     {
250         const size_t paddingIndex = DIM * pmeGpu->kernelParams->atoms.nAtoms;
251         const size_t paddingCount = DIM * pmeGpu->nAtomsAlloc - paddingIndex;
252         if (paddingCount > 0)
253         {
254             clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coordinates, paddingIndex,
255                                    paddingCount, pmeGpu->archSpecific->pmeStream);
256         }
257     }
258 }
259
260 void pme_gpu_free_coordinates(const PmeGpu* pmeGpu)
261 {
262     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coordinates);
263 }
264
265 void pme_gpu_realloc_and_copy_input_coefficients(const PmeGpu* pmeGpu, const float* h_coefficients)
266 {
267     GMX_ASSERT(h_coefficients, "Bad host-side charge buffer in PME GPU");
268     const size_t newCoefficientsSize = pmeGpu->nAtomsAlloc;
269     GMX_ASSERT(newCoefficientsSize > 0, "Bad number of atoms in PME GPU");
270     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients, newCoefficientsSize,
271                            &pmeGpu->archSpecific->coefficientsSize,
272                            &pmeGpu->archSpecific->coefficientsSizeAlloc, pmeGpu->archSpecific->context);
273     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients,
274                        const_cast<float*>(h_coefficients), 0, pmeGpu->kernelParams->atoms.nAtoms,
275                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
276     if (c_usePadding)
277     {
278         const size_t paddingIndex = pmeGpu->kernelParams->atoms.nAtoms;
279         const size_t paddingCount = pmeGpu->nAtomsAlloc - paddingIndex;
280         if (paddingCount > 0)
281         {
282             clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coefficients, paddingIndex,
283                                    paddingCount, pmeGpu->archSpecific->pmeStream);
284         }
285     }
286 }
287
288 void pme_gpu_free_coefficients(const PmeGpu* pmeGpu)
289 {
290     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients);
291 }
292
293 void pme_gpu_realloc_spline_data(PmeGpu* pmeGpu)
294 {
295     const int    order        = pmeGpu->common->pme_order;
296     const int    alignment    = pme_gpu_get_atoms_per_warp(pmeGpu);
297     const size_t nAtomsPadded = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
298     const int    newSplineDataSize = DIM * order * nAtomsPadded;
299     GMX_ASSERT(newSplineDataSize > 0, "Bad number of atoms in PME GPU");
300     /* Two arrays of the same size */
301     const bool shouldRealloc        = (newSplineDataSize > pmeGpu->archSpecific->splineDataSize);
302     int        currentSizeTemp      = pmeGpu->archSpecific->splineDataSize;
303     int        currentSizeTempAlloc = pmeGpu->archSpecific->splineDataSizeAlloc;
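    // A temporary copy of the current size is passed to the first reallocation, so that both
    // buffers are resized based on the same old size and the recorded size is only updated once.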
304     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta, newSplineDataSize,
305                            &currentSizeTemp, &currentSizeTempAlloc, pmeGpu->archSpecific->context);
306     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta, newSplineDataSize,
307                            &pmeGpu->archSpecific->splineDataSize,
308                            &pmeGpu->archSpecific->splineDataSizeAlloc, pmeGpu->archSpecific->context);
309     // the host side reallocation
310     if (shouldRealloc)
311     {
312         pfree(pmeGpu->staging.h_theta);
313         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_theta), newSplineDataSize * sizeof(float));
314         pfree(pmeGpu->staging.h_dtheta);
315         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_dtheta), newSplineDataSize * sizeof(float));
316     }
317 }
318
319 void pme_gpu_free_spline_data(const PmeGpu* pmeGpu)
320 {
321     /* Two arrays of the same size */
322     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta);
323     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta);
324     pfree(pmeGpu->staging.h_theta);
325     pfree(pmeGpu->staging.h_dtheta);
326 }
327
328 void pme_gpu_realloc_grid_indices(PmeGpu* pmeGpu)
329 {
330     const size_t newIndicesSize = DIM * pmeGpu->nAtomsAlloc;
331     GMX_ASSERT(newIndicesSize > 0, "Bad number of atoms in PME GPU");
332     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices, newIndicesSize,
333                            &pmeGpu->archSpecific->gridlineIndicesSize,
334                            &pmeGpu->archSpecific->gridlineIndicesSizeAlloc, pmeGpu->archSpecific->context);
335     pfree(pmeGpu->staging.h_gridlineIndices);
336     pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_gridlineIndices), newIndicesSize * sizeof(int));
337 }
338
339 void pme_gpu_free_grid_indices(const PmeGpu* pmeGpu)
340 {
341     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices);
342     pfree(pmeGpu->staging.h_gridlineIndices);
343 }
344
345 void pme_gpu_realloc_grids(PmeGpu* pmeGpu)
346 {
347     auto*     kernelParamsPtr = pmeGpu->kernelParams.get();
348     const int newRealGridSize = kernelParamsPtr->grid.realGridSizePadded[XX]
349                                 * kernelParamsPtr->grid.realGridSizePadded[YY]
350                                 * kernelParamsPtr->grid.realGridSizePadded[ZZ];
351     const int newComplexGridSize = kernelParamsPtr->grid.complexGridSizePadded[XX]
352                                    * kernelParamsPtr->grid.complexGridSizePadded[YY]
353                                    * kernelParamsPtr->grid.complexGridSizePadded[ZZ] * 2;
354     // Multiplied by 2 because the complex grid size is counted in complex numbers, while all allocations/pointers are float
355     if (pmeGpu->archSpecific->performOutOfPlaceFFT)
356     {
357         /* 2 separate grids */
358         reallocateDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, newComplexGridSize,
359                                &pmeGpu->archSpecific->complexGridSize,
360                                &pmeGpu->archSpecific->complexGridSizeAlloc, pmeGpu->archSpecific->context);
361         reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid, newRealGridSize,
362                                &pmeGpu->archSpecific->realGridSize,
363                                &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->context);
364     }
365     else
366     {
367         /* A single buffer so that any grid will fit */
368         const int newGridsSize = std::max(newRealGridSize, newComplexGridSize);
369         reallocateDeviceBuffer(
370                 &kernelParamsPtr->grid.d_realGrid, newGridsSize, &pmeGpu->archSpecific->realGridSize,
371                 &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->context);
372         kernelParamsPtr->grid.d_fourierGrid   = kernelParamsPtr->grid.d_realGrid;
373         pmeGpu->archSpecific->complexGridSize = pmeGpu->archSpecific->realGridSize;
374         // the size might get used later for copying the grid
375     }
376 }
377
378 void pme_gpu_free_grids(const PmeGpu* pmeGpu)
379 {
380     if (pmeGpu->archSpecific->performOutOfPlaceFFT)
381     {
382         freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_fourierGrid);
383     }
384     freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid);
385 }
386
387 void pme_gpu_clear_grids(const PmeGpu* pmeGpu)
388 {
389     clearDeviceBufferAsync(&pmeGpu->kernelParams->grid.d_realGrid, 0,
390                            pmeGpu->archSpecific->realGridSize, pmeGpu->archSpecific->pmeStream);
391 }
392
393 void pme_gpu_realloc_and_copy_fract_shifts(PmeGpu* pmeGpu)
394 {
395     pme_gpu_free_fract_shifts(pmeGpu);
396
397     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
398
399     const int nx                  = kernelParamsPtr->grid.realGridSize[XX];
400     const int ny                  = kernelParamsPtr->grid.realGridSize[YY];
401     const int nz                  = kernelParamsPtr->grid.realGridSize[ZZ];
402     const int cellCount           = c_pmeNeighborUnitcellCount;
403     const int gridDataOffset[DIM] = { 0, cellCount * nx, cellCount * (nx + ny) };
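    // The fractional shift and gridline index tables for the three dimensions are packed into
    // single buffers of cellCount * (nx + ny + nz) entries, using the offsets below.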
404
405     memcpy(kernelParamsPtr->grid.tablesOffsets, &gridDataOffset, sizeof(gridDataOffset));
406
407     const int newFractShiftsSize = cellCount * (nx + ny + nz);
408
409 #if GMX_GPU == GMX_GPU_CUDA
410     initParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable, kernelParamsPtr->fractShiftsTableTexture,
411                          pmeGpu->common->fsh.data(), newFractShiftsSize);
412
413     initParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
414                          kernelParamsPtr->gridlineIndicesTableTexture, pmeGpu->common->nn.data(),
415                          newFractShiftsSize);
416 #elif GMX_GPU == GMX_GPU_OPENCL
417     // No dedicated texture routines in OpenCL, so plain device buffers are used instead.
418     allocateDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, newFractShiftsSize,
419                          pmeGpu->archSpecific->context);
420     allocateDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, newFractShiftsSize,
421                          pmeGpu->archSpecific->context);
422     copyToDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, pmeGpu->common->fsh.data(), 0,
423                        newFractShiftsSize, pmeGpu->archSpecific->pmeStream,
424                        GpuApiCallBehavior::Async, nullptr);
425     copyToDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, pmeGpu->common->nn.data(), 0,
426                        newFractShiftsSize, pmeGpu->archSpecific->pmeStream,
427                        GpuApiCallBehavior::Async, nullptr);
428 #endif
429 }
430
431 void pme_gpu_free_fract_shifts(const PmeGpu* pmeGpu)
432 {
433     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
434 #if GMX_GPU == GMX_GPU_CUDA
435     destroyParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable,
436                             kernelParamsPtr->fractShiftsTableTexture);
437     destroyParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
438                             kernelParamsPtr->gridlineIndicesTableTexture);
439 #elif GMX_GPU == GMX_GPU_OPENCL
440     freeDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable);
441     freeDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable);
442 #endif
443 }
444
445 bool pme_gpu_stream_query(const PmeGpu* pmeGpu)
446 {
447     return haveStreamTasksCompleted(pmeGpu->archSpecific->pmeStream);
448 }
449
450 void pme_gpu_copy_input_gather_grid(const PmeGpu* pmeGpu, float* h_grid)
451 {
452     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid, h_grid, 0, pmeGpu->archSpecific->realGridSize,
453                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
454 }
455
456 void pme_gpu_copy_output_spread_grid(const PmeGpu* pmeGpu, float* h_grid)
457 {
458     copyFromDeviceBuffer(h_grid, &pmeGpu->kernelParams->grid.d_realGrid, 0,
459                          pmeGpu->archSpecific->realGridSize, pmeGpu->archSpecific->pmeStream,
460                          pmeGpu->settings.transferKind, nullptr);
461     pmeGpu->archSpecific->syncSpreadGridD2H.markEvent(pmeGpu->archSpecific->pmeStream);
462 }
463
464 void pme_gpu_copy_output_spread_atom_data(const PmeGpu* pmeGpu)
465 {
466     const int    alignment       = pme_gpu_get_atoms_per_warp(pmeGpu);
467     const size_t nAtomsPadded    = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
468     const size_t splinesCount    = DIM * nAtomsPadded * pmeGpu->common->pme_order;
469     auto*        kernelParamsPtr = pmeGpu->kernelParams.get();
470     copyFromDeviceBuffer(pmeGpu->staging.h_dtheta, &kernelParamsPtr->atoms.d_dtheta, 0, splinesCount,
471                          pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
472     copyFromDeviceBuffer(pmeGpu->staging.h_theta, &kernelParamsPtr->atoms.d_theta, 0, splinesCount,
473                          pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
474     copyFromDeviceBuffer(pmeGpu->staging.h_gridlineIndices, &kernelParamsPtr->atoms.d_gridlineIndices,
475                          0, kernelParamsPtr->atoms.nAtoms * DIM, pmeGpu->archSpecific->pmeStream,
476                          pmeGpu->settings.transferKind, nullptr);
477 }
478
479 void pme_gpu_copy_input_gather_atom_data(const PmeGpu* pmeGpu)
480 {
481     const int    alignment       = pme_gpu_get_atoms_per_warp(pmeGpu);
482     const size_t nAtomsPadded    = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
483     const size_t splinesCount    = DIM * nAtomsPadded * pmeGpu->common->pme_order;
484     auto*        kernelParamsPtr = pmeGpu->kernelParams.get();
485     if (c_usePadding)
486     {
487         // TODO: could clear only the padding and not the whole thing, but this is test-exclusive code anyway
488         clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_gridlineIndices, 0,
489                                pmeGpu->nAtomsAlloc * DIM, pmeGpu->archSpecific->pmeStream);
490         clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_dtheta, 0,
491                                pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
492                                pmeGpu->archSpecific->pmeStream);
493         clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_theta, 0,
494                                pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
495                                pmeGpu->archSpecific->pmeStream);
496     }
497     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_dtheta, pmeGpu->staging.h_dtheta, 0, splinesCount,
498                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
499     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_theta, pmeGpu->staging.h_theta, 0, splinesCount,
500                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
501     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_gridlineIndices, pmeGpu->staging.h_gridlineIndices,
502                        0, kernelParamsPtr->atoms.nAtoms * DIM, pmeGpu->archSpecific->pmeStream,
503                        pmeGpu->settings.transferKind, nullptr);
504 }
505
506 void pme_gpu_sync_spread_grid(const PmeGpu* pmeGpu)
507 {
508     pmeGpu->archSpecific->syncSpreadGridD2H.waitForEvent();
509 }
510
511 void pme_gpu_init_internal(PmeGpu* pmeGpu)
512 {
513 #if GMX_GPU == GMX_GPU_CUDA
514     // Prepare to use the device that this PME task was assigned earlier.
515     // Other entities, such as CUDA timing events, are known to implicitly use the device context.
516     CU_RET_ERR(cudaSetDevice(pmeGpu->deviceInfo->id), "Switching to PME CUDA device");
517 #endif
518
519     /* Allocate the target-specific structures */
520     pmeGpu->archSpecific.reset(new PmeGpuSpecific());
521     pmeGpu->kernelParams.reset(new PmeGpuKernelParams());
522
523     pmeGpu->archSpecific->performOutOfPlaceFFT = true;
524     /* Out-of-place FFT should give better performance, according to the cuFFT documentation,
525      * though in practice the performance seems to be the same.
526      * TODO: PME could also try to pick up nice grid sizes (with factors of 2, 3, 5, 7).
527      */
528
529     // TODO: this is just a convenient reuse because programHandle_ currently is in charge of creating context
530     pmeGpu->archSpecific->context = pmeGpu->programHandle_->impl_->context;
531
532     // timing enabling - TODO put this in gpu_utils (even though generally this is just option handling?) and reuse in NB
533     if (GMX_GPU == GMX_GPU_CUDA)
534     {
535         /* WARNING: CUDA timings are incorrect with multiple streams.
536          *          This is the main reason why they are disabled by default.
537          */
538         // TODO: Consider turning on by default when we can detect nr of streams.
539         pmeGpu->archSpecific->useTiming = (getenv("GMX_ENABLE_GPU_TIMING") != nullptr);
540     }
541     else if (GMX_GPU == GMX_GPU_OPENCL)
542     {
543         pmeGpu->archSpecific->useTiming = (getenv("GMX_DISABLE_GPU_TIMING") == nullptr);
544     }
545
546 #if GMX_GPU == GMX_GPU_CUDA
547     pmeGpu->maxGridWidthX = pmeGpu->deviceInfo->prop.maxGridSize[0];
548 #elif GMX_GPU == GMX_GPU_OPENCL
549     pmeGpu->maxGridWidthX = INT32_MAX / 2;
550     // TODO: is there really no global work size limit in OpenCL?
551 #endif
552
553     /* Creating a PME GPU stream:
554      * - default high priority with CUDA
555      * - no priorities implemented yet with OpenCL; see #2532
556      */
557 #if GMX_GPU == GMX_GPU_CUDA
558     cudaError_t stat;
559     int         highest_priority, lowest_priority;
560     stat = cudaDeviceGetStreamPriorityRange(&lowest_priority, &highest_priority);
561     CU_RET_ERR(stat, "PME cudaDeviceGetStreamPriorityRange failed");
562     stat = cudaStreamCreateWithPriority(&pmeGpu->archSpecific->pmeStream,
563                                         cudaStreamDefault, // cudaStreamNonBlocking,
564                                         highest_priority);
565     CU_RET_ERR(stat, "cudaStreamCreateWithPriority on the PME stream failed");
566 #elif GMX_GPU == GMX_GPU_OPENCL
567     cl_command_queue_properties queueProperties =
568             pmeGpu->archSpecific->useTiming ? CL_QUEUE_PROFILING_ENABLE : 0;
569     cl_device_id device_id = pmeGpu->deviceInfo->ocl_gpu_id.ocl_device_id;
570     cl_int       clError;
571     pmeGpu->archSpecific->pmeStream =
572             clCreateCommandQueue(pmeGpu->archSpecific->context, device_id, queueProperties, &clError);
573     if (clError != CL_SUCCESS)
574     {
575         GMX_THROW(gmx::InternalError("Failed to create PME command queue"));
576     }
577 #endif
578 }
579
580 void pme_gpu_destroy_specific(const PmeGpu* pmeGpu)
581 {
582 #if GMX_GPU == GMX_GPU_CUDA
583     /* Destroy the CUDA stream */
584     cudaError_t stat = cudaStreamDestroy(pmeGpu->archSpecific->pmeStream);
585     CU_RET_ERR(stat, "PME cudaStreamDestroy error");
586 #elif GMX_GPU == GMX_GPU_OPENCL
587     cl_int clError = clReleaseCommandQueue(pmeGpu->archSpecific->pmeStream);
588     if (clError != CL_SUCCESS)
589     {
590         gmx_warning("Failed to destroy PME command queue");
591     }
592 #endif
593 }
594
595 void pme_gpu_reinit_3dfft(const PmeGpu* pmeGpu)
596 {
597     if (pme_gpu_performs_FFT(pmeGpu))
598     {
599         pmeGpu->archSpecific->fftSetup.resize(0);
600         for (int i = 0; i < pmeGpu->common->ngrids; i++)
601         {
602             pmeGpu->archSpecific->fftSetup.push_back(std::make_unique<GpuParallel3dFft>(pmeGpu));
603         }
604     }
605 }
606
607 void pme_gpu_destroy_3dfft(const PmeGpu* pmeGpu)
608 {
609     pmeGpu->archSpecific->fftSetup.resize(0);
610 }
611
612 int getSplineParamFullIndex(int order, int splineIndex, int dimIndex, int atomIndex, int atomsPerWarp)
613 {
614     if (order != c_pmeGpuOrder)
615     {
616         throw order;
617     }
618     constexpr int fixedOrder = c_pmeGpuOrder;
619     GMX_UNUSED_VALUE(fixedOrder);
620
621     const int atomWarpIndex = atomIndex % atomsPerWarp;
622     const int warpIndex     = atomIndex / atomsPerWarp;
623     int       indexBase, result;
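    // atomsPerWarp is a compile-time template parameter of the index helpers,
    // hence the explicit unrolling of the supported values below.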
624     switch (atomsPerWarp)
625     {
626         case 1:
627             indexBase = getSplineParamIndexBase<fixedOrder, 1>(warpIndex, atomWarpIndex);
628             result    = getSplineParamIndex<fixedOrder, 1>(indexBase, dimIndex, splineIndex);
629             break;
630
631         case 2:
632             indexBase = getSplineParamIndexBase<fixedOrder, 2>(warpIndex, atomWarpIndex);
633             result    = getSplineParamIndex<fixedOrder, 2>(indexBase, dimIndex, splineIndex);
634             break;
635
636         case 4:
637             indexBase = getSplineParamIndexBase<fixedOrder, 4>(warpIndex, atomWarpIndex);
638             result    = getSplineParamIndex<fixedOrder, 4>(indexBase, dimIndex, splineIndex);
639             break;
640
641         case 8:
642             indexBase = getSplineParamIndexBase<fixedOrder, 8>(warpIndex, atomWarpIndex);
643             result    = getSplineParamIndex<fixedOrder, 8>(indexBase, dimIndex, splineIndex);
644             break;
645
646         default:
647             GMX_THROW(gmx::NotImplementedError(
648                     gmx::formatString("Test function call not unrolled for atomsPerWarp = %d in "
649                                       "getSplineParamFullIndex",
650                                       atomsPerWarp)));
651     }
652     return result;
653 }
654
655 void pme_gpu_getEnergyAndVirial(const gmx_pme_t& pme, PmeOutput* output)
656 {
657     const PmeGpu* pmeGpu = pme.gpu;
658     for (int j = 0; j < c_virialAndEnergyCount; j++)
659     {
660         GMX_ASSERT(std::isfinite(pmeGpu->staging.h_virialAndEnergy[j]),
661                    "PME GPU produces incorrect energy/virial.");
662     }
663
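    // Unpack the kernel accumulators: six unique virial components (filled symmetrically below),
    // followed by the energy.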
664     unsigned int j                 = 0;
665     output->coulombVirial_[XX][XX] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
666     output->coulombVirial_[YY][YY] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
667     output->coulombVirial_[ZZ][ZZ] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
668     output->coulombVirial_[XX][YY] = output->coulombVirial_[YY][XX] =
669             0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
670     output->coulombVirial_[XX][ZZ] = output->coulombVirial_[ZZ][XX] =
671             0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
672     output->coulombVirial_[YY][ZZ] = output->coulombVirial_[ZZ][YY] =
673             0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
674     output->coulombEnergy_ = 0.5F * pmeGpu->staging.h_virialAndEnergy[j++];
675 }
676
677 /*! \brief Sets the force-related members in \p output
678  *
679  * \param[in]   pmeGpu      PME GPU data structure
680  * \param[out]  output      Pointer to PME output data structure
681  */
682 static void pme_gpu_getForceOutput(PmeGpu* pmeGpu, PmeOutput* output)
683 {
684     output->haveForceOutput_ = !pmeGpu->settings.useGpuForceReduction;
685     if (output->haveForceOutput_)
686     {
687         output->forces_ = pmeGpu->staging.h_forces;
688     }
689 }
690
691 PmeOutput pme_gpu_getOutput(const gmx_pme_t& pme, const int flags)
692 {
693     PmeGpu*    pmeGpu                      = pme.gpu;
694     const bool haveComputedEnergyAndVirial = (flags & GMX_PME_CALC_ENER_VIR) != 0;
695
696     PmeOutput output;
697
698     pme_gpu_getForceOutput(pmeGpu, &output);
699
700     // The caller knows from the flags that the energy and the virial are not usable
701     // on the else branch
702     if (haveComputedEnergyAndVirial)
703     {
704         if (pme_gpu_performs_solve(pmeGpu))
705         {
706             pme_gpu_getEnergyAndVirial(pme, &output);
707         }
708         else
709         {
710             get_pme_ener_vir_q(pme.solve_work, pme.nthread, &output);
711         }
712     }
713     return output;
714 }
715
716 void pme_gpu_update_input_box(PmeGpu gmx_unused* pmeGpu, const matrix gmx_unused box)
717 {
718 #if GMX_DOUBLE
719     GMX_THROW(gmx::NotImplementedError("PME is implemented for single-precision only on GPU"));
720 #else
721     matrix scaledBox;
722     pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
723     auto* kernelParamsPtr              = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
724     kernelParamsPtr->current.boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
725     GMX_ASSERT(kernelParamsPtr->current.boxVolume != 0.0F, "Zero volume of the unit cell");
726     matrix recipBox;
727     gmx::invertBoxMatrix(scaledBox, recipBox);
728
729     /* The GPU recipBox is transposed as compared to the CPU recipBox.
730      * Spread uses matrix columns (while solve and gather use rows).
731      * There is no particular reason for this; it might be further rethought/optimized for better access patterns.
732      */
733     const real newRecipBox[DIM][DIM] = { { recipBox[XX][XX], recipBox[YY][XX], recipBox[ZZ][XX] },
734                                          { 0.0, recipBox[YY][YY], recipBox[ZZ][YY] },
735                                          { 0.0, 0.0, recipBox[ZZ][ZZ] } };
736     memcpy(kernelParamsPtr->current.recipBox, newRecipBox, sizeof(matrix));
737 #endif
738 }
739
740 /*! \brief \libinternal
741  * (Re-)initializes all the PME GPU data related to the grid size and cut-off.
742  *
743  * \param[in] pmeGpu            The PME GPU structure.
744  */
745 static void pme_gpu_reinit_grids(PmeGpu* pmeGpu)
746 {
747     auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
748     kernelParamsPtr->grid.ewaldFactor =
749             (M_PI * M_PI) / (pmeGpu->common->ewaldcoeff_q * pmeGpu->common->ewaldcoeff_q);
750
751     /* The grid size variants */
752     for (int i = 0; i < DIM; i++)
753     {
754         kernelParamsPtr->grid.realGridSize[i] = pmeGpu->common->nk[i];
755         kernelParamsPtr->grid.realGridSizeFP[i] =
756                 static_cast<float>(kernelParamsPtr->grid.realGridSize[i]);
757         kernelParamsPtr->grid.realGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
758
759         // The complex grid currently uses no padding;
760         // if it starts to do so, then another test should be added for that
761         kernelParamsPtr->grid.complexGridSize[i]       = kernelParamsPtr->grid.realGridSize[i];
762         kernelParamsPtr->grid.complexGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
763     }
764     /* FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
765     if (!pme_gpu_performs_FFT(pmeGpu))
766     {
767         // This allows the GPU spreading grid and the CPU fftgrid to have the same layout, so that we can copy the data directly
768         kernelParamsPtr->grid.realGridSizePadded[ZZ] =
769                 (kernelParamsPtr->grid.realGridSize[ZZ] / 2 + 1) * 2;
770     }
771
772     /* GPU FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
773     kernelParamsPtr->grid.complexGridSize[ZZ] /= 2;
774     kernelParamsPtr->grid.complexGridSize[ZZ]++;
775     kernelParamsPtr->grid.complexGridSizePadded[ZZ] = kernelParamsPtr->grid.complexGridSize[ZZ];
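    // Example: a real grid of 64 points along ZZ gives 64 / 2 + 1 = 33 complex points in the minor dimension.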
776
777     pme_gpu_realloc_and_copy_fract_shifts(pmeGpu);
778     pme_gpu_realloc_and_copy_bspline_values(pmeGpu);
779     pme_gpu_realloc_grids(pmeGpu);
780     pme_gpu_reinit_3dfft(pmeGpu);
781 }
782
783 /* Several GPU functions that refer to the CPU PME data live here.
784  * We would like to keep these away from the GPU-framework specific code for clarity,
785  * as well as to avoid compilation issues with MPI.
786  */
787
788 /*! \brief \libinternal
789  * Copies everything useful from the PME CPU to the PME GPU structure.
790  * The goal is to minimize interaction with the PME CPU structure in the GPU code.
791  *
792  * \param[in] pme         The PME structure.
793  */
794 static void pme_gpu_copy_common_data_from(const gmx_pme_t* pme)
795 {
796     /* TODO: Consider refactoring the CPU PME code to use the same structure,
797      * so that this function becomes 2 lines */
798     PmeGpu* pmeGpu               = pme->gpu;
799     pmeGpu->common->ngrids       = pme->ngrids;
800     pmeGpu->common->epsilon_r    = pme->epsilon_r;
801     pmeGpu->common->ewaldcoeff_q = pme->ewaldcoeff_q;
802     pmeGpu->common->nk[XX]       = pme->nkx;
803     pmeGpu->common->nk[YY]       = pme->nky;
804     pmeGpu->common->nk[ZZ]       = pme->nkz;
805     pmeGpu->common->pme_order    = pme->pme_order;
806     if (pmeGpu->common->pme_order != c_pmeGpuOrder)
807     {
808         GMX_THROW(gmx::NotImplementedError("pme_order != 4 is not implemented!"));
809     }
810     for (int i = 0; i < DIM; i++)
811     {
812         pmeGpu->common->bsp_mod[i].assign(pme->bsp_mod[i], pme->bsp_mod[i] + pmeGpu->common->nk[i]);
813     }
814     const int cellCount = c_pmeNeighborUnitcellCount;
815     pmeGpu->common->fsh.resize(0);
816     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshx, pme->fshx + cellCount * pme->nkx);
817     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshy, pme->fshy + cellCount * pme->nky);
818     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshz, pme->fshz + cellCount * pme->nkz);
819     pmeGpu->common->nn.resize(0);
820     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnx, pme->nnx + cellCount * pme->nkx);
821     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nny, pme->nny + cellCount * pme->nky);
822     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnz, pme->nnz + cellCount * pme->nkz);
823     pmeGpu->common->runMode       = pme->runMode;
824     pmeGpu->common->isRankPmeOnly = !pme->bPPnode;
825     pmeGpu->common->boxScaler     = pme->boxScaler;
826 }
827
828 /*! \libinternal \brief
829  * Initializes the PME GPU data at the beginning of the run.
830  * TODO: this should become PmeGpu::PmeGpu()
831  *
832  * \param[in,out] pme            The PME structure.
833  * \param[in,out] gpuInfo        The GPU information structure.
834  * \param[in]     pmeGpuProgram  The handle to the program/kernel data created outside (e.g. in unit tests/runner)
835  */
836 static void pme_gpu_init(gmx_pme_t* pme, const gmx_device_info_t* gpuInfo, PmeGpuProgramHandle pmeGpuProgram)
837 {
838     pme->gpu       = new PmeGpu();
839     PmeGpu* pmeGpu = pme->gpu;
840     changePinningPolicy(&pmeGpu->staging.h_forces, pme_get_pinning_policy());
841     pmeGpu->common = std::make_shared<PmeShared>();
842
843     /* These settings are set here for the whole run; dynamic ones are set in pme_gpu_reinit() */
844     /* A convenience variable. */
845     pmeGpu->settings.useDecomposition = (pme->nnodes == 1);
846     /* TODO: CPU gather with GPU spread is broken due to different theta/dtheta layout. */
847     pmeGpu->settings.performGPUGather = true;
848     // By default GPU-side reduction is off (explicitly set here for tests, otherwise reset per-step)
849     pmeGpu->settings.useGpuForceReduction = false;
850
851     pme_gpu_set_testing(pmeGpu, false);
852
853     pmeGpu->deviceInfo = gpuInfo;
854     GMX_ASSERT(pmeGpuProgram != nullptr, "GPU kernels must be already compiled");
855     pmeGpu->programHandle_ = pmeGpuProgram;
856
857     pmeGpu->initializedClfftLibrary_ = std::make_unique<gmx::ClfftInitializer>();
858
859     pme_gpu_init_internal(pmeGpu);
860     pme_gpu_alloc_energy_virial(pmeGpu);
861
862     pme_gpu_copy_common_data_from(pme);
863
864     GMX_ASSERT(pmeGpu->common->epsilon_r != 0.0F, "PME GPU: bad electrostatic coefficient");
865
866     auto* kernelParamsPtr               = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
867     kernelParamsPtr->constants.elFactor = ONE_4PI_EPS0 / pmeGpu->common->epsilon_r;
868 }
869
870 void pme_gpu_transform_spline_atom_data(const PmeGpu*      pmeGpu,
871                                         const PmeAtomComm* atc,
872                                         PmeSplineDataType  type,
873                                         int                dimIndex,
874                                         PmeLayoutTransform transform)
875 {
876     // The GPU atom spline data is laid out in a different way currently than the CPU one.
877     // This function converts the data from GPU to CPU layout (in the host memory).
878     // It is only intended for testing purposes so far.
879     // Ideally we should use similar layouts on CPU and GPU if we care about mixed modes and their
880     // performance (e.g. spreading on GPU, gathering on CPU).
881     GMX_RELEASE_ASSERT(atc->nthread == 1, "Only the serial PME data layout is supported");
882     const uintmax_t threadIndex  = 0;
883     const auto      atomCount    = pme_gpu_get_kernel_params_base_ptr(pmeGpu)->atoms.nAtoms;
884     const auto      atomsPerWarp = pme_gpu_get_atoms_per_warp(pmeGpu);
885     const auto      pmeOrder     = pmeGpu->common->pme_order;
886     GMX_ASSERT(pmeOrder == c_pmeGpuOrder, "Only PME order 4 is implemented");
887
888     real*  cpuSplineBuffer;
889     float* h_splineBuffer;
890     switch (type)
891     {
892         case PmeSplineDataType::Values:
893             cpuSplineBuffer = atc->spline[threadIndex].theta.coefficients[dimIndex];
894             h_splineBuffer  = pmeGpu->staging.h_theta;
895             break;
896
897         case PmeSplineDataType::Derivatives:
898             cpuSplineBuffer = atc->spline[threadIndex].dtheta.coefficients[dimIndex];
899             h_splineBuffer  = pmeGpu->staging.h_dtheta;
900             break;
901
902         default: GMX_THROW(gmx::InternalError("Unknown spline data type"));
903     }
904
905     for (auto atomIndex = 0; atomIndex < atomCount; atomIndex++)
906     {
907         for (auto orderIndex = 0; orderIndex < pmeOrder; orderIndex++)
908         {
909             const auto gpuValueIndex =
910                     getSplineParamFullIndex(pmeOrder, orderIndex, dimIndex, atomIndex, atomsPerWarp);
911             const auto cpuValueIndex = atomIndex * pmeOrder + orderIndex;
912             GMX_ASSERT(cpuValueIndex < atomCount * pmeOrder,
913                        "Atom spline data index out of bounds (while transforming GPU data layout "
914                        "for host)");
915             switch (transform)
916             {
917                 case PmeLayoutTransform::GpuToHost:
918                     cpuSplineBuffer[cpuValueIndex] = h_splineBuffer[gpuValueIndex];
919                     break;
920
921                 case PmeLayoutTransform::HostToGpu:
922                     h_splineBuffer[gpuValueIndex] = cpuSplineBuffer[cpuValueIndex];
923                     break;
924
925                 default: GMX_THROW(gmx::InternalError("Unknown layout transform"));
926             }
927         }
928     }
929 }
930
931 void pme_gpu_get_real_grid_sizes(const PmeGpu* pmeGpu, gmx::IVec* gridSize, gmx::IVec* paddedGridSize)
932 {
933     GMX_ASSERT(gridSize != nullptr, "");
934     GMX_ASSERT(paddedGridSize != nullptr, "");
935     GMX_ASSERT(pmeGpu != nullptr, "");
936     auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
937     for (int i = 0; i < DIM; i++)
938     {
939         (*gridSize)[i]       = kernelParamsPtr->grid.realGridSize[i];
940         (*paddedGridSize)[i] = kernelParamsPtr->grid.realGridSizePadded[i];
941     }
942 }
943
944 void pme_gpu_reinit(gmx_pme_t* pme, const gmx_device_info_t* gpuInfo, PmeGpuProgramHandle pmeGpuProgram)
945 {
946     if (!pme_gpu_active(pme))
947     {
948         return;
949     }
950
951     if (!pme->gpu)
952     {
953         /* First-time initialization */
954         pme_gpu_init(pme, gpuInfo, pmeGpuProgram);
955     }
956     else
957     {
958         /* After this call nothing in the GPU code should refer to the gmx_pme_t *pme itself - until the next pme_gpu_reinit */
959         pme_gpu_copy_common_data_from(pme);
960     }
961     /* GPU FFT is only used when running on a single rank. */
962     pme->gpu->settings.performGPUFFT =
963             (pme->gpu->common->runMode == PmeRunMode::GPU) && !pme_gpu_uses_dd(pme->gpu);
964     pme->gpu->settings.performGPUSolve = (pme->gpu->common->runMode == PmeRunMode::GPU);
965
966     /* Reinit active timers */
967     pme_gpu_reinit_timings(pme->gpu);
968
969     pme_gpu_reinit_grids(pme->gpu);
970     // Note: if timing the reinit launch overhead becomes more relevant
971     // (e.g. with regular PP-PME re-balancing), we should pass wcycle here.
972     pme_gpu_reinit_computation(pme, nullptr);
973     /* Clear the previous box - doesn't hurt, and forces the PME CPU recipbox
974      * update for mixed mode on grid switch. TODO: use shared recipbox field.
975      */
976     std::memset(pme->gpu->common->previousBox, 0, sizeof(pme->gpu->common->previousBox));
977 }
978
979 void pme_gpu_destroy(PmeGpu* pmeGpu)
980 {
981     /* Free lots of data */
982     pme_gpu_free_energy_virial(pmeGpu);
983     pme_gpu_free_bspline_values(pmeGpu);
984     pme_gpu_free_forces(pmeGpu);
985     pme_gpu_free_coefficients(pmeGpu);
986     pme_gpu_free_spline_data(pmeGpu);
987     pme_gpu_free_grid_indices(pmeGpu);
988     pme_gpu_free_fract_shifts(pmeGpu);
989     pme_gpu_free_grids(pmeGpu);
990
991     pme_gpu_destroy_3dfft(pmeGpu);
992
993     /* Free the GPU-framework specific data last */
994     pme_gpu_destroy_specific(pmeGpu);
995
996     delete pmeGpu;
997 }
998
999 void pme_gpu_reinit_atoms(PmeGpu* pmeGpu, const int nAtoms, const real* charges)
1000 {
1001     auto* kernelParamsPtr         = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
1002     kernelParamsPtr->atoms.nAtoms = nAtoms;
1003     const int alignment           = pme_gpu_get_atom_data_alignment(pmeGpu);
1004     pmeGpu->nAtomsPadded          = ((nAtoms + alignment - 1) / alignment) * alignment;
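    // Example: with a hypothetical alignment of 64, nAtoms = 1000 would be padded up to 1024.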
1005     const int  nAtomsAlloc        = c_usePadding ? pmeGpu->nAtomsPadded : nAtoms;
1006     const bool haveToRealloc =
1007             (pmeGpu->nAtomsAlloc < nAtomsAlloc); /* This check might be redundant, but is logical */
1008     pmeGpu->nAtomsAlloc = nAtomsAlloc;
1009
1010 #if GMX_DOUBLE
1011     GMX_RELEASE_ASSERT(false, "Only single precision supported");
1012     GMX_UNUSED_VALUE(charges);
1013 #else
1014     pme_gpu_realloc_and_copy_input_coefficients(pmeGpu, reinterpret_cast<const float*>(charges));
1015     /* This could also be made conditional on haveToRealloc, but the copy always needs to be performed */
1016 #endif
1017
1018     if (haveToRealloc)
1019     {
1020         pme_gpu_realloc_forces(pmeGpu);
1021         pme_gpu_realloc_spline_data(pmeGpu);
1022         pme_gpu_realloc_grid_indices(pmeGpu);
1023     }
1024 }
1025
1026 void pme_gpu_3dfft(const PmeGpu* pmeGpu, gmx_fft_direction dir, int grid_index)
1027 {
1028     int timerId = (dir == GMX_FFT_REAL_TO_COMPLEX) ? gtPME_FFT_R2C : gtPME_FFT_C2R;
1029
1030     pme_gpu_start_timing(pmeGpu, timerId);
1031     pmeGpu->archSpecific->fftSetup[grid_index]->perform3dFft(
1032             dir, pme_gpu_fetch_timing_event(pmeGpu, timerId));
1033     pme_gpu_stop_timing(pmeGpu, timerId);
1034 }
1035
1036 /*! \brief
1037  * Given possibly large \p blockCount, returns a compact 1D or 2D grid for kernel scheduling,
1038  * to minimize the number of unused blocks.
1039  */
1040 std::pair<int, int> inline pmeGpuCreateGrid(const PmeGpu* pmeGpu, int blockCount)
1041 {
1042     // How many rows of the maximum X width do we need (hopefully just one)
1043     const int minRowCount = (blockCount + pmeGpu->maxGridWidthX - 1) / pmeGpu->maxGridWidthX;
1044     // Trying to make things even
1045     const int colCount = (blockCount + minRowCount - 1) / minRowCount;
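    // Example: blockCount = 100000 with maxGridWidthX = 65535 gives minRowCount = 2 and
    // colCount = 50000, i.e. a 50000 x 2 grid with no unused blocks.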
1046     GMX_ASSERT((colCount * minRowCount - blockCount) >= 0, "pmeGpuCreateGrid: totally wrong");
1047     GMX_ASSERT((colCount * minRowCount - blockCount) < minRowCount,
1048                "pmeGpuCreateGrid: excessive blocks");
1049     return std::pair<int, int>(colCount, minRowCount);
1050 }
1051
1052 /*! \brief
1053  * Returns a pointer to the appropriate spline-and-spread kernel based on the input bool values.
1054  *
1055  * \param[in]  pmeGpu                   The PME GPU structure.
1056  * \param[in]  useOrderThreadsPerAtom   bool controlling if we should use order or order*order threads per atom
1057  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
1058  *
1059  * \return Pointer to CUDA kernel
1060  */
1061 static auto selectSplineAndSpreadKernelPtr(const PmeGpu* pmeGpu, bool useOrderThreadsPerAtom, bool writeSplinesToGlobal)
1062 {
1063     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1064     if (writeSplinesToGlobal)
1065     {
1066         if (useOrderThreadsPerAtom)
1067         {
1068             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesThPerAtom4;
1069         }
1070         else
1071         {
1072             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplines;
1073         }
1074     }
1075     else
1076     {
1077         if (useOrderThreadsPerAtom)
1078         {
1079             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4;
1080         }
1081         else
1082         {
1083             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernel;
1084         }
1085     }
1086
1087     return kernelPtr;
1088 }
1089
1090 /*! \brief
1091  * Returns a pointer to the appropriate spline kernel based on the input bool values.
1092  *
1093  * \param[in]  pmeGpu                   The PME GPU structure.
1094  * \param[in]  useOrderThreadsPerAtom   bool controlling if we should use order or order*order threads per atom
1095  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
1096  *
1097  * \return Pointer to CUDA kernel
1098  */
1099 static auto selectSplineKernelPtr(const PmeGpu* pmeGpu, bool useOrderThreadsPerAtom, bool gmx_unused writeSplinesToGlobal)
1100 {
1101     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1102     GMX_ASSERT(
1103             writeSplinesToGlobal,
1104             "Spline data should always be written to global memory when just calculating splines");
1105
1106     if (useOrderThreadsPerAtom)
1107     {
1108         kernelPtr = pmeGpu->programHandle_->impl_->splineKernelThPerAtom4;
1109     }
1110     else
1111     {
1112         kernelPtr = pmeGpu->programHandle_->impl_->splineKernel;
1113     }
1114     return kernelPtr;
1115 }
1116
1117 /*! \brief
1118  * Returns a pointer to the appropriate spread kernel based on the input bool values.
1119  *
1120  * \param[in]  pmeGpu                   The PME GPU structure.
1121  * \param[in]  useOrderThreadsPerAtom   bool controlling if we should use order or order*order threads per atom
1122  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
1123  *
1124  * \return Pointer to CUDA kernel
1125  */
1126 static auto selectSpreadKernelPtr(const PmeGpu* pmeGpu, bool useOrderThreadsPerAtom, bool writeSplinesToGlobal)
1127 {
1128     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1129     if (writeSplinesToGlobal)
1130     {
1131         if (useOrderThreadsPerAtom)
1132         {
1133             kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelThPerAtom4;
1134         }
1135         else
1136         {
1137             kernelPtr = pmeGpu->programHandle_->impl_->spreadKernel;
1138         }
1139     }
1140     else
1141     {
1142         /* If we are not saving the spline data, we need to recalculate it
1143            using the spline-and-spread kernel */
1144         if (useOrderThreadsPerAtom)
1145         {
1146             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4;
1147         }
1148         else
1149         {
1150             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernel;
1151         }
1152     }
1153     return kernelPtr;
1154 }
1155
1156 void pme_gpu_spread(const PmeGpu*         pmeGpu,
1157                     GpuEventSynchronizer* xReadyOnDevice,
1158                     int gmx_unused gridIndex,
1159                     real*          h_grid,
1160                     bool           computeSplines,
1161                     bool           spreadCharges)
1162 {
1163     GMX_ASSERT(computeSplines || spreadCharges,
1164                "PME spline/spread kernel has invalid input (nothing to do)");
1165     const auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1166     GMX_ASSERT(kernelParamsPtr->atoms.nAtoms > 0, "No atom data in PME GPU spread");
1167
1168     const size_t blockSize = pmeGpu->programHandle_->impl_->spreadWorkGroupSize;
1169
1170     const int order = pmeGpu->common->pme_order;
1171     GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1172     const bool writeGlobal = pmeGpu->settings.copyAllOutputs;
1173 #if GMX_GPU == GMX_GPU_OPENCL
1174     GMX_ASSERT(!c_useOrderThreadsPerAtom, "Only 16 threads per atom supported in OpenCL");
1175     GMX_ASSERT(!c_recalculateSplines, "Recalculating splines not supported in OpenCL");
1176 #endif
1177     const int atomsPerBlock = c_useOrderThreadsPerAtom ? blockSize / c_pmeSpreadGatherThreadsPerAtom4ThPerAtom
1178                                                        : blockSize / c_pmeSpreadGatherThreadsPerAtom;
1179
1180     // TODO: pick smaller block size in runtime if needed
1181     // (e.g. on 660 Ti where 50% occupancy is ~25% faster than 100% occupancy with RNAse (~17.8k atoms))
1182     // If doing so, change atomsPerBlock in the kernels as well.
1183     // TODO: test varying block sizes on modern arch-s as well
1184     // TODO: also consider using cudaFuncSetCacheConfig() for preferring shared memory on older architectures
1185     // (for spline data mostly)
1186     GMX_ASSERT(!c_usePadding || !(c_pmeAtomDataAlignment % atomsPerBlock),
1187                "inconsistent atom data padding vs. spreading block size");
1188
1189     // Ensure that coordinates are ready on the device before launching spread;
1190     // this is only needed with CUDA on PP+PME ranks; it is not needed on separate PME ranks,
1191     // in unit tests, or in OpenCL, as these cases use a single stream (hence xReadyOnDevice == nullptr).
1192     GMX_ASSERT(xReadyOnDevice != nullptr || (GMX_GPU != GMX_GPU_CUDA)
1193                        || pmeGpu->common->isRankPmeOnly || pme_gpu_is_testing(pmeGpu),
1194                "Need a valid coordinate synchronizer on PP+PME ranks with CUDA.");
1195     if (xReadyOnDevice)
1196     {
1197         xReadyOnDevice->enqueueWaitEvent(pmeGpu->archSpecific->pmeStream);
1198     }
1199
1200     const int blockCount = pmeGpu->nAtomsPadded / atomsPerBlock;
1201     auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1202
1203     KernelLaunchConfig config;
1204     config.blockSize[0] = order;
1205     config.blockSize[1] = c_useOrderThreadsPerAtom ? 1 : order;
1206     config.blockSize[2] = atomsPerBlock;
1207     config.gridSize[0]  = dimGrid.first;
1208     config.gridSize[1]  = dimGrid.second;
1209     config.stream       = pmeGpu->archSpecific->pmeStream;
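    // Each block thus runs order (or order * order) threads for each of its atomsPerBlock atoms,
    // which adds up to the spreadWorkGroupSize used to derive atomsPerBlock above.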
1210
1211     int                                timingId;
1212     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1213     if (computeSplines)
1214     {
1215         if (spreadCharges)
1216         {
1217             timingId  = gtPME_SPLINEANDSPREAD;
1218             kernelPtr = selectSplineAndSpreadKernelPtr(pmeGpu, c_useOrderThreadsPerAtom,
1219                                                        writeGlobal || (!c_recalculateSplines));
1220         }
1221         else
1222         {
1223             timingId  = gtPME_SPLINE;
1224             kernelPtr = selectSplineKernelPtr(pmeGpu, c_useOrderThreadsPerAtom,
1225                                               writeGlobal || (!c_recalculateSplines));
1226         }
1227     }
1228     else
1229     {
1230         timingId  = gtPME_SPREAD;
1231         kernelPtr = selectSpreadKernelPtr(pmeGpu, c_useOrderThreadsPerAtom,
1232                                           writeGlobal || (!c_recalculateSplines));
1233     }
1234
1235
1236     pme_gpu_start_timing(pmeGpu, timingId);
1237     auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1238 #if c_canEmbedBuffers
1239     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1240 #else
1241     const auto kernelArgs = prepareGpuKernelArguments(
1242             kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->atoms.d_theta,
1243             &kernelParamsPtr->atoms.d_dtheta, &kernelParamsPtr->atoms.d_gridlineIndices,
1244             &kernelParamsPtr->grid.d_realGrid, &kernelParamsPtr->grid.d_fractShiftsTable,
1245             &kernelParamsPtr->grid.d_gridlineIndicesTable, &kernelParamsPtr->atoms.d_coefficients,
1246             &kernelParamsPtr->atoms.d_coordinates);
1247 #endif
1248
1249     launchGpuKernel(kernelPtr, config, timingEvent, "PME spline/spread", kernelArgs);
1250     pme_gpu_stop_timing(pmeGpu, timingId);
1251
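    // Copy the spread results back to the host when the subsequent FFT and/or gather run
    // on the CPU, or when unit tests need to inspect them.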
1252     const bool copyBackGrid =
1253             spreadCharges && (pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_FFT(pmeGpu));
1254     if (copyBackGrid)
1255     {
1256         pme_gpu_copy_output_spread_grid(pmeGpu, h_grid);
1257     }
1258     const bool copyBackAtomData =
1259             computeSplines && (pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_gather(pmeGpu));
1260     if (copyBackAtomData)
1261     {
1262         pme_gpu_copy_output_spread_atom_data(pmeGpu);
1263     }
1264 }
1265
1266 void pme_gpu_solve(const PmeGpu* pmeGpu, t_complex* h_grid, GridOrdering gridOrdering, bool computeEnergyAndVirial)
1267 {
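    // When the FFT runs on the CPU (or in unit tests), the complex grid is staged on the
    // host, so it is copied to the device before and from the device after the solve kernel.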
1268     const bool copyInputAndOutputGrid = pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_FFT(pmeGpu);
1269
1270     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1271
1272     float* h_gridFloat = reinterpret_cast<float*>(h_grid);
1273     if (copyInputAndOutputGrid)
1274     {
1275         copyToDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, h_gridFloat, 0,
1276                            pmeGpu->archSpecific->complexGridSize, pmeGpu->archSpecific->pmeStream,
1277                            pmeGpu->settings.transferKind, nullptr);
1278     }
1279
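    // Map the requested complex-grid ordering onto major/middle/minor dimension indices
    // used by the launch configuration below.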
1280     int majorDim = -1, middleDim = -1, minorDim = -1;
1281     switch (gridOrdering)
1282     {
1283         case GridOrdering::YZX:
1284             majorDim  = YY;
1285             middleDim = ZZ;
1286             minorDim  = XX;
1287             break;
1288
1289         case GridOrdering::XYZ:
1290             majorDim  = XX;
1291             middleDim = YY;
1292             minorDim  = ZZ;
1293             break;
1294
1295         default: GMX_ASSERT(false, "Implement grid ordering here and below for the kernel launch");
1296     }
1297
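    // Work decomposition: each block processes cellsPerBlock cells of a grid line along the
    // minor dimension; short lines are packed gridLinesPerBlock per block, while lines longer
    // than maxBlockSize are split over blocksPerGridLine blocks.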
1298     const int maxBlockSize = pmeGpu->programHandle_->impl_->solveMaxWorkGroupSize;
1299
1300     const int gridLineSize      = pmeGpu->kernelParams->grid.complexGridSize[minorDim];
1301     const int gridLinesPerBlock = std::max(maxBlockSize / gridLineSize, 1);
1302     const int blocksPerGridLine = (gridLineSize + maxBlockSize - 1) / maxBlockSize;
1303     int       cellsPerBlock;
1304     if (blocksPerGridLine == 1)
1305     {
1306         cellsPerBlock = gridLineSize * gridLinesPerBlock;
1307     }
1308     else
1309     {
1310         cellsPerBlock = (gridLineSize + blocksPerGridLine - 1) / blocksPerGridLine;
1311     }
1312     const int warpSize  = pmeGpu->programHandle_->impl_->warpSize;
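    // Round the block size up to a whole number of warps, so that the warp-level shuffle
    // reductions in the solve kernels operate on fully populated warps.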
1313     const int blockSize = (cellsPerBlock + warpSize - 1) / warpSize * warpSize;
1314
1315     static_assert(GMX_GPU != GMX_GPU_CUDA || c_solveMaxWarpsPerBlock / 2 >= 4,
1316                   "The CUDA solve energy kernels need at least 4 warps. "
1317                   "Here we launch at least half of the max warps.");
1318
1319     KernelLaunchConfig config;
1320     config.blockSize[0] = blockSize;
1321     config.gridSize[0]  = blocksPerGridLine;
1322     // ceiling division so that the whole middle grid dimension is covered, gridLinesPerBlock lines per block
1323     config.gridSize[1] = (pmeGpu->kernelParams->grid.complexGridSize[middleDim] + gridLinesPerBlock - 1)
1324                          / gridLinesPerBlock;
1325     config.gridSize[2] = pmeGpu->kernelParams->grid.complexGridSize[majorDim];
1326     config.stream      = pmeGpu->archSpecific->pmeStream;
1327
1328     int                                timingId  = gtPME_SOLVE;
1329     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1330     if (gridOrdering == GridOrdering::YZX)
1331     {
1332         kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveYZXEnergyKernel
1333                                            : pmeGpu->programHandle_->impl_->solveYZXKernel;
1334     }
1335     else if (gridOrdering == GridOrdering::XYZ)
1336     {
1337         kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveXYZEnergyKernel
1338                                            : pmeGpu->programHandle_->impl_->solveXYZKernel;
1339     }
1340
1341     pme_gpu_start_timing(pmeGpu, timingId);
1342     auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1343 #if c_canEmbedBuffers
1344     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1345 #else
1346     const auto kernelArgs = prepareGpuKernelArguments(
1347             kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->grid.d_splineModuli,
1348             &kernelParamsPtr->constants.d_virialAndEnergy, &kernelParamsPtr->grid.d_fourierGrid);
1349 #endif
1350     launchGpuKernel(kernelPtr, config, timingEvent, "PME solve", kernelArgs);
1351     pme_gpu_stop_timing(pmeGpu, timingId);
1352
1353     if (computeEnergyAndVirial)
1354     {
1355         copyFromDeviceBuffer(pmeGpu->staging.h_virialAndEnergy,
1356                              &kernelParamsPtr->constants.d_virialAndEnergy, 0, c_virialAndEnergyCount,
1357                              pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
1358     }
1359
1360     if (copyInputAndOutputGrid)
1361     {
1362         copyFromDeviceBuffer(h_gridFloat, &kernelParamsPtr->grid.d_fourierGrid, 0,
1363                              pmeGpu->archSpecific->complexGridSize, pmeGpu->archSpecific->pmeStream,
1364                              pmeGpu->settings.transferKind, nullptr);
1365     }
1366 }
1367
1368 /*! \brief
1369  * Returns a pointer to the appropriate gather kernel based on the input values
1370  *
1371  * \param[in]  pmeGpu                   The PME GPU structure.
1372  * \param[in]  useOrderThreadsPerAtom   bool controlling if we should use order or order*order threads per atom
1373  * \param[in]  readSplinesFromGlobal    bool controlling if we should read spline data from global memory
1374  * \param[in]  forceTreatment           Controls if the forces from the gather should increment or replace the input forces.
1375  *
1376  * \return Pointer to the selected kernel
1377  */
1378 inline auto selectGatherKernelPtr(const PmeGpu*          pmeGpu,
1379                                   bool                   useOrderThreadsPerAtom,
1380                                   bool                   readSplinesFromGlobal,
1381                                   PmeForceOutputHandling forceTreatment)
1382
1383 {
1384     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1385
1386     if (readSplinesFromGlobal)
1387     {
1388         if (useOrderThreadsPerAtom)
1389         {
1390             kernelPtr = (forceTreatment == PmeForceOutputHandling::Set)
1391                                 ? pmeGpu->programHandle_->impl_->gatherKernelReadSplinesThPerAtom4
1392                                 : pmeGpu->programHandle_->impl_->gatherReduceWithInputKernelReadSplinesThPerAtom4;
1393         }
1394         else
1395         {
1396             kernelPtr = (forceTreatment == PmeForceOutputHandling::Set)
1397                                 ? pmeGpu->programHandle_->impl_->gatherKernelReadSplines
1398                                 : pmeGpu->programHandle_->impl_->gatherReduceWithInputKernelReadSplines;
1399         }
1400     }
1401     else
1402     {
1403         if (useOrderThreadsPerAtom)
1404         {
1405             kernelPtr = (forceTreatment == PmeForceOutputHandling::Set)
1406                                 ? pmeGpu->programHandle_->impl_->gatherKernelThPerAtom4
1407                                 : pmeGpu->programHandle_->impl_->gatherReduceWithInputKernelThPerAtom4;
1408         }
1409         else
1410         {
1411             kernelPtr = (forceTreatment == PmeForceOutputHandling::Set)
1412                                 ? pmeGpu->programHandle_->impl_->gatherKernel
1413                                 : pmeGpu->programHandle_->impl_->gatherReduceWithInputKernel;
1414         }
1415     }
1416     return kernelPtr;
1417 }
1418
1419
1420 void pme_gpu_gather(PmeGpu* pmeGpu, PmeForceOutputHandling forceTreatment, const float* h_grid)
1421 {
1422     /* Copying the input CPU forces for reduction */
1423     if (forceTreatment != PmeForceOutputHandling::Set)
1424     {
1425         pme_gpu_copy_input_forces(pmeGpu);
1426     }
1427
1428     if (!pme_gpu_performs_FFT(pmeGpu) || pme_gpu_is_testing(pmeGpu))
1429     {
1430         pme_gpu_copy_input_gather_grid(pmeGpu, const_cast<float*>(h_grid));
1431     }
1432
1433     if (pme_gpu_is_testing(pmeGpu))
1434     {
1435         pme_gpu_copy_input_gather_atom_data(pmeGpu);
1436     }
1437
1438     /* Set when all outputs are copied back, e.g. in unit tests; the gather then reads splines from global memory */
1439     const bool   readGlobal = pmeGpu->settings.copyAllOutputs;
1440     const size_t blockSize  = pmeGpu->programHandle_->impl_->gatherWorkGroupSize;
1441 #if GMX_GPU == GMX_GPU_OPENCL
1442     GMX_ASSERT(!c_useOrderThreadsPerAtom, "Only 16 threads per atom supported in OpenCL");
1443     GMX_ASSERT(!c_recalculateSplines, "Recalculating splines not supported in OpenCL");
1444 #endif
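    // Same thread-per-atom layout as in pme_gpu_spread(): order or order*order threads per atom.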
1445     const int atomsPerBlock = c_useOrderThreadsPerAtom ? blockSize / c_pmeSpreadGatherThreadsPerAtom4ThPerAtom
1446                                                        : blockSize / c_pmeSpreadGatherThreadsPerAtom;
1447
1448     GMX_ASSERT(!c_usePadding || !(c_pmeAtomDataAlignment % atomsPerBlock),
1449                "inconsistent atom data padding vs. gathering block size");
1450
1451     const int blockCount = pmeGpu->nAtomsPadded / atomsPerBlock;
1452     auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1453
1454     const int order = pmeGpu->common->pme_order;
1455     GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1456
1457     KernelLaunchConfig config;
1458     config.blockSize[0] = order;
1459     config.blockSize[1] = c_useOrderThreadsPerAtom ? 1 : order;
1460     config.blockSize[2] = atomsPerBlock;
1461     config.gridSize[0]  = dimGrid.first;
1462     config.gridSize[1]  = dimGrid.second;
1463     config.stream       = pmeGpu->archSpecific->pmeStream;
1464
1465     // TODO test different cache configs
1466
1467     int                                timingId  = gtPME_GATHER;
1468     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = selectGatherKernelPtr(
1469             pmeGpu, c_useOrderThreadsPerAtom, readGlobal || (!c_recalculateSplines), forceTreatment);
1470     // TODO design kernel selection getters and make PmeGpu a friend of PmeGpuProgramImpl
1471
1472     pme_gpu_start_timing(pmeGpu, timingId);
1473     auto*       timingEvent     = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1474     const auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1475 #if c_canEmbedBuffers
1476     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1477 #else
1478     const auto kernelArgs = prepareGpuKernelArguments(
1479             kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->atoms.d_coefficients,
1480             &kernelParamsPtr->grid.d_realGrid, &kernelParamsPtr->atoms.d_theta,
1481             &kernelParamsPtr->atoms.d_dtheta, &kernelParamsPtr->atoms.d_gridlineIndices,
1482             &kernelParamsPtr->atoms.d_forces);
1483 #endif
1484     launchGpuKernel(kernelPtr, config, timingEvent, "PME gather", kernelArgs);
1485     pme_gpu_stop_timing(pmeGpu, timingId);
1486
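    // With GPU-side force reduction the forces stay on the device and an event is recorded
    // for the consumer to synchronize on; otherwise the forces are copied back to the host here.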
1487     if (pmeGpu->settings.useGpuForceReduction)
1488     {
1489         pmeGpu->archSpecific->pmeForcesReady.markEvent(pmeGpu->archSpecific->pmeStream);
1490     }
1491     else
1492     {
1493         pme_gpu_copy_output_forces(pmeGpu);
1494     }
1495 }
1496
1497 DeviceBuffer<float> pme_gpu_get_kernelparam_coordinates(const PmeGpu* pmeGpu)
1498 {
1499     GMX_ASSERT(pmeGpu && pmeGpu->kernelParams,
1500                "PME GPU device buffer was requested in a non-GPU build or before the GPU PME was "
1501                "initialized.");
1502
1503     return pmeGpu->kernelParams->atoms.d_coordinates;
1504 }
1505
1506 void* pme_gpu_get_kernelparam_forces(const PmeGpu* pmeGpu)
1507 {
1508     if (pmeGpu && pmeGpu->kernelParams)
1509     {
1510         return pmeGpu->kernelParams->atoms.d_forces;
1511     }
1512     else
1513     {
1514         return nullptr;
1515     }
1516 }
1517
1518 /*! \brief Check the validity of the device buffer.
1519  *
1520  * Checks if the buffer is not nullptr and, when possible, if it is big enough.
1521  *
1522  * \todo Split and move this function to gpu_utils.
1523  *
1524  * \param[in] buffer        Device buffer to be checked.
1525  * \param[in] requiredSize  Number of elements that the buffer will have to accommodate.
1526  *
1527  * \returns Whether the device buffer can be set.
1528  */
1529 template<typename T>
1530 static bool checkDeviceBuffer(gmx_unused DeviceBuffer<T> buffer, gmx_unused int requiredSize)
1531 {
1532 #if GMX_GPU == GMX_GPU_CUDA
1533     GMX_ASSERT(buffer != nullptr, "The device pointer is nullptr");
1534     return buffer != nullptr;
1535 #elif GMX_GPU == GMX_GPU_OPENCL
1536     size_t size;
1537     int    retval = clGetMemObjectInfo(buffer, CL_MEM_SIZE, sizeof(size), &size, nullptr);
1538     GMX_ASSERT(retval == CL_SUCCESS,
1539                gmx::formatString("clGetMemObjectInfo failed with error code #%d", retval).c_str());
1540     GMX_ASSERT(static_cast<int>(size) >= requiredSize,
1541                "Number of atoms in device buffer is smaller than the required size.");
1542     return retval == CL_SUCCESS && static_cast<int>(size) >= requiredSize;
1543 #elif GMX_GPU == GMX_GPU_NONE
1544     GMX_ASSERT(false, "Setter for device-side coordinates was called in non-GPU build.");
1545     return false;
1546 #endif
1547 }
1548
1549 void pme_gpu_set_kernelparam_coordinates(const PmeGpu* pmeGpu, DeviceBuffer<float> d_x)
1550 {
1551     GMX_ASSERT(pmeGpu && pmeGpu->kernelParams,
1552                "PME GPU device buffer can not be set in non-GPU builds or before the GPU PME was "
1553                "initialized.");
1554
1555     GMX_ASSERT(checkDeviceBuffer(d_x, pmeGpu->kernelParams->atoms.nAtoms),
1556                "The device-side buffer can not be set.");
1557
1558     pmeGpu->kernelParams->atoms.d_coordinates = d_x;
1559 }
1560
1561 void* pme_gpu_get_stream(const PmeGpu* pmeGpu)
1562 {
1563     if (pmeGpu)
1564     {
1565         return static_cast<void*>(&pmeGpu->archSpecific->pmeStream);
1566     }
1567     else
1568     {
1569         return nullptr;
1570     }
1571 }
1572
1573 void* pme_gpu_get_context(const PmeGpu* pmeGpu)
1574 {
1575     if (pmeGpu)
1576     {
1577         return static_cast<void*>(&pmeGpu->archSpecific->context);
1578     }
1579     else
1580     {
1581         return nullptr;
1582     }
1583 }
1584
1585 GpuEventSynchronizer* pme_gpu_get_forces_ready_synchronizer(const PmeGpu* pmeGpu)
1586 {
1587     if (pmeGpu && pmeGpu->kernelParams)
1588     {
1589         return &pmeGpu->archSpecific->pmeForcesReady;
1590     }
1591     else
1592     {
1593         return nullptr;
1594     }
1595 }