src/gromacs/ewald/pme-gpu-internal.cpp
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2016,2017,2018, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35
36 /*! \internal \file
37  *
38  * \brief This file contains internal function implementations
39  * for performing the PME calculations on GPU.
40  *
41  * Note that this file is compiled as regular C++ source in OpenCL builds, but
42  * it is treated as CUDA source in CUDA-enabled GPU builds.
43  *
44  * \author Aleksei Iupinov <a.yupinov@gmail.com>
45  * \ingroup module_ewald
46  */
47
48 #include "gmxpre.h"
49
50 #include "pme-gpu-internal.h"
51
52 #include "config.h"
53
54 #include <list>
55 #include <string>
56
57 #include "gromacs/ewald/ewald-utils.h"
58 #include "gromacs/gpu_utils/gpu_utils.h"
59 #include "gromacs/math/invertmatrix.h"
60 #include "gromacs/math/units.h"
61 #include "gromacs/timing/gpu_timing.h"
62 #include "gromacs/utility/exceptions.h"
63 #include "gromacs/utility/fatalerror.h"
64 #include "gromacs/utility/gmxassert.h"
65 #include "gromacs/utility/logger.h"
66 #include "gromacs/utility/stringutil.h"
67
68 #if GMX_GPU == GMX_GPU_CUDA
69 #include "gromacs/gpu_utils/pmalloc_cuda.h"
70
71 #include "pme.cuh"
72 #elif GMX_GPU == GMX_GPU_OPENCL
73 #include "gromacs/gpu_utils/gmxopencl.h"
74 #endif
75
76 #include "gromacs/ewald/pme.h"
77
78 #include "pme-gpu-3dfft.h"
79 #include "pme-gpu-constants.h"
80 #include "pme-gpu-program-impl.h"
81 #include "pme-gpu-timings.h"
82 #include "pme-gpu-types.h"
83 #include "pme-gpu-types-host.h"
84 #include "pme-gpu-types-host-impl.h"
85 #include "pme-gpu-utils.h"
86 #include "pme-grid.h"
87 #include "pme-internal.h"
88
89 /*! \internal \brief
90  * Wrapper for getting a pointer to the plain C++ part of the GPU kernel parameters structure.
91  *
92  * \param[in] pmeGpu  The PME GPU structure.
93  * \returns The pointer to the kernel parameters.
94  */
95 static PmeGpuKernelParamsBase *pme_gpu_get_kernel_params_base_ptr(const PmeGpu *pmeGpu)
96 {
97     // reinterpret_cast is needed because the derived CUDA structure is not known in this file
98     auto *kernelParamsPtr = reinterpret_cast<PmeGpuKernelParamsBase *>(pmeGpu->kernelParams.get());
99     return kernelParamsPtr;
100 }
101
102 int pme_gpu_get_atom_data_alignment(const PmeGpu *)
103 {
104     //TODO: this can be simplified, as PME_ATOM_DATA_ALIGNMENT is now constant
105     return PME_ATOM_DATA_ALIGNMENT;
106 }
107
108 int pme_gpu_get_atoms_per_warp(const PmeGpu *pmeGpu)
109 {
110 #if GMX_GPU == GMX_GPU_CUDA
111     const int order = pmeGpu->common->pme_order;
112     GMX_ASSERT(order > 0, "Invalid PME order");
113     return PME_SPREADGATHER_ATOMS_PER_WARP;
114 #else
115     GMX_THROW(gmx::NotImplementedError("Atom alignment per warp has to be deduced dynamically for OpenCL"));
116     GMX_UNUSED_VALUE(pmeGpu);
117 #endif
118 }
119
120 void pme_gpu_synchronize(const PmeGpu *pmeGpu)
121 {
122     gpuStreamSynchronize(pmeGpu->archSpecific->pmeStream);
123 }
124
125 void pme_gpu_alloc_energy_virial(const PmeGpu *pmeGpu)
126 {
127     const size_t energyAndVirialSize = c_virialAndEnergyCount * sizeof(float);
128     allocateDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy, c_virialAndEnergyCount, pmeGpu->archSpecific->context);
129     pmalloc((void **)&pmeGpu->staging.h_virialAndEnergy, energyAndVirialSize);
130 }
131
132 void pme_gpu_free_energy_virial(PmeGpu *pmeGpu)
133 {
134     freeDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy);
135     pfree(pmeGpu->staging.h_virialAndEnergy);
136     pmeGpu->staging.h_virialAndEnergy = nullptr;
137 }
138
139 void pme_gpu_clear_energy_virial(const PmeGpu *pmeGpu)
140 {
141     clearDeviceBufferAsync(&pmeGpu->kernelParams->constants.d_virialAndEnergy, 0,
142                            c_virialAndEnergyCount, pmeGpu->archSpecific->pmeStream);
143 }
144
145 void pme_gpu_realloc_and_copy_bspline_values(const PmeGpu *pmeGpu)
146 {
147     const int splineValuesOffset[DIM] = {
148         0,
149         pmeGpu->kernelParams->grid.realGridSize[XX],
150         pmeGpu->kernelParams->grid.realGridSize[XX] + pmeGpu->kernelParams->grid.realGridSize[YY]
151     };
152     memcpy((void *)&pmeGpu->kernelParams->grid.splineValuesOffset, &splineValuesOffset, sizeof(splineValuesOffset));
153
154     const int newSplineValuesSize = pmeGpu->kernelParams->grid.realGridSize[XX] +
155         pmeGpu->kernelParams->grid.realGridSize[YY] +
156         pmeGpu->kernelParams->grid.realGridSize[ZZ];
157     const bool shouldRealloc = (newSplineValuesSize > pmeGpu->archSpecific->splineValuesSize);
158     reallocateDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, newSplineValuesSize,
159                            &pmeGpu->archSpecific->splineValuesSize, &pmeGpu->archSpecific->splineValuesSizeAlloc, pmeGpu->archSpecific->context);
160     if (shouldRealloc)
161     {
162         /* Reallocate the host buffer */
163         pfree(pmeGpu->staging.h_splineModuli);
164         pmalloc((void **)&pmeGpu->staging.h_splineModuli, newSplineValuesSize * sizeof(float));
165     }
166     for (int i = 0; i < DIM; i++)
167     {
168         memcpy(pmeGpu->staging.h_splineModuli + splineValuesOffset[i], pmeGpu->common->bsp_mod[i].data(), pmeGpu->common->bsp_mod[i].size() * sizeof(float));
169     }
170     /* TODO: pin original buffer instead! */
171     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, pmeGpu->staging.h_splineModuli,
172                        0, newSplineValuesSize,
173                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
174 }
175
176 void pme_gpu_free_bspline_values(const PmeGpu *pmeGpu)
177 {
178     pfree(pmeGpu->staging.h_splineModuli);
179     freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli);
180 }
181
182 void pme_gpu_realloc_forces(PmeGpu *pmeGpu)
183 {
184     const size_t newForcesSize = pmeGpu->nAtomsAlloc * DIM;
185     GMX_ASSERT(newForcesSize > 0, "Bad number of atoms in PME GPU");
186     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, newForcesSize,
187                            &pmeGpu->archSpecific->forcesSize, &pmeGpu->archSpecific->forcesSizeAlloc, pmeGpu->archSpecific->context);
188     pmeGpu->staging.h_forces.reserve(pmeGpu->nAtomsAlloc);
189     pmeGpu->staging.h_forces.resize(pmeGpu->kernelParams->atoms.nAtoms);
190 }
191
192 void pme_gpu_free_forces(const PmeGpu *pmeGpu)
193 {
194     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces);
195 }
196
197 void pme_gpu_copy_input_forces(PmeGpu *pmeGpu)
198 {
199     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
200     float *h_forcesFloat = reinterpret_cast<float *>(pmeGpu->staging.h_forces.data());
201     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, h_forcesFloat,
202                        0, DIM * pmeGpu->kernelParams->atoms.nAtoms,
203                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
204 }
205
206 void pme_gpu_copy_output_forces(PmeGpu *pmeGpu)
207 {
208     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
209     float *h_forcesFloat = reinterpret_cast<float *>(pmeGpu->staging.h_forces.data());
210     copyFromDeviceBuffer(h_forcesFloat, &pmeGpu->kernelParams->atoms.d_forces,
211                          0, DIM * pmeGpu->kernelParams->atoms.nAtoms,
212                          pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
213 }
214
215 void pme_gpu_realloc_coordinates(const PmeGpu *pmeGpu)
216 {
217     const size_t newCoordinatesSize = pmeGpu->nAtomsAlloc * DIM;
218     GMX_ASSERT(newCoordinatesSize > 0, "Bad number of atoms in PME GPU");
219     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coordinates, newCoordinatesSize,
220                            &pmeGpu->archSpecific->coordinatesSize, &pmeGpu->archSpecific->coordinatesSizeAlloc, pmeGpu->archSpecific->context);
221     if (c_usePadding)
222     {
223         const size_t paddingIndex = DIM * pmeGpu->kernelParams->atoms.nAtoms;
224         const size_t paddingCount = DIM * pmeGpu->nAtomsAlloc - paddingIndex;
225         if (paddingCount > 0)
226         {
227             clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coordinates, paddingIndex,
228                                    paddingCount, pmeGpu->archSpecific->pmeStream);
229         }
230     }
231 }
232
233 void pme_gpu_copy_input_coordinates(const PmeGpu *pmeGpu, const rvec *h_coordinates)
234 {
235     GMX_ASSERT(h_coordinates, "Bad host-side coordinate buffer in PME GPU");
236 #if GMX_DOUBLE
237     GMX_RELEASE_ASSERT(false, "Only single precision is supported");
238     GMX_UNUSED_VALUE(h_coordinates);
239 #else
240     const float *h_coordinatesFloat = reinterpret_cast<const float *>(h_coordinates);
241     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coordinates, h_coordinatesFloat,
242                        0, pmeGpu->kernelParams->atoms.nAtoms * DIM,
243                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
244 #endif
245 }
246
247 void pme_gpu_free_coordinates(const PmeGpu *pmeGpu)
248 {
249     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coordinates);
250 }
251
252 void pme_gpu_realloc_and_copy_input_coefficients(const PmeGpu *pmeGpu, const float *h_coefficients)
253 {
254     GMX_ASSERT(h_coefficients, "Bad host-side charge buffer in PME GPU");
255     const size_t newCoefficientsSize = pmeGpu->nAtomsAlloc;
256     GMX_ASSERT(newCoefficientsSize > 0, "Bad number of atoms in PME GPU");
257     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients, newCoefficientsSize,
258                            &pmeGpu->archSpecific->coefficientsSize, &pmeGpu->archSpecific->coefficientsSizeAlloc, pmeGpu->archSpecific->context);
259     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients, const_cast<float *>(h_coefficients),
260                        0, pmeGpu->kernelParams->atoms.nAtoms,
261                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
262     if (c_usePadding)
263     {
264         const size_t paddingIndex = pmeGpu->kernelParams->atoms.nAtoms;
265         const size_t paddingCount = pmeGpu->nAtomsAlloc - paddingIndex;
266         if (paddingCount > 0)
267         {
268             clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coefficients, paddingIndex,
269                                    paddingCount, pmeGpu->archSpecific->pmeStream);
270         }
271     }
272 }
273
274 void pme_gpu_free_coefficients(const PmeGpu *pmeGpu)
275 {
276     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients);
277 }
278
279 void pme_gpu_realloc_spline_data(const PmeGpu *pmeGpu)
280 {
281     const int    order             = pmeGpu->common->pme_order;
282     const int    alignment         = pme_gpu_get_atoms_per_warp(pmeGpu);
283     const size_t nAtomsPadded      = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
284     const int    newSplineDataSize = DIM * order * nAtomsPadded;
285     GMX_ASSERT(newSplineDataSize > 0, "Bad number of atoms in PME GPU");
286     /* Two arrays of the same size */
287     const bool shouldRealloc        = (newSplineDataSize > pmeGpu->archSpecific->splineDataSize);
288     int        currentSizeTemp      = pmeGpu->archSpecific->splineDataSize;
289     int        currentSizeTempAlloc = pmeGpu->archSpecific->splineDataSizeAlloc;
290     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta, newSplineDataSize,
291                            &currentSizeTemp, &currentSizeTempAlloc, pmeGpu->archSpecific->context);
292     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta, newSplineDataSize,
293                            &pmeGpu->archSpecific->splineDataSize, &pmeGpu->archSpecific->splineDataSizeAlloc, pmeGpu->archSpecific->context);
294     // the host side reallocation
295     if (shouldRealloc)
296     {
297         pfree(pmeGpu->staging.h_theta);
298         pmalloc((void **)&pmeGpu->staging.h_theta, newSplineDataSize * sizeof(float));
299         pfree(pmeGpu->staging.h_dtheta);
300         pmalloc((void **)&pmeGpu->staging.h_dtheta, newSplineDataSize * sizeof(float));
301     }
302 }
303
304 void pme_gpu_free_spline_data(const PmeGpu *pmeGpu)
305 {
306     /* Two arrays of the same size */
307     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta);
308     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta);
309     pfree(pmeGpu->staging.h_theta);
310     pfree(pmeGpu->staging.h_dtheta);
311 }
312
313 void pme_gpu_realloc_grid_indices(const PmeGpu *pmeGpu)
314 {
315     const size_t newIndicesSize = DIM * pmeGpu->nAtomsAlloc;
316     GMX_ASSERT(newIndicesSize > 0, "Bad number of atoms in PME GPU");
317     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices, newIndicesSize,
318                            &pmeGpu->archSpecific->gridlineIndicesSize, &pmeGpu->archSpecific->gridlineIndicesSizeAlloc, pmeGpu->archSpecific->context);
319     pfree(pmeGpu->staging.h_gridlineIndices);
320     pmalloc((void **)&pmeGpu->staging.h_gridlineIndices, newIndicesSize * sizeof(int));
321 }
322
323 void pme_gpu_free_grid_indices(const PmeGpu *pmeGpu)
324 {
325     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices);
326     pfree(pmeGpu->staging.h_gridlineIndices);
327 }
328
329 void pme_gpu_realloc_grids(PmeGpu *pmeGpu)
330 {
331     auto     *kernelParamsPtr = pmeGpu->kernelParams.get();
332     const int newRealGridSize = kernelParamsPtr->grid.realGridSizePadded[XX] *
333         kernelParamsPtr->grid.realGridSizePadded[YY] *
334         kernelParamsPtr->grid.realGridSizePadded[ZZ];
335     const int newComplexGridSize = kernelParamsPtr->grid.complexGridSizePadded[XX] *
336         kernelParamsPtr->grid.complexGridSizePadded[YY] *
337         kernelParamsPtr->grid.complexGridSizePadded[ZZ] * 2;
338     // Multiplied by 2 because the complex grid size is counted in complex numbers, while all allocations/pointers are float
339     if (pmeGpu->archSpecific->performOutOfPlaceFFT)
340     {
341         /* 2 separate grids */
342         reallocateDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, newComplexGridSize,
343                                &pmeGpu->archSpecific->complexGridSize, &pmeGpu->archSpecific->complexGridSizeAlloc, pmeGpu->archSpecific->context);
344         reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid, newRealGridSize,
345                                &pmeGpu->archSpecific->realGridSize, &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->context);
346     }
347     else
348     {
349         /* A single buffer so that any grid will fit */
350         const int newGridsSize = std::max(newRealGridSize, newComplexGridSize);
351         reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid, newGridsSize,
352                                &pmeGpu->archSpecific->realGridSize, &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->context);
353         kernelParamsPtr->grid.d_fourierGrid   = kernelParamsPtr->grid.d_realGrid;
354         pmeGpu->archSpecific->complexGridSize = pmeGpu->archSpecific->realGridSize;
355         // the size might get used later for copying the grid
356     }
357 }
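/* Size example for the logic above (illustrative numbers only): with realGridSizePadded = {64, 64, 64}
 * and complexGridSizePadded = {64, 64, 33}, newRealGridSize = 262144 and newComplexGridSize =
 * 2 * 135168 = 270336 floats; the in-place branch then allocates max(262144, 270336) = 270336.
 */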
358
359 void pme_gpu_free_grids(const PmeGpu *pmeGpu)
360 {
361     if (pmeGpu->archSpecific->performOutOfPlaceFFT)
362     {
363         freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_fourierGrid);
364     }
365     freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid);
366 }
367
368 void pme_gpu_clear_grids(const PmeGpu *pmeGpu)
369 {
370     clearDeviceBufferAsync(&pmeGpu->kernelParams->grid.d_realGrid, 0,
371                            pmeGpu->archSpecific->realGridSize, pmeGpu->archSpecific->pmeStream);
372 }
373
374 void pme_gpu_realloc_and_copy_fract_shifts(PmeGpu *pmeGpu)
375 {
376     pme_gpu_free_fract_shifts(pmeGpu);
377
378     auto        *kernelParamsPtr = pmeGpu->kernelParams.get();
379
380     const int    nx                  = kernelParamsPtr->grid.realGridSize[XX];
381     const int    ny                  = kernelParamsPtr->grid.realGridSize[YY];
382     const int    nz                  = kernelParamsPtr->grid.realGridSize[ZZ];
383     const int    cellCount           = c_pmeNeighborUnitcellCount;
384     const int    gridDataOffset[DIM] = {0, cellCount * nx, cellCount * (nx + ny)};
385
386     memcpy(kernelParamsPtr->grid.tablesOffsets, &gridDataOffset, sizeof(gridDataOffset));
387
388     const int    newFractShiftsSize  = cellCount * (nx + ny + nz);
389
390 #if GMX_GPU == GMX_GPU_CUDA
391     initParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable,
392                          kernelParamsPtr->fractShiftsTableTexture,
393                          pmeGpu->common->fsh.data(),
394                          newFractShiftsSize,
395                          pmeGpu->deviceInfo);
396
397     initParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
398                          kernelParamsPtr->gridlineIndicesTableTexture,
399                          pmeGpu->common->nn.data(),
400                          newFractShiftsSize,
401                          pmeGpu->deviceInfo);
402 #elif GMX_GPU == GMX_GPU_OPENCL
403     // No dedicated texture routines in OpenCL, so plain device buffers with explicit copies are used
404     allocateDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, newFractShiftsSize, pmeGpu->archSpecific->context);
405     allocateDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, newFractShiftsSize, pmeGpu->archSpecific->context);
406     copyToDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, pmeGpu->common->fsh.data(),
407                        0, newFractShiftsSize,
408                        pmeGpu->archSpecific->pmeStream, GpuApiCallBehavior::Async, nullptr);
409     copyToDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, pmeGpu->common->nn.data(),
410                        0, newFractShiftsSize,
411                        pmeGpu->archSpecific->pmeStream, GpuApiCallBehavior::Async, nullptr);
412 #endif
413 }
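/* Offset arithmetic above, with an illustrative cell count and a 64 x 64 x 64 grid:
 * if cellCount were 5, tablesOffsets would be {0, 320, 640} and newFractShiftsSize = 960,
 * i.e. the X, Y and Z shift tables are packed back to back in a single device buffer.
 */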
414
415 void pme_gpu_free_fract_shifts(const PmeGpu *pmeGpu)
416 {
417     auto *kernelParamsPtr = pmeGpu->kernelParams.get();
418 #if GMX_GPU == GMX_GPU_CUDA
419     destroyParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable,
420                             kernelParamsPtr->fractShiftsTableTexture,
421                             pmeGpu->deviceInfo);
422     destroyParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
423                             kernelParamsPtr->gridlineIndicesTableTexture,
424                             pmeGpu->deviceInfo);
425 #elif GMX_GPU == GMX_GPU_OPENCL
426     freeDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable);
427     freeDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable);
428 #endif
429 }
430
431 bool pme_gpu_stream_query(const PmeGpu *pmeGpu)
432 {
433     return haveStreamTasksCompleted(pmeGpu->archSpecific->pmeStream);
434 }
435
436 void pme_gpu_copy_input_gather_grid(const PmeGpu *pmeGpu, float *h_grid)
437 {
438     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid, h_grid,
439                        0, pmeGpu->archSpecific->realGridSize,
440                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
441 }
442
443 void pme_gpu_copy_output_spread_grid(const PmeGpu *pmeGpu, float *h_grid)
444 {
445     copyFromDeviceBuffer(h_grid, &pmeGpu->kernelParams->grid.d_realGrid,
446                          0, pmeGpu->archSpecific->realGridSize,
447                          pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
448     pmeGpu->archSpecific->syncSpreadGridD2H.markEvent(pmeGpu->archSpecific->pmeStream);
449 }
450
451 void pme_gpu_copy_output_spread_atom_data(const PmeGpu *pmeGpu)
452 {
453     const int    alignment        = pme_gpu_get_atoms_per_warp(pmeGpu);
454     const size_t nAtomsPadded     = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
455     const size_t splinesCount     = DIM * nAtomsPadded * pmeGpu->common->pme_order;
456     auto        *kernelParamsPtr  = pmeGpu->kernelParams.get();
457     copyFromDeviceBuffer(pmeGpu->staging.h_dtheta, &kernelParamsPtr->atoms.d_dtheta,
458                          0, splinesCount,
459                          pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
460     copyFromDeviceBuffer(pmeGpu->staging.h_theta, &kernelParamsPtr->atoms.d_theta,
461                          0, splinesCount,
462                          pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
463     copyFromDeviceBuffer(pmeGpu->staging.h_gridlineIndices, &kernelParamsPtr->atoms.d_gridlineIndices,
464                          0, kernelParamsPtr->atoms.nAtoms * DIM,
465                          pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
466 }
467
468 void pme_gpu_copy_input_gather_atom_data(const PmeGpu *pmeGpu)
469 {
470     const int    alignment       = pme_gpu_get_atoms_per_warp(pmeGpu);
471     const size_t nAtomsPadded    = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
472     const size_t splinesCount    = DIM * nAtomsPadded * pmeGpu->common->pme_order;
473     auto        *kernelParamsPtr = pmeGpu->kernelParams.get();
474     if (c_usePadding)
475     {
476         // TODO: could clear only the padding and not the whole thing, but this is test-exclusive code anyway
477         clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_gridlineIndices, 0,
478                                pmeGpu->nAtomsAlloc * DIM, pmeGpu->archSpecific->pmeStream);
479         clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_dtheta, 0,
480                                pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM, pmeGpu->archSpecific->pmeStream);
481         clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_theta, 0,
482                                pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM, pmeGpu->archSpecific->pmeStream);
483     }
484     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_dtheta, pmeGpu->staging.h_dtheta,
485                        0, splinesCount,
486                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
487     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_theta, pmeGpu->staging.h_theta,
488                        0, splinesCount,
489                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
490     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_gridlineIndices, pmeGpu->staging.h_gridlineIndices,
491                        0, kernelParamsPtr->atoms.nAtoms * DIM,
492                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
493 }
494
495 void pme_gpu_sync_spread_grid(const PmeGpu *pmeGpu)
496 {
497     pmeGpu->archSpecific->syncSpreadGridD2H.waitForEvent();
498 }
499
500 void pme_gpu_init_internal(PmeGpu *pmeGpu)
501 {
502     /* Allocate the target-specific structures */
503     pmeGpu->archSpecific.reset(new PmeGpuSpecific());
504     pmeGpu->kernelParams.reset(new PmeGpuKernelParams());
505
506     pmeGpu->archSpecific->performOutOfPlaceFFT = true;
507     /* This should give better performance, according to the cuFFT documentation.
508      * The performance seems to be the same though.
509      * TODO: PME could also try to pick up nice grid sizes (with factors of 2, 3, 5, 7).
510      */
511
512     // timing enabling - TODO put this in gpu_utils (even though generally this is just option handling?) and reuse in NB
513     if (GMX_GPU == GMX_GPU_CUDA)
514     {
515         /* WARNING: CUDA timings are incorrect with multiple streams.
516          *          This is the main reason why they are disabled by default.
517          */
518         // TODO: Consider turning on by default when we can detect nr of streams.
519         pmeGpu->archSpecific->useTiming = (getenv("GMX_ENABLE_GPU_TIMING") != nullptr);
520     }
521     else if (GMX_GPU == GMX_GPU_OPENCL)
522     {
523         pmeGpu->archSpecific->useTiming = (getenv("GMX_DISABLE_GPU_TIMING") == nullptr);
524     }
525
526     // TODO: this is just a convenient reuse because programHandle_ currently is in charge of creating context
527     pmeGpu->archSpecific->context = pmeGpu->programHandle_->impl_->context;
528
529 #if GMX_GPU == GMX_GPU_CUDA
530     // Prepare to use the device that this PME task was assigned earlier.
531     CU_RET_ERR(cudaSetDevice(pmeGpu->deviceInfo->id), "Switching to PME CUDA device");
532 #endif
533
534 #if GMX_GPU == GMX_GPU_CUDA
535     pmeGpu->maxGridWidthX = pmeGpu->deviceInfo->prop.maxGridSize[0];
536 #elif GMX_GPU == GMX_GPU_OPENCL
537     //TODO we'll need work size checks for OpenCL too
538 #endif
539
540     /* Creating a PME GPU stream:
541      * - default high priority with CUDA
542      * - no priorities implemented yet with OpenCL; see #2532
543      */
544 #if GMX_GPU == GMX_GPU_CUDA
545     cudaError_t stat;
546     int         highest_priority, lowest_priority;
547     stat = cudaDeviceGetStreamPriorityRange(&lowest_priority, &highest_priority);
548     CU_RET_ERR(stat, "PME cudaDeviceGetStreamPriorityRange failed");
549     stat = cudaStreamCreateWithPriority(&pmeGpu->archSpecific->pmeStream,
550                                         cudaStreamDefault, //cudaStreamNonBlocking,
551                                         highest_priority);
552     CU_RET_ERR(stat, "cudaStreamCreateWithPriority on the PME stream failed");
553 #elif GMX_GPU == GMX_GPU_OPENCL
554     cl_command_queue_properties queueProperties = pmeGpu->archSpecific->useTiming ? CL_QUEUE_PROFILING_ENABLE : 0;
555     cl_device_id                device_id       = pmeGpu->deviceInfo->ocl_gpu_id.ocl_device_id;
556     cl_int                      clError;
557     pmeGpu->archSpecific->pmeStream = clCreateCommandQueue(pmeGpu->archSpecific->context,
558                                                            device_id, queueProperties, &clError);
559     if (clError != CL_SUCCESS)
560     {
561         GMX_THROW(gmx::InternalError("Failed to create PME command queue"));
562     }
563 #endif
564 }
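/* Quick reference for the timing toggles set up above: with CUDA, per-kernel GPU timings are off
 * by default and enabled by setting GMX_ENABLE_GPU_TIMING; with OpenCL they are on by default and
 * disabled by setting GMX_DISABLE_GPU_TIMING.
 */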
565
566 void pme_gpu_destroy_specific(const PmeGpu *pmeGpu)
567 {
568 #if GMX_GPU == GMX_GPU_CUDA
569     /* Destroy the CUDA stream */
570     cudaError_t stat = cudaStreamDestroy(pmeGpu->archSpecific->pmeStream);
571     CU_RET_ERR(stat, "PME cudaStreamDestroy error");
572 #elif GMX_GPU == GMX_GPU_OPENCL
573     cl_int clError = clReleaseCommandQueue(pmeGpu->archSpecific->pmeStream);
574     if (clError != CL_SUCCESS)
575     {
576         gmx_warning("Failed to destroy PME command queue");
577     }
578 #endif
579 }
580
581 void pme_gpu_reinit_3dfft(const PmeGpu *pmeGpu)
582 {
583     if (pme_gpu_performs_FFT(pmeGpu))
584     {
585         pmeGpu->archSpecific->fftSetup.resize(0);
586         for (int i = 0; i < pmeGpu->common->ngrids; i++)
587         {
588             pmeGpu->archSpecific->fftSetup.push_back(std::unique_ptr<GpuParallel3dFft>(new GpuParallel3dFft(pmeGpu)));
589         }
590     }
591 }
592
593 void pme_gpu_destroy_3dfft(const PmeGpu *pmeGpu)
594 {
595     pmeGpu->archSpecific->fftSetup.resize(0);
596 }
597
598 int getSplineParamFullIndex(int order, int splineIndex, int dimIndex, int warpIndex, int atomWarpIndex)
599 {
600     if (order != 4)
601     {
602         throw order;
603     }
604     constexpr int fixedOrder = 4;
605     GMX_UNUSED_VALUE(fixedOrder);
606     const int     indexBase  = getSplineParamIndexBase<fixedOrder>(warpIndex, atomWarpIndex);
607     return getSplineParamIndex<fixedOrder>(indexBase, dimIndex, splineIndex);
608 }
609
610 gmx::ArrayRef<gmx::RVec> pme_gpu_get_forces(PmeGpu *pmeGpu)
611 {
612     return pmeGpu->staging.h_forces;
613 }
614
615 void pme_gpu_get_energy_virial(const PmeGpu *pmeGpu, real *energy, matrix virial)
616 {
617     for (int j = 0; j < c_virialAndEnergyCount; j++)
618     {
619         GMX_ASSERT(std::isfinite(pmeGpu->staging.h_virialAndEnergy[j]), "PME GPU produces incorrect energy/virial.");
620     }
621
622     GMX_ASSERT(energy, "Invalid energy output pointer in PME GPU");
623     unsigned int j = 0;
624     virial[XX][XX] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
625     virial[YY][YY] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
626     virial[ZZ][ZZ] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
627     virial[XX][YY] = virial[YY][XX] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
628     virial[XX][ZZ] = virial[ZZ][XX] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
629     virial[YY][ZZ] = virial[ZZ][YY] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
630     *energy        = 0.5f * pmeGpu->staging.h_virialAndEnergy[j++];
631 }
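/* A sketch of the staging layout implied by the unpacking order above (not an authoritative
 * specification): h_virialAndEnergy[0..2] hold the XX, YY, ZZ virial components, [3..5] the
 * XY, XZ, YZ components (mirrored into the symmetric matrix), and [6] the energy; the 0.25f
 * and 0.5f factors rescale the values as accumulated by the solve kernel.
 */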
632
633 void pme_gpu_update_input_box(PmeGpu gmx_unused       *pmeGpu,
634                               const matrix gmx_unused  box)
635 {
636 #if GMX_DOUBLE
637     GMX_THROW(gmx::NotImplementedError("PME is implemented for single-precision only on GPU"));
638 #else
639     matrix  scaledBox;
640     pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
641     auto   *kernelParamsPtr      = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
642     kernelParamsPtr->current.boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
643     GMX_ASSERT(kernelParamsPtr->current.boxVolume != 0.0f, "Zero volume of the unit cell");
644     matrix recipBox;
645     gmx::invertBoxMatrix(scaledBox, recipBox);
646
647     /* The GPU recipBox is transposed as compared to the CPU recipBox.
648      * Spread uses matrix columns (while solve and gather use rows).
649      * There is no particular reason for this; it might be further rethought/optimized for better access patterns.
650      */
651     const real newRecipBox[DIM][DIM] =
652     {
653         {recipBox[XX][XX], recipBox[YY][XX], recipBox[ZZ][XX]},
654         {             0.0, recipBox[YY][YY], recipBox[ZZ][YY]},
655         {             0.0,              0.0, recipBox[ZZ][ZZ]}
656     };
657     memcpy(kernelParamsPtr->current.recipBox, newRecipBox, sizeof(matrix));
658 #endif
659 }
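/* A minimal example of the conversion above (illustrative only): for a rectangular box
 * scaledBox = diag(a, b, c), boxVolume = a * b * c and recipBox = diag(1/a, 1/b, 1/c), so the
 * transposition into newRecipBox is a no-op; it only matters for triclinic boxes, where the
 * off-diagonal elements move from the lower to the upper triangle.
 */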
660
661 /*! \brief \libinternal
662  * (Re-)initializes all the PME GPU data related to the grid size and cut-off.
663  *
664  * \param[in] pmeGpu            The PME GPU structure.
665  */
666 static void pme_gpu_reinit_grids(PmeGpu *pmeGpu)
667 {
668     auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
669     kernelParamsPtr->grid.ewaldFactor = (M_PI * M_PI) / (pmeGpu->common->ewaldcoeff_q * pmeGpu->common->ewaldcoeff_q);
670
671     /* The grid size variants */
672     for (int i = 0; i < DIM; i++)
673     {
674         kernelParamsPtr->grid.realGridSize[i]       = pmeGpu->common->nk[i];
675         kernelParamsPtr->grid.realGridSizeFP[i]     = (float)kernelParamsPtr->grid.realGridSize[i];
676         kernelParamsPtr->grid.realGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
677
678         // The complex grid currently uses no padding;
679         // if it starts to do so, then another test should be added for that
680         kernelParamsPtr->grid.complexGridSize[i]       = kernelParamsPtr->grid.realGridSize[i];
681         kernelParamsPtr->grid.complexGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
682     }
683     /* FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
684     if (!pme_gpu_performs_FFT(pmeGpu))
685     {
686         // This allows the GPU spreading grid and the CPU fftgrid to have the same layout, so that we can copy the data directly
687         kernelParamsPtr->grid.realGridSizePadded[ZZ] = (kernelParamsPtr->grid.realGridSize[ZZ] / 2 + 1) * 2;
688     }
689
690     /* GPU FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
691     kernelParamsPtr->grid.complexGridSize[ZZ] /= 2;
692     kernelParamsPtr->grid.complexGridSize[ZZ]++;
693     kernelParamsPtr->grid.complexGridSizePadded[ZZ] = kernelParamsPtr->grid.complexGridSize[ZZ];
694
695     pme_gpu_realloc_and_copy_fract_shifts(pmeGpu);
696     pme_gpu_realloc_and_copy_bspline_values(pmeGpu);
697     pme_gpu_realloc_grids(pmeGpu);
698     pme_gpu_reinit_3dfft(pmeGpu);
699 }
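/* A small numeric illustration of the bookkeeping above, assuming a 64 x 64 x 64 real grid:
 * complexGridSize becomes {64, 64, 33} (64 / 2 + 1 in the minor dimension), and when the FFT
 * is not done on the GPU, realGridSizePadded[ZZ] is padded to (64 / 2 + 1) * 2 = 66 so that
 * the GPU spread grid matches the CPU fftgrid layout.
 */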
700
701 /* Several GPU functions that refer to the CPU PME data live here.
702  * We would like to keep these away from the GPU-framework specific code for clarity,
703  * as well as to avoid compilation issues with MPI.
704  */
705
706 /*! \brief \libinternal
707  * Copies everything useful from the PME CPU to the PME GPU structure.
708  * The goal is to minimize interaction with the PME CPU structure in the GPU code.
709  *
710  * \param[in] pme         The PME structure.
711  */
712 static void pme_gpu_copy_common_data_from(const gmx_pme_t *pme)
713 {
714     /* TODO: Consider refactoring the CPU PME code to use the same structure,
715      * so that this function becomes 2 lines */
716     PmeGpu *pmeGpu             = pme->gpu;
717     pmeGpu->common->ngrids        = pme->ngrids;
718     pmeGpu->common->epsilon_r     = pme->epsilon_r;
719     pmeGpu->common->ewaldcoeff_q  = pme->ewaldcoeff_q;
720     pmeGpu->common->nk[XX]        = pme->nkx;
721     pmeGpu->common->nk[YY]        = pme->nky;
722     pmeGpu->common->nk[ZZ]        = pme->nkz;
723     pmeGpu->common->pme_order     = pme->pme_order;
724     for (int i = 0; i < DIM; i++)
725     {
726         pmeGpu->common->bsp_mod[i].assign(pme->bsp_mod[i], pme->bsp_mod[i] + pmeGpu->common->nk[i]);
727     }
728     const int cellCount = c_pmeNeighborUnitcellCount;
729     pmeGpu->common->fsh.resize(0);
730     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshx, pme->fshx + cellCount * pme->nkx);
731     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshy, pme->fshy + cellCount * pme->nky);
732     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshz, pme->fshz + cellCount * pme->nkz);
733     pmeGpu->common->nn.resize(0);
734     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnx, pme->nnx + cellCount * pme->nkx);
735     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nny, pme->nny + cellCount * pme->nky);
736     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnz, pme->nnz + cellCount * pme->nkz);
737     pmeGpu->common->runMode   = pme->runMode;
738     pmeGpu->common->boxScaler = pme->boxScaler;
739 }
740
741 /*! \libinternal \brief
742  * Initializes the PME GPU data at the beginning of the run.
743  * TODO: this should become PmeGpu::PmeGpu()
744  *
745  * \param[in,out] pme            The PME structure.
746  * \param[in,out] gpuInfo        The GPU information structure.
747  * \param[in]     pmeGpuProgram  The handle to the program/kernel data created outside (e.g. in unit tests/runner)
748  */
749 static void pme_gpu_init(gmx_pme_t          *pme,
750                          gmx_device_info_t  *gpuInfo,
751                          PmeGpuProgramHandle pmeGpuProgram)
752 {
753     pme->gpu          = new PmeGpu();
754     PmeGpu *pmeGpu = pme->gpu;
755     changePinningPolicy(&pmeGpu->staging.h_forces, pme_get_pinning_policy());
756     pmeGpu->common = std::shared_ptr<PmeShared>(new PmeShared());
757
758     /* These settings are set here for the whole run; dynamic ones are set in pme_gpu_reinit() */
759     /* A convenience variable. */
760     pmeGpu->settings.useDecomposition = (pme->nnodes == 1);
761     /* TODO: CPU gather with GPU spread is broken due to different theta/dtheta layout. */
762     pmeGpu->settings.performGPUGather = true;
763
764     pme_gpu_set_testing(pmeGpu, false);
765
766     pmeGpu->deviceInfo = gpuInfo;
767     GMX_ASSERT(pmeGpuProgram != nullptr, "GPU kernels must be already compiled");
768     pmeGpu->programHandle_ = pmeGpuProgram;
769
770     pme_gpu_init_internal(pmeGpu);
771     pme_gpu_alloc_energy_virial(pmeGpu);
772
773     pme_gpu_copy_common_data_from(pme);
774
775     GMX_ASSERT(pmeGpu->common->epsilon_r != 0.0f, "PME GPU: bad electrostatic coefficient");
776
777     auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
778     kernelParamsPtr->constants.elFactor = ONE_4PI_EPS0 / pmeGpu->common->epsilon_r;
779 }
780
781 void pme_gpu_transform_spline_atom_data(const PmeGpu *pmeGpu, const pme_atomcomm_t *atc,
782                                         PmeSplineDataType type, int dimIndex, PmeLayoutTransform transform)
783 {
784     // The GPU atom spline data is currently laid out differently from the CPU one.
785     // This function converts the data from GPU to CPU layout (in the host memory).
786     // It is only intended for testing purposes so far.
787     // Ideally we should use similar layouts on CPU and GPU if we care about mixed modes and their performance
788     // (e.g. spreading on GPU, gathering on CPU).
789     GMX_RELEASE_ASSERT(atc->nthread == 1, "Only the serial PME data layout is supported");
790     const uintmax_t threadIndex  = 0;
791     const auto      atomCount    = pme_gpu_get_kernel_params_base_ptr(pmeGpu)->atoms.nAtoms;
792     const auto      atomsPerWarp = pme_gpu_get_atoms_per_warp(pmeGpu);
793     const auto      pmeOrder     = pmeGpu->common->pme_order;
794
795     real           *cpuSplineBuffer;
796     float          *h_splineBuffer;
797     switch (type)
798     {
799         case PmeSplineDataType::Values:
800             cpuSplineBuffer = atc->spline[threadIndex].theta[dimIndex];
801             h_splineBuffer  = pmeGpu->staging.h_theta;
802             break;
803
804         case PmeSplineDataType::Derivatives:
805             cpuSplineBuffer = atc->spline[threadIndex].dtheta[dimIndex];
806             h_splineBuffer  = pmeGpu->staging.h_dtheta;
807             break;
808
809         default:
810             GMX_THROW(gmx::InternalError("Unknown spline data type"));
811     }
812
813     for (auto atomIndex = 0; atomIndex < atomCount; atomIndex++)
814     {
815         auto atomWarpIndex = atomIndex % atomsPerWarp;
816         auto warpIndex     = atomIndex / atomsPerWarp;
817         for (auto orderIndex = 0; orderIndex < pmeOrder; orderIndex++)
818         {
819             const auto gpuValueIndex = getSplineParamFullIndex(pmeOrder, orderIndex, dimIndex, warpIndex, atomWarpIndex);
820             const auto cpuValueIndex = atomIndex * pmeOrder + orderIndex;
821             GMX_ASSERT(cpuValueIndex < atomCount * pmeOrder, "Atom spline data index out of bounds (while transforming GPU data layout for host)");
822             switch (transform)
823             {
824                 case PmeLayoutTransform::GpuToHost:
825                     cpuSplineBuffer[cpuValueIndex] = h_splineBuffer[gpuValueIndex];
826                     break;
827
828                 case PmeLayoutTransform::HostToGpu:
829                     h_splineBuffer[gpuValueIndex] = cpuSplineBuffer[cpuValueIndex];
830                     break;
831
832                 default:
833                     GMX_THROW(gmx::InternalError("Unknown layout transform"));
834             }
835         }
836     }
837 }
838
839 void pme_gpu_get_real_grid_sizes(const PmeGpu *pmeGpu, gmx::IVec *gridSize, gmx::IVec *paddedGridSize)
840 {
841     GMX_ASSERT(gridSize != nullptr, "");
842     GMX_ASSERT(paddedGridSize != nullptr, "");
843     GMX_ASSERT(pmeGpu != nullptr, "");
844     auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
845     for (int i = 0; i < DIM; i++)
846     {
847         (*gridSize)[i]       = kernelParamsPtr->grid.realGridSize[i];
848         (*paddedGridSize)[i] = kernelParamsPtr->grid.realGridSizePadded[i];
849     }
850 }
851
852 void pme_gpu_reinit(gmx_pme_t          *pme,
853                     gmx_device_info_t  *gpuInfo,
854                     PmeGpuProgramHandle pmeGpuProgram)
855 {
856     if (!pme_gpu_active(pme))
857     {
858         return;
859     }
860
861     if (!pme->gpu)
862     {
863         /* First-time initialization */
864         pme_gpu_init(pme, gpuInfo, pmeGpuProgram);
865     }
866     else
867     {
868         /* After this call nothing in the GPU code should refer to the gmx_pme_t *pme itself - until the next pme_gpu_reinit */
869         pme_gpu_copy_common_data_from(pme);
870     }
871     /* GPU FFT will only get used for a single rank. */
872     pme->gpu->settings.performGPUFFT   = (pme->gpu->common->runMode == PmeRunMode::GPU) && !pme_gpu_uses_dd(pme->gpu);
873     pme->gpu->settings.performGPUSolve = (pme->gpu->common->runMode == PmeRunMode::GPU);
874
875     /* Reinit active timers */
876     pme_gpu_reinit_timings(pme->gpu);
877
878     pme_gpu_reinit_grids(pme->gpu);
879     // Note: if timing the reinit launch overhead becomes more relevant
880     // (e.g. with regular PP-PME re-balancing), we should pass wcycle here.
881     pme_gpu_reinit_computation(pme, nullptr);
882     /* Clear the previous box - doesn't hurt, and forces the PME CPU recipbox
883      * update for mixed mode on grid switch. TODO: use shared recipbox field.
884      */
885     std::memset(pme->gpu->common->previousBox, 0, sizeof(pme->gpu->common->previousBox));
886 }
887
888 void pme_gpu_destroy(PmeGpu *pmeGpu)
889 {
890     /* Free lots of data */
891     pme_gpu_free_energy_virial(pmeGpu);
892     pme_gpu_free_bspline_values(pmeGpu);
893     pme_gpu_free_forces(pmeGpu);
894     pme_gpu_free_coordinates(pmeGpu);
895     pme_gpu_free_coefficients(pmeGpu);
896     pme_gpu_free_spline_data(pmeGpu);
897     pme_gpu_free_grid_indices(pmeGpu);
898     pme_gpu_free_fract_shifts(pmeGpu);
899     pme_gpu_free_grids(pmeGpu);
900
901     pme_gpu_destroy_3dfft(pmeGpu);
902
903     /* Free the GPU-framework specific data last */
904     pme_gpu_destroy_specific(pmeGpu);
905
906     delete pmeGpu;
907 }
908
909 void pme_gpu_reinit_atoms(PmeGpu *pmeGpu, const int nAtoms, const real *charges)
910 {
911     auto      *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
912     kernelParamsPtr->atoms.nAtoms = nAtoms;
913     const int  alignment = pme_gpu_get_atom_data_alignment(pmeGpu);
914     pmeGpu->nAtomsPadded = ((nAtoms + alignment - 1) / alignment) * alignment;
915     const int  nAtomsAlloc   = c_usePadding ? pmeGpu->nAtomsPadded : nAtoms;
916     const bool haveToRealloc = (pmeGpu->nAtomsAlloc < nAtomsAlloc); /* This check might be redundant, but is logical */
917     pmeGpu->nAtomsAlloc = nAtomsAlloc;
918
919 #if GMX_DOUBLE
920     GMX_RELEASE_ASSERT(false, "Only single precision supported");
921     GMX_UNUSED_VALUE(charges);
922 #else
923     pme_gpu_realloc_and_copy_input_coefficients(pmeGpu, reinterpret_cast<const float *>(charges));
924     /* This could also be conditioned on haveToRealloc, but the copy always needs to be performed */
925 #endif
926
927     if (haveToRealloc)
928     {
929         pme_gpu_realloc_coordinates(pmeGpu);
930         pme_gpu_realloc_forces(pmeGpu);
931         pme_gpu_realloc_spline_data(pmeGpu);
932         pme_gpu_realloc_grid_indices(pmeGpu);
933     }
934 }
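/* Illustration of the padding arithmetic above, with a hypothetical alignment of 64:
 * for nAtoms = 1000, nAtomsPadded = ((1000 + 63) / 64) * 64 = 1024, so nAtomsAlloc = 1024 when
 * c_usePadding is set; reallocation of the per-atom buffers only happens when this grows.
 */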
935
936 void pme_gpu_3dfft(const PmeGpu *pmeGpu, gmx_fft_direction dir, int grid_index)
937 {
938     int   timerId = (dir == GMX_FFT_REAL_TO_COMPLEX) ? gtPME_FFT_R2C : gtPME_FFT_C2R;
939
940     auto *timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timerId);
941     pme_gpu_start_timing(pmeGpu, timerId);
942     pmeGpu->archSpecific->fftSetup[grid_index]->perform3dFft(dir, timingEvent);
943     pme_gpu_stop_timing(pmeGpu, timerId);
944 }
945
946 /*! \brief
947  * Given possibly large \p blockCount, returns a compact 1D or 2D grid for kernel scheduling,
948  * to minimize number of unused blocks.
949  * to minimize the number of unused blocks.
950 std::pair<int, int> inline pmeGpuCreateGrid(const PmeGpu *pmeGpu, int blockCount)
951 {
952     // How many maximum widths in X do we need (hopefully just one)
953     const int minRowCount = (blockCount + pmeGpu->maxGridWidthX - 1) / pmeGpu->maxGridWidthX;
954     // Trying to make things even
955     const int colCount = (blockCount + minRowCount - 1) / minRowCount;
956     GMX_ASSERT((colCount * minRowCount - blockCount) >= 0, "pmeGpuCreateGrid: totally wrong");
957     GMX_ASSERT((colCount * minRowCount - blockCount) < minRowCount, "pmeGpuCreateGrid: excessive blocks");
958     return std::pair<int, int>(colCount, minRowCount);
959 }
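/* A worked example of the grid-splitting arithmetic above, with purely illustrative numbers
 * (the real maxGridWidthX comes from the device properties): for blockCount = 100001 and a
 * hypothetical maxGridWidthX = 65535, minRowCount = 2 and colCount = 50001, i.e. a 50001 x 2
 * launch grid scheduling 100002 blocks, of which only 1 is unused.
 */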
960
961 void pme_gpu_spread(const PmeGpu    *pmeGpu,
962                     int gmx_unused   gridIndex,
963                     real            *h_grid,
964                     bool             computeSplines,
965                     bool             spreadCharges)
966 {
967     GMX_ASSERT(computeSplines || spreadCharges, "PME spline/spread kernel has invalid input (nothing to do)");
968     const auto   *kernelParamsPtr = pmeGpu->kernelParams.get();
969     GMX_ASSERT(kernelParamsPtr->atoms.nAtoms > 0, "No atom data in PME GPU spread");
970
971 #if GMX_GPU == GMX_GPU_OPENCL
972     const int c_spreadMaxThreadsPerBlock = 1; //FIXME make a runtime decision
973 #endif
974
975     const int order         = pmeGpu->common->pme_order;
976     const int atomsPerBlock = c_spreadMaxThreadsPerBlock / PME_SPREADGATHER_THREADS_PER_ATOM;
977     // TODO: pick smaller block size in runtime if needed
978     // (e.g. on 660 Ti where 50% occupancy is ~25% faster than 100% occupancy with RNAse (~17.8k atoms))
979     // If doing so, change atomsPerBlock in the kernels as well.
980     // TODO: test varying block sizes on modern arch-s as well
981     // TODO: also consider using cudaFuncSetCacheConfig() for preferring shared memory on older architectures
982     // (for spline data mostly, together with varying PME_GPU_PARALLEL_SPLINE define)
983     GMX_ASSERT(!c_usePadding || !(PME_ATOM_DATA_ALIGNMENT % atomsPerBlock), "inconsistent atom data padding vs. spreading block size");
984
985     const int          blockCount = pmeGpu->nAtomsPadded / atomsPerBlock;
986     auto               dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
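    /* For illustration with hypothetical values: c_spreadMaxThreadsPerBlock = 256 and
     * PME_SPREADGATHER_THREADS_PER_ATOM = 16 would give atomsPerBlock = 16, so a system padded
     * to 48128 atoms launches 3008 spline/spread blocks arranged by pmeGpuCreateGrid().
     */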
987
988     KernelLaunchConfig config;
989     config.blockSize[0] = config.blockSize[1] = order;
990     config.blockSize[2] = atomsPerBlock;
991     config.gridSize[0]  = dimGrid.first;
992     config.gridSize[1]  = dimGrid.second;
993     config.stream       = pmeGpu->archSpecific->pmeStream;
994
995     if (order != 4)
996     {
997         GMX_THROW(gmx::NotImplementedError("The code for pme_order != 4 was not implemented!"));
998     }
999
1000     int  timingId;
1001     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1002     if (computeSplines)
1003     {
1004         if (spreadCharges)
1005         {
1006             timingId  = gtPME_SPLINEANDSPREAD;
1007             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernel;
1008         }
1009         else
1010         {
1011             timingId  = gtPME_SPLINE;
1012             kernelPtr = pmeGpu->programHandle_->impl_->splineKernel;
1013         }
1014     }
1015     else
1016     {
1017         timingId  = gtPME_SPREAD;
1018         kernelPtr = pmeGpu->programHandle_->impl_->spreadKernel;
1019     }
1020
1021     pme_gpu_start_timing(pmeGpu, timingId);
1022     auto      *timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1023     const auto kernelArgs  = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1024     launchGpuKernel(kernelPtr, config, timingEvent, "PME spline/spread", kernelArgs);
1025     pme_gpu_stop_timing(pmeGpu, timingId);
1026
1027     const bool copyBackGrid = spreadCharges && (pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_FFT(pmeGpu));
1028     if (copyBackGrid)
1029     {
1030         pme_gpu_copy_output_spread_grid(pmeGpu, h_grid);
1031     }
1032     const bool copyBackAtomData = computeSplines && (pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_gather(pmeGpu));
1033     if (copyBackAtomData)
1034     {
1035         pme_gpu_copy_output_spread_atom_data(pmeGpu);
1036     }
1037 }
1038
1039 void pme_gpu_solve(const PmeGpu *pmeGpu, t_complex *h_grid,
1040                    GridOrdering gridOrdering, bool computeEnergyAndVirial)
1041 {
1042     const bool   copyInputAndOutputGrid = pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_FFT(pmeGpu);
1043
1044     auto        *kernelParamsPtr = pmeGpu->kernelParams.get();
1045
1046     float       *h_gridFloat = reinterpret_cast<float *>(h_grid);
1047     if (copyInputAndOutputGrid)
1048     {
1049         copyToDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, h_gridFloat,
1050                            0, pmeGpu->archSpecific->complexGridSize,
1051                            pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
1052     }
1053
1054     int majorDim = -1, middleDim = -1, minorDim = -1;
1055     switch (gridOrdering)
1056     {
1057         case GridOrdering::YZX:
1058             majorDim  = YY;
1059             middleDim = ZZ;
1060             minorDim  = XX;
1061             break;
1062
1063         case GridOrdering::XYZ:
1064             majorDim  = XX;
1065             middleDim = YY;
1066             minorDim  = ZZ;
1067             break;
1068
1069         default:
1070             GMX_ASSERT(false, "Implement grid ordering here and below for the kernel launch");
1071     }
1072
1073 #if GMX_GPU == GMX_GPU_OPENCL
1074     const int c_solveMaxThreadsPerBlock = 1; //FIXME make a runtime decision
1075 #endif
1076
1077     const int maxBlockSize      = c_solveMaxThreadsPerBlock;
1078     const int gridLineSize      = pmeGpu->kernelParams->grid.complexGridSize[minorDim];
1079     const int gridLinesPerBlock = std::max(maxBlockSize / gridLineSize, 1);
1080     const int blocksPerGridLine = (gridLineSize + maxBlockSize - 1) / maxBlockSize;
1081     const int cellsPerBlock     = gridLineSize * gridLinesPerBlock;
1082     const int blockSize         = (cellsPerBlock + warp_size - 1) / warp_size * warp_size;
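    /* Worked example with hypothetical numbers (c_solveMaxThreadsPerBlock is architecture-specific):
     * for maxBlockSize = 256, gridLineSize = 33 and warp_size = 32, this gives gridLinesPerBlock = 7,
     * blocksPerGridLine = 1, cellsPerBlock = 231 and a warp-rounded blockSize of 256.
     */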
1083
1084
1085     KernelLaunchConfig config;
1086     config.blockSize[0] = blockSize;
1087     config.gridSize[0]  = blocksPerGridLine;
1088     // rounding up to full warps so that shuffle operations produce defined results
1089     config.gridSize[1]  = (pmeGpu->kernelParams->grid.complexGridSize[middleDim] + gridLinesPerBlock - 1) / gridLinesPerBlock;
1090     config.gridSize[2]  = pmeGpu->kernelParams->grid.complexGridSize[majorDim];
1091     config.stream       = pmeGpu->archSpecific->pmeStream;
1092
1093     int  timingId = gtPME_SOLVE;
1094     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1095     if (gridOrdering == GridOrdering::YZX)
1096     {
1097         kernelPtr = computeEnergyAndVirial ?
1098             pmeGpu->programHandle_->impl_->solveYZXEnergyKernel :
1099             pmeGpu->programHandle_->impl_->solveYZXKernel;
1100     }
1101     else if (gridOrdering == GridOrdering::XYZ)
1102     {
1103         kernelPtr = computeEnergyAndVirial ?
1104             pmeGpu->programHandle_->impl_->solveXYZEnergyKernel :
1105             pmeGpu->programHandle_->impl_->solveXYZKernel;
1106     }
1107
1108     pme_gpu_start_timing(pmeGpu, timingId);
1109     auto      *timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1110     const auto kernelArgs  = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1111     launchGpuKernel(kernelPtr, config, timingEvent, "PME solve", kernelArgs);
1112     pme_gpu_stop_timing(pmeGpu, timingId);
1113
1114     if (computeEnergyAndVirial)
1115     {
1116         copyFromDeviceBuffer(pmeGpu->staging.h_virialAndEnergy, &kernelParamsPtr->constants.d_virialAndEnergy,
1117                              0, c_virialAndEnergyCount,
1118                              pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
1119     }
1120
1121     if (copyInputAndOutputGrid)
1122     {
1123         copyFromDeviceBuffer(h_gridFloat, &kernelParamsPtr->grid.d_fourierGrid,
1124                              0, pmeGpu->archSpecific->complexGridSize,
1125                              pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
1126     }
1127 }
1128
1129 void pme_gpu_gather(PmeGpu                *pmeGpu,
1130                     PmeForceOutputHandling forceTreatment,
1131                     const float           *h_grid
1132                     )
1133 {
1134     /* Copying the input CPU forces for reduction */
1135     if (forceTreatment != PmeForceOutputHandling::Set)
1136     {
1137         pme_gpu_copy_input_forces(pmeGpu);
1138     }
1139
1140     const int    order           = pmeGpu->common->pme_order;
1141     const auto  *kernelParamsPtr = pmeGpu->kernelParams.get();
1142
1143     if (!pme_gpu_performs_FFT(pmeGpu) || pme_gpu_is_testing(pmeGpu))
1144     {
1145         pme_gpu_copy_input_gather_grid(pmeGpu, const_cast<float *>(h_grid));
1146     }
1147
1148     if (pme_gpu_is_testing(pmeGpu))
1149     {
1150         pme_gpu_copy_input_gather_atom_data(pmeGpu);
1151     }
1152
1153 #if GMX_GPU == GMX_GPU_OPENCL
1154     const int c_gatherMaxThreadsPerBlock = 1; //FIXME make a runtime decision
1155 #endif
1156     const int atomsPerBlock  =  (c_gatherMaxThreadsPerBlock / PME_SPREADGATHER_THREADS_PER_ATOM);
1157     GMX_ASSERT(!c_usePadding || !(PME_ATOM_DATA_ALIGNMENT % atomsPerBlock), "inconsistent atom data padding vs. gathering block size");
1158
1159     const int          blockCount = pmeGpu->nAtomsPadded / atomsPerBlock;
1160     auto               dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1161
1162     KernelLaunchConfig config;
1163     config.blockSize[0] = config.blockSize[1] = order;
1164     config.blockSize[2] = atomsPerBlock;
1165     config.gridSize[0]  = dimGrid.first;
1166     config.gridSize[1]  = dimGrid.second;
1167     config.stream       = pmeGpu->archSpecific->pmeStream;
1168
1169     if (order != 4)
1170     {
1171         GMX_THROW(gmx::NotImplementedError("The code for pme_order != 4 was not implemented!"));
1172     }
1173
1174     // TODO test different cache configs
1175
1176     int  timingId = gtPME_GATHER;
1177     // TODO design kernel selection getters and make PmeGpu a friend of PmeGpuProgramImpl
1178     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = (forceTreatment == PmeForceOutputHandling::Set) ?
1179         pmeGpu->programHandle_->impl_->gatherKernel :
1180         pmeGpu->programHandle_->impl_->gatherReduceWithInputKernel;
1181
1182     pme_gpu_start_timing(pmeGpu, timingId);
1183     auto      *timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1184     const auto kernelArgs  = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1185     launchGpuKernel(kernelPtr, config, timingEvent, "PME gather", kernelArgs);
1186     pme_gpu_stop_timing(pmeGpu, timingId);
1187
1188     pme_gpu_copy_output_forces(pmeGpu);
1189 }