1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2016,2017,2018,2019, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35
36 /*! \internal \file
37  *
38  * \brief This file contains internal function implementations
39  * for performing the PME calculations on GPU.
40  *
41  * Note that this file is compiled as regular C++ source in OpenCL builds, but
42  * it is treated as CUDA source in CUDA-enabled GPU builds.
43  *
44  * \author Aleksei Iupinov <a.yupinov@gmail.com>
45  * \ingroup module_ewald
46  */
47
48 #include "gmxpre.h"
49
50 #include "pme_gpu_internal.h"
51
52 #include "config.h"
53
54 #include <list>
55 #include <memory>
56 #include <string>
57
58 #include "gromacs/ewald/ewald_utils.h"
59 #include "gromacs/gpu_utils/gpu_utils.h"
60 #include "gromacs/math/invertmatrix.h"
61 #include "gromacs/math/units.h"
62 #include "gromacs/timing/gpu_timing.h"
63 #include "gromacs/utility/exceptions.h"
64 #include "gromacs/utility/fatalerror.h"
65 #include "gromacs/utility/gmxassert.h"
66 #include "gromacs/utility/logger.h"
67 #include "gromacs/utility/stringutil.h"
68
69 #if GMX_GPU == GMX_GPU_CUDA
70 #include "gromacs/gpu_utils/pmalloc_cuda.h"
71
72 #include "pme.cuh"
73 #elif GMX_GPU == GMX_GPU_OPENCL
74 #include "gromacs/gpu_utils/gmxopencl.h"
75 #endif
76
77 #include "gromacs/ewald/pme.h"
78
79 #include "pme_gpu_3dfft.h"
80 #include "pme_gpu_constants.h"
81 #include "pme_gpu_program_impl.h"
82 #include "pme_gpu_timings.h"
83 #include "pme_gpu_types.h"
84 #include "pme_gpu_types_host.h"
85 #include "pme_gpu_types_host_impl.h"
86 #include "pme_gpu_utils.h"
87 #include "pme_grid.h"
88 #include "pme_internal.h"
89 #include "pme_solve.h"
90
91 /*! \internal \brief
92  * Wrapper for getting a pointer to the plain C++ part of the GPU kernel parameters structure.
93  *
94  * \param[in] pmeGpu  The PME GPU structure.
95  * \returns The pointer to the kernel parameters.
96  */
97 static PmeGpuKernelParamsBase *pme_gpu_get_kernel_params_base_ptr(const PmeGpu *pmeGpu)
98 {
99     // reinterpret_cast is needed because the derived CUDA structure is not known in this file
100     auto *kernelParamsPtr = reinterpret_cast<PmeGpuKernelParamsBase *>(pmeGpu->kernelParams.get());
101     return kernelParamsPtr;
102 }
103
104 int pme_gpu_get_atom_data_alignment(const PmeGpu * /*unused*/)
105 {
106     //TODO: this can be simplified, as c_pmeAtomDataAlignment is now constant
107     if (c_usePadding)
108     {
109         return c_pmeAtomDataAlignment;
110     }
111     else
112     {
113         return 0;
114     }
115
116 }
117
118 int pme_gpu_get_atoms_per_warp(const PmeGpu *pmeGpu)
119 {
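    // Note: c_pmeSpreadGatherThreadsPerAtom is order^2 threads per atom (16 for the supported order 4),
    // so e.g. a 32-wide CUDA warp covers 2 atoms and a 64-wide OpenCL warp covers 4.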
120     return pmeGpu->programHandle_->impl_->warpSize / c_pmeSpreadGatherThreadsPerAtom;
121 }
122
123 void pme_gpu_synchronize(const PmeGpu *pmeGpu)
124 {
125     gpuStreamSynchronize(pmeGpu->archSpecific->pmeStream);
126 }
127
128 void pme_gpu_alloc_energy_virial(PmeGpu *pmeGpu)
129 {
130     const size_t energyAndVirialSize = c_virialAndEnergyCount * sizeof(float);
131     allocateDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy, c_virialAndEnergyCount, pmeGpu->archSpecific->context);
132     pmalloc(reinterpret_cast<void **>(&pmeGpu->staging.h_virialAndEnergy), energyAndVirialSize);
133 }
134
135 void pme_gpu_free_energy_virial(PmeGpu *pmeGpu)
136 {
137     freeDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy);
138     pfree(pmeGpu->staging.h_virialAndEnergy);
139     pmeGpu->staging.h_virialAndEnergy = nullptr;
140 }
141
142 void pme_gpu_clear_energy_virial(const PmeGpu *pmeGpu)
143 {
144     clearDeviceBufferAsync(&pmeGpu->kernelParams->constants.d_virialAndEnergy, 0,
145                            c_virialAndEnergyCount, pmeGpu->archSpecific->pmeStream);
146 }
147
148 void pme_gpu_realloc_and_copy_bspline_values(PmeGpu *pmeGpu)
149 {
150     const int splineValuesOffset[DIM] = {
151         0,
152         pmeGpu->kernelParams->grid.realGridSize[XX],
153         pmeGpu->kernelParams->grid.realGridSize[XX] + pmeGpu->kernelParams->grid.realGridSize[YY]
154     };
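    // The B-spline moduli for all three dimensions are packed into a single device buffer;
    // splineValuesOffset records where each dimension's data starts within it.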
155     memcpy(&pmeGpu->kernelParams->grid.splineValuesOffset, &splineValuesOffset, sizeof(splineValuesOffset));
156
157     const int newSplineValuesSize = pmeGpu->kernelParams->grid.realGridSize[XX] +
158         pmeGpu->kernelParams->grid.realGridSize[YY] +
159         pmeGpu->kernelParams->grid.realGridSize[ZZ];
160     const bool shouldRealloc = (newSplineValuesSize > pmeGpu->archSpecific->splineValuesSize);
161     reallocateDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, newSplineValuesSize,
162                            &pmeGpu->archSpecific->splineValuesSize, &pmeGpu->archSpecific->splineValuesSizeAlloc, pmeGpu->archSpecific->context);
163     if (shouldRealloc)
164     {
165         /* Reallocate the host buffer */
166         pfree(pmeGpu->staging.h_splineModuli);
167         pmalloc(reinterpret_cast<void **>(&pmeGpu->staging.h_splineModuli), newSplineValuesSize * sizeof(float));
168     }
169     for (int i = 0; i < DIM; i++)
170     {
171         memcpy(pmeGpu->staging.h_splineModuli + splineValuesOffset[i], pmeGpu->common->bsp_mod[i].data(), pmeGpu->common->bsp_mod[i].size() * sizeof(float));
172     }
173     /* TODO: pin original buffer instead! */
174     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, pmeGpu->staging.h_splineModuli,
175                        0, newSplineValuesSize,
176                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
177 }
178
179 void pme_gpu_free_bspline_values(const PmeGpu *pmeGpu)
180 {
181     pfree(pmeGpu->staging.h_splineModuli);
182     freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli);
183 }
184
185 void pme_gpu_realloc_forces(PmeGpu *pmeGpu)
186 {
187     const size_t newForcesSize = pmeGpu->nAtomsAlloc * DIM;
188     GMX_ASSERT(newForcesSize > 0, "Bad number of atoms in PME GPU");
189     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, newForcesSize,
190                            &pmeGpu->archSpecific->forcesSize, &pmeGpu->archSpecific->forcesSizeAlloc, pmeGpu->archSpecific->context);
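    // The host staging buffer is reserved up to the padded allocation size,
    // but only resized to the current number of atoms.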
191     pmeGpu->staging.h_forces.reserveWithPadding(pmeGpu->nAtomsAlloc);
192     pmeGpu->staging.h_forces.resizeWithPadding(pmeGpu->kernelParams->atoms.nAtoms);
193 }
194
195 void pme_gpu_free_forces(const PmeGpu *pmeGpu)
196 {
197     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces);
198 }
199
200 void pme_gpu_copy_input_forces(PmeGpu *pmeGpu)
201 {
202     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
203     float *h_forcesFloat = reinterpret_cast<float *>(pmeGpu->staging.h_forces.data());
204     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, h_forcesFloat,
205                        0, DIM * pmeGpu->kernelParams->atoms.nAtoms,
206                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
207 }
208
209 void pme_gpu_copy_output_forces(PmeGpu *pmeGpu)
210 {
211     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
212     float *h_forcesFloat = reinterpret_cast<float *>(pmeGpu->staging.h_forces.data());
213     copyFromDeviceBuffer(h_forcesFloat, &pmeGpu->kernelParams->atoms.d_forces,
214                          0, DIM * pmeGpu->kernelParams->atoms.nAtoms,
215                          pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
216 }
217
218 void pme_gpu_realloc_coordinates(const PmeGpu *pmeGpu)
219 {
220     const size_t newCoordinatesSize = pmeGpu->nAtomsAlloc * DIM;
221     GMX_ASSERT(newCoordinatesSize > 0, "Bad number of atoms in PME GPU");
222     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coordinates, newCoordinatesSize,
223                            &pmeGpu->archSpecific->coordinatesSize, &pmeGpu->archSpecific->coordinatesSizeAlloc, pmeGpu->archSpecific->context);
224     if (c_usePadding)
225     {
226         const size_t paddingIndex = DIM * pmeGpu->kernelParams->atoms.nAtoms;
227         const size_t paddingCount = DIM * pmeGpu->nAtomsAlloc - paddingIndex;
228         if (paddingCount > 0)
229         {
230             clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coordinates, paddingIndex,
231                                    paddingCount, pmeGpu->archSpecific->pmeStream);
232         }
233     }
234 }
235
236 void pme_gpu_copy_input_coordinates(const PmeGpu *pmeGpu, const rvec *h_coordinates)
237 {
238     GMX_ASSERT(h_coordinates, "Bad host-side coordinate buffer in PME GPU");
239 #if GMX_DOUBLE
240     GMX_RELEASE_ASSERT(false, "Only single precision is supported");
241     GMX_UNUSED_VALUE(h_coordinates);
242 #else
243     const float *h_coordinatesFloat = reinterpret_cast<const float *>(h_coordinates);
244     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coordinates, h_coordinatesFloat,
245                        0, pmeGpu->kernelParams->atoms.nAtoms * DIM,
246                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
247     // FIXME: a sync is required since the copied data will be used by the PP stream when a single GPU is used for both PP and PME.
248     //        Remove after adding the required event-based sync between the above H2D copy and the transform kernel.
249     pme_gpu_synchronize(pmeGpu);
250 #endif
251 }
252
253 void pme_gpu_free_coordinates(const PmeGpu *pmeGpu)
254 {
255     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coordinates);
256 }
257
258 void pme_gpu_realloc_and_copy_input_coefficients(const PmeGpu *pmeGpu, const float *h_coefficients)
259 {
260     GMX_ASSERT(h_coefficients, "Bad host-side charge buffer in PME GPU");
261     const size_t newCoefficientsSize = pmeGpu->nAtomsAlloc;
262     GMX_ASSERT(newCoefficientsSize > 0, "Bad number of atoms in PME GPU");
263     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients, newCoefficientsSize,
264                            &pmeGpu->archSpecific->coefficientsSize, &pmeGpu->archSpecific->coefficientsSizeAlloc, pmeGpu->archSpecific->context);
265     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients, const_cast<float *>(h_coefficients),
266                        0, pmeGpu->kernelParams->atoms.nAtoms,
267                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
268     if (c_usePadding)
269     {
270         const size_t paddingIndex = pmeGpu->kernelParams->atoms.nAtoms;
271         const size_t paddingCount = pmeGpu->nAtomsAlloc - paddingIndex;
272         if (paddingCount > 0)
273         {
274             clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coefficients, paddingIndex,
275                                    paddingCount, pmeGpu->archSpecific->pmeStream);
276         }
277     }
278 }
279
280 void pme_gpu_free_coefficients(const PmeGpu *pmeGpu)
281 {
282     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients);
283 }
284
285 void pme_gpu_realloc_spline_data(PmeGpu *pmeGpu)
286 {
287     const int    order             = pmeGpu->common->pme_order;
288     const int    alignment         = pme_gpu_get_atoms_per_warp(pmeGpu);
289     const size_t nAtomsPadded      = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
290     const int    newSplineDataSize = DIM * order * nAtomsPadded;
291     GMX_ASSERT(newSplineDataSize > 0, "Bad number of atoms in PME GPU");
292     /* Two arrays of the same size */
293     const bool shouldRealloc        = (newSplineDataSize > pmeGpu->archSpecific->splineDataSize);
294     int        currentSizeTemp      = pmeGpu->archSpecific->splineDataSize;
295     int        currentSizeTempAlloc = pmeGpu->archSpecific->splineDataSizeAlloc;
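    // d_theta and d_dtheta share one set of size counters; the temporary copies above are passed
    // to the first reallocation so that only the second one updates the shared counters.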
296     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta, newSplineDataSize,
297                            &currentSizeTemp, &currentSizeTempAlloc, pmeGpu->archSpecific->context);
298     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta, newSplineDataSize,
299                            &pmeGpu->archSpecific->splineDataSize, &pmeGpu->archSpecific->splineDataSizeAlloc, pmeGpu->archSpecific->context);
300     // the host side reallocation
301     if (shouldRealloc)
302     {
303         pfree(pmeGpu->staging.h_theta);
304         pmalloc(reinterpret_cast<void **>(&pmeGpu->staging.h_theta), newSplineDataSize * sizeof(float));
305         pfree(pmeGpu->staging.h_dtheta);
306         pmalloc(reinterpret_cast<void **>(&pmeGpu->staging.h_dtheta), newSplineDataSize * sizeof(float));
307     }
308 }
309
310 void pme_gpu_free_spline_data(const PmeGpu *pmeGpu)
311 {
312     /* Two arrays of the same size */
313     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta);
314     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta);
315     pfree(pmeGpu->staging.h_theta);
316     pfree(pmeGpu->staging.h_dtheta);
317 }
318
319 void pme_gpu_realloc_grid_indices(PmeGpu *pmeGpu)
320 {
321     const size_t newIndicesSize = DIM * pmeGpu->nAtomsAlloc;
322     GMX_ASSERT(newIndicesSize > 0, "Bad number of atoms in PME GPU");
323     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices, newIndicesSize,
324                            &pmeGpu->archSpecific->gridlineIndicesSize, &pmeGpu->archSpecific->gridlineIndicesSizeAlloc, pmeGpu->archSpecific->context);
325     pfree(pmeGpu->staging.h_gridlineIndices);
326     pmalloc(reinterpret_cast<void **>(&pmeGpu->staging.h_gridlineIndices), newIndicesSize * sizeof(int));
327 }
328
329 void pme_gpu_free_grid_indices(const PmeGpu *pmeGpu)
330 {
331     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices);
332     pfree(pmeGpu->staging.h_gridlineIndices);
333 }
334
335 void pme_gpu_realloc_grids(PmeGpu *pmeGpu)
336 {
337     auto     *kernelParamsPtr = pmeGpu->kernelParams.get();
338     const int newRealGridSize = kernelParamsPtr->grid.realGridSizePadded[XX] *
339         kernelParamsPtr->grid.realGridSizePadded[YY] *
340         kernelParamsPtr->grid.realGridSizePadded[ZZ];
341     const int newComplexGridSize = kernelParamsPtr->grid.complexGridSizePadded[XX] *
342         kernelParamsPtr->grid.complexGridSizePadded[YY] *
343         kernelParamsPtr->grid.complexGridSizePadded[ZZ] * 2;
344     // Multiplied by 2 because we count complex grid size for complex numbers, but all allocations/pointers are float
345     if (pmeGpu->archSpecific->performOutOfPlaceFFT)
346     {
347         /* 2 separate grids */
348         reallocateDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, newComplexGridSize,
349                                &pmeGpu->archSpecific->complexGridSize, &pmeGpu->archSpecific->complexGridSizeAlloc, pmeGpu->archSpecific->context);
350         reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid, newRealGridSize,
351                                &pmeGpu->archSpecific->realGridSize, &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->context);
352     }
353     else
354     {
355         /* A single buffer so that any grid will fit */
356         const int newGridsSize = std::max(newRealGridSize, newComplexGridSize);
357         reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid, newGridsSize,
358                                &pmeGpu->archSpecific->realGridSize, &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->context);
359         kernelParamsPtr->grid.d_fourierGrid   = kernelParamsPtr->grid.d_realGrid;
360         pmeGpu->archSpecific->complexGridSize = pmeGpu->archSpecific->realGridSize;
361         // the size might get used later for copying the grid
362     }
363 }
364
365 void pme_gpu_free_grids(const PmeGpu *pmeGpu)
366 {
367     if (pmeGpu->archSpecific->performOutOfPlaceFFT)
368     {
369         freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_fourierGrid);
370     }
371     freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid);
372 }
373
374 void pme_gpu_clear_grids(const PmeGpu *pmeGpu)
375 {
376     clearDeviceBufferAsync(&pmeGpu->kernelParams->grid.d_realGrid, 0,
377                            pmeGpu->archSpecific->realGridSize, pmeGpu->archSpecific->pmeStream);
378 }
379
380 void pme_gpu_realloc_and_copy_fract_shifts(PmeGpu *pmeGpu)
381 {
382     pme_gpu_free_fract_shifts(pmeGpu);
383
384     auto        *kernelParamsPtr = pmeGpu->kernelParams.get();
385
386     const int    nx                  = kernelParamsPtr->grid.realGridSize[XX];
387     const int    ny                  = kernelParamsPtr->grid.realGridSize[YY];
388     const int    nz                  = kernelParamsPtr->grid.realGridSize[ZZ];
389     const int    cellCount           = c_pmeNeighborUnitcellCount;
390     const int    gridDataOffset[DIM] = {0, cellCount * nx, cellCount * (nx + ny)};
391
392     memcpy(kernelParamsPtr->grid.tablesOffsets, &gridDataOffset, sizeof(gridDataOffset));
393
394     const int    newFractShiftsSize  = cellCount * (nx + ny + nz);
395
396 #if GMX_GPU == GMX_GPU_CUDA
397     initParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable,
398                          kernelParamsPtr->fractShiftsTableTexture,
399                          pmeGpu->common->fsh.data(),
400                          newFractShiftsSize);
401
402     initParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
403                          kernelParamsPtr->gridlineIndicesTableTexture,
404                          pmeGpu->common->nn.data(),
405                          newFractShiftsSize);
406 #elif GMX_GPU == GMX_GPU_OPENCL
407     // No dedicated texture/lookup-table routines for OpenCL, so plain device buffers with explicit copies are used.
408     allocateDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, newFractShiftsSize, pmeGpu->archSpecific->context);
409     allocateDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, newFractShiftsSize, pmeGpu->archSpecific->context);
410     copyToDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, pmeGpu->common->fsh.data(),
411                        0, newFractShiftsSize,
412                        pmeGpu->archSpecific->pmeStream, GpuApiCallBehavior::Async, nullptr);
413     copyToDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, pmeGpu->common->nn.data(),
414                        0, newFractShiftsSize,
415                        pmeGpu->archSpecific->pmeStream, GpuApiCallBehavior::Async, nullptr);
416 #endif
417 }
418
419 void pme_gpu_free_fract_shifts(const PmeGpu *pmeGpu)
420 {
421     auto *kernelParamsPtr = pmeGpu->kernelParams.get();
422 #if GMX_GPU == GMX_GPU_CUDA
423     destroyParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable,
424                             kernelParamsPtr->fractShiftsTableTexture);
425     destroyParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
426                             kernelParamsPtr->gridlineIndicesTableTexture);
427 #elif GMX_GPU == GMX_GPU_OPENCL
428     freeDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable);
429     freeDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable);
430 #endif
431 }
432
433 bool pme_gpu_stream_query(const PmeGpu *pmeGpu)
434 {
435     return haveStreamTasksCompleted(pmeGpu->archSpecific->pmeStream);
436 }
437
438 void pme_gpu_copy_input_gather_grid(const PmeGpu *pmeGpu, float *h_grid)
439 {
440     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid, h_grid,
441                        0, pmeGpu->archSpecific->realGridSize,
442                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
443 }
444
445 void pme_gpu_copy_output_spread_grid(const PmeGpu *pmeGpu, float *h_grid)
446 {
447     copyFromDeviceBuffer(h_grid, &pmeGpu->kernelParams->grid.d_realGrid,
448                          0, pmeGpu->archSpecific->realGridSize,
449                          pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
450     pmeGpu->archSpecific->syncSpreadGridD2H.markEvent(pmeGpu->archSpecific->pmeStream);
451 }
452
453 void pme_gpu_copy_output_spread_atom_data(const PmeGpu *pmeGpu)
454 {
455     const int    alignment        = pme_gpu_get_atoms_per_warp(pmeGpu);
456     const size_t nAtomsPadded     = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
457     const size_t splinesCount     = DIM * nAtomsPadded * pmeGpu->common->pme_order;
458     auto        *kernelParamsPtr  = pmeGpu->kernelParams.get();
459     copyFromDeviceBuffer(pmeGpu->staging.h_dtheta, &kernelParamsPtr->atoms.d_dtheta,
460                          0, splinesCount,
461                          pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
462     copyFromDeviceBuffer(pmeGpu->staging.h_theta, &kernelParamsPtr->atoms.d_theta,
463                          0, splinesCount,
464                          pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
465     copyFromDeviceBuffer(pmeGpu->staging.h_gridlineIndices, &kernelParamsPtr->atoms.d_gridlineIndices,
466                          0, kernelParamsPtr->atoms.nAtoms * DIM,
467                          pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
468 }
469
470 void pme_gpu_copy_input_gather_atom_data(const PmeGpu *pmeGpu)
471 {
472     const int    alignment       = pme_gpu_get_atoms_per_warp(pmeGpu);
473     const size_t nAtomsPadded    = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
474     const size_t splinesCount    = DIM * nAtomsPadded * pmeGpu->common->pme_order;
475     auto        *kernelParamsPtr = pmeGpu->kernelParams.get();
476     if (c_usePadding)
477     {
478         // TODO: we could clear only the padding and not the whole buffers, but this is test-exclusive code anyway
479         clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_gridlineIndices, 0,
480                                pmeGpu->nAtomsAlloc * DIM, pmeGpu->archSpecific->pmeStream);
481         clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_dtheta, 0,
482                                pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM, pmeGpu->archSpecific->pmeStream);
483         clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_theta, 0,
484                                pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM, pmeGpu->archSpecific->pmeStream);
485     }
486     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_dtheta, pmeGpu->staging.h_dtheta,
487                        0, splinesCount,
488                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
489     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_theta, pmeGpu->staging.h_theta,
490                        0, splinesCount,
491                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
492     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_gridlineIndices, pmeGpu->staging.h_gridlineIndices,
493                        0, kernelParamsPtr->atoms.nAtoms * DIM,
494                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
495 }
496
497 void pme_gpu_sync_spread_grid(const PmeGpu *pmeGpu)
498 {
499     pmeGpu->archSpecific->syncSpreadGridD2H.waitForEvent();
500 }
501
502 void pme_gpu_init_internal(PmeGpu *pmeGpu)
503 {
504 #if GMX_GPU == GMX_GPU_CUDA
505     // Prepare to use the device that this PME task was assigned earlier.
506     // Other entities, such as CUDA timing events, are known to implicitly use the device context.
507     CU_RET_ERR(cudaSetDevice(pmeGpu->deviceInfo->id), "Switching to PME CUDA device");
508 #endif
509
510     /* Allocate the target-specific structures */
511     pmeGpu->archSpecific.reset(new PmeGpuSpecific());
512     pmeGpu->kernelParams.reset(new PmeGpuKernelParams());
513
514     pmeGpu->archSpecific->performOutOfPlaceFFT = true;
515     /* This should give better performance, according to the cuFFT documentation.
516      * The performance seems to be the same though.
517      * TODO: PME could also try to pick up nice grid sizes (with factors of 2, 3, 5, 7).
518      */
519
520     // TODO: this is just a convenient reuse because programHandle_ currently is in charge of creating context
521     pmeGpu->archSpecific->context = pmeGpu->programHandle_->impl_->context;
522
523     // Timing enabling - TODO: put this in gpu_utils (even though generally this is just option handling?) and reuse in NB
524     if (GMX_GPU == GMX_GPU_CUDA)
525     {
526         /* WARNING: CUDA timings are incorrect with multiple streams.
527          *          This is the main reason why they are disabled by default.
528          */
529         // TODO: Consider turning on by default when we can detect nr of streams.
530         pmeGpu->archSpecific->useTiming = (getenv("GMX_ENABLE_GPU_TIMING") != nullptr);
531     }
532     else if (GMX_GPU == GMX_GPU_OPENCL)
533     {
534         pmeGpu->archSpecific->useTiming = (getenv("GMX_DISABLE_GPU_TIMING") == nullptr);
535     }
536
537 #if GMX_GPU == GMX_GPU_CUDA
538     pmeGpu->maxGridWidthX = pmeGpu->deviceInfo->prop.maxGridSize[0];
539 #elif GMX_GPU == GMX_GPU_OPENCL
540     pmeGpu->maxGridWidthX = INT32_MAX / 2;
541     // TODO: is there really no global work-size limit in OpenCL?
542 #endif
543
544     /* Creating a PME GPU stream:
545      * - default high priority with CUDA
546      * - no priorities implemented yet with OpenCL; see #2532
547      */
548 #if GMX_GPU == GMX_GPU_CUDA
549     cudaError_t stat;
550     int         highest_priority, lowest_priority;
551     stat = cudaDeviceGetStreamPriorityRange(&lowest_priority, &highest_priority);
552     CU_RET_ERR(stat, "PME cudaDeviceGetStreamPriorityRange failed");
553     stat = cudaStreamCreateWithPriority(&pmeGpu->archSpecific->pmeStream,
554                                         cudaStreamDefault, //cudaStreamNonBlocking,
555                                         highest_priority);
556     CU_RET_ERR(stat, "cudaStreamCreateWithPriority on the PME stream failed");
557 #elif GMX_GPU == GMX_GPU_OPENCL
558     cl_command_queue_properties queueProperties = pmeGpu->archSpecific->useTiming ? CL_QUEUE_PROFILING_ENABLE : 0;
559     cl_device_id                device_id       = pmeGpu->deviceInfo->ocl_gpu_id.ocl_device_id;
560     cl_int                      clError;
561     pmeGpu->archSpecific->pmeStream = clCreateCommandQueue(pmeGpu->archSpecific->context,
562                                                            device_id, queueProperties, &clError);
563     if (clError != CL_SUCCESS)
564     {
565         GMX_THROW(gmx::InternalError("Failed to create PME command queue"));
566     }
567 #endif
568 }
569
570 void pme_gpu_destroy_specific(const PmeGpu *pmeGpu)
571 {
572 #if GMX_GPU == GMX_GPU_CUDA
573     /* Destroy the CUDA stream */
574     cudaError_t stat = cudaStreamDestroy(pmeGpu->archSpecific->pmeStream);
575     CU_RET_ERR(stat, "PME cudaStreamDestroy error");
576 #elif GMX_GPU == GMX_GPU_OPENCL
577     cl_int clError = clReleaseCommandQueue(pmeGpu->archSpecific->pmeStream);
578     if (clError != CL_SUCCESS)
579     {
580         gmx_warning("Failed to destroy PME command queue");
581     }
582 #endif
583 }
584
585 void pme_gpu_reinit_3dfft(const PmeGpu *pmeGpu)
586 {
587     if (pme_gpu_performs_FFT(pmeGpu))
588     {
589         pmeGpu->archSpecific->fftSetup.resize(0);
590         for (int i = 0; i < pmeGpu->common->ngrids; i++)
591         {
592             pmeGpu->archSpecific->fftSetup.push_back(std::make_unique<GpuParallel3dFft>(pmeGpu));
593         }
594     }
595 }
596
597 void pme_gpu_destroy_3dfft(const PmeGpu *pmeGpu)
598 {
599     pmeGpu->archSpecific->fftSetup.resize(0);
600 }
601
602 int getSplineParamFullIndex(int order, int splineIndex, int dimIndex, int atomIndex, int atomsPerWarp)
603 {
604     if (order != c_pmeGpuOrder)
605     {
606         throw order;
607     }
608     constexpr int fixedOrder = c_pmeGpuOrder;
609     GMX_UNUSED_VALUE(fixedOrder);
610
611     const int atomWarpIndex = atomIndex % atomsPerWarp;
612     const int warpIndex     = atomIndex / atomsPerWarp;
613     int       indexBase, result;
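    // The switch below maps the runtime atomsPerWarp value onto the compile-time template
    // argument expected by getSplineParamIndexBase()/getSplineParamIndex().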
614     switch (atomsPerWarp)
615     {
616         case 1:
617             indexBase = getSplineParamIndexBase<fixedOrder, 1>(warpIndex, atomWarpIndex);
618             result    = getSplineParamIndex<fixedOrder, 1>(indexBase, dimIndex, splineIndex);
619             break;
620
621         case 2:
622             indexBase = getSplineParamIndexBase<fixedOrder, 2>(warpIndex, atomWarpIndex);
623             result    = getSplineParamIndex<fixedOrder, 2>(indexBase, dimIndex, splineIndex);
624             break;
625
626         case 4:
627             indexBase = getSplineParamIndexBase<fixedOrder, 4>(warpIndex, atomWarpIndex);
628             result    = getSplineParamIndex<fixedOrder, 4>(indexBase, dimIndex, splineIndex);
629             break;
630
631         case 8:
632             indexBase = getSplineParamIndexBase<fixedOrder, 8>(warpIndex, atomWarpIndex);
633             result    = getSplineParamIndex<fixedOrder, 8>(indexBase, dimIndex, splineIndex);
634             break;
635
636         default:
637             GMX_THROW(gmx::NotImplementedError(gmx::formatString("Test function call not unrolled for atomsPerWarp = %d in getSplineParamFullIndex", atomsPerWarp)));
638     }
639     return result;
640 }
641
642 void pme_gpu_getEnergyAndVirial(const gmx_pme_t &pme,
643                                 PmeOutput       *output)
644 {
645     const PmeGpu *pmeGpu = pme.gpu;
646     for (int j = 0; j < c_virialAndEnergyCount; j++)
647     {
648         GMX_ASSERT(std::isfinite(pmeGpu->staging.h_virialAndEnergy[j]), "PME GPU produces incorrect energy/virial.");
649     }
650
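    // Unpack the kernel output: the six virial components come first (the diagonal, then the
    // off-diagonal pairs), followed by the energy; the 0.25 and 0.5 factors convert the
    // accumulated values into the final virial and energy.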
651     unsigned int j = 0;
652     output->coulombVirial_[XX][XX] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
653     output->coulombVirial_[YY][YY] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
654     output->coulombVirial_[ZZ][ZZ] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
655     output->coulombVirial_[XX][YY] = output->coulombVirial_[YY][XX] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
656     output->coulombVirial_[XX][ZZ] = output->coulombVirial_[ZZ][XX] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
657     output->coulombVirial_[YY][ZZ] = output->coulombVirial_[ZZ][YY] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
658     output->coulombEnergy_         = 0.5F * pmeGpu->staging.h_virialAndEnergy[j++];
659 }
660
661 /*! \brief Sets the force-related members in \p output
662  *
663  * \param[in]   pmeGpu      PME GPU data structure
664  * \param[out]  output      Pointer to PME output data structure
665  */
666 static void pme_gpu_getForceOutput(PmeGpu    *pmeGpu,
667                                    PmeOutput *output)
668 {
669     output->haveForceOutput_ = !pmeGpu->settings.useGpuForceReduction;
670     if (output->haveForceOutput_)
671     {
672         output->forces_      =  pmeGpu->staging.h_forces;
673     }
674 }
675
676 PmeOutput pme_gpu_getOutput(const gmx_pme_t &pme,
677                             const int        flags)
678 {
679     PmeGpu    *pmeGpu = pme.gpu;
680     const bool haveComputedEnergyAndVirial = (flags & GMX_PME_CALC_ENER_VIR) != 0;
681
682     PmeOutput  output;
683
684     pme_gpu_getForceOutput(pmeGpu, &output);
685
686     // The caller knows from the flags that the energy and the virial are not usable
687     // on the else branch
688     if (haveComputedEnergyAndVirial)
689     {
690         if (pme_gpu_performs_solve(pmeGpu))
691         {
692             pme_gpu_getEnergyAndVirial(pme, &output);
693         }
694         else
695         {
696             get_pme_ener_vir_q(pme.solve_work, pme.nthread, &output);
697         }
698     }
699     return output;
700 }
701
702 void pme_gpu_update_input_box(PmeGpu gmx_unused       *pmeGpu,
703                               const matrix gmx_unused  box)
704 {
705 #if GMX_DOUBLE
706     GMX_THROW(gmx::NotImplementedError("PME is implemented for single-precision only on GPU"));
707 #else
708     matrix  scaledBox;
709     pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
710     auto   *kernelParamsPtr      = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
711     kernelParamsPtr->current.boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
712     GMX_ASSERT(kernelParamsPtr->current.boxVolume != 0.0F, "Zero volume of the unit cell");
713     matrix recipBox;
714     gmx::invertBoxMatrix(scaledBox, recipBox);
715
716     /* The GPU recipBox is transposed as compared to the CPU recipBox.
717      * Spread uses matrix columns (while solve and gather use rows).
718      * There is no particular reason for this; it might be further rethought/optimized for better access patterns.
719      */
720     const real newRecipBox[DIM][DIM] =
721     {
722         {recipBox[XX][XX], recipBox[YY][XX], recipBox[ZZ][XX]},
723         {             0.0, recipBox[YY][YY], recipBox[ZZ][YY]},
724         {             0.0,              0.0, recipBox[ZZ][ZZ]}
725     };
726     memcpy(kernelParamsPtr->current.recipBox, newRecipBox, sizeof(matrix));
727 #endif
728 }
729
730 /*! \brief \libinternal
731  * (Re-)initializes all the PME GPU data related to the grid size and cut-off.
732  *
733  * \param[in] pmeGpu            The PME GPU structure.
734  */
735 static void pme_gpu_reinit_grids(PmeGpu *pmeGpu)
736 {
737     auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
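    // ewaldFactor is pi^2 / ewaldcoeff_q^2; in the solve kernel it multiplies m^2 in the
    // exp(-pi^2 m^2 / beta^2) factor of the reciprocal-space sum (assuming the standard
    // smooth-PME formulation).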
738     kernelParamsPtr->grid.ewaldFactor = (M_PI * M_PI) / (pmeGpu->common->ewaldcoeff_q * pmeGpu->common->ewaldcoeff_q);
739
740     /* The grid size variants */
741     for (int i = 0; i < DIM; i++)
742     {
743         kernelParamsPtr->grid.realGridSize[i]       = pmeGpu->common->nk[i];
744         kernelParamsPtr->grid.realGridSizeFP[i]     = static_cast<float>(kernelParamsPtr->grid.realGridSize[i]);
745         kernelParamsPtr->grid.realGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
746
747         // The complex grid currently uses no padding;
748         // if it starts to do so, then another test should be added for that
749         kernelParamsPtr->grid.complexGridSize[i]       = kernelParamsPtr->grid.realGridSize[i];
750         kernelParamsPtr->grid.complexGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
751     }
752     /* FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
753     if (!pme_gpu_performs_FFT(pmeGpu))
754     {
755         // This allows for GPU spreading grid and CPU fftgrid to have the same layout, so that we can copy the data directly
756         kernelParamsPtr->grid.realGridSizePadded[ZZ] = (kernelParamsPtr->grid.realGridSize[ZZ] / 2 + 1) * 2;
757     }
758
759     /* GPU FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
760     kernelParamsPtr->grid.complexGridSize[ZZ] /= 2;
761     kernelParamsPtr->grid.complexGridSize[ZZ]++;
762     kernelParamsPtr->grid.complexGridSizePadded[ZZ] = kernelParamsPtr->grid.complexGridSize[ZZ];
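    // For example, a real-space minor dimension of 64 cells maps to 64 / 2 + 1 = 33 complex cells.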
763
764     pme_gpu_realloc_and_copy_fract_shifts(pmeGpu);
765     pme_gpu_realloc_and_copy_bspline_values(pmeGpu);
766     pme_gpu_realloc_grids(pmeGpu);
767     pme_gpu_reinit_3dfft(pmeGpu);
768 }
769
770 /* Several GPU functions that refer to the CPU PME data live here.
771  * We would like to keep these away from the GPU-framework specific code for clarity,
772  * as well as to avoid compilation issues with MPI.
773  */
774
775 /*! \brief \libinternal
776  * Copies everything useful from the PME CPU to the PME GPU structure.
777  * The goal is to minimize interaction with the PME CPU structure in the GPU code.
778  *
779  * \param[in] pme         The PME structure.
780  */
781 static void pme_gpu_copy_common_data_from(const gmx_pme_t *pme)
782 {
783     /* TODO: Consider refactoring the CPU PME code to use the same structure,
784      * so that this function becomes 2 lines */
785     PmeGpu *pmeGpu             = pme->gpu;
786     pmeGpu->common->ngrids        = pme->ngrids;
787     pmeGpu->common->epsilon_r     = pme->epsilon_r;
788     pmeGpu->common->ewaldcoeff_q  = pme->ewaldcoeff_q;
789     pmeGpu->common->nk[XX]        = pme->nkx;
790     pmeGpu->common->nk[YY]        = pme->nky;
791     pmeGpu->common->nk[ZZ]        = pme->nkz;
792     pmeGpu->common->pme_order     = pme->pme_order;
793     if (pmeGpu->common->pme_order != c_pmeGpuOrder)
794     {
795         GMX_THROW(gmx::NotImplementedError("pme_order != 4 is not implemented!"));
796     }
797     for (int i = 0; i < DIM; i++)
798     {
799         pmeGpu->common->bsp_mod[i].assign(pme->bsp_mod[i], pme->bsp_mod[i] + pmeGpu->common->nk[i]);
800     }
801     const int cellCount = c_pmeNeighborUnitcellCount;
802     pmeGpu->common->fsh.resize(0);
803     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshx, pme->fshx + cellCount * pme->nkx);
804     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshy, pme->fshy + cellCount * pme->nky);
805     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshz, pme->fshz + cellCount * pme->nkz);
806     pmeGpu->common->nn.resize(0);
807     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnx, pme->nnx + cellCount * pme->nkx);
808     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nny, pme->nny + cellCount * pme->nky);
809     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnz, pme->nnz + cellCount * pme->nkz);
810     pmeGpu->common->runMode   = pme->runMode;
811     pmeGpu->common->boxScaler = pme->boxScaler;
812 }
813
814 /*! \libinternal \brief
815  * Initializes the PME GPU data at the beginning of the run.
816  * TODO: this should become PmeGpu::PmeGpu()
817  *
818  * \param[in,out] pme            The PME structure.
819  * \param[in,out] gpuInfo        The GPU information structure.
820  * \param[in]     pmeGpuProgram  The handle to the program/kernel data created outside (e.g. in unit tests/runner)
821  */
822 static void pme_gpu_init(gmx_pme_t               *pme,
823                          const gmx_device_info_t *gpuInfo,
824                          PmeGpuProgramHandle      pmeGpuProgram)
825 {
826     pme->gpu          = new PmeGpu();
827     PmeGpu *pmeGpu = pme->gpu;
828     changePinningPolicy(&pmeGpu->staging.h_forces, pme_get_pinning_policy());
829     pmeGpu->common = std::make_shared<PmeShared>();
830
831     /* These settings are set here for the whole run; dynamic ones are set in pme_gpu_reinit() */
832     /* A convenience variable. */
833     pmeGpu->settings.useDecomposition = (pme->nnodes == 1);
834     /* TODO: CPU gather with GPU spread is broken due to different theta/dtheta layout. */
835     pmeGpu->settings.performGPUGather = true;
836     // By default GPU-side reduction is off (explicitly set here for tests, otherwise reset per-step)
837     pmeGpu->settings.useGpuForceReduction = false;
838
839     pme_gpu_set_testing(pmeGpu, false);
840
841     pmeGpu->deviceInfo = gpuInfo;
842     GMX_ASSERT(pmeGpuProgram != nullptr, "GPU kernels must be already compiled");
843     pmeGpu->programHandle_ = pmeGpuProgram;
844
845     pmeGpu->initializedClfftLibrary_ = std::make_unique<gmx::ClfftInitializer>();
846
847     pme_gpu_init_internal(pmeGpu);
848     pme_gpu_alloc_energy_virial(pmeGpu);
849
850     pme_gpu_copy_common_data_from(pme);
851
852     GMX_ASSERT(pmeGpu->common->epsilon_r != 0.0F, "PME GPU: bad electrostatic coefficient");
853
854     auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
855     kernelParamsPtr->constants.elFactor = ONE_4PI_EPS0 / pmeGpu->common->epsilon_r;
856 }
857
858 void pme_gpu_transform_spline_atom_data(const PmeGpu *pmeGpu, const PmeAtomComm *atc,
859                                         PmeSplineDataType type, int dimIndex, PmeLayoutTransform transform)
860 {
861     // The GPU atom spline data is laid out in a different way currently than the CPU one.
862     // This function converts the data from GPU to CPU layout (in the host memory).
863     // It is only intended for testing purposes so far.
864     // Ideally we should use similar layouts on CPU and GPU if we care about mixed modes and their performance
865     // (e.g. spreading on GPU, gathering on CPU).
866     GMX_RELEASE_ASSERT(atc->nthread == 1, "Only the serial PME data layout is supported");
867     const uintmax_t threadIndex  = 0;
868     const auto      atomCount    = pme_gpu_get_kernel_params_base_ptr(pmeGpu)->atoms.nAtoms;
869     const auto      atomsPerWarp = pme_gpu_get_atoms_per_warp(pmeGpu);
870     const auto      pmeOrder     = pmeGpu->common->pme_order;
871     GMX_ASSERT(pmeOrder == c_pmeGpuOrder, "Only PME order 4 is implemented");
872
873     real           *cpuSplineBuffer;
874     float          *h_splineBuffer;
875     switch (type)
876     {
877         case PmeSplineDataType::Values:
878             cpuSplineBuffer = atc->spline[threadIndex].theta.coefficients[dimIndex];
879             h_splineBuffer  = pmeGpu->staging.h_theta;
880             break;
881
882         case PmeSplineDataType::Derivatives:
883             cpuSplineBuffer = atc->spline[threadIndex].dtheta.coefficients[dimIndex];
884             h_splineBuffer  = pmeGpu->staging.h_dtheta;
885             break;
886
887         default:
888             GMX_THROW(gmx::InternalError("Unknown spline data type"));
889     }
890
891     for (auto atomIndex = 0; atomIndex < atomCount; atomIndex++)
892     {
893         for (auto orderIndex = 0; orderIndex < pmeOrder; orderIndex++)
894         {
895             const auto gpuValueIndex = getSplineParamFullIndex(pmeOrder, orderIndex, dimIndex, atomIndex, atomsPerWarp);
896             const auto cpuValueIndex = atomIndex * pmeOrder + orderIndex;
897             GMX_ASSERT(cpuValueIndex < atomCount * pmeOrder, "Atom spline data index out of bounds (while transforming GPU data layout for host)");
898             switch (transform)
899             {
900                 case PmeLayoutTransform::GpuToHost:
901                     cpuSplineBuffer[cpuValueIndex] = h_splineBuffer[gpuValueIndex];
902                     break;
903
904                 case PmeLayoutTransform::HostToGpu:
905                     h_splineBuffer[gpuValueIndex] = cpuSplineBuffer[cpuValueIndex];
906                     break;
907
908                 default:
909                     GMX_THROW(gmx::InternalError("Unknown layout transform"));
910             }
911         }
912     }
913 }
914
915 void pme_gpu_get_real_grid_sizes(const PmeGpu *pmeGpu, gmx::IVec *gridSize, gmx::IVec *paddedGridSize)
916 {
917     GMX_ASSERT(gridSize != nullptr, "");
918     GMX_ASSERT(paddedGridSize != nullptr, "");
919     GMX_ASSERT(pmeGpu != nullptr, "");
920     auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
921     for (int i = 0; i < DIM; i++)
922     {
923         (*gridSize)[i]       = kernelParamsPtr->grid.realGridSize[i];
924         (*paddedGridSize)[i] = kernelParamsPtr->grid.realGridSizePadded[i];
925     }
926 }
927
928 void pme_gpu_reinit(gmx_pme_t               *pme,
929                     const gmx_device_info_t *gpuInfo,
930                     PmeGpuProgramHandle      pmeGpuProgram)
931 {
932     if (!pme_gpu_active(pme))
933     {
934         return;
935     }
936
937     if (!pme->gpu)
938     {
939         /* First-time initialization */
940         pme_gpu_init(pme, gpuInfo, pmeGpuProgram);
941     }
942     else
943     {
944         /* After this call nothing in the GPU code should refer to the gmx_pme_t *pme itself - until the next pme_gpu_reinit */
945         pme_gpu_copy_common_data_from(pme);
946     }
947     /* GPU FFT will only be used for a single rank. */
948     pme->gpu->settings.performGPUFFT   = (pme->gpu->common->runMode == PmeRunMode::GPU) && !pme_gpu_uses_dd(pme->gpu);
949     pme->gpu->settings.performGPUSolve = (pme->gpu->common->runMode == PmeRunMode::GPU);
950
951     /* Reinit active timers */
952     pme_gpu_reinit_timings(pme->gpu);
953
954     pme_gpu_reinit_grids(pme->gpu);
955     // Note: if timing the reinit launch overhead becomes more relevant
956     // (e.g. with regular PP-PME re-balancing), we should pass wcycle here.
957     pme_gpu_reinit_computation(pme, nullptr);
958     /* Clear the previous box - doesn't hurt, and forces the PME CPU recipbox
959      * update for mixed mode on grid switch. TODO: use shared recipbox field.
960      */
961     std::memset(pme->gpu->common->previousBox, 0, sizeof(pme->gpu->common->previousBox));
962 }
963
964 void pme_gpu_destroy(PmeGpu *pmeGpu)
965 {
966     /* Free lots of data */
967     pme_gpu_free_energy_virial(pmeGpu);
968     pme_gpu_free_bspline_values(pmeGpu);
969     pme_gpu_free_forces(pmeGpu);
970     pme_gpu_free_coordinates(pmeGpu);
971     pme_gpu_free_coefficients(pmeGpu);
972     pme_gpu_free_spline_data(pmeGpu);
973     pme_gpu_free_grid_indices(pmeGpu);
974     pme_gpu_free_fract_shifts(pmeGpu);
975     pme_gpu_free_grids(pmeGpu);
976
977     pme_gpu_destroy_3dfft(pmeGpu);
978
979     /* Free the GPU-framework specific data last */
980     pme_gpu_destroy_specific(pmeGpu);
981
982     delete pmeGpu;
983 }
984
985 void pme_gpu_reinit_atoms(PmeGpu *pmeGpu, const int nAtoms, const real *charges)
986 {
987     auto      *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
988     kernelParamsPtr->atoms.nAtoms = nAtoms;
989     const int  alignment = pme_gpu_get_atom_data_alignment(pmeGpu);
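    // Round the atom count up to a multiple of the alignment; e.g. with a (hypothetical)
    // alignment of 64, 1000 atoms would be padded to 1024.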
990     pmeGpu->nAtomsPadded = ((nAtoms + alignment - 1) / alignment) * alignment;
991     const int  nAtomsAlloc   = c_usePadding ? pmeGpu->nAtomsPadded : nAtoms;
992     const bool haveToRealloc = (pmeGpu->nAtomsAlloc < nAtomsAlloc); /* This check might be redundant, but is logical */
993     pmeGpu->nAtomsAlloc = nAtomsAlloc;
994
995 #if GMX_DOUBLE
996     GMX_RELEASE_ASSERT(false, "Only single precision supported");
997     GMX_UNUSED_VALUE(charges);
998 #else
999     pme_gpu_realloc_and_copy_input_coefficients(pmeGpu, reinterpret_cast<const float *>(charges));
1000     /* This could also be made conditional on haveToRealloc, but the copy always needs to be performed */
1001 #endif
1002
1003     if (haveToRealloc)
1004     {
1005         pme_gpu_realloc_coordinates(pmeGpu);
1006         pme_gpu_realloc_forces(pmeGpu);
1007         pme_gpu_realloc_spline_data(pmeGpu);
1008         pme_gpu_realloc_grid_indices(pmeGpu);
1009     }
1010 }
1011
1012 void pme_gpu_3dfft(const PmeGpu *pmeGpu, gmx_fft_direction dir, int grid_index)
1013 {
1014     int   timerId = (dir == GMX_FFT_REAL_TO_COMPLEX) ? gtPME_FFT_R2C : gtPME_FFT_C2R;
1015
1016     pme_gpu_start_timing(pmeGpu, timerId);
1017     pmeGpu->archSpecific->fftSetup[grid_index]->perform3dFft(dir, pme_gpu_fetch_timing_event(pmeGpu, timerId));
1018     pme_gpu_stop_timing(pmeGpu, timerId);
1019 }
1020
1021 /*! \brief
1022  * Given possibly large \p blockCount, returns a compact 1D or 2D grid for kernel scheduling,
1023  * to minimize the number of unused blocks.
1024  */
1025 std::pair<int, int> inline pmeGpuCreateGrid(const PmeGpu *pmeGpu, int blockCount)
1026 {
1027     // How many maximum widths in X do we need (hopefully just one)
1028     const int minRowCount = (blockCount + pmeGpu->maxGridWidthX - 1) / pmeGpu->maxGridWidthX;
1029     // Trying to make things even
1030     const int colCount = (blockCount + minRowCount - 1) / minRowCount;
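    // Example with hypothetical numbers: blockCount = 100000 and maxGridWidthX = 65535 give
    // minRowCount = 2 and colCount = 50000, i.e. a 50000 x 2 launch grid with no unused blocks.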
1031     GMX_ASSERT((colCount * minRowCount - blockCount) >= 0, "pmeGpuCreateGrid: totally wrong");
1032     GMX_ASSERT((colCount * minRowCount - blockCount) < minRowCount, "pmeGpuCreateGrid: excessive blocks");
1033     return std::pair<int, int>(colCount, minRowCount);
1034 }
1035
1036 void pme_gpu_spread(const PmeGpu    *pmeGpu,
1037                     int gmx_unused   gridIndex,
1038                     real            *h_grid,
1039                     bool             computeSplines,
1040                     bool             spreadCharges)
1041 {
1042     GMX_ASSERT(computeSplines || spreadCharges, "PME spline/spread kernel has invalid input (nothing to do)");
1043     const auto   *kernelParamsPtr = pmeGpu->kernelParams.get();
1044     GMX_ASSERT(kernelParamsPtr->atoms.nAtoms > 0, "No atom data in PME GPU spread");
1045
1046     const size_t blockSize  = pmeGpu->programHandle_->impl_->spreadWorkGroupSize;
1047
1048     const int    order         = pmeGpu->common->pme_order;
1049     GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1050     const int    atomsPerBlock = blockSize / c_pmeSpreadGatherThreadsPerAtom;
1051     // TODO: pick smaller block size in runtime if needed
1052     // (e.g. on 660 Ti where 50% occupancy is ~25% faster than 100% occupancy with RNAse (~17.8k atoms))
1053     // If doing so, change atomsPerBlock in the kernels as well.
1054     // TODO: test varying block sizes on modern arch-s as well
1055     // TODO: also consider using cudaFuncSetCacheConfig() for preferring shared memory on older architectures
1056     //(for spline data mostly, together with varying PME_GPU_PARALLEL_SPLINE define)
1057     GMX_ASSERT(!c_usePadding || !(c_pmeAtomDataAlignment % atomsPerBlock), "inconsistent atom data padding vs. spreading block size");
1058
1059     const int          blockCount = pmeGpu->nAtomsPadded / atomsPerBlock;
1060     auto               dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1061
1062     KernelLaunchConfig config;
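    // The block is laid out as (order x order x atomsPerBlock) threads, i.e. order^2 threads
    // cooperate on each atom, consistent with c_pmeSpreadGatherThreadsPerAtom used above.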
1063     config.blockSize[0] = config.blockSize[1] = order;
1064     config.blockSize[2] = atomsPerBlock;
1065     config.gridSize[0]  = dimGrid.first;
1066     config.gridSize[1]  = dimGrid.second;
1067     config.stream       = pmeGpu->archSpecific->pmeStream;
1068
1069     int  timingId;
1070     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1071     if (computeSplines)
1072     {
1073         if (spreadCharges)
1074         {
1075             timingId  = gtPME_SPLINEANDSPREAD;
1076             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernel;
1077         }
1078         else
1079         {
1080             timingId  = gtPME_SPLINE;
1081             kernelPtr = pmeGpu->programHandle_->impl_->splineKernel;
1082         }
1083     }
1084     else
1085     {
1086         timingId  = gtPME_SPREAD;
1087         kernelPtr = pmeGpu->programHandle_->impl_->spreadKernel;
1088     }
1089
1090     pme_gpu_start_timing(pmeGpu, timingId);
1091     auto      *timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1092 #if c_canEmbedBuffers
1093     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1094 #else
1095     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr,
1096                                                       &kernelParamsPtr->atoms.d_theta,
1097                                                       &kernelParamsPtr->atoms.d_dtheta,
1098                                                       &kernelParamsPtr->atoms.d_gridlineIndices,
1099                                                       &kernelParamsPtr->grid.d_realGrid,
1100                                                       &kernelParamsPtr->grid.d_fractShiftsTable,
1101                                                       &kernelParamsPtr->grid.d_gridlineIndicesTable,
1102                                                       &kernelParamsPtr->atoms.d_coefficients,
1103                                                       &kernelParamsPtr->atoms.d_coordinates
1104                                                       );
1105 #endif
1106
1107     launchGpuKernel(kernelPtr, config, timingEvent, "PME spline/spread", kernelArgs);
1108     pme_gpu_stop_timing(pmeGpu, timingId);
1109
1110     const bool copyBackGrid = spreadCharges && (pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_FFT(pmeGpu));
1111     if (copyBackGrid)
1112     {
1113         pme_gpu_copy_output_spread_grid(pmeGpu, h_grid);
1114     }
1115     const bool copyBackAtomData = computeSplines && (pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_gather(pmeGpu));
1116     if (copyBackAtomData)
1117     {
1118         pme_gpu_copy_output_spread_atom_data(pmeGpu);
1119     }
1120 }
1121
1122 void pme_gpu_solve(const PmeGpu *pmeGpu, t_complex *h_grid,
1123                    GridOrdering gridOrdering, bool computeEnergyAndVirial)
1124 {
1125     const bool   copyInputAndOutputGrid = pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_FFT(pmeGpu);
1126
1127     auto        *kernelParamsPtr = pmeGpu->kernelParams.get();
1128
1129     float       *h_gridFloat = reinterpret_cast<float *>(h_grid);
1130     if (copyInputAndOutputGrid)
1131     {
1132         copyToDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, h_gridFloat,
1133                            0, pmeGpu->archSpecific->complexGridSize,
1134                            pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
1135     }
1136
1137     int majorDim = -1, middleDim = -1, minorDim = -1;
1138     switch (gridOrdering)
1139     {
1140         case GridOrdering::YZX:
1141             majorDim  = YY;
1142             middleDim = ZZ;
1143             minorDim  = XX;
1144             break;
1145
1146         case GridOrdering::XYZ:
1147             majorDim  = XX;
1148             middleDim = YY;
1149             minorDim  = ZZ;
1150             break;
1151
1152         default:
1153             GMX_ASSERT(false, "Implement grid ordering here and below for the kernel launch");
1154     }
1155
1156     const int    maxBlockSize       = pmeGpu->programHandle_->impl_->solveMaxWorkGroupSize;
1157
1158     const int    gridLineSize       = pmeGpu->kernelParams->grid.complexGridSize[minorDim];
1159     const int    gridLinesPerBlock  = std::max(maxBlockSize / gridLineSize, 1);
1160     const int    blocksPerGridLine  = (gridLineSize + maxBlockSize - 1) / maxBlockSize;
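    // A block processes either several whole grid lines (when a line fits into maxBlockSize)
    // or an equal share of a single longer grid line.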
1161     int          cellsPerBlock;
1162     if (blocksPerGridLine == 1)
1163     {
1164         cellsPerBlock               = gridLineSize * gridLinesPerBlock;
1165     }
1166     else
1167     {
1168         cellsPerBlock               = (gridLineSize + blocksPerGridLine - 1) / blocksPerGridLine;
1169     }
1170     const int    warpSize           = pmeGpu->programHandle_->impl_->warpSize;
1171     const int    blockSize          = (cellsPerBlock + warpSize - 1) / warpSize * warpSize;
1172
1173     static_assert(GMX_GPU != GMX_GPU_CUDA || c_solveMaxWarpsPerBlock/2 >= 4,
1174                   "The CUDA solve energy kernels need at least 4 warps. "
1175                   "Here we launch at least half of the max warps.");
1176
1177     KernelLaunchConfig config;
1178     config.blockSize[0] = blockSize;
1179     config.gridSize[0]  = blocksPerGridLine;
1180     // rounding up to full warps so that shuffle operations produce defined results
1181     config.gridSize[1]  = (pmeGpu->kernelParams->grid.complexGridSize[middleDim] + gridLinesPerBlock - 1) / gridLinesPerBlock;
1182     config.gridSize[2]  = pmeGpu->kernelParams->grid.complexGridSize[majorDim];
1183     config.stream       = pmeGpu->archSpecific->pmeStream;
1184
1185     int  timingId = gtPME_SOLVE;
1186     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1187     if (gridOrdering == GridOrdering::YZX)
1188     {
1189         kernelPtr = computeEnergyAndVirial ?
1190             pmeGpu->programHandle_->impl_->solveYZXEnergyKernel :
1191             pmeGpu->programHandle_->impl_->solveYZXKernel;
1192     }
1193     else if (gridOrdering == GridOrdering::XYZ)
1194     {
1195         kernelPtr = computeEnergyAndVirial ?
1196             pmeGpu->programHandle_->impl_->solveXYZEnergyKernel :
1197             pmeGpu->programHandle_->impl_->solveXYZKernel;
1198     }
1199
1200     pme_gpu_start_timing(pmeGpu, timingId);
1201     auto      *timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1202 #if c_canEmbedBuffers
1203     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1204 #else
1205     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr,
1206                                                       &kernelParamsPtr->grid.d_splineModuli,
1207                                                       &kernelParamsPtr->constants.d_virialAndEnergy,
1208                                                       &kernelParamsPtr->grid.d_fourierGrid);
1209 #endif
1210     launchGpuKernel(kernelPtr, config, timingEvent, "PME solve", kernelArgs);
1211     pme_gpu_stop_timing(pmeGpu, timingId);
1212
1213     if (computeEnergyAndVirial)
1214     {
1215         copyFromDeviceBuffer(pmeGpu->staging.h_virialAndEnergy, &kernelParamsPtr->constants.d_virialAndEnergy,
1216                              0, c_virialAndEnergyCount,
1217                              pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
1218     }
1219
1220     if (copyInputAndOutputGrid)
1221     {
1222         copyFromDeviceBuffer(h_gridFloat, &kernelParamsPtr->grid.d_fourierGrid,
1223                              0, pmeGpu->archSpecific->complexGridSize,
1224                              pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
1225     }
1226 }
1227
1228 void pme_gpu_gather(PmeGpu                *pmeGpu,
1229                     PmeForceOutputHandling forceTreatment,
1230                     const float           *h_grid)
1231 {
1232     /* Copying the input CPU forces for reduction */
1233     if (forceTreatment != PmeForceOutputHandling::Set)
1234     {
1235         pme_gpu_copy_input_forces(pmeGpu);
1236     }
1237
1238     if (!pme_gpu_performs_FFT(pmeGpu) || pme_gpu_is_testing(pmeGpu))
1239     {
1240         pme_gpu_copy_input_gather_grid(pmeGpu, const_cast<float *>(h_grid));
1241     }
1242
1243     if (pme_gpu_is_testing(pmeGpu))
1244     {
1245         pme_gpu_copy_input_gather_atom_data(pmeGpu);
1246     }
1247
1248     const size_t blockSize     = pmeGpu->programHandle_->impl_->gatherWorkGroupSize;
1249     const int    atomsPerBlock = blockSize / c_pmeSpreadGatherThreadsPerAtom;
1250     GMX_ASSERT(!c_usePadding || !(c_pmeAtomDataAlignment % atomsPerBlock), "inconsistent atom data padding vs. gathering block size");
1251
1252     const int          blockCount = pmeGpu->nAtomsPadded / atomsPerBlock;
1253     auto               dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1254
1255
1256     const int order = pmeGpu->common->pme_order;
1257     GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1258
1259     KernelLaunchConfig config;
1260     config.blockSize[0] = config.blockSize[1] = order;
1261     config.blockSize[2] = atomsPerBlock;
1262     config.gridSize[0]  = dimGrid.first;
1263     config.gridSize[1]  = dimGrid.second;
1264     config.stream       = pmeGpu->archSpecific->pmeStream;
1265
1266     // TODO test different cache configs
1267
1268     int  timingId = gtPME_GATHER;
1269     // TODO design kernel selection getters and make PmeGpu a friend of PmeGpuProgramImpl
1270     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = (forceTreatment == PmeForceOutputHandling::Set) ?
1271         pmeGpu->programHandle_->impl_->gatherKernel :
1272         pmeGpu->programHandle_->impl_->gatherReduceWithInputKernel;
1273
1274     pme_gpu_start_timing(pmeGpu, timingId);
1275     auto       *timingEvent     = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1276     const auto *kernelParamsPtr = pmeGpu->kernelParams.get();
1277 #if c_canEmbedBuffers
1278     const auto  kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1279 #else
1280     const auto  kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr,
1281                                                        &kernelParamsPtr->atoms.d_coefficients,
1282                                                        &kernelParamsPtr->grid.d_realGrid,
1283                                                        &kernelParamsPtr->atoms.d_theta,
1284                                                        &kernelParamsPtr->atoms.d_dtheta,
1285                                                        &kernelParamsPtr->atoms.d_gridlineIndices,
1286                                                        &kernelParamsPtr->atoms.d_forces);
1287 #endif
1288     launchGpuKernel(kernelPtr, config, timingEvent, "PME gather", kernelArgs);
1289     pme_gpu_stop_timing(pmeGpu, timingId);
1290
1291     if (pmeGpu->settings.useGpuForceReduction)
1292     {
1293         pmeGpu->archSpecific->pmeForcesReady.markEvent(pmeGpu->archSpecific->pmeStream);
1294     }
1295     else
1296     {
1297         pme_gpu_copy_output_forces(pmeGpu);
1298     }
1299 }
1300
1301 DeviceBuffer<float> pme_gpu_get_kernelparam_coordinates(const PmeGpu *pmeGpu)
1302 {
1303     GMX_ASSERT(pmeGpu && pmeGpu->kernelParams, "PME GPU device buffer was requested in non-GPU build or before the GPU PME was initialized.");
1304
1305     return pmeGpu->kernelParams->atoms.d_coordinates;
1306 }
1307
1308 void * pme_gpu_get_kernelparam_forces(const PmeGpu *pmeGpu)
1309 {
1310     if (pmeGpu && pmeGpu->kernelParams)
1311     {
1312         return pmeGpu->kernelParams->atoms.d_forces;
1313     }
1314     else
1315     {
1316         return nullptr;
1317     }
1318 }
1319
1320 void * pme_gpu_get_stream(const PmeGpu *pmeGpu)
1321 {
1322     if (pmeGpu)
1323     {
1324         return static_cast<void *>(&pmeGpu->archSpecific->pmeStream);
1325     }
1326     else
1327     {
1328         return nullptr;
1329     }
1330 }
1331
1332 GpuEventSynchronizer *pme_gpu_get_forces_ready_synchronizer(const PmeGpu *pmeGpu)
1333 {
1334     if (pmeGpu && pmeGpu->kernelParams)
1335     {
1336         return &pmeGpu->archSpecific->pmeForcesReady;
1337     }
1338     else
1339     {
1340         return nullptr;
1341     }
1342 }