[alexxy/gromacs.git] / src / gromacs / ewald / pme_gpu_internal.cpp
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2016,2017,2018,2019,2020, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35
36 /*! \internal \file
37  *
38  * \brief This file contains internal function implementations
39  * for performing the PME calculations on GPU.
40  *
41  * Note that this file is compiled as regular C++ source in OpenCL builds, but
42  * it is treated as CUDA source in CUDA-enabled GPU builds.
43  *
44  * \author Aleksei Iupinov <a.yupinov@gmail.com>
45  * \ingroup module_ewald
46  */
47
48 #include "gmxpre.h"
49
50 #include "pme_gpu_internal.h"
51
52 #include "config.h"
53
54 #include <list>
55 #include <memory>
56 #include <string>
57
58 #include "gromacs/ewald/ewald_utils.h"
59 #include "gromacs/gpu_utils/device_context.h"
60 #include "gromacs/gpu_utils/device_stream.h"
61 #include "gromacs/gpu_utils/gpu_utils.h"
62 #include "gromacs/math/invertmatrix.h"
63 #include "gromacs/math/units.h"
64 #include "gromacs/timing/gpu_timing.h"
65 #include "gromacs/utility/exceptions.h"
66 #include "gromacs/utility/fatalerror.h"
67 #include "gromacs/utility/gmxassert.h"
68 #include "gromacs/utility/logger.h"
69 #include "gromacs/utility/stringutil.h"
70
71 #if GMX_GPU == GMX_GPU_CUDA
72 #    include "gromacs/gpu_utils/pmalloc_cuda.h"
73
74 #    include "pme.cuh"
75 #elif GMX_GPU == GMX_GPU_OPENCL
76 #    include "gromacs/gpu_utils/gmxopencl.h"
77 #endif
78
79 #include "gromacs/ewald/pme.h"
80
81 #include "pme_gpu_3dfft.h"
82 #include "pme_gpu_calculate_splines.h"
83 #include "pme_gpu_constants.h"
84 #include "pme_gpu_program_impl.h"
85 #include "pme_gpu_timings.h"
86 #include "pme_gpu_types.h"
87 #include "pme_gpu_types_host.h"
88 #include "pme_gpu_types_host_impl.h"
89 #include "pme_grid.h"
90 #include "pme_internal.h"
91 #include "pme_solve.h"
92
93 /*! \brief
94  * CUDA only.
95  * Atom limit above which it is advantageous to turn on recalculating the splines
96  * in the gather and to use fewer threads per atom in the spline and spread kernels.
97  */
98 constexpr int c_pmeGpuPerformanceAtomLimit = 23000;
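// How this limit is applied (see pme_gpu_select_best_performing_pme_spreadgather_kernels() below):
// in CUDA builds with more atoms than this, ThreadsPerAtom::Order with spline recalculation in the
// gather is selected; otherwise ThreadsPerAtom::OrderSquared with stored splines is used.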
99
100 /*! \internal \brief
101  * Wrapper for getting a pointer to the plain C++ part of the GPU kernel parameters structure.
102  *
103  * \param[in] pmeGpu  The PME GPU structure.
104  * \returns The pointer to the kernel parameters.
105  */
106 static PmeGpuKernelParamsBase* pme_gpu_get_kernel_params_base_ptr(const PmeGpu* pmeGpu)
107 {
108     // reinterpret_cast is needed because the derived CUDA structure is not known in this file
109     auto* kernelParamsPtr = reinterpret_cast<PmeGpuKernelParamsBase*>(pmeGpu->kernelParams.get());
110     return kernelParamsPtr;
111 }
112
113 /*! \brief
114  * Atom data block size (in terms of number of atoms).
115  * This is the least common multiple of the number of atoms processed by
116  * a single block/workgroup of the spread and gather kernels.
117  * The GPU atom data buffers must be padded, which means that
118  * the number of atoms used for determining the size of the memory
119  * allocation must be divisible by this.
120  */
121 constexpr int c_pmeAtomDataBlockSize = 64;
122
123 int pme_gpu_get_atom_data_block_size()
124 {
125     return c_pmeAtomDataBlockSize;
126 }
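// Illustrative example (hypothetical numbers): with nAtoms = 100 and c_pmeAtomDataBlockSize = 64,
// pme_gpu_reinit_atoms() pads the allocation to ((100 + 64 - 1) / 64) * 64 = 128 atoms; the charge
// coefficients of the 28 padding entries are zeroed in pme_gpu_realloc_and_copy_input_coefficients(),
// so the padded atoms spread no charge.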
127
128 void pme_gpu_synchronize(const PmeGpu* pmeGpu)
129 {
130     pmeGpu->archSpecific->pmeStream_.synchronize();
131 }
132
133 void pme_gpu_alloc_energy_virial(PmeGpu* pmeGpu)
134 {
135     const size_t energyAndVirialSize = c_virialAndEnergyCount * sizeof(float);
136     allocateDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy, c_virialAndEnergyCount,
137                          pmeGpu->archSpecific->deviceContext_);
138     pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_virialAndEnergy), energyAndVirialSize);
139 }
140
141 void pme_gpu_free_energy_virial(PmeGpu* pmeGpu)
142 {
143     freeDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy);
144     pfree(pmeGpu->staging.h_virialAndEnergy);
145     pmeGpu->staging.h_virialAndEnergy = nullptr;
146 }
147
148 void pme_gpu_clear_energy_virial(const PmeGpu* pmeGpu)
149 {
150     clearDeviceBufferAsync(&pmeGpu->kernelParams->constants.d_virialAndEnergy, 0,
151                            c_virialAndEnergyCount, pmeGpu->archSpecific->pmeStream_);
152 }
153
154 void pme_gpu_realloc_and_copy_bspline_values(PmeGpu* pmeGpu)
155 {
156     const int splineValuesOffset[DIM] = { 0, pmeGpu->kernelParams->grid.realGridSize[XX],
157                                           pmeGpu->kernelParams->grid.realGridSize[XX]
158                                                   + pmeGpu->kernelParams->grid.realGridSize[YY] };
159     memcpy(&pmeGpu->kernelParams->grid.splineValuesOffset, &splineValuesOffset, sizeof(splineValuesOffset));
160
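    // The three B-spline moduli arrays are packed back to back into a single buffer:
    // X values start at offset 0, Y values at realGridSize[XX], and Z values at
    // realGridSize[XX] + realGridSize[YY]; newSplineValuesSize below is their total length.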
161     const int newSplineValuesSize = pmeGpu->kernelParams->grid.realGridSize[XX]
162                                     + pmeGpu->kernelParams->grid.realGridSize[YY]
163                                     + pmeGpu->kernelParams->grid.realGridSize[ZZ];
164     const bool shouldRealloc = (newSplineValuesSize > pmeGpu->archSpecific->splineValuesSize);
165     reallocateDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, newSplineValuesSize,
166                            &pmeGpu->archSpecific->splineValuesSize,
167                            &pmeGpu->archSpecific->splineValuesSizeAlloc,
168                            pmeGpu->archSpecific->deviceContext_);
169     if (shouldRealloc)
170     {
171         /* Reallocate the host buffer */
172         pfree(pmeGpu->staging.h_splineModuli);
173         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_splineModuli),
174                 newSplineValuesSize * sizeof(float));
175     }
176     for (int i = 0; i < DIM; i++)
177     {
178         memcpy(pmeGpu->staging.h_splineModuli + splineValuesOffset[i],
179                pmeGpu->common->bsp_mod[i].data(), pmeGpu->common->bsp_mod[i].size() * sizeof(float));
180     }
181     /* TODO: pin original buffer instead! */
182     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, pmeGpu->staging.h_splineModuli,
183                        0, newSplineValuesSize, pmeGpu->archSpecific->pmeStream_,
184                        pmeGpu->settings.transferKind, nullptr);
185 }
186
187 void pme_gpu_free_bspline_values(const PmeGpu* pmeGpu)
188 {
189     pfree(pmeGpu->staging.h_splineModuli);
190     freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli);
191 }
192
193 void pme_gpu_realloc_forces(PmeGpu* pmeGpu)
194 {
195     const size_t newForcesSize = pmeGpu->nAtomsAlloc * DIM;
196     GMX_ASSERT(newForcesSize > 0, "Bad number of atoms in PME GPU");
197     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, newForcesSize,
198                            &pmeGpu->archSpecific->forcesSize, &pmeGpu->archSpecific->forcesSizeAlloc,
199                            pmeGpu->archSpecific->deviceContext_);
200     pmeGpu->staging.h_forces.reserveWithPadding(pmeGpu->nAtomsAlloc);
201     pmeGpu->staging.h_forces.resizeWithPadding(pmeGpu->kernelParams->atoms.nAtoms);
202 }
203
204 void pme_gpu_free_forces(const PmeGpu* pmeGpu)
205 {
206     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces);
207 }
208
209 void pme_gpu_copy_input_forces(PmeGpu* pmeGpu)
210 {
211     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
212     float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
213     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, h_forcesFloat, 0,
214                        DIM * pmeGpu->kernelParams->atoms.nAtoms, pmeGpu->archSpecific->pmeStream_,
215                        pmeGpu->settings.transferKind, nullptr);
216 }
217
218 void pme_gpu_copy_output_forces(PmeGpu* pmeGpu)
219 {
220     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
221     float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
222     copyFromDeviceBuffer(h_forcesFloat, &pmeGpu->kernelParams->atoms.d_forces, 0,
223                          DIM * pmeGpu->kernelParams->atoms.nAtoms, pmeGpu->archSpecific->pmeStream_,
224                          pmeGpu->settings.transferKind, nullptr);
225 }
226
227 void pme_gpu_realloc_and_copy_input_coefficients(PmeGpu* pmeGpu, const float* h_coefficients)
228 {
229     GMX_ASSERT(h_coefficients, "Bad host-side charge buffer in PME GPU");
230     const size_t newCoefficientsSize = pmeGpu->nAtomsAlloc;
231     GMX_ASSERT(newCoefficientsSize > 0, "Bad number of atoms in PME GPU");
232     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients, newCoefficientsSize,
233                            &pmeGpu->archSpecific->coefficientsSize,
234                            &pmeGpu->archSpecific->coefficientsSizeAlloc,
235                            pmeGpu->archSpecific->deviceContext_);
236     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients,
237                        const_cast<float*>(h_coefficients), 0, pmeGpu->kernelParams->atoms.nAtoms,
238                        pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
239
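    // Clear the padding region of the charge buffer so that the padded (non-existent)
    // atoms spread exactly zero charge.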
240     const size_t paddingIndex = pmeGpu->kernelParams->atoms.nAtoms;
241     const size_t paddingCount = pmeGpu->nAtomsAlloc - paddingIndex;
242     if (paddingCount > 0)
243     {
244         clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coefficients, paddingIndex,
245                                paddingCount, pmeGpu->archSpecific->pmeStream_);
246     }
247 }
248
249 void pme_gpu_free_coefficients(const PmeGpu* pmeGpu)
250 {
251     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients);
252 }
253
254 void pme_gpu_realloc_spline_data(PmeGpu* pmeGpu)
255 {
256     const int order             = pmeGpu->common->pme_order;
257     const int newSplineDataSize = DIM * order * pmeGpu->nAtomsAlloc;
258     GMX_ASSERT(newSplineDataSize > 0, "Bad number of atoms in PME GPU");
259     /* Two arrays of the same size */
260     const bool shouldRealloc        = (newSplineDataSize > pmeGpu->archSpecific->splineDataSize);
261     int        currentSizeTemp      = pmeGpu->archSpecific->splineDataSize;
262     int        currentSizeTempAlloc = pmeGpu->archSpecific->splineDataSizeAlloc;
263     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta, newSplineDataSize, &currentSizeTemp,
264                            &currentSizeTempAlloc, pmeGpu->archSpecific->deviceContext_);
265     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta, newSplineDataSize,
266                            &pmeGpu->archSpecific->splineDataSize, &pmeGpu->archSpecific->splineDataSizeAlloc,
267                            pmeGpu->archSpecific->deviceContext_);
268     // the host side reallocation
269     if (shouldRealloc)
270     {
271         pfree(pmeGpu->staging.h_theta);
272         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_theta), newSplineDataSize * sizeof(float));
273         pfree(pmeGpu->staging.h_dtheta);
274         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_dtheta), newSplineDataSize * sizeof(float));
275     }
276 }
277
278 void pme_gpu_free_spline_data(const PmeGpu* pmeGpu)
279 {
280     /* Two arrays of the same size */
281     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta);
282     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta);
283     pfree(pmeGpu->staging.h_theta);
284     pfree(pmeGpu->staging.h_dtheta);
285 }
286
287 void pme_gpu_realloc_grid_indices(PmeGpu* pmeGpu)
288 {
289     const size_t newIndicesSize = DIM * pmeGpu->nAtomsAlloc;
290     GMX_ASSERT(newIndicesSize > 0, "Bad number of atoms in PME GPU");
291     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices, newIndicesSize,
292                            &pmeGpu->archSpecific->gridlineIndicesSize,
293                            &pmeGpu->archSpecific->gridlineIndicesSizeAlloc,
294                            pmeGpu->archSpecific->deviceContext_);
295     pfree(pmeGpu->staging.h_gridlineIndices);
296     pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_gridlineIndices), newIndicesSize * sizeof(int));
297 }
298
299 void pme_gpu_free_grid_indices(const PmeGpu* pmeGpu)
300 {
301     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices);
302     pfree(pmeGpu->staging.h_gridlineIndices);
303 }
304
305 void pme_gpu_realloc_grids(PmeGpu* pmeGpu)
306 {
307     auto*     kernelParamsPtr = pmeGpu->kernelParams.get();
308     const int newRealGridSize = kernelParamsPtr->grid.realGridSizePadded[XX]
309                                 * kernelParamsPtr->grid.realGridSizePadded[YY]
310                                 * kernelParamsPtr->grid.realGridSizePadded[ZZ];
311     const int newComplexGridSize = kernelParamsPtr->grid.complexGridSizePadded[XX]
312                                    * kernelParamsPtr->grid.complexGridSizePadded[YY]
313                                    * kernelParamsPtr->grid.complexGridSizePadded[ZZ] * 2;
314     // Multiplied by 2 because the complex grid size is counted in complex numbers, while all allocations/pointers are float
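    // Illustrative example (hypothetical numbers, assuming no complex-grid padding): a 64x64x64
    // real grid pairs with a 64x64x33 complex grid, so newComplexGridSize = 64 * 64 * 33 * 2
    // = 270336 float values.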
315     if (pmeGpu->archSpecific->performOutOfPlaceFFT)
316     {
317         /* 2 separate grids */
318         reallocateDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, newComplexGridSize,
319                                &pmeGpu->archSpecific->complexGridSize,
320                                &pmeGpu->archSpecific->complexGridSizeAlloc,
321                                pmeGpu->archSpecific->deviceContext_);
322         reallocateDeviceBuffer(
323                 &kernelParamsPtr->grid.d_realGrid, newRealGridSize, &pmeGpu->archSpecific->realGridSize,
324                 &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->deviceContext_);
325     }
326     else
327     {
328         /* A single buffer so that any grid will fit */
329         const int newGridsSize = std::max(newRealGridSize, newComplexGridSize);
330         reallocateDeviceBuffer(
331                 &kernelParamsPtr->grid.d_realGrid, newGridsSize, &pmeGpu->archSpecific->realGridSize,
332                 &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->deviceContext_);
333         kernelParamsPtr->grid.d_fourierGrid   = kernelParamsPtr->grid.d_realGrid;
334         pmeGpu->archSpecific->complexGridSize = pmeGpu->archSpecific->realGridSize;
335         // the size might get used later for copying the grid
336     }
337 }
338
339 void pme_gpu_free_grids(const PmeGpu* pmeGpu)
340 {
341     if (pmeGpu->archSpecific->performOutOfPlaceFFT)
342     {
343         freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_fourierGrid);
344     }
345     freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid);
346 }
347
348 void pme_gpu_clear_grids(const PmeGpu* pmeGpu)
349 {
350     clearDeviceBufferAsync(&pmeGpu->kernelParams->grid.d_realGrid, 0,
351                            pmeGpu->archSpecific->realGridSize, pmeGpu->archSpecific->pmeStream_);
352 }
353
354 void pme_gpu_realloc_and_copy_fract_shifts(PmeGpu* pmeGpu)
355 {
356     pme_gpu_free_fract_shifts(pmeGpu);
357
358     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
359
360     const int nx                  = kernelParamsPtr->grid.realGridSize[XX];
361     const int ny                  = kernelParamsPtr->grid.realGridSize[YY];
362     const int nz                  = kernelParamsPtr->grid.realGridSize[ZZ];
363     const int cellCount           = c_pmeNeighborUnitcellCount;
364     const int gridDataOffset[DIM] = { 0, cellCount * nx, cellCount * (nx + ny) };
365
366     memcpy(kernelParamsPtr->grid.tablesOffsets, &gridDataOffset, sizeof(gridDataOffset));
367
368     const int newFractShiftsSize = cellCount * (nx + ny + nz);
369
370     initParamLookupTable(&kernelParamsPtr->grid.d_fractShiftsTable,
371                          &kernelParamsPtr->fractShiftsTableTexture, pmeGpu->common->fsh.data(),
372                          newFractShiftsSize, pmeGpu->archSpecific->deviceContext_);
373
374     initParamLookupTable(&kernelParamsPtr->grid.d_gridlineIndicesTable,
375                          &kernelParamsPtr->gridlineIndicesTableTexture, pmeGpu->common->nn.data(),
376                          newFractShiftsSize, pmeGpu->archSpecific->deviceContext_);
377 }
378
379 void pme_gpu_free_fract_shifts(const PmeGpu* pmeGpu)
380 {
381     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
382 #if GMX_GPU == GMX_GPU_CUDA
383     destroyParamLookupTable(&kernelParamsPtr->grid.d_fractShiftsTable,
384                             kernelParamsPtr->fractShiftsTableTexture);
385     destroyParamLookupTable(&kernelParamsPtr->grid.d_gridlineIndicesTable,
386                             kernelParamsPtr->gridlineIndicesTableTexture);
387 #elif GMX_GPU == GMX_GPU_OPENCL
388     freeDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable);
389     freeDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable);
390 #endif
391 }
392
393 bool pme_gpu_stream_query(const PmeGpu* pmeGpu)
394 {
395     return haveStreamTasksCompleted(pmeGpu->archSpecific->pmeStream_);
396 }
397
398 void pme_gpu_copy_input_gather_grid(const PmeGpu* pmeGpu, float* h_grid)
399 {
400     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid, h_grid, 0, pmeGpu->archSpecific->realGridSize,
401                        pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
402 }
403
404 void pme_gpu_copy_output_spread_grid(const PmeGpu* pmeGpu, float* h_grid)
405 {
406     copyFromDeviceBuffer(h_grid, &pmeGpu->kernelParams->grid.d_realGrid, 0,
407                          pmeGpu->archSpecific->realGridSize, pmeGpu->archSpecific->pmeStream_,
408                          pmeGpu->settings.transferKind, nullptr);
409     pmeGpu->archSpecific->syncSpreadGridD2H.markEvent(pmeGpu->archSpecific->pmeStream_);
410 }
411
412 void pme_gpu_copy_output_spread_atom_data(const PmeGpu* pmeGpu)
413 {
414     const size_t splinesCount    = DIM * pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order;
415     auto*        kernelParamsPtr = pmeGpu->kernelParams.get();
416     copyFromDeviceBuffer(pmeGpu->staging.h_dtheta, &kernelParamsPtr->atoms.d_dtheta, 0, splinesCount,
417                          pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
418     copyFromDeviceBuffer(pmeGpu->staging.h_theta, &kernelParamsPtr->atoms.d_theta, 0, splinesCount,
419                          pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
420     copyFromDeviceBuffer(pmeGpu->staging.h_gridlineIndices, &kernelParamsPtr->atoms.d_gridlineIndices,
421                          0, kernelParamsPtr->atoms.nAtoms * DIM, pmeGpu->archSpecific->pmeStream_,
422                          pmeGpu->settings.transferKind, nullptr);
423 }
424
425 void pme_gpu_copy_input_gather_atom_data(const PmeGpu* pmeGpu)
426 {
427     const size_t splinesCount    = DIM * pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order;
428     auto*        kernelParamsPtr = pmeGpu->kernelParams.get();
429
430     // TODO: could clear only the padding and not the whole thing, but this is test-exclusive code anyway
431     clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_gridlineIndices, 0, pmeGpu->nAtomsAlloc * DIM,
432                            pmeGpu->archSpecific->pmeStream_);
433     clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_dtheta, 0,
434                            pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
435                            pmeGpu->archSpecific->pmeStream_);
436     clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_theta, 0,
437                            pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
438                            pmeGpu->archSpecific->pmeStream_);
439
440     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_dtheta, pmeGpu->staging.h_dtheta, 0, splinesCount,
441                        pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
442     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_theta, pmeGpu->staging.h_theta, 0, splinesCount,
443                        pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
444     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_gridlineIndices, pmeGpu->staging.h_gridlineIndices,
445                        0, kernelParamsPtr->atoms.nAtoms * DIM, pmeGpu->archSpecific->pmeStream_,
446                        pmeGpu->settings.transferKind, nullptr);
447 }
448
449 void pme_gpu_sync_spread_grid(const PmeGpu* pmeGpu)
450 {
451     pmeGpu->archSpecific->syncSpreadGridD2H.waitForEvent();
452 }
453
454 /*! \brief Internal GPU initialization for PME.
455  *
456  * \param[in]  pmeGpu         GPU PME data.
457  * \param[in]  deviceContext  GPU context.
458  * \param[in]  deviceStream   GPU stream.
459  */
460 static void pme_gpu_init_internal(PmeGpu* pmeGpu, const DeviceContext& deviceContext, const DeviceStream& deviceStream)
461 {
462 #if GMX_GPU == GMX_GPU_CUDA
463     // Prepare to use the device that this PME task was assigned earlier.
464     // Other entities, such as CUDA timing events, are known to implicitly use the device context.
465     CU_RET_ERR(cudaSetDevice(deviceContext.deviceInfo().id), "Switching to PME CUDA device");
466 #endif
467
468     /* Allocate the target-specific structures */
469     pmeGpu->archSpecific.reset(new PmeGpuSpecific(deviceContext, deviceStream));
470     pmeGpu->kernelParams.reset(new PmeGpuKernelParams());
471
472     pmeGpu->archSpecific->performOutOfPlaceFFT = true;
473     /* This should give better performance, according to the cuFFT documentation.
474      * The performance seems to be the same though.
475      * TODO: PME could also try to pick nice grid sizes (with factors of 2, 3, 5, 7).
476      */
477
478 #if GMX_GPU == GMX_GPU_CUDA
479     pmeGpu->maxGridWidthX = deviceContext.deviceInfo().prop.maxGridSize[0];
480 #elif GMX_GPU == GMX_GPU_OPENCL
481     pmeGpu->maxGridWidthX = INT32_MAX / 2;
482     // TODO: is there really no global work size limit in OpenCL?
483 #endif
484 }
485
486 void pme_gpu_reinit_3dfft(const PmeGpu* pmeGpu)
487 {
488     if (pme_gpu_settings(pmeGpu).performGPUFFT)
489     {
490         pmeGpu->archSpecific->fftSetup.resize(0);
491         for (int i = 0; i < pmeGpu->common->ngrids; i++)
492         {
493             pmeGpu->archSpecific->fftSetup.push_back(std::make_unique<GpuParallel3dFft>(pmeGpu));
494         }
495     }
496 }
497
498 void pme_gpu_destroy_3dfft(const PmeGpu* pmeGpu)
499 {
500     pmeGpu->archSpecific->fftSetup.resize(0);
501 }
502
503 void pme_gpu_getEnergyAndVirial(const gmx_pme_t& pme, PmeOutput* output)
504 {
505     const PmeGpu* pmeGpu = pme.gpu;
506     for (int j = 0; j < c_virialAndEnergyCount; j++)
507     {
508         GMX_ASSERT(std::isfinite(pmeGpu->staging.h_virialAndEnergy[j]),
509                    "PME GPU produces incorrect energy/virial.");
510     }
511
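    // The staging buffer is unpacked below in the order
    // { virXX, virYY, virZZ, virXY, virXZ, virYZ, energy }, with a 0.25 prefactor for the
    // virial components and 0.5 for the energy.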
512     unsigned int j                 = 0;
513     output->coulombVirial_[XX][XX] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
514     output->coulombVirial_[YY][YY] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
515     output->coulombVirial_[ZZ][ZZ] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
516     output->coulombVirial_[XX][YY] = output->coulombVirial_[YY][XX] =
517             0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
518     output->coulombVirial_[XX][ZZ] = output->coulombVirial_[ZZ][XX] =
519             0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
520     output->coulombVirial_[YY][ZZ] = output->coulombVirial_[ZZ][YY] =
521             0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
522     output->coulombEnergy_ = 0.5F * pmeGpu->staging.h_virialAndEnergy[j++];
523 }
524
525 /*! \brief Sets the force-related members in \p output
526  *
527  * \param[in]   pmeGpu      PME GPU data structure
528  * \param[out]  output      Pointer to PME output data structure
529  */
530 static void pme_gpu_getForceOutput(PmeGpu* pmeGpu, PmeOutput* output)
531 {
532     output->haveForceOutput_ = !pmeGpu->settings.useGpuForceReduction;
533     if (output->haveForceOutput_)
534     {
535         output->forces_ = pmeGpu->staging.h_forces;
536     }
537 }
538
539 PmeOutput pme_gpu_getOutput(const gmx_pme_t& pme, const bool computeEnergyAndVirial)
540 {
541     PmeGpu* pmeGpu = pme.gpu;
542
543     PmeOutput output;
544
545     pme_gpu_getForceOutput(pmeGpu, &output);
546
547     if (computeEnergyAndVirial)
548     {
549         if (pme_gpu_settings(pmeGpu).performGPUSolve)
550         {
551             pme_gpu_getEnergyAndVirial(pme, &output);
552         }
553         else
554         {
555             get_pme_ener_vir_q(pme.solve_work, pme.nthread, &output);
556         }
557     }
558     return output;
559 }
560
561 void pme_gpu_update_input_box(PmeGpu gmx_unused* pmeGpu, const matrix gmx_unused box)
562 {
563 #if GMX_DOUBLE
564     GMX_THROW(gmx::NotImplementedError("PME is implemented for single-precision only on GPU"));
565 #else
566     matrix scaledBox;
567     pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
568     auto* kernelParamsPtr              = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
569     kernelParamsPtr->current.boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
570     GMX_ASSERT(kernelParamsPtr->current.boxVolume != 0.0F, "Zero volume of the unit cell");
571     matrix recipBox;
572     gmx::invertBoxMatrix(scaledBox, recipBox);
573
574     /* The GPU recipBox is transposed as compared to the CPU recipBox.
575      * Spread uses matrix columns (while solve and gather use rows).
576      * There is no particular reason for this; it might be further rethought/optimized for better access patterns.
577      */
578     const real newRecipBox[DIM][DIM] = { { recipBox[XX][XX], recipBox[YY][XX], recipBox[ZZ][XX] },
579                                          { 0.0, recipBox[YY][YY], recipBox[ZZ][YY] },
580                                          { 0.0, 0.0, recipBox[ZZ][ZZ] } };
581     memcpy(kernelParamsPtr->current.recipBox, newRecipBox, sizeof(matrix));
582 #endif
583 }
584
585 /*! \brief \libinternal
586  * (Re-)initializes all the PME GPU data related to the grid size and cut-off.
587  *
588  * \param[in] pmeGpu            The PME GPU structure.
589  */
590 static void pme_gpu_reinit_grids(PmeGpu* pmeGpu)
591 {
592     auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
593     kernelParamsPtr->grid.ewaldFactor =
594             (M_PI * M_PI) / (pmeGpu->common->ewaldcoeff_q * pmeGpu->common->ewaldcoeff_q);
595
596     /* The grid size variants */
597     for (int i = 0; i < DIM; i++)
598     {
599         kernelParamsPtr->grid.realGridSize[i] = pmeGpu->common->nk[i];
600         kernelParamsPtr->grid.realGridSizeFP[i] =
601                 static_cast<float>(kernelParamsPtr->grid.realGridSize[i]);
602         kernelParamsPtr->grid.realGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
603
604         // The complex grid currently uses no padding;
605         // if it starts to do so, then another test should be added for that
606         kernelParamsPtr->grid.complexGridSize[i]       = kernelParamsPtr->grid.realGridSize[i];
607         kernelParamsPtr->grid.complexGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
608     }
609     /* FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
610     if (!pme_gpu_settings(pmeGpu).performGPUFFT)
611     {
612         // This allows the GPU spread grid and the CPU fftgrid to have the same layout, so that the data can be copied directly
613         kernelParamsPtr->grid.realGridSizePadded[ZZ] =
614                 (kernelParamsPtr->grid.realGridSize[ZZ] / 2 + 1) * 2;
615     }
616
617     /* GPU FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
618     kernelParamsPtr->grid.complexGridSize[ZZ] /= 2;
619     kernelParamsPtr->grid.complexGridSize[ZZ]++;
620     kernelParamsPtr->grid.complexGridSizePadded[ZZ] = kernelParamsPtr->grid.complexGridSize[ZZ];
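    // Illustrative example (hypothetical numbers): for nz = 64 the complex grid has
    // 64 / 2 + 1 = 33 cells along ZZ, and on the CPU-FFT path above the real grid ZZ
    // dimension is padded to (64 / 2 + 1) * 2 = 66 so that the GPU spread grid matches
    // the CPU fftgrid layout.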
621
622     pme_gpu_realloc_and_copy_fract_shifts(pmeGpu);
623     pme_gpu_realloc_and_copy_bspline_values(pmeGpu);
624     pme_gpu_realloc_grids(pmeGpu);
625     pme_gpu_reinit_3dfft(pmeGpu);
626 }
627
628 /* Several GPU functions that refer to the CPU PME data live here.
629  * We would like to keep these away from the GPU-framework specific code for clarity,
630  * as well as to avoid compilation issues with MPI.
631  */
632
633 /*! \brief \libinternal
634  * Copies everything useful from the PME CPU to the PME GPU structure.
635  * The goal is to minimize interaction with the PME CPU structure in the GPU code.
636  *
637  * \param[in] pme         The PME structure.
638  */
639 static void pme_gpu_copy_common_data_from(const gmx_pme_t* pme)
640 {
641     /* TODO: Consider refactoring the CPU PME code to use the same structure,
642      * so that this function becomes 2 lines */
643     PmeGpu* pmeGpu               = pme->gpu;
644     pmeGpu->common->ngrids       = pme->ngrids;
645     pmeGpu->common->epsilon_r    = pme->epsilon_r;
646     pmeGpu->common->ewaldcoeff_q = pme->ewaldcoeff_q;
647     pmeGpu->common->nk[XX]       = pme->nkx;
648     pmeGpu->common->nk[YY]       = pme->nky;
649     pmeGpu->common->nk[ZZ]       = pme->nkz;
650     pmeGpu->common->pme_order    = pme->pme_order;
651     if (pmeGpu->common->pme_order != c_pmeGpuOrder)
652     {
653         GMX_THROW(gmx::NotImplementedError("pme_order != 4 is not implemented!"));
654     }
655     for (int i = 0; i < DIM; i++)
656     {
657         pmeGpu->common->bsp_mod[i].assign(pme->bsp_mod[i], pme->bsp_mod[i] + pmeGpu->common->nk[i]);
658     }
659     const int cellCount = c_pmeNeighborUnitcellCount;
660     pmeGpu->common->fsh.resize(0);
661     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshx, pme->fshx + cellCount * pme->nkx);
662     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshy, pme->fshy + cellCount * pme->nky);
663     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshz, pme->fshz + cellCount * pme->nkz);
664     pmeGpu->common->nn.resize(0);
665     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnx, pme->nnx + cellCount * pme->nkx);
666     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nny, pme->nny + cellCount * pme->nky);
667     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnz, pme->nnz + cellCount * pme->nkz);
668     pmeGpu->common->runMode       = pme->runMode;
669     pmeGpu->common->isRankPmeOnly = !pme->bPPnode;
670     pmeGpu->common->boxScaler     = pme->boxScaler;
671 }
672
673 /*! \libinternal \brief
674  * Uses heuristics to select the best-performing PME spread and gather kernels.
675  *
676  * \param[in,out] pmeGpu         The PME GPU structure.
677  */
678 static void pme_gpu_select_best_performing_pme_spreadgather_kernels(PmeGpu* pmeGpu)
679 {
680     if (pmeGpu->kernelParams->atoms.nAtoms > c_pmeGpuPerformanceAtomLimit && (GMX_GPU == GMX_GPU_CUDA))
681     {
682         pmeGpu->settings.threadsPerAtom     = ThreadsPerAtom::Order;
683         pmeGpu->settings.recalculateSplines = true;
684     }
685     else
686     {
687         pmeGpu->settings.threadsPerAtom     = ThreadsPerAtom::OrderSquared;
688         pmeGpu->settings.recalculateSplines = false;
689     }
690 }
691
692
693 /*! \libinternal \brief
694  * Initializes the PME GPU data at the beginning of the run.
695  * TODO: this should become PmeGpu::PmeGpu()
696  *
697  * \param[in,out] pme            The PME structure.
698  * \param[in]     deviceContext  The GPU context.
699  * \param[in]     deviceStream   The GPU stream.
700  * \param[in,out] pmeGpuProgram  The handle to the program/kernel data created outside (e.g. in unit tests/runner)
701  */
702 static void pme_gpu_init(gmx_pme_t*           pme,
703                          const DeviceContext& deviceContext,
704                          const DeviceStream&  deviceStream,
705                          const PmeGpuProgram* pmeGpuProgram)
706 {
707     pme->gpu       = new PmeGpu();
708     PmeGpu* pmeGpu = pme->gpu;
709     changePinningPolicy(&pmeGpu->staging.h_forces, pme_get_pinning_policy());
710     pmeGpu->common = std::make_shared<PmeShared>();
711
712     /* These settings are set here for the whole run; dynamic ones are set in pme_gpu_reinit() */
713     /* A convenience variable. */
714     pmeGpu->settings.useDecomposition = (pme->nnodes != 1);
715     /* TODO: CPU gather with GPU spread is broken due to different theta/dtheta layout. */
716     pmeGpu->settings.performGPUGather = true;
717     // By default GPU-side reduction is off (explicitly set here for tests, otherwise reset per-step)
718     pmeGpu->settings.useGpuForceReduction = false;
719
720     pme_gpu_set_testing(pmeGpu, false);
721
722     GMX_ASSERT(pmeGpuProgram != nullptr, "GPU kernels must be already compiled");
723     pmeGpu->programHandle_ = pmeGpuProgram;
724
725     pmeGpu->initializedClfftLibrary_ = std::make_unique<gmx::ClfftInitializer>();
726
727     pme_gpu_init_internal(pmeGpu, deviceContext, deviceStream);
728     pme_gpu_alloc_energy_virial(pmeGpu);
729
730     pme_gpu_copy_common_data_from(pme);
731
732     GMX_ASSERT(pmeGpu->common->epsilon_r != 0.0F, "PME GPU: bad electrostatic coefficient");
733
734     auto* kernelParamsPtr               = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
735     kernelParamsPtr->constants.elFactor = ONE_4PI_EPS0 / pmeGpu->common->epsilon_r;
736 }
737
738 void pme_gpu_get_real_grid_sizes(const PmeGpu* pmeGpu, gmx::IVec* gridSize, gmx::IVec* paddedGridSize)
739 {
740     GMX_ASSERT(gridSize != nullptr, "");
741     GMX_ASSERT(paddedGridSize != nullptr, "");
742     GMX_ASSERT(pmeGpu != nullptr, "");
743     auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
744     for (int i = 0; i < DIM; i++)
745     {
746         (*gridSize)[i]       = kernelParamsPtr->grid.realGridSize[i];
747         (*paddedGridSize)[i] = kernelParamsPtr->grid.realGridSizePadded[i];
748     }
749 }
750
751 void pme_gpu_reinit(gmx_pme_t*           pme,
752                     const DeviceContext* deviceContext,
753                     const DeviceStream*  deviceStream,
754                     const PmeGpuProgram* pmeGpuProgram)
755 {
756     GMX_ASSERT(pme != nullptr, "Need valid PME object");
757
758     if (!pme->gpu)
759     {
760         GMX_RELEASE_ASSERT(deviceContext != nullptr,
761                            "Device context can not be nullptr when setting up PME on GPU.");
762         GMX_RELEASE_ASSERT(deviceStream != nullptr,
763                            "Device stream can not be nullptr when setting up PME on GPU.");
764         /* First-time initialization */
765         pme_gpu_init(pme, *deviceContext, *deviceStream, pmeGpuProgram);
766     }
767     else
768     {
769         /* After this call nothing in the GPU code should refer to the gmx_pme_t *pme itself - until the next pme_gpu_reinit */
770         pme_gpu_copy_common_data_from(pme);
771     }
772     /* GPU FFT is only used when running on a single rank. */
773     pme->gpu->settings.performGPUFFT =
774             (pme->gpu->common->runMode == PmeRunMode::GPU) && !pme->gpu->settings.useDecomposition;
775     pme->gpu->settings.performGPUSolve = (pme->gpu->common->runMode == PmeRunMode::GPU);
776
777     /* Reinit active timers */
778     pme_gpu_reinit_timings(pme->gpu);
779
780     pme_gpu_reinit_grids(pme->gpu);
781     // Note: if timing the reinit launch overhead becomes more relevant
782     // (e.g. with regular PP-PME re-balancing), we should pass wcycle here.
783     pme_gpu_reinit_computation(pme, nullptr);
784     /* Clear the previous box - doesn't hurt, and forces the PME CPU recipbox
785      * update for mixed mode on grid switch. TODO: use shared recipbox field.
786      */
787     std::memset(pme->gpu->common->previousBox, 0, sizeof(pme->gpu->common->previousBox));
788 }
789
790 void pme_gpu_destroy(PmeGpu* pmeGpu)
791 {
792     /* Free lots of data */
793     pme_gpu_free_energy_virial(pmeGpu);
794     pme_gpu_free_bspline_values(pmeGpu);
795     pme_gpu_free_forces(pmeGpu);
796     pme_gpu_free_coefficients(pmeGpu);
797     pme_gpu_free_spline_data(pmeGpu);
798     pme_gpu_free_grid_indices(pmeGpu);
799     pme_gpu_free_fract_shifts(pmeGpu);
800     pme_gpu_free_grids(pmeGpu);
801
802     pme_gpu_destroy_3dfft(pmeGpu);
803
804     delete pmeGpu;
805 }
806
807 void pme_gpu_reinit_atoms(PmeGpu* pmeGpu, const int nAtoms, const real* charges)
808 {
809     auto* kernelParamsPtr         = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
810     kernelParamsPtr->atoms.nAtoms = nAtoms;
811     const int  block_size         = pme_gpu_get_atom_data_block_size();
812     const int  nAtomsNewPadded    = ((nAtoms + block_size - 1) / block_size) * block_size;
813     const bool haveToRealloc      = (pmeGpu->nAtomsAlloc < nAtomsNewPadded);
814     pmeGpu->nAtomsAlloc           = nAtomsNewPadded;
815
816 #if GMX_DOUBLE
817     GMX_RELEASE_ASSERT(false, "Only single precision supported");
818     GMX_UNUSED_VALUE(charges);
819 #else
820     pme_gpu_realloc_and_copy_input_coefficients(pmeGpu, reinterpret_cast<const float*>(charges));
821     /* This could also be made conditional on haveToRealloc, but the copy always needs to be performed */
822 #endif
823
824     if (haveToRealloc)
825     {
826         pme_gpu_realloc_forces(pmeGpu);
827         pme_gpu_realloc_spline_data(pmeGpu);
828         pme_gpu_realloc_grid_indices(pmeGpu);
829     }
830     pme_gpu_select_best_performing_pme_spreadgather_kernels(pmeGpu);
831 }
832
833 /*! \internal \brief
834  * Returns the raw timing event from the corresponding GpuRegionTimer (if timings are enabled).
835  * In CUDA the result can be a nullptr stub, per the GpuRegionTimer implementation.
836  *
837  * \param[in] pmeGpu         The PME GPU data structure.
838  * \param[in] PMEStageId     The PME GPU stage gtPME_ index from the enum in src/gromacs/timing/gpu_timing.h
839  */
840 static CommandEvent* pme_gpu_fetch_timing_event(const PmeGpu* pmeGpu, size_t PMEStageId)
841 {
842     CommandEvent* timingEvent = nullptr;
843     if (pme_gpu_timings_enabled(pmeGpu))
844     {
845         GMX_ASSERT(PMEStageId < pmeGpu->archSpecific->timingEvents.size(),
846                    "Wrong PME GPU timing event index");
847         timingEvent = pmeGpu->archSpecific->timingEvents[PMEStageId].fetchNextEvent();
848     }
849     return timingEvent;
850 }
851
852 void pme_gpu_3dfft(const PmeGpu* pmeGpu, gmx_fft_direction dir, int grid_index)
853 {
854     int timerId = (dir == GMX_FFT_REAL_TO_COMPLEX) ? gtPME_FFT_R2C : gtPME_FFT_C2R;
855
856     pme_gpu_start_timing(pmeGpu, timerId);
857     pmeGpu->archSpecific->fftSetup[grid_index]->perform3dFft(
858             dir, pme_gpu_fetch_timing_event(pmeGpu, timerId));
859     pme_gpu_stop_timing(pmeGpu, timerId);
860 }
861
862 /*! \brief
863  * Given possibly large \p blockCount, returns a compact 1D or 2D grid for kernel scheduling,
864  * to minimize the number of unused blocks.
865  */
866 std::pair<int, int> inline pmeGpuCreateGrid(const PmeGpu* pmeGpu, int blockCount)
867 {
868     // How many maximum widths in X do we need (hopefully just one)
869     const int minRowCount = (blockCount + pmeGpu->maxGridWidthX - 1) / pmeGpu->maxGridWidthX;
870     // Trying to make things even
871     const int colCount = (blockCount + minRowCount - 1) / minRowCount;
872     GMX_ASSERT((colCount * minRowCount - blockCount) >= 0, "pmeGpuCreateGrid: totally wrong");
873     GMX_ASSERT((colCount * minRowCount - blockCount) < minRowCount,
874                "pmeGpuCreateGrid: excessive blocks");
875     return std::pair<int, int>(colCount, minRowCount);
876 }
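// Illustrative example (hypothetical numbers): with blockCount = 100000 and a maxGridWidthX of
// 65535, minRowCount = (100000 + 65534) / 65535 = 2 and colCount = (100000 + 1) / 2 = 50000,
// giving a 50000 x 2 launch grid with no unused blocks.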
877
878 /*! \brief
879  * Returns a pointer to the appropriate spline and spread kernel based on the input bool values
880  *
881  * \param[in]  pmeGpu                   The PME GPU structure.
882  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
883  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
884  *
885  * \return Pointer to CUDA kernel
886  */
887 static auto selectSplineAndSpreadKernelPtr(const PmeGpu* pmeGpu, ThreadsPerAtom threadsPerAtom, bool writeSplinesToGlobal)
888 {
889     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
890     if (writeSplinesToGlobal)
891     {
892         if (threadsPerAtom == ThreadsPerAtom::Order)
893         {
894             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesThPerAtom4;
895         }
896         else
897         {
898             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplines;
899         }
900     }
901     else
902     {
903         if (threadsPerAtom == ThreadsPerAtom::Order)
904         {
905             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4;
906         }
907         else
908         {
909             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernel;
910         }
911     }
912
913     return kernelPtr;
914 }
915
916 /*! \brief
917  * Returns a pointer to the appropriate spline kernel based on the input bool values
918  *
919  * \param[in]  pmeGpu                   The PME GPU structure.
920  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
921  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
922  *
923  * \return Pointer to CUDA kernel
924  */
925 static auto selectSplineKernelPtr(const PmeGpu*  pmeGpu,
926                                   ThreadsPerAtom threadsPerAtom,
927                                   bool gmx_unused writeSplinesToGlobal)
928 {
929     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
930     GMX_ASSERT(
931             writeSplinesToGlobal,
932             "Spline data should always be written to global memory when just calculating splines");
933
934     if (threadsPerAtom == ThreadsPerAtom::Order)
935     {
936         kernelPtr = pmeGpu->programHandle_->impl_->splineKernelThPerAtom4;
937     }
938     else
939     {
940         kernelPtr = pmeGpu->programHandle_->impl_->splineKernel;
941     }
942     return kernelPtr;
943 }
944
945 /*! \brief
946  * Returns a pointer to the appropriate spread kernel based on the input bool values
947  *
948  * \param[in]  pmeGpu                   The PME GPU structure.
949  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
950  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
951  *
952  * \return Pointer to CUDA kernel
953  */
954 static auto selectSpreadKernelPtr(const PmeGpu* pmeGpu, ThreadsPerAtom threadsPerAtom, bool writeSplinesToGlobal)
955 {
956     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
957     if (writeSplinesToGlobal)
958     {
959         if (threadsPerAtom == ThreadsPerAtom::Order)
960         {
961             kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelThPerAtom4;
962         }
963         else
964         {
965             kernelPtr = pmeGpu->programHandle_->impl_->spreadKernel;
966         }
967     }
968     else
969     {
970         /* If we are not saving the spline data, we need to recalculate it
971            using the combined spline and spread kernel */
972         if (threadsPerAtom == ThreadsPerAtom::Order)
973         {
974             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4;
975         }
976         else
977         {
978             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernel;
979         }
980     }
981     return kernelPtr;
982 }
983
984 void pme_gpu_spread(const PmeGpu*         pmeGpu,
985                     GpuEventSynchronizer* xReadyOnDevice,
986                     int gmx_unused gridIndex,
987                     real*          h_grid,
988                     bool           computeSplines,
989                     bool           spreadCharges)
990 {
991     GMX_ASSERT(computeSplines || spreadCharges,
992                "PME spline/spread kernel has invalid input (nothing to do)");
993     const auto* kernelParamsPtr = pmeGpu->kernelParams.get();
994     GMX_ASSERT(kernelParamsPtr->atoms.nAtoms > 0, "No atom data in PME GPU spread");
995
996     const size_t blockSize = pmeGpu->programHandle_->impl_->spreadWorkGroupSize;
997
998     const int order = pmeGpu->common->pme_order;
999     GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1000     const bool writeGlobal = pmeGpu->settings.copyAllOutputs;
1001     const int  threadsPerAtom =
1002             (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
1003     const bool recalculateSplines = pmeGpu->settings.recalculateSplines;
1004 #if GMX_GPU == GMX_GPU_OPENCL
1005     GMX_ASSERT(pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::OrderSquared,
1006                "Only 16 threads per atom supported in OpenCL");
1007     GMX_ASSERT(!recalculateSplines, "Recalculating splines not supported in OpenCL");
1008 #endif
1009     const int atomsPerBlock = blockSize / threadsPerAtom;
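    // Illustrative example (assuming a work-group size of 256, which need not match the actual
    // spreadWorkGroupSize): with order = 4 and ThreadsPerAtom::OrderSquared, threadsPerAtom = 16
    // and atomsPerBlock = 256 / 16 = 16, which divides c_pmeAtomDataBlockSize = 64 as the
    // assertion below requires.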
1010
1011     // TODO: pick smaller block size in runtime if needed
1012     // (e.g. on 660 Ti where 50% occupancy is ~25% faster than 100% occupancy with RNAse (~17.8k atoms))
1013     // If doing so, change atomsPerBlock in the kernels as well.
1014     // TODO: test varying block sizes on modern arch-s as well
1015     // TODO: also consider using cudaFuncSetCacheConfig() for preferring shared memory on older architectures
1016     // (for spline data mostly)
1017     GMX_ASSERT(!(c_pmeAtomDataBlockSize % atomsPerBlock),
1018                "inconsistent atom data padding vs. spreading block size");
1019
1020     // Ensure that coordinates are ready on the device before launching spread;
1021     // only needed with CUDA on PP+PME ranks, not on separate PME ranks, in unit tests
1022     // nor in OpenCL as these cases use a single stream (hence xReadyOnDevice == nullptr).
1023     GMX_ASSERT(xReadyOnDevice != nullptr || (GMX_GPU != GMX_GPU_CUDA)
1024                        || pmeGpu->common->isRankPmeOnly || pme_gpu_settings(pmeGpu).copyAllOutputs,
1025                "Need a valid coordinate synchronizer on PP+PME ranks with CUDA.");
1026     if (xReadyOnDevice)
1027     {
1028         xReadyOnDevice->enqueueWaitEvent(pmeGpu->archSpecific->pmeStream_);
1029     }
1030
1031     const int blockCount = pmeGpu->nAtomsAlloc / atomsPerBlock;
1032     auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1033
1034     KernelLaunchConfig config;
1035     config.blockSize[0] = order;
1036     config.blockSize[1] = (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? 1 : order);
1037     config.blockSize[2] = atomsPerBlock;
1038     config.gridSize[0]  = dimGrid.first;
1039     config.gridSize[1]  = dimGrid.second;
1040
1041     int                                timingId;
1042     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1043     if (computeSplines)
1044     {
1045         if (spreadCharges)
1046         {
1047             timingId  = gtPME_SPLINEANDSPREAD;
1048             kernelPtr = selectSplineAndSpreadKernelPtr(pmeGpu, pmeGpu->settings.threadsPerAtom,
1049                                                        writeGlobal || (!recalculateSplines));
1050         }
1051         else
1052         {
1053             timingId  = gtPME_SPLINE;
1054             kernelPtr = selectSplineKernelPtr(pmeGpu, pmeGpu->settings.threadsPerAtom,
1055                                               writeGlobal || (!recalculateSplines));
1056         }
1057     }
1058     else
1059     {
1060         timingId  = gtPME_SPREAD;
1061         kernelPtr = selectSpreadKernelPtr(pmeGpu, pmeGpu->settings.threadsPerAtom,
1062                                           writeGlobal || (!recalculateSplines));
1063     }
1064
1065
1066     pme_gpu_start_timing(pmeGpu, timingId);
1067     auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1068 #if c_canEmbedBuffers
1069     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1070 #else
1071     const auto kernelArgs = prepareGpuKernelArguments(
1072             kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->atoms.d_theta,
1073             &kernelParamsPtr->atoms.d_dtheta, &kernelParamsPtr->atoms.d_gridlineIndices,
1074             &kernelParamsPtr->grid.d_realGrid, &kernelParamsPtr->grid.d_fractShiftsTable,
1075             &kernelParamsPtr->grid.d_gridlineIndicesTable, &kernelParamsPtr->atoms.d_coefficients,
1076             &kernelParamsPtr->atoms.d_coordinates);
1077 #endif
1078
1079     launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent,
1080                     "PME spline/spread", kernelArgs);
1081     pme_gpu_stop_timing(pmeGpu, timingId);
1082
1083     const auto& settings    = pmeGpu->settings;
1084     const bool copyBackGrid = spreadCharges && (!settings.performGPUFFT || settings.copyAllOutputs);
1085     if (copyBackGrid)
1086     {
1087         pme_gpu_copy_output_spread_grid(pmeGpu, h_grid);
1088     }
1089     const bool copyBackAtomData =
1090             computeSplines && (!settings.performGPUGather || settings.copyAllOutputs);
1091     if (copyBackAtomData)
1092     {
1093         pme_gpu_copy_output_spread_atom_data(pmeGpu);
1094     }
1095 }
1096
1097 void pme_gpu_solve(const PmeGpu* pmeGpu, t_complex* h_grid, GridOrdering gridOrdering, bool computeEnergyAndVirial)
1098 {
1099     const auto& settings               = pmeGpu->settings;
1100     const bool  copyInputAndOutputGrid = !settings.performGPUFFT || settings.copyAllOutputs;
1101
1102     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1103
1104     float* h_gridFloat = reinterpret_cast<float*>(h_grid);
1105     if (copyInputAndOutputGrid)
1106     {
1107         copyToDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, h_gridFloat, 0,
1108                            pmeGpu->archSpecific->complexGridSize, pmeGpu->archSpecific->pmeStream_,
1109                            pmeGpu->settings.transferKind, nullptr);
1110     }
1111
1112     int majorDim = -1, middleDim = -1, minorDim = -1;
1113     switch (gridOrdering)
1114     {
1115         case GridOrdering::YZX:
1116             majorDim  = YY;
1117             middleDim = ZZ;
1118             minorDim  = XX;
1119             break;
1120
1121         case GridOrdering::XYZ:
1122             majorDim  = XX;
1123             middleDim = YY;
1124             minorDim  = ZZ;
1125             break;
1126
1127         default: GMX_ASSERT(false, "Implement grid ordering here and below for the kernel launch");
1128     }
1129
1130     const int maxBlockSize = pmeGpu->programHandle_->impl_->solveMaxWorkGroupSize;
1131
1132     const int gridLineSize      = pmeGpu->kernelParams->grid.complexGridSize[minorDim];
1133     const int gridLinesPerBlock = std::max(maxBlockSize / gridLineSize, 1);
1134     const int blocksPerGridLine = (gridLineSize + maxBlockSize - 1) / maxBlockSize;
1135     int       cellsPerBlock;
1136     if (blocksPerGridLine == 1)
1137     {
1138         cellsPerBlock = gridLineSize * gridLinesPerBlock;
1139     }
1140     else
1141     {
1142         cellsPerBlock = (gridLineSize + blocksPerGridLine - 1) / blocksPerGridLine;
1143     }
1144     const int warpSize  = pmeGpu->programHandle_->warpSize();
1145     const int blockSize = (cellsPerBlock + warpSize - 1) / warpSize * warpSize;
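    // Illustrative example (hypothetical numbers): with gridLineSize = 60 and maxBlockSize = 256,
    // gridLinesPerBlock = 4, blocksPerGridLine = 1 and cellsPerBlock = 240; with a warp size of
    // 32 the block size rounds up to 256.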
1146
1147     static_assert(GMX_GPU != GMX_GPU_CUDA || c_solveMaxWarpsPerBlock / 2 >= 4,
1148                   "The CUDA solve energy kernels needs at least 4 warps. "
1149                   "Here we launch at least half of the max warps.");
1150
1151     KernelLaunchConfig config;
1152     config.blockSize[0] = blockSize;
1153     config.gridSize[0]  = blocksPerGridLine;
1154     // rounding up to full warps so that shuffle operations produce defined results
1155     config.gridSize[1] = (pmeGpu->kernelParams->grid.complexGridSize[middleDim] + gridLinesPerBlock - 1)
1156                          / gridLinesPerBlock;
1157     config.gridSize[2] = pmeGpu->kernelParams->grid.complexGridSize[majorDim];
1158
1159     int                                timingId  = gtPME_SOLVE;
1160     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1161     if (gridOrdering == GridOrdering::YZX)
1162     {
1163         kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveYZXEnergyKernel
1164                                            : pmeGpu->programHandle_->impl_->solveYZXKernel;
1165     }
1166     else if (gridOrdering == GridOrdering::XYZ)
1167     {
1168         kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveXYZEnergyKernel
1169                                            : pmeGpu->programHandle_->impl_->solveXYZKernel;
1170     }
1171
1172     pme_gpu_start_timing(pmeGpu, timingId);
1173     auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1174 #if c_canEmbedBuffers
1175     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1176 #else
1177     const auto kernelArgs = prepareGpuKernelArguments(
1178             kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->grid.d_splineModuli,
1179             &kernelParamsPtr->constants.d_virialAndEnergy, &kernelParamsPtr->grid.d_fourierGrid);
1180 #endif
1181     launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME solve",
1182                     kernelArgs);
1183     pme_gpu_stop_timing(pmeGpu, timingId);
1184
1185     if (computeEnergyAndVirial)
1186     {
1187         copyFromDeviceBuffer(pmeGpu->staging.h_virialAndEnergy,
1188                              &kernelParamsPtr->constants.d_virialAndEnergy, 0, c_virialAndEnergyCount,
1189                              pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
1190     }
1191
1192     if (copyInputAndOutputGrid)
1193     {
1194         copyFromDeviceBuffer(h_gridFloat, &kernelParamsPtr->grid.d_fourierGrid, 0,
1195                              pmeGpu->archSpecific->complexGridSize, pmeGpu->archSpecific->pmeStream_,
1196                              pmeGpu->settings.transferKind, nullptr);
1197     }
1198 }
1199
1200 /*! \brief
1201  * Returns a pointer to the appropriate gather kernel based on the input values
1202  *
1203  * \param[in]  pmeGpu                   The PME GPU structure.
1204  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
1205  * \param[in]  readSplinesFromGlobal    bool controlling if we should read spline data from global memory
1206  *
1207  * \return Pointer to CUDA kernel
1208  */
1209 inline auto selectGatherKernelPtr(const PmeGpu* pmeGpu, ThreadsPerAtom threadsPerAtom, bool readSplinesFromGlobal)
1210
1211 {
1212     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1213
1214     if (readSplinesFromGlobal)
1215     {
1216         if (threadsPerAtom == ThreadsPerAtom::Order)
1217         {
1218             kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesThPerAtom4;
1219         }
1220         else
1221         {
1222             kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplines;
1223         }
1224     }
1225     else
1226     {
1227         if (threadsPerAtom == ThreadsPerAtom::Order)
1228         {
1229             kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelThPerAtom4;
1230         }
1231         else
1232         {
1233             kernelPtr = pmeGpu->programHandle_->impl_->gatherKernel;
1234         }
1235     }
1236     return kernelPtr;
1237 }
1238
1239
1240 void pme_gpu_gather(PmeGpu* pmeGpu, const float* h_grid)
1241 {
1242     const auto& settings = pmeGpu->settings;
1243     if (!settings.performGPUFFT || settings.copyAllOutputs)
1244     {
1245         pme_gpu_copy_input_gather_grid(pmeGpu, const_cast<float*>(h_grid));
1246     }
1247
1248     if (settings.copyAllOutputs)
1249     {
1250         pme_gpu_copy_input_gather_atom_data(pmeGpu);
1251     }
1252
1253     /* Set only when running unit tests */
1254     const bool   readGlobal = pmeGpu->settings.copyAllOutputs;
1255     const size_t blockSize  = pmeGpu->programHandle_->impl_->gatherWorkGroupSize;
1256     const int    order      = pmeGpu->common->pme_order;
1257     GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1258     const int threadsPerAtom =
1259             (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
1260     const bool recalculateSplines = pmeGpu->settings.recalculateSplines;
1261 #if GMX_GPU == GMX_GPU_OPENCL
1262     GMX_ASSERT(pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::OrderSquared,
1263                "Only 16 threads per atom supported in OpenCL");
1264     GMX_ASSERT(!recalculateSplines, "Recalculating splines not supported in OpenCL");
1265 #endif
1266     const int atomsPerBlock = blockSize / threadsPerAtom;
1267
1268     GMX_ASSERT(!(c_pmeAtomDataBlockSize % atomsPerBlock),
1269                "inconsistent atom data padding vs. gathering block size");
1270
1271     const int blockCount = pmeGpu->nAtomsAlloc / atomsPerBlock;
1272     auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1273
1274     KernelLaunchConfig config;
1275     config.blockSize[0] = order;
1276     config.blockSize[1] = (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? 1 : order);
1277     config.blockSize[2] = atomsPerBlock;
1278     config.gridSize[0]  = dimGrid.first;
1279     config.gridSize[1]  = dimGrid.second;
1280
1281     // TODO test different cache configs
1282
1283     int                                timingId  = gtPME_GATHER;
1284     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = selectGatherKernelPtr(
1285             pmeGpu, pmeGpu->settings.threadsPerAtom, readGlobal || (!recalculateSplines));
1286     // TODO design kernel selection getters and make PmeGpu a friend of PmeGpuProgramImpl
1287
1288     pme_gpu_start_timing(pmeGpu, timingId);
1289     auto*       timingEvent     = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1290     const auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1291 #if c_canEmbedBuffers
1292     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1293 #else
1294     const auto kernelArgs = prepareGpuKernelArguments(
1295             kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->atoms.d_coefficients,
1296             &kernelParamsPtr->grid.d_realGrid, &kernelParamsPtr->atoms.d_theta,
1297             &kernelParamsPtr->atoms.d_dtheta, &kernelParamsPtr->atoms.d_gridlineIndices,
1298             &kernelParamsPtr->atoms.d_forces);
1299 #endif
1300     launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME gather",
1301                     kernelArgs);
1302     pme_gpu_stop_timing(pmeGpu, timingId);
1303
1304     if (pmeGpu->settings.useGpuForceReduction)
1305     {
1306         pmeGpu->archSpecific->pmeForcesReady.markEvent(pmeGpu->archSpecific->pmeStream_);
1307     }
1308     else
1309     {
1310         pme_gpu_copy_output_forces(pmeGpu);
1311     }
1312 }
1313
1314 void* pme_gpu_get_kernelparam_forces(const PmeGpu* pmeGpu)
1315 {
1316     if (pmeGpu && pmeGpu->kernelParams)
1317     {
1318         return pmeGpu->kernelParams->atoms.d_forces;
1319     }
1320     else
1321     {
1322         return nullptr;
1323     }
1324 }
1325
1326 void pme_gpu_set_kernelparam_coordinates(const PmeGpu* pmeGpu, DeviceBuffer<gmx::RVec> d_x)
1327 {
1328     GMX_ASSERT(pmeGpu && pmeGpu->kernelParams,
1329                "PME GPU device buffer can not be set in non-GPU builds or before the GPU PME was "
1330                "initialized.");
1331
1332     GMX_ASSERT(checkDeviceBuffer(d_x, pmeGpu->kernelParams->atoms.nAtoms),
1333                "The device-side buffer can not be set.");
1334
1335     pmeGpu->kernelParams->atoms.d_coordinates = d_x;
1336 }
1337
1338 GpuEventSynchronizer* pme_gpu_get_forces_ready_synchronizer(const PmeGpu* pmeGpu)
1339 {
1340     if (pmeGpu && pmeGpu->kernelParams)
1341     {
1342         return &pmeGpu->archSpecific->pmeForcesReady;
1343     }
1344     else
1345     {
1346         return nullptr;
1347     }
1348 }