[alexxy/gromacs.git] / src / gromacs / ewald / pme_gpu_internal.cpp
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2016,2017,2018,2019,2020, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35
36 /*! \internal \file
37  *
38  * \brief This file contains internal function implementations
39  * for performing the PME calculations on GPU.
40  *
41  * Note that this file is compiled as regular C++ source in OpenCL builds, but
42  * it is treated as CUDA source in CUDA-enabled GPU builds.
43  *
44  * \author Aleksei Iupinov <a.yupinov@gmail.com>
45  * \ingroup module_ewald
46  */
47
48 #include "gmxpre.h"
49
50 #include "pme_gpu_internal.h"
51
52 #include "config.h"
53
54 #include <list>
55 #include <memory>
56 #include <string>
57
58 #include "gromacs/ewald/ewald_utils.h"
59 #include "gromacs/gpu_utils/gpu_utils.h"
60 #include "gromacs/math/invertmatrix.h"
61 #include "gromacs/math/units.h"
62 #include "gromacs/timing/gpu_timing.h"
63 #include "gromacs/utility/exceptions.h"
64 #include "gromacs/utility/fatalerror.h"
65 #include "gromacs/utility/gmxassert.h"
66 #include "gromacs/utility/logger.h"
67 #include "gromacs/utility/stringutil.h"
68
69 #if GMX_GPU == GMX_GPU_CUDA
70 #    include "gromacs/gpu_utils/pmalloc_cuda.h"
71
72 #    include "pme.cuh"
73 #elif GMX_GPU == GMX_GPU_OPENCL
74 #    include "gromacs/gpu_utils/gmxopencl.h"
75 #endif
76
77 #include "gromacs/ewald/pme.h"
78
79 #include "pme_gpu_3dfft.h"
80 #include "pme_gpu_calculate_splines.h"
81 #include "pme_gpu_constants.h"
82 #include "pme_gpu_program_impl.h"
83 #include "pme_gpu_timings.h"
84 #include "pme_gpu_types.h"
85 #include "pme_gpu_types_host.h"
86 #include "pme_gpu_types_host_impl.h"
87 #include "pme_grid.h"
88 #include "pme_internal.h"
89 #include "pme_solve.h"
90
91 /*! \brief
92  * CUDA only.
93  * Atom limit above which it is advantageous to turn on recalculating the splines
94  * in the gather kernel and to use fewer threads per atom in the spline and spread kernels.
95  */
96 constexpr int c_pmeGpuPerformanceAtomLimit = 23000;
97
98 /*! \internal \brief
99  * Wrapper for getting a pointer to the plain C++ part of the GPU kernel parameters structure.
100  *
101  * \param[in] pmeGpu  The PME GPU structure.
102  * \returns The pointer to the kernel parameters.
103  */
104 static PmeGpuKernelParamsBase* pme_gpu_get_kernel_params_base_ptr(const PmeGpu* pmeGpu)
105 {
106     // reinterpret_cast is needed because the derived CUDA structure is not known in this file
107     auto* kernelParamsPtr = reinterpret_cast<PmeGpuKernelParamsBase*>(pmeGpu->kernelParams.get());
108     return kernelParamsPtr;
109 }
110
111 int pme_gpu_get_atom_data_block_size()
112 {
113     return c_pmeAtomDataBlockSize;
114 }
115
116 int pme_gpu_get_atoms_per_warp(const PmeGpu* pmeGpu)
117 {
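    // Descriptive note: the result is warpSize / threadsPerAtom. For illustration only, assuming a
    // warp size of 32 with 16 threads per atom (4 in the order-threads-per-atom mode), this yields
    // 2 (respectively 8) atoms per warp; the actual constants are defined in pme_gpu_constants.h.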
118     if (pmeGpu->settings.useOrderThreadsPerAtom)
119     {
120         return pmeGpu->programHandle_->impl_->warpSize / c_pmeSpreadGatherThreadsPerAtom4ThPerAtom;
121     }
122     else
123     {
124         return pmeGpu->programHandle_->impl_->warpSize / c_pmeSpreadGatherThreadsPerAtom;
125     }
126 }
127
128 void pme_gpu_synchronize(const PmeGpu* pmeGpu)
129 {
130     pmeGpu->archSpecific->pmeStream_.synchronize();
131 }
132
133 void pme_gpu_alloc_energy_virial(PmeGpu* pmeGpu)
134 {
135     const size_t energyAndVirialSize = c_virialAndEnergyCount * sizeof(float);
136     allocateDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy, c_virialAndEnergyCount,
137                          pmeGpu->archSpecific->deviceContext_);
138     pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_virialAndEnergy), energyAndVirialSize);
139 }
140
141 void pme_gpu_free_energy_virial(PmeGpu* pmeGpu)
142 {
143     freeDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy);
144     pfree(pmeGpu->staging.h_virialAndEnergy);
145     pmeGpu->staging.h_virialAndEnergy = nullptr;
146 }
147
148 void pme_gpu_clear_energy_virial(const PmeGpu* pmeGpu)
149 {
150     clearDeviceBufferAsync(&pmeGpu->kernelParams->constants.d_virialAndEnergy, 0,
151                            c_virialAndEnergyCount, pmeGpu->archSpecific->pmeStream_);
152 }
153
154 void pme_gpu_realloc_and_copy_bspline_values(PmeGpu* pmeGpu)
155 {
156     const int splineValuesOffset[DIM] = { 0, pmeGpu->kernelParams->grid.realGridSize[XX],
157                                           pmeGpu->kernelParams->grid.realGridSize[XX]
158                                                   + pmeGpu->kernelParams->grid.realGridSize[YY] };
159     memcpy(&pmeGpu->kernelParams->grid.splineValuesOffset, &splineValuesOffset, sizeof(splineValuesOffset));
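    // Descriptive note: the three per-dimension B-spline moduli arrays are packed back to back in
    // d_splineModuli using the offsets above; e.g. for a hypothetical 48x48x48 grid the Y moduli
    // start at offset 48 and the Z moduli at 96, for 144 values in total.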
160
161     const int newSplineValuesSize = pmeGpu->kernelParams->grid.realGridSize[XX]
162                                     + pmeGpu->kernelParams->grid.realGridSize[YY]
163                                     + pmeGpu->kernelParams->grid.realGridSize[ZZ];
164     const bool shouldRealloc = (newSplineValuesSize > pmeGpu->archSpecific->splineValuesSize);
165     reallocateDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, newSplineValuesSize,
166                            &pmeGpu->archSpecific->splineValuesSize,
167                            &pmeGpu->archSpecific->splineValuesSizeAlloc,
168                            pmeGpu->archSpecific->deviceContext_);
169     if (shouldRealloc)
170     {
171         /* Reallocate the host buffer */
172         pfree(pmeGpu->staging.h_splineModuli);
173         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_splineModuli),
174                 newSplineValuesSize * sizeof(float));
175     }
176     for (int i = 0; i < DIM; i++)
177     {
178         memcpy(pmeGpu->staging.h_splineModuli + splineValuesOffset[i],
179                pmeGpu->common->bsp_mod[i].data(), pmeGpu->common->bsp_mod[i].size() * sizeof(float));
180     }
181     /* TODO: pin original buffer instead! */
182     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, pmeGpu->staging.h_splineModuli,
183                        0, newSplineValuesSize, pmeGpu->archSpecific->pmeStream_,
184                        pmeGpu->settings.transferKind, nullptr);
185 }
186
187 void pme_gpu_free_bspline_values(const PmeGpu* pmeGpu)
188 {
189     pfree(pmeGpu->staging.h_splineModuli);
190     freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli);
191 }
192
193 void pme_gpu_realloc_forces(PmeGpu* pmeGpu)
194 {
195     const size_t newForcesSize = pmeGpu->nAtomsAlloc * DIM;
196     GMX_ASSERT(newForcesSize > 0, "Bad number of atoms in PME GPU");
197     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, newForcesSize,
198                            &pmeGpu->archSpecific->forcesSize, &pmeGpu->archSpecific->forcesSizeAlloc,
199                            pmeGpu->archSpecific->deviceContext_);
200     pmeGpu->staging.h_forces.reserveWithPadding(pmeGpu->nAtomsAlloc);
201     pmeGpu->staging.h_forces.resizeWithPadding(pmeGpu->kernelParams->atoms.nAtoms);
202 }
203
204 void pme_gpu_free_forces(const PmeGpu* pmeGpu)
205 {
206     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces);
207 }
208
209 void pme_gpu_copy_input_forces(PmeGpu* pmeGpu)
210 {
211     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
212     float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
213     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, h_forcesFloat, 0,
214                        DIM * pmeGpu->kernelParams->atoms.nAtoms, pmeGpu->archSpecific->pmeStream_,
215                        pmeGpu->settings.transferKind, nullptr);
216 }
217
218 void pme_gpu_copy_output_forces(PmeGpu* pmeGpu)
219 {
220     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
221     float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
222     copyFromDeviceBuffer(h_forcesFloat, &pmeGpu->kernelParams->atoms.d_forces, 0,
223                          DIM * pmeGpu->kernelParams->atoms.nAtoms, pmeGpu->archSpecific->pmeStream_,
224                          pmeGpu->settings.transferKind, nullptr);
225 }
226
227 void pme_gpu_realloc_and_copy_input_coefficients(PmeGpu* pmeGpu, const float* h_coefficients)
228 {
229     GMX_ASSERT(h_coefficients, "Bad host-side charge buffer in PME GPU");
230     const size_t newCoefficientsSize = pmeGpu->nAtomsAlloc;
231     GMX_ASSERT(newCoefficientsSize > 0, "Bad number of atoms in PME GPU");
232     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients, newCoefficientsSize,
233                            &pmeGpu->archSpecific->coefficientsSize,
234                            &pmeGpu->archSpecific->coefficientsSizeAlloc,
235                            pmeGpu->archSpecific->deviceContext_);
236     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients,
237                        const_cast<float*>(h_coefficients), 0, pmeGpu->kernelParams->atoms.nAtoms,
238                        pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
239
240     const size_t paddingIndex = pmeGpu->kernelParams->atoms.nAtoms;
241     const size_t paddingCount = pmeGpu->nAtomsAlloc - paddingIndex;
242     if (paddingCount > 0)
243     {
244         clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coefficients, paddingIndex,
245                                paddingCount, pmeGpu->archSpecific->pmeStream_);
246     }
247 }
248
249 void pme_gpu_free_coefficients(const PmeGpu* pmeGpu)
250 {
251     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients);
252 }
253
254 void pme_gpu_realloc_spline_data(PmeGpu* pmeGpu)
255 {
256     const int order             = pmeGpu->common->pme_order;
257     const int newSplineDataSize = DIM * order * pmeGpu->nAtomsAlloc;
258     GMX_ASSERT(newSplineDataSize > 0, "Bad number of atoms in PME GPU");
259     /* Two arrays of the same size */
260     const bool shouldRealloc        = (newSplineDataSize > pmeGpu->archSpecific->splineDataSize);
261     int        currentSizeTemp      = pmeGpu->archSpecific->splineDataSize;
262     int        currentSizeTempAlloc = pmeGpu->archSpecific->splineDataSizeAlloc;
263     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta, newSplineDataSize, &currentSizeTemp,
264                            &currentSizeTempAlloc, pmeGpu->archSpecific->deviceContext_);
265     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta, newSplineDataSize,
266                            &pmeGpu->archSpecific->splineDataSize, &pmeGpu->archSpecific->splineDataSizeAlloc,
267                            pmeGpu->archSpecific->deviceContext_);
268     // the host side reallocation
269     if (shouldRealloc)
270     {
271         pfree(pmeGpu->staging.h_theta);
272         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_theta), newSplineDataSize * sizeof(float));
273         pfree(pmeGpu->staging.h_dtheta);
274         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_dtheta), newSplineDataSize * sizeof(float));
275     }
276 }
277
278 void pme_gpu_free_spline_data(const PmeGpu* pmeGpu)
279 {
280     /* Two arrays of the same size */
281     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta);
282     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta);
283     pfree(pmeGpu->staging.h_theta);
284     pfree(pmeGpu->staging.h_dtheta);
285 }
286
287 void pme_gpu_realloc_grid_indices(PmeGpu* pmeGpu)
288 {
289     const size_t newIndicesSize = DIM * pmeGpu->nAtomsAlloc;
290     GMX_ASSERT(newIndicesSize > 0, "Bad number of atoms in PME GPU");
291     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices, newIndicesSize,
292                            &pmeGpu->archSpecific->gridlineIndicesSize,
293                            &pmeGpu->archSpecific->gridlineIndicesSizeAlloc,
294                            pmeGpu->archSpecific->deviceContext_);
295     pfree(pmeGpu->staging.h_gridlineIndices);
296     pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_gridlineIndices), newIndicesSize * sizeof(int));
297 }
298
299 void pme_gpu_free_grid_indices(const PmeGpu* pmeGpu)
300 {
301     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices);
302     pfree(pmeGpu->staging.h_gridlineIndices);
303 }
304
305 void pme_gpu_realloc_grids(PmeGpu* pmeGpu)
306 {
307     auto*     kernelParamsPtr = pmeGpu->kernelParams.get();
308     const int newRealGridSize = kernelParamsPtr->grid.realGridSizePadded[XX]
309                                 * kernelParamsPtr->grid.realGridSizePadded[YY]
310                                 * kernelParamsPtr->grid.realGridSizePadded[ZZ];
311     const int newComplexGridSize = kernelParamsPtr->grid.complexGridSizePadded[XX]
312                                    * kernelParamsPtr->grid.complexGridSizePadded[YY]
313                                    * kernelParamsPtr->grid.complexGridSizePadded[ZZ] * 2;
314     // Multiplied by 2 because the complex grid size is counted in complex numbers, while all allocations/pointers are float
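    // Illustrative sizing (hypothetical 64^3 grid with GPU FFT): the complex minor dimension is
    // 64/2 + 1 = 33, so newComplexGridSize = 64 * 64 * 33 * 2 = 270336 floats, whereas
    // newRealGridSize = 64^3 = 262144 floats; the in-place (else) branch below therefore
    // allocates the larger of the two.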
315     if (pmeGpu->archSpecific->performOutOfPlaceFFT)
316     {
317         /* 2 separate grids */
318         reallocateDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, newComplexGridSize,
319                                &pmeGpu->archSpecific->complexGridSize,
320                                &pmeGpu->archSpecific->complexGridSizeAlloc,
321                                pmeGpu->archSpecific->deviceContext_);
322         reallocateDeviceBuffer(
323                 &kernelParamsPtr->grid.d_realGrid, newRealGridSize, &pmeGpu->archSpecific->realGridSize,
324                 &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->deviceContext_);
325     }
326     else
327     {
328         /* A single buffer so that any grid will fit */
329         const int newGridsSize = std::max(newRealGridSize, newComplexGridSize);
330         reallocateDeviceBuffer(
331                 &kernelParamsPtr->grid.d_realGrid, newGridsSize, &pmeGpu->archSpecific->realGridSize,
332                 &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->deviceContext_);
333         kernelParamsPtr->grid.d_fourierGrid   = kernelParamsPtr->grid.d_realGrid;
334         pmeGpu->archSpecific->complexGridSize = pmeGpu->archSpecific->realGridSize;
335         // the size might get used later for copying the grid
336     }
337 }
338
339 void pme_gpu_free_grids(const PmeGpu* pmeGpu)
340 {
341     if (pmeGpu->archSpecific->performOutOfPlaceFFT)
342     {
343         freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_fourierGrid);
344     }
345     freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid);
346 }
347
348 void pme_gpu_clear_grids(const PmeGpu* pmeGpu)
349 {
350     clearDeviceBufferAsync(&pmeGpu->kernelParams->grid.d_realGrid, 0,
351                            pmeGpu->archSpecific->realGridSize, pmeGpu->archSpecific->pmeStream_);
352 }
353
354 void pme_gpu_realloc_and_copy_fract_shifts(PmeGpu* pmeGpu)
355 {
356     pme_gpu_free_fract_shifts(pmeGpu);
357
358     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
359
360     const int nx                  = kernelParamsPtr->grid.realGridSize[XX];
361     const int ny                  = kernelParamsPtr->grid.realGridSize[YY];
362     const int nz                  = kernelParamsPtr->grid.realGridSize[ZZ];
363     const int cellCount           = c_pmeNeighborUnitcellCount;
364     const int gridDataOffset[DIM] = { 0, cellCount * nx, cellCount * (nx + ny) };
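    // Illustrative layout (assuming, say, cellCount = 5 and a 48x48x48 grid): the concatenated
    // fractional-shift and gridline-index tables hold 5 * (48 + 48 + 48) = 720 entries, with the
    // Y block starting at offset 240 and the Z block at 480.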
365
366     memcpy(kernelParamsPtr->grid.tablesOffsets, &gridDataOffset, sizeof(gridDataOffset));
367
368     const int newFractShiftsSize = cellCount * (nx + ny + nz);
369
370 #if GMX_GPU == GMX_GPU_CUDA
371     initParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable, kernelParamsPtr->fractShiftsTableTexture,
372                          pmeGpu->common->fsh.data(), newFractShiftsSize);
373
374     initParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
375                          kernelParamsPtr->gridlineIndicesTableTexture, pmeGpu->common->nn.data(),
376                          newFractShiftsSize);
377 #elif GMX_GPU == GMX_GPU_OPENCL
378     // No dedicated texture routines in OpenCL; plain device buffers are used instead.
379     allocateDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, newFractShiftsSize,
380                          pmeGpu->archSpecific->deviceContext_);
381     allocateDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, newFractShiftsSize,
382                          pmeGpu->archSpecific->deviceContext_);
383     copyToDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, pmeGpu->common->fsh.data(), 0,
384                        newFractShiftsSize, pmeGpu->archSpecific->pmeStream_,
385                        GpuApiCallBehavior::Async, nullptr);
386     copyToDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, pmeGpu->common->nn.data(), 0,
387                        newFractShiftsSize, pmeGpu->archSpecific->pmeStream_,
388                        GpuApiCallBehavior::Async, nullptr);
389 #endif
390 }
391
392 void pme_gpu_free_fract_shifts(const PmeGpu* pmeGpu)
393 {
394     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
395 #if GMX_GPU == GMX_GPU_CUDA
396     destroyParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable,
397                             kernelParamsPtr->fractShiftsTableTexture);
398     destroyParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
399                             kernelParamsPtr->gridlineIndicesTableTexture);
400 #elif GMX_GPU == GMX_GPU_OPENCL
401     freeDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable);
402     freeDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable);
403 #endif
404 }
405
406 bool pme_gpu_stream_query(const PmeGpu* pmeGpu)
407 {
408     return haveStreamTasksCompleted(pmeGpu->archSpecific->pmeStream_);
409 }
410
411 void pme_gpu_copy_input_gather_grid(const PmeGpu* pmeGpu, float* h_grid)
412 {
413     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid, h_grid, 0, pmeGpu->archSpecific->realGridSize,
414                        pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
415 }
416
417 void pme_gpu_copy_output_spread_grid(const PmeGpu* pmeGpu, float* h_grid)
418 {
419     copyFromDeviceBuffer(h_grid, &pmeGpu->kernelParams->grid.d_realGrid, 0,
420                          pmeGpu->archSpecific->realGridSize, pmeGpu->archSpecific->pmeStream_,
421                          pmeGpu->settings.transferKind, nullptr);
422     pmeGpu->archSpecific->syncSpreadGridD2H.markEvent(pmeGpu->archSpecific->pmeStream_);
423 }
424
425 void pme_gpu_copy_output_spread_atom_data(const PmeGpu* pmeGpu)
426 {
427     const size_t splinesCount    = DIM * pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order;
428     auto*        kernelParamsPtr = pmeGpu->kernelParams.get();
429     copyFromDeviceBuffer(pmeGpu->staging.h_dtheta, &kernelParamsPtr->atoms.d_dtheta, 0, splinesCount,
430                          pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
431     copyFromDeviceBuffer(pmeGpu->staging.h_theta, &kernelParamsPtr->atoms.d_theta, 0, splinesCount,
432                          pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
433     copyFromDeviceBuffer(pmeGpu->staging.h_gridlineIndices, &kernelParamsPtr->atoms.d_gridlineIndices,
434                          0, kernelParamsPtr->atoms.nAtoms * DIM, pmeGpu->archSpecific->pmeStream_,
435                          pmeGpu->settings.transferKind, nullptr);
436 }
437
438 void pme_gpu_copy_input_gather_atom_data(const PmeGpu* pmeGpu)
439 {
440     const size_t splinesCount    = DIM * pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order;
441     auto*        kernelParamsPtr = pmeGpu->kernelParams.get();
442
443     // TODO: could clear only the padding and not the whole thing, but this is test-exclusive code anyway
444     clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_gridlineIndices, 0, pmeGpu->nAtomsAlloc * DIM,
445                            pmeGpu->archSpecific->pmeStream_);
446     clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_dtheta, 0,
447                            pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
448                            pmeGpu->archSpecific->pmeStream_);
449     clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_theta, 0,
450                            pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
451                            pmeGpu->archSpecific->pmeStream_);
452
453     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_dtheta, pmeGpu->staging.h_dtheta, 0, splinesCount,
454                        pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
455     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_theta, pmeGpu->staging.h_theta, 0, splinesCount,
456                        pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
457     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_gridlineIndices, pmeGpu->staging.h_gridlineIndices,
458                        0, kernelParamsPtr->atoms.nAtoms * DIM, pmeGpu->archSpecific->pmeStream_,
459                        pmeGpu->settings.transferKind, nullptr);
460 }
461
462 void pme_gpu_sync_spread_grid(const PmeGpu* pmeGpu)
463 {
464     pmeGpu->archSpecific->syncSpreadGridD2H.waitForEvent();
465 }
466
467 void pme_gpu_init_internal(PmeGpu* pmeGpu)
468 {
469 #if GMX_GPU == GMX_GPU_CUDA
470     // Prepare to use the device that this PME task was assigned earlier.
471     // Other entities, such as CUDA timing events, are known to implicitly use the device context.
472     CU_RET_ERR(cudaSetDevice(pmeGpu->deviceInfo->id), "Switching to PME CUDA device");
473 #endif
474
475     /* Allocate the target-specific structures */
476     pmeGpu->archSpecific.reset(new PmeGpuSpecific(pmeGpu->programHandle_->impl_->deviceContext_));
477     pmeGpu->kernelParams.reset(new PmeGpuKernelParams());
478
479     pmeGpu->archSpecific->performOutOfPlaceFFT = true;
480     /* This should give better performance, according to the cuFFT documentation.
481      * The performance seems to be the same though.
482      * TODO: PME could also try to pick up nice grid sizes (with factors of 2, 3, 5, 7).
483      */
484
485     // timing enabling - TODO put this in gpu_utils (even though generally this is just option handling?) and reuse in NB
486     if (GMX_GPU == GMX_GPU_CUDA)
487     {
488         /* WARNING: CUDA timings are incorrect with multiple streams.
489          *          This is the main reason why they are disabled by default.
490          */
491         // TODO: Consider turning on by default when we can detect the number of streams.
492         pmeGpu->archSpecific->useTiming = (getenv("GMX_ENABLE_GPU_TIMING") != nullptr);
493     }
494     else if (GMX_GPU == GMX_GPU_OPENCL)
495     {
496         pmeGpu->archSpecific->useTiming = (getenv("GMX_DISABLE_GPU_TIMING") == nullptr);
497     }
498
499 #if GMX_GPU == GMX_GPU_CUDA
500     pmeGpu->maxGridWidthX = pmeGpu->deviceInfo->prop.maxGridSize[0];
501 #elif GMX_GPU == GMX_GPU_OPENCL
502     pmeGpu->maxGridWidthX = INT32_MAX / 2;
503     // TODO: is there really no global work size limit in OpenCL?
504 #endif
505
506     /* Creating a PME GPU stream:
507      * - default high priority with CUDA
508      * - no priorities implemented yet with OpenCL; see #2532
509      */
510     pmeGpu->archSpecific->pmeStream_.init(*pmeGpu->deviceInfo, pmeGpu->archSpecific->deviceContext_,
511                                           DeviceStreamPriority::High, pmeGpu->archSpecific->useTiming);
512 }
513
514 void pme_gpu_reinit_3dfft(const PmeGpu* pmeGpu)
515 {
516     if (pme_gpu_settings(pmeGpu).performGPUFFT)
517     {
518         pmeGpu->archSpecific->fftSetup.resize(0);
519         for (int i = 0; i < pmeGpu->common->ngrids; i++)
520         {
521             pmeGpu->archSpecific->fftSetup.push_back(std::make_unique<GpuParallel3dFft>(pmeGpu));
522         }
523     }
524 }
525
526 void pme_gpu_destroy_3dfft(const PmeGpu* pmeGpu)
527 {
528     pmeGpu->archSpecific->fftSetup.resize(0);
529 }
530
531 int getSplineParamFullIndex(int order, int splineIndex, int dimIndex, int atomIndex, int atomsPerWarp)
532 {
533     if (order != c_pmeGpuOrder)
534     {
535         throw order;
536     }
537     constexpr int fixedOrder = c_pmeGpuOrder;
538     GMX_UNUSED_VALUE(fixedOrder);
539
540     const int atomWarpIndex = atomIndex % atomsPerWarp;
541     const int warpIndex     = atomIndex / atomsPerWarp;
542     int       indexBase, result;
543     switch (atomsPerWarp)
544     {
545         case 1:
546             indexBase = getSplineParamIndexBase<fixedOrder, 1>(warpIndex, atomWarpIndex);
547             result    = getSplineParamIndex<fixedOrder, 1>(indexBase, dimIndex, splineIndex);
548             break;
549
550         case 2:
551             indexBase = getSplineParamIndexBase<fixedOrder, 2>(warpIndex, atomWarpIndex);
552             result    = getSplineParamIndex<fixedOrder, 2>(indexBase, dimIndex, splineIndex);
553             break;
554
555         case 4:
556             indexBase = getSplineParamIndexBase<fixedOrder, 4>(warpIndex, atomWarpIndex);
557             result    = getSplineParamIndex<fixedOrder, 4>(indexBase, dimIndex, splineIndex);
558             break;
559
560         case 8:
561             indexBase = getSplineParamIndexBase<fixedOrder, 8>(warpIndex, atomWarpIndex);
562             result    = getSplineParamIndex<fixedOrder, 8>(indexBase, dimIndex, splineIndex);
563             break;
564
565         default:
566             GMX_THROW(gmx::NotImplementedError(
567                     gmx::formatString("Test function call not unrolled for atomsPerWarp = %d in "
568                                       "getSplineParamFullIndex",
569                                       atomsPerWarp)));
570     }
571     return result;
572 }
573
574 void pme_gpu_getEnergyAndVirial(const gmx_pme_t& pme, PmeOutput* output)
575 {
576     const PmeGpu* pmeGpu = pme.gpu;
577     for (int j = 0; j < c_virialAndEnergyCount; j++)
578     {
579         GMX_ASSERT(std::isfinite(pmeGpu->staging.h_virialAndEnergy[j]),
580                    "PME GPU produces incorrect energy/virial.");
581     }
582
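    // Descriptive note: the staging buffer holds the six unique virial components
    // (xx, yy, zz, xy, xz, yz) followed by the energy, unpacked in that order below;
    // the 0.25/0.5 prefactors follow the accumulation convention of the GPU solve kernel.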
583     unsigned int j                 = 0;
584     output->coulombVirial_[XX][XX] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
585     output->coulombVirial_[YY][YY] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
586     output->coulombVirial_[ZZ][ZZ] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
587     output->coulombVirial_[XX][YY] = output->coulombVirial_[YY][XX] =
588             0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
589     output->coulombVirial_[XX][ZZ] = output->coulombVirial_[ZZ][XX] =
590             0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
591     output->coulombVirial_[YY][ZZ] = output->coulombVirial_[ZZ][YY] =
592             0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
593     output->coulombEnergy_ = 0.5F * pmeGpu->staging.h_virialAndEnergy[j++];
594 }
595
596 /*! \brief Sets the force-related members in \p output
597  *
598  * \param[in]   pmeGpu      PME GPU data structure
599  * \param[out]  output      Pointer to PME output data structure
600  */
601 static void pme_gpu_getForceOutput(PmeGpu* pmeGpu, PmeOutput* output)
602 {
603     output->haveForceOutput_ = !pmeGpu->settings.useGpuForceReduction;
604     if (output->haveForceOutput_)
605     {
606         output->forces_ = pmeGpu->staging.h_forces;
607     }
608 }
609
610 PmeOutput pme_gpu_getOutput(const gmx_pme_t& pme, const bool computeEnergyAndVirial)
611 {
612     PmeGpu* pmeGpu = pme.gpu;
613
614     PmeOutput output;
615
616     pme_gpu_getForceOutput(pmeGpu, &output);
617
618     if (computeEnergyAndVirial)
619     {
620         if (pme_gpu_settings(pmeGpu).performGPUSolve)
621         {
622             pme_gpu_getEnergyAndVirial(pme, &output);
623         }
624         else
625         {
626             get_pme_ener_vir_q(pme.solve_work, pme.nthread, &output);
627         }
628     }
629     return output;
630 }
631
632 void pme_gpu_update_input_box(PmeGpu gmx_unused* pmeGpu, const matrix gmx_unused box)
633 {
634 #if GMX_DOUBLE
635     GMX_THROW(gmx::NotImplementedError("PME is implemented for single-precision only on GPU"));
636 #else
637     matrix scaledBox;
638     pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
639     auto* kernelParamsPtr              = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
640     kernelParamsPtr->current.boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
641     GMX_ASSERT(kernelParamsPtr->current.boxVolume != 0.0F, "Zero volume of the unit cell");
642     matrix recipBox;
643     gmx::invertBoxMatrix(scaledBox, recipBox);
644
645     /* The GPU recipBox is transposed as compared to the CPU recipBox.
646      * Spread uses matrix columns (while solve and gather use rows).
647      * There is no particular reason for this; it might be further rethought/optimized for better access patterns.
648      */
649     const real newRecipBox[DIM][DIM] = { { recipBox[XX][XX], recipBox[YY][XX], recipBox[ZZ][XX] },
650                                          { 0.0, recipBox[YY][YY], recipBox[ZZ][YY] },
651                                          { 0.0, 0.0, recipBox[ZZ][ZZ] } };
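    // Descriptive note: each row i of newRecipBox collects column i of recipBox (the transposition
    // described above); since GROMACS boxes, and hence recipBox, are lower-triangular, the
    // remaining elements are simply zero.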
652     memcpy(kernelParamsPtr->current.recipBox, newRecipBox, sizeof(matrix));
653 #endif
654 }
655
656 /*! \brief \libinternal
657  * (Re-)initializes all the PME GPU data related to the grid size and cut-off.
658  *
659  * \param[in] pmeGpu            The PME GPU structure.
660  */
661 static void pme_gpu_reinit_grids(PmeGpu* pmeGpu)
662 {
663     auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
664     kernelParamsPtr->grid.ewaldFactor =
665             (M_PI * M_PI) / (pmeGpu->common->ewaldcoeff_q * pmeGpu->common->ewaldcoeff_q);
666
667     /* The grid size variants */
668     for (int i = 0; i < DIM; i++)
669     {
670         kernelParamsPtr->grid.realGridSize[i] = pmeGpu->common->nk[i];
671         kernelParamsPtr->grid.realGridSizeFP[i] =
672                 static_cast<float>(kernelParamsPtr->grid.realGridSize[i]);
673         kernelParamsPtr->grid.realGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
674
675         // The complex grid currently uses no padding;
676         // if it starts to do so, then another test should be added for that
677         kernelParamsPtr->grid.complexGridSize[i]       = kernelParamsPtr->grid.realGridSize[i];
678         kernelParamsPtr->grid.complexGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
679     }
680     /* FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
681     if (!pme_gpu_settings(pmeGpu).performGPUFFT)
682     {
683         // This allows the GPU spreading grid and the CPU fftgrid to have the same layout, so that we can copy the data directly
684         kernelParamsPtr->grid.realGridSizePadded[ZZ] =
685                 (kernelParamsPtr->grid.realGridSize[ZZ] / 2 + 1) * 2;
686     }
687
688     /* GPU FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
689     kernelParamsPtr->grid.complexGridSize[ZZ] /= 2;
690     kernelParamsPtr->grid.complexGridSize[ZZ]++;
691     kernelParamsPtr->grid.complexGridSizePadded[ZZ] = kernelParamsPtr->grid.complexGridSize[ZZ];
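    // Illustrative example (hypothetical nz = 48): the GPU complex grid minor dimension becomes
    // 48/2 + 1 = 25 complex elements, while the CPU-FFT case above pads the real minor dimension
    // to (48/2 + 1) * 2 = 50 floats so that the layouts remain copy-compatible.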
692
693     pme_gpu_realloc_and_copy_fract_shifts(pmeGpu);
694     pme_gpu_realloc_and_copy_bspline_values(pmeGpu);
695     pme_gpu_realloc_grids(pmeGpu);
696     pme_gpu_reinit_3dfft(pmeGpu);
697 }
698
699 /* Several GPU functions that refer to the CPU PME data live here.
700  * We would like to keep these away from the GPU-framework specific code for clarity,
701  * as well as to avoid compilation issues with MPI.
702  */
703
704 /*! \brief \libinternal
705  * Copies everything useful from the PME CPU to the PME GPU structure.
706  * The goal is to minimize interaction with the PME CPU structure in the GPU code.
707  *
708  * \param[in] pme         The PME structure.
709  */
710 static void pme_gpu_copy_common_data_from(const gmx_pme_t* pme)
711 {
712     /* TODO: Consider refactoring the CPU PME code to use the same structure,
713      * so that this function becomes 2 lines */
714     PmeGpu* pmeGpu               = pme->gpu;
715     pmeGpu->common->ngrids       = pme->ngrids;
716     pmeGpu->common->epsilon_r    = pme->epsilon_r;
717     pmeGpu->common->ewaldcoeff_q = pme->ewaldcoeff_q;
718     pmeGpu->common->nk[XX]       = pme->nkx;
719     pmeGpu->common->nk[YY]       = pme->nky;
720     pmeGpu->common->nk[ZZ]       = pme->nkz;
721     pmeGpu->common->pme_order    = pme->pme_order;
722     if (pmeGpu->common->pme_order != c_pmeGpuOrder)
723     {
724         GMX_THROW(gmx::NotImplementedError("pme_order != 4 is not implemented!"));
725     }
726     for (int i = 0; i < DIM; i++)
727     {
728         pmeGpu->common->bsp_mod[i].assign(pme->bsp_mod[i], pme->bsp_mod[i] + pmeGpu->common->nk[i]);
729     }
730     const int cellCount = c_pmeNeighborUnitcellCount;
731     pmeGpu->common->fsh.resize(0);
732     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshx, pme->fshx + cellCount * pme->nkx);
733     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshy, pme->fshy + cellCount * pme->nky);
734     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshz, pme->fshz + cellCount * pme->nkz);
735     pmeGpu->common->nn.resize(0);
736     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnx, pme->nnx + cellCount * pme->nkx);
737     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nny, pme->nny + cellCount * pme->nky);
738     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnz, pme->nnz + cellCount * pme->nkz);
739     pmeGpu->common->runMode       = pme->runMode;
740     pmeGpu->common->isRankPmeOnly = !pme->bPPnode;
741     pmeGpu->common->boxScaler     = pme->boxScaler;
742 }
743
744 /*! \libinternal \brief
745  * Uses heuristics to select the best-performing PME spread and gather kernels
746  *
747  * \param[in,out] pmeGpu         The PME GPU structure.
748  */
749 static void pme_gpu_select_best_performing_pme_spreadgather_kernels(PmeGpu* pmeGpu)
750 {
751     if (pmeGpu->kernelParams->atoms.nAtoms > c_pmeGpuPerformanceAtomLimit && (GMX_GPU == GMX_GPU_CUDA))
752     {
753         pmeGpu->settings.useOrderThreadsPerAtom = true;
754         pmeGpu->settings.recalculateSplines     = true;
755     }
756     else
757     {
758         pmeGpu->settings.useOrderThreadsPerAtom = false;
759         pmeGpu->settings.recalculateSplines     = false;
760     }
761 }
762
763
764 /*! \libinternal \brief
765  * Initializes the PME GPU data at the beginning of the run.
766  * TODO: this should become PmeGpu::PmeGpu()
767  *
768  * \param[in,out] pme            The PME structure.
769  * \param[in,out] deviceInfo     The GPU device information structure.
770  * \param[in]     pmeGpuProgram  The handle to the program/kernel data created outside (e.g. in unit tests/runner)
771  */
772 static void pme_gpu_init(gmx_pme_t* pme, const DeviceInformation* deviceInfo, const PmeGpuProgram* pmeGpuProgram)
773 {
774     GMX_ASSERT(deviceInfo != nullptr,
775                "Device information can not be nullptr when GPU is used for PME.");
776     pme->gpu       = new PmeGpu();
777     PmeGpu* pmeGpu = pme->gpu;
778     changePinningPolicy(&pmeGpu->staging.h_forces, pme_get_pinning_policy());
779     pmeGpu->common = std::make_shared<PmeShared>();
780
781     /* These settings are set here for the whole run; dynamic ones are set in pme_gpu_reinit() */
782     /* A convenience variable. */
783     pmeGpu->settings.useDecomposition = (pme->nnodes != 1);
784     /* TODO: CPU gather with GPU spread is broken due to different theta/dtheta layout. */
785     pmeGpu->settings.performGPUGather = true;
786     // By default GPU-side reduction is off (explicitly set here for tests, otherwise reset per-step)
787     pmeGpu->settings.useGpuForceReduction = false;
788
789     pme_gpu_set_testing(pmeGpu, false);
790
791     pmeGpu->deviceInfo = deviceInfo;
792     GMX_ASSERT(pmeGpuProgram != nullptr, "GPU kernels must be already compiled");
793     pmeGpu->programHandle_ = pmeGpuProgram;
794
795     pmeGpu->initializedClfftLibrary_ = std::make_unique<gmx::ClfftInitializer>();
796
797     pme_gpu_init_internal(pmeGpu);
798     pme_gpu_alloc_energy_virial(pmeGpu);
799
800     pme_gpu_copy_common_data_from(pme);
801
802     GMX_ASSERT(pmeGpu->common->epsilon_r != 0.0F, "PME GPU: bad electrostatic coefficient");
803
804     auto* kernelParamsPtr               = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
805     kernelParamsPtr->constants.elFactor = ONE_4PI_EPS0 / pmeGpu->common->epsilon_r;
806 }
807
808 void pme_gpu_transform_spline_atom_data(const PmeGpu*      pmeGpu,
809                                         const PmeAtomComm* atc,
810                                         PmeSplineDataType  type,
811                                         int                dimIndex,
812                                         PmeLayoutTransform transform)
813 {
814     // The GPU atom spline data is laid out in a different way currently than the CPU one.
815     // This function converts the data from GPU to CPU layout (in the host memory).
816     // It is only intended for testing purposes so far.
817     // Ideally we should use similar layouts on CPU and GPU if we care about mixed modes and their
818     // performance (e.g. spreading on GPU, gathering on CPU).
819     GMX_RELEASE_ASSERT(atc->nthread == 1, "Only the serial PME data layout is supported");
820     const uintmax_t threadIndex  = 0;
821     const auto      atomCount    = pme_gpu_get_kernel_params_base_ptr(pmeGpu)->atoms.nAtoms;
822     const auto      atomsPerWarp = pme_gpu_get_atoms_per_warp(pmeGpu);
823     const auto      pmeOrder     = pmeGpu->common->pme_order;
824     GMX_ASSERT(pmeOrder == c_pmeGpuOrder, "Only PME order 4 is implemented");
825
826     real*  cpuSplineBuffer;
827     float* h_splineBuffer;
828     switch (type)
829     {
830         case PmeSplineDataType::Values:
831             cpuSplineBuffer = atc->spline[threadIndex].theta.coefficients[dimIndex];
832             h_splineBuffer  = pmeGpu->staging.h_theta;
833             break;
834
835         case PmeSplineDataType::Derivatives:
836             cpuSplineBuffer = atc->spline[threadIndex].dtheta.coefficients[dimIndex];
837             h_splineBuffer  = pmeGpu->staging.h_dtheta;
838             break;
839
840         default: GMX_THROW(gmx::InternalError("Unknown spline data type"));
841     }
842
843     for (auto atomIndex = 0; atomIndex < atomCount; atomIndex++)
844     {
845         for (auto orderIndex = 0; orderIndex < pmeOrder; orderIndex++)
846         {
847             const auto gpuValueIndex =
848                     getSplineParamFullIndex(pmeOrder, orderIndex, dimIndex, atomIndex, atomsPerWarp);
849             const auto cpuValueIndex = atomIndex * pmeOrder + orderIndex;
850             GMX_ASSERT(cpuValueIndex < atomCount * pmeOrder,
851                        "Atom spline data index out of bounds (while transforming GPU data layout "
852                        "for host)");
853             switch (transform)
854             {
855                 case PmeLayoutTransform::GpuToHost:
856                     cpuSplineBuffer[cpuValueIndex] = h_splineBuffer[gpuValueIndex];
857                     break;
858
859                 case PmeLayoutTransform::HostToGpu:
860                     h_splineBuffer[gpuValueIndex] = cpuSplineBuffer[cpuValueIndex];
861                     break;
862
863                 default: GMX_THROW(gmx::InternalError("Unknown layout transform"));
864             }
865         }
866     }
867 }
868
869 void pme_gpu_get_real_grid_sizes(const PmeGpu* pmeGpu, gmx::IVec* gridSize, gmx::IVec* paddedGridSize)
870 {
871     GMX_ASSERT(gridSize != nullptr, "");
872     GMX_ASSERT(paddedGridSize != nullptr, "");
873     GMX_ASSERT(pmeGpu != nullptr, "");
874     auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
875     for (int i = 0; i < DIM; i++)
876     {
877         (*gridSize)[i]       = kernelParamsPtr->grid.realGridSize[i];
878         (*paddedGridSize)[i] = kernelParamsPtr->grid.realGridSizePadded[i];
879     }
880 }
881
882 void pme_gpu_reinit(gmx_pme_t* pme, const DeviceInformation* deviceInfo, const PmeGpuProgram* pmeGpuProgram)
883 {
884     GMX_ASSERT(pme != nullptr, "Need valid PME object");
885     if (pme->runMode == PmeRunMode::CPU)
886     {
887         GMX_ASSERT(pme->gpu == nullptr, "Should not have PME GPU object");
888         return;
889     }
890
891     if (!pme->gpu)
892     {
893         /* First-time initialization */
894         pme_gpu_init(pme, deviceInfo, pmeGpuProgram);
895     }
896     else
897     {
898         /* After this call nothing in the GPU code should refer to the gmx_pme_t *pme itself - until the next pme_gpu_reinit */
899         pme_gpu_copy_common_data_from(pme);
900     }
901     /* GPU FFT will only get used for a single rank. */
902     pme->gpu->settings.performGPUFFT =
903             (pme->gpu->common->runMode == PmeRunMode::GPU) && !pme->gpu->settings.useDecomposition;
904     pme->gpu->settings.performGPUSolve = (pme->gpu->common->runMode == PmeRunMode::GPU);
905
906     /* Reinit active timers */
907     pme_gpu_reinit_timings(pme->gpu);
908
909     pme_gpu_reinit_grids(pme->gpu);
910     // Note: if timing the reinit launch overhead becomes more relevant
911     // (e.g. with regular PP-PME re-balancing), we should pass wcycle here.
912     pme_gpu_reinit_computation(pme, nullptr);
913     /* Clear the previous box - doesn't hurt, and forces the PME CPU recipbox
914      * update for mixed mode on grid switch. TODO: use shared recipbox field.
915      */
916     std::memset(pme->gpu->common->previousBox, 0, sizeof(pme->gpu->common->previousBox));
917 }
918
919 void pme_gpu_destroy(PmeGpu* pmeGpu)
920 {
921     /* Free lots of data */
922     pme_gpu_free_energy_virial(pmeGpu);
923     pme_gpu_free_bspline_values(pmeGpu);
924     pme_gpu_free_forces(pmeGpu);
925     pme_gpu_free_coefficients(pmeGpu);
926     pme_gpu_free_spline_data(pmeGpu);
927     pme_gpu_free_grid_indices(pmeGpu);
928     pme_gpu_free_fract_shifts(pmeGpu);
929     pme_gpu_free_grids(pmeGpu);
930
931     pme_gpu_destroy_3dfft(pmeGpu);
932
933     delete pmeGpu;
934 }
935
936 void pme_gpu_reinit_atoms(PmeGpu* pmeGpu, const int nAtoms, const real* charges)
937 {
938     auto* kernelParamsPtr         = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
939     kernelParamsPtr->atoms.nAtoms = nAtoms;
940     const int  block_size         = pme_gpu_get_atom_data_block_size();
941     const int  nAtomsNewPadded    = ((nAtoms + block_size - 1) / block_size) * block_size;
942     const bool haveToRealloc      = (pmeGpu->nAtomsAlloc < nAtomsNewPadded);
943     pmeGpu->nAtomsAlloc           = nAtomsNewPadded;
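    // Illustrative padding arithmetic (assuming, for example, a block size of 64): nAtoms = 1000
    // rounds up to nAtomsNewPadded = ((1000 + 63) / 64) * 64 = 1024, and reallocation is triggered
    // only if the previous allocation was smaller than that.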
944
945 #if GMX_DOUBLE
946     GMX_RELEASE_ASSERT(false, "Only single precision supported");
947     GMX_UNUSED_VALUE(charges);
948 #else
949     pme_gpu_realloc_and_copy_input_coefficients(pmeGpu, reinterpret_cast<const float*>(charges));
950     /* Could also be checked for haveToRealloc, but the copy always needs to be performed */
951 #endif
952
953     if (haveToRealloc)
954     {
955         pme_gpu_realloc_forces(pmeGpu);
956         pme_gpu_realloc_spline_data(pmeGpu);
957         pme_gpu_realloc_grid_indices(pmeGpu);
958     }
959     pme_gpu_select_best_performing_pme_spreadgather_kernels(pmeGpu);
960 }
961
962 /*! \internal \brief
963  * Returns the raw timing event from the corresponding GpuRegionTimer (if timings are enabled).
964  * In CUDA, the result can be a nullptr stub, per the GpuRegionTimer implementation.
965  *
966  * \param[in] pmeGpu         The PME GPU data structure.
967  * \param[in] PMEStageId     The PME GPU stage gtPME_ index from the enum in src/gromacs/timing/gpu_timing.h
968  */
969 static CommandEvent* pme_gpu_fetch_timing_event(const PmeGpu* pmeGpu, size_t PMEStageId)
970 {
971     CommandEvent* timingEvent = nullptr;
972     if (pme_gpu_timings_enabled(pmeGpu))
973     {
974         GMX_ASSERT(PMEStageId < pmeGpu->archSpecific->timingEvents.size(),
975                    "Wrong PME GPU timing event index");
976         timingEvent = pmeGpu->archSpecific->timingEvents[PMEStageId].fetchNextEvent();
977     }
978     return timingEvent;
979 }
980
981 void pme_gpu_3dfft(const PmeGpu* pmeGpu, gmx_fft_direction dir, int grid_index)
982 {
983     int timerId = (dir == GMX_FFT_REAL_TO_COMPLEX) ? gtPME_FFT_R2C : gtPME_FFT_C2R;
984
985     pme_gpu_start_timing(pmeGpu, timerId);
986     pmeGpu->archSpecific->fftSetup[grid_index]->perform3dFft(
987             dir, pme_gpu_fetch_timing_event(pmeGpu, timerId));
988     pme_gpu_stop_timing(pmeGpu, timerId);
989 }
990
991 /*! \brief
992  * Given possibly large \p blockCount, returns a compact 1D or 2D grid for kernel scheduling,
993  * to minimize the number of unused blocks.
994  */
995 std::pair<int, int> inline pmeGpuCreateGrid(const PmeGpu* pmeGpu, int blockCount)
996 {
997     // How many maximum widths in X do we need (hopefully just one)
998     const int minRowCount = (blockCount + pmeGpu->maxGridWidthX - 1) / pmeGpu->maxGridWidthX;
999     // Trying to make things even
1000     const int colCount = (blockCount + minRowCount - 1) / minRowCount;
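    // Illustrative example (assuming, say, maxGridWidthX = 65535): blockCount = 100000 gives
    // minRowCount = 2 and colCount = 50000, i.e. a 50000 x 2 launch grid with no unused blocks.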
1001     GMX_ASSERT((colCount * minRowCount - blockCount) >= 0, "pmeGpuCreateGrid: totally wrong");
1002     GMX_ASSERT((colCount * minRowCount - blockCount) < minRowCount,
1003                "pmeGpuCreateGrid: excessive blocks");
1004     return std::pair<int, int>(colCount, minRowCount);
1005 }
1006
1007 /*! \brief
1008  * Returns a pointer to the appropriate spline and spread kernel based on the input bool values
1009  *
1010  * \param[in]  pmeGpu                   The PME GPU structure.
1011  * \param[in]  useOrderThreadsPerAtom   bool controlling whether we should use order or order*order threads per atom
1012  * \param[in]  writeSplinesToGlobal     bool controlling whether we should write spline data to global memory
1013  *
1014  * \return Pointer to CUDA kernel
1015  */
1016 static auto selectSplineAndSpreadKernelPtr(const PmeGpu* pmeGpu, bool useOrderThreadsPerAtom, bool writeSplinesToGlobal)
1017 {
1018     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1019     if (writeSplinesToGlobal)
1020     {
1021         if (useOrderThreadsPerAtom)
1022         {
1023             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesThPerAtom4;
1024         }
1025         else
1026         {
1027             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplines;
1028         }
1029     }
1030     else
1031     {
1032         if (useOrderThreadsPerAtom)
1033         {
1034             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4;
1035         }
1036         else
1037         {
1038             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernel;
1039         }
1040     }
1041
1042     return kernelPtr;
1043 }
1044
1045 /*! \brief
1046  * Returns a pointer to the appropriate spline kernel based on the input bool values
1047  *
1048  * \param[in]  pmeGpu                   The PME GPU structure.
1049  * \param[in]  useOrderThreadsPerAtom   bool controlling whether we should use order or order*order threads per atom
1050  * \param[in]  writeSplinesToGlobal     bool controlling whether we should write spline data to global memory
1051  *
1052  * \return Pointer to CUDA kernel
1053  */
1054 static auto selectSplineKernelPtr(const PmeGpu* pmeGpu, bool useOrderThreadsPerAtom, bool gmx_unused writeSplinesToGlobal)
1055 {
1056     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1057     GMX_ASSERT(
1058             writeSplinesToGlobal,
1059             "Spline data should always be written to global memory when just calculating splines");
1060
1061     if (useOrderThreadsPerAtom)
1062     {
1063         kernelPtr = pmeGpu->programHandle_->impl_->splineKernelThPerAtom4;
1064     }
1065     else
1066     {
1067         kernelPtr = pmeGpu->programHandle_->impl_->splineKernel;
1068     }
1069     return kernelPtr;
1070 }
1071
1072 /*! \brief
1073  * Returns a pointer to the appropriate spread kernel based on the input bool values
1074  *
1075  * \param[in]  pmeGpu                   The PME GPU structure.
1076  * \param[in]  useOrderThreadsPerAtom   bool controlling whether we should use order or order*order threads per atom
1077  * \param[in]  writeSplinesToGlobal     bool controlling whether we should write spline data to global memory
1078  *
1079  * \return Pointer to CUDA kernel
1080  */
1081 static auto selectSpreadKernelPtr(const PmeGpu* pmeGpu, bool useOrderThreadsPerAtom, bool writeSplinesToGlobal)
1082 {
1083     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1084     if (writeSplinesToGlobal)
1085     {
1086         if (useOrderThreadsPerAtom)
1087         {
1088             kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelThPerAtom4;
1089         }
1090         else
1091         {
1092             kernelPtr = pmeGpu->programHandle_->impl_->spreadKernel;
1093         }
1094     }
1095     else
1096     {
1097         /* If we are not saving the spline data, we need to recalculate it
1098            using the spline-and-spread kernel */
1099         if (useOrderThreadsPerAtom)
1100         {
1101             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4;
1102         }
1103         else
1104         {
1105             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernel;
1106         }
1107     }
1108     return kernelPtr;
1109 }
1110
1111 void pme_gpu_spread(const PmeGpu*         pmeGpu,
1112                     GpuEventSynchronizer* xReadyOnDevice,
1113                     int gmx_unused gridIndex,
1114                     real*          h_grid,
1115                     bool           computeSplines,
1116                     bool           spreadCharges)
1117 {
1118     GMX_ASSERT(computeSplines || spreadCharges,
1119                "PME spline/spread kernel has invalid input (nothing to do)");
1120     const auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1121     GMX_ASSERT(kernelParamsPtr->atoms.nAtoms > 0, "No atom data in PME GPU spread");
1122
1123     const size_t blockSize = pmeGpu->programHandle_->impl_->spreadWorkGroupSize;
1124
1125     const int order = pmeGpu->common->pme_order;
1126     GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1127     const bool writeGlobal            = pmeGpu->settings.copyAllOutputs;
1128     const bool useOrderThreadsPerAtom = pmeGpu->settings.useOrderThreadsPerAtom;
1129     const bool recalculateSplines     = pmeGpu->settings.recalculateSplines;
1130 #if GMX_GPU == GMX_GPU_OPENCL
1131     GMX_ASSERT(!useOrderThreadsPerAtom, "Only 16 threads per atom supported in OpenCL");
1132     GMX_ASSERT(!recalculateSplines, "Recalculating splines not supported in OpenCL");
1133 #endif
1134     const int atomsPerBlock = useOrderThreadsPerAtom ? blockSize / c_pmeSpreadGatherThreadsPerAtom4ThPerAtom
1135                                                      : blockSize / c_pmeSpreadGatherThreadsPerAtom;
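    // Illustrative values (assuming a spread work-group size of 256 with 16 threads per atom,
    // or 4 in the order-threads-per-atom mode): atomsPerBlock would be 16 or 64, respectively.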
1136
1137     // TODO: pick a smaller block size at runtime if needed
1138     // (e.g. on 660 Ti where 50% occupancy is ~25% faster than 100% occupancy with RNAse (~17.8k atoms)).
1139     // If doing so, change atomsPerBlock in the kernels as well.
1140     // TODO: test varying block sizes on modern architectures as well.
1141     // TODO: also consider using cudaFuncSetCacheConfig() for preferring shared memory on older architectures
1142     // (for spline data mostly).
1143     GMX_ASSERT(!(c_pmeAtomDataBlockSize % atomsPerBlock),
1144                "inconsistent atom data padding vs. spreading block size");
1145
1146     // Ensure that coordinates are ready on the device before launching spread;
1147     // only needed with CUDA on PP+PME ranks; not on separate PME ranks, in unit tests,
1148     // or in OpenCL, as these cases use a single stream (hence xReadyOnDevice == nullptr).
1149     GMX_ASSERT(xReadyOnDevice != nullptr || (GMX_GPU != GMX_GPU_CUDA)
1150                        || pmeGpu->common->isRankPmeOnly || pme_gpu_settings(pmeGpu).copyAllOutputs,
1151                "Need a valid coordinate synchronizer on PP+PME ranks with CUDA.");
1152     if (xReadyOnDevice)
1153     {
1154         xReadyOnDevice->enqueueWaitEvent(pmeGpu->archSpecific->pmeStream_);
1155     }
1156
1157     const int blockCount = pmeGpu->nAtomsAlloc / atomsPerBlock;
1158     auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1159
1160     KernelLaunchConfig config;
1161     config.blockSize[0] = order;
1162     config.blockSize[1] = useOrderThreadsPerAtom ? 1 : order;
1163     config.blockSize[2] = atomsPerBlock;
1164     config.gridSize[0]  = dimGrid.first;
1165     config.gridSize[1]  = dimGrid.second;
1166     config.stream       = pmeGpu->archSpecific->pmeStream_.stream();
1167
1168     int                                timingId;
1169     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1170     if (computeSplines)
1171     {
1172         if (spreadCharges)
1173         {
1174             timingId  = gtPME_SPLINEANDSPREAD;
1175             kernelPtr = selectSplineAndSpreadKernelPtr(pmeGpu, useOrderThreadsPerAtom,
1176                                                        writeGlobal || (!recalculateSplines));
1177         }
1178         else
1179         {
1180             timingId  = gtPME_SPLINE;
1181             kernelPtr = selectSplineKernelPtr(pmeGpu, useOrderThreadsPerAtom,
1182                                               writeGlobal || (!recalculateSplines));
1183         }
1184     }
1185     else
1186     {
1187         timingId  = gtPME_SPREAD;
1188         kernelPtr = selectSpreadKernelPtr(pmeGpu, useOrderThreadsPerAtom,
1189                                           writeGlobal || (!recalculateSplines));
1190     }
1191
1192
1193     pme_gpu_start_timing(pmeGpu, timingId);
1194     auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1195 #if c_canEmbedBuffers
1196     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1197 #else
1198     const auto kernelArgs = prepareGpuKernelArguments(
1199             kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->atoms.d_theta,
1200             &kernelParamsPtr->atoms.d_dtheta, &kernelParamsPtr->atoms.d_gridlineIndices,
1201             &kernelParamsPtr->grid.d_realGrid, &kernelParamsPtr->grid.d_fractShiftsTable,
1202             &kernelParamsPtr->grid.d_gridlineIndicesTable, &kernelParamsPtr->atoms.d_coefficients,
1203             &kernelParamsPtr->atoms.d_coordinates);
1204 #endif
1205
1206     launchGpuKernel(kernelPtr, config, timingEvent, "PME spline/spread", kernelArgs);
1207     pme_gpu_stop_timing(pmeGpu, timingId);
1208
1209     const auto& settings    = pmeGpu->settings;
1210     const bool copyBackGrid = spreadCharges && (!settings.performGPUFFT || settings.copyAllOutputs);
1211     if (copyBackGrid)
1212     {
1213         pme_gpu_copy_output_spread_grid(pmeGpu, h_grid);
1214     }
1215     const bool copyBackAtomData =
1216             computeSplines && (!settings.performGPUGather || settings.copyAllOutputs);
1217     if (copyBackAtomData)
1218     {
1219         pme_gpu_copy_output_spread_atom_data(pmeGpu);
1220     }
1221 }
1222
1223 void pme_gpu_solve(const PmeGpu* pmeGpu, t_complex* h_grid, GridOrdering gridOrdering, bool computeEnergyAndVirial)
1224 {
1225     const auto& settings               = pmeGpu->settings;
1226     const bool  copyInputAndOutputGrid = !settings.performGPUFFT || settings.copyAllOutputs;
1227
1228     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1229
1230     float* h_gridFloat = reinterpret_cast<float*>(h_grid);
1231     if (copyInputAndOutputGrid)
1232     {
1233         copyToDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, h_gridFloat, 0,
1234                            pmeGpu->archSpecific->complexGridSize, pmeGpu->archSpecific->pmeStream_,
1235                            pmeGpu->settings.transferKind, nullptr);
1236     }
1237
1238     int majorDim = -1, middleDim = -1, minorDim = -1;
1239     switch (gridOrdering)
1240     {
1241         case GridOrdering::YZX:
1242             majorDim  = YY;
1243             middleDim = ZZ;
1244             minorDim  = XX;
1245             break;
1246
1247         case GridOrdering::XYZ:
1248             majorDim  = XX;
1249             middleDim = YY;
1250             minorDim  = ZZ;
1251             break;
1252
1253         default: GMX_ASSERT(false, "Implement grid ordering here and below for the kernel launch");
1254     }
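    // Layout sketch: the major index varies slowest and the minor index fastest, so conceptually
    // (ignoring padding) a complex-grid cell is addressed as
    //     index = (major * complexGridSize[middleDim] + middle) * complexGridSize[minorDim] + minor;
    // and a "grid line" below is one contiguous run along the minor dimension (X for YZX
    // ordering, Z for XYZ ordering).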
1255
1256     const int maxBlockSize = pmeGpu->programHandle_->impl_->solveMaxWorkGroupSize;
1257
1258     const int gridLineSize      = pmeGpu->kernelParams->grid.complexGridSize[minorDim];
1259     const int gridLinesPerBlock = std::max(maxBlockSize / gridLineSize, 1);
1260     const int blocksPerGridLine = (gridLineSize + maxBlockSize - 1) / maxBlockSize;
1261     int       cellsPerBlock;
1262     if (blocksPerGridLine == 1)
1263     {
1264         cellsPerBlock = gridLineSize * gridLinesPerBlock;
1265     }
1266     else
1267     {
1268         cellsPerBlock = (gridLineSize + blocksPerGridLine - 1) / blocksPerGridLine;
1269     }
1270     const int warpSize  = pmeGpu->programHandle_->impl_->warpSize;
1271     // rounding the block size up to full warps so that shuffle operations produce defined results
1272     const int blockSize = (cellsPerBlock + warpSize - 1) / warpSize * warpSize;
1273
1274     static_assert(GMX_GPU != GMX_GPU_CUDA || c_solveMaxWarpsPerBlock / 2 >= 4,
1275                   "The CUDA solve energy kernels need at least 4 warps. "
1276                   "Here we launch at least half of the max warps.");
1277
1278     KernelLaunchConfig config;
1279     config.blockSize[0] = blockSize;
1280     config.gridSize[0]  = blocksPerGridLine;
1281     config.gridSize[1] = (pmeGpu->kernelParams->grid.complexGridSize[middleDim] + gridLinesPerBlock - 1)
1282                          / gridLinesPerBlock;
1283     config.gridSize[2] = pmeGpu->kernelParams->grid.complexGridSize[majorDim];
1284     config.stream      = pmeGpu->archSpecific->pmeStream_.stream();
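    // Worked example with illustrative numbers (assuming solveMaxWorkGroupSize = 256, warpSize = 32,
    // and a 65-element complex grid line, i.e. a 128-point real minor dimension):
    //     gridLinesPerBlock = max(256 / 65, 1)    = 3
    //     blocksPerGridLine = (65 + 255) / 256    = 1
    //     cellsPerBlock     = 65 * 3              = 195
    //     blockSize         = ceil(195 / 32) * 32 = 224 threads
    // i.e. each block solves three whole grid lines, while gridSize[1] and gridSize[2] tile the
    // middle and major dimensions.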
1285
1286     int                                timingId  = gtPME_SOLVE;
1287     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1288     if (gridOrdering == GridOrdering::YZX)
1289     {
1290         kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveYZXEnergyKernel
1291                                            : pmeGpu->programHandle_->impl_->solveYZXKernel;
1292     }
1293     else if (gridOrdering == GridOrdering::XYZ)
1294     {
1295         kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveXYZEnergyKernel
1296                                            : pmeGpu->programHandle_->impl_->solveXYZKernel;
1297     }
1298
1299     pme_gpu_start_timing(pmeGpu, timingId);
1300     auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1301 #if c_canEmbedBuffers
1302     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1303 #else
1304     const auto kernelArgs = prepareGpuKernelArguments(
1305             kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->grid.d_splineModuli,
1306             &kernelParamsPtr->constants.d_virialAndEnergy, &kernelParamsPtr->grid.d_fourierGrid);
1307 #endif
1308     launchGpuKernel(kernelPtr, config, timingEvent, "PME solve", kernelArgs);
1309     pme_gpu_stop_timing(pmeGpu, timingId);
1310
1311     if (computeEnergyAndVirial)
1312     {
1313         copyFromDeviceBuffer(pmeGpu->staging.h_virialAndEnergy,
1314                              &kernelParamsPtr->constants.d_virialAndEnergy, 0, c_virialAndEnergyCount,
1315                              pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
1316     }
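    // Note: only the D2H copy is enqueued here; the staging h_virialAndEnergy buffer must not be
    // read on the host until the PME stream (or a completion event on it) has been synchronized.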
1317
1318     if (copyInputAndOutputGrid)
1319     {
1320         copyFromDeviceBuffer(h_gridFloat, &kernelParamsPtr->grid.d_fourierGrid, 0,
1321                              pmeGpu->archSpecific->complexGridSize, pmeGpu->archSpecific->pmeStream_,
1322                              pmeGpu->settings.transferKind, nullptr);
1323     }
1324 }
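/* Stage-ordering note (summary of the logic above, not new functionality): within the single PME
 * stream the reciprocal-space work runs as spread -> 3D FFT R2C -> solve -> 3D FFT C2R -> gather.
 * When settings.performGPUFFT is false the FFTs run on the CPU instead, which is why solve copies
 * the complex grid in and out of the device (copyInputAndOutputGrid) and why spread/gather copy
 * the real grid whenever !performGPUFFT || copyAllOutputs.
 */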
1325
1326 /*! \brief
1327  * Returns a pointer to the appropriate gather kernel based on the input values.
1328  *
1329  * \param[in]  pmeGpu                   The PME GPU structure.
1330  * \param[in]  useOrderThreadsPerAtom   bool controlling if we should use order or order*order threads per atom
1331  * \param[in]  readSplinesFromGlobal    bool controlling if we should read the spline data from global memory
1332  *
1333  * \return Pointer to CUDA kernel
1334  */
1335 inline auto selectGatherKernelPtr(const PmeGpu* pmeGpu, bool useOrderThreadsPerAtom, bool readSplinesFromGlobal)
1336 {
1338     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1339
1340     if (readSplinesFromGlobal)
1341     {
1342         if (useOrderThreadsPerAtom)
1343         {
1344             kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesThPerAtom4;
1345         }
1346         else
1347         {
1348             kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplines;
1349         }
1350     }
1351     else
1352     {
1353         if (useOrderThreadsPerAtom)
1354         {
1355             kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelThPerAtom4;
1356         }
1357         else
1358         {
1359             kernelPtr = pmeGpu->programHandle_->impl_->gatherKernel;
1360         }
1361     }
1362     return kernelPtr;
1363 }
1364
1365
1366 void pme_gpu_gather(PmeGpu* pmeGpu, const float* h_grid)
1367 {
1368     const auto& settings = pmeGpu->settings;
1369     if (!settings.performGPUFFT || settings.copyAllOutputs)
1370     {
1371         pme_gpu_copy_input_gather_grid(pmeGpu, const_cast<float*>(h_grid));
1372     }
1373
1374     if (settings.copyAllOutputs)
1375     {
1376         pme_gpu_copy_input_gather_atom_data(pmeGpu);
1377     }
1378
1379     /* Only set when running unit tests, which copy all intermediate outputs */
1380     const bool   readGlobal             = pmeGpu->settings.copyAllOutputs;
1381     const size_t blockSize              = pmeGpu->programHandle_->impl_->gatherWorkGroupSize;
1382     const bool   useOrderThreadsPerAtom = pmeGpu->settings.useOrderThreadsPerAtom;
1383     const bool   recalculateSplines     = pmeGpu->settings.recalculateSplines;
1384 #if GMX_GPU == GMX_GPU_OPENCL
1385     GMX_ASSERT(!useOrderThreadsPerAtom, "Only 16 threads per atom supported in OpenCL");
1386     GMX_ASSERT(!recalculateSplines, "Recalculating splines not supported in OpenCL");
1387 #endif
1388     const int atomsPerBlock = useOrderThreadsPerAtom ? blockSize / c_pmeSpreadGatherThreadsPerAtom4ThPerAtom
1389                                                      : blockSize / c_pmeSpreadGatherThreadsPerAtom;
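    // Illustrative arithmetic (assuming a work-group size of 128, with order*order = 16 threads
    // per atom in the default case and order = 4 threads per atom otherwise):
    //     atomsPerBlock = 128 / 16 = 8,  or  128 / 4 = 32 with useOrderThreadsPerAtom.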
1390
1391     GMX_ASSERT(!(c_pmeAtomDataBlockSize % atomsPerBlock),
1392                "inconsistent atom data padding vs. gathering block size");
1393
1394     const int blockCount = pmeGpu->nAtomsAlloc / atomsPerBlock;
1395     auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1396
1397     const int order = pmeGpu->common->pme_order;
1398     GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1399
1400     KernelLaunchConfig config;
1401     config.blockSize[0] = order;
1402     config.blockSize[1] = useOrderThreadsPerAtom ? 1 : order;
1403     config.blockSize[2] = atomsPerBlock;
1404     config.gridSize[0]  = dimGrid.first;
1405     config.gridSize[1]  = dimGrid.second;
1406     config.stream       = pmeGpu->archSpecific->pmeStream_.stream();
1407
1408     // TODO test different cache configs
1409
1410     int                                timingId = gtPME_GATHER;
1411     PmeGpuProgramImpl::PmeKernelHandle kernelPtr =
1412             selectGatherKernelPtr(pmeGpu, useOrderThreadsPerAtom, readGlobal || (!recalculateSplines));
1413     // TODO design kernel selection getters and make PmeGpu a friend of PmeGpuProgramImpl
1414
1415     pme_gpu_start_timing(pmeGpu, timingId);
1416     auto*       timingEvent     = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1417     const auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1418 #if c_canEmbedBuffers
1419     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1420 #else
1421     const auto kernelArgs = prepareGpuKernelArguments(
1422             kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->atoms.d_coefficients,
1423             &kernelParamsPtr->grid.d_realGrid, &kernelParamsPtr->atoms.d_theta,
1424             &kernelParamsPtr->atoms.d_dtheta, &kernelParamsPtr->atoms.d_gridlineIndices,
1425             &kernelParamsPtr->atoms.d_forces);
1426 #endif
1427     launchGpuKernel(kernelPtr, config, timingEvent, "PME gather", kernelArgs);
1428     pme_gpu_stop_timing(pmeGpu, timingId);
1429
1430     if (pmeGpu->settings.useGpuForceReduction)
1431     {
1432         pmeGpu->archSpecific->pmeForcesReady.markEvent(pmeGpu->archSpecific->pmeStream_);
1433     }
1434     else
1435     {
1436         pme_gpu_copy_output_forces(pmeGpu);
1437     }
1438 }
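/* Consumer-side sketch (hypothetical call site, not code from this file): with useGpuForceReduction
 * the force-reduction task orders itself after the gather by enqueueing a wait on the synchronizer
 * exposed below, e.g.
 *     pme_gpu_get_forces_ready_synchronizer(pmeGpu)->enqueueWaitEvent(forceReductionStream);
 * instead of relying on the D2H force copy done in the else-branch above.
 */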
1439
1440 void* pme_gpu_get_kernelparam_forces(const PmeGpu* pmeGpu)
1441 {
1442     if (pmeGpu && pmeGpu->kernelParams)
1443     {
1444         return pmeGpu->kernelParams->atoms.d_forces;
1445     }
1446     else
1447     {
1448         return nullptr;
1449     }
1450 }
1451
1452 void pme_gpu_set_kernelparam_coordinates(const PmeGpu* pmeGpu, DeviceBuffer<gmx::RVec> d_x)
1453 {
1454     GMX_ASSERT(pmeGpu && pmeGpu->kernelParams,
1455                "PME GPU device buffer can not be set in non-GPU builds or before the GPU PME was "
1456                "initialized.");
1457
1458     GMX_ASSERT(checkDeviceBuffer(d_x, pmeGpu->kernelParams->atoms.nAtoms),
1459                "The device-side buffer can not be set.");
1460
1461     pmeGpu->kernelParams->atoms.d_coordinates = d_x;
1462 }
1463
1464 const DeviceStream* pme_gpu_get_stream(const PmeGpu* pmeGpu)
1465 {
1466     if (pmeGpu)
1467     {
1468         return &pmeGpu->archSpecific->pmeStream_;
1469     }
1470     else
1471     {
1472         return nullptr;
1473     }
1474 }
1475
1476 GpuEventSynchronizer* pme_gpu_get_forces_ready_synchronizer(const PmeGpu* pmeGpu)
1477 {
1478     if (pmeGpu && pmeGpu->kernelParams)
1479     {
1480         return &pmeGpu->archSpecific->pmeForcesReady;
1481     }
1482     else
1483     {
1484         return nullptr;
1485     }
1486 }