1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2016,2017,2018,2019,2020, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35
36 /*! \internal \file
37  *
38  * \brief This file contains internal function implementations
39  * for performing the PME calculations on GPU.
40  *
41  * Note that this file is compiled as regular C++ source in OpenCL builds, but
42  * it is treated as CUDA source in CUDA-enabled GPU builds.
43  *
44  * \author Aleksei Iupinov <a.yupinov@gmail.com>
45  * \ingroup module_ewald
46  */
47
48 #include "gmxpre.h"
49
50 #include "pme_gpu_internal.h"
51
52 #include "config.h"
53
54 #include <list>
55 #include <memory>
56 #include <string>
57
58 #include "gromacs/ewald/ewald_utils.h"
59 #include "gromacs/gpu_utils/device_context.h"
60 #include "gromacs/gpu_utils/device_stream.h"
61 #include "gromacs/gpu_utils/gpu_utils.h"
62 #include "gromacs/math/invertmatrix.h"
63 #include "gromacs/math/units.h"
64 #include "gromacs/timing/gpu_timing.h"
65 #include "gromacs/utility/exceptions.h"
66 #include "gromacs/utility/fatalerror.h"
67 #include "gromacs/utility/gmxassert.h"
68 #include "gromacs/utility/logger.h"
69 #include "gromacs/utility/stringutil.h"
70
71 #if GMX_GPU == GMX_GPU_CUDA
72 #    include "gromacs/gpu_utils/pmalloc_cuda.h"
73
74 #    include "pme.cuh"
75 #elif GMX_GPU == GMX_GPU_OPENCL
76 #    include "gromacs/gpu_utils/gmxopencl.h"
77 #endif
78
79 #include "gromacs/ewald/pme.h"
80
81 #include "pme_gpu_3dfft.h"
82 #include "pme_gpu_calculate_splines.h"
83 #include "pme_gpu_constants.h"
84 #include "pme_gpu_program_impl.h"
85 #include "pme_gpu_timings.h"
86 #include "pme_gpu_types.h"
87 #include "pme_gpu_types_host.h"
88 #include "pme_gpu_types_host_impl.h"
89 #include "pme_grid.h"
90 #include "pme_internal.h"
91 #include "pme_solve.h"
92
93 /*! \brief
94  * CUDA only.
95  * Atom limit above which it is advantageous to turn on
96  * recalculating the splines in the gather kernel and to use fewer threads per atom in the spline and spread kernels.
97  */
98 constexpr int c_pmeGpuPerformanceAtomLimit = 23000;
99
100 /*! \internal \brief
101  * Wrapper for getting a pointer to the plain C++ part of the GPU kernel parameters structure.
102  *
103  * \param[in] pmeGpu  The PME GPU structure.
104  * \returns The pointer to the kernel parameters.
105  */
106 static PmeGpuKernelParamsBase* pme_gpu_get_kernel_params_base_ptr(const PmeGpu* pmeGpu)
107 {
108     // reinterpret_cast is needed because the derived CUDA structure is not known in this file
109     auto* kernelParamsPtr = reinterpret_cast<PmeGpuKernelParamsBase*>(pmeGpu->kernelParams.get());
110     return kernelParamsPtr;
111 }
112
113 /*! \brief
114  * Atom data block size (in terms of number of atoms).
115  * This is the least common multiple of the number of atoms processed by
116  * a single block/workgroup of the spread and gather kernels.
117  * The GPU atom data buffers must be padded, which means that
118  * the number of atoms used for determining the size of the memory
119  * allocation must be divisible by this.
120  */
121 constexpr int c_pmeAtomDataBlockSize = 64;
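// For example, with this block size an input of 1000 atoms is padded to an allocation
// sized for 1024 atoms (16 blocks of 64) in pme_gpu_reinit_atoms().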
122
123 int pme_gpu_get_atom_data_block_size()
124 {
125     return c_pmeAtomDataBlockSize;
126 }
127
128 void pme_gpu_synchronize(const PmeGpu* pmeGpu)
129 {
130     pmeGpu->archSpecific->pmeStream_.synchronize();
131 }
132
133 void pme_gpu_alloc_energy_virial(PmeGpu* pmeGpu)
134 {
135     const size_t energyAndVirialSize = c_virialAndEnergyCount * sizeof(float);
136
137     GMX_ASSERT(
138             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
139             "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
140
141     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
142     {
143         allocateDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy[gridIndex],
144                              c_virialAndEnergyCount, pmeGpu->archSpecific->deviceContext_);
145         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_virialAndEnergy[gridIndex]), energyAndVirialSize);
146     }
147 }
148
149 void pme_gpu_free_energy_virial(PmeGpu* pmeGpu)
150 {
151     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
152     {
153         freeDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy[gridIndex]);
154         pfree(pmeGpu->staging.h_virialAndEnergy[gridIndex]);
155         pmeGpu->staging.h_virialAndEnergy[gridIndex] = nullptr;
156     }
157 }
158
159 void pme_gpu_clear_energy_virial(const PmeGpu* pmeGpu)
160 {
161     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
162     {
163         clearDeviceBufferAsync(&pmeGpu->kernelParams->constants.d_virialAndEnergy[gridIndex], 0,
164                                c_virialAndEnergyCount, pmeGpu->archSpecific->pmeStream_);
165     }
166 }
167
168 void pme_gpu_realloc_and_copy_bspline_values(PmeGpu* pmeGpu, const int gridIndex)
169 {
170     GMX_ASSERT(
171             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
172             "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
173     GMX_ASSERT(gridIndex < pmeGpu->common->ngrids,
174                "Invalid combination of gridIndex and number of grids");
175
176     const int splineValuesOffset[DIM] = { 0, pmeGpu->kernelParams->grid.realGridSize[XX],
177                                           pmeGpu->kernelParams->grid.realGridSize[XX]
178                                                   + pmeGpu->kernelParams->grid.realGridSize[YY] };
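    // The B-spline moduli for X, Y and Z are stored back-to-back in a single buffer;
    // these offsets mark where the Y and Z moduli start within it.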
179     memcpy(&pmeGpu->kernelParams->grid.splineValuesOffset, &splineValuesOffset, sizeof(splineValuesOffset));
180
181     const int newSplineValuesSize = pmeGpu->kernelParams->grid.realGridSize[XX]
182                                     + pmeGpu->kernelParams->grid.realGridSize[YY]
183                                     + pmeGpu->kernelParams->grid.realGridSize[ZZ];
184     const bool shouldRealloc = (newSplineValuesSize > pmeGpu->archSpecific->splineValuesSize[gridIndex]);
185     reallocateDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli[gridIndex],
186                            newSplineValuesSize, &pmeGpu->archSpecific->splineValuesSize[gridIndex],
187                            &pmeGpu->archSpecific->splineValuesCapacity[gridIndex],
188                            pmeGpu->archSpecific->deviceContext_);
189     if (shouldRealloc)
190     {
191         /* Reallocate the host buffer */
192         pfree(pmeGpu->staging.h_splineModuli[gridIndex]);
193         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_splineModuli[gridIndex]),
194                 newSplineValuesSize * sizeof(float));
195     }
196     for (int i = 0; i < DIM; i++)
197     {
198         memcpy(pmeGpu->staging.h_splineModuli[gridIndex] + splineValuesOffset[i],
199                pmeGpu->common->bsp_mod[i].data(), pmeGpu->common->bsp_mod[i].size() * sizeof(float));
200     }
201     /* TODO: pin original buffer instead! */
202     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli[gridIndex],
203                        pmeGpu->staging.h_splineModuli[gridIndex], 0, newSplineValuesSize,
204                        pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
205 }
206
207 void pme_gpu_free_bspline_values(const PmeGpu* pmeGpu)
208 {
209     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
210     {
211         pfree(pmeGpu->staging.h_splineModuli[gridIndex]);
212         freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli[gridIndex]);
213     }
214 }
215
216 void pme_gpu_realloc_forces(PmeGpu* pmeGpu)
217 {
218     const size_t newForcesSize = pmeGpu->nAtomsAlloc * DIM;
219     GMX_ASSERT(newForcesSize > 0, "Bad number of atoms in PME GPU");
220     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, newForcesSize,
221                            &pmeGpu->archSpecific->forcesSize, &pmeGpu->archSpecific->forcesSizeAlloc,
222                            pmeGpu->archSpecific->deviceContext_);
223     pmeGpu->staging.h_forces.reserveWithPadding(pmeGpu->nAtomsAlloc);
224     pmeGpu->staging.h_forces.resizeWithPadding(pmeGpu->kernelParams->atoms.nAtoms);
225 }
226
227 void pme_gpu_free_forces(const PmeGpu* pmeGpu)
228 {
229     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces);
230 }
231
232 void pme_gpu_copy_input_forces(PmeGpu* pmeGpu)
233 {
234     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
235     float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
236     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, h_forcesFloat, 0,
237                        DIM * pmeGpu->kernelParams->atoms.nAtoms, pmeGpu->archSpecific->pmeStream_,
238                        pmeGpu->settings.transferKind, nullptr);
239 }
240
241 void pme_gpu_copy_output_forces(PmeGpu* pmeGpu)
242 {
243     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
244     float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
245     copyFromDeviceBuffer(h_forcesFloat, &pmeGpu->kernelParams->atoms.d_forces, 0,
246                          DIM * pmeGpu->kernelParams->atoms.nAtoms, pmeGpu->archSpecific->pmeStream_,
247                          pmeGpu->settings.transferKind, nullptr);
248 }
249
250 void pme_gpu_realloc_and_copy_input_coefficients(const PmeGpu* pmeGpu,
251                                                  const float*  h_coefficients,
252                                                  const int     gridIndex)
253 {
254     GMX_ASSERT(h_coefficients, "Bad host-side charge buffer in PME GPU");
255     const size_t newCoefficientsSize = pmeGpu->nAtomsAlloc;
256     GMX_ASSERT(newCoefficientsSize > 0, "Bad number of atoms in PME GPU");
257     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex],
258                            newCoefficientsSize, &pmeGpu->archSpecific->coefficientsSize[gridIndex],
259                            &pmeGpu->archSpecific->coefficientsCapacity[gridIndex],
260                            pmeGpu->archSpecific->deviceContext_);
261     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex],
262                        const_cast<float*>(h_coefficients), 0, pmeGpu->kernelParams->atoms.nAtoms,
263                        pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
264
265     const size_t paddingIndex = pmeGpu->kernelParams->atoms.nAtoms;
266     const size_t paddingCount = pmeGpu->nAtomsAlloc - paddingIndex;
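    // Zero the charges of the padding atoms so that they do not contribute to the spread/gather results.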
267     if (paddingCount > 0)
268     {
269         clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex], paddingIndex,
270                                paddingCount, pmeGpu->archSpecific->pmeStream_);
271     }
272 }
273
274 void pme_gpu_free_coefficients(const PmeGpu* pmeGpu)
275 {
276     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
277     {
278         freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex]);
279     }
280 }
281
282 void pme_gpu_realloc_spline_data(PmeGpu* pmeGpu)
283 {
284     const int order             = pmeGpu->common->pme_order;
285     const int newSplineDataSize = DIM * order * pmeGpu->nAtomsAlloc;
286     GMX_ASSERT(newSplineDataSize > 0, "Bad number of atoms in PME GPU");
287     /* Two arrays of the same size */
288     const bool shouldRealloc        = (newSplineDataSize > pmeGpu->archSpecific->splineDataSize);
289     int        currentSizeTemp      = pmeGpu->archSpecific->splineDataSize;
290     int        currentSizeTempAlloc = pmeGpu->archSpecific->splineDataSizeAlloc;
291     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta, newSplineDataSize, &currentSizeTemp,
292                            &currentSizeTempAlloc, pmeGpu->archSpecific->deviceContext_);
293     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta, newSplineDataSize,
294                            &pmeGpu->archSpecific->splineDataSize, &pmeGpu->archSpecific->splineDataSizeAlloc,
295                            pmeGpu->archSpecific->deviceContext_);
296     // the host side reallocation
297     if (shouldRealloc)
298     {
299         pfree(pmeGpu->staging.h_theta);
300         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_theta), newSplineDataSize * sizeof(float));
301         pfree(pmeGpu->staging.h_dtheta);
302         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_dtheta), newSplineDataSize * sizeof(float));
303     }
304 }
305
306 void pme_gpu_free_spline_data(const PmeGpu* pmeGpu)
307 {
308     /* Two arrays of the same size */
309     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta);
310     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta);
311     pfree(pmeGpu->staging.h_theta);
312     pfree(pmeGpu->staging.h_dtheta);
313 }
314
315 void pme_gpu_realloc_grid_indices(PmeGpu* pmeGpu)
316 {
317     const size_t newIndicesSize = DIM * pmeGpu->nAtomsAlloc;
318     GMX_ASSERT(newIndicesSize > 0, "Bad number of atoms in PME GPU");
319     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices, newIndicesSize,
320                            &pmeGpu->archSpecific->gridlineIndicesSize,
321                            &pmeGpu->archSpecific->gridlineIndicesSizeAlloc,
322                            pmeGpu->archSpecific->deviceContext_);
323     pfree(pmeGpu->staging.h_gridlineIndices);
324     pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_gridlineIndices), newIndicesSize * sizeof(int));
325 }
326
327 void pme_gpu_free_grid_indices(const PmeGpu* pmeGpu)
328 {
329     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices);
330     pfree(pmeGpu->staging.h_gridlineIndices);
331 }
332
333 void pme_gpu_realloc_grids(PmeGpu* pmeGpu)
334 {
335     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
336
337     const int newRealGridSize = kernelParamsPtr->grid.realGridSizePadded[XX]
338                                 * kernelParamsPtr->grid.realGridSizePadded[YY]
339                                 * kernelParamsPtr->grid.realGridSizePadded[ZZ];
340     const int newComplexGridSize = kernelParamsPtr->grid.complexGridSizePadded[XX]
341                                    * kernelParamsPtr->grid.complexGridSizePadded[YY]
342                                    * kernelParamsPtr->grid.complexGridSizePadded[ZZ] * 2;
343     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
344     {
345         // Multiplied by 2 because we count complex grid size for complex numbers, but all allocations/pointers are float
346         if (pmeGpu->archSpecific->performOutOfPlaceFFT)
347         {
348             /* 2 separate grids */
349             reallocateDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid[gridIndex], newComplexGridSize,
350                                    &pmeGpu->archSpecific->complexGridSize[gridIndex],
351                                    &pmeGpu->archSpecific->complexGridCapacity[gridIndex],
352                                    pmeGpu->archSpecific->deviceContext_);
353             reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid[gridIndex], newRealGridSize,
354                                    &pmeGpu->archSpecific->realGridSize[gridIndex],
355                                    &pmeGpu->archSpecific->realGridCapacity[gridIndex],
356                                    pmeGpu->archSpecific->deviceContext_);
357         }
358         else
359         {
360             /* A single buffer so that any grid will fit */
361             const int newGridsSize = std::max(newRealGridSize, newComplexGridSize);
362             reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid[gridIndex], newGridsSize,
363                                    &pmeGpu->archSpecific->realGridSize[gridIndex],
364                                    &pmeGpu->archSpecific->realGridCapacity[gridIndex],
365                                    pmeGpu->archSpecific->deviceContext_);
366             kernelParamsPtr->grid.d_fourierGrid[gridIndex] = kernelParamsPtr->grid.d_realGrid[gridIndex];
367             pmeGpu->archSpecific->complexGridSize[gridIndex] =
368                     pmeGpu->archSpecific->realGridSize[gridIndex];
369             // the size might get used later for copying the grid
370         }
371     }
372 }
373
374 void pme_gpu_free_grids(const PmeGpu* pmeGpu)
375 {
376     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
377     {
378         if (pmeGpu->archSpecific->performOutOfPlaceFFT)
379         {
380             freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_fourierGrid[gridIndex]);
381         }
382         freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid[gridIndex]);
383     }
384 }
385
386 void pme_gpu_clear_grids(const PmeGpu* pmeGpu)
387 {
388     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
389     {
390         clearDeviceBufferAsync(&pmeGpu->kernelParams->grid.d_realGrid[gridIndex], 0,
391                                pmeGpu->archSpecific->realGridSize[gridIndex],
392                                pmeGpu->archSpecific->pmeStream_);
393     }
394 }
395
396 void pme_gpu_realloc_and_copy_fract_shifts(PmeGpu* pmeGpu)
397 {
398     pme_gpu_free_fract_shifts(pmeGpu);
399
400     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
401
402     const int nx                  = kernelParamsPtr->grid.realGridSize[XX];
403     const int ny                  = kernelParamsPtr->grid.realGridSize[YY];
404     const int nz                  = kernelParamsPtr->grid.realGridSize[ZZ];
405     const int cellCount           = c_pmeNeighborUnitcellCount;
406     const int gridDataOffset[DIM] = { 0, cellCount * nx, cellCount * (nx + ny) };
407
408     memcpy(kernelParamsPtr->grid.tablesOffsets, &gridDataOffset, sizeof(gridDataOffset));
409
410     const int newFractShiftsSize = cellCount * (nx + ny + nz);
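    // The fractional shift (fsh) and gridline index (nn) tables for X, Y and Z are each
    // concatenated into a single buffer of cellCount * (nx + ny + nz) elements;
    // tablesOffsets above records where the Y and Z parts begin.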
411
412     initParamLookupTable(&kernelParamsPtr->grid.d_fractShiftsTable,
413                          &kernelParamsPtr->fractShiftsTableTexture, pmeGpu->common->fsh.data(),
414                          newFractShiftsSize, pmeGpu->archSpecific->deviceContext_);
415
416     initParamLookupTable(&kernelParamsPtr->grid.d_gridlineIndicesTable,
417                          &kernelParamsPtr->gridlineIndicesTableTexture, pmeGpu->common->nn.data(),
418                          newFractShiftsSize, pmeGpu->archSpecific->deviceContext_);
419 }
420
421 void pme_gpu_free_fract_shifts(const PmeGpu* pmeGpu)
422 {
423     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
424 #if GMX_GPU == GMX_GPU_CUDA
425     destroyParamLookupTable(&kernelParamsPtr->grid.d_fractShiftsTable,
426                             kernelParamsPtr->fractShiftsTableTexture);
427     destroyParamLookupTable(&kernelParamsPtr->grid.d_gridlineIndicesTable,
428                             kernelParamsPtr->gridlineIndicesTableTexture);
429 #elif GMX_GPU == GMX_GPU_OPENCL
430     freeDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable);
431     freeDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable);
432 #endif
433 }
434
435 bool pme_gpu_stream_query(const PmeGpu* pmeGpu)
436 {
437     return haveStreamTasksCompleted(pmeGpu->archSpecific->pmeStream_);
438 }
439
440 void pme_gpu_copy_input_gather_grid(const PmeGpu* pmeGpu, const float* h_grid, const int gridIndex)
441 {
442     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid[gridIndex], h_grid, 0,
443                        pmeGpu->archSpecific->realGridSize[gridIndex],
444                        pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
445 }
446
447 void pme_gpu_copy_output_spread_grid(const PmeGpu* pmeGpu, float* h_grid, const int gridIndex)
448 {
449     copyFromDeviceBuffer(h_grid, &pmeGpu->kernelParams->grid.d_realGrid[gridIndex], 0,
450                          pmeGpu->archSpecific->realGridSize[gridIndex],
451                          pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
452     pmeGpu->archSpecific->syncSpreadGridD2H.markEvent(pmeGpu->archSpecific->pmeStream_);
453 }
454
455 void pme_gpu_copy_output_spread_atom_data(const PmeGpu* pmeGpu)
456 {
457     const size_t splinesCount    = DIM * pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order;
458     auto*        kernelParamsPtr = pmeGpu->kernelParams.get();
459     copyFromDeviceBuffer(pmeGpu->staging.h_dtheta, &kernelParamsPtr->atoms.d_dtheta, 0, splinesCount,
460                          pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
461     copyFromDeviceBuffer(pmeGpu->staging.h_theta, &kernelParamsPtr->atoms.d_theta, 0, splinesCount,
462                          pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
463     copyFromDeviceBuffer(pmeGpu->staging.h_gridlineIndices, &kernelParamsPtr->atoms.d_gridlineIndices,
464                          0, kernelParamsPtr->atoms.nAtoms * DIM, pmeGpu->archSpecific->pmeStream_,
465                          pmeGpu->settings.transferKind, nullptr);
466 }
467
468 void pme_gpu_copy_input_gather_atom_data(const PmeGpu* pmeGpu)
469 {
470     const size_t splinesCount    = DIM * pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order;
471     auto*        kernelParamsPtr = pmeGpu->kernelParams.get();
472
473     // TODO: could clear only the padding and not the whole thing, but this is test-exclusive code anyway
474     clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_gridlineIndices, 0, pmeGpu->nAtomsAlloc * DIM,
475                            pmeGpu->archSpecific->pmeStream_);
476     clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_dtheta, 0,
477                            pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
478                            pmeGpu->archSpecific->pmeStream_);
479     clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_theta, 0,
480                            pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
481                            pmeGpu->archSpecific->pmeStream_);
482
483     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_dtheta, pmeGpu->staging.h_dtheta, 0, splinesCount,
484                        pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
485     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_theta, pmeGpu->staging.h_theta, 0, splinesCount,
486                        pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
487     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_gridlineIndices, pmeGpu->staging.h_gridlineIndices,
488                        0, kernelParamsPtr->atoms.nAtoms * DIM, pmeGpu->archSpecific->pmeStream_,
489                        pmeGpu->settings.transferKind, nullptr);
490 }
491
492 void pme_gpu_sync_spread_grid(const PmeGpu* pmeGpu)
493 {
494     pmeGpu->archSpecific->syncSpreadGridD2H.waitForEvent();
495 }
496
497 /*! \brief Internal GPU initialization for PME.
498  *
499  * \param[in]  pmeGpu         GPU PME data.
500  * \param[in]  deviceContext  GPU context.
501  * \param[in]  deviceStream   GPU stream.
502  */
503 static void pme_gpu_init_internal(PmeGpu* pmeGpu, const DeviceContext& deviceContext, const DeviceStream& deviceStream)
504 {
505 #if GMX_GPU == GMX_GPU_CUDA
506     // Prepare to use the device that this PME task was assigned earlier.
507     // Other entities, such as CUDA timing events, are known to implicitly use the device context.
508     CU_RET_ERR(cudaSetDevice(deviceContext.deviceInfo().id), "Switching to PME CUDA device");
509 #endif
510
511     /* Allocate the target-specific structures */
512     pmeGpu->archSpecific.reset(new PmeGpuSpecific(deviceContext, deviceStream));
513     pmeGpu->kernelParams.reset(new PmeGpuKernelParams());
514
515     pmeGpu->archSpecific->performOutOfPlaceFFT = true;
516     /* This should give better performance, according to the cuFFT documentation.
517      * The performance seems to be the same though.
518      * TODO: PME could also try to pick up nice grid sizes (with factors of 2, 3, 5, 7).
519      */
520
521 #if GMX_GPU == GMX_GPU_CUDA
522     pmeGpu->maxGridWidthX = deviceContext.deviceInfo().prop.maxGridSize[0];
523 #elif GMX_GPU == GMX_GPU_OPENCL
524     pmeGpu->maxGridWidthX = INT32_MAX / 2;
525     // TODO: is there no really global work size limit in OpenCL?
526 #endif
527 }
528
529 void pme_gpu_reinit_3dfft(const PmeGpu* pmeGpu)
530 {
531     if (pme_gpu_settings(pmeGpu).performGPUFFT)
532     {
533         pmeGpu->archSpecific->fftSetup.resize(0);
534         for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
535         {
536             pmeGpu->archSpecific->fftSetup.push_back(std::make_unique<GpuParallel3dFft>(pmeGpu, gridIndex));
537         }
538     }
539 }
540
541 void pme_gpu_destroy_3dfft(const PmeGpu* pmeGpu)
542 {
543     pmeGpu->archSpecific->fftSetup.resize(0);
544 }
545
546 void pme_gpu_getEnergyAndVirial(const gmx_pme_t& pme, const float lambda, PmeOutput* output)
547 {
548     const PmeGpu* pmeGpu = pme.gpu;
549
550     GMX_ASSERT(lambda == 1.0 || pmeGpu->common->ngrids == 2,
551                "Invalid combination of lambda and number of grids");
552
553     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
554     {
555         for (int j = 0; j < c_virialAndEnergyCount; j++)
556         {
557             GMX_ASSERT(std::isfinite(pmeGpu->staging.h_virialAndEnergy[gridIndex][j]),
558                        "PME GPU produces incorrect energy/virial.");
559         }
560     }
561     for (int dim1 = 0; dim1 < DIM; dim1++)
562     {
563         for (int dim2 = 0; dim2 < DIM; dim2++)
564         {
565             output->coulombVirial_[dim1][dim2] = 0;
566         }
567     }
568     output->coulombEnergy_ = 0;
569     float scale            = 1.0;
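    // With two grids (FEP), the grid A and grid B contributions are mixed linearly
    // with weights (1 - lambda) and lambda, respectively.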
570     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
571     {
572         if (pmeGpu->common->ngrids == 2)
573         {
574             scale = gridIndex == 0 ? (1.0 - lambda) : lambda;
575         }
576         output->coulombVirial_[XX][XX] +=
577                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][0];
578         output->coulombVirial_[YY][YY] +=
579                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][1];
580         output->coulombVirial_[ZZ][ZZ] +=
581                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][2];
582         output->coulombVirial_[XX][YY] +=
583                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][3];
584         output->coulombVirial_[YY][XX] +=
585                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][3];
586         output->coulombVirial_[XX][ZZ] +=
587                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][4];
588         output->coulombVirial_[ZZ][XX] +=
589                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][4];
590         output->coulombVirial_[YY][ZZ] +=
591                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][5];
592         output->coulombVirial_[ZZ][YY] +=
593                 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][5];
594         output->coulombEnergy_ += scale * 0.5F * pmeGpu->staging.h_virialAndEnergy[gridIndex][6];
595     }
596     if (pmeGpu->common->ngrids > 1)
597     {
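        // dV/dlambda of the linear interpolation E(lambda) = (1 - lambda) * E_A + lambda * E_B
        // is simply E_B - E_A; the 0.5F factor matches the energy scaling above.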
598         output->coulombDvdl_ = 0.5F
599                                * (pmeGpu->staging.h_virialAndEnergy[FEP_STATE_B][6]
600                                   - pmeGpu->staging.h_virialAndEnergy[FEP_STATE_A][6]);
601     }
602 }
603
604 /*! \brief Sets the force-related members in \p output
605  *
606  * \param[in]   pmeGpu      PME GPU data structure
607  * \param[out]  output      Pointer to PME output data structure
608  */
609 static void pme_gpu_getForceOutput(PmeGpu* pmeGpu, PmeOutput* output)
610 {
611     output->haveForceOutput_ = !pmeGpu->settings.useGpuForceReduction;
612     if (output->haveForceOutput_)
613     {
614         output->forces_ = pmeGpu->staging.h_forces;
615     }
616 }
617
618 PmeOutput pme_gpu_getOutput(const gmx_pme_t& pme, const bool computeEnergyAndVirial, const real lambdaQ)
619 {
620     PmeGpu* pmeGpu = pme.gpu;
621
622     PmeOutput output;
623
624     pme_gpu_getForceOutput(pmeGpu, &output);
625
626     if (computeEnergyAndVirial)
627     {
628         if (pme_gpu_settings(pmeGpu).performGPUSolve)
629         {
630             pme_gpu_getEnergyAndVirial(pme, lambdaQ, &output);
631         }
632         else
633         {
634             get_pme_ener_vir_q(pme.solve_work, pme.nthread, &output);
635         }
636     }
637     return output;
638 }
639
640 void pme_gpu_update_input_box(PmeGpu gmx_unused* pmeGpu, const matrix gmx_unused box)
641 {
642 #if GMX_DOUBLE
643     GMX_THROW(gmx::NotImplementedError("PME is implemented for single-precision only on GPU"));
644 #else
645     matrix scaledBox;
646     pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
647     auto* kernelParamsPtr              = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
648     kernelParamsPtr->current.boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
649     GMX_ASSERT(kernelParamsPtr->current.boxVolume != 0.0F, "Zero volume of the unit cell");
650     matrix recipBox;
651     gmx::invertBoxMatrix(scaledBox, recipBox);
652
653     /* The GPU recipBox is transposed as compared to the CPU recipBox.
654      * Spread uses matrix columns (while solve and gather use rows).
655      * There is no particular reason for this; it might be further rethought/optimized for better access patterns.
656      */
657     const real newRecipBox[DIM][DIM] = { { recipBox[XX][XX], recipBox[YY][XX], recipBox[ZZ][XX] },
658                                          { 0.0, recipBox[YY][YY], recipBox[ZZ][YY] },
659                                          { 0.0, 0.0, recipBox[ZZ][ZZ] } };
660     memcpy(kernelParamsPtr->current.recipBox, newRecipBox, sizeof(matrix));
661 #endif
662 }
663
664 /*! \brief \libinternal
665  * (Re-)initializes all the PME GPU data related to the grid size and cut-off.
666  *
667  * \param[in] pmeGpu            The PME GPU structure.
668  */
669 static void pme_gpu_reinit_grids(PmeGpu* pmeGpu)
670 {
671     auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
672
673     GMX_ASSERT(
674             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
675             "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
676
677     kernelParamsPtr->grid.ewaldFactor =
678             (M_PI * M_PI) / (pmeGpu->common->ewaldcoeff_q * pmeGpu->common->ewaldcoeff_q);
679     /* The grid size variants */
680     for (int i = 0; i < DIM; i++)
681     {
682         kernelParamsPtr->grid.realGridSize[i] = pmeGpu->common->nk[i];
683         kernelParamsPtr->grid.realGridSizeFP[i] =
684                 static_cast<float>(kernelParamsPtr->grid.realGridSize[i]);
685         kernelParamsPtr->grid.realGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
686
687         // The complex grid currently uses no padding;
688         // if it starts to do so, then another test should be added for that
689         kernelParamsPtr->grid.complexGridSize[i]       = kernelParamsPtr->grid.realGridSize[i];
690         kernelParamsPtr->grid.complexGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
691     }
692     /* FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
693     if (!pme_gpu_settings(pmeGpu).performGPUFFT)
694     {
695         // This allows the GPU spread grid and the CPU fftgrid to have the same layout, so that we can copy the data directly
696         kernelParamsPtr->grid.realGridSizePadded[ZZ] =
697                 (kernelParamsPtr->grid.realGridSize[ZZ] / 2 + 1) * 2;
698     }
699     /* GPU FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
700     kernelParamsPtr->grid.complexGridSize[ZZ] /= 2;
701     kernelParamsPtr->grid.complexGridSize[ZZ]++;
702     kernelParamsPtr->grid.complexGridSizePadded[ZZ] = kernelParamsPtr->grid.complexGridSize[ZZ];
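    // For example, a real grid of 96 points in Z gives 49 complex points and,
    // with CPU FFT, a padded real minor dimension of 98.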
703
704     pme_gpu_realloc_and_copy_fract_shifts(pmeGpu);
705     for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
706     {
707         pme_gpu_realloc_and_copy_bspline_values(pmeGpu, gridIndex);
708     }
709
710     pme_gpu_realloc_grids(pmeGpu);
711     pme_gpu_reinit_3dfft(pmeGpu);
712 }
713
714 /* Several GPU functions that refer to the CPU PME data live here.
715  * We would like to keep these away from the GPU-framework specific code for clarity,
716  * and to avoid compilation issues with MPI.
717  */
718
719 /*! \brief \libinternal
720  * Copies everything useful from the PME CPU to the PME GPU structure.
721  * The goal is to minimize interaction with the PME CPU structure in the GPU code.
722  *
723  * \param[in] pme         The PME structure.
724  */
725 static void pme_gpu_copy_common_data_from(const gmx_pme_t* pme)
726 {
727     /* TODO: Consider refactoring the CPU PME code to use the same structure,
728      * so that this function becomes 2 lines */
729     PmeGpu* pmeGpu               = pme->gpu;
730     pmeGpu->common->ngrids       = pme->bFEP_q ? 2 : 1;
731     pmeGpu->common->epsilon_r    = pme->epsilon_r;
732     pmeGpu->common->ewaldcoeff_q = pme->ewaldcoeff_q;
733     pmeGpu->common->nk[XX]       = pme->nkx;
734     pmeGpu->common->nk[YY]       = pme->nky;
735     pmeGpu->common->nk[ZZ]       = pme->nkz;
736     pmeGpu->common->pme_order    = pme->pme_order;
737     if (pmeGpu->common->pme_order != c_pmeGpuOrder)
738     {
739         GMX_THROW(gmx::NotImplementedError("pme_order != 4 is not implemented!"));
740     }
741     for (int i = 0; i < DIM; i++)
742     {
743         pmeGpu->common->bsp_mod[i].assign(pme->bsp_mod[i], pme->bsp_mod[i] + pmeGpu->common->nk[i]);
744     }
745     const int cellCount = c_pmeNeighborUnitcellCount;
746     pmeGpu->common->fsh.resize(0);
747     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshx, pme->fshx + cellCount * pme->nkx);
748     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshy, pme->fshy + cellCount * pme->nky);
749     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshz, pme->fshz + cellCount * pme->nkz);
750     pmeGpu->common->nn.resize(0);
751     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnx, pme->nnx + cellCount * pme->nkx);
752     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nny, pme->nny + cellCount * pme->nky);
753     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnz, pme->nnz + cellCount * pme->nkz);
754     pmeGpu->common->runMode       = pme->runMode;
755     pmeGpu->common->isRankPmeOnly = !pme->bPPnode;
756     pmeGpu->common->boxScaler     = pme->boxScaler;
757 }
758
759 /*! \libinternal \brief
760  * Uses heuristics to select the best-performing PME spread and gather kernels.
761  *
762  * \param[in,out] pmeGpu         The PME GPU structure.
763  */
764 static void pme_gpu_select_best_performing_pme_spreadgather_kernels(PmeGpu* pmeGpu)
765 {
766     if (pmeGpu->kernelParams->atoms.nAtoms > c_pmeGpuPerformanceAtomLimit && (GMX_GPU == GMX_GPU_CUDA))
767     {
768         pmeGpu->settings.threadsPerAtom     = ThreadsPerAtom::Order;
769         pmeGpu->settings.recalculateSplines = true;
770     }
771     else
772     {
773         pmeGpu->settings.threadsPerAtom     = ThreadsPerAtom::OrderSquared;
774         pmeGpu->settings.recalculateSplines = false;
775     }
776 }
777
778
779 /*! \libinternal \brief
780  * Initializes the PME GPU data at the beginning of the run.
781  * TODO: this should become PmeGpu::PmeGpu()
782  *
783  * \param[in,out] pme            The PME structure.
784  * \param[in]     deviceContext  The GPU context.
785  * \param[in]     deviceStream   The GPU stream.
786  * \param[in,out] pmeGpuProgram  The handle to the program/kernel data created outside (e.g. in unit tests/runner)
787  */
788 static void pme_gpu_init(gmx_pme_t*           pme,
789                          const DeviceContext& deviceContext,
790                          const DeviceStream&  deviceStream,
791                          const PmeGpuProgram* pmeGpuProgram)
792 {
793     pme->gpu       = new PmeGpu();
794     PmeGpu* pmeGpu = pme->gpu;
795     changePinningPolicy(&pmeGpu->staging.h_forces, pme_get_pinning_policy());
796     pmeGpu->common = std::make_shared<PmeShared>();
797
798     /* These settings are set here for the whole run; dynamic ones are set in pme_gpu_reinit() */
799     /* A convenience variable. */
800     pmeGpu->settings.useDecomposition = (pme->nnodes != 1);
801     /* TODO: CPU gather with GPU spread is broken due to different theta/dtheta layout. */
802     pmeGpu->settings.performGPUGather = true;
803     // By default GPU-side reduction is off (explicitly set here for tests, otherwise reset per-step)
804     pmeGpu->settings.useGpuForceReduction = false;
805
806     pme_gpu_set_testing(pmeGpu, false);
807
808     GMX_ASSERT(pmeGpuProgram != nullptr, "GPU kernels must be already compiled");
809     pmeGpu->programHandle_ = pmeGpuProgram;
810
811     pmeGpu->initializedClfftLibrary_ = std::make_unique<gmx::ClfftInitializer>();
812
813     pme_gpu_init_internal(pmeGpu, deviceContext, deviceStream);
814
815     pme_gpu_copy_common_data_from(pme);
816     pme_gpu_alloc_energy_virial(pmeGpu);
817
818     GMX_ASSERT(pmeGpu->common->epsilon_r != 0.0F, "PME GPU: bad electrostatic coefficient");
819
820     auto* kernelParamsPtr               = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
821     kernelParamsPtr->constants.elFactor = ONE_4PI_EPS0 / pmeGpu->common->epsilon_r;
822 }
823
824 void pme_gpu_get_real_grid_sizes(const PmeGpu* pmeGpu, gmx::IVec* gridSize, gmx::IVec* paddedGridSize)
825 {
826     GMX_ASSERT(gridSize != nullptr, "");
827     GMX_ASSERT(paddedGridSize != nullptr, "");
828     GMX_ASSERT(pmeGpu != nullptr, "");
829     auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
830     for (int i = 0; i < DIM; i++)
831     {
832         (*gridSize)[i]       = kernelParamsPtr->grid.realGridSize[i];
833         (*paddedGridSize)[i] = kernelParamsPtr->grid.realGridSizePadded[i];
834     }
835 }
836
837 void pme_gpu_reinit(gmx_pme_t*           pme,
838                     const DeviceContext* deviceContext,
839                     const DeviceStream*  deviceStream,
840                     const PmeGpuProgram* pmeGpuProgram)
841 {
842     GMX_ASSERT(pme != nullptr, "Need valid PME object");
843
844     if (!pme->gpu)
845     {
846         GMX_RELEASE_ASSERT(deviceContext != nullptr,
847                            "Device context can not be nullptr when setting up PME on GPU.");
848         GMX_RELEASE_ASSERT(deviceStream != nullptr,
849                            "Device stream can not be nullptr when setting up PME on GPU.");
850         /* First-time initialization */
851         pme_gpu_init(pme, *deviceContext, *deviceStream, pmeGpuProgram);
852     }
853     else
854     {
855         /* After this call nothing in the GPU code should refer to the gmx_pme_t *pme itself - until the next pme_gpu_reinit */
856         pme_gpu_copy_common_data_from(pme);
857     }
858     /* GPU FFT is only used when PME runs on a single rank. */
859     pme->gpu->settings.performGPUFFT =
860             (pme->gpu->common->runMode == PmeRunMode::GPU) && !pme->gpu->settings.useDecomposition;
861     pme->gpu->settings.performGPUSolve = (pme->gpu->common->runMode == PmeRunMode::GPU);
862
863     /* Reinit active timers */
864     pme_gpu_reinit_timings(pme->gpu);
865
866     pme_gpu_reinit_grids(pme->gpu);
867     // Note: if timing the reinit launch overhead becomes more relevant
868     // (e.g. with regular PP-PME re-balancing), we should pass wcycle here.
869     pme_gpu_reinit_computation(pme, nullptr);
870     /* Clear the previous box - doesn't hurt, and forces the PME CPU recipbox
871      * update for mixed mode on grid switch. TODO: use shared recipbox field.
872      */
873     std::memset(pme->gpu->common->previousBox, 0, sizeof(pme->gpu->common->previousBox));
874 }
875
876 void pme_gpu_destroy(PmeGpu* pmeGpu)
877 {
878     /* Free lots of data */
879     pme_gpu_free_energy_virial(pmeGpu);
880     pme_gpu_free_bspline_values(pmeGpu);
881     pme_gpu_free_forces(pmeGpu);
882     pme_gpu_free_coefficients(pmeGpu);
883     pme_gpu_free_spline_data(pmeGpu);
884     pme_gpu_free_grid_indices(pmeGpu);
885     pme_gpu_free_fract_shifts(pmeGpu);
886     pme_gpu_free_grids(pmeGpu);
887
888     pme_gpu_destroy_3dfft(pmeGpu);
889
890     delete pmeGpu;
891 }
892
893 void pme_gpu_reinit_atoms(PmeGpu* pmeGpu, const int nAtoms, const real* chargesA, const real* chargesB)
894 {
895     auto* kernelParamsPtr         = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
896     kernelParamsPtr->atoms.nAtoms = nAtoms;
897     const int  block_size         = pme_gpu_get_atom_data_block_size();
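    // Round the atom count up to a multiple of the atom data block size (64),
    // as required by the padded GPU atom-data buffers.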
898     const int  nAtomsNewPadded    = ((nAtoms + block_size - 1) / block_size) * block_size;
899     const bool haveToRealloc      = (pmeGpu->nAtomsAlloc < nAtomsNewPadded);
900     pmeGpu->nAtomsAlloc           = nAtomsNewPadded;
901
902 #if GMX_DOUBLE
903     GMX_RELEASE_ASSERT(false, "Only single precision supported");
904     GMX_UNUSED_VALUE(chargesA);
    GMX_UNUSED_VALUE(chargesB);
905 #else
906     int gridIndex = 0;
907     /* This could also be conditional on haveToRealloc, but the copy always needs to be performed */
908     pme_gpu_realloc_and_copy_input_coefficients(pmeGpu, reinterpret_cast<const float*>(chargesA), gridIndex);
909     gridIndex++;
910     if (chargesB != nullptr)
911     {
912         pme_gpu_realloc_and_copy_input_coefficients(
913                 pmeGpu, reinterpret_cast<const float*>(chargesB), gridIndex);
914     }
915     else
916     {
917         /* Fill the second set of coefficients with chargesA as well to be able to avoid
918          * conditionals in the GPU kernels */
919         /* FIXME: This should be avoided by making a separate templated version of the
920          * relevant kernel(s) (probably only pme_gather_kernel). That would require a
921          * reduction of the current number of templated parameters of that kernel. */
922         pme_gpu_realloc_and_copy_input_coefficients(
923                 pmeGpu, reinterpret_cast<const float*>(chargesA), gridIndex);
924     }
925 #endif
926
927     if (haveToRealloc)
928     {
929         pme_gpu_realloc_forces(pmeGpu);
930         pme_gpu_realloc_spline_data(pmeGpu);
931         pme_gpu_realloc_grid_indices(pmeGpu);
932     }
933     pme_gpu_select_best_performing_pme_spreadgather_kernels(pmeGpu);
934 }
935
936 /*! \internal \brief
937  * Returns the raw timing event from the corresponding GpuRegionTimer (if timings are enabled).
938  * In CUDA the result can be a nullptr stub, per the GpuRegionTimer implementation.
939  *
940  * \param[in] pmeGpu         The PME GPU data structure.
941  * \param[in] PMEStageId     The PME GPU stage gtPME_ index from the enum in src/gromacs/timing/gpu_timing.h
942  */
943 static CommandEvent* pme_gpu_fetch_timing_event(const PmeGpu* pmeGpu, size_t PMEStageId)
944 {
945     CommandEvent* timingEvent = nullptr;
946     if (pme_gpu_timings_enabled(pmeGpu))
947     {
948         GMX_ASSERT(PMEStageId < pmeGpu->archSpecific->timingEvents.size(),
949                    "Wrong PME GPU timing event index");
950         timingEvent = pmeGpu->archSpecific->timingEvents[PMEStageId].fetchNextEvent();
951     }
952     return timingEvent;
953 }
954
955 void pme_gpu_3dfft(const PmeGpu* pmeGpu, gmx_fft_direction dir, const int grid_index)
956 {
957     int timerId = (dir == GMX_FFT_REAL_TO_COMPLEX) ? gtPME_FFT_R2C : gtPME_FFT_C2R;
958
959     pme_gpu_start_timing(pmeGpu, timerId);
960     pmeGpu->archSpecific->fftSetup[grid_index]->perform3dFft(
961             dir, pme_gpu_fetch_timing_event(pmeGpu, timerId));
962     pme_gpu_stop_timing(pmeGpu, timerId);
963 }
964
965 /*! \brief
966  * Given possibly large \p blockCount, returns a compact 1D or 2D grid for kernel scheduling,
967  * to minimize the number of unused blocks.
968  */
969 std::pair<int, int> inline pmeGpuCreateGrid(const PmeGpu* pmeGpu, int blockCount)
970 {
971     // How many maximum widths in X do we need (hopefully just one)
972     const int minRowCount = (blockCount + pmeGpu->maxGridWidthX - 1) / pmeGpu->maxGridWidthX;
973     // Trying to make things even
974     const int colCount = (blockCount + minRowCount - 1) / minRowCount;
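    // Illustration: if blockCount were 100000 and maxGridWidthX were 65535,
    // minRowCount would be 2 and colCount 50000, i.e. a 50000 x 2 launch grid.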
975     GMX_ASSERT((colCount * minRowCount - blockCount) >= 0, "pmeGpuCreateGrid: totally wrong");
976     GMX_ASSERT((colCount * minRowCount - blockCount) < minRowCount,
977                "pmeGpuCreateGrid: excessive blocks");
978     return std::pair<int, int>(colCount, minRowCount);
979 }
980
981 /*! \brief
982  * Returns a pointer to appropriate spline and spread kernel based on the input bool values
983  *
984  * \param[in]  pmeGpu                   The PME GPU structure.
985  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
986  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
987  * \param[in]  numGrids                 Number of grids to use. numGrids == 2 if Coulomb is perturbed.
988  *
989  * \return Pointer to CUDA kernel
990  */
991 static auto selectSplineAndSpreadKernelPtr(const PmeGpu*  pmeGpu,
992                                            ThreadsPerAtom threadsPerAtom,
993                                            bool           writeSplinesToGlobal,
994                                            const int      numGrids)
995 {
996     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
997     if (writeSplinesToGlobal)
998     {
999         if (threadsPerAtom == ThreadsPerAtom::Order)
1000         {
1001             if (numGrids == 2)
1002             {
1003                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesThPerAtom4Dual;
1004             }
1005             else
1006             {
1007                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesThPerAtom4Single;
1008             }
1009         }
1010         else
1011         {
1012             if (numGrids == 2)
1013             {
1014                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesDual;
1015             }
1016             else
1017             {
1018                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesSingle;
1019             }
1020         }
1021     }
1022     else
1023     {
1024         if (threadsPerAtom == ThreadsPerAtom::Order)
1025         {
1026             if (numGrids == 2)
1027             {
1028                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Dual;
1029             }
1030             else
1031             {
1032                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Single;
1033             }
1034         }
1035         else
1036         {
1037             if (numGrids == 2)
1038             {
1039                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelDual;
1040             }
1041             else
1042             {
1043                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelSingle;
1044             }
1045         }
1046     }
1047
1048     return kernelPtr;
1049 }
1050
1051 /*! \brief
1052  * Returns a pointer to appropriate spline kernel based on the input bool values
1053  *
1054  * \param[in]  pmeGpu                   The PME GPU structure.
1055  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
1056  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
1057  * \param[in]  numGrids                 Number of grids to use. numGrids == 2 if Coulomb is perturbed.
1058  *
1059  * \return Pointer to CUDA kernel
1060  */
1061 static auto selectSplineKernelPtr(const PmeGpu*  pmeGpu,
1062                                   ThreadsPerAtom threadsPerAtom,
1063                                   bool gmx_unused writeSplinesToGlobal,
1064                                   const int       numGrids)
1065 {
1066     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1067     GMX_ASSERT(
1068             writeSplinesToGlobal,
1069             "Spline data should always be written to global memory when just calculating splines");
1070
1071     if (threadsPerAtom == ThreadsPerAtom::Order)
1072     {
1073         if (numGrids == 2)
1074         {
1075             kernelPtr = pmeGpu->programHandle_->impl_->splineKernelThPerAtom4Dual;
1076         }
1077         else
1078         {
1079             kernelPtr = pmeGpu->programHandle_->impl_->splineKernelThPerAtom4Single;
1080         }
1081     }
1082     else
1083     {
1084         if (numGrids == 2)
1085         {
1086             kernelPtr = pmeGpu->programHandle_->impl_->splineKernelDual;
1087         }
1088         else
1089         {
1090             kernelPtr = pmeGpu->programHandle_->impl_->splineKernelSingle;
1091         }
1092     }
1093     return kernelPtr;
1094 }
1095
1096 /*! \brief
1097  * Returns a pointer to appropriate spread kernel based on the input bool values
1098  *
1099  * \param[in]  pmeGpu                   The PME GPU structure.
1100  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
1101  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
1102  * \param[in]  numGrids                 Number of grids to use. numGrids == 2 if Coulomb is perturbed.
1103  *
1104  * \return Pointer to CUDA kernel
1105  */
1106 static auto selectSpreadKernelPtr(const PmeGpu*  pmeGpu,
1107                                   ThreadsPerAtom threadsPerAtom,
1108                                   bool           writeSplinesToGlobal,
1109                                   const int      numGrids)
1110 {
1111     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1112     if (writeSplinesToGlobal)
1113     {
1114         if (threadsPerAtom == ThreadsPerAtom::Order)
1115         {
1116             if (numGrids == 2)
1117             {
1118                 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelThPerAtom4Dual;
1119             }
1120             else
            {
1121                 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelThPerAtom4Single;
1122             }
1123         }
1124         else
1125         {
1126             if (numGrids == 2)
1127             {
1128                 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelDual;
1129             }
1130             else
1131             {
1132                 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelSingle;
1133             }
1134         }
1135     }
1136     else
1137     {
1138         /* If we are not saving the spline data, we need to recalculate it
1139            using the spline-and-spread kernel. */
1140         if (threadsPerAtom == ThreadsPerAtom::Order)
1141         {
1142             if (numGrids == 2)
1143             {
1144                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Dual;
1145             }
1146             else
1147             {
1148                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Single;
1149             }
1150         }
1151         else
1152         {
1153             if (numGrids == 2)
1154             {
1155                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelDual;
1156             }
1157             else
1158             {
1159                 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelSingle;
1160             }
1161         }
1162     }
1163     return kernelPtr;
1164 }
1165
1166 void pme_gpu_spread(const PmeGpu*         pmeGpu,
1167                     GpuEventSynchronizer* xReadyOnDevice,
1168                     real**                h_grids,
1169                     bool                  computeSplines,
1170                     bool                  spreadCharges,
1171                     const real            lambda)
1172 {
1173     GMX_ASSERT(
1174             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
1175             "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
1176
1177     GMX_ASSERT(computeSplines || spreadCharges,
1178                "PME spline/spread kernel has invalid input (nothing to do)");
1179     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1180     GMX_ASSERT(kernelParamsPtr->atoms.nAtoms > 0, "No atom data in PME GPU spread");
1181
1182     const size_t blockSize = pmeGpu->programHandle_->impl_->spreadWorkGroupSize;
1183
1184     const int order = pmeGpu->common->pme_order;
1185     GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1186     const bool writeGlobal = pmeGpu->settings.copyAllOutputs;
1187     const int  threadsPerAtom =
1188             (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
1189     const bool recalculateSplines = pmeGpu->settings.recalculateSplines;
1190 #if GMX_GPU == GMX_GPU_OPENCL
1191     GMX_ASSERT(pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::OrderSquared,
1192                "Only 16 threads per atom supported in OpenCL");
1193     GMX_ASSERT(!recalculateSplines, "Recalculating splines not supported in OpenCL");
1194 #endif
1195     const int atomsPerBlock = blockSize / threadsPerAtom;
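    // For illustration: a spread work-group size of 256 with order * order = 16 threads per atom
    // gives 16 atoms per block, which divides c_pmeAtomDataBlockSize as the assertion below requires.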
1196
1197     // TODO: pick smaller block size in runtime if needed
1198     // (e.g. on 660 Ti where 50% occupancy is ~25% faster than 100% occupancy with RNAse (~17.8k atoms))
1199     // If doing so, change atomsPerBlock in the kernels as well.
1200     // TODO: test varying block sizes on modern arch-s as well
1201     // TODO: also consider using cudaFuncSetCacheConfig() for preferring shared memory on older architectures
1202     // (for spline data mostly)
1203     GMX_ASSERT(!(c_pmeAtomDataBlockSize % atomsPerBlock),
1204                "inconsistent atom data padding vs. spreading block size");
1205
1206     // Ensure that coordinates are ready on the device before launching spread;
1207     // only needed with CUDA on PP+PME ranks, not on separate PME ranks, in unit tests
1208     // nor in OpenCL as these cases use a single stream (hence xReadyOnDevice == nullptr).
1209     GMX_ASSERT(xReadyOnDevice != nullptr || (GMX_GPU != GMX_GPU_CUDA)
1210                        || pmeGpu->common->isRankPmeOnly || pme_gpu_settings(pmeGpu).copyAllOutputs,
1211                "Need a valid coordinate synchronizer on PP+PME ranks with CUDA.");
1212     if (xReadyOnDevice)
1213     {
1214         xReadyOnDevice->enqueueWaitEvent(pmeGpu->archSpecific->pmeStream_);
1215     }
1216
1217     const int blockCount = pmeGpu->nAtomsAlloc / atomsPerBlock;
1218     auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1219
1220     if (pmeGpu->common->ngrids == 1)
1221     {
1222         kernelParamsPtr->current.scale = 1.0;
1223     }
1224     else
1225     {
1226         kernelParamsPtr->current.scale = 1.0 - lambda;
1227     }
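         // With two grids (FEP), scale = 1 - lambda weights the A-state charges; the B-state
         // weight is then lambda, i.e. 1 - scale, inside the kernels.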
1228
1229     KernelLaunchConfig config;
1230     config.blockSize[0] = order;
1231     config.blockSize[1] = (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? 1 : order);
1232     config.blockSize[2] = atomsPerBlock;
1233     config.gridSize[0]  = dimGrid.first;
1234     config.gridSize[1]  = dimGrid.second;
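         // The resulting block runs threadsPerAtom * atomsPerBlock (= blockSize) threads:
         // order threads along X, 1 or order along Y, and atomsPerBlock along Z, while
         // pmeGpuCreateGrid() has split blockCount into a 2D grid of blocks.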
1235
1236     int                                timingId;
1237     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1238     if (computeSplines)
1239     {
1240         if (spreadCharges)
1241         {
1242             timingId  = gtPME_SPLINEANDSPREAD;
1243             kernelPtr = selectSplineAndSpreadKernelPtr(pmeGpu, pmeGpu->settings.threadsPerAtom,
1244                                                        writeGlobal || (!recalculateSplines),
1245                                                        pmeGpu->common->ngrids);
1246         }
1247         else
1248         {
1249             timingId  = gtPME_SPLINE;
1250             kernelPtr = selectSplineKernelPtr(pmeGpu, pmeGpu->settings.threadsPerAtom,
1251                                               writeGlobal || (!recalculateSplines),
1252                                               pmeGpu->common->ngrids);
1253         }
1254     }
1255     else
1256     {
1257         timingId  = gtPME_SPREAD;
1258         kernelPtr = selectSpreadKernelPtr(pmeGpu, pmeGpu->settings.threadsPerAtom,
1259                                           writeGlobal || (!recalculateSplines), pmeGpu->common->ngrids);
1260     }
1261
1262
1263     pme_gpu_start_timing(pmeGpu, timingId);
1264     auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1265 #if c_canEmbedBuffers
1266     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1267 #else
1268     const auto kernelArgs = prepareGpuKernelArguments(
1269             kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->atoms.d_theta,
1270             &kernelParamsPtr->atoms.d_dtheta, &kernelParamsPtr->atoms.d_gridlineIndices,
1271             &kernelParamsPtr->grid.d_realGrid[FEP_STATE_A], &kernelParamsPtr->grid.d_realGrid[FEP_STATE_B],
1272             &kernelParamsPtr->grid.d_fractShiftsTable, &kernelParamsPtr->grid.d_gridlineIndicesTable,
1273             &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_A],
1274             &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_B], &kernelParamsPtr->atoms.d_coordinates);
1275 #endif
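         // When c_canEmbedBuffers is set (CUDA), the device buffers travel inside the kernel
         // parameter struct passed by value; otherwise (OpenCL) each buffer is passed as a
         // separate kernel argument, as enumerated above.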
1276
1277     launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent,
1278                     "PME spline/spread", kernelArgs);
1279     pme_gpu_stop_timing(pmeGpu, timingId);
1280
1281     const auto& settings    = pmeGpu->settings;
1282     const bool copyBackGrid = spreadCharges && (!settings.performGPUFFT || settings.copyAllOutputs);
1283     if (copyBackGrid)
1284     {
1285         for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
1286         {
1287             float* h_grid = h_grids[gridIndex];
1288             pme_gpu_copy_output_spread_grid(pmeGpu, h_grid, gridIndex);
1289         }
1290     }
1291     const bool copyBackAtomData =
1292             computeSplines && (!settings.performGPUGather || settings.copyAllOutputs);
1293     if (copyBackAtomData)
1294     {
1295         pme_gpu_copy_output_spread_atom_data(pmeGpu);
1296     }
1297 }
1298
1299 void pme_gpu_solve(const PmeGpu* pmeGpu,
1300                    const int     gridIndex,
1301                    t_complex*    h_grid,
1302                    GridOrdering  gridOrdering,
1303                    bool          computeEnergyAndVirial)
1304 {
1305     GMX_ASSERT(
1306             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
1307             "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
1308     GMX_ASSERT(gridIndex < pmeGpu->common->ngrids,
1309                "Invalid combination of gridIndex and number of grids");
1310
1311     const auto& settings               = pmeGpu->settings;
1312     const bool  copyInputAndOutputGrid = !settings.performGPUFFT || settings.copyAllOutputs;
1313
1314     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1315
1316     float* h_gridFloat = reinterpret_cast<float*>(h_grid);
1317     if (copyInputAndOutputGrid)
1318     {
1319         copyToDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid[gridIndex], h_gridFloat, 0,
1320                            pmeGpu->archSpecific->complexGridSize[gridIndex],
1321                            pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
1322     }
1323
1324     int majorDim = -1, middleDim = -1, minorDim = -1;
1325     switch (gridOrdering)
1326     {
1327         case GridOrdering::YZX:
1328             majorDim  = YY;
1329             middleDim = ZZ;
1330             minorDim  = XX;
1331             break;
1332
1333         case GridOrdering::XYZ:
1334             majorDim  = XX;
1335             middleDim = YY;
1336             minorDim  = ZZ;
1337             break;
1338
1339         default: GMX_ASSERT(false, "Implement grid ordering here and below for the kernel launch");
1340     }
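         // majorDim, middleDim and minorDim now name the Cartesian axes ordered from the
         // slowest- to the fastest-varying index of the complex (Fourier) grid layout.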
1341
1342     const int maxBlockSize = pmeGpu->programHandle_->impl_->solveMaxWorkGroupSize;
1343
1344     const int gridLineSize      = pmeGpu->kernelParams->grid.complexGridSize[minorDim];
1345     const int gridLinesPerBlock = std::max(maxBlockSize / gridLineSize, 1);
1346     const int blocksPerGridLine = (gridLineSize + maxBlockSize - 1) / maxBlockSize;
1347     int       cellsPerBlock;
1348     if (blocksPerGridLine == 1)
1349     {
1350         cellsPerBlock = gridLineSize * gridLinesPerBlock;
1351     }
1352     else
1353     {
1354         cellsPerBlock = (gridLineSize + blocksPerGridLine - 1) / blocksPerGridLine;
1355     }
1356     const int warpSize  = pmeGpu->programHandle_->warpSize();
1357     const int blockSize = (cellsPerBlock + warpSize - 1) / warpSize * warpSize;
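         // Illustrative numbers only (hypothetical sizes, not this build's constants): with
         // maxBlockSize = 256, warpSize = 32 and gridLineSize = 100, this gives
         // gridLinesPerBlock = 2, blocksPerGridLine = 1, cellsPerBlock = 200, and a
         // blockSize rounded up to 224 (7 full warps).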
1358
1359     static_assert(GMX_GPU != GMX_GPU_CUDA || c_solveMaxWarpsPerBlock / 2 >= 4,
1360                   "The CUDA solve energy kernels needs at least 4 warps. "
1361                   "Here we launch at least half of the max warps.");
1362
1363     KernelLaunchConfig config;
1364     config.blockSize[0] = blockSize;
1365     config.gridSize[0]  = blocksPerGridLine;
1366     // rounding up to full warps so that shuffle operations produce defined results
1367     config.gridSize[1] = (pmeGpu->kernelParams->grid.complexGridSize[middleDim] + gridLinesPerBlock - 1)
1368                          / gridLinesPerBlock;
1369     config.gridSize[2] = pmeGpu->kernelParams->grid.complexGridSize[majorDim];
1370
1371     int                                timingId  = gtPME_SOLVE;
1372     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1373     if (gridOrdering == GridOrdering::YZX)
1374     {
1375         if (gridIndex == 0)
1376         {
1377             kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveYZXEnergyKernelA
1378                                                : pmeGpu->programHandle_->impl_->solveYZXKernelA;
1379         }
1380         else
1381         {
1382             kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveYZXEnergyKernelB
1383                                                : pmeGpu->programHandle_->impl_->solveYZXKernelB;
1384         }
1385     }
1386     else if (gridOrdering == GridOrdering::XYZ)
1387     {
1388         if (gridIndex == 0)
1389         {
1390             kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveXYZEnergyKernelA
1391                                                : pmeGpu->programHandle_->impl_->solveXYZKernelA;
1392         }
1393         else
1394         {
1395             kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveXYZEnergyKernelB
1396                                                : pmeGpu->programHandle_->impl_->solveXYZKernelB;
1397         }
1398     }
1399
1400     pme_gpu_start_timing(pmeGpu, timingId);
1401     auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1402 #if c_canEmbedBuffers
1403     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1404 #else
1405     const auto kernelArgs = prepareGpuKernelArguments(
1406             kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->grid.d_splineModuli[gridIndex],
1407             &kernelParamsPtr->constants.d_virialAndEnergy[gridIndex],
1408             &kernelParamsPtr->grid.d_fourierGrid[gridIndex]);
1409 #endif
1410     launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME solve",
1411                     kernelArgs);
1412     pme_gpu_stop_timing(pmeGpu, timingId);
1413
1414     if (computeEnergyAndVirial)
1415     {
1416         copyFromDeviceBuffer(pmeGpu->staging.h_virialAndEnergy[gridIndex],
1417                              &kernelParamsPtr->constants.d_virialAndEnergy[gridIndex], 0,
1418                              c_virialAndEnergyCount, pmeGpu->archSpecific->pmeStream_,
1419                              pmeGpu->settings.transferKind, nullptr);
1420     }
1421
1422     if (copyInputAndOutputGrid)
1423     {
1424         copyFromDeviceBuffer(h_gridFloat, &kernelParamsPtr->grid.d_fourierGrid[gridIndex], 0,
1425                              pmeGpu->archSpecific->complexGridSize[gridIndex],
1426                              pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
1427     }
1428 }
1429
1430 /*! \brief
1431  * Returns a pointer to the appropriate gather kernel based on the input values.
1432  *
1433  * \param[in]  pmeGpu                   The PME GPU structure.
1434  * \param[in]  threadsPerAtom           Controls whether order or order*order threads are used per atom
1435  * \param[in]  readSplinesFromGlobal    Controls whether spline data should be read from global memory
1436  * \param[in]  numGrids                 Number of grids to use. numGrids == 2 if Coulomb is perturbed.
1437  *
1438  * \return Pointer to the selected kernel
1439  */
1440 inline auto selectGatherKernelPtr(const PmeGpu*  pmeGpu,
1441                                   ThreadsPerAtom threadsPerAtom,
1442                                   bool           readSplinesFromGlobal,
1443                                   const int      numGrids)
1444
1445 {
1446     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1447
1448     if (readSplinesFromGlobal)
1449     {
1450         if (threadsPerAtom == ThreadsPerAtom::Order)
1451         {
1452             if (numGrids == 2)
1453             {
1454                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesThPerAtom4Dual;
1455             }
1456             else
1457             {
1458                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesThPerAtom4Single;
1459             }
1460         }
1461         else
1462         {
1463             if (numGrids == 2)
1464             {
1465                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesDual;
1466             }
1467             else
1468             {
1469                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesSingle;
1470             }
1471         }
1472     }
1473     else
1474     {
1475         if (threadsPerAtom == ThreadsPerAtom::Order)
1476         {
1477             if (numGrids == 2)
1478             {
1479                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelThPerAtom4Dual;
1480             }
1481             else
1482             {
1483                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelThPerAtom4Single;
1484             }
1485         }
1486         else
1487         {
1488             if (numGrids == 2)
1489             {
1490                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelDual;
1491             }
1492             else
1493             {
1494                 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelSingle;
1495             }
1496         }
1497     }
1498     return kernelPtr;
1499 }
1500
1501 void pme_gpu_gather(PmeGpu* pmeGpu, real** h_grids, const float lambda)
1502 {
1503     GMX_ASSERT(
1504             pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
1505             "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
1506
1507     const auto& settings = pmeGpu->settings;
1508
1509     if (!settings.performGPUFFT || settings.copyAllOutputs)
1510     {
1511         for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
1512         {
1513             float* h_grid = const_cast<float*>(h_grids[gridIndex]);
1514             pme_gpu_copy_input_gather_grid(pmeGpu, h_grid, gridIndex);
1515         }
1516     }
1517
1518     if (settings.copyAllOutputs)
1519     {
1520         pme_gpu_copy_input_gather_atom_data(pmeGpu);
1521     }
1522
1523     /* copyAllOutputs is set when running unit tests; in that case the gather kernel reads spline data from global memory */
1524     const bool   readGlobal = pmeGpu->settings.copyAllOutputs;
1525     const size_t blockSize  = pmeGpu->programHandle_->impl_->gatherWorkGroupSize;
1526     const int    order      = pmeGpu->common->pme_order;
1527     GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1528     const int threadsPerAtom =
1529             (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
1530     const bool recalculateSplines = pmeGpu->settings.recalculateSplines;
1531 #if GMX_GPU == GMX_GPU_OPENCL
1532     GMX_ASSERT(pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::OrderSquared,
1533                "Only 16 threads per atom supported in OpenCL");
1534     GMX_ASSERT(!recalculateSplines, "Recalculating splines not supported in OpenCL");
1535 #endif
1536     const int atomsPerBlock = blockSize / threadsPerAtom;
1537
1538     GMX_ASSERT(!(c_pmeAtomDataBlockSize % atomsPerBlock),
1539                "inconsistent atom data padding vs. gathering block size");
1540
1541     const int blockCount = pmeGpu->nAtomsAlloc / atomsPerBlock;
1542     auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1543
1544     KernelLaunchConfig config;
1545     config.blockSize[0] = order;
1546     config.blockSize[1] = (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? 1 : order);
1547     config.blockSize[2] = atomsPerBlock;
1548     config.gridSize[0]  = dimGrid.first;
1549     config.gridSize[1]  = dimGrid.second;
1550
1551     // TODO test different cache configs
1552
1553     int                                timingId = gtPME_GATHER;
1554     PmeGpuProgramImpl::PmeKernelHandle kernelPtr =
1555             selectGatherKernelPtr(pmeGpu, pmeGpu->settings.threadsPerAtom,
1556                                   readGlobal || (!recalculateSplines), pmeGpu->common->ngrids);
1557     // TODO design kernel selection getters and make PmeGpu a friend of PmeGpuProgramImpl
1558
1559     pme_gpu_start_timing(pmeGpu, timingId);
1560     auto* timingEvent     = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1561     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1562     if (pmeGpu->common->ngrids == 1)
1563     {
1564         kernelParamsPtr->current.scale = 1.0;
1565     }
1566     else
1567     {
1568         kernelParamsPtr->current.scale = 1.0 - lambda;
1569     }
1570
1571 #if c_canEmbedBuffers
1572     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1573 #else
1574     const auto kernelArgs = prepareGpuKernelArguments(
1575             kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_A],
1576             &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_B],
1577             &kernelParamsPtr->grid.d_realGrid[FEP_STATE_A], &kernelParamsPtr->grid.d_realGrid[FEP_STATE_B],
1578             &kernelParamsPtr->atoms.d_theta, &kernelParamsPtr->atoms.d_dtheta,
1579             &kernelParamsPtr->atoms.d_gridlineIndices, &kernelParamsPtr->atoms.d_forces);
1580 #endif
1581     launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME gather",
1582                     kernelArgs);
1583     pme_gpu_stop_timing(pmeGpu, timingId);
1584
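         // With GPU force reduction the forces stay on the device and only an availability
         // event is marked for the consumer task; otherwise the forces are copied back to
         // the host right away.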
1585     if (pmeGpu->settings.useGpuForceReduction)
1586     {
1587         pmeGpu->archSpecific->pmeForcesReady.markEvent(pmeGpu->archSpecific->pmeStream_);
1588     }
1589     else
1590     {
1591         pme_gpu_copy_output_forces(pmeGpu);
1592     }
1593 }
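     /* A schematic of how the entry points above are typically sequenced within one PME step.
      * This is an illustrative sketch only: variable names are placeholders, the real
      * orchestration lives in the higher-level PME launch code, and the 3D FFTs are issued
      * between these calls.
      *
      *   pme_gpu_spread(pmeGpu, xReadyOnDevice, h_grids, true, true, lambda); // splines + spread
      *   // forward 3D FFT (real -> complex) for each grid
      *   for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
      *   {
      *       pme_gpu_solve(pmeGpu, gridIndex, h_complexGrids[gridIndex], gridOrdering, computeEnergyAndVirial);
      *   }
      *   // inverse 3D FFT (complex -> real) for each grid
      *   pme_gpu_gather(pmeGpu, h_grids, lambda);
      */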
1594
1595 void* pme_gpu_get_kernelparam_forces(const PmeGpu* pmeGpu)
1596 {
1597     if (pmeGpu && pmeGpu->kernelParams)
1598     {
1599         return pmeGpu->kernelParams->atoms.d_forces;
1600     }
1601     else
1602     {
1603         return nullptr;
1604     }
1605 }
1606
1607 void pme_gpu_set_kernelparam_coordinates(const PmeGpu* pmeGpu, DeviceBuffer<gmx::RVec> d_x)
1608 {
1609     GMX_ASSERT(pmeGpu && pmeGpu->kernelParams,
1610                "PME GPU device buffer can not be set in non-GPU builds or before the GPU PME was "
1611                "initialized.");
1612
1613     GMX_ASSERT(checkDeviceBuffer(d_x, pmeGpu->kernelParams->atoms.nAtoms),
1614                "The device-side buffer can not be set.");
1615
1616     pmeGpu->kernelParams->atoms.d_coordinates = d_x;
1617 }
1618
1619 GpuEventSynchronizer* pme_gpu_get_forces_ready_synchronizer(const PmeGpu* pmeGpu)
1620 {
1621     if (pmeGpu && pmeGpu->kernelParams)
1622     {
1623         return &pmeGpu->archSpecific->pmeForcesReady;
1624     }
1625     else
1626     {
1627         return nullptr;
1628     }
1629 }