src/gromacs/ewald/pme_gpu_internal.cpp
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2016,2017,2018,2019,2020, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35
36 /*! \internal \file
37  *
38  * \brief This file contains internal function implementations
39  * for performing the PME calculations on GPU.
40  *
41  * Note that this file is compiled as regular C++ source in OpenCL builds, but
42  * it is treated as CUDA source in CUDA-enabled GPU builds.
43  *
44  * \author Aleksei Iupinov <a.yupinov@gmail.com>
45  * \ingroup module_ewald
46  */
47
48 #include "gmxpre.h"
49
50 #include "pme_gpu_internal.h"
51
52 #include "config.h"
53
54 #include <list>
55 #include <memory>
56 #include <string>
57
58 #include "gromacs/ewald/ewald_utils.h"
59 #include "gromacs/gpu_utils/gpu_utils.h"
60 #include "gromacs/math/invertmatrix.h"
61 #include "gromacs/math/units.h"
62 #include "gromacs/timing/gpu_timing.h"
63 #include "gromacs/utility/exceptions.h"
64 #include "gromacs/utility/fatalerror.h"
65 #include "gromacs/utility/gmxassert.h"
66 #include "gromacs/utility/logger.h"
67 #include "gromacs/utility/stringutil.h"
68
69 #if GMX_GPU == GMX_GPU_CUDA
70 #    include "gromacs/gpu_utils/pmalloc_cuda.h"
71
72 #    include "pme.cuh"
73 #elif GMX_GPU == GMX_GPU_OPENCL
74 #    include "gromacs/gpu_utils/gmxopencl.h"
75 #endif
76
77 #include "gromacs/ewald/pme.h"
78
79 #include "pme_gpu_3dfft.h"
80 #include "pme_gpu_calculate_splines.h"
81 #include "pme_gpu_constants.h"
82 #include "pme_gpu_program_impl.h"
83 #include "pme_gpu_timings.h"
84 #include "pme_gpu_types.h"
85 #include "pme_gpu_types_host.h"
86 #include "pme_gpu_types_host_impl.h"
87 #include "pme_grid.h"
88 #include "pme_internal.h"
89 #include "pme_solve.h"
90
91 /*! \brief
92  * CUDA only.
93  * Atom limit above which it is advantageous to recalculate the splines in the gather
94  * and to use fewer threads per atom in the spline and spread kernels.
95  */
96 constexpr int c_pmeGpuPerformanceAtomLimit = 23000;
97
98 /*! \internal \brief
99  * Wrapper for getting a pointer to the plain C++ part of the GPU kernel parameters structure.
100  *
101  * \param[in] pmeGpu  The PME GPU structure.
102  * \returns The pointer to the kernel parameters.
103  */
104 static PmeGpuKernelParamsBase* pme_gpu_get_kernel_params_base_ptr(const PmeGpu* pmeGpu)
105 {
106     // reinterpret_cast is needed because the derived CUDA structure is not known in this file
107     auto* kernelParamsPtr = reinterpret_cast<PmeGpuKernelParamsBase*>(pmeGpu->kernelParams.get());
108     return kernelParamsPtr;
109 }
110
111 /*! \brief
112  * Atom data block size (in terms of number of atoms).
113  * This is the least common multiple of the numbers of atoms processed by
114  * a single block/workgroup of the spread and gather kernels.
115  * The GPU atom data buffers must be padded, which means that
116  * the number of atoms used for determining the size of the memory
117  * allocation must be divisible by this.
118  */
119 constexpr int c_pmeAtomDataBlockSize = 64;
120
121 int pme_gpu_get_atom_data_block_size()
122 {
123     return c_pmeAtomDataBlockSize;
124 }
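/* Illustrative sketch only (hypothetical numbers, not part of the API): pme_gpu_reinit_atoms()
 * below rounds the atom count up to a multiple of this block size before sizing the device
 * buffers, e.g.
 *
 *   const int blockSize    = pme_gpu_get_atom_data_block_size();                  // 64
 *   const int nAtomsPadded = ((1000 + blockSize - 1) / blockSize) * blockSize;    // -> 1024
 *
 * so every atom-data allocation stays divisible by the spread/gather block size.
 */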
125
126 void pme_gpu_synchronize(const PmeGpu* pmeGpu)
127 {
128     pmeGpu->archSpecific->pmeStream_.synchronize();
129 }
130
131 void pme_gpu_alloc_energy_virial(PmeGpu* pmeGpu)
132 {
133     const size_t energyAndVirialSize = c_virialAndEnergyCount * sizeof(float);
134     allocateDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy, c_virialAndEnergyCount,
135                          pmeGpu->archSpecific->deviceContext_);
136     pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_virialAndEnergy), energyAndVirialSize);
137 }
138
139 void pme_gpu_free_energy_virial(PmeGpu* pmeGpu)
140 {
141     freeDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy);
142     pfree(pmeGpu->staging.h_virialAndEnergy);
143     pmeGpu->staging.h_virialAndEnergy = nullptr;
144 }
145
146 void pme_gpu_clear_energy_virial(const PmeGpu* pmeGpu)
147 {
148     clearDeviceBufferAsync(&pmeGpu->kernelParams->constants.d_virialAndEnergy, 0,
149                            c_virialAndEnergyCount, pmeGpu->archSpecific->pmeStream_);
150 }
151
152 void pme_gpu_realloc_and_copy_bspline_values(PmeGpu* pmeGpu)
153 {
154     const int splineValuesOffset[DIM] = { 0, pmeGpu->kernelParams->grid.realGridSize[XX],
155                                           pmeGpu->kernelParams->grid.realGridSize[XX]
156                                                   + pmeGpu->kernelParams->grid.realGridSize[YY] };
157     memcpy(&pmeGpu->kernelParams->grid.splineValuesOffset, &splineValuesOffset, sizeof(splineValuesOffset));
158
159     const int newSplineValuesSize = pmeGpu->kernelParams->grid.realGridSize[XX]
160                                     + pmeGpu->kernelParams->grid.realGridSize[YY]
161                                     + pmeGpu->kernelParams->grid.realGridSize[ZZ];
162     const bool shouldRealloc = (newSplineValuesSize > pmeGpu->archSpecific->splineValuesSize);
163     reallocateDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, newSplineValuesSize,
164                            &pmeGpu->archSpecific->splineValuesSize,
165                            &pmeGpu->archSpecific->splineValuesSizeAlloc,
166                            pmeGpu->archSpecific->deviceContext_);
167     if (shouldRealloc)
168     {
169         /* Reallocate the host buffer */
170         pfree(pmeGpu->staging.h_splineModuli);
171         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_splineModuli),
172                 newSplineValuesSize * sizeof(float));
173     }
174     for (int i = 0; i < DIM; i++)
175     {
176         memcpy(pmeGpu->staging.h_splineModuli + splineValuesOffset[i],
177                pmeGpu->common->bsp_mod[i].data(), pmeGpu->common->bsp_mod[i].size() * sizeof(float));
178     }
179     /* TODO: pin original buffer instead! */
180     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, pmeGpu->staging.h_splineModuli,
181                        0, newSplineValuesSize, pmeGpu->archSpecific->pmeStream_,
182                        pmeGpu->settings.transferKind, nullptr);
183 }
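/* Illustrative sketch of the packed layout built above, for a hypothetical 60 x 48 x 40 real grid:
 *
 *   splineValuesOffset  = { 0, 60, 108 };   // XX at 0, YY at nx, ZZ at nx + ny
 *   newSplineValuesSize = 60 + 48 + 40;     // 148 floats in d_splineModuli
 *
 * i.e. the three per-dimension bsp_mod arrays are stored back-to-back in one device buffer.
 */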
184
185 void pme_gpu_free_bspline_values(const PmeGpu* pmeGpu)
186 {
187     pfree(pmeGpu->staging.h_splineModuli);
188     freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli);
189 }
190
191 void pme_gpu_realloc_forces(PmeGpu* pmeGpu)
192 {
193     const size_t newForcesSize = pmeGpu->nAtomsAlloc * DIM;
194     GMX_ASSERT(newForcesSize > 0, "Bad number of atoms in PME GPU");
195     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, newForcesSize,
196                            &pmeGpu->archSpecific->forcesSize, &pmeGpu->archSpecific->forcesSizeAlloc,
197                            pmeGpu->archSpecific->deviceContext_);
198     pmeGpu->staging.h_forces.reserveWithPadding(pmeGpu->nAtomsAlloc);
199     pmeGpu->staging.h_forces.resizeWithPadding(pmeGpu->kernelParams->atoms.nAtoms);
200 }
201
202 void pme_gpu_free_forces(const PmeGpu* pmeGpu)
203 {
204     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces);
205 }
206
207 void pme_gpu_copy_input_forces(PmeGpu* pmeGpu)
208 {
209     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
210     float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
211     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, h_forcesFloat, 0,
212                        DIM * pmeGpu->kernelParams->atoms.nAtoms, pmeGpu->archSpecific->pmeStream_,
213                        pmeGpu->settings.transferKind, nullptr);
214 }
215
216 void pme_gpu_copy_output_forces(PmeGpu* pmeGpu)
217 {
218     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
219     float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
220     copyFromDeviceBuffer(h_forcesFloat, &pmeGpu->kernelParams->atoms.d_forces, 0,
221                          DIM * pmeGpu->kernelParams->atoms.nAtoms, pmeGpu->archSpecific->pmeStream_,
222                          pmeGpu->settings.transferKind, nullptr);
223 }
224
225 void pme_gpu_realloc_and_copy_input_coefficients(PmeGpu* pmeGpu, const float* h_coefficients)
226 {
227     GMX_ASSERT(h_coefficients, "Bad host-side charge buffer in PME GPU");
228     const size_t newCoefficientsSize = pmeGpu->nAtomsAlloc;
229     GMX_ASSERT(newCoefficientsSize > 0, "Bad number of atoms in PME GPU");
230     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients, newCoefficientsSize,
231                            &pmeGpu->archSpecific->coefficientsSize,
232                            &pmeGpu->archSpecific->coefficientsSizeAlloc,
233                            pmeGpu->archSpecific->deviceContext_);
234     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients,
235                        const_cast<float*>(h_coefficients), 0, pmeGpu->kernelParams->atoms.nAtoms,
236                        pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
237
238     const size_t paddingIndex = pmeGpu->kernelParams->atoms.nAtoms;
239     const size_t paddingCount = pmeGpu->nAtomsAlloc - paddingIndex;
240     if (paddingCount > 0)
241     {
242         clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coefficients, paddingIndex,
243                                paddingCount, pmeGpu->archSpecific->pmeStream_);
244     }
245 }
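/* Illustrative sketch with hypothetical numbers: for 1000 atoms padded to nAtomsAlloc = 1024,
 * the copy above fills charge elements [0, 1000) and the clear zeroes the padding [1000, 1024),
 * so kernels reading the whole padded range see zero coefficients for the padding atoms.
 */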
246
247 void pme_gpu_free_coefficients(const PmeGpu* pmeGpu)
248 {
249     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients);
250 }
251
252 void pme_gpu_realloc_spline_data(PmeGpu* pmeGpu)
253 {
254     const int order             = pmeGpu->common->pme_order;
255     const int newSplineDataSize = DIM * order * pmeGpu->nAtomsAlloc;
256     GMX_ASSERT(newSplineDataSize > 0, "Bad number of atoms in PME GPU");
257     /* Two arrays of the same size */
258     const bool shouldRealloc        = (newSplineDataSize > pmeGpu->archSpecific->splineDataSize);
259     int        currentSizeTemp      = pmeGpu->archSpecific->splineDataSize;
260     int        currentSizeTempAlloc = pmeGpu->archSpecific->splineDataSizeAlloc;
261     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta, newSplineDataSize, &currentSizeTemp,
262                            &currentSizeTempAlloc, pmeGpu->archSpecific->deviceContext_);
263     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta, newSplineDataSize,
264                            &pmeGpu->archSpecific->splineDataSize, &pmeGpu->archSpecific->splineDataSizeAlloc,
265                            pmeGpu->archSpecific->deviceContext_);
266     // the host side reallocation
267     if (shouldRealloc)
268     {
269         pfree(pmeGpu->staging.h_theta);
270         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_theta), newSplineDataSize * sizeof(float));
271         pfree(pmeGpu->staging.h_dtheta);
272         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_dtheta), newSplineDataSize * sizeof(float));
273     }
274 }
275
276 void pme_gpu_free_spline_data(const PmeGpu* pmeGpu)
277 {
278     /* Two arrays of the same size */
279     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta);
280     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta);
281     pfree(pmeGpu->staging.h_theta);
282     pfree(pmeGpu->staging.h_dtheta);
283 }
284
285 void pme_gpu_realloc_grid_indices(PmeGpu* pmeGpu)
286 {
287     const size_t newIndicesSize = DIM * pmeGpu->nAtomsAlloc;
288     GMX_ASSERT(newIndicesSize > 0, "Bad number of atoms in PME GPU");
289     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices, newIndicesSize,
290                            &pmeGpu->archSpecific->gridlineIndicesSize,
291                            &pmeGpu->archSpecific->gridlineIndicesSizeAlloc,
292                            pmeGpu->archSpecific->deviceContext_);
293     pfree(pmeGpu->staging.h_gridlineIndices);
294     pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_gridlineIndices), newIndicesSize * sizeof(int));
295 }
296
297 void pme_gpu_free_grid_indices(const PmeGpu* pmeGpu)
298 {
299     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices);
300     pfree(pmeGpu->staging.h_gridlineIndices);
301 }
302
303 void pme_gpu_realloc_grids(PmeGpu* pmeGpu)
304 {
305     auto*     kernelParamsPtr = pmeGpu->kernelParams.get();
306     const int newRealGridSize = kernelParamsPtr->grid.realGridSizePadded[XX]
307                                 * kernelParamsPtr->grid.realGridSizePadded[YY]
308                                 * kernelParamsPtr->grid.realGridSizePadded[ZZ];
309     const int newComplexGridSize = kernelParamsPtr->grid.complexGridSizePadded[XX]
310                                    * kernelParamsPtr->grid.complexGridSizePadded[YY]
311                                    * kernelParamsPtr->grid.complexGridSizePadded[ZZ] * 2;
312     // Multiplied by 2 because the complex grid size is counted in complex numbers, but all allocations/pointers are float
313     if (pmeGpu->archSpecific->performOutOfPlaceFFT)
314     {
315         /* 2 separate grids */
316         reallocateDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, newComplexGridSize,
317                                &pmeGpu->archSpecific->complexGridSize,
318                                &pmeGpu->archSpecific->complexGridSizeAlloc,
319                                pmeGpu->archSpecific->deviceContext_);
320         reallocateDeviceBuffer(
321                 &kernelParamsPtr->grid.d_realGrid, newRealGridSize, &pmeGpu->archSpecific->realGridSize,
322                 &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->deviceContext_);
323     }
324     else
325     {
326         /* A single buffer so that any grid will fit */
327         const int newGridsSize = std::max(newRealGridSize, newComplexGridSize);
328         reallocateDeviceBuffer(
329                 &kernelParamsPtr->grid.d_realGrid, newGridsSize, &pmeGpu->archSpecific->realGridSize,
330                 &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->deviceContext_);
331         kernelParamsPtr->grid.d_fourierGrid   = kernelParamsPtr->grid.d_realGrid;
332         pmeGpu->archSpecific->complexGridSize = pmeGpu->archSpecific->realGridSize;
333         // the size might get used later for copying the grid
334     }
335 }
336
337 void pme_gpu_free_grids(const PmeGpu* pmeGpu)
338 {
339     if (pmeGpu->archSpecific->performOutOfPlaceFFT)
340     {
341         freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_fourierGrid);
342     }
343     freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid);
344 }
345
346 void pme_gpu_clear_grids(const PmeGpu* pmeGpu)
347 {
348     clearDeviceBufferAsync(&pmeGpu->kernelParams->grid.d_realGrid, 0,
349                            pmeGpu->archSpecific->realGridSize, pmeGpu->archSpecific->pmeStream_);
350 }
351
352 void pme_gpu_realloc_and_copy_fract_shifts(PmeGpu* pmeGpu)
353 {
354     pme_gpu_free_fract_shifts(pmeGpu);
355
356     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
357
358     const int nx                  = kernelParamsPtr->grid.realGridSize[XX];
359     const int ny                  = kernelParamsPtr->grid.realGridSize[YY];
360     const int nz                  = kernelParamsPtr->grid.realGridSize[ZZ];
361     const int cellCount           = c_pmeNeighborUnitcellCount;
362     const int gridDataOffset[DIM] = { 0, cellCount * nx, cellCount * (nx + ny) };
363
364     memcpy(kernelParamsPtr->grid.tablesOffsets, &gridDataOffset, sizeof(gridDataOffset));
365
366     const int newFractShiftsSize = cellCount * (nx + ny + nz);
367
368 #if GMX_GPU == GMX_GPU_CUDA
369     initParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable, kernelParamsPtr->fractShiftsTableTexture,
370                          pmeGpu->common->fsh.data(), newFractShiftsSize);
371
372     initParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
373                          kernelParamsPtr->gridlineIndicesTableTexture, pmeGpu->common->nn.data(),
374                          newFractShiftsSize);
375 #elif GMX_GPU == GMX_GPU_OPENCL
376     // No dedicated texture routines in OpenCL; plain device buffers are used instead.
377     allocateDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, newFractShiftsSize,
378                          pmeGpu->archSpecific->deviceContext_);
379     allocateDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, newFractShiftsSize,
380                          pmeGpu->archSpecific->deviceContext_);
381     copyToDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, pmeGpu->common->fsh.data(), 0,
382                        newFractShiftsSize, pmeGpu->archSpecific->pmeStream_,
383                        GpuApiCallBehavior::Async, nullptr);
384     copyToDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, pmeGpu->common->nn.data(), 0,
385                        newFractShiftsSize, pmeGpu->archSpecific->pmeStream_,
386                        GpuApiCallBehavior::Async, nullptr);
387 #endif
388 }
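/* Illustrative sketch of the packed tables built above, for a hypothetical 60 x 48 x 40 real grid
 * (writing cellCount for c_pmeNeighborUnitcellCount):
 *
 *   gridDataOffset     = { 0, cellCount * 60, cellCount * 108 };
 *   newFractShiftsSize = cellCount * (60 + 48 + 40);
 *
 * i.e. the per-dimension fractional-shift and gridline-index tables are concatenated into single
 * buffers and addressed through grid.tablesOffsets.
 */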
389
390 void pme_gpu_free_fract_shifts(const PmeGpu* pmeGpu)
391 {
392     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
393 #if GMX_GPU == GMX_GPU_CUDA
394     destroyParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable,
395                             kernelParamsPtr->fractShiftsTableTexture);
396     destroyParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
397                             kernelParamsPtr->gridlineIndicesTableTexture);
398 #elif GMX_GPU == GMX_GPU_OPENCL
399     freeDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable);
400     freeDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable);
401 #endif
402 }
403
404 bool pme_gpu_stream_query(const PmeGpu* pmeGpu)
405 {
406     return haveStreamTasksCompleted(pmeGpu->archSpecific->pmeStream_);
407 }
408
409 void pme_gpu_copy_input_gather_grid(const PmeGpu* pmeGpu, float* h_grid)
410 {
411     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid, h_grid, 0, pmeGpu->archSpecific->realGridSize,
412                        pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
413 }
414
415 void pme_gpu_copy_output_spread_grid(const PmeGpu* pmeGpu, float* h_grid)
416 {
417     copyFromDeviceBuffer(h_grid, &pmeGpu->kernelParams->grid.d_realGrid, 0,
418                          pmeGpu->archSpecific->realGridSize, pmeGpu->archSpecific->pmeStream_,
419                          pmeGpu->settings.transferKind, nullptr);
420     pmeGpu->archSpecific->syncSpreadGridD2H.markEvent(pmeGpu->archSpecific->pmeStream_);
421 }
422
423 void pme_gpu_copy_output_spread_atom_data(const PmeGpu* pmeGpu)
424 {
425     const size_t splinesCount    = DIM * pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order;
426     auto*        kernelParamsPtr = pmeGpu->kernelParams.get();
427     copyFromDeviceBuffer(pmeGpu->staging.h_dtheta, &kernelParamsPtr->atoms.d_dtheta, 0, splinesCount,
428                          pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
429     copyFromDeviceBuffer(pmeGpu->staging.h_theta, &kernelParamsPtr->atoms.d_theta, 0, splinesCount,
430                          pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
431     copyFromDeviceBuffer(pmeGpu->staging.h_gridlineIndices, &kernelParamsPtr->atoms.d_gridlineIndices,
432                          0, kernelParamsPtr->atoms.nAtoms * DIM, pmeGpu->archSpecific->pmeStream_,
433                          pmeGpu->settings.transferKind, nullptr);
434 }
435
436 void pme_gpu_copy_input_gather_atom_data(const PmeGpu* pmeGpu)
437 {
438     const size_t splinesCount    = DIM * pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order;
439     auto*        kernelParamsPtr = pmeGpu->kernelParams.get();
440
441     // TODO: could clear only the padding and not the whole thing, but this is test-exclusive code anyway
442     clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_gridlineIndices, 0, pmeGpu->nAtomsAlloc * DIM,
443                            pmeGpu->archSpecific->pmeStream_);
444     clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_dtheta, 0,
445                            pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
446                            pmeGpu->archSpecific->pmeStream_);
447     clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_theta, 0,
448                            pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
449                            pmeGpu->archSpecific->pmeStream_);
450
451     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_dtheta, pmeGpu->staging.h_dtheta, 0, splinesCount,
452                        pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
453     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_theta, pmeGpu->staging.h_theta, 0, splinesCount,
454                        pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
455     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_gridlineIndices, pmeGpu->staging.h_gridlineIndices,
456                        0, kernelParamsPtr->atoms.nAtoms * DIM, pmeGpu->archSpecific->pmeStream_,
457                        pmeGpu->settings.transferKind, nullptr);
458 }
459
460 void pme_gpu_sync_spread_grid(const PmeGpu* pmeGpu)
461 {
462     pmeGpu->archSpecific->syncSpreadGridD2H.waitForEvent();
463 }
464
465 void pme_gpu_init_internal(PmeGpu* pmeGpu)
466 {
467 #if GMX_GPU == GMX_GPU_CUDA
468     // Prepare to use the device that this PME task was assigned earlier.
469     // Other entities, such as CUDA timing events, are known to implicitly use the device context.
470     CU_RET_ERR(cudaSetDevice(pmeGpu->deviceInfo->id), "Switching to PME CUDA device");
471 #endif
472
473     /* Allocate the target-specific structures */
474     pmeGpu->archSpecific.reset(new PmeGpuSpecific(pmeGpu->programHandle_->impl_->deviceContext_));
475     pmeGpu->kernelParams.reset(new PmeGpuKernelParams());
476
477     pmeGpu->archSpecific->performOutOfPlaceFFT = true;
478     /* This should give better performance, according to the cuFFT documentation.
479      * The performance seems to be the same though.
480      * TODO: PME could also try to pick up nice grid sizes (with factors of 2, 3, 5, 7).
481      */
482
483     // timing enabling - TODO put this in gpu_utils (even though generally this is just option handling?) and reuse in NB
484     if (GMX_GPU == GMX_GPU_CUDA)
485     {
486         /* WARNING: CUDA timings are incorrect with multiple streams.
487          *          This is the main reason why they are disabled by default.
488          */
489         // TODO: Consider turning on by default when we can detect nr of streams.
490         pmeGpu->archSpecific->useTiming = (getenv("GMX_ENABLE_GPU_TIMING") != nullptr);
491     }
492     else if (GMX_GPU == GMX_GPU_OPENCL)
493     {
494         pmeGpu->archSpecific->useTiming = (getenv("GMX_DISABLE_GPU_TIMING") == nullptr);
495     }
496
497 #if GMX_GPU == GMX_GPU_CUDA
498     pmeGpu->maxGridWidthX = pmeGpu->deviceInfo->prop.maxGridSize[0];
499 #elif GMX_GPU == GMX_GPU_OPENCL
500     pmeGpu->maxGridWidthX = INT32_MAX / 2;
501     // TODO: is there really no global work size limit in OpenCL?
502 #endif
503
504     /* Creating a PME GPU stream:
505      * - default high priority with CUDA
506      * - no priorities implemented yet with OpenCL; see #2532
507      */
508     pmeGpu->archSpecific->pmeStream_.init(*pmeGpu->deviceInfo, pmeGpu->archSpecific->deviceContext_,
509                                           DeviceStreamPriority::High, pmeGpu->archSpecific->useTiming);
510 }
511
512 void pme_gpu_reinit_3dfft(const PmeGpu* pmeGpu)
513 {
514     if (pme_gpu_settings(pmeGpu).performGPUFFT)
515     {
516         pmeGpu->archSpecific->fftSetup.resize(0);
517         for (int i = 0; i < pmeGpu->common->ngrids; i++)
518         {
519             pmeGpu->archSpecific->fftSetup.push_back(std::make_unique<GpuParallel3dFft>(pmeGpu));
520         }
521     }
522 }
523
524 void pme_gpu_destroy_3dfft(const PmeGpu* pmeGpu)
525 {
526     pmeGpu->archSpecific->fftSetup.resize(0);
527 }
528
529 void pme_gpu_getEnergyAndVirial(const gmx_pme_t& pme, PmeOutput* output)
530 {
531     const PmeGpu* pmeGpu = pme.gpu;
532     for (int j = 0; j < c_virialAndEnergyCount; j++)
533     {
534         GMX_ASSERT(std::isfinite(pmeGpu->staging.h_virialAndEnergy[j]),
535                    "PME GPU produces incorrect energy/virial.");
536     }
537
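    /* The staging buffer is unpacked below in a fixed order: the six unique virial components
     * XX, YY, ZZ, XY, XZ, YZ (each scaled by 0.25), followed by the energy (scaled by 0.5),
     * which is the order this code expects the GPU solve kernel to have written.
     */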
538     unsigned int j                 = 0;
539     output->coulombVirial_[XX][XX] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
540     output->coulombVirial_[YY][YY] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
541     output->coulombVirial_[ZZ][ZZ] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
542     output->coulombVirial_[XX][YY] = output->coulombVirial_[YY][XX] =
543             0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
544     output->coulombVirial_[XX][ZZ] = output->coulombVirial_[ZZ][XX] =
545             0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
546     output->coulombVirial_[YY][ZZ] = output->coulombVirial_[ZZ][YY] =
547             0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
548     output->coulombEnergy_ = 0.5F * pmeGpu->staging.h_virialAndEnergy[j++];
549 }
550
551 /*! \brief Sets the force-related members in \p output
552  *
553  * \param[in]   pmeGpu      PME GPU data structure
554  * \param[out]  output      Pointer to PME output data structure
555  */
556 static void pme_gpu_getForceOutput(PmeGpu* pmeGpu, PmeOutput* output)
557 {
558     output->haveForceOutput_ = !pmeGpu->settings.useGpuForceReduction;
559     if (output->haveForceOutput_)
560     {
561         output->forces_ = pmeGpu->staging.h_forces;
562     }
563 }
564
565 PmeOutput pme_gpu_getOutput(const gmx_pme_t& pme, const bool computeEnergyAndVirial)
566 {
567     PmeGpu* pmeGpu = pme.gpu;
568
569     PmeOutput output;
570
571     pme_gpu_getForceOutput(pmeGpu, &output);
572
573     if (computeEnergyAndVirial)
574     {
575         if (pme_gpu_settings(pmeGpu).performGPUSolve)
576         {
577             pme_gpu_getEnergyAndVirial(pme, &output);
578         }
579         else
580         {
581             get_pme_ener_vir_q(pme.solve_work, pme.nthread, &output);
582         }
583     }
584     return output;
585 }
586
587 void pme_gpu_update_input_box(PmeGpu gmx_unused* pmeGpu, const matrix gmx_unused box)
588 {
589 #if GMX_DOUBLE
590     GMX_THROW(gmx::NotImplementedError("PME is implemented for single-precision only on GPU"));
591 #else
592     matrix scaledBox;
593     pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
594     auto* kernelParamsPtr              = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
595     kernelParamsPtr->current.boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
596     GMX_ASSERT(kernelParamsPtr->current.boxVolume != 0.0F, "Zero volume of the unit cell");
597     matrix recipBox;
598     gmx::invertBoxMatrix(scaledBox, recipBox);
599
600     /* The GPU recipBox is transposed as compared to the CPU recipBox.
601      * Spread uses matrix columns (while solve and gather use rows).
602      * There is no particular reason for this; it might be further rethought/optimized for better access patterns.
603      */
604     const real newRecipBox[DIM][DIM] = { { recipBox[XX][XX], recipBox[YY][XX], recipBox[ZZ][XX] },
605                                          { 0.0, recipBox[YY][YY], recipBox[ZZ][YY] },
606                                          { 0.0, 0.0, recipBox[ZZ][ZZ] } };
607     memcpy(kernelParamsPtr->current.recipBox, newRecipBox, sizeof(matrix));
608 #endif
609 }
610
611 /*! \brief \libinternal
612  * (Re-)initializes all the PME GPU data related to the grid size and cut-off.
613  *
614  * \param[in] pmeGpu            The PME GPU structure.
615  */
616 static void pme_gpu_reinit_grids(PmeGpu* pmeGpu)
617 {
618     auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
619     kernelParamsPtr->grid.ewaldFactor =
620             (M_PI * M_PI) / (pmeGpu->common->ewaldcoeff_q * pmeGpu->common->ewaldcoeff_q);
621
622     /* The grid size variants */
623     for (int i = 0; i < DIM; i++)
624     {
625         kernelParamsPtr->grid.realGridSize[i] = pmeGpu->common->nk[i];
626         kernelParamsPtr->grid.realGridSizeFP[i] =
627                 static_cast<float>(kernelParamsPtr->grid.realGridSize[i]);
628         kernelParamsPtr->grid.realGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
629
630         // The complex grid currently uses no padding;
631         // if it starts to do so, then another test should be added for that
632         kernelParamsPtr->grid.complexGridSize[i]       = kernelParamsPtr->grid.realGridSize[i];
633         kernelParamsPtr->grid.complexGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
634     }
635     /* FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
636     if (!pme_gpu_settings(pmeGpu).performGPUFFT)
637     {
638         // This allows the GPU spread grid and the CPU fftgrid to have the same layout, so that we can copy the data directly
639         kernelParamsPtr->grid.realGridSizePadded[ZZ] =
640                 (kernelParamsPtr->grid.realGridSize[ZZ] / 2 + 1) * 2;
641     }
642
643     /* GPU FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
644     kernelParamsPtr->grid.complexGridSize[ZZ] /= 2;
645     kernelParamsPtr->grid.complexGridSize[ZZ]++;
646     kernelParamsPtr->grid.complexGridSizePadded[ZZ] = kernelParamsPtr->grid.complexGridSize[ZZ];
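    /* Illustrative sketch with a hypothetical 64 x 64 x 48 real grid:
     *
     *   realGridSize           = { 64, 64, 48 };
     *   complexGridSize        = { 64, 64, 25 };            // ZZ: 48 / 2 + 1
     *   realGridSizePadded[ZZ] = (48 / 2 + 1) * 2 = 50;     // only when the FFT runs on the CPU
     */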
647
648     pme_gpu_realloc_and_copy_fract_shifts(pmeGpu);
649     pme_gpu_realloc_and_copy_bspline_values(pmeGpu);
650     pme_gpu_realloc_grids(pmeGpu);
651     pme_gpu_reinit_3dfft(pmeGpu);
652 }
653
654 /* Several GPU functions that refer to the CPU PME data live here.
655  * We would like to keep these away from the GPU-framework specific code for clarity,
656  * as well as compilation issues with MPI.
657  */
658
659 /*! \brief \libinternal
660  * Copies everything useful from the PME CPU to the PME GPU structure.
661  * The goal is to minimize interaction with the PME CPU structure in the GPU code.
662  *
663  * \param[in] pme         The PME structure.
664  */
665 static void pme_gpu_copy_common_data_from(const gmx_pme_t* pme)
666 {
667     /* TODO: Consider refactoring the CPU PME code to use the same structure,
668      * so that this function becomes 2 lines */
669     PmeGpu* pmeGpu               = pme->gpu;
670     pmeGpu->common->ngrids       = pme->ngrids;
671     pmeGpu->common->epsilon_r    = pme->epsilon_r;
672     pmeGpu->common->ewaldcoeff_q = pme->ewaldcoeff_q;
673     pmeGpu->common->nk[XX]       = pme->nkx;
674     pmeGpu->common->nk[YY]       = pme->nky;
675     pmeGpu->common->nk[ZZ]       = pme->nkz;
676     pmeGpu->common->pme_order    = pme->pme_order;
677     if (pmeGpu->common->pme_order != c_pmeGpuOrder)
678     {
679         GMX_THROW(gmx::NotImplementedError("pme_order != 4 is not implemented!"));
680     }
681     for (int i = 0; i < DIM; i++)
682     {
683         pmeGpu->common->bsp_mod[i].assign(pme->bsp_mod[i], pme->bsp_mod[i] + pmeGpu->common->nk[i]);
684     }
685     const int cellCount = c_pmeNeighborUnitcellCount;
686     pmeGpu->common->fsh.resize(0);
687     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshx, pme->fshx + cellCount * pme->nkx);
688     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshy, pme->fshy + cellCount * pme->nky);
689     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshz, pme->fshz + cellCount * pme->nkz);
690     pmeGpu->common->nn.resize(0);
691     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnx, pme->nnx + cellCount * pme->nkx);
692     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nny, pme->nny + cellCount * pme->nky);
693     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnz, pme->nnz + cellCount * pme->nkz);
694     pmeGpu->common->runMode       = pme->runMode;
695     pmeGpu->common->isRankPmeOnly = !pme->bPPnode;
696     pmeGpu->common->boxScaler     = pme->boxScaler;
697 }
698
699 /*! \libinternal \brief
700  * Uses heuristics to select the best-performing PME spread and gather kernels.
701  *
702  * \param[in,out] pmeGpu         The PME GPU structure.
703  */
704 static void pme_gpu_select_best_performing_pme_spreadgather_kernels(PmeGpu* pmeGpu)
705 {
706     if (pmeGpu->kernelParams->atoms.nAtoms > c_pmeGpuPerformanceAtomLimit && (GMX_GPU == GMX_GPU_CUDA))
707     {
708         pmeGpu->settings.threadsPerAtom     = ThreadsPerAtom::Order;
709         pmeGpu->settings.recalculateSplines = true;
710     }
711     else
712     {
713         pmeGpu->settings.threadsPerAtom     = ThreadsPerAtom::OrderSquared;
714         pmeGpu->settings.recalculateSplines = false;
715     }
716 }
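/* Illustrative sketch: on CUDA with, say, 30000 atoms (above c_pmeGpuPerformanceAtomLimit) the
 * spread/gather kernels use order threads per atom and recalculate the splines in the gather,
 * whereas with, say, 10000 atoms (or whenever not using CUDA) they use order^2 threads per atom
 * and reuse the stored splines.
 */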
717
718
719 /*! \libinternal \brief
720  * Initializes the PME GPU data at the beginning of the run.
721  * TODO: this should become PmeGpu::PmeGpu()
722  *
723  * \param[in,out] pme            The PME structure.
724  * \param[in,out] deviceInfo     The GPU device information structure.
725  * \param[in]     pmeGpuProgram  The handle to the program/kernel data created outside (e.g. in unit tests/runner)
726  */
727 static void pme_gpu_init(gmx_pme_t* pme, const DeviceInformation* deviceInfo, const PmeGpuProgram* pmeGpuProgram)
728 {
729     GMX_ASSERT(deviceInfo != nullptr,
730                "Device information can not be nullptr when GPU is used for PME.");
731     pme->gpu       = new PmeGpu();
732     PmeGpu* pmeGpu = pme->gpu;
733     changePinningPolicy(&pmeGpu->staging.h_forces, pme_get_pinning_policy());
734     pmeGpu->common = std::make_shared<PmeShared>();
735
736     /* These settings are set here for the whole run; dynamic ones are set in pme_gpu_reinit() */
737     /* A convenience variable. */
738     pmeGpu->settings.useDecomposition = (pme->nnodes != 1);
739     /* TODO: CPU gather with GPU spread is broken due to different theta/dtheta layout. */
740     pmeGpu->settings.performGPUGather = true;
741     // By default GPU-side reduction is off (explicitly set here for tests, otherwise reset per-step)
742     pmeGpu->settings.useGpuForceReduction = false;
743
744     pme_gpu_set_testing(pmeGpu, false);
745
746     pmeGpu->deviceInfo = deviceInfo;
747     GMX_ASSERT(pmeGpuProgram != nullptr, "GPU kernels must be already compiled");
748     pmeGpu->programHandle_ = pmeGpuProgram;
749
750     pmeGpu->initializedClfftLibrary_ = std::make_unique<gmx::ClfftInitializer>();
751
752     pme_gpu_init_internal(pmeGpu);
753     pme_gpu_alloc_energy_virial(pmeGpu);
754
755     pme_gpu_copy_common_data_from(pme);
756
757     GMX_ASSERT(pmeGpu->common->epsilon_r != 0.0F, "PME GPU: bad electrostatic coefficient");
758
759     auto* kernelParamsPtr               = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
760     kernelParamsPtr->constants.elFactor = ONE_4PI_EPS0 / pmeGpu->common->epsilon_r;
761 }
762
763 void pme_gpu_get_real_grid_sizes(const PmeGpu* pmeGpu, gmx::IVec* gridSize, gmx::IVec* paddedGridSize)
764 {
765     GMX_ASSERT(gridSize != nullptr, "");
766     GMX_ASSERT(paddedGridSize != nullptr, "");
767     GMX_ASSERT(pmeGpu != nullptr, "");
768     auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
769     for (int i = 0; i < DIM; i++)
770     {
771         (*gridSize)[i]       = kernelParamsPtr->grid.realGridSize[i];
772         (*paddedGridSize)[i] = kernelParamsPtr->grid.realGridSizePadded[i];
773     }
774 }
775
776 void pme_gpu_reinit(gmx_pme_t* pme, const DeviceInformation* deviceInfo, const PmeGpuProgram* pmeGpuProgram)
777 {
778     GMX_ASSERT(pme != nullptr, "Need valid PME object");
779     if (pme->runMode == PmeRunMode::CPU)
780     {
781         GMX_ASSERT(pme->gpu == nullptr, "Should not have PME GPU object");
782         return;
783     }
784
785     if (!pme->gpu)
786     {
787         /* First-time initialization */
788         pme_gpu_init(pme, deviceInfo, pmeGpuProgram);
789     }
790     else
791     {
792         /* After this call nothing in the GPU code should refer to the gmx_pme_t *pme itself - until the next pme_gpu_reinit */
793         pme_gpu_copy_common_data_from(pme);
794     }
795     /* GPU FFT will only get used for a single rank. */
796     pme->gpu->settings.performGPUFFT =
797             (pme->gpu->common->runMode == PmeRunMode::GPU) && !pme->gpu->settings.useDecomposition;
798     pme->gpu->settings.performGPUSolve = (pme->gpu->common->runMode == PmeRunMode::GPU);
799
800     /* Reinit active timers */
801     pme_gpu_reinit_timings(pme->gpu);
802
803     pme_gpu_reinit_grids(pme->gpu);
804     // Note: if timing the reinit launch overhead becomes more relevant
805     // (e.g. with regular PP-PME re-balancing), we should pass wcycle here.
806     pme_gpu_reinit_computation(pme, nullptr);
807     /* Clear the previous box - doesn't hurt, and forces the PME CPU recipbox
808      * update for mixed mode on grid switch. TODO: use shared recipbox field.
809      */
810     std::memset(pme->gpu->common->previousBox, 0, sizeof(pme->gpu->common->previousBox));
811 }
812
813 void pme_gpu_destroy(PmeGpu* pmeGpu)
814 {
815     /* Free lots of data */
816     pme_gpu_free_energy_virial(pmeGpu);
817     pme_gpu_free_bspline_values(pmeGpu);
818     pme_gpu_free_forces(pmeGpu);
819     pme_gpu_free_coefficients(pmeGpu);
820     pme_gpu_free_spline_data(pmeGpu);
821     pme_gpu_free_grid_indices(pmeGpu);
822     pme_gpu_free_fract_shifts(pmeGpu);
823     pme_gpu_free_grids(pmeGpu);
824
825     pme_gpu_destroy_3dfft(pmeGpu);
826
827     delete pmeGpu;
828 }
829
830 void pme_gpu_reinit_atoms(PmeGpu* pmeGpu, const int nAtoms, const real* charges)
831 {
832     auto* kernelParamsPtr         = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
833     kernelParamsPtr->atoms.nAtoms = nAtoms;
834     const int  block_size         = pme_gpu_get_atom_data_block_size();
835     const int  nAtomsNewPadded    = ((nAtoms + block_size - 1) / block_size) * block_size;
836     const bool haveToRealloc      = (pmeGpu->nAtomsAlloc < nAtomsNewPadded);
837     pmeGpu->nAtomsAlloc           = nAtomsNewPadded;
838
839 #if GMX_DOUBLE
840     GMX_RELEASE_ASSERT(false, "Only single precision supported");
841     GMX_UNUSED_VALUE(charges);
842 #else
843     pme_gpu_realloc_and_copy_input_coefficients(pmeGpu, reinterpret_cast<const float*>(charges));
844     /* Could also be checked for haveToRealloc, but the copy always needs to be performed */
845 #endif
846
847     if (haveToRealloc)
848     {
849         pme_gpu_realloc_forces(pmeGpu);
850         pme_gpu_realloc_spline_data(pmeGpu);
851         pme_gpu_realloc_grid_indices(pmeGpu);
852     }
853     pme_gpu_select_best_performing_pme_spreadgather_kernels(pmeGpu);
854 }
855
856 /*! \internal \brief
857  * Returns the raw timing event from the corresponding GpuRegionTimer (if timings are enabled).
858  * In CUDA the result can be a nullptr stub, per the GpuRegionTimer implementation.
859  *
860  * \param[in] pmeGpu         The PME GPU data structure.
861  * \param[in] PMEStageId     The PME GPU stage gtPME_ index from the enum in src/gromacs/timing/gpu_timing.h
862  */
863 static CommandEvent* pme_gpu_fetch_timing_event(const PmeGpu* pmeGpu, size_t PMEStageId)
864 {
865     CommandEvent* timingEvent = nullptr;
866     if (pme_gpu_timings_enabled(pmeGpu))
867     {
868         GMX_ASSERT(PMEStageId < pmeGpu->archSpecific->timingEvents.size(),
869                    "Wrong PME GPU timing event index");
870         timingEvent = pmeGpu->archSpecific->timingEvents[PMEStageId].fetchNextEvent();
871     }
872     return timingEvent;
873 }
874
875 void pme_gpu_3dfft(const PmeGpu* pmeGpu, gmx_fft_direction dir, int grid_index)
876 {
877     int timerId = (dir == GMX_FFT_REAL_TO_COMPLEX) ? gtPME_FFT_R2C : gtPME_FFT_C2R;
878
879     pme_gpu_start_timing(pmeGpu, timerId);
880     pmeGpu->archSpecific->fftSetup[grid_index]->perform3dFft(
881             dir, pme_gpu_fetch_timing_event(pmeGpu, timerId));
882     pme_gpu_stop_timing(pmeGpu, timerId);
883 }
884
885 /*! \brief
886  * Given possibly large \p blockCount, returns a compact 1D or 2D grid for kernel scheduling,
887  * to minimize the number of unused blocks.
888  */
889 std::pair<int, int> inline pmeGpuCreateGrid(const PmeGpu* pmeGpu, int blockCount)
890 {
891     // How many maximum widths in X do we need (hopefully just one)
892     const int minRowCount = (blockCount + pmeGpu->maxGridWidthX - 1) / pmeGpu->maxGridWidthX;
893     // Trying to make things even
894     const int colCount = (blockCount + minRowCount - 1) / minRowCount;
895     GMX_ASSERT((colCount * minRowCount - blockCount) >= 0, "pmeGpuCreateGrid: totally wrong");
896     GMX_ASSERT((colCount * minRowCount - blockCount) < minRowCount,
897                "pmeGpuCreateGrid: excessive blocks");
898     return std::pair<int, int>(colCount, minRowCount);
899 }
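/* Illustrative sketch with hypothetical numbers: for blockCount = 70000 and maxGridWidthX = 65535,
 *
 *   minRowCount = (70000 + 65534) / 65535 = 2;
 *   colCount    = (70000 + 1) / 2         = 35000;
 *
 * giving a 35000 x 2 launch grid with no unused blocks; for smaller block counts minRowCount
 * stays 1 and the grid remains one-dimensional.
 */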
900
901 /*! \brief
902  * Returns a pointer to the appropriate spline and spread kernel based on the input bool values
903  *
904  * \param[in]  pmeGpu                   The PME GPU structure.
905  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
906  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
907  *
908  * \return Pointer to CUDA kernel
909  */
910 static auto selectSplineAndSpreadKernelPtr(const PmeGpu* pmeGpu, ThreadsPerAtom threadsPerAtom, bool writeSplinesToGlobal)
911 {
912     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
913     if (writeSplinesToGlobal)
914     {
915         if (threadsPerAtom == ThreadsPerAtom::Order)
916         {
917             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesThPerAtom4;
918         }
919         else
920         {
921             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplines;
922         }
923     }
924     else
925     {
926         if (threadsPerAtom == ThreadsPerAtom::Order)
927         {
928             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4;
929         }
930         else
931         {
932             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernel;
933         }
934     }
935
936     return kernelPtr;
937 }
938
939 /*! \brief
940  * Returns a pointer to the appropriate spline kernel based on the input bool values
941  *
942  * \param[in]  pmeGpu                   The PME GPU structure.
943  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
944  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
945  *
946  * \return Pointer to CUDA kernel
947  */
948 static auto selectSplineKernelPtr(const PmeGpu*  pmeGpu,
949                                   ThreadsPerAtom threadsPerAtom,
950                                   bool gmx_unused writeSplinesToGlobal)
951 {
952     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
953     GMX_ASSERT(
954             writeSplinesToGlobal,
955             "Spline data should always be written to global memory when just calculating splines");
956
957     if (threadsPerAtom == ThreadsPerAtom::Order)
958     {
959         kernelPtr = pmeGpu->programHandle_->impl_->splineKernelThPerAtom4;
960     }
961     else
962     {
963         kernelPtr = pmeGpu->programHandle_->impl_->splineKernel;
964     }
965     return kernelPtr;
966 }
967
968 /*! \brief
969  * Returns a pointer to the appropriate spread kernel based on the input bool values
970  *
971  * \param[in]  pmeGpu                   The PME GPU structure.
972  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
973  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
974  *
975  * \return Pointer to CUDA kernel
976  */
977 static auto selectSpreadKernelPtr(const PmeGpu* pmeGpu, ThreadsPerAtom threadsPerAtom, bool writeSplinesToGlobal)
978 {
979     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
980     if (writeSplinesToGlobal)
981     {
982         if (threadsPerAtom == ThreadsPerAtom::Order)
983         {
984             kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelThPerAtom4;
985         }
986         else
987         {
988             kernelPtr = pmeGpu->programHandle_->impl_->spreadKernel;
989         }
990     }
991     else
992     {
993         /* If we are not saving the spline data, we need to recalculate it
994            using the spline and spread kernel */
995         if (threadsPerAtom == ThreadsPerAtom::Order)
996         {
997             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4;
998         }
999         else
1000         {
1001             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernel;
1002         }
1003     }
1004     return kernelPtr;
1005 }
1006
1007 void pme_gpu_spread(const PmeGpu*         pmeGpu,
1008                     GpuEventSynchronizer* xReadyOnDevice,
1009                     int gmx_unused gridIndex,
1010                     real*          h_grid,
1011                     bool           computeSplines,
1012                     bool           spreadCharges)
1013 {
1014     GMX_ASSERT(computeSplines || spreadCharges,
1015                "PME spline/spread kernel has invalid input (nothing to do)");
1016     const auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1017     GMX_ASSERT(kernelParamsPtr->atoms.nAtoms > 0, "No atom data in PME GPU spread");
1018
1019     const size_t blockSize = pmeGpu->programHandle_->impl_->spreadWorkGroupSize;
1020
1021     const int order = pmeGpu->common->pme_order;
1022     GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1023     const bool writeGlobal = pmeGpu->settings.copyAllOutputs;
1024     const int  threadsPerAtom =
1025             (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
1026     const bool recalculateSplines = pmeGpu->settings.recalculateSplines;
1027 #if GMX_GPU == GMX_GPU_OPENCL
1028     GMX_ASSERT(pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::OrderSquared,
1029                "Only 16 threads per atom supported in OpenCL");
1030     GMX_ASSERT(!recalculateSplines, "Recalculating splines not supported in OpenCL");
1031 #endif
1032     const int atomsPerBlock = blockSize / threadsPerAtom;
1033
1034     // TODO: pick a smaller block size at runtime if needed
1035     // (e.g. on 660 Ti where 50% occupancy is ~25% faster than 100% occupancy with RNAse (~17.8k atoms))
1036     // If doing so, change atomsPerBlock in the kernels as well.
1037     // TODO: test varying block sizes on modern arch-s as well
1038     // TODO: also consider using cudaFuncSetCacheConfig() for preferring shared memory on older architectures
1039     // (for spline data mostly)
1040     GMX_ASSERT(!(c_pmeAtomDataBlockSize % atomsPerBlock),
1041                "inconsistent atom data padding vs. spreading block size");
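    /* Illustrative sketch assuming a hypothetical work-group size of 256: with order = 4 and
     * ThreadsPerAtom::OrderSquared, threadsPerAtom = 16 and atomsPerBlock = 256 / 16 = 16,
     * which divides c_pmeAtomDataBlockSize as the assert above requires; the block is then
     * laid out as order x order x atomsPerBlock (4 x 4 x 16) threads in the config below.
     */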
1042
1043     // Ensure that coordinates are ready on the device before launching spread;
1044     // only needed with CUDA on PP+PME ranks; not needed on separate PME ranks, in unit tests,
1045     // or in OpenCL, as these cases use a single stream (hence xReadyOnDevice == nullptr).
1046     GMX_ASSERT(xReadyOnDevice != nullptr || (GMX_GPU != GMX_GPU_CUDA)
1047                        || pmeGpu->common->isRankPmeOnly || pme_gpu_settings(pmeGpu).copyAllOutputs,
1048                "Need a valid coordinate synchronizer on PP+PME ranks with CUDA.");
1049     if (xReadyOnDevice)
1050     {
1051         xReadyOnDevice->enqueueWaitEvent(pmeGpu->archSpecific->pmeStream_);
1052     }
1053
1054     const int blockCount = pmeGpu->nAtomsAlloc / atomsPerBlock;
1055     auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1056
1057     KernelLaunchConfig config;
1058     config.blockSize[0] = order;
1059     config.blockSize[1] = (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? 1 : order);
1060     config.blockSize[2] = atomsPerBlock;
1061     config.gridSize[0]  = dimGrid.first;
1062     config.gridSize[1]  = dimGrid.second;
1063
1064     int                                timingId;
1065     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1066     if (computeSplines)
1067     {
1068         if (spreadCharges)
1069         {
1070             timingId  = gtPME_SPLINEANDSPREAD;
1071             kernelPtr = selectSplineAndSpreadKernelPtr(pmeGpu, pmeGpu->settings.threadsPerAtom,
1072                                                        writeGlobal || (!recalculateSplines));
1073         }
1074         else
1075         {
1076             timingId  = gtPME_SPLINE;
1077             kernelPtr = selectSplineKernelPtr(pmeGpu, pmeGpu->settings.threadsPerAtom,
1078                                               writeGlobal || (!recalculateSplines));
1079         }
1080     }
1081     else
1082     {
1083         timingId  = gtPME_SPREAD;
1084         kernelPtr = selectSpreadKernelPtr(pmeGpu, pmeGpu->settings.threadsPerAtom,
1085                                           writeGlobal || (!recalculateSplines));
1086     }
1087
1088
1089     pme_gpu_start_timing(pmeGpu, timingId);
1090     auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1091 #if c_canEmbedBuffers
1092     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1093 #else
1094     const auto kernelArgs = prepareGpuKernelArguments(
1095             kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->atoms.d_theta,
1096             &kernelParamsPtr->atoms.d_dtheta, &kernelParamsPtr->atoms.d_gridlineIndices,
1097             &kernelParamsPtr->grid.d_realGrid, &kernelParamsPtr->grid.d_fractShiftsTable,
1098             &kernelParamsPtr->grid.d_gridlineIndicesTable, &kernelParamsPtr->atoms.d_coefficients,
1099             &kernelParamsPtr->atoms.d_coordinates);
1100 #endif
1101
1102     launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent,
1103                     "PME spline/spread", kernelArgs);
1104     pme_gpu_stop_timing(pmeGpu, timingId);
1105
1106     const auto& settings    = pmeGpu->settings;
1107     const bool copyBackGrid = spreadCharges && (!settings.performGPUFFT || settings.copyAllOutputs);
1108     if (copyBackGrid)
1109     {
1110         pme_gpu_copy_output_spread_grid(pmeGpu, h_grid);
1111     }
1112     const bool copyBackAtomData =
1113             computeSplines && (!settings.performGPUGather || settings.copyAllOutputs);
1114     if (copyBackAtomData)
1115     {
1116         pme_gpu_copy_output_spread_atom_data(pmeGpu);
1117     }
1118 }
1119
1120 void pme_gpu_solve(const PmeGpu* pmeGpu, t_complex* h_grid, GridOrdering gridOrdering, bool computeEnergyAndVirial)
1121 {
1122     const auto& settings               = pmeGpu->settings;
1123     const bool  copyInputAndOutputGrid = !settings.performGPUFFT || settings.copyAllOutputs;
1124
1125     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1126
1127     float* h_gridFloat = reinterpret_cast<float*>(h_grid);
1128     if (copyInputAndOutputGrid)
1129     {
1130         copyToDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, h_gridFloat, 0,
1131                            pmeGpu->archSpecific->complexGridSize, pmeGpu->archSpecific->pmeStream_,
1132                            pmeGpu->settings.transferKind, nullptr);
1133     }
1134
1135     int majorDim = -1, middleDim = -1, minorDim = -1;
1136     switch (gridOrdering)
1137     {
1138         case GridOrdering::YZX:
1139             majorDim  = YY;
1140             middleDim = ZZ;
1141             minorDim  = XX;
1142             break;
1143
1144         case GridOrdering::XYZ:
1145             majorDim  = XX;
1146             middleDim = YY;
1147             minorDim  = ZZ;
1148             break;
1149
1150         default: GMX_ASSERT(false, "Implement grid ordering here and below for the kernel launch");
1151     }
1152
1153     const int maxBlockSize = pmeGpu->programHandle_->impl_->solveMaxWorkGroupSize;
1154
1155     const int gridLineSize      = pmeGpu->kernelParams->grid.complexGridSize[minorDim];
1156     const int gridLinesPerBlock = std::max(maxBlockSize / gridLineSize, 1);
1157     const int blocksPerGridLine = (gridLineSize + maxBlockSize - 1) / maxBlockSize;
1158     int       cellsPerBlock;
1159     if (blocksPerGridLine == 1)
1160     {
1161         cellsPerBlock = gridLineSize * gridLinesPerBlock;
1162     }
1163     else
1164     {
1165         cellsPerBlock = (gridLineSize + blocksPerGridLine - 1) / blocksPerGridLine;
1166     }
1167     const int warpSize  = pmeGpu->programHandle_->warpSize();
1168     const int blockSize = (cellsPerBlock + warpSize - 1) / warpSize * warpSize;
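    /* Illustrative sketch with hypothetical numbers: complexGridSize[minorDim] = 25,
     * maxBlockSize = 256 and warpSize = 32 give
     *
     *   gridLinesPerBlock = max(256 / 25, 1)     = 10;
     *   blocksPerGridLine = (25 + 255) / 256     = 1;
     *   cellsPerBlock     = 25 * 10              = 250;
     *   blockSize         = (250 + 31) / 32 * 32 = 256;   // rounded up to full warps
     */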
1169
1170     static_assert(GMX_GPU != GMX_GPU_CUDA || c_solveMaxWarpsPerBlock / 2 >= 4,
1171                   "The CUDA solve energy kernels needs at least 4 warps. "
1172                   "Here we launch at least half of the max warps.");
1173
1174     KernelLaunchConfig config;
1175     config.blockSize[0] = blockSize;
1176     config.gridSize[0]  = blocksPerGridLine;
1177     // rounding up to full warps so that shuffle operations produce defined results
1178     config.gridSize[1] = (pmeGpu->kernelParams->grid.complexGridSize[middleDim] + gridLinesPerBlock - 1)
1179                          / gridLinesPerBlock;
1180     config.gridSize[2] = pmeGpu->kernelParams->grid.complexGridSize[majorDim];
1181
1182     int                                timingId  = gtPME_SOLVE;
1183     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1184     if (gridOrdering == GridOrdering::YZX)
1185     {
1186         kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveYZXEnergyKernel
1187                                            : pmeGpu->programHandle_->impl_->solveYZXKernel;
1188     }
1189     else if (gridOrdering == GridOrdering::XYZ)
1190     {
1191         kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveXYZEnergyKernel
1192                                            : pmeGpu->programHandle_->impl_->solveXYZKernel;
1193     }
1194
1195     pme_gpu_start_timing(pmeGpu, timingId);
1196     auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1197 #if c_canEmbedBuffers
1198     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1199 #else
1200     const auto kernelArgs = prepareGpuKernelArguments(
1201             kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->grid.d_splineModuli,
1202             &kernelParamsPtr->constants.d_virialAndEnergy, &kernelParamsPtr->grid.d_fourierGrid);
1203 #endif
1204     launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME solve",
1205                     kernelArgs);
1206     pme_gpu_stop_timing(pmeGpu, timingId);
1207
1208     if (computeEnergyAndVirial)
1209     {
1210         copyFromDeviceBuffer(pmeGpu->staging.h_virialAndEnergy,
1211                              &kernelParamsPtr->constants.d_virialAndEnergy, 0, c_virialAndEnergyCount,
1212                              pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
1213     }
1214
1215     if (copyInputAndOutputGrid)
1216     {
1217         copyFromDeviceBuffer(h_gridFloat, &kernelParamsPtr->grid.d_fourierGrid, 0,
1218                              pmeGpu->archSpecific->complexGridSize, pmeGpu->archSpecific->pmeStream_,
1219                              pmeGpu->settings.transferKind, nullptr);
1220     }
1221 }
1222
1223 /*! \brief
1224  * Returns a pointer to the appropriate gather kernel based on the input bool values
1225  *
1226  * \param[in]  pmeGpu                   The PME GPU structure.
1227  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
1228  * \param[in]  readSplinesFromGlobal    bool controlling if we should read the spline data from global memory
1229  *
1230  * \return Pointer to CUDA kernel
1231  */
1232 inline auto selectGatherKernelPtr(const PmeGpu* pmeGpu, ThreadsPerAtom threadsPerAtom, bool readSplinesFromGlobal)
1233
1234 {
1235     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1236
1237     if (readSplinesFromGlobal)
1238     {
1239         if (threadsPerAtom == ThreadsPerAtom::Order)
1240         {
1241             kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesThPerAtom4;
1242         }
1243         else
1244         {
1245             kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplines;
1246         }
1247     }
1248     else
1249     {
1250         if (threadsPerAtom == ThreadsPerAtom::Order)
1251         {
1252             kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelThPerAtom4;
1253         }
1254         else
1255         {
1256             kernelPtr = pmeGpu->programHandle_->impl_->gatherKernel;
1257         }
1258     }
1259     return kernelPtr;
1260 }
1261
1262
1263 void pme_gpu_gather(PmeGpu* pmeGpu, const float* h_grid)
1264 {
1265     const auto& settings = pmeGpu->settings;
1266     if (!settings.performGPUFFT || settings.copyAllOutputs)
1267     {
1268         pme_gpu_copy_input_gather_grid(pmeGpu, const_cast<float*>(h_grid));
1269     }
1270
1271     if (settings.copyAllOutputs)
1272     {
1273         pme_gpu_copy_input_gather_atom_data(pmeGpu);
1274     }
1275
1276     /* Set when running unit tests */
1277     const bool   readGlobal = pmeGpu->settings.copyAllOutputs;
1278     const size_t blockSize  = pmeGpu->programHandle_->impl_->gatherWorkGroupSize;
1279     const int    order      = pmeGpu->common->pme_order;
1280     GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1281     const int threadsPerAtom =
1282             (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
1283     const bool recalculateSplines = pmeGpu->settings.recalculateSplines;
1284 #if GMX_GPU == GMX_GPU_OPENCL
1285     GMX_ASSERT(pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::OrderSquared,
1286                "Only 16 threads per atom supported in OpenCL");
1287     GMX_ASSERT(!recalculateSplines, "Recalculating splines not supported in OpenCL");
1288 #endif
1289     const int atomsPerBlock = blockSize / threadsPerAtom;
1290
1291     GMX_ASSERT(!(c_pmeAtomDataBlockSize % atomsPerBlock),
1292                "inconsistent atom data padding vs. gathering block size");
1293
1294     const int blockCount = pmeGpu->nAtomsAlloc / atomsPerBlock;
1295     auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1296
1297     KernelLaunchConfig config;
1298     config.blockSize[0] = order;
1299     config.blockSize[1] = (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? 1 : order);
1300     config.blockSize[2] = atomsPerBlock;
1301     config.gridSize[0]  = dimGrid.first;
1302     config.gridSize[1]  = dimGrid.second;
1303
1304     // TODO test different cache configs
1305
1306     int                                timingId  = gtPME_GATHER;
1307     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = selectGatherKernelPtr(
1308             pmeGpu, pmeGpu->settings.threadsPerAtom, readGlobal || (!recalculateSplines));
1309     // TODO design kernel selection getters and make PmeGpu a friend of PmeGpuProgramImpl
1310
1311     pme_gpu_start_timing(pmeGpu, timingId);
1312     auto*       timingEvent     = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1313     const auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1314 #if c_canEmbedBuffers
1315     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1316 #else
1317     const auto kernelArgs = prepareGpuKernelArguments(
1318             kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->atoms.d_coefficients,
1319             &kernelParamsPtr->grid.d_realGrid, &kernelParamsPtr->atoms.d_theta,
1320             &kernelParamsPtr->atoms.d_dtheta, &kernelParamsPtr->atoms.d_gridlineIndices,
1321             &kernelParamsPtr->atoms.d_forces);
1322 #endif
1323     launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME gather",
1324                     kernelArgs);
1325     pme_gpu_stop_timing(pmeGpu, timingId);
1326
1327     if (pmeGpu->settings.useGpuForceReduction)
1328     {
1329         pmeGpu->archSpecific->pmeForcesReady.markEvent(pmeGpu->archSpecific->pmeStream_);
1330     }
1331     else
1332     {
1333         pme_gpu_copy_output_forces(pmeGpu);
1334     }
1335 }
1336
1337 void* pme_gpu_get_kernelparam_forces(const PmeGpu* pmeGpu)
1338 {
1339     if (pmeGpu && pmeGpu->kernelParams)
1340     {
1341         return pmeGpu->kernelParams->atoms.d_forces;
1342     }
1343     else
1344     {
1345         return nullptr;
1346     }
1347 }
1348
1349 void pme_gpu_set_kernelparam_coordinates(const PmeGpu* pmeGpu, DeviceBuffer<gmx::RVec> d_x)
1350 {
1351     GMX_ASSERT(pmeGpu && pmeGpu->kernelParams,
1352                "PME GPU device buffer can not be set in non-GPU builds or before the GPU PME was "
1353                "initialized.");
1354
1355     GMX_ASSERT(checkDeviceBuffer(d_x, pmeGpu->kernelParams->atoms.nAtoms),
1356                "The device-side buffer can not be set.");
1357
1358     pmeGpu->kernelParams->atoms.d_coordinates = d_x;
1359 }
1360
1361 const DeviceStream* pme_gpu_get_stream(const PmeGpu* pmeGpu)
1362 {
1363     if (pmeGpu)
1364     {
1365         return &pmeGpu->archSpecific->pmeStream_;
1366     }
1367     else
1368     {
1369         return nullptr;
1370     }
1371 }
1372
1373 GpuEventSynchronizer* pme_gpu_get_forces_ready_synchronizer(const PmeGpu* pmeGpu)
1374 {
1375     if (pmeGpu && pmeGpu->kernelParams)
1376     {
1377         return &pmeGpu->archSpecific->pmeForcesReady;
1378     }
1379     else
1380     {
1381         return nullptr;
1382     }
1383 }