Make use of the DeviceStreamManager
alexxy/gromacs.git: src/gromacs/ewald/pme_gpu_internal.cpp
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2016,2017,2018,2019,2020, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35
36 /*! \internal \file
37  *
38  * \brief This file contains internal function implementations
39  * for performing the PME calculations on GPU.
40  *
41  * Note that this file is compiled as regular C++ source in OpenCL builds, but
42  * it is treated as CUDA source in CUDA-enabled GPU builds.
43  *
44  * \author Aleksei Iupinov <a.yupinov@gmail.com>
45  * \ingroup module_ewald
46  */
47
48 #include "gmxpre.h"
49
50 #include "pme_gpu_internal.h"
51
52 #include "config.h"
53
54 #include <list>
55 #include <memory>
56 #include <string>
57
58 #include "gromacs/ewald/ewald_utils.h"
59 #include "gromacs/gpu_utils/device_context.h"
60 #include "gromacs/gpu_utils/device_stream.h"
61 #include "gromacs/gpu_utils/gpu_utils.h"
62 #include "gromacs/math/invertmatrix.h"
63 #include "gromacs/math/units.h"
64 #include "gromacs/timing/gpu_timing.h"
65 #include "gromacs/utility/exceptions.h"
66 #include "gromacs/utility/fatalerror.h"
67 #include "gromacs/utility/gmxassert.h"
68 #include "gromacs/utility/logger.h"
69 #include "gromacs/utility/stringutil.h"
70
71 #if GMX_GPU == GMX_GPU_CUDA
72 #    include "gromacs/gpu_utils/pmalloc_cuda.h"
73
74 #    include "pme.cuh"
75 #elif GMX_GPU == GMX_GPU_OPENCL
76 #    include "gromacs/gpu_utils/gmxopencl.h"
77 #endif
78
79 #include "gromacs/ewald/pme.h"
80
81 #include "pme_gpu_3dfft.h"
82 #include "pme_gpu_calculate_splines.h"
83 #include "pme_gpu_constants.h"
84 #include "pme_gpu_program_impl.h"
85 #include "pme_gpu_timings.h"
86 #include "pme_gpu_types.h"
87 #include "pme_gpu_types_host.h"
88 #include "pme_gpu_types_host_impl.h"
89 #include "pme_grid.h"
90 #include "pme_internal.h"
91 #include "pme_solve.h"
92
93 /*! \brief
94  * CUDA only.
95  * Atom limit above which it is advantageous to turn on the
96  * recalculation of the splines in the gather and to use fewer threads per atom in the spline and spread kernels.
97  */
98 constexpr int c_pmeGpuPerformanceAtomLimit = 23000;
99
100 /*! \internal \brief
101  * Wrapper for getting a pointer to the plain C++ part of the GPU kernel parameters structure.
102  *
103  * \param[in] pmeGpu  The PME GPU structure.
104  * \returns The pointer to the kernel parameters.
105  */
106 static PmeGpuKernelParamsBase* pme_gpu_get_kernel_params_base_ptr(const PmeGpu* pmeGpu)
107 {
108     // reinterpret_cast is needed because the derived CUDA structure is not known in this file
109     auto* kernelParamsPtr = reinterpret_cast<PmeGpuKernelParamsBase*>(pmeGpu->kernelParams.get());
110     return kernelParamsPtr;
111 }
112
113 /*! \brief
114  * Atom data block size (in terms of number of atoms).
115  * This is the least common multiple of the numbers of atoms processed by
116  * a single block/workgroup of the spread and gather kernels.
117  * The GPU atom data buffers must be padded, which means that
118  * the numbers of atoms used for determining the size of the memory
119  * allocation must be divisible by this.
120  */
121 constexpr int c_pmeAtomDataBlockSize = 64;
122
123 int pme_gpu_get_atom_data_block_size()
124 {
125     return c_pmeAtomDataBlockSize;
126 }
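
/* Illustrative sketch (not part of the original file): how an allocation size that
 * respects c_pmeAtomDataBlockSize can be derived from an atom count. The helper name
 * is hypothetical; pme_gpu_reinit_atoms() further below performs the same rounding inline.
 */
static inline int pmeGpuPaddedAtomCountSketch(const int nAtoms)
{
    const int blockSize = pme_gpu_get_atom_data_block_size();
    // Round up to the next multiple of the block size, so that whole spread/gather
    // blocks never index past the end of the padded atom data buffers.
    return ((nAtoms + blockSize - 1) / blockSize) * blockSize;
}
// For example, 23000 atoms are padded to 23040 (360 blocks of 64 atoms each).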
127
128 void pme_gpu_synchronize(const PmeGpu* pmeGpu)
129 {
130     pmeGpu->archSpecific->pmeStream_.synchronize();
131 }
132
133 void pme_gpu_alloc_energy_virial(PmeGpu* pmeGpu)
134 {
135     const size_t energyAndVirialSize = c_virialAndEnergyCount * sizeof(float);
136     allocateDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy, c_virialAndEnergyCount,
137                          pmeGpu->archSpecific->deviceContext_);
138     pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_virialAndEnergy), energyAndVirialSize);
139 }
140
141 void pme_gpu_free_energy_virial(PmeGpu* pmeGpu)
142 {
143     freeDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy);
144     pfree(pmeGpu->staging.h_virialAndEnergy);
145     pmeGpu->staging.h_virialAndEnergy = nullptr;
146 }
147
148 void pme_gpu_clear_energy_virial(const PmeGpu* pmeGpu)
149 {
150     clearDeviceBufferAsync(&pmeGpu->kernelParams->constants.d_virialAndEnergy, 0,
151                            c_virialAndEnergyCount, pmeGpu->archSpecific->pmeStream_);
152 }
153
154 void pme_gpu_realloc_and_copy_bspline_values(PmeGpu* pmeGpu)
155 {
156     const int splineValuesOffset[DIM] = { 0, pmeGpu->kernelParams->grid.realGridSize[XX],
157                                           pmeGpu->kernelParams->grid.realGridSize[XX]
158                                                   + pmeGpu->kernelParams->grid.realGridSize[YY] };
159     memcpy(&pmeGpu->kernelParams->grid.splineValuesOffset, &splineValuesOffset, sizeof(splineValuesOffset));
160
161     const int newSplineValuesSize = pmeGpu->kernelParams->grid.realGridSize[XX]
162                                     + pmeGpu->kernelParams->grid.realGridSize[YY]
163                                     + pmeGpu->kernelParams->grid.realGridSize[ZZ];
164     const bool shouldRealloc = (newSplineValuesSize > pmeGpu->archSpecific->splineValuesSize);
165     reallocateDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, newSplineValuesSize,
166                            &pmeGpu->archSpecific->splineValuesSize,
167                            &pmeGpu->archSpecific->splineValuesSizeAlloc,
168                            pmeGpu->archSpecific->deviceContext_);
169     if (shouldRealloc)
170     {
171         /* Reallocate the host buffer */
172         pfree(pmeGpu->staging.h_splineModuli);
173         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_splineModuli),
174                 newSplineValuesSize * sizeof(float));
175     }
176     for (int i = 0; i < DIM; i++)
177     {
178         memcpy(pmeGpu->staging.h_splineModuli + splineValuesOffset[i],
179                pmeGpu->common->bsp_mod[i].data(), pmeGpu->common->bsp_mod[i].size() * sizeof(float));
180     }
181     /* TODO: pin original buffer instead! */
182     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, pmeGpu->staging.h_splineModuli,
183                        0, newSplineValuesSize, pmeGpu->archSpecific->pmeStream_,
184                        pmeGpu->settings.transferKind, nullptr);
185 }
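
/* Illustrative sketch (not part of the original file): the packed layout assumed by
 * pme_gpu_realloc_and_copy_bspline_values() above. The three per-dimension B-spline
 * moduli arrays are stored back to back in a single buffer of nx + ny + nz floats,
 * with per-dimension offsets {0, nx, nx + ny}. The helper name is hypothetical.
 */
static inline int splineModuliOffsetSketch(const int dim, const int nx, const int ny)
{
    const int offsets[DIM] = { 0, nx, nx + ny };
    return offsets[dim]; // e.g. nx = ny = 48 gives offsets {0, 48, 96}
}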
186
187 void pme_gpu_free_bspline_values(const PmeGpu* pmeGpu)
188 {
189     pfree(pmeGpu->staging.h_splineModuli);
190     freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli);
191 }
192
193 void pme_gpu_realloc_forces(PmeGpu* pmeGpu)
194 {
195     const size_t newForcesSize = pmeGpu->nAtomsAlloc * DIM;
196     GMX_ASSERT(newForcesSize > 0, "Bad number of atoms in PME GPU");
197     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, newForcesSize,
198                            &pmeGpu->archSpecific->forcesSize, &pmeGpu->archSpecific->forcesSizeAlloc,
199                            pmeGpu->archSpecific->deviceContext_);
200     pmeGpu->staging.h_forces.reserveWithPadding(pmeGpu->nAtomsAlloc);
201     pmeGpu->staging.h_forces.resizeWithPadding(pmeGpu->kernelParams->atoms.nAtoms);
202 }
203
204 void pme_gpu_free_forces(const PmeGpu* pmeGpu)
205 {
206     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces);
207 }
208
209 void pme_gpu_copy_input_forces(PmeGpu* pmeGpu)
210 {
211     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
212     float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
213     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, h_forcesFloat, 0,
214                        DIM * pmeGpu->kernelParams->atoms.nAtoms, pmeGpu->archSpecific->pmeStream_,
215                        pmeGpu->settings.transferKind, nullptr);
216 }
217
218 void pme_gpu_copy_output_forces(PmeGpu* pmeGpu)
219 {
220     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
221     float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
222     copyFromDeviceBuffer(h_forcesFloat, &pmeGpu->kernelParams->atoms.d_forces, 0,
223                          DIM * pmeGpu->kernelParams->atoms.nAtoms, pmeGpu->archSpecific->pmeStream_,
224                          pmeGpu->settings.transferKind, nullptr);
225 }
226
227 void pme_gpu_realloc_and_copy_input_coefficients(PmeGpu* pmeGpu, const float* h_coefficients)
228 {
229     GMX_ASSERT(h_coefficients, "Bad host-side charge buffer in PME GPU");
230     const size_t newCoefficientsSize = pmeGpu->nAtomsAlloc;
231     GMX_ASSERT(newCoefficientsSize > 0, "Bad number of atoms in PME GPU");
232     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients, newCoefficientsSize,
233                            &pmeGpu->archSpecific->coefficientsSize,
234                            &pmeGpu->archSpecific->coefficientsSizeAlloc,
235                            pmeGpu->archSpecific->deviceContext_);
236     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients,
237                        const_cast<float*>(h_coefficients), 0, pmeGpu->kernelParams->atoms.nAtoms,
238                        pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
239
240     const size_t paddingIndex = pmeGpu->kernelParams->atoms.nAtoms;
241     const size_t paddingCount = pmeGpu->nAtomsAlloc - paddingIndex;
242     if (paddingCount > 0)
243     {
244         clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coefficients, paddingIndex,
245                                paddingCount, pmeGpu->archSpecific->pmeStream_);
246     }
247 }
248
249 void pme_gpu_free_coefficients(const PmeGpu* pmeGpu)
250 {
251     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients);
252 }
253
254 void pme_gpu_realloc_spline_data(PmeGpu* pmeGpu)
255 {
256     const int order             = pmeGpu->common->pme_order;
257     const int newSplineDataSize = DIM * order * pmeGpu->nAtomsAlloc;
258     GMX_ASSERT(newSplineDataSize > 0, "Bad number of atoms in PME GPU");
259     /* Two arrays of the same size */
260     const bool shouldRealloc        = (newSplineDataSize > pmeGpu->archSpecific->splineDataSize);
261     int        currentSizeTemp      = pmeGpu->archSpecific->splineDataSize;
262     int        currentSizeTempAlloc = pmeGpu->archSpecific->splineDataSizeAlloc;
263     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta, newSplineDataSize, &currentSizeTemp,
264                            &currentSizeTempAlloc, pmeGpu->archSpecific->deviceContext_);
265     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta, newSplineDataSize,
266                            &pmeGpu->archSpecific->splineDataSize, &pmeGpu->archSpecific->splineDataSizeAlloc,
267                            pmeGpu->archSpecific->deviceContext_);
268     // the host side reallocation
269     if (shouldRealloc)
270     {
271         pfree(pmeGpu->staging.h_theta);
272         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_theta), newSplineDataSize * sizeof(float));
273         pfree(pmeGpu->staging.h_dtheta);
274         pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_dtheta), newSplineDataSize * sizeof(float));
275     }
276 }
277
278 void pme_gpu_free_spline_data(const PmeGpu* pmeGpu)
279 {
280     /* Two arrays of the same size */
281     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta);
282     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta);
283     pfree(pmeGpu->staging.h_theta);
284     pfree(pmeGpu->staging.h_dtheta);
285 }
286
287 void pme_gpu_realloc_grid_indices(PmeGpu* pmeGpu)
288 {
289     const size_t newIndicesSize = DIM * pmeGpu->nAtomsAlloc;
290     GMX_ASSERT(newIndicesSize > 0, "Bad number of atoms in PME GPU");
291     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices, newIndicesSize,
292                            &pmeGpu->archSpecific->gridlineIndicesSize,
293                            &pmeGpu->archSpecific->gridlineIndicesSizeAlloc,
294                            pmeGpu->archSpecific->deviceContext_);
295     pfree(pmeGpu->staging.h_gridlineIndices);
296     pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_gridlineIndices), newIndicesSize * sizeof(int));
297 }
298
299 void pme_gpu_free_grid_indices(const PmeGpu* pmeGpu)
300 {
301     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices);
302     pfree(pmeGpu->staging.h_gridlineIndices);
303 }
304
305 void pme_gpu_realloc_grids(PmeGpu* pmeGpu)
306 {
307     auto*     kernelParamsPtr = pmeGpu->kernelParams.get();
308     const int newRealGridSize = kernelParamsPtr->grid.realGridSizePadded[XX]
309                                 * kernelParamsPtr->grid.realGridSizePadded[YY]
310                                 * kernelParamsPtr->grid.realGridSizePadded[ZZ];
311     const int newComplexGridSize = kernelParamsPtr->grid.complexGridSizePadded[XX]
312                                    * kernelParamsPtr->grid.complexGridSizePadded[YY]
313                                    * kernelParamsPtr->grid.complexGridSizePadded[ZZ] * 2;
314     // Multiplied by 2 because the complex grid size is counted in complex values, while all allocations/pointers are in floats
315     if (pmeGpu->archSpecific->performOutOfPlaceFFT)
316     {
317         /* 2 separate grids */
318         reallocateDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, newComplexGridSize,
319                                &pmeGpu->archSpecific->complexGridSize,
320                                &pmeGpu->archSpecific->complexGridSizeAlloc,
321                                pmeGpu->archSpecific->deviceContext_);
322         reallocateDeviceBuffer(
323                 &kernelParamsPtr->grid.d_realGrid, newRealGridSize, &pmeGpu->archSpecific->realGridSize,
324                 &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->deviceContext_);
325     }
326     else
327     {
328         /* A single buffer so that any grid will fit */
329         const int newGridsSize = std::max(newRealGridSize, newComplexGridSize);
330         reallocateDeviceBuffer(
331                 &kernelParamsPtr->grid.d_realGrid, newGridsSize, &pmeGpu->archSpecific->realGridSize,
332                 &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->deviceContext_);
333         kernelParamsPtr->grid.d_fourierGrid   = kernelParamsPtr->grid.d_realGrid;
334         pmeGpu->archSpecific->complexGridSize = pmeGpu->archSpecific->realGridSize;
335         // the size might get used later for copying the grid
336     }
337 }
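
/* Illustrative sketch (not part of the original file): the float counts that
 * pme_gpu_realloc_grids() above works with (padding ignored for simplicity).
 * An R2C FFT of an nx*ny*nz real grid yields nx*ny*(nz/2 + 1) complex values,
 * i.e. potentially more floats than the real grid holds, so an in-place transform
 * needs a single buffer sized for the larger of the two layouts. The name is hypothetical.
 */
static inline int pmeSharedGridFloatCountSketch(const int nx, const int ny, const int nz)
{
    const int realFloats    = nx * ny * nz;
    const int complexFloats = nx * ny * (nz / 2 + 1) * 2; // 2 floats per complex value
    return std::max(realFloats, complexFloats); // one buffer that fits either grid
}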
338
339 void pme_gpu_free_grids(const PmeGpu* pmeGpu)
340 {
341     if (pmeGpu->archSpecific->performOutOfPlaceFFT)
342     {
343         freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_fourierGrid);
344     }
345     freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid);
346 }
347
348 void pme_gpu_clear_grids(const PmeGpu* pmeGpu)
349 {
350     clearDeviceBufferAsync(&pmeGpu->kernelParams->grid.d_realGrid, 0,
351                            pmeGpu->archSpecific->realGridSize, pmeGpu->archSpecific->pmeStream_);
352 }
353
354 void pme_gpu_realloc_and_copy_fract_shifts(PmeGpu* pmeGpu)
355 {
356     pme_gpu_free_fract_shifts(pmeGpu);
357
358     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
359
360     const int nx                  = kernelParamsPtr->grid.realGridSize[XX];
361     const int ny                  = kernelParamsPtr->grid.realGridSize[YY];
362     const int nz                  = kernelParamsPtr->grid.realGridSize[ZZ];
363     const int cellCount           = c_pmeNeighborUnitcellCount;
364     const int gridDataOffset[DIM] = { 0, cellCount * nx, cellCount * (nx + ny) };
365
366     memcpy(kernelParamsPtr->grid.tablesOffsets, &gridDataOffset, sizeof(gridDataOffset));
367
368     const int newFractShiftsSize = cellCount * (nx + ny + nz);
369
370 #if GMX_GPU == GMX_GPU_CUDA
371     initParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable, kernelParamsPtr->fractShiftsTableTexture,
372                          pmeGpu->common->fsh.data(), newFractShiftsSize);
373
374     initParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
375                          kernelParamsPtr->gridlineIndicesTableTexture, pmeGpu->common->nn.data(),
376                          newFractShiftsSize);
377 #elif GMX_GPU == GMX_GPU_OPENCL
378     // No dedicated texture routines in OpenCL, so plain device buffers are used instead
379     allocateDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, newFractShiftsSize,
380                          pmeGpu->archSpecific->deviceContext_);
381     allocateDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, newFractShiftsSize,
382                          pmeGpu->archSpecific->deviceContext_);
383     copyToDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, pmeGpu->common->fsh.data(), 0,
384                        newFractShiftsSize, pmeGpu->archSpecific->pmeStream_,
385                        GpuApiCallBehavior::Async, nullptr);
386     copyToDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, pmeGpu->common->nn.data(), 0,
387                        newFractShiftsSize, pmeGpu->archSpecific->pmeStream_,
388                        GpuApiCallBehavior::Async, nullptr);
389 #endif
390 }
391
392 void pme_gpu_free_fract_shifts(const PmeGpu* pmeGpu)
393 {
394     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
395 #if GMX_GPU == GMX_GPU_CUDA
396     destroyParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable,
397                             kernelParamsPtr->fractShiftsTableTexture);
398     destroyParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
399                             kernelParamsPtr->gridlineIndicesTableTexture);
400 #elif GMX_GPU == GMX_GPU_OPENCL
401     freeDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable);
402     freeDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable);
403 #endif
404 }
405
406 bool pme_gpu_stream_query(const PmeGpu* pmeGpu)
407 {
408     return haveStreamTasksCompleted(pmeGpu->archSpecific->pmeStream_);
409 }
410
411 void pme_gpu_copy_input_gather_grid(const PmeGpu* pmeGpu, float* h_grid)
412 {
413     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid, h_grid, 0, pmeGpu->archSpecific->realGridSize,
414                        pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
415 }
416
417 void pme_gpu_copy_output_spread_grid(const PmeGpu* pmeGpu, float* h_grid)
418 {
419     copyFromDeviceBuffer(h_grid, &pmeGpu->kernelParams->grid.d_realGrid, 0,
420                          pmeGpu->archSpecific->realGridSize, pmeGpu->archSpecific->pmeStream_,
421                          pmeGpu->settings.transferKind, nullptr);
422     pmeGpu->archSpecific->syncSpreadGridD2H.markEvent(pmeGpu->archSpecific->pmeStream_);
423 }
424
425 void pme_gpu_copy_output_spread_atom_data(const PmeGpu* pmeGpu)
426 {
427     const size_t splinesCount    = DIM * pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order;
428     auto*        kernelParamsPtr = pmeGpu->kernelParams.get();
429     copyFromDeviceBuffer(pmeGpu->staging.h_dtheta, &kernelParamsPtr->atoms.d_dtheta, 0, splinesCount,
430                          pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
431     copyFromDeviceBuffer(pmeGpu->staging.h_theta, &kernelParamsPtr->atoms.d_theta, 0, splinesCount,
432                          pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
433     copyFromDeviceBuffer(pmeGpu->staging.h_gridlineIndices, &kernelParamsPtr->atoms.d_gridlineIndices,
434                          0, kernelParamsPtr->atoms.nAtoms * DIM, pmeGpu->archSpecific->pmeStream_,
435                          pmeGpu->settings.transferKind, nullptr);
436 }
437
438 void pme_gpu_copy_input_gather_atom_data(const PmeGpu* pmeGpu)
439 {
440     const size_t splinesCount    = DIM * pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order;
441     auto*        kernelParamsPtr = pmeGpu->kernelParams.get();
442
443     // TODO: could clear only the padding and not the whole thing, but this is test-exclusive code anyway
444     clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_gridlineIndices, 0, pmeGpu->nAtomsAlloc * DIM,
445                            pmeGpu->archSpecific->pmeStream_);
446     clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_dtheta, 0,
447                            pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
448                            pmeGpu->archSpecific->pmeStream_);
449     clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_theta, 0,
450                            pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
451                            pmeGpu->archSpecific->pmeStream_);
452
453     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_dtheta, pmeGpu->staging.h_dtheta, 0, splinesCount,
454                        pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
455     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_theta, pmeGpu->staging.h_theta, 0, splinesCount,
456                        pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
457     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_gridlineIndices, pmeGpu->staging.h_gridlineIndices,
458                        0, kernelParamsPtr->atoms.nAtoms * DIM, pmeGpu->archSpecific->pmeStream_,
459                        pmeGpu->settings.transferKind, nullptr);
460 }
461
462 void pme_gpu_sync_spread_grid(const PmeGpu* pmeGpu)
463 {
464     pmeGpu->archSpecific->syncSpreadGridD2H.waitForEvent();
465 }
466
467 /*! \brief Internal GPU initialization for PME.
468  *
469  * \param[in]  pmeGpu         GPU PME data.
470  * \param[in]  deviceContext  GPU context.
471  * \param[in]  deviceStream   GPU stream.
472  */
473 static void pme_gpu_init_internal(PmeGpu* pmeGpu, const DeviceContext& deviceContext, const DeviceStream& deviceStream)
474 {
475 #if GMX_GPU == GMX_GPU_CUDA
476     // Prepare to use the device that this PME task was assigned earlier.
477     // Other entities, such as CUDA timing events, are known to implicitly use the device context.
478     CU_RET_ERR(cudaSetDevice(deviceContext.deviceInfo().id), "Switching to PME CUDA device");
479 #endif
480
481     /* Allocate the target-specific structures */
482     pmeGpu->archSpecific.reset(new PmeGpuSpecific(deviceContext, deviceStream));
483     pmeGpu->kernelParams.reset(new PmeGpuKernelParams());
484
485     pmeGpu->archSpecific->performOutOfPlaceFFT = true;
486     /* This should give better performance, according to the cuFFT documentation.
487      * The performance seems to be the same though.
488      * TODO: PME could also try to pick up nice grid sizes (with factors of 2, 3, 5, 7).
489      */
490
491 #if GMX_GPU == GMX_GPU_CUDA
492     pmeGpu->maxGridWidthX = deviceContext.deviceInfo().prop.maxGridSize[0];
493 #elif GMX_GPU == GMX_GPU_OPENCL
494     pmeGpu->maxGridWidthX = INT32_MAX / 2;
495     // TODO: is there really no global work size limit in OpenCL?
496 #endif
497 }
498
499 void pme_gpu_reinit_3dfft(const PmeGpu* pmeGpu)
500 {
501     if (pme_gpu_settings(pmeGpu).performGPUFFT)
502     {
503         pmeGpu->archSpecific->fftSetup.resize(0);
504         for (int i = 0; i < pmeGpu->common->ngrids; i++)
505         {
506             pmeGpu->archSpecific->fftSetup.push_back(std::make_unique<GpuParallel3dFft>(pmeGpu));
507         }
508     }
509 }
510
511 void pme_gpu_destroy_3dfft(const PmeGpu* pmeGpu)
512 {
513     pmeGpu->archSpecific->fftSetup.resize(0);
514 }
515
516 void pme_gpu_getEnergyAndVirial(const gmx_pme_t& pme, PmeOutput* output)
517 {
518     const PmeGpu* pmeGpu = pme.gpu;
519     for (int j = 0; j < c_virialAndEnergyCount; j++)
520     {
521         GMX_ASSERT(std::isfinite(pmeGpu->staging.h_virialAndEnergy[j]),
522                    "PME GPU produces incorrect energy/virial.");
523     }
524
525     unsigned int j                 = 0;
526     output->coulombVirial_[XX][XX] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
527     output->coulombVirial_[YY][YY] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
528     output->coulombVirial_[ZZ][ZZ] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
529     output->coulombVirial_[XX][YY] = output->coulombVirial_[YY][XX] =
530             0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
531     output->coulombVirial_[XX][ZZ] = output->coulombVirial_[ZZ][XX] =
532             0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
533     output->coulombVirial_[YY][ZZ] = output->coulombVirial_[ZZ][YY] =
534             0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
535     output->coulombEnergy_ = 0.5F * pmeGpu->staging.h_virialAndEnergy[j++];
536 }
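
/* Illustrative sketch (not part of the original file): the layout of the staging buffer
 * that pme_gpu_getEnergyAndVirial() above consumes. Its first six entries are the unique
 * elements of the symmetric virial (scaled by 0.25 on readout) and the last entry is the
 * energy (scaled by 0.5). The struct and function names below are illustrative only.
 */
struct PmeEnergyVirialSketch
{
    float virialXX, virialYY, virialZZ;
    float virialXY, virialXZ, virialYZ; // each also fills the mirrored matrix element
    float energy;
};
static inline PmeEnergyVirialSketch unpackEnergyVirialSketch(const float* buffer)
{
    // buffer holds c_virialAndEnergyCount floats: 6 virial components plus 1 energy
    PmeEnergyVirialSketch out;
    out.virialXX = 0.25F * buffer[0];
    out.virialYY = 0.25F * buffer[1];
    out.virialZZ = 0.25F * buffer[2];
    out.virialXY = 0.25F * buffer[3];
    out.virialXZ = 0.25F * buffer[4];
    out.virialYZ = 0.25F * buffer[5];
    out.energy   = 0.5F * buffer[6];
    return out;
}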
537
538 /*! \brief Sets the force-related members in \p output
539  *
540  * \param[in]   pmeGpu      PME GPU data structure
541  * \param[out]  output      Pointer to PME output data structure
542  */
543 static void pme_gpu_getForceOutput(PmeGpu* pmeGpu, PmeOutput* output)
544 {
545     output->haveForceOutput_ = !pmeGpu->settings.useGpuForceReduction;
546     if (output->haveForceOutput_)
547     {
548         output->forces_ = pmeGpu->staging.h_forces;
549     }
550 }
551
552 PmeOutput pme_gpu_getOutput(const gmx_pme_t& pme, const bool computeEnergyAndVirial)
553 {
554     PmeGpu* pmeGpu = pme.gpu;
555
556     PmeOutput output;
557
558     pme_gpu_getForceOutput(pmeGpu, &output);
559
560     if (computeEnergyAndVirial)
561     {
562         if (pme_gpu_settings(pmeGpu).performGPUSolve)
563         {
564             pme_gpu_getEnergyAndVirial(pme, &output);
565         }
566         else
567         {
568             get_pme_ener_vir_q(pme.solve_work, pme.nthread, &output);
569         }
570     }
571     return output;
572 }
573
574 void pme_gpu_update_input_box(PmeGpu gmx_unused* pmeGpu, const matrix gmx_unused box)
575 {
576 #if GMX_DOUBLE
577     GMX_THROW(gmx::NotImplementedError("PME is implemented for single-precision only on GPU"));
578 #else
579     matrix scaledBox;
580     pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
581     auto* kernelParamsPtr              = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
582     kernelParamsPtr->current.boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
583     GMX_ASSERT(kernelParamsPtr->current.boxVolume != 0.0F, "Zero volume of the unit cell");
584     matrix recipBox;
585     gmx::invertBoxMatrix(scaledBox, recipBox);
586
587     /* The GPU recipBox is transposed as compared to the CPU recipBox.
588      * Spread uses matrix columns (while solve and gather use rows).
589      * There is no particular reason for this; it might be further rethought/optimized for better access patterns.
590      */
591     const real newRecipBox[DIM][DIM] = { { recipBox[XX][XX], recipBox[YY][XX], recipBox[ZZ][XX] },
592                                          { 0.0, recipBox[YY][YY], recipBox[ZZ][YY] },
593                                          { 0.0, 0.0, recipBox[ZZ][ZZ] } };
594     memcpy(kernelParamsPtr->current.recipBox, newRecipBox, sizeof(matrix));
595 #endif
596 }
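
/* Illustrative sketch (not part of the original file): the transposition performed in
 * pme_gpu_update_input_box() above, written out for plain 3x3 arrays. GROMACS boxes
 * (and hence their reciprocals) are triangular, which is why only the elements kept
 * above can be non-zero; the GPU copy simply stores recipBox transposed. The name is
 * hypothetical.
 */
static inline void transposeRecipBoxSketch(const float cpuRecipBox[3][3], float gpuRecipBox[3][3])
{
    for (int i = 0; i < 3; i++)
    {
        for (int j = 0; j < 3; j++)
        {
            gpuRecipBox[i][j] = cpuRecipBox[j][i];
        }
    }
}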
597
598 /*! \brief \libinternal
599  * (Re-)initializes all the PME GPU data related to the grid size and cut-off.
600  *
601  * \param[in] pmeGpu            The PME GPU structure.
602  */
603 static void pme_gpu_reinit_grids(PmeGpu* pmeGpu)
604 {
605     auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
606     kernelParamsPtr->grid.ewaldFactor =
607             (M_PI * M_PI) / (pmeGpu->common->ewaldcoeff_q * pmeGpu->common->ewaldcoeff_q);
608
609     /* The grid size variants */
610     for (int i = 0; i < DIM; i++)
611     {
612         kernelParamsPtr->grid.realGridSize[i] = pmeGpu->common->nk[i];
613         kernelParamsPtr->grid.realGridSizeFP[i] =
614                 static_cast<float>(kernelParamsPtr->grid.realGridSize[i]);
615         kernelParamsPtr->grid.realGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
616
617         // The complex grid currently uses no padding;
618         // if it starts to do so, then another test should be added for that
619         kernelParamsPtr->grid.complexGridSize[i]       = kernelParamsPtr->grid.realGridSize[i];
620         kernelParamsPtr->grid.complexGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
621     }
622     /* FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
623     if (!pme_gpu_settings(pmeGpu).performGPUFFT)
624     {
625         // This allows the GPU spreading grid and the CPU fftgrid to have the same layout, so that the data can be copied directly
626         kernelParamsPtr->grid.realGridSizePadded[ZZ] =
627                 (kernelParamsPtr->grid.realGridSize[ZZ] / 2 + 1) * 2;
628     }
629
630     /* GPU FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
631     kernelParamsPtr->grid.complexGridSize[ZZ] /= 2;
632     kernelParamsPtr->grid.complexGridSize[ZZ]++;
633     kernelParamsPtr->grid.complexGridSizePadded[ZZ] = kernelParamsPtr->grid.complexGridSize[ZZ];
634
635     pme_gpu_realloc_and_copy_fract_shifts(pmeGpu);
636     pme_gpu_realloc_and_copy_bspline_values(pmeGpu);
637     pme_gpu_realloc_grids(pmeGpu);
638     pme_gpu_reinit_3dfft(pmeGpu);
639 }
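
/* Illustrative sketch (not part of the original file): the ZZ-dimension sizes set up by
 * pme_gpu_reinit_grids() above, worked through for an assumed real grid size of nz = 96.
 */
static inline void pmeGridZzSizesSketch()
{
    const int nz               = 96;                // assumed real grid size in ZZ
    const int complexSizeZz    = nz / 2 + 1;        // 49: the R2C FFT rule used above
    const int paddedRealCpuFft = complexSizeZz * 2; // 98: matches the CPU fftgrid layout
    const int paddedRealGpuFft = nz;                // 96: no extra padding with GPU FFT
    (void)paddedRealCpuFft;
    (void)paddedRealGpuFft;
}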
640
641 /* Several GPU functions that refer to the CPU PME data live here.
642  * We would like to keep these away from the GPU-framework-specific code for clarity,
643  * as well as to avoid compilation issues with MPI.
644  */
645
646 /*! \brief \libinternal
647  * Copies everything useful from the PME CPU to the PME GPU structure.
648  * The goal is to minimize interaction with the PME CPU structure in the GPU code.
649  *
650  * \param[in] pme         The PME structure.
651  */
652 static void pme_gpu_copy_common_data_from(const gmx_pme_t* pme)
653 {
654     /* TODO: Consider refactoring the CPU PME code to use the same structure,
655      * so that this function becomes 2 lines */
656     PmeGpu* pmeGpu               = pme->gpu;
657     pmeGpu->common->ngrids       = pme->ngrids;
658     pmeGpu->common->epsilon_r    = pme->epsilon_r;
659     pmeGpu->common->ewaldcoeff_q = pme->ewaldcoeff_q;
660     pmeGpu->common->nk[XX]       = pme->nkx;
661     pmeGpu->common->nk[YY]       = pme->nky;
662     pmeGpu->common->nk[ZZ]       = pme->nkz;
663     pmeGpu->common->pme_order    = pme->pme_order;
664     if (pmeGpu->common->pme_order != c_pmeGpuOrder)
665     {
666         GMX_THROW(gmx::NotImplementedError("pme_order != 4 is not implemented!"));
667     }
668     for (int i = 0; i < DIM; i++)
669     {
670         pmeGpu->common->bsp_mod[i].assign(pme->bsp_mod[i], pme->bsp_mod[i] + pmeGpu->common->nk[i]);
671     }
672     const int cellCount = c_pmeNeighborUnitcellCount;
673     pmeGpu->common->fsh.resize(0);
674     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshx, pme->fshx + cellCount * pme->nkx);
675     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshy, pme->fshy + cellCount * pme->nky);
676     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshz, pme->fshz + cellCount * pme->nkz);
677     pmeGpu->common->nn.resize(0);
678     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnx, pme->nnx + cellCount * pme->nkx);
679     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nny, pme->nny + cellCount * pme->nky);
680     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnz, pme->nnz + cellCount * pme->nkz);
681     pmeGpu->common->runMode       = pme->runMode;
682     pmeGpu->common->isRankPmeOnly = !pme->bPPnode;
683     pmeGpu->common->boxScaler     = pme->boxScaler;
684 }
685
686 /*! \libinternal \brief
687  * Uses heuristics to select the best-performing PME spread and gather kernels.
688  *
689  * \param[in,out] pmeGpu         The PME GPU structure.
690  */
691 static void pme_gpu_select_best_performing_pme_spreadgather_kernels(PmeGpu* pmeGpu)
692 {
693     if (pmeGpu->kernelParams->atoms.nAtoms > c_pmeGpuPerformanceAtomLimit && (GMX_GPU == GMX_GPU_CUDA))
694     {
695         pmeGpu->settings.threadsPerAtom     = ThreadsPerAtom::Order;
696         pmeGpu->settings.recalculateSplines = true;
697     }
698     else
699     {
700         pmeGpu->settings.threadsPerAtom     = ThreadsPerAtom::OrderSquared;
701         pmeGpu->settings.recalculateSplines = false;
702     }
703 }
704
705
706 /*! \libinternal \brief
707  * Initializes the PME GPU data at the beginning of the run.
708  * TODO: this should become PmeGpu::PmeGpu()
709  *
710  * \param[in,out] pme            The PME structure.
711  * \param[in]     deviceContext  The GPU context.
712  * \param[in]     deviceStream   The GPU stream.
713  * \param[in,out] pmeGpuProgram  The handle to the program/kernel data created outside (e.g. in unit tests/runner)
714  */
715 static void pme_gpu_init(gmx_pme_t*           pme,
716                          const DeviceContext& deviceContext,
717                          const DeviceStream&  deviceStream,
718                          const PmeGpuProgram* pmeGpuProgram)
719 {
720     pme->gpu       = new PmeGpu();
721     PmeGpu* pmeGpu = pme->gpu;
722     changePinningPolicy(&pmeGpu->staging.h_forces, pme_get_pinning_policy());
723     pmeGpu->common = std::make_shared<PmeShared>();
724
725     /* These settings are set here for the whole run; dynamic ones are set in pme_gpu_reinit() */
726     /* A convenience variable. */
727     pmeGpu->settings.useDecomposition = (pme->nnodes != 1);
728     /* TODO: CPU gather with GPU spread is broken due to different theta/dtheta layout. */
729     pmeGpu->settings.performGPUGather = true;
730     // By default GPU-side reduction is off (explicitly set here for tests, otherwise reset per-step)
731     pmeGpu->settings.useGpuForceReduction = false;
732
733     pme_gpu_set_testing(pmeGpu, false);
734
735     GMX_ASSERT(pmeGpuProgram != nullptr, "GPU kernels must be already compiled");
736     pmeGpu->programHandle_ = pmeGpuProgram;
737
738     pmeGpu->initializedClfftLibrary_ = std::make_unique<gmx::ClfftInitializer>();
739
740     pme_gpu_init_internal(pmeGpu, deviceContext, deviceStream);
741     pme_gpu_alloc_energy_virial(pmeGpu);
742
743     pme_gpu_copy_common_data_from(pme);
744
745     GMX_ASSERT(pmeGpu->common->epsilon_r != 0.0F, "PME GPU: bad electrostatic coefficient");
746
747     auto* kernelParamsPtr               = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
748     kernelParamsPtr->constants.elFactor = ONE_4PI_EPS0 / pmeGpu->common->epsilon_r;
749 }
750
751 void pme_gpu_get_real_grid_sizes(const PmeGpu* pmeGpu, gmx::IVec* gridSize, gmx::IVec* paddedGridSize)
752 {
753     GMX_ASSERT(gridSize != nullptr, "");
754     GMX_ASSERT(paddedGridSize != nullptr, "");
755     GMX_ASSERT(pmeGpu != nullptr, "");
756     auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
757     for (int i = 0; i < DIM; i++)
758     {
759         (*gridSize)[i]       = kernelParamsPtr->grid.realGridSize[i];
760         (*paddedGridSize)[i] = kernelParamsPtr->grid.realGridSizePadded[i];
761     }
762 }
763
764 void pme_gpu_reinit(gmx_pme_t*           pme,
765                     const DeviceContext* deviceContext,
766                     const DeviceStream*  deviceStream,
767                     const PmeGpuProgram* pmeGpuProgram)
768 {
769     GMX_ASSERT(pme != nullptr, "Need valid PME object");
770
771     if (!pme->gpu)
772     {
773         GMX_RELEASE_ASSERT(deviceContext != nullptr,
774                            "Device context can not be nullptr when setting up PME on GPU.");
775         GMX_RELEASE_ASSERT(deviceStream != nullptr,
776                            "Device stream can not be nullptr when setting up PME on GPU.");
777         /* First-time initialization */
778         pme_gpu_init(pme, *deviceContext, *deviceStream, pmeGpuProgram);
779     }
780     else
781     {
782         /* After this call nothing in the GPU code should refer to the gmx_pme_t *pme itself - until the next pme_gpu_reinit */
783         pme_gpu_copy_common_data_from(pme);
784     }
785     /* GPU FFT will only be used when running on a single rank. */
786     pme->gpu->settings.performGPUFFT =
787             (pme->gpu->common->runMode == PmeRunMode::GPU) && !pme->gpu->settings.useDecomposition;
788     pme->gpu->settings.performGPUSolve = (pme->gpu->common->runMode == PmeRunMode::GPU);
789
790     /* Reinit active timers */
791     pme_gpu_reinit_timings(pme->gpu);
792
793     pme_gpu_reinit_grids(pme->gpu);
794     // Note: if timing the reinit launch overhead becomes more relevant
795     // (e.g. with regular PP-PME re-balancing), we should pass wcycle here.
796     pme_gpu_reinit_computation(pme, nullptr);
797     /* Clear the previous box - doesn't hurt, and forces the PME CPU recipbox
798      * update for mixed mode on grid switch. TODO: use shared recipbox field.
799      */
800     std::memset(pme->gpu->common->previousBox, 0, sizeof(pme->gpu->common->previousBox));
801 }
802
803 void pme_gpu_destroy(PmeGpu* pmeGpu)
804 {
805     /* Free lots of data */
806     pme_gpu_free_energy_virial(pmeGpu);
807     pme_gpu_free_bspline_values(pmeGpu);
808     pme_gpu_free_forces(pmeGpu);
809     pme_gpu_free_coefficients(pmeGpu);
810     pme_gpu_free_spline_data(pmeGpu);
811     pme_gpu_free_grid_indices(pmeGpu);
812     pme_gpu_free_fract_shifts(pmeGpu);
813     pme_gpu_free_grids(pmeGpu);
814
815     pme_gpu_destroy_3dfft(pmeGpu);
816
817     delete pmeGpu;
818 }
819
820 void pme_gpu_reinit_atoms(PmeGpu* pmeGpu, const int nAtoms, const real* charges)
821 {
822     auto* kernelParamsPtr         = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
823     kernelParamsPtr->atoms.nAtoms = nAtoms;
824     const int  block_size         = pme_gpu_get_atom_data_block_size();
825     const int  nAtomsNewPadded    = ((nAtoms + block_size - 1) / block_size) * block_size;
826     const bool haveToRealloc      = (pmeGpu->nAtomsAlloc < nAtomsNewPadded);
827     pmeGpu->nAtomsAlloc           = nAtomsNewPadded;
828
829 #if GMX_DOUBLE
830     GMX_RELEASE_ASSERT(false, "Only single precision supported");
831     GMX_UNUSED_VALUE(charges);
832 #else
833     pme_gpu_realloc_and_copy_input_coefficients(pmeGpu, reinterpret_cast<const float*>(charges));
834     /* The call could also be made conditional on haveToRealloc, but the copy always needs to be performed */
835 #endif
836
837     if (haveToRealloc)
838     {
839         pme_gpu_realloc_forces(pmeGpu);
840         pme_gpu_realloc_spline_data(pmeGpu);
841         pme_gpu_realloc_grid_indices(pmeGpu);
842     }
843     pme_gpu_select_best_performing_pme_spreadgather_kernels(pmeGpu);
844 }
845
846 /*! \internal \brief
847  * Returns raw timing event from the corresponding GpuRegionTimer (if timings are enabled).
848  * In CUDA the result can be a nullptr stub, per the GpuRegionTimer implementation.
849  *
850  * \param[in] pmeGpu         The PME GPU data structure.
851  * \param[in] PMEStageId     The PME GPU stage gtPME_ index from the enum in src/gromacs/timing/gpu_timing.h
852  */
853 static CommandEvent* pme_gpu_fetch_timing_event(const PmeGpu* pmeGpu, size_t PMEStageId)
854 {
855     CommandEvent* timingEvent = nullptr;
856     if (pme_gpu_timings_enabled(pmeGpu))
857     {
858         GMX_ASSERT(PMEStageId < pmeGpu->archSpecific->timingEvents.size(),
859                    "Wrong PME GPU timing event index");
860         timingEvent = pmeGpu->archSpecific->timingEvents[PMEStageId].fetchNextEvent();
861     }
862     return timingEvent;
863 }
864
865 void pme_gpu_3dfft(const PmeGpu* pmeGpu, gmx_fft_direction dir, int grid_index)
866 {
867     int timerId = (dir == GMX_FFT_REAL_TO_COMPLEX) ? gtPME_FFT_R2C : gtPME_FFT_C2R;
868
869     pme_gpu_start_timing(pmeGpu, timerId);
870     pmeGpu->archSpecific->fftSetup[grid_index]->perform3dFft(
871             dir, pme_gpu_fetch_timing_event(pmeGpu, timerId));
872     pme_gpu_stop_timing(pmeGpu, timerId);
873 }
874
875 /*! \brief
876  * Given possibly large \p blockCount, returns a compact 1D or 2D grid for kernel scheduling,
877  * to minimize number of unused blocks.
878  */
879 std::pair<int, int> inline pmeGpuCreateGrid(const PmeGpu* pmeGpu, int blockCount)
880 {
881     // How many maximum widths in X do we need (hopefully just one)
882     const int minRowCount = (blockCount + pmeGpu->maxGridWidthX - 1) / pmeGpu->maxGridWidthX;
883     // Trying to make things even
884     const int colCount = (blockCount + minRowCount - 1) / minRowCount;
885     GMX_ASSERT((colCount * minRowCount - blockCount) >= 0, "pmeGpuCreateGrid: totally wrong");
886     GMX_ASSERT((colCount * minRowCount - blockCount) < minRowCount,
887                "pmeGpuCreateGrid: excessive blocks");
888     return std::pair<int, int>(colCount, minRowCount);
889 }
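
/* Illustrative sketch (not part of the original file): what pmeGpuCreateGrid() above
 * produces for concrete, assumed numbers (65535 is only a hypothetical X-width limit).
 */
static inline void pmeGpuCreateGridSketch()
{
    const int maxGridWidthX = 65535;  // assumed device limit on grid width in X
    const int blockCount    = 100000; // assumed number of blocks to schedule
    const int minRowCount   = (blockCount + maxGridWidthX - 1) / maxGridWidthX; // 2
    const int colCount      = (blockCount + minRowCount - 1) / minRowCount;     // 50000
    // The resulting 50000 x 2 launch grid covers all 100000 blocks with none left unused.
    (void)colCount;
}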
890
891 /*! \brief
892  * Returns a pointer to the appropriate spline-and-spread kernel based on the input bool values.
893  *
894  * \param[in]  pmeGpu                   The PME GPU structure.
895  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
896  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
897  *
898  * \return Pointer to CUDA kernel
899  */
900 static auto selectSplineAndSpreadKernelPtr(const PmeGpu* pmeGpu, ThreadsPerAtom threadsPerAtom, bool writeSplinesToGlobal)
901 {
902     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
903     if (writeSplinesToGlobal)
904     {
905         if (threadsPerAtom == ThreadsPerAtom::Order)
906         {
907             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesThPerAtom4;
908         }
909         else
910         {
911             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplines;
912         }
913     }
914     else
915     {
916         if (threadsPerAtom == ThreadsPerAtom::Order)
917         {
918             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4;
919         }
920         else
921         {
922             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernel;
923         }
924     }
925
926     return kernelPtr;
927 }
928
929 /*! \brief
930  * Returns a pointer to the appropriate spline kernel based on the input bool values.
931  *
932  * \param[in]  pmeGpu                   The PME GPU structure.
933  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
934  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
935  *
936  * \return Pointer to CUDA kernel
937  */
938 static auto selectSplineKernelPtr(const PmeGpu*  pmeGpu,
939                                   ThreadsPerAtom threadsPerAtom,
940                                   bool gmx_unused writeSplinesToGlobal)
941 {
942     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
943     GMX_ASSERT(
944             writeSplinesToGlobal,
945             "Spline data should always be written to global memory when just calculating splines");
946
947     if (threadsPerAtom == ThreadsPerAtom::Order)
948     {
949         kernelPtr = pmeGpu->programHandle_->impl_->splineKernelThPerAtom4;
950     }
951     else
952     {
953         kernelPtr = pmeGpu->programHandle_->impl_->splineKernel;
954     }
955     return kernelPtr;
956 }
957
958 /*! \brief
959  * Returns a pointer to the appropriate spread kernel based on the input bool values.
960  *
961  * \param[in]  pmeGpu                   The PME GPU structure.
962  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
963  * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
964  *
965  * \return Pointer to CUDA kernel
966  */
967 static auto selectSpreadKernelPtr(const PmeGpu* pmeGpu, ThreadsPerAtom threadsPerAtom, bool writeSplinesToGlobal)
968 {
969     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
970     if (writeSplinesToGlobal)
971     {
972         if (threadsPerAtom == ThreadsPerAtom::Order)
973         {
974             kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelThPerAtom4;
975         }
976         else
977         {
978             kernelPtr = pmeGpu->programHandle_->impl_->spreadKernel;
979         }
980     }
981     else
982     {
983         /* If we are not saving the spline data, we need to recalculate it
984            using the spline-and-spread kernel */
985         if (threadsPerAtom == ThreadsPerAtom::Order)
986         {
987             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4;
988         }
989         else
990         {
991             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernel;
992         }
993     }
994     return kernelPtr;
995 }
996
997 void pme_gpu_spread(const PmeGpu*         pmeGpu,
998                     GpuEventSynchronizer* xReadyOnDevice,
999                     int gmx_unused gridIndex,
1000                     real*          h_grid,
1001                     bool           computeSplines,
1002                     bool           spreadCharges)
1003 {
1004     GMX_ASSERT(computeSplines || spreadCharges,
1005                "PME spline/spread kernel has invalid input (nothing to do)");
1006     const auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1007     GMX_ASSERT(kernelParamsPtr->atoms.nAtoms > 0, "No atom data in PME GPU spread");
1008
1009     const size_t blockSize = pmeGpu->programHandle_->impl_->spreadWorkGroupSize;
1010
1011     const int order = pmeGpu->common->pme_order;
1012     GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1013     const bool writeGlobal = pmeGpu->settings.copyAllOutputs;
1014     const int  threadsPerAtom =
1015             (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
1016     const bool recalculateSplines = pmeGpu->settings.recalculateSplines;
1017 #if GMX_GPU == GMX_GPU_OPENCL
1018     GMX_ASSERT(pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::OrderSquared,
1019                "Only 16 threads per atom supported in OpenCL");
1020     GMX_ASSERT(!recalculateSplines, "Recalculating splines not supported in OpenCL");
1021 #endif
1022     const int atomsPerBlock = blockSize / threadsPerAtom;
1023
1024     // TODO: pick smaller block size in runtime if needed
1025     // (e.g. on 660 Ti where 50% occupancy is ~25% faster than 100% occupancy with RNAse (~17.8k atoms))
1026     // If doing so, change atomsPerBlock in the kernels as well.
1027     // TODO: test varying block sizes on modern arch-s as well
1028     // TODO: also consider using cudaFuncSetCacheConfig() for preferring shared memory on older architectures
1029     //(for spline data mostly)
1030     GMX_ASSERT(!(c_pmeAtomDataBlockSize % atomsPerBlock),
1031                "inconsistent atom data padding vs. spreading block size");
1032
1033     // Ensure that coordinates are ready on the device before launching spread;
1034     // only needed with CUDA on PP+PME ranks; not on separate PME ranks, in unit tests,
1035     // or in OpenCL, as those cases use a single stream (hence xReadyOnDevice == nullptr).
1036     GMX_ASSERT(xReadyOnDevice != nullptr || (GMX_GPU != GMX_GPU_CUDA)
1037                        || pmeGpu->common->isRankPmeOnly || pme_gpu_settings(pmeGpu).copyAllOutputs,
1038                "Need a valid coordinate synchronizer on PP+PME ranks with CUDA.");
1039     if (xReadyOnDevice)
1040     {
1041         xReadyOnDevice->enqueueWaitEvent(pmeGpu->archSpecific->pmeStream_);
1042     }
1043
1044     const int blockCount = pmeGpu->nAtomsAlloc / atomsPerBlock;
1045     auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1046
1047     KernelLaunchConfig config;
1048     config.blockSize[0] = order;
1049     config.blockSize[1] = (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? 1 : order);
1050     config.blockSize[2] = atomsPerBlock;
1051     config.gridSize[0]  = dimGrid.first;
1052     config.gridSize[1]  = dimGrid.second;
1053
1054     int                                timingId;
1055     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1056     if (computeSplines)
1057     {
1058         if (spreadCharges)
1059         {
1060             timingId  = gtPME_SPLINEANDSPREAD;
1061             kernelPtr = selectSplineAndSpreadKernelPtr(pmeGpu, pmeGpu->settings.threadsPerAtom,
1062                                                        writeGlobal || (!recalculateSplines));
1063         }
1064         else
1065         {
1066             timingId  = gtPME_SPLINE;
1067             kernelPtr = selectSplineKernelPtr(pmeGpu, pmeGpu->settings.threadsPerAtom,
1068                                               writeGlobal || (!recalculateSplines));
1069         }
1070     }
1071     else
1072     {
1073         timingId  = gtPME_SPREAD;
1074         kernelPtr = selectSpreadKernelPtr(pmeGpu, pmeGpu->settings.threadsPerAtom,
1075                                           writeGlobal || (!recalculateSplines));
1076     }
1077
1078
1079     pme_gpu_start_timing(pmeGpu, timingId);
1080     auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1081 #if c_canEmbedBuffers
1082     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1083 #else
1084     const auto kernelArgs = prepareGpuKernelArguments(
1085             kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->atoms.d_theta,
1086             &kernelParamsPtr->atoms.d_dtheta, &kernelParamsPtr->atoms.d_gridlineIndices,
1087             &kernelParamsPtr->grid.d_realGrid, &kernelParamsPtr->grid.d_fractShiftsTable,
1088             &kernelParamsPtr->grid.d_gridlineIndicesTable, &kernelParamsPtr->atoms.d_coefficients,
1089             &kernelParamsPtr->atoms.d_coordinates);
1090 #endif
1091
1092     launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent,
1093                     "PME spline/spread", kernelArgs);
1094     pme_gpu_stop_timing(pmeGpu, timingId);
1095
1096     const auto& settings    = pmeGpu->settings;
1097     const bool copyBackGrid = spreadCharges && (!settings.performGPUFFT || settings.copyAllOutputs);
1098     if (copyBackGrid)
1099     {
1100         pme_gpu_copy_output_spread_grid(pmeGpu, h_grid);
1101     }
1102     const bool copyBackAtomData =
1103             computeSplines && (!settings.performGPUGather || settings.copyAllOutputs);
1104     if (copyBackAtomData)
1105     {
1106         pme_gpu_copy_output_spread_atom_data(pmeGpu);
1107     }
1108 }
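
/* Illustrative sketch (not part of the original file): the spline/spread launch geometry
 * computed in pme_gpu_spread() above, for an assumed work-group size of 256 threads.
 */
static inline void pmeSpreadLaunchGeometrySketch(const int nAtomsAlloc)
{
    const int blockSize      = 256;                        // assumed spreadWorkGroupSize
    const int order          = 4;                          // c_pmeGpuOrder
    const int threadsPerAtom = order * order;              // ThreadsPerAtom::OrderSquared
    const int atomsPerBlock  = blockSize / threadsPerAtom; // 16 atoms per block
    // nAtomsAlloc is padded to a multiple of c_pmeAtomDataBlockSize (64), which is
    // divisible by atomsPerBlock, so the division below is exact.
    const int blockCount = nAtomsAlloc / atomsPerBlock;
    // The block is launched as (order, order, atomsPerBlock) = (4, 4, 16) = 256 threads.
    (void)blockCount;
}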
1109
1110 void pme_gpu_solve(const PmeGpu* pmeGpu, t_complex* h_grid, GridOrdering gridOrdering, bool computeEnergyAndVirial)
1111 {
1112     const auto& settings               = pmeGpu->settings;
1113     const bool  copyInputAndOutputGrid = !settings.performGPUFFT || settings.copyAllOutputs;
1114
1115     auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1116
1117     float* h_gridFloat = reinterpret_cast<float*>(h_grid);
1118     if (copyInputAndOutputGrid)
1119     {
1120         copyToDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, h_gridFloat, 0,
1121                            pmeGpu->archSpecific->complexGridSize, pmeGpu->archSpecific->pmeStream_,
1122                            pmeGpu->settings.transferKind, nullptr);
1123     }
1124
1125     int majorDim = -1, middleDim = -1, minorDim = -1;
1126     switch (gridOrdering)
1127     {
1128         case GridOrdering::YZX:
1129             majorDim  = YY;
1130             middleDim = ZZ;
1131             minorDim  = XX;
1132             break;
1133
1134         case GridOrdering::XYZ:
1135             majorDim  = XX;
1136             middleDim = YY;
1137             minorDim  = ZZ;
1138             break;
1139
1140         default: GMX_ASSERT(false, "Implement grid ordering here and below for the kernel launch");
1141     }
1142
1143     const int maxBlockSize = pmeGpu->programHandle_->impl_->solveMaxWorkGroupSize;
1144
1145     const int gridLineSize      = pmeGpu->kernelParams->grid.complexGridSize[minorDim];
1146     const int gridLinesPerBlock = std::max(maxBlockSize / gridLineSize, 1);
1147     const int blocksPerGridLine = (gridLineSize + maxBlockSize - 1) / maxBlockSize;
1148     int       cellsPerBlock;
1149     if (blocksPerGridLine == 1)
1150     {
1151         cellsPerBlock = gridLineSize * gridLinesPerBlock;
1152     }
1153     else
1154     {
1155         cellsPerBlock = (gridLineSize + blocksPerGridLine - 1) / blocksPerGridLine;
1156     }
1157     const int warpSize  = pmeGpu->programHandle_->warpSize();
1158     const int blockSize = (cellsPerBlock + warpSize - 1) / warpSize * warpSize;
1159
1160     static_assert(GMX_GPU != GMX_GPU_CUDA || c_solveMaxWarpsPerBlock / 2 >= 4,
1161                   "The CUDA solve energy kernels needs at least 4 warps. "
1162                   "Here we launch at least half of the max warps.");
1163
1164     KernelLaunchConfig config;
1165     config.blockSize[0] = blockSize;
1166     config.gridSize[0]  = blocksPerGridLine;
1167     // rounding up to full warps so that shuffle operations produce defined results
1168     config.gridSize[1] = (pmeGpu->kernelParams->grid.complexGridSize[middleDim] + gridLinesPerBlock - 1)
1169                          / gridLinesPerBlock;
1170     config.gridSize[2] = pmeGpu->kernelParams->grid.complexGridSize[majorDim];
1171
1172     int                                timingId  = gtPME_SOLVE;
1173     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1174     if (gridOrdering == GridOrdering::YZX)
1175     {
1176         kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveYZXEnergyKernel
1177                                            : pmeGpu->programHandle_->impl_->solveYZXKernel;
1178     }
1179     else if (gridOrdering == GridOrdering::XYZ)
1180     {
1181         kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveXYZEnergyKernel
1182                                            : pmeGpu->programHandle_->impl_->solveXYZKernel;
1183     }
1184
1185     pme_gpu_start_timing(pmeGpu, timingId);
1186     auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1187 #if c_canEmbedBuffers
1188     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1189 #else
1190     const auto kernelArgs = prepareGpuKernelArguments(
1191             kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->grid.d_splineModuli,
1192             &kernelParamsPtr->constants.d_virialAndEnergy, &kernelParamsPtr->grid.d_fourierGrid);
1193 #endif
1194     launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME solve",
1195                     kernelArgs);
1196     pme_gpu_stop_timing(pmeGpu, timingId);
1197
1198     if (computeEnergyAndVirial)
1199     {
1200         copyFromDeviceBuffer(pmeGpu->staging.h_virialAndEnergy,
1201                              &kernelParamsPtr->constants.d_virialAndEnergy, 0, c_virialAndEnergyCount,
1202                              pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
1203     }
1204
1205     if (copyInputAndOutputGrid)
1206     {
1207         copyFromDeviceBuffer(h_gridFloat, &kernelParamsPtr->grid.d_fourierGrid, 0,
1208                              pmeGpu->archSpecific->complexGridSize, pmeGpu->archSpecific->pmeStream_,
1209                              pmeGpu->settings.transferKind, nullptr);
1210     }
1211 }
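
/* Illustrative sketch (not part of the original file): the solve launch geometry computed
 * in pme_gpu_solve() above, for assumed values of the work-group and warp sizes.
 */
static inline void pmeSolveLaunchGeometrySketch()
{
    const int maxBlockSize      = 256; // assumed solveMaxWorkGroupSize
    const int warpSize          = 32;  // assumed warp/sub-group size
    const int gridLineSize      = 49;  // e.g. nz = 96 gives 96 / 2 + 1 = 49 complex cells
    const int gridLinesPerBlock = std::max(maxBlockSize / gridLineSize, 1);             // 5
    const int blocksPerGridLine = (gridLineSize + maxBlockSize - 1) / maxBlockSize;     // 1
    const int cellsPerBlock     = gridLineSize * gridLinesPerBlock;                     // 245
    const int blockSize         = (cellsPerBlock + warpSize - 1) / warpSize * warpSize; // 256
    // Each block thus handles 5 whole grid lines of 49 cells, rounded up to 8 full warps.
    (void)blocksPerGridLine;
    (void)blockSize;
}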
1212
1213 /*! \brief
1214  * Returns a pointer to the appropriate gather kernel based on the input bool values.
1215  *
1216  * \param[in]  pmeGpu                   The PME GPU structure.
1217  * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
1218  * \param[in]  readSplinesFromGlobal    bool controlling if we should read spline data from global memory
1219  *
1220  * \return Pointer to CUDA kernel
1221  */
1222 inline auto selectGatherKernelPtr(const PmeGpu* pmeGpu, ThreadsPerAtom threadsPerAtom, bool readSplinesFromGlobal)
1223
1224 {
1225     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1226
1227     if (readSplinesFromGlobal)
1228     {
1229         if (threadsPerAtom == ThreadsPerAtom::Order)
1230         {
1231             kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesThPerAtom4;
1232         }
1233         else
1234         {
1235             kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplines;
1236         }
1237     }
1238     else
1239     {
1240         if (threadsPerAtom == ThreadsPerAtom::Order)
1241         {
1242             kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelThPerAtom4;
1243         }
1244         else
1245         {
1246             kernelPtr = pmeGpu->programHandle_->impl_->gatherKernel;
1247         }
1248     }
1249     return kernelPtr;
1250 }
1251
1252
1253 void pme_gpu_gather(PmeGpu* pmeGpu, const float* h_grid)
1254 {
1255     const auto& settings = pmeGpu->settings;
1256     if (!settings.performGPUFFT || settings.copyAllOutputs)
1257     {
1258         pme_gpu_copy_input_gather_grid(pmeGpu, const_cast<float*>(h_grid));
1259     }
1260
1261     if (settings.copyAllOutputs)
1262     {
1263         pme_gpu_copy_input_gather_atom_data(pmeGpu);
1264     }
1265
1266     /* Set only when running unit tests */
1267     const bool   readGlobal = pmeGpu->settings.copyAllOutputs;
1268     const size_t blockSize  = pmeGpu->programHandle_->impl_->gatherWorkGroupSize;
1269     const int    order      = pmeGpu->common->pme_order;
1270     GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1271     const int threadsPerAtom =
1272             (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
1273     const bool recalculateSplines = pmeGpu->settings.recalculateSplines;
1274 #if GMX_GPU == GMX_GPU_OPENCL
1275     GMX_ASSERT(pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::OrderSquared,
1276                "Only 16 threads per atom supported in OpenCL");
1277     GMX_ASSERT(!recalculateSplines, "Recalculating splines not supported in OpenCL");
1278 #endif
1279     const int atomsPerBlock = blockSize / threadsPerAtom;
1280
1281     GMX_ASSERT(!(c_pmeAtomDataBlockSize % atomsPerBlock),
1282                "inconsistent atom data padding vs. gathering block size");
1283
1284     const int blockCount = pmeGpu->nAtomsAlloc / atomsPerBlock;
1285     auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1286
1287     KernelLaunchConfig config;
1288     config.blockSize[0] = order;
1289     config.blockSize[1] = (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? 1 : order);
1290     config.blockSize[2] = atomsPerBlock;
1291     config.gridSize[0]  = dimGrid.first;
1292     config.gridSize[1]  = dimGrid.second;
1293
1294     // TODO test different cache configs
1295
1296     int                                timingId  = gtPME_GATHER;
1297     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = selectGatherKernelPtr(
1298             pmeGpu, pmeGpu->settings.threadsPerAtom, readGlobal || (!recalculateSplines));
1299     // TODO design kernel selection getters and make PmeGpu a friend of PmeGpuProgramImpl
1300
1301     pme_gpu_start_timing(pmeGpu, timingId);
1302     auto*       timingEvent     = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1303     const auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1304 #if c_canEmbedBuffers
1305     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1306 #else
1307     const auto kernelArgs = prepareGpuKernelArguments(
1308             kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->atoms.d_coefficients,
1309             &kernelParamsPtr->grid.d_realGrid, &kernelParamsPtr->atoms.d_theta,
1310             &kernelParamsPtr->atoms.d_dtheta, &kernelParamsPtr->atoms.d_gridlineIndices,
1311             &kernelParamsPtr->atoms.d_forces);
1312 #endif
1313     launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME gather",
1314                     kernelArgs);
1315     pme_gpu_stop_timing(pmeGpu, timingId);
1316
1317     if (pmeGpu->settings.useGpuForceReduction)
1318     {
1319         pmeGpu->archSpecific->pmeForcesReady.markEvent(pmeGpu->archSpecific->pmeStream_);
1320     }
1321     else
1322     {
1323         pme_gpu_copy_output_forces(pmeGpu);
1324     }
1325 }
1326
1327 void* pme_gpu_get_kernelparam_forces(const PmeGpu* pmeGpu)
1328 {
1329     if (pmeGpu && pmeGpu->kernelParams)
1330     {
1331         return pmeGpu->kernelParams->atoms.d_forces;
1332     }
1333     else
1334     {
1335         return nullptr;
1336     }
1337 }
1338
1339 void pme_gpu_set_kernelparam_coordinates(const PmeGpu* pmeGpu, DeviceBuffer<gmx::RVec> d_x)
1340 {
1341     GMX_ASSERT(pmeGpu && pmeGpu->kernelParams,
1342                "PME GPU device buffer can not be set in non-GPU builds or before the GPU PME was "
1343                "initialized.");
1344
1345     GMX_ASSERT(checkDeviceBuffer(d_x, pmeGpu->kernelParams->atoms.nAtoms),
1346                "The device-side buffer can not be set.");
1347
1348     pmeGpu->kernelParams->atoms.d_coordinates = d_x;
1349 }
1350
1351 GpuEventSynchronizer* pme_gpu_get_forces_ready_synchronizer(const PmeGpu* pmeGpu)
1352 {
1353     if (pmeGpu && pmeGpu->kernelParams)
1354     {
1355         return &pmeGpu->archSpecific->pmeForcesReady;
1356     }
1357     else
1358     {
1359         return nullptr;
1360     }
1361 }