src/gromacs/ewald/pme-gpu-internal.cpp
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2016,2017,2018, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35
36 /*! \internal \file
37  *
38  * \brief This file contains internal function implementations
39  * for performing the PME calculations on GPU.
40  *
41  * Note that this file is compiled as regular C++ source in OpenCL builds, but
42  * it is treated as CUDA source in CUDA-enabled GPU builds.
43  *
44  * \author Aleksei Iupinov <a.yupinov@gmail.com>
45  * \ingroup module_ewald
46  */
47
48 #include "gmxpre.h"
49
50 #include "pme-gpu-internal.h"
51
52 #include "config.h"
53
54 #include <list>
55 #include <string>
56
57 #include "gromacs/ewald/ewald-utils.h"
58 #include "gromacs/gpu_utils/gpu_utils.h"
59 #include "gromacs/math/invertmatrix.h"
60 #include "gromacs/math/units.h"
61 #include "gromacs/timing/gpu_timing.h"
62 #include "gromacs/utility/exceptions.h"
63 #include "gromacs/utility/fatalerror.h"
64 #include "gromacs/utility/gmxassert.h"
65 #include "gromacs/utility/logger.h"
66 #include "gromacs/utility/stringutil.h"
67
68 #if GMX_GPU == GMX_GPU_CUDA
69 #include "gromacs/gpu_utils/pmalloc_cuda.h"
70
71 #include "pme.cuh"
72 #elif GMX_GPU == GMX_GPU_OPENCL
73 #include "gromacs/gpu_utils/gmxopencl.h"
74 #endif
75
76 #include "gromacs/ewald/pme.h"
77
78 #include "pme-gpu-3dfft.h"
79 #include "pme-gpu-constants.h"
80 #include "pme-gpu-program-impl.h"
81 #include "pme-gpu-timings.h"
82 #include "pme-gpu-types.h"
83 #include "pme-gpu-types-host.h"
84 #include "pme-gpu-types-host-impl.h"
85 #include "pme-gpu-utils.h"
86 #include "pme-grid.h"
87 #include "pme-internal.h"
88
89 /*! \internal \brief
90  * Wrapper for getting a pointer to the plain C++ part of the GPU kernel parameters structure.
91  *
92  * \param[in] pmeGpu  The PME GPU structure.
93  * \returns The pointer to the kernel parameters.
94  */
95 static PmeGpuKernelParamsBase *pme_gpu_get_kernel_params_base_ptr(const PmeGpu *pmeGpu)
96 {
97     // reinterpret_cast is needed because the derived CUDA structure is not known in this file
98     auto *kernelParamsPtr = reinterpret_cast<PmeGpuKernelParamsBase *>(pmeGpu->kernelParams.get());
99     return kernelParamsPtr;
100 }
101
102 int pme_gpu_get_atom_data_alignment(const PmeGpu * /*unused*/)
103 {
104     // TODO: this can be simplified, as PME_ATOM_DATA_ALIGNMENT is now constant
105     return PME_ATOM_DATA_ALIGNMENT;
106 }
107
108 int pme_gpu_get_atoms_per_warp(const PmeGpu *pmeGpu)
109 {
110     const int order = pmeGpu->common->pme_order;
111     GMX_ASSERT(order > 0, "Invalid PME order");
112     return pmeGpu->programHandle_->impl_->warpSize / PME_SPREADGATHER_THREADS_PER_ATOM;
113 }
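// An illustrative calculation (assuming a warp size of 32 and PME_SPREADGATHER_THREADS_PER_ATOM == 16,
// both defined outside this file): 32 / 16 == 2 atoms are processed per warp, matching the
// atomsPerWarp == 2 case handled in getSplineParamFullIndex() further below.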
114
115 void pme_gpu_synchronize(const PmeGpu *pmeGpu)
116 {
117     gpuStreamSynchronize(pmeGpu->archSpecific->pmeStream);
118 }
119
120 void pme_gpu_alloc_energy_virial(const PmeGpu *pmeGpu)
121 {
122     const size_t energyAndVirialSize = c_virialAndEnergyCount * sizeof(float);
123     allocateDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy, c_virialAndEnergyCount, pmeGpu->archSpecific->context);
124     pmalloc((void **)&pmeGpu->staging.h_virialAndEnergy, energyAndVirialSize);
125 }
126
127 void pme_gpu_free_energy_virial(PmeGpu *pmeGpu)
128 {
129     freeDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy);
130     pfree(pmeGpu->staging.h_virialAndEnergy);
131     pmeGpu->staging.h_virialAndEnergy = nullptr;
132 }
133
134 void pme_gpu_clear_energy_virial(const PmeGpu *pmeGpu)
135 {
136     clearDeviceBufferAsync(&pmeGpu->kernelParams->constants.d_virialAndEnergy, 0,
137                            c_virialAndEnergyCount, pmeGpu->archSpecific->pmeStream);
138 }
139
140 void pme_gpu_realloc_and_copy_bspline_values(const PmeGpu *pmeGpu)
141 {
142     const int splineValuesOffset[DIM] = {
143         0,
144         pmeGpu->kernelParams->grid.realGridSize[XX],
145         pmeGpu->kernelParams->grid.realGridSize[XX] + pmeGpu->kernelParams->grid.realGridSize[YY]
146     };
147     memcpy((void *)&pmeGpu->kernelParams->grid.splineValuesOffset, &splineValuesOffset, sizeof(splineValuesOffset));
148
149     const int newSplineValuesSize = pmeGpu->kernelParams->grid.realGridSize[XX] +
150         pmeGpu->kernelParams->grid.realGridSize[YY] +
151         pmeGpu->kernelParams->grid.realGridSize[ZZ];
152     const bool shouldRealloc = (newSplineValuesSize > pmeGpu->archSpecific->splineValuesSize);
153     reallocateDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, newSplineValuesSize,
154                            &pmeGpu->archSpecific->splineValuesSize, &pmeGpu->archSpecific->splineValuesSizeAlloc, pmeGpu->archSpecific->context);
155     if (shouldRealloc)
156     {
157         /* Reallocate the host buffer */
158         pfree(pmeGpu->staging.h_splineModuli);
159         pmalloc((void **)&pmeGpu->staging.h_splineModuli, newSplineValuesSize * sizeof(float));
160     }
161     for (int i = 0; i < DIM; i++)
162     {
163         memcpy(pmeGpu->staging.h_splineModuli + splineValuesOffset[i], pmeGpu->common->bsp_mod[i].data(), pmeGpu->common->bsp_mod[i].size() * sizeof(float));
164     }
165     /* TODO: pin original buffer instead! */
166     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, pmeGpu->staging.h_splineModuli,
167                        0, newSplineValuesSize,
168                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
169 }
170
171 void pme_gpu_free_bspline_values(const PmeGpu *pmeGpu)
172 {
173     pfree(pmeGpu->staging.h_splineModuli);
174     freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli);
175 }
176
177 void pme_gpu_realloc_forces(PmeGpu *pmeGpu)
178 {
179     const size_t newForcesSize = pmeGpu->nAtomsAlloc * DIM;
180     GMX_ASSERT(newForcesSize > 0, "Bad number of atoms in PME GPU");
181     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, newForcesSize,
182                            &pmeGpu->archSpecific->forcesSize, &pmeGpu->archSpecific->forcesSizeAlloc, pmeGpu->archSpecific->context);
183     pmeGpu->staging.h_forces.reserve(pmeGpu->nAtomsAlloc);
184     pmeGpu->staging.h_forces.resize(pmeGpu->kernelParams->atoms.nAtoms);
185 }
186
187 void pme_gpu_free_forces(const PmeGpu *pmeGpu)
188 {
189     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces);
190 }
191
192 void pme_gpu_copy_input_forces(PmeGpu *pmeGpu)
193 {
194     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
195     float *h_forcesFloat = reinterpret_cast<float *>(pmeGpu->staging.h_forces.data());
196     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, h_forcesFloat,
197                        0, DIM * pmeGpu->kernelParams->atoms.nAtoms,
198                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
199 }
200
201 void pme_gpu_copy_output_forces(PmeGpu *pmeGpu)
202 {
203     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
204     float *h_forcesFloat = reinterpret_cast<float *>(pmeGpu->staging.h_forces.data());
205     copyFromDeviceBuffer(h_forcesFloat, &pmeGpu->kernelParams->atoms.d_forces,
206                          0, DIM * pmeGpu->kernelParams->atoms.nAtoms,
207                          pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
208 }
209
210 void pme_gpu_realloc_coordinates(const PmeGpu *pmeGpu)
211 {
212     const size_t newCoordinatesSize = pmeGpu->nAtomsAlloc * DIM;
213     GMX_ASSERT(newCoordinatesSize > 0, "Bad number of atoms in PME GPU");
214     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coordinates, newCoordinatesSize,
215                            &pmeGpu->archSpecific->coordinatesSize, &pmeGpu->archSpecific->coordinatesSizeAlloc, pmeGpu->archSpecific->context);
216     if (c_usePadding)
217     {
218         const size_t paddingIndex = DIM * pmeGpu->kernelParams->atoms.nAtoms;
219         const size_t paddingCount = DIM * pmeGpu->nAtomsAlloc - paddingIndex;
220         if (paddingCount > 0)
221         {
222             clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coordinates, paddingIndex,
223                                    paddingCount, pmeGpu->archSpecific->pmeStream);
224         }
225     }
226 }
227
228 void pme_gpu_copy_input_coordinates(const PmeGpu *pmeGpu, const rvec *h_coordinates)
229 {
230     GMX_ASSERT(h_coordinates, "Bad host-side coordinate buffer in PME GPU");
231 #if GMX_DOUBLE
232     GMX_RELEASE_ASSERT(false, "Only single precision is supported");
233     GMX_UNUSED_VALUE(h_coordinates);
234 #else
235     const float *h_coordinatesFloat = reinterpret_cast<const float *>(h_coordinates);
236     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coordinates, h_coordinatesFloat,
237                        0, pmeGpu->kernelParams->atoms.nAtoms * DIM,
238                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
239 #endif
240 }
241
242 void pme_gpu_free_coordinates(const PmeGpu *pmeGpu)
243 {
244     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coordinates);
245 }
246
247 void pme_gpu_realloc_and_copy_input_coefficients(const PmeGpu *pmeGpu, const float *h_coefficients)
248 {
249     GMX_ASSERT(h_coefficients, "Bad host-side charge buffer in PME GPU");
250     const size_t newCoefficientsSize = pmeGpu->nAtomsAlloc;
251     GMX_ASSERT(newCoefficientsSize > 0, "Bad number of atoms in PME GPU");
252     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients, newCoefficientsSize,
253                            &pmeGpu->archSpecific->coefficientsSize, &pmeGpu->archSpecific->coefficientsSizeAlloc, pmeGpu->archSpecific->context);
254     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients, const_cast<float *>(h_coefficients),
255                        0, pmeGpu->kernelParams->atoms.nAtoms,
256                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
257     if (c_usePadding)
258     {
259         const size_t paddingIndex = pmeGpu->kernelParams->atoms.nAtoms;
260         const size_t paddingCount = pmeGpu->nAtomsAlloc - paddingIndex;
261         if (paddingCount > 0)
262         {
263             clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coefficients, paddingIndex,
264                                    paddingCount, pmeGpu->archSpecific->pmeStream);
265         }
266     }
267 }
268
269 void pme_gpu_free_coefficients(const PmeGpu *pmeGpu)
270 {
271     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients);
272 }
273
274 void pme_gpu_realloc_spline_data(const PmeGpu *pmeGpu)
275 {
276     const int    order             = pmeGpu->common->pme_order;
277     const int    alignment         = pme_gpu_get_atoms_per_warp(pmeGpu);
278     const size_t nAtomsPadded      = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
279     const int    newSplineDataSize = DIM * order * nAtomsPadded;
280     GMX_ASSERT(newSplineDataSize > 0, "Bad number of atoms in PME GPU");
281     /* Two arrays of the same size */
282     const bool shouldRealloc        = (newSplineDataSize > pmeGpu->archSpecific->splineDataSize);
283     int        currentSizeTemp      = pmeGpu->archSpecific->splineDataSize;
284     int        currentSizeTempAlloc = pmeGpu->archSpecific->splineDataSizeAlloc;
285     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta, newSplineDataSize,
286                            &currentSizeTemp, &currentSizeTempAlloc, pmeGpu->archSpecific->context);
287     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta, newSplineDataSize,
288                            &pmeGpu->archSpecific->splineDataSize, &pmeGpu->archSpecific->splineDataSizeAlloc, pmeGpu->archSpecific->context);
289     // the host side reallocation
290     if (shouldRealloc)
291     {
292         pfree(pmeGpu->staging.h_theta);
293         pmalloc((void **)&pmeGpu->staging.h_theta, newSplineDataSize * sizeof(float));
294         pfree(pmeGpu->staging.h_dtheta);
295         pmalloc((void **)&pmeGpu->staging.h_dtheta, newSplineDataSize * sizeof(float));
296     }
297 }
298
299 void pme_gpu_free_spline_data(const PmeGpu *pmeGpu)
300 {
301     /* Two arrays of the same size */
302     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta);
303     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta);
304     pfree(pmeGpu->staging.h_theta);
305     pfree(pmeGpu->staging.h_dtheta);
306 }
307
308 void pme_gpu_realloc_grid_indices(const PmeGpu *pmeGpu)
309 {
310     const size_t newIndicesSize = DIM * pmeGpu->nAtomsAlloc;
311     GMX_ASSERT(newIndicesSize > 0, "Bad number of atoms in PME GPU");
312     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices, newIndicesSize,
313                            &pmeGpu->archSpecific->gridlineIndicesSize, &pmeGpu->archSpecific->gridlineIndicesSizeAlloc, pmeGpu->archSpecific->context);
314     pfree(pmeGpu->staging.h_gridlineIndices);
315     pmalloc((void **)&pmeGpu->staging.h_gridlineIndices, newIndicesSize * sizeof(int));
316 }
317
318 void pme_gpu_free_grid_indices(const PmeGpu *pmeGpu)
319 {
320     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices);
321     pfree(pmeGpu->staging.h_gridlineIndices);
322 }
323
324 void pme_gpu_realloc_grids(PmeGpu *pmeGpu)
325 {
326     auto     *kernelParamsPtr = pmeGpu->kernelParams.get();
327     const int newRealGridSize = kernelParamsPtr->grid.realGridSizePadded[XX] *
328         kernelParamsPtr->grid.realGridSizePadded[YY] *
329         kernelParamsPtr->grid.realGridSizePadded[ZZ];
330     const int newComplexGridSize = kernelParamsPtr->grid.complexGridSizePadded[XX] *
331         kernelParamsPtr->grid.complexGridSizePadded[YY] *
332         kernelParamsPtr->grid.complexGridSizePadded[ZZ] * 2;
333     // Multiplied by 2 because the complex grid size is counted in complex numbers, while all allocations/pointers are float
334     if (pmeGpu->archSpecific->performOutOfPlaceFFT)
335     {
336         /* 2 separate grids */
337         reallocateDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, newComplexGridSize,
338                                &pmeGpu->archSpecific->complexGridSize, &pmeGpu->archSpecific->complexGridSizeAlloc, pmeGpu->archSpecific->context);
339         reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid, newRealGridSize,
340                                &pmeGpu->archSpecific->realGridSize, &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->context);
341     }
342     else
343     {
344         /* A single buffer so that any grid will fit */
345         const int newGridsSize = std::max(newRealGridSize, newComplexGridSize);
346         reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid, newGridsSize,
347                                &pmeGpu->archSpecific->realGridSize, &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->context);
348         kernelParamsPtr->grid.d_fourierGrid   = kernelParamsPtr->grid.d_realGrid;
349         pmeGpu->archSpecific->complexGridSize = pmeGpu->archSpecific->realGridSize;
350         // the size might get used later for copying the grid
351     }
352 }
353
354 void pme_gpu_free_grids(const PmeGpu *pmeGpu)
355 {
356     if (pmeGpu->archSpecific->performOutOfPlaceFFT)
357     {
358         freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_fourierGrid);
359     }
360     freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid);
361 }
362
363 void pme_gpu_clear_grids(const PmeGpu *pmeGpu)
364 {
365     clearDeviceBufferAsync(&pmeGpu->kernelParams->grid.d_realGrid, 0,
366                            pmeGpu->archSpecific->realGridSize, pmeGpu->archSpecific->pmeStream);
367 }
368
369 void pme_gpu_realloc_and_copy_fract_shifts(PmeGpu *pmeGpu)
370 {
371     pme_gpu_free_fract_shifts(pmeGpu);
372
373     auto        *kernelParamsPtr = pmeGpu->kernelParams.get();
374
375     const int    nx                  = kernelParamsPtr->grid.realGridSize[XX];
376     const int    ny                  = kernelParamsPtr->grid.realGridSize[YY];
377     const int    nz                  = kernelParamsPtr->grid.realGridSize[ZZ];
378     const int    cellCount           = c_pmeNeighborUnitcellCount;
379     const int    gridDataOffset[DIM] = {0, cellCount * nx, cellCount * (nx + ny)};
380
381     memcpy(kernelParamsPtr->grid.tablesOffsets, &gridDataOffset, sizeof(gridDataOffset));
382
383     const int    newFractShiftsSize  = cellCount * (nx + ny + nz);
384
385 #if GMX_GPU == GMX_GPU_CUDA
386     initParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable,
387                          kernelParamsPtr->fractShiftsTableTexture,
388                          pmeGpu->common->fsh.data(),
389                          newFractShiftsSize,
390                          pmeGpu->deviceInfo);
391
392     initParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
393                          kernelParamsPtr->gridlineIndicesTableTexture,
394                          pmeGpu->common->nn.data(),
395                          newFractShiftsSize,
396                          pmeGpu->deviceInfo);
397 #elif GMX_GPU == GMX_GPU_OPENCL
398     // No dedicated texture routines in OpenCL - use plain device buffers and explicit copies
399     allocateDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, newFractShiftsSize, pmeGpu->archSpecific->context);
400     allocateDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, newFractShiftsSize, pmeGpu->archSpecific->context);
401     copyToDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, pmeGpu->common->fsh.data(),
402                        0, newFractShiftsSize,
403                        pmeGpu->archSpecific->pmeStream, GpuApiCallBehavior::Async, nullptr);
404     copyToDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, pmeGpu->common->nn.data(),
405                        0, newFractShiftsSize,
406                        pmeGpu->archSpecific->pmeStream, GpuApiCallBehavior::Async, nullptr);
407 #endif
408 }
409
410 void pme_gpu_free_fract_shifts(const PmeGpu *pmeGpu)
411 {
412     auto *kernelParamsPtr = pmeGpu->kernelParams.get();
413 #if GMX_GPU == GMX_GPU_CUDA
414     destroyParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable,
415                             kernelParamsPtr->fractShiftsTableTexture,
416                             pmeGpu->deviceInfo);
417     destroyParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
418                             kernelParamsPtr->gridlineIndicesTableTexture,
419                             pmeGpu->deviceInfo);
420 #elif GMX_GPU == GMX_GPU_OPENCL
421     freeDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable);
422     freeDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable);
423 #endif
424 }
425
426 bool pme_gpu_stream_query(const PmeGpu *pmeGpu)
427 {
428     return haveStreamTasksCompleted(pmeGpu->archSpecific->pmeStream);
429 }
430
431 void pme_gpu_copy_input_gather_grid(const PmeGpu *pmeGpu, float *h_grid)
432 {
433     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid, h_grid,
434                        0, pmeGpu->archSpecific->realGridSize,
435                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
436 }
437
438 void pme_gpu_copy_output_spread_grid(const PmeGpu *pmeGpu, float *h_grid)
439 {
440     copyFromDeviceBuffer(h_grid, &pmeGpu->kernelParams->grid.d_realGrid,
441                          0, pmeGpu->archSpecific->realGridSize,
442                          pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
443     pmeGpu->archSpecific->syncSpreadGridD2H.markEvent(pmeGpu->archSpecific->pmeStream);
444 }
445
446 void pme_gpu_copy_output_spread_atom_data(const PmeGpu *pmeGpu)
447 {
448     const int    alignment        = pme_gpu_get_atoms_per_warp(pmeGpu);
449     const size_t nAtomsPadded     = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
450     const size_t splinesCount     = DIM * nAtomsPadded * pmeGpu->common->pme_order;
451     auto        *kernelParamsPtr  = pmeGpu->kernelParams.get();
452     copyFromDeviceBuffer(pmeGpu->staging.h_dtheta, &kernelParamsPtr->atoms.d_dtheta,
453                          0, splinesCount,
454                          pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
455     copyFromDeviceBuffer(pmeGpu->staging.h_theta, &kernelParamsPtr->atoms.d_theta,
456                          0, splinesCount,
457                          pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
458     copyFromDeviceBuffer(pmeGpu->staging.h_gridlineIndices, &kernelParamsPtr->atoms.d_gridlineIndices,
459                          0, kernelParamsPtr->atoms.nAtoms * DIM,
460                          pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
461 }
462
463 void pme_gpu_copy_input_gather_atom_data(const PmeGpu *pmeGpu)
464 {
465     const int    alignment       = pme_gpu_get_atoms_per_warp(pmeGpu);
466     const size_t nAtomsPadded    = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
467     const size_t splinesCount    = DIM * nAtomsPadded * pmeGpu->common->pme_order;
468     auto        *kernelParamsPtr = pmeGpu->kernelParams.get();
469     if (c_usePadding)
470     {
471         // TODO: could clear only the padding and not the whole thing, but this is test-exclusive code anyway
472         clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_gridlineIndices, 0,
473                                pmeGpu->nAtomsAlloc * DIM, pmeGpu->archSpecific->pmeStream);
474         clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_dtheta, 0,
475                                pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM, pmeGpu->archSpecific->pmeStream);
476         clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_theta, 0,
477                                pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM, pmeGpu->archSpecific->pmeStream);
478     }
479     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_dtheta, pmeGpu->staging.h_dtheta,
480                        0, splinesCount,
481                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
482     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_theta, pmeGpu->staging.h_theta,
483                        0, splinesCount,
484                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
485     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_gridlineIndices, pmeGpu->staging.h_gridlineIndices,
486                        0, kernelParamsPtr->atoms.nAtoms * DIM,
487                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
488 }
489
490 void pme_gpu_sync_spread_grid(const PmeGpu *pmeGpu)
491 {
492     pmeGpu->archSpecific->syncSpreadGridD2H.waitForEvent();
493 }
494
495 void pme_gpu_init_internal(PmeGpu *pmeGpu)
496 {
497     /* Allocate the target-specific structures */
498     pmeGpu->archSpecific.reset(new PmeGpuSpecific());
499     pmeGpu->kernelParams.reset(new PmeGpuKernelParams());
500
501     pmeGpu->archSpecific->performOutOfPlaceFFT = true;
502     /* This should give better performance, according to the cuFFT documentation.
503      * The performance seems to be the same though.
504      * TODO: PME could also try to pick up nice grid sizes (with factors of 2, 3, 5, 7).
505      */
506
507     // Timing enabling - TODO: put this in gpu_utils (even though generally this is just option handling?) and reuse in NB
508     if (GMX_GPU == GMX_GPU_CUDA)
509     {
510         /* WARNING: CUDA timings are incorrect with multiple streams.
511          *          This is the main reason why they are disabled by default.
512          */
513         // TODO: Consider turning on by default when we can detect nr of streams.
514         pmeGpu->archSpecific->useTiming = (getenv("GMX_ENABLE_GPU_TIMING") != nullptr);
515     }
516     else if (GMX_GPU == GMX_GPU_OPENCL)
517     {
518         pmeGpu->archSpecific->useTiming = (getenv("GMX_DISABLE_GPU_TIMING") == nullptr);
519     }
520
521     // TODO: this is just a convenient reuse because programHandle_ currently is in charge of creating context
522     pmeGpu->archSpecific->context = pmeGpu->programHandle_->impl_->context;
523
524 #if GMX_GPU == GMX_GPU_CUDA
525     // Prepare to use the device that this PME task was assigned earlier.
526     CU_RET_ERR(cudaSetDevice(pmeGpu->deviceInfo->id), "Switching to PME CUDA device");
527 #endif
528
529 #if GMX_GPU == GMX_GPU_CUDA
530     pmeGpu->maxGridWidthX = pmeGpu->deviceInfo->prop.maxGridSize[0];
531 #elif GMX_GPU == GMX_GPU_OPENCL
532     //TODO we'll need work size checks for OpenCL too
533 #endif
534
535     /* Creating a PME GPU stream:
536      * - default high priority with CUDA
537      * - no priorities implemented yet with OpenCL; see #2532
538      */
539 #if GMX_GPU == GMX_GPU_CUDA
540     cudaError_t stat;
541     int         highest_priority, lowest_priority;
542     stat = cudaDeviceGetStreamPriorityRange(&lowest_priority, &highest_priority);
543     CU_RET_ERR(stat, "PME cudaDeviceGetStreamPriorityRange failed");
544     stat = cudaStreamCreateWithPriority(&pmeGpu->archSpecific->pmeStream,
545                                         cudaStreamDefault, //cudaStreamNonBlocking,
546                                         highest_priority);
547     CU_RET_ERR(stat, "cudaStreamCreateWithPriority on the PME stream failed");
548 #elif GMX_GPU == GMX_GPU_OPENCL
549     cl_command_queue_properties queueProperties = pmeGpu->archSpecific->useTiming ? CL_QUEUE_PROFILING_ENABLE : 0;
550     cl_device_id                device_id       = pmeGpu->deviceInfo->ocl_gpu_id.ocl_device_id;
551     cl_int                      clError;
552     pmeGpu->archSpecific->pmeStream = clCreateCommandQueue(pmeGpu->archSpecific->context,
553                                                            device_id, queueProperties, &clError);
554     if (clError != CL_SUCCESS)
555     {
556         GMX_THROW(gmx::InternalError("Failed to create PME command queue"));
557     }
558 #endif
559 }
560
561 void pme_gpu_destroy_specific(const PmeGpu *pmeGpu)
562 {
563 #if GMX_GPU == GMX_GPU_CUDA
564     /* Destroy the CUDA stream */
565     cudaError_t stat = cudaStreamDestroy(pmeGpu->archSpecific->pmeStream);
566     CU_RET_ERR(stat, "PME cudaStreamDestroy error");
567 #elif GMX_GPU == GMX_GPU_OPENCL
568     cl_int clError = clReleaseCommandQueue(pmeGpu->archSpecific->pmeStream);
569     if (clError != CL_SUCCESS)
570     {
571         gmx_warning("Failed to destroy PME command queue");
572     }
573 #endif
574 }
575
576 void pme_gpu_reinit_3dfft(const PmeGpu *pmeGpu)
577 {
578     if (pme_gpu_performs_FFT(pmeGpu))
579     {
580         pmeGpu->archSpecific->fftSetup.resize(0);
581         for (int i = 0; i < pmeGpu->common->ngrids; i++)
582         {
583             pmeGpu->archSpecific->fftSetup.push_back(std::unique_ptr<GpuParallel3dFft>(new GpuParallel3dFft(pmeGpu)));
584         }
585     }
586 }
587
588 void pme_gpu_destroy_3dfft(const PmeGpu *pmeGpu)
589 {
590     pmeGpu->archSpecific->fftSetup.resize(0);
591 }
592
593 int getSplineParamFullIndex(int order, int splineIndex, int dimIndex, int atomIndex, int atomsPerWarp)
594 {
595     if (order != 4)
596     {
597         throw order;
598     }
599     constexpr int fixedOrder = 4;
600     GMX_UNUSED_VALUE(fixedOrder);
601
602     const int atomWarpIndex = atomIndex % atomsPerWarp;
603     const int warpIndex     = atomIndex / atomsPerWarp;
604     int       indexBase, result;
605     switch (atomsPerWarp)
606     {
607         case 2:
608             indexBase = getSplineParamIndexBase<fixedOrder, 2>(warpIndex, atomWarpIndex);
609             result    = getSplineParamIndex<fixedOrder, 2>(indexBase, dimIndex, splineIndex);
610             break;
611
612         default:
613             GMX_THROW(gmx::NotImplementedError(gmx::formatString("Test function call not unrolled for atomsPerWarp = %d in getSplineParamFullIndex", atomsPerWarp)));
614     }
615     return result;
616 }
617
618 gmx::ArrayRef<gmx::RVec> pme_gpu_get_forces(PmeGpu *pmeGpu)
619 {
620     return pmeGpu->staging.h_forces;
621 }
622
623 void pme_gpu_get_energy_virial(const PmeGpu *pmeGpu, real *energy, matrix virial)
624 {
625     for (int j = 0; j < c_virialAndEnergyCount; j++)
626     {
627         GMX_ASSERT(std::isfinite(pmeGpu->staging.h_virialAndEnergy[j]), "PME GPU produces incorrect energy/virial.");
628     }
629
630     GMX_ASSERT(energy, "Invalid energy output pointer in PME GPU");
631     unsigned int j = 0;
632     virial[XX][XX] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
633     virial[YY][YY] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
634     virial[ZZ][ZZ] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
635     virial[XX][YY] = virial[YY][XX] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
636     virial[XX][ZZ] = virial[ZZ][XX] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
637     virial[YY][ZZ] = virial[ZZ][YY] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
638     *energy        = 0.5f * pmeGpu->staging.h_virialAndEnergy[j++];
639 }
640
641 void pme_gpu_update_input_box(PmeGpu gmx_unused       *pmeGpu,
642                               const matrix gmx_unused  box)
643 {
644 #if GMX_DOUBLE
645     GMX_THROW(gmx::NotImplementedError("PME is implemented for single-precision only on GPU"));
646 #else
647     matrix  scaledBox;
648     pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
649     auto   *kernelParamsPtr      = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
650     kernelParamsPtr->current.boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
651     GMX_ASSERT(kernelParamsPtr->current.boxVolume != 0.0f, "Zero volume of the unit cell");
652     matrix recipBox;
653     gmx::invertBoxMatrix(scaledBox, recipBox);
654
655     /* The GPU recipBox is transposed as compared to the CPU recipBox.
656      * Spread uses matrix columns (while solve and gather use rows).
657      * There is no particular reason for this; it might be further rethought/optimized for better access patterns.
658      */
659     const real newRecipBox[DIM][DIM] =
660     {
661         {recipBox[XX][XX], recipBox[YY][XX], recipBox[ZZ][XX]},
662         {             0.0, recipBox[YY][YY], recipBox[ZZ][YY]},
663         {             0.0,              0.0, recipBox[ZZ][ZZ]}
664     };
665     memcpy(kernelParamsPtr->current.recipBox, newRecipBox, sizeof(matrix));
666 #endif
667 }
668
669 /*! \brief \libinternal
670  * (Re-)initializes all the PME GPU data related to the grid size and cut-off.
671  *
672  * \param[in] pmeGpu            The PME GPU structure.
673  */
674 static void pme_gpu_reinit_grids(PmeGpu *pmeGpu)
675 {
676     auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
677     kernelParamsPtr->grid.ewaldFactor = (M_PI * M_PI) / (pmeGpu->common->ewaldcoeff_q * pmeGpu->common->ewaldcoeff_q);
678
679     /* The grid size variants */
680     for (int i = 0; i < DIM; i++)
681     {
682         kernelParamsPtr->grid.realGridSize[i]       = pmeGpu->common->nk[i];
683         kernelParamsPtr->grid.realGridSizeFP[i]     = (float)kernelParamsPtr->grid.realGridSize[i];
684         kernelParamsPtr->grid.realGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
685
686         // The complex grid currently uses no padding;
687         // if it starts to do so, then another test should be added for that
688         kernelParamsPtr->grid.complexGridSize[i]       = kernelParamsPtr->grid.realGridSize[i];
689         kernelParamsPtr->grid.complexGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
690     }
691     /* FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
692     if (!pme_gpu_performs_FFT(pmeGpu))
693     {
694         // This allows the GPU spreading grid and the CPU fftgrid to have the same layout, so that we can copy the data directly
695         kernelParamsPtr->grid.realGridSizePadded[ZZ] = (kernelParamsPtr->grid.realGridSize[ZZ] / 2 + 1) * 2;
696     }
697
698     /* GPU FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
699     kernelParamsPtr->grid.complexGridSize[ZZ] /= 2;
700     kernelParamsPtr->grid.complexGridSize[ZZ]++;
701     kernelParamsPtr->grid.complexGridSizePadded[ZZ] = kernelParamsPtr->grid.complexGridSize[ZZ];
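    // For example, a real grid with nz == 64 gives 64 / 2 + 1 == 33 complex elements along ZZ,
    // while the CPU-compatible padded real size set above (only when the FFT is not done on the GPU)
    // would be (64 / 2 + 1) * 2 == 66 floats.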
702
703     pme_gpu_realloc_and_copy_fract_shifts(pmeGpu);
704     pme_gpu_realloc_and_copy_bspline_values(pmeGpu);
705     pme_gpu_realloc_grids(pmeGpu);
706     pme_gpu_reinit_3dfft(pmeGpu);
707 }
708
709 /* Several GPU functions that refer to the CPU PME data live here.
710  * We would like to keep these away from the GPU-framework specific code for clarity,
711  * as well as to avoid compilation issues with MPI.
712  */
713
714 /*! \brief \libinternal
715  * Copies everything useful from the PME CPU to the PME GPU structure.
716  * The goal is to minimize interaction with the PME CPU structure in the GPU code.
717  *
718  * \param[in] pme         The PME structure.
719  */
720 static void pme_gpu_copy_common_data_from(const gmx_pme_t *pme)
721 {
722     /* TODO: Consider refactoring the CPU PME code to use the same structure,
723      * so that this function becomes 2 lines */
724     PmeGpu *pmeGpu             = pme->gpu;
725     pmeGpu->common->ngrids        = pme->ngrids;
726     pmeGpu->common->epsilon_r     = pme->epsilon_r;
727     pmeGpu->common->ewaldcoeff_q  = pme->ewaldcoeff_q;
728     pmeGpu->common->nk[XX]        = pme->nkx;
729     pmeGpu->common->nk[YY]        = pme->nky;
730     pmeGpu->common->nk[ZZ]        = pme->nkz;
731     pmeGpu->common->pme_order     = pme->pme_order;
732     for (int i = 0; i < DIM; i++)
733     {
734         pmeGpu->common->bsp_mod[i].assign(pme->bsp_mod[i], pme->bsp_mod[i] + pmeGpu->common->nk[i]);
735     }
736     const int cellCount = c_pmeNeighborUnitcellCount;
737     pmeGpu->common->fsh.resize(0);
738     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshx, pme->fshx + cellCount * pme->nkx);
739     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshy, pme->fshy + cellCount * pme->nky);
740     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshz, pme->fshz + cellCount * pme->nkz);
741     pmeGpu->common->nn.resize(0);
742     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnx, pme->nnx + cellCount * pme->nkx);
743     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nny, pme->nny + cellCount * pme->nky);
744     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnz, pme->nnz + cellCount * pme->nkz);
745     pmeGpu->common->runMode   = pme->runMode;
746     pmeGpu->common->boxScaler = pme->boxScaler;
747 }
748
749 /*! \libinternal \brief
750  * Initializes the PME GPU data at the beginning of the run.
751  * TODO: this should become PmeGpu::PmeGpu()
752  *
753  * \param[in,out] pme            The PME structure.
754  * \param[in,out] gpuInfo        The GPU information structure.
755  * \param[in]     pmeGpuProgram  The handle to the program/kernel data created outside (e.g. in unit tests/runner)
756  */
757 static void pme_gpu_init(gmx_pme_t          *pme,
758                          gmx_device_info_t  *gpuInfo,
759                          PmeGpuProgramHandle pmeGpuProgram)
760 {
761     pme->gpu          = new PmeGpu();
762     PmeGpu *pmeGpu = pme->gpu;
763     changePinningPolicy(&pmeGpu->staging.h_forces, pme_get_pinning_policy());
764     pmeGpu->common = std::shared_ptr<PmeShared>(new PmeShared());
765
766     /* These settings are set here for the whole run; dynamic ones are set in pme_gpu_reinit() */
767     /* A convenience variable. */
768     pmeGpu->settings.useDecomposition = (pme->nnodes == 1);
769     /* TODO: CPU gather with GPU spread is broken due to different theta/dtheta layout. */
770     pmeGpu->settings.performGPUGather = true;
771
772     pme_gpu_set_testing(pmeGpu, false);
773
774     pmeGpu->deviceInfo = gpuInfo;
775     GMX_ASSERT(pmeGpuProgram != nullptr, "GPU kernels must be already compiled");
776     pmeGpu->programHandle_ = pmeGpuProgram;
777
778     pme_gpu_init_internal(pmeGpu);
779     pme_gpu_alloc_energy_virial(pmeGpu);
780
781     pme_gpu_copy_common_data_from(pme);
782
783     GMX_ASSERT(pmeGpu->common->epsilon_r != 0.0f, "PME GPU: bad electrostatic coefficient");
784
785     auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
786     kernelParamsPtr->constants.elFactor = ONE_4PI_EPS0 / pmeGpu->common->epsilon_r;
787 }
788
789 void pme_gpu_transform_spline_atom_data(const PmeGpu *pmeGpu, const pme_atomcomm_t *atc,
790                                         PmeSplineDataType type, int dimIndex, PmeLayoutTransform transform)
791 {
792     // The GPU atom spline data is laid out in a different way currently than the CPU one.
793     // This function converts the data from GPU to CPU layout (in the host memory).
794     // It is only intended for testing purposes so far.
795     // Ideally we should use similar layouts on CPU and GPU if we care about mixed modes and their performance
796     // (e.g. spreading on GPU, gathering on CPU).
797     GMX_RELEASE_ASSERT(atc->nthread == 1, "Only the serial PME data layout is supported");
798     const uintmax_t threadIndex  = 0;
799     const auto      atomCount    = pme_gpu_get_kernel_params_base_ptr(pmeGpu)->atoms.nAtoms;
800     const auto      atomsPerWarp = pme_gpu_get_atoms_per_warp(pmeGpu);
801     const auto      pmeOrder     = pmeGpu->common->pme_order;
802
803     real           *cpuSplineBuffer;
804     float          *h_splineBuffer;
805     switch (type)
806     {
807         case PmeSplineDataType::Values:
808             cpuSplineBuffer = atc->spline[threadIndex].theta[dimIndex];
809             h_splineBuffer  = pmeGpu->staging.h_theta;
810             break;
811
812         case PmeSplineDataType::Derivatives:
813             cpuSplineBuffer = atc->spline[threadIndex].dtheta[dimIndex];
814             h_splineBuffer  = pmeGpu->staging.h_dtheta;
815             break;
816
817         default:
818             GMX_THROW(gmx::InternalError("Unknown spline data type"));
819     }
820
821     for (auto atomIndex = 0; atomIndex < atomCount; atomIndex++)
822     {
823         for (auto orderIndex = 0; orderIndex < pmeOrder; orderIndex++)
824         {
825             const auto gpuValueIndex = getSplineParamFullIndex(pmeOrder, orderIndex, dimIndex, atomIndex, atomsPerWarp);
826             const auto cpuValueIndex = atomIndex * pmeOrder + orderIndex;
827             GMX_ASSERT(cpuValueIndex < atomCount * pmeOrder, "Atom spline data index out of bounds (while transforming GPU data layout for host)");
828             switch (transform)
829             {
830                 case PmeLayoutTransform::GpuToHost:
831                     cpuSplineBuffer[cpuValueIndex] = h_splineBuffer[gpuValueIndex];
832                     break;
833
834                 case PmeLayoutTransform::HostToGpu:
835                     h_splineBuffer[gpuValueIndex] = cpuSplineBuffer[cpuValueIndex];
836                     break;
837
838                 default:
839                     GMX_THROW(gmx::InternalError("Unknown layout transform"));
840             }
841         }
842     }
843 }
844
845 void pme_gpu_get_real_grid_sizes(const PmeGpu *pmeGpu, gmx::IVec *gridSize, gmx::IVec *paddedGridSize)
846 {
847     GMX_ASSERT(gridSize != nullptr, "");
848     GMX_ASSERT(paddedGridSize != nullptr, "");
849     GMX_ASSERT(pmeGpu != nullptr, "");
850     auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
851     for (int i = 0; i < DIM; i++)
852     {
853         (*gridSize)[i]       = kernelParamsPtr->grid.realGridSize[i];
854         (*paddedGridSize)[i] = kernelParamsPtr->grid.realGridSizePadded[i];
855     }
856 }
857
858 void pme_gpu_reinit(gmx_pme_t          *pme,
859                     gmx_device_info_t  *gpuInfo,
860                     PmeGpuProgramHandle pmeGpuProgram)
861 {
862     if (!pme_gpu_active(pme))
863     {
864         return;
865     }
866
867     if (!pme->gpu)
868     {
869         /* First-time initialization */
870         pme_gpu_init(pme, gpuInfo, pmeGpuProgram);
871     }
872     else
873     {
874         /* After this call nothing in the GPU code should refer to the gmx_pme_t *pme itself - until the next pme_gpu_reinit */
875         pme_gpu_copy_common_data_from(pme);
876     }
877     /* GPU FFT will only get used for a single rank. */
878     pme->gpu->settings.performGPUFFT   = (pme->gpu->common->runMode == PmeRunMode::GPU) && !pme_gpu_uses_dd(pme->gpu);
879     pme->gpu->settings.performGPUSolve = (pme->gpu->common->runMode == PmeRunMode::GPU);
880
881     /* Reinit active timers */
882     pme_gpu_reinit_timings(pme->gpu);
883
884     pme_gpu_reinit_grids(pme->gpu);
885     // Note: if timing the reinit launch overhead becomes more relevant
886     // (e.g. with regular PP-PME re-balancing), we should pass wcycle here.
887     pme_gpu_reinit_computation(pme, nullptr);
888     /* Clear the previous box - doesn't hurt, and forces the PME CPU recipbox
889      * update for mixed mode on grid switch. TODO: use shared recipbox field.
890      */
891     std::memset(pme->gpu->common->previousBox, 0, sizeof(pme->gpu->common->previousBox));
892 }
893
894 void pme_gpu_destroy(PmeGpu *pmeGpu)
895 {
896     /* Free lots of data */
897     pme_gpu_free_energy_virial(pmeGpu);
898     pme_gpu_free_bspline_values(pmeGpu);
899     pme_gpu_free_forces(pmeGpu);
900     pme_gpu_free_coordinates(pmeGpu);
901     pme_gpu_free_coefficients(pmeGpu);
902     pme_gpu_free_spline_data(pmeGpu);
903     pme_gpu_free_grid_indices(pmeGpu);
904     pme_gpu_free_fract_shifts(pmeGpu);
905     pme_gpu_free_grids(pmeGpu);
906
907     pme_gpu_destroy_3dfft(pmeGpu);
908
909     /* Free the GPU-framework specific data last */
910     pme_gpu_destroy_specific(pmeGpu);
911
912     delete pmeGpu;
913 }
914
915 void pme_gpu_reinit_atoms(PmeGpu *pmeGpu, const int nAtoms, const real *charges)
916 {
917     auto      *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
918     kernelParamsPtr->atoms.nAtoms = nAtoms;
919     const int  alignment = pme_gpu_get_atom_data_alignment(pmeGpu);
920     pmeGpu->nAtomsPadded = ((nAtoms + alignment - 1) / alignment) * alignment;
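    // For example, assuming an alignment of 64 (the actual value comes from
    // pme_gpu_get_atom_data_alignment() above), nAtoms == 100 would be padded up to 128.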
921     const int  nAtomsAlloc   = c_usePadding ? pmeGpu->nAtomsPadded : nAtoms;
922     const bool haveToRealloc = (pmeGpu->nAtomsAlloc < nAtomsAlloc); /* This check might be redundant, but is logical */
923     pmeGpu->nAtomsAlloc = nAtomsAlloc;
924
925 #if GMX_DOUBLE
926     GMX_RELEASE_ASSERT(false, "Only single precision supported");
927     GMX_UNUSED_VALUE(charges);
928 #else
929     pme_gpu_realloc_and_copy_input_coefficients(pmeGpu, reinterpret_cast<const float *>(charges));
930     /* This could also be guarded by haveToRealloc, but the copy always needs to be performed */
931 #endif
932
933     if (haveToRealloc)
934     {
935         pme_gpu_realloc_coordinates(pmeGpu);
936         pme_gpu_realloc_forces(pmeGpu);
937         pme_gpu_realloc_spline_data(pmeGpu);
938         pme_gpu_realloc_grid_indices(pmeGpu);
939     }
940 }
941
942 void pme_gpu_3dfft(const PmeGpu *pmeGpu, gmx_fft_direction dir, int grid_index)
943 {
944     int   timerId = (dir == GMX_FFT_REAL_TO_COMPLEX) ? gtPME_FFT_R2C : gtPME_FFT_C2R;
945
946     auto *timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timerId);
947     pme_gpu_start_timing(pmeGpu, timerId);
948     pmeGpu->archSpecific->fftSetup[grid_index]->perform3dFft(dir, timingEvent);
949     pme_gpu_stop_timing(pmeGpu, timerId);
950 }
951
952 /*! \brief
953  * Given possibly large \p blockCount, returns a compact 1D or 2D grid for kernel scheduling,
954  * to minimize number of unused blocks.
955  */
956 std::pair<int, int> inline pmeGpuCreateGrid(const PmeGpu *pmeGpu, int blockCount)
957 {
958     // How many rows of maximum X width do we need (hopefully just one)
959     const int minRowCount = (blockCount + pmeGpu->maxGridWidthX - 1) / pmeGpu->maxGridWidthX;
960     // Trying to make things even
961     const int colCount = (blockCount + minRowCount - 1) / minRowCount;
962     GMX_ASSERT((colCount * minRowCount - blockCount) >= 0, "pmeGpuCreateGrid: totally wrong");
963     GMX_ASSERT((colCount * minRowCount - blockCount) < minRowCount, "pmeGpuCreateGrid: excessive blocks");
964     return std::pair<int, int>(colCount, minRowCount);
965 }
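// A worked usage sketch with hypothetical numbers (not taken from this code):
//     // with pmeGpu->maxGridWidthX == 65535:
//     const auto dimGrid = pmeGpuCreateGrid(pmeGpu, 100000);
//     // dimGrid.second == 2      (minRowCount == ceil(100000 / 65535))
//     // dimGrid.first  == 50000  (colCount   == ceil(100000 / 2)), so the
//     // 50000 x 2 launch grid has no unused blocks at all.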
966
967 void pme_gpu_spread(const PmeGpu    *pmeGpu,
968                     int gmx_unused   gridIndex,
969                     real            *h_grid,
970                     bool             computeSplines,
971                     bool             spreadCharges)
972 {
973     GMX_ASSERT(computeSplines || spreadCharges, "PME spline/spread kernel has invalid input (nothing to do)");
974     const auto   *kernelParamsPtr = pmeGpu->kernelParams.get();
975     GMX_ASSERT(kernelParamsPtr->atoms.nAtoms > 0, "No atom data in PME GPU spread");
976
977     const int maxThreadsPerBlock = c_spreadMaxWarpsPerBlock * pmeGpu->programHandle_->impl_->warpSize;
978     // TODO take device limitations (CL_DEVICE_MAX_WORK_GROUP_SIZE) into account for all 3 kernels
979     const int order         = pmeGpu->common->pme_order;
980     const int atomsPerBlock = maxThreadsPerBlock / PME_SPREADGATHER_THREADS_PER_ATOM;
981     // TODO: pick smaller block size in runtime if needed
982     // (e.g. on 660 Ti where 50% occupancy is ~25% faster than 100% occupancy with RNAse (~17.8k atoms))
983     // If doing so, change atomsPerBlock in the kernels as well.
984     // TODO: test varying block sizes on modern architectures as well
985     // TODO: also consider using cudaFuncSetCacheConfig() for preferring shared memory on older architectures
986     // (for spline data mostly, together with the varying PME_GPU_PARALLEL_SPLINE define)
987     GMX_ASSERT(!c_usePadding || !(PME_ATOM_DATA_ALIGNMENT % atomsPerBlock), "inconsistent atom data padding vs. spreading block size");
988
989     const int          blockCount = pmeGpu->nAtomsPadded / atomsPerBlock;
990     auto               dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
991
992     KernelLaunchConfig config;
993     config.blockSize[0] = config.blockSize[1] = order;
994     config.blockSize[2] = atomsPerBlock;
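    // For illustration only, with hypothetical values (the real ones depend on the device,
    // c_spreadMaxWarpsPerBlock and PME_SPREADGATHER_THREADS_PER_ATOM): a warp size of 32,
    // 8 warps per block and 16 threads per atom give maxThreadsPerBlock == 256 and
    // atomsPerBlock == 16, i.e. a 4 x 4 x 16 == 256-thread block for order == 4.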
995     config.gridSize[0]  = dimGrid.first;
996     config.gridSize[1]  = dimGrid.second;
997     config.stream       = pmeGpu->archSpecific->pmeStream;
998
999     if (order != 4)
1000     {
1001         GMX_THROW(gmx::NotImplementedError("The code for pme_order != 4 was not implemented!"));
1002     }
1003
1004     int  timingId;
1005     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1006     if (computeSplines)
1007     {
1008         if (spreadCharges)
1009         {
1010             timingId  = gtPME_SPLINEANDSPREAD;
1011             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernel;
1012         }
1013         else
1014         {
1015             timingId  = gtPME_SPLINE;
1016             kernelPtr = pmeGpu->programHandle_->impl_->splineKernel;
1017         }
1018     }
1019     else
1020     {
1021         timingId  = gtPME_SPREAD;
1022         kernelPtr = pmeGpu->programHandle_->impl_->spreadKernel;
1023     }
1024
1025     pme_gpu_start_timing(pmeGpu, timingId);
1026     auto      *timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1027     const auto kernelArgs  = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1028     launchGpuKernel(kernelPtr, config, timingEvent, "PME spline/spread", kernelArgs);
1029     pme_gpu_stop_timing(pmeGpu, timingId);
1030
1031     const bool copyBackGrid = spreadCharges && (pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_FFT(pmeGpu));
1032     if (copyBackGrid)
1033     {
1034         pme_gpu_copy_output_spread_grid(pmeGpu, h_grid);
1035     }
1036     const bool copyBackAtomData = computeSplines && (pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_gather(pmeGpu));
1037     if (copyBackAtomData)
1038     {
1039         pme_gpu_copy_output_spread_atom_data(pmeGpu);
1040     }
1041 }
1042
1043 void pme_gpu_solve(const PmeGpu *pmeGpu, t_complex *h_grid,
1044                    GridOrdering gridOrdering, bool computeEnergyAndVirial)
1045 {
1046     const bool   copyInputAndOutputGrid = pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_FFT(pmeGpu);
1047
1048     auto        *kernelParamsPtr = pmeGpu->kernelParams.get();
1049
1050     float       *h_gridFloat = reinterpret_cast<float *>(h_grid);
1051     if (copyInputAndOutputGrid)
1052     {
1053         copyToDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, h_gridFloat,
1054                            0, pmeGpu->archSpecific->complexGridSize,
1055                            pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
1056     }
1057
1058     int majorDim = -1, middleDim = -1, minorDim = -1;
1059     switch (gridOrdering)
1060     {
1061         case GridOrdering::YZX:
1062             majorDim  = YY;
1063             middleDim = ZZ;
1064             minorDim  = XX;
1065             break;
1066
1067         case GridOrdering::XYZ:
1068             majorDim  = XX;
1069             middleDim = YY;
1070             minorDim  = ZZ;
1071             break;
1072
1073         default:
1074             GMX_ASSERT(false, "Implement grid ordering here and below for the kernel launch");
1075     }
1076
1077     const int    maxThreadsPerBlock = c_solveMaxWarpsPerBlock * pmeGpu->programHandle_->impl_->warpSize;
1078     const int    maxBlockSize       = maxThreadsPerBlock;
1079     const int    gridLineSize       = pmeGpu->kernelParams->grid.complexGridSize[minorDim];
1080     const int    gridLinesPerBlock  = std::max(maxBlockSize / gridLineSize, 1);
1081     const int    blocksPerGridLine  = (gridLineSize + maxBlockSize - 1) / maxBlockSize;
1082     const int    cellsPerBlock      = gridLineSize * gridLinesPerBlock;
1083     const size_t warpSize           = pmeGpu->programHandle_->impl_->warpSize;
1084     const int    blockSize          = (cellsPerBlock + warpSize - 1) / warpSize * warpSize;
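    // A worked example with hypothetical sizes (the real values depend on the device and on
    // c_solveMaxWarpsPerBlock): gridLineSize == 33 and maxBlockSize == 256 give gridLinesPerBlock == 7,
    // blocksPerGridLine == 1, cellsPerBlock == 231, and blockSize rounded up to 256 (8 warps of 32 threads).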
1085
1086
1087     KernelLaunchConfig config;
1088     config.blockSize[0] = blockSize;
1089     config.gridSize[0]  = blocksPerGridLine;
1090     // rounding up to full warps so that shuffle operations produce defined results
1091     config.gridSize[1]  = (pmeGpu->kernelParams->grid.complexGridSize[middleDim] + gridLinesPerBlock - 1) / gridLinesPerBlock;
1092     config.gridSize[2]  = pmeGpu->kernelParams->grid.complexGridSize[majorDim];
1093     config.stream       = pmeGpu->archSpecific->pmeStream;
1094
1095     int  timingId = gtPME_SOLVE;
1096     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1097     if (gridOrdering == GridOrdering::YZX)
1098     {
1099         kernelPtr = computeEnergyAndVirial ?
1100             pmeGpu->programHandle_->impl_->solveYZXEnergyKernel :
1101             pmeGpu->programHandle_->impl_->solveYZXKernel;
1102     }
1103     else if (gridOrdering == GridOrdering::XYZ)
1104     {
1105         kernelPtr = computeEnergyAndVirial ?
1106             pmeGpu->programHandle_->impl_->solveXYZEnergyKernel :
1107             pmeGpu->programHandle_->impl_->solveXYZKernel;
1108     }
1109
1110     pme_gpu_start_timing(pmeGpu, timingId);
1111     auto      *timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1112     const auto kernelArgs  = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1113     launchGpuKernel(kernelPtr, config, timingEvent, "PME solve", kernelArgs);
1114     pme_gpu_stop_timing(pmeGpu, timingId);
1115
1116     if (computeEnergyAndVirial)
1117     {
1118         copyFromDeviceBuffer(pmeGpu->staging.h_virialAndEnergy, &kernelParamsPtr->constants.d_virialAndEnergy,
1119                              0, c_virialAndEnergyCount,
1120                              pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
1121     }
1122
1123     if (copyInputAndOutputGrid)
1124     {
1125         copyFromDeviceBuffer(h_gridFloat, &kernelParamsPtr->grid.d_fourierGrid,
1126                              0, pmeGpu->archSpecific->complexGridSize,
1127                              pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
1128     }
1129 }
1130
1131 void pme_gpu_gather(PmeGpu                *pmeGpu,
1132                     PmeForceOutputHandling forceTreatment,
1133                     const float           *h_grid
1134                     )
1135 {
1136     /* Copying the input CPU forces for reduction */
1137     if (forceTreatment != PmeForceOutputHandling::Set)
1138     {
1139         pme_gpu_copy_input_forces(pmeGpu);
1140     }
1141
1142     const int    order           = pmeGpu->common->pme_order;
1143     const auto  *kernelParamsPtr = pmeGpu->kernelParams.get();
1144
1145     if (!pme_gpu_performs_FFT(pmeGpu) || pme_gpu_is_testing(pmeGpu))
1146     {
1147         pme_gpu_copy_input_gather_grid(pmeGpu, const_cast<float *>(h_grid));
1148     }
1149
1150     if (pme_gpu_is_testing(pmeGpu))
1151     {
1152         pme_gpu_copy_input_gather_atom_data(pmeGpu);
1153     }
1154
1155     const int maxThreadsPerBlock = c_gatherMaxWarpsPerBlock * pmeGpu->programHandle_->impl_->warpSize;
1156     const int atomsPerBlock      =  maxThreadsPerBlock / PME_SPREADGATHER_THREADS_PER_ATOM;
1157     GMX_ASSERT(!c_usePadding || !(PME_ATOM_DATA_ALIGNMENT % atomsPerBlock), "inconsistent atom data padding vs. gathering block size");
1158
1159     const int          blockCount = pmeGpu->nAtomsPadded / atomsPerBlock;
1160     auto               dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1161
1162     KernelLaunchConfig config;
1163     config.blockSize[0] = config.blockSize[1] = order;
1164     config.blockSize[2] = atomsPerBlock;
1165     config.gridSize[0]  = dimGrid.first;
1166     config.gridSize[1]  = dimGrid.second;
1167     config.stream       = pmeGpu->archSpecific->pmeStream;
1168
1169     if (order != 4)
1170     {
1171         GMX_THROW(gmx::NotImplementedError("The code for pme_order != 4 was not implemented!"));
1172     }
1173
1174     // TODO test different cache configs
1175
1176     int  timingId = gtPME_GATHER;
1177     // TODO design kernel selection getters and make PmeGpu a friend of PmeGpuProgramImpl
1178     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = (forceTreatment == PmeForceOutputHandling::Set) ?
1179         pmeGpu->programHandle_->impl_->gatherKernel :
1180         pmeGpu->programHandle_->impl_->gatherReduceWithInputKernel;
1181
1182     pme_gpu_start_timing(pmeGpu, timingId);
1183     auto      *timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1184     const auto kernelArgs  = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1185     launchGpuKernel(kernelPtr, config, timingEvent, "PME gather", kernelArgs);
1186     pme_gpu_stop_timing(pmeGpu, timingId);
1187
1188     pme_gpu_copy_output_forces(pmeGpu);
1189 }