1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2016,2017,2018, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35
36 /*! \internal \file
37  *
38  * \brief This file contains internal function implementations
39  * for performing the PME calculations on GPU.
40  *
41  * Note that this file is compiled as regular C++ source in OpenCL builds, but
42  * it is treated as CUDA source in CUDA-enabled GPU builds.
43  *
44  * \author Aleksei Iupinov <a.yupinov@gmail.com>
45  * \ingroup module_ewald
46  */
47
48 #include "gmxpre.h"
49
50 #include "pme-gpu-internal.h"
51
52 #include "config.h"
53
54 #include <list>
55 #include <string>
56
57 #include "gromacs/compat/make_unique.h"
58 #include "gromacs/ewald/ewald-utils.h"
59 #include "gromacs/gpu_utils/gpu_utils.h"
60 #include "gromacs/math/invertmatrix.h"
61 #include "gromacs/math/units.h"
62 #include "gromacs/timing/gpu_timing.h"
63 #include "gromacs/utility/exceptions.h"
64 #include "gromacs/utility/fatalerror.h"
65 #include "gromacs/utility/gmxassert.h"
66 #include "gromacs/utility/logger.h"
67 #include "gromacs/utility/stringutil.h"
68
69 #if GMX_GPU == GMX_GPU_CUDA
70 #include "gromacs/gpu_utils/pmalloc_cuda.h"
71
72 #include "pme.cuh"
73 #elif GMX_GPU == GMX_GPU_OPENCL
74 #include "gromacs/gpu_utils/gmxopencl.h"
75 #endif
76
77 #include "gromacs/ewald/pme.h"
78
79 #include "pme-gpu-3dfft.h"
80 #include "pme-gpu-constants.h"
81 #include "pme-gpu-program-impl.h"
82 #include "pme-gpu-timings.h"
83 #include "pme-gpu-types.h"
84 #include "pme-gpu-types-host.h"
85 #include "pme-gpu-types-host-impl.h"
86 #include "pme-gpu-utils.h"
87 #include "pme-grid.h"
88 #include "pme-internal.h"
89
90 /*! \internal \brief
91  * Wrapper for getting a pointer to the plain C++ part of the GPU kernel parameters structure.
92  *
93  * \param[in] pmeGpu  The PME GPU structure.
94  * \returns The pointer to the kernel parameters.
95  */
96 static PmeGpuKernelParamsBase *pme_gpu_get_kernel_params_base_ptr(const PmeGpu *pmeGpu)
97 {
98     // reinterpret_cast is needed because the derived CUDA structure is not known in this file
99     auto *kernelParamsPtr = reinterpret_cast<PmeGpuKernelParamsBase *>(pmeGpu->kernelParams.get());
100     return kernelParamsPtr;
101 }
102
103 int pme_gpu_get_atom_data_alignment(const PmeGpu * /*unused*/)
104 {
105     // TODO: this can be simplified, as PME_ATOM_DATA_ALIGNMENT is now constant
106     return PME_ATOM_DATA_ALIGNMENT;
107 }
108
109 int pme_gpu_get_atoms_per_warp(const PmeGpu *pmeGpu)
110 {
111     const int order = pmeGpu->common->pme_order;
112     GMX_ASSERT(order > 0, "Invalid PME order");
113     return pmeGpu->programHandle_->impl_->warpSize / PME_SPREADGATHER_THREADS_PER_ATOM;
114 }
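/* Illustrative arithmetic (a sketch, not part of the build): with, e.g., a warp size of 32 and
 * PME_SPREADGATHER_THREADS_PER_ATOM equal to 16 (the actual values depend on the architecture
 * and build), each warp processes 32 / 16 = 2 atoms, and the spline/spread atom data below are
 * padded to a multiple of that count.
 */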
115
116 void pme_gpu_synchronize(const PmeGpu *pmeGpu)
117 {
118     gpuStreamSynchronize(pmeGpu->archSpecific->pmeStream);
119 }
120
121 void pme_gpu_alloc_energy_virial(PmeGpu *pmeGpu)
122 {
123     const size_t energyAndVirialSize = c_virialAndEnergyCount * sizeof(float);
124     allocateDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy, c_virialAndEnergyCount, pmeGpu->archSpecific->context);
125     pmalloc(reinterpret_cast<void **>(&pmeGpu->staging.h_virialAndEnergy), energyAndVirialSize);
126 }
127
128 void pme_gpu_free_energy_virial(PmeGpu *pmeGpu)
129 {
130     freeDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy);
131     pfree(pmeGpu->staging.h_virialAndEnergy);
132     pmeGpu->staging.h_virialAndEnergy = nullptr;
133 }
134
135 void pme_gpu_clear_energy_virial(const PmeGpu *pmeGpu)
136 {
137     clearDeviceBufferAsync(&pmeGpu->kernelParams->constants.d_virialAndEnergy, 0,
138                            c_virialAndEnergyCount, pmeGpu->archSpecific->pmeStream);
139 }
140
141 void pme_gpu_realloc_and_copy_bspline_values(PmeGpu *pmeGpu)
142 {
143     const int splineValuesOffset[DIM] = {
144         0,
145         pmeGpu->kernelParams->grid.realGridSize[XX],
146         pmeGpu->kernelParams->grid.realGridSize[XX] + pmeGpu->kernelParams->grid.realGridSize[YY]
147     };
148     memcpy(&pmeGpu->kernelParams->grid.splineValuesOffset, &splineValuesOffset, sizeof(splineValuesOffset));
149
150     const int newSplineValuesSize = pmeGpu->kernelParams->grid.realGridSize[XX] +
151         pmeGpu->kernelParams->grid.realGridSize[YY] +
152         pmeGpu->kernelParams->grid.realGridSize[ZZ];
153     const bool shouldRealloc = (newSplineValuesSize > pmeGpu->archSpecific->splineValuesSize);
154     reallocateDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, newSplineValuesSize,
155                            &pmeGpu->archSpecific->splineValuesSize, &pmeGpu->archSpecific->splineValuesSizeAlloc, pmeGpu->archSpecific->context);
156     if (shouldRealloc)
157     {
158         /* Reallocate the host buffer */
159         pfree(pmeGpu->staging.h_splineModuli);
160         pmalloc(reinterpret_cast<void **>(&pmeGpu->staging.h_splineModuli), newSplineValuesSize * sizeof(float));
161     }
162     for (int i = 0; i < DIM; i++)
163     {
164         memcpy(pmeGpu->staging.h_splineModuli + splineValuesOffset[i], pmeGpu->common->bsp_mod[i].data(), pmeGpu->common->bsp_mod[i].size() * sizeof(float));
165     }
166     /* TODO: pin original buffer instead! */
167     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, pmeGpu->staging.h_splineModuli,
168                        0, newSplineValuesSize,
169                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
170 }
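/* Layout sketch (illustrative grid only): the B-spline moduli of all three dimensions are packed
 * into the single d_splineModuli array using splineValuesOffset. For a hypothetical 48 x 32 x 24
 * real grid the packing would be
 *     indices  0 .. 47   moduli along X  (offset 0)
 *     indices 48 .. 79   moduli along Y  (offset 48)
 *     indices 80 .. 103  moduli along Z  (offset 48 + 32)
 * which matches the offsets {0, nx, nx + ny} and the total size nx + ny + nz computed above.
 */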
171
172 void pme_gpu_free_bspline_values(const PmeGpu *pmeGpu)
173 {
174     pfree(pmeGpu->staging.h_splineModuli);
175     freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli);
176 }
177
178 void pme_gpu_realloc_forces(PmeGpu *pmeGpu)
179 {
180     const size_t newForcesSize = pmeGpu->nAtomsAlloc * DIM;
181     GMX_ASSERT(newForcesSize > 0, "Bad number of atoms in PME GPU");
182     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, newForcesSize,
183                            &pmeGpu->archSpecific->forcesSize, &pmeGpu->archSpecific->forcesSizeAlloc, pmeGpu->archSpecific->context);
184     pmeGpu->staging.h_forces.reserveWithPadding(pmeGpu->nAtomsAlloc);
185     pmeGpu->staging.h_forces.resizeWithPadding(pmeGpu->kernelParams->atoms.nAtoms);
186 }
187
188 void pme_gpu_free_forces(const PmeGpu *pmeGpu)
189 {
190     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces);
191 }
192
193 void pme_gpu_copy_input_forces(PmeGpu *pmeGpu)
194 {
195     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
196     float *h_forcesFloat = reinterpret_cast<float *>(pmeGpu->staging.h_forces.data());
197     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, h_forcesFloat,
198                        0, DIM * pmeGpu->kernelParams->atoms.nAtoms,
199                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
200 }
201
202 void pme_gpu_copy_output_forces(PmeGpu *pmeGpu)
203 {
204     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
205     float *h_forcesFloat = reinterpret_cast<float *>(pmeGpu->staging.h_forces.data());
206     copyFromDeviceBuffer(h_forcesFloat, &pmeGpu->kernelParams->atoms.d_forces,
207                          0, DIM * pmeGpu->kernelParams->atoms.nAtoms,
208                          pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
209 }
210
211 void pme_gpu_realloc_coordinates(const PmeGpu *pmeGpu)
212 {
213     const size_t newCoordinatesSize = pmeGpu->nAtomsAlloc * DIM;
214     GMX_ASSERT(newCoordinatesSize > 0, "Bad number of atoms in PME GPU");
215     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coordinates, newCoordinatesSize,
216                            &pmeGpu->archSpecific->coordinatesSize, &pmeGpu->archSpecific->coordinatesSizeAlloc, pmeGpu->archSpecific->context);
217     if (c_usePadding)
218     {
219         const size_t paddingIndex = DIM * pmeGpu->kernelParams->atoms.nAtoms;
220         const size_t paddingCount = DIM * pmeGpu->nAtomsAlloc - paddingIndex;
221         if (paddingCount > 0)
222         {
223             clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coordinates, paddingIndex,
224                                    paddingCount, pmeGpu->archSpecific->pmeStream);
225         }
226     }
227 }
228
229 void pme_gpu_copy_input_coordinates(const PmeGpu *pmeGpu, const rvec *h_coordinates)
230 {
231     GMX_ASSERT(h_coordinates, "Bad host-side coordinate buffer in PME GPU");
232 #if GMX_DOUBLE
233     GMX_RELEASE_ASSERT(false, "Only single precision is supported");
234     GMX_UNUSED_VALUE(h_coordinates);
235 #else
236     const float *h_coordinatesFloat = reinterpret_cast<const float *>(h_coordinates);
237     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coordinates, h_coordinatesFloat,
238                        0, pmeGpu->kernelParams->atoms.nAtoms * DIM,
239                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
240 #endif
241 }
242
243 void pme_gpu_free_coordinates(const PmeGpu *pmeGpu)
244 {
245     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coordinates);
246 }
247
248 void pme_gpu_realloc_and_copy_input_coefficients(const PmeGpu *pmeGpu, const float *h_coefficients)
249 {
250     GMX_ASSERT(h_coefficients, "Bad host-side charge buffer in PME GPU");
251     const size_t newCoefficientsSize = pmeGpu->nAtomsAlloc;
252     GMX_ASSERT(newCoefficientsSize > 0, "Bad number of atoms in PME GPU");
253     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients, newCoefficientsSize,
254                            &pmeGpu->archSpecific->coefficientsSize, &pmeGpu->archSpecific->coefficientsSizeAlloc, pmeGpu->archSpecific->context);
255     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients, const_cast<float *>(h_coefficients),
256                        0, pmeGpu->kernelParams->atoms.nAtoms,
257                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
258     if (c_usePadding)
259     {
260         const size_t paddingIndex = pmeGpu->kernelParams->atoms.nAtoms;
261         const size_t paddingCount = pmeGpu->nAtomsAlloc - paddingIndex;
262         if (paddingCount > 0)
263         {
264             clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coefficients, paddingIndex,
265                                    paddingCount, pmeGpu->archSpecific->pmeStream);
266         }
267     }
268 }
269
270 void pme_gpu_free_coefficients(const PmeGpu *pmeGpu)
271 {
272     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients);
273 }
274
275 void pme_gpu_realloc_spline_data(PmeGpu *pmeGpu)
276 {
277     const int    order             = pmeGpu->common->pme_order;
278     const int    alignment         = pme_gpu_get_atoms_per_warp(pmeGpu);
279     const size_t nAtomsPadded      = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
280     const int    newSplineDataSize = DIM * order * nAtomsPadded;
281     GMX_ASSERT(newSplineDataSize > 0, "Bad number of atoms in PME GPU");
282     /* Two arrays of the same size */
283     const bool shouldRealloc        = (newSplineDataSize > pmeGpu->archSpecific->splineDataSize);
284     int        currentSizeTemp      = pmeGpu->archSpecific->splineDataSize;
285     int        currentSizeTempAlloc = pmeGpu->archSpecific->splineDataSizeAlloc;
286     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta, newSplineDataSize,
287                            &currentSizeTemp, &currentSizeTempAlloc, pmeGpu->archSpecific->context);
288     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta, newSplineDataSize,
289                            &pmeGpu->archSpecific->splineDataSize, &pmeGpu->archSpecific->splineDataSizeAlloc, pmeGpu->archSpecific->context);
290     // the host side reallocation
291     if (shouldRealloc)
292     {
293         pfree(pmeGpu->staging.h_theta);
294         pmalloc(reinterpret_cast<void **>(&pmeGpu->staging.h_theta), newSplineDataSize * sizeof(float));
295         pfree(pmeGpu->staging.h_dtheta);
296         pmalloc(reinterpret_cast<void **>(&pmeGpu->staging.h_dtheta), newSplineDataSize * sizeof(float));
297     }
298 }
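/* Sizing sketch (illustrative numbers): with pme_order = 4, DIM = 3, 2 atoms per warp and
 * nAtomsAlloc = 1000, nAtomsPadded rounds up to 1000 (already a multiple of 2), so each of the
 * two spline arrays (theta and dtheta) holds 3 * 4 * 1000 = 12000 floats.
 */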
299
300 void pme_gpu_free_spline_data(const PmeGpu *pmeGpu)
301 {
302     /* Two arrays of the same size */
303     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta);
304     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta);
305     pfree(pmeGpu->staging.h_theta);
306     pfree(pmeGpu->staging.h_dtheta);
307 }
308
309 void pme_gpu_realloc_grid_indices(PmeGpu *pmeGpu)
310 {
311     const size_t newIndicesSize = DIM * pmeGpu->nAtomsAlloc;
312     GMX_ASSERT(newIndicesSize > 0, "Bad number of atoms in PME GPU");
313     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices, newIndicesSize,
314                            &pmeGpu->archSpecific->gridlineIndicesSize, &pmeGpu->archSpecific->gridlineIndicesSizeAlloc, pmeGpu->archSpecific->context);
315     pfree(pmeGpu->staging.h_gridlineIndices);
316     pmalloc(reinterpret_cast<void **>(&pmeGpu->staging.h_gridlineIndices), newIndicesSize * sizeof(int));
317 }
318
319 void pme_gpu_free_grid_indices(const PmeGpu *pmeGpu)
320 {
321     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices);
322     pfree(pmeGpu->staging.h_gridlineIndices);
323 }
324
325 void pme_gpu_realloc_grids(PmeGpu *pmeGpu)
326 {
327     auto     *kernelParamsPtr = pmeGpu->kernelParams.get();
328     const int newRealGridSize = kernelParamsPtr->grid.realGridSizePadded[XX] *
329         kernelParamsPtr->grid.realGridSizePadded[YY] *
330         kernelParamsPtr->grid.realGridSizePadded[ZZ];
331     const int newComplexGridSize = kernelParamsPtr->grid.complexGridSizePadded[XX] *
332         kernelParamsPtr->grid.complexGridSizePadded[YY] *
333         kernelParamsPtr->grid.complexGridSizePadded[ZZ] * 2;
334     // Multiplied by 2 because the complex grid size is counted in complex numbers, while all allocations/pointers are in floats
335     if (pmeGpu->archSpecific->performOutOfPlaceFFT)
336     {
337         /* 2 separate grids */
338         reallocateDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, newComplexGridSize,
339                                &pmeGpu->archSpecific->complexGridSize, &pmeGpu->archSpecific->complexGridSizeAlloc, pmeGpu->archSpecific->context);
340         reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid, newRealGridSize,
341                                &pmeGpu->archSpecific->realGridSize, &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->context);
342     }
343     else
344     {
345         /* A single buffer so that any grid will fit */
346         const int newGridsSize = std::max(newRealGridSize, newComplexGridSize);
347         reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid, newGridsSize,
348                                &pmeGpu->archSpecific->realGridSize, &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->context);
349         kernelParamsPtr->grid.d_fourierGrid   = kernelParamsPtr->grid.d_realGrid;
350         pmeGpu->archSpecific->complexGridSize = pmeGpu->archSpecific->realGridSize;
351         // the size might get used later for copying the grid
352     }
353 }
354
355 void pme_gpu_free_grids(const PmeGpu *pmeGpu)
356 {
357     if (pmeGpu->archSpecific->performOutOfPlaceFFT)
358     {
359         freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_fourierGrid);
360     }
361     freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid);
362 }
363
364 void pme_gpu_clear_grids(const PmeGpu *pmeGpu)
365 {
366     clearDeviceBufferAsync(&pmeGpu->kernelParams->grid.d_realGrid, 0,
367                            pmeGpu->archSpecific->realGridSize, pmeGpu->archSpecific->pmeStream);
368 }
369
370 void pme_gpu_realloc_and_copy_fract_shifts(PmeGpu *pmeGpu)
371 {
372     pme_gpu_free_fract_shifts(pmeGpu);
373
374     auto        *kernelParamsPtr = pmeGpu->kernelParams.get();
375
376     const int    nx                  = kernelParamsPtr->grid.realGridSize[XX];
377     const int    ny                  = kernelParamsPtr->grid.realGridSize[YY];
378     const int    nz                  = kernelParamsPtr->grid.realGridSize[ZZ];
379     const int    cellCount           = c_pmeNeighborUnitcellCount;
380     const int    gridDataOffset[DIM] = {0, cellCount * nx, cellCount * (nx + ny)};
381
382     memcpy(kernelParamsPtr->grid.tablesOffsets, &gridDataOffset, sizeof(gridDataOffset));
383
384     const int    newFractShiftsSize  = cellCount * (nx + ny + nz);
385
386 #if GMX_GPU == GMX_GPU_CUDA
387     initParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable,
388                          kernelParamsPtr->fractShiftsTableTexture,
389                          pmeGpu->common->fsh.data(),
390                          newFractShiftsSize,
391                          pmeGpu->deviceInfo);
392
393     initParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
394                          kernelParamsPtr->gridlineIndicesTableTexture,
395                          pmeGpu->common->nn.data(),
396                          newFractShiftsSize,
397                          pmeGpu->deviceInfo);
398 #elif GMX_GPU == GMX_GPU_OPENCL
399     // No dedicated texture/lookup-table routines in the OpenCL path, so plain device buffers with explicit copies are used
400     allocateDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, newFractShiftsSize, pmeGpu->archSpecific->context);
401     allocateDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, newFractShiftsSize, pmeGpu->archSpecific->context);
402     copyToDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, pmeGpu->common->fsh.data(),
403                        0, newFractShiftsSize,
404                        pmeGpu->archSpecific->pmeStream, GpuApiCallBehavior::Async, nullptr);
405     copyToDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, pmeGpu->common->nn.data(),
406                        0, newFractShiftsSize,
407                        pmeGpu->archSpecific->pmeStream, GpuApiCallBehavior::Async, nullptr);
408 #endif
409 }
410
411 void pme_gpu_free_fract_shifts(const PmeGpu *pmeGpu)
412 {
413     auto *kernelParamsPtr = pmeGpu->kernelParams.get();
414 #if GMX_GPU == GMX_GPU_CUDA
415     destroyParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable,
416                             kernelParamsPtr->fractShiftsTableTexture,
417                             pmeGpu->deviceInfo);
418     destroyParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
419                             kernelParamsPtr->gridlineIndicesTableTexture,
420                             pmeGpu->deviceInfo);
421 #elif GMX_GPU == GMX_GPU_OPENCL
422     freeDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable);
423     freeDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable);
424 #endif
425 }
426
427 bool pme_gpu_stream_query(const PmeGpu *pmeGpu)
428 {
429     return haveStreamTasksCompleted(pmeGpu->archSpecific->pmeStream);
430 }
431
432 void pme_gpu_copy_input_gather_grid(const PmeGpu *pmeGpu, float *h_grid)
433 {
434     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid, h_grid,
435                        0, pmeGpu->archSpecific->realGridSize,
436                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
437 }
438
439 void pme_gpu_copy_output_spread_grid(const PmeGpu *pmeGpu, float *h_grid)
440 {
441     copyFromDeviceBuffer(h_grid, &pmeGpu->kernelParams->grid.d_realGrid,
442                          0, pmeGpu->archSpecific->realGridSize,
443                          pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
444     pmeGpu->archSpecific->syncSpreadGridD2H.markEvent(pmeGpu->archSpecific->pmeStream);
445 }
446
447 void pme_gpu_copy_output_spread_atom_data(const PmeGpu *pmeGpu)
448 {
449     const int    alignment        = pme_gpu_get_atoms_per_warp(pmeGpu);
450     const size_t nAtomsPadded     = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
451     const size_t splinesCount     = DIM * nAtomsPadded * pmeGpu->common->pme_order;
452     auto        *kernelParamsPtr  = pmeGpu->kernelParams.get();
453     copyFromDeviceBuffer(pmeGpu->staging.h_dtheta, &kernelParamsPtr->atoms.d_dtheta,
454                          0, splinesCount,
455                          pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
456     copyFromDeviceBuffer(pmeGpu->staging.h_theta, &kernelParamsPtr->atoms.d_theta,
457                          0, splinesCount,
458                          pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
459     copyFromDeviceBuffer(pmeGpu->staging.h_gridlineIndices, &kernelParamsPtr->atoms.d_gridlineIndices,
460                          0, kernelParamsPtr->atoms.nAtoms * DIM,
461                          pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
462 }
463
464 void pme_gpu_copy_input_gather_atom_data(const PmeGpu *pmeGpu)
465 {
466     const int    alignment       = pme_gpu_get_atoms_per_warp(pmeGpu);
467     const size_t nAtomsPadded    = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
468     const size_t splinesCount    = DIM * nAtomsPadded * pmeGpu->common->pme_order;
469     auto        *kernelParamsPtr = pmeGpu->kernelParams.get();
470     if (c_usePadding)
471     {
472         // TODO: could clear only the padding and not the whole thing, but this is test-exclusive code anyway
473         clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_gridlineIndices, 0,
474                                pmeGpu->nAtomsAlloc * DIM, pmeGpu->archSpecific->pmeStream);
475         clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_dtheta, 0,
476                                pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM, pmeGpu->archSpecific->pmeStream);
477         clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_theta, 0,
478                                pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM, pmeGpu->archSpecific->pmeStream);
479     }
480     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_dtheta, pmeGpu->staging.h_dtheta,
481                        0, splinesCount,
482                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
483     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_theta, pmeGpu->staging.h_theta,
484                        0, splinesCount,
485                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
486     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_gridlineIndices, pmeGpu->staging.h_gridlineIndices,
487                        0, kernelParamsPtr->atoms.nAtoms * DIM,
488                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
489 }
490
491 void pme_gpu_sync_spread_grid(const PmeGpu *pmeGpu)
492 {
493     pmeGpu->archSpecific->syncSpreadGridD2H.waitForEvent();
494 }
495
496 void pme_gpu_init_internal(PmeGpu *pmeGpu)
497 {
498 #if GMX_GPU == GMX_GPU_CUDA
499     // Prepare to use the device that this PME task was assigned earlier.
500     // Other entities, such as CUDA timing events, are known to implicitly use the device context.
501     CU_RET_ERR(cudaSetDevice(pmeGpu->deviceInfo->id), "Switching to PME CUDA device");
502 #endif
503
504     /* Allocate the target-specific structures */
505     pmeGpu->archSpecific.reset(new PmeGpuSpecific());
506     pmeGpu->kernelParams.reset(new PmeGpuKernelParams());
507
508     pmeGpu->archSpecific->performOutOfPlaceFFT = true;
509     /* This should give better performance, according to the cuFFT documentation.
510      * The performance seems to be the same though.
511      * TODO: PME could also try to pick up nice grid sizes (with factors of 2, 3, 5, 7).
512      */
513
514     // TODO: this is just a convenient reuse, because programHandle_ is currently in charge of creating the context
515     pmeGpu->archSpecific->context = pmeGpu->programHandle_->impl_->context;
516
517     // Timing enabling - TODO: move this into gpu_utils (even though it is mostly just option handling) and reuse it in NB
518     if (GMX_GPU == GMX_GPU_CUDA)
519     {
520         /* WARNING: CUDA timings are incorrect with multiple streams.
521          *          This is the main reason why they are disabled by default.
522          */
523         // TODO: Consider turning on by default when we can detect nr of streams.
524         pmeGpu->archSpecific->useTiming = (getenv("GMX_ENABLE_GPU_TIMING") != nullptr);
525     }
526     else if (GMX_GPU == GMX_GPU_OPENCL)
527     {
528         pmeGpu->archSpecific->useTiming = (getenv("GMX_DISABLE_GPU_TIMING") == nullptr);
529     }
530
531 #if GMX_GPU == GMX_GPU_CUDA
532     pmeGpu->maxGridWidthX = pmeGpu->deviceInfo->prop.maxGridSize[0];
533 #elif GMX_GPU == GMX_GPU_OPENCL
534     pmeGpu->maxGridWidthX = INT32_MAX / 2;
535     // TODO: is there really no global work size limit in OpenCL?
536 #endif
537
538     /* Creating a PME GPU stream:
539      * - default high priority with CUDA
540      * - no priorities implemented yet with OpenCL; see #2532
541      */
542 #if GMX_GPU == GMX_GPU_CUDA
543     cudaError_t stat;
544     int         highest_priority, lowest_priority;
545     stat = cudaDeviceGetStreamPriorityRange(&lowest_priority, &highest_priority);
546     CU_RET_ERR(stat, "PME cudaDeviceGetStreamPriorityRange failed");
547     stat = cudaStreamCreateWithPriority(&pmeGpu->archSpecific->pmeStream,
548                                         cudaStreamDefault, //cudaStreamNonBlocking,
549                                         highest_priority);
550     CU_RET_ERR(stat, "cudaStreamCreateWithPriority on the PME stream failed");
551 #elif GMX_GPU == GMX_GPU_OPENCL
552     cl_command_queue_properties queueProperties = pmeGpu->archSpecific->useTiming ? CL_QUEUE_PROFILING_ENABLE : 0;
553     cl_device_id                device_id       = pmeGpu->deviceInfo->ocl_gpu_id.ocl_device_id;
554     cl_int                      clError;
555     pmeGpu->archSpecific->pmeStream = clCreateCommandQueue(pmeGpu->archSpecific->context,
556                                                            device_id, queueProperties, &clError);
557     if (clError != CL_SUCCESS)
558     {
559         GMX_THROW(gmx::InternalError("Failed to create PME command queue"));
560     }
561 #endif
562 }
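/* Usage note (a sketch of the environment-variable handling set up above): with CUDA, GPU timings
 * stay off unless GMX_ENABLE_GPU_TIMING is set in the environment, e.g.
 *
 *     GMX_ENABLE_GPU_TIMING=1 gmx mdrun ...
 *
 * while with OpenCL timings are on unless GMX_DISABLE_GPU_TIMING is set. The mdrun command line is
 * illustrative only; the variables themselves are the ones queried above.
 */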
563
564 void pme_gpu_destroy_specific(const PmeGpu *pmeGpu)
565 {
566 #if GMX_GPU == GMX_GPU_CUDA
567     /* Destroy the CUDA stream */
568     cudaError_t stat = cudaStreamDestroy(pmeGpu->archSpecific->pmeStream);
569     CU_RET_ERR(stat, "PME cudaStreamDestroy error");
570 #elif GMX_GPU == GMX_GPU_OPENCL
571     cl_int clError = clReleaseCommandQueue(pmeGpu->archSpecific->pmeStream);
572     if (clError != CL_SUCCESS)
573     {
574         gmx_warning("Failed to destroy PME command queue");
575     }
576 #endif
577 }
578
579 void pme_gpu_reinit_3dfft(const PmeGpu *pmeGpu)
580 {
581     if (pme_gpu_performs_FFT(pmeGpu))
582     {
583         pmeGpu->archSpecific->fftSetup.resize(0);
584         for (int i = 0; i < pmeGpu->common->ngrids; i++)
585         {
586             pmeGpu->archSpecific->fftSetup.push_back(gmx::compat::make_unique<GpuParallel3dFft>(pmeGpu));
587         }
588     }
589 }
590
591 void pme_gpu_destroy_3dfft(const PmeGpu *pmeGpu)
592 {
593     pmeGpu->archSpecific->fftSetup.resize(0);
594 }
595
596 int getSplineParamFullIndex(int order, int splineIndex, int dimIndex, int atomIndex, int atomsPerWarp)
597 {
598     if (order != 4)
599     {
600         throw order;
601     }
602     constexpr int fixedOrder = 4;
603     GMX_UNUSED_VALUE(fixedOrder);
604
605     const int atomWarpIndex = atomIndex % atomsPerWarp;
606     const int warpIndex     = atomIndex / atomsPerWarp;
607     int       indexBase, result;
608     switch (atomsPerWarp)
609     {
610         case 1:
611             indexBase = getSplineParamIndexBase<fixedOrder, 1>(warpIndex, atomWarpIndex);
612             result    = getSplineParamIndex<fixedOrder, 1>(indexBase, dimIndex, splineIndex);
613             break;
614
615         case 2:
616             indexBase = getSplineParamIndexBase<fixedOrder, 2>(warpIndex, atomWarpIndex);
617             result    = getSplineParamIndex<fixedOrder, 2>(indexBase, dimIndex, splineIndex);
618             break;
619
620         case 4:
621             indexBase = getSplineParamIndexBase<fixedOrder, 4>(warpIndex, atomWarpIndex);
622             result    = getSplineParamIndex<fixedOrder, 4>(indexBase, dimIndex, splineIndex);
623             break;
624
625         case 8:
626             indexBase = getSplineParamIndexBase<fixedOrder, 8>(warpIndex, atomWarpIndex);
627             result    = getSplineParamIndex<fixedOrder, 8>(indexBase, dimIndex, splineIndex);
628             break;
629
630         default:
631             GMX_THROW(gmx::NotImplementedError(gmx::formatString("Test function call not unrolled for atomsPerWarp = %d in getSplineParamFullIndex", atomsPerWarp)));
632     }
633     return result;
634 }
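/* Usage sketch (test-only helper, illustrative values): to locate the GPU-layout index of the
 * third spline value (splineIndex = 2) along YY for atom 5, with 2 atoms per warp and the only
 * supported order of 4:
 *
 *     const int gpuValueIndex = getSplineParamFullIndex(4, 2, YY, 5, 2);
 *
 * Internally, atom 5 belongs to warp 5 / 2 = 2 with intra-warp slot 5 % 2 = 1, and the final
 * offset comes from the templated helpers in pme-gpu-utils.h.
 */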
635
636 gmx::ArrayRef<gmx::RVec> pme_gpu_get_forces(PmeGpu *pmeGpu)
637 {
638     return pmeGpu->staging.h_forces;
639 }
640
641 void pme_gpu_get_energy_virial(const PmeGpu *pmeGpu, real *energy, matrix virial)
642 {
643     for (int j = 0; j < c_virialAndEnergyCount; j++)
644     {
645         GMX_ASSERT(std::isfinite(pmeGpu->staging.h_virialAndEnergy[j]), "PME GPU produces incorrect energy/virial.");
646     }
647
648     GMX_ASSERT(energy, "Invalid energy output pointer in PME GPU");
649     unsigned int j = 0;
650     virial[XX][XX] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
651     virial[YY][YY] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
652     virial[ZZ][ZZ] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
653     virial[XX][YY] = virial[YY][XX] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
654     virial[XX][ZZ] = virial[ZZ][XX] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
655     virial[YY][ZZ] = virial[ZZ][YY] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
656     *energy        = 0.5f * pmeGpu->staging.h_virialAndEnergy[j++];
657 }
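/* Staging-buffer layout (as unpacked above): the c_virialAndEnergyCount floats of
 * h_virialAndEnergy hold, in order, the XX, YY, ZZ, XY, XZ and YZ virial components followed by
 * the energy; the 0.25 and 0.5 prefactors convert the values accumulated by the solve kernel
 * into the final virial matrix elements and energy.
 */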
658
659 void pme_gpu_update_input_box(PmeGpu gmx_unused       *pmeGpu,
660                               const matrix gmx_unused  box)
661 {
662 #if GMX_DOUBLE
663     GMX_THROW(gmx::NotImplementedError("PME is implemented for single-precision only on GPU"));
664 #else
665     matrix  scaledBox;
666     pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
667     auto   *kernelParamsPtr      = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
668     kernelParamsPtr->current.boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
669     GMX_ASSERT(kernelParamsPtr->current.boxVolume != 0.0f, "Zero volume of the unit cell");
670     matrix recipBox;
671     gmx::invertBoxMatrix(scaledBox, recipBox);
672
673     /* The GPU recipBox is transposed as compared to the CPU recipBox.
674      * Spread uses matrix columns (while solve and gather use rows).
675      * There is no particular reason for this; it might be further rethought/optimized for better access patterns.
676      */
677     const real newRecipBox[DIM][DIM] =
678     {
679         {recipBox[XX][XX], recipBox[YY][XX], recipBox[ZZ][XX]},
680         {             0.0, recipBox[YY][YY], recipBox[ZZ][YY]},
681         {             0.0,              0.0, recipBox[ZZ][ZZ]}
682     };
683     memcpy(kernelParamsPtr->current.recipBox, newRecipBox, sizeof(matrix));
684 #endif
685 }
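/* Transposition sketch (illustrative): for a lower-triangular CPU recipBox
 *
 *     ( rxx   0    0  )
 *     ( ryx  ryy   0  )
 *     ( rzx  rzy  rzz )
 *
 * the copy stored above is its transpose,
 *
 *     ( rxx  ryx  rzx )
 *     (  0   ryy  rzy )
 *     (  0    0   rzz )
 *
 * so the spread kernel reads what are columns of the CPU matrix as contiguous rows.
 */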
686
687 /*! \brief \libinternal
688  * (Re-)initializes all the PME GPU data related to the grid size and cut-off.
689  *
690  * \param[in] pmeGpu            The PME GPU structure.
691  */
692 static void pme_gpu_reinit_grids(PmeGpu *pmeGpu)
693 {
694     auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
695     kernelParamsPtr->grid.ewaldFactor = (M_PI * M_PI) / (pmeGpu->common->ewaldcoeff_q * pmeGpu->common->ewaldcoeff_q);
696
697     /* The grid size variants */
698     for (int i = 0; i < DIM; i++)
699     {
700         kernelParamsPtr->grid.realGridSize[i]       = pmeGpu->common->nk[i];
701         kernelParamsPtr->grid.realGridSizeFP[i]     = static_cast<float>(kernelParamsPtr->grid.realGridSize[i]);
702         kernelParamsPtr->grid.realGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
703
704         // The complex grid currently uses no padding;
705         // if it starts to do so, then another test should be added for that
706         kernelParamsPtr->grid.complexGridSize[i]       = kernelParamsPtr->grid.realGridSize[i];
707         kernelParamsPtr->grid.complexGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
708     }
709     /* FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
710     if (!pme_gpu_performs_FFT(pmeGpu))
711     {
712         // This allows the GPU spreading grid and the CPU fftgrid to have the same layout, so that the data can be copied directly
713         kernelParamsPtr->grid.realGridSizePadded[ZZ] = (kernelParamsPtr->grid.realGridSize[ZZ] / 2 + 1) * 2;
714     }
715
716     /* GPU FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
717     kernelParamsPtr->grid.complexGridSize[ZZ] /= 2;
718     kernelParamsPtr->grid.complexGridSize[ZZ]++;
719     kernelParamsPtr->grid.complexGridSizePadded[ZZ] = kernelParamsPtr->grid.complexGridSize[ZZ];
720
721     pme_gpu_realloc_and_copy_fract_shifts(pmeGpu);
722     pme_gpu_realloc_and_copy_bspline_values(pmeGpu);
723     pme_gpu_realloc_grids(pmeGpu);
724     pme_gpu_reinit_3dfft(pmeGpu);
725 }
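/* Size sketch (illustrative grid): for a 64 x 64 x 64 real grid the complex grid becomes
 * 64 x 64 x 33, following the real-to-complex relation n -> n / 2 + 1 in the minor (Z) dimension;
 * when the FFT is done on the CPU instead, the real grid's padded Z size becomes
 * (64 / 2 + 1) * 2 = 66 so that the GPU spread grid and the CPU fftgrid share one layout.
 */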
726
727 /* Several GPU functions that refer to the CPU PME data live here.
728  * We would like to keep these away from the GPU-framework specific code for clarity,
729  * as well as compilation issues with MPI.
730  */
731
732 /*! \brief \libinternal
733  * Copies everything useful from the PME CPU to the PME GPU structure.
734  * The goal is to minimize interaction with the PME CPU structure in the GPU code.
735  *
736  * \param[in] pme         The PME structure.
737  */
738 static void pme_gpu_copy_common_data_from(const gmx_pme_t *pme)
739 {
740     /* TODO: Consider refactoring the CPU PME code to use the same structure,
741      * so that this function becomes 2 lines */
742     PmeGpu *pmeGpu             = pme->gpu;
743     pmeGpu->common->ngrids        = pme->ngrids;
744     pmeGpu->common->epsilon_r     = pme->epsilon_r;
745     pmeGpu->common->ewaldcoeff_q  = pme->ewaldcoeff_q;
746     pmeGpu->common->nk[XX]        = pme->nkx;
747     pmeGpu->common->nk[YY]        = pme->nky;
748     pmeGpu->common->nk[ZZ]        = pme->nkz;
749     pmeGpu->common->pme_order     = pme->pme_order;
750     for (int i = 0; i < DIM; i++)
751     {
752         pmeGpu->common->bsp_mod[i].assign(pme->bsp_mod[i], pme->bsp_mod[i] + pmeGpu->common->nk[i]);
753     }
754     const int cellCount = c_pmeNeighborUnitcellCount;
755     pmeGpu->common->fsh.resize(0);
756     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshx, pme->fshx + cellCount * pme->nkx);
757     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshy, pme->fshy + cellCount * pme->nky);
758     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshz, pme->fshz + cellCount * pme->nkz);
759     pmeGpu->common->nn.resize(0);
760     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnx, pme->nnx + cellCount * pme->nkx);
761     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nny, pme->nny + cellCount * pme->nky);
762     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnz, pme->nnz + cellCount * pme->nkz);
763     pmeGpu->common->runMode   = pme->runMode;
764     pmeGpu->common->boxScaler = pme->boxScaler;
765 }
766
767 /*! \libinternal \brief
768  * Initializes the PME GPU data at the beginning of the run.
769  * TODO: this should become PmeGpu::PmeGpu()
770  *
771  * \param[in,out] pme            The PME structure.
772  * \param[in,out] gpuInfo        The GPU information structure.
773  * \param[in]     pmeGpuProgram  The handle to the program/kernel data created outside (e.g. in unit tests/runner)
774  */
775 static void pme_gpu_init(gmx_pme_t               *pme,
776                          const gmx_device_info_t *gpuInfo,
777                          PmeGpuProgramHandle      pmeGpuProgram)
778 {
779     pme->gpu          = new PmeGpu();
780     PmeGpu *pmeGpu = pme->gpu;
781     changePinningPolicy(&pmeGpu->staging.h_forces, pme_get_pinning_policy());
782     pmeGpu->common = std::make_shared<PmeShared>();
783
784     /* These settings are set here for the whole run; dynamic ones are set in pme_gpu_reinit() */
785     /* A convenience variable. */
786     pmeGpu->settings.useDecomposition = (pme->nnodes == 1);
787     /* TODO: CPU gather with GPU spread is broken due to different theta/dtheta layout. */
788     pmeGpu->settings.performGPUGather = true;
789
790     pme_gpu_set_testing(pmeGpu, false);
791
792     pmeGpu->deviceInfo = gpuInfo;
793     GMX_ASSERT(pmeGpuProgram != nullptr, "GPU kernels must be already compiled");
794     pmeGpu->programHandle_ = pmeGpuProgram;
795
796     pme_gpu_init_internal(pmeGpu);
797     pme_gpu_alloc_energy_virial(pmeGpu);
798
799     pme_gpu_copy_common_data_from(pme);
800
801     GMX_ASSERT(pmeGpu->common->epsilon_r != 0.0f, "PME GPU: bad electrostatic coefficient");
802
803     auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
804     kernelParamsPtr->constants.elFactor = ONE_4PI_EPS0 / pmeGpu->common->epsilon_r;
805 }
806
807 void pme_gpu_transform_spline_atom_data(const PmeGpu *pmeGpu, const pme_atomcomm_t *atc,
808                                         PmeSplineDataType type, int dimIndex, PmeLayoutTransform transform)
809 {
810     // The GPU atom spline data is laid out in a different way currently than the CPU one.
811     // This function converts the data from GPU to CPU layout (in the host memory).
812     // It is only intended for testing purposes so far.
813     // Ideally we should use similar layouts on CPU and GPU if we care about mixed modes and their performance
814     // (e.g. spreading on GPU, gathering on CPU).
815     GMX_RELEASE_ASSERT(atc->nthread == 1, "Only the serial PME data layout is supported");
816     const uintmax_t threadIndex  = 0;
817     const auto      atomCount    = pme_gpu_get_kernel_params_base_ptr(pmeGpu)->atoms.nAtoms;
818     const auto      atomsPerWarp = pme_gpu_get_atoms_per_warp(pmeGpu);
819     const auto      pmeOrder     = pmeGpu->common->pme_order;
820
821     real           *cpuSplineBuffer;
822     float          *h_splineBuffer;
823     switch (type)
824     {
825         case PmeSplineDataType::Values:
826             cpuSplineBuffer = atc->spline[threadIndex].theta[dimIndex];
827             h_splineBuffer  = pmeGpu->staging.h_theta;
828             break;
829
830         case PmeSplineDataType::Derivatives:
831             cpuSplineBuffer = atc->spline[threadIndex].dtheta[dimIndex];
832             h_splineBuffer  = pmeGpu->staging.h_dtheta;
833             break;
834
835         default:
836             GMX_THROW(gmx::InternalError("Unknown spline data type"));
837     }
838
839     for (auto atomIndex = 0; atomIndex < atomCount; atomIndex++)
840     {
841         for (auto orderIndex = 0; orderIndex < pmeOrder; orderIndex++)
842         {
843             const auto gpuValueIndex = getSplineParamFullIndex(pmeOrder, orderIndex, dimIndex, atomIndex, atomsPerWarp);
844             const auto cpuValueIndex = atomIndex * pmeOrder + orderIndex;
845             GMX_ASSERT(cpuValueIndex < atomCount * pmeOrder, "Atom spline data index out of bounds (while transforming GPU data layout for host)");
846             switch (transform)
847             {
848                 case PmeLayoutTransform::GpuToHost:
849                     cpuSplineBuffer[cpuValueIndex] = h_splineBuffer[gpuValueIndex];
850                     break;
851
852                 case PmeLayoutTransform::HostToGpu:
853                     h_splineBuffer[gpuValueIndex] = cpuSplineBuffer[cpuValueIndex];
854                     break;
855
856                 default:
857                     GMX_THROW(gmx::InternalError("Unknown layout transform"));
858             }
859         }
860     }
861 }
862
863 void pme_gpu_get_real_grid_sizes(const PmeGpu *pmeGpu, gmx::IVec *gridSize, gmx::IVec *paddedGridSize)
864 {
865     GMX_ASSERT(gridSize != nullptr, "");
866     GMX_ASSERT(paddedGridSize != nullptr, "");
867     GMX_ASSERT(pmeGpu != nullptr, "");
868     auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
869     for (int i = 0; i < DIM; i++)
870     {
871         (*gridSize)[i]       = kernelParamsPtr->grid.realGridSize[i];
872         (*paddedGridSize)[i] = kernelParamsPtr->grid.realGridSizePadded[i];
873     }
874 }
875
876 void pme_gpu_reinit(gmx_pme_t               *pme,
877                     const gmx_device_info_t *gpuInfo,
878                     PmeGpuProgramHandle      pmeGpuProgram)
879 {
880     if (!pme_gpu_active(pme))
881     {
882         return;
883     }
884
885     if (!pme->gpu)
886     {
887         /* First-time initialization */
888         pme_gpu_init(pme, gpuInfo, pmeGpuProgram);
889     }
890     else
891     {
892         /* After this call nothing in the GPU code should refer to the gmx_pme_t *pme itself - until the next pme_gpu_reinit */
893         pme_gpu_copy_common_data_from(pme);
894     }
895     /* GPU FFT will only get used for a single rank. */
896     pme->gpu->settings.performGPUFFT   = (pme->gpu->common->runMode == PmeRunMode::GPU) && !pme_gpu_uses_dd(pme->gpu);
897     pme->gpu->settings.performGPUSolve = (pme->gpu->common->runMode == PmeRunMode::GPU);
898
899     /* Reinit active timers */
900     pme_gpu_reinit_timings(pme->gpu);
901
902     pme_gpu_reinit_grids(pme->gpu);
903     // Note: if timing the reinit launch overhead becomes more relevant
904     // (e.g. with regular PP-PME re-balancing), we should pass wcycle here.
905     pme_gpu_reinit_computation(pme, nullptr);
906     /* Clear the previous box - doesn't hurt, and forces the PME CPU recipbox
907      * update for mixed mode on grid switch. TODO: use shared recipbox field.
908      */
909     std::memset(pme->gpu->common->previousBox, 0, sizeof(pme->gpu->common->previousBox));
910 }
911
912 void pme_gpu_destroy(PmeGpu *pmeGpu)
913 {
914     /* Free lots of data */
915     pme_gpu_free_energy_virial(pmeGpu);
916     pme_gpu_free_bspline_values(pmeGpu);
917     pme_gpu_free_forces(pmeGpu);
918     pme_gpu_free_coordinates(pmeGpu);
919     pme_gpu_free_coefficients(pmeGpu);
920     pme_gpu_free_spline_data(pmeGpu);
921     pme_gpu_free_grid_indices(pmeGpu);
922     pme_gpu_free_fract_shifts(pmeGpu);
923     pme_gpu_free_grids(pmeGpu);
924
925     pme_gpu_destroy_3dfft(pmeGpu);
926
927     /* Free the GPU-framework specific data last */
928     pme_gpu_destroy_specific(pmeGpu);
929
930     delete pmeGpu;
931 }
932
933 void pme_gpu_reinit_atoms(PmeGpu *pmeGpu, const int nAtoms, const real *charges)
934 {
935     auto      *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
936     kernelParamsPtr->atoms.nAtoms = nAtoms;
937     const int  alignment = pme_gpu_get_atom_data_alignment(pmeGpu);
938     pmeGpu->nAtomsPadded = ((nAtoms + alignment - 1) / alignment) * alignment;
939     const int  nAtomsAlloc   = c_usePadding ? pmeGpu->nAtomsPadded : nAtoms;
940     const bool haveToRealloc = (pmeGpu->nAtomsAlloc < nAtomsAlloc); /* This check might be redundant, but is logical */
941     pmeGpu->nAtomsAlloc = nAtomsAlloc;
942
943 #if GMX_DOUBLE
944     GMX_RELEASE_ASSERT(false, "Only single precision supported");
945     GMX_UNUSED_VALUE(charges);
946 #else
947     pme_gpu_realloc_and_copy_input_coefficients(pmeGpu, reinterpret_cast<const float *>(charges));
948     /* Could also be checked for haveToRealloc, but the copy always needs to be performed */
949 #endif
950
951     if (haveToRealloc)
952     {
953         pme_gpu_realloc_coordinates(pmeGpu);
954         pme_gpu_realloc_forces(pmeGpu);
955         pme_gpu_realloc_spline_data(pmeGpu);
956         pme_gpu_realloc_grid_indices(pmeGpu);
957     }
958 }
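/* Padding sketch (illustrative numbers): with an atom-data alignment of 64 and nAtoms = 3000,
 * nAtomsPadded = ((3000 + 63) / 64) * 64 = 3008; with c_usePadding enabled the atom buffers are
 * sized for 3008 atoms and the 8 padding charges/coordinates are cleared to zero by the
 * reallocation helpers called from pme_gpu_reinit_atoms().
 */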
959
960 void pme_gpu_3dfft(const PmeGpu *pmeGpu, gmx_fft_direction dir, int grid_index)
961 {
962     int   timerId = (dir == GMX_FFT_REAL_TO_COMPLEX) ? gtPME_FFT_R2C : gtPME_FFT_C2R;
963
964     pme_gpu_start_timing(pmeGpu, timerId);
965     pmeGpu->archSpecific->fftSetup[grid_index]->perform3dFft(dir, pme_gpu_fetch_timing_event(pmeGpu, timerId));
966     pme_gpu_stop_timing(pmeGpu, timerId);
967 }
968
969 /*! \brief
970  * Given possibly large \p blockCount, returns a compact 1D or 2D grid for kernel scheduling,
971  * to minimize number of unused blocks.
972  */
973 std::pair<int, int> inline pmeGpuCreateGrid(const PmeGpu *pmeGpu, int blockCount)
974 {
975     // How many maximum widths in X do we need (hopefully just one)
976     const int minRowCount = (blockCount + pmeGpu->maxGridWidthX - 1) / pmeGpu->maxGridWidthX;
977     // Trying to make things even
978     const int colCount = (blockCount + minRowCount - 1) / minRowCount;
979     GMX_ASSERT((colCount * minRowCount - blockCount) >= 0, "pmeGpuCreateGrid: totally wrong");
980     GMX_ASSERT((colCount * minRowCount - blockCount) < minRowCount, "pmeGpuCreateGrid: excessive blocks");
981     return std::pair<int, int>(colCount, minRowCount);
982 }
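/* Worked example (illustrative numbers): with maxGridWidthX = 65535 and blockCount = 100000,
 * minRowCount = (100000 + 65534) / 65535 = 2 and colCount = (100000 + 1) / 2 = 50000, giving a
 * 50000 x 2 launch grid with exactly 50000 * 2 - 100000 = 0 unused blocks.
 */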
983
984 void pme_gpu_spread(const PmeGpu    *pmeGpu,
985                     int gmx_unused   gridIndex,
986                     real            *h_grid,
987                     bool             computeSplines,
988                     bool             spreadCharges)
989 {
990     GMX_ASSERT(computeSplines || spreadCharges, "PME spline/spread kernel has invalid input (nothing to do)");
991     const auto   *kernelParamsPtr = pmeGpu->kernelParams.get();
992     GMX_ASSERT(kernelParamsPtr->atoms.nAtoms > 0, "No atom data in PME GPU spread");
993
994     const size_t blockSize  = pmeGpu->programHandle_->impl_->spreadWorkGroupSize;
995
996     const int    order         = pmeGpu->common->pme_order;
997     const int    atomsPerBlock = blockSize / PME_SPREADGATHER_THREADS_PER_ATOM;
998     // TODO: pick smaller block size in runtime if needed
999     // (e.g. on 660 Ti where 50% occupancy is ~25% faster than 100% occupancy with RNAse (~17.8k atoms))
1000     // If doing so, change atomsPerBlock in the kernels as well.
1001     // TODO: test varying block sizes on modern arch-s as well
1002     // TODO: also consider using cudaFuncSetCacheConfig() for preferring shared memory on older architectures
1003     // (mostly for spline data, together with varying the PME_GPU_PARALLEL_SPLINE define)
1004     GMX_ASSERT(!c_usePadding || !(PME_ATOM_DATA_ALIGNMENT % atomsPerBlock), "inconsistent atom data padding vs. spreading block size");
1005
1006     const int          blockCount = pmeGpu->nAtomsPadded / atomsPerBlock;
1007     auto               dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1008
1009     KernelLaunchConfig config;
1010     config.blockSize[0] = config.blockSize[1] = order;
1011     config.blockSize[2] = atomsPerBlock;
1012     config.gridSize[0]  = dimGrid.first;
1013     config.gridSize[1]  = dimGrid.second;
1014     config.stream       = pmeGpu->archSpecific->pmeStream;
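    /* Launch-shape sketch (illustrative numbers): with order = 4 and a spread work-group size of
     * 256, atomsPerBlock = 256 / PME_SPREADGATHER_THREADS_PER_ATOM (e.g. 256 / 16 = 16), so each
     * block is 4 x 4 x 16 threads handling 16 atoms, and blockCount = nAtomsPadded / atomsPerBlock
     * blocks are arranged into a compact grid by pmeGpuCreateGrid() above.
     */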
1015
1016     if (order != 4)
1017     {
1018         GMX_THROW(gmx::NotImplementedError("The code for pme_order != 4 was not implemented!"));
1019     }
1020
1021     int  timingId;
1022     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1023     if (computeSplines)
1024     {
1025         if (spreadCharges)
1026         {
1027             timingId  = gtPME_SPLINEANDSPREAD;
1028             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernel;
1029         }
1030         else
1031         {
1032             timingId  = gtPME_SPLINE;
1033             kernelPtr = pmeGpu->programHandle_->impl_->splineKernel;
1034         }
1035     }
1036     else
1037     {
1038         timingId  = gtPME_SPREAD;
1039         kernelPtr = pmeGpu->programHandle_->impl_->spreadKernel;
1040     }
1041
1042     pme_gpu_start_timing(pmeGpu, timingId);
1043     auto      *timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1044 #if c_canEmbedBuffers
1045     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1046 #else
1047     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr,
1048                                                       &kernelParamsPtr->atoms.d_theta,
1049                                                       &kernelParamsPtr->atoms.d_dtheta,
1050                                                       &kernelParamsPtr->atoms.d_gridlineIndices,
1051                                                       &kernelParamsPtr->grid.d_realGrid,
1052                                                       &kernelParamsPtr->grid.d_fractShiftsTable,
1053                                                       &kernelParamsPtr->grid.d_gridlineIndicesTable,
1054                                                       &kernelParamsPtr->atoms.d_coefficients,
1055                                                       &kernelParamsPtr->atoms.d_coordinates
1056                                                       );
1057 #endif
1058
1059     launchGpuKernel(kernelPtr, config, timingEvent, "PME spline/spread", kernelArgs);
1060     pme_gpu_stop_timing(pmeGpu, timingId);
1061
1062     const bool copyBackGrid = spreadCharges && (pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_FFT(pmeGpu));
1063     if (copyBackGrid)
1064     {
1065         pme_gpu_copy_output_spread_grid(pmeGpu, h_grid);
1066     }
1067     const bool copyBackAtomData = computeSplines && (pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_gather(pmeGpu));
1068     if (copyBackAtomData)
1069     {
1070         pme_gpu_copy_output_spread_atom_data(pmeGpu);
1071     }
1072 }
1073
1074 void pme_gpu_solve(const PmeGpu *pmeGpu, t_complex *h_grid,
1075                    GridOrdering gridOrdering, bool computeEnergyAndVirial)
1076 {
1077     const bool   copyInputAndOutputGrid = pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_FFT(pmeGpu);
1078
1079     auto        *kernelParamsPtr = pmeGpu->kernelParams.get();
1080
1081     float       *h_gridFloat = reinterpret_cast<float *>(h_grid);
1082     if (copyInputAndOutputGrid)
1083     {
1084         copyToDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, h_gridFloat,
1085                            0, pmeGpu->archSpecific->complexGridSize,
1086                            pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
1087     }
1088
1089     int majorDim = -1, middleDim = -1, minorDim = -1;
1090     switch (gridOrdering)
1091     {
1092         case GridOrdering::YZX:
1093             majorDim  = YY;
1094             middleDim = ZZ;
1095             minorDim  = XX;
1096             break;
1097
1098         case GridOrdering::XYZ:
1099             majorDim  = XX;
1100             middleDim = YY;
1101             minorDim  = ZZ;
1102             break;
1103
1104         default:
1105             GMX_ASSERT(false, "Implement grid ordering here and below for the kernel launch");
1106     }
1107
1108     const size_t maxBlockSize      = pmeGpu->programHandle_->impl_->solveMaxWorkGroupSize;
1109
1110     const int    gridLineSize       = pmeGpu->kernelParams->grid.complexGridSize[minorDim];
1111     const int    gridLinesPerBlock  = std::max(maxBlockSize / gridLineSize, 1UL);
1112     const int    blocksPerGridLine  = (gridLineSize + maxBlockSize - 1) / maxBlockSize;
1113     const int    cellsPerBlock      = gridLineSize * gridLinesPerBlock;
1114     const size_t warpSize           = pmeGpu->programHandle_->impl_->warpSize;
1115     const int    blockSize          = (cellsPerBlock + warpSize - 1) / warpSize * warpSize;
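    /* Sizing sketch (illustrative numbers): for a complex grid line of 33 cells in the minor
     * dimension, a maximum work-group size of 256 and a warp size of 32:
     *     gridLinesPerBlock = max(256 / 33, 1)      = 7
     *     blocksPerGridLine = (33 + 255) / 256      = 1
     *     cellsPerBlock     = 33 * 7                = 231
     *     blockSize         = 231 rounded to warps  = 256
     * so each block solves 7 whole grid lines, padded up to a full number of warps.
     */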
1116
1117
1118     KernelLaunchConfig config;
1119     config.blockSize[0] = blockSize;
1120     config.gridSize[0]  = blocksPerGridLine;
1121     // rounding up to full warps so that shuffle operations produce defined results
1122     config.gridSize[1]  = (pmeGpu->kernelParams->grid.complexGridSize[middleDim] + gridLinesPerBlock - 1) / gridLinesPerBlock;
1123     config.gridSize[2]  = pmeGpu->kernelParams->grid.complexGridSize[majorDim];
1124     config.stream       = pmeGpu->archSpecific->pmeStream;
1125
1126     int  timingId = gtPME_SOLVE;
1127     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1128     if (gridOrdering == GridOrdering::YZX)
1129     {
1130         kernelPtr = computeEnergyAndVirial ?
1131             pmeGpu->programHandle_->impl_->solveYZXEnergyKernel :
1132             pmeGpu->programHandle_->impl_->solveYZXKernel;
1133     }
1134     else if (gridOrdering == GridOrdering::XYZ)
1135     {
1136         kernelPtr = computeEnergyAndVirial ?
1137             pmeGpu->programHandle_->impl_->solveXYZEnergyKernel :
1138             pmeGpu->programHandle_->impl_->solveXYZKernel;
1139     }
1140
1141     pme_gpu_start_timing(pmeGpu, timingId);
1142     auto      *timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1143 #if c_canEmbedBuffers
1144     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1145 #else
1146     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr,
1147                                                       &kernelParamsPtr->grid.d_splineModuli,
1148                                                       &kernelParamsPtr->constants.d_virialAndEnergy,
1149                                                       &kernelParamsPtr->grid.d_fourierGrid);
1150 #endif
1151     launchGpuKernel(kernelPtr, config, timingEvent, "PME solve", kernelArgs);
1152     pme_gpu_stop_timing(pmeGpu, timingId);
1153
1154     if (computeEnergyAndVirial)
1155     {
1156         copyFromDeviceBuffer(pmeGpu->staging.h_virialAndEnergy, &kernelParamsPtr->constants.d_virialAndEnergy,
1157                              0, c_virialAndEnergyCount,
1158                              pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
1159     }
1160
1161     if (copyInputAndOutputGrid)
1162     {
1163         copyFromDeviceBuffer(h_gridFloat, &kernelParamsPtr->grid.d_fourierGrid,
1164                              0, pmeGpu->archSpecific->complexGridSize,
1165                              pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
1166     }
1167 }
1168
1169 void pme_gpu_gather(PmeGpu                *pmeGpu,
1170                     PmeForceOutputHandling forceTreatment,
1171                     const float           *h_grid
1172                     )
1173 {
1174     /* Copying the input CPU forces for reduction */
1175     if (forceTreatment != PmeForceOutputHandling::Set)
1176     {
1177         pme_gpu_copy_input_forces(pmeGpu);
1178     }
1179
1180     const int    order           = pmeGpu->common->pme_order;
1181     const auto  *kernelParamsPtr = pmeGpu->kernelParams.get();
1182
1183     if (!pme_gpu_performs_FFT(pmeGpu) || pme_gpu_is_testing(pmeGpu))
1184     {
1185         pme_gpu_copy_input_gather_grid(pmeGpu, const_cast<float *>(h_grid));
1186     }
1187
1188     if (pme_gpu_is_testing(pmeGpu))
1189     {
1190         pme_gpu_copy_input_gather_atom_data(pmeGpu);
1191     }
1192
1193     const size_t blockSize     = pmeGpu->programHandle_->impl_->gatherWorkGroupSize;
1194     const int    atomsPerBlock = blockSize / PME_SPREADGATHER_THREADS_PER_ATOM;
1195     GMX_ASSERT(!c_usePadding || !(PME_ATOM_DATA_ALIGNMENT % atomsPerBlock), "inconsistent atom data padding vs. gathering block size");
1196
1197     const int          blockCount = pmeGpu->nAtomsPadded / atomsPerBlock;
1198     auto               dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1199
1200     KernelLaunchConfig config;
1201     config.blockSize[0] = config.blockSize[1] = order;
1202     config.blockSize[2] = atomsPerBlock;
1203     config.gridSize[0]  = dimGrid.first;
1204     config.gridSize[1]  = dimGrid.second;
1205     config.stream       = pmeGpu->archSpecific->pmeStream;
1206
1207     if (order != 4)
1208     {
1209         GMX_THROW(gmx::NotImplementedError("The code for pme_order != 4 was not implemented!"));
1210     }
1211
1212     // TODO test different cache configs
1213
1214     int  timingId = gtPME_GATHER;
1215     // TODO design kernel selection getters and make PmeGpu a friend of PmeGpuProgramImpl
1216     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = (forceTreatment == PmeForceOutputHandling::Set) ?
1217         pmeGpu->programHandle_->impl_->gatherKernel :
1218         pmeGpu->programHandle_->impl_->gatherReduceWithInputKernel;
1219
1220     pme_gpu_start_timing(pmeGpu, timingId);
1221     auto      *timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1222 #if c_canEmbedBuffers
1223     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1224 #else
1225     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr,
1226                                                       &kernelParamsPtr->atoms.d_coefficients,
1227                                                       &kernelParamsPtr->grid.d_realGrid,
1228                                                       &kernelParamsPtr->atoms.d_theta,
1229                                                       &kernelParamsPtr->atoms.d_dtheta,
1230                                                       &kernelParamsPtr->atoms.d_gridlineIndices,
1231                                                       &kernelParamsPtr->atoms.d_forces);
1232 #endif
1233     launchGpuKernel(kernelPtr, config, timingEvent, "PME gather", kernelArgs);
1234     pme_gpu_stop_timing(pmeGpu, timingId);
1235
1236     pme_gpu_copy_output_forces(pmeGpu);
1237 }