src/gromacs/ewald/pme_gpu_internal.cpp
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2016,2017,2018,2019, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35
36 /*! \internal \file
37  *
38  * \brief This file contains internal function implementations
39  * for performing the PME calculations on GPU.
40  *
41  * Note that this file is compiled as regular C++ source in OpenCL builds, but
42  * it is treated as CUDA source in CUDA-enabled GPU builds.
43  *
44  * \author Aleksei Iupinov <a.yupinov@gmail.com>
45  * \ingroup module_ewald
46  */
47
48 #include "gmxpre.h"
49
50 #include "pme_gpu_internal.h"
51
52 #include "config.h"
53
54 #include <list>
55 #include <memory>
56 #include <string>
57
58 #include "gromacs/ewald/ewald_utils.h"
59 #include "gromacs/gpu_utils/gpu_utils.h"
60 #include "gromacs/math/invertmatrix.h"
61 #include "gromacs/math/units.h"
62 #include "gromacs/timing/gpu_timing.h"
63 #include "gromacs/utility/exceptions.h"
64 #include "gromacs/utility/fatalerror.h"
65 #include "gromacs/utility/gmxassert.h"
66 #include "gromacs/utility/logger.h"
67 #include "gromacs/utility/stringutil.h"
68
69 #if GMX_GPU == GMX_GPU_CUDA
70 #include "gromacs/gpu_utils/pmalloc_cuda.h"
71
72 #include "pme.cuh"
73 #elif GMX_GPU == GMX_GPU_OPENCL
74 #include "gromacs/gpu_utils/gmxopencl.h"
75 #endif
76
77 #include "gromacs/ewald/pme.h"
78
79 #include "pme_gpu_3dfft.h"
80 #include "pme_gpu_constants.h"
81 #include "pme_gpu_program_impl.h"
82 #include "pme_gpu_timings.h"
83 #include "pme_gpu_types.h"
84 #include "pme_gpu_types_host.h"
85 #include "pme_gpu_types_host_impl.h"
86 #include "pme_gpu_utils.h"
87 #include "pme_grid.h"
88 #include "pme_internal.h"
89 #include "pme_solve.h"
90
91 /*! \internal \brief
92  * Wrapper for getting a pointer to the plain C++ part of the GPU kernel parameters structure.
93  *
94  * \param[in] pmeGpu  The PME GPU structure.
95  * \returns The pointer to the kernel parameters.
96  */
97 static PmeGpuKernelParamsBase *pme_gpu_get_kernel_params_base_ptr(const PmeGpu *pmeGpu)
98 {
99     // reinterpret_cast is needed because the derived CUDA structure is not known in this file
100     auto *kernelParamsPtr = reinterpret_cast<PmeGpuKernelParamsBase *>(pmeGpu->kernelParams.get());
101     return kernelParamsPtr;
102 }
103
104 int pme_gpu_get_atom_data_alignment(const PmeGpu * /*unused*/)
105 {
106     // TODO: this can be simplified, as c_pmeAtomDataAlignment is now constant
107     return c_pmeAtomDataAlignment;
108 }
109
110 int pme_gpu_get_atoms_per_warp(const PmeGpu *pmeGpu)
111 {
112     return pmeGpu->programHandle_->impl_->warpSize / c_pmeSpreadGatherThreadsPerAtom;
113 }
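/* Worked example (an illustration only, not part of the build): with order-4 PME,
 * c_pmeSpreadGatherThreadsPerAtom is assumed here to be order * order = 16, so
 *     warpSize 32  ->  32 / 16 = 2 atoms per warp
 *     warpSize 64  ->  64 / 16 = 4 atoms per warp
 * The function above is the authoritative computation; the value 16 is an
 * assumption of this sketch (see pme_gpu_constants.h).
 */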
114
115 void pme_gpu_synchronize(const PmeGpu *pmeGpu)
116 {
117     gpuStreamSynchronize(pmeGpu->archSpecific->pmeStream);
118 }
119
120 void pme_gpu_alloc_energy_virial(PmeGpu *pmeGpu)
121 {
122     const size_t energyAndVirialSize = c_virialAndEnergyCount * sizeof(float);
123     allocateDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy, c_virialAndEnergyCount, pmeGpu->archSpecific->context);
124     pmalloc(reinterpret_cast<void **>(&pmeGpu->staging.h_virialAndEnergy), energyAndVirialSize);
125 }
126
127 void pme_gpu_free_energy_virial(PmeGpu *pmeGpu)
128 {
129     freeDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy);
130     pfree(pmeGpu->staging.h_virialAndEnergy);
131     pmeGpu->staging.h_virialAndEnergy = nullptr;
132 }
133
134 void pme_gpu_clear_energy_virial(const PmeGpu *pmeGpu)
135 {
136     clearDeviceBufferAsync(&pmeGpu->kernelParams->constants.d_virialAndEnergy, 0,
137                            c_virialAndEnergyCount, pmeGpu->archSpecific->pmeStream);
138 }
139
140 void pme_gpu_realloc_and_copy_bspline_values(PmeGpu *pmeGpu)
141 {
142     const int splineValuesOffset[DIM] = {
143         0,
144         pmeGpu->kernelParams->grid.realGridSize[XX],
145         pmeGpu->kernelParams->grid.realGridSize[XX] + pmeGpu->kernelParams->grid.realGridSize[YY]
146     };
147     memcpy(&pmeGpu->kernelParams->grid.splineValuesOffset, &splineValuesOffset, sizeof(splineValuesOffset));
148
149     const int newSplineValuesSize = pmeGpu->kernelParams->grid.realGridSize[XX] +
150         pmeGpu->kernelParams->grid.realGridSize[YY] +
151         pmeGpu->kernelParams->grid.realGridSize[ZZ];
152     const bool shouldRealloc = (newSplineValuesSize > pmeGpu->archSpecific->splineValuesSize);
153     reallocateDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, newSplineValuesSize,
154                            &pmeGpu->archSpecific->splineValuesSize, &pmeGpu->archSpecific->splineValuesSizeAlloc, pmeGpu->archSpecific->context);
155     if (shouldRealloc)
156     {
157         /* Reallocate the host buffer */
158         pfree(pmeGpu->staging.h_splineModuli);
159         pmalloc(reinterpret_cast<void **>(&pmeGpu->staging.h_splineModuli), newSplineValuesSize * sizeof(float));
160     }
161     for (int i = 0; i < DIM; i++)
162     {
163         memcpy(pmeGpu->staging.h_splineModuli + splineValuesOffset[i], pmeGpu->common->bsp_mod[i].data(), pmeGpu->common->bsp_mod[i].size() * sizeof(float));
164     }
165     /* TODO: pin original buffer instead! */
166     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, pmeGpu->staging.h_splineModuli,
167                        0, newSplineValuesSize,
168                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
169 }
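/* Layout illustration (a sketch with assumed grid sizes): for realGridSize = {60, 64, 48}
 * the three B-spline moduli arrays are packed back to back in d_splineModuli and
 * h_splineModuli as
 *     splineValuesOffset  = {0, 60, 124}
 *     newSplineValuesSize = 60 + 64 + 48 = 172 floats
 * i.e. bsp_mod[XX] occupies [0, 60), bsp_mod[YY] occupies [60, 124), and
 * bsp_mod[ZZ] occupies [124, 172).
 */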
170
171 void pme_gpu_free_bspline_values(const PmeGpu *pmeGpu)
172 {
173     pfree(pmeGpu->staging.h_splineModuli);
174     freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli);
175 }
176
177 void pme_gpu_realloc_forces(PmeGpu *pmeGpu)
178 {
179     const size_t newForcesSize = pmeGpu->nAtomsAlloc * DIM;
180     GMX_ASSERT(newForcesSize > 0, "Bad number of atoms in PME GPU");
181     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, newForcesSize,
182                            &pmeGpu->archSpecific->forcesSize, &pmeGpu->archSpecific->forcesSizeAlloc, pmeGpu->archSpecific->context);
183     pmeGpu->staging.h_forces.reserveWithPadding(pmeGpu->nAtomsAlloc);
184     pmeGpu->staging.h_forces.resizeWithPadding(pmeGpu->kernelParams->atoms.nAtoms);
185 }
186
187 void pme_gpu_free_forces(const PmeGpu *pmeGpu)
188 {
189     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces);
190 }
191
192 void pme_gpu_copy_input_forces(PmeGpu *pmeGpu)
193 {
194     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
195     float *h_forcesFloat = reinterpret_cast<float *>(pmeGpu->staging.h_forces.data());
196     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, h_forcesFloat,
197                        0, DIM * pmeGpu->kernelParams->atoms.nAtoms,
198                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
199 }
200
201 void pme_gpu_copy_output_forces(PmeGpu *pmeGpu)
202 {
203     GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
204     float *h_forcesFloat = reinterpret_cast<float *>(pmeGpu->staging.h_forces.data());
205     copyFromDeviceBuffer(h_forcesFloat, &pmeGpu->kernelParams->atoms.d_forces,
206                          0, DIM * pmeGpu->kernelParams->atoms.nAtoms,
207                          pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
208 }
209
210 void pme_gpu_realloc_coordinates(const PmeGpu *pmeGpu)
211 {
212     const size_t newCoordinatesSize = pmeGpu->nAtomsAlloc * DIM;
213     GMX_ASSERT(newCoordinatesSize > 0, "Bad number of atoms in PME GPU");
214     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coordinates, newCoordinatesSize,
215                            &pmeGpu->archSpecific->coordinatesSize, &pmeGpu->archSpecific->coordinatesSizeAlloc, pmeGpu->archSpecific->context);
216     if (c_usePadding)
217     {
218         const size_t paddingIndex = DIM * pmeGpu->kernelParams->atoms.nAtoms;
219         const size_t paddingCount = DIM * pmeGpu->nAtomsAlloc - paddingIndex;
220         if (paddingCount > 0)
221         {
222             clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coordinates, paddingIndex,
223                                    paddingCount, pmeGpu->archSpecific->pmeStream);
224         }
225     }
226 }
227
228 void pme_gpu_copy_input_coordinates(const PmeGpu *pmeGpu, const rvec *h_coordinates)
229 {
230     GMX_ASSERT(h_coordinates, "Bad host-side coordinate buffer in PME GPU");
231 #if GMX_DOUBLE
232     GMX_RELEASE_ASSERT(false, "Only single precision is supported");
233     GMX_UNUSED_VALUE(h_coordinates);
234 #else
235     const float *h_coordinatesFloat = reinterpret_cast<const float *>(h_coordinates);
236     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coordinates, h_coordinatesFloat,
237                        0, pmeGpu->kernelParams->atoms.nAtoms * DIM,
238                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
239     // FIXME: a sync is required here because the copied data will be used by the PP stream when a single GPU runs both PME and PP.
240     //        Remove this once proper event-based synchronization between the above H2D copy and the transform kernel is added.
241     pme_gpu_synchronize(pmeGpu);
242
243 #endif
244 }
245
246 void pme_gpu_free_coordinates(const PmeGpu *pmeGpu)
247 {
248     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coordinates);
249 }
250
251 void pme_gpu_realloc_and_copy_input_coefficients(const PmeGpu *pmeGpu, const float *h_coefficients)
252 {
253     GMX_ASSERT(h_coefficients, "Bad host-side charge buffer in PME GPU");
254     const size_t newCoefficientsSize = pmeGpu->nAtomsAlloc;
255     GMX_ASSERT(newCoefficientsSize > 0, "Bad number of atoms in PME GPU");
256     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients, newCoefficientsSize,
257                            &pmeGpu->archSpecific->coefficientsSize, &pmeGpu->archSpecific->coefficientsSizeAlloc, pmeGpu->archSpecific->context);
258     copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients, const_cast<float *>(h_coefficients),
259                        0, pmeGpu->kernelParams->atoms.nAtoms,
260                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
261     if (c_usePadding)
262     {
263         const size_t paddingIndex = pmeGpu->kernelParams->atoms.nAtoms;
264         const size_t paddingCount = pmeGpu->nAtomsAlloc - paddingIndex;
265         if (paddingCount > 0)
266         {
267             clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coefficients, paddingIndex,
268                                    paddingCount, pmeGpu->archSpecific->pmeStream);
269         }
270     }
271 }
272
273 void pme_gpu_free_coefficients(const PmeGpu *pmeGpu)
274 {
275     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients);
276 }
277
278 void pme_gpu_realloc_spline_data(PmeGpu *pmeGpu)
279 {
280     const int    order             = pmeGpu->common->pme_order;
281     const int    alignment         = pme_gpu_get_atoms_per_warp(pmeGpu);
282     const size_t nAtomsPadded      = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
283     const int    newSplineDataSize = DIM * order * nAtomsPadded;
284     GMX_ASSERT(newSplineDataSize > 0, "Bad number of atoms in PME GPU");
285     /* Two arrays of the same size */
286     const bool shouldRealloc        = (newSplineDataSize > pmeGpu->archSpecific->splineDataSize);
287     int        currentSizeTemp      = pmeGpu->archSpecific->splineDataSize;
288     int        currentSizeTempAlloc = pmeGpu->archSpecific->splineDataSizeAlloc;
289     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta, newSplineDataSize,
290                            &currentSizeTemp, &currentSizeTempAlloc, pmeGpu->archSpecific->context);
291     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta, newSplineDataSize,
292                            &pmeGpu->archSpecific->splineDataSize, &pmeGpu->archSpecific->splineDataSizeAlloc, pmeGpu->archSpecific->context);
293     // the host side reallocation
294     if (shouldRealloc)
295     {
296         pfree(pmeGpu->staging.h_theta);
297         pmalloc(reinterpret_cast<void **>(&pmeGpu->staging.h_theta), newSplineDataSize * sizeof(float));
298         pfree(pmeGpu->staging.h_dtheta);
299         pmalloc(reinterpret_cast<void **>(&pmeGpu->staging.h_dtheta), newSplineDataSize * sizeof(float));
300     }
301 }
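/* Size illustration (a sketch, assuming warpSize 32 and thus 2 atoms per warp,
 * see pme_gpu_get_atoms_per_warp()): for nAtomsAlloc = 1000
 *     nAtomsPadded      = 1000 (already a multiple of 2)
 *     newSplineDataSize = DIM * order * nAtomsPadded = 3 * 4 * 1000 = 12000 floats,
 * allocated once for d_theta and once for d_dtheta, with matching pinned host
 * staging buffers h_theta and h_dtheta.
 */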
302
303 void pme_gpu_free_spline_data(const PmeGpu *pmeGpu)
304 {
305     /* Two arrays of the same size */
306     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta);
307     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta);
308     pfree(pmeGpu->staging.h_theta);
309     pfree(pmeGpu->staging.h_dtheta);
310 }
311
312 void pme_gpu_realloc_grid_indices(PmeGpu *pmeGpu)
313 {
314     const size_t newIndicesSize = DIM * pmeGpu->nAtomsAlloc;
315     GMX_ASSERT(newIndicesSize > 0, "Bad number of atoms in PME GPU");
316     reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices, newIndicesSize,
317                            &pmeGpu->archSpecific->gridlineIndicesSize, &pmeGpu->archSpecific->gridlineIndicesSizeAlloc, pmeGpu->archSpecific->context);
318     pfree(pmeGpu->staging.h_gridlineIndices);
319     pmalloc(reinterpret_cast<void **>(&pmeGpu->staging.h_gridlineIndices), newIndicesSize * sizeof(int));
320 }
321
322 void pme_gpu_free_grid_indices(const PmeGpu *pmeGpu)
323 {
324     freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices);
325     pfree(pmeGpu->staging.h_gridlineIndices);
326 }
327
328 void pme_gpu_realloc_grids(PmeGpu *pmeGpu)
329 {
330     auto     *kernelParamsPtr = pmeGpu->kernelParams.get();
331     const int newRealGridSize = kernelParamsPtr->grid.realGridSizePadded[XX] *
332         kernelParamsPtr->grid.realGridSizePadded[YY] *
333         kernelParamsPtr->grid.realGridSizePadded[ZZ];
334     const int newComplexGridSize = kernelParamsPtr->grid.complexGridSizePadded[XX] *
335         kernelParamsPtr->grid.complexGridSizePadded[YY] *
336         kernelParamsPtr->grid.complexGridSizePadded[ZZ] * 2;
337     // The complex grid size above is multiplied by 2 because it is counted in complex numbers, while all allocations/pointers are in floats
338     if (pmeGpu->archSpecific->performOutOfPlaceFFT)
339     {
340         /* 2 separate grids */
341         reallocateDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, newComplexGridSize,
342                                &pmeGpu->archSpecific->complexGridSize, &pmeGpu->archSpecific->complexGridSizeAlloc, pmeGpu->archSpecific->context);
343         reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid, newRealGridSize,
344                                &pmeGpu->archSpecific->realGridSize, &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->context);
345     }
346     else
347     {
348         /* A single buffer so that any grid will fit */
349         const int newGridsSize = std::max(newRealGridSize, newComplexGridSize);
350         reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid, newGridsSize,
351                                &pmeGpu->archSpecific->realGridSize, &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->context);
352         kernelParamsPtr->grid.d_fourierGrid   = kernelParamsPtr->grid.d_realGrid;
353         pmeGpu->archSpecific->complexGridSize = pmeGpu->archSpecific->realGridSize;
354         // the size might get used later for copying the grid
355     }
356 }
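/* Size illustration (a sketch assuming a 60x64x48 grid with GPU FFT, so that
 * realGridSizePadded == realGridSize; see pme_gpu_reinit_grids()):
 *     newRealGridSize    = 60 * 64 * 48     = 184320 floats
 *     newComplexGridSize = 60 * 64 * 25 * 2 = 192000 floats
 * With out-of-place FFT both buffers are allocated; with in-place FFT a single
 * buffer of max(184320, 192000) = 192000 floats is shared by both grids.
 */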
357
358 void pme_gpu_free_grids(const PmeGpu *pmeGpu)
359 {
360     if (pmeGpu->archSpecific->performOutOfPlaceFFT)
361     {
362         freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_fourierGrid);
363     }
364     freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid);
365 }
366
367 void pme_gpu_clear_grids(const PmeGpu *pmeGpu)
368 {
369     clearDeviceBufferAsync(&pmeGpu->kernelParams->grid.d_realGrid, 0,
370                            pmeGpu->archSpecific->realGridSize, pmeGpu->archSpecific->pmeStream);
371 }
372
373 void pme_gpu_realloc_and_copy_fract_shifts(PmeGpu *pmeGpu)
374 {
375     pme_gpu_free_fract_shifts(pmeGpu);
376
377     auto        *kernelParamsPtr = pmeGpu->kernelParams.get();
378
379     const int    nx                  = kernelParamsPtr->grid.realGridSize[XX];
380     const int    ny                  = kernelParamsPtr->grid.realGridSize[YY];
381     const int    nz                  = kernelParamsPtr->grid.realGridSize[ZZ];
382     const int    cellCount           = c_pmeNeighborUnitcellCount;
383     const int    gridDataOffset[DIM] = {0, cellCount * nx, cellCount * (nx + ny)};
384
385     memcpy(kernelParamsPtr->grid.tablesOffsets, &gridDataOffset, sizeof(gridDataOffset));
386
387     const int    newFractShiftsSize  = cellCount * (nx + ny + nz);
388
389 #if GMX_GPU == GMX_GPU_CUDA
390     initParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable,
391                          kernelParamsPtr->fractShiftsTableTexture,
392                          pmeGpu->common->fsh.data(),
393                          newFractShiftsSize);
394
395     initParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
396                          kernelParamsPtr->gridlineIndicesTableTexture,
397                          pmeGpu->common->nn.data(),
398                          newFractShiftsSize);
399 #elif GMX_GPU == GMX_GPU_OPENCL
400     // No dedicated texture routines are available in OpenCL, so plain device buffers are used instead
401     allocateDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, newFractShiftsSize, pmeGpu->archSpecific->context);
402     allocateDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, newFractShiftsSize, pmeGpu->archSpecific->context);
403     copyToDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, pmeGpu->common->fsh.data(),
404                        0, newFractShiftsSize,
405                        pmeGpu->archSpecific->pmeStream, GpuApiCallBehavior::Async, nullptr);
406     copyToDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, pmeGpu->common->nn.data(),
407                        0, newFractShiftsSize,
408                        pmeGpu->archSpecific->pmeStream, GpuApiCallBehavior::Async, nullptr);
409 #endif
410 }
411
412 void pme_gpu_free_fract_shifts(const PmeGpu *pmeGpu)
413 {
414     auto *kernelParamsPtr = pmeGpu->kernelParams.get();
415 #if GMX_GPU == GMX_GPU_CUDA
416     destroyParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable,
417                             kernelParamsPtr->fractShiftsTableTexture);
418     destroyParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
419                             kernelParamsPtr->gridlineIndicesTableTexture);
420 #elif GMX_GPU == GMX_GPU_OPENCL
421     freeDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable);
422     freeDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable);
423 #endif
424 }
425
426 bool pme_gpu_stream_query(const PmeGpu *pmeGpu)
427 {
428     return haveStreamTasksCompleted(pmeGpu->archSpecific->pmeStream);
429 }
430
431 void pme_gpu_copy_input_gather_grid(const PmeGpu *pmeGpu, float *h_grid)
432 {
433     copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid, h_grid,
434                        0, pmeGpu->archSpecific->realGridSize,
435                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
436 }
437
438 void pme_gpu_copy_output_spread_grid(const PmeGpu *pmeGpu, float *h_grid)
439 {
440     copyFromDeviceBuffer(h_grid, &pmeGpu->kernelParams->grid.d_realGrid,
441                          0, pmeGpu->archSpecific->realGridSize,
442                          pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
443     pmeGpu->archSpecific->syncSpreadGridD2H.markEvent(pmeGpu->archSpecific->pmeStream);
444 }
445
446 void pme_gpu_copy_output_spread_atom_data(const PmeGpu *pmeGpu)
447 {
448     const int    alignment        = pme_gpu_get_atoms_per_warp(pmeGpu);
449     const size_t nAtomsPadded     = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
450     const size_t splinesCount     = DIM * nAtomsPadded * pmeGpu->common->pme_order;
451     auto        *kernelParamsPtr  = pmeGpu->kernelParams.get();
452     copyFromDeviceBuffer(pmeGpu->staging.h_dtheta, &kernelParamsPtr->atoms.d_dtheta,
453                          0, splinesCount,
454                          pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
455     copyFromDeviceBuffer(pmeGpu->staging.h_theta, &kernelParamsPtr->atoms.d_theta,
456                          0, splinesCount,
457                          pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
458     copyFromDeviceBuffer(pmeGpu->staging.h_gridlineIndices, &kernelParamsPtr->atoms.d_gridlineIndices,
459                          0, kernelParamsPtr->atoms.nAtoms * DIM,
460                          pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
461 }
462
463 void pme_gpu_copy_input_gather_atom_data(const PmeGpu *pmeGpu)
464 {
465     const int    alignment       = pme_gpu_get_atoms_per_warp(pmeGpu);
466     const size_t nAtomsPadded    = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
467     const size_t splinesCount    = DIM * nAtomsPadded * pmeGpu->common->pme_order;
468     auto        *kernelParamsPtr = pmeGpu->kernelParams.get();
469     if (c_usePadding)
470     {
471         // TODO: could clear only the padding and not the whole thing, but this is test-exclusive code anyway
472         clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_gridlineIndices, 0,
473                                pmeGpu->nAtomsAlloc * DIM, pmeGpu->archSpecific->pmeStream);
474         clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_dtheta, 0,
475                                pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM, pmeGpu->archSpecific->pmeStream);
476         clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_theta, 0,
477                                pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM, pmeGpu->archSpecific->pmeStream);
478     }
479     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_dtheta, pmeGpu->staging.h_dtheta,
480                        0, splinesCount,
481                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
482     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_theta, pmeGpu->staging.h_theta,
483                        0, splinesCount,
484                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
485     copyToDeviceBuffer(&kernelParamsPtr->atoms.d_gridlineIndices, pmeGpu->staging.h_gridlineIndices,
486                        0, kernelParamsPtr->atoms.nAtoms * DIM,
487                        pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
488 }
489
490 void pme_gpu_sync_spread_grid(const PmeGpu *pmeGpu)
491 {
492     pmeGpu->archSpecific->syncSpreadGridD2H.waitForEvent();
493 }
494
495 void pme_gpu_init_internal(PmeGpu *pmeGpu)
496 {
497 #if GMX_GPU == GMX_GPU_CUDA
498     // Prepare to use the device that this PME task was assigned earlier.
499     // Other entities, such as CUDA timing events, are known to implicitly use the device context.
500     CU_RET_ERR(cudaSetDevice(pmeGpu->deviceInfo->id), "Switching to PME CUDA device");
501 #endif
502
503     /* Allocate the target-specific structures */
504     pmeGpu->archSpecific.reset(new PmeGpuSpecific());
505     pmeGpu->kernelParams.reset(new PmeGpuKernelParams());
506
507     pmeGpu->archSpecific->performOutOfPlaceFFT = true;
508     /* This should give better performance, according to the cuFFT documentation.
509      * The performance seems to be the same though.
510      * TODO: PME could also try to pick up nice grid sizes (with factors of 2, 3, 5, 7).
511      */
512
513     // TODO: this is just a convenient reuse because programHandle_ is currently in charge of creating the context
514     pmeGpu->archSpecific->context = pmeGpu->programHandle_->impl_->context;
515
516     // Timing enabling - TODO: put this in gpu_utils (even though it is mostly just option handling) and reuse it in the nonbonded module
517     if (GMX_GPU == GMX_GPU_CUDA)
518     {
519         /* WARNING: CUDA timings are incorrect with multiple streams.
520          *          This is the main reason why they are disabled by default.
521          */
522         // TODO: Consider turning timing on by default when we can detect the number of streams.
523         pmeGpu->archSpecific->useTiming = (getenv("GMX_ENABLE_GPU_TIMING") != nullptr);
524     }
525     else if (GMX_GPU == GMX_GPU_OPENCL)
526     {
527         pmeGpu->archSpecific->useTiming = (getenv("GMX_DISABLE_GPU_TIMING") == nullptr);
528     }
529
530 #if GMX_GPU == GMX_GPU_CUDA
531     pmeGpu->maxGridWidthX = pmeGpu->deviceInfo->prop.maxGridSize[0];
532 #elif GMX_GPU == GMX_GPU_OPENCL
533     pmeGpu->maxGridWidthX = INT32_MAX / 2;
534     // TODO: is there really no global work size limit in OpenCL?
535 #endif
536
537     /* Creating a PME GPU stream:
538      * - default high priority with CUDA
539      * - no priorities implemented yet with OpenCL; see #2532
540      */
541 #if GMX_GPU == GMX_GPU_CUDA
542     cudaError_t stat;
543     int         highest_priority, lowest_priority;
544     stat = cudaDeviceGetStreamPriorityRange(&lowest_priority, &highest_priority);
545     CU_RET_ERR(stat, "PME cudaDeviceGetStreamPriorityRange failed");
546     stat = cudaStreamCreateWithPriority(&pmeGpu->archSpecific->pmeStream,
547                                         cudaStreamDefault, //cudaStreamNonBlocking,
548                                         highest_priority);
549     CU_RET_ERR(stat, "cudaStreamCreateWithPriority on the PME stream failed");
550 #elif GMX_GPU == GMX_GPU_OPENCL
551     cl_command_queue_properties queueProperties = pmeGpu->archSpecific->useTiming ? CL_QUEUE_PROFILING_ENABLE : 0;
552     cl_device_id                device_id       = pmeGpu->deviceInfo->ocl_gpu_id.ocl_device_id;
553     cl_int                      clError;
554     pmeGpu->archSpecific->pmeStream = clCreateCommandQueue(pmeGpu->archSpecific->context,
555                                                            device_id, queueProperties, &clError);
556     if (clError != CL_SUCCESS)
557     {
558         GMX_THROW(gmx::InternalError("Failed to create PME command queue"));
559     }
560 #endif
561 }
562
563 void pme_gpu_destroy_specific(const PmeGpu *pmeGpu)
564 {
565 #if GMX_GPU == GMX_GPU_CUDA
566     /* Destroy the CUDA stream */
567     cudaError_t stat = cudaStreamDestroy(pmeGpu->archSpecific->pmeStream);
568     CU_RET_ERR(stat, "PME cudaStreamDestroy error");
569 #elif GMX_GPU == GMX_GPU_OPENCL
570     cl_int clError = clReleaseCommandQueue(pmeGpu->archSpecific->pmeStream);
571     if (clError != CL_SUCCESS)
572     {
573         gmx_warning("Failed to destroy PME command queue");
574     }
575 #endif
576 }
577
578 void pme_gpu_reinit_3dfft(const PmeGpu *pmeGpu)
579 {
580     if (pme_gpu_performs_FFT(pmeGpu))
581     {
582         pmeGpu->archSpecific->fftSetup.resize(0);
583         for (int i = 0; i < pmeGpu->common->ngrids; i++)
584         {
585             pmeGpu->archSpecific->fftSetup.push_back(std::make_unique<GpuParallel3dFft>(pmeGpu));
586         }
587     }
588 }
589
590 void pme_gpu_destroy_3dfft(const PmeGpu *pmeGpu)
591 {
592     pmeGpu->archSpecific->fftSetup.resize(0);
593 }
594
595 int getSplineParamFullIndex(int order, int splineIndex, int dimIndex, int atomIndex, int atomsPerWarp)
596 {
597     if (order != c_pmeGpuOrder)
598     {
599         throw order;
600     }
601     constexpr int fixedOrder = c_pmeGpuOrder;
602     GMX_UNUSED_VALUE(fixedOrder);
603
604     const int atomWarpIndex = atomIndex % atomsPerWarp;
605     const int warpIndex     = atomIndex / atomsPerWarp;
606     int       indexBase, result;
607     switch (atomsPerWarp)
608     {
609         case 1:
610             indexBase = getSplineParamIndexBase<fixedOrder, 1>(warpIndex, atomWarpIndex);
611             result    = getSplineParamIndex<fixedOrder, 1>(indexBase, dimIndex, splineIndex);
612             break;
613
614         case 2:
615             indexBase = getSplineParamIndexBase<fixedOrder, 2>(warpIndex, atomWarpIndex);
616             result    = getSplineParamIndex<fixedOrder, 2>(indexBase, dimIndex, splineIndex);
617             break;
618
619         case 4:
620             indexBase = getSplineParamIndexBase<fixedOrder, 4>(warpIndex, atomWarpIndex);
621             result    = getSplineParamIndex<fixedOrder, 4>(indexBase, dimIndex, splineIndex);
622             break;
623
624         case 8:
625             indexBase = getSplineParamIndexBase<fixedOrder, 8>(warpIndex, atomWarpIndex);
626             result    = getSplineParamIndex<fixedOrder, 8>(indexBase, dimIndex, splineIndex);
627             break;
628
629         default:
630             GMX_THROW(gmx::NotImplementedError(gmx::formatString("Test function call not unrolled for atomsPerWarp = %d in getSplineParamFullIndex", atomsPerWarp)));
631     }
632     return result;
633 }
634
635 PmeOutput pme_gpu_getEnergyAndVirial(const gmx_pme_t &pme)
636 {
637     PmeOutput     output;
638
639     const PmeGpu *pmeGpu = pme.gpu;
640     for (int j = 0; j < c_virialAndEnergyCount; j++)
641     {
642         GMX_ASSERT(std::isfinite(pmeGpu->staging.h_virialAndEnergy[j]), "PME GPU produces incorrect energy/virial.");
643     }
644
645     unsigned int j = 0;
646     output.coulombVirial_[XX][XX] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
647     output.coulombVirial_[YY][YY] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
648     output.coulombVirial_[ZZ][ZZ] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
649     output.coulombVirial_[XX][YY] = output.coulombVirial_[YY][XX] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
650     output.coulombVirial_[XX][ZZ] = output.coulombVirial_[ZZ][XX] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
651     output.coulombVirial_[YY][ZZ] = output.coulombVirial_[ZZ][YY] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
652     output.coulombEnergy_         = 0.5f * pmeGpu->staging.h_virialAndEnergy[j++];
653
654     return output;
655 }
656
657 PmeOutput pme_gpu_getOutput(const gmx_pme_t &pme,
658                             const int        flags)
659 {
660     PmeGpu    *pmeGpu = pme.gpu;
661     const bool haveComputedEnergyAndVirial = (flags & GMX_PME_CALC_ENER_VIR) != 0;
662     if (!haveComputedEnergyAndVirial)
663     {
664         // The caller knows from the flags that the energy and the virial are not usable
665         PmeOutput output;
666         output.forces_ = pmeGpu->staging.h_forces;
667         return output;
668     }
669
670     if (pme_gpu_performs_solve(pmeGpu))
671     {
672         PmeOutput output = pme_gpu_getEnergyAndVirial(pme);
673         output.forces_ = pmeGpu->staging.h_forces;
674         return output;
675     }
676     else
677     {
678         PmeOutput output;
679         get_pme_ener_vir_q(pme.solve_work, pme.nthread, &output);
680         output.forces_ = pmeGpu->staging.h_forces;
681         return output;
682     }
683
684 }
685
686 void pme_gpu_update_input_box(PmeGpu gmx_unused       *pmeGpu,
687                               const matrix gmx_unused  box)
688 {
689 #if GMX_DOUBLE
690     GMX_THROW(gmx::NotImplementedError("PME is implemented for single-precision only on GPU"));
691 #else
692     matrix  scaledBox;
693     pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
694     auto   *kernelParamsPtr      = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
695     kernelParamsPtr->current.boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
696     GMX_ASSERT(kernelParamsPtr->current.boxVolume != 0.0f, "Zero volume of the unit cell");
697     matrix recipBox;
698     gmx::invertBoxMatrix(scaledBox, recipBox);
699
700     /* The GPU recipBox is transposed as compared to the CPU recipBox.
701      * Spread uses matrix columns (while solve and gather use rows).
702      * There is no particular reason for this; it might be further rethought/optimized for better access patterns.
703      */
704     const real newRecipBox[DIM][DIM] =
705     {
706         {recipBox[XX][XX], recipBox[YY][XX], recipBox[ZZ][XX]},
707         {             0.0, recipBox[YY][YY], recipBox[ZZ][YY]},
708         {             0.0,              0.0, recipBox[ZZ][ZZ]}
709     };
710     memcpy(kernelParamsPtr->current.recipBox, newRecipBox, sizeof(matrix));
711 #endif
712 }
713
714 /*! \brief \libinternal
715  * (Re-)initializes all the PME GPU data related to the grid size and cut-off.
716  *
717  * \param[in] pmeGpu            The PME GPU structure.
718  */
719 static void pme_gpu_reinit_grids(PmeGpu *pmeGpu)
720 {
721     auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
722     kernelParamsPtr->grid.ewaldFactor = (M_PI * M_PI) / (pmeGpu->common->ewaldcoeff_q * pmeGpu->common->ewaldcoeff_q);
723
724     /* The grid size variants */
725     for (int i = 0; i < DIM; i++)
726     {
727         kernelParamsPtr->grid.realGridSize[i]       = pmeGpu->common->nk[i];
728         kernelParamsPtr->grid.realGridSizeFP[i]     = static_cast<float>(kernelParamsPtr->grid.realGridSize[i]);
729         kernelParamsPtr->grid.realGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
730
731         // The complex grid currently uses no padding;
732         // if it starts to do so, then another test should be added for that
733         kernelParamsPtr->grid.complexGridSize[i]       = kernelParamsPtr->grid.realGridSize[i];
734         kernelParamsPtr->grid.complexGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
735     }
736     /* FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
737     if (!pme_gpu_performs_FFT(pmeGpu))
738     {
739         // This allows the GPU spreading grid and the CPU fftgrid to have the same layout, so that the data can be copied directly
740         kernelParamsPtr->grid.realGridSizePadded[ZZ] = (kernelParamsPtr->grid.realGridSize[ZZ] / 2 + 1) * 2;
741     }
742
743     /* GPU FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
744     kernelParamsPtr->grid.complexGridSize[ZZ] /= 2;
745     kernelParamsPtr->grid.complexGridSize[ZZ]++;
746     kernelParamsPtr->grid.complexGridSizePadded[ZZ] = kernelParamsPtr->grid.complexGridSize[ZZ];
747
748     pme_gpu_realloc_and_copy_fract_shifts(pmeGpu);
749     pme_gpu_realloc_and_copy_bspline_values(pmeGpu);
750     pme_gpu_realloc_grids(pmeGpu);
751     pme_gpu_reinit_3dfft(pmeGpu);
752 }
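/* Worked example of the sizes set above (illustration only): for nk = {60, 64, 48}
 *     realGridSize    = {60, 64, 48}
 *     complexGridSize = {60, 64, 25}           (48 / 2 + 1 in the minor dimension)
 *     realGridSizePadded[ZZ] stays 48 when the GPU performs the FFT, but becomes
 *     (48 / 2 + 1) * 2 = 50 when the CPU fftgrid layout has to be matched.
 */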
753
754 /* Several GPU functions that refer to the CPU PME data live here.
755  * We would like to keep these away from the GPU-framework specific code for clarity,
756  * as well as to avoid compilation issues with MPI.
757  */
758
759 /*! \brief \libinternal
760  * Copies everything useful from the PME CPU to the PME GPU structure.
761  * The goal is to minimize interaction with the PME CPU structure in the GPU code.
762  *
763  * \param[in] pme         The PME structure.
764  */
765 static void pme_gpu_copy_common_data_from(const gmx_pme_t *pme)
766 {
767     /* TODO: Consider refactoring the CPU PME code to use the same structure,
768      * so that this function becomes 2 lines */
769     PmeGpu *pmeGpu             = pme->gpu;
770     pmeGpu->common->ngrids        = pme->ngrids;
771     pmeGpu->common->epsilon_r     = pme->epsilon_r;
772     pmeGpu->common->ewaldcoeff_q  = pme->ewaldcoeff_q;
773     pmeGpu->common->nk[XX]        = pme->nkx;
774     pmeGpu->common->nk[YY]        = pme->nky;
775     pmeGpu->common->nk[ZZ]        = pme->nkz;
776     pmeGpu->common->pme_order     = pme->pme_order;
777     if (pmeGpu->common->pme_order != c_pmeGpuOrder)
778     {
779         GMX_THROW(gmx::NotImplementedError("pme_order != 4 is not implemented!"));
780     }
781     for (int i = 0; i < DIM; i++)
782     {
783         pmeGpu->common->bsp_mod[i].assign(pme->bsp_mod[i], pme->bsp_mod[i] + pmeGpu->common->nk[i]);
784     }
785     const int cellCount = c_pmeNeighborUnitcellCount;
786     pmeGpu->common->fsh.resize(0);
787     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshx, pme->fshx + cellCount * pme->nkx);
788     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshy, pme->fshy + cellCount * pme->nky);
789     pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshz, pme->fshz + cellCount * pme->nkz);
790     pmeGpu->common->nn.resize(0);
791     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnx, pme->nnx + cellCount * pme->nkx);
792     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nny, pme->nny + cellCount * pme->nky);
793     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnz, pme->nnz + cellCount * pme->nkz);
794     pmeGpu->common->runMode   = pme->runMode;
795     pmeGpu->common->boxScaler = pme->boxScaler;
796 }
797
798 /*! \libinternal \brief
799  * Initializes the PME GPU data at the beginning of the run.
800  * TODO: this should become PmeGpu::PmeGpu()
801  *
802  * \param[in,out] pme            The PME structure.
803  * \param[in,out] gpuInfo        The GPU information structure.
804  * \param[in]     pmeGpuProgram  The handle to the program/kernel data created outside (e.g. in unit tests/runner)
805  */
806 static void pme_gpu_init(gmx_pme_t               *pme,
807                          const gmx_device_info_t *gpuInfo,
808                          PmeGpuProgramHandle      pmeGpuProgram)
809 {
810     pme->gpu          = new PmeGpu();
811     PmeGpu *pmeGpu = pme->gpu;
812     changePinningPolicy(&pmeGpu->staging.h_forces, pme_get_pinning_policy());
813     pmeGpu->common = std::make_shared<PmeShared>();
814
815     /* These settings are set here for the whole run; dynamic ones are set in pme_gpu_reinit() */
816     /* A convenience variable. */
817     pmeGpu->settings.useDecomposition = (pme->nnodes == 1);
818     /* TODO: CPU gather with GPU spread is broken due to different theta/dtheta layout. */
819     pmeGpu->settings.performGPUGather = true;
820
821     pme_gpu_set_testing(pmeGpu, false);
822
823     pmeGpu->deviceInfo = gpuInfo;
824     GMX_ASSERT(pmeGpuProgram != nullptr, "GPU kernels must be already compiled");
825     pmeGpu->programHandle_ = pmeGpuProgram;
826
827     pme_gpu_init_internal(pmeGpu);
828     pme_gpu_alloc_energy_virial(pmeGpu);
829
830     pme_gpu_copy_common_data_from(pme);
831
832     GMX_ASSERT(pmeGpu->common->epsilon_r != 0.0f, "PME GPU: bad electrostatic coefficient");
833
834     auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
835     kernelParamsPtr->constants.elFactor = ONE_4PI_EPS0 / pmeGpu->common->epsilon_r;
836 }
837
838 void pme_gpu_transform_spline_atom_data(const PmeGpu *pmeGpu, const PmeAtomComm *atc,
839                                         PmeSplineDataType type, int dimIndex, PmeLayoutTransform transform)
840 {
841     // The GPU atom spline data is laid out in a different way currently than the CPU one.
842     // This function converts the data from GPU to CPU layout (in the host memory).
843     // It is only intended for testing purposes so far.
844     // Ideally we should use similar layouts on CPU and GPU if we care about mixed modes and their performance
845     // (e.g. spreading on GPU, gathering on CPU).
846     GMX_RELEASE_ASSERT(atc->nthread == 1, "Only the serial PME data layout is supported");
847     const uintmax_t threadIndex  = 0;
848     const auto      atomCount    = pme_gpu_get_kernel_params_base_ptr(pmeGpu)->atoms.nAtoms;
849     const auto      atomsPerWarp = pme_gpu_get_atoms_per_warp(pmeGpu);
850     const auto      pmeOrder     = pmeGpu->common->pme_order;
851     GMX_ASSERT(pmeOrder == c_pmeGpuOrder, "Only PME order 4 is implemented");
852
853     real           *cpuSplineBuffer;
854     float          *h_splineBuffer;
855     switch (type)
856     {
857         case PmeSplineDataType::Values:
858             cpuSplineBuffer = atc->spline[threadIndex].theta.coefficients[dimIndex];
859             h_splineBuffer  = pmeGpu->staging.h_theta;
860             break;
861
862         case PmeSplineDataType::Derivatives:
863             cpuSplineBuffer = atc->spline[threadIndex].dtheta.coefficients[dimIndex];
864             h_splineBuffer  = pmeGpu->staging.h_dtheta;
865             break;
866
867         default:
868             GMX_THROW(gmx::InternalError("Unknown spline data type"));
869     }
870
871     for (auto atomIndex = 0; atomIndex < atomCount; atomIndex++)
872     {
873         for (auto orderIndex = 0; orderIndex < pmeOrder; orderIndex++)
874         {
875             const auto gpuValueIndex = getSplineParamFullIndex(pmeOrder, orderIndex, dimIndex, atomIndex, atomsPerWarp);
876             const auto cpuValueIndex = atomIndex * pmeOrder + orderIndex;
877             GMX_ASSERT(cpuValueIndex < atomCount * pmeOrder, "Atom spline data index out of bounds (while transforming GPU data layout for host)");
878             switch (transform)
879             {
880                 case PmeLayoutTransform::GpuToHost:
881                     cpuSplineBuffer[cpuValueIndex] = h_splineBuffer[gpuValueIndex];
882                     break;
883
884                 case PmeLayoutTransform::HostToGpu:
885                     h_splineBuffer[gpuValueIndex] = cpuSplineBuffer[cpuValueIndex];
886                     break;
887
888                 default:
889                     GMX_THROW(gmx::InternalError("Unknown layout transform"));
890             }
891         }
892     }
893 }
894
895 void pme_gpu_get_real_grid_sizes(const PmeGpu *pmeGpu, gmx::IVec *gridSize, gmx::IVec *paddedGridSize)
896 {
897     GMX_ASSERT(gridSize != nullptr, "");
898     GMX_ASSERT(paddedGridSize != nullptr, "");
899     GMX_ASSERT(pmeGpu != nullptr, "");
900     auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
901     for (int i = 0; i < DIM; i++)
902     {
903         (*gridSize)[i]       = kernelParamsPtr->grid.realGridSize[i];
904         (*paddedGridSize)[i] = kernelParamsPtr->grid.realGridSizePadded[i];
905     }
906 }
907
908 void pme_gpu_reinit(gmx_pme_t               *pme,
909                     const gmx_device_info_t *gpuInfo,
910                     PmeGpuProgramHandle      pmeGpuProgram)
911 {
912     if (!pme_gpu_active(pme))
913     {
914         return;
915     }
916
917     if (!pme->gpu)
918     {
919         /* First-time initialization */
920         pme_gpu_init(pme, gpuInfo, pmeGpuProgram);
921     }
922     else
923     {
924         /* After this call nothing in the GPU code should refer to the gmx_pme_t *pme itself - until the next pme_gpu_reinit */
925         pme_gpu_copy_common_data_from(pme);
926     }
927     /* GPU FFT will only get used for a single rank. */
928     pme->gpu->settings.performGPUFFT   = (pme->gpu->common->runMode == PmeRunMode::GPU) && !pme_gpu_uses_dd(pme->gpu);
929     pme->gpu->settings.performGPUSolve = (pme->gpu->common->runMode == PmeRunMode::GPU);
930
931     /* Reinit active timers */
932     pme_gpu_reinit_timings(pme->gpu);
933
934     pme_gpu_reinit_grids(pme->gpu);
935     // Note: if timing the reinit launch overhead becomes more relevant
936     // (e.g. with regular PP-PME re-balancing), we should pass wcycle here.
937     pme_gpu_reinit_computation(pme, nullptr);
938     /* Clear the previous box - doesn't hurt, and forces the PME CPU recipbox
939      * update for mixed mode on grid switch. TODO: use shared recipbox field.
940      */
941     std::memset(pme->gpu->common->previousBox, 0, sizeof(pme->gpu->common->previousBox));
942 }
943
944 void pme_gpu_destroy(PmeGpu *pmeGpu)
945 {
946     /* Free lots of data */
947     pme_gpu_free_energy_virial(pmeGpu);
948     pme_gpu_free_bspline_values(pmeGpu);
949     pme_gpu_free_forces(pmeGpu);
950     pme_gpu_free_coordinates(pmeGpu);
951     pme_gpu_free_coefficients(pmeGpu);
952     pme_gpu_free_spline_data(pmeGpu);
953     pme_gpu_free_grid_indices(pmeGpu);
954     pme_gpu_free_fract_shifts(pmeGpu);
955     pme_gpu_free_grids(pmeGpu);
956
957     pme_gpu_destroy_3dfft(pmeGpu);
958
959     /* Free the GPU-framework specific data last */
960     pme_gpu_destroy_specific(pmeGpu);
961
962     delete pmeGpu;
963 }
964
965 void pme_gpu_reinit_atoms(PmeGpu *pmeGpu, const int nAtoms, const real *charges)
966 {
967     auto      *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
968     kernelParamsPtr->atoms.nAtoms = nAtoms;
969     const int  alignment = pme_gpu_get_atom_data_alignment(pmeGpu);
970     pmeGpu->nAtomsPadded = ((nAtoms + alignment - 1) / alignment) * alignment;
971     const int  nAtomsAlloc   = c_usePadding ? pmeGpu->nAtomsPadded : nAtoms;
972     const bool haveToRealloc = (pmeGpu->nAtomsAlloc < nAtomsAlloc); /* This check might be redundant, but is logical */
973     pmeGpu->nAtomsAlloc = nAtomsAlloc;
974
975 #if GMX_DOUBLE
976     GMX_RELEASE_ASSERT(false, "Only single precision supported");
977     GMX_UNUSED_VALUE(charges);
978 #else
979     pme_gpu_realloc_and_copy_input_coefficients(pmeGpu, reinterpret_cast<const float *>(charges));
980     /* Could also be checked for haveToRealloc, but the copy always needs to be performed */
981 #endif
982
983     if (haveToRealloc)
984     {
985         pme_gpu_realloc_coordinates(pmeGpu);
986         pme_gpu_realloc_forces(pmeGpu);
987         pme_gpu_realloc_spline_data(pmeGpu);
988         pme_gpu_realloc_grid_indices(pmeGpu);
989     }
990 }
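/* Padding illustration (a sketch; assumes c_pmeAtomDataAlignment == 64, which is
 * an assumption of this comment - see pme_gpu_constants.h for the actual value):
 *     nAtoms = 1001  ->  nAtomsPadded = ((1001 + 63) / 64) * 64 = 1024
 * With c_usePadding the device buffers are then sized for 1024 atoms and the
 * trailing 23 charge coefficients are cleared to zero, so the padded atoms do
 * not contribute to spreading.
 */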
991
992 void pme_gpu_3dfft(const PmeGpu *pmeGpu, gmx_fft_direction dir, int grid_index)
993 {
994     int   timerId = (dir == GMX_FFT_REAL_TO_COMPLEX) ? gtPME_FFT_R2C : gtPME_FFT_C2R;
995
996     pme_gpu_start_timing(pmeGpu, timerId);
997     pmeGpu->archSpecific->fftSetup[grid_index]->perform3dFft(dir, pme_gpu_fetch_timing_event(pmeGpu, timerId));
998     pme_gpu_stop_timing(pmeGpu, timerId);
999 }
1000
1001 /*! \brief
1002  * Given possibly large \p blockCount, returns a compact 1D or 2D grid for kernel scheduling,
1003  * to minimize number of unused blocks.
1004  */
1005 std::pair<int, int> inline pmeGpuCreateGrid(const PmeGpu *pmeGpu, int blockCount)
1006 {
1007     // How many rows of maximum width in X do we need (hopefully just one)?
1008     const int minRowCount = (blockCount + pmeGpu->maxGridWidthX - 1) / pmeGpu->maxGridWidthX;
1009     // Trying to make things even
1010     const int colCount = (blockCount + minRowCount - 1) / minRowCount;
1011     GMX_ASSERT((colCount * minRowCount - blockCount) >= 0, "pmeGpuCreateGrid: totally wrong");
1012     GMX_ASSERT((colCount * minRowCount - blockCount) < minRowCount, "pmeGpuCreateGrid: excessive blocks");
1013     return std::pair<int, int>(colCount, minRowCount);
1014 }
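/* Example (pure arithmetic on the code above): for blockCount = 100000 and
 * maxGridWidthX = 65535
 *     minRowCount = (100000 + 65534) / 65535 = 2
 *     colCount    = (100000 + 1) / 2         = 50000
 * giving a 50000 x 2 launch grid with zero unused blocks; any blockCount that
 * fits into one row simply yields {blockCount, 1}.
 */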
1015
1016 void pme_gpu_spread(const PmeGpu    *pmeGpu,
1017                     int gmx_unused   gridIndex,
1018                     real            *h_grid,
1019                     bool             computeSplines,
1020                     bool             spreadCharges)
1021 {
1022     GMX_ASSERT(computeSplines || spreadCharges, "PME spline/spread kernel has invalid input (nothing to do)");
1023     const auto   *kernelParamsPtr = pmeGpu->kernelParams.get();
1024     GMX_ASSERT(kernelParamsPtr->atoms.nAtoms > 0, "No atom data in PME GPU spread");
1025
1026     const size_t blockSize  = pmeGpu->programHandle_->impl_->spreadWorkGroupSize;
1027
1028     const int    order         = pmeGpu->common->pme_order;
1029     GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1030     const int    atomsPerBlock = blockSize / c_pmeSpreadGatherThreadsPerAtom;
1031     // TODO: pick smaller block size in runtime if needed
1032     // (e.g. on 660 Ti where 50% occupancy is ~25% faster than 100% occupancy with RNAse (~17.8k atoms))
1033     // If doing so, change atomsPerBlock in the kernels as well.
1034     // TODO: test varying block sizes on modern arch-s as well
1035     // TODO: also consider using cudaFuncSetCacheConfig() for preferring shared memory on older architectures
1036     // (for spline data mostly, together with varying PME_GPU_PARALLEL_SPLINE define)
1037     GMX_ASSERT(!c_usePadding || !(c_pmeAtomDataAlignment % atomsPerBlock), "inconsistent atom data padding vs. spreading block size");
1038
1039     const int          blockCount = pmeGpu->nAtomsPadded / atomsPerBlock;
1040     auto               dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1041
1042     KernelLaunchConfig config;
1043     config.blockSize[0] = config.blockSize[1] = order;
1044     config.blockSize[2] = atomsPerBlock;
1045     config.gridSize[0]  = dimGrid.first;
1046     config.gridSize[1]  = dimGrid.second;
1047     config.stream       = pmeGpu->archSpecific->pmeStream;
1048
1049     int  timingId;
1050     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1051     if (computeSplines)
1052     {
1053         if (spreadCharges)
1054         {
1055             timingId  = gtPME_SPLINEANDSPREAD;
1056             kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernel;
1057         }
1058         else
1059         {
1060             timingId  = gtPME_SPLINE;
1061             kernelPtr = pmeGpu->programHandle_->impl_->splineKernel;
1062         }
1063     }
1064     else
1065     {
1066         timingId  = gtPME_SPREAD;
1067         kernelPtr = pmeGpu->programHandle_->impl_->spreadKernel;
1068     }
1069
1070     pme_gpu_start_timing(pmeGpu, timingId);
1071     auto      *timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1072 #if c_canEmbedBuffers
1073     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1074 #else
1075     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr,
1076                                                       &kernelParamsPtr->atoms.d_theta,
1077                                                       &kernelParamsPtr->atoms.d_dtheta,
1078                                                       &kernelParamsPtr->atoms.d_gridlineIndices,
1079                                                       &kernelParamsPtr->grid.d_realGrid,
1080                                                       &kernelParamsPtr->grid.d_fractShiftsTable,
1081                                                       &kernelParamsPtr->grid.d_gridlineIndicesTable,
1082                                                       &kernelParamsPtr->atoms.d_coefficients,
1083                                                       &kernelParamsPtr->atoms.d_coordinates
1084                                                       );
1085 #endif
1086
1087     launchGpuKernel(kernelPtr, config, timingEvent, "PME spline/spread", kernelArgs);
1088     pme_gpu_stop_timing(pmeGpu, timingId);
1089
1090     const bool copyBackGrid = spreadCharges && (pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_FFT(pmeGpu));
1091     if (copyBackGrid)
1092     {
1093         pme_gpu_copy_output_spread_grid(pmeGpu, h_grid);
1094     }
1095     const bool copyBackAtomData = computeSplines && (pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_gather(pmeGpu));
1096     if (copyBackAtomData)
1097     {
1098         pme_gpu_copy_output_spread_atom_data(pmeGpu);
1099     }
1100 }
1101
1102 void pme_gpu_solve(const PmeGpu *pmeGpu, t_complex *h_grid,
1103                    GridOrdering gridOrdering, bool computeEnergyAndVirial)
1104 {
1105     const bool   copyInputAndOutputGrid = pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_FFT(pmeGpu);
1106
1107     auto        *kernelParamsPtr = pmeGpu->kernelParams.get();
1108
1109     float       *h_gridFloat = reinterpret_cast<float *>(h_grid);
1110     if (copyInputAndOutputGrid)
1111     {
1112         copyToDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, h_gridFloat,
1113                            0, pmeGpu->archSpecific->complexGridSize,
1114                            pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
1115     }
1116
1117     int majorDim = -1, middleDim = -1, minorDim = -1;
1118     switch (gridOrdering)
1119     {
1120         case GridOrdering::YZX:
1121             majorDim  = YY;
1122             middleDim = ZZ;
1123             minorDim  = XX;
1124             break;
1125
1126         case GridOrdering::XYZ:
1127             majorDim  = XX;
1128             middleDim = YY;
1129             minorDim  = ZZ;
1130             break;
1131
1132         default:
1133             GMX_ASSERT(false, "Implement grid ordering here and below for the kernel launch");
1134     }
1135
1136     const int    maxBlockSize       = pmeGpu->programHandle_->impl_->solveMaxWorkGroupSize;
1137
1138     const int    gridLineSize       = pmeGpu->kernelParams->grid.complexGridSize[minorDim];
1139     const int    gridLinesPerBlock  = std::max(maxBlockSize / gridLineSize, 1);
1140     const int    blocksPerGridLine  = (gridLineSize + maxBlockSize - 1) / maxBlockSize;
1141     int          cellsPerBlock;
1142     if (blocksPerGridLine == 1)
1143     {
1144         cellsPerBlock               = gridLineSize * gridLinesPerBlock;
1145     }
1146     else
1147     {
1148         cellsPerBlock               = (gridLineSize + blocksPerGridLine - 1) / blocksPerGridLine;
1149     }
1150     const int    warpSize           = pmeGpu->programHandle_->impl_->warpSize;
1151     const int    blockSize          = (cellsPerBlock + warpSize - 1) / warpSize * warpSize;
1152
1153     static_assert(GMX_GPU != GMX_GPU_CUDA || c_solveMaxWarpsPerBlock/2 >= 4,
1154                   "The CUDA solve energy kernels need at least 4 warps. "
1155                   "Here we launch at least half of the max warps.");
1156
1157     KernelLaunchConfig config;
1158     config.blockSize[0] = blockSize;
1159     config.gridSize[0]  = blocksPerGridLine;
1160     // rounding up to full warps so that shuffle operations produce defined results
1161     config.gridSize[1]  = (pmeGpu->kernelParams->grid.complexGridSize[middleDim] + gridLinesPerBlock - 1) / gridLinesPerBlock;
1162     config.gridSize[2]  = pmeGpu->kernelParams->grid.complexGridSize[majorDim];
1163     config.stream       = pmeGpu->archSpecific->pmeStream;
1164
1165     int  timingId = gtPME_SOLVE;
1166     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1167     if (gridOrdering == GridOrdering::YZX)
1168     {
1169         kernelPtr = computeEnergyAndVirial ?
1170             pmeGpu->programHandle_->impl_->solveYZXEnergyKernel :
1171             pmeGpu->programHandle_->impl_->solveYZXKernel;
1172     }
1173     else if (gridOrdering == GridOrdering::XYZ)
1174     {
1175         kernelPtr = computeEnergyAndVirial ?
1176             pmeGpu->programHandle_->impl_->solveXYZEnergyKernel :
1177             pmeGpu->programHandle_->impl_->solveXYZKernel;
1178     }
1179
1180     pme_gpu_start_timing(pmeGpu, timingId);
1181     auto      *timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1182 #if c_canEmbedBuffers
1183     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1184 #else
1185     const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr,
1186                                                       &kernelParamsPtr->grid.d_splineModuli,
1187                                                       &kernelParamsPtr->constants.d_virialAndEnergy,
1188                                                       &kernelParamsPtr->grid.d_fourierGrid);
1189 #endif
1190     launchGpuKernel(kernelPtr, config, timingEvent, "PME solve", kernelArgs);
1191     pme_gpu_stop_timing(pmeGpu, timingId);
1192
1193     if (computeEnergyAndVirial)
1194     {
1195         copyFromDeviceBuffer(pmeGpu->staging.h_virialAndEnergy, &kernelParamsPtr->constants.d_virialAndEnergy,
1196                              0, c_virialAndEnergyCount,
1197                              pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
1198     }
1199
1200     if (copyInputAndOutputGrid)
1201     {
1202         copyFromDeviceBuffer(h_gridFloat, &kernelParamsPtr->grid.d_fourierGrid,
1203                              0, pmeGpu->archSpecific->complexGridSize,
1204                              pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
1205     }
1206 }
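/* Launch-geometry illustration for the solve above (a sketch; assumes a 60x64x48
 * real grid -> complexGridSize = {60, 64, 25}, YZX ordering, warpSize 32 and
 * solveMaxWorkGroupSize 256 - the last two are assumptions of this comment):
 *     gridLineSize      = complexGridSize[XX] = 60
 *     gridLinesPerBlock = max(256 / 60, 1)    = 4
 *     blocksPerGridLine = (60 + 255) / 256    = 1
 *     cellsPerBlock     = 60 * 4              = 240
 *     blockSize         = 240 rounded up to a full warp = 256
 *     config.gridSize   = {1, (25 + 3) / 4, 64} = {1, 7, 64}
 */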
1207
1208 void pme_gpu_gather(PmeGpu                *pmeGpu,
1209                     PmeForceOutputHandling forceTreatment,
1210                     const float           *h_grid
1211                     )
1212 {
1213     /* Copying the input CPU forces for reduction */
1214     if (forceTreatment != PmeForceOutputHandling::Set)
1215     {
1216         pme_gpu_copy_input_forces(pmeGpu);
1217     }
1218
1219     if (!pme_gpu_performs_FFT(pmeGpu) || pme_gpu_is_testing(pmeGpu))
1220     {
1221         pme_gpu_copy_input_gather_grid(pmeGpu, const_cast<float *>(h_grid));
1222     }
1223
1224     if (pme_gpu_is_testing(pmeGpu))
1225     {
1226         pme_gpu_copy_input_gather_atom_data(pmeGpu);
1227     }
1228
1229     const size_t blockSize     = pmeGpu->programHandle_->impl_->gatherWorkGroupSize;
1230     const int    atomsPerBlock = blockSize / c_pmeSpreadGatherThreadsPerAtom;
1231     GMX_ASSERT(!c_usePadding || !(c_pmeAtomDataAlignment % atomsPerBlock), "inconsistent atom data padding vs. gathering block size");
1232
1233     const int          blockCount = pmeGpu->nAtomsPadded / atomsPerBlock;
1234     auto               dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1235
1236
1237     const int order = pmeGpu->common->pme_order;
1238     GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1239
1240     KernelLaunchConfig config;
1241     config.blockSize[0] = config.blockSize[1] = order;
1242     config.blockSize[2] = atomsPerBlock;
1243     config.gridSize[0]  = dimGrid.first;
1244     config.gridSize[1]  = dimGrid.second;
1245     config.stream       = pmeGpu->archSpecific->pmeStream;
1246
1247     // TODO test different cache configs
1248
1249     int  timingId = gtPME_GATHER;
1250     // TODO design kernel selection getters and make PmeGpu a friend of PmeGpuProgramImpl
1251     PmeGpuProgramImpl::PmeKernelHandle kernelPtr = (forceTreatment == PmeForceOutputHandling::Set) ?
1252         pmeGpu->programHandle_->impl_->gatherKernel :
1253         pmeGpu->programHandle_->impl_->gatherReduceWithInputKernel;
1254
1255     pme_gpu_start_timing(pmeGpu, timingId);
1256     auto       *timingEvent     = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1257     const auto *kernelParamsPtr = pmeGpu->kernelParams.get();
1258 #if c_canEmbedBuffers
1259     const auto  kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1260 #else
1261     const auto  kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr,
1262                                                        &kernelParamsPtr->atoms.d_coefficients,
1263                                                        &kernelParamsPtr->grid.d_realGrid,
1264                                                        &kernelParamsPtr->atoms.d_theta,
1265                                                        &kernelParamsPtr->atoms.d_dtheta,
1266                                                        &kernelParamsPtr->atoms.d_gridlineIndices,
1267                                                        &kernelParamsPtr->atoms.d_forces);
1268 #endif
1269     launchGpuKernel(kernelPtr, config, timingEvent, "PME gather", kernelArgs);
1270     pme_gpu_stop_timing(pmeGpu, timingId);
1271
1272     pme_gpu_copy_output_forces(pmeGpu);
1273 }
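/* A minimal sketch of how the stages in this file are typically chained for a run
 * with GPU FFT (the real sequencing, grid ordering and flag handling live in the
 * higher-level pme_gpu_launch_* code; h_complexGrid, gridOrdering and
 * computeEnergyAndVirial below are placeholders of this outline):
 *
 *     pme_gpu_spread(pmeGpu, 0, h_grid, true, true);               // splines + spread
 *     pme_gpu_3dfft(pmeGpu, GMX_FFT_REAL_TO_COMPLEX, 0);           // R2C FFT
 *     pme_gpu_solve(pmeGpu, h_complexGrid, gridOrdering, computeEnergyAndVirial);
 *     pme_gpu_3dfft(pmeGpu, GMX_FFT_COMPLEX_TO_REAL, 0);           // C2R FFT
 *     pme_gpu_gather(pmeGpu, PmeForceOutputHandling::Set, h_grid); // forces
 * The gather above already issues pme_gpu_copy_output_forces(), so the forces end
 * up in the pinned staging buffer returned via pme_gpu_getOutput().
 */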
1274
1275 void * pme_gpu_get_kernelparam_coordinates(const PmeGpu *pmeGpu)
1276 {
1277     if (pmeGpu && pmeGpu->kernelParams)
1278     {
1279         return pmeGpu->kernelParams->atoms.d_coordinates;
1280     }
1281     else
1282     {
1283         return nullptr;
1284     }
1285
1286 }