2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2016,2017,2018,2019,2020, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
38 * \brief This file contains internal function implementations
39 * for performing the PME calculations on GPU.
41 * Note that this file is compiled as regular C++ source in OpenCL builds, but
42 * it is treated as CUDA source in CUDA-enabled GPU builds.
44 * \author Aleksei Iupinov <a.yupinov@gmail.com>
45 * \ingroup module_ewald
50 #include "pme_gpu_internal.h"
58 #include "gromacs/ewald/ewald_utils.h"
59 #include "gromacs/gpu_utils/device_context.h"
60 #include "gromacs/gpu_utils/device_stream.h"
61 #include "gromacs/gpu_utils/gpu_utils.h"
62 #include "gromacs/math/invertmatrix.h"
63 #include "gromacs/math/units.h"
64 #include "gromacs/timing/gpu_timing.h"
65 #include "gromacs/utility/exceptions.h"
66 #include "gromacs/utility/fatalerror.h"
67 #include "gromacs/utility/gmxassert.h"
68 #include "gromacs/utility/logger.h"
69 #include "gromacs/utility/stringutil.h"
71 #if GMX_GPU == GMX_GPU_CUDA
72 # include "gromacs/gpu_utils/pmalloc_cuda.h"
75 #elif GMX_GPU == GMX_GPU_OPENCL
76 # include "gromacs/gpu_utils/gmxopencl.h"
79 #include "gromacs/ewald/pme.h"
81 #include "pme_gpu_3dfft.h"
82 #include "pme_gpu_calculate_splines.h"
83 #include "pme_gpu_constants.h"
84 #include "pme_gpu_program_impl.h"
85 #include "pme_gpu_timings.h"
86 #include "pme_gpu_types.h"
87 #include "pme_gpu_types_host.h"
88 #include "pme_gpu_types_host_impl.h"
90 #include "pme_internal.h"
91 #include "pme_solve.h"
95 * Atom limit above which it is advantageous to turn on
96 * recalculation of the splines in the gather kernel and to use fewer threads per atom in the spline and spread kernels
98 constexpr int c_pmeGpuPerformanceAtomLimit = 23000;
101 * Wrapper for getting a pointer to the plain C++ part of the GPU kernel parameters structure.
103 * \param[in] pmeGpu The PME GPU structure.
104 * \returns The pointer to the kernel parameters.
106 static PmeGpuKernelParamsBase* pme_gpu_get_kernel_params_base_ptr(const PmeGpu* pmeGpu)
108 // reinterpret_cast is needed because the derived CUDA structure is not known in this file
109 auto* kernelParamsPtr = reinterpret_cast<PmeGpuKernelParamsBase*>(pmeGpu->kernelParams.get());
110 return kernelParamsPtr;
114 * Atom data block size (in terms of number of atoms).
115 * This is the least common multiple of number of atoms processed by
116 * a single block/workgroup of the spread and gather kernels.
117 * The GPU atom data buffers must be padded, which means that
118 * the numbers of atoms used for determining the size of the memory
119 * allocation must be divisible by this.
121 constexpr int c_pmeAtomDataBlockSize = 64;
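/* Illustrative arithmetic (added note, not in the original source): with c_pmeAtomDataBlockSize = 64,
 * an input of e.g. 100 atoms is padded up to 128 allocated atom slots (two blocks of 64); the extra
 * slots are cleared so the spread/gather kernels can run full blocks without bounds checks.
 * See pme_gpu_reinit_atoms() below for the actual rounding. */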
123 int pme_gpu_get_atom_data_block_size()
125 return c_pmeAtomDataBlockSize;
128 void pme_gpu_synchronize(const PmeGpu* pmeGpu)
130 pmeGpu->archSpecific->pmeStream_.synchronize();
133 void pme_gpu_alloc_energy_virial(PmeGpu* pmeGpu)
135 const size_t energyAndVirialSize = c_virialAndEnergyCount * sizeof(float);
138 pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
139 "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
141 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
143 allocateDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy[gridIndex],
144 c_virialAndEnergyCount, pmeGpu->archSpecific->deviceContext_);
145 pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_virialAndEnergy[gridIndex]), energyAndVirialSize);
149 void pme_gpu_free_energy_virial(PmeGpu* pmeGpu)
151 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
153 freeDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy[gridIndex]);
154 pfree(pmeGpu->staging.h_virialAndEnergy[gridIndex]);
155 pmeGpu->staging.h_virialAndEnergy[gridIndex] = nullptr;
159 void pme_gpu_clear_energy_virial(const PmeGpu* pmeGpu)
161 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
163 clearDeviceBufferAsync(&pmeGpu->kernelParams->constants.d_virialAndEnergy[gridIndex], 0,
164 c_virialAndEnergyCount, pmeGpu->archSpecific->pmeStream_);
168 void pme_gpu_realloc_and_copy_bspline_values(PmeGpu* pmeGpu, const int gridIndex)
171 pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
172 "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
173 GMX_ASSERT(gridIndex < pmeGpu->common->ngrids,
174 "Invalid combination of gridIndex and number of grids");
176 const int splineValuesOffset[DIM] = { 0, pmeGpu->kernelParams->grid.realGridSize[XX],
177 pmeGpu->kernelParams->grid.realGridSize[XX]
178 + pmeGpu->kernelParams->grid.realGridSize[YY] };
179 memcpy(&pmeGpu->kernelParams->grid.splineValuesOffset, &splineValuesOffset, sizeof(splineValuesOffset));
181 const int newSplineValuesSize = pmeGpu->kernelParams->grid.realGridSize[XX]
182 + pmeGpu->kernelParams->grid.realGridSize[YY]
183 + pmeGpu->kernelParams->grid.realGridSize[ZZ];
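/* Layout note (added for clarity): the three per-dimension B-spline moduli are packed back-to-back
 * into one device buffer, X first, then Y, then Z, which is why the offsets are {0, nx, nx + ny}
 * and the total size is nx + ny + nz. For example, a 60 x 64 x 72 grid gives offsets {0, 60, 124}
 * and 196 floats in total. */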
184 const bool shouldRealloc = (newSplineValuesSize > pmeGpu->archSpecific->splineValuesSize[gridIndex]);
185 reallocateDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli[gridIndex],
186 newSplineValuesSize, &pmeGpu->archSpecific->splineValuesSize[gridIndex],
187 &pmeGpu->archSpecific->splineValuesCapacity[gridIndex],
188 pmeGpu->archSpecific->deviceContext_);
191 /* Reallocate the host buffer */
192 pfree(pmeGpu->staging.h_splineModuli[gridIndex]);
193 pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_splineModuli[gridIndex]),
194 newSplineValuesSize * sizeof(float));
196 for (int i = 0; i < DIM; i++)
198 memcpy(pmeGpu->staging.h_splineModuli[gridIndex] + splineValuesOffset[i],
199 pmeGpu->common->bsp_mod[i].data(), pmeGpu->common->bsp_mod[i].size() * sizeof(float));
201 /* TODO: pin original buffer instead! */
202 copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli[gridIndex],
203 pmeGpu->staging.h_splineModuli[gridIndex], 0, newSplineValuesSize,
204 pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
207 void pme_gpu_free_bspline_values(const PmeGpu* pmeGpu)
209 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
211 pfree(pmeGpu->staging.h_splineModuli[gridIndex]);
212 freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli[gridIndex]);
216 void pme_gpu_realloc_forces(PmeGpu* pmeGpu)
218 const size_t newForcesSize = pmeGpu->nAtomsAlloc * DIM;
219 GMX_ASSERT(newForcesSize > 0, "Bad number of atoms in PME GPU");
220 reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, newForcesSize,
221 &pmeGpu->archSpecific->forcesSize, &pmeGpu->archSpecific->forcesSizeAlloc,
222 pmeGpu->archSpecific->deviceContext_);
223 pmeGpu->staging.h_forces.reserveWithPadding(pmeGpu->nAtomsAlloc);
224 pmeGpu->staging.h_forces.resizeWithPadding(pmeGpu->kernelParams->atoms.nAtoms);
227 void pme_gpu_free_forces(const PmeGpu* pmeGpu)
229 freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces);
232 void pme_gpu_copy_input_forces(PmeGpu* pmeGpu)
234 GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
235 float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
236 copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, h_forcesFloat, 0,
237 DIM * pmeGpu->kernelParams->atoms.nAtoms, pmeGpu->archSpecific->pmeStream_,
238 pmeGpu->settings.transferKind, nullptr);
241 void pme_gpu_copy_output_forces(PmeGpu* pmeGpu)
243 GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
244 float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
245 copyFromDeviceBuffer(h_forcesFloat, &pmeGpu->kernelParams->atoms.d_forces, 0,
246 DIM * pmeGpu->kernelParams->atoms.nAtoms, pmeGpu->archSpecific->pmeStream_,
247 pmeGpu->settings.transferKind, nullptr);
250 void pme_gpu_realloc_and_copy_input_coefficients(const PmeGpu* pmeGpu,
251 const float* h_coefficients,
254 GMX_ASSERT(h_coefficients, "Bad host-side charge buffer in PME GPU");
255 const size_t newCoefficientsSize = pmeGpu->nAtomsAlloc;
256 GMX_ASSERT(newCoefficientsSize > 0, "Bad number of atoms in PME GPU");
257 reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex],
258 newCoefficientsSize, &pmeGpu->archSpecific->coefficientsSize[gridIndex],
259 &pmeGpu->archSpecific->coefficientsCapacity[gridIndex],
260 pmeGpu->archSpecific->deviceContext_);
261 copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex],
262 const_cast<float*>(h_coefficients), 0, pmeGpu->kernelParams->atoms.nAtoms,
263 pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
265 const size_t paddingIndex = pmeGpu->kernelParams->atoms.nAtoms;
266 const size_t paddingCount = pmeGpu->nAtomsAlloc - paddingIndex;
267 if (paddingCount > 0)
269 clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex], paddingIndex,
270 paddingCount, pmeGpu->archSpecific->pmeStream_);
274 void pme_gpu_free_coefficients(const PmeGpu* pmeGpu)
276 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
278 freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex]);
282 void pme_gpu_realloc_spline_data(PmeGpu* pmeGpu)
284 const int order = pmeGpu->common->pme_order;
285 const int newSplineDataSize = DIM * order * pmeGpu->nAtomsAlloc;
286 GMX_ASSERT(newSplineDataSize > 0, "Bad number of atoms in PME GPU");
287 /* Two arrays of the same size */
288 const bool shouldRealloc = (newSplineDataSize > pmeGpu->archSpecific->splineDataSize);
289 int currentSizeTemp = pmeGpu->archSpecific->splineDataSize;
290 int currentSizeTempAlloc = pmeGpu->archSpecific->splineDataSizeAlloc;
291 reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta, newSplineDataSize, &currentSizeTemp,
292 &currentSizeTempAlloc, pmeGpu->archSpecific->deviceContext_);
293 reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta, newSplineDataSize,
294 &pmeGpu->archSpecific->splineDataSize, &pmeGpu->archSpecific->splineDataSizeAlloc,
295 pmeGpu->archSpecific->deviceContext_);
296 // The host-side reallocation, only needed when the device buffers grew (shouldRealloc)
299 pfree(pmeGpu->staging.h_theta);
300 pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_theta), newSplineDataSize * sizeof(float));
301 pfree(pmeGpu->staging.h_dtheta);
302 pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_dtheta), newSplineDataSize * sizeof(float));
306 void pme_gpu_free_spline_data(const PmeGpu* pmeGpu)
308 /* Two arrays of the same size */
309 freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta);
310 freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta);
311 pfree(pmeGpu->staging.h_theta);
312 pfree(pmeGpu->staging.h_dtheta);
315 void pme_gpu_realloc_grid_indices(PmeGpu* pmeGpu)
317 const size_t newIndicesSize = DIM * pmeGpu->nAtomsAlloc;
318 GMX_ASSERT(newIndicesSize > 0, "Bad number of atoms in PME GPU");
319 reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices, newIndicesSize,
320 &pmeGpu->archSpecific->gridlineIndicesSize,
321 &pmeGpu->archSpecific->gridlineIndicesSizeAlloc,
322 pmeGpu->archSpecific->deviceContext_);
323 pfree(pmeGpu->staging.h_gridlineIndices);
324 pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_gridlineIndices), newIndicesSize * sizeof(int));
327 void pme_gpu_free_grid_indices(const PmeGpu* pmeGpu)
329 freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices);
330 pfree(pmeGpu->staging.h_gridlineIndices);
333 void pme_gpu_realloc_grids(PmeGpu* pmeGpu)
335 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
337 const int newRealGridSize = kernelParamsPtr->grid.realGridSizePadded[XX]
338 * kernelParamsPtr->grid.realGridSizePadded[YY]
339 * kernelParamsPtr->grid.realGridSizePadded[ZZ];
340 const int newComplexGridSize = kernelParamsPtr->grid.complexGridSizePadded[XX]
341 * kernelParamsPtr->grid.complexGridSizePadded[YY]
342 * kernelParamsPtr->grid.complexGridSizePadded[ZZ] * 2;
343 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
345 // Multiplied by 2 because the complex grid size is counted in complex numbers, while all allocations/pointers are in floats
346 if (pmeGpu->archSpecific->performOutOfPlaceFFT)
348 /* 2 separate grids */
349 reallocateDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid[gridIndex], newComplexGridSize,
350 &pmeGpu->archSpecific->complexGridSize[gridIndex],
351 &pmeGpu->archSpecific->complexGridCapacity[gridIndex],
352 pmeGpu->archSpecific->deviceContext_);
353 reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid[gridIndex], newRealGridSize,
354 &pmeGpu->archSpecific->realGridSize[gridIndex],
355 &pmeGpu->archSpecific->realGridCapacity[gridIndex],
356 pmeGpu->archSpecific->deviceContext_);
360 /* A single buffer so that any grid will fit */
361 const int newGridsSize = std::max(newRealGridSize, newComplexGridSize);
362 reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid[gridIndex], newGridsSize,
363 &pmeGpu->archSpecific->realGridSize[gridIndex],
364 &pmeGpu->archSpecific->realGridCapacity[gridIndex],
365 pmeGpu->archSpecific->deviceContext_);
366 kernelParamsPtr->grid.d_fourierGrid[gridIndex] = kernelParamsPtr->grid.d_realGrid[gridIndex];
367 pmeGpu->archSpecific->complexGridSize[gridIndex] =
368 pmeGpu->archSpecific->realGridSize[gridIndex];
369 // the size might get used later for copying the grid
374 void pme_gpu_free_grids(const PmeGpu* pmeGpu)
376 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
378 if (pmeGpu->archSpecific->performOutOfPlaceFFT)
380 freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_fourierGrid[gridIndex]);
382 freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid[gridIndex]);
386 void pme_gpu_clear_grids(const PmeGpu* pmeGpu)
388 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
390 clearDeviceBufferAsync(&pmeGpu->kernelParams->grid.d_realGrid[gridIndex], 0,
391 pmeGpu->archSpecific->realGridSize[gridIndex],
392 pmeGpu->archSpecific->pmeStream_);
396 void pme_gpu_realloc_and_copy_fract_shifts(PmeGpu* pmeGpu)
398 pme_gpu_free_fract_shifts(pmeGpu);
400 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
402 const int nx = kernelParamsPtr->grid.realGridSize[XX];
403 const int ny = kernelParamsPtr->grid.realGridSize[YY];
404 const int nz = kernelParamsPtr->grid.realGridSize[ZZ];
405 const int cellCount = c_pmeNeighborUnitcellCount;
406 const int gridDataOffset[DIM] = { 0, cellCount * nx, cellCount * (nx + ny) };
408 memcpy(kernelParamsPtr->grid.tablesOffsets, &gridDataOffset, sizeof(gridDataOffset));
410 const int newFractShiftsSize = cellCount * (nx + ny + nz);
412 initParamLookupTable(&kernelParamsPtr->grid.d_fractShiftsTable,
413 &kernelParamsPtr->fractShiftsTableTexture, pmeGpu->common->fsh.data(),
414 newFractShiftsSize, pmeGpu->archSpecific->deviceContext_);
416 initParamLookupTable(&kernelParamsPtr->grid.d_gridlineIndicesTable,
417 &kernelParamsPtr->gridlineIndicesTableTexture, pmeGpu->common->nn.data(),
418 newFractShiftsSize, pmeGpu->archSpecific->deviceContext_);
421 void pme_gpu_free_fract_shifts(const PmeGpu* pmeGpu)
423 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
424 #if GMX_GPU == GMX_GPU_CUDA
425 destroyParamLookupTable(&kernelParamsPtr->grid.d_fractShiftsTable,
426 kernelParamsPtr->fractShiftsTableTexture);
427 destroyParamLookupTable(&kernelParamsPtr->grid.d_gridlineIndicesTable,
428 kernelParamsPtr->gridlineIndicesTableTexture);
429 #elif GMX_GPU == GMX_GPU_OPENCL
430 freeDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable);
431 freeDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable);
435 bool pme_gpu_stream_query(const PmeGpu* pmeGpu)
437 return haveStreamTasksCompleted(pmeGpu->archSpecific->pmeStream_);
440 void pme_gpu_copy_input_gather_grid(const PmeGpu* pmeGpu, const float* h_grid, const int gridIndex)
442 copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid[gridIndex], h_grid, 0,
443 pmeGpu->archSpecific->realGridSize[gridIndex],
444 pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
447 void pme_gpu_copy_output_spread_grid(const PmeGpu* pmeGpu, float* h_grid, const int gridIndex)
449 copyFromDeviceBuffer(h_grid, &pmeGpu->kernelParams->grid.d_realGrid[gridIndex], 0,
450 pmeGpu->archSpecific->realGridSize[gridIndex],
451 pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
452 pmeGpu->archSpecific->syncSpreadGridD2H.markEvent(pmeGpu->archSpecific->pmeStream_);
455 void pme_gpu_copy_output_spread_atom_data(const PmeGpu* pmeGpu)
457 const size_t splinesCount = DIM * pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order;
458 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
459 copyFromDeviceBuffer(pmeGpu->staging.h_dtheta, &kernelParamsPtr->atoms.d_dtheta, 0, splinesCount,
460 pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
461 copyFromDeviceBuffer(pmeGpu->staging.h_theta, &kernelParamsPtr->atoms.d_theta, 0, splinesCount,
462 pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
463 copyFromDeviceBuffer(pmeGpu->staging.h_gridlineIndices, &kernelParamsPtr->atoms.d_gridlineIndices,
464 0, kernelParamsPtr->atoms.nAtoms * DIM, pmeGpu->archSpecific->pmeStream_,
465 pmeGpu->settings.transferKind, nullptr);
468 void pme_gpu_copy_input_gather_atom_data(const PmeGpu* pmeGpu)
470 const size_t splinesCount = DIM * pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order;
471 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
473 // TODO: could clear only the padding and not the whole thing, but this is test-exclusive code anyway
474 clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_gridlineIndices, 0, pmeGpu->nAtomsAlloc * DIM,
475 pmeGpu->archSpecific->pmeStream_);
476 clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_dtheta, 0,
477 pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
478 pmeGpu->archSpecific->pmeStream_);
479 clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_theta, 0,
480 pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
481 pmeGpu->archSpecific->pmeStream_);
483 copyToDeviceBuffer(&kernelParamsPtr->atoms.d_dtheta, pmeGpu->staging.h_dtheta, 0, splinesCount,
484 pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
485 copyToDeviceBuffer(&kernelParamsPtr->atoms.d_theta, pmeGpu->staging.h_theta, 0, splinesCount,
486 pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
487 copyToDeviceBuffer(&kernelParamsPtr->atoms.d_gridlineIndices, pmeGpu->staging.h_gridlineIndices,
488 0, kernelParamsPtr->atoms.nAtoms * DIM, pmeGpu->archSpecific->pmeStream_,
489 pmeGpu->settings.transferKind, nullptr);
492 void pme_gpu_sync_spread_grid(const PmeGpu* pmeGpu)
494 pmeGpu->archSpecific->syncSpreadGridD2H.waitForEvent();
497 /*! \brief Internal GPU initialization for PME.
499 * \param[in] pmeGpu GPU PME data.
500 * \param[in] deviceContext GPU context.
501 * \param[in] deviceStream GPU stream.
503 static void pme_gpu_init_internal(PmeGpu* pmeGpu, const DeviceContext& deviceContext, const DeviceStream& deviceStream)
505 #if GMX_GPU == GMX_GPU_CUDA
506 // Prepare to use the device that this PME task was assigned earlier.
507 // Other entities, such as CUDA timing events, are known to implicitly use the device context.
508 CU_RET_ERR(cudaSetDevice(deviceContext.deviceInfo().id), "Switching to PME CUDA device");
511 /* Allocate the target-specific structures */
512 pmeGpu->archSpecific.reset(new PmeGpuSpecific(deviceContext, deviceStream));
513 pmeGpu->kernelParams.reset(new PmeGpuKernelParams());
515 pmeGpu->archSpecific->performOutOfPlaceFFT = true;
516 /* This should give better performance, according to the cuFFT documentation.
517 * The performance seems to be the same though.
518 * TODO: PME could also try to pick up nice grid sizes (with factors of 2, 3, 5, 7).
521 #if GMX_GPU == GMX_GPU_CUDA
522 pmeGpu->maxGridWidthX = deviceContext.deviceInfo().prop.maxGridSize[0];
523 #elif GMX_GPU == GMX_GPU_OPENCL
524 pmeGpu->maxGridWidthX = INT32_MAX / 2;
525 // TODO: is there really no global work size limit in OpenCL?
529 void pme_gpu_reinit_3dfft(const PmeGpu* pmeGpu)
531 if (pme_gpu_settings(pmeGpu).performGPUFFT)
533 pmeGpu->archSpecific->fftSetup.resize(0);
534 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
536 pmeGpu->archSpecific->fftSetup.push_back(std::make_unique<GpuParallel3dFft>(pmeGpu, gridIndex));
541 void pme_gpu_destroy_3dfft(const PmeGpu* pmeGpu)
543 pmeGpu->archSpecific->fftSetup.resize(0);
546 void pme_gpu_getEnergyAndVirial(const gmx_pme_t& pme, const float lambda, PmeOutput* output)
548 const PmeGpu* pmeGpu = pme.gpu;
550 GMX_ASSERT(lambda == 1.0 || pmeGpu->common->ngrids == 2,
551 "Invalid combination of lambda and number of grids");
553 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
555 for (int j = 0; j < c_virialAndEnergyCount; j++)
557 GMX_ASSERT(std::isfinite(pmeGpu->staging.h_virialAndEnergy[gridIndex][j]),
558 "PME GPU produces incorrect energy/virial.");
561 for (int dim1 = 0; dim1 < DIM; dim1++)
563 for (int dim2 = 0; dim2 < DIM; dim2++)
565 output->coulombVirial_[dim1][dim2] = 0;
568 output->coulombEnergy_ = 0;
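/* Staging-buffer layout (added note, as used below): indices 0-5 hold the XX, YY, ZZ, XY, XZ, YZ
 * virial components (accumulated with a 0.25 factor and mirrored into the symmetric off-diagonal
 * elements), and index 6 holds the energy (0.5 factor). With two grids the contributions are
 * mixed with weights (1 - lambda) and lambda. */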
570 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
572 if (pmeGpu->common->ngrids == 2)
574 scale = gridIndex == 0 ? (1.0 - lambda) : lambda;
576 output->coulombVirial_[XX][XX] +=
577 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][0];
578 output->coulombVirial_[YY][YY] +=
579 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][1];
580 output->coulombVirial_[ZZ][ZZ] +=
581 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][2];
582 output->coulombVirial_[XX][YY] +=
583 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][3];
584 output->coulombVirial_[YY][XX] +=
585 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][3];
586 output->coulombVirial_[XX][ZZ] +=
587 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][4];
588 output->coulombVirial_[ZZ][XX] +=
589 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][4];
590 output->coulombVirial_[YY][ZZ] +=
591 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][5];
592 output->coulombVirial_[ZZ][YY] +=
593 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][5];
594 output->coulombEnergy_ += scale * 0.5F * pmeGpu->staging.h_virialAndEnergy[gridIndex][6];
596 if (pmeGpu->common->ngrids > 1)
598 output->coulombDvdl_ = 0.5F
599 * (pmeGpu->staging.h_virialAndEnergy[FEP_STATE_B][6]
600 - pmeGpu->staging.h_virialAndEnergy[FEP_STATE_A][6]);
604 /*! \brief Sets the force-related members in \p output
606 * \param[in] pmeGpu PME GPU data structure
607 * \param[out] output Pointer to PME output data structure
609 static void pme_gpu_getForceOutput(PmeGpu* pmeGpu, PmeOutput* output)
611 output->haveForceOutput_ = !pmeGpu->settings.useGpuForceReduction;
612 if (output->haveForceOutput_)
614 output->forces_ = pmeGpu->staging.h_forces;
618 PmeOutput pme_gpu_getOutput(const gmx_pme_t& pme, const bool computeEnergyAndVirial, const real lambdaQ)
620 PmeGpu* pmeGpu = pme.gpu;
624 pme_gpu_getForceOutput(pmeGpu, &output);
626 if (computeEnergyAndVirial)
628 if (pme_gpu_settings(pmeGpu).performGPUSolve)
630 pme_gpu_getEnergyAndVirial(pme, lambdaQ, &output);
634 get_pme_ener_vir_q(pme.solve_work, pme.nthread, &output);
640 void pme_gpu_update_input_box(PmeGpu gmx_unused* pmeGpu, const matrix gmx_unused box)
643 GMX_THROW(gmx::NotImplementedError("PME is implemented for single-precision only on GPU"));
646 pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
647 auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
648 kernelParamsPtr->current.boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
649 GMX_ASSERT(kernelParamsPtr->current.boxVolume != 0.0F, "Zero volume of the unit cell");
651 gmx::invertBoxMatrix(scaledBox, recipBox);
653 /* The GPU recipBox is transposed as compared to the CPU recipBox.
654 * Spread uses matrix columns (while solve and gather use rows).
655 * There is no particular reason for this; it might be further rethought/optimized for better access patterns.
657 const real newRecipBox[DIM][DIM] = { { recipBox[XX][XX], recipBox[YY][XX], recipBox[ZZ][XX] },
658 { 0.0, recipBox[YY][YY], recipBox[ZZ][YY] },
659 { 0.0, 0.0, recipBox[ZZ][ZZ] } };
660 memcpy(kernelParamsPtr->current.recipBox, newRecipBox, sizeof(matrix));
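/* Added note: GROMACS stores simulation boxes as lower-triangular matrices
 * (box[XX][YY] = box[XX][ZZ] = box[YY][ZZ] = 0), so recipBox is lower-triangular as well and the
 * transposed copy stored above is upper-triangular; that is why the entries below the diagonal
 * can be written as literal zeros. */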
664 /*! \brief \libinternal
665 * (Re-)initializes all the PME GPU data related to the grid size and cut-off.
667 * \param[in] pmeGpu The PME GPU structure.
669 static void pme_gpu_reinit_grids(PmeGpu* pmeGpu)
671 auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
674 pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
675 "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
677 kernelParamsPtr->grid.ewaldFactor =
678 (M_PI * M_PI) / (pmeGpu->common->ewaldcoeff_q * pmeGpu->common->ewaldcoeff_q);
679 /* The grid size variants */
680 for (int i = 0; i < DIM; i++)
682 kernelParamsPtr->grid.realGridSize[i] = pmeGpu->common->nk[i];
683 kernelParamsPtr->grid.realGridSizeFP[i] =
684 static_cast<float>(kernelParamsPtr->grid.realGridSize[i]);
685 kernelParamsPtr->grid.realGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
687 // The complex grid currently uses no padding;
688 // if it starts to do so, then another test should be added for that
689 kernelParamsPtr->grid.complexGridSize[i] = kernelParamsPtr->grid.realGridSize[i];
690 kernelParamsPtr->grid.complexGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
692 /* FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
693 if (!pme_gpu_settings(pmeGpu).performGPUFFT)
695 // This allows the GPU spread grid and the CPU fftgrid to have the same layout, so that we can copy the data directly
696 kernelParamsPtr->grid.realGridSizePadded[ZZ] =
697 (kernelParamsPtr->grid.realGridSize[ZZ] / 2 + 1) * 2;
699 /* GPU FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
700 kernelParamsPtr->grid.complexGridSize[ZZ] /= 2;
701 kernelParamsPtr->grid.complexGridSize[ZZ]++;
702 kernelParamsPtr->grid.complexGridSizePadded[ZZ] = kernelParamsPtr->grid.complexGridSize[ZZ];
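/* Worked example (added): for nk[ZZ] = 72, the R2C transform produces 72 / 2 + 1 = 37 complex
 * numbers along the minor dimension, so complexGridSize[ZZ] = 37; in the CPU-FFT case above,
 * realGridSizePadded[ZZ] becomes (72 / 2 + 1) * 2 = 74 floats to match the CPU fftgrid layout. */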
704 pme_gpu_realloc_and_copy_fract_shifts(pmeGpu);
705 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
707 pme_gpu_realloc_and_copy_bspline_values(pmeGpu, gridIndex);
710 pme_gpu_realloc_grids(pmeGpu);
711 pme_gpu_reinit_3dfft(pmeGpu);
714 /* Several GPU functions that refer to the CPU PME data live here.
715 * We would like to keep these away from the GPU-framework specific code for clarity,
716 * and to avoid compilation issues with MPI.
719 /*! \brief \libinternal
720 * Copies everything useful from the PME CPU to the PME GPU structure.
721 * The goal is to minimize interaction with the PME CPU structure in the GPU code.
723 * \param[in] pme The PME structure.
725 static void pme_gpu_copy_common_data_from(const gmx_pme_t* pme)
727 /* TODO: Consider refactoring the CPU PME code to use the same structure,
728 * so that this function becomes 2 lines */
729 PmeGpu* pmeGpu = pme->gpu;
730 pmeGpu->common->ngrids = pme->bFEP_q ? 2 : 1;
731 pmeGpu->common->epsilon_r = pme->epsilon_r;
732 pmeGpu->common->ewaldcoeff_q = pme->ewaldcoeff_q;
733 pmeGpu->common->nk[XX] = pme->nkx;
734 pmeGpu->common->nk[YY] = pme->nky;
735 pmeGpu->common->nk[ZZ] = pme->nkz;
736 pmeGpu->common->pme_order = pme->pme_order;
737 if (pmeGpu->common->pme_order != c_pmeGpuOrder)
739 GMX_THROW(gmx::NotImplementedError("pme_order != 4 is not implemented!"));
741 for (int i = 0; i < DIM; i++)
743 pmeGpu->common->bsp_mod[i].assign(pme->bsp_mod[i], pme->bsp_mod[i] + pmeGpu->common->nk[i]);
745 const int cellCount = c_pmeNeighborUnitcellCount;
746 pmeGpu->common->fsh.resize(0);
747 pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshx, pme->fshx + cellCount * pme->nkx);
748 pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshy, pme->fshy + cellCount * pme->nky);
749 pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshz, pme->fshz + cellCount * pme->nkz);
750 pmeGpu->common->nn.resize(0);
751 pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnx, pme->nnx + cellCount * pme->nkx);
752 pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nny, pme->nny + cellCount * pme->nky);
753 pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnz, pme->nnz + cellCount * pme->nkz);
754 pmeGpu->common->runMode = pme->runMode;
755 pmeGpu->common->isRankPmeOnly = !pme->bPPnode;
756 pmeGpu->common->boxScaler = pme->boxScaler;
759 /*! \libinternal \brief
760 * uses heuristics to select the best performing PME gather and scatter kernels
762 * \param[in,out] pmeGpu The PME GPU structure.
764 static void pme_gpu_select_best_performing_pme_spreadgather_kernels(PmeGpu* pmeGpu)
766 if (pmeGpu->kernelParams->atoms.nAtoms > c_pmeGpuPerformanceAtomLimit && (GMX_GPU == GMX_GPU_CUDA))
768 pmeGpu->settings.threadsPerAtom = ThreadsPerAtom::Order;
769 pmeGpu->settings.recalculateSplines = true;
773 pmeGpu->settings.threadsPerAtom = ThreadsPerAtom::OrderSquared;
774 pmeGpu->settings.recalculateSplines = false;
779 /*! \libinternal \brief
780 * Initializes the PME GPU data at the beginning of the run.
781 * TODO: this should become PmeGpu::PmeGpu()
783 * \param[in,out] pme The PME structure.
784 * \param[in] deviceContext The GPU context.
785 * \param[in] deviceStream The GPU stream.
786 * \param[in,out] pmeGpuProgram The handle to the program/kernel data created outside (e.g. in unit tests/runner)
788 static void pme_gpu_init(gmx_pme_t* pme,
789 const DeviceContext& deviceContext,
790 const DeviceStream& deviceStream,
791 const PmeGpuProgram* pmeGpuProgram)
793 pme->gpu = new PmeGpu();
794 PmeGpu* pmeGpu = pme->gpu;
795 changePinningPolicy(&pmeGpu->staging.h_forces, pme_get_pinning_policy());
796 pmeGpu->common = std::make_shared<PmeShared>();
798 /* These settings are set here for the whole run; dynamic ones are set in pme_gpu_reinit() */
799 /* A convenience variable. */
800 pmeGpu->settings.useDecomposition = (pme->nnodes != 1);
801 /* TODO: CPU gather with GPU spread is broken due to different theta/dtheta layout. */
802 pmeGpu->settings.performGPUGather = true;
803 // By default GPU-side reduction is off (explicitly set here for tests, otherwise reset per-step)
804 pmeGpu->settings.useGpuForceReduction = false;
806 pme_gpu_set_testing(pmeGpu, false);
808 GMX_ASSERT(pmeGpuProgram != nullptr, "GPU kernels must be already compiled");
809 pmeGpu->programHandle_ = pmeGpuProgram;
811 pmeGpu->initializedClfftLibrary_ = std::make_unique<gmx::ClfftInitializer>();
813 pme_gpu_init_internal(pmeGpu, deviceContext, deviceStream);
815 pme_gpu_copy_common_data_from(pme);
816 pme_gpu_alloc_energy_virial(pmeGpu);
818 GMX_ASSERT(pmeGpu->common->epsilon_r != 0.0F, "PME GPU: bad electrostatic coefficient");
820 auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
821 kernelParamsPtr->constants.elFactor = ONE_4PI_EPS0 / pmeGpu->common->epsilon_r;
824 void pme_gpu_get_real_grid_sizes(const PmeGpu* pmeGpu, gmx::IVec* gridSize, gmx::IVec* paddedGridSize)
826 GMX_ASSERT(gridSize != nullptr, "");
827 GMX_ASSERT(paddedGridSize != nullptr, "");
828 GMX_ASSERT(pmeGpu != nullptr, "");
829 auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
830 for (int i = 0; i < DIM; i++)
832 (*gridSize)[i] = kernelParamsPtr->grid.realGridSize[i];
833 (*paddedGridSize)[i] = kernelParamsPtr->grid.realGridSizePadded[i];
837 void pme_gpu_reinit(gmx_pme_t* pme,
838 const DeviceContext* deviceContext,
839 const DeviceStream* deviceStream,
840 const PmeGpuProgram* pmeGpuProgram)
842 GMX_ASSERT(pme != nullptr, "Need valid PME object");
846 GMX_RELEASE_ASSERT(deviceContext != nullptr,
847 "Device context can not be nullptr when setting up PME on GPU.");
848 GMX_RELEASE_ASSERT(deviceStream != nullptr,
849 "Device stream can not be nullptr when setting up PME on GPU.");
850 /* First-time initialization */
851 pme_gpu_init(pme, *deviceContext, *deviceStream, pmeGpuProgram);
855 /* After this call nothing in the GPU code should refer to the gmx_pme_t *pme itself - until the next pme_gpu_reinit */
856 pme_gpu_copy_common_data_from(pme);
858 /* GPU FFT will only get used for a single rank. */
859 pme->gpu->settings.performGPUFFT =
860 (pme->gpu->common->runMode == PmeRunMode::GPU) && !pme->gpu->settings.useDecomposition;
861 pme->gpu->settings.performGPUSolve = (pme->gpu->common->runMode == PmeRunMode::GPU);
863 /* Reinit active timers */
864 pme_gpu_reinit_timings(pme->gpu);
866 pme_gpu_reinit_grids(pme->gpu);
867 // Note: if timing the reinit launch overhead becomes more relevant
868 // (e.g. with regular PP-PME re-balancing), we should pass wcycle here.
869 pme_gpu_reinit_computation(pme, nullptr);
870 /* Clear the previous box - doesn't hurt, and forces the PME CPU recipbox
871 * update for mixed mode on grid switch. TODO: use shared recipbox field.
873 std::memset(pme->gpu->common->previousBox, 0, sizeof(pme->gpu->common->previousBox));
876 void pme_gpu_destroy(PmeGpu* pmeGpu)
878 /* Free lots of data */
879 pme_gpu_free_energy_virial(pmeGpu);
880 pme_gpu_free_bspline_values(pmeGpu);
881 pme_gpu_free_forces(pmeGpu);
882 pme_gpu_free_coefficients(pmeGpu);
883 pme_gpu_free_spline_data(pmeGpu);
884 pme_gpu_free_grid_indices(pmeGpu);
885 pme_gpu_free_fract_shifts(pmeGpu);
886 pme_gpu_free_grids(pmeGpu);
888 pme_gpu_destroy_3dfft(pmeGpu);
893 void pme_gpu_reinit_atoms(PmeGpu* pmeGpu, const int nAtoms, const real* chargesA, const real* chargesB)
895 auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
896 kernelParamsPtr->atoms.nAtoms = nAtoms;
897 const int block_size = pme_gpu_get_atom_data_block_size();
898 const int nAtomsNewPadded = ((nAtoms + block_size - 1) / block_size) * block_size;
899 const bool haveToRealloc = (pmeGpu->nAtomsAlloc < nAtomsNewPadded);
900 pmeGpu->nAtomsAlloc = nAtomsNewPadded;
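/* Example of the rounding above (added): with block_size = 64 and nAtoms = 23000,
 * nAtomsNewPadded = ((23000 + 63) / 64) * 64 = 23040, i.e. 40 padding slots whose coefficients
 * are cleared further below so that they contribute nothing to spreading. */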
903 GMX_RELEASE_ASSERT(false, "Only single precision supported");
904 GMX_UNUSED_VALUE(chargesA); GMX_UNUSED_VALUE(chargesB);
907 /* The reallocation could be skipped when !haveToRealloc, but the copy always needs to be performed */
908 pme_gpu_realloc_and_copy_input_coefficients(pmeGpu, reinterpret_cast<const float*>(chargesA), gridIndex);
910 if (chargesB != nullptr)
912 pme_gpu_realloc_and_copy_input_coefficients(
913 pmeGpu, reinterpret_cast<const float*>(chargesB), gridIndex);
917 /* Fill the second set of coefficients with chargesA as well to be able to avoid
918 * conditionals in the GPU kernels */
919 /* FIXME: This should be avoided by making a separate templated version of the
920 * relevant kernel(s) (probably only pme_gather_kernel). That would require a
921 * reduction of the current number of templated parameters of that kernel. */
922 pme_gpu_realloc_and_copy_input_coefficients(
923 pmeGpu, reinterpret_cast<const float*>(chargesA), gridIndex);
929 pme_gpu_realloc_forces(pmeGpu);
930 pme_gpu_realloc_spline_data(pmeGpu);
931 pme_gpu_realloc_grid_indices(pmeGpu);
933 pme_gpu_select_best_performing_pme_spreadgather_kernels(pmeGpu);
937 * Returns the raw timing event from the corresponding GpuRegionTimer (if timings are enabled).
938 * In CUDA the result can be a nullptr stub, per the GpuRegionTimer implementation.
940 * \param[in] pmeGpu The PME GPU data structure.
941 * \param[in] PMEStageId The PME GPU stage gtPME_ index from the enum in src/gromacs/timing/gpu_timing.h
943 static CommandEvent* pme_gpu_fetch_timing_event(const PmeGpu* pmeGpu, size_t PMEStageId)
945 CommandEvent* timingEvent = nullptr;
946 if (pme_gpu_timings_enabled(pmeGpu))
948 GMX_ASSERT(PMEStageId < pmeGpu->archSpecific->timingEvents.size(),
949 "Wrong PME GPU timing event index");
950 timingEvent = pmeGpu->archSpecific->timingEvents[PMEStageId].fetchNextEvent();
955 void pme_gpu_3dfft(const PmeGpu* pmeGpu, gmx_fft_direction dir, const int grid_index)
957 int timerId = (dir == GMX_FFT_REAL_TO_COMPLEX) ? gtPME_FFT_R2C : gtPME_FFT_C2R;
959 pme_gpu_start_timing(pmeGpu, timerId);
960 pmeGpu->archSpecific->fftSetup[grid_index]->perform3dFft(
961 dir, pme_gpu_fetch_timing_event(pmeGpu, timerId));
962 pme_gpu_stop_timing(pmeGpu, timerId);
966 * Given possibly large \p blockCount, returns a compact 1D or 2D grid for kernel scheduling,
967 * to minimize the number of unused blocks.
969 std::pair<int, int> inline pmeGpuCreateGrid(const PmeGpu* pmeGpu, int blockCount)
971 // How many maximum widths in X do we need (hopefully just one)
972 const int minRowCount = (blockCount + pmeGpu->maxGridWidthX - 1) / pmeGpu->maxGridWidthX;
973 // Trying to make things even
974 const int colCount = (blockCount + minRowCount - 1) / minRowCount;
975 GMX_ASSERT((colCount * minRowCount - blockCount) >= 0, "pmeGpuCreateGrid: totally wrong");
976 GMX_ASSERT((colCount * minRowCount - blockCount) < minRowCount,
977 "pmeGpuCreateGrid: excessive blocks");
978 return std::pair<int, int>(colCount, minRowCount);
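/* Usage illustration (added): if blockCount = 100000 and maxGridWidthX were 65535, then
 * minRowCount = 2 and colCount = 50000, giving a 50000 x 2 launch grid with no unused blocks;
 * on devices with a very large maxGridWidthX this degenerates to a 1D grid. */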
982 * Returns a pointer to the appropriate spline and spread kernel based on the input bool values
984 * \param[in] pmeGpu The PME GPU structure.
985 * \param[in] threadsPerAtom Controls whether we should use order or order*order threads per atom
986 * \param[in] writeSplinesToGlobal bool controlling if we should write spline data to global memory
987 * \param[in] numGrids Number of grids to use. numGrids == 2 if Coulomb is perturbed.
989 * \return Pointer to CUDA kernel
991 static auto selectSplineAndSpreadKernelPtr(const PmeGpu* pmeGpu,
992 ThreadsPerAtom threadsPerAtom,
993 bool writeSplinesToGlobal,
996 PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
997 if (writeSplinesToGlobal)
999 if (threadsPerAtom == ThreadsPerAtom::Order)
1003 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesThPerAtom4Dual;
1007 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesThPerAtom4Single;
1014 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesDual;
1018 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesSingle;
1024 if (threadsPerAtom == ThreadsPerAtom::Order)
1028 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Dual;
1032 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Single;
1039 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelDual;
1043 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelSingle;
1052 * Returns a pointer to the appropriate spline kernel based on the input bool values
1054 * \param[in] pmeGpu The PME GPU structure.
1055 * \param[in] threadsPerAtom Controls whether we should use order or order*order threads per atom
1056 * \param[in] writeSplinesToGlobal bool controlling if we should write spline data to global memory
1057 * \param[in] numGrids Number of grids to use. numGrids == 2 if Coulomb is perturbed.
1059 * \return Pointer to CUDA kernel
1061 static auto selectSplineKernelPtr(const PmeGpu* pmeGpu,
1062 ThreadsPerAtom threadsPerAtom,
1063 bool gmx_unused writeSplinesToGlobal,
1066 PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1068 writeSplinesToGlobal,
1069 "Spline data should always be written to global memory when just calculating splines");
1071 if (threadsPerAtom == ThreadsPerAtom::Order)
1075 kernelPtr = pmeGpu->programHandle_->impl_->splineKernelThPerAtom4Dual;
1079 kernelPtr = pmeGpu->programHandle_->impl_->splineKernelThPerAtom4Single;
1086 kernelPtr = pmeGpu->programHandle_->impl_->splineKernelDual;
1090 kernelPtr = pmeGpu->programHandle_->impl_->splineKernelSingle;
1097 * Returns a pointer to the appropriate spread kernel based on the input bool values
1099 * \param[in] pmeGpu The PME GPU structure.
1100 * \param[in] threadsPerAtom Controls whether we should use order or order*order threads per atom
1101 * \param[in] writeSplinesToGlobal bool controlling if we should write spline data to global memory
1102 * \param[in] numGrids Number of grids to use. numGrids == 2 if Coulomb is perturbed.
1104 * \return Pointer to CUDA kernel
1106 static auto selectSpreadKernelPtr(const PmeGpu* pmeGpu,
1107 ThreadsPerAtom threadsPerAtom,
1108 bool writeSplinesToGlobal,
1111 PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1112 if (writeSplinesToGlobal)
1114 if (threadsPerAtom == ThreadsPerAtom::Order)
1118 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelThPerAtom4Dual;
1121 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelThPerAtom4Single;
1128 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelDual;
1132 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelSingle;
1138 /* If we are not saving the spline data, we need to recalculate it
1139 using the spline-and-spread kernel */
1140 if (threadsPerAtom == ThreadsPerAtom::Order)
1144 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Dual;
1148 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Single;
1155 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelDual;
1159 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelSingle;
1166 void pme_gpu_spread(const PmeGpu* pmeGpu,
1167 GpuEventSynchronizer* xReadyOnDevice,
1169 bool computeSplines,
1174 pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
1175 "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
1177 GMX_ASSERT(computeSplines || spreadCharges,
1178 "PME spline/spread kernel has invalid input (nothing to do)");
1179 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1180 GMX_ASSERT(kernelParamsPtr->atoms.nAtoms > 0, "No atom data in PME GPU spread");
1182 const size_t blockSize = pmeGpu->programHandle_->impl_->spreadWorkGroupSize;
1184 const int order = pmeGpu->common->pme_order;
1185 GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1186 const bool writeGlobal = pmeGpu->settings.copyAllOutputs;
1187 const int threadsPerAtom =
1188 (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
1189 const bool recalculateSplines = pmeGpu->settings.recalculateSplines;
1190 #if GMX_GPU == GMX_GPU_OPENCL
1191 GMX_ASSERT(pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::OrderSquared,
1192 "Only 16 threads per atom supported in OpenCL");
1193 GMX_ASSERT(!recalculateSplines, "Recalculating splines not supported in OpenCL");
1195 const int atomsPerBlock = blockSize / threadsPerAtom;
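/* Illustrative numbers (added): if the spread work-group size were 256 threads and
 * threadsPerAtom = 16 (order * order), then atomsPerBlock = 16, which divides
 * c_pmeAtomDataBlockSize = 64 as the assertion below requires, and blockCount becomes
 * nAtomsAlloc / 16 further down. */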
1197 // TODO: pick smaller block size in runtime if needed
1198 // (e.g. on 660 Ti where 50% occupancy is ~25% faster than 100% occupancy with RNAse (~17.8k atoms))
1199 // If doing so, change atomsPerBlock in the kernels as well.
1200 // TODO: test varying block sizes on modern arch-s as well
1201 // TODO: also consider using cudaFuncSetCacheConfig() for preferring shared memory on older architectures
1202 // (for spline data mostly)
1203 GMX_ASSERT(!(c_pmeAtomDataBlockSize % atomsPerBlock),
1204 "inconsistent atom data padding vs. spreading block size");
1206 // Ensure that coordinates are ready on the device before launching spread;
1207 // this is only needed with CUDA on PP+PME ranks; it is not needed on separate PME ranks,
1208 // in unit tests, or in OpenCL, as those cases use a single stream (hence xReadyOnDevice == nullptr).
1209 GMX_ASSERT(xReadyOnDevice != nullptr || (GMX_GPU != GMX_GPU_CUDA)
1210 || pmeGpu->common->isRankPmeOnly || pme_gpu_settings(pmeGpu).copyAllOutputs,
1211 "Need a valid coordinate synchronizer on PP+PME ranks with CUDA.");
1214 xReadyOnDevice->enqueueWaitEvent(pmeGpu->archSpecific->pmeStream_);
1217 const int blockCount = pmeGpu->nAtomsAlloc / atomsPerBlock;
1218 auto dimGrid = pmeGpuCreateGrid(pmeGpu, blockCount);
1220 if (pmeGpu->common->ngrids == 1)
1222 kernelParamsPtr->current.scale = 1.0;
1226 kernelParamsPtr->current.scale = 1.0 - lambda;
1229 KernelLaunchConfig config;
1230 config.blockSize[0] = order;
1231 config.blockSize[1] = (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? 1 : order);
1232 config.blockSize[2] = atomsPerBlock;
1233 config.gridSize[0] = dimGrid.first;
1234 config.gridSize[1] = dimGrid.second;
1237 PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1242 timingId = gtPME_SPLINEANDSPREAD;
1243 kernelPtr = selectSplineAndSpreadKernelPtr(pmeGpu, pmeGpu->settings.threadsPerAtom,
1244 writeGlobal || (!recalculateSplines),
1245 pmeGpu->common->ngrids);
1249 timingId = gtPME_SPLINE;
1250 kernelPtr = selectSplineKernelPtr(pmeGpu, pmeGpu->settings.threadsPerAtom,
1251 writeGlobal || (!recalculateSplines),
1252 pmeGpu->common->ngrids);
1257 timingId = gtPME_SPREAD;
1258 kernelPtr = selectSpreadKernelPtr(pmeGpu, pmeGpu->settings.threadsPerAtom,
1259 writeGlobal || (!recalculateSplines), pmeGpu->common->ngrids);
1263 pme_gpu_start_timing(pmeGpu, timingId);
1264 auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1265 #if c_canEmbedBuffers
1266 const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1268 const auto kernelArgs = prepareGpuKernelArguments(
1269 kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->atoms.d_theta,
1270 &kernelParamsPtr->atoms.d_dtheta, &kernelParamsPtr->atoms.d_gridlineIndices,
1271 &kernelParamsPtr->grid.d_realGrid[FEP_STATE_A], &kernelParamsPtr->grid.d_realGrid[FEP_STATE_B],
1272 &kernelParamsPtr->grid.d_fractShiftsTable, &kernelParamsPtr->grid.d_gridlineIndicesTable,
1273 &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_A],
1274 &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_B], &kernelParamsPtr->atoms.d_coordinates);
1277 launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent,
1278 "PME spline/spread", kernelArgs);
1279 pme_gpu_stop_timing(pmeGpu, timingId);
1281 const auto& settings = pmeGpu->settings;
1282 const bool copyBackGrid = spreadCharges && (!settings.performGPUFFT || settings.copyAllOutputs);
1285 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
1287 float* h_grid = h_grids[gridIndex];
1288 pme_gpu_copy_output_spread_grid(pmeGpu, h_grid, gridIndex);
1291 const bool copyBackAtomData =
1292 computeSplines && (!settings.performGPUGather || settings.copyAllOutputs);
1293 if (copyBackAtomData)
1295 pme_gpu_copy_output_spread_atom_data(pmeGpu);
1299 void pme_gpu_solve(const PmeGpu* pmeGpu,
1300 const int gridIndex,
1302 GridOrdering gridOrdering,
1303 bool computeEnergyAndVirial)
1306 pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
1307 "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
1308 GMX_ASSERT(gridIndex < pmeGpu->common->ngrids,
1309 "Invalid combination of gridIndex and number of grids");
1311 const auto& settings = pmeGpu->settings;
1312 const bool copyInputAndOutputGrid = !settings.performGPUFFT || settings.copyAllOutputs;
1314 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1316 float* h_gridFloat = reinterpret_cast<float*>(h_grid);
1317 if (copyInputAndOutputGrid)
1319 copyToDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid[gridIndex], h_gridFloat, 0,
1320 pmeGpu->archSpecific->complexGridSize[gridIndex],
1321 pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
1324 int majorDim = -1, middleDim = -1, minorDim = -1;
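// Added note: the GridOrdering name lists the axes from major (slowest) to minor (fastest),
// e.g. YZX means Y is the major dimension and X the minor (contiguous) one.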
1325 switch (gridOrdering)
1327 case GridOrdering::YZX:
1333 case GridOrdering::XYZ:
1339 default: GMX_ASSERT(false, "Implement grid ordering here and below for the kernel launch");
1342 const int maxBlockSize = pmeGpu->programHandle_->impl_->solveMaxWorkGroupSize;
1344 const int gridLineSize = pmeGpu->kernelParams->grid.complexGridSize[minorDim];
1345 const int gridLinesPerBlock = std::max(maxBlockSize / gridLineSize, 1);
1346 const int blocksPerGridLine = (gridLineSize + maxBlockSize - 1) / maxBlockSize;
1348 if (blocksPerGridLine == 1)
1350 cellsPerBlock = gridLineSize * gridLinesPerBlock;
1354 cellsPerBlock = (gridLineSize + blocksPerGridLine - 1) / blocksPerGridLine;
1356 const int warpSize = pmeGpu->programHandle_->warpSize();
1357 const int blockSize = (cellsPerBlock + warpSize - 1) / warpSize * warpSize;
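/* Worked example (added): with complexGridSize[minorDim] = 37 and a max work-group size of 256,
 * gridLinesPerBlock = 6 and blocksPerGridLine = 1, so cellsPerBlock = 37 * 6 = 222; with
 * warpSize = 32 this rounds up to blockSize = 224. The 256 and 32 are illustrative values only. */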
1359 static_assert(GMX_GPU != GMX_GPU_CUDA || c_solveMaxWarpsPerBlock / 2 >= 4,
1360 "The CUDA solve energy kernels needs at least 4 warps. "
1361 "Here we launch at least half of the max warps.");
1363 KernelLaunchConfig config;
1364 config.blockSize[0] = blockSize;
1365 config.gridSize[0] = blocksPerGridLine;
1366 // rounding up to full warps so that shuffle operations produce defined results
1367 config.gridSize[1] = (pmeGpu->kernelParams->grid.complexGridSize[middleDim] + gridLinesPerBlock - 1)
1368 / gridLinesPerBlock;
1369 config.gridSize[2] = pmeGpu->kernelParams->grid.complexGridSize[majorDim];
1371 int timingId = gtPME_SOLVE;
1372 PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1373 if (gridOrdering == GridOrdering::YZX)
1377 kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveYZXEnergyKernelA
1378 : pmeGpu->programHandle_->impl_->solveYZXKernelA;
1382 kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveYZXEnergyKernelB
1383 : pmeGpu->programHandle_->impl_->solveYZXKernelB;
1386 else if (gridOrdering == GridOrdering::XYZ)
1390 kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveXYZEnergyKernelA
1391 : pmeGpu->programHandle_->impl_->solveXYZKernelA;
1395 kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveXYZEnergyKernelB
1396 : pmeGpu->programHandle_->impl_->solveXYZKernelB;
1400 pme_gpu_start_timing(pmeGpu, timingId);
1401 auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1402 #if c_canEmbedBuffers
1403 const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1405 const auto kernelArgs = prepareGpuKernelArguments(
1406 kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->grid.d_splineModuli[gridIndex],
1407 &kernelParamsPtr->constants.d_virialAndEnergy[gridIndex],
1408 &kernelParamsPtr->grid.d_fourierGrid[gridIndex]);
1410 launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME solve",
1412 pme_gpu_stop_timing(pmeGpu, timingId);
1414 if (computeEnergyAndVirial)
1416 copyFromDeviceBuffer(pmeGpu->staging.h_virialAndEnergy[gridIndex],
1417 &kernelParamsPtr->constants.d_virialAndEnergy[gridIndex], 0,
1418 c_virialAndEnergyCount, pmeGpu->archSpecific->pmeStream_,
1419 pmeGpu->settings.transferKind, nullptr);
1422 if (copyInputAndOutputGrid)
1424 copyFromDeviceBuffer(h_gridFloat, &kernelParamsPtr->grid.d_fourierGrid[gridIndex], 0,
1425 pmeGpu->archSpecific->complexGridSize[gridIndex],
1426 pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
1431 * Returns a pointer to the appropriate gather kernel based on the input bool values
1433 * \param[in] pmeGpu The PME GPU structure.
1434 * \param[in] threadsPerAtom Controls whether we should use order or order*order threads per atom
1435 * \param[in] readSplinesFromGlobal bool controlling if we should read spline data from global memory
1436 * \param[in] numGrids Number of grids to use. numGrids == 2 if Coulomb is perturbed.
1438 * \return Pointer to CUDA kernel
1440 inline auto selectGatherKernelPtr(const PmeGpu* pmeGpu,
1441 ThreadsPerAtom threadsPerAtom,
1442 bool readSplinesFromGlobal,
1446 PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1448 if (readSplinesFromGlobal)
1450 if (threadsPerAtom == ThreadsPerAtom::Order)
1454 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesThPerAtom4Dual;
1458 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesThPerAtom4Single;
1465 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesDual;
1469 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesSingle;
1475 if (threadsPerAtom == ThreadsPerAtom::Order)
1479 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelThPerAtom4Dual;
1483 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelThPerAtom4Single;
1490 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelDual;
1494 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelSingle;
1501 void pme_gpu_gather(PmeGpu* pmeGpu, real** h_grids, const float lambda)
1504 pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
1505 "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
1507 const auto& settings = pmeGpu->settings;
1509 if (!settings.performGPUFFT || settings.copyAllOutputs)
1511 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
1513 float* h_grid = const_cast<float*>(h_grids[gridIndex]);
1514 pme_gpu_copy_input_gather_grid(pmeGpu, h_grid, gridIndex);
1518 if (settings.copyAllOutputs)
1520 pme_gpu_copy_input_gather_atom_data(pmeGpu);
1523 /* Set if we have unit tests */
1524 const bool readGlobal = pmeGpu->settings.copyAllOutputs;
1525 const size_t blockSize = pmeGpu->programHandle_->impl_->gatherWorkGroupSize;
1526 const int order = pmeGpu->common->pme_order;
1527 GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1528 const int threadsPerAtom =
1529 (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
1530 const bool recalculateSplines = pmeGpu->settings.recalculateSplines;
1531 #if GMX_GPU == GMX_GPU_OPENCL
1532 GMX_ASSERT(pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::OrderSquared,
1533 "Only 16 threads per atom supported in OpenCL");
1534 GMX_ASSERT(!recalculateSplines, "Recalculating splines not supported in OpenCL");
1536 const int atomsPerBlock = blockSize / threadsPerAtom;
1538 GMX_ASSERT(!(c_pmeAtomDataBlockSize % atomsPerBlock),
1539 "inconsistent atom data padding vs. gathering block size");
1541 const int blockCount = pmeGpu->nAtomsAlloc / atomsPerBlock;
1542 auto dimGrid = pmeGpuCreateGrid(pmeGpu, blockCount);
1544 KernelLaunchConfig config;
1545 config.blockSize[0] = order;
1546 config.blockSize[1] = (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? 1 : order);
1547 config.blockSize[2] = atomsPerBlock;
1548 config.gridSize[0] = dimGrid.first;
1549 config.gridSize[1] = dimGrid.second;
1551 // TODO test different cache configs
1553 int timingId = gtPME_GATHER;
1554 PmeGpuProgramImpl::PmeKernelHandle kernelPtr =
1555 selectGatherKernelPtr(pmeGpu, pmeGpu->settings.threadsPerAtom,
1556 readGlobal || (!recalculateSplines), pmeGpu->common->ngrids);
1557 // TODO design kernel selection getters and make PmeGpu a friend of PmeGpuProgramImpl
1559 pme_gpu_start_timing(pmeGpu, timingId);
1560 auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1561 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1562 if (pmeGpu->common->ngrids == 1)
1564 kernelParamsPtr->current.scale = 1.0;
1568 kernelParamsPtr->current.scale = 1.0 - lambda;
1571 #if c_canEmbedBuffers
1572 const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1574 const auto kernelArgs = prepareGpuKernelArguments(
1575 kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_A],
1576 &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_B],
1577 &kernelParamsPtr->grid.d_realGrid[FEP_STATE_A], &kernelParamsPtr->grid.d_realGrid[FEP_STATE_B],
1578 &kernelParamsPtr->atoms.d_theta, &kernelParamsPtr->atoms.d_dtheta,
1579 &kernelParamsPtr->atoms.d_gridlineIndices, &kernelParamsPtr->atoms.d_forces);
1581 launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME gather",
1583 pme_gpu_stop_timing(pmeGpu, timingId);
1585 if (pmeGpu->settings.useGpuForceReduction)
1587 pmeGpu->archSpecific->pmeForcesReady.markEvent(pmeGpu->archSpecific->pmeStream_);
1591 pme_gpu_copy_output_forces(pmeGpu);
1595 void* pme_gpu_get_kernelparam_forces(const PmeGpu* pmeGpu)
1597 if (pmeGpu && pmeGpu->kernelParams)
1599 return pmeGpu->kernelParams->atoms.d_forces;
1607 void pme_gpu_set_kernelparam_coordinates(const PmeGpu* pmeGpu, DeviceBuffer<gmx::RVec> d_x)
1609 GMX_ASSERT(pmeGpu && pmeGpu->kernelParams,
1610 "PME GPU device buffer can not be set in non-GPU builds or before the GPU PME was "
1613 GMX_ASSERT(checkDeviceBuffer(d_x, pmeGpu->kernelParams->atoms.nAtoms),
1614 "The device-side buffer can not be set.");
1616 pmeGpu->kernelParams->atoms.d_coordinates = d_x;
1619 GpuEventSynchronizer* pme_gpu_get_forces_ready_synchronizer(const PmeGpu* pmeGpu)
1621 if (pmeGpu && pmeGpu->kernelParams)
1623 return &pmeGpu->archSpecific->pmeForcesReady;