2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2016,2017,2018,2019,2020, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
38 * \brief This file contains internal function implementations
39 * for performing the PME calculations on GPU.
41 * Note that this file is compiled as regular C++ source in OpenCL builds, but
42 * it is treated as CUDA source in CUDA-enabled GPU builds.
44 * \author Aleksei Iupinov <a.yupinov@gmail.com>
45 * \ingroup module_ewald
50 #include "pme_gpu_internal.h"
58 #include "gromacs/ewald/ewald_utils.h"
59 #include "gromacs/gpu_utils/device_context.h"
60 #include "gromacs/gpu_utils/device_stream.h"
61 #include "gromacs/gpu_utils/gpu_utils.h"
62 #include "gromacs/math/invertmatrix.h"
63 #include "gromacs/math/units.h"
64 #include "gromacs/timing/gpu_timing.h"
65 #include "gromacs/utility/exceptions.h"
66 #include "gromacs/utility/fatalerror.h"
67 #include "gromacs/utility/gmxassert.h"
68 #include "gromacs/utility/logger.h"
69 #include "gromacs/utility/stringutil.h"
71 #if GMX_GPU == GMX_GPU_CUDA
72 # include "gromacs/gpu_utils/pmalloc_cuda.h"
75 #elif GMX_GPU == GMX_GPU_OPENCL
76 # include "gromacs/gpu_utils/gmxopencl.h"
79 #include "gromacs/ewald/pme.h"
81 #include "pme_gpu_3dfft.h"
82 #include "pme_gpu_calculate_splines.h"
83 #include "pme_gpu_constants.h"
84 #include "pme_gpu_program_impl.h"
85 #include "pme_gpu_timings.h"
86 #include "pme_gpu_types.h"
87 #include "pme_gpu_types_host.h"
88 #include "pme_gpu_types_host_impl.h"
90 #include "pme_internal.h"
91 #include "pme_solve.h"
* Atom limit above which it is advantageous to turn on recalculating
* the splines in the gather and to use fewer threads per atom in the spline and spread kernels.
98 constexpr int c_pmeGpuPerformanceAtomLimit = 23000;
101 * Wrapper for getting a pointer to the plain C++ part of the GPU kernel parameters structure.
103 * \param[in] pmeGpu The PME GPU structure.
104 * \returns The pointer to the kernel parameters.
106 static PmeGpuKernelParamsBase* pme_gpu_get_kernel_params_base_ptr(const PmeGpu* pmeGpu)
108 // reinterpret_cast is needed because the derived CUDA structure is not known in this file
109 auto* kernelParamsPtr = reinterpret_cast<PmeGpuKernelParamsBase*>(pmeGpu->kernelParams.get());
110 return kernelParamsPtr;
114 * Atom data block size (in terms of number of atoms).
* This is the least common multiple of the number of atoms processed by
116 * a single block/workgroup of the spread and gather kernels.
117 * The GPU atom data buffers must be padded, which means that
118 * the numbers of atoms used for determining the size of the memory
119 * allocation must be divisible by this.
121 constexpr int c_pmeAtomDataBlockSize = 64;
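// Worked example: a system of 1000 atoms has its GPU atom buffers sized for 1024 atoms
// (16 blocks of 64); pme_gpu_reinit_atoms() below performs this rounding.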
123 int pme_gpu_get_atom_data_block_size()
125 return c_pmeAtomDataBlockSize;
128 void pme_gpu_synchronize(const PmeGpu* pmeGpu)
130 pmeGpu->archSpecific->pmeStream_.synchronize();
133 void pme_gpu_alloc_energy_virial(PmeGpu* pmeGpu)
135 const size_t energyAndVirialSize = c_virialAndEnergyCount * sizeof(float);
136 allocateDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy, c_virialAndEnergyCount,
137 pmeGpu->archSpecific->deviceContext_);
138 pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_virialAndEnergy), energyAndVirialSize);
141 void pme_gpu_free_energy_virial(PmeGpu* pmeGpu)
143 freeDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy);
144 pfree(pmeGpu->staging.h_virialAndEnergy);
145 pmeGpu->staging.h_virialAndEnergy = nullptr;
148 void pme_gpu_clear_energy_virial(const PmeGpu* pmeGpu)
150 clearDeviceBufferAsync(&pmeGpu->kernelParams->constants.d_virialAndEnergy, 0,
151 c_virialAndEnergyCount, pmeGpu->archSpecific->pmeStream_);
154 void pme_gpu_realloc_and_copy_bspline_values(PmeGpu* pmeGpu)
156 const int splineValuesOffset[DIM] = { 0, pmeGpu->kernelParams->grid.realGridSize[XX],
157 pmeGpu->kernelParams->grid.realGridSize[XX]
158 + pmeGpu->kernelParams->grid.realGridSize[YY] };
159 memcpy(&pmeGpu->kernelParams->grid.splineValuesOffset, &splineValuesOffset, sizeof(splineValuesOffset));
161 const int newSplineValuesSize = pmeGpu->kernelParams->grid.realGridSize[XX]
162 + pmeGpu->kernelParams->grid.realGridSize[YY]
163 + pmeGpu->kernelParams->grid.realGridSize[ZZ];
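// Layout example: for a 60 x 64 x 72 real grid, the per-dimension moduli are stored back to
// back at offsets { 0, 60, 124 }, so d_splineModuli holds 60 + 64 + 72 = 196 floats in total.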
164 const bool shouldRealloc = (newSplineValuesSize > pmeGpu->archSpecific->splineValuesSize);
165 reallocateDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, newSplineValuesSize,
166 &pmeGpu->archSpecific->splineValuesSize,
167 &pmeGpu->archSpecific->splineValuesSizeAlloc,
168 pmeGpu->archSpecific->deviceContext_);
171 /* Reallocate the host buffer */
172 pfree(pmeGpu->staging.h_splineModuli);
173 pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_splineModuli),
174 newSplineValuesSize * sizeof(float));
176 for (int i = 0; i < DIM; i++)
178 memcpy(pmeGpu->staging.h_splineModuli + splineValuesOffset[i],
179 pmeGpu->common->bsp_mod[i].data(), pmeGpu->common->bsp_mod[i].size() * sizeof(float));
181 /* TODO: pin original buffer instead! */
182 copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, pmeGpu->staging.h_splineModuli,
183 0, newSplineValuesSize, pmeGpu->archSpecific->pmeStream_,
184 pmeGpu->settings.transferKind, nullptr);
187 void pme_gpu_free_bspline_values(const PmeGpu* pmeGpu)
189 pfree(pmeGpu->staging.h_splineModuli);
190 freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli);
193 void pme_gpu_realloc_forces(PmeGpu* pmeGpu)
195 const size_t newForcesSize = pmeGpu->nAtomsAlloc * DIM;
196 GMX_ASSERT(newForcesSize > 0, "Bad number of atoms in PME GPU");
197 reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, newForcesSize,
198 &pmeGpu->archSpecific->forcesSize, &pmeGpu->archSpecific->forcesSizeAlloc,
199 pmeGpu->archSpecific->deviceContext_);
200 pmeGpu->staging.h_forces.reserveWithPadding(pmeGpu->nAtomsAlloc);
201 pmeGpu->staging.h_forces.resizeWithPadding(pmeGpu->kernelParams->atoms.nAtoms);
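// The reserve call sets the padded capacity once per (re)allocation; the resize call then
// exposes only the first nAtoms force rows to callers.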
204 void pme_gpu_free_forces(const PmeGpu* pmeGpu)
206 freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces);
209 void pme_gpu_copy_input_forces(PmeGpu* pmeGpu)
211 GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
212 float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
213 copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, h_forcesFloat, 0,
214 DIM * pmeGpu->kernelParams->atoms.nAtoms, pmeGpu->archSpecific->pmeStream_,
215 pmeGpu->settings.transferKind, nullptr);
218 void pme_gpu_copy_output_forces(PmeGpu* pmeGpu)
220 GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
221 float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
222 copyFromDeviceBuffer(h_forcesFloat, &pmeGpu->kernelParams->atoms.d_forces, 0,
223 DIM * pmeGpu->kernelParams->atoms.nAtoms, pmeGpu->archSpecific->pmeStream_,
224 pmeGpu->settings.transferKind, nullptr);
227 void pme_gpu_realloc_and_copy_input_coefficients(PmeGpu* pmeGpu, const float* h_coefficients)
229 GMX_ASSERT(h_coefficients, "Bad host-side charge buffer in PME GPU");
230 const size_t newCoefficientsSize = pmeGpu->nAtomsAlloc;
231 GMX_ASSERT(newCoefficientsSize > 0, "Bad number of atoms in PME GPU");
232 reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients, newCoefficientsSize,
233 &pmeGpu->archSpecific->coefficientsSize,
234 &pmeGpu->archSpecific->coefficientsSizeAlloc,
235 pmeGpu->archSpecific->deviceContext_);
236 copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients,
237 const_cast<float*>(h_coefficients), 0, pmeGpu->kernelParams->atoms.nAtoms,
238 pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
240 const size_t paddingIndex = pmeGpu->kernelParams->atoms.nAtoms;
241 const size_t paddingCount = pmeGpu->nAtomsAlloc - paddingIndex;
242 if (paddingCount > 0)
244 clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coefficients, paddingIndex,
245 paddingCount, pmeGpu->archSpecific->pmeStream_);
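// E.g. with 1000 atoms padded to 1024, elements [1000, 1024) of d_coefficients are zeroed
// here so that the padding atoms spread zero charge.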
249 void pme_gpu_free_coefficients(const PmeGpu* pmeGpu)
251 freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients);
254 void pme_gpu_realloc_spline_data(PmeGpu* pmeGpu)
256 const int order = pmeGpu->common->pme_order;
257 const int newSplineDataSize = DIM * order * pmeGpu->nAtomsAlloc;
258 GMX_ASSERT(newSplineDataSize > 0, "Bad number of atoms in PME GPU");
259 /* Two arrays of the same size */
260 const bool shouldRealloc = (newSplineDataSize > pmeGpu->archSpecific->splineDataSize);
261 int currentSizeTemp = pmeGpu->archSpecific->splineDataSize;
262 int currentSizeTempAlloc = pmeGpu->archSpecific->splineDataSizeAlloc;
reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta, newSplineDataSize, &currentSizeTemp,
                       &currentSizeTempAlloc, pmeGpu->archSpecific->deviceContext_);
265 reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta, newSplineDataSize,
266 &pmeGpu->archSpecific->splineDataSize, &pmeGpu->archSpecific->splineDataSizeAlloc,
267 pmeGpu->archSpecific->deviceContext_);
268 // the host side reallocation
271 pfree(pmeGpu->staging.h_theta);
272 pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_theta), newSplineDataSize * sizeof(float));
273 pfree(pmeGpu->staging.h_dtheta);
274 pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_dtheta), newSplineDataSize * sizeof(float));
278 void pme_gpu_free_spline_data(const PmeGpu* pmeGpu)
280 /* Two arrays of the same size */
281 freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta);
282 freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta);
283 pfree(pmeGpu->staging.h_theta);
284 pfree(pmeGpu->staging.h_dtheta);
287 void pme_gpu_realloc_grid_indices(PmeGpu* pmeGpu)
289 const size_t newIndicesSize = DIM * pmeGpu->nAtomsAlloc;
290 GMX_ASSERT(newIndicesSize > 0, "Bad number of atoms in PME GPU");
291 reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices, newIndicesSize,
292 &pmeGpu->archSpecific->gridlineIndicesSize,
293 &pmeGpu->archSpecific->gridlineIndicesSizeAlloc,
294 pmeGpu->archSpecific->deviceContext_);
295 pfree(pmeGpu->staging.h_gridlineIndices);
296 pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_gridlineIndices), newIndicesSize * sizeof(int));
299 void pme_gpu_free_grid_indices(const PmeGpu* pmeGpu)
301 freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices);
302 pfree(pmeGpu->staging.h_gridlineIndices);
305 void pme_gpu_realloc_grids(PmeGpu* pmeGpu)
307 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
308 const int newRealGridSize = kernelParamsPtr->grid.realGridSizePadded[XX]
309 * kernelParamsPtr->grid.realGridSizePadded[YY]
310 * kernelParamsPtr->grid.realGridSizePadded[ZZ];
311 const int newComplexGridSize = kernelParamsPtr->grid.complexGridSizePadded[XX]
312 * kernelParamsPtr->grid.complexGridSizePadded[YY]
313 * kernelParamsPtr->grid.complexGridSizePadded[ZZ] * 2;
314 // Multiplied by 2 because we count complex grid size for complex numbers, but all allocations/pointers are float
315 if (pmeGpu->archSpecific->performOutOfPlaceFFT)
317 /* 2 separate grids */
318 reallocateDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, newComplexGridSize,
319 &pmeGpu->archSpecific->complexGridSize,
320 &pmeGpu->archSpecific->complexGridSizeAlloc,
321 pmeGpu->archSpecific->deviceContext_);
322 reallocateDeviceBuffer(
323 &kernelParamsPtr->grid.d_realGrid, newRealGridSize, &pmeGpu->archSpecific->realGridSize,
324 &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->deviceContext_);
328 /* A single buffer so that any grid will fit */
329 const int newGridsSize = std::max(newRealGridSize, newComplexGridSize);
330 reallocateDeviceBuffer(
331 &kernelParamsPtr->grid.d_realGrid, newGridsSize, &pmeGpu->archSpecific->realGridSize,
332 &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->deviceContext_);
333 kernelParamsPtr->grid.d_fourierGrid = kernelParamsPtr->grid.d_realGrid;
334 pmeGpu->archSpecific->complexGridSize = pmeGpu->archSpecific->realGridSize;
335 // the size might get used later for copying the grid
339 void pme_gpu_free_grids(const PmeGpu* pmeGpu)
341 if (pmeGpu->archSpecific->performOutOfPlaceFFT)
343 freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_fourierGrid);
345 freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid);
348 void pme_gpu_clear_grids(const PmeGpu* pmeGpu)
350 clearDeviceBufferAsync(&pmeGpu->kernelParams->grid.d_realGrid, 0,
351 pmeGpu->archSpecific->realGridSize, pmeGpu->archSpecific->pmeStream_);
354 void pme_gpu_realloc_and_copy_fract_shifts(PmeGpu* pmeGpu)
356 pme_gpu_free_fract_shifts(pmeGpu);
358 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
360 const int nx = kernelParamsPtr->grid.realGridSize[XX];
361 const int ny = kernelParamsPtr->grid.realGridSize[YY];
362 const int nz = kernelParamsPtr->grid.realGridSize[ZZ];
363 const int cellCount = c_pmeNeighborUnitcellCount;
364 const int gridDataOffset[DIM] = { 0, cellCount * nx, cellCount * (nx + ny) };
366 memcpy(kernelParamsPtr->grid.tablesOffsets, &gridDataOffset, sizeof(gridDataOffset));
368 const int newFractShiftsSize = cellCount * (nx + ny + nz);
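// Layout sketch: the fractional shifts for X, Y and Z are concatenated into a single table of
// cellCount * (nx + ny + nz) floats, with the per-dimension starting offsets stored in
// tablesOffsets above; d_gridlineIndicesTable uses the same layout for ints.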
370 #if GMX_GPU == GMX_GPU_CUDA
371 initParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable, kernelParamsPtr->fractShiftsTableTexture,
372 pmeGpu->common->fsh.data(), newFractShiftsSize);
initParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
                     kernelParamsPtr->gridlineIndicesTableTexture, pmeGpu->common->nn.data(),
                     newFractShiftsSize);
377 #elif GMX_GPU == GMX_GPU_OPENCL
378 // No dedicated texture routines....
379 allocateDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, newFractShiftsSize,
380 pmeGpu->archSpecific->deviceContext_);
381 allocateDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, newFractShiftsSize,
382 pmeGpu->archSpecific->deviceContext_);
383 copyToDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, pmeGpu->common->fsh.data(), 0,
384 newFractShiftsSize, pmeGpu->archSpecific->pmeStream_,
385 GpuApiCallBehavior::Async, nullptr);
386 copyToDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, pmeGpu->common->nn.data(), 0,
387 newFractShiftsSize, pmeGpu->archSpecific->pmeStream_,
388 GpuApiCallBehavior::Async, nullptr);
392 void pme_gpu_free_fract_shifts(const PmeGpu* pmeGpu)
394 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
395 #if GMX_GPU == GMX_GPU_CUDA
396 destroyParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable,
397 kernelParamsPtr->fractShiftsTableTexture);
398 destroyParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
399 kernelParamsPtr->gridlineIndicesTableTexture);
400 #elif GMX_GPU == GMX_GPU_OPENCL
401 freeDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable);
402 freeDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable);
406 bool pme_gpu_stream_query(const PmeGpu* pmeGpu)
408 return haveStreamTasksCompleted(pmeGpu->archSpecific->pmeStream_);
411 void pme_gpu_copy_input_gather_grid(const PmeGpu* pmeGpu, float* h_grid)
413 copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid, h_grid, 0, pmeGpu->archSpecific->realGridSize,
414 pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
417 void pme_gpu_copy_output_spread_grid(const PmeGpu* pmeGpu, float* h_grid)
419 copyFromDeviceBuffer(h_grid, &pmeGpu->kernelParams->grid.d_realGrid, 0,
420 pmeGpu->archSpecific->realGridSize, pmeGpu->archSpecific->pmeStream_,
421 pmeGpu->settings.transferKind, nullptr);
422 pmeGpu->archSpecific->syncSpreadGridD2H.markEvent(pmeGpu->archSpecific->pmeStream_);
425 void pme_gpu_copy_output_spread_atom_data(const PmeGpu* pmeGpu)
427 const size_t splinesCount = DIM * pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order;
428 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
429 copyFromDeviceBuffer(pmeGpu->staging.h_dtheta, &kernelParamsPtr->atoms.d_dtheta, 0, splinesCount,
430 pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
431 copyFromDeviceBuffer(pmeGpu->staging.h_theta, &kernelParamsPtr->atoms.d_theta, 0, splinesCount,
432 pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
433 copyFromDeviceBuffer(pmeGpu->staging.h_gridlineIndices, &kernelParamsPtr->atoms.d_gridlineIndices,
434 0, kernelParamsPtr->atoms.nAtoms * DIM, pmeGpu->archSpecific->pmeStream_,
435 pmeGpu->settings.transferKind, nullptr);
438 void pme_gpu_copy_input_gather_atom_data(const PmeGpu* pmeGpu)
440 const size_t splinesCount = DIM * pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order;
441 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
// TODO: we could clear only the padding and not the whole buffer, but this is test-exclusive code anyway
444 clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_gridlineIndices, 0, pmeGpu->nAtomsAlloc * DIM,
445 pmeGpu->archSpecific->pmeStream_);
446 clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_dtheta, 0,
447 pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
448 pmeGpu->archSpecific->pmeStream_);
449 clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_theta, 0,
450 pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
451 pmeGpu->archSpecific->pmeStream_);
453 copyToDeviceBuffer(&kernelParamsPtr->atoms.d_dtheta, pmeGpu->staging.h_dtheta, 0, splinesCount,
454 pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
455 copyToDeviceBuffer(&kernelParamsPtr->atoms.d_theta, pmeGpu->staging.h_theta, 0, splinesCount,
456 pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
457 copyToDeviceBuffer(&kernelParamsPtr->atoms.d_gridlineIndices, pmeGpu->staging.h_gridlineIndices,
458 0, kernelParamsPtr->atoms.nAtoms * DIM, pmeGpu->archSpecific->pmeStream_,
459 pmeGpu->settings.transferKind, nullptr);
462 void pme_gpu_sync_spread_grid(const PmeGpu* pmeGpu)
464 pmeGpu->archSpecific->syncSpreadGridD2H.waitForEvent();
467 /*! \brief Internal GPU initialization for PME.
469 * \param[in] pmeGpu GPU PME data.
470 * \param[in] deviceContext GPU context.
471 * \param[in] deviceStream GPU stream.
473 static void pme_gpu_init_internal(PmeGpu* pmeGpu, const DeviceContext& deviceContext, const DeviceStream& deviceStream)
475 #if GMX_GPU == GMX_GPU_CUDA
476 // Prepare to use the device that this PME task was assigned earlier.
477 // Other entities, such as CUDA timing events, are known to implicitly use the device context.
478 CU_RET_ERR(cudaSetDevice(deviceContext.deviceInfo().id), "Switching to PME CUDA device");
481 /* Allocate the target-specific structures */
482 pmeGpu->archSpecific.reset(new PmeGpuSpecific(deviceContext, deviceStream));
483 pmeGpu->kernelParams.reset(new PmeGpuKernelParams());
485 pmeGpu->archSpecific->performOutOfPlaceFFT = true;
486 /* This should give better performance, according to the cuFFT documentation.
487 * The performance seems to be the same though.
488 * TODO: PME could also try to pick up nice grid sizes (with factors of 2, 3, 5, 7).
491 #if GMX_GPU == GMX_GPU_CUDA
492 pmeGpu->maxGridWidthX = deviceContext.deviceInfo().prop.maxGridSize[0];
493 #elif GMX_GPU == GMX_GPU_OPENCL
494 pmeGpu->maxGridWidthX = INT32_MAX / 2;
// TODO: is there really no global work size limit in OpenCL?
499 void pme_gpu_reinit_3dfft(const PmeGpu* pmeGpu)
501 if (pme_gpu_settings(pmeGpu).performGPUFFT)
503 pmeGpu->archSpecific->fftSetup.resize(0);
504 for (int i = 0; i < pmeGpu->common->ngrids; i++)
506 pmeGpu->archSpecific->fftSetup.push_back(std::make_unique<GpuParallel3dFft>(pmeGpu));
511 void pme_gpu_destroy_3dfft(const PmeGpu* pmeGpu)
513 pmeGpu->archSpecific->fftSetup.resize(0);
516 void pme_gpu_getEnergyAndVirial(const gmx_pme_t& pme, PmeOutput* output)
518 const PmeGpu* pmeGpu = pme.gpu;
519 for (int j = 0; j < c_virialAndEnergyCount; j++)
521 GMX_ASSERT(std::isfinite(pmeGpu->staging.h_virialAndEnergy[j]),
522 "PME GPU produces incorrect energy/virial.");
unsigned int j = 0;
output->coulombVirial_[XX][XX] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
527 output->coulombVirial_[YY][YY] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
528 output->coulombVirial_[ZZ][ZZ] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
529 output->coulombVirial_[XX][YY] = output->coulombVirial_[YY][XX] =
530 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
531 output->coulombVirial_[XX][ZZ] = output->coulombVirial_[ZZ][XX] =
532 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
533 output->coulombVirial_[YY][ZZ] = output->coulombVirial_[ZZ][YY] =
534 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
535 output->coulombEnergy_ = 0.5F * pmeGpu->staging.h_virialAndEnergy[j++];
538 /*! \brief Sets the force-related members in \p output
540 * \param[in] pmeGpu PME GPU data structure
541 * \param[out] output Pointer to PME output data structure
543 static void pme_gpu_getForceOutput(PmeGpu* pmeGpu, PmeOutput* output)
545 output->haveForceOutput_ = !pmeGpu->settings.useGpuForceReduction;
546 if (output->haveForceOutput_)
548 output->forces_ = pmeGpu->staging.h_forces;
552 PmeOutput pme_gpu_getOutput(const gmx_pme_t& pme, const bool computeEnergyAndVirial)
554 PmeGpu* pmeGpu = pme.gpu;
558 pme_gpu_getForceOutput(pmeGpu, &output);
560 if (computeEnergyAndVirial)
562 if (pme_gpu_settings(pmeGpu).performGPUSolve)
564 pme_gpu_getEnergyAndVirial(pme, &output);
568 get_pme_ener_vir_q(pme.solve_work, pme.nthread, &output);
574 void pme_gpu_update_input_box(PmeGpu gmx_unused* pmeGpu, const matrix gmx_unused box)
577 GMX_THROW(gmx::NotImplementedError("PME is implemented for single-precision only on GPU"));
580 pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
581 auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
582 kernelParamsPtr->current.boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
583 GMX_ASSERT(kernelParamsPtr->current.boxVolume != 0.0F, "Zero volume of the unit cell");
585 gmx::invertBoxMatrix(scaledBox, recipBox);
587 /* The GPU recipBox is transposed as compared to the CPU recipBox.
588 * Spread uses matrix columns (while solve and gather use rows).
589 * There is no particular reason for this; it might be further rethought/optimized for better access patterns.
591 const real newRecipBox[DIM][DIM] = { { recipBox[XX][XX], recipBox[YY][XX], recipBox[ZZ][XX] },
592 { 0.0, recipBox[YY][YY], recipBox[ZZ][YY] },
593 { 0.0, 0.0, recipBox[ZZ][ZZ] } };
594 memcpy(kernelParamsPtr->current.recipBox, newRecipBox, sizeof(matrix));
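// In other words, newRecipBox[i][j] == recipBox[j][i]; the hard-coded zeros correspond to
// recipBox[XX][YY], recipBox[XX][ZZ] and recipBox[YY][ZZ], which vanish because the GROMACS
// box (and hence its inverse) is lower triangular.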
598 /*! \brief \libinternal
599 * (Re-)initializes all the PME GPU data related to the grid size and cut-off.
601 * \param[in] pmeGpu The PME GPU structure.
603 static void pme_gpu_reinit_grids(PmeGpu* pmeGpu)
605 auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
606 kernelParamsPtr->grid.ewaldFactor =
607 (M_PI * M_PI) / (pmeGpu->common->ewaldcoeff_q * pmeGpu->common->ewaldcoeff_q);
609 /* The grid size variants */
610 for (int i = 0; i < DIM; i++)
612 kernelParamsPtr->grid.realGridSize[i] = pmeGpu->common->nk[i];
613 kernelParamsPtr->grid.realGridSizeFP[i] =
614 static_cast<float>(kernelParamsPtr->grid.realGridSize[i]);
615 kernelParamsPtr->grid.realGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
617 // The complex grid currently uses no padding;
618 // if it starts to do so, then another test should be added for that
619 kernelParamsPtr->grid.complexGridSize[i] = kernelParamsPtr->grid.realGridSize[i];
620 kernelParamsPtr->grid.complexGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
622 /* FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
623 if (!pme_gpu_settings(pmeGpu).performGPUFFT)
// This allows the GPU spreading grid and the CPU fftgrid to have the same layout, so that we can copy the data directly
626 kernelParamsPtr->grid.realGridSizePadded[ZZ] =
627 (kernelParamsPtr->grid.realGridSize[ZZ] / 2 + 1) * 2;
630 /* GPU FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
631 kernelParamsPtr->grid.complexGridSize[ZZ] /= 2;
632 kernelParamsPtr->grid.complexGridSize[ZZ]++;
633 kernelParamsPtr->grid.complexGridSizePadded[ZZ] = kernelParamsPtr->grid.complexGridSize[ZZ];
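// Worked example: nz = 72 gives a complex minor dimension of 72 / 2 + 1 = 37; in the
// mixed-mode (CPU FFT) case the real minor dimension is padded to (72 / 2 + 1) * 2 = 74 above.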
635 pme_gpu_realloc_and_copy_fract_shifts(pmeGpu);
636 pme_gpu_realloc_and_copy_bspline_values(pmeGpu);
637 pme_gpu_realloc_grids(pmeGpu);
638 pme_gpu_reinit_3dfft(pmeGpu);
641 /* Several GPU functions that refer to the CPU PME data live here.
* We would like to keep these away from the GPU-framework specific code for clarity,
* and also to avoid compilation issues with MPI.
646 /*! \brief \libinternal
647 * Copies everything useful from the PME CPU to the PME GPU structure.
648 * The goal is to minimize interaction with the PME CPU structure in the GPU code.
650 * \param[in] pme The PME structure.
652 static void pme_gpu_copy_common_data_from(const gmx_pme_t* pme)
654 /* TODO: Consider refactoring the CPU PME code to use the same structure,
655 * so that this function becomes 2 lines */
656 PmeGpu* pmeGpu = pme->gpu;
657 pmeGpu->common->ngrids = pme->ngrids;
658 pmeGpu->common->epsilon_r = pme->epsilon_r;
659 pmeGpu->common->ewaldcoeff_q = pme->ewaldcoeff_q;
660 pmeGpu->common->nk[XX] = pme->nkx;
661 pmeGpu->common->nk[YY] = pme->nky;
662 pmeGpu->common->nk[ZZ] = pme->nkz;
663 pmeGpu->common->pme_order = pme->pme_order;
664 if (pmeGpu->common->pme_order != c_pmeGpuOrder)
666 GMX_THROW(gmx::NotImplementedError("pme_order != 4 is not implemented!"));
668 for (int i = 0; i < DIM; i++)
670 pmeGpu->common->bsp_mod[i].assign(pme->bsp_mod[i], pme->bsp_mod[i] + pmeGpu->common->nk[i]);
672 const int cellCount = c_pmeNeighborUnitcellCount;
673 pmeGpu->common->fsh.resize(0);
674 pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshx, pme->fshx + cellCount * pme->nkx);
675 pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshy, pme->fshy + cellCount * pme->nky);
676 pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshz, pme->fshz + cellCount * pme->nkz);
677 pmeGpu->common->nn.resize(0);
678 pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnx, pme->nnx + cellCount * pme->nkx);
679 pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nny, pme->nny + cellCount * pme->nky);
680 pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnz, pme->nnz + cellCount * pme->nkz);
681 pmeGpu->common->runMode = pme->runMode;
682 pmeGpu->common->isRankPmeOnly = !pme->bPPnode;
683 pmeGpu->common->boxScaler = pme->boxScaler;
686 /*! \libinternal \brief
* Uses heuristics to select the best-performing PME spread and gather kernel variants
689 * \param[in,out] pmeGpu The PME GPU structure.
691 static void pme_gpu_select_best_performing_pme_spreadgather_kernels(PmeGpu* pmeGpu)
693 if (pmeGpu->kernelParams->atoms.nAtoms > c_pmeGpuPerformanceAtomLimit && (GMX_GPU == GMX_GPU_CUDA))
695 pmeGpu->settings.threadsPerAtom = ThreadsPerAtom::Order;
696 pmeGpu->settings.recalculateSplines = true;
700 pmeGpu->settings.threadsPerAtom = ThreadsPerAtom::OrderSquared;
701 pmeGpu->settings.recalculateSplines = false;
706 /*! \libinternal \brief
707 * Initializes the PME GPU data at the beginning of the run.
708 * TODO: this should become PmeGpu::PmeGpu()
710 * \param[in,out] pme The PME structure.
711 * \param[in] deviceContext The GPU context.
712 * \param[in] deviceStream The GPU stream.
713 * \param[in,out] pmeGpuProgram The handle to the program/kernel data created outside (e.g. in unit tests/runner)
715 static void pme_gpu_init(gmx_pme_t* pme,
716 const DeviceContext& deviceContext,
717 const DeviceStream& deviceStream,
718 const PmeGpuProgram* pmeGpuProgram)
720 pme->gpu = new PmeGpu();
721 PmeGpu* pmeGpu = pme->gpu;
722 changePinningPolicy(&pmeGpu->staging.h_forces, pme_get_pinning_policy());
723 pmeGpu->common = std::make_shared<PmeShared>();
725 /* These settings are set here for the whole run; dynamic ones are set in pme_gpu_reinit() */
726 /* A convenience variable. */
727 pmeGpu->settings.useDecomposition = (pme->nnodes != 1);
728 /* TODO: CPU gather with GPU spread is broken due to different theta/dtheta layout. */
729 pmeGpu->settings.performGPUGather = true;
730 // By default GPU-side reduction is off (explicitly set here for tests, otherwise reset per-step)
731 pmeGpu->settings.useGpuForceReduction = false;
733 pme_gpu_set_testing(pmeGpu, false);
735 GMX_ASSERT(pmeGpuProgram != nullptr, "GPU kernels must be already compiled");
736 pmeGpu->programHandle_ = pmeGpuProgram;
738 pmeGpu->initializedClfftLibrary_ = std::make_unique<gmx::ClfftInitializer>();
740 pme_gpu_init_internal(pmeGpu, deviceContext, deviceStream);
741 pme_gpu_alloc_energy_virial(pmeGpu);
743 pme_gpu_copy_common_data_from(pme);
745 GMX_ASSERT(pmeGpu->common->epsilon_r != 0.0F, "PME GPU: bad electrostatic coefficient");
747 auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
748 kernelParamsPtr->constants.elFactor = ONE_4PI_EPS0 / pmeGpu->common->epsilon_r;
751 void pme_gpu_get_real_grid_sizes(const PmeGpu* pmeGpu, gmx::IVec* gridSize, gmx::IVec* paddedGridSize)
753 GMX_ASSERT(gridSize != nullptr, "");
754 GMX_ASSERT(paddedGridSize != nullptr, "");
755 GMX_ASSERT(pmeGpu != nullptr, "");
756 auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
757 for (int i = 0; i < DIM; i++)
759 (*gridSize)[i] = kernelParamsPtr->grid.realGridSize[i];
760 (*paddedGridSize)[i] = kernelParamsPtr->grid.realGridSizePadded[i];
764 void pme_gpu_reinit(gmx_pme_t* pme,
765 const DeviceContext* deviceContext,
766 const DeviceStream* deviceStream,
767 const PmeGpuProgram* pmeGpuProgram)
769 GMX_ASSERT(pme != nullptr, "Need valid PME object");
773 GMX_RELEASE_ASSERT(deviceContext != nullptr,
774 "Device context can not be nullptr when setting up PME on GPU.");
775 GMX_RELEASE_ASSERT(deviceStream != nullptr,
776 "Device stream can not be nullptr when setting up PME on GPU.");
777 /* First-time initialization */
778 pme_gpu_init(pme, *deviceContext, *deviceStream, pmeGpuProgram);
782 /* After this call nothing in the GPU code should refer to the gmx_pme_t *pme itself - until the next pme_gpu_reinit */
783 pme_gpu_copy_common_data_from(pme);
/* GPU FFT is only used when PME runs on a single rank. */
786 pme->gpu->settings.performGPUFFT =
787 (pme->gpu->common->runMode == PmeRunMode::GPU) && !pme->gpu->settings.useDecomposition;
788 pme->gpu->settings.performGPUSolve = (pme->gpu->common->runMode == PmeRunMode::GPU);
790 /* Reinit active timers */
791 pme_gpu_reinit_timings(pme->gpu);
793 pme_gpu_reinit_grids(pme->gpu);
794 // Note: if timing the reinit launch overhead becomes more relevant
// (e.g. with regular PP-PME re-balancing), we should pass wcycle here.
796 pme_gpu_reinit_computation(pme, nullptr);
797 /* Clear the previous box - doesn't hurt, and forces the PME CPU recipbox
798 * update for mixed mode on grid switch. TODO: use shared recipbox field.
800 std::memset(pme->gpu->common->previousBox, 0, sizeof(pme->gpu->common->previousBox));
803 void pme_gpu_destroy(PmeGpu* pmeGpu)
805 /* Free lots of data */
806 pme_gpu_free_energy_virial(pmeGpu);
807 pme_gpu_free_bspline_values(pmeGpu);
808 pme_gpu_free_forces(pmeGpu);
809 pme_gpu_free_coefficients(pmeGpu);
810 pme_gpu_free_spline_data(pmeGpu);
811 pme_gpu_free_grid_indices(pmeGpu);
812 pme_gpu_free_fract_shifts(pmeGpu);
813 pme_gpu_free_grids(pmeGpu);
815 pme_gpu_destroy_3dfft(pmeGpu);
820 void pme_gpu_reinit_atoms(PmeGpu* pmeGpu, const int nAtoms, const real* charges)
822 auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
823 kernelParamsPtr->atoms.nAtoms = nAtoms;
824 const int block_size = pme_gpu_get_atom_data_block_size();
825 const int nAtomsNewPadded = ((nAtoms + block_size - 1) / block_size) * block_size;
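// E.g. nAtoms = 23000 gives nAtomsNewPadded = 23040 (360 blocks of 64 atoms); reallocation
// below only happens when this padded count grows.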
826 const bool haveToRealloc = (pmeGpu->nAtomsAlloc < nAtomsNewPadded);
827 pmeGpu->nAtomsAlloc = nAtomsNewPadded;
830 GMX_RELEASE_ASSERT(false, "Only single precision supported");
831 GMX_UNUSED_VALUE(charges);
833 pme_gpu_realloc_and_copy_input_coefficients(pmeGpu, reinterpret_cast<const float*>(charges));
834 /* Could also be checked for haveToRealloc, but the copy always needs to be performed */
839 pme_gpu_realloc_forces(pmeGpu);
840 pme_gpu_realloc_spline_data(pmeGpu);
841 pme_gpu_realloc_grid_indices(pmeGpu);
843 pme_gpu_select_best_performing_pme_spreadgather_kernels(pmeGpu);
847 * Returns raw timing event from the corresponding GpuRegionTimer (if timings are enabled).
* In CUDA, the result can be a nullptr stub, per the GpuRegionTimer implementation.
850 * \param[in] pmeGpu The PME GPU data structure.
851 * \param[in] PMEStageId The PME GPU stage gtPME_ index from the enum in src/gromacs/timing/gpu_timing.h
853 static CommandEvent* pme_gpu_fetch_timing_event(const PmeGpu* pmeGpu, size_t PMEStageId)
855 CommandEvent* timingEvent = nullptr;
856 if (pme_gpu_timings_enabled(pmeGpu))
858 GMX_ASSERT(PMEStageId < pmeGpu->archSpecific->timingEvents.size(),
859 "Wrong PME GPU timing event index");
860 timingEvent = pmeGpu->archSpecific->timingEvents[PMEStageId].fetchNextEvent();
865 void pme_gpu_3dfft(const PmeGpu* pmeGpu, gmx_fft_direction dir, int grid_index)
867 int timerId = (dir == GMX_FFT_REAL_TO_COMPLEX) ? gtPME_FFT_R2C : gtPME_FFT_C2R;
869 pme_gpu_start_timing(pmeGpu, timerId);
870 pmeGpu->archSpecific->fftSetup[grid_index]->perform3dFft(
871 dir, pme_gpu_fetch_timing_event(pmeGpu, timerId));
872 pme_gpu_stop_timing(pmeGpu, timerId);
* Given a possibly large \p blockCount, returns a compact 1D or 2D grid for kernel scheduling,
* to minimize the number of unused blocks.
879 std::pair<int, int> inline pmeGpuCreateGrid(const PmeGpu* pmeGpu, int blockCount)
881 // How many maximum widths in X do we need (hopefully just one)
882 const int minRowCount = (blockCount + pmeGpu->maxGridWidthX - 1) / pmeGpu->maxGridWidthX;
883 // Trying to make things even
884 const int colCount = (blockCount + minRowCount - 1) / minRowCount;
885 GMX_ASSERT((colCount * minRowCount - blockCount) >= 0, "pmeGpuCreateGrid: totally wrong");
886 GMX_ASSERT((colCount * minRowCount - blockCount) < minRowCount,
887 "pmeGpuCreateGrid: excessive blocks");
888 return std::pair<int, int>(colCount, minRowCount);
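// Worked example: maxGridWidthX = 65535 and blockCount = 100000 give minRowCount = 2 and
// colCount = 50000, i.e. a 50000 x 2 launch grid with no unused blocks.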
* Returns a pointer to the appropriate spline-and-spread kernel based on the input bool values
894 * \param[in] pmeGpu The PME GPU structure.
895 * \param[in] threadsPerAtom Controls whether we should use order or order*order threads per atom
896 * \param[in] writeSplinesToGlobal bool controlling if we should write spline data to global memory
898 * \return Pointer to CUDA kernel
900 static auto selectSplineAndSpreadKernelPtr(const PmeGpu* pmeGpu, ThreadsPerAtom threadsPerAtom, bool writeSplinesToGlobal)
902 PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
903 if (writeSplinesToGlobal)
905 if (threadsPerAtom == ThreadsPerAtom::Order)
907 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesThPerAtom4;
911 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplines;
916 if (threadsPerAtom == ThreadsPerAtom::Order)
918 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4;
922 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernel;
* Returns a pointer to the appropriate spline kernel based on the input bool values
932 * \param[in] pmeGpu The PME GPU structure.
933 * \param[in] threadsPerAtom Controls whether we should use order or order*order threads per atom
934 * \param[in] writeSplinesToGlobal bool controlling if we should write spline data to global memory
936 * \return Pointer to CUDA kernel
938 static auto selectSplineKernelPtr(const PmeGpu* pmeGpu,
939 ThreadsPerAtom threadsPerAtom,
940 bool gmx_unused writeSplinesToGlobal)
942 PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
GMX_ASSERT(writeSplinesToGlobal,
           "Spline data should always be written to global memory when just calculating splines");
947 if (threadsPerAtom == ThreadsPerAtom::Order)
949 kernelPtr = pmeGpu->programHandle_->impl_->splineKernelThPerAtom4;
953 kernelPtr = pmeGpu->programHandle_->impl_->splineKernel;
* Returns a pointer to the appropriate spread kernel based on the input bool values
961 * \param[in] pmeGpu The PME GPU structure.
962 * \param[in] threadsPerAtom Controls whether we should use order or order*order threads per atom
963 * \param[in] writeSplinesToGlobal bool controlling if we should write spline data to global memory
965 * \return Pointer to CUDA kernel
967 static auto selectSpreadKernelPtr(const PmeGpu* pmeGpu, ThreadsPerAtom threadsPerAtom, bool writeSplinesToGlobal)
969 PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
970 if (writeSplinesToGlobal)
972 if (threadsPerAtom == ThreadsPerAtom::Order)
974 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelThPerAtom4;
978 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernel;
/* If we are not saving the spline data, we need to recalculate it
   using the spline-and-spread kernel */
985 if (threadsPerAtom == ThreadsPerAtom::Order)
987 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4;
991 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernel;
997 void pme_gpu_spread(const PmeGpu* pmeGpu,
998 GpuEventSynchronizer* xReadyOnDevice,
999 int gmx_unused gridIndex,
real*                 h_grid,
bool                  computeSplines,
bool                  spreadCharges)
1004 GMX_ASSERT(computeSplines || spreadCharges,
1005 "PME spline/spread kernel has invalid input (nothing to do)");
1006 const auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1007 GMX_ASSERT(kernelParamsPtr->atoms.nAtoms > 0, "No atom data in PME GPU spread");
1009 const size_t blockSize = pmeGpu->programHandle_->impl_->spreadWorkGroupSize;
1011 const int order = pmeGpu->common->pme_order;
1012 GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1013 const bool writeGlobal = pmeGpu->settings.copyAllOutputs;
1014 const int threadsPerAtom =
1015 (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
1016 const bool recalculateSplines = pmeGpu->settings.recalculateSplines;
1017 #if GMX_GPU == GMX_GPU_OPENCL
1018 GMX_ASSERT(pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::OrderSquared,
1019 "Only 16 threads per atom supported in OpenCL");
1020 GMX_ASSERT(!recalculateSplines, "Recalculating splines not supported in OpenCL");
1022 const int atomsPerBlock = blockSize / threadsPerAtom;
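// Illustrative numbers only (the actual work-group size is architecture-dependent): with a
// spread work-group size of 256 and order * order = 16 threads per atom, atomsPerBlock = 16.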
1024 // TODO: pick smaller block size in runtime if needed
1025 // (e.g. on 660 Ti where 50% occupancy is ~25% faster than 100% occupancy with RNAse (~17.8k atoms))
1026 // If doing so, change atomsPerBlock in the kernels as well.
1027 // TODO: test varying block sizes on modern arch-s as well
1028 // TODO: also consider using cudaFuncSetCacheConfig() for preferring shared memory on older architectures
1029 //(for spline data mostly)
1030 GMX_ASSERT(!(c_pmeAtomDataBlockSize % atomsPerBlock),
1031 "inconsistent atom data padding vs. spreading block size");
// Ensure that coordinates are ready on the device before launching spread;
// this is only needed with CUDA on PP+PME ranks, not on separate PME ranks, in unit tests,
// or in OpenCL, as these cases use a single stream (hence xReadyOnDevice == nullptr).
1036 GMX_ASSERT(xReadyOnDevice != nullptr || (GMX_GPU != GMX_GPU_CUDA)
1037 || pmeGpu->common->isRankPmeOnly || pme_gpu_settings(pmeGpu).copyAllOutputs,
1038 "Need a valid coordinate synchronizer on PP+PME ranks with CUDA.");
1041 xReadyOnDevice->enqueueWaitEvent(pmeGpu->archSpecific->pmeStream_);
1044 const int blockCount = pmeGpu->nAtomsAlloc / atomsPerBlock;
1045 auto dimGrid = pmeGpuCreateGrid(pmeGpu, blockCount);
1047 KernelLaunchConfig config;
1048 config.blockSize[0] = order;
1049 config.blockSize[1] = (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? 1 : order);
1050 config.blockSize[2] = atomsPerBlock;
1051 config.gridSize[0] = dimGrid.first;
1052 config.gridSize[1] = dimGrid.second;
1055 PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1060 timingId = gtPME_SPLINEANDSPREAD;
1061 kernelPtr = selectSplineAndSpreadKernelPtr(pmeGpu, pmeGpu->settings.threadsPerAtom,
1062 writeGlobal || (!recalculateSplines));
1066 timingId = gtPME_SPLINE;
1067 kernelPtr = selectSplineKernelPtr(pmeGpu, pmeGpu->settings.threadsPerAtom,
1068 writeGlobal || (!recalculateSplines));
1073 timingId = gtPME_SPREAD;
1074 kernelPtr = selectSpreadKernelPtr(pmeGpu, pmeGpu->settings.threadsPerAtom,
1075 writeGlobal || (!recalculateSplines));
1079 pme_gpu_start_timing(pmeGpu, timingId);
1080 auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1081 #if c_canEmbedBuffers
1082 const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1084 const auto kernelArgs = prepareGpuKernelArguments(
1085 kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->atoms.d_theta,
1086 &kernelParamsPtr->atoms.d_dtheta, &kernelParamsPtr->atoms.d_gridlineIndices,
1087 &kernelParamsPtr->grid.d_realGrid, &kernelParamsPtr->grid.d_fractShiftsTable,
1088 &kernelParamsPtr->grid.d_gridlineIndicesTable, &kernelParamsPtr->atoms.d_coefficients,
1089 &kernelParamsPtr->atoms.d_coordinates);
1092 launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent,
1093 "PME spline/spread", kernelArgs);
1094 pme_gpu_stop_timing(pmeGpu, timingId);
1096 const auto& settings = pmeGpu->settings;
1097 const bool copyBackGrid = spreadCharges && (!settings.performGPUFFT || settings.copyAllOutputs);
1100 pme_gpu_copy_output_spread_grid(pmeGpu, h_grid);
1102 const bool copyBackAtomData =
1103 computeSplines && (!settings.performGPUGather || settings.copyAllOutputs);
1104 if (copyBackAtomData)
1106 pme_gpu_copy_output_spread_atom_data(pmeGpu);
1110 void pme_gpu_solve(const PmeGpu* pmeGpu, t_complex* h_grid, GridOrdering gridOrdering, bool computeEnergyAndVirial)
1112 const auto& settings = pmeGpu->settings;
1113 const bool copyInputAndOutputGrid = !settings.performGPUFFT || settings.copyAllOutputs;
1115 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1117 float* h_gridFloat = reinterpret_cast<float*>(h_grid);
1118 if (copyInputAndOutputGrid)
1120 copyToDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, h_gridFloat, 0,
1121 pmeGpu->archSpecific->complexGridSize, pmeGpu->archSpecific->pmeStream_,
1122 pmeGpu->settings.transferKind, nullptr);
1125 int majorDim = -1, middleDim = -1, minorDim = -1;
1126 switch (gridOrdering)
1128 case GridOrdering::YZX:
1134 case GridOrdering::XYZ:
1140 default: GMX_ASSERT(false, "Implement grid ordering here and below for the kernel launch");
1143 const int maxBlockSize = pmeGpu->programHandle_->impl_->solveMaxWorkGroupSize;
1145 const int gridLineSize = pmeGpu->kernelParams->grid.complexGridSize[minorDim];
1146 const int gridLinesPerBlock = std::max(maxBlockSize / gridLineSize, 1);
1147 const int blocksPerGridLine = (gridLineSize + maxBlockSize - 1) / maxBlockSize;
1149 if (blocksPerGridLine == 1)
1151 cellsPerBlock = gridLineSize * gridLinesPerBlock;
1155 cellsPerBlock = (gridLineSize + blocksPerGridLine - 1) / blocksPerGridLine;
1157 const int warpSize = pmeGpu->programHandle_->warpSize();
1158 const int blockSize = (cellsPerBlock + warpSize - 1) / warpSize * warpSize;
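// Illustrative numbers: for gridLineSize = 33 and maxBlockSize = 256 (not guaranteed values),
// gridLinesPerBlock = 7, blocksPerGridLine = 1, cellsPerBlock = 231, and a 32-wide warp
// rounds blockSize up to 256.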
static_assert(GMX_GPU != GMX_GPU_CUDA || c_solveMaxWarpsPerBlock / 2 >= 4,
              "The CUDA solve energy kernels need at least 4 warps. "
              "Here we launch at least half of the max warps.");
1164 KernelLaunchConfig config;
1165 config.blockSize[0] = blockSize;
1166 config.gridSize[0] = blocksPerGridLine;
1167 // rounding up to full warps so that shuffle operations produce defined results
1168 config.gridSize[1] = (pmeGpu->kernelParams->grid.complexGridSize[middleDim] + gridLinesPerBlock - 1)
1169 / gridLinesPerBlock;
1170 config.gridSize[2] = pmeGpu->kernelParams->grid.complexGridSize[majorDim];
1172 int timingId = gtPME_SOLVE;
1173 PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1174 if (gridOrdering == GridOrdering::YZX)
1176 kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveYZXEnergyKernel
1177 : pmeGpu->programHandle_->impl_->solveYZXKernel;
1179 else if (gridOrdering == GridOrdering::XYZ)
1181 kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveXYZEnergyKernel
1182 : pmeGpu->programHandle_->impl_->solveXYZKernel;
1185 pme_gpu_start_timing(pmeGpu, timingId);
1186 auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1187 #if c_canEmbedBuffers
1188 const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1190 const auto kernelArgs = prepareGpuKernelArguments(
1191 kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->grid.d_splineModuli,
1192 &kernelParamsPtr->constants.d_virialAndEnergy, &kernelParamsPtr->grid.d_fourierGrid);
launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME solve",
                kernelArgs);
1196 pme_gpu_stop_timing(pmeGpu, timingId);
1198 if (computeEnergyAndVirial)
1200 copyFromDeviceBuffer(pmeGpu->staging.h_virialAndEnergy,
1201 &kernelParamsPtr->constants.d_virialAndEnergy, 0, c_virialAndEnergyCount,
1202 pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
1205 if (copyInputAndOutputGrid)
1207 copyFromDeviceBuffer(h_gridFloat, &kernelParamsPtr->grid.d_fourierGrid, 0,
1208 pmeGpu->archSpecific->complexGridSize, pmeGpu->archSpecific->pmeStream_,
1209 pmeGpu->settings.transferKind, nullptr);
* Returns a pointer to the appropriate gather kernel based on the input values
1216 * \param[in] pmeGpu The PME GPU structure.
1217 * \param[in] threadsPerAtom Controls whether we should use order or order*order threads per atom
* \param[in] readSplinesFromGlobal bool controlling if we should read spline data from global memory
1220 * \return Pointer to CUDA kernel
1222 inline auto selectGatherKernelPtr(const PmeGpu* pmeGpu, ThreadsPerAtom threadsPerAtom, bool readSplinesFromGlobal)
1225 PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1227 if (readSplinesFromGlobal)
1229 if (threadsPerAtom == ThreadsPerAtom::Order)
1231 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesThPerAtom4;
1235 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplines;
1240 if (threadsPerAtom == ThreadsPerAtom::Order)
1242 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelThPerAtom4;
1246 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernel;
1253 void pme_gpu_gather(PmeGpu* pmeGpu, const float* h_grid)
1255 const auto& settings = pmeGpu->settings;
1256 if (!settings.performGPUFFT || settings.copyAllOutputs)
1258 pme_gpu_copy_input_gather_grid(pmeGpu, const_cast<float*>(h_grid));
1261 if (settings.copyAllOutputs)
1263 pme_gpu_copy_input_gather_atom_data(pmeGpu);
/* Set when running unit tests, i.e. when all outputs are copied back */
1267 const bool readGlobal = pmeGpu->settings.copyAllOutputs;
1268 const size_t blockSize = pmeGpu->programHandle_->impl_->gatherWorkGroupSize;
1269 const int order = pmeGpu->common->pme_order;
1270 GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1271 const int threadsPerAtom =
1272 (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
1273 const bool recalculateSplines = pmeGpu->settings.recalculateSplines;
1274 #if GMX_GPU == GMX_GPU_OPENCL
1275 GMX_ASSERT(pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::OrderSquared,
1276 "Only 16 threads per atom supported in OpenCL");
1277 GMX_ASSERT(!recalculateSplines, "Recalculating splines not supported in OpenCL");
1279 const int atomsPerBlock = blockSize / threadsPerAtom;
1281 GMX_ASSERT(!(c_pmeAtomDataBlockSize % atomsPerBlock),
1282 "inconsistent atom data padding vs. gathering block size");
1284 const int blockCount = pmeGpu->nAtomsAlloc / atomsPerBlock;
1285 auto dimGrid = pmeGpuCreateGrid(pmeGpu, blockCount);
1287 KernelLaunchConfig config;
1288 config.blockSize[0] = order;
1289 config.blockSize[1] = (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? 1 : order);
1290 config.blockSize[2] = atomsPerBlock;
1291 config.gridSize[0] = dimGrid.first;
1292 config.gridSize[1] = dimGrid.second;
1294 // TODO test different cache configs
1296 int timingId = gtPME_GATHER;
1297 PmeGpuProgramImpl::PmeKernelHandle kernelPtr = selectGatherKernelPtr(
1298 pmeGpu, pmeGpu->settings.threadsPerAtom, readGlobal || (!recalculateSplines));
1299 // TODO design kernel selection getters and make PmeGpu a friend of PmeGpuProgramImpl
1301 pme_gpu_start_timing(pmeGpu, timingId);
1302 auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1303 const auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1304 #if c_canEmbedBuffers
1305 const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1307 const auto kernelArgs = prepareGpuKernelArguments(
1308 kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->atoms.d_coefficients,
1309 &kernelParamsPtr->grid.d_realGrid, &kernelParamsPtr->atoms.d_theta,
1310 &kernelParamsPtr->atoms.d_dtheta, &kernelParamsPtr->atoms.d_gridlineIndices,
1311 &kernelParamsPtr->atoms.d_forces);
launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME gather",
                kernelArgs);
1315 pme_gpu_stop_timing(pmeGpu, timingId);
1317 if (pmeGpu->settings.useGpuForceReduction)
1319 pmeGpu->archSpecific->pmeForcesReady.markEvent(pmeGpu->archSpecific->pmeStream_);
1323 pme_gpu_copy_output_forces(pmeGpu);
1327 void* pme_gpu_get_kernelparam_forces(const PmeGpu* pmeGpu)
1329 if (pmeGpu && pmeGpu->kernelParams)
1331 return pmeGpu->kernelParams->atoms.d_forces;
1339 void pme_gpu_set_kernelparam_coordinates(const PmeGpu* pmeGpu, DeviceBuffer<gmx::RVec> d_x)
1341 GMX_ASSERT(pmeGpu && pmeGpu->kernelParams,
1342 "PME GPU device buffer can not be set in non-GPU builds or before the GPU PME was "
1345 GMX_ASSERT(checkDeviceBuffer(d_x, pmeGpu->kernelParams->atoms.nAtoms),
1346 "The device-side buffer can not be set.");
1348 pmeGpu->kernelParams->atoms.d_coordinates = d_x;
1351 GpuEventSynchronizer* pme_gpu_get_forces_ready_synchronizer(const PmeGpu* pmeGpu)
1353 if (pmeGpu && pmeGpu->kernelParams)
1355 return &pmeGpu->archSpecific->pmeForcesReady;