2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2016,2017,2018,2019,2020, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
38 * \brief This file contains internal function implementations
39 * for performing the PME calculations on GPU.
41 * Note that this file is compiled as regular C++ source in OpenCL builds, but
42 * it is treated as CUDA source in CUDA-enabled GPU builds.
44 * \author Aleksei Iupinov <a.yupinov@gmail.com>
45 * \ingroup module_ewald
50 #include "pme_gpu_internal.h"
58 #include "gromacs/ewald/ewald_utils.h"
59 #include "gromacs/gpu_utils/gpu_utils.h"
60 #include "gromacs/math/invertmatrix.h"
61 #include "gromacs/math/units.h"
62 #include "gromacs/timing/gpu_timing.h"
63 #include "gromacs/utility/exceptions.h"
64 #include "gromacs/utility/fatalerror.h"
65 #include "gromacs/utility/gmxassert.h"
66 #include "gromacs/utility/logger.h"
67 #include "gromacs/utility/stringutil.h"
69 #if GMX_GPU == GMX_GPU_CUDA
70 # include "gromacs/gpu_utils/pmalloc_cuda.h"
73 #elif GMX_GPU == GMX_GPU_OPENCL
74 # include "gromacs/gpu_utils/gmxopencl.h"
77 #include "gromacs/ewald/pme.h"
79 #include "pme_gpu_3dfft.h"
80 #include "pme_gpu_calculate_splines.h"
81 #include "pme_gpu_constants.h"
82 #include "pme_gpu_program_impl.h"
83 #include "pme_gpu_timings.h"
84 #include "pme_gpu_types.h"
85 #include "pme_gpu_types_host.h"
86 #include "pme_gpu_types_host_impl.h"
88 #include "pme_internal.h"
89 #include "pme_solve.h"
93 * Atom limit above which it is advantageous to turn on
94 * recalculating the splines in the gather and to use fewer threads per atom in the spline and spread kernels.
96 constexpr int c_pmeGpuPerformanceAtomLimit = 23000;
99 * Wrapper for getting a pointer to the plain C++ part of the GPU kernel parameters structure.
101 * \param[in] pmeGpu The PME GPU structure.
102 * \returns The pointer to the kernel parameters.
104 static PmeGpuKernelParamsBase* pme_gpu_get_kernel_params_base_ptr(const PmeGpu* pmeGpu)
106 // reinterpret_cast is needed because the derived CUDA structure is not known in this file
107 auto* kernelParamsPtr = reinterpret_cast<PmeGpuKernelParamsBase*>(pmeGpu->kernelParams.get());
108 return kernelParamsPtr;
111 int pme_gpu_get_atom_data_alignment(const PmeGpu* /*unused*/)
113 // TODO: this can be simplified, as c_pmeAtomDataAlignment is now constant
116 return c_pmeAtomDataAlignment;
124 int pme_gpu_get_atoms_per_warp(const PmeGpu* pmeGpu)
126 if (pmeGpu->settings.useOrderThreadsPerAtom)
128 return pmeGpu->programHandle_->impl_->warpSize / c_pmeSpreadGatherThreadsPerAtom4ThPerAtom;
132 return pmeGpu->programHandle_->impl_->warpSize / c_pmeSpreadGatherThreadsPerAtom;
136 void pme_gpu_synchronize(const PmeGpu* pmeGpu)
138 gpuStreamSynchronize(pmeGpu->archSpecific->pmeStream);
141 void pme_gpu_alloc_energy_virial(PmeGpu* pmeGpu)
143 const size_t energyAndVirialSize = c_virialAndEnergyCount * sizeof(float);
144 allocateDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy, c_virialAndEnergyCount,
145 pmeGpu->archSpecific->deviceContext_);
146 pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_virialAndEnergy), energyAndVirialSize);
149 void pme_gpu_free_energy_virial(PmeGpu* pmeGpu)
151 freeDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy);
152 pfree(pmeGpu->staging.h_virialAndEnergy);
153 pmeGpu->staging.h_virialAndEnergy = nullptr;
156 void pme_gpu_clear_energy_virial(const PmeGpu* pmeGpu)
158 clearDeviceBufferAsync(&pmeGpu->kernelParams->constants.d_virialAndEnergy, 0,
159 c_virialAndEnergyCount, pmeGpu->archSpecific->pmeStream);
162 void pme_gpu_realloc_and_copy_bspline_values(PmeGpu* pmeGpu)
164 const int splineValuesOffset[DIM] = { 0, pmeGpu->kernelParams->grid.realGridSize[XX],
165 pmeGpu->kernelParams->grid.realGridSize[XX]
166 + pmeGpu->kernelParams->grid.realGridSize[YY] };
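// The B-spline moduli for the X, Y and Z dimensions are packed back to back in a single buffer;
// these offsets record where each dimension's values start.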
167 memcpy(&pmeGpu->kernelParams->grid.splineValuesOffset, &splineValuesOffset, sizeof(splineValuesOffset));
169 const int newSplineValuesSize = pmeGpu->kernelParams->grid.realGridSize[XX]
170 + pmeGpu->kernelParams->grid.realGridSize[YY]
171 + pmeGpu->kernelParams->grid.realGridSize[ZZ];
172 const bool shouldRealloc = (newSplineValuesSize > pmeGpu->archSpecific->splineValuesSize);
173 reallocateDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, newSplineValuesSize,
174 &pmeGpu->archSpecific->splineValuesSize,
175 &pmeGpu->archSpecific->splineValuesSizeAlloc,
176 pmeGpu->archSpecific->deviceContext_);
179 /* Reallocate the host buffer */
180 pfree(pmeGpu->staging.h_splineModuli);
181 pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_splineModuli),
182 newSplineValuesSize * sizeof(float));
184 for (int i = 0; i < DIM; i++)
186 memcpy(pmeGpu->staging.h_splineModuli + splineValuesOffset[i],
187 pmeGpu->common->bsp_mod[i].data(), pmeGpu->common->bsp_mod[i].size() * sizeof(float));
189 /* TODO: pin original buffer instead! */
190 copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, pmeGpu->staging.h_splineModuli,
191 0, newSplineValuesSize, pmeGpu->archSpecific->pmeStream,
192 pmeGpu->settings.transferKind, nullptr);
195 void pme_gpu_free_bspline_values(const PmeGpu* pmeGpu)
197 pfree(pmeGpu->staging.h_splineModuli);
198 freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli);
201 void pme_gpu_realloc_forces(PmeGpu* pmeGpu)
203 const size_t newForcesSize = pmeGpu->nAtomsAlloc * DIM;
204 GMX_ASSERT(newForcesSize > 0, "Bad number of atoms in PME GPU");
205 reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, newForcesSize,
206 &pmeGpu->archSpecific->forcesSize, &pmeGpu->archSpecific->forcesSizeAlloc,
207 pmeGpu->archSpecific->deviceContext_);
208 pmeGpu->staging.h_forces.reserveWithPadding(pmeGpu->nAtomsAlloc);
209 pmeGpu->staging.h_forces.resizeWithPadding(pmeGpu->kernelParams->atoms.nAtoms);
212 void pme_gpu_free_forces(const PmeGpu* pmeGpu)
214 freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces);
217 void pme_gpu_copy_input_forces(PmeGpu* pmeGpu)
219 GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
220 float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
221 copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, h_forcesFloat, 0,
222 DIM * pmeGpu->kernelParams->atoms.nAtoms, pmeGpu->archSpecific->pmeStream,
223 pmeGpu->settings.transferKind, nullptr);
226 void pme_gpu_copy_output_forces(PmeGpu* pmeGpu)
228 GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
229 float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
230 copyFromDeviceBuffer(h_forcesFloat, &pmeGpu->kernelParams->atoms.d_forces, 0,
231 DIM * pmeGpu->kernelParams->atoms.nAtoms, pmeGpu->archSpecific->pmeStream,
232 pmeGpu->settings.transferKind, nullptr);
235 void pme_gpu_realloc_and_copy_input_coefficients(PmeGpu* pmeGpu, const float* h_coefficients)
237 GMX_ASSERT(h_coefficients, "Bad host-side charge buffer in PME GPU");
238 const size_t newCoefficientsSize = pmeGpu->nAtomsAlloc;
239 GMX_ASSERT(newCoefficientsSize > 0, "Bad number of atoms in PME GPU");
240 reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients, newCoefficientsSize,
241 &pmeGpu->archSpecific->coefficientsSize,
242 &pmeGpu->archSpecific->coefficientsSizeAlloc,
243 pmeGpu->archSpecific->deviceContext_);
244 copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients,
245 const_cast<float*>(h_coefficients), 0, pmeGpu->kernelParams->atoms.nAtoms,
246 pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
249 const size_t paddingIndex = pmeGpu->kernelParams->atoms.nAtoms;
250 const size_t paddingCount = pmeGpu->nAtomsAlloc - paddingIndex;
251 if (paddingCount > 0)
253 clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coefficients, paddingIndex,
254 paddingCount, pmeGpu->archSpecific->pmeStream);
259 void pme_gpu_free_coefficients(const PmeGpu* pmeGpu)
261 freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients);
264 void pme_gpu_realloc_spline_data(PmeGpu* pmeGpu)
266 const int order = pmeGpu->common->pme_order;
267 const int alignment = pme_gpu_get_atoms_per_warp(pmeGpu);
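// Round the allocated atom count up to a whole number of warps' worth of atoms;
// the per-warp spline data layout assumes complete warps.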
268 const size_t nAtomsPadded = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
269 const int newSplineDataSize = DIM * order * nAtomsPadded;
270 GMX_ASSERT(newSplineDataSize > 0, "Bad number of atoms in PME GPU");
271 /* Two arrays of the same size */
272 const bool shouldRealloc = (newSplineDataSize > pmeGpu->archSpecific->splineDataSize);
273 int currentSizeTemp = pmeGpu->archSpecific->splineDataSize;
274 int currentSizeTempAlloc = pmeGpu->archSpecific->splineDataSizeAlloc;
275 reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta, newSplineDataSize, &currentSizeTemp,
276 &currentSizeTempAlloc, pmeGpu->archSpecific->deviceContext_);
277 reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta, newSplineDataSize,
278 &pmeGpu->archSpecific->splineDataSize, &pmeGpu->archSpecific->splineDataSizeAlloc,
279 pmeGpu->archSpecific->deviceContext_);
280 // the host side reallocation
283 pfree(pmeGpu->staging.h_theta);
284 pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_theta), newSplineDataSize * sizeof(float));
285 pfree(pmeGpu->staging.h_dtheta);
286 pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_dtheta), newSplineDataSize * sizeof(float));
290 void pme_gpu_free_spline_data(const PmeGpu* pmeGpu)
292 /* Two arrays of the same size */
293 freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta);
294 freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta);
295 pfree(pmeGpu->staging.h_theta);
296 pfree(pmeGpu->staging.h_dtheta);
299 void pme_gpu_realloc_grid_indices(PmeGpu* pmeGpu)
301 const size_t newIndicesSize = DIM * pmeGpu->nAtomsAlloc;
302 GMX_ASSERT(newIndicesSize > 0, "Bad number of atoms in PME GPU");
303 reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices, newIndicesSize,
304 &pmeGpu->archSpecific->gridlineIndicesSize,
305 &pmeGpu->archSpecific->gridlineIndicesSizeAlloc,
306 pmeGpu->archSpecific->deviceContext_);
307 pfree(pmeGpu->staging.h_gridlineIndices);
308 pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_gridlineIndices), newIndicesSize * sizeof(int));
311 void pme_gpu_free_grid_indices(const PmeGpu* pmeGpu)
313 freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices);
314 pfree(pmeGpu->staging.h_gridlineIndices);
317 void pme_gpu_realloc_grids(PmeGpu* pmeGpu)
319 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
320 const int newRealGridSize = kernelParamsPtr->grid.realGridSizePadded[XX]
321 * kernelParamsPtr->grid.realGridSizePadded[YY]
322 * kernelParamsPtr->grid.realGridSizePadded[ZZ];
323 const int newComplexGridSize = kernelParamsPtr->grid.complexGridSizePadded[XX]
324 * kernelParamsPtr->grid.complexGridSizePadded[YY]
325 * kernelParamsPtr->grid.complexGridSizePadded[ZZ] * 2;
326 // Multiplied by 2 because we count complex grid size for complex numbers, but all allocations/pointers are float
327 if (pmeGpu->archSpecific->performOutOfPlaceFFT)
329 /* 2 separate grids */
330 reallocateDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, newComplexGridSize,
331 &pmeGpu->archSpecific->complexGridSize,
332 &pmeGpu->archSpecific->complexGridSizeAlloc,
333 pmeGpu->archSpecific->deviceContext_);
334 reallocateDeviceBuffer(
335 &kernelParamsPtr->grid.d_realGrid, newRealGridSize, &pmeGpu->archSpecific->realGridSize,
336 &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->deviceContext_);
340 /* A single buffer so that any grid will fit */
341 const int newGridsSize = std::max(newRealGridSize, newComplexGridSize);
342 reallocateDeviceBuffer(
343 &kernelParamsPtr->grid.d_realGrid, newGridsSize, &pmeGpu->archSpecific->realGridSize,
344 &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->deviceContext_);
345 kernelParamsPtr->grid.d_fourierGrid = kernelParamsPtr->grid.d_realGrid;
346 pmeGpu->archSpecific->complexGridSize = pmeGpu->archSpecific->realGridSize;
347 // the size might get used later for copying the grid
351 void pme_gpu_free_grids(const PmeGpu* pmeGpu)
353 if (pmeGpu->archSpecific->performOutOfPlaceFFT)
355 freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_fourierGrid);
357 freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid);
360 void pme_gpu_clear_grids(const PmeGpu* pmeGpu)
362 clearDeviceBufferAsync(&pmeGpu->kernelParams->grid.d_realGrid, 0,
363 pmeGpu->archSpecific->realGridSize, pmeGpu->archSpecific->pmeStream);
366 void pme_gpu_realloc_and_copy_fract_shifts(PmeGpu* pmeGpu)
368 pme_gpu_free_fract_shifts(pmeGpu);
370 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
372 const int nx = kernelParamsPtr->grid.realGridSize[XX];
373 const int ny = kernelParamsPtr->grid.realGridSize[YY];
374 const int nz = kernelParamsPtr->grid.realGridSize[ZZ];
375 const int cellCount = c_pmeNeighborUnitcellCount;
376 const int gridDataOffset[DIM] = { 0, cellCount * nx, cellCount * (nx + ny) };
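// The fractional shift and gridline index tables for X, Y and Z are stored contiguously;
// these offsets record where each dimension's cellCount * n entries begin.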
378 memcpy(kernelParamsPtr->grid.tablesOffsets, &gridDataOffset, sizeof(gridDataOffset));
380 const int newFractShiftsSize = cellCount * (nx + ny + nz);
382 #if GMX_GPU == GMX_GPU_CUDA
383 initParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable, kernelParamsPtr->fractShiftsTableTexture,
384 pmeGpu->common->fsh.data(), newFractShiftsSize);
386 initParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
387 kernelParamsPtr->gridlineIndicesTableTexture, pmeGpu->common->nn.data(),
389 #elif GMX_GPU == GMX_GPU_OPENCL
390 // No dedicated texture routines....
391 allocateDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, newFractShiftsSize,
392 pmeGpu->archSpecific->deviceContext_);
393 allocateDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, newFractShiftsSize,
394 pmeGpu->archSpecific->deviceContext_);
395 copyToDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, pmeGpu->common->fsh.data(), 0,
396 newFractShiftsSize, pmeGpu->archSpecific->pmeStream,
397 GpuApiCallBehavior::Async, nullptr);
398 copyToDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, pmeGpu->common->nn.data(), 0,
399 newFractShiftsSize, pmeGpu->archSpecific->pmeStream,
400 GpuApiCallBehavior::Async, nullptr);
404 void pme_gpu_free_fract_shifts(const PmeGpu* pmeGpu)
406 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
407 #if GMX_GPU == GMX_GPU_CUDA
408 destroyParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable,
409 kernelParamsPtr->fractShiftsTableTexture);
410 destroyParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
411 kernelParamsPtr->gridlineIndicesTableTexture);
412 #elif GMX_GPU == GMX_GPU_OPENCL
413 freeDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable);
414 freeDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable);
418 bool pme_gpu_stream_query(const PmeGpu* pmeGpu)
420 return haveStreamTasksCompleted(pmeGpu->archSpecific->pmeStream);
423 void pme_gpu_copy_input_gather_grid(const PmeGpu* pmeGpu, float* h_grid)
425 copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid, h_grid, 0, pmeGpu->archSpecific->realGridSize,
426 pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
429 void pme_gpu_copy_output_spread_grid(const PmeGpu* pmeGpu, float* h_grid)
431 copyFromDeviceBuffer(h_grid, &pmeGpu->kernelParams->grid.d_realGrid, 0,
432 pmeGpu->archSpecific->realGridSize, pmeGpu->archSpecific->pmeStream,
433 pmeGpu->settings.transferKind, nullptr);
434 pmeGpu->archSpecific->syncSpreadGridD2H.markEvent(pmeGpu->archSpecific->pmeStream);
437 void pme_gpu_copy_output_spread_atom_data(const PmeGpu* pmeGpu)
439 const int alignment = pme_gpu_get_atoms_per_warp(pmeGpu);
440 const size_t nAtomsPadded = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
441 const size_t splinesCount = DIM * nAtomsPadded * pmeGpu->common->pme_order;
442 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
443 copyFromDeviceBuffer(pmeGpu->staging.h_dtheta, &kernelParamsPtr->atoms.d_dtheta, 0, splinesCount,
444 pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
445 copyFromDeviceBuffer(pmeGpu->staging.h_theta, &kernelParamsPtr->atoms.d_theta, 0, splinesCount,
446 pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
447 copyFromDeviceBuffer(pmeGpu->staging.h_gridlineIndices, &kernelParamsPtr->atoms.d_gridlineIndices,
448 0, kernelParamsPtr->atoms.nAtoms * DIM, pmeGpu->archSpecific->pmeStream,
449 pmeGpu->settings.transferKind, nullptr);
452 void pme_gpu_copy_input_gather_atom_data(const PmeGpu* pmeGpu)
454 const int alignment = pme_gpu_get_atoms_per_warp(pmeGpu);
455 const size_t nAtomsPadded = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
456 const size_t splinesCount = DIM * nAtomsPadded * pmeGpu->common->pme_order;
457 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
460 // TODO: could clear only the padding and not the whole thing, but this is test-exclusive code anyway
461 clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_gridlineIndices, 0,
462 pmeGpu->nAtomsAlloc * DIM, pmeGpu->archSpecific->pmeStream);
463 clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_dtheta, 0,
464 pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
465 pmeGpu->archSpecific->pmeStream);
466 clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_theta, 0,
467 pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
468 pmeGpu->archSpecific->pmeStream);
470 copyToDeviceBuffer(&kernelParamsPtr->atoms.d_dtheta, pmeGpu->staging.h_dtheta, 0, splinesCount,
471 pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
472 copyToDeviceBuffer(&kernelParamsPtr->atoms.d_theta, pmeGpu->staging.h_theta, 0, splinesCount,
473 pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
474 copyToDeviceBuffer(&kernelParamsPtr->atoms.d_gridlineIndices, pmeGpu->staging.h_gridlineIndices,
475 0, kernelParamsPtr->atoms.nAtoms * DIM, pmeGpu->archSpecific->pmeStream,
476 pmeGpu->settings.transferKind, nullptr);
479 void pme_gpu_sync_spread_grid(const PmeGpu* pmeGpu)
481 pmeGpu->archSpecific->syncSpreadGridD2H.waitForEvent();
484 void pme_gpu_init_internal(PmeGpu* pmeGpu)
486 #if GMX_GPU == GMX_GPU_CUDA
487 // Prepare to use the device that this PME task was assigned earlier.
488 // Other entities, such as CUDA timing events, are known to implicitly use the device context.
489 CU_RET_ERR(cudaSetDevice(pmeGpu->deviceInfo->id), "Switching to PME CUDA device");
492 /* Allocate the target-specific structures */
493 pmeGpu->archSpecific.reset(new PmeGpuSpecific(pmeGpu->programHandle_->impl_->deviceContext_));
494 pmeGpu->kernelParams.reset(new PmeGpuKernelParams());
496 pmeGpu->archSpecific->performOutOfPlaceFFT = true;
497 /* This should give better performance, according to the cuFFT documentation.
498 * The performance seems to be the same though.
499 * TODO: PME could also try to pick up nice grid sizes (with factors of 2, 3, 5, 7).
502 // timing enabling - TODO put this in gpu_utils (even though generally this is just option handling?) and reuse in NB
503 if (GMX_GPU == GMX_GPU_CUDA)
505 /* WARNING: CUDA timings are incorrect with multiple streams.
506 * This is the main reason why they are disabled by default.
508 // TODO: Consider turning on by default when we can detect the number of streams.
509 pmeGpu->archSpecific->useTiming = (getenv("GMX_ENABLE_GPU_TIMING") != nullptr);
511 else if (GMX_GPU == GMX_GPU_OPENCL)
513 pmeGpu->archSpecific->useTiming = (getenv("GMX_DISABLE_GPU_TIMING") == nullptr);
516 #if GMX_GPU == GMX_GPU_CUDA
517 pmeGpu->maxGridWidthX = pmeGpu->deviceInfo->prop.maxGridSize[0];
518 #elif GMX_GPU == GMX_GPU_OPENCL
519 pmeGpu->maxGridWidthX = INT32_MAX / 2;
520 // TODO: is there really no global work size limit in OpenCL?
523 /* Creating a PME GPU stream:
524 * - default high priority with CUDA
525 * - no priorities implemented yet with OpenCL; see #2532
527 #if GMX_GPU == GMX_GPU_CUDA
529 int highest_priority, lowest_priority;
530 stat = cudaDeviceGetStreamPriorityRange(&lowest_priority, &highest_priority);
531 CU_RET_ERR(stat, "PME cudaDeviceGetStreamPriorityRange failed");
532 stat = cudaStreamCreateWithPriority(&pmeGpu->archSpecific->pmeStream,
533 cudaStreamDefault, // cudaStreamNonBlocking,
535 CU_RET_ERR(stat, "cudaStreamCreateWithPriority on the PME stream failed");
536 #elif GMX_GPU == GMX_GPU_OPENCL
537 cl_command_queue_properties queueProperties =
538 pmeGpu->archSpecific->useTiming ? CL_QUEUE_PROFILING_ENABLE : 0;
539 cl_device_id device_id = pmeGpu->deviceInfo->oclDeviceId;
541 pmeGpu->archSpecific->pmeStream = clCreateCommandQueue(
542 pmeGpu->archSpecific->deviceContext_.context(), device_id, queueProperties, &clError);
543 if (clError != CL_SUCCESS)
545 GMX_THROW(gmx::InternalError("Failed to create PME command queue"));
550 void pme_gpu_destroy_specific(const PmeGpu* pmeGpu)
552 #if GMX_GPU == GMX_GPU_CUDA
553 /* Destroy the CUDA stream */
554 cudaError_t stat = cudaStreamDestroy(pmeGpu->archSpecific->pmeStream);
555 CU_RET_ERR(stat, "PME cudaStreamDestroy error");
556 #elif GMX_GPU == GMX_GPU_OPENCL
557 cl_int clError = clReleaseCommandQueue(pmeGpu->archSpecific->pmeStream);
558 if (clError != CL_SUCCESS)
560 gmx_warning("Failed to destroy PME command queue");
565 void pme_gpu_reinit_3dfft(const PmeGpu* pmeGpu)
567 if (pme_gpu_settings(pmeGpu).performGPUFFT)
569 pmeGpu->archSpecific->fftSetup.resize(0);
570 for (int i = 0; i < pmeGpu->common->ngrids; i++)
572 pmeGpu->archSpecific->fftSetup.push_back(std::make_unique<GpuParallel3dFft>(pmeGpu));
577 void pme_gpu_destroy_3dfft(const PmeGpu* pmeGpu)
579 pmeGpu->archSpecific->fftSetup.resize(0);
582 int getSplineParamFullIndex(int order, int splineIndex, int dimIndex, int atomIndex, int atomsPerWarp)
584 if (order != c_pmeGpuOrder)
588 constexpr int fixedOrder = c_pmeGpuOrder;
589 GMX_UNUSED_VALUE(fixedOrder);
591 const int atomWarpIndex = atomIndex % atomsPerWarp;
592 const int warpIndex = atomIndex / atomsPerWarp;
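// Decompose the atom index into a warp index and an atom index within the warp; the templated
// helpers below combine these with the dimension and spline indices into the flat GPU-layout index.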
593 int indexBase, result;
594 switch (atomsPerWarp)
597 indexBase = getSplineParamIndexBase<fixedOrder, 1>(warpIndex, atomWarpIndex);
598 result = getSplineParamIndex<fixedOrder, 1>(indexBase, dimIndex, splineIndex);
602 indexBase = getSplineParamIndexBase<fixedOrder, 2>(warpIndex, atomWarpIndex);
603 result = getSplineParamIndex<fixedOrder, 2>(indexBase, dimIndex, splineIndex);
607 indexBase = getSplineParamIndexBase<fixedOrder, 4>(warpIndex, atomWarpIndex);
608 result = getSplineParamIndex<fixedOrder, 4>(indexBase, dimIndex, splineIndex);
612 indexBase = getSplineParamIndexBase<fixedOrder, 8>(warpIndex, atomWarpIndex);
613 result = getSplineParamIndex<fixedOrder, 8>(indexBase, dimIndex, splineIndex);
617 GMX_THROW(gmx::NotImplementedError(
618 gmx::formatString("Test function call not unrolled for atomsPerWarp = %d in "
619 "getSplineParamFullIndex",
625 void pme_gpu_getEnergyAndVirial(const gmx_pme_t& pme, PmeOutput* output)
627 const PmeGpu* pmeGpu = pme.gpu;
628 for (int j = 0; j < c_virialAndEnergyCount; j++)
630 GMX_ASSERT(std::isfinite(pmeGpu->staging.h_virialAndEnergy[j]),
631 "PME GPU produces incorrect energy/virial.");
635 output->coulombVirial_[XX][XX] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
636 output->coulombVirial_[YY][YY] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
637 output->coulombVirial_[ZZ][ZZ] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
638 output->coulombVirial_[XX][YY] = output->coulombVirial_[YY][XX] =
639 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
640 output->coulombVirial_[XX][ZZ] = output->coulombVirial_[ZZ][XX] =
641 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
642 output->coulombVirial_[YY][ZZ] = output->coulombVirial_[ZZ][YY] =
643 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
644 output->coulombEnergy_ = 0.5F * pmeGpu->staging.h_virialAndEnergy[j++];
647 /*! \brief Sets the force-related members in \p output
649 * \param[in] pmeGpu PME GPU data structure
650 * \param[out] output Pointer to PME output data structure
652 static void pme_gpu_getForceOutput(PmeGpu* pmeGpu, PmeOutput* output)
654 output->haveForceOutput_ = !pmeGpu->settings.useGpuForceReduction;
655 if (output->haveForceOutput_)
657 output->forces_ = pmeGpu->staging.h_forces;
661 PmeOutput pme_gpu_getOutput(const gmx_pme_t& pme, const bool computeEnergyAndVirial)
663 PmeGpu* pmeGpu = pme.gpu;
667 pme_gpu_getForceOutput(pmeGpu, &output);
669 if (computeEnergyAndVirial)
671 if (pme_gpu_settings(pmeGpu).performGPUSolve)
673 pme_gpu_getEnergyAndVirial(pme, &output);
677 get_pme_ener_vir_q(pme.solve_work, pme.nthread, &output);
683 void pme_gpu_update_input_box(PmeGpu gmx_unused* pmeGpu, const matrix gmx_unused box)
686 GMX_THROW(gmx::NotImplementedError("PME is implemented for single-precision only on GPU"));
689 pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
690 auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
691 kernelParamsPtr->current.boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
692 GMX_ASSERT(kernelParamsPtr->current.boxVolume != 0.0F, "Zero volume of the unit cell");
694 gmx::invertBoxMatrix(scaledBox, recipBox);
696 /* The GPU recipBox is transposed as compared to the CPU recipBox.
697 * Spread uses matrix columns (while solve and gather use rows).
698 * There is no particular reason for this; it might be further rethought/optimized for better access patterns.
700 const real newRecipBox[DIM][DIM] = { { recipBox[XX][XX], recipBox[YY][XX], recipBox[ZZ][XX] },
701 { 0.0, recipBox[YY][YY], recipBox[ZZ][YY] },
702 { 0.0, 0.0, recipBox[ZZ][ZZ] } };
703 memcpy(kernelParamsPtr->current.recipBox, newRecipBox, sizeof(matrix));
707 /*! \brief \libinternal
708 * (Re-)initializes all the PME GPU data related to the grid size and cut-off.
710 * \param[in] pmeGpu The PME GPU structure.
712 static void pme_gpu_reinit_grids(PmeGpu* pmeGpu)
714 auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
715 kernelParamsPtr->grid.ewaldFactor =
716 (M_PI * M_PI) / (pmeGpu->common->ewaldcoeff_q * pmeGpu->common->ewaldcoeff_q);
718 /* The grid size variants */
719 for (int i = 0; i < DIM; i++)
721 kernelParamsPtr->grid.realGridSize[i] = pmeGpu->common->nk[i];
722 kernelParamsPtr->grid.realGridSizeFP[i] =
723 static_cast<float>(kernelParamsPtr->grid.realGridSize[i]);
724 kernelParamsPtr->grid.realGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
726 // The complex grid currently uses no padding;
727 // if it starts to do so, then another test should be added for that
728 kernelParamsPtr->grid.complexGridSize[i] = kernelParamsPtr->grid.realGridSize[i];
729 kernelParamsPtr->grid.complexGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
731 /* FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
732 if (!pme_gpu_settings(pmeGpu).performGPUFFT)
734 // This allows the GPU spread grid and the CPU fftgrid to have the same layout, so that we can copy the data directly
735 kernelParamsPtr->grid.realGridSizePadded[ZZ] =
736 (kernelParamsPtr->grid.realGridSize[ZZ] / 2 + 1) * 2;
739 /* GPU FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
740 kernelParamsPtr->grid.complexGridSize[ZZ] /= 2;
741 kernelParamsPtr->grid.complexGridSize[ZZ]++;
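// e.g. a 64-point real minor dimension maps to 64 / 2 + 1 = 33 complex points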
742 kernelParamsPtr->grid.complexGridSizePadded[ZZ] = kernelParamsPtr->grid.complexGridSize[ZZ];
744 pme_gpu_realloc_and_copy_fract_shifts(pmeGpu);
745 pme_gpu_realloc_and_copy_bspline_values(pmeGpu);
746 pme_gpu_realloc_grids(pmeGpu);
747 pme_gpu_reinit_3dfft(pmeGpu);
750 /* Several GPU functions that refer to the CPU PME data live here.
751 * We would like to keep these away from the GPU-framework specific code for clarity,
752 * as well as to avoid compilation issues with MPI.
755 /*! \brief \libinternal
756 * Copies everything useful from the PME CPU to the PME GPU structure.
757 * The goal is to minimize interaction with the PME CPU structure in the GPU code.
759 * \param[in] pme The PME structure.
761 static void pme_gpu_copy_common_data_from(const gmx_pme_t* pme)
763 /* TODO: Consider refactoring the CPU PME code to use the same structure,
764 * so that this function becomes 2 lines */
765 PmeGpu* pmeGpu = pme->gpu;
766 pmeGpu->common->ngrids = pme->ngrids;
767 pmeGpu->common->epsilon_r = pme->epsilon_r;
768 pmeGpu->common->ewaldcoeff_q = pme->ewaldcoeff_q;
769 pmeGpu->common->nk[XX] = pme->nkx;
770 pmeGpu->common->nk[YY] = pme->nky;
771 pmeGpu->common->nk[ZZ] = pme->nkz;
772 pmeGpu->common->pme_order = pme->pme_order;
773 if (pmeGpu->common->pme_order != c_pmeGpuOrder)
775 GMX_THROW(gmx::NotImplementedError("pme_order != 4 is not implemented!"));
777 for (int i = 0; i < DIM; i++)
779 pmeGpu->common->bsp_mod[i].assign(pme->bsp_mod[i], pme->bsp_mod[i] + pmeGpu->common->nk[i]);
781 const int cellCount = c_pmeNeighborUnitcellCount;
782 pmeGpu->common->fsh.resize(0);
783 pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshx, pme->fshx + cellCount * pme->nkx);
784 pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshy, pme->fshy + cellCount * pme->nky);
785 pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshz, pme->fshz + cellCount * pme->nkz);
786 pmeGpu->common->nn.resize(0);
787 pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnx, pme->nnx + cellCount * pme->nkx);
788 pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nny, pme->nny + cellCount * pme->nky);
789 pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnz, pme->nnz + cellCount * pme->nkz);
790 pmeGpu->common->runMode = pme->runMode;
791 pmeGpu->common->isRankPmeOnly = !pme->bPPnode;
792 pmeGpu->common->boxScaler = pme->boxScaler;
795 /*! \libinternal \brief
796 * Uses heuristics to select the best-performing PME spread and gather kernels.
798 * \param[in,out] pmeGpu The PME GPU structure.
800 static void pme_gpu_select_best_performing_pme_spreadgather_kernels(PmeGpu* pmeGpu)
802 if (pmeGpu->kernelParams->atoms.nAtoms > c_pmeGpuPerformanceAtomLimit && (GMX_GPU == GMX_GPU_CUDA))
804 pmeGpu->settings.useOrderThreadsPerAtom = true;
805 pmeGpu->settings.recalculateSplines = true;
809 pmeGpu->settings.useOrderThreadsPerAtom = false;
810 pmeGpu->settings.recalculateSplines = false;
815 /*! \libinternal \brief
816 * Initializes the PME GPU data at the beginning of the run.
817 * TODO: this should become PmeGpu::PmeGpu()
819 * \param[in,out] pme The PME structure.
820 * \param[in,out] deviceInfo The GPU device information structure.
821 * \param[in] pmeGpuProgram The handle to the program/kernel data created outside (e.g. in unit tests/runner)
823 static void pme_gpu_init(gmx_pme_t* pme, const DeviceInformation* deviceInfo, const PmeGpuProgram* pmeGpuProgram)
825 pme->gpu = new PmeGpu();
826 PmeGpu* pmeGpu = pme->gpu;
827 changePinningPolicy(&pmeGpu->staging.h_forces, pme_get_pinning_policy());
828 pmeGpu->common = std::make_shared<PmeShared>();
830 /* These settings are set here for the whole run; dynamic ones are set in pme_gpu_reinit() */
831 /* A convenience variable. */
832 pmeGpu->settings.useDecomposition = (pme->nnodes != 1);
833 /* TODO: CPU gather with GPU spread is broken due to different theta/dtheta layout. */
834 pmeGpu->settings.performGPUGather = true;
835 // By default GPU-side reduction is off (explicitly set here for tests, otherwise reset per-step)
836 pmeGpu->settings.useGpuForceReduction = false;
838 pme_gpu_set_testing(pmeGpu, false);
840 pmeGpu->deviceInfo = deviceInfo;
841 GMX_ASSERT(pmeGpuProgram != nullptr, "GPU kernels must be already compiled");
842 pmeGpu->programHandle_ = pmeGpuProgram;
844 pmeGpu->initializedClfftLibrary_ = std::make_unique<gmx::ClfftInitializer>();
846 pme_gpu_init_internal(pmeGpu);
847 pme_gpu_alloc_energy_virial(pmeGpu);
849 pme_gpu_copy_common_data_from(pme);
851 GMX_ASSERT(pmeGpu->common->epsilon_r != 0.0F, "PME GPU: bad electrostatic coefficient");
853 auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
854 kernelParamsPtr->constants.elFactor = ONE_4PI_EPS0 / pmeGpu->common->epsilon_r;
857 void pme_gpu_transform_spline_atom_data(const PmeGpu* pmeGpu,
858 const PmeAtomComm* atc,
859 PmeSplineDataType type,
861 PmeLayoutTransform transform)
863 // The GPU atom spline data is laid out in a different way currently than the CPU one.
864 // This function converts the data from GPU to CPU layout (in the host memory).
865 // It is only intended for testing purposes so far.
866 // Ideally we should use similar layouts on CPU and GPU if we care about mixed modes and their
867 // performance (e.g. spreading on GPU, gathering on CPU).
868 GMX_RELEASE_ASSERT(atc->nthread == 1, "Only the serial PME data layout is supported");
869 const uintmax_t threadIndex = 0;
870 const auto atomCount = pme_gpu_get_kernel_params_base_ptr(pmeGpu)->atoms.nAtoms;
871 const auto atomsPerWarp = pme_gpu_get_atoms_per_warp(pmeGpu);
872 const auto pmeOrder = pmeGpu->common->pme_order;
873 GMX_ASSERT(pmeOrder == c_pmeGpuOrder, "Only PME order 4 is implemented");
875 real* cpuSplineBuffer;
876 float* h_splineBuffer;
879 case PmeSplineDataType::Values:
880 cpuSplineBuffer = atc->spline[threadIndex].theta.coefficients[dimIndex];
881 h_splineBuffer = pmeGpu->staging.h_theta;
884 case PmeSplineDataType::Derivatives:
885 cpuSplineBuffer = atc->spline[threadIndex].dtheta.coefficients[dimIndex];
886 h_splineBuffer = pmeGpu->staging.h_dtheta;
889 default: GMX_THROW(gmx::InternalError("Unknown spline data type"));
892 for (auto atomIndex = 0; atomIndex < atomCount; atomIndex++)
894 for (auto orderIndex = 0; orderIndex < pmeOrder; orderIndex++)
896 const auto gpuValueIndex =
897 getSplineParamFullIndex(pmeOrder, orderIndex, dimIndex, atomIndex, atomsPerWarp);
898 const auto cpuValueIndex = atomIndex * pmeOrder + orderIndex;
899 GMX_ASSERT(cpuValueIndex < atomCount * pmeOrder,
900 "Atom spline data index out of bounds (while transforming GPU data layout "
904 case PmeLayoutTransform::GpuToHost:
905 cpuSplineBuffer[cpuValueIndex] = h_splineBuffer[gpuValueIndex];
908 case PmeLayoutTransform::HostToGpu:
909 h_splineBuffer[gpuValueIndex] = cpuSplineBuffer[cpuValueIndex];
912 default: GMX_THROW(gmx::InternalError("Unknown layout transform"));
918 void pme_gpu_get_real_grid_sizes(const PmeGpu* pmeGpu, gmx::IVec* gridSize, gmx::IVec* paddedGridSize)
920 GMX_ASSERT(gridSize != nullptr, "");
921 GMX_ASSERT(paddedGridSize != nullptr, "");
922 GMX_ASSERT(pmeGpu != nullptr, "");
923 auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
924 for (int i = 0; i < DIM; i++)
926 (*gridSize)[i] = kernelParamsPtr->grid.realGridSize[i];
927 (*paddedGridSize)[i] = kernelParamsPtr->grid.realGridSizePadded[i];
931 void pme_gpu_reinit(gmx_pme_t* pme, const DeviceInformation* deviceInfo, const PmeGpuProgram* pmeGpuProgram)
933 GMX_ASSERT(pme != nullptr, "Need valid PME object");
934 if (pme->runMode == PmeRunMode::CPU)
936 GMX_ASSERT(pme->gpu == nullptr, "Should not have PME GPU object");
942 /* First-time initialization */
943 pme_gpu_init(pme, deviceInfo, pmeGpuProgram);
947 /* After this call nothing in the GPU code should refer to the gmx_pme_t *pme itself - until the next pme_gpu_reinit */
948 pme_gpu_copy_common_data_from(pme);
950 /* GPU FFT will only be used for a single rank. */
951 pme->gpu->settings.performGPUFFT =
952 (pme->gpu->common->runMode == PmeRunMode::GPU) && !pme->gpu->settings.useDecomposition;
953 pme->gpu->settings.performGPUSolve = (pme->gpu->common->runMode == PmeRunMode::GPU);
955 /* Reinit active timers */
956 pme_gpu_reinit_timings(pme->gpu);
958 pme_gpu_reinit_grids(pme->gpu);
959 // Note: if timing the reinit launch overhead becomes more relevant
960 // (e.g. with regular PP-PME re-balancing), we should pass wcycle here.
961 pme_gpu_reinit_computation(pme, nullptr);
962 /* Clear the previous box - doesn't hurt, and forces the PME CPU recipbox
963 * update for mixed mode on grid switch. TODO: use shared recipbox field.
965 std::memset(pme->gpu->common->previousBox, 0, sizeof(pme->gpu->common->previousBox));
968 void pme_gpu_destroy(PmeGpu* pmeGpu)
970 /* Free lots of data */
971 pme_gpu_free_energy_virial(pmeGpu);
972 pme_gpu_free_bspline_values(pmeGpu);
973 pme_gpu_free_forces(pmeGpu);
974 pme_gpu_free_coefficients(pmeGpu);
975 pme_gpu_free_spline_data(pmeGpu);
976 pme_gpu_free_grid_indices(pmeGpu);
977 pme_gpu_free_fract_shifts(pmeGpu);
978 pme_gpu_free_grids(pmeGpu);
980 pme_gpu_destroy_3dfft(pmeGpu);
982 /* Free the GPU-framework specific data last */
983 pme_gpu_destroy_specific(pmeGpu);
988 void pme_gpu_reinit_atoms(PmeGpu* pmeGpu, const int nAtoms, const real* charges)
990 auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
991 kernelParamsPtr->atoms.nAtoms = nAtoms;
992 const int alignment = pme_gpu_get_atom_data_alignment(pmeGpu);
993 pmeGpu->nAtomsPadded = ((nAtoms + alignment - 1) / alignment) * alignment;
994 const int nAtomsAlloc = c_usePadding ? pmeGpu->nAtomsPadded : nAtoms;
995 const bool haveToRealloc =
996 (pmeGpu->nAtomsAlloc < nAtomsAlloc); /* This check might be redundant, but is logical */
997 pmeGpu->nAtomsAlloc = nAtomsAlloc;
1000 GMX_RELEASE_ASSERT(false, "Only single precision supported");
1001 GMX_UNUSED_VALUE(charges);
1003 pme_gpu_realloc_and_copy_input_coefficients(pmeGpu, reinterpret_cast<const float*>(charges));
1004 /* Could also be checked for haveToRealloc, but the copy always needs to be performed */
1009 pme_gpu_realloc_forces(pmeGpu);
1010 pme_gpu_realloc_spline_data(pmeGpu);
1011 pme_gpu_realloc_grid_indices(pmeGpu);
1013 pme_gpu_select_best_performing_pme_spreadgather_kernels(pmeGpu);
1016 /*! \internal \brief
1017 * Returns the raw timing event from the corresponding GpuRegionTimer (if timings are enabled).
1018 * In CUDA, the result can be a nullptr stub, as per the GpuRegionTimer implementation.
1020 * \param[in] pmeGpu The PME GPU data structure.
1021 * \param[in] PMEStageId The PME GPU stage gtPME_ index from the enum in src/gromacs/timing/gpu_timing.h
1023 static CommandEvent* pme_gpu_fetch_timing_event(const PmeGpu* pmeGpu, size_t PMEStageId)
1025 CommandEvent* timingEvent = nullptr;
1026 if (pme_gpu_timings_enabled(pmeGpu))
1028 GMX_ASSERT(PMEStageId < pmeGpu->archSpecific->timingEvents.size(),
1029 "Wrong PME GPU timing event index");
1030 timingEvent = pmeGpu->archSpecific->timingEvents[PMEStageId].fetchNextEvent();
1035 void pme_gpu_3dfft(const PmeGpu* pmeGpu, gmx_fft_direction dir, int grid_index)
1037 int timerId = (dir == GMX_FFT_REAL_TO_COMPLEX) ? gtPME_FFT_R2C : gtPME_FFT_C2R;
1039 pme_gpu_start_timing(pmeGpu, timerId);
1040 pmeGpu->archSpecific->fftSetup[grid_index]->perform3dFft(
1041 dir, pme_gpu_fetch_timing_event(pmeGpu, timerId));
1042 pme_gpu_stop_timing(pmeGpu, timerId);
1046 * Given possibly large \p blockCount, returns a compact 1D or 2D grid for kernel scheduling,
1047 * to minimize the number of unused blocks.
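 * For example (illustrative numbers only): blockCount = 100000 with maxGridWidthX = 65535 gives
 * minRowCount = 2 and colCount = 50000, i.e. a 50000 x 2 launch grid with no unused blocks.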
1049 std::pair<int, int> inline pmeGpuCreateGrid(const PmeGpu* pmeGpu, int blockCount)
1051 // How many maximum widths in X do we need (hopefully just one)
1052 const int minRowCount = (blockCount + pmeGpu->maxGridWidthX - 1) / pmeGpu->maxGridWidthX;
1053 // Trying to make things even
1054 const int colCount = (blockCount + minRowCount - 1) / minRowCount;
1055 GMX_ASSERT((colCount * minRowCount - blockCount) >= 0, "pmeGpuCreateGrid: totally wrong");
1056 GMX_ASSERT((colCount * minRowCount - blockCount) < minRowCount,
1057 "pmeGpuCreateGrid: excessive blocks");
1058 return std::pair<int, int>(colCount, minRowCount);
1062 * Returns a pointer to appropriate spline and spread kernel based on the input bool values
1064 * \param[in] pmeGpu The PME GPU structure.
1065 * \param[in] useOrderThreadsPerAtom bool controlling if we should use order or order*order threads per atom
1066 * \param[in] writeSplinesToGlobal bool controlling if we should write spline data to global memory
1068 * \return Pointer to CUDA kernel
1070 static auto selectSplineAndSpreadKernelPtr(const PmeGpu* pmeGpu, bool useOrderThreadsPerAtom, bool writeSplinesToGlobal)
1072 PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1073 if (writeSplinesToGlobal)
1075 if (useOrderThreadsPerAtom)
1077 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesThPerAtom4;
1081 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplines;
1086 if (useOrderThreadsPerAtom)
1088 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4;
1092 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernel;
1100 * Returns a pointer to appropriate spline kernel based on the input bool values
1102 * \param[in] pmeGpu The PME GPU structure.
1103 * \param[in] useOrderThreadsPerAtom bool controlling if we should use order or order*order threads per atom
1104 * \param[in] writeSplinesToGlobal bool controlling if we should write spline data to global memory
1106 * \return Pointer to CUDA kernel
1108 static auto selectSplineKernelPtr(const PmeGpu* pmeGpu, bool useOrderThreadsPerAtom, bool gmx_unused writeSplinesToGlobal)
1110 PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1112 writeSplinesToGlobal,
1113 "Spline data should always be written to global memory when just calculating splines");
1115 if (useOrderThreadsPerAtom)
1117 kernelPtr = pmeGpu->programHandle_->impl_->splineKernelThPerAtom4;
1121 kernelPtr = pmeGpu->programHandle_->impl_->splineKernel;
1127 * Returns a pointer to appropriate spread kernel based on the input bool values
1129 * \param[in] pmeGpu The PME GPU structure.
1130 * \param[in] useOrderThreadsPerAtom bool controlling if we should use order or order*order threads per atom
1131 * \param[in] writeSplinesToGlobal bool controlling if we should write spline data to global memory
1133 * \return Pointer to CUDA kernel
1135 static auto selectSpreadKernelPtr(const PmeGpu* pmeGpu, bool useOrderThreadsPerAtom, bool writeSplinesToGlobal)
1137 PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1138 if (writeSplinesToGlobal)
1140 if (useOrderThreadsPerAtom)
1142 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelThPerAtom4;
1146 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernel;
1151 /* If we are not saving the spline data, we need to recalculate it
1152 using the spline-and-spread kernel */
1153 if (useOrderThreadsPerAtom)
1155 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4;
1159 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernel;
1165 void pme_gpu_spread(const PmeGpu* pmeGpu,
1166 GpuEventSynchronizer* xReadyOnDevice,
1167 int gmx_unused gridIndex,
1169 bool computeSplines,
1172 GMX_ASSERT(computeSplines || spreadCharges,
1173 "PME spline/spread kernel has invalid input (nothing to do)");
1174 const auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1175 GMX_ASSERT(kernelParamsPtr->atoms.nAtoms > 0, "No atom data in PME GPU spread");
1177 const size_t blockSize = pmeGpu->programHandle_->impl_->spreadWorkGroupSize;
1179 const int order = pmeGpu->common->pme_order;
1180 GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1181 const bool writeGlobal = pmeGpu->settings.copyAllOutputs;
1182 const bool useOrderThreadsPerAtom = pmeGpu->settings.useOrderThreadsPerAtom;
1183 const bool recalculateSplines = pmeGpu->settings.recalculateSplines;
1184 #if GMX_GPU == GMX_GPU_OPENCL
1185 GMX_ASSERT(!useOrderThreadsPerAtom, "Only 16 threads per atom supported in OpenCL");
1186 GMX_ASSERT(!recalculateSplines, "Recalculating splines not supported in OpenCL");
1188 const int atomsPerBlock = useOrderThreadsPerAtom ? blockSize / c_pmeSpreadGatherThreadsPerAtom4ThPerAtom
1189 : blockSize / c_pmeSpreadGatherThreadsPerAtom;
1191 // TODO: pick smaller block size in runtime if needed
1192 // (e.g. on 660 Ti where 50% occupancy is ~25% faster than 100% occupancy with RNAse (~17.8k atoms))
1193 // If doing so, change atomsPerBlock in the kernels as well.
1194 // TODO: test varying block sizes on modern arch-s as well
1195 // TODO: also consider using cudaFuncSetCacheConfig() for preferring shared memory on older architectures
1196 // (for spline data mostly)
1197 GMX_ASSERT(!c_usePadding || !(c_pmeAtomDataAlignment % atomsPerBlock),
1198 "inconsistent atom data padding vs. spreading block size");
1200 // Ensure that coordinates are ready on the device before launching spread;
1201 // only needed with CUDA on PP+PME ranks, not on separate PME ranks, in unit tests
1202 // nor in OpenCL as these cases use a single stream (hence xReadyOnDevice == nullptr).
1203 GMX_ASSERT(xReadyOnDevice != nullptr || (GMX_GPU != GMX_GPU_CUDA)
1204 || pmeGpu->common->isRankPmeOnly || pme_gpu_settings(pmeGpu).copyAllOutputs,
1205 "Need a valid coordinate synchronizer on PP+PME ranks with CUDA.");
1208 xReadyOnDevice->enqueueWaitEvent(pmeGpu->archSpecific->pmeStream);
1211 const int blockCount = pmeGpu->nAtomsPadded / atomsPerBlock;
1212 auto dimGrid = pmeGpuCreateGrid(pmeGpu, blockCount);
1214 KernelLaunchConfig config;
1215 config.blockSize[0] = order;
1216 config.blockSize[1] = useOrderThreadsPerAtom ? 1 : order;
1217 config.blockSize[2] = atomsPerBlock;
1218 config.gridSize[0] = dimGrid.first;
1219 config.gridSize[1] = dimGrid.second;
1220 config.stream = pmeGpu->archSpecific->pmeStream;
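// With PME order 4, each block is order x {1 or order} x atomsPerBlock threads,
// i.e. 4 threads per atom when useOrderThreadsPerAtom is set and 16 threads per atom otherwise.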
1223 PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1228 timingId = gtPME_SPLINEANDSPREAD;
1229 kernelPtr = selectSplineAndSpreadKernelPtr(pmeGpu, useOrderThreadsPerAtom,
1230 writeGlobal || (!recalculateSplines));
1234 timingId = gtPME_SPLINE;
1235 kernelPtr = selectSplineKernelPtr(pmeGpu, useOrderThreadsPerAtom,
1236 writeGlobal || (!recalculateSplines));
1241 timingId = gtPME_SPREAD;
1242 kernelPtr = selectSpreadKernelPtr(pmeGpu, useOrderThreadsPerAtom,
1243 writeGlobal || (!recalculateSplines));
1247 pme_gpu_start_timing(pmeGpu, timingId);
1248 auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1249 #if c_canEmbedBuffers
1250 const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1252 const auto kernelArgs = prepareGpuKernelArguments(
1253 kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->atoms.d_theta,
1254 &kernelParamsPtr->atoms.d_dtheta, &kernelParamsPtr->atoms.d_gridlineIndices,
1255 &kernelParamsPtr->grid.d_realGrid, &kernelParamsPtr->grid.d_fractShiftsTable,
1256 &kernelParamsPtr->grid.d_gridlineIndicesTable, &kernelParamsPtr->atoms.d_coefficients,
1257 &kernelParamsPtr->atoms.d_coordinates);
1260 launchGpuKernel(kernelPtr, config, timingEvent, "PME spline/spread", kernelArgs);
1261 pme_gpu_stop_timing(pmeGpu, timingId);
1263 const auto& settings = pmeGpu->settings;
1264 const bool copyBackGrid = spreadCharges && (!settings.performGPUFFT || settings.copyAllOutputs);
1267 pme_gpu_copy_output_spread_grid(pmeGpu, h_grid);
1269 const bool copyBackAtomData =
1270 computeSplines && (!settings.performGPUGather || settings.copyAllOutputs);
1271 if (copyBackAtomData)
1273 pme_gpu_copy_output_spread_atom_data(pmeGpu);
1277 void pme_gpu_solve(const PmeGpu* pmeGpu, t_complex* h_grid, GridOrdering gridOrdering, bool computeEnergyAndVirial)
1279 const auto& settings = pmeGpu->settings;
1280 const bool copyInputAndOutputGrid = !settings.performGPUFFT || settings.copyAllOutputs;
1282 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1284 float* h_gridFloat = reinterpret_cast<float*>(h_grid);
1285 if (copyInputAndOutputGrid)
1287 copyToDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, h_gridFloat, 0,
1288 pmeGpu->archSpecific->complexGridSize, pmeGpu->archSpecific->pmeStream,
1289 pmeGpu->settings.transferKind, nullptr);
1292 int majorDim = -1, middleDim = -1, minorDim = -1;
1293 switch (gridOrdering)
1295 case GridOrdering::YZX:
1301 case GridOrdering::XYZ:
1307 default: GMX_ASSERT(false, "Implement grid ordering here and below for the kernel launch");
1310 const int maxBlockSize = pmeGpu->programHandle_->impl_->solveMaxWorkGroupSize;
1312 const int gridLineSize = pmeGpu->kernelParams->grid.complexGridSize[minorDim];
1313 const int gridLinesPerBlock = std::max(maxBlockSize / gridLineSize, 1);
1314 const int blocksPerGridLine = (gridLineSize + maxBlockSize - 1) / maxBlockSize;
1316 if (blocksPerGridLine == 1)
1318 cellsPerBlock = gridLineSize * gridLinesPerBlock;
1322 cellsPerBlock = (gridLineSize + blocksPerGridLine - 1) / blocksPerGridLine;
1324 const int warpSize = pmeGpu->programHandle_->impl_->warpSize;
1325 const int blockSize = (cellsPerBlock + warpSize - 1) / warpSize * warpSize;
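// Worked example with illustrative numbers: gridLineSize = 65 and maxBlockSize = 256 give
// gridLinesPerBlock = 3, blocksPerGridLine = 1 and cellsPerBlock = 195; with warpSize = 32 the
// block size is then rounded up to 224 threads.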
1327 static_assert(GMX_GPU != GMX_GPU_CUDA || c_solveMaxWarpsPerBlock / 2 >= 4,
1328 "The CUDA solve energy kernels needs at least 4 warps. "
1329 "Here we launch at least half of the max warps.");
1331 KernelLaunchConfig config;
1332 config.blockSize[0] = blockSize;
1333 config.gridSize[0] = blocksPerGridLine;
1334 // rounding up to full warps so that shuffle operations produce defined results
1335 config.gridSize[1] = (pmeGpu->kernelParams->grid.complexGridSize[middleDim] + gridLinesPerBlock - 1)
1336 / gridLinesPerBlock;
1337 config.gridSize[2] = pmeGpu->kernelParams->grid.complexGridSize[majorDim];
1338 config.stream = pmeGpu->archSpecific->pmeStream;
1340 int timingId = gtPME_SOLVE;
1341 PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1342 if (gridOrdering == GridOrdering::YZX)
1344 kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveYZXEnergyKernel
1345 : pmeGpu->programHandle_->impl_->solveYZXKernel;
1347 else if (gridOrdering == GridOrdering::XYZ)
1349 kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveXYZEnergyKernel
1350 : pmeGpu->programHandle_->impl_->solveXYZKernel;
1353 pme_gpu_start_timing(pmeGpu, timingId);
1354 auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1355 #if c_canEmbedBuffers
1356 const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1358 const auto kernelArgs = prepareGpuKernelArguments(
1359 kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->grid.d_splineModuli,
1360 &kernelParamsPtr->constants.d_virialAndEnergy, &kernelParamsPtr->grid.d_fourierGrid);
1362 launchGpuKernel(kernelPtr, config, timingEvent, "PME solve", kernelArgs);
1363 pme_gpu_stop_timing(pmeGpu, timingId);
1365 if (computeEnergyAndVirial)
1367 copyFromDeviceBuffer(pmeGpu->staging.h_virialAndEnergy,
1368 &kernelParamsPtr->constants.d_virialAndEnergy, 0, c_virialAndEnergyCount,
1369 pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
1372 if (copyInputAndOutputGrid)
1374 copyFromDeviceBuffer(h_gridFloat, &kernelParamsPtr->grid.d_fourierGrid, 0,
1375 pmeGpu->archSpecific->complexGridSize, pmeGpu->archSpecific->pmeStream,
1376 pmeGpu->settings.transferKind, nullptr);
1381 * Returns a pointer to the appropriate gather kernel based on the input values
1383 * \param[in] pmeGpu The PME GPU structure.
1384 * \param[in] useOrderThreadsPerAtom bool controlling if we should use order or order*order threads per atom
1385 * \param[in] readSplinesFromGlobal bool controlling if we should read spline data from global memory
1387 * \return Pointer to CUDA kernel
1389 inline auto selectGatherKernelPtr(const PmeGpu* pmeGpu, bool useOrderThreadsPerAtom, bool readSplinesFromGlobal)
1392 PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1394 if (readSplinesFromGlobal)
1396 if (useOrderThreadsPerAtom)
1398 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesThPerAtom4;
1402 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplines;
1407 if (useOrderThreadsPerAtom)
1409 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelThPerAtom4;
1413 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernel;
1420 void pme_gpu_gather(PmeGpu* pmeGpu, const float* h_grid)
1422 const auto& settings = pmeGpu->settings;
1423 if (!settings.performGPUFFT || settings.copyAllOutputs)
1425 pme_gpu_copy_input_gather_grid(pmeGpu, const_cast<float*>(h_grid));
1428 if (settings.copyAllOutputs)
1430 pme_gpu_copy_input_gather_atom_data(pmeGpu);
1433 /* Set if we have unit tests */
1434 const bool readGlobal = pmeGpu->settings.copyAllOutputs;
1435 const size_t blockSize = pmeGpu->programHandle_->impl_->gatherWorkGroupSize;
1436 const bool useOrderThreadsPerAtom = pmeGpu->settings.useOrderThreadsPerAtom;
1437 const bool recalculateSplines = pmeGpu->settings.recalculateSplines;
1438 #if GMX_GPU == GMX_GPU_OPENCL
1439 GMX_ASSERT(!useOrderThreadsPerAtom, "Only 16 threads per atom supported in OpenCL");
1440 GMX_ASSERT(!recalculateSplines, "Recalculating splines not supported in OpenCL");
1442 const int atomsPerBlock = useOrderThreadsPerAtom ? blockSize / c_pmeSpreadGatherThreadsPerAtom4ThPerAtom
1443 : blockSize / c_pmeSpreadGatherThreadsPerAtom;
1445 GMX_ASSERT(!c_usePadding || !(c_pmeAtomDataAlignment % atomsPerBlock),
1446 "inconsistent atom data padding vs. gathering block size");
1448 const int blockCount = pmeGpu->nAtomsPadded / atomsPerBlock;
1449 auto dimGrid = pmeGpuCreateGrid(pmeGpu, blockCount);
1451 const int order = pmeGpu->common->pme_order;
1452 GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1454 KernelLaunchConfig config;
1455 config.blockSize[0] = order;
1456 config.blockSize[1] = useOrderThreadsPerAtom ? 1 : order;
1457 config.blockSize[2] = atomsPerBlock;
1458 config.gridSize[0] = dimGrid.first;
1459 config.gridSize[1] = dimGrid.second;
1460 config.stream = pmeGpu->archSpecific->pmeStream;
1462 // TODO test different cache configs
1464 int timingId = gtPME_GATHER;
1465 PmeGpuProgramImpl::PmeKernelHandle kernelPtr =
1466 selectGatherKernelPtr(pmeGpu, useOrderThreadsPerAtom, readGlobal || (!recalculateSplines));
1467 // TODO design kernel selection getters and make PmeGpu a friend of PmeGpuProgramImpl
1469 pme_gpu_start_timing(pmeGpu, timingId);
1470 auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1471 const auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1472 #if c_canEmbedBuffers
1473 const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1475 const auto kernelArgs = prepareGpuKernelArguments(
1476 kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->atoms.d_coefficients,
1477 &kernelParamsPtr->grid.d_realGrid, &kernelParamsPtr->atoms.d_theta,
1478 &kernelParamsPtr->atoms.d_dtheta, &kernelParamsPtr->atoms.d_gridlineIndices,
1479 &kernelParamsPtr->atoms.d_forces);
1481 launchGpuKernel(kernelPtr, config, timingEvent, "PME gather", kernelArgs);
1482 pme_gpu_stop_timing(pmeGpu, timingId);
1484 if (pmeGpu->settings.useGpuForceReduction)
1486 pmeGpu->archSpecific->pmeForcesReady.markEvent(pmeGpu->archSpecific->pmeStream);
1490 pme_gpu_copy_output_forces(pmeGpu);
1494 void* pme_gpu_get_kernelparam_forces(const PmeGpu* pmeGpu)
1496 if (pmeGpu && pmeGpu->kernelParams)
1498 return pmeGpu->kernelParams->atoms.d_forces;
1506 void pme_gpu_set_kernelparam_coordinates(const PmeGpu* pmeGpu, DeviceBuffer<gmx::RVec> d_x)
1508 GMX_ASSERT(pmeGpu && pmeGpu->kernelParams,
1509 "PME GPU device buffer can not be set in non-GPU builds or before the GPU PME was "
1512 GMX_ASSERT(checkDeviceBuffer(d_x, pmeGpu->kernelParams->atoms.nAtoms),
1513 "The device-side buffer can not be set.");
1515 pmeGpu->kernelParams->atoms.d_coordinates = d_x;
1518 void* pme_gpu_get_stream(const PmeGpu* pmeGpu)
1522 return static_cast<void*>(&pmeGpu->archSpecific->pmeStream);
1530 const DeviceContext* pme_gpu_get_context(const PmeGpu* pmeGpu)
1534 "GPU context object was requested, but PME GPU object was not (yet) initialized.");
1535 return &pmeGpu->archSpecific->deviceContext_;
1538 GpuEventSynchronizer* pme_gpu_get_forces_ready_synchronizer(const PmeGpu* pmeGpu)
1540 if (pmeGpu && pmeGpu->kernelParams)
1542 return &pmeGpu->archSpecific->pmeForcesReady;