/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2016,2017,2018,2019,2020 by the GROMACS development team.
 * Copyright (c) 2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief This file contains internal function implementations
 * for performing the PME calculations on GPU.
 *
 * Note that this file is compiled as regular C++ source in OpenCL builds, but
 * it is treated as CUDA source in CUDA-enabled GPU builds.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 * \ingroup module_ewald
 */
#include "gmxpre.h"

#include "pme_gpu_internal.h"

#include "config.h"

#include <cmath>
#include <cstring>
#include <memory>

#include "gromacs/ewald/ewald_utils.h"
#include "gromacs/fft/gpu_3dfft.h"
#include "gromacs/gpu_utils/device_context.h"
#include "gromacs/gpu_utils/device_stream.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/gpu_utils/pmalloc.h"
#if GMX_GPU_SYCL
#    include "gromacs/gpu_utils/syclutils.h"
#endif
#include "gromacs/hardware/device_information.h"
#include "gromacs/math/invertmatrix.h"
#include "gromacs/math/units.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/stringutil.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/ewald/pme_coordinate_receiver_gpu.h"

#include "pme_gpu_calculate_splines.h"
#include "pme_gpu_constants.h"
#include "pme_gpu_program_impl.h"
#include "pme_gpu_timings.h"
#include "pme_gpu_types.h"
#include "pme_gpu_types_host.h"
#include "pme_gpu_types_host_impl.h"

#include "pme_internal.h"
#include "pme_solve.h"

/*! \brief
 * Atom limit above which it is advantageous to recalculate the splines
 * in the gather kernel and to use fewer threads per atom in the spline and spread kernels.
 */
constexpr int c_pmeGpuPerformanceAtomLimit = 23000;

/*! \internal \brief
 * Wrapper for getting a pointer to the plain C++ part of the GPU kernel parameters structure.
 *
 * \param[in] pmeGpu  The PME GPU structure.
 * \returns The pointer to the kernel parameters.
 */
static PmeGpuKernelParamsBase* pme_gpu_get_kernel_params_base_ptr(const PmeGpu* pmeGpu)
{
    // reinterpret_cast is needed because the derived CUDA structure is not known in this file
    auto* kernelParamsPtr = reinterpret_cast<PmeGpuKernelParamsBase*>(pmeGpu->kernelParams.get());
    return kernelParamsPtr;
}

/*! \brief
 * Atom data block size (in terms of number of atoms).
 * This is the least common multiple of number of atoms processed by
 * a single block/workgroup of the spread and gather kernels.
 * The GPU atom data buffers must be padded, which means that
 * the numbers of atoms used for determining the size of the memory
 * allocation must be divisible by this.
 */
constexpr int c_pmeAtomDataBlockSize = 64;

int pme_gpu_get_atom_data_block_size()
{
    return c_pmeAtomDataBlockSize;
}

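/* Illustration (not part of the GROMACS API): the padding rule applied in
 * pme_gpu_reinit_atoms() below rounds an atom count up to the next multiple of
 * c_pmeAtomDataBlockSize. The helper and the sample count here are purely
 * illustrative.
 */
static constexpr int pmeGpuPaddedAtomCountExample(const int nAtoms)
{
    // Round nAtoms up to a whole number of atom data blocks
    return ((nAtoms + c_pmeAtomDataBlockSize - 1) / c_pmeAtomDataBlockSize) * c_pmeAtomDataBlockSize;
}
static_assert(pmeGpuPaddedAtomCountExample(100) == 128,
              "100 atoms pad up to two 64-atom blocks");
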
void pme_gpu_synchronize(const PmeGpu* pmeGpu)
{
    pmeGpu->archSpecific->pmeStream_.synchronize();
}

void pme_gpu_alloc_energy_virial(PmeGpu* pmeGpu)
{
    const size_t energyAndVirialSize = c_virialAndEnergyCount * sizeof(float);

    GMX_ASSERT(
            pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
            "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");

    for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
    {
        allocateDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy[gridIndex],
                             c_virialAndEnergyCount,
                             pmeGpu->archSpecific->deviceContext_);
        pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_virialAndEnergy[gridIndex]), energyAndVirialSize);
    }
}

void pme_gpu_free_energy_virial(PmeGpu* pmeGpu)
{
    for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
    {
        freeDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy[gridIndex]);
        pfree(pmeGpu->staging.h_virialAndEnergy[gridIndex]);
        pmeGpu->staging.h_virialAndEnergy[gridIndex] = nullptr;
    }
}

void pme_gpu_clear_energy_virial(const PmeGpu* pmeGpu)
{
    for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
    {
        clearDeviceBufferAsync(&pmeGpu->kernelParams->constants.d_virialAndEnergy[gridIndex],
                               0,
                               c_virialAndEnergyCount,
                               pmeGpu->archSpecific->pmeStream_);
    }
}

void pme_gpu_realloc_and_copy_bspline_values(PmeGpu* pmeGpu, const int gridIndex)
{
    GMX_ASSERT(
            pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
            "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
    GMX_ASSERT(gridIndex < pmeGpu->common->ngrids,
               "Invalid combination of gridIndex and number of grids");

    const int splineValuesOffset[DIM] = { 0,
                                          pmeGpu->kernelParams->grid.realGridSize[XX],
                                          pmeGpu->kernelParams->grid.realGridSize[XX]
                                                  + pmeGpu->kernelParams->grid.realGridSize[YY] };
    memcpy(&pmeGpu->kernelParams->grid.splineValuesOffset, &splineValuesOffset, sizeof(splineValuesOffset));

    const int newSplineValuesSize = pmeGpu->kernelParams->grid.realGridSize[XX]
                                    + pmeGpu->kernelParams->grid.realGridSize[YY]
                                    + pmeGpu->kernelParams->grid.realGridSize[ZZ];
    const bool shouldRealloc = (newSplineValuesSize > pmeGpu->archSpecific->splineValuesSize[gridIndex]);
    reallocateDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli[gridIndex],
                           newSplineValuesSize,
                           &pmeGpu->archSpecific->splineValuesSize[gridIndex],
                           &pmeGpu->archSpecific->splineValuesCapacity[gridIndex],
                           pmeGpu->archSpecific->deviceContext_);
    if (shouldRealloc)
    {
        /* Reallocate the host buffer */
        pfree(pmeGpu->staging.h_splineModuli[gridIndex]);
        pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_splineModuli[gridIndex]),
                newSplineValuesSize * sizeof(float));
    }
    for (int i = 0; i < DIM; i++)
    {
        memcpy(pmeGpu->staging.h_splineModuli[gridIndex] + splineValuesOffset[i],
               pmeGpu->common->bsp_mod[i].data(),
               pmeGpu->common->bsp_mod[i].size() * sizeof(float));
    }
    /* TODO: pin original buffer instead! */
    copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli[gridIndex],
                       pmeGpu->staging.h_splineModuli[gridIndex],
                       0,
                       newSplineValuesSize,
                       pmeGpu->archSpecific->pmeStream_,
                       pmeGpu->settings.transferKind,
                       nullptr);
}

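/* Layout sketch (illustrative grid sizes, not from any particular run): for a
 * 32x25x20 real grid, the three B-spline moduli arrays above are packed
 * back-to-back in d_splineModuli with splineValuesOffset = { 0, 32, 32 + 25 }
 * and a total length of 32 + 25 + 20 = 77 floats.
 */
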
void pme_gpu_free_bspline_values(const PmeGpu* pmeGpu)
{
    for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
    {
        pfree(pmeGpu->staging.h_splineModuli[gridIndex]);
        freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli[gridIndex]);
    }
}

void pme_gpu_realloc_forces(PmeGpu* pmeGpu)
{
    const size_t newForcesSize = pmeGpu->nAtomsAlloc;
    GMX_ASSERT(newForcesSize > 0, "Bad number of atoms in PME GPU");
    reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces,
                           newForcesSize,
                           &pmeGpu->archSpecific->forcesSize,
                           &pmeGpu->archSpecific->forcesSizeAlloc,
                           pmeGpu->archSpecific->deviceContext_);
    pmeGpu->staging.h_forces.reserveWithPadding(pmeGpu->nAtomsAlloc);
    pmeGpu->staging.h_forces.resizeWithPadding(pmeGpu->kernelParams->atoms.nAtoms);
}

void pme_gpu_free_forces(const PmeGpu* pmeGpu)
{
    freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces);
}

void pme_gpu_copy_input_forces(PmeGpu* pmeGpu)
{
    GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
    copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces,
                       pmeGpu->staging.h_forces.data(),
                       0,
                       pmeGpu->kernelParams->atoms.nAtoms,
                       pmeGpu->archSpecific->pmeStream_,
                       pmeGpu->settings.transferKind,
                       nullptr);
}

void pme_gpu_copy_output_forces(PmeGpu* pmeGpu)
{
    GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
    copyFromDeviceBuffer(pmeGpu->staging.h_forces.data(),
                         &pmeGpu->kernelParams->atoms.d_forces,
                         0,
                         pmeGpu->kernelParams->atoms.nAtoms,
                         pmeGpu->archSpecific->pmeStream_,
                         pmeGpu->settings.transferKind,
                         nullptr);
}

void pme_gpu_realloc_and_copy_input_coefficients(const PmeGpu* pmeGpu,
                                                 const float*  h_coefficients,
                                                 const int     gridIndex)
{
    GMX_ASSERT(h_coefficients, "Bad host-side charge buffer in PME GPU");
    const size_t newCoefficientsSize = pmeGpu->nAtomsAlloc;
    GMX_ASSERT(newCoefficientsSize > 0, "Bad number of atoms in PME GPU");
    reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex],
                           newCoefficientsSize,
                           &pmeGpu->archSpecific->coefficientsSize[gridIndex],
                           &pmeGpu->archSpecific->coefficientsCapacity[gridIndex],
                           pmeGpu->archSpecific->deviceContext_);
    copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex],
                       const_cast<float*>(h_coefficients),
                       0,
                       pmeGpu->kernelParams->atoms.nAtoms,
                       pmeGpu->archSpecific->pmeStream_,
                       pmeGpu->settings.transferKind,
                       nullptr);

    const size_t paddingIndex = pmeGpu->kernelParams->atoms.nAtoms;
    const size_t paddingCount = pmeGpu->nAtomsAlloc - paddingIndex;
    if (paddingCount > 0)
    {
        clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex],
                               paddingIndex,
                               paddingCount,
                               pmeGpu->archSpecific->pmeStream_);
    }
}

void pme_gpu_free_coefficients(const PmeGpu* pmeGpu)
{
    for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
    {
        freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex]);
    }
}

void pme_gpu_realloc_spline_data(PmeGpu* pmeGpu)
{
    const int order             = pmeGpu->common->pme_order;
    const int newSplineDataSize = DIM * order * pmeGpu->nAtomsAlloc;
    GMX_ASSERT(newSplineDataSize > 0, "Bad number of atoms in PME GPU");
    /* Two arrays of the same size */
    const bool shouldRealloc        = (newSplineDataSize > pmeGpu->archSpecific->splineDataSize);
    int        currentSizeTemp      = pmeGpu->archSpecific->splineDataSize;
    int        currentSizeTempAlloc = pmeGpu->archSpecific->splineDataSizeAlloc;
    reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta,
                           newSplineDataSize,
                           &currentSizeTemp,
                           &currentSizeTempAlloc,
                           pmeGpu->archSpecific->deviceContext_);
    reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta,
                           newSplineDataSize,
                           &pmeGpu->archSpecific->splineDataSize,
                           &pmeGpu->archSpecific->splineDataSizeAlloc,
                           pmeGpu->archSpecific->deviceContext_);
    // the host side reallocation
    if (shouldRealloc)
    {
        pfree(pmeGpu->staging.h_theta);
        pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_theta), newSplineDataSize * sizeof(float));
        pfree(pmeGpu->staging.h_dtheta);
        pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_dtheta), newSplineDataSize * sizeof(float));
    }
}

void pme_gpu_free_spline_data(const PmeGpu* pmeGpu)
{
    /* Two arrays of the same size */
    freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta);
    freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta);
    pfree(pmeGpu->staging.h_theta);
    pfree(pmeGpu->staging.h_dtheta);
}

void pme_gpu_realloc_grid_indices(PmeGpu* pmeGpu)
{
    const size_t newIndicesSize = DIM * pmeGpu->nAtomsAlloc;
    GMX_ASSERT(newIndicesSize > 0, "Bad number of atoms in PME GPU");
    reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices,
                           newIndicesSize,
                           &pmeGpu->archSpecific->gridlineIndicesSize,
                           &pmeGpu->archSpecific->gridlineIndicesSizeAlloc,
                           pmeGpu->archSpecific->deviceContext_);
    pfree(pmeGpu->staging.h_gridlineIndices);
    pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_gridlineIndices), newIndicesSize * sizeof(int));
}

void pme_gpu_free_grid_indices(const PmeGpu* pmeGpu)
{
    freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices);
    pfree(pmeGpu->staging.h_gridlineIndices);
}

void pme_gpu_realloc_grids(PmeGpu* pmeGpu)
{
    auto* kernelParamsPtr = pmeGpu->kernelParams.get();

    const int newRealGridSize = kernelParamsPtr->grid.realGridSizePadded[XX]
                                * kernelParamsPtr->grid.realGridSizePadded[YY]
                                * kernelParamsPtr->grid.realGridSizePadded[ZZ];
    const int newComplexGridSize = kernelParamsPtr->grid.complexGridSizePadded[XX]
                                   * kernelParamsPtr->grid.complexGridSizePadded[YY]
                                   * kernelParamsPtr->grid.complexGridSizePadded[ZZ] * 2;
    for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
    {
        // Multiplied by 2 because we count complex grid size for complex numbers, but all allocations/pointers are float
        if (pmeGpu->archSpecific->performOutOfPlaceFFT)
        {
            /* 2 separate grids */
            reallocateDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid[gridIndex],
                                   newComplexGridSize,
                                   &pmeGpu->archSpecific->complexGridSize[gridIndex],
                                   &pmeGpu->archSpecific->complexGridCapacity[gridIndex],
                                   pmeGpu->archSpecific->deviceContext_);
            reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid[gridIndex],
                                   newRealGridSize,
                                   &pmeGpu->archSpecific->realGridSize[gridIndex],
                                   &pmeGpu->archSpecific->realGridCapacity[gridIndex],
                                   pmeGpu->archSpecific->deviceContext_);
        }
        else
        {
            /* A single buffer so that any grid will fit */
            const int newGridsSize = std::max(newRealGridSize, newComplexGridSize);
            reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid[gridIndex],
                                   newGridsSize,
                                   &pmeGpu->archSpecific->realGridSize[gridIndex],
                                   &pmeGpu->archSpecific->realGridCapacity[gridIndex],
                                   pmeGpu->archSpecific->deviceContext_);
            kernelParamsPtr->grid.d_fourierGrid[gridIndex] = kernelParamsPtr->grid.d_realGrid[gridIndex];
            pmeGpu->archSpecific->complexGridSize[gridIndex] =
                    pmeGpu->archSpecific->realGridSize[gridIndex];
            // the size might get used later for copying the grid
        }
    }
}

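/* Sizing sketch (illustrative numbers): for a 64x64x64 real grid with GPU FFT,
 * realGridSizePadded = {64, 64, 64} gives newRealGridSize = 262144 floats, and
 * complexGridSizePadded = {64, 64, 33} gives newComplexGridSize = 64*64*33*2
 * = 270336 floats (the factor 2 stores each complex number as two floats).
 * With in-place FFT, one buffer of max(262144, 270336) floats serves both grids.
 */
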
void pme_gpu_free_grids(const PmeGpu* pmeGpu)
{
    for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
    {
        if (pmeGpu->archSpecific->performOutOfPlaceFFT)
        {
            freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_fourierGrid[gridIndex]);
        }
        freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid[gridIndex]);
    }
}

void pme_gpu_clear_grids(const PmeGpu* pmeGpu)
{
    for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
    {
        clearDeviceBufferAsync(&pmeGpu->kernelParams->grid.d_realGrid[gridIndex],
                               0,
                               pmeGpu->archSpecific->realGridSize[gridIndex],
                               pmeGpu->archSpecific->pmeStream_);
    }
}

void pme_gpu_realloc_and_copy_fract_shifts(PmeGpu* pmeGpu)
{
    pme_gpu_free_fract_shifts(pmeGpu);

    auto* kernelParamsPtr = pmeGpu->kernelParams.get();

    const int nx                  = kernelParamsPtr->grid.realGridSize[XX];
    const int ny                  = kernelParamsPtr->grid.realGridSize[YY];
    const int nz                  = kernelParamsPtr->grid.realGridSize[ZZ];
    const int cellCount           = c_pmeNeighborUnitcellCount;
    const int gridDataOffset[DIM] = { 0, cellCount * nx, cellCount * (nx + ny) };

    memcpy(kernelParamsPtr->grid.tablesOffsets, &gridDataOffset, sizeof(gridDataOffset));

    const int newFractShiftsSize = cellCount * (nx + ny + nz);

    initParamLookupTable(&kernelParamsPtr->grid.d_fractShiftsTable,
                         &kernelParamsPtr->fractShiftsTableTexture,
                         pmeGpu->common->fsh.data(),
                         newFractShiftsSize,
                         pmeGpu->archSpecific->deviceContext_);

    initParamLookupTable(&kernelParamsPtr->grid.d_gridlineIndicesTable,
                         &kernelParamsPtr->gridlineIndicesTableTexture,
                         pmeGpu->common->nn.data(),
                         newFractShiftsSize,
                         pmeGpu->archSpecific->deviceContext_);
}

void pme_gpu_free_fract_shifts(const PmeGpu* pmeGpu)
{
    auto* kernelParamsPtr = pmeGpu->kernelParams.get();
#if GMX_GPU_CUDA
    destroyParamLookupTable(&kernelParamsPtr->grid.d_fractShiftsTable,
                            &kernelParamsPtr->fractShiftsTableTexture);
    destroyParamLookupTable(&kernelParamsPtr->grid.d_gridlineIndicesTable,
                            &kernelParamsPtr->gridlineIndicesTableTexture);
#elif GMX_GPU_OPENCL || GMX_GPU_SYCL
    freeDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable);
    freeDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable);
#endif
}

bool pme_gpu_stream_query(const PmeGpu* pmeGpu)
{
    return haveStreamTasksCompleted(pmeGpu->archSpecific->pmeStream_);
}

void pme_gpu_copy_input_gather_grid(const PmeGpu* pmeGpu, const float* h_grid, const int gridIndex)
{
    copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid[gridIndex],
                       const_cast<float*>(h_grid),
                       0,
                       pmeGpu->archSpecific->realGridSize[gridIndex],
                       pmeGpu->archSpecific->pmeStream_,
                       pmeGpu->settings.transferKind,
                       nullptr);
}

void pme_gpu_copy_output_spread_grid(const PmeGpu* pmeGpu, float* h_grid, const int gridIndex)
{
    copyFromDeviceBuffer(h_grid,
                         &pmeGpu->kernelParams->grid.d_realGrid[gridIndex],
                         0,
                         pmeGpu->archSpecific->realGridSize[gridIndex],
                         pmeGpu->archSpecific->pmeStream_,
                         pmeGpu->settings.transferKind,
                         nullptr);
    pmeGpu->archSpecific->syncSpreadGridD2H.markEvent(pmeGpu->archSpecific->pmeStream_);
}

void pme_gpu_copy_output_spread_atom_data(const PmeGpu* pmeGpu)
{
    const size_t splinesCount    = DIM * pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order;
    auto*        kernelParamsPtr = pmeGpu->kernelParams.get();
    copyFromDeviceBuffer(pmeGpu->staging.h_dtheta,
                         &kernelParamsPtr->atoms.d_dtheta,
                         0,
                         splinesCount,
                         pmeGpu->archSpecific->pmeStream_,
                         pmeGpu->settings.transferKind,
                         nullptr);
    copyFromDeviceBuffer(pmeGpu->staging.h_theta,
                         &kernelParamsPtr->atoms.d_theta,
                         0,
                         splinesCount,
                         pmeGpu->archSpecific->pmeStream_,
                         pmeGpu->settings.transferKind,
                         nullptr);
    copyFromDeviceBuffer(pmeGpu->staging.h_gridlineIndices,
                         &kernelParamsPtr->atoms.d_gridlineIndices,
                         0,
                         kernelParamsPtr->atoms.nAtoms * DIM,
                         pmeGpu->archSpecific->pmeStream_,
                         pmeGpu->settings.transferKind,
                         nullptr);
}

void pme_gpu_copy_input_gather_atom_data(const PmeGpu* pmeGpu)
{
    const size_t splinesCount    = DIM * pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order;
    auto*        kernelParamsPtr = pmeGpu->kernelParams.get();

    // TODO: could clear only the padding and not the whole thing, but this is a test-exclusive code anyway
    clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_gridlineIndices,
                           0,
                           pmeGpu->nAtomsAlloc * DIM,
                           pmeGpu->archSpecific->pmeStream_);
    clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_dtheta,
                           0,
                           pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
                           pmeGpu->archSpecific->pmeStream_);
    clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_theta,
                           0,
                           pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
                           pmeGpu->archSpecific->pmeStream_);

    copyToDeviceBuffer(&kernelParamsPtr->atoms.d_dtheta,
                       pmeGpu->staging.h_dtheta,
                       0,
                       splinesCount,
                       pmeGpu->archSpecific->pmeStream_,
                       pmeGpu->settings.transferKind,
                       nullptr);
    copyToDeviceBuffer(&kernelParamsPtr->atoms.d_theta,
                       pmeGpu->staging.h_theta,
                       0,
                       splinesCount,
                       pmeGpu->archSpecific->pmeStream_,
                       pmeGpu->settings.transferKind,
                       nullptr);
    copyToDeviceBuffer(&kernelParamsPtr->atoms.d_gridlineIndices,
                       pmeGpu->staging.h_gridlineIndices,
                       0,
                       kernelParamsPtr->atoms.nAtoms * DIM,
                       pmeGpu->archSpecific->pmeStream_,
                       pmeGpu->settings.transferKind,
                       nullptr);
}

void pme_gpu_sync_spread_grid(const PmeGpu* pmeGpu)
{
    pmeGpu->archSpecific->syncSpreadGridD2H.waitForEvent();
}

/*! \brief Internal GPU initialization for PME.
 *
 * \param[in] pmeGpu         GPU PME data.
 * \param[in] deviceContext  GPU context.
 * \param[in] deviceStream   GPU stream.
 */
static void pme_gpu_init_internal(PmeGpu* pmeGpu, const DeviceContext& deviceContext, const DeviceStream& deviceStream)
{
    /* Allocate the target-specific structures */
    pmeGpu->archSpecific.reset(new PmeGpuSpecific(deviceContext, deviceStream));
    pmeGpu->kernelParams.reset(new PmeGpuKernelParams());

    pmeGpu->archSpecific->performOutOfPlaceFFT = true;
    /* This should give better performance, according to the cuFFT documentation.
     * The performance seems to be the same though.
     * TODO: PME could also try to pick up nice grid sizes (with factors of 2, 3, 5, 7).
     */

    pmeGpu->kernelParams->usePipeline       = char(false);
    pmeGpu->kernelParams->pipelineAtomStart = 0;
    pmeGpu->kernelParams->pipelineAtomEnd   = 0;
#if GMX_GPU_CUDA
    pmeGpu->maxGridWidthX = deviceContext.deviceInfo().prop.maxGridSize[0];
#else
    // Use this path for any non-CUDA GPU acceleration
    // TODO: is there no really global work size limit in OpenCL?
    pmeGpu->maxGridWidthX = INT32_MAX / 2;
#endif
}

void pme_gpu_reinit_3dfft(const PmeGpu* pmeGpu)
{
    if (pme_gpu_settings(pmeGpu).performGPUFFT)
    {
        pmeGpu->archSpecific->fftSetup.resize(0);
        const bool         performOutOfPlaceFFT      = pmeGpu->archSpecific->performOutOfPlaceFFT;
        const bool         allocateGrid              = false;
        MPI_Comm           comm                      = MPI_COMM_NULL;
        std::array<int, 1> gridOffsetsInXForEachRank = { 0 };
        std::array<int, 1> gridOffsetsInYForEachRank = { 0 };
#if GMX_GPU_CUDA
        const gmx::FftBackend backend = gmx::FftBackend::Cufft;
#elif GMX_GPU_OPENCL
        const gmx::FftBackend backend = gmx::FftBackend::Ocl;
#elif GMX_GPU_SYCL
#    if GMX_SYCL_DPCPP && GMX_FFT_MKL
        const gmx::FftBackend backend = gmx::FftBackend::SyclMkl;
#    elif GMX_SYCL_HIPSYCL
        const gmx::FftBackend backend = gmx::FftBackend::SyclRocfft;
#    else
        const gmx::FftBackend backend = gmx::FftBackend::Sycl;
#    endif
#else
        GMX_RELEASE_ASSERT(false, "Unknown GPU backend");
        const gmx::FftBackend backend = gmx::FftBackend::Count;
#endif

        PmeGpuGridParams& grid = pme_gpu_get_kernel_params_base_ptr(pmeGpu)->grid;
        for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
        {
            pmeGpu->archSpecific->fftSetup.push_back(
                    std::make_unique<gmx::Gpu3dFft>(backend,
                                                    allocateGrid,
                                                    comm,
                                                    gridOffsetsInXForEachRank,
                                                    gridOffsetsInYForEachRank,
                                                    grid.realGridSize[ZZ],
                                                    performOutOfPlaceFFT,
                                                    pmeGpu->archSpecific->deviceContext_,
                                                    pmeGpu->archSpecific->pmeStream_,
                                                    grid.realGridSize,
                                                    grid.realGridSizePadded,
                                                    grid.complexGridSizePadded,
                                                    &(grid.d_realGrid[gridIndex]),
                                                    &(grid.d_fourierGrid[gridIndex])));
        }
    }
}

void pme_gpu_destroy_3dfft(const PmeGpu* pmeGpu)
{
    pmeGpu->archSpecific->fftSetup.resize(0);
}

void pme_gpu_getEnergyAndVirial(const gmx_pme_t& pme, const float lambda, PmeOutput* output)
{
    const PmeGpu* pmeGpu = pme.gpu;

    GMX_ASSERT(lambda == 1.0 || pmeGpu->common->ngrids == 2,
               "Invalid combination of lambda and number of grids");

    for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
    {
        for (int j = 0; j < c_virialAndEnergyCount; j++)
        {
            GMX_ASSERT(std::isfinite(pmeGpu->staging.h_virialAndEnergy[gridIndex][j]),
                       "PME GPU produces incorrect energy/virial.");
        }
    }
    for (int dim1 = 0; dim1 < DIM; dim1++)
    {
        for (int dim2 = 0; dim2 < DIM; dim2++)
        {
            output->coulombVirial_[dim1][dim2] = 0;
        }
    }
    output->coulombEnergy_ = 0;
    float scale            = 1.0;
    for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
    {
        if (pmeGpu->common->ngrids == 2)
        {
            scale = gridIndex == 0 ? (1.0 - lambda) : lambda;
        }
        output->coulombVirial_[XX][XX] +=
                scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][0];
        output->coulombVirial_[YY][YY] +=
                scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][1];
        output->coulombVirial_[ZZ][ZZ] +=
                scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][2];
        output->coulombVirial_[XX][YY] +=
                scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][3];
        output->coulombVirial_[YY][XX] +=
                scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][3];
        output->coulombVirial_[XX][ZZ] +=
                scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][4];
        output->coulombVirial_[ZZ][XX] +=
                scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][4];
        output->coulombVirial_[YY][ZZ] +=
                scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][5];
        output->coulombVirial_[ZZ][YY] +=
                scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][5];
        output->coulombEnergy_ += scale * 0.5F * pmeGpu->staging.h_virialAndEnergy[gridIndex][6];
    }
    if (pmeGpu->common->ngrids > 1)
    {
        output->coulombDvdl_ = 0.5F
                               * (pmeGpu->staging.h_virialAndEnergy[FEP_STATE_B][6]
                                  - pmeGpu->staging.h_virialAndEnergy[FEP_STATE_A][6]);
    }
}

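/* Staging layout assumed by the unpacking above: each of the (up to two)
 * h_virialAndEnergy arrays holds c_virialAndEnergyCount = 7 floats, ordered as
 * { virXX, virYY, virZZ, virXY, virXZ, virYZ, energy }; the 0.25F and 0.5F
 * factors undo the redundant accumulation performed by the solve kernel.
 */
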
/*! \brief Sets the force-related members in \p output
 *
 * \param[in]   pmeGpu      PME GPU data structure
 * \param[out]  output      Pointer to PME output data structure
 */
static void pme_gpu_getForceOutput(PmeGpu* pmeGpu, PmeOutput* output)
{
    output->haveForceOutput_ = !pmeGpu->settings.useGpuForceReduction;
    if (output->haveForceOutput_)
    {
        output->forces_ = pmeGpu->staging.h_forces;
    }
}

PmeOutput pme_gpu_getOutput(const gmx_pme_t& pme, const bool computeEnergyAndVirial, const real lambdaQ)
{
    PmeGpu* pmeGpu = pme.gpu;

    PmeOutput output;

    pme_gpu_getForceOutput(pmeGpu, &output);

    if (computeEnergyAndVirial)
    {
        if (pme_gpu_settings(pmeGpu).performGPUSolve)
        {
            pme_gpu_getEnergyAndVirial(pme, lambdaQ, &output);
        }
        else
        {
            get_pme_ener_vir_q(pme.solve_work, pme.nthread, &output);
        }
    }
    return output;
}

void pme_gpu_update_input_box(PmeGpu gmx_unused* pmeGpu, const matrix gmx_unused box)
{
#if GMX_DOUBLE
    GMX_THROW(gmx::NotImplementedError("PME is implemented for single-precision only on GPU"));
#else
    matrix scaledBox;
    pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
    auto* kernelParamsPtr              = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
    kernelParamsPtr->current.boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
    GMX_ASSERT(kernelParamsPtr->current.boxVolume != 0.0F, "Zero volume of the unit cell");
    matrix recipBox;
    gmx::invertBoxMatrix(scaledBox, recipBox);

    /* The GPU recipBox is transposed as compared to the CPU recipBox.
     * Spread uses matrix columns (while solve and gather use rows).
     * There is no particular reason for this; it might be further rethought/optimized for better access patterns.
     */
    const real newRecipBox[DIM][DIM] = { { recipBox[XX][XX], recipBox[YY][XX], recipBox[ZZ][XX] },
                                         { 0.0, recipBox[YY][YY], recipBox[ZZ][YY] },
                                         { 0.0, 0.0, recipBox[ZZ][ZZ] } };
    memcpy(kernelParamsPtr->current.recipBox, newRecipBox, sizeof(matrix));
#endif
}

/*! \brief \libinternal
 * (Re-)initializes all the PME GPU data related to the grid size and cut-off.
 *
 * \param[in] pmeGpu            The PME GPU structure.
 */
static void pme_gpu_reinit_grids(PmeGpu* pmeGpu)
{
    auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);

    GMX_ASSERT(
            pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
            "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");

    kernelParamsPtr->grid.ewaldFactor =
            (M_PI * M_PI) / (pmeGpu->common->ewaldcoeff_q * pmeGpu->common->ewaldcoeff_q);
    /* The grid size variants */
    for (int i = 0; i < DIM; i++)
    {
        kernelParamsPtr->grid.realGridSize[i] = pmeGpu->common->nk[i];
        kernelParamsPtr->grid.realGridSizeFP[i] =
                static_cast<float>(kernelParamsPtr->grid.realGridSize[i]);
        kernelParamsPtr->grid.realGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];

        // The complex grid currently uses no padding;
        // if it starts to do so, then another test should be added for that
        kernelParamsPtr->grid.complexGridSize[i]       = kernelParamsPtr->grid.realGridSize[i];
        kernelParamsPtr->grid.complexGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
    }
    /* FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
    if (!pme_gpu_settings(pmeGpu).performGPUFFT)
    {
        // This allows for GPU spreading grid and CPU fftgrid to have the same layout, so that we can copy the data directly
        kernelParamsPtr->grid.realGridSizePadded[ZZ] =
                (kernelParamsPtr->grid.realGridSize[ZZ] / 2 + 1) * 2;
    }
    /* GPU FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
    kernelParamsPtr->grid.complexGridSize[ZZ] /= 2;
    kernelParamsPtr->grid.complexGridSize[ZZ]++;
    kernelParamsPtr->grid.complexGridSizePadded[ZZ] = kernelParamsPtr->grid.complexGridSize[ZZ];

    pme_gpu_realloc_and_copy_fract_shifts(pmeGpu);
    for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
    {
        pme_gpu_realloc_and_copy_bspline_values(pmeGpu, gridIndex);
    }

    pme_gpu_realloc_grids(pmeGpu);
    pme_gpu_reinit_3dfft(pmeGpu);
}

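/* Worked example (illustrative grid): for nk = {64, 64, 64} with GPU FFT the
 * complex grid becomes {64, 64, 64/2 + 1} = {64, 64, 33}; with CPU FFT the real
 * grid's minor dimension is instead padded to (64/2 + 1) * 2 = 66 floats, so the
 * GPU spreading grid and the CPU fftgrid share one layout.
 */
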
/* Several GPU functions that refer to the CPU PME data live here.
 * We would like to keep these away from the GPU-framework specific code for clarity,
 * and to avoid the compilation issues that MPI-dependent code brings.
 */

/*! \brief \libinternal
 * Copies everything useful from the PME CPU to the PME GPU structure.
 * The goal is to minimize interaction with the PME CPU structure in the GPU code.
 *
 * \param[in] pme         The PME structure.
 */
static void pme_gpu_copy_common_data_from(const gmx_pme_t* pme)
{
    /* TODO: Consider refactoring the CPU PME code to use the same structure,
     * so that this function becomes 2 lines */
    PmeGpu* pmeGpu               = pme->gpu;
    pmeGpu->common->ngrids       = pme->bFEP_q ? 2 : 1;
    pmeGpu->common->epsilon_r    = pme->epsilon_r;
    pmeGpu->common->ewaldcoeff_q = pme->ewaldcoeff_q;
    pmeGpu->common->nk[XX]       = pme->nkx;
    pmeGpu->common->nk[YY]       = pme->nky;
    pmeGpu->common->nk[ZZ]       = pme->nkz;
    pmeGpu->common->pme_order    = pme->pme_order;
    if (pmeGpu->common->pme_order != c_pmeGpuOrder)
    {
        GMX_THROW(gmx::NotImplementedError("pme_order != 4 is not implemented!"));
    }
    for (int i = 0; i < DIM; i++)
    {
        pmeGpu->common->bsp_mod[i].assign(pme->bsp_mod[i], pme->bsp_mod[i] + pmeGpu->common->nk[i]);
    }
    const int cellCount = c_pmeNeighborUnitcellCount;
    pmeGpu->common->fsh.resize(0);
    pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshx, pme->fshx + cellCount * pme->nkx);
    pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshy, pme->fshy + cellCount * pme->nky);
    pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshz, pme->fshz + cellCount * pme->nkz);
    pmeGpu->common->nn.resize(0);
    pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnx, pme->nnx + cellCount * pme->nkx);
    pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nny, pme->nny + cellCount * pme->nky);
    pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnz, pme->nnz + cellCount * pme->nkz);
    pmeGpu->common->runMode       = pme->runMode;
    pmeGpu->common->isRankPmeOnly = !pme->bPPnode;
    pmeGpu->common->boxScaler     = pme->boxScaler.get();
}

/*! \libinternal \brief
 * Uses heuristics to select the best performing PME gather and scatter kernels
 *
 * \param[in,out] pmeGpu         The PME GPU structure.
 */
static void pme_gpu_select_best_performing_pme_spreadgather_kernels(PmeGpu* pmeGpu)
{
    if (GMX_GPU_CUDA && pmeGpu->kernelParams->atoms.nAtoms > c_pmeGpuPerformanceAtomLimit)
    {
        pmeGpu->settings.threadsPerAtom     = ThreadsPerAtom::Order;
        pmeGpu->settings.recalculateSplines = true;
    }
    else
    {
        pmeGpu->settings.threadsPerAtom     = ThreadsPerAtom::OrderSquared;
        pmeGpu->settings.recalculateSplines = false;
    }
}

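/* Example of the heuristic above (hypothetical CUDA run): a 30000-atom system
 * exceeds c_pmeGpuPerformanceAtomLimit (23000), so spread/gather use order (4)
 * threads per atom and recalculate splines in the gather; a 10000-atom system
 * keeps order^2 (16) threads per atom and stores the splines in global memory.
 */
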
/*! \libinternal \brief
 * Initializes the PME GPU data at the beginning of the run.
 * TODO: this should become PmeGpu::PmeGpu()
 *
 * \param[in,out] pme            The PME structure.
 * \param[in]     deviceContext  The GPU context.
 * \param[in]     deviceStream   The GPU stream.
 * \param[in,out] pmeGpuProgram  The handle to the program/kernel data created outside (e.g. in unit tests/runner)
 */
static void pme_gpu_init(gmx_pme_t*           pme,
                         const DeviceContext& deviceContext,
                         const DeviceStream&  deviceStream,
                         const PmeGpuProgram* pmeGpuProgram)
{
    pme->gpu       = new PmeGpu();
    PmeGpu* pmeGpu = pme->gpu;
    changePinningPolicy(&pmeGpu->staging.h_forces, pme_get_pinning_policy());
    pmeGpu->common = std::make_shared<PmeShared>();

    /* These settings are set here for the whole run; dynamic ones are set in pme_gpu_reinit() */
    /* A convenience variable. */
    pmeGpu->settings.useDecomposition = (pme->nnodes != 1);
    /* TODO: CPU gather with GPU spread is broken due to different theta/dtheta layout. */
    pmeGpu->settings.performGPUGather = true;
    // By default GPU-side reduction is off (explicitly set here for tests, otherwise reset per-step)
    pmeGpu->settings.useGpuForceReduction = false;

    pme_gpu_set_testing(pmeGpu, false);

    GMX_ASSERT(pmeGpuProgram != nullptr, "GPU kernels must be already compiled");
    pmeGpu->programHandle_ = pmeGpuProgram;

    pmeGpu->initializedClfftLibrary_ = std::make_unique<gmx::ClfftInitializer>();

    pme_gpu_init_internal(pmeGpu, deviceContext, deviceStream);

    pme_gpu_copy_common_data_from(pme);
    pme_gpu_alloc_energy_virial(pmeGpu);

    GMX_ASSERT(pmeGpu->common->epsilon_r != 0.0F, "PME GPU: bad electrostatic coefficient");

    auto* kernelParamsPtr               = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
    kernelParamsPtr->constants.elFactor = gmx::c_one4PiEps0 / pmeGpu->common->epsilon_r;
}

void pme_gpu_get_real_grid_sizes(const PmeGpu* pmeGpu, gmx::IVec* gridSize, gmx::IVec* paddedGridSize)
{
    GMX_ASSERT(gridSize != nullptr, "");
    GMX_ASSERT(paddedGridSize != nullptr, "");
    GMX_ASSERT(pmeGpu != nullptr, "");
    auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
    for (int i = 0; i < DIM; i++)
    {
        (*gridSize)[i]       = kernelParamsPtr->grid.realGridSize[i];
        (*paddedGridSize)[i] = kernelParamsPtr->grid.realGridSizePadded[i];
    }
}

void pme_gpu_reinit(gmx_pme_t*           pme,
                    const DeviceContext* deviceContext,
                    const DeviceStream*  deviceStream,
                    const PmeGpuProgram* pmeGpuProgram)
{
    GMX_ASSERT(pme != nullptr, "Need valid PME object");

    if (!pme->gpu)
    {
        GMX_RELEASE_ASSERT(deviceContext != nullptr,
                           "Device context can not be nullptr when setting up PME on GPU.");
        GMX_RELEASE_ASSERT(deviceStream != nullptr,
                           "Device stream can not be nullptr when setting up PME on GPU.");
        /* First-time initialization */
        pme_gpu_init(pme, *deviceContext, *deviceStream, pmeGpuProgram);
    }
    else
    {
        /* After this call nothing in the GPU code should refer to the gmx_pme_t *pme itself - until the next pme_gpu_reinit */
        pme_gpu_copy_common_data_from(pme);
    }
    /* GPU FFT will only get used for a single rank. */
    pme->gpu->settings.performGPUFFT =
            (pme->gpu->common->runMode == PmeRunMode::GPU) && !pme->gpu->settings.useDecomposition;
    pme->gpu->settings.performGPUSolve = (pme->gpu->common->runMode == PmeRunMode::GPU);

    /* Reinit active timers */
    pme_gpu_reinit_timings(pme->gpu);

    pme_gpu_reinit_grids(pme->gpu);
    // Note: if timing the reinit launch overhead becomes more relevant
    // (e.g. with regular PP-PME re-balancing), we should pass wcycle here.
    pme_gpu_reinit_computation(pme, nullptr);
    /* Clear the previous box - doesn't hurt, and forces the PME CPU recipbox
     * update for mixed mode on grid switch. TODO: use shared recipbox field.
     */
    std::memset(pme->gpu->common->previousBox, 0, sizeof(pme->gpu->common->previousBox));
}

void pme_gpu_destroy(PmeGpu* pmeGpu)
{
    /* Free lots of data */
    pme_gpu_free_energy_virial(pmeGpu);
    pme_gpu_free_bspline_values(pmeGpu);
    pme_gpu_free_forces(pmeGpu);
    pme_gpu_free_coefficients(pmeGpu);
    pme_gpu_free_spline_data(pmeGpu);
    pme_gpu_free_grid_indices(pmeGpu);
    pme_gpu_free_fract_shifts(pmeGpu);
    pme_gpu_free_grids(pmeGpu);

    pme_gpu_destroy_3dfft(pmeGpu);

    delete pmeGpu;
}

void pme_gpu_reinit_atoms(PmeGpu* pmeGpu, const int nAtoms, const real* chargesA, const real* chargesB)
{
    auto* kernelParamsPtr         = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
    kernelParamsPtr->atoms.nAtoms = nAtoms;
    const int  block_size         = pme_gpu_get_atom_data_block_size();
    const int  nAtomsNewPadded    = ((nAtoms + block_size - 1) / block_size) * block_size;
    const bool haveToRealloc      = (pmeGpu->nAtomsAlloc < nAtomsNewPadded);
    pmeGpu->nAtomsAlloc           = nAtomsNewPadded;

#if GMX_DOUBLE
    GMX_RELEASE_ASSERT(false, "Only single precision supported");
    GMX_UNUSED_VALUE(chargesA);
    GMX_UNUSED_VALUE(chargesB);
#else
    int gridIndex = 0;
    /* Could also be checked for haveToRealloc, but the copy always needs to be performed */
    pme_gpu_realloc_and_copy_input_coefficients(pmeGpu, reinterpret_cast<const float*>(chargesA), gridIndex);
    gridIndex++;
    if (chargesB != nullptr)
    {
        pme_gpu_realloc_and_copy_input_coefficients(
                pmeGpu, reinterpret_cast<const float*>(chargesB), gridIndex);
    }
    else
    {
        /* Fill the second set of coefficients with chargesA as well to be able to avoid
         * conditionals in the GPU kernels */
        /* FIXME: This should be avoided by making a separate templated version of the
         * relevant kernel(s) (probably only pme_gather_kernel). That would require a
         * reduction of the current number of templated parameters of that kernel. */
        pme_gpu_realloc_and_copy_input_coefficients(
                pmeGpu, reinterpret_cast<const float*>(chargesA), gridIndex);
    }
#endif

    if (haveToRealloc)
    {
        pme_gpu_realloc_forces(pmeGpu);
        pme_gpu_realloc_spline_data(pmeGpu);
        pme_gpu_realloc_grid_indices(pmeGpu);
    }

    pme_gpu_select_best_performing_pme_spreadgather_kernels(pmeGpu);
}

/*! \internal \brief
 * Returns raw timing event from the corresponding GpuRegionTimer (if timings are enabled).
 * In CUDA result can be nullptr stub, per GpuRegionTimer implementation.
 *
 * \param[in] pmeGpu         The PME GPU data structure.
 * \param[in] pmeStageId     The PME GPU stage gtPME_ index from the enum in src/gromacs/timing/gpu_timing.h
 */
static CommandEvent* pme_gpu_fetch_timing_event(const PmeGpu* pmeGpu, PmeStage pmeStageId)
{
    CommandEvent* timingEvent = nullptr;
    if (pme_gpu_timings_enabled(pmeGpu))
    {
        GMX_ASSERT(pmeStageId < PmeStage::Count, "Wrong PME GPU timing event index");
        timingEvent = pmeGpu->archSpecific->timingEvents[pmeStageId].fetchNextEvent();
    }
    return timingEvent;
}

void pme_gpu_3dfft(const PmeGpu* pmeGpu, gmx_fft_direction dir, const int grid_index)
{
    PmeStage timerId = (dir == GMX_FFT_REAL_TO_COMPLEX) ? PmeStage::FftTransformR2C
                                                        : PmeStage::FftTransformC2R;

    pme_gpu_start_timing(pmeGpu, timerId);
    pmeGpu->archSpecific->fftSetup[grid_index]->perform3dFft(
            dir, pme_gpu_fetch_timing_event(pmeGpu, timerId));
    pme_gpu_stop_timing(pmeGpu, timerId);
}

/*! \brief
 * Given possibly large \p blockCount, returns a compact 1D or 2D grid for kernel scheduling,
 * to minimize number of unused blocks.
 */
std::pair<int, int> inline pmeGpuCreateGrid(const PmeGpu* pmeGpu, int blockCount)
{
    // How many maximum widths in X do we need (hopefully just one)
    const int minRowCount = (blockCount + pmeGpu->maxGridWidthX - 1) / pmeGpu->maxGridWidthX;
    // Trying to make things even
    const int colCount = (blockCount + minRowCount - 1) / minRowCount;
    GMX_ASSERT((colCount * minRowCount - blockCount) >= 0, "pmeGpuCreateGrid: totally wrong");
    GMX_ASSERT((colCount * minRowCount - blockCount) < minRowCount,
               "pmeGpuCreateGrid: excessive blocks");
    return std::pair<int, int>(colCount, minRowCount);
}

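/* Worked example (hypothetical values): with maxGridWidthX = 65535 and
 * blockCount = 100000, minRowCount = ceil(100000 / 65535) = 2 and
 * colCount = ceil(100000 / 2) = 50000, i.e. a 50000 x 2 launch grid with zero
 * unused blocks. (65535 is illustrative only; CUDA reports the real limit via
 * maxGridSize[0], see pme_gpu_init_internal().)
 */
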
/*! \brief
 * Returns a pointer to the appropriate spline and spread kernel based on the input bool values
 *
 * \param[in]  pmeGpu                   The PME GPU structure.
 * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
 * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
 * \param[in]  numGrids                 Number of grids to use. numGrids == 2 if Coulomb is perturbed.
 *
 * \return Pointer to CUDA kernel
 */
static auto selectSplineAndSpreadKernelPtr(const PmeGpu*  pmeGpu,
                                           ThreadsPerAtom threadsPerAtom,
                                           bool           writeSplinesToGlobal,
                                           const int      numGrids)
{
    PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
    if (writeSplinesToGlobal)
    {
        if (threadsPerAtom == ThreadsPerAtom::Order)
        {
            kernelPtr = (numGrids == 2)
                                ? pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesThPerAtom4Dual
                                : pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesThPerAtom4Single;
        }
        else
        {
            kernelPtr = (numGrids == 2)
                                ? pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesDual
                                : pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesSingle;
        }
    }
    else
    {
        if (threadsPerAtom == ThreadsPerAtom::Order)
        {
            kernelPtr = (numGrids == 2)
                                ? pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Dual
                                : pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Single;
        }
        else
        {
            kernelPtr = (numGrids == 2) ? pmeGpu->programHandle_->impl_->splineAndSpreadKernelDual
                                        : pmeGpu->programHandle_->impl_->splineAndSpreadKernelSingle;
        }
    }

    return kernelPtr;
}

/*! \brief
 * Returns a pointer to the appropriate spline kernel based on the input bool values
 *
 * \param[in]  pmeGpu                   The PME GPU structure.
 * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
 * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
 * \param[in]  numGrids                 Number of grids to use. numGrids == 2 if Coulomb is perturbed.
 *
 * \return Pointer to CUDA kernel
 */
static auto selectSplineKernelPtr(const PmeGpu*   pmeGpu,
                                  ThreadsPerAtom  threadsPerAtom,
                                  bool gmx_unused writeSplinesToGlobal,
                                  const int       numGrids)
{
    PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
    GMX_ASSERT(
            writeSplinesToGlobal,
            "Spline data should always be written to global memory when just calculating splines");

    if (threadsPerAtom == ThreadsPerAtom::Order)
    {
        kernelPtr = (numGrids == 2) ? pmeGpu->programHandle_->impl_->splineKernelThPerAtom4Dual
                                    : pmeGpu->programHandle_->impl_->splineKernelThPerAtom4Single;
    }
    else
    {
        kernelPtr = (numGrids == 2) ? pmeGpu->programHandle_->impl_->splineKernelDual
                                    : pmeGpu->programHandle_->impl_->splineKernelSingle;
    }
    return kernelPtr;
}

/*! \brief
 * Returns a pointer to the appropriate spread kernel based on the input bool values
 *
 * \param[in]  pmeGpu                   The PME GPU structure.
 * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
 * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
 * \param[in]  numGrids                 Number of grids to use. numGrids == 2 if Coulomb is perturbed.
 *
 * \return Pointer to CUDA kernel
 */
static auto selectSpreadKernelPtr(const PmeGpu*  pmeGpu,
                                  ThreadsPerAtom threadsPerAtom,
                                  bool           writeSplinesToGlobal,
                                  const int      numGrids)
{
    PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
    if (writeSplinesToGlobal)
    {
        if (threadsPerAtom == ThreadsPerAtom::Order)
        {
            kernelPtr = (numGrids == 2) ? pmeGpu->programHandle_->impl_->spreadKernelThPerAtom4Dual
                                        : pmeGpu->programHandle_->impl_->spreadKernelThPerAtom4Single;
        }
        else
        {
            kernelPtr = (numGrids == 2) ? pmeGpu->programHandle_->impl_->spreadKernelDual
                                        : pmeGpu->programHandle_->impl_->spreadKernelSingle;
        }
    }
    else
    {
        /* If we are not saving the spline data, we need to recalculate it
           using the spline and spread kernel */
        if (threadsPerAtom == ThreadsPerAtom::Order)
        {
            kernelPtr = (numGrids == 2)
                                ? pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Dual
                                : pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Single;
        }
        else
        {
            kernelPtr = (numGrids == 2) ? pmeGpu->programHandle_->impl_->splineAndSpreadKernelDual
                                        : pmeGpu->programHandle_->impl_->splineAndSpreadKernelSingle;
        }
    }
    return kernelPtr;
}

void pme_gpu_spread(const PmeGpu*                  pmeGpu,
                    GpuEventSynchronizer*          xReadyOnDevice,
                    real**                         h_grids,
                    bool                           computeSplines,
                    bool                           spreadCharges,
                    const real                     lambda,
                    const bool                     useGpuDirectComm,
                    gmx::PmeCoordinateReceiverGpu* pmeCoordinateReceiverGpu)
{
    GMX_ASSERT(
            pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
            "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");

    GMX_ASSERT(computeSplines || spreadCharges,
               "PME spline/spread kernel has invalid input (nothing to do)");
    auto* kernelParamsPtr = pmeGpu->kernelParams.get();
    GMX_ASSERT(kernelParamsPtr->atoms.nAtoms > 0, "No atom data in PME GPU spread");

    const size_t blockSize = pmeGpu->programHandle_->impl_->spreadWorkGroupSize;

    const int order = pmeGpu->common->pme_order;
    GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
    const bool writeGlobal = pmeGpu->settings.copyAllOutputs;
    const int  threadsPerAtom =
            (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
    const bool recalculateSplines = pmeGpu->settings.recalculateSplines;

    GMX_ASSERT(!GMX_GPU_OPENCL || pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::OrderSquared,
               "Only 16 threads per atom supported in OpenCL");
    GMX_ASSERT(!GMX_GPU_OPENCL || !recalculateSplines,
               "Recalculating splines not supported in OpenCL");

    const int atomsPerBlock = blockSize / threadsPerAtom;

    // TODO: pick smaller block size in runtime if needed
    // (e.g. on 660 Ti where 50% occupancy is ~25% faster than 100% occupancy with RNAse (~17.8k atoms))
    // If doing so, change atomsPerBlock in the kernels as well.
    // TODO: test varying block sizes on modern arch-s as well
    // TODO: also consider using cudaFuncSetCacheConfig() for preferring shared memory on older architectures
    // (for spline data mostly)
    GMX_ASSERT(!(c_pmeAtomDataBlockSize % atomsPerBlock),
               "inconsistent atom data padding vs. spreading block size");

    // Ensure that coordinates are ready on the device before launching spread;
    // only needed with CUDA on PP+PME ranks, not on separate PME ranks, in unit tests
    // nor in OpenCL as these cases use a single stream (hence xReadyOnDevice == nullptr).
    GMX_ASSERT(!GMX_GPU_CUDA || xReadyOnDevice != nullptr || pmeGpu->common->isRankPmeOnly
                       || pme_gpu_settings(pmeGpu).copyAllOutputs,
               "Need a valid coordinate synchronizer on PP+PME ranks with CUDA.");

    if (xReadyOnDevice)
    {
        xReadyOnDevice->enqueueWaitEvent(pmeGpu->archSpecific->pmeStream_);
    }

    const int blockCount = pmeGpu->nAtomsAlloc / atomsPerBlock;
    auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);

    if (pmeGpu->common->ngrids == 1)
    {
        kernelParamsPtr->current.scale = 1.0;
    }
    else
    {
        kernelParamsPtr->current.scale = 1.0 - lambda;
    }

    KernelLaunchConfig config;
    config.blockSize[0] = order;
    config.blockSize[1] = (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? 1 : order);
    config.blockSize[2] = atomsPerBlock;
    config.gridSize[0]  = dimGrid.first;
    config.gridSize[1]  = dimGrid.second;

    PmeStage                           timingId;
    PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
    const bool writeGlobalOrSaveSplines          = writeGlobal || (!recalculateSplines);
    if (computeSplines)
    {
        if (spreadCharges)
        {
            timingId  = PmeStage::SplineAndSpread;
            kernelPtr = selectSplineAndSpreadKernelPtr(pmeGpu,
                                                       pmeGpu->settings.threadsPerAtom,
                                                       writeGlobalOrSaveSplines,
                                                       pmeGpu->common->ngrids);
        }
        else
        {
            timingId  = PmeStage::Spline;
            kernelPtr = selectSplineKernelPtr(pmeGpu,
                                              pmeGpu->settings.threadsPerAtom,
                                              writeGlobalOrSaveSplines,
                                              pmeGpu->common->ngrids);
        }
    }
    else
    {
        timingId  = PmeStage::Spread;
        kernelPtr = selectSpreadKernelPtr(
                pmeGpu, pmeGpu->settings.threadsPerAtom, writeGlobalOrSaveSplines, pmeGpu->common->ngrids);
    }

    pme_gpu_start_timing(pmeGpu, timingId);
    auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);

    kernelParamsPtr->usePipeline = char(computeSplines && spreadCharges && useGpuDirectComm
                                        && (pmeCoordinateReceiverGpu->ppCommNumSenderRanks() > 1)
                                        && !writeGlobalOrSaveSplines);
    if (kernelParamsPtr->usePipeline != 0)
    {
        int numStagesInPipeline = pmeCoordinateReceiverGpu->ppCommNumSenderRanks();

        for (int i = 0; i < numStagesInPipeline; i++)
        {
            int senderRank;
            if (useGpuDirectComm)
            {
                senderRank = pmeCoordinateReceiverGpu->synchronizeOnCoordinatesFromPpRank(
                        i, *(pmeCoordinateReceiverGpu->ppCommStream(i)));
            }
            else
            {
                senderRank = i;
            }

            // set kernel configuration options specific to this stage of the pipeline
            std::tie(kernelParamsPtr->pipelineAtomStart, kernelParamsPtr->pipelineAtomEnd) =
                    pmeCoordinateReceiverGpu->ppCommAtomRange(senderRank);
            const int blockCount = static_cast<int>(std::ceil(
                    static_cast<float>(kernelParamsPtr->pipelineAtomEnd - kernelParamsPtr->pipelineAtomStart)
                    / atomsPerBlock));
            auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
            config.gridSize[0]   = dimGrid.first;
            config.gridSize[1]   = dimGrid.second;
            DeviceStream* launchStream = pmeCoordinateReceiverGpu->ppCommStream(senderRank);

#if c_canEmbedBuffers
            const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
#else
            const auto kernelArgs =
                    prepareGpuKernelArguments(kernelPtr,
                                              config,
                                              kernelParamsPtr,
                                              &kernelParamsPtr->atoms.d_theta,
                                              &kernelParamsPtr->atoms.d_dtheta,
                                              &kernelParamsPtr->atoms.d_gridlineIndices,
                                              &kernelParamsPtr->grid.d_realGrid[FEP_STATE_A],
                                              &kernelParamsPtr->grid.d_realGrid[FEP_STATE_B],
                                              &kernelParamsPtr->grid.d_fractShiftsTable,
                                              &kernelParamsPtr->grid.d_gridlineIndicesTable,
                                              &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_A],
                                              &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_B],
                                              &kernelParamsPtr->atoms.d_coordinates);
#endif

            launchGpuKernel(kernelPtr, config, *launchStream, timingEvent, "PME spline/spread", kernelArgs);
        }
        // Set dependencies for PME stream on all pipeline streams
        for (int i = 0; i < pmeCoordinateReceiverGpu->ppCommNumSenderRanks(); i++)
        {
            GpuEventSynchronizer event;
            event.markEvent(*(pmeCoordinateReceiverGpu->ppCommStream(i)));
            event.enqueueWaitEvent(pmeGpu->archSpecific->pmeStream_);
        }
    }
    else // pipelining is not in use
    {
        if (useGpuDirectComm) // Sync all PME-PP communications to PME stream
        {
            pmeCoordinateReceiverGpu->synchronizeOnCoordinatesFromAllPpRanks(pmeGpu->archSpecific->pmeStream_);
        }

#if c_canEmbedBuffers
        const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
#else
        const auto kernelArgs =
                prepareGpuKernelArguments(kernelPtr,
                                          config,
                                          kernelParamsPtr,
                                          &kernelParamsPtr->atoms.d_theta,
                                          &kernelParamsPtr->atoms.d_dtheta,
                                          &kernelParamsPtr->atoms.d_gridlineIndices,
                                          &kernelParamsPtr->grid.d_realGrid[FEP_STATE_A],
                                          &kernelParamsPtr->grid.d_realGrid[FEP_STATE_B],
                                          &kernelParamsPtr->grid.d_fractShiftsTable,
                                          &kernelParamsPtr->grid.d_gridlineIndicesTable,
                                          &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_A],
                                          &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_B],
                                          &kernelParamsPtr->atoms.d_coordinates);
#endif

        launchGpuKernel(kernelPtr,
                        config,
                        pmeGpu->archSpecific->pmeStream_,
                        timingEvent,
                        "PME spline/spread",
                        kernelArgs);
    }

    pme_gpu_stop_timing(pmeGpu, timingId);

    const auto& settings     = pmeGpu->settings;
    const bool  copyBackGrid = spreadCharges && (!settings.performGPUFFT || settings.copyAllOutputs);
    if (copyBackGrid)
    {
        for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
        {
            float* h_grid = h_grids[gridIndex];
            pme_gpu_copy_output_spread_grid(pmeGpu, h_grid, gridIndex);
        }
    }
    const bool copyBackAtomData =
            computeSplines && (!settings.performGPUGather || settings.copyAllOutputs);
    if (copyBackAtomData)
    {
        pme_gpu_copy_output_spread_atom_data(pmeGpu);
    }
}

void pme_gpu_solve(const PmeGpu* pmeGpu,
                   const int     gridIndex,
                   t_complex*    h_grid,
                   GridOrdering  gridOrdering,
                   bool          computeEnergyAndVirial)
{
    GMX_ASSERT(
            pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
            "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
    GMX_ASSERT(gridIndex < pmeGpu->common->ngrids,
               "Invalid combination of gridIndex and number of grids");

    const auto& settings               = pmeGpu->settings;
    const bool  copyInputAndOutputGrid = !settings.performGPUFFT || settings.copyAllOutputs;

    auto* kernelParamsPtr = pmeGpu->kernelParams.get();

    float* h_gridFloat = reinterpret_cast<float*>(h_grid);
    if (copyInputAndOutputGrid)
    {
        copyToDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid[gridIndex],
                           h_gridFloat,
                           0,
                           pmeGpu->archSpecific->complexGridSize[gridIndex],
                           pmeGpu->archSpecific->pmeStream_,
                           pmeGpu->settings.transferKind,
                           nullptr);
    }

    int majorDim = -1, middleDim = -1, minorDim = -1;
    switch (gridOrdering)
    {
        case GridOrdering::YZX:
            majorDim  = YY;
            middleDim = ZZ;
            minorDim  = XX;
            break;

        case GridOrdering::XYZ:
            majorDim  = XX;
            middleDim = YY;
            minorDim  = ZZ;
            break;

        default: GMX_ASSERT(false, "Implement grid ordering here and below for the kernel launch");
    }

    const int maxBlockSize = pmeGpu->programHandle_->impl_->solveMaxWorkGroupSize;

    const int gridLineSize      = pmeGpu->kernelParams->grid.complexGridSize[minorDim];
    const int gridLinesPerBlock = std::max(maxBlockSize / gridLineSize, 1);
    const int blocksPerGridLine = (gridLineSize + maxBlockSize - 1) / maxBlockSize;
    int       cellsPerBlock;
    if (blocksPerGridLine == 1)
    {
        cellsPerBlock = gridLineSize * gridLinesPerBlock;
    }
    else
    {
        cellsPerBlock = (gridLineSize + blocksPerGridLine - 1) / blocksPerGridLine;
    }
    const int warpSize  = pmeGpu->programHandle_->warpSize();
    const int blockSize = (cellsPerBlock + warpSize - 1) / warpSize * warpSize;

    static_assert(!GMX_GPU_CUDA || c_solveMaxWarpsPerBlock / 2 >= 4,
                  "The CUDA solve energy kernels need at least 4 warps. "
                  "Here we launch at least half of the max warps.");

    KernelLaunchConfig config;
    config.blockSize[0] = blockSize;
    config.gridSize[0]  = blocksPerGridLine;
    // rounding up to full warps so that shuffle operations produce defined results
    config.gridSize[1] = (pmeGpu->kernelParams->grid.complexGridSize[middleDim] + gridLinesPerBlock - 1)
                         / gridLinesPerBlock;
    config.gridSize[2] = pmeGpu->kernelParams->grid.complexGridSize[majorDim];

    PmeStage                           timingId  = PmeStage::Solve;
    PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
    if (gridOrdering == GridOrdering::YZX)
    {
        if (gridIndex == 0)
        {
            kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveYZXEnergyKernelA
                                               : pmeGpu->programHandle_->impl_->solveYZXKernelA;
        }
        else
        {
            kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveYZXEnergyKernelB
                                               : pmeGpu->programHandle_->impl_->solveYZXKernelB;
        }
    }
    else if (gridOrdering == GridOrdering::XYZ)
    {
        if (gridIndex == 0)
        {
            kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveXYZEnergyKernelA
                                               : pmeGpu->programHandle_->impl_->solveXYZKernelA;
        }
        else
        {
            kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveXYZEnergyKernelB
                                               : pmeGpu->programHandle_->impl_->solveXYZKernelB;
        }
    }

    pme_gpu_start_timing(pmeGpu, timingId);
    auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
#if c_canEmbedBuffers
    const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
#else
    const auto kernelArgs =
            prepareGpuKernelArguments(kernelPtr,
                                      config,
                                      kernelParamsPtr,
                                      &kernelParamsPtr->grid.d_splineModuli[gridIndex],
                                      &kernelParamsPtr->constants.d_virialAndEnergy[gridIndex],
                                      &kernelParamsPtr->grid.d_fourierGrid[gridIndex]);
#endif
    launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME solve", kernelArgs);
    pme_gpu_stop_timing(pmeGpu, timingId);

    if (computeEnergyAndVirial)
    {
        copyFromDeviceBuffer(pmeGpu->staging.h_virialAndEnergy[gridIndex],
                             &kernelParamsPtr->constants.d_virialAndEnergy[gridIndex],
                             0,
                             c_virialAndEnergyCount,
                             pmeGpu->archSpecific->pmeStream_,
                             pmeGpu->settings.transferKind,
                             nullptr);
    }

    if (copyInputAndOutputGrid)
    {
        copyFromDeviceBuffer(h_gridFloat,
                             &kernelParamsPtr->grid.d_fourierGrid[gridIndex],
                             0,
                             pmeGpu->archSpecific->complexGridSize[gridIndex],
                             pmeGpu->archSpecific->pmeStream_,
                             pmeGpu->settings.transferKind,
                             nullptr);
    }
}

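/* Launch-geometry sketch for the solve above (illustrative values): with
 * maxBlockSize = 256, XYZ ordering and a {64, 64, 33} complex grid,
 * gridLineSize = 33, so blocksPerGridLine = 1, gridLinesPerBlock = 256 / 33 = 7,
 * cellsPerBlock = 33 * 7 = 231, and with warpSize = 32 the block size rounds up
 * to (231 + 31) / 32 * 32 = 256 threads.
 */
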
/*! \brief
 * Returns a pointer to the appropriate gather kernel based on the input values
 *
 * \param[in]  pmeGpu                   The PME GPU structure.
 * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
 * \param[in]  readSplinesFromGlobal    bool controlling if we should read spline data from global memory
 * \param[in]  numGrids                 Number of grids to use. numGrids == 2 if Coulomb is perturbed.
 *
 * \return Pointer to CUDA kernel
 */
inline auto selectGatherKernelPtr(const PmeGpu*  pmeGpu,
                                  ThreadsPerAtom threadsPerAtom,
                                  bool           readSplinesFromGlobal,
                                  const int      numGrids)
{
    PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;

    if (readSplinesFromGlobal)
    {
        if (threadsPerAtom == ThreadsPerAtom::Order)
        {
            kernelPtr = (numGrids == 2)
                                ? pmeGpu->programHandle_->impl_->gatherKernelReadSplinesThPerAtom4Dual
                                : pmeGpu->programHandle_->impl_->gatherKernelReadSplinesThPerAtom4Single;
        }
        else
        {
            kernelPtr = (numGrids == 2) ? pmeGpu->programHandle_->impl_->gatherKernelReadSplinesDual
                                        : pmeGpu->programHandle_->impl_->gatherKernelReadSplinesSingle;
        }
    }
    else
    {
        if (threadsPerAtom == ThreadsPerAtom::Order)
        {
            kernelPtr = (numGrids == 2) ? pmeGpu->programHandle_->impl_->gatherKernelThPerAtom4Dual
                                        : pmeGpu->programHandle_->impl_->gatherKernelThPerAtom4Single;
        }
        else
        {
            kernelPtr = (numGrids == 2) ? pmeGpu->programHandle_->impl_->gatherKernelDual
                                        : pmeGpu->programHandle_->impl_->gatherKernelSingle;
        }
    }
    return kernelPtr;
}

void pme_gpu_gather(PmeGpu* pmeGpu, real** h_grids, const float lambda)
{
    GMX_ASSERT(
            pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
            "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");

    const auto& settings = pmeGpu->settings;

    if (!settings.performGPUFFT || settings.copyAllOutputs)
    {
        for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
        {
            float* h_grid = const_cast<float*>(h_grids[gridIndex]);
            pme_gpu_copy_input_gather_grid(pmeGpu, h_grid, gridIndex);
        }
    }

    if (settings.copyAllOutputs)
    {
        pme_gpu_copy_input_gather_atom_data(pmeGpu);
    }

    /* Set if we have unit tests */
    const bool   readGlobal = pmeGpu->settings.copyAllOutputs;
    const size_t blockSize  = pmeGpu->programHandle_->impl_->gatherWorkGroupSize;
    const int    order      = pmeGpu->common->pme_order;
    GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
    const int threadsPerAtom =
            (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
    const bool recalculateSplines = pmeGpu->settings.recalculateSplines;

    GMX_ASSERT(!GMX_GPU_OPENCL || pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::OrderSquared,
               "Only 16 threads per atom supported in OpenCL");
    GMX_ASSERT(!GMX_GPU_OPENCL || !recalculateSplines,
               "Recalculating splines not supported in OpenCL");

    const int atomsPerBlock = blockSize / threadsPerAtom;

    GMX_ASSERT(!(c_pmeAtomDataBlockSize % atomsPerBlock),
               "inconsistent atom data padding vs. gathering block size");

    const int blockCount = pmeGpu->nAtomsAlloc / atomsPerBlock;
    auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);

    KernelLaunchConfig config;
    config.blockSize[0] = order;
    config.blockSize[1] = (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? 1 : order);
    config.blockSize[2] = atomsPerBlock;
    config.gridSize[0]  = dimGrid.first;
    config.gridSize[1]  = dimGrid.second;

    // TODO test different cache configs

    PmeStage                           timingId = PmeStage::Gather;
    PmeGpuProgramImpl::PmeKernelHandle kernelPtr =
            selectGatherKernelPtr(pmeGpu,
                                  pmeGpu->settings.threadsPerAtom,
                                  readGlobal || (!recalculateSplines),
                                  pmeGpu->common->ngrids);
    // TODO design kernel selection getters and make PmeGpu a friend of PmeGpuProgramImpl

    pme_gpu_start_timing(pmeGpu, timingId);
    auto* timingEvent     = pme_gpu_fetch_timing_event(pmeGpu, timingId);
    auto* kernelParamsPtr = pmeGpu->kernelParams.get();
    if (pmeGpu->common->ngrids == 1)
    {
        kernelParamsPtr->current.scale = 1.0;
    }
    else
    {
        kernelParamsPtr->current.scale = 1.0 - lambda;
    }

#if c_canEmbedBuffers
    const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
#else
    const auto kernelArgs =
            prepareGpuKernelArguments(kernelPtr,
                                      config,
                                      kernelParamsPtr,
                                      &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_A],
                                      &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_B],
                                      &kernelParamsPtr->grid.d_realGrid[FEP_STATE_A],
                                      &kernelParamsPtr->grid.d_realGrid[FEP_STATE_B],
                                      &kernelParamsPtr->atoms.d_theta,
                                      &kernelParamsPtr->atoms.d_dtheta,
                                      &kernelParamsPtr->atoms.d_gridlineIndices,
                                      &kernelParamsPtr->atoms.d_forces);
#endif
    launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME gather", kernelArgs);
    pme_gpu_stop_timing(pmeGpu, timingId);

    if (pmeGpu->settings.useGpuForceReduction)
    {
        pmeGpu->archSpecific->pmeForcesReady.markEvent(pmeGpu->archSpecific->pmeStream_);
    }
    else
    {
        pme_gpu_copy_output_forces(pmeGpu);
    }
}

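/* Note on the two exit paths above: with useGpuForceReduction the forces stay
 * in d_forces and only pmeForcesReady is marked for the consumer (see
 * pme_gpu_get_forces_ready_synchronizer() below); otherwise they are staged
 * back into h_forces by pme_gpu_copy_output_forces().
 */
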
DeviceBuffer<gmx::RVec> pme_gpu_get_kernelparam_forces(const PmeGpu* pmeGpu)
{
    if (pmeGpu && pmeGpu->kernelParams)
    {
        return pmeGpu->kernelParams->atoms.d_forces;
    }
    else
    {
        return DeviceBuffer<gmx::RVec>{};
    }
}

void pme_gpu_set_kernelparam_coordinates(const PmeGpu* pmeGpu, DeviceBuffer<gmx::RVec> d_x)
{
    GMX_ASSERT(pmeGpu && pmeGpu->kernelParams,
               "PME GPU device buffer can not be set in non-GPU builds or before the GPU PME was "
               "initialized.");

    GMX_ASSERT(checkDeviceBuffer(d_x, pmeGpu->kernelParams->atoms.nAtoms),
               "The device-side buffer can not be set.");

    pmeGpu->kernelParams->atoms.d_coordinates = d_x;
}

GpuEventSynchronizer* pme_gpu_get_forces_ready_synchronizer(const PmeGpu* pmeGpu)
{
    if (pmeGpu && pmeGpu->kernelParams)
    {
        return &pmeGpu->archSpecific->pmeForcesReady;
    }
    else
    {
        return nullptr;
    }
}