/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2016,2017,2018,2019,2020, by the GROMACS development team.
 * Copyright (c) 2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief This file contains internal function implementations
 * for performing the PME calculations on GPU.
 *
 * Note that this file is compiled as regular C++ source in OpenCL builds, but
 * it is treated as CUDA source in CUDA-enabled GPU builds.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 * \ingroup module_ewald
 */
51 #include "pme_gpu_internal.h"
59 #include "gromacs/ewald/ewald_utils.h"
60 #include "gromacs/gpu_utils/device_context.h"
61 #include "gromacs/gpu_utils/device_stream.h"
62 #include "gromacs/gpu_utils/gpu_utils.h"
63 #include "gromacs/hardware/device_information.h"
64 #include "gromacs/math/invertmatrix.h"
65 #include "gromacs/math/units.h"
66 #include "gromacs/timing/gpu_timing.h"
67 #include "gromacs/utility/exceptions.h"
68 #include "gromacs/utility/fatalerror.h"
69 #include "gromacs/utility/gmxassert.h"
70 #include "gromacs/utility/logger.h"
71 #include "gromacs/utility/stringutil.h"
#if GMX_GPU_CUDA
#    include "gromacs/gpu_utils/pmalloc_cuda.h"
#elif GMX_GPU_OPENCL
#    include "gromacs/gpu_utils/gmxopencl.h"
#elif GMX_GPU_SYCL
#    include "gromacs/gpu_utils/syclutils.h"
#endif
83 #include "gromacs/ewald/pme.h"
85 #include "pme_gpu_3dfft.h"
86 #include "pme_gpu_calculate_splines.h"
87 #include "pme_gpu_constants.h"
88 #include "pme_gpu_program_impl.h"
89 #include "pme_gpu_timings.h"
90 #include "pme_gpu_types.h"
91 #include "pme_gpu_types_host.h"
92 #include "pme_gpu_types_host_impl.h"
94 #include "pme_internal.h"
95 #include "pme_solve.h"
/*! \brief
 * Atom limit above which it is advantageous to turn on the
 * recalculation of the splines in the gather and to use fewer threads per atom in the spline and spread kernels.
 */
constexpr int c_pmeGpuPerformanceAtomLimit = 23000;
/*! \internal \brief
 * Wrapper for getting a pointer to the plain C++ part of the GPU kernel parameters structure.
 *
 * \param[in] pmeGpu  The PME GPU structure.
 * \returns The pointer to the kernel parameters.
 */
static PmeGpuKernelParamsBase* pme_gpu_get_kernel_params_base_ptr(const PmeGpu* pmeGpu)
{
    // reinterpret_cast is needed because the derived CUDA structure is not known in this file
    auto* kernelParamsPtr = reinterpret_cast<PmeGpuKernelParamsBase*>(pmeGpu->kernelParams.get());
    return kernelParamsPtr;
}
/*! \brief
 * Atom data block size (in terms of number of atoms).
 * This is the least common multiple of number of atoms processed by
 * a single block/workgroup of the spread and gather kernels.
 * The GPU atom data buffers must be padded, which means that
 * the numbers of atoms used for determining the size of the memory
 * allocation must be divisible by this.
 */
constexpr int c_pmeAtomDataBlockSize = 64;

int pme_gpu_get_atom_data_block_size()
{
    return c_pmeAtomDataBlockSize;
}
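
/* Illustrative sketch (not part of the original source): how the block size above is used.
 * The device-side atom buffers are padded up to a multiple of c_pmeAtomDataBlockSize, e.g.
 *
 *     const int blockSize    = pme_gpu_get_atom_data_block_size();               // 64
 *     const int nAtomsPadded = ((nAtoms + blockSize - 1) / blockSize) * blockSize;
 *     // nAtoms = 100  ->  nAtomsPadded = 128
 *
 * This mirrors the rounding performed in pme_gpu_reinit_atoms() further below.
 */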
132 void pme_gpu_synchronize(const PmeGpu* pmeGpu)
134 pmeGpu->archSpecific->pmeStream_.synchronize();
137 void pme_gpu_alloc_energy_virial(PmeGpu* pmeGpu)
139 const size_t energyAndVirialSize = c_virialAndEnergyCount * sizeof(float);
142 pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
143 "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
145 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
147 allocateDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy[gridIndex],
148 c_virialAndEnergyCount,
149 pmeGpu->archSpecific->deviceContext_);
150 pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_virialAndEnergy[gridIndex]), energyAndVirialSize);
154 void pme_gpu_free_energy_virial(PmeGpu* pmeGpu)
156 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
158 freeDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy[gridIndex]);
159 pfree(pmeGpu->staging.h_virialAndEnergy[gridIndex]);
160 pmeGpu->staging.h_virialAndEnergy[gridIndex] = nullptr;
164 void pme_gpu_clear_energy_virial(const PmeGpu* pmeGpu)
166 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
168 clearDeviceBufferAsync(&pmeGpu->kernelParams->constants.d_virialAndEnergy[gridIndex],
170 c_virialAndEnergyCount,
171 pmeGpu->archSpecific->pmeStream_);
175 void pme_gpu_realloc_and_copy_bspline_values(PmeGpu* pmeGpu, const int gridIndex)
178 pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
179 "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
180 GMX_ASSERT(gridIndex < pmeGpu->common->ngrids,
181 "Invalid combination of gridIndex and number of grids");
183 const int splineValuesOffset[DIM] = { 0,
184 pmeGpu->kernelParams->grid.realGridSize[XX],
185 pmeGpu->kernelParams->grid.realGridSize[XX]
186 + pmeGpu->kernelParams->grid.realGridSize[YY] };
187 memcpy(&pmeGpu->kernelParams->grid.splineValuesOffset, &splineValuesOffset, sizeof(splineValuesOffset));
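    /* Layout note: the three per-dimension B-spline moduli arrays are packed back to back into
     * one device buffer, so for a hypothetical 60 x 64 x 72 grid the offsets computed above are
     * { 0, 60, 124 } and the total size below is 60 + 64 + 72 = 196 floats. */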
189 const int newSplineValuesSize = pmeGpu->kernelParams->grid.realGridSize[XX]
190 + pmeGpu->kernelParams->grid.realGridSize[YY]
191 + pmeGpu->kernelParams->grid.realGridSize[ZZ];
192 const bool shouldRealloc = (newSplineValuesSize > pmeGpu->archSpecific->splineValuesSize[gridIndex]);
193 reallocateDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli[gridIndex],
195 &pmeGpu->archSpecific->splineValuesSize[gridIndex],
196 &pmeGpu->archSpecific->splineValuesCapacity[gridIndex],
197 pmeGpu->archSpecific->deviceContext_);
200 /* Reallocate the host buffer */
201 pfree(pmeGpu->staging.h_splineModuli[gridIndex]);
202 pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_splineModuli[gridIndex]),
203 newSplineValuesSize * sizeof(float));
205 for (int i = 0; i < DIM; i++)
207 memcpy(pmeGpu->staging.h_splineModuli[gridIndex] + splineValuesOffset[i],
208 pmeGpu->common->bsp_mod[i].data(),
209 pmeGpu->common->bsp_mod[i].size() * sizeof(float));
211 /* TODO: pin original buffer instead! */
212 copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli[gridIndex],
213 pmeGpu->staging.h_splineModuli[gridIndex],
216 pmeGpu->archSpecific->pmeStream_,
217 pmeGpu->settings.transferKind,
221 void pme_gpu_free_bspline_values(const PmeGpu* pmeGpu)
223 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
225 pfree(pmeGpu->staging.h_splineModuli[gridIndex]);
226 freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli[gridIndex]);
230 void pme_gpu_realloc_forces(PmeGpu* pmeGpu)
232 const size_t newForcesSize = pmeGpu->nAtomsAlloc * DIM;
233 GMX_ASSERT(newForcesSize > 0, "Bad number of atoms in PME GPU");
234 reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces,
236 &pmeGpu->archSpecific->forcesSize,
237 &pmeGpu->archSpecific->forcesSizeAlloc,
238 pmeGpu->archSpecific->deviceContext_);
239 pmeGpu->staging.h_forces.reserveWithPadding(pmeGpu->nAtomsAlloc);
240 pmeGpu->staging.h_forces.resizeWithPadding(pmeGpu->kernelParams->atoms.nAtoms);
243 void pme_gpu_free_forces(const PmeGpu* pmeGpu)
245 freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces);
248 void pme_gpu_copy_input_forces(PmeGpu* pmeGpu)
250 GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
251 float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
252 copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces,
255 DIM * pmeGpu->kernelParams->atoms.nAtoms,
256 pmeGpu->archSpecific->pmeStream_,
257 pmeGpu->settings.transferKind,
261 void pme_gpu_copy_output_forces(PmeGpu* pmeGpu)
263 GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
264 float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
265 copyFromDeviceBuffer(h_forcesFloat,
266 &pmeGpu->kernelParams->atoms.d_forces,
268 DIM * pmeGpu->kernelParams->atoms.nAtoms,
269 pmeGpu->archSpecific->pmeStream_,
270 pmeGpu->settings.transferKind,
274 void pme_gpu_realloc_and_copy_input_coefficients(const PmeGpu* pmeGpu,
275 const float* h_coefficients,
278 GMX_ASSERT(h_coefficients, "Bad host-side charge buffer in PME GPU");
279 const size_t newCoefficientsSize = pmeGpu->nAtomsAlloc;
280 GMX_ASSERT(newCoefficientsSize > 0, "Bad number of atoms in PME GPU");
281 reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex],
283 &pmeGpu->archSpecific->coefficientsSize[gridIndex],
284 &pmeGpu->archSpecific->coefficientsCapacity[gridIndex],
285 pmeGpu->archSpecific->deviceContext_);
286 copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex],
287 const_cast<float*>(h_coefficients),
289 pmeGpu->kernelParams->atoms.nAtoms,
290 pmeGpu->archSpecific->pmeStream_,
291 pmeGpu->settings.transferKind,
294 const size_t paddingIndex = pmeGpu->kernelParams->atoms.nAtoms;
295 const size_t paddingCount = pmeGpu->nAtomsAlloc - paddingIndex;
296 if (paddingCount > 0)
298 clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex],
301 pmeGpu->archSpecific->pmeStream_);
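        /* The padding region [nAtoms, nAtomsAlloc) is zeroed so that the surplus threads of the
         * last, partially filled block spread/gather a zero charge and do not perturb the result
         * (e.g. with 100 atoms padded to 128, entries 100..127 of d_coefficients are cleared). */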
305 void pme_gpu_free_coefficients(const PmeGpu* pmeGpu)
307 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
309 freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex]);
313 void pme_gpu_realloc_spline_data(PmeGpu* pmeGpu)
315 const int order = pmeGpu->common->pme_order;
316 const int newSplineDataSize = DIM * order * pmeGpu->nAtomsAlloc;
317 GMX_ASSERT(newSplineDataSize > 0, "Bad number of atoms in PME GPU");
318 /* Two arrays of the same size */
319 const bool shouldRealloc = (newSplineDataSize > pmeGpu->archSpecific->splineDataSize);
320 int currentSizeTemp = pmeGpu->archSpecific->splineDataSize;
321 int currentSizeTempAlloc = pmeGpu->archSpecific->splineDataSizeAlloc;
322 reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta,
                           &currentSizeTempAlloc,
326 pmeGpu->archSpecific->deviceContext_);
327 reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta,
329 &pmeGpu->archSpecific->splineDataSize,
330 &pmeGpu->archSpecific->splineDataSizeAlloc,
331 pmeGpu->archSpecific->deviceContext_);
332 // the host side reallocation
335 pfree(pmeGpu->staging.h_theta);
336 pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_theta), newSplineDataSize * sizeof(float));
337 pfree(pmeGpu->staging.h_dtheta);
338 pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_dtheta), newSplineDataSize * sizeof(float));
342 void pme_gpu_free_spline_data(const PmeGpu* pmeGpu)
344 /* Two arrays of the same size */
345 freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta);
346 freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta);
347 pfree(pmeGpu->staging.h_theta);
348 pfree(pmeGpu->staging.h_dtheta);
351 void pme_gpu_realloc_grid_indices(PmeGpu* pmeGpu)
353 const size_t newIndicesSize = DIM * pmeGpu->nAtomsAlloc;
354 GMX_ASSERT(newIndicesSize > 0, "Bad number of atoms in PME GPU");
355 reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices,
357 &pmeGpu->archSpecific->gridlineIndicesSize,
358 &pmeGpu->archSpecific->gridlineIndicesSizeAlloc,
359 pmeGpu->archSpecific->deviceContext_);
360 pfree(pmeGpu->staging.h_gridlineIndices);
361 pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_gridlineIndices), newIndicesSize * sizeof(int));
364 void pme_gpu_free_grid_indices(const PmeGpu* pmeGpu)
366 freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices);
367 pfree(pmeGpu->staging.h_gridlineIndices);
370 void pme_gpu_realloc_grids(PmeGpu* pmeGpu)
372 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
374 const int newRealGridSize = kernelParamsPtr->grid.realGridSizePadded[XX]
375 * kernelParamsPtr->grid.realGridSizePadded[YY]
376 * kernelParamsPtr->grid.realGridSizePadded[ZZ];
377 const int newComplexGridSize = kernelParamsPtr->grid.complexGridSizePadded[XX]
378 * kernelParamsPtr->grid.complexGridSizePadded[YY]
379 * kernelParamsPtr->grid.complexGridSizePadded[ZZ] * 2;
380 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
382 // Multiplied by 2 because we count complex grid size for complex numbers, but all allocations/pointers are float
383 if (pmeGpu->archSpecific->performOutOfPlaceFFT)
385 /* 2 separate grids */
386 reallocateDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid[gridIndex],
388 &pmeGpu->archSpecific->complexGridSize[gridIndex],
389 &pmeGpu->archSpecific->complexGridCapacity[gridIndex],
390 pmeGpu->archSpecific->deviceContext_);
391 reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid[gridIndex],
393 &pmeGpu->archSpecific->realGridSize[gridIndex],
394 &pmeGpu->archSpecific->realGridCapacity[gridIndex],
395 pmeGpu->archSpecific->deviceContext_);
399 /* A single buffer so that any grid will fit */
400 const int newGridsSize = std::max(newRealGridSize, newComplexGridSize);
401 reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid[gridIndex],
403 &pmeGpu->archSpecific->realGridSize[gridIndex],
404 &pmeGpu->archSpecific->realGridCapacity[gridIndex],
405 pmeGpu->archSpecific->deviceContext_);
406 kernelParamsPtr->grid.d_fourierGrid[gridIndex] = kernelParamsPtr->grid.d_realGrid[gridIndex];
407 pmeGpu->archSpecific->complexGridSize[gridIndex] =
408 pmeGpu->archSpecific->realGridSize[gridIndex];
409 // the size might get used later for copying the grid
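            /* With in-place FFT, d_fourierGrid above is merely an alias of d_realGrid, and the
             * single allocation is sized to max(real, complex) elements so that either
             * representation of the grid fits in it. */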
414 void pme_gpu_free_grids(const PmeGpu* pmeGpu)
416 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
418 if (pmeGpu->archSpecific->performOutOfPlaceFFT)
420 freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_fourierGrid[gridIndex]);
422 freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid[gridIndex]);
426 void pme_gpu_clear_grids(const PmeGpu* pmeGpu)
428 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
430 clearDeviceBufferAsync(&pmeGpu->kernelParams->grid.d_realGrid[gridIndex],
432 pmeGpu->archSpecific->realGridSize[gridIndex],
433 pmeGpu->archSpecific->pmeStream_);
437 void pme_gpu_realloc_and_copy_fract_shifts(PmeGpu* pmeGpu)
439 pme_gpu_free_fract_shifts(pmeGpu);
441 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
443 const int nx = kernelParamsPtr->grid.realGridSize[XX];
444 const int ny = kernelParamsPtr->grid.realGridSize[YY];
445 const int nz = kernelParamsPtr->grid.realGridSize[ZZ];
446 const int cellCount = c_pmeNeighborUnitcellCount;
447 const int gridDataOffset[DIM] = { 0, cellCount * nx, cellCount * (nx + ny) };
449 memcpy(kernelParamsPtr->grid.tablesOffsets, &gridDataOffset, sizeof(gridDataOffset));
451 const int newFractShiftsSize = cellCount * (nx + ny + nz);
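    /* Layout note: like the spline moduli, the fractional-shift and gridline-index tables for
     * X, Y and Z are packed into single buffers, each dimension occupying cellCount * n entries
     * at the offsets { 0, cellCount * nx, cellCount * (nx + ny) } stored in grid.tablesOffsets
     * above. */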
453 initParamLookupTable(&kernelParamsPtr->grid.d_fractShiftsTable,
454 &kernelParamsPtr->fractShiftsTableTexture,
455 pmeGpu->common->fsh.data(),
457 pmeGpu->archSpecific->deviceContext_);
459 initParamLookupTable(&kernelParamsPtr->grid.d_gridlineIndicesTable,
460 &kernelParamsPtr->gridlineIndicesTableTexture,
461 pmeGpu->common->nn.data(),
463 pmeGpu->archSpecific->deviceContext_);
466 void pme_gpu_free_fract_shifts(const PmeGpu* pmeGpu)
468 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
470 destroyParamLookupTable(&kernelParamsPtr->grid.d_fractShiftsTable,
471 kernelParamsPtr->fractShiftsTableTexture);
472 destroyParamLookupTable(&kernelParamsPtr->grid.d_gridlineIndicesTable,
473 kernelParamsPtr->gridlineIndicesTableTexture);
474 #elif GMX_GPU_OPENCL || GMX_GPU_SYCL
475 freeDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable);
476 freeDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable);
480 bool pme_gpu_stream_query(const PmeGpu* pmeGpu)
482 return haveStreamTasksCompleted(pmeGpu->archSpecific->pmeStream_);
485 void pme_gpu_copy_input_gather_grid(const PmeGpu* pmeGpu, const float* h_grid, const int gridIndex)
487 copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid[gridIndex],
490 pmeGpu->archSpecific->realGridSize[gridIndex],
491 pmeGpu->archSpecific->pmeStream_,
492 pmeGpu->settings.transferKind,
496 void pme_gpu_copy_output_spread_grid(const PmeGpu* pmeGpu, float* h_grid, const int gridIndex)
498 copyFromDeviceBuffer(h_grid,
499 &pmeGpu->kernelParams->grid.d_realGrid[gridIndex],
501 pmeGpu->archSpecific->realGridSize[gridIndex],
502 pmeGpu->archSpecific->pmeStream_,
503 pmeGpu->settings.transferKind,
505 pmeGpu->archSpecific->syncSpreadGridD2H.markEvent(pmeGpu->archSpecific->pmeStream_);
508 void pme_gpu_copy_output_spread_atom_data(const PmeGpu* pmeGpu)
510 const size_t splinesCount = DIM * pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order;
511 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
512 copyFromDeviceBuffer(pmeGpu->staging.h_dtheta,
513 &kernelParamsPtr->atoms.d_dtheta,
516 pmeGpu->archSpecific->pmeStream_,
517 pmeGpu->settings.transferKind,
519 copyFromDeviceBuffer(pmeGpu->staging.h_theta,
520 &kernelParamsPtr->atoms.d_theta,
523 pmeGpu->archSpecific->pmeStream_,
524 pmeGpu->settings.transferKind,
526 copyFromDeviceBuffer(pmeGpu->staging.h_gridlineIndices,
527 &kernelParamsPtr->atoms.d_gridlineIndices,
529 kernelParamsPtr->atoms.nAtoms * DIM,
530 pmeGpu->archSpecific->pmeStream_,
531 pmeGpu->settings.transferKind,
535 void pme_gpu_copy_input_gather_atom_data(const PmeGpu* pmeGpu)
537 const size_t splinesCount = DIM * pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order;
538 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
540 // TODO: could clear only the padding and not the whole thing, but this is a test-exclusive code anyway
541 clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_gridlineIndices,
543 pmeGpu->nAtomsAlloc * DIM,
544 pmeGpu->archSpecific->pmeStream_);
545 clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_dtheta,
547 pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
548 pmeGpu->archSpecific->pmeStream_);
549 clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_theta,
551 pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
552 pmeGpu->archSpecific->pmeStream_);
554 copyToDeviceBuffer(&kernelParamsPtr->atoms.d_dtheta,
555 pmeGpu->staging.h_dtheta,
558 pmeGpu->archSpecific->pmeStream_,
559 pmeGpu->settings.transferKind,
561 copyToDeviceBuffer(&kernelParamsPtr->atoms.d_theta,
562 pmeGpu->staging.h_theta,
565 pmeGpu->archSpecific->pmeStream_,
566 pmeGpu->settings.transferKind,
568 copyToDeviceBuffer(&kernelParamsPtr->atoms.d_gridlineIndices,
569 pmeGpu->staging.h_gridlineIndices,
571 kernelParamsPtr->atoms.nAtoms * DIM,
572 pmeGpu->archSpecific->pmeStream_,
573 pmeGpu->settings.transferKind,
577 void pme_gpu_sync_spread_grid(const PmeGpu* pmeGpu)
579 pmeGpu->archSpecific->syncSpreadGridD2H.waitForEvent();
/*! \brief Internal GPU initialization for PME.
 *
 * \param[in] pmeGpu         GPU PME data.
 * \param[in] deviceContext  GPU context.
 * \param[in] deviceStream   GPU stream.
 */
588 static void pme_gpu_init_internal(PmeGpu* pmeGpu, const DeviceContext& deviceContext, const DeviceStream& deviceStream)
590 /* Allocate the target-specific structures */
591 pmeGpu->archSpecific.reset(new PmeGpuSpecific(deviceContext, deviceStream));
592 pmeGpu->kernelParams.reset(new PmeGpuKernelParams());
594 pmeGpu->archSpecific->performOutOfPlaceFFT = true;
    /* This should give better performance, according to the cuFFT documentation.
     * The performance seems to be the same though.
     * TODO: PME could also try to pick up nice grid sizes (with factors of 2, 3, 5, 7).
     */
#if GMX_GPU_CUDA
    pmeGpu->maxGridWidthX = deviceContext.deviceInfo().prop.maxGridSize[0];
#else
    // Use this path for any non-CUDA GPU acceleration
    // TODO: is there really no global work size limit in OpenCL?
    pmeGpu->maxGridWidthX = INT32_MAX / 2;
#endif
609 void pme_gpu_reinit_3dfft(const PmeGpu* pmeGpu)
611 if (pme_gpu_settings(pmeGpu).performGPUFFT)
613 pmeGpu->archSpecific->fftSetup.resize(0);
614 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
616 pmeGpu->archSpecific->fftSetup.push_back(std::make_unique<GpuParallel3dFft>(pmeGpu, gridIndex));
621 void pme_gpu_destroy_3dfft(const PmeGpu* pmeGpu)
623 pmeGpu->archSpecific->fftSetup.resize(0);
626 void pme_gpu_getEnergyAndVirial(const gmx_pme_t& pme, const float lambda, PmeOutput* output)
628 const PmeGpu* pmeGpu = pme.gpu;
630 GMX_ASSERT(lambda == 1.0 || pmeGpu->common->ngrids == 2,
631 "Invalid combination of lambda and number of grids");
633 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
635 for (int j = 0; j < c_virialAndEnergyCount; j++)
637 GMX_ASSERT(std::isfinite(pmeGpu->staging.h_virialAndEnergy[gridIndex][j]),
638 "PME GPU produces incorrect energy/virial.");
641 for (int dim1 = 0; dim1 < DIM; dim1++)
643 for (int dim2 = 0; dim2 < DIM; dim2++)
645 output->coulombVirial_[dim1][dim2] = 0;
648 output->coulombEnergy_ = 0;
650 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
652 if (pmeGpu->common->ngrids == 2)
654 scale = gridIndex == 0 ? (1.0 - lambda) : lambda;
656 output->coulombVirial_[XX][XX] +=
657 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][0];
658 output->coulombVirial_[YY][YY] +=
659 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][1];
660 output->coulombVirial_[ZZ][ZZ] +=
661 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][2];
662 output->coulombVirial_[XX][YY] +=
663 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][3];
664 output->coulombVirial_[YY][XX] +=
665 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][3];
666 output->coulombVirial_[XX][ZZ] +=
667 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][4];
668 output->coulombVirial_[ZZ][XX] +=
669 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][4];
670 output->coulombVirial_[YY][ZZ] +=
671 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][5];
672 output->coulombVirial_[ZZ][YY] +=
673 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][5];
674 output->coulombEnergy_ += scale * 0.5F * pmeGpu->staging.h_virialAndEnergy[gridIndex][6];
676 if (pmeGpu->common->ngrids > 1)
678 output->coulombDvdl_ = 0.5F
679 * (pmeGpu->staging.h_virialAndEnergy[FEP_STATE_B][6]
680 - pmeGpu->staging.h_virialAndEnergy[FEP_STATE_A][6]);
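        /* The accumulation above applies a factor 0.5 to slot [6], i.e. that slot stores twice
         * the reciprocal-space energy of its grid; the dV/dlambda of the perturbed Coulomb term
         * is therefore half the difference of the B- and A-grid entries. */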
/*! \brief Sets the force-related members in \p output
 *
 * \param[in]  pmeGpu  PME GPU data structure
 * \param[out] output  Pointer to PME output data structure
 */
689 static void pme_gpu_getForceOutput(PmeGpu* pmeGpu, PmeOutput* output)
691 output->haveForceOutput_ = !pmeGpu->settings.useGpuForceReduction;
692 if (output->haveForceOutput_)
694 output->forces_ = pmeGpu->staging.h_forces;
698 PmeOutput pme_gpu_getOutput(const gmx_pme_t& pme, const bool computeEnergyAndVirial, const real lambdaQ)
700 PmeGpu* pmeGpu = pme.gpu;
704 pme_gpu_getForceOutput(pmeGpu, &output);
706 if (computeEnergyAndVirial)
708 if (pme_gpu_settings(pmeGpu).performGPUSolve)
710 pme_gpu_getEnergyAndVirial(pme, lambdaQ, &output);
714 get_pme_ener_vir_q(pme.solve_work, pme.nthread, &output);
720 void pme_gpu_update_input_box(PmeGpu gmx_unused* pmeGpu, const matrix gmx_unused box)
723 GMX_THROW(gmx::NotImplementedError("PME is implemented for single-precision only on GPU"));
726 pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
727 auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
728 kernelParamsPtr->current.boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
729 GMX_ASSERT(kernelParamsPtr->current.boxVolume != 0.0F, "Zero volume of the unit cell");
731 gmx::invertBoxMatrix(scaledBox, recipBox);
    /* The GPU recipBox is transposed as compared to the CPU recipBox.
     * Spread uses matrix columns (while solve and gather use rows).
     * There is no particular reason for this; it might be further rethought/optimized for better access patterns.
     */
737 const real newRecipBox[DIM][DIM] = { { recipBox[XX][XX], recipBox[YY][XX], recipBox[ZZ][XX] },
738 { 0.0, recipBox[YY][YY], recipBox[ZZ][YY] },
739 { 0.0, 0.0, recipBox[ZZ][ZZ] } };
740 memcpy(kernelParamsPtr->current.recipBox, newRecipBox, sizeof(matrix));
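    /* Illustrative sketch (not from the original source): for a triclinic box the CPU recipBox
     * is lower-triangular, and the GPU stores its transpose, e.g.
     *
     *     CPU:  { { rxx, 0,   0   },        GPU:  { { rxx, ryx, rzx },
     *             { ryx, ryy, 0   },   ->          { 0,   ryy, rzy },
     *             { rzx, rzy, rzz } }              { 0,   0,   rzz } }
     *
     * which is what the newRecipBox initializer above writes row by row. */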
/*! \brief \libinternal
 * (Re-)initializes all the PME GPU data related to the grid size and cut-off.
 *
 * \param[in] pmeGpu  The PME GPU structure.
 */
749 static void pme_gpu_reinit_grids(PmeGpu* pmeGpu)
751 auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
754 pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
755 "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
757 kernelParamsPtr->grid.ewaldFactor =
758 (M_PI * M_PI) / (pmeGpu->common->ewaldcoeff_q * pmeGpu->common->ewaldcoeff_q);
759 /* The grid size variants */
760 for (int i = 0; i < DIM; i++)
762 kernelParamsPtr->grid.realGridSize[i] = pmeGpu->common->nk[i];
763 kernelParamsPtr->grid.realGridSizeFP[i] =
764 static_cast<float>(kernelParamsPtr->grid.realGridSize[i]);
765 kernelParamsPtr->grid.realGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
        // The complex grid currently uses no padding;
        // if it starts to do so, then another test should be added for that
769 kernelParamsPtr->grid.complexGridSize[i] = kernelParamsPtr->grid.realGridSize[i];
770 kernelParamsPtr->grid.complexGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
    /* FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
    if (!pme_gpu_settings(pmeGpu).performGPUFFT)
    {
        // This allows for GPU spreading grid and CPU fftgrid to have the same layout, so that we can copy the data directly
        kernelParamsPtr->grid.realGridSizePadded[ZZ] =
                (kernelParamsPtr->grid.realGridSize[ZZ] / 2 + 1) * 2;
    }
    /* GPU FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
    kernelParamsPtr->grid.complexGridSize[ZZ] /= 2;
    kernelParamsPtr->grid.complexGridSize[ZZ]++;
    kernelParamsPtr->grid.complexGridSizePadded[ZZ] = kernelParamsPtr->grid.complexGridSize[ZZ];
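    /* Illustrative numbers (hypothetical, not from the original source): for a 64-point minor
     * (Z) dimension, the real-to-complex FFT produces 64 / 2 + 1 = 33 complex elements, so
     * complexGridSize[ZZ] becomes 33 here, while with CPU FFT the padded real size becomes
     * (64 / 2 + 1) * 2 = 66 floats so the CPU fftgrid layout can be copied directly. */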
784 pme_gpu_realloc_and_copy_fract_shifts(pmeGpu);
785 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
787 pme_gpu_realloc_and_copy_bspline_values(pmeGpu, gridIndex);
790 pme_gpu_realloc_grids(pmeGpu);
791 pme_gpu_reinit_3dfft(pmeGpu);
/* Several GPU functions that refer to the CPU PME data live here.
 * We would like to keep these away from the GPU-framework specific code for clarity,
 * as well as compilation issues with MPI.
 */

/*! \brief \libinternal
 * Copies everything useful from the PME CPU to the PME GPU structure.
 * The goal is to minimize interaction with the PME CPU structure in the GPU code.
 *
 * \param[in] pme  The PME structure.
 */
805 static void pme_gpu_copy_common_data_from(const gmx_pme_t* pme)
    /* TODO: Consider refactoring the CPU PME code to use the same structure,
     * so that this function becomes 2 lines */
809 PmeGpu* pmeGpu = pme->gpu;
810 pmeGpu->common->ngrids = pme->bFEP_q ? 2 : 1;
811 pmeGpu->common->epsilon_r = pme->epsilon_r;
812 pmeGpu->common->ewaldcoeff_q = pme->ewaldcoeff_q;
813 pmeGpu->common->nk[XX] = pme->nkx;
814 pmeGpu->common->nk[YY] = pme->nky;
815 pmeGpu->common->nk[ZZ] = pme->nkz;
816 pmeGpu->common->pme_order = pme->pme_order;
817 if (pmeGpu->common->pme_order != c_pmeGpuOrder)
819 GMX_THROW(gmx::NotImplementedError("pme_order != 4 is not implemented!"));
821 for (int i = 0; i < DIM; i++)
823 pmeGpu->common->bsp_mod[i].assign(pme->bsp_mod[i], pme->bsp_mod[i] + pmeGpu->common->nk[i]);
825 const int cellCount = c_pmeNeighborUnitcellCount;
826 pmeGpu->common->fsh.resize(0);
827 pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshx, pme->fshx + cellCount * pme->nkx);
828 pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshy, pme->fshy + cellCount * pme->nky);
829 pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshz, pme->fshz + cellCount * pme->nkz);
830 pmeGpu->common->nn.resize(0);
831 pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnx, pme->nnx + cellCount * pme->nkx);
832 pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nny, pme->nny + cellCount * pme->nky);
833 pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnz, pme->nnz + cellCount * pme->nkz);
834 pmeGpu->common->runMode = pme->runMode;
835 pmeGpu->common->isRankPmeOnly = !pme->bPPnode;
836 pmeGpu->common->boxScaler = pme->boxScaler;
/*! \libinternal \brief
 * Uses heuristics to select the best performing PME gather and scatter kernels.
 *
 * \param[in,out] pmeGpu  The PME GPU structure.
 */
844 static void pme_gpu_select_best_performing_pme_spreadgather_kernels(PmeGpu* pmeGpu)
846 if (GMX_GPU_CUDA && pmeGpu->kernelParams->atoms.nAtoms > c_pmeGpuPerformanceAtomLimit)
848 pmeGpu->settings.threadsPerAtom = ThreadsPerAtom::Order;
849 pmeGpu->settings.recalculateSplines = true;
853 pmeGpu->settings.threadsPerAtom = ThreadsPerAtom::OrderSquared;
854 pmeGpu->settings.recalculateSplines = false;
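/* Note on the heuristic above: it trades spline storage for arithmetic. Above
 * c_pmeGpuPerformanceAtomLimit (~23k atoms) on CUDA, each atom is handled by 'order' threads and
 * the splines are recomputed in the gather kernel instead of being re-read from global memory;
 * below the limit, order*order threads per atom with stored splines performs better. */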
/*! \libinternal \brief
 * Initializes the PME GPU data at the beginning of the run.
 * TODO: this should become PmeGpu::PmeGpu()
 *
 * \param[in,out] pme            The PME structure.
 * \param[in]     deviceContext  The GPU context.
 * \param[in]     deviceStream   The GPU stream.
 * \param[in,out] pmeGpuProgram  The handle to the program/kernel data created outside (e.g. in unit tests/runner)
 */
868 static void pme_gpu_init(gmx_pme_t* pme,
869 const DeviceContext& deviceContext,
870 const DeviceStream& deviceStream,
871 const PmeGpuProgram* pmeGpuProgram)
873 pme->gpu = new PmeGpu();
874 PmeGpu* pmeGpu = pme->gpu;
875 changePinningPolicy(&pmeGpu->staging.h_forces, pme_get_pinning_policy());
876 pmeGpu->common = std::make_shared<PmeShared>();
    /* These settings are set here for the whole run; dynamic ones are set in pme_gpu_reinit() */
    /* A convenience variable. */
    pmeGpu->settings.useDecomposition = (pme->nnodes != 1);
    /* TODO: CPU gather with GPU spread is broken due to different theta/dtheta layout. */
    pmeGpu->settings.performGPUGather = true;
    // By default GPU-side reduction is off (explicitly set here for tests, otherwise reset per-step)
    pmeGpu->settings.useGpuForceReduction = false;
886 pme_gpu_set_testing(pmeGpu, false);
888 GMX_ASSERT(pmeGpuProgram != nullptr, "GPU kernels must be already compiled");
889 pmeGpu->programHandle_ = pmeGpuProgram;
891 pmeGpu->initializedClfftLibrary_ = std::make_unique<gmx::ClfftInitializer>();
893 pme_gpu_init_internal(pmeGpu, deviceContext, deviceStream);
895 pme_gpu_copy_common_data_from(pme);
896 pme_gpu_alloc_energy_virial(pmeGpu);
898 GMX_ASSERT(pmeGpu->common->epsilon_r != 0.0F, "PME GPU: bad electrostatic coefficient");
900 auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
901 kernelParamsPtr->constants.elFactor = gmx::c_one4PiEps0 / pmeGpu->common->epsilon_r;
904 void pme_gpu_get_real_grid_sizes(const PmeGpu* pmeGpu, gmx::IVec* gridSize, gmx::IVec* paddedGridSize)
906 GMX_ASSERT(gridSize != nullptr, "");
907 GMX_ASSERT(paddedGridSize != nullptr, "");
908 GMX_ASSERT(pmeGpu != nullptr, "");
909 auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
910 for (int i = 0; i < DIM; i++)
912 (*gridSize)[i] = kernelParamsPtr->grid.realGridSize[i];
913 (*paddedGridSize)[i] = kernelParamsPtr->grid.realGridSizePadded[i];
917 void pme_gpu_reinit(gmx_pme_t* pme,
918 const DeviceContext* deviceContext,
919 const DeviceStream* deviceStream,
920 const PmeGpuProgram* pmeGpuProgram)
922 GMX_ASSERT(pme != nullptr, "Need valid PME object");
926 GMX_RELEASE_ASSERT(deviceContext != nullptr,
927 "Device context can not be nullptr when setting up PME on GPU.");
928 GMX_RELEASE_ASSERT(deviceStream != nullptr,
929 "Device stream can not be nullptr when setting up PME on GPU.");
930 /* First-time initialization */
931 pme_gpu_init(pme, *deviceContext, *deviceStream, pmeGpuProgram);
935 /* After this call nothing in the GPU code should refer to the gmx_pme_t *pme itself - until the next pme_gpu_reinit */
936 pme_gpu_copy_common_data_from(pme);
938 /* GPU FFT will only get used for a single rank.*/
939 pme->gpu->settings.performGPUFFT =
940 (pme->gpu->common->runMode == PmeRunMode::GPU) && !pme->gpu->settings.useDecomposition;
941 pme->gpu->settings.performGPUSolve = (pme->gpu->common->runMode == PmeRunMode::GPU);
943 /* Reinit active timers */
944 pme_gpu_reinit_timings(pme->gpu);
946 pme_gpu_reinit_grids(pme->gpu);
    // Note: if timing the reinit launch overhead becomes more relevant
    // (e.g. with regular PP-PME re-balancing), we should pass wcycle here.
949 pme_gpu_reinit_computation(pme, nullptr);
    /* Clear the previous box - doesn't hurt, and forces the PME CPU recipbox
     * update for mixed mode on grid switch. TODO: use shared recipbox field.
     */
    std::memset(pme->gpu->common->previousBox, 0, sizeof(pme->gpu->common->previousBox));
956 void pme_gpu_destroy(PmeGpu* pmeGpu)
958 /* Free lots of data */
959 pme_gpu_free_energy_virial(pmeGpu);
960 pme_gpu_free_bspline_values(pmeGpu);
961 pme_gpu_free_forces(pmeGpu);
962 pme_gpu_free_coefficients(pmeGpu);
963 pme_gpu_free_spline_data(pmeGpu);
964 pme_gpu_free_grid_indices(pmeGpu);
965 pme_gpu_free_fract_shifts(pmeGpu);
966 pme_gpu_free_grids(pmeGpu);
968 pme_gpu_destroy_3dfft(pmeGpu);
973 void pme_gpu_reinit_atoms(PmeGpu* pmeGpu, const int nAtoms, const real* chargesA, const real* chargesB)
975 auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
976 kernelParamsPtr->atoms.nAtoms = nAtoms;
977 const int block_size = pme_gpu_get_atom_data_block_size();
978 const int nAtomsNewPadded = ((nAtoms + block_size - 1) / block_size) * block_size;
979 const bool haveToRealloc = (pmeGpu->nAtomsAlloc < nAtomsNewPadded);
980 pmeGpu->nAtomsAlloc = nAtomsNewPadded;
983 GMX_RELEASE_ASSERT(false, "Only single precision supported");
    GMX_UNUSED_VALUE(chargesA);
    GMX_UNUSED_VALUE(chargesB);
987 /* Could also be checked for haveToRealloc, but the copy always needs to be performed */
988 pme_gpu_realloc_and_copy_input_coefficients(pmeGpu, reinterpret_cast<const float*>(chargesA), gridIndex);
990 if (chargesB != nullptr)
992 pme_gpu_realloc_and_copy_input_coefficients(
993 pmeGpu, reinterpret_cast<const float*>(chargesB), gridIndex);
            /* Fill the second set of coefficients with chargesA as well to be able to avoid
             * conditionals in the GPU kernels */
            /* FIXME: This should be avoided by making a separate templated version of the
             * relevant kernel(s) (probably only pme_gather_kernel). That would require a
             * reduction of the current number of templated parameters of that kernel. */
1002 pme_gpu_realloc_and_copy_input_coefficients(
1003 pmeGpu, reinterpret_cast<const float*>(chargesA), gridIndex);
1009 pme_gpu_realloc_forces(pmeGpu);
1010 pme_gpu_realloc_spline_data(pmeGpu);
1011 pme_gpu_realloc_grid_indices(pmeGpu);
1013 pme_gpu_select_best_performing_pme_spreadgather_kernels(pmeGpu);
/*! \internal \brief
 * Returns raw timing event from the corresponding GpuRegionTimer (if timings are enabled).
 * In CUDA result can be nullptr stub, per GpuRegionTimer implementation.
 *
 * \param[in] pmeGpu      The PME GPU data structure.
 * \param[in] PMEStageId  The PME GPU stage gtPME_ index from the enum in src/gromacs/timing/gpu_timing.h
 */
1023 static CommandEvent* pme_gpu_fetch_timing_event(const PmeGpu* pmeGpu, size_t PMEStageId)
1025 CommandEvent* timingEvent = nullptr;
1026 if (pme_gpu_timings_enabled(pmeGpu))
1028 GMX_ASSERT(PMEStageId < pmeGpu->archSpecific->timingEvents.size(),
1029 "Wrong PME GPU timing event index");
1030 timingEvent = pmeGpu->archSpecific->timingEvents[PMEStageId].fetchNextEvent();
1035 void pme_gpu_3dfft(const PmeGpu* pmeGpu, gmx_fft_direction dir, const int grid_index)
1037 int timerId = (dir == GMX_FFT_REAL_TO_COMPLEX) ? gtPME_FFT_R2C : gtPME_FFT_C2R;
1039 pme_gpu_start_timing(pmeGpu, timerId);
1040 pmeGpu->archSpecific->fftSetup[grid_index]->perform3dFft(
1041 dir, pme_gpu_fetch_timing_event(pmeGpu, timerId));
1042 pme_gpu_stop_timing(pmeGpu, timerId);
/*! \brief
 * Given possibly large \p blockCount, returns a compact 1D or 2D grid for kernel scheduling,
 * to minimize number of unused blocks.
 */
std::pair<int, int> inline pmeGpuCreateGrid(const PmeGpu* pmeGpu, int blockCount)
{
    // How many maximum widths in X do we need (hopefully just one)
    const int minRowCount = (blockCount + pmeGpu->maxGridWidthX - 1) / pmeGpu->maxGridWidthX;
    // Trying to make things even
    const int colCount = (blockCount + minRowCount - 1) / minRowCount;
    GMX_ASSERT((colCount * minRowCount - blockCount) >= 0, "pmeGpuCreateGrid: totally wrong");
    GMX_ASSERT((colCount * minRowCount - blockCount) < minRowCount,
               "pmeGpuCreateGrid: excessive blocks");
    return std::pair<int, int>(colCount, minRowCount);
}
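
/* Illustrative sketch (hypothetical numbers, not from the original source): how
 * pmeGpuCreateGrid() splits a large block count. With maxGridWidthX = 65535 and
 * blockCount = 100000:
 *
 *     minRowCount = (100000 + 65535 - 1) / 65535 = 2
 *     colCount    = (100000 + 2 - 1) / 2         = 50000
 *
 * giving a 50000 x 2 launch grid with 2 * 50000 - 100000 = 0 unused blocks. */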
/*! \brief
 * Returns a pointer to appropriate spline and spread kernel based on the input bool values
 *
 * \param[in]  pmeGpu                 The PME GPU structure.
 * \param[in]  threadsPerAtom         Controls whether we should use order or order*order threads per atom
 * \param[in]  writeSplinesToGlobal   bool controlling if we should write spline data to global memory
 * \param[in]  numGrids               Number of grids to use. numGrids == 2 if Coulomb is perturbed.
 *
 * \return Pointer to CUDA kernel
 */
1071 static auto selectSplineAndSpreadKernelPtr(const PmeGpu* pmeGpu,
1072 ThreadsPerAtom threadsPerAtom,
1073 bool writeSplinesToGlobal,
1076 PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1077 if (writeSplinesToGlobal)
1079 if (threadsPerAtom == ThreadsPerAtom::Order)
1083 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesThPerAtom4Dual;
1087 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesThPerAtom4Single;
1094 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesDual;
1098 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesSingle;
1104 if (threadsPerAtom == ThreadsPerAtom::Order)
1108 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Dual;
1112 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Single;
1119 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelDual;
1123 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelSingle;
/*! \brief
 * Returns a pointer to appropriate spline kernel based on the input bool values
 *
 * \param[in]  pmeGpu                 The PME GPU structure.
 * \param[in]  threadsPerAtom         Controls whether we should use order or order*order threads per atom
 * \param[in]  writeSplinesToGlobal   bool controlling if we should write spline data to global memory
 * \param[in]  numGrids               Number of grids to use. numGrids == 2 if Coulomb is perturbed.
 *
 * \return Pointer to CUDA kernel
 */
1141 static auto selectSplineKernelPtr(const PmeGpu* pmeGpu,
1142 ThreadsPerAtom threadsPerAtom,
1143 bool gmx_unused writeSplinesToGlobal,
1146 PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1148 writeSplinesToGlobal,
1149 "Spline data should always be written to global memory when just calculating splines");
1151 if (threadsPerAtom == ThreadsPerAtom::Order)
1155 kernelPtr = pmeGpu->programHandle_->impl_->splineKernelThPerAtom4Dual;
1159 kernelPtr = pmeGpu->programHandle_->impl_->splineKernelThPerAtom4Single;
1166 kernelPtr = pmeGpu->programHandle_->impl_->splineKernelDual;
1170 kernelPtr = pmeGpu->programHandle_->impl_->splineKernelSingle;
/*! \brief
 * Returns a pointer to appropriate spread kernel based on the input bool values
 *
 * \param[in]  pmeGpu                 The PME GPU structure.
 * \param[in]  threadsPerAtom         Controls whether we should use order or order*order threads per atom
 * \param[in]  writeSplinesToGlobal   bool controlling if we should write spline data to global memory
 * \param[in]  numGrids               Number of grids to use. numGrids == 2 if Coulomb is perturbed.
 *
 * \return Pointer to CUDA kernel
 */
1186 static auto selectSpreadKernelPtr(const PmeGpu* pmeGpu,
1187 ThreadsPerAtom threadsPerAtom,
1188 bool writeSplinesToGlobal,
1191 PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1192 if (writeSplinesToGlobal)
1194 if (threadsPerAtom == ThreadsPerAtom::Order)
1198 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelThPerAtom4Dual;
1201 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelThPerAtom4Single;
1208 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelDual;
1212 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelSingle;
1218 /* if we are not saving the spline data we need to recalculate it
1219 using the spline and spread Kernel */
1220 if (threadsPerAtom == ThreadsPerAtom::Order)
1224 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Dual;
1228 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Single;
1235 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelDual;
1239 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelSingle;
1246 void pme_gpu_spread(const PmeGpu* pmeGpu,
1247 GpuEventSynchronizer* xReadyOnDevice,
1249 bool computeSplines,
1254 pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
1255 "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
1257 GMX_ASSERT(computeSplines || spreadCharges,
1258 "PME spline/spread kernel has invalid input (nothing to do)");
1259 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1260 GMX_ASSERT(kernelParamsPtr->atoms.nAtoms > 0, "No atom data in PME GPU spread");
1262 const size_t blockSize = pmeGpu->programHandle_->impl_->spreadWorkGroupSize;
1264 const int order = pmeGpu->common->pme_order;
1265 GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1266 const bool writeGlobal = pmeGpu->settings.copyAllOutputs;
1267 const int threadsPerAtom =
1268 (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
1269 const bool recalculateSplines = pmeGpu->settings.recalculateSplines;
1271 GMX_ASSERT(!GMX_GPU_OPENCL || pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::OrderSquared,
1272 "Only 16 threads per atom supported in OpenCL");
1273 GMX_ASSERT(!GMX_GPU_OPENCL || !recalculateSplines,
1274 "Recalculating splines not supported in OpenCL");
1276 const int atomsPerBlock = blockSize / threadsPerAtom;
    // TODO: pick smaller block size in runtime if needed
    // (e.g. on 660 Ti where 50% occupancy is ~25% faster than 100% occupancy with RNAse (~17.8k atoms))
    // If doing so, change atomsPerBlock in the kernels as well.
    // TODO: test varying block sizes on modern arch-s as well
    // TODO: also consider using cudaFuncSetCacheConfig() for preferring shared memory on older architectures
    // (for spline data mostly)
    GMX_ASSERT(!(c_pmeAtomDataBlockSize % atomsPerBlock),
               "inconsistent atom data padding vs. spreading block size");
    // Ensure that coordinates are ready on the device before launching spread;
    // only needed with CUDA on PP+PME ranks, not on separate PME ranks, in unit tests
    // nor in OpenCL as these cases use a single stream (hence xReadyOnDevice == nullptr).
    GMX_ASSERT(!GMX_GPU_CUDA || xReadyOnDevice != nullptr || pmeGpu->common->isRankPmeOnly
                       || pme_gpu_settings(pmeGpu).copyAllOutputs,
               "Need a valid coordinate synchronizer on PP+PME ranks with CUDA.");
1296 xReadyOnDevice->enqueueWaitEvent(pmeGpu->archSpecific->pmeStream_);
1299 const int blockCount = pmeGpu->nAtomsAlloc / atomsPerBlock;
1300 auto dimGrid = pmeGpuCreateGrid(pmeGpu, blockCount);
1302 if (pmeGpu->common->ngrids == 1)
1304 kernelParamsPtr->current.scale = 1.0;
1308 kernelParamsPtr->current.scale = 1.0 - lambda;
1311 KernelLaunchConfig config;
1312 config.blockSize[0] = order;
1313 config.blockSize[1] = (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? 1 : order);
1314 config.blockSize[2] = atomsPerBlock;
1315 config.gridSize[0] = dimGrid.first;
1316 config.gridSize[1] = dimGrid.second;
1319 PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1324 timingId = gtPME_SPLINEANDSPREAD;
1325 kernelPtr = selectSplineAndSpreadKernelPtr(pmeGpu,
1326 pmeGpu->settings.threadsPerAtom,
1327 writeGlobal || (!recalculateSplines),
1328 pmeGpu->common->ngrids);
1332 timingId = gtPME_SPLINE;
1333 kernelPtr = selectSplineKernelPtr(pmeGpu,
1334 pmeGpu->settings.threadsPerAtom,
1335 writeGlobal || (!recalculateSplines),
1336 pmeGpu->common->ngrids);
1341 timingId = gtPME_SPREAD;
1342 kernelPtr = selectSpreadKernelPtr(pmeGpu,
1343 pmeGpu->settings.threadsPerAtom,
1344 writeGlobal || (!recalculateSplines),
1345 pmeGpu->common->ngrids);
1349 pme_gpu_start_timing(pmeGpu, timingId);
1350 auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1351 #if c_canEmbedBuffers
1352 const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1354 const auto kernelArgs =
1355 prepareGpuKernelArguments(kernelPtr,
1358 &kernelParamsPtr->atoms.d_theta,
1359 &kernelParamsPtr->atoms.d_dtheta,
1360 &kernelParamsPtr->atoms.d_gridlineIndices,
1361 &kernelParamsPtr->grid.d_realGrid[FEP_STATE_A],
1362 &kernelParamsPtr->grid.d_realGrid[FEP_STATE_B],
1363 &kernelParamsPtr->grid.d_fractShiftsTable,
1364 &kernelParamsPtr->grid.d_gridlineIndicesTable,
1365 &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_A],
1366 &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_B],
1367 &kernelParamsPtr->atoms.d_coordinates);
1371 kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME spline/spread", kernelArgs);
1372 pme_gpu_stop_timing(pmeGpu, timingId);
1374 const auto& settings = pmeGpu->settings;
1375 const bool copyBackGrid = spreadCharges && (!settings.performGPUFFT || settings.copyAllOutputs);
1378 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
1380 float* h_grid = h_grids[gridIndex];
1381 pme_gpu_copy_output_spread_grid(pmeGpu, h_grid, gridIndex);
1384 const bool copyBackAtomData =
1385 computeSplines && (!settings.performGPUGather || settings.copyAllOutputs);
1386 if (copyBackAtomData)
1388 pme_gpu_copy_output_spread_atom_data(pmeGpu);
1392 void pme_gpu_solve(const PmeGpu* pmeGpu,
1393 const int gridIndex,
1395 GridOrdering gridOrdering,
1396 bool computeEnergyAndVirial)
1399 pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
1400 "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
1401 GMX_ASSERT(gridIndex < pmeGpu->common->ngrids,
1402 "Invalid combination of gridIndex and number of grids");
1404 const auto& settings = pmeGpu->settings;
1405 const bool copyInputAndOutputGrid = !settings.performGPUFFT || settings.copyAllOutputs;
1407 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1409 float* h_gridFloat = reinterpret_cast<float*>(h_grid);
1410 if (copyInputAndOutputGrid)
1412 copyToDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid[gridIndex],
1415 pmeGpu->archSpecific->complexGridSize[gridIndex],
1416 pmeGpu->archSpecific->pmeStream_,
1417 pmeGpu->settings.transferKind,
1421 int majorDim = -1, middleDim = -1, minorDim = -1;
1422 switch (gridOrdering)
1424 case GridOrdering::YZX:
1430 case GridOrdering::XYZ:
1436 default: GMX_ASSERT(false, "Implement grid ordering here and below for the kernel launch");
1439 const int maxBlockSize = pmeGpu->programHandle_->impl_->solveMaxWorkGroupSize;
1441 const int gridLineSize = pmeGpu->kernelParams->grid.complexGridSize[minorDim];
1442 const int gridLinesPerBlock = std::max(maxBlockSize / gridLineSize, 1);
1443 const int blocksPerGridLine = (gridLineSize + maxBlockSize - 1) / maxBlockSize;
1445 if (blocksPerGridLine == 1)
1447 cellsPerBlock = gridLineSize * gridLinesPerBlock;
1451 cellsPerBlock = (gridLineSize + blocksPerGridLine - 1) / blocksPerGridLine;
1453 const int warpSize = pmeGpu->programHandle_->warpSize();
1454 const int blockSize = (cellsPerBlock + warpSize - 1) / warpSize * warpSize;
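    /* Illustrative numbers (hypothetical, not from the original source): with warpSize = 32 and
     * cellsPerBlock = 231, the launch block size is rounded up to (231 + 31) / 32 * 32 = 256
     * threads, so warp-level shuffle operations always act on fully populated warps. */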
1456 static_assert(!GMX_GPU_CUDA || c_solveMaxWarpsPerBlock / 2 >= 4,
1457 "The CUDA solve energy kernels needs at least 4 warps. "
1458 "Here we launch at least half of the max warps.");
1460 KernelLaunchConfig config;
1461 config.blockSize[0] = blockSize;
1462 config.gridSize[0] = blocksPerGridLine;
1463 // rounding up to full warps so that shuffle operations produce defined results
1464 config.gridSize[1] = (pmeGpu->kernelParams->grid.complexGridSize[middleDim] + gridLinesPerBlock - 1)
1465 / gridLinesPerBlock;
1466 config.gridSize[2] = pmeGpu->kernelParams->grid.complexGridSize[majorDim];
1468 int timingId = gtPME_SOLVE;
1469 PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1470 if (gridOrdering == GridOrdering::YZX)
1474 kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveYZXEnergyKernelA
1475 : pmeGpu->programHandle_->impl_->solveYZXKernelA;
1479 kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveYZXEnergyKernelB
1480 : pmeGpu->programHandle_->impl_->solveYZXKernelB;
1483 else if (gridOrdering == GridOrdering::XYZ)
1487 kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveXYZEnergyKernelA
1488 : pmeGpu->programHandle_->impl_->solveXYZKernelA;
1492 kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveXYZEnergyKernelB
1493 : pmeGpu->programHandle_->impl_->solveXYZKernelB;
1497 pme_gpu_start_timing(pmeGpu, timingId);
1498 auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1499 #if c_canEmbedBuffers
1500 const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1502 const auto kernelArgs =
1503 prepareGpuKernelArguments(kernelPtr,
1506 &kernelParamsPtr->grid.d_splineModuli[gridIndex],
1507 &kernelParamsPtr->constants.d_virialAndEnergy[gridIndex],
1508 &kernelParamsPtr->grid.d_fourierGrid[gridIndex]);
1510 launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME solve", kernelArgs);
1511 pme_gpu_stop_timing(pmeGpu, timingId);
1513 if (computeEnergyAndVirial)
1515 copyFromDeviceBuffer(pmeGpu->staging.h_virialAndEnergy[gridIndex],
1516 &kernelParamsPtr->constants.d_virialAndEnergy[gridIndex],
1518 c_virialAndEnergyCount,
1519 pmeGpu->archSpecific->pmeStream_,
1520 pmeGpu->settings.transferKind,
1524 if (copyInputAndOutputGrid)
1526 copyFromDeviceBuffer(h_gridFloat,
1527 &kernelParamsPtr->grid.d_fourierGrid[gridIndex],
1529 pmeGpu->archSpecific->complexGridSize[gridIndex],
1530 pmeGpu->archSpecific->pmeStream_,
1531 pmeGpu->settings.transferKind,
/*! \brief
 * Returns a pointer to appropriate gather kernel based on the input values
 *
 * \param[in]  pmeGpu                  The PME GPU structure.
 * \param[in]  threadsPerAtom          Controls whether we should use order or order*order threads per atom
 * \param[in]  readSplinesFromGlobal   bool controlling if we should read spline data from global memory
 * \param[in]  numGrids                Number of grids to use. numGrids == 2 if Coulomb is perturbed.
 *
 * \return Pointer to CUDA kernel
 */
1546 inline auto selectGatherKernelPtr(const PmeGpu* pmeGpu,
1547 ThreadsPerAtom threadsPerAtom,
1548 bool readSplinesFromGlobal,
1552 PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1554 if (readSplinesFromGlobal)
1556 if (threadsPerAtom == ThreadsPerAtom::Order)
1560 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesThPerAtom4Dual;
1564 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesThPerAtom4Single;
1571 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesDual;
1575 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesSingle;
1581 if (threadsPerAtom == ThreadsPerAtom::Order)
1585 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelThPerAtom4Dual;
1589 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelThPerAtom4Single;
1596 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelDual;
1600 kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelSingle;
1607 void pme_gpu_gather(PmeGpu* pmeGpu, real** h_grids, const float lambda)
1610 pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
1611 "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
1613 const auto& settings = pmeGpu->settings;
1615 if (!settings.performGPUFFT || settings.copyAllOutputs)
1617 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
1619 float* h_grid = const_cast<float*>(h_grids[gridIndex]);
1620 pme_gpu_copy_input_gather_grid(pmeGpu, h_grid, gridIndex);
1624 if (settings.copyAllOutputs)
1626 pme_gpu_copy_input_gather_atom_data(pmeGpu);
1629 /* Set if we have unit tests */
1630 const bool readGlobal = pmeGpu->settings.copyAllOutputs;
1631 const size_t blockSize = pmeGpu->programHandle_->impl_->gatherWorkGroupSize;
1632 const int order = pmeGpu->common->pme_order;
1633 GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1634 const int threadsPerAtom =
1635 (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
1636 const bool recalculateSplines = pmeGpu->settings.recalculateSplines;
1638 GMX_ASSERT(!GMX_GPU_OPENCL || pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::OrderSquared,
1639 "Only 16 threads per atom supported in OpenCL");
1640 GMX_ASSERT(!GMX_GPU_OPENCL || !recalculateSplines,
1641 "Recalculating splines not supported in OpenCL");
1643 const int atomsPerBlock = blockSize / threadsPerAtom;
1645 GMX_ASSERT(!(c_pmeAtomDataBlockSize % atomsPerBlock),
1646 "inconsistent atom data padding vs. gathering block size");
1648 const int blockCount = pmeGpu->nAtomsAlloc / atomsPerBlock;
1649 auto dimGrid = pmeGpuCreateGrid(pmeGpu, blockCount);
1651 KernelLaunchConfig config;
1652 config.blockSize[0] = order;
1653 config.blockSize[1] = (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? 1 : order);
1654 config.blockSize[2] = atomsPerBlock;
1655 config.gridSize[0] = dimGrid.first;
1656 config.gridSize[1] = dimGrid.second;
1658 // TODO test different cache configs
1660 int timingId = gtPME_GATHER;
1661 PmeGpuProgramImpl::PmeKernelHandle kernelPtr =
1662 selectGatherKernelPtr(pmeGpu,
1663 pmeGpu->settings.threadsPerAtom,
1664 readGlobal || (!recalculateSplines),
1665 pmeGpu->common->ngrids);
1666 // TODO design kernel selection getters and make PmeGpu a friend of PmeGpuProgramImpl
1668 pme_gpu_start_timing(pmeGpu, timingId);
1669 auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1670 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1671 if (pmeGpu->common->ngrids == 1)
1673 kernelParamsPtr->current.scale = 1.0;
1677 kernelParamsPtr->current.scale = 1.0 - lambda;
1680 #if c_canEmbedBuffers
1681 const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1683 const auto kernelArgs =
1684 prepareGpuKernelArguments(kernelPtr,
1687 &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_A],
1688 &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_B],
1689 &kernelParamsPtr->grid.d_realGrid[FEP_STATE_A],
1690 &kernelParamsPtr->grid.d_realGrid[FEP_STATE_B],
1691 &kernelParamsPtr->atoms.d_theta,
1692 &kernelParamsPtr->atoms.d_dtheta,
1693 &kernelParamsPtr->atoms.d_gridlineIndices,
1694 &kernelParamsPtr->atoms.d_forces);
1696 launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME gather", kernelArgs);
1697 pme_gpu_stop_timing(pmeGpu, timingId);
1699 if (pmeGpu->settings.useGpuForceReduction)
1701 pmeGpu->archSpecific->pmeForcesReady.markEvent(pmeGpu->archSpecific->pmeStream_);
1705 pme_gpu_copy_output_forces(pmeGpu);
1709 void* pme_gpu_get_kernelparam_forces(const PmeGpu* pmeGpu)
1711 if (pmeGpu && pmeGpu->kernelParams)
1713 return pmeGpu->kernelParams->atoms.d_forces;
1721 void pme_gpu_set_kernelparam_coordinates(const PmeGpu* pmeGpu, DeviceBuffer<gmx::RVec> d_x)
1723 GMX_ASSERT(pmeGpu && pmeGpu->kernelParams,
1724 "PME GPU device buffer can not be set in non-GPU builds or before the GPU PME was "
1727 GMX_ASSERT(checkDeviceBuffer(d_x, pmeGpu->kernelParams->atoms.nAtoms),
1728 "The device-side buffer can not be set.");
1730 pmeGpu->kernelParams->atoms.d_coordinates = d_x;
1733 GpuEventSynchronizer* pme_gpu_get_forces_ready_synchronizer(const PmeGpu* pmeGpu)
1735 if (pmeGpu && pmeGpu->kernelParams)
1737 return &pmeGpu->archSpecific->pmeForcesReady;