/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \internal \file
 *
 * \brief This file contains internal function implementations
 * for performing the PME calculations on GPU.
 *
 * Note that this file is compiled as regular C++ source in OpenCL builds, but
 * it is treated as CUDA source in CUDA-enabled GPU builds.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 * \ingroup module_ewald
 */
#include "gmxpre.h"

#include "pme_gpu_internal.h"

#include "config.h"

#include <algorithm>
#include <cmath>
#include <cstring>
#include <memory>
#include <string>

#include "gromacs/ewald/ewald_utils.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/math/invertmatrix.h"
#include "gromacs/math/units.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/stringutil.h"

#if GMX_GPU == GMX_GPU_CUDA
#    include "gromacs/gpu_utils/pmalloc_cuda.h"
#elif GMX_GPU == GMX_GPU_OPENCL
#    include "gromacs/gpu_utils/gmxopencl.h"
#endif

#include "gromacs/ewald/pme.h"

#include "pme_gpu_3dfft.h"
#include "pme_gpu_constants.h"
#include "pme_gpu_program_impl.h"
#include "pme_gpu_timings.h"
#include "pme_gpu_types.h"
#include "pme_gpu_types_host.h"
#include "pme_gpu_types_host_impl.h"
#include "pme_gpu_utils.h"

#include "pme_internal.h"
#include "pme_solve.h"
/*! \brief
 * Controls if we should use order (i.e. 4) threads per atom for the GPU
 * or order*order (i.e. 16) threads per atom.
 */
constexpr bool c_useOrderThreadsPerAtom = false;

/*! \brief
 * Controls if we should recalculate the splines in the gather or
 * save the values in the spread and reload in the gather.
 */
constexpr bool c_recalculateSplines = false;
/*! \internal \brief
 * Wrapper for getting a pointer to the plain C++ part of the GPU kernel parameters structure.
 *
 * \param[in] pmeGpu  The PME GPU structure.
 * \returns The pointer to the kernel parameters.
 */
static PmeGpuKernelParamsBase* pme_gpu_get_kernel_params_base_ptr(const PmeGpu* pmeGpu)
{
    // reinterpret_cast is needed because the derived CUDA structure is not known in this file
    auto* kernelParamsPtr = reinterpret_cast<PmeGpuKernelParamsBase*>(pmeGpu->kernelParams.get());
    return kernelParamsPtr;
}

int pme_gpu_get_atom_data_alignment(const PmeGpu* /*unused*/)
{
    // TODO: this can be simplified, as c_pmeAtomDataAlignment is now constant
    return c_pmeAtomDataAlignment;
}

int pme_gpu_get_atoms_per_warp(const PmeGpu* pmeGpu)
{
    if (c_useOrderThreadsPerAtom)
    {
        return pmeGpu->programHandle_->impl_->warpSize / c_pmeSpreadGatherThreadsPerAtom4ThPerAtom;
    }
    else
    {
        return pmeGpu->programHandle_->impl_->warpSize / c_pmeSpreadGatherThreadsPerAtom;
    }
}
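
// Illustrative arithmetic (added commentary, a sketch rather than original source): assuming
// a warp size of 32 and the constants' usual values, c_pmeSpreadGatherThreadsPerAtom == 16
// gives 32 / 16 = 2 atoms per warp, while the order-threads-per-atom variant
// (c_pmeSpreadGatherThreadsPerAtom4ThPerAtom == 4) gives 32 / 4 = 8 atoms per warp.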
void pme_gpu_synchronize(const PmeGpu* pmeGpu)
{
    gpuStreamSynchronize(pmeGpu->archSpecific->pmeStream);
}

void pme_gpu_alloc_energy_virial(PmeGpu* pmeGpu)
{
    const size_t energyAndVirialSize = c_virialAndEnergyCount * sizeof(float);
    allocateDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy, c_virialAndEnergyCount,
                         pmeGpu->archSpecific->context);
    pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_virialAndEnergy), energyAndVirialSize);
}

void pme_gpu_free_energy_virial(PmeGpu* pmeGpu)
{
    freeDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy);
    pfree(pmeGpu->staging.h_virialAndEnergy);
    pmeGpu->staging.h_virialAndEnergy = nullptr;
}

void pme_gpu_clear_energy_virial(const PmeGpu* pmeGpu)
{
    clearDeviceBufferAsync(&pmeGpu->kernelParams->constants.d_virialAndEnergy, 0,
                           c_virialAndEnergyCount, pmeGpu->archSpecific->pmeStream);
}

void pme_gpu_realloc_and_copy_bspline_values(PmeGpu* pmeGpu)
{
    const int splineValuesOffset[DIM] = { 0, pmeGpu->kernelParams->grid.realGridSize[XX],
                                          pmeGpu->kernelParams->grid.realGridSize[XX]
                                                  + pmeGpu->kernelParams->grid.realGridSize[YY] };
    memcpy(&pmeGpu->kernelParams->grid.splineValuesOffset, &splineValuesOffset, sizeof(splineValuesOffset));

    const int newSplineValuesSize = pmeGpu->kernelParams->grid.realGridSize[XX]
                                    + pmeGpu->kernelParams->grid.realGridSize[YY]
                                    + pmeGpu->kernelParams->grid.realGridSize[ZZ];
    const bool shouldRealloc = (newSplineValuesSize > pmeGpu->archSpecific->splineValuesSize);
    reallocateDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, newSplineValuesSize,
                           &pmeGpu->archSpecific->splineValuesSize,
                           &pmeGpu->archSpecific->splineValuesSizeAlloc, pmeGpu->archSpecific->context);
    if (shouldRealloc)
    {
        /* Reallocate the host buffer */
        pfree(pmeGpu->staging.h_splineModuli);
        pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_splineModuli),
                newSplineValuesSize * sizeof(float));
    }
    for (int i = 0; i < DIM; i++)
    {
        memcpy(pmeGpu->staging.h_splineModuli + splineValuesOffset[i],
               pmeGpu->common->bsp_mod[i].data(), pmeGpu->common->bsp_mod[i].size() * sizeof(float));
    }
    /* TODO: pin original buffer instead! */
    copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, pmeGpu->staging.h_splineModuli,
                       0, newSplineValuesSize, pmeGpu->archSpecific->pmeStream,
                       pmeGpu->settings.transferKind, nullptr);
}
void pme_gpu_free_bspline_values(const PmeGpu* pmeGpu)
{
    pfree(pmeGpu->staging.h_splineModuli);
    freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli);
}

void pme_gpu_realloc_forces(PmeGpu* pmeGpu)
{
    const size_t newForcesSize = pmeGpu->nAtomsAlloc * DIM;
    GMX_ASSERT(newForcesSize > 0, "Bad number of atoms in PME GPU");
    reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, newForcesSize,
                           &pmeGpu->archSpecific->forcesSize,
                           &pmeGpu->archSpecific->forcesSizeAlloc, pmeGpu->archSpecific->context);
    pmeGpu->staging.h_forces.reserveWithPadding(pmeGpu->nAtomsAlloc);
    pmeGpu->staging.h_forces.resizeWithPadding(pmeGpu->kernelParams->atoms.nAtoms);
}

void pme_gpu_free_forces(const PmeGpu* pmeGpu)
{
    freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces);
}

void pme_gpu_copy_input_forces(PmeGpu* pmeGpu)
{
    GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
    float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
    copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, h_forcesFloat, 0,
                       DIM * pmeGpu->kernelParams->atoms.nAtoms, pmeGpu->archSpecific->pmeStream,
                       pmeGpu->settings.transferKind, nullptr);
}

void pme_gpu_copy_output_forces(PmeGpu* pmeGpu)
{
    GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
    float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
    copyFromDeviceBuffer(h_forcesFloat, &pmeGpu->kernelParams->atoms.d_forces, 0,
                         DIM * pmeGpu->kernelParams->atoms.nAtoms, pmeGpu->archSpecific->pmeStream,
                         pmeGpu->settings.transferKind, nullptr);
}

void pme_gpu_realloc_coordinates(const PmeGpu* pmeGpu)
{
    const size_t newCoordinatesSize = pmeGpu->nAtomsAlloc * DIM;
    GMX_ASSERT(newCoordinatesSize > 0, "Bad number of atoms in PME GPU");
    reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coordinates, newCoordinatesSize,
                           &pmeGpu->archSpecific->coordinatesSize,
                           &pmeGpu->archSpecific->coordinatesSizeAlloc, pmeGpu->archSpecific->context);
    if (c_usePadding)
    {
        const size_t paddingIndex = DIM * pmeGpu->kernelParams->atoms.nAtoms;
        const size_t paddingCount = DIM * pmeGpu->nAtomsAlloc - paddingIndex;
        if (paddingCount > 0)
        {
            clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coordinates, paddingIndex,
                                   paddingCount, pmeGpu->archSpecific->pmeStream);
        }
    }
}

void pme_gpu_free_coordinates(const PmeGpu* pmeGpu)
{
    freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coordinates);
}

void pme_gpu_realloc_and_copy_input_coefficients(const PmeGpu* pmeGpu, const float* h_coefficients)
{
    GMX_ASSERT(h_coefficients, "Bad host-side charge buffer in PME GPU");
    const size_t newCoefficientsSize = pmeGpu->nAtomsAlloc;
    GMX_ASSERT(newCoefficientsSize > 0, "Bad number of atoms in PME GPU");
    reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients, newCoefficientsSize,
                           &pmeGpu->archSpecific->coefficientsSize,
                           &pmeGpu->archSpecific->coefficientsSizeAlloc, pmeGpu->archSpecific->context);
    copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients,
                       const_cast<float*>(h_coefficients), 0, pmeGpu->kernelParams->atoms.nAtoms,
                       pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
    if (c_usePadding)
    {
        const size_t paddingIndex = pmeGpu->kernelParams->atoms.nAtoms;
        const size_t paddingCount = pmeGpu->nAtomsAlloc - paddingIndex;
        if (paddingCount > 0)
        {
            clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coefficients, paddingIndex,
                                   paddingCount, pmeGpu->archSpecific->pmeStream);
        }
    }
}

void pme_gpu_free_coefficients(const PmeGpu* pmeGpu)
{
    freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients);
}
void pme_gpu_realloc_spline_data(PmeGpu* pmeGpu)
{
    const int    order        = pmeGpu->common->pme_order;
    const int    alignment    = pme_gpu_get_atoms_per_warp(pmeGpu);
    const size_t nAtomsPadded = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
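    // Worked example (added commentary, a sketch rather than original source): the expression
    // above rounds nAtomsAlloc up to the nearest multiple of the warp-atom alignment, e.g. with
    // alignment == 2 and nAtomsAlloc == 5, nAtomsPadded == ((5 + 1) / 2) * 2 == 6.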
    const int newSplineDataSize = DIM * order * nAtomsPadded;
    GMX_ASSERT(newSplineDataSize > 0, "Bad number of atoms in PME GPU");
    /* Two arrays of the same size */
    const bool shouldRealloc        = (newSplineDataSize > pmeGpu->archSpecific->splineDataSize);
    int        currentSizeTemp      = pmeGpu->archSpecific->splineDataSize;
    int        currentSizeTempAlloc = pmeGpu->archSpecific->splineDataSizeAlloc;
    reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta, newSplineDataSize,
                           &currentSizeTemp, &currentSizeTempAlloc, pmeGpu->archSpecific->context);
    reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta, newSplineDataSize,
                           &pmeGpu->archSpecific->splineDataSize,
                           &pmeGpu->archSpecific->splineDataSizeAlloc, pmeGpu->archSpecific->context);
    // the host side reallocation
    if (shouldRealloc)
    {
        pfree(pmeGpu->staging.h_theta);
        pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_theta), newSplineDataSize * sizeof(float));
        pfree(pmeGpu->staging.h_dtheta);
        pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_dtheta), newSplineDataSize * sizeof(float));
    }
}
void pme_gpu_free_spline_data(const PmeGpu* pmeGpu)
{
    /* Two arrays of the same size */
    freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta);
    freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta);
    pfree(pmeGpu->staging.h_theta);
    pfree(pmeGpu->staging.h_dtheta);
}

void pme_gpu_realloc_grid_indices(PmeGpu* pmeGpu)
{
    const size_t newIndicesSize = DIM * pmeGpu->nAtomsAlloc;
    GMX_ASSERT(newIndicesSize > 0, "Bad number of atoms in PME GPU");
    reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices, newIndicesSize,
                           &pmeGpu->archSpecific->gridlineIndicesSize,
                           &pmeGpu->archSpecific->gridlineIndicesSizeAlloc, pmeGpu->archSpecific->context);
    pfree(pmeGpu->staging.h_gridlineIndices);
    pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_gridlineIndices), newIndicesSize * sizeof(int));
}

void pme_gpu_free_grid_indices(const PmeGpu* pmeGpu)
{
    freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices);
    pfree(pmeGpu->staging.h_gridlineIndices);
}

void pme_gpu_realloc_grids(PmeGpu* pmeGpu)
{
    auto*     kernelParamsPtr = pmeGpu->kernelParams.get();
    const int newRealGridSize = kernelParamsPtr->grid.realGridSizePadded[XX]
                                * kernelParamsPtr->grid.realGridSizePadded[YY]
                                * kernelParamsPtr->grid.realGridSizePadded[ZZ];
    const int newComplexGridSize = kernelParamsPtr->grid.complexGridSizePadded[XX]
                                   * kernelParamsPtr->grid.complexGridSizePadded[YY]
                                   * kernelParamsPtr->grid.complexGridSizePadded[ZZ] * 2;
    // Multiplied by 2 because we count complex grid size for complex numbers, but all allocations/pointers are float
    if (pmeGpu->archSpecific->performOutOfPlaceFFT)
    {
        /* 2 separate grids */
        reallocateDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, newComplexGridSize,
                               &pmeGpu->archSpecific->complexGridSize,
                               &pmeGpu->archSpecific->complexGridSizeAlloc, pmeGpu->archSpecific->context);
        reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid, newRealGridSize,
                               &pmeGpu->archSpecific->realGridSize,
                               &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->context);
    }
    else
    {
        /* A single buffer so that any grid will fit */
        const int newGridsSize = std::max(newRealGridSize, newComplexGridSize);
        reallocateDeviceBuffer(
                &kernelParamsPtr->grid.d_realGrid, newGridsSize, &pmeGpu->archSpecific->realGridSize,
                &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->context);
        kernelParamsPtr->grid.d_fourierGrid   = kernelParamsPtr->grid.d_realGrid;
        pmeGpu->archSpecific->complexGridSize = pmeGpu->archSpecific->realGridSize;
        // the size might get used later for copying the grid
    }
}

void pme_gpu_free_grids(const PmeGpu* pmeGpu)
{
    if (pmeGpu->archSpecific->performOutOfPlaceFFT)
    {
        freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_fourierGrid);
    }
    freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid);
}

void pme_gpu_clear_grids(const PmeGpu* pmeGpu)
{
    clearDeviceBufferAsync(&pmeGpu->kernelParams->grid.d_realGrid, 0,
                           pmeGpu->archSpecific->realGridSize, pmeGpu->archSpecific->pmeStream);
}
void pme_gpu_realloc_and_copy_fract_shifts(PmeGpu* pmeGpu)
{
    pme_gpu_free_fract_shifts(pmeGpu);

    auto* kernelParamsPtr = pmeGpu->kernelParams.get();

    const int nx                  = kernelParamsPtr->grid.realGridSize[XX];
    const int ny                  = kernelParamsPtr->grid.realGridSize[YY];
    const int nz                  = kernelParamsPtr->grid.realGridSize[ZZ];
    const int cellCount           = c_pmeNeighborUnitcellCount;
    const int gridDataOffset[DIM] = { 0, cellCount * nx, cellCount * (nx + ny) };

    memcpy(kernelParamsPtr->grid.tablesOffsets, &gridDataOffset, sizeof(gridDataOffset));

    const int newFractShiftsSize = cellCount * (nx + ny + nz);

#if GMX_GPU == GMX_GPU_CUDA
    initParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable, kernelParamsPtr->fractShiftsTableTexture,
                         pmeGpu->common->fsh.data(), newFractShiftsSize);
    initParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
                         kernelParamsPtr->gridlineIndicesTableTexture, pmeGpu->common->nn.data(),
                         newFractShiftsSize);
#elif GMX_GPU == GMX_GPU_OPENCL
    // No dedicated texture routines....
    allocateDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, newFractShiftsSize,
                         pmeGpu->archSpecific->context);
    allocateDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, newFractShiftsSize,
                         pmeGpu->archSpecific->context);
    copyToDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, pmeGpu->common->fsh.data(), 0,
                       newFractShiftsSize, pmeGpu->archSpecific->pmeStream,
                       GpuApiCallBehavior::Async, nullptr);
    copyToDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, pmeGpu->common->nn.data(), 0,
                       newFractShiftsSize, pmeGpu->archSpecific->pmeStream,
                       GpuApiCallBehavior::Async, nullptr);
#endif
}

void pme_gpu_free_fract_shifts(const PmeGpu* pmeGpu)
{
    auto* kernelParamsPtr = pmeGpu->kernelParams.get();
#if GMX_GPU == GMX_GPU_CUDA
    destroyParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable,
                            kernelParamsPtr->fractShiftsTableTexture);
    destroyParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
                            kernelParamsPtr->gridlineIndicesTableTexture);
#elif GMX_GPU == GMX_GPU_OPENCL
    freeDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable);
    freeDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable);
#endif
}
bool pme_gpu_stream_query(const PmeGpu* pmeGpu)
{
    return haveStreamTasksCompleted(pmeGpu->archSpecific->pmeStream);
}

void pme_gpu_copy_input_gather_grid(const PmeGpu* pmeGpu, float* h_grid)
{
    copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid, h_grid, 0, pmeGpu->archSpecific->realGridSize,
                       pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
}

void pme_gpu_copy_output_spread_grid(const PmeGpu* pmeGpu, float* h_grid)
{
    copyFromDeviceBuffer(h_grid, &pmeGpu->kernelParams->grid.d_realGrid, 0,
                         pmeGpu->archSpecific->realGridSize, pmeGpu->archSpecific->pmeStream,
                         pmeGpu->settings.transferKind, nullptr);
    pmeGpu->archSpecific->syncSpreadGridD2H.markEvent(pmeGpu->archSpecific->pmeStream);
}

void pme_gpu_copy_output_spread_atom_data(const PmeGpu* pmeGpu)
{
    const int    alignment    = pme_gpu_get_atoms_per_warp(pmeGpu);
    const size_t nAtomsPadded = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
    const size_t splinesCount = DIM * nAtomsPadded * pmeGpu->common->pme_order;
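    // Worked example (added commentary, a sketch rather than original source): with 2 atoms
    // per warp, nAtomsAlloc == 5 pads to nAtomsPadded == 6, so for PME order 4 the transfers
    // below move splinesCount == 3 * 6 * 4 == 72 floats per spline parameter array.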
    auto* kernelParamsPtr = pmeGpu->kernelParams.get();
    copyFromDeviceBuffer(pmeGpu->staging.h_dtheta, &kernelParamsPtr->atoms.d_dtheta, 0, splinesCount,
                         pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
    copyFromDeviceBuffer(pmeGpu->staging.h_theta, &kernelParamsPtr->atoms.d_theta, 0, splinesCount,
                         pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
    copyFromDeviceBuffer(pmeGpu->staging.h_gridlineIndices, &kernelParamsPtr->atoms.d_gridlineIndices,
                         0, kernelParamsPtr->atoms.nAtoms * DIM, pmeGpu->archSpecific->pmeStream,
                         pmeGpu->settings.transferKind, nullptr);
}

void pme_gpu_copy_input_gather_atom_data(const PmeGpu* pmeGpu)
{
    const int    alignment       = pme_gpu_get_atoms_per_warp(pmeGpu);
    const size_t nAtomsPadded    = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
    const size_t splinesCount    = DIM * nAtomsPadded * pmeGpu->common->pme_order;
    auto*        kernelParamsPtr = pmeGpu->kernelParams.get();
    if (c_usePadding)
    {
        // TODO: could clear only the padding and not the whole thing, but this is a test-exclusive code anyway
        clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_gridlineIndices, 0,
                               pmeGpu->nAtomsAlloc * DIM, pmeGpu->archSpecific->pmeStream);
        clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_dtheta, 0,
                               pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
                               pmeGpu->archSpecific->pmeStream);
        clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_theta, 0,
                               pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
                               pmeGpu->archSpecific->pmeStream);
    }
    copyToDeviceBuffer(&kernelParamsPtr->atoms.d_dtheta, pmeGpu->staging.h_dtheta, 0, splinesCount,
                       pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
    copyToDeviceBuffer(&kernelParamsPtr->atoms.d_theta, pmeGpu->staging.h_theta, 0, splinesCount,
                       pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
    copyToDeviceBuffer(&kernelParamsPtr->atoms.d_gridlineIndices, pmeGpu->staging.h_gridlineIndices,
                       0, kernelParamsPtr->atoms.nAtoms * DIM, pmeGpu->archSpecific->pmeStream,
                       pmeGpu->settings.transferKind, nullptr);
}

void pme_gpu_sync_spread_grid(const PmeGpu* pmeGpu)
{
    pmeGpu->archSpecific->syncSpreadGridD2H.waitForEvent();
}
void pme_gpu_init_internal(PmeGpu* pmeGpu)
{
#if GMX_GPU == GMX_GPU_CUDA
    // Prepare to use the device that this PME task was assigned earlier.
    // Other entities, such as CUDA timing events, are known to implicitly use the device context.
    CU_RET_ERR(cudaSetDevice(pmeGpu->deviceInfo->id), "Switching to PME CUDA device");
#endif

    /* Allocate the target-specific structures */
    pmeGpu->archSpecific.reset(new PmeGpuSpecific());
    pmeGpu->kernelParams.reset(new PmeGpuKernelParams());

    pmeGpu->archSpecific->performOutOfPlaceFFT = true;
    /* This should give better performance, according to the cuFFT documentation.
     * The performance seems to be the same though.
     * TODO: PME could also try to pick up nice grid sizes (with factors of 2, 3, 5, 7).
     */

    // TODO: this is just a convenient reuse because programHandle_ currently is in charge of creating context
    pmeGpu->archSpecific->context = pmeGpu->programHandle_->impl_->context;

    // timing enabling - TODO put this in gpu_utils (even though generally this is just option handling?) and reuse in NB
    if (GMX_GPU == GMX_GPU_CUDA)
    {
        /* WARNING: CUDA timings are incorrect with multiple streams.
         * This is the main reason why they are disabled by default.
         */
        // TODO: Consider turning on by default when we can detect nr of streams.
        pmeGpu->archSpecific->useTiming = (getenv("GMX_ENABLE_GPU_TIMING") != nullptr);
    }
    else if (GMX_GPU == GMX_GPU_OPENCL)
    {
        pmeGpu->archSpecific->useTiming = (getenv("GMX_DISABLE_GPU_TIMING") == nullptr);
    }

#if GMX_GPU == GMX_GPU_CUDA
    pmeGpu->maxGridWidthX = pmeGpu->deviceInfo->prop.maxGridSize[0];
#elif GMX_GPU == GMX_GPU_OPENCL
    pmeGpu->maxGridWidthX = INT32_MAX / 2;
    // TODO: is there really no global work size limit in OpenCL?
#endif

    /* Creating a PME GPU stream:
     * - default high priority with CUDA
     * - no priorities implemented yet with OpenCL; see #2532
     */
#if GMX_GPU == GMX_GPU_CUDA
    cudaError_t stat;
    int         highest_priority, lowest_priority;
    stat = cudaDeviceGetStreamPriorityRange(&lowest_priority, &highest_priority);
    CU_RET_ERR(stat, "PME cudaDeviceGetStreamPriorityRange failed");
    stat = cudaStreamCreateWithPriority(&pmeGpu->archSpecific->pmeStream,
                                        cudaStreamDefault, // cudaStreamNonBlocking,
                                        highest_priority);
    CU_RET_ERR(stat, "cudaStreamCreateWithPriority on the PME stream failed");
#elif GMX_GPU == GMX_GPU_OPENCL
    cl_command_queue_properties queueProperties =
            pmeGpu->archSpecific->useTiming ? CL_QUEUE_PROFILING_ENABLE : 0;
    cl_device_id device_id = pmeGpu->deviceInfo->ocl_gpu_id.ocl_device_id;
    cl_int       clError;
    pmeGpu->archSpecific->pmeStream =
            clCreateCommandQueue(pmeGpu->archSpecific->context, device_id, queueProperties, &clError);
    if (clError != CL_SUCCESS)
    {
        GMX_THROW(gmx::InternalError("Failed to create PME command queue"));
    }
#endif
}
void pme_gpu_destroy_specific(const PmeGpu* pmeGpu)
{
#if GMX_GPU == GMX_GPU_CUDA
    /* Destroy the CUDA stream */
    cudaError_t stat = cudaStreamDestroy(pmeGpu->archSpecific->pmeStream);
    CU_RET_ERR(stat, "PME cudaStreamDestroy error");
#elif GMX_GPU == GMX_GPU_OPENCL
    cl_int clError = clReleaseCommandQueue(pmeGpu->archSpecific->pmeStream);
    if (clError != CL_SUCCESS)
    {
        gmx_warning("Failed to destroy PME command queue");
    }
#endif
}

void pme_gpu_reinit_3dfft(const PmeGpu* pmeGpu)
{
    if (pme_gpu_performs_FFT(pmeGpu))
    {
        pmeGpu->archSpecific->fftSetup.resize(0);
        for (int i = 0; i < pmeGpu->common->ngrids; i++)
        {
            pmeGpu->archSpecific->fftSetup.push_back(std::make_unique<GpuParallel3dFft>(pmeGpu));
        }
    }
}

void pme_gpu_destroy_3dfft(const PmeGpu* pmeGpu)
{
    pmeGpu->archSpecific->fftSetup.resize(0);
}
int getSplineParamFullIndex(int order, int splineIndex, int dimIndex, int atomIndex, int atomsPerWarp)
{
    if (order != c_pmeGpuOrder)
    {
        throw order;
    }
    constexpr int fixedOrder = c_pmeGpuOrder;
    GMX_UNUSED_VALUE(fixedOrder);

    const int atomWarpIndex = atomIndex % atomsPerWarp;
    const int warpIndex     = atomIndex / atomsPerWarp;
    int       indexBase, result;
    switch (atomsPerWarp)
    {
        case 1:
            indexBase = getSplineParamIndexBase<fixedOrder, 1>(warpIndex, atomWarpIndex);
            result    = getSplineParamIndex<fixedOrder, 1>(indexBase, dimIndex, splineIndex);
            break;

        case 2:
            indexBase = getSplineParamIndexBase<fixedOrder, 2>(warpIndex, atomWarpIndex);
            result    = getSplineParamIndex<fixedOrder, 2>(indexBase, dimIndex, splineIndex);
            break;

        case 4:
            indexBase = getSplineParamIndexBase<fixedOrder, 4>(warpIndex, atomWarpIndex);
            result    = getSplineParamIndex<fixedOrder, 4>(indexBase, dimIndex, splineIndex);
            break;

        case 8:
            indexBase = getSplineParamIndexBase<fixedOrder, 8>(warpIndex, atomWarpIndex);
            result    = getSplineParamIndex<fixedOrder, 8>(indexBase, dimIndex, splineIndex);
            break;

        default:
            GMX_THROW(gmx::NotImplementedError(
                    gmx::formatString("Test function call not unrolled for atomsPerWarp = %d in "
                                      "getSplineParamFullIndex",
                                      atomsPerWarp)));
    }
    return result;
}
void pme_gpu_getEnergyAndVirial(const gmx_pme_t& pme, PmeOutput* output)
{
    const PmeGpu* pmeGpu = pme.gpu;
    for (int j = 0; j < c_virialAndEnergyCount; j++)
    {
        GMX_ASSERT(std::isfinite(pmeGpu->staging.h_virialAndEnergy[j]),
                   "PME GPU produces incorrect energy/virial.");
    }

    unsigned int j                 = 0;
    output->coulombVirial_[XX][XX] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
    output->coulombVirial_[YY][YY] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
    output->coulombVirial_[ZZ][ZZ] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
    output->coulombVirial_[XX][YY] = output->coulombVirial_[YY][XX] =
            0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
    output->coulombVirial_[XX][ZZ] = output->coulombVirial_[ZZ][XX] =
            0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
    output->coulombVirial_[YY][ZZ] = output->coulombVirial_[ZZ][YY] =
            0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
    output->coulombEnergy_ = 0.5F * pmeGpu->staging.h_virialAndEnergy[j++];
}

/*! \brief Sets the force-related members in \p output
 *
 * \param[in]  pmeGpu  PME GPU data structure
 * \param[out] output  Pointer to PME output data structure
 */
static void pme_gpu_getForceOutput(PmeGpu* pmeGpu, PmeOutput* output)
{
    output->haveForceOutput_ = !pmeGpu->settings.useGpuForceReduction;
    if (output->haveForceOutput_)
    {
        output->forces_ = pmeGpu->staging.h_forces;
    }
}

PmeOutput pme_gpu_getOutput(const gmx_pme_t& pme, const int flags)
{
    PmeGpu*    pmeGpu                      = pme.gpu;
    const bool haveComputedEnergyAndVirial = (flags & GMX_PME_CALC_ENER_VIR) != 0;

    PmeOutput output;

    pme_gpu_getForceOutput(pmeGpu, &output);

    // The caller knows from the flags that the energy and the virial are not usable
    // on the else branch
    if (haveComputedEnergyAndVirial)
    {
        if (pme_gpu_performs_solve(pmeGpu))
        {
            pme_gpu_getEnergyAndVirial(pme, &output);
        }
        else
        {
            get_pme_ener_vir_q(pme.solve_work, pme.nthread, &output);
        }
    }
    return output;
}
void pme_gpu_update_input_box(PmeGpu gmx_unused* pmeGpu, const matrix gmx_unused box)
{
#if GMX_DOUBLE
    GMX_THROW(gmx::NotImplementedError("PME is implemented for single-precision only on GPU"));
#else
    matrix scaledBox;
    pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
    auto* kernelParamsPtr              = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
    kernelParamsPtr->current.boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
    GMX_ASSERT(kernelParamsPtr->current.boxVolume != 0.0F, "Zero volume of the unit cell");
    matrix recipBox;
    gmx::invertBoxMatrix(scaledBox, recipBox);

    /* The GPU recipBox is transposed as compared to the CPU recipBox.
     * Spread uses matrix columns (while solve and gather use rows).
     * There is no particular reason for this; it might be further rethought/optimized for better access patterns.
     */
    const real newRecipBox[DIM][DIM] = { { recipBox[XX][XX], recipBox[YY][XX], recipBox[ZZ][XX] },
                                         { 0.0, recipBox[YY][YY], recipBox[ZZ][YY] },
                                         { 0.0, 0.0, recipBox[ZZ][ZZ] } };
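    // Illustration (added commentary, a sketch rather than original source): for a triclinic
    // box the CPU recipBox is lower-triangular, with rows (rxx, 0, 0), (ryx, ryy, 0),
    // (rzx, rzy, rzz); the initializer above stores its transpose, rows (rxx, ryx, rzx),
    // (0, ryy, rzy), (0, 0, rzz), so the spread kernel reads matrix columns as contiguous rows.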
    memcpy(kernelParamsPtr->current.recipBox, newRecipBox, sizeof(matrix));
#endif
}
/*! \brief \libinternal
 * (Re-)initializes all the PME GPU data related to the grid size and cut-off.
 *
 * \param[in] pmeGpu  The PME GPU structure.
 */
static void pme_gpu_reinit_grids(PmeGpu* pmeGpu)
{
    auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
    kernelParamsPtr->grid.ewaldFactor =
            (M_PI * M_PI) / (pmeGpu->common->ewaldcoeff_q * pmeGpu->common->ewaldcoeff_q);

    /* The grid size variants */
    for (int i = 0; i < DIM; i++)
    {
        kernelParamsPtr->grid.realGridSize[i] = pmeGpu->common->nk[i];
        kernelParamsPtr->grid.realGridSizeFP[i] =
                static_cast<float>(kernelParamsPtr->grid.realGridSize[i]);
        kernelParamsPtr->grid.realGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];

        // The complex grid currently uses no padding;
        // if it starts to do so, then another test should be added for that
        kernelParamsPtr->grid.complexGridSize[i]       = kernelParamsPtr->grid.realGridSize[i];
        kernelParamsPtr->grid.complexGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
    }
    /* FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
    if (!pme_gpu_performs_FFT(pmeGpu))
    {
        // This allows for GPU spreading grid and CPU fftgrid to have the same layout, so that we can copy the data directly
        kernelParamsPtr->grid.realGridSizePadded[ZZ] =
                (kernelParamsPtr->grid.realGridSize[ZZ] / 2 + 1) * 2;
    }

    /* GPU FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
    kernelParamsPtr->grid.complexGridSize[ZZ] /= 2;
    kernelParamsPtr->grid.complexGridSize[ZZ]++;
    kernelParamsPtr->grid.complexGridSizePadded[ZZ] = kernelParamsPtr->grid.complexGridSize[ZZ];
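    // Worked example (added commentary, a sketch rather than original source): for a real grid
    // with nz == 64, the two statements above yield 64 / 2 + 1 == 33 complex elements along the
    // minor dimension, matching the R2C FFT output size noted in the comment.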

    pme_gpu_realloc_and_copy_fract_shifts(pmeGpu);
    pme_gpu_realloc_and_copy_bspline_values(pmeGpu);
    pme_gpu_realloc_grids(pmeGpu);
    pme_gpu_reinit_3dfft(pmeGpu);
}

/* Several GPU functions that refer to the CPU PME data live here.
 * We would like to keep these away from the GPU-framework specific code for clarity,
 * as well as compilation issues with MPI.
 */
/*! \brief \libinternal
 * Copies everything useful from the PME CPU to the PME GPU structure.
 * The goal is to minimize interaction with the PME CPU structure in the GPU code.
 *
 * \param[in] pme  The PME structure.
 */
static void pme_gpu_copy_common_data_from(const gmx_pme_t* pme)
{
    /* TODO: Consider refactoring the CPU PME code to use the same structure,
     * so that this function becomes 2 lines */
    PmeGpu* pmeGpu               = pme->gpu;
    pmeGpu->common->ngrids       = pme->ngrids;
    pmeGpu->common->epsilon_r    = pme->epsilon_r;
    pmeGpu->common->ewaldcoeff_q = pme->ewaldcoeff_q;
    pmeGpu->common->nk[XX]       = pme->nkx;
    pmeGpu->common->nk[YY]       = pme->nky;
    pmeGpu->common->nk[ZZ]       = pme->nkz;
    pmeGpu->common->pme_order    = pme->pme_order;
    if (pmeGpu->common->pme_order != c_pmeGpuOrder)
    {
        GMX_THROW(gmx::NotImplementedError("pme_order != 4 is not implemented!"));
    }
    for (int i = 0; i < DIM; i++)
    {
        pmeGpu->common->bsp_mod[i].assign(pme->bsp_mod[i], pme->bsp_mod[i] + pmeGpu->common->nk[i]);
    }
    const int cellCount = c_pmeNeighborUnitcellCount;
    pmeGpu->common->fsh.resize(0);
    pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshx, pme->fshx + cellCount * pme->nkx);
    pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshy, pme->fshy + cellCount * pme->nky);
    pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshz, pme->fshz + cellCount * pme->nkz);
    pmeGpu->common->nn.resize(0);
    pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnx, pme->nnx + cellCount * pme->nkx);
    pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nny, pme->nny + cellCount * pme->nky);
    pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnz, pme->nnz + cellCount * pme->nkz);
    pmeGpu->common->runMode       = pme->runMode;
    pmeGpu->common->isRankPmeOnly = !pme->bPPnode;
    pmeGpu->common->boxScaler     = pme->boxScaler;
}
/*! \libinternal \brief
 * Initializes the PME GPU data at the beginning of the run.
 * TODO: this should become PmeGpu::PmeGpu()
 *
 * \param[in,out] pme            The PME structure.
 * \param[in,out] gpuInfo        The GPU information structure.
 * \param[in]     pmeGpuProgram  The handle to the program/kernel data created outside (e.g. in unit tests/runner)
 */
static void pme_gpu_init(gmx_pme_t* pme, const gmx_device_info_t* gpuInfo, PmeGpuProgramHandle pmeGpuProgram)
{
    pme->gpu       = new PmeGpu();
    PmeGpu* pmeGpu = pme->gpu;
    changePinningPolicy(&pmeGpu->staging.h_forces, pme_get_pinning_policy());
    pmeGpu->common = std::make_shared<PmeShared>();

    /* These settings are set here for the whole run; dynamic ones are set in pme_gpu_reinit() */
    /* A convenience variable. */
    pmeGpu->settings.useDecomposition = (pme->nnodes == 1);
    /* TODO: CPU gather with GPU spread is broken due to different theta/dtheta layout. */
    pmeGpu->settings.performGPUGather = true;
    // By default GPU-side reduction is off (explicitly set here for tests, otherwise reset per-step)
    pmeGpu->settings.useGpuForceReduction = false;

    pme_gpu_set_testing(pmeGpu, false);

    pmeGpu->deviceInfo = gpuInfo;
    GMX_ASSERT(pmeGpuProgram != nullptr, "GPU kernels must be already compiled");
    pmeGpu->programHandle_ = pmeGpuProgram;

    pmeGpu->initializedClfftLibrary_ = std::make_unique<gmx::ClfftInitializer>();

    pme_gpu_init_internal(pmeGpu);
    pme_gpu_alloc_energy_virial(pmeGpu);

    pme_gpu_copy_common_data_from(pme);

    GMX_ASSERT(pmeGpu->common->epsilon_r != 0.0F, "PME GPU: bad electrostatic coefficient");

    auto* kernelParamsPtr               = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
    kernelParamsPtr->constants.elFactor = ONE_4PI_EPS0 / pmeGpu->common->epsilon_r;
}
void pme_gpu_transform_spline_atom_data(const PmeGpu*      pmeGpu,
                                        const PmeAtomComm* atc,
                                        PmeSplineDataType  type,
                                        int                dimIndex,
                                        PmeLayoutTransform transform)
{
    // The GPU atom spline data is laid out in a different way currently than the CPU one.
    // This function converts the data from GPU to CPU layout (in the host memory).
    // It is only intended for testing purposes so far.
    // Ideally we should use similar layouts on CPU and GPU if we care about mixed modes and their
    // performance (e.g. spreading on GPU, gathering on CPU).
    GMX_RELEASE_ASSERT(atc->nthread == 1, "Only the serial PME data layout is supported");
    const uintmax_t threadIndex  = 0;
    const auto      atomCount    = pme_gpu_get_kernel_params_base_ptr(pmeGpu)->atoms.nAtoms;
    const auto      atomsPerWarp = pme_gpu_get_atoms_per_warp(pmeGpu);
    const auto      pmeOrder     = pmeGpu->common->pme_order;
    GMX_ASSERT(pmeOrder == c_pmeGpuOrder, "Only PME order 4 is implemented");

    real*  cpuSplineBuffer;
    float* h_splineBuffer;
    switch (type)
    {
        case PmeSplineDataType::Values:
            cpuSplineBuffer = atc->spline[threadIndex].theta.coefficients[dimIndex];
            h_splineBuffer  = pmeGpu->staging.h_theta;
            break;

        case PmeSplineDataType::Derivatives:
            cpuSplineBuffer = atc->spline[threadIndex].dtheta.coefficients[dimIndex];
            h_splineBuffer  = pmeGpu->staging.h_dtheta;
            break;

        default: GMX_THROW(gmx::InternalError("Unknown spline data type"));
    }

    for (auto atomIndex = 0; atomIndex < atomCount; atomIndex++)
    {
        for (auto orderIndex = 0; orderIndex < pmeOrder; orderIndex++)
        {
            const auto gpuValueIndex =
                    getSplineParamFullIndex(pmeOrder, orderIndex, dimIndex, atomIndex, atomsPerWarp);
            const auto cpuValueIndex = atomIndex * pmeOrder + orderIndex;
            GMX_ASSERT(cpuValueIndex < atomCount * pmeOrder,
                       "Atom spline data index out of bounds (while transforming GPU data layout "
                       "for host)");
            switch (transform)
            {
                case PmeLayoutTransform::GpuToHost:
                    cpuSplineBuffer[cpuValueIndex] = h_splineBuffer[gpuValueIndex];
                    break;

                case PmeLayoutTransform::HostToGpu:
                    h_splineBuffer[gpuValueIndex] = cpuSplineBuffer[cpuValueIndex];
                    break;

                default: GMX_THROW(gmx::InternalError("Unknown layout transform"));
            }
        }
    }
}
void pme_gpu_get_real_grid_sizes(const PmeGpu* pmeGpu, gmx::IVec* gridSize, gmx::IVec* paddedGridSize)
{
    GMX_ASSERT(gridSize != nullptr, "");
    GMX_ASSERT(paddedGridSize != nullptr, "");
    GMX_ASSERT(pmeGpu != nullptr, "");
    auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
    for (int i = 0; i < DIM; i++)
    {
        (*gridSize)[i]       = kernelParamsPtr->grid.realGridSize[i];
        (*paddedGridSize)[i] = kernelParamsPtr->grid.realGridSizePadded[i];
    }
}

void pme_gpu_reinit(gmx_pme_t* pme, const gmx_device_info_t* gpuInfo, PmeGpuProgramHandle pmeGpuProgram)
{
    if (!pme_gpu_active(pme))
    {
        return;
    }

    if (!pme->gpu)
    {
        /* First-time initialization */
        pme_gpu_init(pme, gpuInfo, pmeGpuProgram);
    }
    else
    {
        /* After this call nothing in the GPU code should refer to the gmx_pme_t *pme itself - until the next pme_gpu_reinit */
        pme_gpu_copy_common_data_from(pme);
    }

    /* GPU FFT will only get used for a single rank. */
    pme->gpu->settings.performGPUFFT =
            (pme->gpu->common->runMode == PmeRunMode::GPU) && !pme_gpu_uses_dd(pme->gpu);
    pme->gpu->settings.performGPUSolve = (pme->gpu->common->runMode == PmeRunMode::GPU);

    /* Reinit active timers */
    pme_gpu_reinit_timings(pme->gpu);

    pme_gpu_reinit_grids(pme->gpu);
    // Note: if timing the reinit launch overhead becomes more relevant
    // (e.g. with regular PP-PME re-balancing), we should pass wcycle here.
    pme_gpu_reinit_computation(pme, nullptr);
    /* Clear the previous box - doesn't hurt, and forces the PME CPU recipbox
     * update for mixed mode on grid switch. TODO: use shared recipbox field.
     */
    std::memset(pme->gpu->common->previousBox, 0, sizeof(pme->gpu->common->previousBox));
}
void pme_gpu_destroy(PmeGpu* pmeGpu)
{
    /* Free lots of data */
    pme_gpu_free_energy_virial(pmeGpu);
    pme_gpu_free_bspline_values(pmeGpu);
    pme_gpu_free_forces(pmeGpu);
    pme_gpu_free_coefficients(pmeGpu);
    pme_gpu_free_spline_data(pmeGpu);
    pme_gpu_free_grid_indices(pmeGpu);
    pme_gpu_free_fract_shifts(pmeGpu);
    pme_gpu_free_grids(pmeGpu);

    pme_gpu_destroy_3dfft(pmeGpu);

    /* Free the GPU-framework specific data last */
    pme_gpu_destroy_specific(pmeGpu);

    delete pmeGpu;
}
void pme_gpu_reinit_atoms(PmeGpu* pmeGpu, const int nAtoms, const real* charges)
{
    auto* kernelParamsPtr         = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
    kernelParamsPtr->atoms.nAtoms = nAtoms;
    const int alignment           = pme_gpu_get_atom_data_alignment(pmeGpu);
    pmeGpu->nAtomsPadded          = ((nAtoms + alignment - 1) / alignment) * alignment;
    const int  nAtomsAlloc        = c_usePadding ? pmeGpu->nAtomsPadded : nAtoms;
    const bool haveToRealloc =
            (pmeGpu->nAtomsAlloc < nAtomsAlloc); /* This check might be redundant, but is logical */
    pmeGpu->nAtomsAlloc = nAtomsAlloc;

#if GMX_DOUBLE
    GMX_RELEASE_ASSERT(false, "Only single precision supported");
    GMX_UNUSED_VALUE(charges);
#else
    pme_gpu_realloc_and_copy_input_coefficients(pmeGpu, reinterpret_cast<const float*>(charges));
    /* Could also be checked for haveToRealloc, but the copy always needs to be performed */
#endif

    if (haveToRealloc)
    {
        pme_gpu_realloc_forces(pmeGpu);
        pme_gpu_realloc_spline_data(pmeGpu);
        pme_gpu_realloc_grid_indices(pmeGpu);
    }
}
void pme_gpu_3dfft(const PmeGpu* pmeGpu, gmx_fft_direction dir, int grid_index)
{
    int timerId = (dir == GMX_FFT_REAL_TO_COMPLEX) ? gtPME_FFT_R2C : gtPME_FFT_C2R;

    pme_gpu_start_timing(pmeGpu, timerId);
    pmeGpu->archSpecific->fftSetup[grid_index]->perform3dFft(
            dir, pme_gpu_fetch_timing_event(pmeGpu, timerId));
    pme_gpu_stop_timing(pmeGpu, timerId);
}

/*! \brief
 * Given possibly large \p blockCount, returns a compact 1D or 2D grid for kernel scheduling,
 * to minimize number of unused blocks.
 */
std::pair<int, int> inline pmeGpuCreateGrid(const PmeGpu* pmeGpu, int blockCount)
{
    // How many maximum widths in X do we need (hopefully just one)
    const int minRowCount = (blockCount + pmeGpu->maxGridWidthX - 1) / pmeGpu->maxGridWidthX;
    // Trying to make things even
    const int colCount = (blockCount + minRowCount - 1) / minRowCount;
    GMX_ASSERT((colCount * minRowCount - blockCount) >= 0, "pmeGpuCreateGrid: totally wrong");
    GMX_ASSERT((colCount * minRowCount - blockCount) < minRowCount,
               "pmeGpuCreateGrid: excessive blocks");
    return std::pair<int, int>(colCount, minRowCount);
}
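
// Hypothetical usage (added commentary, illustration only): with blockCount == 100000 and
// maxGridWidthX == 65535, minRowCount == 2 and colCount == 50000, i.e. the call returns a
// 50000 x 2 launch grid; the asserts above guarantee fewer than minRowCount unused blocks.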
/*! \brief
 * Returns a pointer to appropriate spline and spread kernel based on the input bool values
 *
 * \param[in]  pmeGpu                  The PME GPU structure.
 * \param[in]  useOrderThreadsPerAtom  bool controlling if we should use order or order*order threads per atom
 * \param[in]  writeSplinesToGlobal    bool controlling if we should write spline data to global memory
 *
 * \return Pointer to CUDA kernel
 */
static auto selectSplineAndSpreadKernelPtr(const PmeGpu* pmeGpu, bool useOrderThreadsPerAtom, bool writeSplinesToGlobal)
{
    PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
    if (writeSplinesToGlobal)
    {
        if (useOrderThreadsPerAtom)
        {
            kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesThPerAtom4;
        }
        else
        {
            kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplines;
        }
    }
    else
    {
        if (useOrderThreadsPerAtom)
        {
            kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4;
        }
        else
        {
            kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernel;
        }
    }
    return kernelPtr;
}
/*! \brief
 * Returns a pointer to appropriate spline kernel based on the input bool values
 *
 * \param[in]  pmeGpu                  The PME GPU structure.
 * \param[in]  useOrderThreadsPerAtom  bool controlling if we should use order or order*order threads per atom
 * \param[in]  writeSplinesToGlobal    bool controlling if we should write spline data to global memory
 *
 * \return Pointer to CUDA kernel
 */
static auto selectSplineKernelPtr(const PmeGpu* pmeGpu, bool useOrderThreadsPerAtom, bool gmx_unused writeSplinesToGlobal)
{
    PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
    GMX_ASSERT(
            writeSplinesToGlobal,
            "Spline data should always be written to global memory when just calculating splines");

    if (useOrderThreadsPerAtom)
    {
        kernelPtr = pmeGpu->programHandle_->impl_->splineKernelThPerAtom4;
    }
    else
    {
        kernelPtr = pmeGpu->programHandle_->impl_->splineKernel;
    }
    return kernelPtr;
}
/*! \brief
 * Returns a pointer to appropriate spread kernel based on the input bool values
 *
 * \param[in]  pmeGpu                  The PME GPU structure.
 * \param[in]  useOrderThreadsPerAtom  bool controlling if we should use order or order*order threads per atom
 * \param[in]  writeSplinesToGlobal    bool controlling if we should write spline data to global memory
 *
 * \return Pointer to CUDA kernel
 */
static auto selectSpreadKernelPtr(const PmeGpu* pmeGpu, bool useOrderThreadsPerAtom, bool writeSplinesToGlobal)
{
    PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
    if (writeSplinesToGlobal)
    {
        if (useOrderThreadsPerAtom)
        {
            kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelThPerAtom4;
        }
        else
        {
            kernelPtr = pmeGpu->programHandle_->impl_->spreadKernel;
        }
    }
    else
    {
        /* if we are not saving the spline data we need to recalculate it
           using the spline and spread Kernel */
        if (useOrderThreadsPerAtom)
        {
            kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4;
        }
        else
        {
            kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernel;
        }
    }
    return kernelPtr;
}
void pme_gpu_spread(const PmeGpu*         pmeGpu,
                    GpuEventSynchronizer* xReadyOnDevice,
                    int gmx_unused        gridIndex,
                    real*                 h_grid,
                    bool                  computeSplines,
                    bool                  spreadCharges)
{
    GMX_ASSERT(computeSplines || spreadCharges,
               "PME spline/spread kernel has invalid input (nothing to do)");
    const auto* kernelParamsPtr = pmeGpu->kernelParams.get();
    GMX_ASSERT(kernelParamsPtr->atoms.nAtoms > 0, "No atom data in PME GPU spread");

    const size_t blockSize = pmeGpu->programHandle_->impl_->spreadWorkGroupSize;

    const int order = pmeGpu->common->pme_order;
    GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
    const bool writeGlobal = pmeGpu->settings.copyAllOutputs;
#if GMX_GPU == GMX_GPU_OPENCL
    GMX_ASSERT(!c_useOrderThreadsPerAtom, "Only 16 threads per atom supported in OpenCL");
    GMX_ASSERT(!c_recalculateSplines, "Recalculating splines not supported in OpenCL");
#endif
    const int atomsPerBlock = c_useOrderThreadsPerAtom ? blockSize / c_pmeSpreadGatherThreadsPerAtom4ThPerAtom
                                                       : blockSize / c_pmeSpreadGatherThreadsPerAtom;
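    // Illustrative arithmetic (added commentary, a sketch rather than original source): with a
    // work-group size of 256 and 16 threads per atom, each spread block handles 256 / 16 = 16
    // atoms; the order-threads-per-atom variant (4 threads) would handle 256 / 4 = 64 atoms.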

    // TODO: pick smaller block size in runtime if needed
    // (e.g. on 660 Ti where 50% occupancy is ~25% faster than 100% occupancy with RNAse (~17.8k atoms))
    // If doing so, change atomsPerBlock in the kernels as well.
    // TODO: test varying block sizes on modern arch-s as well
    // TODO: also consider using cudaFuncSetCacheConfig() for preferring shared memory on older architectures
    // (for spline data mostly)
    GMX_ASSERT(!c_usePadding || !(c_pmeAtomDataAlignment % atomsPerBlock),
               "inconsistent atom data padding vs. spreading block size");

    // Ensure that coordinates are ready on the device before launching spread;
    // only needed with CUDA on PP+PME ranks, not on separate PME ranks, in unit tests
    // nor in OpenCL as these cases use a single stream (hence xReadyOnDevice == nullptr).
    GMX_ASSERT(xReadyOnDevice != nullptr || (GMX_GPU != GMX_GPU_CUDA)
                       || pmeGpu->common->isRankPmeOnly || pme_gpu_is_testing(pmeGpu),
               "Need a valid coordinate synchronizer on PP+PME ranks with CUDA.");
    if (xReadyOnDevice)
    {
        xReadyOnDevice->enqueueWaitEvent(pmeGpu->archSpecific->pmeStream);
    }

    const int blockCount = pmeGpu->nAtomsPadded / atomsPerBlock;
    auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);

    KernelLaunchConfig config;
    config.blockSize[0] = order;
    config.blockSize[1] = c_useOrderThreadsPerAtom ? 1 : order;
    config.blockSize[2] = atomsPerBlock;
    config.gridSize[0]  = dimGrid.first;
    config.gridSize[1]  = dimGrid.second;
    config.stream       = pmeGpu->archSpecific->pmeStream;

    int                                timingId;
    PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
    if (computeSplines)
    {
        if (spreadCharges)
        {
            timingId  = gtPME_SPLINEANDSPREAD;
            kernelPtr = selectSplineAndSpreadKernelPtr(pmeGpu, c_useOrderThreadsPerAtom,
                                                       writeGlobal || (!c_recalculateSplines));
        }
        else
        {
            timingId  = gtPME_SPLINE;
            kernelPtr = selectSplineKernelPtr(pmeGpu, c_useOrderThreadsPerAtom,
                                              writeGlobal || (!c_recalculateSplines));
        }
    }
    else
    {
        timingId  = gtPME_SPREAD;
        kernelPtr = selectSpreadKernelPtr(pmeGpu, c_useOrderThreadsPerAtom,
                                          writeGlobal || (!c_recalculateSplines));
    }

    pme_gpu_start_timing(pmeGpu, timingId);
    auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
#if c_canEmbedBuffers
    const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
#else
    const auto kernelArgs = prepareGpuKernelArguments(
            kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->atoms.d_theta,
            &kernelParamsPtr->atoms.d_dtheta, &kernelParamsPtr->atoms.d_gridlineIndices,
            &kernelParamsPtr->grid.d_realGrid, &kernelParamsPtr->grid.d_fractShiftsTable,
            &kernelParamsPtr->grid.d_gridlineIndicesTable, &kernelParamsPtr->atoms.d_coefficients,
            &kernelParamsPtr->atoms.d_coordinates);
#endif

    launchGpuKernel(kernelPtr, config, timingEvent, "PME spline/spread", kernelArgs);
    pme_gpu_stop_timing(pmeGpu, timingId);

    const bool copyBackGrid =
            spreadCharges && (pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_FFT(pmeGpu));
    if (copyBackGrid)
    {
        pme_gpu_copy_output_spread_grid(pmeGpu, h_grid);
    }
    const bool copyBackAtomData =
            computeSplines && (pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_gather(pmeGpu));
    if (copyBackAtomData)
    {
        pme_gpu_copy_output_spread_atom_data(pmeGpu);
    }
}
void pme_gpu_solve(const PmeGpu* pmeGpu, t_complex* h_grid, GridOrdering gridOrdering, bool computeEnergyAndVirial)
{
    const bool copyInputAndOutputGrid = pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_FFT(pmeGpu);

    auto* kernelParamsPtr = pmeGpu->kernelParams.get();

    float* h_gridFloat = reinterpret_cast<float*>(h_grid);
    if (copyInputAndOutputGrid)
    {
        copyToDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, h_gridFloat, 0,
                           pmeGpu->archSpecific->complexGridSize, pmeGpu->archSpecific->pmeStream,
                           pmeGpu->settings.transferKind, nullptr);
    }

    int majorDim = -1, middleDim = -1, minorDim = -1;
    switch (gridOrdering)
    {
        case GridOrdering::YZX:
            majorDim  = YY;
            middleDim = ZZ;
            minorDim  = XX;
            break;

        case GridOrdering::XYZ:
            majorDim  = XX;
            middleDim = YY;
            minorDim  = ZZ;
            break;

        default: GMX_ASSERT(false, "Implement grid ordering here and below for the kernel launch");
    }

    const int maxBlockSize = pmeGpu->programHandle_->impl_->solveMaxWorkGroupSize;

    const int gridLineSize      = pmeGpu->kernelParams->grid.complexGridSize[minorDim];
    const int gridLinesPerBlock = std::max(maxBlockSize / gridLineSize, 1);
    const int blocksPerGridLine = (gridLineSize + maxBlockSize - 1) / maxBlockSize;
    int       cellsPerBlock;
    if (blocksPerGridLine == 1)
    {
        cellsPerBlock = gridLineSize * gridLinesPerBlock;
    }
    else
    {
        cellsPerBlock = (gridLineSize + blocksPerGridLine - 1) / blocksPerGridLine;
    }
    const int warpSize  = pmeGpu->programHandle_->impl_->warpSize;
    const int blockSize = (cellsPerBlock + warpSize - 1) / warpSize * warpSize;
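    // Worked example (added commentary, a sketch rather than original source): cellsPerBlock
    // == 100 with a warp size of 32 rounds up to blockSize == (100 + 31) / 32 * 32 == 128, so
    // the warp-level shuffle reductions in the solve kernel operate on fully populated warps.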

    static_assert(GMX_GPU != GMX_GPU_CUDA || c_solveMaxWarpsPerBlock / 2 >= 4,
                  "The CUDA solve energy kernels need at least 4 warps. "
                  "Here we launch at least half of the max warps.");

    KernelLaunchConfig config;
    config.blockSize[0] = blockSize;
    config.gridSize[0]  = blocksPerGridLine;
    // rounding up to full warps so that shuffle operations produce defined results
    config.gridSize[1] = (pmeGpu->kernelParams->grid.complexGridSize[middleDim] + gridLinesPerBlock - 1)
                         / gridLinesPerBlock;
    config.gridSize[2] = pmeGpu->kernelParams->grid.complexGridSize[majorDim];
    config.stream      = pmeGpu->archSpecific->pmeStream;

    int                                timingId  = gtPME_SOLVE;
    PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
    if (gridOrdering == GridOrdering::YZX)
    {
        kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveYZXEnergyKernel
                                           : pmeGpu->programHandle_->impl_->solveYZXKernel;
    }
    else if (gridOrdering == GridOrdering::XYZ)
    {
        kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveXYZEnergyKernel
                                           : pmeGpu->programHandle_->impl_->solveXYZKernel;
    }

    pme_gpu_start_timing(pmeGpu, timingId);
    auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
#if c_canEmbedBuffers
    const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
#else
    const auto kernelArgs = prepareGpuKernelArguments(
            kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->grid.d_splineModuli,
            &kernelParamsPtr->constants.d_virialAndEnergy, &kernelParamsPtr->grid.d_fourierGrid);
#endif
    launchGpuKernel(kernelPtr, config, timingEvent, "PME solve", kernelArgs);
    pme_gpu_stop_timing(pmeGpu, timingId);

    if (computeEnergyAndVirial)
    {
        copyFromDeviceBuffer(pmeGpu->staging.h_virialAndEnergy,
                             &kernelParamsPtr->constants.d_virialAndEnergy, 0, c_virialAndEnergyCount,
                             pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
    }

    if (copyInputAndOutputGrid)
    {
        copyFromDeviceBuffer(h_gridFloat, &kernelParamsPtr->grid.d_fourierGrid, 0,
                             pmeGpu->archSpecific->complexGridSize, pmeGpu->archSpecific->pmeStream,
                             pmeGpu->settings.transferKind, nullptr);
    }
}
/*! \brief
 * Returns a pointer to appropriate gather kernel based on the input values
 *
 * \param[in]  pmeGpu                  The PME GPU structure.
 * \param[in]  useOrderThreadsPerAtom  bool controlling if we should use order or order*order threads per atom
 * \param[in]  readSplinesFromGlobal   bool controlling if we should read spline data from global memory
 * \param[in]  forceTreatment          Controls if the forces from the gather should increment or replace the input forces.
 *
 * \return Pointer to CUDA kernel
 */
inline auto selectGatherKernelPtr(const PmeGpu*          pmeGpu,
                                  bool                   useOrderThreadsPerAtom,
                                  bool                   readSplinesFromGlobal,
                                  PmeForceOutputHandling forceTreatment)
{
    PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;

    if (readSplinesFromGlobal)
    {
        if (useOrderThreadsPerAtom)
        {
            kernelPtr = (forceTreatment == PmeForceOutputHandling::Set)
                                ? pmeGpu->programHandle_->impl_->gatherKernelReadSplinesThPerAtom4
                                : pmeGpu->programHandle_->impl_->gatherReduceWithInputKernelReadSplinesThPerAtom4;
        }
        else
        {
            kernelPtr = (forceTreatment == PmeForceOutputHandling::Set)
                                ? pmeGpu->programHandle_->impl_->gatherKernelReadSplines
                                : pmeGpu->programHandle_->impl_->gatherReduceWithInputKernelReadSplines;
        }
    }
    else
    {
        if (useOrderThreadsPerAtom)
        {
            kernelPtr = (forceTreatment == PmeForceOutputHandling::Set)
                                ? pmeGpu->programHandle_->impl_->gatherKernelThPerAtom4
                                : pmeGpu->programHandle_->impl_->gatherReduceWithInputKernelThPerAtom4;
        }
        else
        {
            kernelPtr = (forceTreatment == PmeForceOutputHandling::Set)
                                ? pmeGpu->programHandle_->impl_->gatherKernel
                                : pmeGpu->programHandle_->impl_->gatherReduceWithInputKernel;
        }
    }
    return kernelPtr;
}
void pme_gpu_gather(PmeGpu* pmeGpu, PmeForceOutputHandling forceTreatment, const float* h_grid)
{
    /* Copying the input CPU forces for reduction */
    if (forceTreatment != PmeForceOutputHandling::Set)
    {
        pme_gpu_copy_input_forces(pmeGpu);
    }

    if (!pme_gpu_performs_FFT(pmeGpu) || pme_gpu_is_testing(pmeGpu))
    {
        pme_gpu_copy_input_gather_grid(pmeGpu, const_cast<float*>(h_grid));
    }

    if (pme_gpu_is_testing(pmeGpu))
    {
        pme_gpu_copy_input_gather_atom_data(pmeGpu);
    }

    /* Set if we have unit tests */
    const bool   readGlobal = pmeGpu->settings.copyAllOutputs;
    const size_t blockSize  = pmeGpu->programHandle_->impl_->gatherWorkGroupSize;
#if GMX_GPU == GMX_GPU_OPENCL
    GMX_ASSERT(!c_useOrderThreadsPerAtom, "Only 16 threads per atom supported in OpenCL");
    GMX_ASSERT(!c_recalculateSplines, "Recalculating splines not supported in OpenCL");
#endif
    const int atomsPerBlock = c_useOrderThreadsPerAtom ? blockSize / c_pmeSpreadGatherThreadsPerAtom4ThPerAtom
                                                       : blockSize / c_pmeSpreadGatherThreadsPerAtom;

    GMX_ASSERT(!c_usePadding || !(c_pmeAtomDataAlignment % atomsPerBlock),
               "inconsistent atom data padding vs. gathering block size");

    const int blockCount = pmeGpu->nAtomsPadded / atomsPerBlock;
    auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
1452 auto dimGrid = pmeGpuCreateGrid(pmeGpu, blockCount);
1454 const int order = pmeGpu->common->pme_order;
1455 GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1457 KernelLaunchConfig config;
1458 config.blockSize[0] = order;
1459 config.blockSize[1] = c_useOrderThreadsPerAtom ? 1 : order;
1460 config.blockSize[2] = atomsPerBlock;
1461 config.gridSize[0] = dimGrid.first;
1462 config.gridSize[1] = dimGrid.second;
1463 config.stream = pmeGpu->archSpecific->pmeStream;

    // TODO test different cache configs

    int                                timingId  = gtPME_GATHER;
    PmeGpuProgramImpl::PmeKernelHandle kernelPtr = selectGatherKernelPtr(
            pmeGpu, c_useOrderThreadsPerAtom, readGlobal || (!c_recalculateSplines), forceTreatment);
    // TODO design kernel selection getters and make PmeGpu a friend of PmeGpuProgramImpl

    pme_gpu_start_timing(pmeGpu, timingId);
    auto*       timingEvent     = pme_gpu_fetch_timing_event(pmeGpu, timingId);
    const auto* kernelParamsPtr = pmeGpu->kernelParams.get();
#if c_canEmbedBuffers
    const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
#else
    const auto kernelArgs = prepareGpuKernelArguments(
            kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->atoms.d_coefficients,
            &kernelParamsPtr->grid.d_realGrid, &kernelParamsPtr->atoms.d_theta,
            &kernelParamsPtr->atoms.d_dtheta, &kernelParamsPtr->atoms.d_gridlineIndices,
            &kernelParamsPtr->atoms.d_forces);
#endif
    launchGpuKernel(kernelPtr, config, timingEvent, "PME gather", kernelArgs);
    pme_gpu_stop_timing(pmeGpu, timingId);

    if (pmeGpu->settings.useGpuForceReduction)
    {
        pmeGpu->archSpecific->pmeForcesReady.markEvent(pmeGpu->archSpecific->pmeStream);
    }
    else
    {
        pme_gpu_copy_output_forces(pmeGpu);
    }
}
DeviceBuffer<float> pme_gpu_get_kernelparam_coordinates(const PmeGpu* pmeGpu)
{
    GMX_ASSERT(pmeGpu && pmeGpu->kernelParams,
               "PME GPU device buffer was requested in non-GPU build or before the GPU PME was "
               "initialized.");

    return pmeGpu->kernelParams->atoms.d_coordinates;
}

void* pme_gpu_get_kernelparam_forces(const PmeGpu* pmeGpu)
{
    if (pmeGpu && pmeGpu->kernelParams)
    {
        return pmeGpu->kernelParams->atoms.d_forces;
    }
    else
    {
        return nullptr;
    }
}
/*! \brief Check the validity of the device buffer.
 *
 * Checks if the buffer is not nullptr and, when possible, if it is big enough.
 *
 * \todo Split and move this function to gpu_utils.
 *
 * \param[in] buffer        Device buffer to be checked.
 * \param[in] requiredSize  Number of elements that the buffer will have to accommodate.
 *
 * \returns If the device buffer can be set.
 */
template<typename T>
static bool checkDeviceBuffer(gmx_unused DeviceBuffer<T> buffer, gmx_unused int requiredSize)
{
#if GMX_GPU == GMX_GPU_CUDA
    GMX_ASSERT(buffer != nullptr, "The device pointer is nullptr");
    return buffer != nullptr;
#elif GMX_GPU == GMX_GPU_OPENCL
    size_t size;
    int    retval = clGetMemObjectInfo(buffer, CL_MEM_SIZE, sizeof(size), &size, nullptr);
    GMX_ASSERT(retval == CL_SUCCESS,
               gmx::formatString("clGetMemObjectInfo failed with error code #%d", retval).c_str());
    GMX_ASSERT(static_cast<int>(size) >= requiredSize,
               "Number of atoms in device buffer is smaller than required size.");
    return retval == CL_SUCCESS && static_cast<int>(size) >= requiredSize;
#elif GMX_GPU == GMX_GPU_NONE
    GMX_ASSERT(false, "Setter for device-side coordinates was called in non-GPU build.");
    return false;
#endif
}
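
// Hypothetical usage (added commentary, illustration only): guard a buffer swap with
//   GMX_ASSERT(checkDeviceBuffer(d_x, nAtoms), "The device-side buffer can not be set.");
// as pme_gpu_set_kernelparam_coordinates() below does for the coordinate buffer.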
void pme_gpu_set_kernelparam_coordinates(const PmeGpu* pmeGpu, DeviceBuffer<float> d_x)
{
    GMX_ASSERT(pmeGpu && pmeGpu->kernelParams,
               "PME GPU device buffer can not be set in non-GPU builds or before the GPU PME was "
               "initialized.");

    GMX_ASSERT(checkDeviceBuffer(d_x, pmeGpu->kernelParams->atoms.nAtoms),
               "The device-side buffer can not be set.");

    pmeGpu->kernelParams->atoms.d_coordinates = d_x;
}

void* pme_gpu_get_stream(const PmeGpu* pmeGpu)
{
    if (pmeGpu)
    {
        return static_cast<void*>(&pmeGpu->archSpecific->pmeStream);
    }
    else
    {
        return nullptr;
    }
}

void* pme_gpu_get_context(const PmeGpu* pmeGpu)
{
    if (pmeGpu)
    {
        return static_cast<void*>(&pmeGpu->archSpecific->context);
    }
    else
    {
        return nullptr;
    }
}

GpuEventSynchronizer* pme_gpu_get_forces_ready_synchronizer(const PmeGpu* pmeGpu)
{
    if (pmeGpu && pmeGpu->kernelParams)
    {
        return &pmeGpu->archSpecific->pmeForcesReady;
    }
    else
    {
        return nullptr;
    }
}