/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \internal \file
 *
 * \brief This file contains internal function implementations
 * for performing the PME calculations on GPU.
 *
 * Note that this file is compiled as regular C++ source in OpenCL builds, but
 * it is treated as CUDA source in CUDA-enabled GPU builds.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 * \ingroup module_ewald
 */

#include "gmxpre.h"

#include "pme-gpu-internal.h"

#include "config.h"

#include "gromacs/ewald/ewald-utils.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/math/invertmatrix.h"
#include "gromacs/math/units.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/stringutil.h"

#if GMX_GPU == GMX_GPU_CUDA
#include "gromacs/gpu_utils/pmalloc_cuda.h"

#include "pme.cuh"
#elif GMX_GPU == GMX_GPU_OPENCL
#include "gromacs/gpu_utils/gmxopencl.h"
#endif

#include "gromacs/ewald/pme.h"

#include "pme-gpu-3dfft.h"
#include "pme-gpu-constants.h"
#include "pme-gpu-program-impl.h"
#include "pme-gpu-timings.h"
#include "pme-gpu-types.h"
#include "pme-gpu-types-host.h"
#include "pme-gpu-types-host-impl.h"
#include "pme-gpu-utils.h"
#include "pme-internal.h"

/*! \brief \libinternal
 * Wrapper for getting a pointer to the plain C++ part of the GPU kernel parameters structure.
 *
 * \param[in] pmeGpu  The PME GPU structure.
 * \returns The pointer to the kernel parameters.
 */
static PmeGpuKernelParamsBase *pme_gpu_get_kernel_params_base_ptr(const PmeGpu *pmeGpu)
{
    // reinterpret_cast is needed because the derived CUDA structure is not known in this file
    auto *kernelParamsPtr = reinterpret_cast<PmeGpuKernelParamsBase *>(pmeGpu->kernelParams.get());
    return kernelParamsPtr;
}

int pme_gpu_get_atom_data_alignment(const PmeGpu *)
{
    // TODO: this can be simplified, as PME_ATOM_DATA_ALIGNMENT is now constant
    return PME_ATOM_DATA_ALIGNMENT;
}

int pme_gpu_get_atoms_per_warp(const PmeGpu *pmeGpu)
{
#if GMX_GPU == GMX_GPU_CUDA
    const int order = pmeGpu->common->pme_order;
    GMX_ASSERT(order > 0, "Invalid PME order");
    return PME_SPREADGATHER_ATOMS_PER_WARP;
#else
    GMX_THROW(gmx::NotImplementedError("Atom alignment per warp has to be deduced dynamically for OpenCL"));
    GMX_UNUSED_VALUE(pmeGpu);
#endif
}

void pme_gpu_synchronize(const PmeGpu *pmeGpu)
{
    gpuStreamSynchronize(pmeGpu->archSpecific->pmeStream);
}

void pme_gpu_alloc_energy_virial(const PmeGpu *pmeGpu)
{
    const size_t energyAndVirialSize = c_virialAndEnergyCount * sizeof(float);
    allocateDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy, c_virialAndEnergyCount, pmeGpu->archSpecific->context);
    pmalloc((void **)&pmeGpu->staging.h_virialAndEnergy, energyAndVirialSize);
}

void pme_gpu_free_energy_virial(PmeGpu *pmeGpu)
{
    freeDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy);
    pfree(pmeGpu->staging.h_virialAndEnergy);
    pmeGpu->staging.h_virialAndEnergy = nullptr;
}

void pme_gpu_clear_energy_virial(const PmeGpu *pmeGpu)
{
    clearDeviceBufferAsync(&pmeGpu->kernelParams->constants.d_virialAndEnergy, 0,
                           c_virialAndEnergyCount, pmeGpu->archSpecific->pmeStream);
}

void pme_gpu_realloc_and_copy_bspline_values(const PmeGpu *pmeGpu)
{
    const int splineValuesOffset[DIM] = {
        0,
        pmeGpu->kernelParams->grid.realGridSize[XX],
        pmeGpu->kernelParams->grid.realGridSize[XX] + pmeGpu->kernelParams->grid.realGridSize[YY]
    };
    memcpy((void *)&pmeGpu->kernelParams->grid.splineValuesOffset, &splineValuesOffset, sizeof(splineValuesOffset));

    const int newSplineValuesSize = pmeGpu->kernelParams->grid.realGridSize[XX] +
                                    pmeGpu->kernelParams->grid.realGridSize[YY] +
                                    pmeGpu->kernelParams->grid.realGridSize[ZZ];
    const bool shouldRealloc = (newSplineValuesSize > pmeGpu->archSpecific->splineValuesSize);
    reallocateDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, newSplineValuesSize,
                           &pmeGpu->archSpecific->splineValuesSize, &pmeGpu->archSpecific->splineValuesSizeAlloc, pmeGpu->archSpecific->context);
    if (shouldRealloc)
    {
        /* Reallocate the host buffer */
        pfree(pmeGpu->staging.h_splineModuli);
        pmalloc((void **)&pmeGpu->staging.h_splineModuli, newSplineValuesSize * sizeof(float));
    }
    for (int i = 0; i < DIM; i++)
    {
        memcpy(pmeGpu->staging.h_splineModuli + splineValuesOffset[i], pmeGpu->common->bsp_mod[i].data(), pmeGpu->common->bsp_mod[i].size() * sizeof(float));
    }
    /* TODO: pin original buffer instead! */
    copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, pmeGpu->staging.h_splineModuli,
                       0, newSplineValuesSize,
                       pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
}

void pme_gpu_free_bspline_values(const PmeGpu *pmeGpu)
{
    pfree(pmeGpu->staging.h_splineModuli);
    freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli);
}

void pme_gpu_realloc_forces(PmeGpu *pmeGpu)
{
    const size_t newForcesSize = pmeGpu->nAtomsAlloc * DIM;
    GMX_ASSERT(newForcesSize > 0, "Bad number of atoms in PME GPU");
    reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, newForcesSize,
                           &pmeGpu->archSpecific->forcesSize, &pmeGpu->archSpecific->forcesSizeAlloc, pmeGpu->archSpecific->context);
    pmeGpu->staging.h_forces.reserve(pmeGpu->nAtomsAlloc);
    pmeGpu->staging.h_forces.resize(pmeGpu->kernelParams->atoms.nAtoms);
}

void pme_gpu_free_forces(const PmeGpu *pmeGpu)
{
    freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces);
}

void pme_gpu_copy_input_forces(PmeGpu *pmeGpu)
{
    GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
    float *h_forcesFloat = reinterpret_cast<float *>(pmeGpu->staging.h_forces.data());
    copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, h_forcesFloat,
                       0, DIM * pmeGpu->kernelParams->atoms.nAtoms,
                       pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
}

void pme_gpu_copy_output_forces(PmeGpu *pmeGpu)
{
    GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
    float *h_forcesFloat = reinterpret_cast<float *>(pmeGpu->staging.h_forces.data());
    copyFromDeviceBuffer(h_forcesFloat, &pmeGpu->kernelParams->atoms.d_forces,
                         0, DIM * pmeGpu->kernelParams->atoms.nAtoms,
                         pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
}

void pme_gpu_realloc_coordinates(const PmeGpu *pmeGpu)
{
    const size_t newCoordinatesSize = pmeGpu->nAtomsAlloc * DIM;
    GMX_ASSERT(newCoordinatesSize > 0, "Bad number of atoms in PME GPU");
    reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coordinates, newCoordinatesSize,
                           &pmeGpu->archSpecific->coordinatesSize, &pmeGpu->archSpecific->coordinatesSizeAlloc, pmeGpu->archSpecific->context);
    if (c_usePadding)
    {
        const size_t paddingIndex = DIM * pmeGpu->kernelParams->atoms.nAtoms;
        const size_t paddingCount = DIM * pmeGpu->nAtomsAlloc - paddingIndex;
        if (paddingCount > 0)
        {
            clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coordinates, paddingIndex,
                                   paddingCount, pmeGpu->archSpecific->pmeStream);
        }
    }
}

void pme_gpu_copy_input_coordinates(const PmeGpu *pmeGpu, const rvec *h_coordinates)
{
    GMX_ASSERT(h_coordinates, "Bad host-side coordinate buffer in PME GPU");
#if GMX_DOUBLE
    GMX_RELEASE_ASSERT(false, "Only single precision is supported");
    GMX_UNUSED_VALUE(h_coordinates);
#else
    const float *h_coordinatesFloat = reinterpret_cast<const float *>(h_coordinates);
    copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coordinates, h_coordinatesFloat,
                       0, pmeGpu->kernelParams->atoms.nAtoms * DIM,
                       pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
#endif
}

void pme_gpu_free_coordinates(const PmeGpu *pmeGpu)
{
    freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coordinates);
}

void pme_gpu_realloc_and_copy_input_coefficients(const PmeGpu *pmeGpu, const float *h_coefficients)
{
    GMX_ASSERT(h_coefficients, "Bad host-side charge buffer in PME GPU");
    const size_t newCoefficientsSize = pmeGpu->nAtomsAlloc;
    GMX_ASSERT(newCoefficientsSize > 0, "Bad number of atoms in PME GPU");
    reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients, newCoefficientsSize,
                           &pmeGpu->archSpecific->coefficientsSize, &pmeGpu->archSpecific->coefficientsSizeAlloc, pmeGpu->archSpecific->context);
    copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients, const_cast<float *>(h_coefficients),
                       0, pmeGpu->kernelParams->atoms.nAtoms,
                       pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
    if (c_usePadding)
    {
        const size_t paddingIndex = pmeGpu->kernelParams->atoms.nAtoms;
        const size_t paddingCount = pmeGpu->nAtomsAlloc - paddingIndex;
        if (paddingCount > 0)
        {
            clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coefficients, paddingIndex,
                                   paddingCount, pmeGpu->archSpecific->pmeStream);
        }
    }
}

void pme_gpu_free_coefficients(const PmeGpu *pmeGpu)
{
    freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients);
}

void pme_gpu_realloc_spline_data(const PmeGpu *pmeGpu)
{
    const int order = pmeGpu->common->pme_order;
    const int alignment = pme_gpu_get_atoms_per_warp(pmeGpu);
    const size_t nAtomsPadded = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
    const int newSplineDataSize = DIM * order * nAtomsPadded;
    GMX_ASSERT(newSplineDataSize > 0, "Bad number of atoms in PME GPU");
    /* Two arrays of the same size */
    const bool shouldRealloc = (newSplineDataSize > pmeGpu->archSpecific->splineDataSize);
    int currentSizeTemp = pmeGpu->archSpecific->splineDataSize;
    int currentSizeTempAlloc = pmeGpu->archSpecific->splineDataSizeAlloc;
    reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta, newSplineDataSize,
                           &currentSizeTemp, &currentSizeTempAlloc, pmeGpu->archSpecific->context);
    reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta, newSplineDataSize,
                           &pmeGpu->archSpecific->splineDataSize, &pmeGpu->archSpecific->splineDataSizeAlloc, pmeGpu->archSpecific->context);
    // the host side reallocation
    if (shouldRealloc)
    {
        pfree(pmeGpu->staging.h_theta);
        pmalloc((void **)&pmeGpu->staging.h_theta, newSplineDataSize * sizeof(float));
        pfree(pmeGpu->staging.h_dtheta);
        pmalloc((void **)&pmeGpu->staging.h_dtheta, newSplineDataSize * sizeof(float));
    }
}

void pme_gpu_free_spline_data(const PmeGpu *pmeGpu)
{
    /* Two arrays of the same size */
    freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta);
    freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta);
    pfree(pmeGpu->staging.h_theta);
    pfree(pmeGpu->staging.h_dtheta);
}

void pme_gpu_realloc_grid_indices(const PmeGpu *pmeGpu)
{
    const size_t newIndicesSize = DIM * pmeGpu->nAtomsAlloc;
    GMX_ASSERT(newIndicesSize > 0, "Bad number of atoms in PME GPU");
    reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices, newIndicesSize,
                           &pmeGpu->archSpecific->gridlineIndicesSize, &pmeGpu->archSpecific->gridlineIndicesSizeAlloc, pmeGpu->archSpecific->context);
    pfree(pmeGpu->staging.h_gridlineIndices);
    pmalloc((void **)&pmeGpu->staging.h_gridlineIndices, newIndicesSize * sizeof(int));
}

void pme_gpu_free_grid_indices(const PmeGpu *pmeGpu)
{
    freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices);
    pfree(pmeGpu->staging.h_gridlineIndices);
}

void pme_gpu_realloc_grids(PmeGpu *pmeGpu)
{
    auto *kernelParamsPtr = pmeGpu->kernelParams.get();
    const int newRealGridSize = kernelParamsPtr->grid.realGridSizePadded[XX] *
                                kernelParamsPtr->grid.realGridSizePadded[YY] *
                                kernelParamsPtr->grid.realGridSizePadded[ZZ];
    const int newComplexGridSize = kernelParamsPtr->grid.complexGridSizePadded[XX] *
                                   kernelParamsPtr->grid.complexGridSizePadded[YY] *
                                   kernelParamsPtr->grid.complexGridSizePadded[ZZ] * 2;
    // Multiplied by 2 because we count complex grid size for complex numbers, but all allocations/pointers are float
    if (pmeGpu->archSpecific->performOutOfPlaceFFT)
    {
        /* 2 separate grids */
        reallocateDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, newComplexGridSize,
                               &pmeGpu->archSpecific->complexGridSize, &pmeGpu->archSpecific->complexGridSizeAlloc, pmeGpu->archSpecific->context);
        reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid, newRealGridSize,
                               &pmeGpu->archSpecific->realGridSize, &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->context);
    }
    else
    {
        /* A single buffer so that any grid will fit */
        const int newGridsSize = std::max(newRealGridSize, newComplexGridSize);
        reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid, newGridsSize,
                               &pmeGpu->archSpecific->realGridSize, &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->context);
        kernelParamsPtr->grid.d_fourierGrid = kernelParamsPtr->grid.d_realGrid;
        pmeGpu->archSpecific->complexGridSize = pmeGpu->archSpecific->realGridSize;
        // the size might get used later for copying the grid
    }
}

void pme_gpu_free_grids(const PmeGpu *pmeGpu)
{
    if (pmeGpu->archSpecific->performOutOfPlaceFFT)
    {
        freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_fourierGrid);
    }
    freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid);
}

void pme_gpu_clear_grids(const PmeGpu *pmeGpu)
{
    clearDeviceBufferAsync(&pmeGpu->kernelParams->grid.d_realGrid, 0,
                           pmeGpu->archSpecific->realGridSize, pmeGpu->archSpecific->pmeStream);
}

void pme_gpu_realloc_and_copy_fract_shifts(PmeGpu *pmeGpu)
{
    pme_gpu_free_fract_shifts(pmeGpu);

    auto *kernelParamsPtr = pmeGpu->kernelParams.get();

    const int nx = kernelParamsPtr->grid.realGridSize[XX];
    const int ny = kernelParamsPtr->grid.realGridSize[YY];
    const int nz = kernelParamsPtr->grid.realGridSize[ZZ];
    const int cellCount = c_pmeNeighborUnitcellCount;
    const int gridDataOffset[DIM] = {0, cellCount * nx, cellCount * (nx + ny)};

    memcpy(kernelParamsPtr->grid.tablesOffsets, &gridDataOffset, sizeof(gridDataOffset));

    const int newFractShiftsSize = cellCount * (nx + ny + nz);

#if GMX_GPU == GMX_GPU_CUDA
    initParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable,
                         kernelParamsPtr->fractShiftsTableTexture,
                         pmeGpu->common->fsh.data(),
                         newFractShiftsSize,
                         pmeGpu->deviceInfo);

    initParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
                         kernelParamsPtr->gridlineIndicesTableTexture,
                         pmeGpu->common->nn.data(),
                         newFractShiftsSize,
                         pmeGpu->deviceInfo);
#elif GMX_GPU == GMX_GPU_OPENCL
    // No dedicated texture routines....
    allocateDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, newFractShiftsSize, pmeGpu->archSpecific->context);
    allocateDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, newFractShiftsSize, pmeGpu->archSpecific->context);
    copyToDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, pmeGpu->common->fsh.data(),
                       0, newFractShiftsSize,
                       pmeGpu->archSpecific->pmeStream, GpuApiCallBehavior::Async, nullptr);
    copyToDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, pmeGpu->common->nn.data(),
                       0, newFractShiftsSize,
                       pmeGpu->archSpecific->pmeStream, GpuApiCallBehavior::Async, nullptr);
#endif
}

void pme_gpu_free_fract_shifts(const PmeGpu *pmeGpu)
{
    auto *kernelParamsPtr = pmeGpu->kernelParams.get();
#if GMX_GPU == GMX_GPU_CUDA
    destroyParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable,
                            kernelParamsPtr->fractShiftsTableTexture,
                            pmeGpu->deviceInfo);
    destroyParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
                            kernelParamsPtr->gridlineIndicesTableTexture,
                            pmeGpu->deviceInfo);
#elif GMX_GPU == GMX_GPU_OPENCL
    freeDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable);
    freeDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable);
#endif
}

bool pme_gpu_stream_query(const PmeGpu *pmeGpu)
{
    return haveStreamTasksCompleted(pmeGpu->archSpecific->pmeStream);
}

void pme_gpu_copy_input_gather_grid(const PmeGpu *pmeGpu, float *h_grid)
{
    copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid, h_grid,
                       0, pmeGpu->archSpecific->realGridSize,
                       pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
}

void pme_gpu_copy_output_spread_grid(const PmeGpu *pmeGpu, float *h_grid)
{
    copyFromDeviceBuffer(h_grid, &pmeGpu->kernelParams->grid.d_realGrid,
                         0, pmeGpu->archSpecific->realGridSize,
                         pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
    pmeGpu->archSpecific->syncSpreadGridD2H.markEvent(pmeGpu->archSpecific->pmeStream);
}

void pme_gpu_copy_output_spread_atom_data(const PmeGpu *pmeGpu)
{
    const int alignment = pme_gpu_get_atoms_per_warp(pmeGpu);
    const size_t nAtomsPadded = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
    const size_t splinesCount = DIM * nAtomsPadded * pmeGpu->common->pme_order;
    auto *kernelParamsPtr = pmeGpu->kernelParams.get();
    copyFromDeviceBuffer(pmeGpu->staging.h_dtheta, &kernelParamsPtr->atoms.d_dtheta,
                         0, splinesCount,
                         pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
    copyFromDeviceBuffer(pmeGpu->staging.h_theta, &kernelParamsPtr->atoms.d_theta,
                         0, splinesCount,
                         pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
    copyFromDeviceBuffer(pmeGpu->staging.h_gridlineIndices, &kernelParamsPtr->atoms.d_gridlineIndices,
                         0, kernelParamsPtr->atoms.nAtoms * DIM,
                         pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
}

void pme_gpu_copy_input_gather_atom_data(const PmeGpu *pmeGpu)
{
    const int alignment = pme_gpu_get_atoms_per_warp(pmeGpu);
    const size_t nAtomsPadded = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
    const size_t splinesCount = DIM * nAtomsPadded * pmeGpu->common->pme_order;
    auto *kernelParamsPtr = pmeGpu->kernelParams.get();
    if (c_usePadding)
    {
        // TODO: could clear only the padding and not the whole thing, but this is a test-exclusive code anyway
        clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_gridlineIndices, 0,
                               pmeGpu->nAtomsAlloc * DIM, pmeGpu->archSpecific->pmeStream);
        clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_dtheta, 0,
                               pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM, pmeGpu->archSpecific->pmeStream);
        clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_theta, 0,
                               pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM, pmeGpu->archSpecific->pmeStream);
    }
    copyToDeviceBuffer(&kernelParamsPtr->atoms.d_dtheta, pmeGpu->staging.h_dtheta,
                       0, splinesCount,
                       pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
    copyToDeviceBuffer(&kernelParamsPtr->atoms.d_theta, pmeGpu->staging.h_theta,
                       0, splinesCount,
                       pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
    copyToDeviceBuffer(&kernelParamsPtr->atoms.d_gridlineIndices, pmeGpu->staging.h_gridlineIndices,
                       0, kernelParamsPtr->atoms.nAtoms * DIM,
                       pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
}

void pme_gpu_sync_spread_grid(const PmeGpu *pmeGpu)
{
    pmeGpu->archSpecific->syncSpreadGridD2H.waitForEvent();
}

void pme_gpu_init_internal(PmeGpu *pmeGpu)
{
    /* Allocate the target-specific structures */
    pmeGpu->archSpecific.reset(new PmeGpuSpecific());
    pmeGpu->kernelParams.reset(new PmeGpuKernelParams());

    pmeGpu->archSpecific->performOutOfPlaceFFT = true;
    /* This should give better performance, according to the cuFFT documentation.
     * The performance seems to be the same though.
     * TODO: PME could also try to pick up nice grid sizes (with factors of 2, 3, 5, 7).
     */

    // timing enabling - TODO put this in gpu_utils (even though generally this is just option handling?) and reuse in NB
    if (GMX_GPU == GMX_GPU_CUDA)
    {
        /* WARNING: CUDA timings are incorrect with multiple streams.
         * This is the main reason why they are disabled by default.
         */
        // TODO: Consider turning on by default when we can detect nr of streams.
        pmeGpu->archSpecific->useTiming = (getenv("GMX_ENABLE_GPU_TIMING") != nullptr);
    }
    else if (GMX_GPU == GMX_GPU_OPENCL)
    {
        pmeGpu->archSpecific->useTiming = (getenv("GMX_DISABLE_GPU_TIMING") == nullptr);
    }

    // TODO: this is just a convenient reuse because programHandle_ currently is in charge of creating context
    pmeGpu->archSpecific->context = pmeGpu->programHandle_->impl_->context;

#if GMX_GPU == GMX_GPU_CUDA
    // Prepare to use the device that this PME task was assigned earlier.
    CU_RET_ERR(cudaSetDevice(pmeGpu->deviceInfo->id), "Switching to PME CUDA device");
#endif

#if GMX_GPU == GMX_GPU_CUDA
    pmeGpu->maxGridWidthX = pmeGpu->deviceInfo->prop.maxGridSize[0];
#elif GMX_GPU == GMX_GPU_OPENCL
    // TODO we'll need work size checks for OpenCL too
#endif

    /* Creating a PME GPU stream:
     * - default high priority with CUDA
     * - no priorities implemented yet with OpenCL; see #2532
     */
#if GMX_GPU == GMX_GPU_CUDA
    cudaError_t stat;
    int         highest_priority, lowest_priority;
    stat = cudaDeviceGetStreamPriorityRange(&lowest_priority, &highest_priority);
    CU_RET_ERR(stat, "PME cudaDeviceGetStreamPriorityRange failed");
    stat = cudaStreamCreateWithPriority(&pmeGpu->archSpecific->pmeStream,
                                        cudaStreamDefault, //cudaStreamNonBlocking,
                                        highest_priority);
    CU_RET_ERR(stat, "cudaStreamCreateWithPriority on the PME stream failed");
#elif GMX_GPU == GMX_GPU_OPENCL
    cl_command_queue_properties queueProperties = pmeGpu->archSpecific->useTiming ? CL_QUEUE_PROFILING_ENABLE : 0;
    cl_device_id                device_id       = pmeGpu->deviceInfo->ocl_gpu_id.ocl_device_id;
    cl_int                      clError;
    pmeGpu->archSpecific->pmeStream = clCreateCommandQueue(pmeGpu->archSpecific->context,
                                                           device_id, queueProperties, &clError);
    if (clError != CL_SUCCESS)
    {
        GMX_THROW(gmx::InternalError("Failed to create PME command queue"));
    }
#endif
}

void pme_gpu_destroy_specific(const PmeGpu *pmeGpu)
{
#if GMX_GPU == GMX_GPU_CUDA
    /* Destroy the CUDA stream */
    cudaError_t stat = cudaStreamDestroy(pmeGpu->archSpecific->pmeStream);
    CU_RET_ERR(stat, "PME cudaStreamDestroy error");
#elif GMX_GPU == GMX_GPU_OPENCL
    cl_int clError = clReleaseCommandQueue(pmeGpu->archSpecific->pmeStream);
    if (clError != CL_SUCCESS)
    {
        gmx_warning("Failed to destroy PME command queue");
    }
#endif
}

void pme_gpu_reinit_3dfft(const PmeGpu *pmeGpu)
{
    if (pme_gpu_performs_FFT(pmeGpu))
    {
        pmeGpu->archSpecific->fftSetup.resize(0);
        for (int i = 0; i < pmeGpu->common->ngrids; i++)
        {
            pmeGpu->archSpecific->fftSetup.push_back(std::unique_ptr<GpuParallel3dFft>(new GpuParallel3dFft(pmeGpu)));
        }
    }
}

void pme_gpu_destroy_3dfft(const PmeGpu *pmeGpu)
{
    pmeGpu->archSpecific->fftSetup.resize(0);
}

int getSplineParamFullIndex(int order, int splineIndex, int dimIndex, int warpIndex, int atomWarpIndex)
{
    if (order != 4)
    {
        throw order;
    }
    constexpr int fixedOrder = 4;
    GMX_UNUSED_VALUE(fixedOrder);
    const int indexBase = getSplineParamIndexBase<fixedOrder>(warpIndex, atomWarpIndex);
    return getSplineParamIndex<fixedOrder>(indexBase, dimIndex, splineIndex);
}

gmx::ArrayRef<gmx::RVec> pme_gpu_get_forces(PmeGpu *pmeGpu)
{
    return pmeGpu->staging.h_forces;
}

void pme_gpu_get_energy_virial(const PmeGpu *pmeGpu, real *energy, matrix virial)
{
    for (int j = 0; j < c_virialAndEnergyCount; j++)
    {
        GMX_ASSERT(std::isfinite(pmeGpu->staging.h_virialAndEnergy[j]), "PME GPU produces incorrect energy/virial.");
    }

    GMX_ASSERT(energy, "Invalid energy output pointer in PME GPU");
    unsigned int j = 0;
    virial[XX][XX] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
    virial[YY][YY] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
    virial[ZZ][ZZ] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
    virial[XX][YY] = virial[YY][XX] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
    virial[XX][ZZ] = virial[ZZ][XX] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
    virial[YY][ZZ] = virial[ZZ][YY] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
    *energy = 0.5f * pmeGpu->staging.h_virialAndEnergy[j++];
}

void pme_gpu_update_input_box(PmeGpu gmx_unused       *pmeGpu,
                              const matrix gmx_unused  box)
{
#if GMX_DOUBLE
    GMX_THROW(gmx::NotImplementedError("PME is implemented for single-precision only on GPU"));
#else
    matrix scaledBox;
    pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
    auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
    kernelParamsPtr->current.boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
    GMX_ASSERT(kernelParamsPtr->current.boxVolume != 0.0f, "Zero volume of the unit cell");
    matrix recipBox;
    gmx::invertBoxMatrix(scaledBox, recipBox);

    /* The GPU recipBox is transposed as compared to the CPU recipBox.
     * Spread uses matrix columns (while solve and gather use rows).
     * There is no particular reason for this; it might be further rethought/optimized for better access patterns.
     */
    const real newRecipBox[DIM][DIM] =
    {
        {recipBox[XX][XX], recipBox[YY][XX], recipBox[ZZ][XX]},
        {             0.0, recipBox[YY][YY], recipBox[ZZ][YY]},
        {             0.0,              0.0, recipBox[ZZ][ZZ]}
    };
    memcpy(kernelParamsPtr->current.recipBox, newRecipBox, sizeof(matrix));
#endif
}

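// Clarifying note on the transposition performed above: after the memcpy,
// kernelParamsPtr->current.recipBox[XX][ZZ] holds recipBox[ZZ][XX], i.e. rows of the
// GPU matrix correspond to columns of the CPU reciprocal box.
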
/*! \brief \libinternal
 * (Re-)initializes all the PME GPU data related to the grid size and cut-off.
 *
 * \param[in] pmeGpu            The PME GPU structure.
 */
static void pme_gpu_reinit_grids(PmeGpu *pmeGpu)
{
    auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
    kernelParamsPtr->grid.ewaldFactor = (M_PI * M_PI) / (pmeGpu->common->ewaldcoeff_q * pmeGpu->common->ewaldcoeff_q);

    /* The grid size variants */
    for (int i = 0; i < DIM; i++)
    {
        kernelParamsPtr->grid.realGridSize[i]       = pmeGpu->common->nk[i];
        kernelParamsPtr->grid.realGridSizeFP[i]     = (float)kernelParamsPtr->grid.realGridSize[i];
        kernelParamsPtr->grid.realGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];

        // The complex grid currently uses no padding;
        // if it starts to do so, then another test should be added for that
        kernelParamsPtr->grid.complexGridSize[i]       = kernelParamsPtr->grid.realGridSize[i];
        kernelParamsPtr->grid.complexGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
    }
    /* FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
    if (!pme_gpu_performs_FFT(pmeGpu))
    {
        // This allows for GPU spreading grid and CPU fftgrid to have the same layout, so that we can copy the data directly
        kernelParamsPtr->grid.realGridSizePadded[ZZ] = (kernelParamsPtr->grid.realGridSize[ZZ] / 2 + 1) * 2;
    }
    /* GPU FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
    kernelParamsPtr->grid.complexGridSize[ZZ] /= 2;
    kernelParamsPtr->grid.complexGridSize[ZZ]++;
    kernelParamsPtr->grid.complexGridSizePadded[ZZ] = kernelParamsPtr->grid.complexGridSize[ZZ];

    pme_gpu_realloc_and_copy_fract_shifts(pmeGpu);
    pme_gpu_realloc_and_copy_bspline_values(pmeGpu);
    pme_gpu_realloc_grids(pmeGpu);
    pme_gpu_reinit_3dfft(pmeGpu);
}

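// Illustrative example of the r2c sizing above (hypothetical numbers, not from the source):
// a real grid with realGridSize[ZZ] = 64 yields complexGridSize[ZZ] = 64 / 2 + 1 = 33 complex points,
// and, when the FFT is done on the CPU, a padded real size of (64 / 2 + 1) * 2 = 66 floats in ZZ.
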
/* Several GPU functions that refer to the CPU PME data live here.
 * We would like to keep these away from the GPU-framework specific code for clarity,
 * as well as compilation issues with MPI.
 */

/*! \brief \libinternal
 * Copies everything useful from the PME CPU to the PME GPU structure.
 * The goal is to minimize interaction with the PME CPU structure in the GPU code.
 *
 * \param[in] pme         The PME structure.
 */
static void pme_gpu_copy_common_data_from(const gmx_pme_t *pme)
{
    /* TODO: Consider refactoring the CPU PME code to use the same structure,
     * so that this function becomes 2 lines */
    PmeGpu *pmeGpu = pme->gpu;
    pmeGpu->common->ngrids       = pme->ngrids;
    pmeGpu->common->epsilon_r    = pme->epsilon_r;
    pmeGpu->common->ewaldcoeff_q = pme->ewaldcoeff_q;
    pmeGpu->common->nk[XX]       = pme->nkx;
    pmeGpu->common->nk[YY]       = pme->nky;
    pmeGpu->common->nk[ZZ]       = pme->nkz;
    pmeGpu->common->pme_order    = pme->pme_order;
    for (int i = 0; i < DIM; i++)
    {
        pmeGpu->common->bsp_mod[i].assign(pme->bsp_mod[i], pme->bsp_mod[i] + pmeGpu->common->nk[i]);
    }
    const int cellCount = c_pmeNeighborUnitcellCount;
    pmeGpu->common->fsh.resize(0);
    pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshx, pme->fshx + cellCount * pme->nkx);
    pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshy, pme->fshy + cellCount * pme->nky);
    pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshz, pme->fshz + cellCount * pme->nkz);
    pmeGpu->common->nn.resize(0);
    pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnx, pme->nnx + cellCount * pme->nkx);
    pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nny, pme->nny + cellCount * pme->nky);
    pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnz, pme->nnz + cellCount * pme->nkz);
    pmeGpu->common->runMode   = pme->runMode;
    pmeGpu->common->boxScaler = pme->boxScaler;
}

/*! \libinternal \brief
 * Initializes the PME GPU data at the beginning of the run.
 * TODO: this should become PmeGpu::PmeGpu()
 *
 * \param[in,out] pme            The PME structure.
 * \param[in,out] gpuInfo        The GPU information structure.
 * \param[in]     pmeGpuProgram  The handle to the program/kernel data created outside (e.g. in unit tests/runner)
 */
static void pme_gpu_init(gmx_pme_t          *pme,
                         gmx_device_info_t  *gpuInfo,
                         PmeGpuProgramHandle pmeGpuProgram)
{
    pme->gpu       = new PmeGpu();
    PmeGpu *pmeGpu = pme->gpu;
    changePinningPolicy(&pmeGpu->staging.h_forces, pme_get_pinning_policy());
    pmeGpu->common = std::shared_ptr<PmeShared>(new PmeShared());

    /* These settings are set here for the whole run; dynamic ones are set in pme_gpu_reinit() */
    /* A convenience variable. */
    pmeGpu->settings.useDecomposition = (pme->nnodes == 1);
    /* TODO: CPU gather with GPU spread is broken due to different theta/dtheta layout. */
    pmeGpu->settings.performGPUGather = true;

    pme_gpu_set_testing(pmeGpu, false);

    pmeGpu->deviceInfo = gpuInfo;
    GMX_ASSERT(pmeGpuProgram != nullptr, "GPU kernels must be already compiled");
    pmeGpu->programHandle_ = pmeGpuProgram;

    pme_gpu_init_internal(pmeGpu);
    pme_gpu_alloc_energy_virial(pmeGpu);

    pme_gpu_copy_common_data_from(pme);

    GMX_ASSERT(pmeGpu->common->epsilon_r != 0.0f, "PME GPU: bad electrostatic coefficient");

    auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
    kernelParamsPtr->constants.elFactor = ONE_4PI_EPS0 / pmeGpu->common->epsilon_r;
}

void pme_gpu_transform_spline_atom_data(const PmeGpu *pmeGpu, const pme_atomcomm_t *atc,
                                        PmeSplineDataType type, int dimIndex, PmeLayoutTransform transform)
{
    // The GPU atom spline data is laid out in a different way currently than the CPU one.
    // This function converts the data from GPU to CPU layout (in the host memory).
    // It is only intended for testing purposes so far.
    // Ideally we should use similar layouts on CPU and GPU if we care about mixed modes and their performance
    // (e.g. spreading on GPU, gathering on CPU).
    GMX_RELEASE_ASSERT(atc->nthread == 1, "Only the serial PME data layout is supported");
    const uintmax_t threadIndex  = 0;
    const auto      atomCount    = pme_gpu_get_kernel_params_base_ptr(pmeGpu)->atoms.nAtoms;
    const auto      atomsPerWarp = pme_gpu_get_atoms_per_warp(pmeGpu);
    const auto      pmeOrder     = pmeGpu->common->pme_order;

    real  *cpuSplineBuffer;
    float *h_splineBuffer;
    switch (type)
    {
        case PmeSplineDataType::Values:
            cpuSplineBuffer = atc->spline[threadIndex].theta[dimIndex];
            h_splineBuffer  = pmeGpu->staging.h_theta;
            break;

        case PmeSplineDataType::Derivatives:
            cpuSplineBuffer = atc->spline[threadIndex].dtheta[dimIndex];
            h_splineBuffer  = pmeGpu->staging.h_dtheta;
            break;

        default:
            GMX_THROW(gmx::InternalError("Unknown spline data type"));
    }

    for (auto atomIndex = 0; atomIndex < atomCount; atomIndex++)
    {
        auto atomWarpIndex = atomIndex % atomsPerWarp;
        auto warpIndex     = atomIndex / atomsPerWarp;
        for (auto orderIndex = 0; orderIndex < pmeOrder; orderIndex++)
        {
            const auto gpuValueIndex = getSplineParamFullIndex(pmeOrder, orderIndex, dimIndex, warpIndex, atomWarpIndex);
            const auto cpuValueIndex = atomIndex * pmeOrder + orderIndex;
            GMX_ASSERT(cpuValueIndex < atomCount * pmeOrder, "Atom spline data index out of bounds (while transforming GPU data layout for host)");
            switch (transform)
            {
                case PmeLayoutTransform::GpuToHost:
                    cpuSplineBuffer[cpuValueIndex] = h_splineBuffer[gpuValueIndex];
                    break;

                case PmeLayoutTransform::HostToGpu:
                    h_splineBuffer[gpuValueIndex] = cpuSplineBuffer[cpuValueIndex];
                    break;

                default:
                    GMX_THROW(gmx::InternalError("Unknown layout transform"));
            }
        }
    }
}

void pme_gpu_get_real_grid_sizes(const PmeGpu *pmeGpu, gmx::IVec *gridSize, gmx::IVec *paddedGridSize)
{
    GMX_ASSERT(gridSize != nullptr, "");
    GMX_ASSERT(paddedGridSize != nullptr, "");
    GMX_ASSERT(pmeGpu != nullptr, "");
    auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
    for (int i = 0; i < DIM; i++)
    {
        (*gridSize)[i]       = kernelParamsPtr->grid.realGridSize[i];
        (*paddedGridSize)[i] = kernelParamsPtr->grid.realGridSizePadded[i];
    }
}

void pme_gpu_reinit(gmx_pme_t          *pme,
                    gmx_device_info_t  *gpuInfo,
                    PmeGpuProgramHandle pmeGpuProgram)
{
    if (!pme_gpu_active(pme))
    {
        return;
    }

    if (!pme->gpu)
    {
        /* First-time initialization */
        pme_gpu_init(pme, gpuInfo, pmeGpuProgram);
    }
    else
    {
        /* After this call nothing in the GPU code should refer to the gmx_pme_t *pme itself - until the next pme_gpu_reinit */
        pme_gpu_copy_common_data_from(pme);
    }
    /* GPU FFT will only get used for a single rank.*/
    pme->gpu->settings.performGPUFFT   = (pme->gpu->common->runMode == PmeRunMode::GPU) && !pme_gpu_uses_dd(pme->gpu);
    pme->gpu->settings.performGPUSolve = (pme->gpu->common->runMode == PmeRunMode::GPU);

    /* Reinit active timers */
    pme_gpu_reinit_timings(pme->gpu);

    pme_gpu_reinit_grids(pme->gpu);
    // Note: if timing the reinit launch overhead becomes more relevant
    // (e.g. with regular PP-PME re-balancing), we should pass wcycle here.
    pme_gpu_reinit_computation(pme, nullptr);
    /* Clear the previous box - doesn't hurt, and forces the PME CPU recipbox
     * update for mixed mode on grid switch. TODO: use shared recipbox field.
     */
    std::memset(pme->gpu->common->previousBox, 0, sizeof(pme->gpu->common->previousBox));
}

void pme_gpu_destroy(PmeGpu *pmeGpu)
{
    /* Free lots of data */
    pme_gpu_free_energy_virial(pmeGpu);
    pme_gpu_free_bspline_values(pmeGpu);
    pme_gpu_free_forces(pmeGpu);
    pme_gpu_free_coordinates(pmeGpu);
    pme_gpu_free_coefficients(pmeGpu);
    pme_gpu_free_spline_data(pmeGpu);
    pme_gpu_free_grid_indices(pmeGpu);
    pme_gpu_free_fract_shifts(pmeGpu);
    pme_gpu_free_grids(pmeGpu);

    pme_gpu_destroy_3dfft(pmeGpu);

    /* Free the GPU-framework specific data last */
    pme_gpu_destroy_specific(pmeGpu);

    delete pmeGpu;
}

void pme_gpu_reinit_atoms(PmeGpu *pmeGpu, const int nAtoms, const real *charges)
{
    auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
    kernelParamsPtr->atoms.nAtoms = nAtoms;
    const int alignment = pme_gpu_get_atom_data_alignment(pmeGpu);
    pmeGpu->nAtomsPadded = ((nAtoms + alignment - 1) / alignment) * alignment;
    const int  nAtomsAlloc   = c_usePadding ? pmeGpu->nAtomsPadded : nAtoms;
    const bool haveToRealloc = (pmeGpu->nAtomsAlloc < nAtomsAlloc); /* This check might be redundant, but is logical */
    pmeGpu->nAtomsAlloc = nAtomsAlloc;

#if GMX_DOUBLE
    GMX_RELEASE_ASSERT(false, "Only single precision supported");
    GMX_UNUSED_VALUE(charges);
#else
    pme_gpu_realloc_and_copy_input_coefficients(pmeGpu, reinterpret_cast<const float *>(charges));
    /* Could also be checked for haveToRealloc, but the copy always needs to be performed */
#endif

    if (haveToRealloc)
    {
        pme_gpu_realloc_coordinates(pmeGpu);
        pme_gpu_realloc_forces(pmeGpu);
        pme_gpu_realloc_spline_data(pmeGpu);
        pme_gpu_realloc_grid_indices(pmeGpu);
    }
}

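// Example of the padding arithmetic above (illustrative numbers, not from the source):
// with alignment = 32 and nAtoms = 100, nAtomsPadded = ((100 + 31) / 32) * 32 = 128.
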
void pme_gpu_3dfft(const PmeGpu *pmeGpu, gmx_fft_direction dir, int grid_index)
{
    int timerId = (dir == GMX_FFT_REAL_TO_COMPLEX) ? gtPME_FFT_R2C : gtPME_FFT_C2R;

    auto *timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timerId);
    pme_gpu_start_timing(pmeGpu, timerId);
    pmeGpu->archSpecific->fftSetup[grid_index]->perform3dFft(dir, timingEvent);
    pme_gpu_stop_timing(pmeGpu, timerId);
}

/*! \brief
 * Given possibly large \p blockCount, returns a compact 1D or 2D grid for kernel scheduling,
 * to minimize number of unused blocks.
 */
std::pair<int, int> inline pmeGpuCreateGrid(const PmeGpu *pmeGpu, int blockCount)
{
    // How many maximum widths in X do we need (hopefully just one)
    const int minRowCount = (blockCount + pmeGpu->maxGridWidthX - 1) / pmeGpu->maxGridWidthX;
    // Trying to make things even
    const int colCount = (blockCount + minRowCount - 1) / minRowCount;
    GMX_ASSERT((colCount * minRowCount - blockCount) >= 0, "pmeGpuCreateGrid: totally wrong");
    GMX_ASSERT((colCount * minRowCount - blockCount) < minRowCount, "pmeGpuCreateGrid: excessive blocks");
    return std::pair<int, int>(colCount, minRowCount);
}

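// Worked example for pmeGpuCreateGrid (illustrative numbers, not from the source):
// with maxGridWidthX = 65535 and blockCount = 100000, minRowCount = 2 and colCount = 50000,
// so the kernel launch grid is 50000 x 2 blocks with no block left unused.
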
void pme_gpu_spread(const PmeGpu    *pmeGpu,
                    int gmx_unused   gridIndex,
                    real            *h_grid,
                    bool             computeSplines,
                    bool             spreadCharges)
{
    GMX_ASSERT(computeSplines || spreadCharges, "PME spline/spread kernel has invalid input (nothing to do)");
    const auto *kernelParamsPtr = pmeGpu->kernelParams.get();
    GMX_ASSERT(kernelParamsPtr->atoms.nAtoms > 0, "No atom data in PME GPU spread");

#if GMX_GPU == GMX_GPU_OPENCL
    const int c_spreadMaxThreadsPerBlock = 1; //FIXME make a runtime decision
#endif

    const int order         = pmeGpu->common->pme_order;
    const int atomsPerBlock = c_spreadMaxThreadsPerBlock / PME_SPREADGATHER_THREADS_PER_ATOM;
    // TODO: pick smaller block size in runtime if needed
    // (e.g. on 660 Ti where 50% occupancy is ~25% faster than 100% occupancy with RNAse (~17.8k atoms))
    // If doing so, change atomsPerBlock in the kernels as well.
    // TODO: test varying block sizes on modern arch-s as well
    // TODO: also consider using cudaFuncSetCacheConfig() for preferring shared memory on older architectures
    // (for spline data mostly, together with varying PME_GPU_PARALLEL_SPLINE define)
    GMX_ASSERT(!c_usePadding || !(PME_ATOM_DATA_ALIGNMENT % atomsPerBlock), "inconsistent atom data padding vs. spreading block size");

    const int blockCount = pmeGpu->nAtomsPadded / atomsPerBlock;
    auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);

    KernelLaunchConfig config;
    config.blockSize[0] = config.blockSize[1] = order;
    config.blockSize[2] = atomsPerBlock;
    config.gridSize[0]  = dimGrid.first;
    config.gridSize[1]  = dimGrid.second;
    config.stream       = pmeGpu->archSpecific->pmeStream;

    if (order != 4)
    {
        GMX_THROW(gmx::NotImplementedError("The code for pme_order != 4 was not implemented!"));
    }

    int timingId;
    PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
    if (computeSplines)
    {
        if (spreadCharges)
        {
            timingId  = gtPME_SPLINEANDSPREAD;
            kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernel;
        }
        else
        {
            timingId  = gtPME_SPLINE;
            kernelPtr = pmeGpu->programHandle_->impl_->splineKernel;
        }
    }
    else
    {
        timingId  = gtPME_SPREAD;
        kernelPtr = pmeGpu->programHandle_->impl_->spreadKernel;
    }

    pme_gpu_start_timing(pmeGpu, timingId);
    auto *timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
    const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
    launchGpuKernel(kernelPtr, config, timingEvent, "PME spline/spread", kernelArgs);
    pme_gpu_stop_timing(pmeGpu, timingId);

    const bool copyBackGrid = spreadCharges && (pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_FFT(pmeGpu));
    if (copyBackGrid)
    {
        pme_gpu_copy_output_spread_grid(pmeGpu, h_grid);
    }
    const bool copyBackAtomData = computeSplines && (pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_gather(pmeGpu));
    if (copyBackAtomData)
    {
        pme_gpu_copy_output_spread_atom_data(pmeGpu);
    }
}

void pme_gpu_solve(const PmeGpu *pmeGpu, t_complex *h_grid,
                   GridOrdering gridOrdering, bool computeEnergyAndVirial)
{
    const bool copyInputAndOutputGrid = pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_FFT(pmeGpu);

    auto *kernelParamsPtr = pmeGpu->kernelParams.get();

    float *h_gridFloat = reinterpret_cast<float *>(h_grid);
    if (copyInputAndOutputGrid)
    {
        copyToDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, h_gridFloat,
                           0, pmeGpu->archSpecific->complexGridSize,
                           pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
    }

    int majorDim = -1, middleDim = -1, minorDim = -1;
    switch (gridOrdering)
    {
        case GridOrdering::YZX:
            majorDim  = YY;
            middleDim = ZZ;
            minorDim  = XX;
            break;

        case GridOrdering::XYZ:
            majorDim  = XX;
            middleDim = YY;
            minorDim  = ZZ;
            break;

        default:
            GMX_ASSERT(false, "Implement grid ordering here and below for the kernel launch");
    }

#if GMX_GPU == GMX_GPU_OPENCL
    const int c_solveMaxThreadsPerBlock = 1; //FIXME make a runtime decision
#endif

    const int maxBlockSize      = c_solveMaxThreadsPerBlock;
    const int gridLineSize      = pmeGpu->kernelParams->grid.complexGridSize[minorDim];
    const int gridLinesPerBlock = std::max(maxBlockSize / gridLineSize, 1);
    const int blocksPerGridLine = (gridLineSize + maxBlockSize - 1) / maxBlockSize;
    const int cellsPerBlock     = gridLineSize * gridLinesPerBlock;
    const int blockSize         = (cellsPerBlock + warp_size - 1) / warp_size * warp_size;

    KernelLaunchConfig config;
    config.blockSize[0] = blockSize;
    config.gridSize[0]  = blocksPerGridLine;
    // rounding up to full warps so that shuffle operations produce defined results
    config.gridSize[1]  = (pmeGpu->kernelParams->grid.complexGridSize[middleDim] + gridLinesPerBlock - 1) / gridLinesPerBlock;
    config.gridSize[2]  = pmeGpu->kernelParams->grid.complexGridSize[majorDim];
    config.stream       = pmeGpu->archSpecific->pmeStream;

    int timingId = gtPME_SOLVE;
    PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
    if (gridOrdering == GridOrdering::YZX)
    {
        kernelPtr = computeEnergyAndVirial ?
            pmeGpu->programHandle_->impl_->solveYZXEnergyKernel :
            pmeGpu->programHandle_->impl_->solveYZXKernel;
    }
    else if (gridOrdering == GridOrdering::XYZ)
    {
        kernelPtr = computeEnergyAndVirial ?
            pmeGpu->programHandle_->impl_->solveXYZEnergyKernel :
            pmeGpu->programHandle_->impl_->solveXYZKernel;
    }

    pme_gpu_start_timing(pmeGpu, timingId);
    auto *timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
    const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
    launchGpuKernel(kernelPtr, config, timingEvent, "PME solve", kernelArgs);
    pme_gpu_stop_timing(pmeGpu, timingId);

    if (computeEnergyAndVirial)
    {
        copyFromDeviceBuffer(pmeGpu->staging.h_virialAndEnergy, &kernelParamsPtr->constants.d_virialAndEnergy,
                             0, c_virialAndEnergyCount,
                             pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
    }

    if (copyInputAndOutputGrid)
    {
        copyFromDeviceBuffer(h_gridFloat, &kernelParamsPtr->grid.d_fourierGrid,
                             0, pmeGpu->archSpecific->complexGridSize,
                             pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
    }
}

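// Illustrative sizing for the solve launch above (hypothetical numbers, not from the source):
// with maxBlockSize = 256 and gridLineSize = 100, gridLinesPerBlock = 2, blocksPerGridLine = 1,
// cellsPerBlock = 200, and blockSize rounds up to 224 for warp_size = 32.
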
void pme_gpu_gather(PmeGpu                *pmeGpu,
                    PmeForceOutputHandling forceTreatment,
                    const float           *h_grid)
{
    /* Copying the input CPU forces for reduction */
    if (forceTreatment != PmeForceOutputHandling::Set)
    {
        pme_gpu_copy_input_forces(pmeGpu);
    }

    const int   order            = pmeGpu->common->pme_order;
    const auto *kernelParamsPtr  = pmeGpu->kernelParams.get();

    if (!pme_gpu_performs_FFT(pmeGpu) || pme_gpu_is_testing(pmeGpu))
    {
        pme_gpu_copy_input_gather_grid(pmeGpu, const_cast<float *>(h_grid));
    }

    if (pme_gpu_is_testing(pmeGpu))
    {
        pme_gpu_copy_input_gather_atom_data(pmeGpu);
    }

#if GMX_GPU == GMX_GPU_OPENCL
    const int c_gatherMaxThreadsPerBlock = 1; //FIXME make a runtime decision
#endif
    const int atomsPerBlock = (c_gatherMaxThreadsPerBlock / PME_SPREADGATHER_THREADS_PER_ATOM);
    GMX_ASSERT(!c_usePadding || !(PME_ATOM_DATA_ALIGNMENT % atomsPerBlock), "inconsistent atom data padding vs. gathering block size");

    const int blockCount = pmeGpu->nAtomsPadded / atomsPerBlock;
    auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);

    KernelLaunchConfig config;
    config.blockSize[0] = config.blockSize[1] = order;
    config.blockSize[2] = atomsPerBlock;
    config.gridSize[0]  = dimGrid.first;
    config.gridSize[1]  = dimGrid.second;
    config.stream       = pmeGpu->archSpecific->pmeStream;

    if (order != 4)
    {
        GMX_THROW(gmx::NotImplementedError("The code for pme_order != 4 was not implemented!"));
    }

    // TODO test different cache configs

    int timingId = gtPME_GATHER;
    // TODO design kernel selection getters and make PmeGpu a friend of PmeGpuProgramImpl
    PmeGpuProgramImpl::PmeKernelHandle kernelPtr = (forceTreatment == PmeForceOutputHandling::Set) ?
        pmeGpu->programHandle_->impl_->gatherKernel :
        pmeGpu->programHandle_->impl_->gatherReduceWithInputKernel;

    pme_gpu_start_timing(pmeGpu, timingId);
    auto *timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
    const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
    launchGpuKernel(kernelPtr, config, timingEvent, "PME gather", kernelArgs);
    pme_gpu_stop_timing(pmeGpu, timingId);

    pme_gpu_copy_output_forces(pmeGpu);
}