/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief This file contains internal function implementations
 * for performing the PME calculations on GPU.
 *
 * Note that this file is compiled as regular C++ source in OpenCL builds, but
 * it is treated as CUDA source in CUDA-enabled GPU builds.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 * \ingroup module_ewald
 */
#include "gmxpre.h"

#include "pme-gpu-internal.h"

#include "config.h"

#include <cmath>
#include <cstring>

#include <algorithm>
#include <memory>
#include <string>

#include "gromacs/ewald/ewald-utils.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/math/invertmatrix.h"
#include "gromacs/math/units.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/stringutil.h"

#if GMX_GPU == GMX_GPU_CUDA
#include "gromacs/gpu_utils/pmalloc_cuda.h"

#include "pme.cuh"
#elif GMX_GPU == GMX_GPU_OPENCL
#include "gromacs/gpu_utils/gmxopencl.h"
#endif

#include "gromacs/ewald/pme.h"

#include "pme-gpu-3dfft.h"
#include "pme-gpu-constants.h"
#include "pme-gpu-program-impl.h"
#include "pme-gpu-timings.h"
#include "pme-gpu-types.h"
#include "pme-gpu-types-host.h"
#include "pme-gpu-types-host-impl.h"
#include "pme-gpu-utils.h"

#include "pme-internal.h"
/*! \brief \libinternal
 * Wrapper for getting a pointer to the plain C++ part of the GPU kernel parameters structure.
 *
 * \param[in] pmeGpu  The PME GPU structure.
 * \returns The pointer to the kernel parameters.
 */
static PmeGpuKernelParamsBase *pme_gpu_get_kernel_params_base_ptr(const PmeGpu *pmeGpu)
{
    // reinterpret_cast is needed because the derived CUDA structure is not known in this file
    auto *kernelParamsPtr = reinterpret_cast<PmeGpuKernelParamsBase *>(pmeGpu->kernelParams.get());
    return kernelParamsPtr;
}

int pme_gpu_get_atom_data_alignment(const PmeGpu * /*unused*/)
{
    // TODO: this can be simplified, as PME_ATOM_DATA_ALIGNMENT is now constant
    return PME_ATOM_DATA_ALIGNMENT;
}

int pme_gpu_get_atoms_per_warp(const PmeGpu *pmeGpu)
{
    const int order = pmeGpu->common->pme_order;
    GMX_ASSERT(order > 0, "Invalid PME order");
    return pmeGpu->programHandle_->impl_->warpSize / PME_SPREADGATHER_THREADS_PER_ATOM;
}
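
// Arithmetic illustration (assuming typical values): with a 32-wide CUDA warp
// and PME_SPREADGATHER_THREADS_PER_ATOM equal to 16 (its value for 4th-order
// splines), each warp processes 2 atoms; a 64-wide OpenCL execution width
// would process 4.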
void pme_gpu_synchronize(const PmeGpu *pmeGpu)
{
    gpuStreamSynchronize(pmeGpu->archSpecific->pmeStream);
}

void pme_gpu_alloc_energy_virial(const PmeGpu *pmeGpu)
{
    const size_t energyAndVirialSize = c_virialAndEnergyCount * sizeof(float);
    allocateDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy, c_virialAndEnergyCount, pmeGpu->archSpecific->context);
    pmalloc((void **)&pmeGpu->staging.h_virialAndEnergy, energyAndVirialSize);
}

void pme_gpu_free_energy_virial(PmeGpu *pmeGpu)
{
    freeDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy);
    pfree(pmeGpu->staging.h_virialAndEnergy);
    pmeGpu->staging.h_virialAndEnergy = nullptr;
}

void pme_gpu_clear_energy_virial(const PmeGpu *pmeGpu)
{
    clearDeviceBufferAsync(&pmeGpu->kernelParams->constants.d_virialAndEnergy, 0,
                           c_virialAndEnergyCount, pmeGpu->archSpecific->pmeStream);
}

void pme_gpu_realloc_and_copy_bspline_values(const PmeGpu *pmeGpu)
{
    const int splineValuesOffset[DIM] = {
        0,
        pmeGpu->kernelParams->grid.realGridSize[XX],
        pmeGpu->kernelParams->grid.realGridSize[XX] + pmeGpu->kernelParams->grid.realGridSize[YY]
    };
    memcpy((void *)&pmeGpu->kernelParams->grid.splineValuesOffset, &splineValuesOffset, sizeof(splineValuesOffset));

    const int newSplineValuesSize = pmeGpu->kernelParams->grid.realGridSize[XX] +
        pmeGpu->kernelParams->grid.realGridSize[YY] +
        pmeGpu->kernelParams->grid.realGridSize[ZZ];
    const bool shouldRealloc = (newSplineValuesSize > pmeGpu->archSpecific->splineValuesSize);
    reallocateDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, newSplineValuesSize,
                           &pmeGpu->archSpecific->splineValuesSize, &pmeGpu->archSpecific->splineValuesSizeAlloc, pmeGpu->archSpecific->context);
    if (shouldRealloc)
    {
        /* Reallocate the host buffer */
        pfree(pmeGpu->staging.h_splineModuli);
        pmalloc((void **)&pmeGpu->staging.h_splineModuli, newSplineValuesSize * sizeof(float));
    }
    for (int i = 0; i < DIM; i++)
    {
        memcpy(pmeGpu->staging.h_splineModuli + splineValuesOffset[i], pmeGpu->common->bsp_mod[i].data(), pmeGpu->common->bsp_mod[i].size() * sizeof(float));
    }
    /* TODO: pin original buffer instead! */
    copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, pmeGpu->staging.h_splineModuli,
                       0, newSplineValuesSize,
                       pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
}
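
// Packing illustration (hypothetical grid): for a 60x48x36 real grid, the X, Y
// and Z B-spline moduli land at splineValuesOffset = {0, 60, 108} within a
// single 144-float buffer, which is what the memcpy loop above relies on.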
void pme_gpu_free_bspline_values(const PmeGpu *pmeGpu)
{
    pfree(pmeGpu->staging.h_splineModuli);
    freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli);
}

void pme_gpu_realloc_forces(PmeGpu *pmeGpu)
{
    const size_t newForcesSize = pmeGpu->nAtomsAlloc * DIM;
    GMX_ASSERT(newForcesSize > 0, "Bad number of atoms in PME GPU");
    reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, newForcesSize,
                           &pmeGpu->archSpecific->forcesSize, &pmeGpu->archSpecific->forcesSizeAlloc, pmeGpu->archSpecific->context);
    pmeGpu->staging.h_forces.reserve(pmeGpu->nAtomsAlloc);
    pmeGpu->staging.h_forces.resize(pmeGpu->kernelParams->atoms.nAtoms);
}

void pme_gpu_free_forces(const PmeGpu *pmeGpu)
{
    freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces);
}

void pme_gpu_copy_input_forces(PmeGpu *pmeGpu)
{
    GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
    float *h_forcesFloat = reinterpret_cast<float *>(pmeGpu->staging.h_forces.data());
    copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, h_forcesFloat,
                       0, DIM * pmeGpu->kernelParams->atoms.nAtoms,
                       pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
}

void pme_gpu_copy_output_forces(PmeGpu *pmeGpu)
{
    GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
    float *h_forcesFloat = reinterpret_cast<float *>(pmeGpu->staging.h_forces.data());
    copyFromDeviceBuffer(h_forcesFloat, &pmeGpu->kernelParams->atoms.d_forces,
                         0, DIM * pmeGpu->kernelParams->atoms.nAtoms,
                         pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
}

void pme_gpu_realloc_coordinates(const PmeGpu *pmeGpu)
{
    const size_t newCoordinatesSize = pmeGpu->nAtomsAlloc * DIM;
    GMX_ASSERT(newCoordinatesSize > 0, "Bad number of atoms in PME GPU");
    reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coordinates, newCoordinatesSize,
                           &pmeGpu->archSpecific->coordinatesSize, &pmeGpu->archSpecific->coordinatesSizeAlloc, pmeGpu->archSpecific->context);
    if (c_usePadding)
    {
        const size_t paddingIndex = DIM * pmeGpu->kernelParams->atoms.nAtoms;
        const size_t paddingCount = DIM * pmeGpu->nAtomsAlloc - paddingIndex;
        if (paddingCount > 0)
        {
            clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coordinates, paddingIndex,
                                   paddingCount, pmeGpu->archSpecific->pmeStream);
        }
    }
}
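
// Padding illustration (hypothetical sizes): with 1000 atoms in an allocation
// of 1024, paddingIndex is 3000 floats and paddingCount is 72 floats, so only
// the unused tail of the coordinate buffer gets cleared.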
void pme_gpu_copy_input_coordinates(const PmeGpu *pmeGpu, const rvec *h_coordinates)
{
    GMX_ASSERT(h_coordinates, "Bad host-side coordinate buffer in PME GPU");
#if GMX_DOUBLE
    GMX_RELEASE_ASSERT(false, "Only single precision is supported");
    GMX_UNUSED_VALUE(h_coordinates);
#else
    const float *h_coordinatesFloat = reinterpret_cast<const float *>(h_coordinates);
    copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coordinates, h_coordinatesFloat,
                       0, pmeGpu->kernelParams->atoms.nAtoms * DIM,
                       pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
#endif
}

void pme_gpu_free_coordinates(const PmeGpu *pmeGpu)
{
    freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coordinates);
}

void pme_gpu_realloc_and_copy_input_coefficients(const PmeGpu *pmeGpu, const float *h_coefficients)
{
    GMX_ASSERT(h_coefficients, "Bad host-side charge buffer in PME GPU");
    const size_t newCoefficientsSize = pmeGpu->nAtomsAlloc;
    GMX_ASSERT(newCoefficientsSize > 0, "Bad number of atoms in PME GPU");
    reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients, newCoefficientsSize,
                           &pmeGpu->archSpecific->coefficientsSize, &pmeGpu->archSpecific->coefficientsSizeAlloc, pmeGpu->archSpecific->context);
    copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients, const_cast<float *>(h_coefficients),
                       0, pmeGpu->kernelParams->atoms.nAtoms,
                       pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
    if (c_usePadding)
    {
        const size_t paddingIndex = pmeGpu->kernelParams->atoms.nAtoms;
        const size_t paddingCount = pmeGpu->nAtomsAlloc - paddingIndex;
        if (paddingCount > 0)
        {
            clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coefficients, paddingIndex,
                                   paddingCount, pmeGpu->archSpecific->pmeStream);
        }
    }
}

void pme_gpu_free_coefficients(const PmeGpu *pmeGpu)
{
    freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients);
}

void pme_gpu_realloc_spline_data(const PmeGpu *pmeGpu)
{
    const int order = pmeGpu->common->pme_order;
    const int alignment = pme_gpu_get_atoms_per_warp(pmeGpu);
    const size_t nAtomsPadded = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
    const int newSplineDataSize = DIM * order * nAtomsPadded;
    GMX_ASSERT(newSplineDataSize > 0, "Bad number of atoms in PME GPU");
    /* Two arrays of the same size */
    const bool shouldRealloc = (newSplineDataSize > pmeGpu->archSpecific->splineDataSize);
    int currentSizeTemp = pmeGpu->archSpecific->splineDataSize;
    int currentSizeTempAlloc = pmeGpu->archSpecific->splineDataSizeAlloc;
    reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta, newSplineDataSize,
                           &currentSizeTemp, &currentSizeTempAlloc, pmeGpu->archSpecific->context);
    reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta, newSplineDataSize,
                           &pmeGpu->archSpecific->splineDataSize, &pmeGpu->archSpecific->splineDataSizeAlloc, pmeGpu->archSpecific->context);
    // the host side reallocation
    if (shouldRealloc)
    {
        pfree(pmeGpu->staging.h_theta);
        pmalloc((void **)&pmeGpu->staging.h_theta, newSplineDataSize * sizeof(float));
        pfree(pmeGpu->staging.h_dtheta);
        pmalloc((void **)&pmeGpu->staging.h_dtheta, newSplineDataSize * sizeof(float));
    }
}
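
// Sizing illustration (hypothetical numbers): order 4, an allocation of 1024
// atoms and 2 atoms per warp give newSplineDataSize = 3 * 4 * 1024 = 12288
// floats for each of the theta and dtheta arrays.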
void pme_gpu_free_spline_data(const PmeGpu *pmeGpu)
{
    /* Two arrays of the same size */
    freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta);
    freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta);
    pfree(pmeGpu->staging.h_theta);
    pfree(pmeGpu->staging.h_dtheta);
}

void pme_gpu_realloc_grid_indices(const PmeGpu *pmeGpu)
{
    const size_t newIndicesSize = DIM * pmeGpu->nAtomsAlloc;
    GMX_ASSERT(newIndicesSize > 0, "Bad number of atoms in PME GPU");
    reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices, newIndicesSize,
                           &pmeGpu->archSpecific->gridlineIndicesSize, &pmeGpu->archSpecific->gridlineIndicesSizeAlloc, pmeGpu->archSpecific->context);
    pfree(pmeGpu->staging.h_gridlineIndices);
    pmalloc((void **)&pmeGpu->staging.h_gridlineIndices, newIndicesSize * sizeof(int));
}

void pme_gpu_free_grid_indices(const PmeGpu *pmeGpu)
{
    freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices);
    pfree(pmeGpu->staging.h_gridlineIndices);
}

void pme_gpu_realloc_grids(PmeGpu *pmeGpu)
{
    auto *kernelParamsPtr = pmeGpu->kernelParams.get();
    const int newRealGridSize = kernelParamsPtr->grid.realGridSizePadded[XX] *
        kernelParamsPtr->grid.realGridSizePadded[YY] *
        kernelParamsPtr->grid.realGridSizePadded[ZZ];
    const int newComplexGridSize = kernelParamsPtr->grid.complexGridSizePadded[XX] *
        kernelParamsPtr->grid.complexGridSizePadded[YY] *
        kernelParamsPtr->grid.complexGridSizePadded[ZZ] * 2;
    // Multiplied by 2 because the complex grid is counted in complex values, while all allocations/pointers are in floats
    if (pmeGpu->archSpecific->performOutOfPlaceFFT)
    {
        /* 2 separate grids */
        reallocateDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, newComplexGridSize,
                               &pmeGpu->archSpecific->complexGridSize, &pmeGpu->archSpecific->complexGridSizeAlloc, pmeGpu->archSpecific->context);
        reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid, newRealGridSize,
                               &pmeGpu->archSpecific->realGridSize, &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->context);
    }
    else
    {
        /* A single buffer so that any grid will fit */
        const int newGridsSize = std::max(newRealGridSize, newComplexGridSize);
        reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid, newGridsSize,
                               &pmeGpu->archSpecific->realGridSize, &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->context);
        kernelParamsPtr->grid.d_fourierGrid = kernelParamsPtr->grid.d_realGrid;
        pmeGpu->archSpecific->complexGridSize = pmeGpu->archSpecific->realGridSize;
        // the size might get used later for copying the grid
    }
}

void pme_gpu_free_grids(const PmeGpu *pmeGpu)
{
    if (pmeGpu->archSpecific->performOutOfPlaceFFT)
    {
        freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_fourierGrid);
    }
    freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid);
}

void pme_gpu_clear_grids(const PmeGpu *pmeGpu)
{
    clearDeviceBufferAsync(&pmeGpu->kernelParams->grid.d_realGrid, 0,
                           pmeGpu->archSpecific->realGridSize, pmeGpu->archSpecific->pmeStream);
}
void pme_gpu_realloc_and_copy_fract_shifts(PmeGpu *pmeGpu)
{
    pme_gpu_free_fract_shifts(pmeGpu);

    auto *kernelParamsPtr = pmeGpu->kernelParams.get();

    const int nx = kernelParamsPtr->grid.realGridSize[XX];
    const int ny = kernelParamsPtr->grid.realGridSize[YY];
    const int nz = kernelParamsPtr->grid.realGridSize[ZZ];
    const int cellCount = c_pmeNeighborUnitcellCount;
    const int gridDataOffset[DIM] = {0, cellCount * nx, cellCount * (nx + ny)};

    memcpy(kernelParamsPtr->grid.tablesOffsets, &gridDataOffset, sizeof(gridDataOffset));

    const int newFractShiftsSize = cellCount * (nx + ny + nz);

#if GMX_GPU == GMX_GPU_CUDA
    initParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable,
                         kernelParamsPtr->fractShiftsTableTexture,
                         pmeGpu->common->fsh.data(),
                         newFractShiftsSize,
                         pmeGpu->deviceInfo);
    initParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
                         kernelParamsPtr->gridlineIndicesTableTexture,
                         pmeGpu->common->nn.data(),
                         newFractShiftsSize,
                         pmeGpu->deviceInfo);
#elif GMX_GPU == GMX_GPU_OPENCL
    // No dedicated texture routines....
    allocateDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, newFractShiftsSize, pmeGpu->archSpecific->context);
    allocateDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, newFractShiftsSize, pmeGpu->archSpecific->context);
    copyToDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, pmeGpu->common->fsh.data(),
                       0, newFractShiftsSize,
                       pmeGpu->archSpecific->pmeStream, GpuApiCallBehavior::Async, nullptr);
    copyToDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, pmeGpu->common->nn.data(),
                       0, newFractShiftsSize,
                       pmeGpu->archSpecific->pmeStream, GpuApiCallBehavior::Async, nullptr);
#endif
}

void pme_gpu_free_fract_shifts(const PmeGpu *pmeGpu)
{
    auto *kernelParamsPtr = pmeGpu->kernelParams.get();
#if GMX_GPU == GMX_GPU_CUDA
    destroyParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable,
                            kernelParamsPtr->fractShiftsTableTexture,
                            pmeGpu->deviceInfo);
    destroyParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
                            kernelParamsPtr->gridlineIndicesTableTexture,
                            pmeGpu->deviceInfo);
#elif GMX_GPU == GMX_GPU_OPENCL
    freeDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable);
    freeDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable);
#endif
}
bool pme_gpu_stream_query(const PmeGpu *pmeGpu)
{
    return haveStreamTasksCompleted(pmeGpu->archSpecific->pmeStream);
}

void pme_gpu_copy_input_gather_grid(const PmeGpu *pmeGpu, float *h_grid)
{
    copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid, h_grid,
                       0, pmeGpu->archSpecific->realGridSize,
                       pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
}

void pme_gpu_copy_output_spread_grid(const PmeGpu *pmeGpu, float *h_grid)
{
    copyFromDeviceBuffer(h_grid, &pmeGpu->kernelParams->grid.d_realGrid,
                         0, pmeGpu->archSpecific->realGridSize,
                         pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
    pmeGpu->archSpecific->syncSpreadGridD2H.markEvent(pmeGpu->archSpecific->pmeStream);
}

void pme_gpu_copy_output_spread_atom_data(const PmeGpu *pmeGpu)
{
    const int alignment = pme_gpu_get_atoms_per_warp(pmeGpu);
    const size_t nAtomsPadded = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
    const size_t splinesCount = DIM * nAtomsPadded * pmeGpu->common->pme_order;
    auto *kernelParamsPtr = pmeGpu->kernelParams.get();
    copyFromDeviceBuffer(pmeGpu->staging.h_dtheta, &kernelParamsPtr->atoms.d_dtheta,
                         0, splinesCount,
                         pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
    copyFromDeviceBuffer(pmeGpu->staging.h_theta, &kernelParamsPtr->atoms.d_theta,
                         0, splinesCount,
                         pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
    copyFromDeviceBuffer(pmeGpu->staging.h_gridlineIndices, &kernelParamsPtr->atoms.d_gridlineIndices,
                         0, kernelParamsPtr->atoms.nAtoms * DIM,
                         pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
}

void pme_gpu_copy_input_gather_atom_data(const PmeGpu *pmeGpu)
{
    const int alignment = pme_gpu_get_atoms_per_warp(pmeGpu);
    const size_t nAtomsPadded = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
    const size_t splinesCount = DIM * nAtomsPadded * pmeGpu->common->pme_order;
    auto *kernelParamsPtr = pmeGpu->kernelParams.get();
    if (c_usePadding)
    {
        // TODO: could clear only the padding and not the whole thing, but this is a test-exclusive code anyway
        clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_gridlineIndices, 0,
                               pmeGpu->nAtomsAlloc * DIM, pmeGpu->archSpecific->pmeStream);
        clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_dtheta, 0,
                               pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM, pmeGpu->archSpecific->pmeStream);
        clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_theta, 0,
                               pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM, pmeGpu->archSpecific->pmeStream);
    }
    copyToDeviceBuffer(&kernelParamsPtr->atoms.d_dtheta, pmeGpu->staging.h_dtheta,
                       0, splinesCount,
                       pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
    copyToDeviceBuffer(&kernelParamsPtr->atoms.d_theta, pmeGpu->staging.h_theta,
                       0, splinesCount,
                       pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
    copyToDeviceBuffer(&kernelParamsPtr->atoms.d_gridlineIndices, pmeGpu->staging.h_gridlineIndices,
                       0, kernelParamsPtr->atoms.nAtoms * DIM,
                       pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
}

void pme_gpu_sync_spread_grid(const PmeGpu *pmeGpu)
{
    pmeGpu->archSpecific->syncSpreadGridD2H.waitForEvent();
}
void pme_gpu_init_internal(PmeGpu *pmeGpu)
{
    /* Allocate the target-specific structures */
    pmeGpu->archSpecific.reset(new PmeGpuSpecific());
    pmeGpu->kernelParams.reset(new PmeGpuKernelParams());

    pmeGpu->archSpecific->performOutOfPlaceFFT = true;
    /* This should give better performance, according to the cuFFT documentation.
     * The performance seems to be the same though.
     * TODO: PME could also try to pick up nice grid sizes (with factors of 2, 3, 5, 7).
     */

    // timing enabling - TODO put this in gpu_utils (even though generally this is just option handling?) and reuse in NB
    if (GMX_GPU == GMX_GPU_CUDA)
    {
        /* WARNING: CUDA timings are incorrect with multiple streams.
         * This is the main reason why they are disabled by default.
         */
        // TODO: Consider turning on by default when we can detect nr of streams.
        pmeGpu->archSpecific->useTiming = (getenv("GMX_ENABLE_GPU_TIMING") != nullptr);
    }
    else if (GMX_GPU == GMX_GPU_OPENCL)
    {
        pmeGpu->archSpecific->useTiming = (getenv("GMX_DISABLE_GPU_TIMING") == nullptr);
    }

    // TODO: this is just a convenient reuse because programHandle_ currently is in charge of creating context
    pmeGpu->archSpecific->context = pmeGpu->programHandle_->impl_->context;

#if GMX_GPU == GMX_GPU_CUDA
    // Prepare to use the device that this PME task was assigned earlier.
    CU_RET_ERR(cudaSetDevice(pmeGpu->deviceInfo->id), "Switching to PME CUDA device");
#endif

#if GMX_GPU == GMX_GPU_CUDA
    pmeGpu->maxGridWidthX = pmeGpu->deviceInfo->prop.maxGridSize[0];
#elif GMX_GPU == GMX_GPU_OPENCL
    // TODO we'll need work size checks for OpenCL too
#endif

    /* Creating a PME GPU stream:
     * - default high priority with CUDA
     * - no priorities implemented yet with OpenCL; see #2532
     */
#if GMX_GPU == GMX_GPU_CUDA
    cudaError_t stat;
    int highest_priority, lowest_priority;
    stat = cudaDeviceGetStreamPriorityRange(&lowest_priority, &highest_priority);
    CU_RET_ERR(stat, "PME cudaDeviceGetStreamPriorityRange failed");
    stat = cudaStreamCreateWithPriority(&pmeGpu->archSpecific->pmeStream,
                                        cudaStreamDefault, //cudaStreamNonBlocking,
                                        highest_priority);
    CU_RET_ERR(stat, "cudaStreamCreateWithPriority on the PME stream failed");
#elif GMX_GPU == GMX_GPU_OPENCL
    cl_command_queue_properties queueProperties = pmeGpu->archSpecific->useTiming ? CL_QUEUE_PROFILING_ENABLE : 0;
    cl_device_id device_id = pmeGpu->deviceInfo->ocl_gpu_id.ocl_device_id;
    cl_int clError;
    pmeGpu->archSpecific->pmeStream = clCreateCommandQueue(pmeGpu->archSpecific->context,
                                                           device_id, queueProperties, &clError);
    if (clError != CL_SUCCESS)
    {
        GMX_THROW(gmx::InternalError("Failed to create PME command queue"));
    }
#endif
}

void pme_gpu_destroy_specific(const PmeGpu *pmeGpu)
{
#if GMX_GPU == GMX_GPU_CUDA
    /* Destroy the CUDA stream */
    cudaError_t stat = cudaStreamDestroy(pmeGpu->archSpecific->pmeStream);
    CU_RET_ERR(stat, "PME cudaStreamDestroy error");
#elif GMX_GPU == GMX_GPU_OPENCL
    cl_int clError = clReleaseCommandQueue(pmeGpu->archSpecific->pmeStream);
    if (clError != CL_SUCCESS)
    {
        gmx_warning("Failed to destroy PME command queue");
    }
#endif
}
void pme_gpu_reinit_3dfft(const PmeGpu *pmeGpu)
{
    if (pme_gpu_performs_FFT(pmeGpu))
    {
        pmeGpu->archSpecific->fftSetup.resize(0);
        for (int i = 0; i < pmeGpu->common->ngrids; i++)
        {
            pmeGpu->archSpecific->fftSetup.push_back(std::unique_ptr<GpuParallel3dFft>(new GpuParallel3dFft(pmeGpu)));
        }
    }
}

void pme_gpu_destroy_3dfft(const PmeGpu *pmeGpu)
{
    pmeGpu->archSpecific->fftSetup.resize(0);
}
int getSplineParamFullIndex(int order, int splineIndex, int dimIndex, int atomIndex, int atomsPerWarp)
{
    if (order != 4)
    {
        throw order;
    }
    constexpr int fixedOrder = 4;
    GMX_UNUSED_VALUE(fixedOrder);

    const int atomWarpIndex = atomIndex % atomsPerWarp;
    const int warpIndex = atomIndex / atomsPerWarp;
    int indexBase, result;
    switch (atomsPerWarp)
    {
        case 2:
            indexBase = getSplineParamIndexBase<fixedOrder, 2>(warpIndex, atomWarpIndex);
            result = getSplineParamIndex<fixedOrder, 2>(indexBase, dimIndex, splineIndex);
            break;

        default:
            GMX_THROW(gmx::NotImplementedError(gmx::formatString("Test function call not unrolled for atomsPerWarp = %d in getSplineParamFullIndex", atomsPerWarp)));
    }
    return result;
}
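
// Hypothetical usage: getSplineParamFullIndex(4, 0, XX, 3, 2) returns the flat
// position of the first X-dimension spline value of atom 3 in the
// warp-interleaved GPU layout (2 atoms per warp), which the layout transform
// further below uses to shuttle values to/from the atom-major CPU layout.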
gmx::ArrayRef<gmx::RVec> pme_gpu_get_forces(PmeGpu *pmeGpu)
{
    return pmeGpu->staging.h_forces;
}

void pme_gpu_get_energy_virial(const PmeGpu *pmeGpu, real *energy, matrix virial)
{
    for (int j = 0; j < c_virialAndEnergyCount; j++)
    {
        GMX_ASSERT(std::isfinite(pmeGpu->staging.h_virialAndEnergy[j]), "PME GPU produces incorrect energy/virial.");
    }

    GMX_ASSERT(energy, "Invalid energy output pointer in PME GPU");
    unsigned int j = 0;
    virial[XX][XX] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
    virial[YY][YY] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
    virial[ZZ][ZZ] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
    virial[XX][YY] = virial[YY][XX] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
    virial[XX][ZZ] = virial[ZZ][XX] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
    virial[YY][ZZ] = virial[ZZ][YY] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
    *energy = 0.5f * pmeGpu->staging.h_virialAndEnergy[j++];
}
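
// The staging buffer layout implied by the indexing above: seven floats ordered
// as {XX, YY, ZZ, XY, XZ, YZ, energy}, with the virial components accumulated
// at 4x and the energy at 2x their final values, hence the 0.25f/0.5f factors.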
void pme_gpu_update_input_box(PmeGpu gmx_unused *pmeGpu,
                              const matrix gmx_unused box)
{
#if GMX_DOUBLE
    GMX_THROW(gmx::NotImplementedError("PME is implemented for single-precision only on GPU"));
#else
    matrix scaledBox;
    pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
    auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
    kernelParamsPtr->current.boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
    GMX_ASSERT(kernelParamsPtr->current.boxVolume != 0.0f, "Zero volume of the unit cell");
    matrix recipBox;
    gmx::invertBoxMatrix(scaledBox, recipBox);

    /* The GPU recipBox is transposed as compared to the CPU recipBox.
     * Spread uses matrix columns (while solve and gather use rows).
     * There is no particular reason for this; it might be further rethought/optimized for better access patterns.
     */
    const real newRecipBox[DIM][DIM] =
    {
        {recipBox[XX][XX], recipBox[YY][XX], recipBox[ZZ][XX]},
        {             0.0, recipBox[YY][YY], recipBox[ZZ][YY]},
        {             0.0,              0.0, recipBox[ZZ][ZZ]}
    };
    memcpy(kernelParamsPtr->current.recipBox, newRecipBox, sizeof(matrix));
#endif
}

/*! \brief \libinternal
 * (Re-)initializes all the PME GPU data related to the grid size and cut-off.
 *
 * \param[in] pmeGpu  The PME GPU structure.
 */
static void pme_gpu_reinit_grids(PmeGpu *pmeGpu)
{
    auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
    kernelParamsPtr->grid.ewaldFactor = (M_PI * M_PI) / (pmeGpu->common->ewaldcoeff_q * pmeGpu->common->ewaldcoeff_q);

    /* The grid size variants */
    for (int i = 0; i < DIM; i++)
    {
        kernelParamsPtr->grid.realGridSize[i] = pmeGpu->common->nk[i];
        kernelParamsPtr->grid.realGridSizeFP[i] = (float)kernelParamsPtr->grid.realGridSize[i];
        kernelParamsPtr->grid.realGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];

        // The complex grid currently uses no padding;
        // if it starts to do so, then another test should be added for that
        kernelParamsPtr->grid.complexGridSize[i] = kernelParamsPtr->grid.realGridSize[i];
        kernelParamsPtr->grid.complexGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
    }
    /* FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
    if (!pme_gpu_performs_FFT(pmeGpu))
    {
        // This allows for GPU spreading grid and CPU fftgrid to have the same layout, so that we can copy the data directly
        kernelParamsPtr->grid.realGridSizePadded[ZZ] = (kernelParamsPtr->grid.realGridSize[ZZ] / 2 + 1) * 2;
    }
    /* GPU FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
    kernelParamsPtr->grid.complexGridSize[ZZ] /= 2;
    kernelParamsPtr->grid.complexGridSize[ZZ]++;
    kernelParamsPtr->grid.complexGridSizePadded[ZZ] = kernelParamsPtr->grid.complexGridSize[ZZ];

    pme_gpu_realloc_and_copy_fract_shifts(pmeGpu);
    pme_gpu_realloc_and_copy_bspline_values(pmeGpu);
    pme_gpu_realloc_grids(pmeGpu);
    pme_gpu_reinit_3dfft(pmeGpu);
}
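
// Size illustration (hypothetical 64x64x64 grid): the complex grid becomes
// 64x64x33, and when the CPU does the FFT the padded real Z dimension is
// (64 / 2 + 1) * 2 = 66 so that the GPU spread grid matches the CPU fftgrid layout.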
/* Several GPU functions that refer to the CPU PME data live here.
 * We would like to keep these away from the GPU-framework specific code for clarity,
 * as well as compilation issues with MPI.
 */

/*! \brief \libinternal
 * Copies everything useful from the PME CPU to the PME GPU structure.
 * The goal is to minimize interaction with the PME CPU structure in the GPU code.
 *
 * \param[in] pme  The PME structure.
 */
static void pme_gpu_copy_common_data_from(const gmx_pme_t *pme)
{
    /* TODO: Consider refactoring the CPU PME code to use the same structure,
     * so that this function becomes 2 lines */
    PmeGpu *pmeGpu = pme->gpu;
    pmeGpu->common->ngrids = pme->ngrids;
    pmeGpu->common->epsilon_r = pme->epsilon_r;
    pmeGpu->common->ewaldcoeff_q = pme->ewaldcoeff_q;
    pmeGpu->common->nk[XX] = pme->nkx;
    pmeGpu->common->nk[YY] = pme->nky;
    pmeGpu->common->nk[ZZ] = pme->nkz;
    pmeGpu->common->pme_order = pme->pme_order;
    for (int i = 0; i < DIM; i++)
    {
        pmeGpu->common->bsp_mod[i].assign(pme->bsp_mod[i], pme->bsp_mod[i] + pmeGpu->common->nk[i]);
    }
    const int cellCount = c_pmeNeighborUnitcellCount;
    pmeGpu->common->fsh.resize(0);
    pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshx, pme->fshx + cellCount * pme->nkx);
    pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshy, pme->fshy + cellCount * pme->nky);
    pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshz, pme->fshz + cellCount * pme->nkz);
    pmeGpu->common->nn.resize(0);
    pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnx, pme->nnx + cellCount * pme->nkx);
    pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nny, pme->nny + cellCount * pme->nky);
    pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnz, pme->nnz + cellCount * pme->nkz);
    pmeGpu->common->runMode = pme->runMode;
    pmeGpu->common->boxScaler = pme->boxScaler;
}
/*! \libinternal \brief
 * Initializes the PME GPU data at the beginning of the run.
 * TODO: this should become PmeGpu::PmeGpu()
 *
 * \param[in,out] pme            The PME structure.
 * \param[in,out] gpuInfo        The GPU information structure.
 * \param[in]     pmeGpuProgram  The handle to the program/kernel data created outside (e.g. in unit tests/runner)
 */
static void pme_gpu_init(gmx_pme_t *pme,
                         gmx_device_info_t *gpuInfo,
                         PmeGpuProgramHandle pmeGpuProgram)
{
    pme->gpu = new PmeGpu();
    PmeGpu *pmeGpu = pme->gpu;
    changePinningPolicy(&pmeGpu->staging.h_forces, pme_get_pinning_policy());
    pmeGpu->common = std::shared_ptr<PmeShared>(new PmeShared());

    /* These settings are set here for the whole run; dynamic ones are set in pme_gpu_reinit() */
    /* A convenience variable. */
    pmeGpu->settings.useDecomposition = (pme->nnodes == 1);
    /* TODO: CPU gather with GPU spread is broken due to different theta/dtheta layout. */
    pmeGpu->settings.performGPUGather = true;

    pme_gpu_set_testing(pmeGpu, false);

    pmeGpu->deviceInfo = gpuInfo;
    GMX_ASSERT(pmeGpuProgram != nullptr, "GPU kernels must be already compiled");
    pmeGpu->programHandle_ = pmeGpuProgram;

    pme_gpu_init_internal(pmeGpu);
    pme_gpu_alloc_energy_virial(pmeGpu);

    pme_gpu_copy_common_data_from(pme);

    GMX_ASSERT(pmeGpu->common->epsilon_r != 0.0f, "PME GPU: bad electrostatic coefficient");

    auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
    kernelParamsPtr->constants.elFactor = ONE_4PI_EPS0 / pmeGpu->common->epsilon_r;
}
void pme_gpu_transform_spline_atom_data(const PmeGpu *pmeGpu, const pme_atomcomm_t *atc,
                                        PmeSplineDataType type, int dimIndex, PmeLayoutTransform transform)
{
    // The GPU atom spline data is laid out in a different way currently than the CPU one.
    // This function converts the data from GPU to CPU layout (in the host memory).
    // It is only intended for testing purposes so far.
    // Ideally we should use similar layouts on CPU and GPU if we care about mixed modes and their performance
    // (e.g. spreading on GPU, gathering on CPU).
    GMX_RELEASE_ASSERT(atc->nthread == 1, "Only the serial PME data layout is supported");
    const uintmax_t threadIndex = 0;
    const auto atomCount = pme_gpu_get_kernel_params_base_ptr(pmeGpu)->atoms.nAtoms;
    const auto atomsPerWarp = pme_gpu_get_atoms_per_warp(pmeGpu);
    const auto pmeOrder = pmeGpu->common->pme_order;

    real *cpuSplineBuffer;
    float *h_splineBuffer;
    switch (type)
    {
        case PmeSplineDataType::Values:
            cpuSplineBuffer = atc->spline[threadIndex].theta[dimIndex];
            h_splineBuffer = pmeGpu->staging.h_theta;
            break;

        case PmeSplineDataType::Derivatives:
            cpuSplineBuffer = atc->spline[threadIndex].dtheta[dimIndex];
            h_splineBuffer = pmeGpu->staging.h_dtheta;
            break;

        default:
            GMX_THROW(gmx::InternalError("Unknown spline data type"));
    }

    for (auto atomIndex = 0; atomIndex < atomCount; atomIndex++)
    {
        for (auto orderIndex = 0; orderIndex < pmeOrder; orderIndex++)
        {
            const auto gpuValueIndex = getSplineParamFullIndex(pmeOrder, orderIndex, dimIndex, atomIndex, atomsPerWarp);
            const auto cpuValueIndex = atomIndex * pmeOrder + orderIndex;
            GMX_ASSERT(cpuValueIndex < atomCount * pmeOrder, "Atom spline data index out of bounds (while transforming GPU data layout for host)");
            switch (transform)
            {
                case PmeLayoutTransform::GpuToHost:
                    cpuSplineBuffer[cpuValueIndex] = h_splineBuffer[gpuValueIndex];
                    break;

                case PmeLayoutTransform::HostToGpu:
                    h_splineBuffer[gpuValueIndex] = cpuSplineBuffer[cpuValueIndex];
                    break;

                default:
                    GMX_THROW(gmx::InternalError("Unknown layout transform"));
            }
        }
    }
}
void pme_gpu_get_real_grid_sizes(const PmeGpu *pmeGpu, gmx::IVec *gridSize, gmx::IVec *paddedGridSize)
{
    GMX_ASSERT(gridSize != nullptr, "");
    GMX_ASSERT(paddedGridSize != nullptr, "");
    GMX_ASSERT(pmeGpu != nullptr, "");
    auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
    for (int i = 0; i < DIM; i++)
    {
        (*gridSize)[i] = kernelParamsPtr->grid.realGridSize[i];
        (*paddedGridSize)[i] = kernelParamsPtr->grid.realGridSizePadded[i];
    }
}

void pme_gpu_reinit(gmx_pme_t *pme,
                    gmx_device_info_t *gpuInfo,
                    PmeGpuProgramHandle pmeGpuProgram)
{
    if (!pme_gpu_active(pme))
    {
        return;
    }

    if (!pme->gpu)
    {
        /* First-time initialization */
        pme_gpu_init(pme, gpuInfo, pmeGpuProgram);
    }
    else
    {
        /* After this call nothing in the GPU code should refer to the gmx_pme_t *pme itself - until the next pme_gpu_reinit */
        pme_gpu_copy_common_data_from(pme);
    }
    /* GPU FFT will only get used for a single rank. */
    pme->gpu->settings.performGPUFFT = (pme->gpu->common->runMode == PmeRunMode::GPU) && !pme_gpu_uses_dd(pme->gpu);
    pme->gpu->settings.performGPUSolve = (pme->gpu->common->runMode == PmeRunMode::GPU);

    /* Reinit active timers */
    pme_gpu_reinit_timings(pme->gpu);

    pme_gpu_reinit_grids(pme->gpu);
    // Note: if timing the reinit launch overhead becomes more relevant
    // (e.g. with regular PP-PME re-balancing), we should pass wcycle here.
    pme_gpu_reinit_computation(pme, nullptr);
    /* Clear the previous box - doesn't hurt, and forces the PME CPU recipbox
     * update for mixed mode on grid switch. TODO: use shared recipbox field.
     */
    std::memset(pme->gpu->common->previousBox, 0, sizeof(pme->gpu->common->previousBox));
}
void pme_gpu_destroy(PmeGpu *pmeGpu)
{
    /* Free lots of data */
    pme_gpu_free_energy_virial(pmeGpu);
    pme_gpu_free_bspline_values(pmeGpu);
    pme_gpu_free_forces(pmeGpu);
    pme_gpu_free_coordinates(pmeGpu);
    pme_gpu_free_coefficients(pmeGpu);
    pme_gpu_free_spline_data(pmeGpu);
    pme_gpu_free_grid_indices(pmeGpu);
    pme_gpu_free_fract_shifts(pmeGpu);
    pme_gpu_free_grids(pmeGpu);

    pme_gpu_destroy_3dfft(pmeGpu);

    /* Free the GPU-framework specific data last */
    pme_gpu_destroy_specific(pmeGpu);

    delete pmeGpu;
}

void pme_gpu_reinit_atoms(PmeGpu *pmeGpu, const int nAtoms, const real *charges)
{
    auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
    kernelParamsPtr->atoms.nAtoms = nAtoms;
    const int alignment = pme_gpu_get_atom_data_alignment(pmeGpu);
    pmeGpu->nAtomsPadded = ((nAtoms + alignment - 1) / alignment) * alignment;
    const int nAtomsAlloc = c_usePadding ? pmeGpu->nAtomsPadded : nAtoms;
    const bool haveToRealloc = (pmeGpu->nAtomsAlloc < nAtomsAlloc); /* This check might be redundant, but is logical */
    pmeGpu->nAtomsAlloc = nAtomsAlloc;

#if GMX_DOUBLE
    GMX_RELEASE_ASSERT(false, "Only single precision supported");
    GMX_UNUSED_VALUE(charges);
#else
    pme_gpu_realloc_and_copy_input_coefficients(pmeGpu, reinterpret_cast<const float *>(charges));
    /* Could also be checked for haveToRealloc, but the copy always needs to be performed */
#endif

    if (haveToRealloc)
    {
        pme_gpu_realloc_coordinates(pmeGpu);
        pme_gpu_realloc_forces(pmeGpu);
        pme_gpu_realloc_spline_data(pmeGpu);
        pme_gpu_realloc_grid_indices(pmeGpu);
    }
}
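
// Padding illustration: if PME_ATOM_DATA_ALIGNMENT were 32, then 1000 atoms
// would give nAtomsPadded = 1024; the 24 extra entries stay zeroed (charges and
// coordinates alike) so the padding atoms contribute nothing to spread/gather.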
void pme_gpu_3dfft(const PmeGpu *pmeGpu, gmx_fft_direction dir, int grid_index)
{
    int timerId = (dir == GMX_FFT_REAL_TO_COMPLEX) ? gtPME_FFT_R2C : gtPME_FFT_C2R;
    auto *timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timerId);
    pme_gpu_start_timing(pmeGpu, timerId);
    pmeGpu->archSpecific->fftSetup[grid_index]->perform3dFft(dir, timingEvent);
    pme_gpu_stop_timing(pmeGpu, timerId);
}

/*! \brief
 * Given possibly large \p blockCount, returns a compact 1D or 2D grid for kernel scheduling,
 * to minimize the number of unused blocks.
 */
std::pair<int, int> inline pmeGpuCreateGrid(const PmeGpu *pmeGpu, int blockCount)
{
    // How many maximum widths in X do we need (hopefully just one)
    const int minRowCount = (blockCount + pmeGpu->maxGridWidthX - 1) / pmeGpu->maxGridWidthX;
    // Trying to make things even
    const int colCount = (blockCount + minRowCount - 1) / minRowCount;
    GMX_ASSERT((colCount * minRowCount - blockCount) >= 0, "pmeGpuCreateGrid: totally wrong");
    GMX_ASSERT((colCount * minRowCount - blockCount) < minRowCount, "pmeGpuCreateGrid: excessive blocks");
    return std::pair<int, int>(colCount, minRowCount);
}
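
// Worked example (hypothetical numbers): blockCount = 100000 with
// maxGridWidthX = 65535 gives minRowCount = 2 and colCount = 50000,
// i.e. a 50000x2 grid with no unused blocks.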
void pme_gpu_spread(const PmeGpu *pmeGpu,
                    int gmx_unused gridIndex,
                    real *h_grid,
                    bool computeSplines,
                    bool spreadCharges)
{
    GMX_ASSERT(computeSplines || spreadCharges, "PME spline/spread kernel has invalid input (nothing to do)");
    const auto *kernelParamsPtr = pmeGpu->kernelParams.get();
    GMX_ASSERT(kernelParamsPtr->atoms.nAtoms > 0, "No atom data in PME GPU spread");

    const int maxThreadsPerBlock = c_spreadMaxWarpsPerBlock * pmeGpu->programHandle_->impl_->warpSize;
    // TODO take device limitations (CL_DEVICE_MAX_WORK_GROUP_SIZE) into account for all 3 kernels
    const int order = pmeGpu->common->pme_order;
    const int atomsPerBlock = maxThreadsPerBlock / PME_SPREADGATHER_THREADS_PER_ATOM;
    // TODO: pick smaller block size in runtime if needed
    // (e.g. on 660 Ti where 50% occupancy is ~25% faster than 100% occupancy with RNAse (~17.8k atoms))
    // If doing so, change atomsPerBlock in the kernels as well.
    // TODO: test varying block sizes on modern arch-s as well
    // TODO: also consider using cudaFuncSetCacheConfig() for preferring shared memory on older architectures
    // (for spline data mostly, together with varying PME_GPU_PARALLEL_SPLINE define)
    GMX_ASSERT(!c_usePadding || !(PME_ATOM_DATA_ALIGNMENT % atomsPerBlock), "inconsistent atom data padding vs. spreading block size");

    const int blockCount = pmeGpu->nAtomsPadded / atomsPerBlock;
    auto dimGrid = pmeGpuCreateGrid(pmeGpu, blockCount);

    KernelLaunchConfig config;
    config.blockSize[0] = config.blockSize[1] = order;
    config.blockSize[2] = atomsPerBlock;
    config.gridSize[0] = dimGrid.first;
    config.gridSize[1] = dimGrid.second;
    config.stream = pmeGpu->archSpecific->pmeStream;
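
    // Launch-geometry illustration (assuming 32-wide warps and a hypothetical
    // c_spreadMaxWarpsPerBlock of 8): maxThreadsPerBlock = 256, atomsPerBlock =
    // 256 / 16 = 16, so each block is order x order x atoms = 4 x 4 x 16 threads.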
    if (order != 4)
    {
        GMX_THROW(gmx::NotImplementedError("The code for pme_order != 4 was not implemented!"));
    }

    int timingId;
    PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
    if (computeSplines)
    {
        if (spreadCharges)
        {
            timingId = gtPME_SPLINEANDSPREAD;
            kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernel;
        }
        else
        {
            timingId = gtPME_SPLINE;
            kernelPtr = pmeGpu->programHandle_->impl_->splineKernel;
        }
    }
    else
    {
        timingId = gtPME_SPREAD;
        kernelPtr = pmeGpu->programHandle_->impl_->spreadKernel;
    }

    pme_gpu_start_timing(pmeGpu, timingId);
    auto *timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
    const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
    launchGpuKernel(kernelPtr, config, timingEvent, "PME spline/spread", kernelArgs);
    pme_gpu_stop_timing(pmeGpu, timingId);

    const bool copyBackGrid = spreadCharges && (pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_FFT(pmeGpu));
    if (copyBackGrid)
    {
        pme_gpu_copy_output_spread_grid(pmeGpu, h_grid);
    }
    const bool copyBackAtomData = computeSplines && (pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_gather(pmeGpu));
    if (copyBackAtomData)
    {
        pme_gpu_copy_output_spread_atom_data(pmeGpu);
    }
}

void pme_gpu_solve(const PmeGpu *pmeGpu, t_complex *h_grid,
                   GridOrdering gridOrdering, bool computeEnergyAndVirial)
{
    const bool copyInputAndOutputGrid = pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_FFT(pmeGpu);

    auto *kernelParamsPtr = pmeGpu->kernelParams.get();

    float *h_gridFloat = reinterpret_cast<float *>(h_grid);
    if (copyInputAndOutputGrid)
    {
        copyToDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, h_gridFloat,
                           0, pmeGpu->archSpecific->complexGridSize,
                           pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
    }

    int majorDim = -1, middleDim = -1, minorDim = -1;
    switch (gridOrdering)
    {
        case GridOrdering::YZX:
            majorDim = YY;
            middleDim = ZZ;
            minorDim = XX;
            break;

        case GridOrdering::XYZ:
            majorDim = XX;
            middleDim = YY;
            minorDim = ZZ;
            break;

        default:
            GMX_ASSERT(false, "Implement grid ordering here and below for the kernel launch");
    }

    const int maxThreadsPerBlock = c_solveMaxWarpsPerBlock * pmeGpu->programHandle_->impl_->warpSize;
    const int maxBlockSize = maxThreadsPerBlock;
    const int gridLineSize = pmeGpu->kernelParams->grid.complexGridSize[minorDim];
    const int gridLinesPerBlock = std::max(maxBlockSize / gridLineSize, 1);
    const int blocksPerGridLine = (gridLineSize + maxBlockSize - 1) / maxBlockSize;
    const int cellsPerBlock = gridLineSize * gridLinesPerBlock;
    const size_t warpSize = pmeGpu->programHandle_->impl_->warpSize;
    const int blockSize = (cellsPerBlock + warpSize - 1) / warpSize * warpSize;
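
    // Sizing illustration (hypothetical numbers): a 64-point minor dimension
    // gives gridLineSize = 33 complex cells; with maxBlockSize = 256 that packs
    // gridLinesPerBlock = 7, cellsPerBlock = 231, and blockSize rounds up to 256.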
    KernelLaunchConfig config;
    config.blockSize[0] = blockSize;
    config.gridSize[0] = blocksPerGridLine;
    // rounding up to full warps so that shuffle operations produce defined results
    config.gridSize[1] = (pmeGpu->kernelParams->grid.complexGridSize[middleDim] + gridLinesPerBlock - 1) / gridLinesPerBlock;
    config.gridSize[2] = pmeGpu->kernelParams->grid.complexGridSize[majorDim];
    config.stream = pmeGpu->archSpecific->pmeStream;

    int timingId = gtPME_SOLVE;
    PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
    if (gridOrdering == GridOrdering::YZX)
    {
        kernelPtr = computeEnergyAndVirial ?
            pmeGpu->programHandle_->impl_->solveYZXEnergyKernel :
            pmeGpu->programHandle_->impl_->solveYZXKernel;
    }
    else if (gridOrdering == GridOrdering::XYZ)
    {
        kernelPtr = computeEnergyAndVirial ?
            pmeGpu->programHandle_->impl_->solveXYZEnergyKernel :
            pmeGpu->programHandle_->impl_->solveXYZKernel;
    }

    pme_gpu_start_timing(pmeGpu, timingId);
    auto *timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
    const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
    launchGpuKernel(kernelPtr, config, timingEvent, "PME solve", kernelArgs);
    pme_gpu_stop_timing(pmeGpu, timingId);

    if (computeEnergyAndVirial)
    {
        copyFromDeviceBuffer(pmeGpu->staging.h_virialAndEnergy, &kernelParamsPtr->constants.d_virialAndEnergy,
                             0, c_virialAndEnergyCount,
                             pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
    }

    if (copyInputAndOutputGrid)
    {
        copyFromDeviceBuffer(h_gridFloat, &kernelParamsPtr->grid.d_fourierGrid,
                             0, pmeGpu->archSpecific->complexGridSize,
                             pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
    }
}

void pme_gpu_gather(PmeGpu *pmeGpu,
                    PmeForceOutputHandling forceTreatment,
                    const float *h_grid)
{
    /* Copying the input CPU forces for reduction */
    if (forceTreatment != PmeForceOutputHandling::Set)
    {
        pme_gpu_copy_input_forces(pmeGpu);
    }

    const int order = pmeGpu->common->pme_order;
    const auto *kernelParamsPtr = pmeGpu->kernelParams.get();

    if (!pme_gpu_performs_FFT(pmeGpu) || pme_gpu_is_testing(pmeGpu))
    {
        pme_gpu_copy_input_gather_grid(pmeGpu, const_cast<float *>(h_grid));
    }

    if (pme_gpu_is_testing(pmeGpu))
    {
        pme_gpu_copy_input_gather_atom_data(pmeGpu);
    }

    const int maxThreadsPerBlock = c_gatherMaxWarpsPerBlock * pmeGpu->programHandle_->impl_->warpSize;
    const int atomsPerBlock = maxThreadsPerBlock / PME_SPREADGATHER_THREADS_PER_ATOM;
    GMX_ASSERT(!c_usePadding || !(PME_ATOM_DATA_ALIGNMENT % atomsPerBlock), "inconsistent atom data padding vs. gathering block size");

    const int blockCount = pmeGpu->nAtomsPadded / atomsPerBlock;
    auto dimGrid = pmeGpuCreateGrid(pmeGpu, blockCount);

    KernelLaunchConfig config;
    config.blockSize[0] = config.blockSize[1] = order;
    config.blockSize[2] = atomsPerBlock;
    config.gridSize[0] = dimGrid.first;
    config.gridSize[1] = dimGrid.second;
    config.stream = pmeGpu->archSpecific->pmeStream;

    if (order != 4)
    {
        GMX_THROW(gmx::NotImplementedError("The code for pme_order != 4 was not implemented!"));
    }

    // TODO test different cache configs

    int timingId = gtPME_GATHER;
    // TODO design kernel selection getters and make PmeGpu a friend of PmeGpuProgramImpl
    PmeGpuProgramImpl::PmeKernelHandle kernelPtr = (forceTreatment == PmeForceOutputHandling::Set) ?
        pmeGpu->programHandle_->impl_->gatherKernel :
        pmeGpu->programHandle_->impl_->gatherReduceWithInputKernel;

    pme_gpu_start_timing(pmeGpu, timingId);
    auto *timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
    const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
    launchGpuKernel(kernelPtr, config, timingEvent, "PME gather", kernelArgs);
    pme_gpu_stop_timing(pmeGpu, timingId);

    pme_gpu_copy_output_forces(pmeGpu);
}