/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2016,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief This file contains internal function implementations
 * for performing the PME calculations on GPU.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 * \ingroup module_ewald
 */
#include "gmxpre.h"

#include "pme-gpu-internal.h"

#include "config.h"

#include <cmath>
#include <cstdint>
#include <cstring>

#include <list>
#include <string>

#include "gromacs/ewald/ewald-utils.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/math/invertmatrix.h"
#include "gromacs/math/units.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/stringutil.h"

#include "pme-grid.h"
#include "pme-internal.h"
/*! \libinternal \brief
 * Wrapper for getting a pointer to the plain C++ part of the GPU kernel parameters structure.
 *
 * \param[in] pmeGpu  The PME GPU structure.
 * \returns The pointer to the kernel parameters.
 */
static PmeGpuKernelParamsBase *pme_gpu_get_kernel_params_base_ptr(const PmeGpu *pmeGpu)
{
    // reinterpret_cast is needed because the derived CUDA structure is not known in this file
    auto *kernelParamsPtr = reinterpret_cast<PmeGpuKernelParamsBase *>(pmeGpu->kernelParams.get());
    return kernelParamsPtr;
}
gmx::ArrayRef<gmx::RVec> pme_gpu_get_forces(PmeGpu *pmeGpu)
{
    return pmeGpu->staging.h_forces;
}
void pme_gpu_get_energy_virial(const PmeGpu *pmeGpu, real *energy, matrix virial)
{
    for (int j = 0; j < c_virialAndEnergyCount; j++)
    {
        GMX_ASSERT(std::isfinite(pmeGpu->staging.h_virialAndEnergy[j]), "PME GPU produces incorrect energy/virial.");
    }

    GMX_ASSERT(energy, "Invalid energy output pointer in PME GPU");
    unsigned int j = 0;
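    /* The h_virialAndEnergy staging buffer packs the six independent virial
     * tensor components followed by the energy - c_virialAndEnergyCount values
     * in total. The 0.25f/0.5f prefactors below presumably compensate for
     * redundant accumulation in the GPU solve reduction; this is an assumption
     * inferred from the factors themselves, not documented elsewhere in this file.
     */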
    virial[XX][XX] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
    virial[YY][YY] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
    virial[ZZ][ZZ] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
    virial[XX][YY] = virial[YY][XX] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
    virial[XX][ZZ] = virial[ZZ][XX] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
    virial[YY][ZZ] = virial[ZZ][YY] = 0.25f * pmeGpu->staging.h_virialAndEnergy[j++];
    *energy        = 0.5f * pmeGpu->staging.h_virialAndEnergy[j++];
}
void pme_gpu_update_input_box(PmeGpu gmx_unused       *pmeGpu,
                              const matrix gmx_unused  box)
{
#if GMX_DOUBLE
    GMX_THROW(gmx::NotImplementedError("PME is implemented for single-precision only on GPU"));
#else
    matrix scaledBox;
    pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
    auto *kernelParamsPtr              = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
    kernelParamsPtr->current.boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
    GMX_ASSERT(kernelParamsPtr->current.boxVolume != 0.0f, "Zero volume of the unit cell");
    matrix recipBox;
    gmx::invertBoxMatrix(scaledBox, recipBox);

    /* The GPU recipBox is transposed as compared to the CPU recipBox.
     * Spread uses matrix columns (while solve and gather use rows).
     * There is no particular reason for this; it might be further rethought/optimized for better access patterns.
     */
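    /* Concretely: row d of newRecipBox below is column d of recipBox. The
     * explicit zeros rely on the GROMACS convention that the box matrix, and
     * hence recipBox, is lower-triangular, so its transpose is upper-triangular.
     */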
    const real newRecipBox[DIM][DIM] =
    {
        {recipBox[XX][XX], recipBox[YY][XX], recipBox[ZZ][XX]},
        {             0.0, recipBox[YY][YY], recipBox[ZZ][YY]},
        {             0.0,              0.0, recipBox[ZZ][ZZ]}
    };
    memcpy(kernelParamsPtr->current.recipBox, newRecipBox, sizeof(matrix));
#endif
}
/*! \brief \libinternal
 * The PME GPU reinitialization function that is called both at the end of any PME computation and on any load balancing.
 *
 * \param[in] pmeGpu  The PME GPU structure.
 */
static void pme_gpu_reinit_computation(const PmeGpu *pmeGpu)
{
    pme_gpu_clear_grids(pmeGpu);
    pme_gpu_clear_energy_virial(pmeGpu);
}
void pme_gpu_finish_computation(const PmeGpu *pmeGpu)
{
    // Synchronize the whole PME stream at once, including D2H result transfers.
    pme_gpu_synchronize(pmeGpu);

    pme_gpu_update_timings(pmeGpu);
    pme_gpu_reinit_computation(pmeGpu);
}
/*! \brief \libinternal
 * (Re-)initializes all the PME GPU data related to the grid size and cut-off.
 *
 * \param[in] pmeGpu  The PME GPU structure.
 */
static void pme_gpu_reinit_grids(PmeGpu *pmeGpu)
{
    auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
    kernelParamsPtr->grid.ewaldFactor = (M_PI * M_PI) / (pmeGpu->common->ewaldcoeff_q * pmeGpu->common->ewaldcoeff_q);
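    // ewaldFactor = pi^2 / beta^2 (with beta = ewaldcoeff_q); it enters the
    // reciprocal-space Gaussian exp(-pi^2 m^2 / beta^2) of the Ewald sum
    // evaluated by the solve stage.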
    /* The grid size variants */
    for (int i = 0; i < DIM; i++)
    {
        kernelParamsPtr->grid.realGridSize[i]       = pmeGpu->common->nk[i];
        kernelParamsPtr->grid.realGridSizeFP[i]     = static_cast<float>(kernelParamsPtr->grid.realGridSize[i]);
        kernelParamsPtr->grid.realGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];

        // The complex grid currently uses no padding;
        // if it starts to do so, then another test should be added for that
        kernelParamsPtr->grid.complexGridSize[i]       = kernelParamsPtr->grid.realGridSize[i];
        kernelParamsPtr->grid.complexGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
    }
    /* FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
    if (!pme_gpu_performs_FFT(pmeGpu))
    {
        // This allows the GPU spreading grid and the CPU fftgrid to have the same layout,
        // so that we can copy the data directly
        kernelParamsPtr->grid.realGridSizePadded[ZZ] = (kernelParamsPtr->grid.realGridSize[ZZ] / 2 + 1) * 2;
    }

    /* GPU FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
    kernelParamsPtr->grid.complexGridSize[ZZ] /= 2;
    kernelParamsPtr->grid.complexGridSize[ZZ]++;
    kernelParamsPtr->grid.complexGridSizePadded[ZZ] = kernelParamsPtr->grid.complexGridSize[ZZ];
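    // For example, with realGridSize[ZZ] == 64 the complex minor dimension becomes
    // 64 / 2 + 1 == 33 elements, and the mixed-mode padded real size above would be
    // 2 * 33 == 66 floats - the standard in-place R2C FFT layout.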
    pme_gpu_realloc_and_copy_fract_shifts(pmeGpu);
    pme_gpu_realloc_and_copy_bspline_values(pmeGpu);
    pme_gpu_realloc_grids(pmeGpu);
    pme_gpu_reinit_3dfft(pmeGpu);
}
/* Several GPU functions that refer to the CPU PME data live here.
 * We would like to keep these away from the GPU-framework specific code for clarity,
 * and to avoid compilation issues with MPI.
 */
/*! \brief \libinternal
 * Copies everything useful from the PME CPU to the PME GPU structure.
 * The goal is to minimize interaction with the PME CPU structure in the GPU code.
 *
 * \param[in] pme  The PME structure.
 */
static void pme_gpu_copy_common_data_from(const gmx_pme_t *pme)
{
    /* TODO: Consider refactoring the CPU PME code to use the same structure,
     * so that this function becomes 2 lines */
    PmeGpu *pmeGpu = pme->gpu;
    pmeGpu->common->ngrids        = pme->ngrids;
    pmeGpu->common->epsilon_r     = pme->epsilon_r;
    pmeGpu->common->ewaldcoeff_q  = pme->ewaldcoeff_q;
    pmeGpu->common->nk[XX]        = pme->nkx;
    pmeGpu->common->nk[YY]        = pme->nky;
    pmeGpu->common->nk[ZZ]        = pme->nkz;
    pmeGpu->common->pmegrid_n[XX] = pme->pmegrid_nx;
    pmeGpu->common->pmegrid_n[YY] = pme->pmegrid_ny;
    pmeGpu->common->pmegrid_n[ZZ] = pme->pmegrid_nz;
    pmeGpu->common->pme_order     = pme->pme_order;
    for (int i = 0; i < DIM; i++)
    {
        pmeGpu->common->bsp_mod[i].assign(pme->bsp_mod[i], pme->bsp_mod[i] + pmeGpu->common->nk[i]);
    }
    const int cellCount = c_pmeNeighborUnitcellCount;
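    /* The fractional shift (fsh) and grid index (nn) tables for x, y and z are
     * concatenated into single flat host vectors; each dimension contributes
     * cellCount * nk[dim] entries, in XX, YY, ZZ order.
     */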
    pmeGpu->common->fsh.resize(0);
    pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshx, pme->fshx + cellCount * pme->nkx);
    pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshy, pme->fshy + cellCount * pme->nky);
    pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshz, pme->fshz + cellCount * pme->nkz);
    pmeGpu->common->nn.resize(0);
    pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnx, pme->nnx + cellCount * pme->nkx);
    pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nny, pme->nny + cellCount * pme->nky);
    pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnz, pme->nnz + cellCount * pme->nkz);
    pmeGpu->common->runMode   = pme->runMode;
    pmeGpu->common->boxScaler = pme->boxScaler;
}
/*! \brief \libinternal
 * Checks whether PME with the given inputs can run on a GPU.
 *
 * \param[in]  pme    The PME structure.
 * \param[out] error  The error message if the input is not supported on GPU.
 * \returns True if this PME input is possible to run on GPU, false otherwise.
 */
static bool pme_gpu_check_restrictions(const gmx_pme_t *pme, std::string *error)
{
    std::list<std::string> errorReasons;
    if (pme->nnodes != 1)
    {
        errorReasons.push_back("PME decomposition");
    }
    if (pme->pme_order != 4)
    {
        errorReasons.push_back("interpolation orders other than 4");
    }
    if (pme->bFEP)
    {
        errorReasons.push_back("free energy calculations (multiple grids)");
    }
    if (pme->doLJ)
    {
        errorReasons.push_back("Lennard-Jones PME");
    }
#if GMX_DOUBLE
    {
        errorReasons.push_back("double precision");
    }
#endif
#if GMX_GPU != GMX_GPU_CUDA
    {
        errorReasons.push_back("non-CUDA build of GROMACS");
    }
#endif

    bool inputSupported = errorReasons.empty();
    if (!inputSupported && error)
    {
        std::string regressionTestMarker = "PME GPU does not support";
        // this prefix is tested for in the regression tests script gmxtest.pl
        *error = regressionTestMarker + ": " + gmx::joinStrings(errorReasons, "; ") + ".";
    }
    return inputSupported;
}
/*! \libinternal \brief
 * Initializes the PME GPU data at the beginning of the run.
 *
 * \param[in,out] pme      The PME structure.
 * \param[in,out] gpuInfo  The GPU information structure.
 */
static void pme_gpu_init(gmx_pme_t *pme, gmx_device_info_t *gpuInfo)
{
    std::string errorString;
    bool        canRunOnGpu = pme_gpu_check_restrictions(pme, &errorString);
    if (!canRunOnGpu)
    {
        GMX_THROW(gmx::NotImplementedError(errorString));
    }

    pme->gpu       = new PmeGpu();
    PmeGpu *pmeGpu = pme->gpu;
    changePinningPolicy(&pmeGpu->staging.h_forces, gmx::PinningPolicy::CanBePinned);
    pmeGpu->common = std::make_shared<PmeShared>();
    /* These settings are set here for the whole run; dynamic ones are set in pme_gpu_reinit() */
    /* A convenience variable. */
    pmeGpu->settings.useDecomposition = (pme->nnodes == 1);
    /* TODO: CPU gather with GPU spread is broken due to different theta/dtheta layout. */
    pmeGpu->settings.performGPUGather = true;

    pme_gpu_set_testing(pmeGpu, false);

    pmeGpu->deviceInfo = gpuInfo;

    pme_gpu_init_internal(pmeGpu);
    pme_gpu_init_sync_events(pmeGpu);
    pme_gpu_alloc_energy_virial(pmeGpu);

    pme_gpu_copy_common_data_from(pme);

    GMX_ASSERT(pmeGpu->common->epsilon_r != 0.0f, "PME GPU: bad electrostatic coefficient");

    auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
    kernelParamsPtr->constants.elFactor = ONE_4PI_EPS0 / pmeGpu->common->epsilon_r;
}
void pme_gpu_transform_spline_atom_data(const PmeGpu *pmeGpu, const pme_atomcomm_t *atc,
                                        PmeSplineDataType type, int dimIndex, PmeLayoutTransform transform)
{
    // The GPU atom spline data is currently laid out differently from the CPU data.
    // This function converts the data between the GPU and CPU layouts (in host memory).
    // It is only intended for testing purposes so far.
    // Ideally we should use similar layouts on CPU and GPU if we care about mixed modes and their performance
    // (e.g. spreading on GPU, gathering on CPU).
    GMX_RELEASE_ASSERT(atc->nthread == 1, "Only the serial PME data layout is supported");
    const uintmax_t threadIndex  = 0;
    const auto      atomCount    = pme_gpu_get_kernel_params_base_ptr(pmeGpu)->atoms.nAtoms;
    const auto      atomsPerWarp = pme_gpu_get_atoms_per_warp(pmeGpu);
    const auto      pmeOrder     = pmeGpu->common->pme_order;

    real  *cpuSplineBuffer;
    float *h_splineBuffer;
    switch (type)
    {
        case PmeSplineDataType::Values:
            cpuSplineBuffer = atc->spline[threadIndex].theta[dimIndex];
            h_splineBuffer  = pmeGpu->staging.h_theta;
            break;

        case PmeSplineDataType::Derivatives:
            cpuSplineBuffer = atc->spline[threadIndex].dtheta[dimIndex];
            h_splineBuffer  = pmeGpu->staging.h_dtheta;
            break;

        default:
            GMX_THROW(gmx::InternalError("Unknown spline data type"));
    }
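    /* A sketch of the index math below (the values are assumptions chosen for
     * illustration only; the real atomsPerWarp comes from
     * pme_gpu_get_atoms_per_warp()): with pmeOrder = 4 and atomsPerWarp = 2,
     * atom 5 has warpIndex = 2 and atomWarpIndex = 1, so its order-0 x spline
     * value sits at ((4 * 2 + 0) * DIM + XX) * 2 + 1 = 49 in the GPU buffer,
     * versus 5 * 4 + 0 = 20 in the per-atom-contiguous CPU buffer.
     */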
    for (auto atomIndex = 0; atomIndex < atomCount; atomIndex++)
    {
        auto atomWarpIndex = atomIndex % atomsPerWarp;
        auto warpIndex     = atomIndex / atomsPerWarp;
        for (auto orderIndex = 0; orderIndex < pmeOrder; orderIndex++)
        {
            const auto gpuValueIndex = ((pmeOrder * warpIndex + orderIndex) * DIM + dimIndex) * atomsPerWarp + atomWarpIndex;
            const auto cpuValueIndex = atomIndex * pmeOrder + orderIndex;
            GMX_ASSERT(cpuValueIndex < atomCount * pmeOrder, "Atom spline data index out of bounds (while transforming GPU data layout for host)");
            switch (transform)
            {
                case PmeLayoutTransform::GpuToHost:
                    cpuSplineBuffer[cpuValueIndex] = h_splineBuffer[gpuValueIndex];
                    break;

                case PmeLayoutTransform::HostToGpu:
                    h_splineBuffer[gpuValueIndex] = cpuSplineBuffer[cpuValueIndex];
                    break;

                default:
                    GMX_THROW(gmx::InternalError("Unknown layout transform"));
            }
        }
    }
}
void pme_gpu_get_real_grid_sizes(const PmeGpu *pmeGpu, gmx::IVec *gridSize, gmx::IVec *paddedGridSize)
{
    GMX_ASSERT(gridSize != nullptr, "Invalid grid size output pointer");
    GMX_ASSERT(paddedGridSize != nullptr, "Invalid padded grid size output pointer");
    GMX_ASSERT(pmeGpu != nullptr, "Invalid PME GPU structure pointer");
    auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
    for (int i = 0; i < DIM; i++)
    {
        (*gridSize)[i]       = kernelParamsPtr->grid.realGridSize[i];
        (*paddedGridSize)[i] = kernelParamsPtr->grid.realGridSizePadded[i];
    }
}
void pme_gpu_reinit(gmx_pme_t *pme, gmx_device_info_t *gpuInfo)
{
    if (!pme_gpu_active(pme))
    {
        return;
    }

    if (!pme->gpu)
    {
        /* First-time initialization */
        pme_gpu_init(pme, gpuInfo);
    }
    else
    {
        /* After this call, nothing in the GPU code should refer to the gmx_pme_t *pme itself, until the next pme_gpu_reinit */
        pme_gpu_copy_common_data_from(pme);
    }
    /* GPU FFT will only get used for a single MPI rank. */
    pme->gpu->settings.performGPUFFT   = (pme->gpu->common->runMode == PmeRunMode::GPU) && !pme_gpu_uses_dd(pme->gpu);
    pme->gpu->settings.performGPUSolve = (pme->gpu->common->runMode == PmeRunMode::GPU);
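    // Hence in mixed mode (PmeRunMode::Mixed - presumably spread/gather on the GPU
    // with FFT and solve staying on the CPU) both flags remain false.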
    /* Reinit active timers */
    pme_gpu_reinit_timings(pme->gpu);

    pme_gpu_reinit_grids(pme->gpu);
    pme_gpu_reinit_computation(pme->gpu);
    /* Clear the previous box - it doesn't hurt, and it forces the PME CPU recipbox
     * update for mixed mode on a grid switch. TODO: use a shared recipbox field.
     */
    std::memset(pme->gpu->common->previousBox, 0, sizeof(pme->gpu->common->previousBox));
}
void pme_gpu_destroy(PmeGpu *pmeGpu)
{
    /* Free lots of data */
    pme_gpu_free_energy_virial(pmeGpu);
    pme_gpu_free_bspline_values(pmeGpu);
    pme_gpu_free_forces(pmeGpu);
    pme_gpu_free_coordinates(pmeGpu);
    pme_gpu_free_coefficients(pmeGpu);
    pme_gpu_free_spline_data(pmeGpu);
    pme_gpu_free_grid_indices(pmeGpu);
    pme_gpu_free_fract_shifts(pmeGpu);
    pme_gpu_free_grids(pmeGpu);

    pme_gpu_destroy_3dfft(pmeGpu);
    pme_gpu_destroy_sync_events(pmeGpu);

    /* Free the GPU-framework specific data last */
    pme_gpu_destroy_specific(pmeGpu);

    delete pmeGpu;
}
void pme_gpu_reinit_atoms(PmeGpu *pmeGpu, const int nAtoms, const real *charges)
{
    auto     *kernelParamsPtr     = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
    kernelParamsPtr->atoms.nAtoms = nAtoms;
    const int alignment           = pme_gpu_get_atom_data_alignment(pmeGpu);
    pmeGpu->nAtomsPadded          = ((nAtoms + alignment - 1) / alignment) * alignment;
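    // The standard round-up-to-multiple formula: e.g., if the alignment were 8
    // (the actual value comes from pme_gpu_get_atom_data_alignment()), then
    // nAtoms = 13 would be padded to ((13 + 7) / 8) * 8 = 16.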
    const int  nAtomsAlloc   = c_usePadding ? pmeGpu->nAtomsPadded : nAtoms;
    const bool haveToRealloc = (pmeGpu->nAtomsAlloc < nAtomsAlloc); /* This check might be redundant, but is logical */
    pmeGpu->nAtomsAlloc      = nAtomsAlloc;
#if GMX_DOUBLE
    GMX_RELEASE_ASSERT(false, "Only single precision supported");
    GMX_UNUSED_VALUE(charges);
#else
    pme_gpu_realloc_and_copy_input_coefficients(pmeGpu, reinterpret_cast<const float *>(charges));
    /* Could also be checked for haveToRealloc, but the copy always needs to be performed */
#endif
    if (haveToRealloc)
    {
        pme_gpu_realloc_coordinates(pmeGpu);
        pme_gpu_realloc_forces(pmeGpu);
        pme_gpu_realloc_spline_data(pmeGpu);
        pme_gpu_realloc_grid_indices(pmeGpu);
    }
}