/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2016,2017,2018,2019,2020 by the GROMACS development team.
 * Copyright (c) 2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief
 * Implements common routines for PME tests.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 * \ingroup module_ewald
 */
#include "pmetestcommon.h"

#include <algorithm> // for std::copy
#include <cstring>   // for memcpy/memset

#include "gromacs/domdec/domdec.h"
#include "gromacs/ewald/pme_gather.h"
#include "gromacs/ewald/pme_gpu_calculate_splines.h"
#include "gromacs/ewald/pme_gpu_constants.h"
#include "gromacs/ewald/pme_gpu_internal.h"
#include "gromacs/ewald/pme_gpu_staging.h"
#include "gromacs/ewald/pme_grid.h"
#include "gromacs/ewald/pme_internal.h"
#include "gromacs/ewald/pme_redistribute.h"
#include "gromacs/ewald/pme_solve.h"
#include "gromacs/ewald/pme_spread.h"
#include "gromacs/fft/parallel_3dfft.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/hardware/device_management.h"
#include "gromacs/math/invertmatrix.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/stringutil.h"

#include "testutils/test_hardware_environment.h"
#include "testutils/testasserts.h"
bool pmeSupportsInputForMode(const gmx_hw_info_t& hwinfo, const t_inputrec* inputRec, CodePath mode)
{
    bool implemented;
    switch (mode)
    {
        case CodePath::CPU: implemented = true; break;

        case CodePath::GPU:
            implemented = (pme_gpu_supports_build(nullptr) && pme_gpu_supports_hardware(hwinfo, nullptr)
                           && pme_gpu_supports_input(*inputRec, nullptr));
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return implemented;
}

uint64_t getSplineModuliDoublePrecisionUlps(int splineOrder)
{
    /* Arbitrary ulp tolerance for sine/cosine implementation. It's
     * hard to know what to pick without testing lots of
     * implementations. */
    const uint64_t sineUlps = 10;
    return 4 * (splineOrder - 2) + 2 * sineUlps * splineOrder;
}
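/* Worked example (illustration only): for the default PME order of 4, the
 * tolerance above evaluates to 4 * (4 - 2) + 2 * 10 * 4 = 88 ulps. */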
//! PME initialization
PmeSafePointer pmeInitWrapper(const t_inputrec*    inputRec,
                              const CodePath       mode,
                              const DeviceContext* deviceContext,
                              const DeviceStream*  deviceStream,
                              const PmeGpuProgram* pmeGpuProgram,
                              const Matrix3x3&     box,
                              const real           ewaldCoeff_q,
                              const real           ewaldCoeff_lj)
{
    const MDLogger dummyLogger;
    const auto     runMode       = (mode == CodePath::CPU) ? PmeRunMode::CPU : PmeRunMode::Mixed;
    t_commrec      dummyCommrec  = { 0 };
    NumPmeDomains  numPmeDomains = { 1, 1 };
    gmx_pme_t*     pmeDataRaw = gmx_pme_init(&dummyCommrec, numPmeDomains, inputRec, false, false, true,
                                         ewaldCoeff_q, ewaldCoeff_lj, 1, runMode, nullptr,
                                         deviceContext, deviceStream, pmeGpuProgram, dummyLogger);
    PmeSafePointer pme(pmeDataRaw); // taking ownership

    // TODO get rid of this with proper matrix type
    matrix boxTemp;
    for (int i = 0; i < DIM; i++)
    {
        for (int j = 0; j < DIM; j++)
        {
            boxTemp[i][j] = box[i * DIM + j];
        }
    }
    const char* boxError = check_box(PbcType::Unset, boxTemp);
    GMX_RELEASE_ASSERT(boxError == nullptr, boxError);

    switch (mode)
    {
        case CodePath::CPU: invertBoxMatrix(boxTemp, pme->recipbox); break;

        case CodePath::GPU:
            pme_gpu_set_testing(pme->gpu, true);
            pme_gpu_update_input_box(pme->gpu, boxTemp);
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }

    return pme;
}

PmeSafePointer pmeInitEmpty(const t_inputrec* inputRec)
{
    const Matrix3x3 defaultBox = { { 1.0F, 0.0F, 0.0F, 0.0F, 1.0F, 0.0F, 0.0F, 0.0F, 1.0F } };
    return pmeInitWrapper(inputRec, CodePath::CPU, nullptr, nullptr, nullptr, defaultBox, 0.0F, 0.0F);
}
//! Make a GPU state-propagator manager
std::unique_ptr<StatePropagatorDataGpu> makeStatePropagatorDataGpu(const gmx_pme_t&     pme,
                                                                   const DeviceContext* deviceContext,
                                                                   const DeviceStream*  deviceStream)
{
    // TODO: Pin the host buffer and use async memory copies
    // TODO: A special constructor for PME-only ranks / PME tests is used here. There should be
    // a mechanism to prevent the other constructor from being used here.
    return std::make_unique<StatePropagatorDataGpu>(deviceStream, *deviceContext, GpuApiCallBehavior::Sync,
                                                    pme_gpu_get_block_size(&pme), nullptr);
}
//! PME initialization with atom data
void pmeInitAtoms(gmx_pme_t*               pme,
                  StatePropagatorDataGpu*  stateGpu,
                  const CodePath           mode,
                  const CoordinatesVector& coordinates,
                  const ChargesVector&     charges)
{
    const index  atomCount = coordinates.size();
    GMX_RELEASE_ASSERT(atomCount == charges.ssize(), "Mismatch in atom data");
    PmeAtomComm* atc = nullptr;

    switch (mode)
    {
        case CodePath::CPU:
            atc              = &(pme->atc[0]);
            atc->x           = coordinates;
            atc->coefficient = charges;
            gmx_pme_reinit_atoms(pme, atomCount, charges.data(), nullptr);
            /* With decomposition there would be more boilerplate atc code here, e.g. do_redist_pos_coeffs */
            break;

        case CodePath::GPU:
            // TODO: Avoid use of atc in the GPU code path
            atc = &(pme->atc[0]);
            // We need to set atc->n so that the tests can retrieve the atom count
            atc->setNumAtoms(atomCount);
            gmx_pme_reinit_atoms(pme, atomCount, charges.data(), nullptr);

            stateGpu->reinit(atomCount, atomCount);
            stateGpu->copyCoordinatesToGpu(arrayRefFromArray(coordinates.data(), coordinates.size()),
                                           gmx::AtomLocality::All);
            pme_gpu_set_kernelparam_coordinates(pme->gpu, stateGpu->getCoordinates());
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
//! Getting local PME real grid pointer for test I/O
static real* pmeGetRealGridInternal(const gmx_pme_t* pme)
{
    const size_t gridIndex = 0;
    return pme->fftgrid[gridIndex];
}

//! Getting local PME real grid dimensions
static void pmeGetRealGridSizesInternal(const gmx_pme_t* pme,
                                        CodePath         mode,
                                        IVec&            gridSize,       //NOLINT(google-runtime-references)
                                        IVec&            paddedGridSize) //NOLINT(google-runtime-references)
{
    const size_t gridIndex = 0;
    IVec         gridOffsetUnused;
    switch (mode)
    {
        case CodePath::CPU:
            gmx_parallel_3dfft_real_limits(pme->pfft_setup[gridIndex], gridSize, gridOffsetUnused,
                                           paddedGridSize);
            break;

        case CodePath::GPU:
            pme_gpu_get_real_grid_sizes(pme->gpu, &gridSize, &paddedGridSize);
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}

//! Getting local PME complex grid pointer for test I/O
static t_complex* pmeGetComplexGridInternal(const gmx_pme_t* pme)
{
    const size_t gridIndex = 0;
    return pme->cfftgrid[gridIndex];
}

//! Getting local PME complex grid dimensions
static void pmeGetComplexGridSizesInternal(const gmx_pme_t* pme,
                                           IVec&            gridSize,       //NOLINT(google-runtime-references)
                                           IVec&            paddedGridSize) //NOLINT(google-runtime-references)
{
    const size_t gridIndex = 0;
    IVec         gridOffsetUnused, complexOrderUnused;
    gmx_parallel_3dfft_complex_limits(pme->pfft_setup[gridIndex], complexOrderUnused, gridSize,
                                      gridOffsetUnused, paddedGridSize); // TODO: what about YZX ordering?
}
//! Getting the PME grid memory buffer and its sizes - template definition
template<typename ValueType>
static void pmeGetGridAndSizesInternal(const gmx_pme_t* /*unused*/,
                                       CodePath /*unused*/,
                                       ValueType*& /*unused*/, //NOLINT(google-runtime-references)
                                       IVec& /*unused*/,       //NOLINT(google-runtime-references)
                                       IVec& /*unused*/)       //NOLINT(google-runtime-references)
{
    GMX_THROW(InternalError("Deleted function call"));
    // explicitly deleting the general template does not compile in clang/icc, see https://llvm.org/bugs/show_bug.cgi?id=17537
}

//! Getting the PME real grid memory buffer and its sizes
template<>
void pmeGetGridAndSizesInternal<real>(const gmx_pme_t* pme, CodePath mode, real*& grid, IVec& gridSize, IVec& paddedGridSize)
{
    grid = pmeGetRealGridInternal(pme);
    pmeGetRealGridSizesInternal(pme, mode, gridSize, paddedGridSize);
}

//! Getting the PME complex grid memory buffer and its sizes
template<>
void pmeGetGridAndSizesInternal<t_complex>(const gmx_pme_t* pme,
                                           CodePath /*unused*/,
                                           t_complex*&      grid,
                                           IVec&            gridSize,
                                           IVec&            paddedGridSize)
{
    grid = pmeGetComplexGridInternal(pme);
    pmeGetComplexGridSizesInternal(pme, gridSize, paddedGridSize);
}
//! PME spline calculation and charge spreading
void pmePerformSplineAndSpread(gmx_pme_t* pme,
                               CodePath   mode, // TODO const qualifiers elsewhere
                               bool       computeSplines,
                               bool       spreadCharges)
{
    GMX_RELEASE_ASSERT(pme != nullptr, "PME data is not initialized");
    PmeAtomComm* atc                          = &(pme->atc[0]);
    const size_t gridIndex                    = 0;
    const bool   computeSplinesForZeroCharges = true;
    real**       fftgrid                      = spreadCharges ? pme->fftgrid : nullptr;
    real*        pmegrid                      = pme->pmegrid[gridIndex].grid.grid;

    switch (mode)
    {
        case CodePath::CPU:
            spread_on_grid(pme, atc, &pme->pmegrid[gridIndex], computeSplines, spreadCharges,
                           fftgrid != nullptr ? fftgrid[gridIndex] : nullptr,
                           computeSplinesForZeroCharges, gridIndex);
            if (spreadCharges && !pme->bUseThreads)
            {
                wrap_periodic_pmegrid(pme, pmegrid);
                copy_pmegrid_to_fftgrid(
                        pme, pmegrid, fftgrid != nullptr ? fftgrid[gridIndex] : nullptr, gridIndex);
            }
            break;

/* The compiler will complain about passing fftgrid (converting double ** to float **) if using
 * double precision. GPUs are not used with double precision anyhow. */
#if !GMX_DOUBLE
        case CodePath::GPU:
        {
            const real lambdaQ = 1.0;
            // no synchronization needed as x is transferred in the PME stream
            GpuEventSynchronizer* xReadyOnDevice = nullptr;
            pme_gpu_spread(pme->gpu, xReadyOnDevice, fftgrid, computeSplines, spreadCharges, lambdaQ);
        }
        break;
#endif

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
//! Getting the internal spline data buffer pointer
static real* pmeGetSplineDataInternal(const gmx_pme_t* pme, PmeSplineDataType type, int dimIndex)
{
    GMX_ASSERT((0 <= dimIndex) && (dimIndex < DIM), "Invalid dimension index");
    const PmeAtomComm* atc          = &(pme->atc[0]);
    const size_t       threadIndex  = 0;
    real*              splineBuffer = nullptr;
    switch (type)
    {
        case PmeSplineDataType::Values:
            splineBuffer = atc->spline[threadIndex].theta.coefficients[dimIndex];
            break;

        case PmeSplineDataType::Derivatives:
            splineBuffer = atc->spline[threadIndex].dtheta.coefficients[dimIndex];
            break;

        default: GMX_THROW(InternalError("Unknown spline data type"));
    }
    return splineBuffer;
}
void pmePerformSolve(const gmx_pme_t*  pme,
                     CodePath          mode,
                     PmeSolveAlgorithm method,
                     real              cellVolume,
                     GridOrdering      gridOrdering,
                     bool              computeEnergyAndVirial)
{
    t_complex*   h_grid              = pmeGetComplexGridInternal(pme);
    const bool   useLorentzBerthelot = false;
    const size_t threadIndex         = 0;
    const size_t gridIndex           = 0;
    switch (mode)
    {
        case CodePath::CPU:
            if (gridOrdering != GridOrdering::YZX)
            {
                GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            switch (method)
            {
                case PmeSolveAlgorithm::Coulomb:
                    solve_pme_yzx(pme, h_grid, cellVolume, computeEnergyAndVirial, pme->nthread, threadIndex);
                    break;

                case PmeSolveAlgorithm::LennardJones:
                    solve_pme_lj_yzx(pme, &h_grid, useLorentzBerthelot, cellVolume,
                                     computeEnergyAndVirial, pme->nthread, threadIndex);
                    break;

                default: GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            break;

        case CodePath::GPU:
            switch (method)
            {
                case PmeSolveAlgorithm::Coulomb:
                    pme_gpu_solve(pme->gpu, gridIndex, h_grid, gridOrdering, computeEnergyAndVirial);
                    break;

                default: GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
//! PME force gathering
void pmePerformGather(gmx_pme_t* pme, CodePath mode, ForcesVector& forces)
{
    PmeAtomComm* atc       = &(pme->atc[0]);
    const index  atomCount = atc->numAtoms();
    GMX_RELEASE_ASSERT(forces.ssize() == atomCount, "Invalid force buffer size");
    const real   scale       = 1.0;
    const size_t threadIndex = 0;
    const size_t gridIndex   = 0;
    real*        pmegrid     = pme->pmegrid[gridIndex].grid.grid;
    real**       fftgrid     = pme->fftgrid;

    switch (mode)
    {
        case CodePath::CPU:
            if (atc->nthread == 1)
            {
                // something which is normally done in serial spline computation (make_thread_local_ind())
                atc->spline[threadIndex].n = atomCount;
            }
            copy_fftgrid_to_pmegrid(pme, fftgrid[gridIndex], pmegrid, gridIndex, pme->nthread, threadIndex);
            unwrap_periodic_pmegrid(pme, pmegrid);
            gather_f_bsplines(pme, pmegrid, true, atc, &atc->spline[threadIndex], scale);
            break;

/* The compiler will complain about passing fftgrid (converting double ** to float **) if using
 * double precision. GPUs are not used with double precision anyhow. */
#if !GMX_DOUBLE
        case CodePath::GPU:
        {
            // Variable initialization needs a non-switch scope
            const bool computeEnergyAndVirial = false;
            const real lambdaQ                = 1.0;
            PmeOutput  output = pme_gpu_getOutput(*pme, computeEnergyAndVirial, lambdaQ);
            GMX_ASSERT(forces.size() == output.forces_.size(),
                       "Size of force buffers did not match");
            pme_gpu_gather(pme->gpu, fftgrid, lambdaQ);
            std::copy(std::begin(output.forces_), std::end(output.forces_), std::begin(forces));
        }
        break;
#endif

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
//! PME test finalization before fetching the outputs
void pmeFinalizeTest(const gmx_pme_t* pme, CodePath mode)
{
    switch (mode)
    {
        case CodePath::CPU: break;

        case CodePath::GPU: pme_gpu_synchronize(pme->gpu); break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
//! A binary enum for spline data layout transformation
enum class PmeLayoutTransform
{
    GpuToHost,
    HostToGpu
};

/*! \brief Gets a unique index to an element in a spline parameter buffer.
 *
 * These theta/dtheta buffers are laid out for GPU spread/gather
 * kernels. The index is relative to the execution block, in the half-open
 * range [0, atomsPerBlock * order * DIM).
 *
 * This is a wrapper, only used in unit tests.
 * \param[in] order        PME order
 * \param[in] splineIndex  Spline contribution index (from 0 to \p order - 1)
 * \param[in] dimIndex     Dimension index (from 0 to 2)
 * \param[in] atomIndex    Atom index within the block.
 * \param[in] atomsPerWarp Number of atoms processed by a warp.
 *
 * \returns Index into the theta or dtheta array using GPU layout.
 */
static int getSplineParamFullIndex(int order, int splineIndex, int dimIndex, int atomIndex, int atomsPerWarp)
{
    if (order != c_pmeGpuOrder)
    {
        throw order;
    }
    constexpr int fixedOrder = c_pmeGpuOrder;
    GMX_UNUSED_VALUE(fixedOrder);

    const int atomWarpIndex = atomIndex % atomsPerWarp;
    const int warpIndex     = atomIndex / atomsPerWarp;
    int       indexBase, result;
    switch (atomsPerWarp)
    {
        case 1:
            indexBase = getSplineParamIndexBase<fixedOrder, 1>(warpIndex, atomWarpIndex);
            result    = getSplineParamIndex<fixedOrder, 1>(indexBase, dimIndex, splineIndex);
            break;

        case 2:
            indexBase = getSplineParamIndexBase<fixedOrder, 2>(warpIndex, atomWarpIndex);
            result    = getSplineParamIndex<fixedOrder, 2>(indexBase, dimIndex, splineIndex);
            break;

        case 4:
            indexBase = getSplineParamIndexBase<fixedOrder, 4>(warpIndex, atomWarpIndex);
            result    = getSplineParamIndex<fixedOrder, 4>(indexBase, dimIndex, splineIndex);
            break;

        case 8:
            indexBase = getSplineParamIndexBase<fixedOrder, 8>(warpIndex, atomWarpIndex);
            result    = getSplineParamIndex<fixedOrder, 8>(indexBase, dimIndex, splineIndex);
            break;

        default:
            GMX_THROW(NotImplementedError(
                    formatString("Test function call not unrolled for atomsPerWarp = %d in "
                                 "getSplineParamFullIndex",
                                 atomsPerWarp)));
    }
    return result;
}
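/* Illustrative sketch: the "unique index" contract above means that no two
 * (splineIndex, dimIndex, atomIndex) triplets may map to the same buffer
 * element. A test could verify that as follows; this helper is hypothetical
 * and not part of the original file (it would also require <set>). */
[[maybe_unused]] static void checkSplineParamIndexUniqueness(int order, int atomsPerWarp, int atomCount)
{
    std::set<int> usedIndices;
    for (int atomIndex = 0; atomIndex < atomCount; atomIndex++)
    {
        for (int dimIndex = 0; dimIndex < DIM; dimIndex++)
        {
            for (int splineIndex = 0; splineIndex < order; splineIndex++)
            {
                const int index = getSplineParamFullIndex(order, splineIndex, dimIndex, atomIndex, atomsPerWarp);
                // insert() returns false in .second if the index was already present
                GMX_RELEASE_ASSERT(usedIndices.insert(index).second,
                                   "Duplicate index in GPU spline parameter layout");
            }
        }
    }
}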
/*! \brief Return the number of atoms per warp */
static int pme_gpu_get_atoms_per_warp(const PmeGpu* pmeGpu)
{
    const int order = pmeGpu->common->pme_order;
    const int threadsPerAtom =
            (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
    return pmeGpu->programHandle_->warpSize() / threadsPerAtom;
}
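/* Worked example (illustration only): with a warp size of 32 and PME order 4,
 * ThreadsPerAtom::Order gives 32 / 4 = 8 atoms per warp, while the
 * order-squared layout gives 32 / 16 = 2 atoms per warp. */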
/*! \brief Rearranges the atom spline data between the GPU and host layouts.
 * Only used for test purposes so far, likely to be horribly slow.
 *
 * \param[in]  pmeGpu    The PME GPU structure.
 * \param[out] atc       The PME CPU atom data structure (with a single-threaded layout).
 * \param[in]  type      The spline data type (values or derivatives).
 * \param[in]  dimIndex  Dimension index.
 * \param[in]  transform Layout transform type
 */
static void pme_gpu_transform_spline_atom_data(const PmeGpu*      pmeGpu,
                                               const PmeAtomComm* atc,
                                               PmeSplineDataType  type,
                                               int                dimIndex,
                                               PmeLayoutTransform transform)
{
    // The GPU atom spline data is laid out in a different way currently than the CPU one.
    // This function converts the data from GPU to CPU layout (in the host memory).
    // It is only intended for testing purposes so far.
    // Ideally we should use similar layouts on CPU and GPU if we care about mixed modes and their
    // performance (e.g. spreading on GPU, gathering on CPU).
    GMX_RELEASE_ASSERT(atc->nthread == 1, "Only the serial PME data layout is supported");
    const uintmax_t threadIndex  = 0;
    const auto      atomCount    = atc->numAtoms();
    const auto      atomsPerWarp = pme_gpu_get_atoms_per_warp(pmeGpu);
    const auto      pmeOrder     = pmeGpu->common->pme_order;
    GMX_ASSERT(pmeOrder == c_pmeGpuOrder, "Only PME order 4 is implemented");

    real*  cpuSplineBuffer;
    float* h_splineBuffer;
    switch (type)
    {
        case PmeSplineDataType::Values:
            cpuSplineBuffer = atc->spline[threadIndex].theta.coefficients[dimIndex];
            h_splineBuffer  = pmeGpu->staging.h_theta;
            break;

        case PmeSplineDataType::Derivatives:
            cpuSplineBuffer = atc->spline[threadIndex].dtheta.coefficients[dimIndex];
            h_splineBuffer  = pmeGpu->staging.h_dtheta;
            break;

        default: GMX_THROW(InternalError("Unknown spline data type"));
    }

    for (auto atomIndex = 0; atomIndex < atomCount; atomIndex++)
    {
        for (auto orderIndex = 0; orderIndex < pmeOrder; orderIndex++)
        {
            const auto gpuValueIndex =
                    getSplineParamFullIndex(pmeOrder, orderIndex, dimIndex, atomIndex, atomsPerWarp);
            const auto cpuValueIndex = atomIndex * pmeOrder + orderIndex;
            GMX_ASSERT(cpuValueIndex < atomCount * pmeOrder,
                       "Atom spline data index out of bounds (while transforming GPU data layout "
                       "for host)");
            switch (transform)
            {
                case PmeLayoutTransform::GpuToHost:
                    cpuSplineBuffer[cpuValueIndex] = h_splineBuffer[gpuValueIndex];
                    break;

                case PmeLayoutTransform::HostToGpu:
                    h_splineBuffer[gpuValueIndex] = cpuSplineBuffer[cpuValueIndex];
                    break;

                default: GMX_THROW(InternalError("Unknown layout transform"));
            }
        }
    }
}
//! Setting atom spline values/derivatives to be used in spread/gather
void pmeSetSplineData(const gmx_pme_t*             pme,
                      CodePath                     mode,
                      const SplineParamsDimVector& splineValues,
                      PmeSplineDataType            type,
                      int                          dimIndex)
{
    const PmeAtomComm* atc       = &(pme->atc[0]);
    const index        atomCount = atc->numAtoms();
    const index        pmeOrder  = pme->pme_order;
    const index        dimSize   = pmeOrder * atomCount;
    GMX_RELEASE_ASSERT(dimSize == splineValues.ssize(), "Mismatch in spline data");
    real* splineBuffer = pmeGetSplineDataInternal(pme, type, dimIndex);

    switch (mode)
    {
        case CodePath::CPU:
            std::copy(splineValues.begin(), splineValues.end(), splineBuffer);
            break;

        case CodePath::GPU:
            std::copy(splineValues.begin(), splineValues.end(), splineBuffer);
            pme_gpu_transform_spline_atom_data(pme->gpu, atc, type, dimIndex, PmeLayoutTransform::HostToGpu);
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
//! Setting gridline indices to be used in spread/gather
void pmeSetGridLineIndices(gmx_pme_t* pme, CodePath mode, const GridLineIndicesVector& gridLineIndices)
{
    PmeAtomComm* atc       = &(pme->atc[0]);
    const index  atomCount = atc->numAtoms();
    GMX_RELEASE_ASSERT(atomCount == gridLineIndices.ssize(), "Mismatch in gridline indices size");

    IVec paddedGridSizeUnused, gridSize(0, 0, 0);
    pmeGetRealGridSizesInternal(pme, mode, gridSize, paddedGridSizeUnused);

    for (const auto& index : gridLineIndices)
    {
        for (int i = 0; i < DIM; i++)
        {
            GMX_RELEASE_ASSERT((0 <= index[i]) && (index[i] < gridSize[i]),
                               "Invalid gridline index");
        }
    }

    switch (mode)
    {
        case CodePath::GPU:
            memcpy(pme_gpu_staging(pme->gpu).h_gridlineIndices, gridLineIndices.data(),
                   atomCount * sizeof(gridLineIndices[0]));
            break;

        case CodePath::CPU:
            atc->idx.resize(gridLineIndices.size());
            std::copy(gridLineIndices.begin(), gridLineIndices.end(), atc->idx.begin());
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
//! Getting plain index into the complex 3d grid
inline size_t pmeGetGridPlainIndexInternal(const IVec& index, const IVec& paddedGridSize, GridOrdering gridOrdering)
{
    size_t result;
    switch (gridOrdering)
    {
        case GridOrdering::YZX:
            result = (index[YY] * paddedGridSize[ZZ] + index[ZZ]) * paddedGridSize[XX] + index[XX];
            break;

        case GridOrdering::XYZ:
            result = (index[XX] * paddedGridSize[YY] + index[YY]) * paddedGridSize[ZZ] + index[ZZ];
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return result;
}
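/* Worked example (illustration only): for a padded grid of size (4, 5, 6) and
 * the grid point (1, 2, 3), XYZ ordering yields (1 * 5 + 2) * 6 + 3 = 45,
 * while YZX ordering yields (2 * 6 + 3) * 4 + 1 = 61. */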
//! Setting real or complex grid
template<typename ValueType>
static void pmeSetGridInternal(const gmx_pme_t*                        pme,
                               CodePath                                mode,
                               GridOrdering                            gridOrdering,
                               const SparseGridValuesInput<ValueType>& gridValues)
{
    IVec       gridSize(0, 0, 0), paddedGridSize(0, 0, 0);
    ValueType* grid;
    pmeGetGridAndSizesInternal<ValueType>(pme, mode, grid, gridSize, paddedGridSize);

    switch (mode)
    {
        case CodePath::GPU: // intentional absence of break, the grid will be copied from the host buffer in testing mode
        case CodePath::CPU:
            memset(grid, 0,
                   paddedGridSize[XX] * paddedGridSize[YY] * paddedGridSize[ZZ] * sizeof(ValueType));
            for (const auto& gridValue : gridValues)
            {
                for (int i = 0; i < DIM; i++)
                {
                    GMX_RELEASE_ASSERT((0 <= gridValue.first[i]) && (gridValue.first[i] < gridSize[i]),
                                       "Invalid grid value index");
                }
                const size_t gridValueIndex =
                        pmeGetGridPlainIndexInternal(gridValue.first, paddedGridSize, gridOrdering);
                grid[gridValueIndex] = gridValue.second;
            }
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}

//! Setting real grid to be used in gather
void pmeSetRealGrid(const gmx_pme_t* pme, CodePath mode, const SparseRealGridValuesInput& gridValues)
{
    pmeSetGridInternal<real>(pme, mode, GridOrdering::XYZ, gridValues);
}

//! Setting complex grid to be used in solve
void pmeSetComplexGrid(const gmx_pme_t*                    pme,
                       CodePath                            mode,
                       GridOrdering                        gridOrdering,
                       const SparseComplexGridValuesInput& gridValues)
{
    pmeSetGridInternal<t_complex>(pme, mode, gridOrdering, gridValues);
}
//! Getting the single dimension's spline values or derivatives
SplineParamsDimVector pmeGetSplineData(const gmx_pme_t* pme, CodePath mode, PmeSplineDataType type, int dimIndex)
{
    GMX_RELEASE_ASSERT(pme != nullptr, "PME data is not initialized");
    const PmeAtomComm* atc       = &(pme->atc[0]);
    const size_t       atomCount = atc->numAtoms();
    const size_t       pmeOrder  = pme->pme_order;
    const size_t       dimSize   = pmeOrder * atomCount;

    real*                 sourceBuffer = pmeGetSplineDataInternal(pme, type, dimIndex);
    SplineParamsDimVector result;
    switch (mode)
    {
        case CodePath::GPU:
            pme_gpu_transform_spline_atom_data(pme->gpu, atc, type, dimIndex, PmeLayoutTransform::GpuToHost);
            result = arrayRefFromArray(sourceBuffer, dimSize);
            break;

        case CodePath::CPU: result = arrayRefFromArray(sourceBuffer, dimSize); break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return result;
}
//! Getting the gridline indices
GridLineIndicesVector pmeGetGridlineIndices(const gmx_pme_t* pme, CodePath mode)
{
    GMX_RELEASE_ASSERT(pme != nullptr, "PME data is not initialized");
    const PmeAtomComm* atc       = &(pme->atc[0]);
    const size_t       atomCount = atc->numAtoms();

    GridLineIndicesVector gridLineIndices;
    switch (mode)
    {
        case CodePath::GPU:
            gridLineIndices = arrayRefFromArray(
                    reinterpret_cast<IVec*>(pme_gpu_staging(pme->gpu).h_gridlineIndices), atomCount);
            break;

        case CodePath::CPU: gridLineIndices = atc->idx; break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return gridLineIndices;
}
//! Getting real or complex grid - only non-zero values
template<typename ValueType>
static SparseGridValuesOutput<ValueType> pmeGetGridInternal(const gmx_pme_t* pme,
                                                            CodePath         mode,
                                                            GridOrdering     gridOrdering)
{
    IVec       gridSize(0, 0, 0), paddedGridSize(0, 0, 0);
    ValueType* grid;
    pmeGetGridAndSizesInternal<ValueType>(pme, mode, grid, gridSize, paddedGridSize);
    SparseGridValuesOutput<ValueType> gridValues;
    switch (mode)
    {
        case CodePath::GPU: // intentional absence of break
        case CodePath::CPU:
            gridValues.clear();
            for (int ix = 0; ix < gridSize[XX]; ix++)
            {
                for (int iy = 0; iy < gridSize[YY]; iy++)
                {
                    for (int iz = 0; iz < gridSize[ZZ]; iz++)
                    {
                        IVec         temp(ix, iy, iz);
                        const size_t gridValueIndex =
                                pmeGetGridPlainIndexInternal(temp, paddedGridSize, gridOrdering);
                        const ValueType value = grid[gridValueIndex];
                        if (value != ValueType{})
                        {
                            auto key        = formatString("Cell %d %d %d", ix, iy, iz);
                            gridValues[key] = value;
                        }
                    }
                }
            }
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return gridValues;
}

//! Getting the real grid (spreading output of pmePerformSplineAndSpread())
SparseRealGridValuesOutput pmeGetRealGrid(const gmx_pme_t* pme, CodePath mode)
{
    return pmeGetGridInternal<real>(pme, mode, GridOrdering::XYZ);
}

//! Getting the complex grid output of pmePerformSolve()
SparseComplexGridValuesOutput pmeGetComplexGrid(const gmx_pme_t* pme, CodePath mode, GridOrdering gridOrdering)
{
    return pmeGetGridInternal<t_complex>(pme, mode, gridOrdering);
}
//! Getting the reciprocal energy and virial
PmeOutput pmeGetReciprocalEnergyAndVirial(const gmx_pme_t* pme, CodePath mode, PmeSolveAlgorithm method)
{
    PmeOutput  output;
    const real lambdaQ = 1.0;
    switch (mode)
    {
        case CodePath::CPU:
            switch (method)
            {
                case PmeSolveAlgorithm::Coulomb:
                    get_pme_ener_vir_q(pme->solve_work, pme->nthread, &output);
                    break;

                case PmeSolveAlgorithm::LennardJones:
                    get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &output);
                    break;

                default: GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            break;

        case CodePath::GPU:
            switch (method)
            {
                case PmeSolveAlgorithm::Coulomb:
                    pme_gpu_getEnergyAndVirial(*pme, lambdaQ, &output);
                    break;

                default: GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return output;
}
const char* codePathToString(CodePath codePath)
{
    switch (codePath)
    {
        case CodePath::CPU: return "CPU";
        case CodePath::GPU: return "GPU";
        default: GMX_THROW(NotImplementedError("This CodePath should support codePathToString"));
    }
}

PmeTestHardwareContext::PmeTestHardwareContext() : codePath_(CodePath::CPU) {}

PmeTestHardwareContext::PmeTestHardwareContext(TestDevice* testDevice) :
    codePath_(CodePath::GPU),
    testDevice_(testDevice)
{
    setActiveDevice(testDevice_->deviceInfo());
    pmeGpuProgram_ = buildPmeGpuProgram(testDevice_->deviceContext());
}

//! Returns a human-readable context description line
std::string PmeTestHardwareContext::description() const
{
    switch (codePath_)
    {
        case CodePath::CPU: return "CPU";
        case CodePath::GPU: return "GPU (" + testDevice_->description() + ")";
        default: return "Unknown code path.";
    }
}

void PmeTestHardwareContext::activate() const
{
    if (codePath_ == CodePath::GPU)
    {
        setActiveDevice(testDevice_->deviceInfo());
    }
}

std::vector<std::unique_ptr<PmeTestHardwareContext>> createPmeTestHardwareContextList()
{
    std::vector<std::unique_ptr<PmeTestHardwareContext>> pmeTestHardwareContextList;
    pmeTestHardwareContextList.emplace_back(std::make_unique<PmeTestHardwareContext>());
    const auto& testDeviceList = getTestHardwareEnvironment()->getTestDeviceList();
    for (const auto& testDevice : testDeviceList)
    {
        pmeTestHardwareContextList.emplace_back(std::make_unique<PmeTestHardwareContext>(testDevice.get()));
    }
    return pmeTestHardwareContextList;
}
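/* Illustrative usage sketch (hypothetical helper, not part of the original
 * file): a test driver typically iterates over the list, activates each
 * context before running PME work on it, and labels its output with
 * description(). */
[[maybe_unused]] static std::vector<std::string> listPmeTestHardwareContextDescriptions()
{
    std::vector<std::string> descriptions;
    for (const auto& context : createPmeTestHardwareContextList())
    {
        context->activate(); // selects the context's GPU device; no-op for the CPU-only context
        descriptions.emplace_back(context->description());
    }
    return descriptions;
}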