/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief
 * Implements common routines for PME tests.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 * \ingroup module_ewald
 */
#include "gmxpre.h"

#include "pmetestcommon.h"

#include <cstring>

#include <algorithm>

#include "gromacs/domdec/domdec.h"
#include "gromacs/ewald/pme_gather.h"
#include "gromacs/ewald/pme_gpu_internal.h"
#include "gromacs/ewald/pme_grid.h"
#include "gromacs/ewald/pme_internal.h"
#include "gromacs/ewald/pme_redistribute.h"
#include "gromacs/ewald/pme_solve.h"
#include "gromacs/ewald/pme_spread.h"
#include "gromacs/fft/parallel_3dfft.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/math/invertmatrix.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/stringutil.h"

#include "testutils/testasserts.h"

namespace gmx
{
namespace test
{

bool pmeSupportsInputForMode(const gmx_hw_info_t &hwinfo,
                             const t_inputrec    *inputRec,
                             CodePath             mode)
{
    bool       implemented;
    gmx_mtop_t mtop;
    switch (mode)
    {
        case CodePath::CPU:
            implemented = true;
            break;

        case CodePath::GPU:
            implemented = (pme_gpu_supports_build(nullptr) &&
                           pme_gpu_supports_hardware(hwinfo, nullptr) &&
                           pme_gpu_supports_input(*inputRec, mtop, nullptr));
            break;

        default:
            GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return implemented;
}

uint64_t getSplineModuliDoublePrecisionUlps(int splineOrder)
{
    /* Arbitrary ulp tolerance for sine/cosine implementation. It's
     * hard to know what to pick without testing lots of
     * implementations. */
    const uint64_t sineUlps = 10;
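    /* The returned tolerance reads off the formula below: roughly 4 ulps
     * for each of the (splineOrder - 2) recursion steps of the moduli
     * computation, plus 2 * sineUlps for each of the splineOrder sine
     * evaluations. */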
    return 4 * (splineOrder - 2) + 2 * sineUlps * splineOrder;
}

//! PME initialization - internal
static PmeSafePointer pmeInitInternal(const t_inputrec         *inputRec,
                                      CodePath                  mode,
                                      const gmx_device_info_t  *gpuInfo,
                                      PmeGpuProgramHandle       pmeGpuProgram,
                                      const Matrix3x3          &box,
                                      real                      ewaldCoeff_q = 1.0F,
                                      real                      ewaldCoeff_lj = 1.0F
                                      )
{
    const MDLogger dummyLogger;
    const auto     runMode       = (mode == CodePath::CPU) ? PmeRunMode::CPU : PmeRunMode::Mixed;
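    // Non-CPU tests run PME in mixed mode: spread and gather execute on the
    // GPU, while the FFT and solve stages run on the CPU.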
    t_commrec      dummyCommrec  = {0};
    NumPmeDomains  numPmeDomains = { 1, 1 };
    gmx_pme_t     *pmeDataRaw    = gmx_pme_init(&dummyCommrec, numPmeDomains, inputRec, false, false, true,
                                                ewaldCoeff_q, ewaldCoeff_lj, 1, runMode, nullptr, gpuInfo, pmeGpuProgram, dummyLogger);
    PmeSafePointer pme(pmeDataRaw); // taking ownership

    // TODO get rid of this with proper matrix type
    matrix boxTemp;
    for (int i = 0; i < DIM; i++)
    {
        for (int j = 0; j < DIM; j++)
        {
            boxTemp[i][j] = box[i * DIM + j];
        }
    }
    const char *boxError = check_box(-1, boxTemp);
    GMX_RELEASE_ASSERT(boxError == nullptr, boxError);

    switch (mode)
    {
        case CodePath::CPU:
            invertBoxMatrix(boxTemp, pme->recipbox);
            break;

        case CodePath::GPU:
            pme_gpu_set_testing(pme->gpu, true);
            pme_gpu_update_input_box(pme->gpu, boxTemp);
            break;

        default:
            GMX_THROW(InternalError("Test not implemented for this mode"));
    }

    return pme;
}

//! Simple PME initialization based on input, no atom data
PmeSafePointer pmeInitEmpty(const t_inputrec         *inputRec,
                            CodePath                  mode,
                            const gmx_device_info_t  *gpuInfo,
                            PmeGpuProgramHandle       pmeGpuProgram,
                            const Matrix3x3          &box,
                            real                      ewaldCoeff_q,
                            real                      ewaldCoeff_lj
                            )
{
    return pmeInitInternal(inputRec, mode, gpuInfo, pmeGpuProgram, box, ewaldCoeff_q, ewaldCoeff_lj);
    // hiding the fact that PME actually needs to know the number of atoms in advance
}

//! PME initialization with atom data
PmeSafePointer pmeInitAtoms(const t_inputrec                        *inputRec,
                            CodePath                                 mode,
                            const gmx_device_info_t                 *gpuInfo,
                            PmeGpuProgramHandle                      pmeGpuProgram,
                            const CoordinatesVector                 &coordinates,
                            const ChargesVector                     &charges,
                            const Matrix3x3                         &box,
                            std::shared_ptr<StatePropagatorDataGpu> &stateGpu
                            )
{
    const index     atomCount = coordinates.size();
    GMX_RELEASE_ASSERT(atomCount == charges.ssize(), "Mismatch in atom data");
    PmeSafePointer  pmeSafe = pmeInitInternal(inputRec, mode, gpuInfo, pmeGpuProgram, box);
    PmeAtomComm    *atc     = nullptr;

    switch (mode)
    {
        case CodePath::CPU:
            atc              = &(pmeSafe->atc[0]);
            atc->x           = coordinates;
            atc->coefficient = charges;
            gmx_pme_reinit_atoms(pmeSafe.get(), atomCount, charges.data());
            /* With decomposition there would be more boilerplate atc code here, e.g. do_redist_pos_coeffs */
            break;

        case CodePath::GPU:
            // TODO: Avoid use of atc in the GPU code path
            atc              = &(pmeSafe->atc[0]);
            // We need to set atc->n for passing the size in the tests
            atc->setNumAtoms(atomCount);
            gmx_pme_reinit_atoms(pmeSafe.get(), atomCount, charges.data());

            // TODO: Pin the host buffer and use async memory copies
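            // The caller-owned stateGpu (hence passed by reference) has to
            // outlive the PME structure, since the kernel parameters set
            // below keep a pointer into its device coordinate buffer.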
            stateGpu = std::make_shared<StatePropagatorDataGpu>(pme_gpu_get_device_stream(pmeSafe.get()),
                                                                pme_gpu_get_device_context(pmeSafe.get()),
                                                                GpuApiCallBehavior::Sync,
                                                                pme_gpu_get_padding_size(pmeSafe.get()));
            stateGpu->reinit(atomCount, atomCount);
            stateGpu->copyCoordinatesToGpu(arrayRefFromArray(coordinates.data(), coordinates.size()), gmx::StatePropagatorDataGpu::AtomLocality::All);
            pme_gpu_set_kernelparam_coordinates(pmeSafe->gpu, stateGpu->getCoordinates());

            break;

        default:
            GMX_THROW(InternalError("Test not implemented for this mode"));
    }

    return pmeSafe;
}

//! Getting local PME real grid pointer for test I/O
static real *pmeGetRealGridInternal(const gmx_pme_t *pme)
{
    const size_t gridIndex = 0;
    return pme->fftgrid[gridIndex];
}

//! Getting local PME real grid dimensions
static void pmeGetRealGridSizesInternal(const gmx_pme_t      *pme,
                                        CodePath              mode,
                                        IVec                 &gridSize,       //NOLINT(google-runtime-references)
                                        IVec                 &paddedGridSize) //NOLINT(google-runtime-references)
{
    const size_t gridIndex = 0;
    IVec         gridOffsetUnused;
    switch (mode)
    {
        case CodePath::CPU:
            gmx_parallel_3dfft_real_limits(pme->pfft_setup[gridIndex], gridSize, gridOffsetUnused, paddedGridSize);
            break;

        case CodePath::GPU:
            pme_gpu_get_real_grid_sizes(pme->gpu, &gridSize, &paddedGridSize);
            break;

        default:
            GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}

//! Getting local PME complex grid pointer for test I/O
static t_complex *pmeGetComplexGridInternal(const gmx_pme_t *pme)
{
    const size_t gridIndex = 0;
    return pme->cfftgrid[gridIndex];
}

//! Getting local PME complex grid dimensions
static void pmeGetComplexGridSizesInternal(const gmx_pme_t      *pme,
                                           IVec                 &gridSize,       //NOLINT(google-runtime-references)
                                           IVec                 &paddedGridSize) //NOLINT(google-runtime-references)
{
    const size_t gridIndex = 0;
    IVec         gridOffsetUnused, complexOrderUnused;
    gmx_parallel_3dfft_complex_limits(pme->pfft_setup[gridIndex], complexOrderUnused, gridSize, gridOffsetUnused, paddedGridSize); //TODO: what about YZX ordering?
}

//! Getting the PME grid memory buffer and its sizes - template definition
template<typename ValueType> static void pmeGetGridAndSizesInternal(const gmx_pme_t * /*unused*/, CodePath /*unused*/, ValueType * & /*unused*/, IVec & /*unused*/, IVec & /*unused*/) //NOLINT(google-runtime-references)
{
    GMX_THROW(InternalError("Deleted function call"));
    // explicitly deleting general template does not compile in clang/icc, see https://llvm.org/bugs/show_bug.cgi?id=17537
}

//! Getting the PME real grid memory buffer and its sizes
template<> void pmeGetGridAndSizesInternal<real>(const gmx_pme_t *pme, CodePath mode, real * &grid, IVec &gridSize, IVec &paddedGridSize)
{
    grid = pmeGetRealGridInternal(pme);
    pmeGetRealGridSizesInternal(pme, mode, gridSize, paddedGridSize);
}

//! Getting the PME complex grid memory buffer and its sizes
template<> void pmeGetGridAndSizesInternal<t_complex>(const gmx_pme_t *pme, CodePath /*unused*/, t_complex * &grid, IVec &gridSize, IVec &paddedGridSize)
{
    grid = pmeGetComplexGridInternal(pme);
    pmeGetComplexGridSizesInternal(pme, gridSize, paddedGridSize);
}

//! PME spline calculation and charge spreading
void pmePerformSplineAndSpread(gmx_pme_t *pme, CodePath mode, // TODO const qualifiers elsewhere
                               bool computeSplines, bool spreadCharges)
{
    GMX_RELEASE_ASSERT(pme != nullptr, "PME data is not initialized");
    PmeAtomComm    *atc                          = &(pme->atc[0]);
    const size_t    gridIndex                    = 0;
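    // Splines are computed for all atoms, even those with zero charge, so
    // that the test data stays complete and comparable across code paths.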
    const bool      computeSplinesForZeroCharges = true;
    real           *fftgrid                      = spreadCharges ? pme->fftgrid[gridIndex] : nullptr;
    real           *pmegrid                      = pme->pmegrid[gridIndex].grid.grid;

    switch (mode)
    {
        case CodePath::CPU:
            spread_on_grid(pme, atc, &pme->pmegrid[gridIndex], computeSplines, spreadCharges,
                           fftgrid, computeSplinesForZeroCharges, gridIndex);
            if (spreadCharges && !pme->bUseThreads)
            {
                wrap_periodic_pmegrid(pme, pmegrid);
                copy_pmegrid_to_fftgrid(pme, pmegrid, fftgrid, gridIndex);
            }
            break;

        case CodePath::GPU:
            pme_gpu_spread(pme->gpu, gridIndex, fftgrid, computeSplines, spreadCharges);
            break;

        default:
            GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}

//! Getting the internal spline data buffer pointer
static real *pmeGetSplineDataInternal(const gmx_pme_t *pme, PmeSplineDataType type, int dimIndex)
{
    GMX_ASSERT((0 <= dimIndex) && (dimIndex < DIM), "Invalid dimension index");
    const PmeAtomComm    *atc          = &(pme->atc[0]);
    const size_t          threadIndex  = 0;
    real                 *splineBuffer = nullptr;
    switch (type)
    {
        case PmeSplineDataType::Values:
            splineBuffer = atc->spline[threadIndex].theta.coefficients[dimIndex];
            break;

        case PmeSplineDataType::Derivatives:
            splineBuffer = atc->spline[threadIndex].dtheta.coefficients[dimIndex];
            break;

        default:
            GMX_THROW(InternalError("Unknown spline data type"));
    }
    return splineBuffer;
}

//! PME solving
void pmePerformSolve(const gmx_pme_t *pme, CodePath mode,
                     PmeSolveAlgorithm method, real cellVolume,
                     GridOrdering gridOrdering, bool computeEnergyAndVirial)
{
    t_complex      *h_grid                 = pmeGetComplexGridInternal(pme);
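    // Lennard-Jones PME is tested with geometric combination rules only,
    // hence the Lorentz-Berthelot flag below stays false.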
    const bool      useLorentzBerthelot    = false;
    const size_t    threadIndex            = 0;
    switch (mode)
    {
        case CodePath::CPU:
            if (gridOrdering != GridOrdering::YZX)
            {
                GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            switch (method)
            {
                case PmeSolveAlgorithm::Coulomb:
                    solve_pme_yzx(pme, h_grid, cellVolume,
                                  computeEnergyAndVirial, pme->nthread, threadIndex);
                    break;

                case PmeSolveAlgorithm::LennardJones:
                    solve_pme_lj_yzx(pme, &h_grid, useLorentzBerthelot,
                                     cellVolume, computeEnergyAndVirial, pme->nthread, threadIndex);
                    break;

                default:
                    GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            break;

        case CodePath::GPU:
            switch (method)
            {
                case PmeSolveAlgorithm::Coulomb:
                    pme_gpu_solve(pme->gpu, h_grid, gridOrdering, computeEnergyAndVirial);
                    break;

                default:
                    GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            break;

        default:
            GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}

//! PME force gathering
void pmePerformGather(gmx_pme_t *pme, CodePath mode,
                      PmeForceOutputHandling inputTreatment, ForcesVector &forces)
{
    PmeAtomComm    *atc                     = &(pme->atc[0]);
    const index     atomCount               = atc->numAtoms();
    GMX_RELEASE_ASSERT(forces.ssize() == atomCount, "Invalid force buffer size");
    const bool      forceReductionWithInput = (inputTreatment == PmeForceOutputHandling::ReduceWithInput);
    const real      scale                   = 1.0;
    const size_t    threadIndex             = 0;
    const size_t    gridIndex               = 0;
    real           *pmegrid                 = pme->pmegrid[gridIndex].grid.grid;
    real           *fftgrid                 = pme->fftgrid[gridIndex];

    switch (mode)
    {
        case CodePath::CPU:
            atc->f = forces;
            if (atc->nthread == 1)
            {
                // Set the spline data size here, as is normally done during serial spline computation (make_thread_local_ind())
                atc->spline[threadIndex].n = atomCount;
            }
            copy_fftgrid_to_pmegrid(pme, fftgrid, pmegrid, gridIndex, pme->nthread, threadIndex);
            unwrap_periodic_pmegrid(pme, pmegrid);
            gather_f_bsplines(pme, pmegrid, !forceReductionWithInput, atc, &atc->spline[threadIndex], scale);
            break;

        case CodePath::GPU:
        {
            // Variable initialization needs a non-switch scope
            PmeOutput output = pme_gpu_getOutput(*pme, GMX_PME_CALC_F);
            GMX_ASSERT(forces.size() == output.forces_.size(), "Size of force buffers did not match");
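            // Reduction with input forces is emulated by pre-loading them into
            // the output buffer, so the gather reduces into them.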
            if (forceReductionWithInput)
            {
                std::copy(std::begin(forces), std::end(forces), std::begin(output.forces_));
            }
            pme_gpu_gather(pme->gpu, inputTreatment, reinterpret_cast<float *>(fftgrid));
            std::copy(std::begin(output.forces_), std::end(output.forces_), std::begin(forces));
        }
        break;

        default:
            GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}

//! PME test finalization before fetching the outputs
void pmeFinalizeTest(const gmx_pme_t *pme, CodePath mode)
{
    switch (mode)
    {
        case CodePath::CPU:
            break;

        case CodePath::GPU:
            pme_gpu_synchronize(pme->gpu);
            break;

        default:
            GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}

//! Setting atom spline values/derivatives to be used in spread/gather
void pmeSetSplineData(const gmx_pme_t *pme, CodePath mode,
                      const SplineParamsDimVector &splineValues, PmeSplineDataType type, int dimIndex)
{
    const PmeAtomComm    *atc         = &(pme->atc[0]);
    const index           atomCount   = atc->numAtoms();
    const index           pmeOrder    = pme->pme_order;
    const index           dimSize     = pmeOrder * atomCount;
    GMX_RELEASE_ASSERT(dimSize == splineValues.ssize(), "Mismatch in spline data");
    real                 *splineBuffer = pmeGetSplineDataInternal(pme, type, dimIndex);

    switch (mode)
    {
        case CodePath::CPU:
            std::copy(splineValues.begin(), splineValues.end(), splineBuffer);
            break;

        case CodePath::GPU:
            std::copy(splineValues.begin(), splineValues.end(), splineBuffer);
            pme_gpu_transform_spline_atom_data(pme->gpu, atc, type, dimIndex, PmeLayoutTransform::HostToGpu);
            break;

        default:
            GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}

//! Setting gridline indices to be used in spread/gather
void pmeSetGridLineIndices(gmx_pme_t *pme, CodePath mode,
                           const GridLineIndicesVector &gridLineIndices)
{
    PmeAtomComm                *atc         = &(pme->atc[0]);
    const index                 atomCount   = atc->numAtoms();
    GMX_RELEASE_ASSERT(atomCount == gridLineIndices.ssize(), "Mismatch in gridline indices size");

    IVec paddedGridSizeUnused, gridSize(0, 0, 0);
    pmeGetRealGridSizesInternal(pme, mode, gridSize, paddedGridSizeUnused);

    for (const auto &index : gridLineIndices)
    {
        for (int i = 0; i < DIM; i++)
        {
            GMX_RELEASE_ASSERT((0 <= index[i]) && (index[i] < gridSize[i]), "Invalid gridline index");
        }
    }

    switch (mode)
    {
        case CodePath::GPU:
            memcpy(pme->gpu->staging.h_gridlineIndices, gridLineIndices.data(), atomCount * sizeof(gridLineIndices[0]));
            break;

        case CodePath::CPU:
            atc->idx.resize(gridLineIndices.size());
            std::copy(gridLineIndices.begin(), gridLineIndices.end(), atc->idx.begin());
            break;

        default:
            GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}

//! Getting plain index into the complex 3d grid
inline size_t pmeGetGridPlainIndexInternal(const IVec &index, const IVec &paddedGridSize, GridOrdering gridOrdering)
{
    size_t result;
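    // Row-major flattening: the last letter of the grid ordering names the
    // fastest-varying (minor) dimension.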
    switch (gridOrdering)
    {
        case GridOrdering::YZX:
            result = (index[YY] * paddedGridSize[ZZ] + index[ZZ]) * paddedGridSize[XX] + index[XX];
            break;

        case GridOrdering::XYZ:
            result = (index[XX] * paddedGridSize[YY] + index[YY]) * paddedGridSize[ZZ] + index[ZZ];
            break;

        default:
            GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return result;
}

//! Setting real or complex grid
template<typename ValueType>
static void pmeSetGridInternal(const gmx_pme_t *pme, CodePath mode,
                               GridOrdering gridOrdering,
                               const SparseGridValuesInput<ValueType> &gridValues)
{
    IVec       gridSize(0, 0, 0), paddedGridSize(0, 0, 0);
    ValueType *grid;
    pmeGetGridAndSizesInternal<ValueType>(pme, mode, grid, gridSize, paddedGridSize);

    switch (mode)
    {
        case CodePath::GPU: // intentional absence of break, the grid will be copied from the host buffer in testing mode
        case CodePath::CPU:
            std::memset(grid, 0, paddedGridSize[XX] * paddedGridSize[YY] * paddedGridSize[ZZ] * sizeof(ValueType));
            for (const auto &gridValue : gridValues)
            {
                for (int i = 0; i < DIM; i++)
                {
                    GMX_RELEASE_ASSERT((0 <= gridValue.first[i]) && (gridValue.first[i] < gridSize[i]), "Invalid grid value index");
                }
                const size_t gridValueIndex = pmeGetGridPlainIndexInternal(gridValue.first, paddedGridSize, gridOrdering);
                grid[gridValueIndex] = gridValue.second;
            }
            break;

        default:
            GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}

//! Setting real grid to be used in gather
void pmeSetRealGrid(const gmx_pme_t *pme, CodePath mode,
                    const SparseRealGridValuesInput &gridValues)
{
    pmeSetGridInternal<real>(pme, mode, GridOrdering::XYZ, gridValues);
}

//! Setting complex grid to be used in solve
void pmeSetComplexGrid(const gmx_pme_t *pme, CodePath mode,
                       GridOrdering gridOrdering,
                       const SparseComplexGridValuesInput &gridValues)
{
    pmeSetGridInternal<t_complex>(pme, mode, gridOrdering, gridValues);
}

//! Getting the single dimension's spline values or derivatives
SplineParamsDimVector pmeGetSplineData(const gmx_pme_t *pme, CodePath mode,
                                       PmeSplineDataType type, int dimIndex)
{
    GMX_RELEASE_ASSERT(pme != nullptr, "PME data is not initialized");
    const PmeAtomComm       *atc         = &(pme->atc[0]);
    const size_t             atomCount   = atc->numAtoms();
    const size_t             pmeOrder    = pme->pme_order;
    const size_t             dimSize     = pmeOrder * atomCount;

    real                    *sourceBuffer = pmeGetSplineDataInternal(pme, type, dimIndex);
    SplineParamsDimVector    result;
    switch (mode)
    {
        case CodePath::GPU:
            pme_gpu_transform_spline_atom_data(pme->gpu, atc, type, dimIndex, PmeLayoutTransform::GpuToHost);
            result = arrayRefFromArray(sourceBuffer, dimSize);
            break;

        case CodePath::CPU:
            result = arrayRefFromArray(sourceBuffer, dimSize);
            break;

        default:
            GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return result;
}

//! Getting the gridline indices
GridLineIndicesVector pmeGetGridlineIndices(const gmx_pme_t *pme, CodePath mode)
{
    GMX_RELEASE_ASSERT(pme != nullptr, "PME data is not initialized");
    const PmeAtomComm    *atc         = &(pme->atc[0]);
    const size_t          atomCount   = atc->numAtoms();

    GridLineIndicesVector gridLineIndices;
    switch (mode)
    {
        case CodePath::GPU:
            gridLineIndices = arrayRefFromArray(reinterpret_cast<IVec *>(pme->gpu->staging.h_gridlineIndices), atomCount);
            break;

        case CodePath::CPU:
            gridLineIndices = atc->idx;
            break;

        default:
            GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return gridLineIndices;
}

//! Getting real or complex grid - only non zero values
template<typename ValueType>
static SparseGridValuesOutput<ValueType> pmeGetGridInternal(const gmx_pme_t *pme, CodePath mode, GridOrdering gridOrdering)
{
    IVec       gridSize(0, 0, 0), paddedGridSize(0, 0, 0);
    ValueType *grid;
    pmeGetGridAndSizesInternal<ValueType>(pme, mode, grid, gridSize, paddedGridSize);
    SparseGridValuesOutput<ValueType> gridValues;
    switch (mode)
    {
        case CodePath::GPU: // intentional absence of break
        case CodePath::CPU:
            gridValues.clear();
            for (int ix = 0; ix < gridSize[XX]; ix++)
            {
                for (int iy = 0; iy < gridSize[YY]; iy++)
                {
                    for (int iz = 0; iz < gridSize[ZZ]; iz++)
                    {
                        IVec            temp(ix, iy, iz);
                        const size_t    gridValueIndex = pmeGetGridPlainIndexInternal(temp, paddedGridSize, gridOrdering);
                        const ValueType value          = grid[gridValueIndex];
                        if (value != ValueType {})
                        {
                            auto key = formatString("Cell %d %d %d", ix, iy, iz);
                            gridValues[key] = value;
                        }
                    }
                }
            }
            break;

        default:
            GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return gridValues;
}

//! Getting the real grid (spreading output of pmePerformSplineAndSpread())
SparseRealGridValuesOutput pmeGetRealGrid(const gmx_pme_t *pme, CodePath mode)
{
    return pmeGetGridInternal<real>(pme, mode, GridOrdering::XYZ);
}

//! Getting the complex grid output of pmePerformSolve()
SparseComplexGridValuesOutput pmeGetComplexGrid(const gmx_pme_t *pme, CodePath mode,
                                                GridOrdering gridOrdering)
{
    return pmeGetGridInternal<t_complex>(pme, mode, gridOrdering);
}

//! Getting the reciprocal energy and virial
PmeOutput pmeGetReciprocalEnergyAndVirial(const gmx_pme_t *pme, CodePath mode,
                                          PmeSolveAlgorithm method)
{
    PmeOutput output;
    switch (mode)
    {
        case CodePath::CPU:
            switch (method)
            {
                case PmeSolveAlgorithm::Coulomb:
                    get_pme_ener_vir_q(pme->solve_work, pme->nthread, &output);
                    break;

                case PmeSolveAlgorithm::LennardJones:
                    get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &output);
                    break;

                default:
                    GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            break;

        case CodePath::GPU:
            switch (method)
            {
                case PmeSolveAlgorithm::Coulomb:
                    pme_gpu_getEnergyAndVirial(*pme, &output);
                    break;

                default:
                    GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            break;

        default:
            GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return output;
}

}  // namespace test
}  // namespace gmx