Pipeline GPU PME Spline/Spread with PP Comms
alexxy/gromacs.git: src/gromacs/ewald/tests/pmetestcommon.cpp
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2016,2017,2018,2019,2020 by the GROMACS development team.
5  * Copyright (c) 2021, by the GROMACS development team, led by
6  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
7  * and including many others, as listed in the AUTHORS file in the
8  * top-level source directory and at http://www.gromacs.org.
9  *
10  * GROMACS is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public License
12  * as published by the Free Software Foundation; either version 2.1
13  * of the License, or (at your option) any later version.
14  *
15  * GROMACS is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with GROMACS; if not, see
22  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
23  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
24  *
25  * If you want to redistribute modifications to GROMACS, please
26  * consider that scientific software is very special. Version
27  * control is crucial - bugs must be traceable. We will be happy to
28  * consider code for inclusion in the official distribution, but
29  * derived work must not be called official GROMACS. Details are found
30  * in the README & COPYING files - if they are missing, get the
31  * official version at http://www.gromacs.org.
32  *
33  * To help us fund GROMACS development, we humbly ask that you cite
34  * the research papers on the package. Check out http://www.gromacs.org.
35  */
36 /*! \internal \file
37  * \brief
38  * Implements common routines for PME tests.
39  *
40  * \author Aleksei Iupinov <a.yupinov@gmail.com>
41  * \ingroup module_ewald
42  */
43 #include "gmxpre.h"
44
45 #include "pmetestcommon.h"
46
47 #include <cstring>
48
49 #include <algorithm>
50
51 #include "gromacs/domdec/domdec.h"
52 #include "gromacs/ewald/pme_gather.h"
53 #include "gromacs/ewald/pme_gpu_calculate_splines.h"
54 #include "gromacs/ewald/pme_gpu_constants.h"
55 #include "gromacs/ewald/pme_gpu_internal.h"
56 #include "gromacs/ewald/pme_gpu_staging.h"
57 #include "gromacs/ewald/pme_grid.h"
58 #include "gromacs/ewald/pme_internal.h"
59 #include "gromacs/ewald/pme_redistribute.h"
60 #include "gromacs/ewald/pme_solve.h"
61 #include "gromacs/ewald/pme_spread.h"
62 #include "gromacs/fft/parallel_3dfft.h"
63 #include "gromacs/gpu_utils/gpu_utils.h"
64 #include "gromacs/hardware/device_management.h"
65 #include "gromacs/math/invertmatrix.h"
66 #include "gromacs/mdtypes/commrec.h"
67 #include "gromacs/pbcutil/pbc.h"
68 #include "gromacs/topology/topology.h"
69 #include "gromacs/utility/exceptions.h"
70 #include "gromacs/utility/gmxassert.h"
71 #include "gromacs/utility/logger.h"
72 #include "gromacs/utility/stringutil.h"
73 #include "gromacs/ewald/pme_coordinate_receiver_gpu.h"
74
75 #include "testutils/test_hardware_environment.h"
76 #include "testutils/testasserts.h"
77
78 class DeviceContext;
79
80 namespace gmx
81 {
82 namespace test
83 {
84
85 bool pmeSupportsInputForMode(const gmx_hw_info_t& hwinfo, const t_inputrec* inputRec, CodePath mode)
86 {
87     bool implemented;
88     switch (mode)
89     {
90         case CodePath::CPU: implemented = true; break;
91
92         case CodePath::GPU:
93             implemented = (pme_gpu_supports_build(nullptr) && pme_gpu_supports_hardware(hwinfo, nullptr)
94                            && pme_gpu_supports_input(*inputRec, nullptr));
95             break;
96
97         default: GMX_THROW(InternalError("Test not implemented for this mode"));
98     }
99     return implemented;
100 }
101
102 uint64_t getSplineModuliDoublePrecisionUlps(int splineOrder)
103 {
104     /* Arbitrary ulp tolerance for sine/cosine implementation. It's
105      * hard to know what to pick without testing lots of
106      * implementations. */
107     const uint64_t sineUlps = 10;
108     return 4 * (splineOrder - 2) + 2 * sineUlps * splineOrder;
109 }
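// A worked example of the tolerance formula above (for illustration only): with the
// common spline order 4, the bound is 4 * (4 - 2) + 2 * 10 * 4 = 88 ulps.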
110
111 //! PME initialization
112 PmeSafePointer pmeInitWrapper(const t_inputrec*    inputRec,
113                               const CodePath       mode,
114                               const DeviceContext* deviceContext,
115                               const DeviceStream*  deviceStream,
116                               const PmeGpuProgram* pmeGpuProgram,
117                               const Matrix3x3&     box,
118                               const real           ewaldCoeff_q,
119                               const real           ewaldCoeff_lj)
120 {
121     const MDLogger dummyLogger;
122     const auto     runMode       = (mode == CodePath::CPU) ? PmeRunMode::CPU : PmeRunMode::Mixed;
123     t_commrec      dummyCommrec  = { 0 };
124     NumPmeDomains  numPmeDomains = { 1, 1 };
125     gmx_pme_t*     pmeDataRaw    = gmx_pme_init(&dummyCommrec,
126                                          numPmeDomains,
127                                          inputRec,
128                                          false,
129                                          false,
130                                          true,
131                                          ewaldCoeff_q,
132                                          ewaldCoeff_lj,
133                                          1,
134                                          runMode,
135                                          nullptr,
136                                          deviceContext,
137                                          deviceStream,
138                                          pmeGpuProgram,
139                                          dummyLogger);
140     PmeSafePointer pme(pmeDataRaw); // taking ownership
141
142     // TODO get rid of this with proper matrix type
143     matrix boxTemp;
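    // Copy the flat Matrix3x3 into the legacy matrix type: element (i, j) comes from box[i * DIM + j].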
144     for (int i = 0; i < DIM; i++)
145     {
146         for (int j = 0; j < DIM; j++)
147         {
148             boxTemp[i][j] = box[i * DIM + j];
149         }
150     }
151     const char* boxError = check_box(PbcType::Unset, boxTemp);
152     GMX_RELEASE_ASSERT(boxError == nullptr, boxError);
153
154     switch (mode)
155     {
156         case CodePath::CPU: invertBoxMatrix(boxTemp, pme->recipbox); break;
157
158         case CodePath::GPU:
159             pme_gpu_set_testing(pme->gpu, true);
160             pme_gpu_update_input_box(pme->gpu, boxTemp);
161             break;
162
163         default: GMX_THROW(InternalError("Test not implemented for this mode"));
164     }
165
166     return pme;
167 }
168
169 PmeSafePointer pmeInitEmpty(const t_inputrec* inputRec)
170 {
171     const Matrix3x3 defaultBox = { { 1.0F, 0.0F, 0.0F, 0.0F, 1.0F, 0.0F, 0.0F, 0.0F, 1.0F } };
172     return pmeInitWrapper(inputRec, CodePath::CPU, nullptr, nullptr, nullptr, defaultBox, 0.0F, 0.0F);
173 }
174
175 //! Make a GPU state-propagator manager
176 std::unique_ptr<StatePropagatorDataGpu> makeStatePropagatorDataGpu(const gmx_pme_t&     pme,
177                                                                    const DeviceContext* deviceContext,
178                                                                    const DeviceStream* deviceStream)
179 {
180     // TODO: Pin the host buffer and use async memory copies
181     // TODO: Special constructor for PME-only rank / PME-tests is used here. There should be a mechanism to
182     //       restrict one from using the other constructor here.
183     return std::make_unique<StatePropagatorDataGpu>(
184             deviceStream, *deviceContext, GpuApiCallBehavior::Sync, pme_gpu_get_block_size(&pme), nullptr);
185 }
186
187 //! PME initialization with atom data
188 void pmeInitAtoms(gmx_pme_t*               pme,
189                   StatePropagatorDataGpu*  stateGpu,
190                   const CodePath           mode,
191                   const CoordinatesVector& coordinates,
192                   const ChargesVector&     charges)
193 {
194     const index atomCount = coordinates.size();
195     GMX_RELEASE_ASSERT(atomCount == charges.ssize(), "Mismatch in atom data");
196     PmeAtomComm* atc = nullptr;
197
198     switch (mode)
199     {
200         case CodePath::CPU:
201             atc              = &(pme->atc[0]);
202             atc->x           = coordinates;
203             atc->coefficient = charges;
204             gmx_pme_reinit_atoms(pme, atomCount, charges, {});
205             /* With decomposition there would be more boilerplate atc code here, e.g. do_redist_pos_coeffs */
206             break;
207
208         case CodePath::GPU:
209             // TODO: Avoid use of atc in the GPU code path
210             atc = &(pme->atc[0]);
211             // We need to set atc->n so that the atom count is available to the tests
212             atc->setNumAtoms(atomCount);
213             gmx_pme_reinit_atoms(pme, atomCount, charges, {});
214
215             stateGpu->reinit(atomCount, atomCount);
216             stateGpu->copyCoordinatesToGpu(arrayRefFromArray(coordinates.data(), coordinates.size()),
217                                            gmx::AtomLocality::Local);
218             pme_gpu_set_kernelparam_coordinates(pme->gpu, stateGpu->getCoordinates());
219
220             break;
221
222         default: GMX_THROW(InternalError("Test not implemented for this mode"));
223     }
224 }
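/* A sketch (for illustration only; the variable names are assumed, not taken from a specific
 * test) of how the initialization helpers above are typically chained in a test body:
 *
 *   PmeSafePointer pmeSafe = pmeInitWrapper(&inputRec, codePath, deviceContext, deviceStream,
 *                                           pmeGpuProgram, box, ewaldCoeff_q, ewaldCoeff_lj);
 *   std::unique_ptr<StatePropagatorDataGpu> stateGpu =
 *           (codePath == CodePath::GPU)
 *                   ? makeStatePropagatorDataGpu(*pmeSafe, deviceContext, deviceStream)
 *                   : nullptr;
 *   pmeInitAtoms(pmeSafe.get(), stateGpu.get(), codePath, coordinates, charges);
 */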
225
226 //! Getting local PME real grid pointer for test I/O
227 static real* pmeGetRealGridInternal(const gmx_pme_t* pme)
228 {
229     const size_t gridIndex = 0;
230     return pme->fftgrid[gridIndex];
231 }
232
233 //! Getting local PME real grid dimensions
234 static void pmeGetRealGridSizesInternal(const gmx_pme_t* pme,
235                                         CodePath         mode,
236                                         IVec& gridSize,       //NOLINT(google-runtime-references)
237                                         IVec& paddedGridSize) //NOLINT(google-runtime-references)
238 {
239     const size_t gridIndex = 0;
240     IVec         gridOffsetUnused;
241     switch (mode)
242     {
243         case CodePath::CPU:
244             gmx_parallel_3dfft_real_limits(
245                     pme->pfft_setup[gridIndex], gridSize, gridOffsetUnused, paddedGridSize);
246             break;
247
248         case CodePath::GPU:
249             pme_gpu_get_real_grid_sizes(pme->gpu, &gridSize, &paddedGridSize);
250             break;
251
252         default: GMX_THROW(InternalError("Test not implemented for this mode"));
253     }
254 }
255
256 //! Getting local PME complex grid pointer for test I/O
257 static t_complex* pmeGetComplexGridInternal(const gmx_pme_t* pme)
258 {
259     const size_t gridIndex = 0;
260     return pme->cfftgrid[gridIndex];
261 }
262
263 //! Getting local PME complex grid dimensions
264 static void pmeGetComplexGridSizesInternal(const gmx_pme_t* pme,
265                                            IVec& gridSize,       //NOLINT(google-runtime-references)
266                                            IVec& paddedGridSize) //NOLINT(google-runtime-references)
267 {
268     const size_t gridIndex = 0;
269     IVec         gridOffsetUnused, complexOrderUnused;
270     gmx_parallel_3dfft_complex_limits(
271             pme->pfft_setup[gridIndex], complexOrderUnused, gridSize, gridOffsetUnused, paddedGridSize); // TODO: what about YZX ordering?
272 }
273
274 //! Getting the PME grid memory buffer and its sizes - template definition
275 template<typename ValueType>
276 static void pmeGetGridAndSizesInternal(const gmx_pme_t* /*unused*/,
277                                        CodePath /*unused*/,
278                                        ValueType*& /*unused*/, //NOLINT(google-runtime-references)
279                                        IVec& /*unused*/,       //NOLINT(google-runtime-references)
280                                        IVec& /*unused*/)       //NOLINT(google-runtime-references)
281 {
282     GMX_THROW(InternalError("Deleted function call"));
283     // explicitly deleting the general template does not compile in clang, see https://llvm.org/bugs/show_bug.cgi?id=17537
284 }
285
286 //! Getting the PME real grid memory buffer and its sizes
287 template<>
288 void pmeGetGridAndSizesInternal<real>(const gmx_pme_t* pme, CodePath mode, real*& grid, IVec& gridSize, IVec& paddedGridSize)
289 {
290     grid = pmeGetRealGridInternal(pme);
291     pmeGetRealGridSizesInternal(pme, mode, gridSize, paddedGridSize);
292 }
293
294 //! Getting the PME complex grid memory buffer and its sizes
295 template<>
296 void pmeGetGridAndSizesInternal<t_complex>(const gmx_pme_t* pme,
297                                            CodePath /*unused*/,
298                                            t_complex*& grid,
299                                            IVec&       gridSize,
300                                            IVec&       paddedGridSize)
301 {
302     grid = pmeGetComplexGridInternal(pme);
303     pmeGetComplexGridSizesInternal(pme, gridSize, paddedGridSize);
304 }
305
306 //! PME spline calculation and charge spreading
307 void pmePerformSplineAndSpread(gmx_pme_t* pme,
308                                CodePath   mode, // TODO const qualifiers elsewhere
309                                bool       computeSplines,
310                                bool       spreadCharges)
311 {
312     GMX_RELEASE_ASSERT(pme != nullptr, "PME data is not initialized");
313     PmeAtomComm* atc                          = &(pme->atc[0]);
314     const size_t gridIndex                    = 0;
315     const bool   computeSplinesForZeroCharges = true;
316     real**       fftgrid                      = spreadCharges ? pme->fftgrid : nullptr;
317     real*        pmegrid                      = pme->pmegrid[gridIndex].grid.grid;
318
319     switch (mode)
320     {
321         case CodePath::CPU:
322             spread_on_grid(pme,
323                            atc,
324                            &pme->pmegrid[gridIndex],
325                            computeSplines,
326                            spreadCharges,
327                            fftgrid != nullptr ? fftgrid[gridIndex] : nullptr,
328                            computeSplinesForZeroCharges,
329                            gridIndex);
330             if (spreadCharges && !pme->bUseThreads)
331             {
332                 wrap_periodic_pmegrid(pme, pmegrid);
333                 copy_pmegrid_to_fftgrid(
334                         pme, pmegrid, fftgrid != nullptr ? fftgrid[gridIndex] : nullptr, gridIndex);
335             }
336             break;
337
338 /* The compiler will complain about passing fftgrid (converting double ** to float **) if using
339  * double precision. GPUs are not used with double precision anyhow. */
340 #if !GMX_DOUBLE
341         case CodePath::GPU:
342         {
343             const real lambdaQ = 1.0;
344             // no synchronization needed as x is transferred in the PME stream
345             GpuEventSynchronizer* xReadyOnDevice = nullptr;
346
347             bool                           useGpuDirectComm         = false;
348             gmx::PmeCoordinateReceiverGpu* pmeCoordinateReceiverGpu = nullptr;
349
350             pme_gpu_spread(pme->gpu,
351                            xReadyOnDevice,
352                            fftgrid,
353                            computeSplines,
354                            spreadCharges,
355                            lambdaQ,
356                            useGpuDirectComm,
357                            pmeCoordinateReceiverGpu);
358         }
359         break;
360 #endif
361
362         default: GMX_THROW(InternalError("Test not implemented for this mode"));
363     }
364 }
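/* Illustrative only: a spread-focused test would typically prepare inputs with pmeInitAtoms()
 * (and, when skipping spline computation, pmeSetSplineData() plus pmeSetGridLineIndices()),
 * call pmePerformSplineAndSpread(pme, mode, computeSplines, spreadCharges), then
 * pmeFinalizeTest(pme, mode), and finally read the result back with pmeGetRealGrid(pme, mode).
 */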
365
366 //! Getting the internal spline data buffer pointer
367 static real* pmeGetSplineDataInternal(const gmx_pme_t* pme, PmeSplineDataType type, int dimIndex)
368 {
369     GMX_ASSERT((0 <= dimIndex) && (dimIndex < DIM), "Invalid dimension index");
370     const PmeAtomComm* atc          = &(pme->atc[0]);
371     const size_t       threadIndex  = 0;
372     real*              splineBuffer = nullptr;
373     switch (type)
374     {
375         case PmeSplineDataType::Values:
376             splineBuffer = atc->spline[threadIndex].theta.coefficients[dimIndex];
377             break;
378
379         case PmeSplineDataType::Derivatives:
380             splineBuffer = atc->spline[threadIndex].dtheta.coefficients[dimIndex];
381             break;
382
383         default: GMX_THROW(InternalError("Unknown spline data type"));
384     }
385     return splineBuffer;
386 }
387
388 //! PME solving
389 void pmePerformSolve(const gmx_pme_t*  pme,
390                      CodePath          mode,
391                      PmeSolveAlgorithm method,
392                      real              cellVolume,
393                      GridOrdering      gridOrdering,
394                      bool              computeEnergyAndVirial)
395 {
396     t_complex*   h_grid              = pmeGetComplexGridInternal(pme);
397     const bool   useLorentzBerthelot = false;
398     const size_t threadIndex         = 0;
399     const size_t gridIndex           = 0;
400     switch (mode)
401     {
402         case CodePath::CPU:
403             if (gridOrdering != GridOrdering::YZX)
404             {
405                 GMX_THROW(InternalError("Test not implemented for this mode"));
406             }
407             switch (method)
408             {
409                 case PmeSolveAlgorithm::Coulomb:
410                     solve_pme_yzx(pme, h_grid, cellVolume, computeEnergyAndVirial, pme->nthread, threadIndex);
411                     break;
412
413                 case PmeSolveAlgorithm::LennardJones:
414                     solve_pme_lj_yzx(pme,
415                                      &h_grid,
416                                      useLorentzBerthelot,
417                                      cellVolume,
418                                      computeEnergyAndVirial,
419                                      pme->nthread,
420                                      threadIndex);
421                     break;
422
423                 default: GMX_THROW(InternalError("Test not implemented for this mode"));
424             }
425             break;
426
427         case CodePath::GPU:
428             switch (method)
429             {
430                 case PmeSolveAlgorithm::Coulomb:
431                     pme_gpu_solve(pme->gpu, gridIndex, h_grid, gridOrdering, computeEnergyAndVirial);
432                     break;
433
434                 default: GMX_THROW(InternalError("Test not implemented for this mode"));
435             }
436             break;
437
438         default: GMX_THROW(InternalError("Test not implemented for this mode"));
439     }
440 }
441
442 //! PME force gathering
443 void pmePerformGather(gmx_pme_t* pme, CodePath mode, ForcesVector& forces)
444 {
445     PmeAtomComm* atc       = &(pme->atc[0]);
446     const index  atomCount = atc->numAtoms();
447     GMX_RELEASE_ASSERT(forces.ssize() == atomCount, "Invalid force buffer size");
448     const real   scale       = 1.0;
449     const size_t threadIndex = 0;
450     const size_t gridIndex   = 0;
451     real*        pmegrid     = pme->pmegrid[gridIndex].grid.grid;
452     real**       fftgrid     = pme->fftgrid;
453
454     switch (mode)
455     {
456         case CodePath::CPU:
457             atc->f = forces;
458             if (atc->nthread == 1)
459             {
460                 // Mimic what make_thread_local_ind() normally does in the serial spline computation
461                 atc->spline[threadIndex].n = atomCount;
462             }
463             copy_fftgrid_to_pmegrid(pme, fftgrid[gridIndex], pmegrid, gridIndex, pme->nthread, threadIndex);
464             unwrap_periodic_pmegrid(pme, pmegrid);
465             gather_f_bsplines(pme, pmegrid, true, atc, &atc->spline[threadIndex], scale);
466             break;
467
468 /* The compiler will complain about passing fftgrid (converting double ** to float **) if using
469  * double precision. GPUs are not used with double precision anyhow. */
470 #if !GMX_DOUBLE
471         case CodePath::GPU:
472         {
473             // Variable initialization needs a non-switch scope
474             const bool computeEnergyAndVirial = false;
475             const real lambdaQ                = 1.0;
476             PmeOutput  output = pme_gpu_getOutput(*pme, computeEnergyAndVirial, lambdaQ);
477             GMX_ASSERT(forces.size() == output.forces_.size(),
478                        "Size of force buffers did not match");
479             pme_gpu_gather(pme->gpu, fftgrid, lambdaQ);
480             std::copy(std::begin(output.forces_), std::end(output.forces_), std::begin(forces));
481         }
482         break;
483 #endif
484
485         default: GMX_THROW(InternalError("Test not implemented for this mode"));
486     }
487 }
488
489 //! PME test finalization before fetching the outputs
490 void pmeFinalizeTest(const gmx_pme_t* pme, CodePath mode)
491 {
492     switch (mode)
493     {
494         case CodePath::CPU: break;
495
496         case CodePath::GPU: pme_gpu_synchronize(pme->gpu); break;
497
498         default: GMX_THROW(InternalError("Test not implemented for this mode"));
499     }
500 }
501
502 //! A binary enum for spline data layout transformation
503 enum class PmeLayoutTransform
504 {
505     GpuToHost,
506     HostToGpu
507 };
508
509 /*! \brief Gets a unique index to an element in a spline parameter buffer.
510  *
511  * These theta/dtheta buffers are laid out for GPU spread/gather
512  * kernels. The index is relative to the execution block, in the range
513  * [0, atomsPerBlock * order * DIM).
514  *
515  * This is a wrapper, only used in unit tests.
516  * \param[in] order            PME order
517  * \param[in] splineIndex      Spline contribution index (from 0 to \p order - 1)
518  * \param[in] dimIndex         Dimension index (from 0 to 2)
519  * \param[in] atomIndex        Atom index relative to the block.
520  * \param[in] atomsPerWarp     Number of atoms processed by a warp.
521  *
522  * \returns Index into theta or dtheta array using GPU layout.
523  */
524 static int getSplineParamFullIndex(int order, int splineIndex, int dimIndex, int atomIndex, int atomsPerWarp)
525 {
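    // The getSplineParamIndexBase/getSplineParamIndex helpers used below are templated on a
    // compile-time order, so only the fixed c_pmeGpuOrder is supported here.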
526     if (order != c_pmeGpuOrder)
527     {
528         throw order;
529     }
530     constexpr int fixedOrder = c_pmeGpuOrder;
531     GMX_UNUSED_VALUE(fixedOrder);
532
533     const int atomWarpIndex = atomIndex % atomsPerWarp;
534     const int warpIndex     = atomIndex / atomsPerWarp;
535     int       indexBase, result;
536     switch (atomsPerWarp)
537     {
538         case 1:
539             indexBase = getSplineParamIndexBase<fixedOrder, 1>(warpIndex, atomWarpIndex);
540             result    = getSplineParamIndex<fixedOrder, 1>(indexBase, dimIndex, splineIndex);
541             break;
542
543         case 2:
544             indexBase = getSplineParamIndexBase<fixedOrder, 2>(warpIndex, atomWarpIndex);
545             result    = getSplineParamIndex<fixedOrder, 2>(indexBase, dimIndex, splineIndex);
546             break;
547
548         case 4:
549             indexBase = getSplineParamIndexBase<fixedOrder, 4>(warpIndex, atomWarpIndex);
550             result    = getSplineParamIndex<fixedOrder, 4>(indexBase, dimIndex, splineIndex);
551             break;
552
553         case 8:
554             indexBase = getSplineParamIndexBase<fixedOrder, 8>(warpIndex, atomWarpIndex);
555             result    = getSplineParamIndex<fixedOrder, 8>(indexBase, dimIndex, splineIndex);
556             break;
557
558         default:
559             GMX_THROW(NotImplementedError(
560                     formatString("Test function call not unrolled for atomsPerWarp = %d in "
561                                  "getSplineParamFullIndex",
562                                  atomsPerWarp)));
563     }
564     return result;
565 }
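// Illustrative usage (values assumed for the example): with order 4 and 2 atoms per warp, the
// theta value of spline contribution 1 along YY for atom 5 of a block would be read from the
// GPU-layout staging buffer as h_theta[getSplineParamFullIndex(4, 1, YY, 5, 2)].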
566
567 /*! \brief Returns the number of atoms per warp */
568 static int pme_gpu_get_atoms_per_warp(const PmeGpu* pmeGpu)
569 {
570     const int order = pmeGpu->common->pme_order;
571     const int threadsPerAtom =
572             (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
573     return pmeGpu->programHandle_->warpSize() / threadsPerAtom;
574 }
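// For example, assuming a warp size of 32 and PME order 4: ThreadsPerAtom::Order gives
// 32 / 4 = 8 atoms per warp, while using order * order threads per atom gives 32 / 16 = 2.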
575
576 /*! \brief Rearranges the atom spline data between the GPU and host layouts.
577  * Only used for test purposes so far, likely to be horribly slow.
578  *
579  * \param[in]  pmeGpu     The PME GPU structure.
580  * \param[out] atc        The PME CPU atom data structure (with a single-threaded layout).
581  * \param[in]  type       The spline data type (values or derivatives).
582  * \param[in]  dimIndex   Dimension index.
583  * \param[in]  transform  Layout transform type
584  */
585 static void pme_gpu_transform_spline_atom_data(const PmeGpu*      pmeGpu,
586                                                const PmeAtomComm* atc,
587                                                PmeSplineDataType  type,
588                                                int                dimIndex,
589                                                PmeLayoutTransform transform)
590 {
591     // The GPU atom spline data is laid out in a different way currently than the CPU one.
592     // This function converts the data from GPU to CPU layout (in the host memory).
593     // It is only intended for testing purposes so far.
594     // Ideally we should use similar layouts on CPU and GPU if we care about mixed modes and their
595     // performance (e.g. spreading on GPU, gathering on CPU).
596     GMX_RELEASE_ASSERT(atc->nthread == 1, "Only the serial PME data layout is supported");
597     const uintmax_t threadIndex  = 0;
598     const auto      atomCount    = atc->numAtoms();
599     const auto      atomsPerWarp = pme_gpu_get_atoms_per_warp(pmeGpu);
600     const auto      pmeOrder     = pmeGpu->common->pme_order;
601     GMX_ASSERT(pmeOrder == c_pmeGpuOrder, "Only PME order 4 is implemented");
602
603     real*  cpuSplineBuffer;
604     float* h_splineBuffer;
605     switch (type)
606     {
607         case PmeSplineDataType::Values:
608             cpuSplineBuffer = atc->spline[threadIndex].theta.coefficients[dimIndex];
609             h_splineBuffer  = pmeGpu->staging.h_theta;
610             break;
611
612         case PmeSplineDataType::Derivatives:
613             cpuSplineBuffer = atc->spline[threadIndex].dtheta.coefficients[dimIndex];
614             h_splineBuffer  = pmeGpu->staging.h_dtheta;
615             break;
616
617         default: GMX_THROW(InternalError("Unknown spline data type"));
618     }
619
620     for (auto atomIndex = 0; atomIndex < atomCount; atomIndex++)
621     {
622         for (auto orderIndex = 0; orderIndex < pmeOrder; orderIndex++)
623         {
624             const auto gpuValueIndex =
625                     getSplineParamFullIndex(pmeOrder, orderIndex, dimIndex, atomIndex, atomsPerWarp);
626             const auto cpuValueIndex = atomIndex * pmeOrder + orderIndex;
627             GMX_ASSERT(cpuValueIndex < atomCount * pmeOrder,
628                        "Atom spline data index out of bounds (while transforming GPU data layout "
629                        "for host)");
630             switch (transform)
631             {
632                 case PmeLayoutTransform::GpuToHost:
633                     cpuSplineBuffer[cpuValueIndex] = h_splineBuffer[gpuValueIndex];
634                     break;
635
636                 case PmeLayoutTransform::HostToGpu:
637                     h_splineBuffer[gpuValueIndex] = cpuSplineBuffer[cpuValueIndex];
638                     break;
639
640                 default: GMX_THROW(InternalError("Unknown layout transform"));
641             }
642         }
643     }
644 }
645
646 //! Setting atom spline values/derivatives to be used in spread/gather
647 void pmeSetSplineData(const gmx_pme_t*             pme,
648                       CodePath                     mode,
649                       const SplineParamsDimVector& splineValues,
650                       PmeSplineDataType            type,
651                       int                          dimIndex)
652 {
653     const PmeAtomComm* atc       = &(pme->atc[0]);
654     const index        atomCount = atc->numAtoms();
655     const index        pmeOrder  = pme->pme_order;
656     const index        dimSize   = pmeOrder * atomCount;
657     GMX_RELEASE_ASSERT(dimSize == splineValues.ssize(), "Mismatch in spline data");
658     real* splineBuffer = pmeGetSplineDataInternal(pme, type, dimIndex);
659
660     switch (mode)
661     {
662         case CodePath::CPU:
663             std::copy(splineValues.begin(), splineValues.end(), splineBuffer);
664             break;
665
666         case CodePath::GPU:
667             std::copy(splineValues.begin(), splineValues.end(), splineBuffer);
668             pme_gpu_transform_spline_atom_data(pme->gpu, atc, type, dimIndex, PmeLayoutTransform::HostToGpu);
669             break;
670
671         default: GMX_THROW(InternalError("Test not implemented for this mode"));
672     }
673 }
674
675 //! Setting gridline indices to be used in spread/gather
676 void pmeSetGridLineIndices(gmx_pme_t* pme, CodePath mode, const GridLineIndicesVector& gridLineIndices)
677 {
678     PmeAtomComm* atc       = &(pme->atc[0]);
679     const index  atomCount = atc->numAtoms();
680     GMX_RELEASE_ASSERT(atomCount == gridLineIndices.ssize(), "Mismatch in gridline indices size");
681
682     IVec paddedGridSizeUnused, gridSize(0, 0, 0);
683     pmeGetRealGridSizesInternal(pme, mode, gridSize, paddedGridSizeUnused);
684
685     for (const auto& index : gridLineIndices)
686     {
687         for (int i = 0; i < DIM; i++)
688         {
689             GMX_RELEASE_ASSERT((0 <= index[i]) && (index[i] < gridSize[i]),
690                                "Invalid gridline index");
691         }
692     }
693
694     switch (mode)
695     {
696         case CodePath::GPU:
697             memcpy(pme_gpu_staging(pme->gpu).h_gridlineIndices,
698                    gridLineIndices.data(),
699                    atomCount * sizeof(gridLineIndices[0]));
700             break;
701
702         case CodePath::CPU:
703             atc->idx.resize(gridLineIndices.size());
704             std::copy(gridLineIndices.begin(), gridLineIndices.end(), atc->idx.begin());
705             break;
706         default: GMX_THROW(InternalError("Test not implemented for this mode"));
707     }
708 }
709
710 //! Getting a plain index into the 3D grid (used for both real and complex grids)
711 inline size_t pmeGetGridPlainIndexInternal(const IVec& index, const IVec& paddedGridSize, GridOrdering gridOrdering)
712 {
713     size_t result;
714     switch (gridOrdering)
715     {
716         case GridOrdering::YZX:
717             result = (index[YY] * paddedGridSize[ZZ] + index[ZZ]) * paddedGridSize[XX] + index[XX];
718             break;
719
720         case GridOrdering::XYZ:
721             result = (index[XX] * paddedGridSize[YY] + index[YY]) * paddedGridSize[ZZ] + index[ZZ];
722             break;
723
724         default: GMX_THROW(InternalError("Test not implemented for this mode"));
725     }
726     return result;
727 }
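// Worked example (illustration only): with a padded grid of 4 x 5 x 6 and index (1, 2, 3),
// XYZ ordering yields (1 * 5 + 2) * 6 + 3 = 45, while YZX ordering yields (2 * 6 + 3) * 4 + 1 = 61.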
728
729 //! Setting real or complex grid
730 template<typename ValueType>
731 static void pmeSetGridInternal(const gmx_pme_t*                        pme,
732                                CodePath                                mode,
733                                GridOrdering                            gridOrdering,
734                                const SparseGridValuesInput<ValueType>& gridValues)
735 {
736     IVec       gridSize(0, 0, 0), paddedGridSize(0, 0, 0);
737     ValueType* grid;
738     pmeGetGridAndSizesInternal<ValueType>(pme, mode, grid, gridSize, paddedGridSize);
739
740     switch (mode)
741     {
742         case CodePath::GPU: // intentional absence of break, the grid will be copied from the host buffer in testing mode
743         case CodePath::CPU:
744             std::memset(grid, 0, paddedGridSize[XX] * paddedGridSize[YY] * paddedGridSize[ZZ] * sizeof(ValueType));
745             for (const auto& gridValue : gridValues)
746             {
747                 for (int i = 0; i < DIM; i++)
748                 {
749                     GMX_RELEASE_ASSERT((0 <= gridValue.first[i]) && (gridValue.first[i] < gridSize[i]),
750                                        "Invalid grid value index");
751                 }
752                 const size_t gridValueIndex =
753                         pmeGetGridPlainIndexInternal(gridValue.first, paddedGridSize, gridOrdering);
754                 grid[gridValueIndex] = gridValue.second;
755             }
756             break;
757
758         default: GMX_THROW(InternalError("Test not implemented for this mode"));
759     }
760 }
761
762 //! Setting real grid to be used in gather
763 void pmeSetRealGrid(const gmx_pme_t* pme, CodePath mode, const SparseRealGridValuesInput& gridValues)
764 {
765     pmeSetGridInternal<real>(pme, mode, GridOrdering::XYZ, gridValues);
766 }
767
768 //! Setting complex grid to be used in solve
769 void pmeSetComplexGrid(const gmx_pme_t*                    pme,
770                        CodePath                            mode,
771                        GridOrdering                        gridOrdering,
772                        const SparseComplexGridValuesInput& gridValues)
773 {
774     pmeSetGridInternal<t_complex>(pme, mode, gridOrdering, gridValues);
775 }
776
777 //! Getting the single dimension's spline values or derivatives
778 SplineParamsDimVector pmeGetSplineData(const gmx_pme_t* pme, CodePath mode, PmeSplineDataType type, int dimIndex)
779 {
780     GMX_RELEASE_ASSERT(pme != nullptr, "PME data is not initialized");
781     const PmeAtomComm* atc       = &(pme->atc[0]);
782     const size_t       atomCount = atc->numAtoms();
783     const size_t       pmeOrder  = pme->pme_order;
784     const size_t       dimSize   = pmeOrder * atomCount;
785
786     real*                 sourceBuffer = pmeGetSplineDataInternal(pme, type, dimIndex);
787     SplineParamsDimVector result;
788     switch (mode)
789     {
790         case CodePath::GPU:
791             pme_gpu_transform_spline_atom_data(pme->gpu, atc, type, dimIndex, PmeLayoutTransform::GpuToHost);
792             result = arrayRefFromArray(sourceBuffer, dimSize);
793             break;
794
795         case CodePath::CPU: result = arrayRefFromArray(sourceBuffer, dimSize); break;
796
797         default: GMX_THROW(InternalError("Test not implemented for this mode"));
798     }
799     return result;
800 }
801
802 //! Getting the gridline indices
803 GridLineIndicesVector pmeGetGridlineIndices(const gmx_pme_t* pme, CodePath mode)
804 {
805     GMX_RELEASE_ASSERT(pme != nullptr, "PME data is not initialized");
806     const PmeAtomComm* atc       = &(pme->atc[0]);
807     const size_t       atomCount = atc->numAtoms();
808
809     GridLineIndicesVector gridLineIndices;
810     switch (mode)
811     {
812         case CodePath::GPU:
813             gridLineIndices = arrayRefFromArray(
814                     reinterpret_cast<IVec*>(pme_gpu_staging(pme->gpu).h_gridlineIndices), atomCount);
815             break;
816
817         case CodePath::CPU: gridLineIndices = atc->idx; break;
818
819         default: GMX_THROW(InternalError("Test not implemented for this mode"));
820     }
821     return gridLineIndices;
822 }
823
824 //! Getting real or complex grid - only non-zero values
825 template<typename ValueType>
826 static SparseGridValuesOutput<ValueType> pmeGetGridInternal(const gmx_pme_t* pme,
827                                                             CodePath         mode,
828                                                             GridOrdering     gridOrdering)
829 {
830     IVec       gridSize(0, 0, 0), paddedGridSize(0, 0, 0);
831     ValueType* grid;
832     pmeGetGridAndSizesInternal<ValueType>(pme, mode, grid, gridSize, paddedGridSize);
833     SparseGridValuesOutput<ValueType> gridValues;
834     switch (mode)
835     {
836         case CodePath::GPU: // intentional absence of break
837         case CodePath::CPU:
838             gridValues.clear();
839             for (int ix = 0; ix < gridSize[XX]; ix++)
840             {
841                 for (int iy = 0; iy < gridSize[YY]; iy++)
842                 {
843                     for (int iz = 0; iz < gridSize[ZZ]; iz++)
844                     {
845                         IVec         temp(ix, iy, iz);
846                         const size_t gridValueIndex =
847                                 pmeGetGridPlainIndexInternal(temp, paddedGridSize, gridOrdering);
848                         const ValueType value = grid[gridValueIndex];
849                         if (value != ValueType{})
850                         {
851                             auto key        = formatString("Cell %d %d %d", ix, iy, iz);
852                             gridValues[key] = value;
853                         }
854                     }
855                 }
856             }
857             break;
858
859         default: GMX_THROW(InternalError("Test not implemented for this mode"));
860     }
861     return gridValues;
862 }
863
864 //! Getting the real grid (spreading output of pmePerformSplineAndSpread())
865 SparseRealGridValuesOutput pmeGetRealGrid(const gmx_pme_t* pme, CodePath mode)
866 {
867     return pmeGetGridInternal<real>(pme, mode, GridOrdering::XYZ);
868 }
869
870 //! Getting the complex grid output of pmePerformSolve()
871 SparseComplexGridValuesOutput pmeGetComplexGrid(const gmx_pme_t* pme, CodePath mode, GridOrdering gridOrdering)
872 {
873     return pmeGetGridInternal<t_complex>(pme, mode, gridOrdering);
874 }
875
876 //! Getting the reciprocal energy and virial
877 PmeOutput pmeGetReciprocalEnergyAndVirial(const gmx_pme_t* pme, CodePath mode, PmeSolveAlgorithm method)
878 {
879     PmeOutput  output;
880     const real lambdaQ = 1.0;
881     switch (mode)
882     {
883         case CodePath::CPU:
884             switch (method)
885             {
886                 case PmeSolveAlgorithm::Coulomb:
887                     get_pme_ener_vir_q(pme->solve_work, pme->nthread, &output);
888                     break;
889
890                 case PmeSolveAlgorithm::LennardJones:
891                     get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &output);
892                     break;
893
894                 default: GMX_THROW(InternalError("Test not implemented for this mode"));
895             }
896             break;
897         case CodePath::GPU:
898             switch (method)
899             {
900                 case PmeSolveAlgorithm::Coulomb:
901                     pme_gpu_getEnergyAndVirial(*pme, lambdaQ, &output);
902                     break;
903
904                 default: GMX_THROW(InternalError("Test not implemented for this mode"));
905             }
906             break;
907
908         default: GMX_THROW(InternalError("Test not implemented for this mode"));
909     }
910     return output;
911 }
912
913 const char* codePathToString(CodePath codePath)
914 {
915     switch (codePath)
916     {
917         case CodePath::CPU: return "CPU";
918         case CodePath::GPU: return "GPU";
919         default: GMX_THROW(NotImplementedError("This CodePath should support codePathToString"));
920     }
921 }
922
923 PmeTestHardwareContext::PmeTestHardwareContext() : codePath_(CodePath::CPU) {}
924
925 PmeTestHardwareContext::PmeTestHardwareContext(TestDevice* testDevice) :
926     codePath_(CodePath::GPU), testDevice_(testDevice)
927 {
928     setActiveDevice(testDevice_->deviceInfo());
929     pmeGpuProgram_ = buildPmeGpuProgram(testDevice_->deviceContext());
930 }
931
932 //! Returns a human-readable context description line
933 std::string PmeTestHardwareContext::description() const
934 {
935     switch (codePath_)
936     {
937         case CodePath::CPU: return "CPU";
938         case CodePath::GPU: return "GPU (" + testDevice_->description() + ")";
939         default: return "Unknown code path.";
940     }
941 }
942
943 void PmeTestHardwareContext::activate() const
944 {
945     if (codePath_ == CodePath::GPU)
946     {
947         setActiveDevice(testDevice_->deviceInfo());
948     }
949 }
950
951 std::vector<std::unique_ptr<PmeTestHardwareContext>> createPmeTestHardwareContextList()
952 {
953     std::vector<std::unique_ptr<PmeTestHardwareContext>> pmeTestHardwareContextList;
954     // Add CPU
955     pmeTestHardwareContextList.emplace_back(std::make_unique<PmeTestHardwareContext>());
956     // Add GPU devices
957     const auto& testDeviceList = getTestHardwareEnvironment()->getTestDeviceList();
958     for (const auto& testDevice : testDeviceList)
959     {
960         pmeTestHardwareContextList.emplace_back(std::make_unique<PmeTestHardwareContext>(testDevice.get()));
961     }
962     return pmeTestHardwareContextList;
963 }
964
965 } // namespace test
966 } // namespace gmx