Merge release-2021 into master
[alexxy/gromacs.git] / src / gromacs / ewald / pme.cpp
index b10c31d3e8e86d8f5b2c3b3743f763790492ce9f..e2c87459a0589403888d57fdfead803bbabb869f 100644 (file)
@@ -3,7 +3,8 @@
  *
  * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
  * Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017 The GROMACS development team.
+ * Copyright (c) 2018,2019,2020,2021, by the GROMACS development team, led by
  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
  * and including many others, as listed in the AUTHORS file in the
  * top-level source directory and at http://www.gromacs.org.
@@ -83,7 +84,7 @@
 #include <list>
 
 #include "gromacs/domdec/domdec.h"
-#include "gromacs/ewald/ewald-utils.h"
+#include "gromacs/ewald/ewald_utils.h"
 #include "gromacs/fft/parallel_3dfft.h"
 #include "gromacs/fileio/pdbio.h"
 #include "gromacs/gmxlib/network.h"
@@ -98,6 +99,7 @@
 #include "gromacs/mdtypes/forcerec.h"
 #include "gromacs/mdtypes/inputrec.h"
 #include "gromacs/mdtypes/md_enums.h"
+#include "gromacs/mdtypes/simulation_workload.h"
 #include "gromacs/pbcutil/pbc.h"
 #include "gromacs/timing/cyclecounter.h"
 #include "gromacs/timing/wallcycle.h"
 #include "gromacs/utility/logger.h"
 #include "gromacs/utility/real.h"
 #include "gromacs/utility/smalloc.h"
-#include "gromacs/utility/stringutil.h"
+#include "gromacs/utility/message_string_collector.h"
 #include "gromacs/utility/unique_cptr.h"
 
-#include "calculate-spline-moduli.h"
-#include "pme-gather.h"
-#include "pme-gpu-internal.h"
-#include "pme-grid.h"
-#include "pme-internal.h"
-#include "pme-redistribute.h"
-#include "pme-solve.h"
-#include "pme-spline-work.h"
-#include "pme-spread.h"
-
-/*! \brief Help build a descriptive message in \c error if there are
- * \c errorReasons why PME on GPU is not supported.
- *
- * \returns Whether the lack of errorReasons indicate there is support. */
-static bool
-addMessageIfNotSupported(const std::list<std::string> &errorReasons,
-                         std::string                  *error)
+#include "calculate_spline_moduli.h"
+#include "pme_gather.h"
+#include "pme_gpu_internal.h"
+#include "pme_grid.h"
+#include "pme_internal.h"
+#include "pme_redistribute.h"
+#include "pme_solve.h"
+#include "pme_spline_work.h"
+#include "pme_spread.h"
+
+//NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
+bool g_allowPmeWithSyclForTesting = false;
+
+bool pme_gpu_supports_build(std::string* error)
 {
-    bool foundErrorReasons = errorReasons.empty();
-    if (!foundErrorReasons && error)
-    {
-        std::string regressionTestMarker = "PME GPU does not support";
-        // this prefix is tested for in the regression tests script gmxtest.pl
-        *error = regressionTestMarker + ": " + gmx::joinStrings(errorReasons, "; ") + ".";
-    }
-    return foundErrorReasons;
+    gmx::MessageStringCollector errorReasons;
+    // Before changing the prefix string, make sure that it is not searched for in regression tests.
+    errorReasons.startContext("PME GPU does not support:");
+    errorReasons.appendIf(GMX_DOUBLE, "Double-precision build of GROMACS.");
+    errorReasons.appendIf(!GMX_GPU, "Non-GPU build of GROMACS.");
+    errorReasons.appendIf(GMX_GPU_SYCL && !g_allowPmeWithSyclForTesting, "SYCL build."); // SYCL-TODO
+    errorReasons.finishContext();
+    if (error != nullptr)
+    {
+        *error = errorReasons.toString();
+    }
+    return errorReasons.isEmpty();
 }
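
The functions in this diff all share the `MessageStringCollector` pattern that replaces the old `addMessageIfNotSupported()` helper. A minimal standalone sketch of that pattern, using only the collector methods that appear in this diff (`checkFeature()` and its parameters are hypothetical):

```cpp
#include <string>

#include "gromacs/utility/message_string_collector.h"

// Returns true when nothing blocks the feature; otherwise fills *error.
static bool checkFeature(bool isDoubleBuild, bool isGpuBuild, std::string* error)
{
    gmx::MessageStringCollector reasons;
    reasons.startContext("Feature is not supported with:");
    reasons.appendIf(isDoubleBuild, "Double-precision build."); // appended only when true
    reasons.appendIf(!isGpuBuild, "Non-GPU build.");
    reasons.finishContext();
    if (error != nullptr)
    {
        *error = reasons.toString(); // empty string when there are no reasons
    }
    return reasons.isEmpty();
}
```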
 
-bool pme_gpu_supports_build(const gmx_hw_info_t &hwinfo,
-                            std::string         *error)
+bool pme_gpu_supports_hardware(const gmx_hw_info_t gmx_unused& hwinfo, std::string* error)
 {
-    std::list<std::string> errorReasons;
-    if (GMX_DOUBLE)
-    {
-        errorReasons.emplace_back("double precision");
-    }
-    if (GMX_GPU == GMX_GPU_NONE)
+    gmx::MessageStringCollector errorReasons;
+    // Before changing the prefix string, make sure that it is not searched for in regression tests.
+    errorReasons.startContext("PME GPU does not support:");
+#ifdef __APPLE__
+    errorReasons.appendIf(GMX_GPU_OPENCL, "Apple OS X operating system");
+#endif
+    errorReasons.finishContext();
+    if (error != nullptr)
     {
-        errorReasons.emplace_back("non-GPU build of GROMACS");
+        *error = errorReasons.toString();
     }
-    if (GMX_GPU == GMX_GPU_OPENCL)
-    {
-        if (!areAllGpuDevicesFromAmd(hwinfo.gpu_info))
-        {
-            errorReasons.emplace_back("only AMD devices are supported");
-        }
-    }
-    return addMessageIfNotSupported(errorReasons, error);
+    return errorReasons.isEmpty();
 }
 
-bool pme_gpu_supports_input(const t_inputrec &ir, const gmx_mtop_t &mtop, std::string *error)
+bool pme_gpu_supports_input(const t_inputrec& ir, std::string* error)
 {
-    std::list<std::string> errorReasons;
-    if (!EEL_PME(ir.coulombtype))
-    {
-        errorReasons.emplace_back("systems that do not use PME for electrostatics");
-    }
-    if (ir.pme_order != 4)
-    {
-        errorReasons.emplace_back("interpolation orders other than 4");
-    }
-    if (ir.efep != efepNO)
-    {
-        if (gmx_mtop_has_perturbed_charges(mtop))
-        {
-            errorReasons.emplace_back("free energy calculations with perturbed charges (multiple grids)");
-        }
-    }
-    if (EVDW_PME(ir.vdwtype))
-    {
-        errorReasons.emplace_back("Lennard-Jones PME");
-    }
-    if (ir.cutoff_scheme == ecutsGROUP)
-    {
-        errorReasons.emplace_back("group cutoff scheme");
-    }
-    if (!EI_DYNAMICS(ir.eI))
+    gmx::MessageStringCollector errorReasons;
+    // Before changing the prefix string, make sure that it is not searched for in regression tests.
+    errorReasons.startContext("PME GPU does not support:");
+    errorReasons.appendIf(!EEL_PME(ir.coulombtype),
+                          "Systems that do not use PME for electrostatics.");
+    errorReasons.appendIf((ir.pme_order != 4), "Interpolation orders other than 4.");
+    errorReasons.appendIf(EVDW_PME(ir.vdwtype), "Lennard-Jones PME.");
+    errorReasons.appendIf(!EI_DYNAMICS(ir.eI), "Non-dynamical integrator (use md, sd, etc).");
+    errorReasons.finishContext();
+    if (error != nullptr)
+    {
+        *error = errorReasons.toString();
+    }
+    return errorReasons.isEmpty();
+}
+
+bool pme_gpu_mixed_mode_supports_input(const t_inputrec& ir, std::string* error)
+{
+    gmx::MessageStringCollector errorReasons;
+    // Before changing the prefix string, make sure that it is not searched for in regression tests.
+    errorReasons.startContext("PME GPU in Mixed mode does not support:");
+    errorReasons.appendIf(ir.efep != FreeEnergyPerturbationType::No, "Free Energy Perturbation.");
+    errorReasons.finishContext();
+    if (error != nullptr)
     {
-        errorReasons.emplace_back("not a dynamical integrator");
+        *error = errorReasons.toString();
     }
-    return addMessageIfNotSupported(errorReasons, error);
+    return errorReasons.isEmpty();
 }
 
 /*! \brief \libinternal
@@ -207,38 +203,26 @@ bool pme_gpu_supports_input(const t_inputrec &ir, const gmx_mtop_t &mtop, std::s
  * \param[out] error        The error message if the input is not supported on GPU.
  * \returns                 True if this PME input is possible to run on GPU, false otherwise.
  */
-static bool pme_gpu_check_restrictions(const gmx_pme_t *pme, std::string *error)
+static bool pme_gpu_check_restrictions(const gmx_pme_t* pme, std::string* error)
 {
-    std::list<std::string> errorReasons;
-    if (pme->nnodes != 1)
-    {
-        errorReasons.emplace_back("PME decomposition");
-    }
-    if (pme->pme_order != 4)
-    {
-        errorReasons.emplace_back("interpolation orders other than 4");
-    }
-    if (pme->bFEP)
-    {
-        errorReasons.emplace_back("free energy calculations (multiple grids)");
-    }
-    if (pme->doLJ)
-    {
-        errorReasons.emplace_back("Lennard-Jones PME");
-    }
-    if (GMX_DOUBLE)
-    {
-        errorReasons.emplace_back("double precision");
-    }
-    if (GMX_GPU == GMX_GPU_NONE)
-    {
-        errorReasons.emplace_back("non-GPU build of GROMACS");
-    }
-
-    return addMessageIfNotSupported(errorReasons, error);
+    gmx::MessageStringCollector errorReasons;
+    // Before changing the prefix string, make sure that it is not searched for in regression tests.
+    errorReasons.startContext("PME GPU does not support:");
+    errorReasons.appendIf((pme->nnodes != 1), "PME decomposition.");
+    errorReasons.appendIf((pme->pme_order != 4), "interpolation orders other than 4.");
+    errorReasons.appendIf(pme->doLJ, "Lennard-Jones PME.");
+    errorReasons.appendIf(GMX_DOUBLE, "Double precision build of GROMACS.");
+    errorReasons.appendIf(!GMX_GPU, "Non-GPU build of GROMACS.");
+    errorReasons.appendIf(GMX_GPU_SYCL && !g_allowPmeWithSyclForTesting, "SYCL build of GROMACS."); // SYCL-TODO
+    errorReasons.finishContext();
+    if (error != nullptr)
+    {
+        *error = errorReasons.toString();
+    }
+    return errorReasons.isEmpty();
 }
 
-PmeRunMode pme_run_mode(const gmx_pme_t *pme)
+PmeRunMode pme_run_mode(const gmx_pme_t* pme)
 {
     GMX_ASSERT(pme != nullptr, "Expecting valid PME data pointer");
     return pme->runMode;
@@ -257,7 +241,7 @@ gmx::PinningPolicy pme_get_pinning_policy()
 const int gmxCacheLineSize = 64;
 
 //! Set up coordinate communication
-static void setup_coordinate_communication(pme_atomcomm_t *atc)
+static void setup_coordinate_communication(PmeAtomComm* atc)
 {
     int nslab, n, i;
     int fw, bw;
@@ -265,20 +249,20 @@ static void setup_coordinate_communication(pme_atomcomm_t *atc)
     nslab = atc->nslab;
 
     n = 0;
-    for (i = 1; i <= nslab/2; i++)
+    for (i = 1; i <= nslab / 2; i++)
     {
         fw = (atc->nodeid + i) % nslab;
         bw = (atc->nodeid - i + nslab) % nslab;
         if (n < nslab - 1)
         {
-            atc->node_dest[n] = fw;
-            atc->node_src[n]  = bw;
+            atc->slabCommSetup[n].node_dest = fw;
+            atc->slabCommSetup[n].node_src  = bw;
             n++;
         }
         if (n < nslab - 1)
         {
-            atc->node_dest[n] = bw;
-            atc->node_src[n]  = fw;
+            atc->slabCommSetup[n].node_dest = bw;
+            atc->slabCommSetup[n].node_src  = fw;
             n++;
         }
     }
@@ -287,11 +271,11 @@ static void setup_coordinate_communication(pme_atomcomm_t *atc)
 /*! \brief Round \p n up to the next multiple of \p f */
 static int mult_up(int n, int f)
 {
-    return ((n + f - 1)/f)*f;
+    return ((n + f - 1) / f) * f;
 }
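
The round-up idiom reads more easily with numbers plugged in; a worked check of the reformatted expression (values chosen for illustration):

```cpp
// mult_up(10, 4) == ((10 + 4 - 1) / 4) * 4 == (13 / 4) * 4 == 3 * 4 == 12
// mult_up(12, 4) == ((12 + 4 - 1) / 4) * 4 == (15 / 4) * 4 == 3 * 4 == 12 (a multiple is unchanged)
static_assert(((10 + 4 - 1) / 4) * 4 == 12, "round 10 up to the next multiple of 4");
static_assert(((12 + 4 - 1) / 4) * 4 == 12, "a multiple is left unchanged");
```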
 
 /*! \brief Return estimate of the load imbalance from the PME grid not being a good match for the number of PME ranks */
-static double estimate_pme_load_imbalance(struct gmx_pme_t *pme)
+static double estimate_pme_load_imbalance(struct gmx_pme_t* pme)
 {
     int    nma, nmi;
     double n1, n2, n3;
@@ -299,142 +283,77 @@ static double estimate_pme_load_imbalance(struct gmx_pme_t *pme)
     nma = pme->nnodes_major;
     nmi = pme->nnodes_minor;
 
-    n1 = mult_up(pme->nkx, nma)*mult_up(pme->nky, nmi)*pme->nkz;
-    n2 = mult_up(pme->nkx, nma)*mult_up(pme->nkz, nmi)*pme->nky;
-    n3 = mult_up(pme->nky, nma)*mult_up(pme->nkz, nmi)*pme->nkx;
+    n1 = mult_up(pme->nkx, nma) * mult_up(pme->nky, nmi) * pme->nkz;
+    n2 = mult_up(pme->nkx, nma) * mult_up(pme->nkz, nmi) * pme->nky;
+    n3 = mult_up(pme->nky, nma) * mult_up(pme->nkz, nmi) * pme->nkx;
 
     /* pme_solve is roughly double the cost of an fft */
 
-    return (n1 + n2 + 3*n3)/static_cast<double>(6*pme->nkx*pme->nky*pme->nkz);
+    return (n1 + n2 + 3 * n3) / static_cast<double>(6 * pme->nkx * pme->nky * pme->nkz);
 }
 
-/*! \brief Initialize atom communication data structure */
-static void init_atomcomm(struct gmx_pme_t *pme, pme_atomcomm_t *atc,
-                          int dimind, gmx_bool bSpread)
-{
-    int thread;
+#ifndef DOXYGEN
 
-    atc->dimind    = dimind;
-    atc->nslab     = 1;
-    atc->nodeid    = 0;
-    atc->pd_nalloc = 0;
-#if GMX_MPI
-    if (pme->nnodes > 1)
+PmeAtomComm::PmeAtomComm(MPI_Comm   PmeMpiCommunicator,
+                         const int  numThreads,
+                         const int  pmeOrder,
+                         const int  dimIndex,
+                         const bool doSpread) :
+    dimind(dimIndex), bSpread(doSpread), pme_order(pmeOrder), nthread(numThreads), spline(nthread)
+{
+    if (PmeMpiCommunicator != MPI_COMM_NULL)
     {
-        atc->mpi_comm = pme->mpi_comm_d[dimind];
-        MPI_Comm_size(atc->mpi_comm, &atc->nslab);
-        MPI_Comm_rank(atc->mpi_comm, &atc->nodeid);
+        mpi_comm = PmeMpiCommunicator;
+#    if GMX_MPI
+        MPI_Comm_size(mpi_comm, &nslab);
+        MPI_Comm_rank(mpi_comm, &nodeid);
+#    endif
     }
     if (debug)
     {
-        fprintf(debug, "For PME atom communication in dimind %d: nslab %d rank %d\n", atc->dimind, atc->nslab, atc->nodeid);
+        fprintf(debug, "For PME atom communication in dimind %d: nslab %d rank %d\n", dimind, nslab, nodeid);
     }
-#endif
 
-    atc->bSpread   = bSpread;
-    atc->pme_order = pme->pme_order;
-
-    if (atc->nslab > 1)
+    if (nslab > 1)
     {
-        snew(atc->node_dest, atc->nslab);
-        snew(atc->node_src, atc->nslab);
-        setup_coordinate_communication(atc);
+        slabCommSetup.resize(nslab);
+        setup_coordinate_communication(this);
 
-        snew(atc->count_thread, pme->nthread);
-        for (thread = 0; thread < pme->nthread; thread++)
+        count_thread.resize(nthread);
+        for (auto& countThread : count_thread)
         {
-            snew(atc->count_thread[thread], atc->nslab);
+            countThread.resize(nslab);
         }
-        atc->count = atc->count_thread[0];
-        snew(atc->rcount, atc->nslab);
-        snew(atc->buf_index, atc->nslab);
     }
 
-    atc->nthread = pme->nthread;
-    if (atc->nthread > 1)
+    if (nthread > 1)
     {
-        snew(atc->thread_plist, atc->nthread);
-    }
-    snew(atc->spline, atc->nthread);
-    for (thread = 0; thread < atc->nthread; thread++)
-    {
-        if (atc->nthread > 1)
-        {
-            snew(atc->thread_plist[thread].n, atc->nthread+2*gmxCacheLineSize);
-            atc->thread_plist[thread].n += gmxCacheLineSize;
-        }
-    }
-}
-
-/*! \brief Destroy an atom communication data structure and its child structs */
-static void destroy_atomcomm(pme_atomcomm_t *atc)
-{
-    sfree(atc->pd);
-    if (atc->nslab > 1)
-    {
-        sfree(atc->node_dest);
-        sfree(atc->node_src);
-        for (int i = 0; i < atc->nthread; i++)
-        {
-            sfree(atc->count_thread[i]);
-        }
-        sfree(atc->count_thread);
-        sfree(atc->rcount);
-        sfree(atc->buf_index);
-
-        sfree(atc->x);
-        sfree(atc->coefficient);
-        sfree(atc->f);
-    }
-    sfree(atc->idx);
-    sfree(atc->fractx);
+        threadMap.resize(nthread);
 
-    sfree(atc->thread_idx);
-    for (int i = 0; i < atc->nthread; i++)
-    {
-        if (atc->nthread > 1)
-        {
-            int *n_ptr = atc->thread_plist[i].n - gmxCacheLineSize;
-            sfree(n_ptr);
-            sfree(atc->thread_plist[i].i);
-        }
-        sfree(atc->spline[i].ind);
-        for (int d = 0; d < ZZ; d++)
+#    pragma omp parallel for num_threads(nthread) schedule(static)
+        for (int thread = 0; thread < nthread; thread++)
         {
-            sfree(atc->spline[i].theta[d]);
-            sfree(atc->spline[i].dtheta[d]);
+            try
+            {
+                /* Allocate buffer with padding to avoid cache pollution */
+                threadMap[thread].nBuffer.resize(nthread + 2 * gmxCacheLineSize);
+                threadMap[thread].n = threadMap[thread].nBuffer.data() + gmxCacheLineSize;
+            }
+            GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
         }
-        sfree_aligned(atc->spline[i].ptr_dtheta_z);
-        sfree_aligned(atc->spline[i].ptr_theta_z);
     }
-    if (atc->nthread > 1)
-    {
-        sfree(atc->thread_plist);
-    }
-    sfree(atc->spline);
 }
 
+#endif // !DOXYGEN
+
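The `nBuffer`/`n` pair set up in the constructor above is a padding idiom: each thread's counter array is over-allocated by a cache line on both sides so neighbouring threads never write to the same line. A minimal standalone sketch of the same trick (the struct name is hypothetical; the padding value is copied from `gmxCacheLineSize` in this file):

```cpp
#include <vector>

constexpr int cacheLinePad = 64; // same value as gmxCacheLineSize in this file

struct PaddedThreadCounters
{
    std::vector<int> storage; // over-allocated backing buffer
    int*             n;       // usable region, one pad past the start

    explicit PaddedThreadCounters(int numThreads) :
        storage(numThreads + 2 * cacheLinePad, 0),
        n(storage.data() + cacheLinePad) // leading/trailing pads absorb false sharing
    {
    }
};
```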
 /*! \brief Initialize data structure for communication */
-static void
-init_overlap_comm(pme_overlap_t *  ol,
-                  int              norder,
-#if GMX_MPI
-                  MPI_Comm         comm,
-#endif
-                  int              nnodes,
-                  int              nodeid,
-                  int              ndata,
-                  int              commplainsize)
+static void init_overlap_comm(pme_overlap_t* ol, int norder, MPI_Comm comm, int nnodes, int nodeid, int ndata, int commplainsize)
 {
-    gmx_bool         bCont;
-#if GMX_MPI
-    MPI_Status       stat;
+    gmx_bool bCont;
 
     ol->mpi_comm = comm;
-#endif
-
-    ol->nnodes = nnodes;
-    ol->nodeid = nodeid;
+    ol->nnodes   = nnodes;
+    ol->nodeid   = nodeid;
 
     /* Linear translation of the PME grid won't affect reciprocal space
      * calculations, so to optimize we only interpolate "upwards",
@@ -479,42 +398,41 @@ init_overlap_comm(pme_overlap_t *  ol,
         bCont = FALSE;
         for (int i = 0; i < nnodes; i++)
         {
-            if ((i + testRankCount <  nnodes && ol->s2g1[i] > ol->s2g0[i + testRankCount]) ||
-                (i + testRankCount >= nnodes && ol->s2g1[i] > ol->s2g0[i + testRankCount - nnodes] + ndata))
+            if ((i + testRankCount < nnodes && ol->s2g1[i] > ol->s2g0[i + testRankCount])
+                || (i + testRankCount >= nnodes && ol->s2g1[i] > ol->s2g0[i + testRankCount - nnodes] + ndata))
             {
                 bCont = TRUE;
             }
         }
-    }
-    while (bCont && testRankCount < nnodes);
+    } while (bCont && testRankCount < nnodes);
 
     ol->comm_data.resize(testRankCount - 1);
     ol->send_size = 0;
 
     for (size_t b = 0; b < ol->comm_data.size(); b++)
     {
-        pme_grid_comm_t *pgc = &ol->comm_data[b];
+        pme_grid_comm_t* pgc = &ol->comm_data[b];
 
         /* Send */
-        pgc->send_id = (ol->nodeid + (b + 1)) % ol->nnodes;
+        pgc->send_id  = (ol->nodeid + (b + 1)) % ol->nnodes;
         int fft_start = ol->s2g0[pgc->send_id];
         int fft_end   = ol->s2g0[pgc->send_id + 1];
         if (pgc->send_id < nodeid)
         {
             fft_start += ndata;
-            fft_end   += ndata;
+            fft_end += ndata;
         }
         int send_index1  = ol->s2g1[nodeid];
         send_index1      = std::min(send_index1, fft_end);
         pgc->send_index0 = fft_start;
         pgc->send_nindex = std::max(0, send_index1 - pgc->send_index0);
-        ol->send_size   += pgc->send_nindex;
+        ol->send_size += pgc->send_nindex;
 
         /* We always start receiving to the first index of our slab */
-        pgc->recv_id     = (ol->nodeid - (b + 1) + ol->nnodes) % ol->nnodes;
-        fft_start        = ol->s2g0[ol->nodeid];
-        fft_end          = ol->s2g0[ol->nodeid + 1];
-        int recv_index1  = ol->s2g1[pgc->recv_id];
+        pgc->recv_id    = (ol->nodeid - (b + 1) + ol->nnodes) % ol->nnodes;
+        fft_start       = ol->s2g0[ol->nodeid];
+        fft_end         = ol->s2g0[ol->nodeid + 1];
+        int recv_index1 = ol->s2g1[pgc->recv_id];
         if (pgc->recv_id > nodeid)
         {
             recv_index1 -= ndata;
@@ -526,11 +444,21 @@ init_overlap_comm(pme_overlap_t *  ol,
 
 #if GMX_MPI
     /* Communicate the buffer sizes to receive */
+    MPI_Status stat;
     for (size_t b = 0; b < ol->comm_data.size(); b++)
     {
-        MPI_Sendrecv(&ol->send_size, 1, MPI_INT, ol->comm_data[b].send_id, b,
-                     &ol->comm_data[b].recv_size, 1, MPI_INT, ol->comm_data[b].recv_id, b,
-                     ol->mpi_comm, &stat);
+        MPI_Sendrecv(&ol->send_size,
+                     1,
+                     MPI_INT,
+                     ol->comm_data[b].send_id,
+                     b,
+                     &ol->comm_data[b].recv_size,
+                     1,
+                     MPI_INT,
+                     ol->comm_data[b].recv_id,
+                     b,
+                     ol->mpi_comm,
+                     &stat);
     }
 #endif
 
@@ -548,7 +476,7 @@ int minimalPmeGridSize(int pmeOrder)
      * But we use the maximum for simplicity since in practice there is not
      * much performance difference between pme_order and 2*(pme_order -1).
      */
-    int minimalSize = 2*(pmeOrder - 1);
+    int minimalSize = 2 * (pmeOrder - 1);
 
     GMX_RELEASE_ASSERT(pmeOrder >= 3, "pmeOrder has to be >= 3");
     GMX_RELEASE_ASSERT(minimalSize >= pmeOrder + 1, "The grid size should be >= pmeOrder + 1");
@@ -556,11 +484,7 @@ int minimalPmeGridSize(int pmeOrder)
     return minimalSize;
 }
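
Worked values for the formula above: with the default pme_order = 4 this gives 2 * (4 - 1) = 6, satisfying both release assertions (4 >= 3 and 6 >= 4 + 1); with the smallest allowed order, 3, the result is 4, exactly pmeOrder + 1.

```cpp
// Worked check of minimalPmeGridSize for the two boundary orders:
static_assert(2 * (4 - 1) == 6 && 2 * (3 - 1) == 4, "minimal PME grid sizes");
```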
 
-bool gmx_pme_check_restrictions(int pme_order,
-                                int nkx, int nky, int nkz,
-                                int numPmeDomainsAlongX,
-                                bool useThreads,
-                                bool errorsAreFatal)
+bool gmx_pme_check_restrictions(int pme_order, int nkx, int nky, int nkz, int numPmeDomainsAlongX, bool useThreads, bool errorsAreFatal)
 {
     if (pme_order > PME_ORDER_MAX)
     {
@@ -570,38 +494,42 @@ bool gmx_pme_check_restrictions(int pme_order,
         }
 
         std::string message = gmx::formatString(
-                    "pme_order (%d) is larger than the maximum allowed value (%d). Modify and recompile the code if you really need such a high order.",
-                    pme_order, PME_ORDER_MAX);
+                "pme_order (%d) is larger than the maximum allowed value (%d). Modify and "
+                "recompile the code if you really need such a high order.",
+                pme_order,
+                PME_ORDER_MAX);
         GMX_THROW(gmx::InconsistentInputError(message));
     }
 
     const int minGridSize = minimalPmeGridSize(pme_order);
-    if (nkx < minGridSize ||
-        nky < minGridSize ||
-        nkz < minGridSize)
+    if (nkx < minGridSize || nky < minGridSize || nkz < minGridSize)
     {
         if (!errorsAreFatal)
         {
             return false;
         }
-        std::string message = gmx::formatString(
-                    "The PME grid sizes need to be >= 2*(pme_order-1) (%d)",
-                    minGridSize);
+        std::string message =
+                gmx::formatString("The PME grid sizes need to be >= 2*(pme_order-1) (%d)", minGridSize);
         GMX_THROW(gmx::InconsistentInputError(message));
     }
 
     /* Check for a limitation of the (current) sum_fftgrid_dd code.
      * We only allow multiple communication pulses in dim 1, not in dim 0.
      */
-    if (useThreads && (nkx < numPmeDomainsAlongX*pme_order &&
-                       nkx != numPmeDomainsAlongX*(pme_order - 1)))
+    if (useThreads
+        && (nkx < numPmeDomainsAlongX * pme_order && nkx != numPmeDomainsAlongX * (pme_order - 1)))
     {
         if (!errorsAreFatal)
         {
             return false;
         }
-        gmx_fatal(FARGS, "The number of PME grid lines per rank along x is %g. But when using OpenMP threads, the number of grid lines per rank along x should be >= pme_order (%d) or = pmeorder-1. To resolve this issue, use fewer ranks along x (and possibly more along y and/or z) by specifying -dd manually.",
-                  nkx/static_cast<double>(numPmeDomainsAlongX), pme_order);
+        gmx_fatal(FARGS,
+                  "The number of PME grid lines per rank along x is %g. But when using OpenMP "
+                  "threads, the number of grid lines per rank along x should be >= pme_order (%d) "
+                  "or = pmeorder-1. To resolve this issue, use fewer ranks along x (and possibly "
+                  "more along y and/or z) by specifying -dd manually.",
+                  nkx / static_cast<double>(numPmeDomainsAlongX),
+                  pme_order);
     }
 
     return true;
@@ -610,27 +538,27 @@ bool gmx_pme_check_restrictions(int pme_order,
 /*! \brief Round \p enumerator */
 static int div_round_up(int enumerator, int denominator)
 {
-    return (enumerator + denominator - 1)/denominator;
+    return (enumerator + denominator - 1) / denominator;
 }
 
-gmx_pme_t *gmx_pme_init(const t_commrec         *cr,
-                        const NumPmeDomains     &numPmeDomains,
-                        const t_inputrec        *ir,
-                        int                      homenr,
-                        gmx_bool                 bFreeEnergy_q,
-                        gmx_bool                 bFreeEnergy_lj,
-                        gmx_bool                 bReproducible,
-                        real                     ewaldcoeff_q,
-                        real                     ewaldcoeff_lj,
-                        int                      nthread,
-                        PmeRunMode               runMode,
-                        PmeGpu                  *pmeGpu,
-                        const gmx_device_info_t *gpuInfo,
-                        PmeGpuProgramHandle      pmeGpuProgram,
-                        const gmx::MDLogger      & /*mdlog*/)
+gmx_pme_t* gmx_pme_init(const t_commrec*     cr,
+                        const NumPmeDomains& numPmeDomains,
+                        const t_inputrec*    ir,
+                        gmx_bool             bFreeEnergy_q,
+                        gmx_bool             bFreeEnergy_lj,
+                        gmx_bool             bReproducible,
+                        real                 ewaldcoeff_q,
+                        real                 ewaldcoeff_lj,
+                        int                  nthread,
+                        PmeRunMode           runMode,
+                        PmeGpu*              pmeGpu,
+                        const DeviceContext* deviceContext,
+                        const DeviceStream*  deviceStream,
+                        const PmeGpuProgram* pmeGpuProgram,
+                        const gmx::MDLogger& mdlog)
 {
-    int               use_threads, sum_use_threads, i;
-    ivec              ndata;
+    int  use_threads, sum_use_threads, i;
+    ivec ndata;
 
     if (debug)
     {
@@ -639,25 +567,23 @@ gmx_pme_t *gmx_pme_init(const t_commrec         *cr,
 
     gmx::unique_cptr<gmx_pme_t, gmx_pme_destroy> pme(new gmx_pme_t());
 
-    pme->sum_qgrid_tmp       = nullptr;
-    pme->sum_qgrid_dd_tmp    = nullptr;
+    pme->buf_nalloc = 0;
 
-    pme->buf_nalloc          = 0;
+    pme->nnodes  = 1;
+    pme->bPPnode = TRUE;
 
-    pme->nnodes              = 1;
-    pme->bPPnode             = TRUE;
+    pme->nnodes_major = numPmeDomains.x;
+    pme->nnodes_minor = numPmeDomains.y;
 
-    pme->nnodes_major        = numPmeDomains.x;
-    pme->nnodes_minor        = numPmeDomains.y;
-
-#if GMX_MPI
-    if (numPmeDomains.x*numPmeDomains.y > 1)
+    if (numPmeDomains.x * numPmeDomains.y > 1)
     {
         pme->mpi_comm = cr->mpi_comm_mygroup;
 
+#if GMX_MPI
         MPI_Comm_rank(pme->mpi_comm, &pme->nodeid);
         MPI_Comm_size(pme->mpi_comm, &pme->nnodes);
-        if (pme->nnodes != numPmeDomains.x*numPmeDomains.y)
+#endif
+        if (pme->nnodes != numPmeDomains.x * numPmeDomains.y)
         {
             gmx_incons("PME rank count mismatch");
         }
@@ -666,57 +592,52 @@ gmx_pme_t *gmx_pme_init(const t_commrec         *cr,
     {
         pme->mpi_comm = MPI_COMM_NULL;
     }
-#endif
 
     if (pme->nnodes == 1)
     {
-#if GMX_MPI
         pme->mpi_comm_d[0] = MPI_COMM_NULL;
         pme->mpi_comm_d[1] = MPI_COMM_NULL;
-#endif
-        pme->ndecompdim   = 0;
-        pme->nodeid_major = 0;
-        pme->nodeid_minor = 0;
-#if GMX_MPI
-        pme->mpi_comm_d[0] = pme->mpi_comm_d[1] = MPI_COMM_NULL;
-#endif
+        pme->ndecompdim    = 0;
+        pme->nodeid_major  = 0;
+        pme->nodeid_minor  = 0;
     }
     else
     {
         if (numPmeDomains.y == 1)
         {
-#if GMX_MPI
             pme->mpi_comm_d[0] = pme->mpi_comm;
             pme->mpi_comm_d[1] = MPI_COMM_NULL;
-#endif
-            pme->ndecompdim   = 1;
-            pme->nodeid_major = pme->nodeid;
-            pme->nodeid_minor = 0;
-
+            pme->ndecompdim    = 1;
+            pme->nodeid_major  = pme->nodeid;
+            pme->nodeid_minor  = 0;
         }
         else if (numPmeDomains.x == 1)
         {
-#if GMX_MPI
             pme->mpi_comm_d[0] = MPI_COMM_NULL;
             pme->mpi_comm_d[1] = pme->mpi_comm;
-#endif
-            pme->ndecompdim   = 1;
-            pme->nodeid_major = 0;
-            pme->nodeid_minor = pme->nodeid;
+            pme->ndecompdim    = 1;
+            pme->nodeid_major  = 0;
+            pme->nodeid_minor  = pme->nodeid;
         }
         else
         {
             if (pme->nnodes % numPmeDomains.x != 0)
             {
-                gmx_incons("For 2D PME decomposition, #PME ranks must be divisible by the number of domains along x");
+                gmx_incons(
+                        "For 2D PME decomposition, #PME ranks must be divisible by the number of "
+                        "domains along x");
             }
             pme->ndecompdim = 2;
 
 #if GMX_MPI
-            MPI_Comm_split(pme->mpi_comm, pme->nodeid % numPmeDomains.y,
-                           pme->nodeid, &pme->mpi_comm_d[0]);  /* My communicator along major dimension */
-            MPI_Comm_split(pme->mpi_comm, pme->nodeid/numPmeDomains.y,
-                           pme->nodeid, &pme->mpi_comm_d[1]);  /* My communicator along minor dimension */
+            MPI_Comm_split(pme->mpi_comm,
+                           pme->nodeid % numPmeDomains.y,
+                           pme->nodeid,
+                           &pme->mpi_comm_d[0]); /* My communicator along major dimension */
+            MPI_Comm_split(pme->mpi_comm,
+                           pme->nodeid / numPmeDomains.y,
+                           pme->nodeid,
+                           &pme->mpi_comm_d[1]); /* My communicator along minor dimension */
 
             MPI_Comm_rank(pme->mpi_comm_d[0], &pme->nodeid_major);
             MPI_Comm_size(pme->mpi_comm_d[0], &pme->nnodes_major);
@@ -724,8 +645,10 @@ gmx_pme_t *gmx_pme_init(const t_commrec         *cr,
             MPI_Comm_size(pme->mpi_comm_d[1], &pme->nnodes_minor);
 #endif
         }
-        pme->bPPnode = thisRankHasDuty(cr, DUTY_PP);
     }
+    // cr is always initialized if there is a PP rank, so we can safely assume
+    // that when it is not, as in the Ewald tests, we are not on a PP rank.
+    pme->bPPnode = ((cr != nullptr && cr->duty != 0) && thisRankHasDuty(cr, DUTY_PP));
 
     pme->nthread = nthread;
 
@@ -734,8 +657,7 @@ gmx_pme_t *gmx_pme_init(const t_commrec         *cr,
 #if GMX_MPI
     if (pme->nnodes > 1)
     {
-        MPI_Allreduce(&use_threads, &sum_use_threads, 1, MPI_INT,
-                      MPI_SUM, pme->mpi_comm);
+        MPI_Allreduce(&use_threads, &sum_use_threads, 1, MPI_INT, MPI_SUM, pme->mpi_comm);
     }
     else
 #endif
@@ -744,7 +666,7 @@ gmx_pme_t *gmx_pme_init(const t_commrec         *cr,
     }
     pme->bUseThreads = (sum_use_threads > 0);
 
-    if (ir->ePBC == epbcSCREW)
+    if (ir->pbcType == PbcType::Screw)
     {
         gmx_fatal(FARGS, "pme does not (yet) work with pbc = screw");
     }
@@ -757,36 +679,33 @@ gmx_pme_t *gmx_pme_init(const t_commrec         *cr,
      * not calculating free-energy for Coulomb and/or LJ while gmx_pme_init()
      * configures with free-energy, but that has never been tested.
      */
-    pme->doCoulomb     = EEL_PME(ir->coulombtype);
-    pme->doLJ          = EVDW_PME(ir->vdwtype);
-    pme->bFEP_q        = ((ir->efep != efepNO) && bFreeEnergy_q);
-    pme->bFEP_lj       = ((ir->efep != efepNO) && bFreeEnergy_lj);
-    pme->bFEP          = (pme->bFEP_q || pme->bFEP_lj);
-    pme->nkx           = ir->nkx;
-    pme->nky           = ir->nky;
-    pme->nkz           = ir->nkz;
-    pme->bP3M          = (ir->coulombtype == eelP3M_AD || getenv("GMX_PME_P3M") != nullptr);
+    pme->doCoulomb = EEL_PME(ir->coulombtype);
+    pme->doLJ      = EVDW_PME(ir->vdwtype);
+    pme->bFEP_q    = ((ir->efep != FreeEnergyPerturbationType::No) && bFreeEnergy_q);
+    pme->bFEP_lj   = ((ir->efep != FreeEnergyPerturbationType::No) && bFreeEnergy_lj);
+    pme->bFEP      = (pme->bFEP_q || pme->bFEP_lj);
+    pme->nkx       = ir->nkx;
+    pme->nky       = ir->nky;
+    pme->nkz       = ir->nkz;
+    pme->bP3M = (ir->coulombtype == CoulombInteractionType::P3mAD || getenv("GMX_PME_P3M") != nullptr);
     pme->pme_order     = ir->pme_order;
     pme->ewaldcoeff_q  = ewaldcoeff_q;
     pme->ewaldcoeff_lj = ewaldcoeff_lj;
 
     /* Always constant electrostatics coefficients */
-    pme->epsilon_r     = ir->epsilon_r;
+    pme->epsilon_r = ir->epsilon_r;
 
     /* Always constant LJ coefficients */
     pme->ljpme_combination_rule = ir->ljpme_combination_rule;
 
     // The box requires scaling with nwalls = 2, we store that condition as well
     // as the scaling factor
-    delete pme->boxScaler;
-    pme->boxScaler = new EwaldBoxZScaler(*ir);
+    pme->boxScaler = std::make_unique<EwaldBoxZScaler>(
+            EwaldBoxZScaler(inputrecPbcXY2Walls(ir), ir->wall_ewald_zfac));
 
     /* If we violate restrictions, generate a fatal error here */
-    gmx_pme_check_restrictions(pme->pme_order,
-                               pme->nkx, pme->nky, pme->nkz,
-                               pme->nnodes_major,
-                               pme->bUseThreads,
-                               true);
+    gmx_pme_check_restrictions(
+            pme->pme_order, pme->nkx, pme->nky, pme->nkz, pme->nnodes_major, pme->bUseThreads, true);
 
     if (pme->nnodes > 1)
     {
@@ -806,16 +725,24 @@ gmx_pme_t *gmx_pme_init(const t_commrec         *cr,
         imbal = estimate_pme_load_imbalance(pme.get());
         if (imbal >= 1.2 && pme->nodeid_major == 0 && pme->nodeid_minor == 0)
         {
-            fprintf(stderr,
-                    "\n"
-                    "NOTE: The load imbalance in PME FFT and solve is %d%%.\n"
-                    "      For optimal PME load balancing\n"
-                    "      PME grid_x (%d) and grid_y (%d) should be divisible by #PME_ranks_x (%d)\n"
-                    "      and PME grid_y (%d) and grid_z (%d) should be divisible by #PME_ranks_y (%d)\n"
-                    "\n",
-                    gmx::roundToInt((imbal-1)*100),
-                    pme->nkx, pme->nky, pme->nnodes_major,
-                    pme->nky, pme->nkz, pme->nnodes_minor);
+            GMX_LOG(mdlog.warning)
+                    .asParagraph()
+                    .appendTextFormatted(
+                            "NOTE: The load imbalance in PME FFT and solve is %d%%.\n"
+                            "      For optimal PME load balancing\n"
+                            "      PME grid_x (%d) and grid_y (%d) should be divisible by "
+                            "#PME_ranks_x "
+                            "(%d)\n"
+                            "      and PME grid_y (%d) and grid_z (%d) should be divisible by "
+                            "#PME_ranks_y "
+                            "(%d)",
+                            gmx::roundToInt((imbal - 1) * 100),
+                            pme->nkx,
+                            pme->nky,
+                            pme->nnodes_major,
+                            pme->nky,
+                            pme->nkz,
+                            pme->nnodes_minor);
         }
     }
 
@@ -824,32 +751,35 @@ gmx_pme_t *gmx_pme_init(const t_commrec         *cr,
      * y is always copied through a buffer: we don't need padding in z,
      * but we do need the overlap in x because of the communication order.
      */
-    init_overlap_comm(&pme->overlap[0], pme->pme_order,
-#if GMX_MPI
+    init_overlap_comm(&pme->overlap[0],
+                      pme->pme_order,
                       pme->mpi_comm_d[0],
-#endif
-                      pme->nnodes_major, pme->nodeid_major,
+                      pme->nnodes_major,
+                      pme->nodeid_major,
                       pme->nkx,
-                      (div_round_up(pme->nky, pme->nnodes_minor)+pme->pme_order)*(pme->nkz+pme->pme_order-1));
+                      (div_round_up(pme->nky, pme->nnodes_minor) + pme->pme_order)
+                              * (pme->nkz + pme->pme_order - 1));
 
     /* Along overlap dim 1 we can send in multiple pulses in sum_fftgrid_dd.
      * We do this with an offset buffer of equal size, so we need to allocate
      * extra for the offset. That's what the (+1)*pme->nkz is for.
      */
-    init_overlap_comm(&pme->overlap[1], pme->pme_order,
-#if GMX_MPI
+    init_overlap_comm(&pme->overlap[1],
+                      pme->pme_order,
                       pme->mpi_comm_d[1],
-#endif
-                      pme->nnodes_minor, pme->nodeid_minor,
+                      pme->nnodes_minor,
+                      pme->nodeid_minor,
                       pme->nky,
-                      (div_round_up(pme->nkx, pme->nnodes_major)+pme->pme_order+1)*pme->nkz);
+                      (div_round_up(pme->nkx, pme->nnodes_major) + pme->pme_order + 1) * pme->nkz);
 
     /* Double-check for a limitation of the (current) sum_fftgrid_dd code.
      * Note that gmx_pme_check_restrictions checked for this already.
      */
     if (pme->bUseThreads && (pme->overlap[0].comm_data.size() > 1))
     {
-        gmx_incons("More than one communication pulse required for grid overlap communication along the major dimension while using threads");
+        gmx_incons(
+                "More than one communication pulse required for grid overlap communication along "
+                "the major dimension while using threads");
     }
 
     snew(pme->bsp_mod[XX], pme->nkx);
@@ -862,10 +792,8 @@ gmx_pme_t *gmx_pme_init(const t_commrec         *cr,
     /* The required size of the interpolation grid, including overlap.
      * The allocated size (pmegrid_n?) might be slightly larger.
      */
-    pme->pmegrid_nx = pme->overlap[0].s2g1[pme->nodeid_major] -
-        pme->overlap[0].s2g0[pme->nodeid_major];
-    pme->pmegrid_ny = pme->overlap[1].s2g1[pme->nodeid_minor] -
-        pme->overlap[1].s2g0[pme->nodeid_minor];
+    pme->pmegrid_nx = pme->overlap[0].s2g1[pme->nodeid_major] - pme->overlap[0].s2g0[pme->nodeid_major];
+    pme->pmegrid_ny = pme->overlap[1].s2g1[pme->nodeid_minor] - pme->overlap[1].s2g0[pme->nodeid_minor];
     pme->pmegrid_nz_base = pme->nkz;
     pme->pmegrid_nz      = pme->pmegrid_nz_base + pme->pme_order - 1;
     set_grid_alignment(&pme->pmegrid_nz, pme->pme_order);
@@ -873,30 +801,24 @@ gmx_pme_t *gmx_pme_init(const t_commrec         *cr,
     pme->pmegrid_start_iy = pme->overlap[1].s2g0[pme->nodeid_minor];
     pme->pmegrid_start_iz = 0;
 
-    make_gridindex_to_localindex(pme->nkx,
-                                 pme->pmegrid_start_ix,
-                                 pme->pmegrid_nx - (pme->pme_order-1),
-                                 &pme->nnx, &pme->fshx);
-    make_gridindex_to_localindex(pme->nky,
-                                 pme->pmegrid_start_iy,
-                                 pme->pmegrid_ny - (pme->pme_order-1),
-                                 &pme->nny, &pme->fshy);
-    make_gridindex_to_localindex(pme->nkz,
-                                 pme->pmegrid_start_iz,
-                                 pme->pmegrid_nz_base,
-                                 &pme->nnz, &pme->fshz);
+    make_gridindex_to_localindex(
+            pme->nkx, pme->pmegrid_start_ix, pme->pmegrid_nx - (pme->pme_order - 1), &pme->nnx, &pme->fshx);
+    make_gridindex_to_localindex(
+            pme->nky, pme->pmegrid_start_iy, pme->pmegrid_ny - (pme->pme_order - 1), &pme->nny, &pme->fshy);
+    make_gridindex_to_localindex(
+            pme->nkz, pme->pmegrid_start_iz, pme->pmegrid_nz_base, &pme->nnz, &pme->fshz);
 
     pme->spline_work = make_pme_spline_work(pme->pme_order);
 
-    ndata[0]    = pme->nkx;
-    ndata[1]    = pme->nky;
-    ndata[2]    = pme->nkz;
+    ndata[0] = pme->nkx;
+    ndata[1] = pme->nky;
+    ndata[2] = pme->nkz;
     /* It doesn't matter if we allocate too many grids here,
      * we only allocate and use the ones we need.
      */
     if (pme->doLJ)
     {
-        pme->ngrids = ((ir->ljpme_combination_rule == eljpmeLB) ? DO_Q_AND_LJ_LB : DO_Q_AND_LJ);
+        pme->ngrids = ((ir->ljpme_combination_rule == LongRangeVdW::LB) ? DO_Q_AND_LJ_LB : DO_Q_AND_LJ);
     }
     else
     {
@@ -908,27 +830,34 @@ gmx_pme_t *gmx_pme_init(const t_commrec         *cr,
 
     for (i = 0; i < pme->ngrids; ++i)
     {
-        if ((i <  DO_Q && pme->doCoulomb && (i == 0 ||
-                                             bFreeEnergy_q)) ||
-            (i >= DO_Q && pme->doLJ && (i == 2 ||
-                                        bFreeEnergy_lj ||
-                                        ir->ljpme_combination_rule == eljpmeLB)))
+        if ((i < DO_Q && pme->doCoulomb && (i == 0 || bFreeEnergy_q))
+            || (i >= DO_Q && pme->doLJ
+                && (i == 2 || bFreeEnergy_lj || ir->ljpme_combination_rule == LongRangeVdW::LB)))
         {
             pmegrids_init(&pme->pmegrid[i],
-                          pme->pmegrid_nx, pme->pmegrid_ny, pme->pmegrid_nz,
+                          pme->pmegrid_nx,
+                          pme->pmegrid_ny,
+                          pme->pmegrid_nz,
                           pme->pmegrid_nz_base,
                           pme->pme_order,
                           pme->bUseThreads,
                           pme->nthread,
-                          pme->overlap[0].s2g1[pme->nodeid_major]-pme->overlap[0].s2g0[pme->nodeid_major+1],
-                          pme->overlap[1].s2g1[pme->nodeid_minor]-pme->overlap[1].s2g0[pme->nodeid_minor+1]);
+                          pme->overlap[0].s2g1[pme->nodeid_major]
+                                  - pme->overlap[0].s2g0[pme->nodeid_major + 1],
+                          pme->overlap[1].s2g1[pme->nodeid_minor]
+                                  - pme->overlap[1].s2g0[pme->nodeid_minor + 1]);
             /* This routine will allocate the grid data to fit the FFTs */
-            const auto allocateRealGridForGpu = (pme->runMode == PmeRunMode::Mixed) ? gmx::PinningPolicy::PinnedIfSupported : gmx::PinningPolicy::CannotBePinned;
-            gmx_parallel_3dfft_init(&pme->pfft_setup[i], ndata,
-                                    &pme->fftgrid[i], &pme->cfftgrid[i],
+            const auto allocateRealGridForGpu = (pme->runMode == PmeRunMode::Mixed)
+                                                        ? gmx::PinningPolicy::PinnedIfSupported
+                                                        : gmx::PinningPolicy::CannotBePinned;
+            gmx_parallel_3dfft_init(&pme->pfft_setup[i],
+                                    ndata,
+                                    &pme->fftgrid[i],
+                                    &pme->cfftgrid[i],
                                     pme->mpi_comm_d,
-                                    bReproducible, pme->nthread, allocateRealGridForGpu);
-
+                                    bReproducible,
+                                    pme->nthread,
+                                    allocateRealGridForGpu);
         }
     }
 
@@ -944,37 +873,33 @@ gmx_pme_t *gmx_pme_init(const t_commrec         *cr,
     }
 
     /* Use atc[0] for spreading */
-    init_atomcomm(pme.get(), &pme->atc[0], numPmeDomains.x > 1 ? 0 : 1, TRUE);
+    const int firstDimIndex   = (numPmeDomains.x > 1 ? 0 : 1);
+    MPI_Comm  mpiCommFirstDim = (pme->nnodes > 1 ? pme->mpi_comm_d[firstDimIndex] : MPI_COMM_NULL);
+    bool      doSpread        = true;
+    pme->atc.emplace_back(mpiCommFirstDim, pme->nthread, pme->pme_order, firstDimIndex, doSpread);
     if (pme->ndecompdim >= 2)
     {
-        init_atomcomm(pme.get(), &pme->atc[1], 1, FALSE);
+        const int secondDimIndex = 1;
+        doSpread                 = false;
+        pme->atc.emplace_back(pme->mpi_comm_d[1], pme->nthread, pme->pme_order, secondDimIndex, doSpread);
     }
 
-    if (pme->nnodes == 1)
-    {
-        pme->atc[0].n = homenr;
-        pme_realloc_atomcomm_things(&pme->atc[0]);
-    }
-
-    pme->lb_buf1       = nullptr;
-    pme->lb_buf2       = nullptr;
-    pme->lb_buf_nalloc = 0;
-
-    if (pme_gpu_active(pme.get()))
+    // Initial check of validity of the input for running on the GPU
+    if (pme->runMode != PmeRunMode::CPU)
     {
-        if (!pme->gpu)
+        std::string errorString;
+        bool        canRunOnGpu = pme_gpu_check_restrictions(pme.get(), &errorString);
+        if (!canRunOnGpu)
         {
-            // Initial check of validity of the data
-            std::string errorString;
-            bool        canRunOnGpu = pme_gpu_check_restrictions(pme.get(), &errorString);
-            if (!canRunOnGpu)
-            {
-                GMX_THROW(gmx::NotImplementedError(errorString));
-            }
+            GMX_THROW(gmx::NotImplementedError(errorString));
         }
-
-        pme_gpu_reinit(pme.get(), gpuInfo, pmeGpuProgram);
+        pme_gpu_reinit(pme.get(), deviceContext, deviceStream, pmeGpuProgram);
     }
+    else
+    {
+        GMX_ASSERT(pme->gpu == nullptr, "Should not have PME GPU object when PME is on a CPU.");
+    }
+
 
     pme_init_all_work(&pme->solve_work, pme->nthread, pme->nkx);
 
@@ -982,21 +907,19 @@ gmx_pme_t *gmx_pme_init(const t_commrec         *cr,
     return pme.release();
 }
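
Note the exception-safety idiom in gmx_pme_init: the half-built object lives in a `gmx::unique_cptr<gmx_pme_t, gmx_pme_destroy>` (declared near the top of the function), so an exception anywhere during setup runs gmx_pme_destroy automatically, and only the final `pme.release()` transfers ownership to the caller. A minimal sketch of the same guard-then-release idiom with standard types (`Thing`/`destroyThing`/`makeThing` are hypothetical):

```cpp
#include <memory>

struct Thing { int value = 0; };

static void destroyThing(Thing* t)
{
    delete t; // stands in for gmx_pme_destroy's cleanup
}

static Thing* makeThing()
{
    // The deleter guards the partially built object during fallible setup.
    std::unique_ptr<Thing, void (*)(Thing*)> thing(new Thing(), destroyThing);
    thing->value = 42;      // setup steps that may throw would go here
    return thing.release(); // success: hand raw ownership to the caller
}
```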
 
-void gmx_pme_reinit(struct gmx_pme_t **pmedata,
-                    const t_commrec   *cr,
-                    struct gmx_pme_t * pme_src,
-                    const t_inputrec * ir,
+void gmx_pme_reinit(struct gmx_pme_t** pmedata,
+                    const t_commrec*   cr,
+                    struct gmx_pme_t*  pme_src,
+                    const t_inputrec*  ir,
                     const ivec         grid_size,
                     real               ewaldcoeff_q,
                     real               ewaldcoeff_lj)
 {
-    int        homenr;
-
     // Create a copy of t_inputrec fields that are used in gmx_pme_init().
     // TODO: This would be better as just copying a sub-structure that contains
     // all the PME parameters and nothing else.
     t_inputrec irc;
-    irc.ePBC                   = ir->ePBC;
+    irc.pbcType                = ir->pbcType;
     irc.coulombtype            = ir->coulombtype;
     irc.vdwtype                = ir->vdwtype;
     irc.efep                   = ir->efep;
@@ -1007,39 +930,48 @@ void gmx_pme_reinit(struct gmx_pme_t **pmedata,
     irc.nky                    = grid_size[YY];
     irc.nkz                    = grid_size[ZZ];
 
-    if (pme_src->nnodes == 1)
-    {
-        homenr = pme_src->atc[0].n;
-    }
-    else
-    {
-        homenr = -1;
-    }
-
     try
     {
+        // This is reinit. Any logging should have been done at first init.
+        // Here we should avoid writing notes for settings the user did not
+        // set directly.
         const gmx::MDLogger dummyLogger;
-        // This is reinit which is currently only changing grid size/coefficients,
-        // so we don't expect the actual logging.
-        // TODO: when PME is an object, it should take reference to mdlog on construction and save it.
         GMX_ASSERT(pmedata, "Invalid PME pointer");
         NumPmeDomains numPmeDomains = { pme_src->nnodes_major, pme_src->nnodes_minor };
-        *pmedata = gmx_pme_init(cr, numPmeDomains,
-                                &irc, homenr, pme_src->bFEP_q, pme_src->bFEP_lj, FALSE, ewaldcoeff_q, ewaldcoeff_lj,
-                                pme_src->nthread, pme_src->runMode, pme_src->gpu, nullptr, nullptr, dummyLogger);
-        //TODO this is mostly passing around current values
+        *pmedata                    = gmx_pme_init(cr,
+                                numPmeDomains,
+                                &irc,
+                                pme_src->bFEP_q,
+                                pme_src->bFEP_lj,
+                                FALSE,
+                                ewaldcoeff_q,
+                                ewaldcoeff_lj,
+                                pme_src->nthread,
+                                pme_src->runMode,
+                                pme_src->gpu,
+                                nullptr,
+                                nullptr,
+                                nullptr,
+                                dummyLogger);
+        /* When running PME on the CPU not using domain decomposition,
+         * the atom data is allocated once only in gmx_pme_(re)init().
+         */
+        if (!pme_src->gpu && pme_src->nnodes == 1)
+        {
+            gmx_pme_reinit_atoms(*pmedata, pme_src->atc[0].numAtoms(), {}, {});
+        }
+        // TODO this is mostly passing around current values
     }
-    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
+    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
 
     /* We can easily reuse the allocated pme grids in pme_src */
     reuse_pmegrids(&pme_src->pmegrid[PME_GRID_QA], &(*pmedata)->pmegrid[PME_GRID_QA]);
     /* We would like to reuse the fft grids, but that's harder */
 }
 
-void gmx_pme_calc_energy(struct gmx_pme_t *pme, int n, rvec *x, real *q, real *V)
+real gmx_pme_calc_energy(gmx_pme_t* pme, gmx::ArrayRef<const gmx::RVec> x, gmx::ArrayRef<const real> q)
 {
-    pme_atomcomm_t *atc;
-    pmegrids_t     *grid;
+    pmegrids_t* grid;
 
     if (pme->nnodes > 1)
     {
@@ -1050,17 +982,12 @@ void gmx_pme_calc_energy(struct gmx_pme_t *pme, int n, rvec *x, real *q, real *V
         gmx_incons("gmx_pme_calc_energy with free energy");
     }
 
-    atc            = &pme->atc_energy;
-    atc->nthread   = 1;
-    if (atc->spline == nullptr)
+    if (!pme->atc_energy)
     {
-        snew(atc->spline, atc->nthread);
+        pme->atc_energy = std::make_unique<PmeAtomComm>(MPI_COMM_NULL, 1, pme->pme_order, 0, true);
     }
-    atc->nslab     = 1;
-    atc->bSpread   = TRUE;
-    atc->pme_order = pme->pme_order;
-    atc->n         = n;
-    pme_realloc_atomcomm_things(atc);
+    PmeAtomComm* atc = pme->atc_energy.get();
+    atc->setNumAtoms(x.ssize());
     atc->x           = x;
     atc->coefficient = q;
 
@@ -1070,83 +997,81 @@ void gmx_pme_calc_energy(struct gmx_pme_t *pme, int n, rvec *x, real *q, real *V
     /* Only calculate the spline coefficients, don't actually spread */
     spread_on_grid(pme, atc, nullptr, TRUE, FALSE, pme->fftgrid[PME_GRID_QA], FALSE, PME_GRID_QA);
 
-    *V = gather_energy_bsplines(pme, grid->grid.grid, atc);
+    return gather_energy_bsplines(pme, grid->grid.grid, atc);
 }
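
Call sites change accordingly: the energy is now the return value and the inputs are ArrayRefs, so contiguous containers convert implicitly. A hypothetical call site under those assumptions (values chosen for illustration):

```cpp
#include <vector>

// Hypothetical caller; assumes an initialized single-rank, non-FEP pme.
real testPmeEnergy(gmx_pme_t* pme)
{
    std::vector<gmx::RVec> x = { { 0, 0, 0 }, { 1, 0, 0 } }; // two test positions
    std::vector<real>      q = { 1, -1 };                    // matching charges
    // std::vector converts implicitly to gmx::ArrayRef.
    return gmx_pme_calc_energy(pme, x, q);
    // Old form: gmx_pme_calc_energy(pme, n, x, q, &energy) with raw pointers.
}
```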
 
 /*! \brief Calculate initial Lorentz-Berthelot coefficients for LJ-PME */
-static void
-calc_initial_lb_coeffs(struct gmx_pme_t *pme, const real *local_c6, const real *local_sigma)
+static void calc_initial_lb_coeffs(gmx::ArrayRef<real>       coefficient,
+                                   gmx::ArrayRef<const real> local_c6,
+                                   gmx::ArrayRef<const real> local_sigma)
 {
-    int  i;
-    for (i = 0; i < pme->atc[0].n; ++i)
+    for (gmx::index i = 0; i < coefficient.ssize(); ++i)
     {
-        real sigma4;
-        sigma4                     = local_sigma[i];
-        sigma4                     = sigma4*sigma4;
-        sigma4                     = sigma4*sigma4;
-        pme->atc[0].coefficient[i] = local_c6[i] / sigma4;
+        real sigma4    = local_sigma[i];
+        sigma4         = sigma4 * sigma4;
+        sigma4         = sigma4 * sigma4;
+        coefficient[i] = local_c6[i] / sigma4;
     }
 }
 
 /*! \brief Calculate next Lorentz-Berthelot coefficients for LJ-PME */
-static void
-calc_next_lb_coeffs(struct gmx_pme_t *pme, const real *local_sigma)
+static void calc_next_lb_coeffs(gmx::ArrayRef<real> coefficient, gmx::ArrayRef<const real> local_sigma)
 {
-    int  i;
-
-    for (i = 0; i < pme->atc[0].n; ++i)
+    for (gmx::index i = 0; i < coefficient.ssize(); ++i)
     {
-        pme->atc[0].coefficient[i] *= local_sigma[i];
+        coefficient[i] *= local_sigma[i];
     }
 }
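
Together these two helpers generate, for atom i, the geometric sequence of spreading coefficients c6_i/sigma_i^4, c6_i/sigma_i^3, c6_i/sigma_i^2, ... (one extra factor of sigma_i per calc_next_lb_coeffs call), which the Lorentz-Berthelot LJ-PME path (the LongRangeVdW::LB / DO_Q_AND_LJ_LB case selected in gmx_pme_init above) spreads onto its successive LJ grids. An illustrative wrapper, not in the source, that shows the sequence:

```cpp
#include <vector>

// Sequence of per-atom LB spreading coefficients produced by the helpers above.
std::vector<double> lbCoefficientSequence(double c6, double sigma, int numGrids)
{
    std::vector<double> coeffs;
    double c = c6 / (sigma * sigma * sigma * sigma); // calc_initial_lb_coeffs
    for (int k = 0; k < numGrids; ++k)
    {
        coeffs.push_back(c);
        c *= sigma; // one calc_next_lb_coeffs step
    }
    return coeffs; // c6/sigma^4, c6/sigma^3, c6/sigma^2, ...
}
```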
 
-int gmx_pme_do(struct gmx_pme_t *pme,
-               int start,       int homenr,
-               rvec x[],        rvec f[],
-               real chargeA[],  real chargeB[],
-               real c6A[],      real c6B[],
-               real sigmaA[],   real sigmaB[],
-               matrix box,      const t_commrec *cr,
-               int  maxshift_x, int maxshift_y,
-               t_nrnb *nrnb,    gmx_wallcycle *wcycle,
-               matrix vir_q,    matrix vir_lj,
-               real *energy_q,  real *energy_lj,
-               real lambda_q,   real lambda_lj,
-               real *dvdlambda_q, real *dvdlambda_lj,
-               int flags)
+int gmx_pme_do(struct gmx_pme_t*              pme,
+               gmx::ArrayRef<const gmx::RVec> coordinates,
+               gmx::ArrayRef<gmx::RVec>       forces,
+               gmx::ArrayRef<const real>      chargeA,
+               gmx::ArrayRef<const real>      chargeB,
+               gmx::ArrayRef<const real>      c6A,
+               gmx::ArrayRef<const real>      c6B,
+               gmx::ArrayRef<const real>      sigmaA,
+               gmx::ArrayRef<const real>      sigmaB,
+               const matrix                   box,
+               const t_commrec*               cr,
+               int                            maxshift_x,
+               int                            maxshift_y,
+               t_nrnb*                        nrnb,
+               gmx_wallcycle*                 wcycle,
+               matrix                         vir_q,
+               matrix                         vir_lj,
+               real*                          energy_q,
+               real*                          energy_lj,
+               real                           lambda_q,
+               real                           lambda_lj,
+               real*                          dvdlambda_q,
+               real*                          dvdlambda_lj,
+               const gmx::StepWorkload&       stepWork)
 {
-    GMX_ASSERT(pme->runMode == PmeRunMode::CPU, "gmx_pme_do should not be called on the GPU PME run.");
-
-    int                  d, i, j, npme, grid_index, max_grid_index;
-    int                  n_d;
-    pme_atomcomm_t      *atc        = nullptr;
-    pmegrids_t          *pmegrid    = nullptr;
-    real                *grid       = nullptr;
-    rvec                *f_d;
-    real                *coefficient = nullptr;
-    real                 energy_AB[4];
-    matrix               vir_AB[4];
-    real                 scale, lambda;
-    gmx_bool             bClearF;
-    gmx_parallel_3dfft_t pfft_setup;
-    real              *  fftgrid;
-    t_complex          * cfftgrid;
-    int                  thread;
-    gmx_bool             bFirst, bDoSplines;
-    int                  fep_state;
-    int                  fep_states_lj           = pme->bFEP_lj ? 2 : 1;
-    const gmx_bool       bCalcEnerVir            = (flags & GMX_PME_CALC_ENER_VIR) != 0;
-    const gmx_bool       bBackFFT                = (flags & (GMX_PME_CALC_F | GMX_PME_CALC_POT)) != 0;
-    const gmx_bool       bCalcF                  = (flags & GMX_PME_CALC_F) != 0;
-
-    /* We could be passing lambda!=1 while no q or LJ is actually perturbed */
+    GMX_ASSERT(pme->runMode == PmeRunMode::CPU,
+               "gmx_pme_do should not be called on the GPU PME run.");
+
+    PmeAtomComm&              atc     = pme->atc[0];
+    pmegrids_t*               pmegrid = nullptr;
+    real*                     grid    = nullptr;
+    gmx::ArrayRef<const real> coefficient;
+    std::array<PmeOutput, 2>  output; // The second is used for the B state with FEP
+    gmx_parallel_3dfft_t      pfft_setup;
+    real*                     fftgrid;
+    t_complex*                cfftgrid;
+    int                       thread;
+    const int                 fep_states_lj = pme->bFEP_lj ? 2 : 1;
+    // There's no support for computing energy without virial, or vice versa
+    const bool computeEnergyAndVirial = (stepWork.computeEnergy || stepWork.computeVirial);
+
+    /* We could be passing lambda!=0 while no q or LJ is actually perturbed */
     if (!pme->bFEP_q)
     {
-        lambda_q  = 1;
+        lambda_q = 0;
     }
     if (!pme->bFEP_lj)
     {
-        lambda_lj = 1;
+        lambda_lj = 0;
     }
 
     assert(pme->nnodes > 0);
@@ -1154,37 +1079,28 @@ int gmx_pme_do(struct gmx_pme_t *pme,
 
     if (pme->nnodes > 1)
     {
-        atc      = &pme->atc[0];
-        atc->npd = homenr;
-        if (atc->npd > atc->pd_nalloc)
-        {
-            atc->pd_nalloc = over_alloc_dd(atc->npd);
-            srenew(atc->pd, atc->pd_nalloc);
-        }
-        for (d = pme->ndecompdim-1; d >= 0; d--)
+        atc.pd.resize(coordinates.ssize());
+        for (int d = pme->ndecompdim - 1; d >= 0; d--)
         {
-            atc           = &pme->atc[d];
-            atc->maxshift = (atc->dimind == 0 ? maxshift_x : maxshift_y);
+            PmeAtomComm& atc = pme->atc[d];
+            atc.maxshift     = (atc.dimind == 0 ? maxshift_x : maxshift_y);
         }
     }
     else
     {
-        atc = &pme->atc[0];
-        /* This could be necessary for TPI */
-        pme->atc[0].n = homenr;
-        if (DOMAINDECOMP(cr))
-        {
-            pme_realloc_atomcomm_things(atc);
-        }
-        atc->x = x;
-        atc->f = f;
+        GMX_ASSERT(coordinates.ssize() == atc.numAtoms(), "We expect atc.numAtoms() coordinates");
+        GMX_ASSERT(forces.ssize() >= atc.numAtoms(),
+                   "We need a force buffer with at least atc.numAtoms() elements");
+
+        atc.x = coordinates;
+        atc.f = forces;
     }
 
     matrix scaledBox;
     pme->boxScaler->scaleBox(box, scaledBox);
 
     gmx::invertBoxMatrix(scaledBox, pme->recipbox);
-    bFirst = TRUE;
+    bool bFirst = true;
 
     /* For simplicity, we construct the splines for all particles if
      * more than one PME calculation is needed. Some optimization
@@ -1193,7 +1109,7 @@ int gmx_pme_do(struct gmx_pme_t *pme,
      * that don't yet have them.
      */
 
-    bDoSplines = pme->bFEP || (pme->doCoulomb && pme->doLJ);
+    bool bDoSplines = pme->bFEP || (pme->doCoulomb && pme->doLJ);
 
     /* We need a maximum of four separate PME calculations:
      * grid_index=0: Coulomb PME with charges from state A
@@ -1205,19 +1121,17 @@ int gmx_pme_do(struct gmx_pme_t *pme,
      */
 
     /* If we are doing LJ-PME with LB, we only do Q here */
-    max_grid_index = (pme->ljpme_combination_rule == eljpmeLB) ? DO_Q : DO_Q_AND_LJ;
-
-    for (grid_index = 0; grid_index < max_grid_index; ++grid_index)
+    const int max_grid_index = (pme->ljpme_combination_rule == LongRangeVdW::LB) ? DO_Q : DO_Q_AND_LJ;
+    bool      bClearF;
+    for (int grid_index = 0; grid_index < max_grid_index; ++grid_index)
     {
         /* Check if we should do calculations at this grid_index
          * If grid_index is odd we should be doing FEP
          * If grid_index < 2 we should be doing electrostatic PME
          * If grid_index >= 2 we should be doing LJ-PME
          */
-        if ((grid_index <  DO_Q && (!pme->doCoulomb ||
-                                    (grid_index == 1 && !pme->bFEP_q))) ||
-            (grid_index >= DO_Q && (!pme->doLJ ||
-                                    (grid_index == 3 && !pme->bFEP_lj))))
+        if ((grid_index < DO_Q && (!pme->doCoulomb || (grid_index == 1 && !pme->bFEP_q)))
+            || (grid_index >= DO_Q && (!pme->doLJ || (grid_index == 3 && !pme->bFEP_lj))))
         {
             continue;
         }
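The skip test above can be read as a predicate over the four grids; a hypothetical helper (not part of the source) restating the same logic:

static bool gridIndexIsActive(int gridIndex, bool doCoulomb, bool doLJ, bool bFEP_q, bool bFEP_lj)
{
    // Grids 0/1 are Coulomb A/B states, grids 2/3 are LJ A/B states;
    // the odd (B-state) grids are only needed when that term is perturbed.
    if (gridIndex < DO_Q)
    {
        return doCoulomb && (gridIndex == 0 || bFEP_q);
    }
    return doLJ && (gridIndex == 2 || bFEP_lj);
}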
@@ -1228,80 +1142,57 @@ int gmx_pme_do(struct gmx_pme_t *pme,
         pfft_setup = pme->pfft_setup[grid_index];
         switch (grid_index)
         {
-            case 0: coefficient = chargeA + start; break;
-            case 1: coefficient = chargeB + start; break;
-            case 2: coefficient = c6A + start; break;
-            case 3: coefficient = c6B + start; break;
+            case 0: coefficient = chargeA; break;
+            case 1: coefficient = chargeB; break;
+            case 2: coefficient = c6A; break;
+            case 3: coefficient = c6B; break;
         }
 
         grid = pmegrid->grid.grid;
 
-        if (debug)
-        {
-            fprintf(debug, "PME: number of ranks = %d, rank = %d\n",
-                    cr->nnodes, cr->nodeid);
-            fprintf(debug, "Grid = %p\n", static_cast<void*>(grid));
-            if (grid == nullptr)
-            {
-                gmx_fatal(FARGS, "No grid!");
-            }
-        }
-
         if (pme->nnodes == 1)
         {
-            atc->coefficient = coefficient;
+            atc.coefficient = coefficient;
         }
         else
         {
-            wallcycle_start(wcycle, ewcPME_REDISTXF);
-            do_redist_pos_coeffs(pme, cr, start, homenr, bFirst, x, coefficient);
+            wallcycle_start(wcycle, WallCycleCounter::PmeRedistXF);
+            do_redist_pos_coeffs(pme, cr, bFirst, coordinates, coefficient);
 
-            wallcycle_stop(wcycle, ewcPME_REDISTXF);
+            wallcycle_stop(wcycle, WallCycleCounter::PmeRedistXF);
         }
 
-        if (debug)
+        wallcycle_start(wcycle, WallCycleCounter::PmeSpread);
+
+        /* Spread the coefficients on a grid */
+        spread_on_grid(pme, &atc, pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);
+
+        if (bFirst)
         {
-            fprintf(debug, "Rank= %6d, pme local particles=%6d\n",
-                    cr->nodeid, atc->n);
+            inc_nrnb(nrnb, eNR_WEIGHTS, DIM * atc.numAtoms());
         }
+        inc_nrnb(nrnb, eNR_SPREADBSP, pme->pme_order * pme->pme_order * pme->pme_order * atc.numAtoms());
 
-        if (flags & GMX_PME_SPREAD)
+        if (!pme->bUseThreads)
         {
-            wallcycle_start(wcycle, ewcPME_SPREAD);
+            wrap_periodic_pmegrid(pme, grid);
 
-            /* Spread the coefficients on a grid */
-            spread_on_grid(pme, &pme->atc[0], pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);
-
-            if (bFirst)
+            /* sum contributions to local grid from other nodes */
+            if (pme->nnodes > 1)
             {
-                inc_nrnb(nrnb, eNR_WEIGHTS, DIM*atc->n);
+                gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
             }
-            inc_nrnb(nrnb, eNR_SPREADBSP,
-                     pme->pme_order*pme->pme_order*pme->pme_order*atc->n);
-
-            if (!pme->bUseThreads)
-            {
-                wrap_periodic_pmegrid(pme, grid);
-
-                /* sum contributions to local grid from other nodes */
-#if GMX_MPI
-                if (pme->nnodes > 1)
-                {
-                    gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
-                }
-#endif
 
-                copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
-            }
+            copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
+        }
 
-            wallcycle_stop(wcycle, ewcPME_SPREAD);
+        wallcycle_stop(wcycle, WallCycleCounter::PmeSpread);
 
-            /* TODO If the OpenMP and single-threaded implementations
-               converge, then spread_on_grid() and
-               copy_pmegrid_to_fftgrid() will perhaps live in the same
-               source file.
-             */
-        }
+        /* TODO If the OpenMP and single-threaded implementations
+           converge, then spread_on_grid() and
+           copy_pmegrid_to_fftgrid() will perhaps live in the same
+           source file.
+        */
 
         /* Here we start a large thread parallel region */
 #pragma omp parallel num_threads(pme->nthread) private(thread)
@@ -1309,100 +1200,96 @@ int gmx_pme_do(struct gmx_pme_t *pme,
             try
             {
                 thread = gmx_omp_get_thread_num();
-                if (flags & GMX_PME_SOLVE)
-                {
-                    int loop_count;
-
-                    /* do 3d-fft */
-                    if (thread == 0)
-                    {
-                        wallcycle_start(wcycle, ewcPME_FFT);
-                    }
-                    gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX,
-                                               thread, wcycle);
-                    if (thread == 0)
-                    {
-                        wallcycle_stop(wcycle, ewcPME_FFT);
-                    }
+                int loop_count;
 
-                    /* solve in k-space for our local cells */
-                    if (thread == 0)
-                    {
-                        wallcycle_start(wcycle, (grid_index < DO_Q ? ewcPME_SOLVE : ewcLJPME));
-                    }
-                    if (grid_index < DO_Q)
-                    {
-                        loop_count =
-                            solve_pme_yzx(pme, cfftgrid,
-                                          scaledBox[XX][XX]*scaledBox[YY][YY]*scaledBox[ZZ][ZZ],
-                                          bCalcEnerVir,
-                                          pme->nthread, thread);
-                    }
-                    else
-                    {
-                        loop_count =
-                            solve_pme_lj_yzx(pme, &cfftgrid, FALSE,
-                                             scaledBox[XX][XX]*scaledBox[YY][YY]*scaledBox[ZZ][ZZ],
-                                             bCalcEnerVir,
-                                             pme->nthread, thread);
-                    }
+                /* do 3d-fft */
+                if (thread == 0)
+                {
+                    wallcycle_start(wcycle, WallCycleCounter::PmeFft);
+                }
+                gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX, thread, wcycle);
+                if (thread == 0)
+                {
+                    wallcycle_stop(wcycle, WallCycleCounter::PmeFft);
+                }
 
-                    if (thread == 0)
-                    {
-                        wallcycle_stop(wcycle, (grid_index < DO_Q ? ewcPME_SOLVE : ewcLJPME));
-                        inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
-                    }
+                /* solve in k-space for our local cells */
+                if (thread == 0)
+                {
+                    wallcycle_start(
+                            wcycle,
+                            (grid_index < DO_Q ? WallCycleCounter::PmeSolve : WallCycleCounter::LJPme));
+                }
+                if (grid_index < DO_Q)
+                {
+                    loop_count = solve_pme_yzx(pme,
+                                               cfftgrid,
+                                               scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
+                                               computeEnergyAndVirial,
+                                               pme->nthread,
+                                               thread);
+                }
+                else
+                {
+                    loop_count =
+                            solve_pme_lj_yzx(pme,
+                                             &cfftgrid,
+                                             FALSE,
+                                             scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
+                                             computeEnergyAndVirial,
+                                             pme->nthread,
+                                             thread);
                 }
 
-                if (bBackFFT)
+                if (thread == 0)
                 {
-                    /* do 3d-invfft */
-                    if (thread == 0)
-                    {
-                        wallcycle_start(wcycle, ewcPME_FFT);
-                    }
-                    gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL,
-                                               thread, wcycle);
-                    if (thread == 0)
-                    {
-                        wallcycle_stop(wcycle, ewcPME_FFT);
+                    wallcycle_stop(
+                            wcycle,
+                            (grid_index < DO_Q ? WallCycleCounter::PmeSolve : WallCycleCounter::LJPme));
+                    inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
+                }
 
+                /* do 3d-invfft */
+                if (thread == 0)
+                {
+                    wallcycle_start(wcycle, WallCycleCounter::PmeFft);
+                }
+                gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL, thread, wcycle);
+                if (thread == 0)
+                {
+                    wallcycle_stop(wcycle, WallCycleCounter::PmeFft);
 
-                        if (pme->nodeid == 0)
-                        {
-                            real ntot = pme->nkx*pme->nky*pme->nkz;
-                            npme  = static_cast<int>(ntot*std::log(ntot)/std::log(2.0));
-                            inc_nrnb(nrnb, eNR_FFT, 2*npme);
-                        }
 
-                        /* Note: this wallcycle region is closed below
-                           outside an OpenMP region, so take care if
-                           refactoring code here. */
-                        wallcycle_start(wcycle, ewcPME_GATHER);
+                    if (pme->nodeid == 0)
+                    {
+                        real      ntot = pme->nkx * pme->nky * pme->nkz;
+                        const int npme = static_cast<int>(ntot * std::log(ntot) / std::log(2.0));
+                        inc_nrnb(nrnb, eNR_FFT, 2 * npme);
                     }
 
-                    copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
+                    /* Note: this wallcycle region is closed below
+                       outside an OpenMP region, so take care if
+                       refactoring code here. */
+                    wallcycle_start(wcycle, WallCycleCounter::PmeGather);
                 }
-            } GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
+
+                copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
+            }
+            GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
         }
         /* End of thread parallel section.
          * With MPI we have to synchronize here before gmx_sum_qgrid_dd.
          */
 
-        if (bBackFFT)
+        /* distribute local grid to all nodes */
+        if (pme->nnodes > 1)
         {
-            /* distribute local grid to all nodes */
-#if GMX_MPI
-            if (pme->nnodes > 1)
-            {
-                gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
-            }
-#endif
-
-            unwrap_periodic_pmegrid(pme, grid);
+            gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
         }
 
-        if (bCalcF)
+        unwrap_periodic_pmegrid(pme, grid);
+
+        if (stepWork.computeForces)
         {
             /* interpolate forces for our local atoms */
 
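The operation accounting in the hunk above uses the usual N*log2(N) cost model for a 3D FFT; a worked sketch of the same estimate:

// For a 64^3 grid: ntot = 262144 and log2(ntot) = 18, so one transform is
// booked as ~4.7e6 operations. The factor 2 in inc_nrnb(nrnb, eNR_FFT, 2 * npme)
// presumably covers the forward and back transforms together.
const real ntot = 64.0 * 64.0 * 64.0;
const int  npme = static_cast<int>(ntot * std::log(ntot) / std::log(2.0));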
@@ -1411,40 +1298,42 @@ int gmx_pme_do(struct gmx_pme_t *pme,
              * atc->f is the actual force array, not a buffer,
              * therefore we should not clear it.
              */
-            lambda  = grid_index < DO_Q ? lambda_q : lambda_lj;
-            bClearF = (bFirst && PAR(cr));
+            real lambda = grid_index < DO_Q ? lambda_q : lambda_lj;
+            bClearF     = (bFirst && PAR(cr));
 #pragma omp parallel for num_threads(pme->nthread) schedule(static)
             for (thread = 0; thread < pme->nthread; thread++)
             {
                 try
                 {
-                    gather_f_bsplines(pme, grid, bClearF, atc,
-                                      &atc->spline[thread],
-                                      pme->bFEP ? (grid_index % 2 == 0 ? 1.0-lambda : lambda) : 1.0);
+                    gather_f_bsplines(pme,
+                                      grid,
+                                      bClearF,
+                                      &atc,
+                                      &atc.spline[thread],
+                                      pme->bFEP ? (grid_index % 2 == 0 ? 1.0 - lambda : lambda) : 1.0);
                 }
-                GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
+                GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
             }
 
 
-            inc_nrnb(nrnb, eNR_GATHERFBSP,
-                     pme->pme_order*pme->pme_order*pme->pme_order*pme->atc[0].n);
+            inc_nrnb(nrnb, eNR_GATHERFBSP, pme->pme_order * pme->pme_order * pme->pme_order * atc.numAtoms());
             /* Note: this wallcycle region is opened above inside an OpenMP
                region, so take care if refactoring code here. */
-            wallcycle_stop(wcycle, ewcPME_GATHER);
+            wallcycle_stop(wcycle, WallCycleCounter::PmeGather);
         }
 
-        if (bCalcEnerVir)
+        if (computeEnergyAndVirial)
         {
             /* This should only be called on the master thread
              * and after the threads have synchronized.
              */
             if (grid_index < 2)
             {
-                get_pme_ener_vir_q(pme->solve_work, pme->nthread, &energy_AB[grid_index], vir_AB[grid_index]);
+                get_pme_ener_vir_q(pme->solve_work, pme->nthread, &output[grid_index % 2]);
             }
             else
             {
-                get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &energy_AB[grid_index], vir_AB[grid_index]);
+                get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &output[grid_index % 2]);
             }
         }
         bFirst = FALSE;
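The output[grid_index % 2] indexing above folds the four grids into the two PmeOutput slots declared at the top of the function:

// grid 0 (Coulomb, state A) -> output[0].coulombEnergy_ / coulombVirial_
// grid 1 (Coulomb, state B) -> output[1].coulombEnergy_ / coulombVirial_
// grid 2 (LJ, state A)      -> output[0].lennardJonesEnergy_ / lennardJonesVirial_
// grid 3 (LJ, state B)      -> output[1].lennardJonesEnergy_ / lennardJonesVirial_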
@@ -1453,313 +1342,290 @@ int gmx_pme_do(struct gmx_pme_t *pme,
     /* For Lorentz-Berthelot combination rules in LJ-PME, we need to calculate
      * seven terms. */
 
-    if (pme->doLJ && pme->ljpme_combination_rule == eljpmeLB)
+    if (pme->doLJ && pme->ljpme_combination_rule == LongRangeVdW::LB)
     {
         /* Loop over A- and B-state if we are doing FEP */
-        for (fep_state = 0; fep_state < fep_states_lj; ++fep_state)
+        for (int fep_state = 0; fep_state < fep_states_lj; ++fep_state)
         {
-            real *local_c6 = nullptr, *local_sigma = nullptr, *RedistC6 = nullptr, *RedistSigma = nullptr;
+            std::vector<real>         local_c6;
+            std::vector<real>         local_sigma;
+            gmx::ArrayRef<const real> RedistC6;
+            gmx::ArrayRef<const real> RedistSigma;
+            gmx::ArrayRef<real>       coefficientBuffer;
             if (pme->nnodes == 1)
             {
-                if (pme->lb_buf1 == nullptr)
-                {
-                    pme->lb_buf_nalloc = pme->atc[0].n;
-                    snew(pme->lb_buf1, pme->lb_buf_nalloc);
-                }
-                pme->atc[0].coefficient = pme->lb_buf1;
+                pme->lb_buf1.resize(atc.numAtoms());
+                coefficientBuffer = pme->lb_buf1;
                 switch (fep_state)
                 {
                     case 0:
-                        local_c6      = c6A;
-                        local_sigma   = sigmaA;
+                        local_c6.assign(c6A.begin(), c6A.end());
+                        local_sigma.assign(sigmaA.begin(), sigmaA.end());
                         break;
                     case 1:
-                        local_c6      = c6B;
-                        local_sigma   = sigmaB;
+                        local_c6.assign(c6B.begin(), c6B.end());
+                        local_sigma.assign(sigmaB.begin(), sigmaB.end());
                         break;
-                    default:
-                        gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
+                    default: gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
                 }
             }
             else
             {
-                atc = &pme->atc[0];
+                coefficientBuffer = atc.coefficientBuffer;
                 switch (fep_state)
                 {
                     case 0:
-                        RedistC6      = c6A;
-                        RedistSigma   = sigmaA;
+                        RedistC6    = c6A;
+                        RedistSigma = sigmaA;
                         break;
                     case 1:
-                        RedistC6      = c6B;
-                        RedistSigma   = sigmaB;
+                        RedistC6    = c6B;
+                        RedistSigma = sigmaB;
                         break;
-                    default:
-                        gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
+                    default: gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
                 }
-                wallcycle_start(wcycle, ewcPME_REDISTXF);
+                wallcycle_start(wcycle, WallCycleCounter::PmeRedistXF);
 
-                do_redist_pos_coeffs(pme, cr, start, homenr, bFirst, x, RedistC6);
-                if (pme->lb_buf_nalloc < atc->n)
+                do_redist_pos_coeffs(pme, cr, bFirst, coordinates, RedistC6);
+                pme->lb_buf1.resize(atc.numAtoms());
+                pme->lb_buf2.resize(atc.numAtoms());
+                local_c6.assign(pme->lb_buf1.begin(), pme->lb_buf1.end());
+                for (int i = 0; i < atc.numAtoms(); ++i)
                 {
-                    pme->lb_buf_nalloc = atc->nalloc;
-                    srenew(pme->lb_buf1, pme->lb_buf_nalloc);
-                    srenew(pme->lb_buf2, pme->lb_buf_nalloc);
-                }
-                local_c6 = pme->lb_buf1;
-                for (i = 0; i < atc->n; ++i)
-                {
-                    local_c6[i] = atc->coefficient[i];
+                    local_c6[i] = atc.coefficient[i];
                 }
 
-                do_redist_pos_coeffs(pme, cr, start, homenr, FALSE, x, RedistSigma);
-                local_sigma = pme->lb_buf2;
-                for (i = 0; i < atc->n; ++i)
+                do_redist_pos_coeffs(pme, cr, FALSE, coordinates, RedistSigma);
+                local_sigma.assign(pme->lb_buf2.begin(), pme->lb_buf2.end());
+                for (int i = 0; i < atc.numAtoms(); ++i)
                 {
-                    local_sigma[i] = atc->coefficient[i];
+                    local_sigma[i] = atc.coefficient[i];
                 }
 
-                wallcycle_stop(wcycle, ewcPME_REDISTXF);
+                wallcycle_stop(wcycle, WallCycleCounter::PmeRedistXF);
             }
-            calc_initial_lb_coeffs(pme, local_c6, local_sigma);
+            atc.coefficient = coefficientBuffer;
+            calc_initial_lb_coeffs(coefficientBuffer, local_c6, local_sigma);
 
             /* Seven terms in LJ-PME with LB, grid_index < 2 reserved for electrostatics */
-            for (grid_index = 2; grid_index < 9; ++grid_index)
+            for (int grid_index = 2; grid_index < 9; ++grid_index)
             {
                 /* Unpack structure */
                 pmegrid    = &pme->pmegrid[grid_index];
                 fftgrid    = pme->fftgrid[grid_index];
                 pfft_setup = pme->pfft_setup[grid_index];
-                calc_next_lb_coeffs(pme, local_sigma);
+                calc_next_lb_coeffs(coefficientBuffer, local_sigma);
                 grid = pmegrid->grid.grid;
 
-                if (flags & GMX_PME_SPREAD)
-                {
-                    wallcycle_start(wcycle, ewcPME_SPREAD);
-                    /* Spread the c6 on a grid */
-                    spread_on_grid(pme, &pme->atc[0], pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);
+                wallcycle_start(wcycle, WallCycleCounter::PmeSpread);
+                /* Spread the c6 on a grid */
+                spread_on_grid(pme, &atc, pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);
 
-                    if (bFirst)
-                    {
-                        inc_nrnb(nrnb, eNR_WEIGHTS, DIM*atc->n);
-                    }
+                if (bFirst)
+                {
+                    inc_nrnb(nrnb, eNR_WEIGHTS, DIM * atc.numAtoms());
+                }
 
-                    inc_nrnb(nrnb, eNR_SPREADBSP,
-                             pme->pme_order*pme->pme_order*pme->pme_order*atc->n);
-                    if (pme->nthread == 1)
+                inc_nrnb(nrnb,
+                         eNR_SPREADBSP,
+                         pme->pme_order * pme->pme_order * pme->pme_order * atc.numAtoms());
+                if (pme->nthread == 1)
+                {
+                    wrap_periodic_pmegrid(pme, grid);
+                    /* sum contributions to local grid from other nodes */
+                    if (pme->nnodes > 1)
                     {
-                        wrap_periodic_pmegrid(pme, grid);
-                        /* sum contributions to local grid from other nodes */
-#if GMX_MPI
-                        if (pme->nnodes > 1)
-                        {
-                            gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
-                        }
-#endif
-                        copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
+                        gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
                     }
-                    wallcycle_stop(wcycle, ewcPME_SPREAD);
+                    copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
                 }
+                wallcycle_stop(wcycle, WallCycleCounter::PmeSpread);
+
                 /*Here we start a large thread parallel region*/
 #pragma omp parallel num_threads(pme->nthread) private(thread)
                 {
                     try
                     {
                         thread = gmx_omp_get_thread_num();
-                        if (flags & GMX_PME_SOLVE)
+                        /* do 3d-fft */
+                        if (thread == 0)
                         {
-                            /* do 3d-fft */
-                            if (thread == 0)
-                            {
-                                wallcycle_start(wcycle, ewcPME_FFT);
-                            }
+                            wallcycle_start(wcycle, WallCycleCounter::PmeFft);
+                        }
 
-                            gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX,
-                                                       thread, wcycle);
-                            if (thread == 0)
-                            {
-                                wallcycle_stop(wcycle, ewcPME_FFT);
-                            }
+                        gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX, thread, wcycle);
+                        if (thread == 0)
+                        {
+                            wallcycle_stop(wcycle, WallCycleCounter::PmeFft);
                         }
                     }
-                    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
+                    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
                 }
                 bFirst = FALSE;
             }
-            if (flags & GMX_PME_SOLVE)
-            {
-                /* solve in k-space for our local cells */
+            /* solve in k-space for our local cells */
 #pragma omp parallel num_threads(pme->nthread) private(thread)
+            {
+                try
                 {
-                    try
+                    int loop_count;
+                    thread = gmx_omp_get_thread_num();
+                    if (thread == 0)
                     {
-                        int loop_count;
-                        thread = gmx_omp_get_thread_num();
-                        if (thread == 0)
-                        {
-                            wallcycle_start(wcycle, ewcLJPME);
-                        }
+                        wallcycle_start(wcycle, WallCycleCounter::LJPme);
+                    }
 
-                        loop_count =
-                            solve_pme_lj_yzx(pme, &pme->cfftgrid[2], TRUE,
-                                             scaledBox[XX][XX]*scaledBox[YY][YY]*scaledBox[ZZ][ZZ],
-                                             bCalcEnerVir,
-                                             pme->nthread, thread);
-                        if (thread == 0)
-                        {
-                            wallcycle_stop(wcycle, ewcLJPME);
-                            inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
-                        }
+                    loop_count =
+                            solve_pme_lj_yzx(pme,
+                                             &pme->cfftgrid[2],
+                                             TRUE,
+                                             scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
+                                             computeEnergyAndVirial,
+                                             pme->nthread,
+                                             thread);
+                    if (thread == 0)
+                    {
+                        wallcycle_stop(wcycle, WallCycleCounter::LJPme);
+                        inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
                     }
-                    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
                 }
+                GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
             }
 
-            if (bCalcEnerVir)
+            if (computeEnergyAndVirial)
             {
                 /* This should only be called on the master thread and
                  * after the threads have synchronized.
                  */
-                get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &energy_AB[2+fep_state], vir_AB[2+fep_state]);
+                get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &output[fep_state]);
             }
 
-            if (bBackFFT)
+            bFirst = !pme->doCoulomb;
+            calc_initial_lb_coeffs(coefficientBuffer, local_c6, local_sigma);
+            for (int grid_index = 8; grid_index >= 2; --grid_index)
             {
-                bFirst = !pme->doCoulomb;
-                calc_initial_lb_coeffs(pme, local_c6, local_sigma);
-                for (grid_index = 8; grid_index >= 2; --grid_index)
-                {
-                    /* Unpack structure */
-                    pmegrid    = &pme->pmegrid[grid_index];
-                    fftgrid    = pme->fftgrid[grid_index];
-                    pfft_setup = pme->pfft_setup[grid_index];
-                    grid       = pmegrid->grid.grid;
-                    calc_next_lb_coeffs(pme, local_sigma);
+                /* Unpack structure */
+                pmegrid    = &pme->pmegrid[grid_index];
+                fftgrid    = pme->fftgrid[grid_index];
+                pfft_setup = pme->pfft_setup[grid_index];
+                grid       = pmegrid->grid.grid;
+                calc_next_lb_coeffs(coefficientBuffer, local_sigma);
 #pragma omp parallel num_threads(pme->nthread) private(thread)
+                {
+                    try
                     {
-                        try
+                        thread = gmx_omp_get_thread_num();
+                        /* do 3d-invfft */
+                        if (thread == 0)
                         {
-                            thread = gmx_omp_get_thread_num();
-                            /* do 3d-invfft */
-                            if (thread == 0)
-                            {
-                                wallcycle_start(wcycle, ewcPME_FFT);
-                            }
+                            wallcycle_start(wcycle, WallCycleCounter::PmeFft);
+                        }
 
-                            gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL,
-                                                       thread, wcycle);
-                            if (thread == 0)
-                            {
-                                wallcycle_stop(wcycle, ewcPME_FFT);
+                        gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL, thread, wcycle);
+                        if (thread == 0)
+                        {
+                            wallcycle_stop(wcycle, WallCycleCounter::PmeFft);
 
 
-                                if (pme->nodeid == 0)
-                                {
-                                    real ntot = pme->nkx*pme->nky*pme->nkz;
-                                    npme  = static_cast<int>(ntot*std::log(ntot)/std::log(2.0));
-                                    inc_nrnb(nrnb, eNR_FFT, 2*npme);
-                                }
-                                wallcycle_start(wcycle, ewcPME_GATHER);
+                            if (pme->nodeid == 0)
+                            {
+                                real      ntot = pme->nkx * pme->nky * pme->nkz;
+                                const int npme = static_cast<int>(ntot * std::log(ntot) / std::log(2.0));
+                                inc_nrnb(nrnb, eNR_FFT, 2 * npme);
                             }
-
-                            copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
+                            wallcycle_start(wcycle, WallCycleCounter::PmeGather);
                         }
-                        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
-                    } /*#pragma omp parallel*/
 
-                    /* distribute local grid to all nodes */
-#if GMX_MPI
-                    if (pme->nnodes > 1)
-                    {
-                        gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
+                        copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
                     }
-#endif
+                    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
+                } /*#pragma omp parallel*/
 
-                    unwrap_periodic_pmegrid(pme, grid);
+                /* distribute local grid to all nodes */
+                if (pme->nnodes > 1)
+                {
+                    gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
+                }
 
-                    if (bCalcF)
-                    {
-                        /* interpolate forces for our local atoms */
-                        bClearF = (bFirst && PAR(cr));
-                        scale   = pme->bFEP ? (fep_state < 1 ? 1.0-lambda_lj : lambda_lj) : 1.0;
-                        scale  *= lb_scale_factor[grid_index-2];
+                unwrap_periodic_pmegrid(pme, grid);
+
+                if (stepWork.computeForces)
+                {
+                    /* interpolate forces for our local atoms */
+                    bClearF    = (bFirst && PAR(cr));
+                    real scale = pme->bFEP ? (fep_state < 1 ? 1.0 - lambda_lj : lambda_lj) : 1.0;
+                    scale *= lb_scale_factor[grid_index - 2];
 
 #pragma omp parallel for num_threads(pme->nthread) schedule(static)
-                        for (thread = 0; thread < pme->nthread; thread++)
+                    for (thread = 0; thread < pme->nthread; thread++)
+                    {
+                        try
                         {
-                            try
-                            {
-                                gather_f_bsplines(pme, grid, bClearF, &pme->atc[0],
-                                                  &pme->atc[0].spline[thread],
-                                                  scale);
-                            }
-                            GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
+                            gather_f_bsplines(
+                                    pme, grid, bClearF, &pme->atc[0], &pme->atc[0].spline[thread], scale);
                         }
+                        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
+                    }
 
 
-                        inc_nrnb(nrnb, eNR_GATHERFBSP,
-                                 pme->pme_order*pme->pme_order*pme->pme_order*pme->atc[0].n);
-                    }
-                    wallcycle_stop(wcycle, ewcPME_GATHER);
+                    inc_nrnb(nrnb,
+                             eNR_GATHERFBSP,
+                             pme->pme_order * pme->pme_order * pme->pme_order * pme->atc[0].numAtoms());
+                }
+                wallcycle_stop(wcycle, WallCycleCounter::PmeGather);
 
-                    bFirst = FALSE;
-                } /* for (grid_index = 8; grid_index >= 2; --grid_index) */
-            }     /* if (bCalcF) */
-        }         /* for (fep_state = 0; fep_state < fep_states_lj; ++fep_state) */
-    }             /* if ((flags & GMX_PME_DO_LJ) && pme->ljpme_combination_rule == eljpmeLB) */
+                bFirst = FALSE;
+            } /* for (grid_index = 8; grid_index >= 2; --grid_index) */
+        }     /* for (fep_state = 0; fep_state < fep_states_lj; ++fep_state) */
+    }         /* if (pme->doLJ && pme->ljpme_combination_rule == LongRangeVdW::LB) */
 
-    if (bCalcF && pme->nnodes > 1)
+    if (stepWork.computeForces && pme->nnodes > 1)
     {
-        wallcycle_start(wcycle, ewcPME_REDISTXF);
-        for (d = 0; d < pme->ndecompdim; d++)
+        wallcycle_start(wcycle, WallCycleCounter::PmeRedistXF);
+        for (int d = 0; d < pme->ndecompdim; d++)
         {
-            atc = &pme->atc[d];
+            gmx::ArrayRef<gmx::RVec> forcesRef;
             if (d == pme->ndecompdim - 1)
             {
-                n_d = homenr;
-                f_d = f + start;
+                const size_t numAtoms = coordinates.size();
+                GMX_ASSERT(forces.size() >= numAtoms, "Need at least numAtoms forces");
+                forcesRef = forces.subArray(0, numAtoms);
             }
             else
             {
-                n_d = pme->atc[d+1].n;
-                f_d = pme->atc[d+1].f;
+                forcesRef = pme->atc[d + 1].f;
             }
-            if (DOMAINDECOMP(cr))
+            if (haveDDAtomOrdering(*cr))
             {
-                dd_pmeredist_f(pme, atc, n_d, f_d,
-                               d == pme->ndecompdim-1 && pme->bPPnode);
+                dd_pmeredist_f(pme, &pme->atc[d], forcesRef, d == pme->ndecompdim - 1 && pme->bPPnode);
             }
         }
 
-        wallcycle_stop(wcycle, ewcPME_REDISTXF);
+        wallcycle_stop(wcycle, WallCycleCounter::PmeRedistXF);
     }
 
-    if (bCalcEnerVir)
+    if (computeEnergyAndVirial)
     {
         if (pme->doCoulomb)
         {
             if (!pme->bFEP_q)
             {
-                *energy_q = energy_AB[0];
-                m_add(vir_q, vir_AB[0], vir_q);
+                *energy_q = output[0].coulombEnergy_;
+                m_add(vir_q, output[0].coulombVirial_, vir_q);
             }
             else
             {
-                *energy_q       = (1.0-lambda_q)*energy_AB[0] + lambda_q*energy_AB[1];
-                *dvdlambda_q   += energy_AB[1] - energy_AB[0];
-                for (i = 0; i < DIM; i++)
+                *energy_q = (1.0 - lambda_q) * output[0].coulombEnergy_ + lambda_q * output[1].coulombEnergy_;
+                *dvdlambda_q += output[1].coulombEnergy_ - output[0].coulombEnergy_;
+                for (int i = 0; i < DIM; i++)
                 {
-                    for (j = 0; j < DIM; j++)
+                    for (int j = 0; j < DIM; j++)
                     {
-                        vir_q[i][j] += (1.0-lambda_q)*vir_AB[0][i][j] +
-                            lambda_q*vir_AB[1][i][j];
+                        vir_q[i][j] += (1.0 - lambda_q) * output[0].coulombVirial_[i][j]
+                                       + lambda_q * output[1].coulombVirial_[i][j];
                     }
                 }
             }
-            if (debug)
-            {
-                fprintf(debug, "Electrostatic PME mesh energy: %g\n", *energy_q);
-            }
         }
         else
         {
@@ -1770,25 +1636,23 @@ int gmx_pme_do(struct gmx_pme_t *pme,
         {
             if (!pme->bFEP_lj)
             {
-                *energy_lj = energy_AB[2];
-                m_add(vir_lj, vir_AB[2], vir_lj);
+                *energy_lj = output[0].lennardJonesEnergy_;
+                m_add(vir_lj, output[0].lennardJonesVirial_, vir_lj);
             }
             else
             {
-                *energy_lj     = (1.0-lambda_lj)*energy_AB[2] + lambda_lj*energy_AB[3];
-                *dvdlambda_lj += energy_AB[3] - energy_AB[2];
-                for (i = 0; i < DIM; i++)
+                *energy_lj = (1.0 - lambda_lj) * output[0].lennardJonesEnergy_
+                             + lambda_lj * output[1].lennardJonesEnergy_;
+                *dvdlambda_lj += output[1].lennardJonesEnergy_ - output[0].lennardJonesEnergy_;
+                for (int i = 0; i < DIM; i++)
                 {
-                    for (j = 0; j < DIM; j++)
+                    for (int j = 0; j < DIM; j++)
                     {
-                        vir_lj[i][j] += (1.0-lambda_lj)*vir_AB[2][i][j] + lambda_lj*vir_AB[3][i][j];
+                        vir_lj[i][j] += (1.0 - lambda_lj) * output[0].lennardJonesVirial_[i][j]
+                                        + lambda_lj * output[1].lennardJonesVirial_[i][j];
                     }
                 }
             }
-            if (debug)
-            {
-                fprintf(debug, "Lennard-Jones PME mesh energy: %g\n", *energy_lj);
-            }
         }
         else
         {
@@ -1798,15 +1662,13 @@ int gmx_pme_do(struct gmx_pme_t *pme,
     return 0;
 }
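Across this function the old GMX_PME_* bit flags are replaced by StepWorkload booleans. A minimal caller-side sketch, assuming only the three members actually consumed above:

gmx::StepWorkload stepWork;
stepWork.computeEnergy = true; // together with computeVirial, replaces GMX_PME_CALC_ENER_VIR
stepWork.computeVirial = true;
stepWork.computeForces = true; // replaces GMX_PME_CALC_F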
 
-void gmx_pme_destroy(gmx_pme_t *pme)
+void gmx_pme_destroy(gmx_pme_t* pme)
 {
     if (!pme)
     {
         return;
     }
 
-    delete pme->boxScaler;
-
     sfree(pme->nnx);
     sfree(pme->nny);
     sfree(pme->nnz);
@@ -1829,19 +1691,11 @@ void gmx_pme_destroy(gmx_pme_t *pme)
     sfree(pme->cfftgrid);
     sfree(pme->pfft_setup);
 
-    for (int i = 0; i < std::max(1, pme->ndecompdim); i++) //pme->atc[0] is always allocated
-    {
-        destroy_atomcomm(&pme->atc[i]);
-    }
-
     for (int i = 0; i < DIM; i++)
     {
         sfree(pme->bsp_mod[i]);
     }
 
-    sfree(pme->lb_buf1);
-    sfree(pme->lb_buf2);
-
     sfree(pme->bufv);
     sfree(pme->bufr);
 
@@ -1850,12 +1704,9 @@ void gmx_pme_destroy(gmx_pme_t *pme)
         pme_free_all_work(&pme->solve_work, pme->nthread);
     }
 
-    sfree(pme->sum_qgrid_tmp);
-    sfree(pme->sum_qgrid_dd_tmp);
-
     destroy_pme_spline_work(pme->spline_work);
 
-    if (pme_gpu_active(pme) && pme->gpu)
+    if (pme->gpu != nullptr)
     {
         pme_gpu_destroy(pme->gpu);
     }
@@ -1863,11 +1714,45 @@ void gmx_pme_destroy(gmx_pme_t *pme)
     delete pme;
 }
 
-void gmx_pme_reinit_atoms(const gmx_pme_t *pme, const int nAtoms, const real *charges)
+void gmx_pme_reinit_atoms(gmx_pme_t*                pme,
+                          const int                 numAtoms,
+                          gmx::ArrayRef<const real> chargesA,
+                          gmx::ArrayRef<const real> chargesB)
 {
-    if (pme_gpu_active(pme))
+    if (pme->gpu != nullptr)
     {
-        pme_gpu_reinit_atoms(pme->gpu, nAtoms, charges);
+        GMX_ASSERT(!(pme->bFEP_q && chargesB.empty()),
+                   "B state charges must be specified if running Coulomb FEP on the GPU");
+        pme_gpu_reinit_atoms(pme->gpu, numAtoms, chargesA.data(), pme->bFEP_q ? chargesB.data() : nullptr);
     }
-    // TODO: handle the CPU case here; handle the whole t_mdatoms
+    else
+    {
+        pme->atc[0].setNumAtoms(numAtoms);
+        // TODO: set the charges here as well
+    }
+}
+
+bool gmx_pme_grid_matches(const gmx_pme_t& pme, const ivec grid_size)
+{
+    return (pme.nkx == grid_size[XX] && pme.nky == grid_size[YY] && pme.nkz == grid_size[ZZ]);
+}
+
+void gmx::SeparatePmeRanksPermitted::disablePmeRanks(const std::string& reason)
+{
+    permitSeparatePmeRanks_ = false;
+
+    if (!reason.empty())
+    {
+        reasons_.push_back(reason);
+    }
+}
+
+bool gmx::SeparatePmeRanksPermitted::permitSeparatePmeRanks() const
+{
+    return permitSeparatePmeRanks_;
+}
+
+std::string gmx::SeparatePmeRanksPermitted::reasonsWhyDisabled() const
+{
+    return joinStrings(reasons_, "; ");
 }
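
A hypothetical use of the accumulator defined above: callers veto separate PME ranks with a reason, and the aggregated reasons can be reported later.

gmx::SeparatePmeRanksPermitted separatePmeRanksPermitted;
separatePmeRanksPermitted.disablePmeRanks("PME-only ranks are not supported here");
if (!separatePmeRanksPermitted.permitSeparatePmeRanks())
{
    // Reasons are joined with "; " by reasonsWhyDisabled().
    fprintf(stderr,
            "Will not use separate PME ranks: %s\n",
            separatePmeRanksPermitted.reasonsWhyDisabled().c_str());
}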