Merge release-2021 into master
diff --git a/src/gromacs/ewald/pme.h b/src/gromacs/ewald/pme.h
index ccf5e7227d929cd34b2f63733eeb4dd827abfbc9..eb0c5651753535db601959d2101f7f8e12669ebd 100644
--- a/src/gromacs/ewald/pme.h
+++ b/src/gromacs/ewald/pme.h
@@ -3,7 +3,8 @@
  *
  * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
  * Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017 by the GROMACS development team.
+ * Copyright (c) 2018,2019,2020,2021, by the GROMACS development team, led by
  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
  * and including many others, as listed in the AUTHORS file in the
  * top-level source directory and at http://www.gromacs.org.
 #define GMX_EWALD_PME_H
 
 #include <string>
+#include <vector>
 
+#include "gromacs/gpu_utils/devicebuffer_datatype.h"
 #include "gromacs/gpu_utils/gpu_macros.h"
 #include "gromacs/math/vectypes.h"
-#include "gromacs/timing/walltime_accounting.h"
-#include "gromacs/utility/arrayref.h"
-#include "gromacs/utility/basedefinitions.h"
 #include "gromacs/utility/real.h"
 
-struct interaction_const_t;
+struct gmx_hw_info_t;
 struct t_commrec;
 struct t_inputrec;
 struct t_nrnb;
 struct PmeGpu;
 struct gmx_wallclock_gpu_pme_t;
-struct gmx_device_info_t;
+struct gmx_enerdata_t;
 struct gmx_mtop_t;
 struct gmx_pme_t;
 struct gmx_wallcycle;
 struct NumPmeDomains;
 
+class DeviceContext;
+class DeviceStream;
 enum class GpuTaskCompletion;
 class PmeGpuProgram;
-//! Convenience name.
-using PmeGpuProgramHandle = const PmeGpuProgram *;
+class GpuEventSynchronizer;
+
+/*! \brief Hack to selectively enable some parts of PME during unit testing.
+ *
+ * Set to \c false by default. If any of the tests sets it to \c true, it will
+ * make the compatibility check consider PME to be supported in SYCL builds.
+ *
+ * Currently we don't have a proper PME implementation with SYCL, but we still want
+ * to run tests for some of the kernels.
+ *
+ * \todo Remove after #3927 is done and PME is fully enabled in SYCL builds.
+ */
+//NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
+extern bool g_allowPmeWithSyclForTesting;
 
 namespace gmx
 {
+template<typename>
+class ArrayRef;
 class ForceWithVirial;
 class MDLogger;
 enum class PinningPolicy : int;
-}
+class StepWorkload;
+
+/*! \libinternal \brief Class for managing usage of separate PME-only ranks
+ *
+ * Used for checking whether some part of the code has disallowed the use of PME-only ranks.
+ *
+ */
+class SeparatePmeRanksPermitted
+{
+public:
+    //! Disallows the use of separate PME ranks, recording the reason
+    void disablePmeRanks(const std::string& reason);
+    //! Returns whether separate PME ranks may be used
+    bool permitSeparatePmeRanks() const;
+    //! Returns all reasons for not using separate PME ranks
+    std::string reasonsWhyDisabled() const;
+
+private:
+    //! Flag that informs whether the simulation could use dedicated PME ranks
+    bool permitSeparatePmeRanks_ = true;
+    //! Storage for all reasons why PME ranks could not be used
+    std::vector<std::string> reasons_;
+};
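
The class above simply accumulates veto reasons. A minimal usage sketch (the helper name, the flag, and the reason string are hypothetical, not part of this header):

    #include <cstdio>

    #include "gromacs/ewald/pme.h" // declares gmx::SeparatePmeRanksPermitted

    // Hypothetical caller deciding whether separate PME ranks can be used.
    void reportPmeRankChoice(bool usingGpuPmeDecomposition)
    {
        gmx::SeparatePmeRanksPermitted pmeRanksPermitted;
        if (usingGpuPmeDecomposition)
        {
            // Each call vetoes separate PME ranks and records one reason.
            pmeRanksPermitted.disablePmeRanks("PME decomposition on GPUs is active");
        }
        if (!pmeRanksPermitted.permitSeparatePmeRanks())
        {
            // All accumulated reasons are reported together.
            std::fprintf(stderr, "Not using separate PME ranks: %s\n",
                         pmeRanksPermitted.reasonsWhyDisabled().c_str());
        }
    }
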
 
-enum {
-    GMX_SUM_GRID_FORWARD, GMX_SUM_GRID_BACKWARD
+class PmeCoordinateReceiverGpu;
+} // namespace gmx
+
+enum
+{
+    GMX_SUM_GRID_FORWARD,
+    GMX_SUM_GRID_BACKWARD
 };
 
 /*! \brief Possible PME codepaths on a rank.
@@ -90,23 +133,18 @@ enum {
  */
 enum class PmeRunMode
 {
-    None,    //!< No PME task is done
-    CPU,     //!< Whole PME computation is done on CPU
-    GPU,     //!< Whole PME computation is done on GPU
-    Mixed,   //!< Mixed mode: only spread and gather run on GPU; FFT and solving are done on CPU.
-};
-
-//! PME gathering output forces treatment
-enum class PmeForceOutputHandling
-{
-    Set,             /**< Gather simply writes into provided force buffer */
-    ReduceWithInput, /**< Gather adds its output to the buffer.
-                        On GPU, that means additional H2D copy before the kernel launch. */
+    None,  //!< No PME task is done
+    CPU,   //!< Whole PME computation is done on CPU
+    GPU,   //!< Whole PME computation is done on GPU
+    Mixed, //!< Mixed mode: only spread and gather run on GPU; FFT and solving are done on CPU.
 };
 
 /*! \brief Return the smallest allowed PME grid size for \p pmeOrder */
 int minimalPmeGridSize(int pmeOrder);
 
+//! Return whether the grid of \c pme is identical to \c grid_size.
+bool gmx_pme_grid_matches(const gmx_pme_t& pme, const ivec grid_size);
+
 /*! \brief Check restrictions on pme_order and the PME grid nkx, nky, nkz.
  *
  * With errorsAreFatal=true, an exception or fatal error is generated
@@ -118,9 +156,11 @@ int minimalPmeGridSize(int pmeOrder);
  *
  * The PME GPU restrictions are checked separately during pme_gpu_init().
  */
-bool gmx_pme_check_restrictions(int pme_order,
-                                int nkx, int nky, int nkz,
-                                int numPmeDomainsAlongX,
+bool gmx_pme_check_restrictions(int  pme_order,
+                                int  nkx,
+                                int  nky,
+                                int  nkz,
+                                int  numPmeDomainsAlongX,
                                 bool useThreads,
                                 bool errorsAreFatal);
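
Because errorsAreFatal can be false, the function doubles as a non-fatal probe of a candidate setup. A sketch (the grid and domain values are arbitrary illustrations):

    // Probe a candidate grid without aborting the run on failure.
    const bool gridOk = gmx_pme_check_restrictions(/* pme_order */ 4,
                                                   /* nkx, nky, nkz */ 48, 48, 48,
                                                   /* numPmeDomainsAlongX */ 1,
                                                   /* useThreads */ true,
                                                   /* errorsAreFatal */ false);
    if (!gridOk)
    {
        // Choose a different grid or fall back to defaults.
    }
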
 
@@ -130,118 +170,101 @@ bool gmx_pme_check_restrictions(int pme_order,
  * \returns  Pointer to newly allocated and initialized PME data.
  *
  * \todo We should evolve something like a \c GpuManager that holds \c
- * gmx_device_info_t * and \c PmeGpuProgramHandle and perhaps other
+ * DeviceInformation* and \c PmeGpuProgram* and perhaps other
  * related things whose lifetime can/should exceed that of a task (or
- * perhaps task manager). See Redmine #2522.
+ * perhaps task manager). See Issue #2522.
  */
-gmx_pme_t *gmx_pme_init(const t_commrec *cr,
-                        const NumPmeDomains &numPmeDomains,
-                        const t_inputrec *ir, int homenr,
-                        gmx_bool bFreeEnergy_q, gmx_bool bFreeEnergy_lj,
-                        gmx_bool bReproducible,
-                        real ewaldcoeff_q, real ewaldcoeff_lj,
-                        int nthread,
-                        PmeRunMode runMode,
-                        PmeGpu *pmeGpu,
-                        const gmx_device_info_t *gpuInfo,
-                        PmeGpuProgramHandle pmeGpuProgram,
-                        const gmx::MDLogger &mdlog);
-
-/*! \brief Destroys the PME data structure.*/
-void gmx_pme_destroy(gmx_pme_t *pme);
-
-//@{
-/*! \brief Flag values that control what gmx_pme_do() will calculate
- *
- * These can be combined with bitwise-OR if more than one thing is required.
+gmx_pme_t* gmx_pme_init(const t_commrec*     cr,
+                        const NumPmeDomains& numPmeDomains,
+                        const t_inputrec*    ir,
+                        gmx_bool             bFreeEnergy_q,
+                        gmx_bool             bFreeEnergy_lj,
+                        gmx_bool             bReproducible,
+                        real                 ewaldcoeff_q,
+                        real                 ewaldcoeff_lj,
+                        int                  nthread,
+                        PmeRunMode           runMode,
+                        PmeGpu*              pmeGpu,
+                        const DeviceContext* deviceContext,
+                        const DeviceStream*  deviceStream,
+                        const PmeGpuProgram* pmeGpuProgram,
+                        const gmx::MDLogger& mdlog);
+
+/*! \brief As gmx_pme_init, but takes most settings, except the grid/Ewald coefficients, from
+ * pme_src. This is only called when the PME cut-off/grid size changes.
  */
-#define GMX_PME_SPREAD        (1<<0)
-#define GMX_PME_SOLVE         (1<<1)
-#define GMX_PME_CALC_F        (1<<2)
-#define GMX_PME_CALC_ENER_VIR (1<<3)
-/* This forces the grid to be backtransformed even without GMX_PME_CALC_F */
-#define GMX_PME_CALC_POT      (1<<4)
+void gmx_pme_reinit(gmx_pme_t**       pmedata,
+                    const t_commrec*  cr,
+                    gmx_pme_t*        pme_src,
+                    const t_inputrec* ir,
+                    const ivec        grid_size,
+                    real              ewaldcoeff_q,
+                    real              ewaldcoeff_lj);
 
-#define GMX_PME_DO_ALL_F  (GMX_PME_SPREAD | GMX_PME_SOLVE | GMX_PME_CALC_F)
-//@}
+/*! \brief Destroys the PME data structure.*/
+void gmx_pme_destroy(gmx_pme_t* pme);
 
 /*! \brief Do a PME calculation on a CPU for the long range electrostatics and/or LJ.
  *
+ * Computes the PME forces and the energy and virial, when requested,
+ * for all atoms in \p coordinates. Forces, when requested, are added
+ * to the buffer \p forces, which is allowed to contain more elements
+ * than the number of elements in \p coordinates.
  * The \p stepWork settings determine which
  * parts of the calculation are performed.
  *
  * \return 0 indicates success; non-zero is an error code.
  */
-int gmx_pme_do(struct gmx_pme_t *pme,
-               int start,       int homenr,
-               rvec x[],        rvec f[],
-               real chargeA[],  real chargeB[],
-               real c6A[],      real c6B[],
-               real sigmaA[],   real sigmaB[],
-               matrix box,      const t_commrec *cr,
-               int  maxshift_x, int maxshift_y,
-               t_nrnb *nrnb,    gmx_wallcycle *wcycle,
-               matrix vir_q,    matrix vir_lj,
-               real *energy_q,  real *energy_lj,
-               real lambda_q,   real lambda_lj,
-               real *dvdlambda_q, real *dvdlambda_lj,
-               int flags);
-
-/*! \brief Called on the nodes that do PME exclusively */
-int gmx_pmeonly(struct gmx_pme_t *pme,
-                const t_commrec *cr,     t_nrnb *mynrnb,
-                gmx_wallcycle  *wcycle,
-                gmx_walltime_accounting_t walltime_accounting,
-                t_inputrec *ir, PmeRunMode runMode);
+int gmx_pme_do(struct gmx_pme_t*              pme,
+               gmx::ArrayRef<const gmx::RVec> coordinates,
+               gmx::ArrayRef<gmx::RVec>       forces,
+               gmx::ArrayRef<const real>      chargeA,
+               gmx::ArrayRef<const real>      chargeB,
+               gmx::ArrayRef<const real>      c6A,
+               gmx::ArrayRef<const real>      c6B,
+               gmx::ArrayRef<const real>      sigmaA,
+               gmx::ArrayRef<const real>      sigmaB,
+               const matrix                   box,
+               const t_commrec*               cr,
+               int                            maxshift_x,
+               int                            maxshift_y,
+               t_nrnb*                        nrnb,
+               gmx_wallcycle*                 wcycle,
+               matrix                         vir_q,
+               matrix                         vir_lj,
+               real*                          energy_q,
+               real*                          energy_lj,
+               real                           lambda_q,
+               real                           lambda_lj,
+               real*                          dvdlambda_q,
+               real*                          dvdlambda_lj,
+               const gmx::StepWorkload&       stepWork);
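
A sketch of a CPU-path call, assuming pme, cr, nrnb, wcycle, stepWork, box, the maxshift values, and the atom counts were set up elsewhere; the empty braces stand for the unused FEP and LJ-PME parameter sets:

    std::vector<gmx::RVec> coordinates(numPmeAtoms);   // filled elsewhere
    std::vector<gmx::RVec> forces(numTotalAtoms);      // may exceed coordinates.size()
    std::vector<real>      chargeA(numPmeAtoms, 1.0F); // illustrative charges

    matrix vir_q = { { 0 } }, vir_lj = { { 0 } };
    real   energy_q = 0, energy_lj = 0, dvdlambda_q = 0, dvdlambda_lj = 0;

    const int ret = gmx_pme_do(pme, coordinates, forces, chargeA,
                               {}, {}, {}, {}, {}, // chargeB, c6A/B, sigmaA/B not used here
                               box, cr, maxshift_x, maxshift_y, nrnb, wcycle,
                               vir_q, vir_lj, &energy_q, &energy_lj,
                               /* lambda_q */ 0.0, /* lambda_lj */ 0.0,
                               &dvdlambda_q, &dvdlambda_lj, stepWork);
    if (ret != 0)
    {
        // Non-zero return is an error code.
    }
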
 
 /*! \brief Calculate the PME grid energy V for n charges.
  *
  * The potential (found in \p pme) must have been computed already with a
- * call to gmx_pme_do() with at least GMX_PME_SPREAD and GMX_PME_SOLVE
- * specified. Note that the charges are not spread on the grid in the
+ * call to gmx_pme_do(). Note that the charges are not spread on the grid in the
  * pme struct. Currently does not work in parallel or with free
  * energy.
  */
-void gmx_pme_calc_energy(struct gmx_pme_t *pme, int n, rvec *x, real *q, real *V);
-
-/*! \brief Send the charges and maxshift to out PME-only node. */
-void gmx_pme_send_parameters(const t_commrec *cr,
-                             const interaction_const_t *ic,
-                             gmx_bool bFreeEnergy_q, gmx_bool bFreeEnergy_lj,
-                             real *chargeA, real *chargeB,
-                             real *sqrt_c6A, real *sqrt_c6B,
-                             real *sigmaA, real *sigmaB,
-                             int maxshift_x, int maxshift_y);
-
-/*! \brief Send the coordinates to our PME-only node and request a PME calculation */
-void gmx_pme_send_coordinates(const t_commrec *cr, matrix box, rvec *x,
-                              real lambda_q, real lambda_lj,
-                              gmx_bool bEnerVir,
-                              int64_t step, gmx_wallcycle *wcycle);
-
-/*! \brief Tell our PME-only node to finish */
-void gmx_pme_send_finish(const t_commrec *cr);
-
-/*! \brief Tell our PME-only node to reset all cycle and flop counters */
-void gmx_pme_send_resetcounters(const t_commrec *cr, int64_t step);
-
-/*! \brief PP nodes receive the long range forces from the PME nodes */
-void gmx_pme_receive_f(const t_commrec *cr,
-                       gmx::ForceWithVirial *forceWithVirial,
-                       real *energy_q, real *energy_lj,
-                       real *dvdlambda_q, real *dvdlambda_lj,
-                       float *pme_cycles);
+real gmx_pme_calc_energy(gmx_pme_t* pme, gmx::ArrayRef<const gmx::RVec> x, gmx::ArrayRef<const real> q);
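
A small sketch of evaluating the grid potential energy for a probe charge, assuming pme already holds a potential from a prior gmx_pme_do() call (position and charge values are illustrative):

    std::vector<gmx::RVec> probeX{ gmx::RVec(0.5F, 0.5F, 0.5F) };
    std::vector<real>      probeQ{ 1.0F };
    // V is the grid energy of the probe charges in the existing potential.
    const real V = gmx_pme_calc_energy(pme, probeX, probeQ);
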
 
 /*! \brief
  * This function updates the local atom data on GPU after DD (charges, coordinates, etc.).
  * TODO: it should update the PME CPU atom data as well.
  * (currently the CPU PME code gets passed the input pointers with each gmx_pme_do() call).
  *
- * \param[in] pme            The PME structure.
- * \param[in] nAtoms         The number of particles.
- * \param[in] charges        The pointer to the array of particle charges.
+ * \param[in,out] pme        The PME structure.
+ * \param[in]     numAtoms   The number of particles.
+ * \param[in]     chargesA   The pointer to the array of particle charges in the normal state or FEP
+ * state A. Can be nullptr if PME is not performed on the GPU.
+ * \param[in]     chargesB   The pointer to the array of particle charges in state B. Only used if
+ * charges are perturbed and can otherwise be nullptr.
  */
-void gmx_pme_reinit_atoms(const gmx_pme_t *pme, int nAtoms, const real *charges);
+void gmx_pme_reinit_atoms(gmx_pme_t*                pme,
+                          int                       numAtoms,
+                          gmx::ArrayRef<const real> chargesA,
+                          gmx::ArrayRef<const real> chargesB);
 
 /* A block of PME GPU functions */
 
@@ -250,11 +273,20 @@ void gmx_pme_reinit_atoms(const gmx_pme_t *pme, int nAtoms, const real *charges)
  * pme_gpu_check_restrictions(), except that one works with a fully
  * formed gmx_pme_t structure. Should that one go away/work with inputrec?
  *
- * \param[out] error  If non-null, the error message when PME is not supported on GPU.
+ * \param[out] error   If non-null, the error message when PME is not supported on GPU.
  *
  * \returns true if PME can run on GPU on this build, false otherwise.
  */
-bool pme_gpu_supports_build(std::string *error);
+bool pme_gpu_supports_build(std::string* error);
+
+/*! \brief Checks whether the detected (GPU) hardware allows running PME on a GPU.
+ *
+ * \param[in]  hwinfo  Information about the detected hardware
+ * \param[out] error   If non-null, the error message when PME is not supported on GPU.
+ *
+ * \returns true if PME can run on GPU on this build, false otherwise.
+ */
+bool pme_gpu_supports_hardware(const gmx_hw_info_t& hwinfo, std::string* error);
 
 /*! \brief Checks whether the input system allows running PME on a GPU.
  * TODO: this partly duplicates an internal PME assert function
@@ -262,12 +294,22 @@ bool pme_gpu_supports_build(std::string *error);
  * formed gmx_pme_t structure. Should that one go away/work with inputrec?
  *
  * \param[in]  ir     Input system.
- * \param[in]  mtop   Complete system topology to check if an FE simulation perturbs charges.
  * \param[out] error  If non-null, the error message if the input is not supported on GPU.
  *
  * \returns true if PME can run on GPU with this input, false otherwise.
  */
-bool pme_gpu_supports_input(const t_inputrec &ir, const gmx_mtop_t &mtop, std::string *error);
+bool pme_gpu_supports_input(const t_inputrec& ir, std::string* error);
+
+/*! \brief Checks whether the input system allows running PME on a GPU in Mixed mode.
+ * Assumes that the input system is otherwise compatible with GPU PME; that is,
+ * before calling this function one should check that \ref pme_gpu_supports_input returns \c true.
+ *
+ * \param[in]  ir     Input system.
+ * \param[out] error  If non-null, the error message if the input is not supported.
+ *
+ * \returns true if PME can run on GPU in Mixed mode with this input, false otherwise.
+ */
+bool pme_gpu_mixed_mode_supports_input(const t_inputrec& ir, std::string* error);
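
The three predicates above layer naturally from build, to hardware, to input; short-circuiting leaves the first failure message in the error string. A sketch assuming hwinfo and inputrec come from the caller:

    std::string error;
    const bool  pmeOnGpu = pme_gpu_supports_build(&error)
                           && pme_gpu_supports_hardware(hwinfo, &error)
                           && pme_gpu_supports_input(inputrec, &error);
    if (!pmeOnGpu)
    {
        std::fprintf(stderr, "PME will not run on a GPU: %s\n", error.c_str());
    }
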
 
 /*! \brief
  * Returns the active PME codepath (CPU, GPU, mixed).
@@ -276,7 +318,7 @@ bool pme_gpu_supports_input(const t_inputrec &ir, const gmx_mtop_t &mtop, std::s
  * \param[in]  pme            The PME data structure.
  * \returns active PME codepath.
  */
-PmeRunMode pme_run_mode(const gmx_pme_t *pme);
+PmeRunMode pme_run_mode(const gmx_pme_t* pme);
 
 /*! \libinternal \brief
  * Return the pinning policy appropriate for this build configuration
@@ -292,11 +334,21 @@ gmx::PinningPolicy pme_get_pinning_policy();
  * \param[in]  pme            The PME data structure.
  * \returns true if PME can run on GPU, false otherwise.
  */
-inline bool pme_gpu_task_enabled(const gmx_pme_t *pme)
+inline bool pme_gpu_task_enabled(const gmx_pme_t* pme)
 {
     return (pme != nullptr) && (pme_run_mode(pme) != PmeRunMode::CPU);
 }
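
A sketch of dispatching on the active codepath; the two stage helpers are hypothetical placeholders:

    if (pme_gpu_task_enabled(pme))
    {
        // PmeRunMode::GPU or PmeRunMode::Mixed: at least spread and gather run on the GPU.
        launchGpuPmeStages(pme); // hypothetical helper
    }
    else if (pme_run_mode(pme) == PmeRunMode::CPU)
    {
        runCpuPme(pme); // hypothetical helper
    }
    // PmeRunMode::None: this rank performs no PME work.
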
 
+/*! \brief Returns the block size requirement
+ *
+ * The GPU version of PME requires that the coordinates array have a
+ * size divisible by the returned number.
+ *
+ * \param[in]  pme  The PME data structure.
+ */
+GPU_FUNC_QUALIFIER int pme_gpu_get_block_size(const gmx_pme_t* GPU_FUNC_ARGUMENT(pme))
+        GPU_FUNC_TERM_WITH_RETURN(0);
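
The divisibility requirement amounts to rounding the coordinate-buffer size up to a multiple of the returned value. A sketch, guarding against the stub return of 0 on non-GPU builds (numAtoms is assumed):

    const int blockSize  = pme_gpu_get_block_size(pme);
    const int paddedSize = (blockSize > 0)
                                   ? ((numAtoms + blockSize - 1) / blockSize) * blockSize
                                   : numAtoms;
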
+
 // The following functions are all the PME GPU entry points,
 // currently inlining to nothing on non-CUDA builds.
 
@@ -305,7 +357,7 @@ inline bool pme_gpu_task_enabled(const gmx_pme_t *pme)
  *
  * \param[in] pme            The PME structure.
  */
-GPU_FUNC_QUALIFIER void pme_gpu_reset_timings(const gmx_pme_t *GPU_FUNC_ARGUMENT(pme)) GPU_FUNC_TERM
+GPU_FUNC_QUALIFIER void pme_gpu_reset_timings(const gmx_pme_t* GPU_FUNC_ARGUMENT(pme)) GPU_FUNC_TERM;
 
 /*! \brief
  * Copies the PME GPU timings to the gmx_wallclock_gpu_pme_t structure (for log output). To be called at the run end.
@@ -313,74 +365,68 @@ GPU_FUNC_QUALIFIER void pme_gpu_reset_timings(const gmx_pme_t *GPU_FUNC_ARGUMENT
  * \param[in] pme               The PME structure.
  * \param[in] timings           The gmx_wallclock_gpu_pme_t structure.
  */
-GPU_FUNC_QUALIFIER void pme_gpu_get_timings(const gmx_pme_t         *GPU_FUNC_ARGUMENT(pme),
-                                            gmx_wallclock_gpu_pme_t *GPU_FUNC_ARGUMENT(timings)) GPU_FUNC_TERM
+GPU_FUNC_QUALIFIER void pme_gpu_get_timings(const gmx_pme_t*         GPU_FUNC_ARGUMENT(pme),
+                                            gmx_wallclock_gpu_pme_t* GPU_FUNC_ARGUMENT(timings)) GPU_FUNC_TERM;
 
 /* The main PME GPU functions */
 
 /*! \brief
  * Prepares PME on GPU computation (updating the box if needed)
  * \param[in] pme               The PME data structure.
- * \param[in] needToUpdateBox   Tells if the stored unit cell parameters should be updated from \p box.
  * \param[in] box               The unit cell box.
  * \param[in] wcycle            The wallclock counter.
- * \param[in] flags             The combination of flags to affect this PME computation.
- *                              The flags are the GMX_PME_ flags from pme.h.
+ * \param[in] stepWork          The required work for this simulation step
  */
-GPU_FUNC_QUALIFIER void pme_gpu_prepare_computation(gmx_pme_t      *GPU_FUNC_ARGUMENT(pme),
-                                                    bool            GPU_FUNC_ARGUMENT(needToUpdateBox),
-                                                    const matrix    GPU_FUNC_ARGUMENT(box),
-                                                    gmx_wallcycle  *GPU_FUNC_ARGUMENT(wcycle),
-                                                    int             GPU_FUNC_ARGUMENT(flags)) GPU_FUNC_TERM
+GPU_FUNC_QUALIFIER void pme_gpu_prepare_computation(gmx_pme_t*     GPU_FUNC_ARGUMENT(pme),
+                                                    const matrix   GPU_FUNC_ARGUMENT(box),
+                                                    gmx_wallcycle* GPU_FUNC_ARGUMENT(wcycle),
+                                                    const gmx::StepWorkload& GPU_FUNC_ARGUMENT(stepWork)) GPU_FUNC_TERM;
 
 /*! \brief
- * Launches first stage of PME on GPU - H2D input transfers, spreading kernel, and D2H grid transfer if needed.
- *
- * \param[in] pme               The PME data structure.
- * \param[in] x                 The array of local atoms' coordinates.
- * \param[in] wcycle            The wallclock counter.
+ * Launches first stage of PME on GPU - spreading kernel.
+ *
+ * \param[in] pme                            The PME data structure.
+ * \param[in] xReadyOnDevice                 Event synchronizer indicating that the coordinates
+ *                                           are ready in the device memory; nullptr allowed only
+ *                                           on separate PME ranks.
+ * \param[in] wcycle                         The wallclock counter.
+ * \param[in] lambdaQ                        The Coulomb lambda of the current state of the
+ *                                           system. Only used if FEP of Coulomb is active.
+ * \param[in] useGpuDirectComm               Whether direct GPU PME-PP communication is active
+ * \param[in]  pmeCoordinateReceiverGpu      Coordinate receiver object, which must be valid when
+ *                                           direct GPU PME-PP communication is active
  */
-GPU_FUNC_QUALIFIER void pme_gpu_launch_spread(gmx_pme_t      *GPU_FUNC_ARGUMENT(pme),
-                                              const rvec     *GPU_FUNC_ARGUMENT(x),
-                                              gmx_wallcycle  *GPU_FUNC_ARGUMENT(wcycle)) GPU_FUNC_TERM
+GPU_FUNC_QUALIFIER void pme_gpu_launch_spread(
+        gmx_pme_t*                     GPU_FUNC_ARGUMENT(pme),
+        GpuEventSynchronizer*          GPU_FUNC_ARGUMENT(xReadyOnDevice),
+        gmx_wallcycle*                 GPU_FUNC_ARGUMENT(wcycle),
+        real                           GPU_FUNC_ARGUMENT(lambdaQ),
+        bool                           GPU_FUNC_ARGUMENT(useGpuDirectComm),
+        gmx::PmeCoordinateReceiverGpu* GPU_FUNC_ARGUMENT(pmeCoordinateReceiverGpu)) GPU_FUNC_TERM;
 
 /*! \brief
  * Launches middle stages of PME (FFT R2C, solving, FFT C2R) either on GPU or on CPU, depending on the run mode.
  *
  * \param[in] pme               The PME data structure.
  * \param[in] wcycle            The wallclock counter.
+ * \param[in] stepWork          The required work for this simulation step
  */
-GPU_FUNC_QUALIFIER void pme_gpu_launch_complex_transforms(gmx_pme_t       *GPU_FUNC_ARGUMENT(pme),
-                                                          gmx_wallcycle   *GPU_FUNC_ARGUMENT(wcycle)) GPU_FUNC_TERM
+GPU_FUNC_QUALIFIER void
+pme_gpu_launch_complex_transforms(gmx_pme_t*               GPU_FUNC_ARGUMENT(pme),
+                                  gmx_wallcycle*           GPU_FUNC_ARGUMENT(wcycle),
+                                  const gmx::StepWorkload& GPU_FUNC_ARGUMENT(stepWork)) GPU_FUNC_TERM;
 
 /*! \brief
  * Launches last stage of PME on GPU - force gathering and D2H force transfer.
  *
- * \param[in]  pme               The PME data structure.
- * \param[in]  wcycle            The wallclock counter.
- * \param[in]  forceTreatment    Tells how data should be treated. The gathering kernel either stores
- *                               the output reciprocal forces into the host array, or copies its contents to the GPU first
- *                               and accumulates. The reduction is non-atomic.
+ * \param[in] pme               The PME data structure.
+ * \param[in] wcycle            The wallclock counter.
+ * \param[in] lambdaQ           The Coulomb lambda to use when calculating the results.
  */
-GPU_FUNC_QUALIFIER void pme_gpu_launch_gather(const gmx_pme_t        *GPU_FUNC_ARGUMENT(pme),
-                                              gmx_wallcycle          *GPU_FUNC_ARGUMENT(wcycle),
-                                              PmeForceOutputHandling  GPU_FUNC_ARGUMENT(forceTreatment)) GPU_FUNC_TERM
+GPU_FUNC_QUALIFIER void pme_gpu_launch_gather(const gmx_pme_t* GPU_FUNC_ARGUMENT(pme),
+                                              gmx_wallcycle*   GPU_FUNC_ARGUMENT(wcycle),
+                                              real GPU_FUNC_ARGUMENT(lambdaQ)) GPU_FUNC_TERM;
 
-/*! \brief
- * Blocks until PME GPU tasks are completed, and gets the output forces and virial/energy
- * (if they were to be computed).
- *
- * \param[in]  pme            The PME data structure.
- * \param[out] wcycle         The wallclock counter.
- * \param[out] forces         The output forces.
- * \param[out] virial         The output virial matrix.
- * \param[out] energy         The output energy.
- */
-GPU_FUNC_QUALIFIER void pme_gpu_wait_finish_task(const gmx_pme_t                *GPU_FUNC_ARGUMENT(pme),
-                                                 gmx_wallcycle                  *GPU_FUNC_ARGUMENT(wcycle),
-                                                 gmx::ArrayRef<const gmx::RVec> *GPU_FUNC_ARGUMENT(forces),
-                                                 matrix                          GPU_FUNC_ARGUMENT(virial),
-                                                 real                           *GPU_FUNC_ARGUMENT(energy)) GPU_FUNC_TERM
 /*! \brief
  * Attempts to complete PME GPU tasks.
  *
@@ -391,23 +437,42 @@ GPU_FUNC_QUALIFIER void pme_gpu_wait_finish_task(const gmx_pme_t
  * by assigning the ArrayRef to the \p forces pointer passed in.
  * Virial/energy are also outputs if they were to be computed.
  *
- * Note: also launches the reinitalization of the PME output buffers.
- * TODO: this should be moved out to avoid miscounting its wall-time (as wait iso launch).
+ * \param[in]  pme             The PME data structure.
+ * \param[in]  stepWork        The required work for this simulation step
+ * \param[in]  wcycle          The wallclock counter.
+ * \param[out] forceWithVirial The output force and virial
+ * \param[out] enerd           The output energies
+ * \param[in]  lambdaQ         The Coulomb lambda to use when calculating the results.
+ * \param[in]  completionKind  Indicates whether PME task completion should only be checked rather
+ *                             than waited for
+ * \returns                    True if the PME GPU tasks have completed
+ */
+GPU_FUNC_QUALIFIER bool pme_gpu_try_finish_task(gmx_pme_t*               GPU_FUNC_ARGUMENT(pme),
+                                                const gmx::StepWorkload& GPU_FUNC_ARGUMENT(stepWork),
+                                                gmx_wallcycle*           GPU_FUNC_ARGUMENT(wcycle),
+                                                gmx::ForceWithVirial* GPU_FUNC_ARGUMENT(forceWithVirial),
+                                                gmx_enerdata_t*       GPU_FUNC_ARGUMENT(enerd),
+                                                real                  GPU_FUNC_ARGUMENT(lambdaQ),
+                                                GpuTaskCompletion GPU_FUNC_ARGUMENT(completionKind))
+        GPU_FUNC_TERM_WITH_RETURN(false);
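
When completionKind requests a check rather than a wait, the call can be polled so that CPU work overlaps GPU completion. A sketch, assuming GpuTaskCompletion provides a Check enumerator and with the overlapped work as a hypothetical placeholder:

    while (!pme_gpu_try_finish_task(pme, stepWork, wcycle, forceWithVirial,
                                    enerd, lambdaQ, GpuTaskCompletion::Check))
    {
        doOtherCpuWork(); // hypothetical overlapped CPU work
    }
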
+
+/*! \brief
+ * Blocks until PME GPU tasks are completed, and gets the output forces and virial/energy
+ * (if they were to be computed).
  *
- * \param[in]  pme            The PME data structure.
- * \param[in]  wcycle         The wallclock counter.
- * \param[out] forces         The output forces.
- * \param[out] virial         The output virial matrix.
- * \param[out] energy         The output energy.
- * \param[in]  completionKind  Indicates whether PME task completion should only be checked rather than waited for
- * \returns                   True if the PME GPU tasks have completed
+ * \param[in]  pme             The PME data structure.
+ * \param[in]  stepWork        The required work for this simulation step
+ * \param[in]  wcycle          The wallclock counter.
+ * \param[out] forceWithVirial The output force and virial
+ * \param[out] enerd           The output energies
+ * \param[in]  lambdaQ         The Coulomb lambda to use when calculating the results.
  */
-GPU_FUNC_QUALIFIER bool pme_gpu_try_finish_task(const gmx_pme_t                *GPU_FUNC_ARGUMENT(pme),
-                                                gmx_wallcycle                  *GPU_FUNC_ARGUMENT(wcycle),
-                                                gmx::ArrayRef<const gmx::RVec> *GPU_FUNC_ARGUMENT(forces),
-                                                matrix                          GPU_FUNC_ARGUMENT(virial),
-                                                real                           *GPU_FUNC_ARGUMENT(energy),
-                                                GpuTaskCompletion               GPU_FUNC_ARGUMENT(completionKind)) GPU_FUNC_TERM_WITH_RETURN(false)
+GPU_FUNC_QUALIFIER void pme_gpu_wait_and_reduce(gmx_pme_t*               GPU_FUNC_ARGUMENT(pme),
+                                                const gmx::StepWorkload& GPU_FUNC_ARGUMENT(stepWork),
+                                                gmx_wallcycle*           GPU_FUNC_ARGUMENT(wcycle),
+                                                gmx::ForceWithVirial* GPU_FUNC_ARGUMENT(forceWithVirial),
+                                                gmx_enerdata_t*       GPU_FUNC_ARGUMENT(enerd),
+                                                real GPU_FUNC_ARGUMENT(lambdaQ)) GPU_FUNC_TERM;
 
 /*! \brief
  * The PME GPU reinitialization function that is called both at the end of any PME computation and on any load balancing.
@@ -422,7 +487,28 @@ GPU_FUNC_QUALIFIER bool pme_gpu_try_finish_task(const gmx_pme_t                *
  * \param[in] pme            The PME data structure.
  * \param[in] wcycle         The wallclock counter.
  */
-GPU_FUNC_QUALIFIER void pme_gpu_reinit_computation(const gmx_pme_t *GPU_FUNC_ARGUMENT(pme),
-                                                   gmx_wallcycle   *GPU_FUNC_ARGUMENT(wcycle)) GPU_FUNC_TERM
+GPU_FUNC_QUALIFIER void pme_gpu_reinit_computation(const gmx_pme_t* GPU_FUNC_ARGUMENT(pme),
+                                                   gmx_wallcycle* GPU_FUNC_ARGUMENT(wcycle)) GPU_FUNC_TERM;
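
Taken together, the entry points above suggest the following per-step sequence; a sketch assuming all objects were created during setup, with direct GPU PME-PP communication disabled:

    pme_gpu_prepare_computation(pme, box, wcycle, stepWork);
    pme_gpu_launch_spread(pme, xReadyOnDevice, wcycle, lambdaQ,
                          /* useGpuDirectComm */ false,
                          /* pmeCoordinateReceiverGpu */ nullptr);
    pme_gpu_launch_complex_transforms(pme, wcycle, stepWork);
    pme_gpu_launch_gather(pme, wcycle, lambdaQ);
    // ... other force work can overlap with the GPU stages here ...
    pme_gpu_wait_and_reduce(pme, stepWork, wcycle, forceWithVirial, enerd, lambdaQ);
    pme_gpu_reinit_computation(pme, wcycle); // reset buffers for the next step
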
+
+/*! \brief Set pointer to device copy of coordinate data.
+ * \param[in] pme            The PME data structure.
+ * \param[in] d_x            The pointer to the positions buffer to be set
+ */
+GPU_FUNC_QUALIFIER void pme_gpu_set_device_x(const gmx_pme_t*        GPU_FUNC_ARGUMENT(pme),
+                                             DeviceBuffer<gmx::RVec> GPU_FUNC_ARGUMENT(d_x)) GPU_FUNC_TERM;
+
+/*! \brief Get pointer to device copy of force data.
+ * \param[in] pme            The PME data structure.
+ * \returns                  Pointer to force data
+ */
+GPU_FUNC_QUALIFIER DeviceBuffer<gmx::RVec> pme_gpu_get_device_f(const gmx_pme_t* GPU_FUNC_ARGUMENT(pme))
+        GPU_FUNC_TERM_WITH_RETURN(DeviceBuffer<gmx::RVec>{});
+
+/*! \brief Get pointer to the device synchronizer object that allows syncing on PME force calculation completion
+ * \param[in] pme            The PME data structure.
+ * \returns                  Pointer to synchronizer
+ */
+GPU_FUNC_QUALIFIER GpuEventSynchronizer* pme_gpu_get_f_ready_synchronizer(const gmx_pme_t* GPU_FUNC_ARGUMENT(pme))
+        GPU_FUNC_TERM_WITH_RETURN(nullptr);
 
 #endif