Unify coordinate copy handling across GPU platforms
[alexxy/gromacs.git] / src/gromacs/ewald/pme_gpu.cpp
index a5f54f004d1472fd15d7d2e5b5c3f29eb199c352..3f8e00f9953222e72934ca985fa368dea0e03449 100644
@@ -1,7 +1,7 @@
 /*
  * This file is part of the GROMACS molecular simulation package.
  *
- * Copyright (c) 2016,2017,2018,2019,2020, by the GROMACS development team, led by
+ * Copyright (c) 2016,2017,2018,2019,2020,2021, by the GROMACS development team, led by
  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
  * and including many others, as listed in the AUTHORS file in the
  * top-level source directory and at http://www.gromacs.org.
@@ -59,6 +59,7 @@
 #include "gromacs/utility/fatalerror.h"
 #include "gromacs/utility/gmxassert.h"
 #include "gromacs/utility/stringutil.h"
+#include "gromacs/ewald/pme_coordinate_receiver_gpu.h"
 
 #include "pme_gpu_internal.h"
 #include "pme_gpu_settings.h"
@@ -123,26 +124,25 @@ int pme_gpu_get_block_size(const gmx_pme_t* pme)
 void inline parallel_3dfft_execute_gpu_wrapper(gmx_pme_t*             pme,
                                                const int              gridIndex,
                                                enum gmx_fft_direction dir,
-                                               gmx_wallcycle_t        wcycle)
+                                               gmx_wallcycle*         wcycle)
 {
-    GMX_ASSERT(gridIndex == 0, "Only single grid supported");
     if (pme_gpu_settings(pme->gpu).performGPUFFT)
     {
-        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
-        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
+        wallcycle_start_nocount(wcycle, WallCycleCounter::LaunchGpu);
+        wallcycle_sub_start_nocount(wcycle, WallCycleSubCounter::LaunchGpuPme);
         pme_gpu_3dfft(pme->gpu, dir, gridIndex);
-        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
-        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
+        wallcycle_sub_stop(wcycle, WallCycleSubCounter::LaunchGpuPme);
+        wallcycle_stop(wcycle, WallCycleCounter::LaunchGpu);
     }
     else
     {
-        wallcycle_start(wcycle, ewcPME_FFT_MIXED_MODE);
+        wallcycle_start(wcycle, WallCycleCounter::PmeFftMixedMode);
 #pragma omp parallel for num_threads(pme->nthread) schedule(static)
         for (int thread = 0; thread < pme->nthread; thread++)
         {
             gmx_parallel_3dfft_execute(pme->pfft_setup[gridIndex], dir, thread, wcycle);
         }
-        wallcycle_stop(wcycle, ewcPME_FFT_MIXED_MODE);
+        wallcycle_stop(wcycle, WallCycleCounter::PmeFftMixedMode);
     }
 }
 
@@ -173,11 +173,11 @@ void pme_gpu_prepare_computation(gmx_pme_t*               pme,
 
     if (stepWork.haveDynamicBox || shouldUpdateBox) // || is to make the first computation always update
     {
-        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
-        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
+        wallcycle_start_nocount(wcycle, WallCycleCounter::LaunchGpu);
+        wallcycle_sub_start_nocount(wcycle, WallCycleSubCounter::LaunchGpuPme);
         pme_gpu_update_input_box(pmeGpu, box);
-        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
-        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
+        wallcycle_sub_stop(wcycle, WallCycleSubCounter::LaunchGpuPme);
+        wallcycle_stop(wcycle, WallCycleCounter::LaunchGpu);
 
         if (!pme_gpu_settings(pmeGpu).performGPUSolve)
         {
@@ -190,74 +190,91 @@ void pme_gpu_prepare_computation(gmx_pme_t*               pme,
     }
 }
 
-void pme_gpu_launch_spread(gmx_pme_t* pme, GpuEventSynchronizer* xReadyOnDevice, gmx_wallcycle* wcycle)
+void pme_gpu_launch_spread(gmx_pme_t*                     pme,
+                           GpuEventSynchronizer*          xReadyOnDevice,
+                           gmx_wallcycle*                 wcycle,
+                           const real                     lambdaQ,
+                           const bool                     useGpuDirectComm,
+                           gmx::PmeCoordinateReceiverGpu* pmeCoordinateReceiverGpu)
 {
     GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
-    GMX_ASSERT(xReadyOnDevice || !pme->bPPnode || (GMX_GPU != GMX_GPU_CUDA),
-               "Need a valid xReadyOnDevice on PP+PME ranks with CUDA.");
+    GMX_ASSERT(xReadyOnDevice || !pme->bPPnode, "Need a valid xReadyOnDevice on PP+PME ranks.");
+    GMX_ASSERT(pme->doCoulomb, "Only Coulomb PME can be run on GPU.");
 
     PmeGpu* pmeGpu = pme->gpu;
 
-    const unsigned int gridIndex = 0;
-    real*              fftgrid   = pme->fftgrid[gridIndex];
+    GMX_ASSERT(pmeGpu->common->ngrids == 1 || (pmeGpu->common->ngrids == 2 && pme->bFEP_q),
+               "If not decoupling Coulomb interactions there should only be one FEP grid. If "
+               "decoupling Coulomb interactions there should be two grids.");
+
+    /* PME on GPU can currently manage two grids:
+     * grid_index=0: Coulomb PME with charges in the normal state or from FEP state A.
+     * grid_index=1: Coulomb PME with charges from FEP state B.
+     */
+    real** fftgrids = pme->fftgrid;
     /* Spread the coefficients on a grid */
     const bool computeSplines = true;
     const bool spreadCharges  = true;
-    wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
-    wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
-    pme_gpu_spread(pmeGpu, xReadyOnDevice, gridIndex, fftgrid, computeSplines, spreadCharges);
-    wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
-    wallcycle_stop(wcycle, ewcLAUNCH_GPU);
+    wallcycle_start_nocount(wcycle, WallCycleCounter::LaunchGpu);
+    wallcycle_sub_start_nocount(wcycle, WallCycleSubCounter::LaunchGpuPme);
+    pme_gpu_spread(
+            pmeGpu, xReadyOnDevice, fftgrids, computeSplines, spreadCharges, lambdaQ, useGpuDirectComm, pmeCoordinateReceiverGpu);
+    wallcycle_sub_stop(wcycle, WallCycleSubCounter::LaunchGpuPme);
+    wallcycle_stop(wcycle, WallCycleCounter::LaunchGpu);
 }
 
 void pme_gpu_launch_complex_transforms(gmx_pme_t* pme, gmx_wallcycle* wcycle, const gmx::StepWorkload& stepWork)
 {
-    PmeGpu*            pmeGpu    = pme->gpu;
-    const auto&        settings  = pmeGpu->settings;
-    const unsigned int gridIndex = 0;
-    t_complex*         cfftgrid  = pme->cfftgrid[gridIndex];
+    PmeGpu*     pmeGpu   = pme->gpu;
+    const auto& settings = pmeGpu->settings;
     // There's no support for computing energy without virial, or vice versa
     const bool computeEnergyAndVirial = stepWork.computeEnergy || stepWork.computeVirial;
     if (!settings.performGPUFFT)
     {
-        wallcycle_start(wcycle, ewcWAIT_GPU_PME_SPREAD);
+        wallcycle_start(wcycle, WallCycleCounter::WaitGpuPmeSpread);
         pme_gpu_sync_spread_grid(pme->gpu);
-        wallcycle_stop(wcycle, ewcWAIT_GPU_PME_SPREAD);
+        wallcycle_stop(wcycle, WallCycleCounter::WaitGpuPmeSpread);
     }
 
     try
     {
-        /* do R2C 3D-FFT */
-        parallel_3dfft_execute_gpu_wrapper(pme, gridIndex, GMX_FFT_REAL_TO_COMPLEX, wcycle);
-
-        /* solve in k-space for our local cells */
-        if (settings.performGPUSolve)
-        {
-            // TODO grid ordering should be set up at pme init time.
-            const auto gridOrdering = settings.useDecomposition ? GridOrdering::YZX : GridOrdering::XYZ;
-            wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
-            wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
-            pme_gpu_solve(pmeGpu, cfftgrid, gridOrdering, computeEnergyAndVirial);
-            wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
-            wallcycle_stop(wcycle, ewcLAUNCH_GPU);
-        }
-        else
+        /* The 3dffts and the solve are done in a loop to simplify things, even if this means that
+         * there will be two kernel launches for solve. */
+        for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
         {
-            wallcycle_start(wcycle, ewcPME_SOLVE_MIXED_MODE);
-#pragma omp parallel for num_threads(pme->nthread) schedule(static)
-            for (int thread = 0; thread < pme->nthread; thread++)
+            /* do R2C 3D-FFT */
+            t_complex* cfftgrid = pme->cfftgrid[gridIndex];
+            parallel_3dfft_execute_gpu_wrapper(pme, gridIndex, GMX_FFT_REAL_TO_COMPLEX, wcycle);
+
+            /* solve in k-space for our local cells */
+            if (settings.performGPUSolve)
             {
-                solve_pme_yzx(pme, cfftgrid, pme->boxVolume, computeEnergyAndVirial, pme->nthread, thread);
+                const auto gridOrdering =
+                        settings.useDecomposition ? GridOrdering::YZX : GridOrdering::XYZ;
+                wallcycle_start_nocount(wcycle, WallCycleCounter::LaunchGpu);
+                wallcycle_sub_start_nocount(wcycle, WallCycleSubCounter::LaunchGpuPme);
+                pme_gpu_solve(pmeGpu, gridIndex, cfftgrid, gridOrdering, computeEnergyAndVirial);
+                wallcycle_sub_stop(wcycle, WallCycleSubCounter::LaunchGpuPme);
+                wallcycle_stop(wcycle, WallCycleCounter::LaunchGpu);
+            }
+            else
+            {
+                wallcycle_start(wcycle, WallCycleCounter::PmeSolveMixedMode);
+#pragma omp parallel for num_threads(pme->nthread) schedule(static)
+                for (int thread = 0; thread < pme->nthread; thread++)
+                {
+                    solve_pme_yzx(pme, cfftgrid, pme->boxVolume, computeEnergyAndVirial, pme->nthread, thread);
+                }
+                wallcycle_stop(wcycle, WallCycleCounter::PmeSolveMixedMode);
             }
-            wallcycle_stop(wcycle, ewcPME_SOLVE_MIXED_MODE);
-        }
 
-        parallel_3dfft_execute_gpu_wrapper(pme, gridIndex, GMX_FFT_COMPLEX_TO_REAL, wcycle);
+            parallel_3dfft_execute_gpu_wrapper(pme, gridIndex, GMX_FFT_COMPLEX_TO_REAL, wcycle);
+        }
     }
     GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
 }
 
-void pme_gpu_launch_gather(const gmx_pme_t* pme, gmx_wallcycle gmx_unused* wcycle)
+void pme_gpu_launch_gather(const gmx_pme_t* pme, gmx_wallcycle gmx_unused* wcycle, const real lambdaQ)
 {
     GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
 
@@ -266,13 +283,13 @@ void pme_gpu_launch_gather(const gmx_pme_t* pme, gmx_wallcycle gmx_unused* wcycl
         return;
     }
 
-    wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
-    wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
-    const unsigned int gridIndex = 0;
-    real*              fftgrid   = pme->fftgrid[gridIndex];
-    pme_gpu_gather(pme->gpu, reinterpret_cast<float*>(fftgrid));
-    wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
-    wallcycle_stop(wcycle, ewcLAUNCH_GPU);
+    wallcycle_start_nocount(wcycle, WallCycleCounter::LaunchGpu);
+    wallcycle_sub_start_nocount(wcycle, WallCycleSubCounter::LaunchGpuPme);
+
+    float** fftgrids = pme->fftgrid;
+    pme_gpu_gather(pme->gpu, fftgrids, lambdaQ);
+    wallcycle_sub_stop(wcycle, WallCycleSubCounter::LaunchGpuPme);
+    wallcycle_stop(wcycle, WallCycleCounter::LaunchGpu);
 }
 
 //! Accumulate the \c forcesToAdd to \c f, using the available threads.
@@ -280,7 +297,7 @@ static void sum_forces(gmx::ArrayRef<gmx::RVec> f, gmx::ArrayRef<const gmx::RVec
 {
     const int end = forceToAdd.size();
 
-    int gmx_unused nt = gmx_omp_nthreads_get(emntPME);
+    int gmx_unused nt = gmx_omp_nthreads_get(ModuleMultiThread::Pme);
 #pragma omp parallel for num_threads(nt) schedule(static)
     for (int i = 0; i < end; i++)
     {
@@ -295,7 +312,7 @@ static void pme_gpu_reduce_outputs(const bool            computeEnergyAndVirial,
                                    gmx::ForceWithVirial* forceWithVirial,
                                    gmx_enerdata_t*       enerd)
 {
-    wallcycle_start(wcycle, ewcPME_GPU_F_REDUCTION);
+    wallcycle_start(wcycle, WallCycleCounter::PmeGpuFReduction);
     GMX_ASSERT(forceWithVirial, "Invalid force pointer");
 
     if (computeEnergyAndVirial)
@@ -303,12 +320,13 @@ static void pme_gpu_reduce_outputs(const bool            computeEnergyAndVirial,
         GMX_ASSERT(enerd, "Invalid energy output manager");
         forceWithVirial->addVirialContribution(output.coulombVirial_);
         enerd->term[F_COUL_RECIP] += output.coulombEnergy_;
+        enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Coul] += output.coulombDvdl_;
     }
     if (output.haveForceOutput_)
     {
         sum_forces(forceWithVirial->force_, output.forces_);
     }
-    wallcycle_stop(wcycle, ewcPME_GPU_F_REDUCTION);
+    wallcycle_stop(wcycle, WallCycleCounter::PmeGpuFReduction);
 }
 
 bool pme_gpu_try_finish_task(gmx_pme_t*               pme,
@@ -316,6 +334,7 @@ bool pme_gpu_try_finish_task(gmx_pme_t*               pme,
                              gmx_wallcycle*           wcycle,
                              gmx::ForceWithVirial*    forceWithVirial,
                              gmx_enerdata_t*          enerd,
+                             const real               lambdaQ,
                              GpuTaskCompletion        completionKind)
 {
     GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
@@ -327,15 +346,16 @@ bool pme_gpu_try_finish_task(gmx_pme_t*               pme,
     // time needed for that checking, but do not yet record that the
     // gather has occured.
     bool           needToSynchronize      = true;
-    constexpr bool c_streamQuerySupported = (GMX_GPU == GMX_GPU_CUDA);
+    constexpr bool c_streamQuerySupported = GMX_GPU_CUDA;
+
     // TODO: implement c_streamQuerySupported with an additional GpuEventSynchronizer per stream (#2521)
     if ((completionKind == GpuTaskCompletion::Check) && c_streamQuerySupported)
     {
-        wallcycle_start_nocount(wcycle, ewcWAIT_GPU_PME_GATHER);
+        wallcycle_start_nocount(wcycle, WallCycleCounter::WaitGpuPmeGather);
         // Query the PME stream for completion of all tasks enqueued and
         // if we're not done, stop the timer before early return.
         const bool pmeGpuDone = pme_gpu_stream_query(pme->gpu);
-        wallcycle_stop(wcycle, ewcWAIT_GPU_PME_GATHER);
+        wallcycle_stop(wcycle, WallCycleCounter::WaitGpuPmeGather);
 
         if (!pmeGpuDone)
         {
@@ -344,7 +364,7 @@ bool pme_gpu_try_finish_task(gmx_pme_t*               pme,
         needToSynchronize = false;
     }
 
-    wallcycle_start(wcycle, ewcWAIT_GPU_PME_GATHER);
+    wallcycle_start(wcycle, WallCycleCounter::WaitGpuPmeGather);
     // If the above check passed, then there is no need to make an
     // explicit synchronization call.
     if (needToSynchronize)
@@ -355,8 +375,9 @@ bool pme_gpu_try_finish_task(gmx_pme_t*               pme,
     pme_gpu_update_timings(pme->gpu);
     // There's no support for computing energy without virial, or vice versa
     const bool computeEnergyAndVirial = stepWork.computeEnergy || stepWork.computeVirial;
-    PmeOutput  output                 = pme_gpu_getOutput(*pme, computeEnergyAndVirial);
-    wallcycle_stop(wcycle, ewcWAIT_GPU_PME_GATHER);
+    PmeOutput  output                 = pme_gpu_getOutput(
+            *pme, computeEnergyAndVirial, pme->gpu->common->ngrids > 1 ? lambdaQ : 1.0);
+    wallcycle_stop(wcycle, WallCycleCounter::WaitGpuPmeGather);
 
     GMX_ASSERT(pme->gpu->settings.useGpuForceReduction == !output.haveForceOutput_,
                "When forces are reduced on the CPU, there needs to be force output");
@@ -366,11 +387,14 @@ bool pme_gpu_try_finish_task(gmx_pme_t*               pme,
 }
 
 // This is used by PME-only ranks
-PmeOutput pme_gpu_wait_finish_task(gmx_pme_t* pme, const bool computeEnergyAndVirial, gmx_wallcycle* wcycle)
+PmeOutput pme_gpu_wait_finish_task(gmx_pme_t*     pme,
+                                   const bool     computeEnergyAndVirial,
+                                   const real     lambdaQ,
+                                   gmx_wallcycle* wcycle)
 {
     GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
 
-    wallcycle_start(wcycle, ewcWAIT_GPU_PME_GATHER);
+    wallcycle_start(wcycle, WallCycleCounter::WaitGpuPmeGather);
 
     // Synchronize the whole PME stream at once, including D2H result transfers
     // if there are outputs we need to wait for at this step; we still call getOutputs
@@ -380,8 +404,9 @@ PmeOutput pme_gpu_wait_finish_task(gmx_pme_t* pme, const bool computeEnergyAndVi
         pme_gpu_synchronize(pme->gpu);
     }
 
-    PmeOutput output = pme_gpu_getOutput(*pme, computeEnergyAndVirial);
-    wallcycle_stop(wcycle, ewcWAIT_GPU_PME_GATHER);
+    PmeOutput output = pme_gpu_getOutput(
+            *pme, computeEnergyAndVirial, pme->gpu->common->ngrids > 1 ? lambdaQ : 1.0);
+    wallcycle_stop(wcycle, WallCycleCounter::WaitGpuPmeGather);
     return output;
 }
 
@@ -390,11 +415,13 @@ void pme_gpu_wait_and_reduce(gmx_pme_t*               pme,
                              const gmx::StepWorkload& stepWork,
                              gmx_wallcycle*           wcycle,
                              gmx::ForceWithVirial*    forceWithVirial,
-                             gmx_enerdata_t*          enerd)
+                             gmx_enerdata_t*          enerd,
+                             const real               lambdaQ)
 {
     // There's no support for computing energy without virial, or vice versa
     const bool computeEnergyAndVirial = stepWork.computeEnergy || stepWork.computeVirial;
-    PmeOutput  output = pme_gpu_wait_finish_task(pme, computeEnergyAndVirial, wcycle);
+    PmeOutput  output                 = pme_gpu_wait_finish_task(
+            pme, computeEnergyAndVirial, pme->gpu->common->ngrids > 1 ? lambdaQ : 1.0, wcycle);
     GMX_ASSERT(pme->gpu->settings.useGpuForceReduction == !output.haveForceOutput_,
                "When forces are reduced on the CPU, there needs to be force output");
     pme_gpu_reduce_outputs(computeEnergyAndVirial, output, wcycle, forceWithVirial, enerd);
@@ -404,23 +431,23 @@ void pme_gpu_reinit_computation(const gmx_pme_t* pme, gmx_wallcycle* wcycle)
 {
     GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
 
-    wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
-    wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
+    wallcycle_start_nocount(wcycle, WallCycleCounter::LaunchGpu);
+    wallcycle_sub_start_nocount(wcycle, WallCycleSubCounter::LaunchGpuPme);
 
     pme_gpu_update_timings(pme->gpu);
 
     pme_gpu_clear_grids(pme->gpu);
     pme_gpu_clear_energy_virial(pme->gpu);
 
-    wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
-    wallcycle_stop(wcycle, ewcLAUNCH_GPU);
+    wallcycle_sub_stop(wcycle, WallCycleSubCounter::LaunchGpuPme);
+    wallcycle_stop(wcycle, WallCycleCounter::LaunchGpu);
 }
 
-void* pme_gpu_get_device_f(const gmx_pme_t* pme)
+DeviceBuffer<gmx::RVec> pme_gpu_get_device_f(const gmx_pme_t* pme)
 {
     if (!pme || !pme_gpu_active(pme))
     {
-        return nullptr;
+        return DeviceBuffer<gmx::RVec>{};
     }
     return pme_gpu_get_kernelparam_forces(pme->gpu);
 }