Apply clang-format to source tree
[alexxy/gromacs.git] / src / gromacs / ewald / pme_gpu.cpp
index 565a63946a1cfee711bc9316bdaf5978b2ccbb42..31467da6da1683e0e655157eed515efc5672f1fa 100644 (file)
@@ -64,7 +64,7 @@
 #include "pme_internal.h"
 #include "pme_solve.h"
 
-void pme_gpu_reset_timings(const gmx_pme_t *pme)
+void pme_gpu_reset_timings(const gmx_pme_t* pme)
 {
     if (pme_gpu_active(pme))
     {
@@ -72,7 +72,7 @@ void pme_gpu_reset_timings(const gmx_pme_t *pme)
     }
 }
 
-void pme_gpu_get_timings(const gmx_pme_t *pme, gmx_wallclock_gpu_pme_t *timings)
+void pme_gpu_get_timings(const gmx_pme_t* pme, gmx_wallclock_gpu_pme_t* timings)
 {
     if (pme_gpu_active(pme))
     {
@@ -80,7 +80,7 @@ void pme_gpu_get_timings(const gmx_pme_t *pme, gmx_wallclock_gpu_pme_t *timings)
     }
 }
 
-int pme_gpu_get_padding_size(const gmx_pme_t *pme)
+int pme_gpu_get_padding_size(const gmx_pme_t* pme)
 {
 
     if (!pme || !pme_gpu_active(pme))
@@ -101,10 +101,10 @@ int pme_gpu_get_padding_size(const gmx_pme_t *pme)
  * \param[in] dir            The FFT direction enum.
  * \param[in] wcycle         The wallclock counter.
  */
-void inline parallel_3dfft_execute_gpu_wrapper(gmx_pme_t              *pme,
-                                               const int               gridIndex,
-                                               enum gmx_fft_direction  dir,
-                                               gmx_wallcycle_t         wcycle)
+void inline parallel_3dfft_execute_gpu_wrapper(gmx_pme_t*             pme,
+                                               const int              gridIndex,
+                                               enum gmx_fft_direction dir,
+                                               gmx_wallcycle_t        wcycle)
 {
     GMX_ASSERT(gridIndex == 0, "Only single grid supported");
     if (pme_gpu_performs_FFT(pme->gpu))
@@ -129,19 +129,19 @@ void inline parallel_3dfft_execute_gpu_wrapper(gmx_pme_t              *pme,
 
 /* The PME computation code split into a few separate functions. */
 
-void pme_gpu_prepare_computation(gmx_pme_t            *pme,
-                                 bool                  needToUpdateBox,
-                                 const matrix          box,
-                                 gmx_wallcycle        *wcycle,
-                                 int                   flags,
-                                 bool                  useGpuForceReduction)
+void pme_gpu_prepare_computation(gmx_pme_t*     pme,
+                                 bool           needToUpdateBox,
+                                 const matrix   box,
+                                 gmx_wallcycle* wcycle,
+                                 int            flags,
+                                 bool           useGpuForceReduction)
 {
     GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
     GMX_ASSERT(pme->nnodes > 0, "");
     GMX_ASSERT(pme->nnodes == 1 || pme->ndecompdim > 0, "");
 
-    PmeGpu *pmeGpu = pme->gpu;
-    pmeGpu->settings.currentFlags         = flags;
+    PmeGpu* pmeGpu                = pme->gpu;
+    pmeGpu->settings.currentFlags = flags;
     // TODO these flags are only here to honor the CPU PME code, and probably should be removed
     pmeGpu->settings.useGpuForceReduction = useGpuForceReduction;
 
@@ -150,7 +150,7 @@ void pme_gpu_prepare_computation(gmx_pme_t            *pme,
     {
         for (int j = 0; j <= i; ++j)
         {
-            shouldUpdateBox                  |= (pmeGpu->common->previousBox[i][j] != box[i][j]);
+            shouldUpdateBox |= (pmeGpu->common->previousBox[i][j] != box[i][j]);
             pmeGpu->common->previousBox[i][j] = box[i][j];
         }
     }
@@ -174,17 +174,16 @@ void pme_gpu_prepare_computation(gmx_pme_t            *pme,
     }
 }
 
-void pme_gpu_launch_spread(gmx_pme_t            *pme,
-                           GpuEventSynchronizer *xReadyOnDevice,
-                           gmx_wallcycle        *wcycle)
+void pme_gpu_launch_spread(gmx_pme_t* pme, GpuEventSynchronizer* xReadyOnDevice, gmx_wallcycle* wcycle)
 {
     GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
-    GMX_ASSERT(xReadyOnDevice || !pme->bPPnode || (GMX_GPU != GMX_GPU_CUDA), "Need a valid xReadyOnDevice on PP+PME ranks with CUDA.");
+    GMX_ASSERT(xReadyOnDevice || !pme->bPPnode || (GMX_GPU != GMX_GPU_CUDA),
+               "Need a valid xReadyOnDevice on PP+PME ranks with CUDA.");
 
-    PmeGpu            *pmeGpu = pme->gpu;
+    PmeGpu* pmeGpu = pme->gpu;
 
-    const unsigned int gridIndex  = 0;
-    real              *fftgrid    = pme->fftgrid[gridIndex];
+    const unsigned int gridIndex = 0;
+    real*              fftgrid   = pme->fftgrid[gridIndex];
     if (pmeGpu->settings.currentFlags & GMX_PME_SPREAD)
     {
         /* Spread the coefficients on a grid */
@@ -198,14 +197,13 @@ void pme_gpu_launch_spread(gmx_pme_t            *pme,
     }
 }
 
-void pme_gpu_launch_complex_transforms(gmx_pme_t      *pme,
-                                       gmx_wallcycle  *wcycle)
+void pme_gpu_launch_complex_transforms(gmx_pme_t* pme, gmx_wallcycle* wcycle)
 {
-    PmeGpu            *pmeGpu                 = pme->gpu;
-    const bool         computeEnergyAndVirial = (pmeGpu->settings.currentFlags & GMX_PME_CALC_ENER_VIR) != 0;
-    const bool         performBackFFT         = (pmeGpu->settings.currentFlags & (GMX_PME_CALC_F | GMX_PME_CALC_POT)) != 0;
-    const unsigned int gridIndex              = 0;
-    t_complex         *cfftgrid               = pme->cfftgrid[gridIndex];
+    PmeGpu*    pmeGpu                 = pme->gpu;
+    const bool computeEnergyAndVirial = (pmeGpu->settings.currentFlags & GMX_PME_CALC_ENER_VIR) != 0;
+    const bool performBackFFT = (pmeGpu->settings.currentFlags & (GMX_PME_CALC_F | GMX_PME_CALC_POT)) != 0;
+    const unsigned int gridIndex = 0;
+    t_complex*         cfftgrid  = pme->cfftgrid[gridIndex];
 
     if (pmeGpu->settings.currentFlags & GMX_PME_SPREAD)
     {
@@ -240,8 +238,8 @@ void pme_gpu_launch_complex_transforms(gmx_pme_t      *pme,
 #pragma omp parallel for num_threads(pme->nthread) schedule(static)
                 for (int thread = 0; thread < pme->nthread; thread++)
                 {
-                    solve_pme_yzx(pme, cfftgrid, pme->boxVolume,
-                                  computeEnergyAndVirial, pme->nthread, thread);
+                    solve_pme_yzx(pme, cfftgrid, pme->boxVolume, computeEnergyAndVirial,
+                                  pme->nthread, thread);
                 }
                 wallcycle_stop(wcycle, ewcPME_SOLVE_MIXED_MODE);
             }
@@ -251,12 +249,11 @@ void pme_gpu_launch_complex_transforms(gmx_pme_t      *pme,
         {
             parallel_3dfft_execute_gpu_wrapper(pme, gridIndex, GMX_FFT_COMPLEX_TO_REAL, wcycle);
         }
-    } GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
+    }
+    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
 }
 
-void pme_gpu_launch_gather(const gmx_pme_t                 *pme,
-                           gmx_wallcycle gmx_unused        *wcycle,
-                           PmeForceOutputHandling           forceTreatment)
+void pme_gpu_launch_gather(const gmx_pme_t* pme, gmx_wallcycle gmx_unused* wcycle, PmeForceOutputHandling forceTreatment)
 {
     GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
 
@@ -267,18 +264,17 @@ void pme_gpu_launch_gather(const gmx_pme_t                 *pme,
 
     wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
     wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
-    const unsigned int gridIndex  = 0;
-    real              *fftgrid    = pme->fftgrid[gridIndex];
-    pme_gpu_gather(pme->gpu, forceTreatment, reinterpret_cast<float *>(fftgrid));
+    const unsigned int gridIndex = 0;
+    real*              fftgrid   = pme->fftgrid[gridIndex];
+    pme_gpu_gather(pme->gpu, forceTreatment, reinterpret_cast<float*>(fftgrid));
     wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
     wallcycle_stop(wcycle, ewcLAUNCH_GPU);
 }
 
 //! Accumulate the \c forcesToAdd to \c f, using the available threads.
-static void sum_forces(gmx::ArrayRef<gmx::RVec>       f,
-                       gmx::ArrayRef<const gmx::RVec> forceToAdd)
+static void sum_forces(gmx::ArrayRef<gmx::RVec> f, gmx::ArrayRef<const gmx::RVec> forceToAdd)
 {
-    const int      end = forceToAdd.size();
+    const int end = forceToAdd.size();
 
     int gmx_unused nt = gmx_omp_nthreads_get(emntPME);
 #pragma omp parallel for num_threads(nt) schedule(static)
@@ -290,10 +286,10 @@ static void sum_forces(gmx::ArrayRef<gmx::RVec>       f,
 
 //! Reduce quantities from \c output to \c forceWithVirial and \c enerd.
 static void pme_gpu_reduce_outputs(const int             flags,
-                                   const PmeOutput      &output,
-                                   gmx_wallcycle        *wcycle,
-                                   gmx::ForceWithVirial *forceWithVirial,
-                                   gmx_enerdata_t       *enerd)
+                                   const PmeOutput&      output,
+                                   gmx_wallcycle*        wcycle,
+                                   gmx::ForceWithVirial* forceWithVirial,
+                                   gmx_enerdata_t*       enerd)
 {
     wallcycle_start(wcycle, ewcPME_GPU_F_REDUCTION);
     GMX_ASSERT(forceWithVirial, "Invalid force pointer");
@@ -312,15 +308,16 @@ static void pme_gpu_reduce_outputs(const int             flags,
     wallcycle_stop(wcycle, ewcPME_GPU_F_REDUCTION);
 }
 
-bool pme_gpu_try_finish_task(gmx_pme_t            *pme,
+bool pme_gpu_try_finish_task(gmx_pme_t*            pme,
                              const int             flags,
-                             gmx_wallcycle        *wcycle,
-                             gmx::ForceWithVirial *forceWithVirial,
-                             gmx_enerdata_t       *enerd,
+                             gmx_wallcycle*        wcycle,
+                             gmx::ForceWithVirial* forceWithVirial,
+                             gmx_enerdata_t*       enerd,
                              GpuTaskCompletion     completionKind)
 {
     GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
-    GMX_ASSERT(!pme->gpu->settings.useGpuForceReduction, "GPU force reduction should not be active on the pme_gpu_try_finish_task() path");
+    GMX_ASSERT(!pme->gpu->settings.useGpuForceReduction,
+               "GPU force reduction should not be active on the pme_gpu_try_finish_task() path");
 
     // First, if possible, check whether all tasks on the stream have
     // completed, and return fast if not. Accumulate to wcycle the
@@ -356,16 +353,15 @@ bool pme_gpu_try_finish_task(gmx_pme_t            *pme,
     PmeOutput output = pme_gpu_getOutput(*pme, flags);
     wallcycle_stop(wcycle, ewcWAIT_GPU_PME_GATHER);
 
-    GMX_ASSERT(pme->gpu->settings.useGpuForceReduction == !output.haveForceOutput_, "When forces are reduced on the CPU, there needs to be force output");
+    GMX_ASSERT(pme->gpu->settings.useGpuForceReduction == !output.haveForceOutput_,
+               "When forces are reduced on the CPU, there needs to be force output");
     pme_gpu_reduce_outputs(flags, output, wcycle, forceWithVirial, enerd);
 
     return true;
 }
 
 // This is used by PME-only ranks
-PmeOutput pme_gpu_wait_finish_task(gmx_pme_t     *pme,
-                                   const int      flags,
-                                   gmx_wallcycle *wcycle)
+PmeOutput pme_gpu_wait_finish_task(gmx_pme_t* pme, const int flags, gmx_wallcycle* wcycle)
 {
     GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
 
@@ -386,19 +382,19 @@ PmeOutput pme_gpu_wait_finish_task(gmx_pme_t     *pme,
 }
 
 // This is used when not using the alternate-waiting reduction
-void pme_gpu_wait_and_reduce(gmx_pme_t            *pme,
+void pme_gpu_wait_and_reduce(gmx_pme_t*            pme,
                              const int             flags,
-                             gmx_wallcycle        *wcycle,
-                             gmx::ForceWithVirial *forceWithVirial,
-                             gmx_enerdata_t       *enerd)
+                             gmx_wallcycle*        wcycle,
+                             gmx::ForceWithVirial* forceWithVirial,
+                             gmx_enerdata_t*       enerd)
 {
     PmeOutput output = pme_gpu_wait_finish_task(pme, flags, wcycle);
-    GMX_ASSERT(pme->gpu->settings.useGpuForceReduction == !output.haveForceOutput_, "When forces are reduced on the CPU, there needs to be force output");
+    GMX_ASSERT(pme->gpu->settings.useGpuForceReduction == !output.haveForceOutput_,
+               "When forces are reduced on the CPU, there needs to be force output");
     pme_gpu_reduce_outputs(flags, output, wcycle, forceWithVirial, enerd);
 }
 
-void pme_gpu_reinit_computation(const gmx_pme_t *pme,
-                                gmx_wallcycle   *wcycle)
+void pme_gpu_reinit_computation(const gmx_pme_t* pme, gmx_wallcycle* wcycle)
 {
     GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
 
@@ -414,13 +410,14 @@ void pme_gpu_reinit_computation(const gmx_pme_t *pme,
     wallcycle_stop(wcycle, ewcLAUNCH_GPU);
 }
 
-DeviceBuffer<float> pme_gpu_get_device_x(const gmx_pme_t *pme)
+DeviceBuffer<float> pme_gpu_get_device_x(const gmx_pme_t* pme)
 {
-    GMX_ASSERT((pme && pme_gpu_active(pme)), "PME GPU coordinates buffer was requested from uninitialized PME module");
+    GMX_ASSERT((pme && pme_gpu_active(pme)),
+               "PME GPU coordinates buffer was requested from uninitialized PME module");
     return pme_gpu_get_kernelparam_coordinates(pme->gpu);
 }
 
-void *pme_gpu_get_device_f(const gmx_pme_t *pme)
+void* pme_gpu_get_device_f(const gmx_pme_t* pme)
 {
     if (!pme || !pme_gpu_active(pme))
     {
@@ -429,8 +426,7 @@ void *pme_gpu_get_device_f(const gmx_pme_t *pme)
     return pme_gpu_get_kernelparam_forces(pme->gpu);
 }
 
-void pme_gpu_set_device_x(const gmx_pme_t     *pme,
-                          DeviceBuffer<float>  d_x)
+void pme_gpu_set_device_x(const gmx_pme_t* pme, DeviceBuffer<float> d_x)
 {
     GMX_ASSERT(pme != nullptr, "Null pointer is passed as a PME to the set coordinates function.");
     GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
@@ -438,7 +434,7 @@ void pme_gpu_set_device_x(const gmx_pme_t     *pme,
     pme_gpu_set_kernelparam_coordinates(pme->gpu, d_x);
 }
 
-void *pme_gpu_get_device_stream(const gmx_pme_t *pme)
+void* pme_gpu_get_device_stream(const gmx_pme_t* pme)
 {
     if (!pme || !pme_gpu_active(pme))
     {
@@ -447,7 +443,7 @@ void *pme_gpu_get_device_stream(const gmx_pme_t *pme)
     return pme_gpu_get_stream(pme->gpu);
 }
 
-void *pme_gpu_get_device_context(const gmx_pme_t *pme)
+void* pme_gpu_get_device_context(const gmx_pme_t* pme)
 {
     if (!pme || !pme_gpu_active(pme))
     {
@@ -456,7 +452,7 @@ void *pme_gpu_get_device_context(const gmx_pme_t *pme)
     return pme_gpu_get_context(pme->gpu);
 }
 
-GpuEventSynchronizer * pme_gpu_get_f_ready_synchronizer(const gmx_pme_t *pme)
+GpuEventSynchronizer* pme_gpu_get_f_ready_synchronizer(const gmx_pme_t* pme)
 {
     if (!pme || !pme_gpu_active(pme))
     {