#include "pme_internal.h"
#include "pme_solve.h"
-void pme_gpu_reset_timings(const gmx_pme_t *pme)
+void pme_gpu_reset_timings(const gmx_pme_t* pme)
{
if (pme_gpu_active(pme))
{
        pme_gpu_reset_timings(pme->gpu);
    }
}
-void pme_gpu_get_timings(const gmx_pme_t *pme, gmx_wallclock_gpu_pme_t *timings)
+void pme_gpu_get_timings(const gmx_pme_t* pme, gmx_wallclock_gpu_pme_t* timings)
{
if (pme_gpu_active(pme))
{
        pme_gpu_get_timings(pme->gpu, timings);
    }
}
-int pme_gpu_get_padding_size(const gmx_pme_t *pme)
+int pme_gpu_get_padding_size(const gmx_pme_t* pme)
{
    if (!pme || !pme_gpu_active(pme))
    {
        return 0;
    }
    return pme_gpu_get_atom_data_alignment(pme->gpu);
}
/*! \brief A convenience wrapper for launching either the GPU or CPU FFT.
 *
 * \param[in] pme The PME data structure.
 * \param[in] gridIndex The grid index, only used with the CPU FFT.
 * \param[in] dir The FFT direction enum.
* \param[in] wcycle The wallclock counter.
*/
-void inline parallel_3dfft_execute_gpu_wrapper(gmx_pme_t *pme,
- const int gridIndex,
- enum gmx_fft_direction dir,
- gmx_wallcycle_t wcycle)
+inline void parallel_3dfft_execute_gpu_wrapper(gmx_pme_t* pme,
+ const int gridIndex,
+ enum gmx_fft_direction dir,
+ gmx_wallcycle_t wcycle)
{
GMX_ASSERT(gridIndex == 0, "Only single grid supported");
    if (pme_gpu_performs_FFT(pme->gpu))
    {
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
        pme_gpu_3dfft(pme->gpu, dir, gridIndex);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
    }
    else
    {
        // Mixed mode: fall back to the threaded CPU FFT (body reconstructed around the existing guard)
        wallcycle_start(wcycle, ewcPME_FFT_MIXED_MODE);
#pragma omp parallel for num_threads(pme->nthread) schedule(static)
        for (int thread = 0; thread < pme->nthread; thread++)
        {
            gmx_parallel_3dfft_execute(pme->pfft_setup[gridIndex], dir, thread, wcycle);
        }
        wallcycle_stop(wcycle, ewcPME_FFT_MIXED_MODE);
    }
}
/* The PME computation is split into a few separate functions below. */
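/* A typical caller drives these stages in order; this sketch is illustrative only,
 * using the functions defined in this file:
 *   pme_gpu_prepare_computation(pme, needToUpdateBox, box, wcycle, flags, useGpuForceReduction);
 *   pme_gpu_launch_spread(pme, xReadyOnDevice, wcycle);
 *   pme_gpu_launch_complex_transforms(pme, wcycle);
 *   pme_gpu_launch_gather(pme, wcycle, forceTreatment);
 *   pme_gpu_wait_and_reduce(pme, flags, wcycle, forceWithVirial, enerd);
 */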
-void pme_gpu_prepare_computation(gmx_pme_t *pme,
- bool needToUpdateBox,
- const matrix box,
- gmx_wallcycle *wcycle,
- int flags,
- bool useGpuForceReduction)
+void pme_gpu_prepare_computation(gmx_pme_t* pme,
+ bool needToUpdateBox,
+ const matrix box,
+ gmx_wallcycle* wcycle,
+ int flags,
+ bool useGpuForceReduction)
{
GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
GMX_ASSERT(pme->nnodes > 0, "");
GMX_ASSERT(pme->nnodes == 1 || pme->ndecompdim > 0, "");
- PmeGpu *pmeGpu = pme->gpu;
- pmeGpu->settings.currentFlags = flags;
+ PmeGpu* pmeGpu = pme->gpu;
+ pmeGpu->settings.currentFlags = flags;
// TODO these flags are only here to honor the CPU PME code, and probably should be removed
pmeGpu->settings.useGpuForceReduction = useGpuForceReduction;
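    // Detect a changed unit cell by comparing the lower triangle of the box with the cached copy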
    bool shouldUpdateBox = false;
    for (int i = 0; i < DIM; ++i)
    {
for (int j = 0; j <= i; ++j)
{
- shouldUpdateBox |= (pmeGpu->common->previousBox[i][j] != box[i][j]);
+ shouldUpdateBox |= (pmeGpu->common->previousBox[i][j] != box[i][j]);
pmeGpu->common->previousBox[i][j] = box[i][j];
}
}
    if (needToUpdateBox || shouldUpdateBox)
    {
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
        pme_gpu_update_input_box(pmeGpu, box);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
    }
}
-void pme_gpu_launch_spread(gmx_pme_t *pme,
- GpuEventSynchronizer *xReadyOnDevice,
- gmx_wallcycle *wcycle)
+void pme_gpu_launch_spread(gmx_pme_t* pme, GpuEventSynchronizer* xReadyOnDevice, gmx_wallcycle* wcycle)
{
GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
- GMX_ASSERT(xReadyOnDevice || !pme->bPPnode || (GMX_GPU != GMX_GPU_CUDA), "Need a valid xReadyOnDevice on PP+PME ranks with CUDA.");
+ GMX_ASSERT(xReadyOnDevice || !pme->bPPnode || (GMX_GPU != GMX_GPU_CUDA),
+ "Need a valid xReadyOnDevice on PP+PME ranks with CUDA.");
- PmeGpu *pmeGpu = pme->gpu;
+ PmeGpu* pmeGpu = pme->gpu;
- const unsigned int gridIndex = 0;
- real *fftgrid = pme->fftgrid[gridIndex];
+ const unsigned int gridIndex = 0;
+ real* fftgrid = pme->fftgrid[gridIndex];
if (pmeGpu->settings.currentFlags & GMX_PME_SPREAD)
{
        /* Spread the coefficients on a grid */
        const bool computeSplines = true;
        const bool spreadCharges  = true;
        pme_gpu_spread(pmeGpu, xReadyOnDevice, gridIndex, fftgrid, computeSplines, spreadCharges);
}
}
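//! Launches the 3D FFTs and the solve stage; in mixed mode, parts of this work run on the CPU.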
-void pme_gpu_launch_complex_transforms(gmx_pme_t *pme,
- gmx_wallcycle *wcycle)
+void pme_gpu_launch_complex_transforms(gmx_pme_t* pme, gmx_wallcycle* wcycle)
{
- PmeGpu *pmeGpu = pme->gpu;
- const bool computeEnergyAndVirial = (pmeGpu->settings.currentFlags & GMX_PME_CALC_ENER_VIR) != 0;
- const bool performBackFFT = (pmeGpu->settings.currentFlags & (GMX_PME_CALC_F | GMX_PME_CALC_POT)) != 0;
- const unsigned int gridIndex = 0;
- t_complex *cfftgrid = pme->cfftgrid[gridIndex];
+ PmeGpu* pmeGpu = pme->gpu;
+ const bool computeEnergyAndVirial = (pmeGpu->settings.currentFlags & GMX_PME_CALC_ENER_VIR) != 0;
+ const bool performBackFFT = (pmeGpu->settings.currentFlags & (GMX_PME_CALC_F | GMX_PME_CALC_POT)) != 0;
+ const unsigned int gridIndex = 0;
+ t_complex* cfftgrid = pme->cfftgrid[gridIndex];
    try
    {
        if (pmeGpu->settings.currentFlags & GMX_PME_SPREAD)
        {
            /* do R2C 3D-FFT */
            parallel_3dfft_execute_gpu_wrapper(pme, gridIndex, GMX_FFT_REAL_TO_COMPLEX, wcycle);
        }
        // Mixed mode: the solve stage runs on the CPU when the GPU does not perform it
        if (!pme_gpu_performs_solve(pmeGpu))
        {
            wallcycle_start(wcycle, ewcPME_SOLVE_MIXED_MODE);
#pragma omp parallel for num_threads(pme->nthread) schedule(static)
for (int thread = 0; thread < pme->nthread; thread++)
{
- solve_pme_yzx(pme, cfftgrid, pme->boxVolume,
- computeEnergyAndVirial, pme->nthread, thread);
+ solve_pme_yzx(pme, cfftgrid, pme->boxVolume, computeEnergyAndVirial,
+ pme->nthread, thread);
}
wallcycle_stop(wcycle, ewcPME_SOLVE_MIXED_MODE);
}
    if (performBackFFT)
    {
parallel_3dfft_execute_gpu_wrapper(pme, gridIndex, GMX_FFT_COMPLEX_TO_REAL, wcycle);
}
- } GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
+ }
+ GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
}
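//! Launches the GPU force gather kernel for the current step.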
-void pme_gpu_launch_gather(const gmx_pme_t *pme,
- gmx_wallcycle gmx_unused *wcycle,
- PmeForceOutputHandling forceTreatment)
+void pme_gpu_launch_gather(const gmx_pme_t* pme, gmx_wallcycle gmx_unused* wcycle, PmeForceOutputHandling forceTreatment)
{
GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
- const unsigned int gridIndex = 0;
- real *fftgrid = pme->fftgrid[gridIndex];
- pme_gpu_gather(pme->gpu, forceTreatment, reinterpret_cast<float *>(fftgrid));
+ const unsigned int gridIndex = 0;
+ real* fftgrid = pme->fftgrid[gridIndex];
+ pme_gpu_gather(pme->gpu, forceTreatment, reinterpret_cast<float*>(fftgrid));
wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
wallcycle_stop(wcycle, ewcLAUNCH_GPU);
}
//! Accumulate the \c forceToAdd to \c f, using the available threads.
-static void sum_forces(gmx::ArrayRef<gmx::RVec> f,
- gmx::ArrayRef<const gmx::RVec> forceToAdd)
+static void sum_forces(gmx::ArrayRef<gmx::RVec> f, gmx::ArrayRef<const gmx::RVec> forceToAdd)
{
- const int end = forceToAdd.size();
+ const int end = forceToAdd.size();
int gmx_unused nt = gmx_omp_nthreads_get(emntPME);
#pragma omp parallel for num_threads(nt) schedule(static)
    for (int i = 0; i < end; i++)
    {
        f[i] += forceToAdd[i];
    }
}
//! Reduce quantities from \c output to \c forceWithVirial and \c enerd.
static void pme_gpu_reduce_outputs(const int flags,
- const PmeOutput &output,
- gmx_wallcycle *wcycle,
- gmx::ForceWithVirial *forceWithVirial,
- gmx_enerdata_t *enerd)
+ const PmeOutput& output,
+ gmx_wallcycle* wcycle,
+ gmx::ForceWithVirial* forceWithVirial,
+ gmx_enerdata_t* enerd)
{
wallcycle_start(wcycle, ewcPME_GPU_F_REDUCTION);
GMX_ASSERT(forceWithVirial, "Invalid force pointer");
    // Reconstructed reduction body; the PmeOutput field names follow GROMACS conventions
    const bool haveComputedEnergyAndVirial = (flags & GMX_PME_CALC_ENER_VIR) != 0;
    if (haveComputedEnergyAndVirial)
    {
        GMX_ASSERT(enerd, "Invalid energy output manager");
        forceWithVirial->addVirialContribution(output.coulombVirial_);
        enerd->term[F_COUL_RECIP] += output.coulombEnergy_;
    }
    if (output.haveForceOutput_)
    {
        sum_forces(forceWithVirial->force_, output.forces_);
    }
    wallcycle_stop(wcycle, ewcPME_GPU_F_REDUCTION);
}
-bool pme_gpu_try_finish_task(gmx_pme_t *pme,
+bool pme_gpu_try_finish_task(gmx_pme_t* pme,
const int flags,
- gmx_wallcycle *wcycle,
- gmx::ForceWithVirial *forceWithVirial,
- gmx_enerdata_t *enerd,
+ gmx_wallcycle* wcycle,
+ gmx::ForceWithVirial* forceWithVirial,
+ gmx_enerdata_t* enerd,
GpuTaskCompletion completionKind)
{
GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
- GMX_ASSERT(!pme->gpu->settings.useGpuForceReduction, "GPU force reduction should not be active on the pme_gpu_try_finish_task() path");
+ GMX_ASSERT(!pme->gpu->settings.useGpuForceReduction,
+ "GPU force reduction should not be active on the pme_gpu_try_finish_task() path");
    // First, if possible, check whether all tasks on the stream have
    // completed, and return fast if not. Accumulate to wcycle the
    // time spent in that check, but do not yet record the gather as done.
    bool           needToSynchronize      = true;
    constexpr bool c_streamQuerySupported = (GMX_GPU == GMX_GPU_CUDA);
    if ((completionKind == GpuTaskCompletion::Check) && c_streamQuerySupported)
    {
        wallcycle_start_nocount(wcycle, ewcWAIT_GPU_PME_GATHER);
        const bool pmeGpuDone = pme_gpu_stream_query(pme->gpu);
        wallcycle_stop(wcycle, ewcWAIT_GPU_PME_GATHER);
        if (!pmeGpuDone)
        {
            return false;
        }
        needToSynchronize = false;
    }
    wallcycle_start(wcycle, ewcWAIT_GPU_PME_GATHER);
    // A successful check means the stream is already idle; otherwise block until it is
    if (needToSynchronize)
    {
        pme_gpu_synchronize(pme->gpu);
    }
PmeOutput output = pme_gpu_getOutput(*pme, flags);
wallcycle_stop(wcycle, ewcWAIT_GPU_PME_GATHER);
- GMX_ASSERT(pme->gpu->settings.useGpuForceReduction == !output.haveForceOutput_, "When forces are reduced on the CPU, there needs to be force output");
+ GMX_ASSERT(pme->gpu->settings.useGpuForceReduction == !output.haveForceOutput_,
+ "When forces are reduced on the CPU, there needs to be force output");
pme_gpu_reduce_outputs(flags, output, wcycle, forceWithVirial, enerd);
return true;
}
// This is used by PME-only ranks
-PmeOutput pme_gpu_wait_finish_task(gmx_pme_t *pme,
- const int flags,
- gmx_wallcycle *wcycle)
+PmeOutput pme_gpu_wait_finish_task(gmx_pme_t* pme, const int flags, gmx_wallcycle* wcycle)
{
GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
    wallcycle_start(wcycle, ewcWAIT_GPU_PME_GATHER);
    // Synchronize the whole PME stream at once, including device-to-host result transfers
    pme_gpu_synchronize(pme->gpu);
    PmeOutput output = pme_gpu_getOutput(*pme, flags);
    wallcycle_stop(wcycle, ewcWAIT_GPU_PME_GATHER);
    return output;
}
// This is used when not using the alternate-waiting reduction
-void pme_gpu_wait_and_reduce(gmx_pme_t *pme,
+void pme_gpu_wait_and_reduce(gmx_pme_t* pme,
const int flags,
- gmx_wallcycle *wcycle,
- gmx::ForceWithVirial *forceWithVirial,
- gmx_enerdata_t *enerd)
+ gmx_wallcycle* wcycle,
+ gmx::ForceWithVirial* forceWithVirial,
+ gmx_enerdata_t* enerd)
{
PmeOutput output = pme_gpu_wait_finish_task(pme, flags, wcycle);
- GMX_ASSERT(pme->gpu->settings.useGpuForceReduction == !output.haveForceOutput_, "When forces are reduced on the CPU, there needs to be force output");
+ GMX_ASSERT(pme->gpu->settings.useGpuForceReduction == !output.haveForceOutput_,
+ "When forces are reduced on the CPU, there needs to be force output");
pme_gpu_reduce_outputs(flags, output, wcycle, forceWithVirial, enerd);
}
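//! Resets the GPU grid and timing state at the end of a step so that the next PME computation starts cleanly.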
-void pme_gpu_reinit_computation(const gmx_pme_t *pme,
- gmx_wallcycle *wcycle)
+void pme_gpu_reinit_computation(const gmx_pme_t* pme, gmx_wallcycle* wcycle)
{
GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
    wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
    wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
    pme_gpu_update_timings(pme->gpu);
    pme_gpu_reinit_computation(pme->gpu);
    wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
    wallcycle_stop(wcycle, ewcLAUNCH_GPU);
}
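/* The accessors below expose PME GPU internals (device buffers, stream, context, sync event)
 * to other GPU modules; apart from the coordinate getter, they return null handles when PME
 * is not running on a GPU. */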
-DeviceBuffer<float> pme_gpu_get_device_x(const gmx_pme_t *pme)
+DeviceBuffer<float> pme_gpu_get_device_x(const gmx_pme_t* pme)
{
- GMX_ASSERT((pme && pme_gpu_active(pme)), "PME GPU coordinates buffer was requested from uninitialized PME module");
+ GMX_ASSERT((pme && pme_gpu_active(pme)),
+ "PME GPU coordinates buffer was requested from uninitialized PME module");
return pme_gpu_get_kernelparam_coordinates(pme->gpu);
}
-void *pme_gpu_get_device_f(const gmx_pme_t *pme)
+void* pme_gpu_get_device_f(const gmx_pme_t* pme)
{
if (!pme || !pme_gpu_active(pme))
    {
        return nullptr;
    }
    return pme_gpu_get_kernelparam_forces(pme->gpu);
}
-void pme_gpu_set_device_x(const gmx_pme_t *pme,
- DeviceBuffer<float> d_x)
+void pme_gpu_set_device_x(const gmx_pme_t* pme, DeviceBuffer<float> d_x)
{
GMX_ASSERT(pme != nullptr, "Null pointer is passed as a PME to the set coordinates function.");
GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
pme_gpu_set_kernelparam_coordinates(pme->gpu, d_x);
}
-void *pme_gpu_get_device_stream(const gmx_pme_t *pme)
+void* pme_gpu_get_device_stream(const gmx_pme_t* pme)
{
if (!pme || !pme_gpu_active(pme))
    {
        return nullptr;
    }
    return pme_gpu_get_stream(pme->gpu);
}
-void *pme_gpu_get_device_context(const gmx_pme_t *pme)
+void* pme_gpu_get_device_context(const gmx_pme_t* pme)
{
if (!pme || !pme_gpu_active(pme))
    {
        return nullptr;
    }
    return pme_gpu_get_context(pme->gpu);
}
-GpuEventSynchronizer * pme_gpu_get_f_ready_synchronizer(const gmx_pme_t *pme)
+GpuEventSynchronizer* pme_gpu_get_f_ready_synchronizer(const gmx_pme_t* pme)
{
if (!pme || !pme_gpu_active(pme))
    {
        return nullptr;
    }
    // Assumed internal getter name, mirroring the accessors above
    return pme_gpu_get_forces_ready_synchronizer(pme->gpu);
}