/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2017,2018, by the GROMACS development team, led by
+ * Copyright (c) 2017,2018,2019, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
return region;
}
-void ddOpenBalanceRegionCpu(const gmx_domdec_t *dd,
- DdAllowBalanceRegionReopen gmx_unused allowReopen)
+void DDBalanceRegionHandler::openRegionCpuImpl(DdAllowBalanceRegionReopen gmx_unused allowReopen) const
{
- BalanceRegion *reg = getBalanceRegion(dd);
- if (dd->comm->bRecordLoad)
+ BalanceRegion *reg = getBalanceRegion(dd_);
+ if (dd_->comm->bRecordLoad)
{
GMX_ASSERT(allowReopen == DdAllowBalanceRegionReopen::yes || !reg->isOpen, "Should not open an already opened region");
}
}
-void ddOpenBalanceRegionGpu(const gmx_domdec_t *dd)
+void DDBalanceRegionHandler::openRegionGpuImpl() const
{
- BalanceRegion *reg = getBalanceRegion(dd);
- if (reg->isOpen)
- {
- GMX_ASSERT(!reg->isOpenOnGpu, "Can not re-open a GPU balance region");
- reg->isOpenOnGpu = true;
- }
+ BalanceRegion *reg = getBalanceRegion(dd_);
+ GMX_ASSERT(reg->isOpen, "Can only open a GPU region inside an open CPU region");
+ GMX_ASSERT(!reg->isOpenOnGpu, "Can not re-open a GPU balance region");
+ reg->isOpenOnGpu = true;
}
void ddReopenBalanceRegionCpu(const gmx_domdec_t *dd)
}
}
-void ddCloseBalanceRegionCpu(const gmx_domdec_t *dd)
+void DDBalanceRegionHandler::closeRegionCpuImpl() const
{
- BalanceRegion *reg = getBalanceRegion(dd);
+ BalanceRegion *reg = getBalanceRegion(dd_);
if (reg->isOpen && reg->isOpenOnCpu)
{
GMX_ASSERT(reg->isOpenOnCpu, "Can only close an open region");
{
/* We can close the region */
float cyclesCpu = cycles - reg->cyclesOpenCpu;
- dd_cycles_add(dd, cyclesCpu, ddCyclF);
+ dd_cycles_add(dd_, cyclesCpu, ddCyclF);
reg->isOpen = false;
}
}
}
-void ddCloseBalanceRegionGpu(const gmx_domdec_t *dd,
- float waitGpuCyclesInCpuRegion,
- DdBalanceRegionWaitedForGpu waitedForGpu)
+void DDBalanceRegionHandler::closeRegionGpuImpl(float waitGpuCyclesInCpuRegion,
+ DdBalanceRegionWaitedForGpu waitedForGpu) const
{
- BalanceRegion *reg = getBalanceRegion(dd);
+ BalanceRegion *reg = getBalanceRegion(dd_);
if (reg->isOpen)
{
GMX_ASSERT(reg->isOpenOnGpu, "Can not close a non-open GPU balance region");
}
float cyclesCpu = reg->cyclesLastCpu - reg->cyclesOpenCpu;
- dd_cycles_add(dd, cyclesCpu + waitGpuCyclesEstimate, ddCyclF);
+ dd_cycles_add(dd_, cyclesCpu + waitGpuCyclesEstimate, ddCyclF);
/* Register the total GPU wait time, to redistribute with GPU sharing */
- dd_cycles_add(dd, waitGpuCyclesInCpuRegion + waitGpuCyclesEstimate, ddCyclWaitGPU);
+ dd_cycles_add(dd_, waitGpuCyclesInCpuRegion + waitGpuCyclesEstimate, ddCyclWaitGPU);
/* Close the region */
reg->isOpenOnGpu = false;
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2017,2018, by the GROMACS development team, led by
+ * Copyright (c) 2017,2018,2019, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#ifndef GMX_DOMDEC_DLBTIMING_H
#define GMX_DOMDEC_DLBTIMING_H
+#include "gromacs/mdtypes/commrec.h"
+
struct BalanceRegion;
struct gmx_domdec_t;
struct t_nrnb;
-/*! \brief Tells if we should open the balancing region */
-enum class DdOpenBalanceRegionBeforeForceComputation
-{
- no, //!< Do not open a balancing region
- yes //!< Open the balancing region before update or after pair-search
-};
-
-/*! \brief Tells if we should close the balancing region after the force computation has completed */
-enum class DdCloseBalanceRegionAfterForceComputation
-{
- no, //!< Do not close a balancing region
- yes //!< Close the balancing region after computation completed
-};
-
/*! \brief Tells if we should open the balancing region */
enum class DdAllowBalanceRegionReopen
{
yes //!< We had to wait for the GPU to finish computation
};
-/*! \brief Returns a pointer to a constructed \p BalanceRegion struct
- *
- * Should be replaced by a proper constructor once BalanceRegion is a proper
- * class (requires restructering in domdec.cpp).
- */
-BalanceRegion *ddBalanceRegionAllocate();
-
-/*! \brief Open the load balance timing region on the CPU
- *
- * Opens the balancing region for timing how much time it takes to perform
- * the (balancable part of) the MD step. This should be called right after
- * the last communication during the previous step to maximize the region.
- * In practice this means right after the force communication finished
- * or just before neighbor search at search steps.
- * It is assumed that computation done in the region either scales along
- * with the domain size or takes constant time.
- *
- * \param[in,out] dd The domain decomposition struct
- * \param[in] allowReopen Allows calling with a potentially already opened region
- */
-void ddOpenBalanceRegionCpu(const gmx_domdec_t *dd,
- DdAllowBalanceRegionReopen allowReopen);
-
-/*! \brief Open the load balance timing region for the CPU
- *
- * This can only be called within a region that is open on the CPU side.
- */
-void ddOpenBalanceRegionGpu(const gmx_domdec_t *dd);
-
/*! \brief Re-open the, already opened, load balance timing region
*
* This function should be called after every MPI communication that occurs
*/
void ddReopenBalanceRegionCpu(const gmx_domdec_t *dd);
-/*! \brief Close the load balance timing region on the CPU side
- *
- * \param[in,out] dd The domain decomposition struct
+/*! \libinternal
+ * \brief Manager for starting and stopping the dynamic load balancing region
*/
-void ddCloseBalanceRegionCpu(const gmx_domdec_t *dd);
+class DDBalanceRegionHandler
+{
+ public:
+ //! Constructor, pass a pointer to t_commrec or nullptr when not using domain decomposition
+ DDBalanceRegionHandler(const t_commrec *cr) :
+ dd_(cr != nullptr ? cr->dd : nullptr)
+ {
+ useBalancingRegion_ = (cr != nullptr &&
+ cr->dd != nullptr &&
+ cr->nnodes - cr->npmenodes > 1);
+ }
+
+ /*! \brief Returns whether were are actually using the balancing region
+ */
+ bool useBalancingRegion() const
+ {
+ return useBalancingRegion_;
+ }
+
+ /*! \brief Open the load balance timing region on the CPU
+ *
+ * Opens the balancing region for timing how much time it takes to perform
+ * the (balancable part of) the MD step. This should be called right after
+ * the last communication during the previous step to maximize the region.
+ * In practice this means right after the force communication finished
+ * or just before neighbor search at search steps.
+ * It is assumed that computation done in the region either scales along
+ * with the domain size or takes constant time.
+ *
+ * \param[in] allowReopen Allows calling with a potentially already opened region
+ */
+ void openBeforeForceComputationCpu(DdAllowBalanceRegionReopen allowReopen) const
+ {
+ if (useBalancingRegion_)
+ {
+ openRegionCpuImpl(allowReopen);
+ }
+ }
+
+ /*! \brief Open the load balance timing region for the CPU
+ *
+ * This can only be called within a region that is open on the CPU side.
+ */
+ void openBeforeForceComputationGpu() const
+ {
+ if (useBalancingRegion_)
+ {
+ openRegionGpuImpl();
+ }
+ }
+
+ /*! \brief Re-open the, already opened, load balance timing region
+ *
+ * This function should be called after every MPI communication that occurs
+ * in the main MD loop.
+ * Note that the current setup assumes that all MPI communication acts like
+ * a global barrier. But if some ranks don't participate in communication
+ * or if some ranks communicate faster with neighbors than others,
+ * the obtained timings might not accurately reflect the computation time.
+ */
+ void reopenRegionCpu() const
+ {
+ if (useBalancingRegion_)
+ {
+ ddReopenBalanceRegionCpu(dd_);
+ }
+ }
+
+ /*! \brief Close the load balance timing region on the CPU side
+ */
+ void closeAfterForceComputationCpu() const
+ {
+ if (useBalancingRegion_)
+ {
+ closeRegionCpuImpl();
+ }
+ }
+
+ /*! \brief Close the load balance timing region on the GPU side
+ *
+ * This should be called after the CPU receives the last (local) results
+ * from the GPU. The wait time for these results is estimated, depending
+ * on the \p waitedForGpu parameter.
+ * If called on an already closed region, this call does nothing.
+ *
+ * \param[in] waitCyclesGpuInCpuRegion The time we waited for the GPU earlier, overlapping completely with the open CPU region
+ * \param[in] waitedForGpu Tells if we waited for the GPU to finish now
+ */
+ void closeAfterForceComputationGpu(float waitCyclesGpuInCpuRegion,
+ DdBalanceRegionWaitedForGpu waitedForGpu) const
+ {
+ if (useBalancingRegion_)
+ {
+ closeRegionGpuImpl(waitCyclesGpuInCpuRegion, waitedForGpu);
+ }
+ }
+
+ private:
+ /*! \brief Open the load balance timing region on the CPU
+ *
+ * \param[in] allowReopen Allows calling with a potentially already opened region
+ */
+ void openRegionCpuImpl(DdAllowBalanceRegionReopen allowReopen) const;
+
+ /*! \brief Open the load balance timing region for the GPU
+ *
+ * This can only be called within a region that is open on the CPU side.
+ */
+ void openRegionGpuImpl() const;
+
+ /*! \brief Close the load balance timing region on the CPU side
+ */
+ void closeRegionCpuImpl() const;
+
+ /*! \brief Close the load balance timing region on the GPU side
+ *
+ * \param[in] waitCyclesGpuInCpuRegion The time we waited for the GPU earlier, overlapping completely with the open CPU region
+ * \param[in] waitedForGpu Tells if we waited for the GPU to finish now
+ */
+ void closeRegionGpuImpl(float waitCyclesGpuInCpuRegion,
+ DdBalanceRegionWaitedForGpu waitedForGpu) const;
+
+ //! Tells whether the balancing region should be active
+ bool useBalancingRegion_;
+ //! A pointer to the DD struct, only valid with useBalancingRegion_=true
+ gmx_domdec_t *dd_;
+};
-/*! \brief Close the load balance timing region on the GPU side
- *
- * This should be called after the CPU receives the last (local) results
- * from the GPU. The wait time for these results is estimated, depending
- * on the \p waitedForGpu parameter.
- * If called on an already closed region, this call does nothing.
+/*! \brief Returns a pointer to a constructed \p BalanceRegion struct
*
- * \param[in,out] dd The domain decomposition struct
- * \param[in] waitCyclesGpuInCpuRegion The time we waited for the GPU earlier, overlapping completely with the open CPU region
- * \param[in] waitedForGpu Tells if we waited for the GPU to finish now
+ * Should be replaced by a proper constructor once BalanceRegion is a proper
+ * class (requires restructuring in domdec.cpp).
*/
-void ddCloseBalanceRegionGpu(const gmx_domdec_t *dd,
- float waitCyclesGpuInCpuRegion,
- DdBalanceRegionWaitedForGpu waitedForGpu);
+BalanceRegion *ddBalanceRegionAllocate();
/*! \brief Start the force flop count */
void dd_force_flop_start(struct gmx_domdec_t *dd, t_nrnb *nrnb);
#include <cmath>
#include <cstring>
+#include "gromacs/domdec/dlbtiming.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/ewald/ewald.h"
}
}
-void do_force_lowlevel(t_forcerec *fr,
- const t_inputrec *ir,
- const t_idef *idef,
- const t_commrec *cr,
- const gmx_multisim_t *ms,
- t_nrnb *nrnb,
- gmx_wallcycle_t wcycle,
- const t_mdatoms *md,
- rvec x[],
- history_t *hist,
- rvec *forceForUseWithShiftForces,
- gmx::ForceWithVirial *forceWithVirial,
- gmx_enerdata_t *enerd,
- t_fcdata *fcd,
- matrix box,
- t_lambda *fepvals,
- real *lambda,
- const t_graph *graph,
- const t_blocka *excl,
- rvec mu_tot[],
- int flags,
- float *cycles_pme)
+void do_force_lowlevel(t_forcerec *fr,
+ const t_inputrec *ir,
+ const t_idef *idef,
+ const t_commrec *cr,
+ const gmx_multisim_t *ms,
+ t_nrnb *nrnb,
+ gmx_wallcycle_t wcycle,
+ const t_mdatoms *md,
+ rvec x[],
+ history_t *hist,
+ rvec *forceForUseWithShiftForces,
+ gmx::ForceWithVirial *forceWithVirial,
+ gmx_enerdata_t *enerd,
+ t_fcdata *fcd,
+ matrix box,
+ t_lambda *fepvals,
+ real *lambda,
+ const t_graph *graph,
+ const t_blocka *excl,
+ rvec mu_tot[],
+ int flags,
+ float *cycles_pme,
+ const DDBalanceRegionHandler &ddBalanceRegionHandler)
{
int i, j;
int donb_flags;
* balancing region here, because PME does global
* communication that acts as a global barrier.
*/
- if (DOMAINDECOMP(cr))
- {
- ddCloseBalanceRegionCpu(cr->dd);
- }
+ ddBalanceRegionHandler.closeAfterForceComputationCpu();
wallcycle_start(wcycle, ewcPMEMESH);
status = gmx_pme_do(fr->pmedata,
#ifndef GMX_MDLIB_FORCE_H
#define GMX_MDLIB_FORCE_H
-#include "gromacs/domdec/dlbtiming.h"
#include "gromacs/math/arrayrefwithpadding.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/utility/arrayref.h"
+class DDBalanceRegionHandler;
struct gmx_edsam;
struct gmx_enerdata_t;
struct gmx_enfrot;
double t,
gmx_edsam *ed,
int flags,
- DdOpenBalanceRegionBeforeForceComputation ddOpenBalanceRegion,
- DdCloseBalanceRegionAfterForceComputation ddCloseBalanceRegion);
+ const DDBalanceRegionHandler &ddBalanceRegionHandler);
/* Communicate coordinates (if parallel).
* Do neighbor searching (if necessary).
const t_blocka *excl,
rvec mu_tot[2],
int flags,
- float *cycles_pme);
+ float *cycles_pme,
+ const DDBalanceRegionHandler &ddBalanceRegionHandler);
/* Call all the force routines */
#endif
double t,
rvec mu_tot,
const gmx_vsite_t *vsite,
- DdOpenBalanceRegionBeforeForceComputation ddOpenBalanceRegion,
- DdCloseBalanceRegionAfterForceComputation ddCloseBalanceRegion)
+ const DDBalanceRegionHandler &ddBalanceRegionHandler)
{
int nshell;
t_shell *shell;
state->lambda, graph,
fr, ppForceWorkload, vsite, mu_tot, t, nullptr,
(bDoNS ? GMX_FORCE_NS : 0) | shellfc_flags,
- ddOpenBalanceRegion, ddCloseBalanceRegion);
+ ddBalanceRegionHandler);
sf_dir = 0;
if (nflexcon)
md, enerd, fcd, state->lambda, graph,
fr, ppForceWorkload, vsite, mu_tot, t, nullptr,
shellfc_flags,
- ddOpenBalanceRegion, ddCloseBalanceRegion);
+ ddBalanceRegionHandler);
sum_epot(&(enerd->grpp), enerd->term);
if (gmx_debug_at)
{
#include <cstdio>
-#include "gromacs/domdec/dlbtiming.h"
#include "gromacs/math/arrayrefwithpadding.h"
#include "gromacs/mdlib/vsite.h"
#include "gromacs/timing/wallcycle.h"
+class DDBalanceRegionHandler;
struct gmx_enerdata_t;
struct gmx_enfrot;
struct gmx_groups_t;
double t,
rvec mu_tot,
const gmx_vsite_t *vsite,
- DdOpenBalanceRegionBeforeForceComputation ddOpenBalanceRegion,
- DdCloseBalanceRegionAfterForceComputation ddCloseBalanceRegion);
+ const DDBalanceRegionHandler &ddBalanceRegionHandler);
/* Print some final output */
void done_shellfc(FILE *fplog, gmx_shellfc_t *shellfc, int64_t numSteps);
double t,
gmx_edsam *ed,
const int flags,
- DdOpenBalanceRegionBeforeForceComputation ddOpenBalanceRegion,
- DdCloseBalanceRegionAfterForceComputation ddCloseBalanceRegion)
+ const DDBalanceRegionHandler &ddBalanceRegionHandler)
{
int cg1, i, j;
double mu[2*DIM];
* somewhere early inside the step after communication during domain
* decomposition (and not during the previous step as usual).
*/
- if (bNS &&
- ddOpenBalanceRegion == DdOpenBalanceRegionBeforeForceComputation::yes)
+ if (bNS)
{
- ddOpenBalanceRegionCpu(cr->dd, DdAllowBalanceRegionReopen::yes);
+ ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::yes);
}
cycles_wait_gpu = 0;
if (bUseGPU)
{
- if (DOMAINDECOMP(cr))
- {
- ddOpenBalanceRegionGpu(cr->dd);
- }
+ ddBalanceRegionHandler.openBeforeForceComputationGpu();
wallcycle_start(wcycle, ewcLAUNCH_GPU);
if (PAR(cr))
{
gmx_sumd(2*DIM, mu, cr);
- ddReopenBalanceRegionCpu(cr->dd);
+
+ ddBalanceRegionHandler.reopenRegionCpu();
}
for (i = 0; i < 2; i++)
cr, ms, nrnb, wcycle, mdatoms,
as_rvec_array(x.unpaddedArrayRef().data()), hist, f, &forceWithVirial, enerd, fcd,
box, inputrec->fepvals, lambda, graph, &(top->excls), fr->mu_tot,
- flags, &cycles_pme);
+ flags,
+ &cycles_pme, ddBalanceRegionHandler);
wallcycle_stop(wcycle, ewcFORCE);
* If we use a GPU this will overlap with GPU work, so in that case
* we do not close the DD force balancing region here.
*/
- if (ddCloseBalanceRegion == DdCloseBalanceRegionAfterForceComputation::yes)
- {
- ddCloseBalanceRegionCpu(cr->dd);
- }
+ ddBalanceRegionHandler.closeAfterForceComputationCpu();
+
if (bDoForces)
{
dd_move_f(cr->dd, force.unpaddedArrayRef(), fr->fshift, wcycle);
fr->fshift);
float cycles_tmp = wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);
- if (ddCloseBalanceRegion == DdCloseBalanceRegionAfterForceComputation::yes)
+ if (ddBalanceRegionHandler.useBalancingRegion())
{
DdBalanceRegionWaitedForGpu waitedForGpu = DdBalanceRegionWaitedForGpu::yes;
if (bDoForces && cycles_tmp <= gpuWaitApiOverheadMargin)
*/
waitedForGpu = DdBalanceRegionWaitedForGpu::no;
}
- ddCloseBalanceRegionGpu(cr->dd, cycles_wait_gpu, waitedForGpu);
+ ddBalanceRegionHandler.closeAfterForceComputationGpu(cycles_wait_gpu, waitedForGpu);
}
}
double t,
gmx_edsam *ed,
int flags,
- DdOpenBalanceRegionBeforeForceComputation ddOpenBalanceRegion,
- DdCloseBalanceRegionAfterForceComputation ddCloseBalanceRegion)
+ const DDBalanceRegionHandler &ddBalanceRegionHandler)
{
int cg0, cg1, i, j;
double mu[2*DIM];
dd_move_x(cr->dd, box, x.unpaddedArrayRef(), wcycle);
/* No GPU support, no move_x overlap, so reopen the balance region here */
- if (ddOpenBalanceRegion == DdOpenBalanceRegionBeforeForceComputation::yes)
- {
- ddReopenBalanceRegionCpu(cr->dd);
- }
+ ddBalanceRegionHandler.reopenRegionCpu();
}
if (inputrecNeedMutot(inputrec))
if (PAR(cr))
{
gmx_sumd(2*DIM, mu, cr);
- ddReopenBalanceRegionCpu(cr->dd);
+
+ ddBalanceRegionHandler.reopenRegionCpu();
}
for (i = 0; i < 2; i++)
{
box, inputrec->fepvals, lambda,
graph, &(top->excls), fr->mu_tot,
flags,
- &cycles_pme);
+ &cycles_pme, ddBalanceRegionHandler);
wallcycle_stop(wcycle, ewcFORCE);
{
dd_force_flop_stop(cr->dd, nrnb);
- if (ddCloseBalanceRegion == DdCloseBalanceRegionAfterForceComputation::yes)
- {
- ddCloseBalanceRegionCpu(cr->dd);
- }
+ ddBalanceRegionHandler.closeAfterForceComputationCpu();
}
computeSpecialForces(fplog, cr, inputrec, awh, enforcedRotation,
double t,
gmx_edsam *ed,
int flags,
- DdOpenBalanceRegionBeforeForceComputation ddOpenBalanceRegion,
- DdCloseBalanceRegionAfterForceComputation ddCloseBalanceRegion)
+ const DDBalanceRegionHandler &ddBalanceRegionHandler)
{
/* modify force flag if not doing nonbonded */
if (!fr->bNonbonded)
vsite, mu_tot,
t, ed,
flags,
- ddOpenBalanceRegion,
- ddCloseBalanceRegion);
+ ddBalanceRegionHandler);
break;
case ecutsGROUP:
do_force_cutsGROUP(fplog, cr, ms, inputrec,
fr, vsite, mu_tot,
t, ed,
flags,
- ddOpenBalanceRegion,
- ddCloseBalanceRegion);
+ ddBalanceRegionHandler);
break;
default:
gmx_incons("Invalid cut-off scheme passed!");
 * virial calculation and COM pulling, is thus not included in
* the balance timing, which is ok as most tasks do communication.
*/
- if (ddOpenBalanceRegion == DdOpenBalanceRegionBeforeForceComputation::yes)
- {
- ddOpenBalanceRegionCpu(cr->dd, DdAllowBalanceRegionReopen::no);
- }
+ ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::no);
}
#include "gromacs/awh/awh.h"
#include "gromacs/commandline/filenm.h"
#include "gromacs/domdec/collect.h"
+#include "gromacs/domdec/dlbtiming.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_network.h"
#include "gromacs/domdec/domdec_struct.h"
ir->nsteps, MASTER(cr), mdrunOptions.timingOptions.resetHalfway,
mdrunOptions.maximumHoursToRun, mdlog, wcycle, walltime_accounting);
- DdOpenBalanceRegionBeforeForceComputation ddOpenBalanceRegion = (DOMAINDECOMP(cr) ? DdOpenBalanceRegionBeforeForceComputation::yes : DdOpenBalanceRegionBeforeForceComputation::no);
- DdCloseBalanceRegionAfterForceComputation ddCloseBalanceRegion = (DOMAINDECOMP(cr) ? DdCloseBalanceRegionAfterForceComputation::yes : DdCloseBalanceRegionAfterForceComputation::no);
+ const DDBalanceRegionHandler ddBalanceRegionHandler(cr);
step = ir->init_step;
step_rel = 0;
nrnb, wcycle, graph, groups,
shellfc, fr, ppForceWorkload, t, mu_tot,
vsite,
- ddOpenBalanceRegion, ddCloseBalanceRegion);
+ ddBalanceRegionHandler);
}
else
{
state->lambda, graph,
fr, ppForceWorkload, vsite, mu_tot, t, ed ? ed->getLegacyED() : nullptr,
(bNS ? GMX_FORCE_NS : 0) | force_flags,
- ddOpenBalanceRegion, ddCloseBalanceRegion);
+ ddBalanceRegionHandler);
}
if (EI_VV(ir->eI) && !startingFromCheckpoint)
#include "gromacs/awh/awh.h"
#include "gromacs/commandline/filenm.h"
#include "gromacs/domdec/collect.h"
+#include "gromacs/domdec/dlbtiming.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_network.h"
#include "gromacs/domdec/domdec_struct.h"
// we don't do counter resetting in rerun - finish will always be valid
walltime_accounting_set_valid_finish(walltime_accounting);
- DdOpenBalanceRegionBeforeForceComputation ddOpenBalanceRegion =
- (DOMAINDECOMP(cr) ?
- DdOpenBalanceRegionBeforeForceComputation::yes :
- DdOpenBalanceRegionBeforeForceComputation::no);
- DdCloseBalanceRegionAfterForceComputation ddCloseBalanceRegion =
- (DOMAINDECOMP(cr) ?
- DdCloseBalanceRegionAfterForceComputation::yes :
- DdCloseBalanceRegionAfterForceComputation::no);
+ const DDBalanceRegionHandler ddBalanceRegionHandler(cr);
step = ir->init_step;
step_rel = 0;
nrnb, wcycle, graph, groups,
shellfc, fr, ppForceWorkload, t, mu_tot,
vsite,
- ddOpenBalanceRegion, ddCloseBalanceRegion);
+ ddBalanceRegionHandler);
}
else
{
state->lambda, graph,
fr, ppForceWorkload, vsite, mu_tot, t, ed,
GMX_FORCE_NS | force_flags,
- ddOpenBalanceRegion, ddCloseBalanceRegion);
+ ddBalanceRegionHandler);
}
/* Now we have the energies and forces corresponding to the
#include "gromacs/commandline/filenm.h"
#include "gromacs/domdec/collect.h"
+#include "gromacs/domdec/dlbtiming.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/domdec/partition.h"
GMX_FORCE_STATECHANGED | GMX_FORCE_ALLFORCES |
GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY |
(bNS ? GMX_FORCE_NS : 0),
- DOMAINDECOMP(cr) ?
- DdOpenBalanceRegionBeforeForceComputation::yes :
- DdOpenBalanceRegionBeforeForceComputation::no,
- DOMAINDECOMP(cr) ?
- DdCloseBalanceRegionAfterForceComputation::yes :
- DdCloseBalanceRegionAfterForceComputation::no);
+ DDBalanceRegionHandler(cr));
/* Clear the unused shake virial and pressure */
clear_mat(shake_vir);
t,
mu_tot,
vsite,
- DdOpenBalanceRegionBeforeForceComputation::no,
- DdCloseBalanceRegionAfterForceComputation::no);
+ DDBalanceRegionHandler(nullptr));
bNS = false;
step++;
}
#include "gromacs/awh/awh.h"
#include "gromacs/commandline/filenm.h"
#include "gromacs/domdec/collect.h"
+#include "gromacs/domdec/dlbtiming.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_network.h"
#include "gromacs/domdec/domdec_struct.h"
// we don't do counter resetting in rerun - finish will always be valid
walltime_accounting_set_valid_finish(walltime_accounting);
- DdOpenBalanceRegionBeforeForceComputation ddOpenBalanceRegion = (DOMAINDECOMP(cr) ? DdOpenBalanceRegionBeforeForceComputation::yes : DdOpenBalanceRegionBeforeForceComputation::no);
- DdCloseBalanceRegionAfterForceComputation ddCloseBalanceRegion = (DOMAINDECOMP(cr) ? DdCloseBalanceRegionAfterForceComputation::yes : DdCloseBalanceRegionAfterForceComputation::no);
+ const DDBalanceRegionHandler ddBalanceRegionHandler(cr);
step = ir->init_step;
step_rel = 0;
nrnb, wcycle, graph, groups,
shellfc, fr, ppForceWorkload, t, mu_tot,
vsite,
- ddOpenBalanceRegion, ddCloseBalanceRegion);
+ ddBalanceRegionHandler);
}
else
{
state->lambda, graph,
fr, ppForceWorkload, vsite, mu_tot, t, ed,
GMX_FORCE_NS | force_flags,
- ddOpenBalanceRegion, ddCloseBalanceRegion);
+ ddBalanceRegionHandler);
}
/* Now we have the energies and forces corresponding to the
#include <cfenv>
#include "gromacs/commandline/filenm.h"
+#include "gromacs/domdec/dlbtiming.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/fileio/confio.h"
GMX_FORCE_NONBONDED | GMX_FORCE_ENERGY |
(bNS ? GMX_FORCE_DYNAMICBOX | GMX_FORCE_NS : 0) |
(bStateChanged ? GMX_FORCE_STATECHANGED : 0),
- DdOpenBalanceRegionBeforeForceComputation::no,
- DdCloseBalanceRegionAfterForceComputation::no);
+ DDBalanceRegionHandler(nullptr));
std::feclearexcept(FE_DIVBYZERO | FE_INVALID | FE_OVERFLOW);
std::feupdateenv(&floatingPointEnvironment);