Removed inputrec as argument to ddbox functions.
Changed all const pointers to references; this trickles down to
the PME load balancing code.
This is preparation for the addition of update groups to DD.
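For illustration, the call-site pattern this introduces (a sketch only,
taken from the hunks below): functions that previously took const t_inputrec *ir
and gmx_domdec_t *dd now take references, so callers dereference and drop ir, e.g.

    set_ddbox(*dd, bMasterState, state_local->box,
              bNStGlobalComm, state_local->x, &ddbox);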
Change-Id: Ibc1c8b0f53397e4d15df7aeaa6f8561bd32e9a18
return ddnodeid;
}
-static gmx_bool dynamic_dd_box(const gmx_ddbox_t *ddbox, const t_inputrec *ir)
+static bool dynamic_dd_box(const gmx_domdec_t &dd)
{
- return (ddbox->nboundeddim < DIM || inputrecDynamicBox(ir));
+ return (dd.numBoundedDimensions < DIM || dd.haveDynamicBox);
}
int ddglatnr(const gmx_domdec_t *dd, int i)
const real tenPercentMargin = 1.1;
gmx_domdec_comm_t *comm = dd->comm;
- dd->npbcdim = ePBC2npbcdim(ir->ePBC);
- dd->bScrewPBC = (ir->ePBC == epbcSCREW);
+ dd->npbcdim = ePBC2npbcdim(ir->ePBC);
+ dd->numBoundedDimensions = inputrec2nboundeddim(ir);
+ dd->haveDynamicBox = inputrecDynamicBox(ir);
+ dd->bScrewPBC = (ir->ePBC == epbcSCREW);
dd->pme_recv_f_alloc = 0;
dd->pme_recv_f_buf = nullptr;
{
copy_ivec(options.numCells, dd->nc);
set_dd_dim(mdlog, dd);
- set_ddbox_cr(cr, &dd->nc, ir, box, xGlobal, ddbox);
+ set_ddbox_cr(*cr, &dd->nc, *ir, box, xGlobal, ddbox);
if (options.numPmeRanks >= 0)
{
}
else
{
- set_ddbox_cr(cr, nullptr, ir, box, xGlobal, ddbox);
+ set_ddbox_cr(*cr, nullptr, *ir, box, xGlobal, ddbox);
/* We need to choose the optimal DD grid and possibly PME nodes */
real limit =
}
else
{
- if (dynamic_dd_box(ddbox, ir))
+ if (dynamic_dd_box(*dd))
{
log->writeLine("(the following are initial values, they could change due to box deformation)");
}
return dd;
}
-static gmx_bool test_dd_cutoff(t_commrec *cr,
- t_state *state, const t_inputrec *ir,
- real cutoff_req)
+static gmx_bool test_dd_cutoff(t_commrec *cr,
+ const t_state &state,
+ real cutoffRequested)
{
gmx_domdec_t *dd;
gmx_ddbox_t ddbox;
dd = cr->dd;
- set_ddbox(dd, false, ir, state->box, true, state->x, &ddbox);
+ set_ddbox(*dd, false, state.box, true, state.x, &ddbox);
LocallyLimited = 0;
dim = dd->dim[d];
inv_cell_size = DD_CELL_MARGIN*dd->nc[dim]/ddbox.box_size[dim];
- if (dynamic_dd_box(&ddbox, ir))
+ if (dynamic_dd_box(*dd))
{
inv_cell_size *= DD_PRES_SCALE_MARGIN;
}
- np = 1 + static_cast<int>(cutoff_req*inv_cell_size*ddbox.skew_fac[dim]);
+ np = 1 + static_cast<int>(cutoffRequested*inv_cell_size*ddbox.skew_fac[dim]);
if (!isDlbDisabled(dd->comm) && (dim < ddbox.npbcdim) && (dd->comm->cd[d].np_dlb > 0))
{
* cut-off, we could still fix it, but this gets very complicated.
* Without fixing here, we might actually need more checks.
*/
- if ((dd->comm->cell_x1[dim] - dd->comm->cell_x0[dim])*ddbox.skew_fac[dim]*dd->comm->cd[d].np_dlb < cutoff_req)
+ real cellSizeAlongDim = (dd->comm->cell_x1[dim] - dd->comm->cell_x0[dim])*ddbox.skew_fac[dim];
+ if (cellSizeAlongDim*dd->comm->cd[d].np_dlb < cutoffRequested)
{
LocallyLimited = 1;
}
* Actually we shouldn't, because then the grid jump data is not set.
*/
if (isDlbOn(dd->comm) &&
- check_grid_jump(0, dd, cutoff_req, &ddbox, FALSE))
+ check_grid_jump(0, dd, cutoffRequested, &ddbox, FALSE))
{
LocallyLimited = 1;
}
return TRUE;
}
-gmx_bool change_dd_cutoff(t_commrec *cr, t_state *state, const t_inputrec *ir,
- real cutoff_req)
+gmx_bool change_dd_cutoff(t_commrec *cr,
+ const t_state &state,
+ real cutoffRequested)
{
gmx_bool bCutoffAllowed;
- bCutoffAllowed = test_dd_cutoff(cr, state, ir, cutoff_req);
+ bCutoffAllowed = test_dd_cutoff(cr, state, cutoffRequested);
if (bCutoffAllowed)
{
- cr->dd->comm->cutoff = cutoff_req;
+ cr->dd->comm->cutoff = cutoffRequested;
}
return bCutoffAllowed;
auto xGlobal = positionsFromStatePointer(state_global);
- set_ddbox(dd, true, ir,
+ set_ddbox(*dd, true,
DDMASTER(dd) ? state_global->box : nullptr,
true, xGlobal,
&ddbox);
dd_set_cginfo(dd->globalAtomGroupIndices, 0, dd->ncg_home, fr, comm->bLocalCG);
- set_ddbox(dd, bMasterState, ir, state_local->box,
+ set_ddbox(*dd, bMasterState, state_local->box,
true, state_local->x, &ddbox);
bRedist = isDlbOn(comm);
copy_rvec(comm->box0, ddbox.box0 );
copy_rvec(comm->box_size, ddbox.box_size);
}
- set_ddbox(dd, bMasterState, ir, state_local->box,
+ set_ddbox(*dd, bMasterState, state_local->box,
bNStGlobalComm, state_local->x, &ddbox);
bBoxChanged = TRUE;
copy_rvec(ddbox.box0, comm->box0 );
copy_rvec(ddbox.box_size, comm->box_size);
- set_dd_cell_sizes(dd, &ddbox, dynamic_dd_box(&ddbox, ir), bMasterState, bDoDLB,
+ set_dd_cell_sizes(dd, &ddbox, dynamic_dd_box(*dd), bMasterState, bDoDLB,
step, wcycle);
if (comm->nstDDDumpGrid > 0 && step % comm->nstDDDumpGrid == 0)
*
 * This could fail when trying to increase the cut-off; in that case
 * FALSE is returned and the cut-off is not modified.
+ *
+ * \param[in] cr Communication record
+ * \param[in] state State, used for computing the dimensions of the system
+ * \param[in] cutoffRequested The requested atom-to-atom cut-off distance, usually the pair-list cut-off distance
*/
-gmx_bool change_dd_cutoff(struct t_commrec *cr,
- t_state *state, const t_inputrec *ir,
- real cutoff_req );
+gmx_bool change_dd_cutoff(t_commrec *cr,
+ const t_state &state,
+ real cutoffRequested);
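/* Illustrative usage of the new signature (a sketch, not part of the patch;
 * newPairlistCutoff is a hypothetical caller variable):
 *
 *     if (!change_dd_cutoff(cr, *state, newPairlistCutoff))
 *     {
 *         // FALSE: the requested cut-off is too long for the current
 *         // domain decomposition; keep the previous cut-off
 *     }
 */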
/*! \brief Limit DLB to preserve the option of returning to the current cut-off.
*
/* In domdec_box.c */
/*! \brief Set the box and PBC data in \p ddbox */
-void set_ddbox(gmx_domdec_t *dd, bool masterRankHasTheSystemState,
- const t_inputrec *ir, const matrix box,
- bool calculateUnboundedSize,
- gmx::ArrayRef<const gmx::RVec> x,
- gmx_ddbox_t *ddbox);
+void set_ddbox(const gmx_domdec_t &dd,
+ bool masterRankHasTheSystemState,
+ const matrix box,
+ bool calculateUnboundedSize,
+ gmx::ArrayRef<const gmx::RVec> x,
+ gmx_ddbox_t *ddbox);
/*! \brief Set the box and PBC data in \p ddbox */
-void set_ddbox_cr(const t_commrec *cr, const ivec *dd_nc,
- const t_inputrec *ir, const matrix box,
- gmx::ArrayRef<const gmx::RVec> x,
- gmx_ddbox_t *ddbox);
+void set_ddbox_cr(const t_commrec &cr,
+ const ivec *dd_nc,
+ const t_inputrec &ir,
+ const matrix box,
+ gmx::ArrayRef<const gmx::RVec> x,
+ gmx_ddbox_t *ddbox);
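/* Illustrative call patterns (sketch, based on the hunks above): set_ddbox_cr()
 * is used during DD setup, before a gmx_domdec_t exists, so it still needs the
 * t_inputrec, e.g.
 *     set_ddbox_cr(*cr, &dd->nc, *ir, box, xGlobal, ddbox);
 * whereas set_ddbox() takes the PBC and bounded-dimension information from the
 * domdec struct:
 *     set_ddbox(*dd, bMasterState, state_local->box, true, state_local->x, &ddbox);
 */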
#endif
#include "domdec_internal.h"
-/*! \brief Calculates the average and standard deviation in 3D of n atoms */
-static void calc_pos_av_stddev(int n, const rvec *x,
- rvec av, rvec stddev,
- const MPI_Comm *mpiCommunicator)
+/*! \brief Calculates the average and standard deviation in 3D of the atom positions */
+static void calc_pos_av_stddev(gmx::ArrayRef<const gmx::RVec> x,
+ rvec av,
+ rvec stddev,
+ const MPI_Comm *mpiCommunicator)
{
dvec s1, s2;
clear_dvec(s1);
clear_dvec(s2);
- for (int i = 0; i < n; i++)
+ for (const gmx::RVec &coord : x)
{
for (int d = 0; d < DIM; d++)
{
- s1[d] += x[i][d];
- s2[d] += x[i][d]*x[i][d];
+ s1[d] += coord[d];
+ s2[d] += coord[d]*coord[d];
}
}
+ /* With mpiCommunicator != nullptr, x.size() is the home atom count */
+ int numAtoms = x.size();
#if GMX_MPI
if (mpiCommunicator)
{
sendBuffer[d] = s1[d];
sendBuffer[DIM + d] = s2[d];
}
- sendBuffer[6] = n;
+ sendBuffer[6] = numAtoms;
MPI_Allreduce(sendBuffer, receiveBuffer, c_bufSize, MPI_DOUBLE,
MPI_SUM, *mpiCommunicator);
s1[d] = receiveBuffer[d];
s2[d] = receiveBuffer[DIM + d];
}
- n = gmx::roundToInt(receiveBuffer[6]);
+ numAtoms = gmx::roundToInt(receiveBuffer[6]);
}
#else // GMX_MPI
GMX_UNUSED_VALUE(mpiCommunicator);
#endif // GMX_MPI
- dsvmul(1.0/n, s1, s1);
- dsvmul(1.0/n, s2, s2);
+ dsvmul(1.0/numAtoms, s1, s1);
+ dsvmul(1.0/numAtoms, s2, s2);
for (int d = 0; d < DIM; d++)
{
}
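/* For reference: after the normalization above, s1[d] holds the mean and
 * s2[d] the mean of squares for each dimension, so the outputs follow the
 * standard identity
 *     av[d]     = s1[d]
 *     stddev[d] = sqrt(s2[d] - s1[d]*s1[d])    (variance = E[x^2] - E[x]^2)
 */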
/*! \brief This function calculates bounding box and pbc info and populates ddbox */
-static void low_set_ddbox(const t_inputrec *ir, const ivec *dd_nc, const matrix box,
- bool calculateUnboundedSize,
- int numAtoms, const rvec *x,
- const MPI_Comm *mpiCommunicator,
- gmx_ddbox_t *ddbox)
+static void low_set_ddbox(int numPbcDimensions,
+ int numBoundedDimensions,
+ const ivec *dd_nc,
+ const matrix box,
+ bool calculateUnboundedSize,
+ gmx::ArrayRef<const gmx::RVec> x,
+ const MPI_Comm *mpiCommunicator,
+ gmx_ddbox_t *ddbox)
{
rvec av, stddev;
real b0, b1;
int d;
- ddbox->npbcdim = ePBC2npbcdim(ir->ePBC);
- ddbox->nboundeddim = inputrec2nboundeddim(ir);
+ ddbox->npbcdim = numPbcDimensions;
+ ddbox->nboundeddim = numBoundedDimensions;
- for (d = 0; d < ddbox->nboundeddim; d++)
+ for (d = 0; d < numBoundedDimensions; d++)
{
ddbox->box0[d] = 0;
ddbox->box_size[d] = box[d][d];
if (ddbox->nboundeddim < DIM && calculateUnboundedSize)
{
- calc_pos_av_stddev(numAtoms, x, av, stddev, mpiCommunicator);
+ calc_pos_av_stddev(x, av, stddev, mpiCommunicator);
/* GRID_STDDEV_FAC * stddev
* gives a uniform load for a rectangular block of cg's.
set_tric_dir(dd_nc, ddbox, box);
}
-void set_ddbox(gmx_domdec_t *dd, bool masterRankHasTheSystemState,
- const t_inputrec *ir, const matrix box,
- bool calculateUnboundedSize,
- gmx::ArrayRef<const gmx::RVec> x,
- gmx_ddbox_t *ddbox)
+void set_ddbox(const gmx_domdec_t &dd,
+ bool masterRankHasTheSystemState,
+ const matrix box,
+ bool calculateUnboundedSize,
+ gmx::ArrayRef<const gmx::RVec> x,
+ gmx_ddbox_t *ddbox)
{
if (!masterRankHasTheSystemState || DDMASTER(dd))
{
- bool needToReduceCoordinateData = (!masterRankHasTheSystemState && dd->nnodes > 1);
+ bool needToReduceCoordinateData =
+ (!masterRankHasTheSystemState && dd.nnodes > 1);
+ gmx::ArrayRef<const gmx::RVec> xRef =
+ constArrayRefFromArray(x.data(), masterRankHasTheSystemState ? x.size() : dd.comm->atomRanges.numHomeAtoms());
- low_set_ddbox(ir, &dd->nc, box, calculateUnboundedSize,
- masterRankHasTheSystemState ? x.size() : dd->comm->atomRanges.numHomeAtoms(), as_rvec_array(x.data()),
- needToReduceCoordinateData ? &dd->mpi_comm_all : nullptr,
+ low_set_ddbox(dd.npbcdim, dd.numBoundedDimensions,
+ &dd.nc, box, calculateUnboundedSize, xRef,
+ needToReduceCoordinateData ? &dd.mpi_comm_all : nullptr,
ddbox);
}
if (masterRankHasTheSystemState)
{
- dd_bcast(dd, sizeof(gmx_ddbox_t), ddbox);
+ dd_bcast(&dd, sizeof(gmx_ddbox_t), ddbox);
}
}
-void set_ddbox_cr(const t_commrec *cr, const ivec *dd_nc,
- const t_inputrec *ir, const matrix box,
- gmx::ArrayRef<const gmx::RVec> x,
- gmx_ddbox_t *ddbox)
+void set_ddbox_cr(const t_commrec &cr,
+ const ivec *dd_nc,
+ const t_inputrec &ir,
+ const matrix box,
+ gmx::ArrayRef<const gmx::RVec> x,
+ gmx_ddbox_t *ddbox)
{
- if (MASTER(cr))
+ if (MASTER(&cr))
{
- low_set_ddbox(ir, dd_nc, box, true, x.size(), as_rvec_array(x.data()), nullptr, ddbox);
+ low_set_ddbox(ePBC2npbcdim(ir.ePBC), inputrec2nboundeddim(&ir),
+ dd_nc, box, true, x, nullptr, ddbox);
}
- gmx_bcast(sizeof(gmx_ddbox_t), ddbox, cr);
+ gmx_bcast(sizeof(gmx_ddbox_t), ddbox, &cr);
}
#define DD_MAXZONE 8
//! Max number of izones in domain decomposition
#define DD_MAXIZONE 4
-//! Are we the master node for domain decomposition
-#define DDMASTER(dd) ((dd)->rank == (dd)->masterrank)
struct AtomDistribution;
struct gmx_domdec_comm_t;
int ndim;
ivec dim; /* indexed by 0 to ndim */
- /* PBC from dim 0 to npbcdim */
- int npbcdim;
+ /* TODO: Move the next 4 members, and more from domdec_internal.h, to a simulation system object */
+
+ /* PBC from dim 0 (X) to npbcdim */
+ int npbcdim;
+ /* The system is bounded from 0 (X) to numBoundedDimensions */
+ int numBoundedDimensions;
+ /* Does the box size change during the simulation? */
+ bool haveDynamicBox;
/* Screw PBC? */
gmx_bool bScrewPBC;
rvec *pme_recv_f_buf = nullptr;
};
+//! Are we the master node for domain decomposition
+static inline bool DDMASTER(const gmx_domdec_t &dd)
+{
+ return dd.rank == dd.masterrank;
+}
+
+//! Are we the master node for domain decomposition (deprecated pointer overload)
+static inline bool DDMASTER(const gmx_domdec_t *dd)
+{
+ return dd->rank == dd->masterrank;
+}
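/* Illustrative: with the overloads above, both call forms compile during the
 * transition away from the old macro, e.g.
 *     if (DDMASTER(*dd)) { ... }   // preferred reference overload
 *     if (DDMASTER(dd))  { ... }   // deprecated pointer overload, kept for existing call sites
 */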
+
#endif
void pme_loadbal_init(pme_load_balancing_t **pme_lb_p,
t_commrec *cr,
const gmx::MDLogger &mdlog,
- const t_inputrec *ir,
- matrix box,
- const interaction_const_t *ic,
- const NbnxnListParameters *listParams,
+ const t_inputrec &ir,
+ const matrix box,
+ const interaction_const_t &ic,
+ const NbnxnListParameters &listParams,
gmx_pme_t *pmedata,
gmx_bool bUseGPU,
gmx_bool *bPrinting)
{
- GMX_RELEASE_ASSERT(ir->cutoff_scheme != ecutsGROUP, "PME tuning is not supported with cutoff-scheme=group (because it contains bugs)");
+ GMX_RELEASE_ASSERT(ir.cutoff_scheme != ecutsGROUP, "PME tuning is not supported with cutoff-scheme=group (because it contains bugs)");
pme_load_balancing_t *pme_lb;
real spm, sp;
int d;
// Note that we don't (yet) support PME load balancing with LJ-PME only.
- GMX_RELEASE_ASSERT(EEL_PME(ir->coulombtype), "pme_loadbal_init called without PME electrostatics");
+ GMX_RELEASE_ASSERT(EEL_PME(ir.coulombtype), "pme_loadbal_init called without PME electrostatics");
// To avoid complexity, we require a single cut-off with PME for q+LJ.
// This is checked by grompp, but it doesn't hurt to check again.
- GMX_RELEASE_ASSERT(!(EEL_PME(ir->coulombtype) && EVDW_PME(ir->vdwtype) && ir->rcoulomb != ir->rvdw), "With Coulomb and LJ PME, rcoulomb should be equal to rvdw");
+ GMX_RELEASE_ASSERT(!(EEL_PME(ir.coulombtype) && EVDW_PME(ir.vdwtype) && ir.rcoulomb != ir.rvdw), "With Coulomb and LJ PME, rcoulomb should be equal to rvdw");
snew(pme_lb, 1);
/* Any number of stages >= 2 is supported */
pme_lb->nstage = 2;
- pme_lb->cutoff_scheme = ir->cutoff_scheme;
+ pme_lb->cutoff_scheme = ir.cutoff_scheme;
- pme_lb->rbufOuter_coulomb = listParams->rlistOuter - ic->rcoulomb;
- pme_lb->rbufOuter_vdw = listParams->rlistOuter - ic->rvdw;
- pme_lb->rbufInner_coulomb = listParams->rlistInner - ic->rcoulomb;
- pme_lb->rbufInner_vdw = listParams->rlistInner - ic->rvdw;
+ pme_lb->rbufOuter_coulomb = listParams.rlistOuter - ic.rcoulomb;
+ pme_lb->rbufOuter_vdw = listParams.rlistOuter - ic.rvdw;
+ pme_lb->rbufInner_coulomb = listParams.rlistInner - ic.rcoulomb;
+ pme_lb->rbufInner_vdw = listParams.rlistInner - ic.rvdw;
/* Scale box with Ewald wall factor; note that we can't always use
 * pmedata->boxScaler as it's not available with separate PME ranks.
*/
- EwaldBoxZScaler boxScaler(*ir);
+ EwaldBoxZScaler boxScaler(ir);
boxScaler.scaleBox(box, pme_lb->box_start);
pme_lb->n = 1;
snew(pme_lb->setup, pme_lb->n);
- pme_lb->rcut_vdw = ic->rvdw;
- pme_lb->rcut_coulomb_start = ir->rcoulomb;
+ pme_lb->rcut_vdw = ic.rvdw;
+ pme_lb->rcut_coulomb_start = ir.rcoulomb;
pme_lb->cur = 0;
- pme_lb->setup[0].rcut_coulomb = ic->rcoulomb;
- pme_lb->setup[0].rlistOuter = listParams->rlistOuter;
- pme_lb->setup[0].rlistInner = listParams->rlistInner;
- pme_lb->setup[0].grid[XX] = ir->nkx;
- pme_lb->setup[0].grid[YY] = ir->nky;
- pme_lb->setup[0].grid[ZZ] = ir->nkz;
- pme_lb->setup[0].ewaldcoeff_q = ic->ewaldcoeff_q;
- pme_lb->setup[0].ewaldcoeff_lj = ic->ewaldcoeff_lj;
+ pme_lb->setup[0].rcut_coulomb = ic.rcoulomb;
+ pme_lb->setup[0].rlistOuter = listParams.rlistOuter;
+ pme_lb->setup[0].rlistInner = listParams.rlistInner;
+ pme_lb->setup[0].grid[XX] = ir.nkx;
+ pme_lb->setup[0].grid[YY] = ir.nky;
+ pme_lb->setup[0].grid[ZZ] = ir.nkz;
+ pme_lb->setup[0].ewaldcoeff_q = ic.ewaldcoeff_q;
+ pme_lb->setup[0].ewaldcoeff_lj = ic.ewaldcoeff_lj;
if (!pme_lb->bSepPMERanks)
{
}
pme_lb->setup[0].spacing = spm;
- if (ir->fourier_spacing > 0)
+ if (ir.fourier_spacing > 0)
{
- pme_lb->cut_spacing = ir->rcoulomb/ir->fourier_spacing;
+ pme_lb->cut_spacing = ir.rcoulomb/ir.fourier_spacing;
}
else
{
- pme_lb->cut_spacing = ir->rcoulomb/pme_lb->setup[0].spacing;
+ pme_lb->cut_spacing = ir.rcoulomb/pme_lb->setup[0].spacing;
}
pme_lb->stage = 0;
*/
pme_lb->bBalance = (pme_lb->bActive && (bUseGPU && !pme_lb->bSepPMERanks));
- pme_lb->step_rel_stop = PMETunePeriod*ir->nstlist;
+ pme_lb->step_rel_stop = PMETunePeriod*ir.nstlist;
/* Delay DD load balancing when GPUs are used */
if (pme_lb->bActive && DOMAINDECOMP(cr) && cr->dd->nnodes > 1 && bUseGPU)
FILE *fp_err,
FILE *fp_log,
const gmx::MDLogger &mdlog,
- const t_inputrec *ir,
- t_state *state,
+ const t_inputrec &ir,
+ const t_state &state,
double cycles,
interaction_const_t *ic,
struct nonbonded_verlet_t *nbv,
set = &pme_lb->setup[pme_lb->cur];
set->count++;
- rtab = ir->rlist + ir->tabext;
+ rtab = ir.rlist + ir.tabext;
if (set->count % 2 == 1)
{
else
{
/* Find the next setup */
- OK = pme_loadbal_increase_cutoff(pme_lb, ir->pme_order, cr->dd);
+ OK = pme_loadbal_increase_cutoff(pme_lb, ir.pme_order, cr->dd);
if (!OK)
{
pme_lb->elimited = epmelblimMAXSCALING;
}
- if (OK && ir->ePBC != epbcNONE)
+ if (OK && ir.ePBC != epbcNONE)
{
OK = (gmx::square(pme_lb->setup[pme_lb->cur+1].rlistOuter)
- <= max_cutoff2(ir->ePBC, state->box));
+ <= max_cutoff2(ir.ePBC, state.box));
if (!OK)
{
pme_lb->elimited = epmelblimBOX;
if (DOMAINDECOMP(cr))
{
- OK = change_dd_cutoff(cr, state, ir,
+ OK = change_dd_cutoff(cr, state,
pme_lb->setup[pme_lb->cur].rlistOuter);
if (!OK)
{
if (DOMAINDECOMP(cr) && pme_lb->stage > 0)
{
- OK = change_dd_cutoff(cr, state, ir, pme_lb->setup[pme_lb->cur].rlistOuter);
+ OK = change_dd_cutoff(cr, state, pme_lb->setup[pme_lb->cur].rlistOuter);
if (!OK)
{
/* For some reason the chosen cut-off is incompatible with DD.
* copying part of the old pointers.
*/
gmx_pme_reinit(&set->pmedata,
- cr, pme_lb->setup[0].pmedata, ir,
+ cr, pme_lb->setup[0].pmedata, &ir,
set->grid, set->ewaldcoeff_q, set->ewaldcoeff_lj);
}
*pmedata = set->pmedata;
FILE *fp_err,
FILE *fp_log,
const gmx::MDLogger &mdlog,
- const t_inputrec *ir,
+ const t_inputrec &ir,
t_forcerec *fr,
- t_state *state,
+ const t_state &state,
gmx_wallcycle_t wcycle,
int64_t step,
int64_t step_rel,
wallcycle_get(wcycle, ewcSTEP, &pme_lb->cycles_n, &pme_lb->cycles_c);
/* Before the first step we haven't done any steps yet.
- * Also handle cases where ir->init_step % ir->nstlist != 0.
+ * Also handle cases where ir.init_step % ir.nstlist != 0.
*/
- if (pme_lb->cycles_n < ir->nstlist)
+ if (pme_lb->cycles_n < ir.nstlist)
{
return;
}
/* Sanity check, we expect nstlist cycle counts */
- if (pme_lb->cycles_n - n_prev != ir->nstlist)
+ if (pme_lb->cycles_n - n_prev != ir.nstlist)
{
/* We could return here, but it's safer to issue an error and quit */
gmx_incons("pme_loadbal_do called at an interval != nstlist");
* is not over the last nstlist steps, but the nstlist steps before
* that. So the first useful ratio is available at step_rel=3*nstlist.
*/
- else if (step_rel >= 3*ir->nstlist)
+ else if (step_rel >= 3*ir.nstlist)
{
if (DDMASTER(cr->dd))
{
*/
continue_pme_loadbal(pme_lb, TRUE);
pme_lb->bTriggerOnDLB = TRUE;
- pme_lb->step_rel_stop = step_rel + PMETunePeriod*ir->nstlist;
+ pme_lb->step_rel_stop = step_rel + PMETunePeriod*ir.nstlist;
}
else
{
/* Update deprecated rlist in forcerec to stay in sync with fr->nbv */
fr->rlist = fr->nbv->listParams->rlistOuter;
- if (ir->eDispCorr != edispcNO)
+ if (ir.eDispCorr != edispcNO)
{
- calc_enervirdiff(nullptr, ir->eDispCorr, fr);
+ calc_enervirdiff(nullptr, ir.eDispCorr, fr);
}
}
void pme_loadbal_init(pme_load_balancing_t **pme_lb_p,
t_commrec *cr,
const gmx::MDLogger &mdlog,
- const t_inputrec *ir,
- matrix box,
- const interaction_const_t *ic,
- const NbnxnListParameters *listParams,
+ const t_inputrec &ir,
+ const matrix box,
+ const interaction_const_t &ic,
+ const NbnxnListParameters &listParams,
gmx_pme_t *pmedata,
gmx_bool bUseGPU,
gmx_bool *bPrinting);
FILE *fp_err,
FILE *fp_log,
const gmx::MDLogger &mdlog,
- const t_inputrec *ir,
+ const t_inputrec &ir,
t_forcerec *fr,
- t_state *state,
+ const t_state &state,
gmx_wallcycle_t wcycle,
int64_t step,
int64_t step_rel,
}
t_state state_tmp;
copy_mat(box, state_tmp.box);
- bDD = change_dd_cutoff(cr, &state_tmp, ir, rlist_new);
+ bDD = change_dd_cutoff(cr, state_tmp, rlist_new);
}
if (debug)
!mdrunOptions.reproducible && ir->cutoff_scheme != ecutsGROUP);
if (bPMETune)
{
- pme_loadbal_init(&pme_loadbal, cr, mdlog, ir, state->box,
- fr->ic, fr->nbv->listParams.get(), fr->pmedata, use_GPU(fr->nbv),
+ pme_loadbal_init(&pme_loadbal, cr, mdlog, *ir, state->box,
+ *fr->ic, *fr->nbv->listParams, fr->pmedata, use_GPU(fr->nbv),
&bPMETunePrinting);
}
pme_loadbal_do(pme_loadbal, cr,
(mdrunOptions.verbose && MASTER(cr)) ? stderr : nullptr,
fplog, mdlog,
- ir, fr, state,
+ *ir, fr, *state,
wcycle,
step, step_rel,
&bPMETunePrinting);