#include <ctime>
#include <algorithm>
+#include <limits>
#include <vector>
#include "gromacs/commandline/filenm.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/mdatom.h"
#include "gromacs/mdtypes/mdrunoptions.h"
+#include "gromacs/mdtypes/observablesreducer.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/timing/wallcycle.h"
+//! Starts wall-time accounting and the Run wallcycle counter, then prints the EM start message to the log
static void print_em_start(FILE* fplog,
const t_commrec* cr,
gmx_walltime_accounting_t walltime_accounting,
- gmx_wallcycle_t wcycle,
+ gmx_wallcycle* wcycle,
const char* name)
{
walltime_accounting_start_time(walltime_accounting);
- wallcycle_start(wcycle, ewcRUN);
+ wallcycle_start(wcycle, WallCycleCounter::Run);
print_start(fplog, cr, walltime_accounting, name);
}
//! Stop counting time for EM
-static void em_time_end(gmx_walltime_accounting_t walltime_accounting, gmx_wallcycle_t wcycle)
+static void em_time_end(gmx_walltime_accounting_t walltime_accounting, gmx_wallcycle* wcycle)
{
+ // Stop the Run cycle counter first, then close out wall-time accounting (mirrors print_em_start)
- wallcycle_stop(wcycle, ewcRUN);
+ wallcycle_stop(wcycle, WallCycleCounter::Run);
walltime_accounting_end_time(walltime_accounting);
}
}
}
- if (la_max >= 0 && DOMAINDECOMP(cr))
+ if (la_max >= 0 && haveDDAtomOrdering(*cr))
{
a_max = cr->dd->globalAtomIndices[la_max];
}
gmx::ImdSession* imdSession,
pull_t* pull_work,
t_state* state_global,
- const gmx_mtop_t* top_global,
+ const gmx_mtop_t& top_global,
em_state_t* ems,
gmx_localtop_t* top,
t_nrnb* nrnb,
}
int* fep_state = MASTER(cr) ? &state_global->fep_state : nullptr;
gmx::ArrayRef<real> lambda = MASTER(cr) ? state_global->lambda : gmx::ArrayRef<real>();
- initialize_lambdas(fplog, *ir, MASTER(cr), fep_state, lambda);
-
- if (ir->eI == eiNM)
+ initialize_lambdas(fplog,
+ ir->efep,
+ ir->bSimTemp,
+ *ir->fepvals,
+ ir->simtempvals->temperatures,
+ gmx::arrayRefFromArray(ir->opts.ref_t, ir->opts.ngtc),
+ MASTER(cr),
+ fep_state,
+ lambda);
+
+ if (ir->eI == IntegrationAlgorithm::NM)
{
GMX_ASSERT(shellfc != nullptr, "With NM we always support shells");
top_global,
constr ? constr->numFlexibleConstraints() : 0,
ir->nstcalcenergy,
- DOMAINDECOMP(cr),
+ haveDDAtomOrdering(*cr),
thisRankHasDuty(cr, DUTY_PME));
}
else
}
}
- if (DOMAINDECOMP(cr))
+ if (haveDDAtomOrdering(*cr))
{
- dd_init_local_state(cr->dd, state_global, &ems->s);
+ // Local state only becomes valid now.
+ dd_init_local_state(*cr->dd, state_global, &ems->s);
/* Distribute the charge groups over the nodes from the master node */
dd_partition_system(fplog,
TRUE,
1,
state_global,
- *top_global,
- ir,
+ top_global,
+ *ir,
imdSession,
pull_work,
&ems->s,
nrnb,
nullptr,
FALSE);
- dd_store_state(cr->dd, &ems->s);
+ dd_store_state(*cr->dd, &ems->s);
}
else
{
state_change_natoms(&ems->s, ems->s.natoms);
mdAlgorithmsSetupAtomData(
- cr, ir, *top_global, top, fr, &ems->f, mdAtoms, constr, vsite, shellfc ? *shellfc : nullptr);
+ cr, *ir, top_global, top, fr, &ems->f, mdAtoms, constr, vsite, shellfc ? *shellfc : nullptr);
}
- update_mdatoms(mdAtoms->mdatoms(), ems->s.lambda[efptMASS]);
+ update_mdatoms(mdAtoms->mdatoms(), ems->s.lambda[FreeEnergyPerturbationCouplingType::Mass]);
if (constr)
{
// TODO how should this cross-module support dependency be managed?
- if (ir->eConstrAlg == econtSHAKE && gmx_mtop_ftype_count(top_global, F_CONSTR) > 0)
+ if (ir->eConstrAlg == ConstraintAlgorithm::Shake && gmx_mtop_ftype_count(top_global, F_CONSTR) > 0)
{
gmx_fatal(FARGS,
"Can not do energy minimization with %s, use %s\n",
- econstr_names[econtSHAKE],
- econstr_names[econtLINCS]);
+ enumValueToString(ConstraintAlgorithm::Shake),
+ enumValueToString(ConstraintAlgorithm::Lincs));
}
if (!ir->bContinuation)
ems->s.x.arrayRefWithPadding(),
ArrayRef<RVec>(),
ems->s.box,
- ems->s.lambda[efptFEP],
+ ems->s.lambda[FreeEnergyPerturbationCouplingType::Fep],
&dvdl_constr,
gmx::ArrayRefWithPadding<RVec>(),
computeVirial,
static void finish_em(const t_commrec* cr,
gmx_mdoutf_t outf,
gmx_walltime_accounting_t walltime_accounting,
- gmx_wallcycle_t wcycle)
+ gmx_wallcycle* wcycle)
{
if (!thisRankHasDuty(cr, DUTY_PME))
{
gmx_bool bX,
gmx_bool bF,
const char* confout,
- const gmx_mtop_t* top_global,
+ const gmx_mtop_t& top_global,
const t_inputrec* ir,
int64_t step,
em_state_t* state,
cr,
outf,
mdof_flags,
- top_global->natoms,
+ top_global.natoms,
step,
static_cast<double>(step),
&state->s,
if (confout != nullptr)
{
- if (DOMAINDECOMP(cr))
+ if (haveDDAtomOrdering(*cr))
{
/* If bX=true, x was collected to state_global in the call above */
if (!bX)
if (MASTER(cr))
{
- if (ir->pbcType != PbcType::No && !ir->bPeriodicMols && DOMAINDECOMP(cr))
+ if (ir->pbcType != PbcType::No && !ir->bPeriodicMols && haveDDAtomOrdering(*cr))
{
/* Make molecules whole only for confout writing */
- do_pbc_mtop(ir->pbcType, state->s.box, top_global, state_global->x.rvec_array());
+ do_pbc_mtop(ir->pbcType, state->s.box, &top_global, state_global->x.rvec_array());
}
write_sto_conf_mtop(confout,
- *top_global->name,
+ *top_global.name,
top_global,
state_global->x.rvec_array(),
nullptr,
int64_t count)
{
- t_state *s1, *s2;
- int start, end;
- real dvdl_constr;
+ t_state * s1, *s2;
+ int start, end;
+ real dvdl_constr;
int nthreads gmx_unused;
bool validStep = true;
s1 = &ems1->s;
s2 = &ems2->s;
- if (DOMAINDECOMP(cr) && s1->ddp_count != cr->dd->ddp_count)
+ if (haveDDAtomOrdering(*cr) && s1->ddp_count != cr->dd->ddp_count)
{
gmx_incons("state mismatch in do_em_step");
}
state_change_natoms(s2, s1->natoms);
ems2->f.resize(s2->natoms);
}
- if (DOMAINDECOMP(cr) && s2->cg_gl.size() != s1->cg_gl.size())
+ if (haveDDAtomOrdering(*cr) && s2->cg_gl.size() != s1->cg_gl.size())
{
s2->cg_gl.resize(s1->cg_gl.size());
}
start = 0;
end = md->homenr;
- nthreads = gmx_omp_nthreads_get(emntUpdate);
+ nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Update);
#pragma omp parallel num_threads(nthreads)
{
const rvec* x1 = s1->x.rvec_array();
GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
}
- if (s2->flags & (1 << estCGP))
+ if (s2->flags & enumValueToBitMask(StateEntry::Cgp))
{
/* Copy the CG p vector */
const rvec* p1 = s1->cg_p.rvec_array();
}
}
- if (DOMAINDECOMP(cr))
+ if (haveDDAtomOrdering(*cr))
{
/* OpenMP does not supported unsigned loop variables */
#pragma omp for schedule(static) nowait
}
}
- if (DOMAINDECOMP(cr))
+ if (haveDDAtomOrdering(*cr))
{
s2->ddp_count = s1->ddp_count;
s2->ddp_count_cg_gl = s1->ddp_count_cg_gl;
s2->x.arrayRefWithPadding(),
ArrayRef<RVec>(),
s2->box,
- s2->lambda[efptBONDED],
+ s2->lambda[FreeEnergyPerturbationCouplingType::Bonded],
&dvdl_constr,
gmx::ArrayRefWithPadding<RVec>(),
false,
}
// We should move this check to the different minimizers
- if (!validStep && ir->eI != eiSteep)
+ if (!validStep && ir->eI != IntegrationAlgorithm::Steep)
{
gmx_fatal(FARGS,
"The coordinates could not be constrained. Minimizer '%s' can not handle "
"constraint failures, use minimizer '%s' before using '%s'.",
- EI(ir->eI),
- EI(eiSteep),
- EI(ir->eI));
+ enumValueToString(ir->eI),
+ enumValueToString(IntegrationAlgorithm::Steep),
+ enumValueToString(ir->eI));
}
}
const gmx::MDLogger& mdlog,
int step,
const t_commrec* cr,
- const gmx_mtop_t* top_global,
+ const gmx_mtop_t& top_global,
const t_inputrec* ir,
gmx::ImdSession* imdSession,
pull_t* pull_work,
VirtualSitesHandler* vsite,
gmx::Constraints* constr,
t_nrnb* nrnb,
- gmx_wallcycle_t wcycle)
+ gmx_wallcycle* wcycle)
{
/* Repartition the domain decomposition */
dd_partition_system(fplog,
FALSE,
1,
nullptr,
- *top_global,
- ir,
+ top_global,
+ *ir,
imdSession,
pull_work,
&ems->s,
nrnb,
wcycle,
FALSE);
- dd_store_state(cr->dd, &ems->s);
+ dd_store_state(*cr->dd, &ems->s);
}
namespace
{
+/*! \brief Copy coordinates, OpenMP parallelized, from \p refCoords to \p coords
+ *
+ * \param[out] coords     Destination buffer; resized to match \p refCoords
+ * \param[in]  refCoords  Source coordinates to copy
+ */
+void setCoordinates(std::vector<RVec>* coords, ArrayRef<const RVec> refCoords)
+{
+ coords->resize(refCoords.size());
+
+ // nthreads is unused when GROMACS is built without OpenMP, hence gmx_unused
+ const int gmx_unused nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Update);
+#pragma omp parallel for num_threads(nthreads) schedule(static)
+ for (int i = 0; i < ssize(refCoords); i++)
+ {
+ (*coords)[i] = refCoords[i];
+ }
+}
+
+/*! \brief Returns the maximum difference an atom moved between two coordinate sets, over all ranks
+ *
+ * The per-atom displacement is accumulated as a squared norm and a single
+ * square root is taken at the end, which avoids one sqrt per atom.
+ *
+ * \param[in] coords1         First coordinate set; must have the same size as \p coords2
+ * \param[in] coords2         Second coordinate set
+ * \param[in] mpiCommMyGroup  Communicator to reduce over; with real MPI and more than
+ *                            one rank the local maxima are combined with MPI_MAX
+ * \returns the maximum displacement of any atom, reduced over all ranks
+ */
+real maxCoordinateDifference(ArrayRef<const RVec> coords1, ArrayRef<const RVec> coords2, MPI_Comm mpiCommMyGroup)
+{
+ GMX_RELEASE_ASSERT(coords1.size() == coords2.size(), "Coordinate counts should match");
+
+ real maxDiffSquared = 0;
+
+ // OpenMP max-reduction over the per-thread maxima (requires OpenMP >= 3.1)
+ const int gmx_unused nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Update);
+#pragma omp parallel for reduction(max : maxDiffSquared) num_threads(nthreads) schedule(static)
+ for (int i = 0; i < ssize(coords1); i++)
+ {
+ maxDiffSquared = std::max(maxDiffSquared, gmx::norm2(coords1[i] - coords2[i]));
+ }
+
+#if GMX_MPI
+ int numRanks = 1;
+ if (mpiCommMyGroup != MPI_COMM_NULL)
+ {
+ MPI_Comm_size(mpiCommMyGroup, &numRanks);
+ }
+ if (numRanks > 1)
+ {
+ real maxDiffSquaredReduced;
+ // The MPI datatype must match the precision of GROMACS' real type
+ MPI_Allreduce(
+ &maxDiffSquared, &maxDiffSquaredReduced, 1, GMX_DOUBLE ? MPI_DOUBLE : MPI_FLOAT, MPI_MAX, mpiCommMyGroup);
+ maxDiffSquared = maxDiffSquaredReduced;
+ }
+#else
+ GMX_UNUSED_VALUE(mpiCommMyGroup);
+#endif
+
+ return std::sqrt(maxDiffSquared);
+}
+
/*! \brief Class to handle the work of setting and doing an energy evaluation.
*
* This class is a mere aggregate of parameters to pass to evaluate an
* unsuited for aggregate initialization. When the types
* improve, the call signature of this method can be reduced.
*/
- void run(em_state_t* ems, rvec mu_tot, tensor vir, tensor pres, int64_t count, gmx_bool bFirst);
+ void run(em_state_t* ems, rvec mu_tot, tensor vir, tensor pres, int64_t count, gmx_bool bFirst, int64_t step);
//! Handles logging (deprecated).
FILE* fplog;
//! Handles logging.
//! Coordinates multi-simulations.
const gmx_multisim_t* ms;
//! Holds the simulation topology.
- const gmx_mtop_t* top_global;
+ const gmx_mtop_t& top_global;
//! Holds the domain topology.
gmx_localtop_t* top;
//! User input options.
//! Manages flop accounting.
t_nrnb* nrnb;
//! Manages wall cycle accounting.
- gmx_wallcycle_t wcycle;
- //! Coordinates global reduction.
+ gmx_wallcycle* wcycle;
+ //! Legacy coordinator of global reduction.
gmx_global_stat_t gstat;
+ //! Coordinates reduction for observables
+ gmx::ObservablesReducer* observablesReducer;
//! Handles virtual sites.
VirtualSitesHandler* vsite;
//! Handles constraints.
MdrunScheduleWorkload* runScheduleWork;
//! Stores the computed energies.
gmx_enerdata_t* enerd;
+ //! The DD partitioning count at which the pair list was generated
+ int ddpCountPairSearch;
+ //! The local coordinates that were used for pair searching, stored for computing displacements
+ std::vector<RVec> pairSearchCoordinates;
};
-void EnergyEvaluator::run(em_state_t* ems, rvec mu_tot, tensor vir, tensor pres, int64_t count, gmx_bool bFirst)
+void EnergyEvaluator::run(em_state_t* ems, rvec mu_tot, tensor vir, tensor pres, int64_t count, gmx_bool bFirst, int64_t step)
{
real t;
gmx_bool bNS;
/* Set the time to the initial time, the time does not change during EM */
t = inputrec->init_t;
- if (bFirst || (DOMAINDECOMP(cr) && ems->s.ddp_count < cr->dd->ddp_count))
+ if (vsite)
+ {
+ vsite->construct(ems->s.x, {}, ems->s.box, gmx::VSiteOperation::Positions);
+ }
+
+ // Compute the buffer size of the pair list
+ const real bufferSize = inputrec->rlist - std::max(inputrec->rcoulomb, inputrec->rvdw);
+
+ if (bFirst || bufferSize <= 0 || (haveDDAtomOrdering(*cr) && ems->s.ddp_count != ddpCountPairSearch))
{
/* This is the first state or an old state used before the last ns */
bNS = TRUE;
}
else
{
- bNS = FALSE;
- if (inputrec->nstlist > 0)
- {
- bNS = TRUE;
- }
+ // We need to generate a new pairlist when one atom moved more than half the buffer size
+ ArrayRef<const RVec> localCoordinates =
+ ArrayRef<const RVec>(ems->s.x).subArray(0, mdAtoms->mdatoms()->homenr);
+ bNS = 2 * maxCoordinateDifference(pairSearchCoordinates, localCoordinates, cr->mpi_comm_mygroup)
+ > bufferSize;
}
- if (vsite)
- {
- vsite->construct(ems->s.x, 1, {}, ems->s.box);
- }
-
- if (DOMAINDECOMP(cr) && bNS)
+ if (haveDDAtomOrdering(*cr) && bNS)
{
/* Repartition the domain decomposition */
em_dd_partition_system(
fplog, mdlog, count, cr, top_global, inputrec, imdSession, pull_work, ems, top, mdAtoms, fr, vsite, constr, nrnb, wcycle);
+ ddpCountPairSearch = cr->dd->ddp_count;
+ }
+
+ /* Store the local coordinates that will be used in the pair search, after we re-partitioned */
+ if (bufferSize > 0 && bNS)
+ {
+ ArrayRef<const RVec> localCoordinates =
+ constArrayRefFromArray(ems->s.x.data(), mdAtoms->mdatoms()->homenr);
+ setCoordinates(&pairSearchCoordinates, localCoordinates);
}
+ fr->longRangeNonbondeds->updateAfterPartition(*mdAtoms->mdatoms());
+
/* Calc force & energy on new trial position */
/* do_force always puts the charge groups in the box and shifts again
* We do not unshift, so molecules are always whole in congrad.c
do_force(fplog,
cr,
ms,
- inputrec,
+ *inputrec,
nullptr,
nullptr,
imdSession,
mu_tot,
t,
nullptr,
+ fr->longRangeNonbondeds.get(),
GMX_FORCE_STATECHANGED | GMX_FORCE_ALLFORCES | GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY
| (bNS ? GMX_FORCE_NS : 0),
DDBalanceRegionHandler(cr));
clear_mat(pres);
/* Communicate stuff when parallel */
- if (PAR(cr) && inputrec->eI != eiNM)
+ if (PAR(cr) && inputrec->eI != IntegrationAlgorithm::NM)
{
- wallcycle_start(wcycle, ewcMoveE);
+ wallcycle_start(wcycle, WallCycleCounter::MoveE);
- global_stat(gstat,
+ global_stat(*gstat,
cr,
enerd,
force_vir,
shake_vir,
- inputrec,
- nullptr,
- gmx::ArrayRef<real>{},
+ *inputrec,
nullptr,
- 1,
- &terminate,
nullptr,
+ std::vector<real>(1, terminate),
FALSE,
- CGLO_ENERGY | CGLO_PRESSURE | CGLO_CONSTRAINT);
+ CGLO_ENERGY | CGLO_PRESSURE | CGLO_CONSTRAINT,
+ step,
+ observablesReducer);
- wallcycle_stop(wcycle, ewcMoveE);
+ wallcycle_stop(wcycle, WallCycleCounter::MoveE);
}
if (fr->dispersionCorrection)
{
/* Calculate long range corrections to pressure and energy */
- const DispersionCorrection::Correction correction =
- fr->dispersionCorrection->calculate(ems->s.box, ems->s.lambda[efptVDW]);
+ const DispersionCorrection::Correction correction = fr->dispersionCorrection->calculate(
+ ems->s.box, ems->s.lambda[FreeEnergyPerturbationCouplingType::Vdw]);
enerd->term[F_DISPCORR] = correction.energy;
enerd->term[F_EPOT] += correction.energy;
f,
f.unpaddedArrayRef(),
ems->s.box,
- ems->s.lambda[efptBONDED],
+ ems->s.lambda[FreeEnergyPerturbationCouplingType::Bonded],
&dvdl_constr,
gmx::ArrayRefWithPadding<RVec>(),
computeVirial,
clear_mat(ekin);
enerd->term[F_PRES] = calc_pres(fr->pbcType, inputrec->nwall, ems->s.box, ekin, vir, pres);
- if (inputrec->efep != efepNO)
+ if (inputrec->efep != FreeEnergyPerturbationType::No)
{
accumulateKineticLambdaComponents(enerd, ems->s.lambda, *inputrec->fepvals);
}
//! Parallel utility summing energies and forces
static double reorder_partsum(const t_commrec* cr,
const t_grpopts* opts,
- const gmx_mtop_t* top_global,
+ const gmx_mtop_t& top_global,
const em_state_t* s_min,
const em_state_t* s_b)
{
* This conflicts with the spirit of domain decomposition,
* but to fully optimize this a much more complicated algorithm is required.
*/
- const int natoms = top_global->natoms;
+ const int natoms = top_global.natoms;
rvec* fmg;
snew(fmg, natoms);
copy_rvec(fm[i], fmg[a]);
i++;
}
- gmx_sum(top_global->natoms * 3, fmg[0], cr);
+ gmx_sum(top_global.natoms * 3, fmg[0], cr);
/* Now we will determine the part of the sum for the cgs in state s_b */
gmx::ArrayRef<const int> indicesB = s_b->s.cg_gl;
i = 0;
int gf = 0;
gmx::ArrayRef<const unsigned char> grpnrFREEZE =
- top_global->groups.groupNumbers[SimulationAtomGroupType::Freeze];
+ top_global.groups.groupNumbers[SimulationAtomGroupType::Freeze];
for (int a : indicesB)
{
if (!grpnrFREEZE.empty())
static real pr_beta(const t_commrec* cr,
const t_grpopts* opts,
t_mdatoms* mdatoms,
- const gmx_mtop_t* top_global,
+ const gmx_mtop_t& top_global,
const em_state_t* s_min,
const em_state_t* s_b)
{
* and might have to sum it in parallel runs.
*/
- if (!DOMAINDECOMP(cr)
+ if (!haveDDAtomOrdering(*cr)
|| (s_min->s.ddp_count == cr->dd->ddp_count && s_b->s.ddp_count == cr->dd->ddp_count))
{
auto fm = s_min->f.view().force();
{
const char* CG = "Polak-Ribiere Conjugate Gradients";
- gmx_localtop_t top(top_global->ffparams);
gmx_global_stat_t gstat;
double tmp, minstep;
real stepsize;
tensor vir, pres;
int number_steps, neval = 0, nstcg = inputrec->nstcgsteep;
int m, step, nminstep;
- auto mdatoms = mdAtoms->mdatoms();
+ auto* mdatoms = mdAtoms->mdatoms();
GMX_LOG(mdlog.info)
.asParagraph()
if (MASTER(cr))
{
// In CG, the state is extended with a search direction
- state_global->flags |= (1 << estCGP);
+ state_global->flags |= enumValueToBitMask(StateEntry::Cgp);
// Ensure the extra per-atom state array gets allocated
state_change_natoms(state_global, state_global->natoms);
em_state_t* s_b = &s2;
em_state_t* s_c = &s3;
+ ObservablesReducer observablesReducer = observablesReducerBuilder->build();
+
/* Init em and store the local state in s_min */
init_em(fplog,
mdlog,
state_global,
top_global,
s_min,
- &top,
+ top,
nrnb,
fr,
mdAtoms,
mdrunOptions,
cr,
outputProvider,
- mdModulesNotifier,
+ mdModulesNotifiers,
inputrec,
top_global,
nullptr,
ms);
gmx::EnergyOutput energyOutput(mdoutf_get_fp_ene(outf),
top_global,
- inputrec,
+ *inputrec,
pull_work,
nullptr,
false,
StartingBehavior::NewSimulation,
simulationsShareState,
- mdModulesNotifier);
+ mdModulesNotifiers);
/* Print to log file */
print_em_start(fplog, cr, walltime_accounting, wcycle, CG);
sp_header(fplog, CG, inputrec->em_tol, number_steps);
}
- EnergyEvaluator energyEvaluator{ fplog, mdlog, cr, ms, top_global, &top,
- inputrec, imdSession, pull_work, nrnb, wcycle, gstat,
- vsite, constr, mdAtoms, fr, runScheduleWork, enerd };
+ EnergyEvaluator energyEvaluator{ fplog,
+ mdlog,
+ cr,
+ ms,
+ top_global,
+ top,
+ inputrec,
+ imdSession,
+ pull_work,
+ nrnb,
+ wcycle,
+ gstat,
+ &observablesReducer,
+ vsite,
+ constr,
+ mdAtoms,
+ fr,
+ runScheduleWork,
+ enerd,
+ -1,
+ {} };
/* Call the force routine and some auxiliary (neighboursearching etc.) */
/* do_force always puts the charge groups in the box and shifts again
* We do not unshift, so molecules are always whole in congrad.c
*/
- energyEvaluator.run(s_min, mu_tot, vir, pres, -1, TRUE);
+ energyEvaluator.run(s_min, mu_tot, vir, pres, -1, TRUE, step);
+ observablesReducer.markAsReadyToReduce();
if (MASTER(cr))
{
mdatoms->tmass,
enerd,
nullptr,
- nullptr,
nullBox,
PTCouplingArrays(),
0,
- nullptr,
- nullptr,
vir,
pres,
nullptr,
gmx_sumd(1, &minstep, cr);
}
- minstep = GMX_REAL_EPS / sqrt(minstep / (3 * top_global->natoms));
+ minstep = GMX_REAL_EPS / sqrt(minstep / (3 * top_global.natoms));
if (stepsize < minstep)
{
a = 0.0;
c = a + stepsize; /* reference position along line is zero */
- if (DOMAINDECOMP(cr) && s_min->s.ddp_count < cr->dd->ddp_count)
+ if (haveDDAtomOrdering(*cr) && s_min->s.ddp_count < cr->dd->ddp_count)
{
em_dd_partition_system(fplog,
mdlog,
imdSession,
pull_work,
s_min,
- &top,
+ top,
mdAtoms,
fr,
vsite,
neval++;
/* Calculate energy for the trial step */
- energyEvaluator.run(s_c, mu_tot, vir, pres, -1, FALSE);
+ energyEvaluator.run(s_c, mu_tot, vir, pres, -1, FALSE, step);
+ observablesReducer.markAsReadyToReduce();
/* Calc derivative along line */
const rvec* pc = s_c->s.cg_p.rvec_array();
b = 0.5 * (a + c);
}
- if (DOMAINDECOMP(cr) && s_min->s.ddp_count != cr->dd->ddp_count)
+ if (haveDDAtomOrdering(*cr) && s_min->s.ddp_count != cr->dd->ddp_count)
{
/* Reload the old state */
em_dd_partition_system(fplog,
imdSession,
pull_work,
s_min,
- &top,
+ top,
mdAtoms,
fr,
vsite,
neval++;
/* Calculate energy for the trial step */
- energyEvaluator.run(s_b, mu_tot, vir, pres, -1, FALSE);
+ energyEvaluator.run(s_b, mu_tot, vir, pres, -1, FALSE, step);
+ observablesReducer.markAsReadyToReduce();
/* p does not change within a step, but since the domain decomposition
* might change, we have to use cg_p of s_b here.
mdatoms->tmass,
enerd,
nullptr,
- nullptr,
nullBox,
PTCouplingArrays(),
0,
- nullptr,
- nullptr,
vir,
pres,
nullptr,
}
/* Send energies and positions to the IMD client if bIMD is TRUE. */
- if (MASTER(cr) && imdSession->run(step, TRUE, state_global->box, state_global->x.rvec_array(), 0))
+ if (MASTER(cr) && imdSession->run(step, TRUE, state_global->box, state_global->x, 0))
{
imdSession->sendPositionsAndEnergies();
}
* If we have reached machine precision, converged is already set to true.
*/
converged = converged || (s_min->fmax < inputrec->em_tol);
-
+ observablesReducer.markAsReadyToReduce();
} /* End of the loop */
if (converged)
{
static const char* LBFGS = "Low-Memory BFGS Minimizer";
em_state_t ems;
- gmx_localtop_t top(top_global->ffparams);
gmx_global_stat_t gstat;
- int ncorr, nmaxcorr, point, cp, neval, nminstep;
- double stepsize, step_taken, gpa, gpb, gpc, tmp, minstep;
- real * rho, *alpha, *p, *s, **dx, **dg;
- real a, b, c, maxdelta, delta;
- real diag, Epot0;
- real dgdx, dgdg, sq, yr, beta;
- gmx_bool converged;
- rvec mu_tot = { 0 };
- gmx_bool do_log, do_ene, do_x, do_f, foundlower, *frozen;
- tensor vir, pres;
- int start, end, number_steps;
- int i, k, m, n, gf, step;
- int mdof_flags;
- auto mdatoms = mdAtoms->mdatoms();
+ auto* mdatoms = mdAtoms->mdatoms();
GMX_LOG(mdlog.info)
.asParagraph()
"be available in a different form in a future version of GROMACS, "
"e.g. gmx minimize and an .mdp option.");
+ if (haveDDAtomOrdering(*cr))
+ {
+ gmx_fatal(FARGS, "L_BFGS is currently not supported");
+ }
if (PAR(cr))
{
gmx_fatal(FARGS, "L-BFGS minimization only supports a single rank");
"do not use constraints, or use another minimizer (e.g. steepest descent).");
}
- n = 3 * state_global->natoms;
- nmaxcorr = inputrec->nbfgscorr;
-
- snew(frozen, n);
+ const int n = 3 * state_global->natoms;
+ const int nmaxcorr = inputrec->nbfgscorr;
- snew(p, n);
- snew(rho, nmaxcorr);
- snew(alpha, nmaxcorr);
+ std::vector<real> p(n);
+ std::vector<real> rho(nmaxcorr);
+ std::vector<real> alpha(nmaxcorr);
- snew(dx, nmaxcorr);
- for (i = 0; i < nmaxcorr; i++)
+ std::vector<std::vector<real>> dx(nmaxcorr);
+ for (auto& dxCorr : dx)
{
- snew(dx[i], n);
+ dxCorr.resize(n);
}
- snew(dg, nmaxcorr);
- for (i = 0; i < nmaxcorr; i++)
+ std::vector<std::vector<real>> dg(nmaxcorr);
+ for (auto& dgCorr : dg)
{
- snew(dg[i], n);
+ dgCorr.resize(n);
}
- step = 0;
- neval = 0;
+ int step = 0;
+ int neval = 0;
+
+ ObservablesReducer observablesReducer = observablesReducerBuilder->build();
/* Init em */
init_em(fplog,
state_global,
top_global,
&ems,
- &top,
+ top,
nrnb,
fr,
mdAtoms,
mdrunOptions,
cr,
outputProvider,
- mdModulesNotifier,
+ mdModulesNotifiers,
inputrec,
top_global,
nullptr,
ms);
gmx::EnergyOutput energyOutput(mdoutf_get_fp_ene(outf),
top_global,
- inputrec,
+ *inputrec,
pull_work,
nullptr,
false,
StartingBehavior::NewSimulation,
simulationsShareState,
- mdModulesNotifier);
+ mdModulesNotifiers);
- start = 0;
- end = mdatoms->homenr;
+ const int start = 0;
+ const int end = mdatoms->homenr;
/* We need 4 working states */
em_state_t s0{}, s1{}, s2{}, s3{};
/* Print to log file */
print_em_start(fplog, cr, walltime_accounting, wcycle, LBFGS);
- do_log = do_ene = do_f = TRUE;
-
/* Max number of steps */
- number_steps = inputrec->nsteps;
+ const int number_steps = inputrec->nsteps;
/* Create a 3*natoms index to tell whether each degree of freedom is frozen */
- gf = 0;
- for (i = start; i < end; i++)
+ std::vector<bool> frozen(n);
+ int gf = 0;
+ for (int i = start; i < end; i++)
{
if (mdatoms->cFREEZE)
{
gf = mdatoms->cFREEZE[i];
}
- for (m = 0; m < DIM; m++)
+ for (int m = 0; m < DIM; m++)
{
frozen[3 * i + m] = (inputrec->opts.nFreeze[gf][m] != 0);
}
if (vsite)
{
- vsite->construct(state_global->x, 1, {}, state_global->box);
+ vsite->construct(state_global->x, {}, state_global->box, VSiteOperation::Positions);
}
/* Call the force routine and some auxiliary (neighboursearching etc.) */
* We do not unshift, so molecules are always whole
*/
neval++;
- EnergyEvaluator energyEvaluator{ fplog, mdlog, cr, ms, top_global, &top,
- inputrec, imdSession, pull_work, nrnb, wcycle, gstat,
- vsite, constr, mdAtoms, fr, runScheduleWork, enerd };
- energyEvaluator.run(&ems, mu_tot, vir, pres, -1, TRUE);
+ EnergyEvaluator energyEvaluator{ fplog,
+ mdlog,
+ cr,
+ ms,
+ top_global,
+ top,
+ inputrec,
+ imdSession,
+ pull_work,
+ nrnb,
+ wcycle,
+ gstat,
+ &observablesReducer,
+ vsite,
+ constr,
+ mdAtoms,
+ fr,
+ runScheduleWork,
+ enerd };
+ rvec mu_tot;
+ tensor vir;
+ tensor pres;
+ energyEvaluator.run(&ems, mu_tot, vir, pres, -1, TRUE, step);
if (MASTER(cr))
{
mdatoms->tmass,
enerd,
nullptr,
- nullptr,
nullBox,
PTCouplingArrays(),
0,
- nullptr,
- nullptr,
vir,
pres,
nullptr,
}
// Point is an index to the memory of search directions, where 0 is the first one.
- point = 0;
+ int point = 0;
// Set initial search direction to the force (-gradient), or 0 for frozen particles.
real* fInit = static_cast<real*>(ems.f.view().force().data()[0]);
- for (i = 0; i < n; i++)
+ for (int i = 0; i < n; i++)
{
if (!frozen[i])
{
// (the main efficiency in the algorithm comes from changing directions), but
// we still need an initial value, so estimate it as the inverse of the norm
// so we take small steps where the potential fluctuates a lot.
- stepsize = 1.0 / ems.fnorm;
+ double stepsize = 1.0 / ems.fnorm;
/* Start the loop over BFGS steps.
* Each successful step is counted, and we continue until
* we either converge or reach the max number of steps.
*/
- ncorr = 0;
+ bool do_log = true;
+ bool do_ene = true;
+
+ int ncorr = 0;
/* Set the gradient from the force */
- converged = FALSE;
- for (step = 0; (number_steps < 0 || step <= number_steps) && !converged; step++)
+ bool converged = false;
+ for (int step = 0; (number_steps < 0 || step <= number_steps) && !converged; step++)
{
/* Write coordinates if necessary */
- do_x = do_per_step(step, inputrec->nstxout);
- do_f = do_per_step(step, inputrec->nstfout);
+ const bool do_x = do_per_step(step, inputrec->nstxout);
+ const bool do_f = do_per_step(step, inputrec->nstfout);
- mdof_flags = 0;
+ int mdof_flags = 0;
if (do_x)
{
mdof_flags |= MDOF_X;
cr,
outf,
mdof_flags,
- top_global->natoms,
+ top_global.natoms,
step,
static_cast<real>(step),
&ems.s,
/* Do the linesearching in the direction dx[point][0..(n-1)] */
/* make s a pointer to current search direction - point=0 first time we get here */
- s = dx[point];
+ gmx::ArrayRef<const real> s = dx[point];
- real* xx = static_cast<real*>(ems.s.x.rvec_array()[0]);
- real* ff = static_cast<real*>(ems.f.view().force().data()[0]);
+ const real* xx = static_cast<real*>(ems.s.x.rvec_array()[0]);
+ const real* ff = static_cast<real*>(ems.f.view().force().data()[0]);
// calculate line gradient in position A
- for (gpa = 0, i = 0; i < n; i++)
+ double gpa = 0;
+ for (int i = 0; i < n; i++)
{
gpa -= s[i] * ff[i];
}
/* Calculate minimum allowed stepsize along the line, before the average (norm)
* relative change in coordinate is smaller than precision
*/
- for (minstep = 0, i = 0; i < n; i++)
+ double minstep = 0;
+ for (int i = 0; i < n; i++)
{
- tmp = fabs(xx[i]);
+ double tmp = fabs(xx[i]);
if (tmp < 1.0)
{
tmp = 1.0;
if (stepsize < minstep)
{
- converged = TRUE;
+ converged = true;
break;
}
// Before taking any steps along the line, store the old position
- *last = ems;
- real* lastx = static_cast<real*>(last->s.x.data()[0]);
- real* lastf = static_cast<real*>(last->f.view().force().data()[0]);
- Epot0 = ems.epot;
+ *last = ems;
+ real* lastx = static_cast<real*>(last->s.x.data()[0]);
+ real* lastf = static_cast<real*>(last->f.view().force().data()[0]);
+ const real Epot0 = ems.epot;
*sa = ems;
// State "A" is the first position along the line.
// reference position along line is initially zero
- a = 0.0;
+ real a = 0;
// Check stepsize first. We do not allow displacements
// larger than emstep.
//
+ real c;
+ real maxdelta;
do
{
// Pick a new position C by adding stepsize to A.
// Calculate what the largest change in any individual coordinate
// would be (translation along line * gradient along line)
maxdelta = 0;
- for (i = 0; i < n; i++)
+ for (int i = 0; i < n; i++)
{
- delta = c * s[i];
+ real delta = c * s[i];
if (delta > maxdelta)
{
maxdelta = delta;
// Take a trial step and move the coordinate array xc[] to position C
real* xc = static_cast<real*>(sc->s.x.rvec_array()[0]);
- for (i = 0; i < n; i++)
+ for (int i = 0; i < n; i++)
{
xc[i] = lastx[i] + c * s[i];
}
neval++;
// Calculate energy for the trial step in position C
- energyEvaluator.run(sc, mu_tot, vir, pres, step, FALSE);
+ energyEvaluator.run(sc, mu_tot, vir, pres, step, FALSE, step);
// Calc line gradient in position C
- real* fc = static_cast<real*>(sc->f.view().force()[0]);
- for (gpc = 0, i = 0; i < n; i++)
+ real* fc = static_cast<real*>(sc->f.view().force()[0]);
+ double gpc = 0;
+ for (int i = 0; i < n; i++)
{
gpc -= s[i] * fc[i]; /* f is negative gradient, thus the sign */
}
// This is the max amount of increase in energy we tolerate.
// By allowing VERY small changes (close to numerical precision) we
// frequently find even better (lower) final energies.
- tmp = std::sqrt(GMX_REAL_EPS) * fabs(sa->epot);
+ double tmp = std::sqrt(GMX_REAL_EPS) * fabs(sa->epot);
// Accept the step if the energy is lower in the new position C (compared to A),
// or if it is not significantly higher and the line derivative is still negative.
- foundlower = sc->epot < sa->epot || (gpc < 0 && sc->epot < (sa->epot + tmp));
+ bool foundlower = sc->epot < sa->epot || (gpc < 0 && sc->epot < (sa->epot + tmp));
// If true, great, we found a better energy. We no longer try to alter the
// stepsize, but simply accept this new better position. The we select a new
// search direction instead, which will be much more efficient than continuing
// than with the stepsize, so no need to modify it. For the next search direction
// it will be reset to 1/fnorm anyway.
+ double step_taken;
if (!foundlower)
{
// OK, if we didn't find a lower value we will have to locate one now - there must
// I also have a safeguard for potentially really pathological functions so we never
// take more than 20 steps before we give up.
// If we already found a lower value we just skip this step and continue to the update.
- real fnorm = 0;
- nminstep = 0;
+ real fnorm = 0;
+ int nminstep = 0;
do
{
// Select a new trial point B in the interval [A,C].
// If the derivatives at points a & c have different sign we interpolate to zero,
// otherwise just do a bisection since there might be multiple minima/maxima
// inside the interval.
+ real b;
if (gpa < 0 && gpc > 0)
{
b = a + gpa * (a - c) / (gpc - gpa);
// Take a trial step to point B
real* xb = static_cast<real*>(sb->s.x.rvec_array()[0]);
- for (i = 0; i < n; i++)
+ for (int i = 0; i < n; i++)
{
xb[i] = lastx[i] + b * s[i];
}
neval++;
// Calculate energy for the trial step in point B
- energyEvaluator.run(sb, mu_tot, vir, pres, step, FALSE);
+ energyEvaluator.run(sb, mu_tot, vir, pres, step, FALSE, step);
fnorm = sb->fnorm;
// Calculate gradient in point B
- real* fb = static_cast<real*>(sb->f.view().force()[0]);
- for (gpb = 0, i = 0; i < n; i++)
+ real* fb = static_cast<real*>(sb->f.view().force()[0]);
+ double gpb = 0;
+ for (int i = 0; i < n; i++)
{
gpb -= s[i] * fb[i]; /* f is negative gradient, thus the sign */
}
if (ncorr == 0)
{
/* Converged */
- converged = TRUE;
+ converged = true;
break;
}
else
/* Reset memory */
ncorr = 0;
/* Search in gradient direction */
- for (i = 0; i < n; i++)
+ for (int i = 0; i < n; i++)
{
dx[point][i] = ff[i];
}
ncorr++;
}
- for (i = 0; i < n; i++)
+ for (int i = 0; i < n; i++)
{
dg[point][i] = lastf[i] - ff[i];
dx[point][i] *= step_taken;
}
- dgdg = 0;
- dgdx = 0;
- for (i = 0; i < n; i++)
+ real dgdg = 0;
+ real dgdx = 0;
+ for (int i = 0; i < n; i++)
{
dgdg += dg[point][i] * dg[point][i];
dgdx += dg[point][i] * dx[point][i];
}
- diag = dgdx / dgdg;
+ const real diag = dgdx / dgdg;
rho[point] = 1.0 / dgdx;
point++;
}
/* Update */
- for (i = 0; i < n; i++)
+ for (int i = 0; i < n; i++)
{
p[i] = ff[i];
}
- cp = point;
+ int cp = point;
/* Recursive update. First go back over the memory points */
- for (k = 0; k < ncorr; k++)
+ for (int k = 0; k < ncorr; k++)
{
cp--;
if (cp < 0)
cp = ncorr - 1;
}
- sq = 0;
- for (i = 0; i < n; i++)
+ real sq = 0;
+ for (int i = 0; i < n; i++)
{
sq += dx[cp][i] * p[i];
}
alpha[cp] = rho[cp] * sq;
- for (i = 0; i < n; i++)
+ for (int i = 0; i < n; i++)
{
p[i] -= alpha[cp] * dg[cp][i];
}
}
- for (i = 0; i < n; i++)
+ for (int i = 0; i < n; i++)
{
p[i] *= diag;
}
/* And then go forward again */
- for (k = 0; k < ncorr; k++)
+ for (int k = 0; k < ncorr; k++)
{
- yr = 0;
- for (i = 0; i < n; i++)
+ real yr = 0;
+ for (int i = 0; i < n; i++)
{
yr += p[i] * dg[cp][i];
}
- beta = rho[cp] * yr;
- beta = alpha[cp] - beta;
+ real beta = rho[cp] * yr;
+ beta = alpha[cp] - beta;
- for (i = 0; i < n; i++)
+ for (int i = 0; i < n; i++)
{
p[i] += beta * dx[cp][i];
}
}
}
- for (i = 0; i < n; i++)
+ for (int i = 0; i < n; i++)
{
if (!frozen[i])
{
mdatoms->tmass,
enerd,
nullptr,
- nullptr,
nullBox,
PTCouplingArrays(),
0,
- nullptr,
- nullptr,
vir,
pres,
nullptr,
}
/* Send x and E to IMD client, if bIMD is TRUE. */
- if (imdSession->run(step, TRUE, state_global->box, state_global->x.rvec_array(), 0) && MASTER(cr))
+ if (imdSession->run(step, TRUE, state_global->box, state_global->x, 0) && MASTER(cr))
{
imdSession->sendPositionsAndEnergies();
}
* If we have reached machine precision, converged is already set to true.
*/
converged = converged || (ems.fmax < inputrec->em_tol);
-
+ observablesReducer.markAsReadyToReduce();
} /* End of the loop */
if (converged)
* However, we should only do it if we did NOT already write this step
* above (which we did if do_x or do_f was true).
*/
- do_x = !do_per_step(step, inputrec->nstxout);
- do_f = !do_per_step(step, inputrec->nstfout);
+ const bool do_x = !do_per_step(step, inputrec->nstxout);
+ const bool do_f = !do_per_step(step, inputrec->nstfout);
write_em_traj(
fplog, cr, outf, do_x, do_f, ftp2fn(efSTO, nfile, fnm), top_global, inputrec, step, &ems, state_global, observablesHistory);
void LegacySimulator::do_steep()
{
const char* SD = "Steepest Descents";
- gmx_localtop_t top(top_global->ffparams);
gmx_global_stat_t gstat;
real stepsize;
real ustep;
int nsteps;
int count = 0;
int steps_accepted = 0;
- auto mdatoms = mdAtoms->mdatoms();
+ auto* mdatoms = mdAtoms->mdatoms();
GMX_LOG(mdlog.info)
.asParagraph()
em_state_t* s_min = &s0;
em_state_t* s_try = &s1;
+ ObservablesReducer observablesReducer = observablesReducerBuilder->build();
+
/* Init em and store the local state in s_try */
init_em(fplog,
mdlog,
state_global,
top_global,
s_try,
- &top,
+ top,
nrnb,
fr,
mdAtoms,
mdrunOptions,
cr,
outputProvider,
- mdModulesNotifier,
+ mdModulesNotifiers,
inputrec,
top_global,
nullptr,
ms);
gmx::EnergyOutput energyOutput(mdoutf_get_fp_ene(outf),
top_global,
- inputrec,
+ *inputrec,
pull_work,
nullptr,
false,
StartingBehavior::NewSimulation,
simulationsShareState,
- mdModulesNotifier);
+ mdModulesNotifiers);
/* Print to log file */
print_em_start(fplog, cr, walltime_accounting, wcycle, SD);
{
sp_header(fplog, SD, inputrec->em_tol, nsteps);
}
- EnergyEvaluator energyEvaluator{ fplog, mdlog, cr, ms, top_global, &top,
- inputrec, imdSession, pull_work, nrnb, wcycle, gstat,
- vsite, constr, mdAtoms, fr, runScheduleWork, enerd };
+ EnergyEvaluator energyEvaluator{ fplog,
+ mdlog,
+ cr,
+ ms,
+ top_global,
+ top,
+ inputrec,
+ imdSession,
+ pull_work,
+ nrnb,
+ wcycle,
+ gstat,
+ &observablesReducer,
+ vsite,
+ constr,
+ mdAtoms,
+ fr,
+ runScheduleWork,
+ enerd };
/**** HERE STARTS THE LOOP ****
* count is the counter for the number of steps
if (validStep)
{
- energyEvaluator.run(s_try, mu_tot, vir, pres, count, count == 0);
+ energyEvaluator.run(s_try, mu_tot, vir, pres, count, count == 0, count);
}
else
{
mdatoms->tmass,
enerd,
nullptr,
- nullptr,
nullBox,
PTCouplingArrays(),
0,
- nullptr,
- nullptr,
vir,
pres,
nullptr,
/* If energy is not smaller make the step smaller... */
ustep *= 0.5;
- if (DOMAINDECOMP(cr) && s_min->s.ddp_count != cr->dd->ddp_count)
+ if (haveDDAtomOrdering(*cr) && s_min->s.ddp_count != cr->dd->ddp_count)
{
/* Reload the old state */
em_dd_partition_system(fplog,
imdSession,
pull_work,
s_min,
- &top,
+ top,
mdAtoms,
fr,
vsite,
if (imdSession->run(count,
TRUE,
MASTER(cr) ? state_global->box : nullptr,
- MASTER(cr) ? state_global->x.rvec_array() : nullptr,
+ MASTER(cr) ? state_global->x : gmx::ArrayRef<gmx::RVec>(),
0)
&& MASTER(cr))
{
}
count++;
+ observablesReducer.markAsReadyToReduce();
} /* End of the loop */
/* Print some data... */
{
const char* NM = "Normal Mode Analysis";
int nnodes;
- gmx_localtop_t top(top_global->ffparams);
gmx_global_stat_t gstat;
tensor vir, pres;
rvec mu_tot = { 0 };
real* full_matrix = nullptr;
/* added with respect to mdrun */
- int row, col;
- real der_range = 10.0 * std::sqrt(GMX_REAL_EPS);
- real x_min;
- bool bIsMaster = MASTER(cr);
- auto mdatoms = mdAtoms->mdatoms();
+ int row, col;
+ real der_range = 10.0 * std::sqrt(GMX_REAL_EPS);
+ real x_min;
+ bool bIsMaster = MASTER(cr);
+ auto* mdatoms = mdAtoms->mdatoms();
GMX_LOG(mdlog.info)
.asParagraph()
em_state_t state_work{};
+ fr->longRangeNonbondeds->updateAfterPartition(*mdAtoms->mdatoms());
+ ObservablesReducer observablesReducer = observablesReducerBuilder->build();
+
/* Init em and store the local state in state_minimum */
init_em(fplog,
mdlog,
state_global,
top_global,
&state_work,
- &top,
+ top,
nrnb,
fr,
mdAtoms,
mdrunOptions,
cr,
outputProvider,
- mdModulesNotifier,
+ mdModulesNotifiers,
inputrec,
top_global,
nullptr,
{
fprintf(stderr,
"starting normal mode calculation '%s'\n%" PRId64 " steps.\n\n",
- *(top_global->name),
+ *(top_global.name),
inputrec->nsteps);
}
/* Make evaluate_energy do a single node force calculation */
cr->nnodes = 1;
- EnergyEvaluator energyEvaluator{ fplog, mdlog, cr, ms, top_global, &top,
- inputrec, imdSession, pull_work, nrnb, wcycle, gstat,
- vsite, constr, mdAtoms, fr, runScheduleWork, enerd };
- energyEvaluator.run(&state_work, mu_tot, vir, pres, -1, TRUE);
+ EnergyEvaluator energyEvaluator{ fplog,
+ mdlog,
+ cr,
+ ms,
+ top_global,
+ top,
+ inputrec,
+ imdSession,
+ pull_work,
+ nrnb,
+ wcycle,
+ gstat,
+ &observablesReducer,
+ vsite,
+ constr,
+ mdAtoms,
+ fr,
+ runScheduleWork,
+ enerd };
+ energyEvaluator.run(&state_work, mu_tot, vir, pres, -1, TRUE, 0);
cr->nnodes = nnodes;
/* if forces are not small, warn user */
pull_work,
bNS,
force_flags,
- &top,
+ top,
constr,
enerd,
state_work.s.natoms,
&state_work.s.hist,
&state_work.f.view(),
vir,
- mdatoms,
+ *mdatoms,
+ fr->longRangeNonbondeds.get(),
nrnb,
wcycle,
shellfc,
}
else
{
- energyEvaluator.run(&state_work, mu_tot, vir, pres, aid * 2 + dx, FALSE);
+ energyEvaluator.run(&state_work, mu_tot, vir, pres, aid * 2 + dx, FALSE, step);
}
cr->nnodes = nnodes;