#include <ctime>
#include <algorithm>
+#include <limits>
#include <vector>
#include "gromacs/commandline/filenm.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/mdatom.h"
#include "gromacs/mdtypes/mdrunoptions.h"
+#include "gromacs/mdtypes/observablesreducer.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/timing/wallcycle.h"
static void print_em_start(FILE* fplog,
const t_commrec* cr,
gmx_walltime_accounting_t walltime_accounting,
- gmx_wallcycle_t wcycle,
+ gmx_wallcycle* wcycle,
const char* name)
{
walltime_accounting_start_time(walltime_accounting);
- wallcycle_start(wcycle, ewcRUN);
+ wallcycle_start(wcycle, WallCycleCounter::Run);
print_start(fplog, cr, walltime_accounting, name);
}
//! Stop counting time for EM
-static void em_time_end(gmx_walltime_accounting_t walltime_accounting, gmx_wallcycle_t wcycle)
+static void em_time_end(gmx_walltime_accounting_t walltime_accounting, gmx_wallcycle* wcycle)
{
- wallcycle_stop(wcycle, ewcRUN);
+ wallcycle_stop(wcycle, WallCycleCounter::Run);
walltime_accounting_end_time(walltime_accounting);
}
}
}
- if (la_max >= 0 && DOMAINDECOMP(cr))
+ if (la_max >= 0 && haveDDAtomOrdering(*cr))
{
a_max = cr->dd->globalAtomIndices[la_max];
}
}
int* fep_state = MASTER(cr) ? &state_global->fep_state : nullptr;
gmx::ArrayRef<real> lambda = MASTER(cr) ? state_global->lambda : gmx::ArrayRef<real>();
- initialize_lambdas(fplog, *ir, MASTER(cr), fep_state, lambda);
+ initialize_lambdas(fplog,
+ ir->efep,
+ ir->bSimTemp,
+ *ir->fepvals,
+ ir->simtempvals->temperatures,
+ gmx::arrayRefFromArray(ir->opts.ref_t, ir->opts.ngtc),
+ MASTER(cr),
+ fep_state,
+ lambda);
if (ir->eI == IntegrationAlgorithm::NM)
{
top_global,
constr ? constr->numFlexibleConstraints() : 0,
ir->nstcalcenergy,
- DOMAINDECOMP(cr),
+ haveDDAtomOrdering(*cr),
thisRankHasDuty(cr, DUTY_PME));
}
else
}
}
- if (DOMAINDECOMP(cr))
+ if (haveDDAtomOrdering(*cr))
{
+ // Local state only becomes valid now.
dd_init_local_state(*cr->dd, state_global, &ems->s);
/* Distribute the charge groups over the nodes from the master node */
static void finish_em(const t_commrec* cr,
gmx_mdoutf_t outf,
gmx_walltime_accounting_t walltime_accounting,
- gmx_wallcycle_t wcycle)
+ gmx_wallcycle* wcycle)
{
if (!thisRankHasDuty(cr, DUTY_PME))
{
if (confout != nullptr)
{
- if (DOMAINDECOMP(cr))
+ if (haveDDAtomOrdering(*cr))
{
/* If bX=true, x was collected to state_global in the call above */
if (!bX)
if (MASTER(cr))
{
- if (ir->pbcType != PbcType::No && !ir->bPeriodicMols && DOMAINDECOMP(cr))
+ if (ir->pbcType != PbcType::No && !ir->bPeriodicMols && haveDDAtomOrdering(*cr))
{
/* Make molecules whole only for confout writing */
do_pbc_mtop(ir->pbcType, state->s.box, &top_global, state_global->x.rvec_array());
int64_t count)
{
- t_state *s1, *s2;
- int start, end;
- real dvdl_constr;
+ t_state * s1, *s2;
+ int start, end;
+ real dvdl_constr;
int nthreads gmx_unused;
bool validStep = true;
s1 = &ems1->s;
s2 = &ems2->s;
- if (DOMAINDECOMP(cr) && s1->ddp_count != cr->dd->ddp_count)
+ if (haveDDAtomOrdering(*cr) && s1->ddp_count != cr->dd->ddp_count)
{
gmx_incons("state mismatch in do_em_step");
}
state_change_natoms(s2, s1->natoms);
ems2->f.resize(s2->natoms);
}
- if (DOMAINDECOMP(cr) && s2->cg_gl.size() != s1->cg_gl.size())
+ if (haveDDAtomOrdering(*cr) && s2->cg_gl.size() != s1->cg_gl.size())
{
s2->cg_gl.resize(s1->cg_gl.size());
}
start = 0;
end = md->homenr;
- nthreads = gmx_omp_nthreads_get(emntUpdate);
+ nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Update);
#pragma omp parallel num_threads(nthreads)
{
const rvec* x1 = s1->x.rvec_array();
}
}
- if (DOMAINDECOMP(cr))
+ if (haveDDAtomOrdering(*cr))
{
/* OpenMP does not supported unsigned loop variables */
#pragma omp for schedule(static) nowait
}
}
- if (DOMAINDECOMP(cr))
+ if (haveDDAtomOrdering(*cr))
{
s2->ddp_count = s1->ddp_count;
s2->ddp_count_cg_gl = s1->ddp_count_cg_gl;
VirtualSitesHandler* vsite,
gmx::Constraints* constr,
t_nrnb* nrnb,
- gmx_wallcycle_t wcycle)
+ gmx_wallcycle* wcycle)
{
/* Repartition the domain decomposition */
dd_partition_system(fplog,
namespace
{
+//! Copy coordinates, OpenMP parallelized, from \p refCoords to \p coords
+void setCoordinates(std::vector<RVec>* coords, ArrayRef<const RVec> refCoords)
+{
+ coords->resize(refCoords.size());
+
+ const int gmx_unused nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Update);
+#pragma omp parallel for num_threads(nthreads) schedule(static)
+ for (int i = 0; i < ssize(refCoords); i++)
+ {
+ (*coords)[i] = refCoords[i];
+ }
+}
+
+//! Returns the maximum distance an atom moved between two coordinate sets, reduced over all ranks in \p mpiCommMyGroup
+real maxCoordinateDifference(ArrayRef<const RVec> coords1, ArrayRef<const RVec> coords2, MPI_Comm mpiCommMyGroup)
+{
+ GMX_RELEASE_ASSERT(coords1.size() == coords2.size(), "Coordinate counts should match");
+
+ real maxDiffSquared = 0;
+
+ const int gmx_unused nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Update);
+#pragma omp parallel for reduction(max : maxDiffSquared) num_threads(nthreads) schedule(static)
+ for (int i = 0; i < ssize(coords1); i++)
+ {
+ maxDiffSquared = std::max(maxDiffSquared, gmx::norm2(coords1[i] - coords2[i]));
+ }
+
+#if GMX_MPI
+ int numRanks = 1;
+ if (mpiCommMyGroup != MPI_COMM_NULL)
+ {
+ MPI_Comm_size(mpiCommMyGroup, &numRanks);
+ }
+ if (numRanks > 1)
+ {
+ real maxDiffSquaredReduced;
+ MPI_Allreduce(
+ &maxDiffSquared, &maxDiffSquaredReduced, 1, GMX_DOUBLE ? MPI_DOUBLE : MPI_FLOAT, MPI_MAX, mpiCommMyGroup);
+ maxDiffSquared = maxDiffSquaredReduced;
+ }
+#else
+ GMX_UNUSED_VALUE(mpiCommMyGroup);
+#endif
+
+ return std::sqrt(maxDiffSquared);
+}
+
/*! \brief Class to handle the work of setting and doing an energy evaluation.
*
* This class is a mere aggregate of parameters to pass to evaluate an
* unsuited for aggregate initialization. When the types
* improve, the call signature of this method can be reduced.
*/
- void run(em_state_t* ems, rvec mu_tot, tensor vir, tensor pres, int64_t count, gmx_bool bFirst);
+ void run(em_state_t* ems, rvec mu_tot, tensor vir, tensor pres, int64_t count, gmx_bool bFirst, int64_t step);
//! Handles logging (deprecated).
FILE* fplog;
//! Handles logging.
//! Manages flop accounting.
t_nrnb* nrnb;
//! Manages wall cycle accounting.
- gmx_wallcycle_t wcycle;
- //! Coordinates global reduction.
+ gmx_wallcycle* wcycle;
+ //! Legacy coordinator of global reduction.
gmx_global_stat_t gstat;
+ //! Coordinates reduction for observables
+ gmx::ObservablesReducer* observablesReducer;
//! Handles virtual sites.
VirtualSitesHandler* vsite;
//! Handles constraints.
MdrunScheduleWorkload* runScheduleWork;
//! Stores the computed energies.
gmx_enerdata_t* enerd;
+ //! The DD partitioning count at which the pair list was generated
+ int ddpCountPairSearch;
+ //! The local coordinates that were used for pair searching, stored for computing displacements
+ std::vector<RVec> pairSearchCoordinates;
};
-void EnergyEvaluator::run(em_state_t* ems, rvec mu_tot, tensor vir, tensor pres, int64_t count, gmx_bool bFirst)
+void EnergyEvaluator::run(em_state_t* ems, rvec mu_tot, tensor vir, tensor pres, int64_t count, gmx_bool bFirst, int64_t step)
{
real t;
gmx_bool bNS;
/* Set the time to the initial time, the time does not change during EM */
t = inputrec->init_t;
- if (bFirst || (DOMAINDECOMP(cr) && ems->s.ddp_count < cr->dd->ddp_count))
+ if (vsite)
+ {
+ vsite->construct(ems->s.x, {}, ems->s.box, gmx::VSiteOperation::Positions);
+ }
+
+ // Compute the buffer size of the pair list
+ const real bufferSize = inputrec->rlist - std::max(inputrec->rcoulomb, inputrec->rvdw);
+
+ if (bFirst || bufferSize <= 0 || (haveDDAtomOrdering(*cr) && ems->s.ddp_count != ddpCountPairSearch))
{
/* This is the first state or an old state used before the last ns */
bNS = TRUE;
}
else
{
- bNS = FALSE;
- if (inputrec->nstlist > 0)
- {
- bNS = TRUE;
- }
+ // We need to generate a new pairlist when one atom moved more than half the buffer size
+ ArrayRef<const RVec> localCoordinates =
+ ArrayRef<const RVec>(ems->s.x).subArray(0, mdAtoms->mdatoms()->homenr);
+ bNS = 2 * maxCoordinateDifference(pairSearchCoordinates, localCoordinates, cr->mpi_comm_mygroup)
+ > bufferSize;
}
- if (vsite)
- {
- vsite->construct(ems->s.x, {}, ems->s.box, gmx::VSiteOperation::Positions);
- }
-
- if (DOMAINDECOMP(cr) && bNS)
+ if (haveDDAtomOrdering(*cr) && bNS)
{
/* Repartition the domain decomposition */
em_dd_partition_system(
fplog, mdlog, count, cr, top_global, inputrec, imdSession, pull_work, ems, top, mdAtoms, fr, vsite, constr, nrnb, wcycle);
+ ddpCountPairSearch = cr->dd->ddp_count;
+ }
+
+ /* Store the local coordinates that will be used in the pair search, after we re-partitioned */
+ if (bufferSize > 0 && bNS)
+ {
+ ArrayRef<const RVec> localCoordinates =
+ constArrayRefFromArray(ems->s.x.data(), mdAtoms->mdatoms()->homenr);
+ setCoordinates(&pairSearchCoordinates, localCoordinates);
}
+ fr->longRangeNonbondeds->updateAfterPartition(*mdAtoms->mdatoms());
+
/* Calc force & energy on new trial position */
/* do_force always puts the charge groups in the box and shifts again
* We do not unshift, so molecules are always whole in congrad.c
mu_tot,
t,
nullptr,
+ fr->longRangeNonbondeds.get(),
GMX_FORCE_STATECHANGED | GMX_FORCE_ALLFORCES | GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY
| (bNS ? GMX_FORCE_NS : 0),
DDBalanceRegionHandler(cr));
/* Communicate stuff when parallel */
if (PAR(cr) && inputrec->eI != IntegrationAlgorithm::NM)
{
- wallcycle_start(wcycle, ewcMoveE);
+ wallcycle_start(wcycle, WallCycleCounter::MoveE);
global_stat(*gstat,
cr,
shake_vir,
*inputrec,
nullptr,
- gmx::ArrayRef<real>{},
nullptr,
std::vector<real>(1, terminate),
FALSE,
- CGLO_ENERGY | CGLO_PRESSURE | CGLO_CONSTRAINT);
+ CGLO_ENERGY | CGLO_PRESSURE | CGLO_CONSTRAINT,
+ step,
+ observablesReducer);
- wallcycle_stop(wcycle, ewcMoveE);
+ wallcycle_stop(wcycle, WallCycleCounter::MoveE);
}
if (fr->dispersionCorrection)
* and might have to sum it in parallel runs.
*/
- if (!DOMAINDECOMP(cr)
+ if (!haveDDAtomOrdering(*cr)
|| (s_min->s.ddp_count == cr->dd->ddp_count && s_b->s.ddp_count == cr->dd->ddp_count))
{
auto fm = s_min->f.view().force();
{
const char* CG = "Polak-Ribiere Conjugate Gradients";
- gmx_localtop_t top(top_global.ffparams);
gmx_global_stat_t gstat;
double tmp, minstep;
real stepsize;
tensor vir, pres;
int number_steps, neval = 0, nstcg = inputrec->nstcgsteep;
int m, step, nminstep;
- auto mdatoms = mdAtoms->mdatoms();
+ auto* mdatoms = mdAtoms->mdatoms();
GMX_LOG(mdlog.info)
.asParagraph()
em_state_t* s_b = &s2;
em_state_t* s_c = &s3;
+ ObservablesReducer observablesReducer = observablesReducerBuilder->build();
+
/* Init em and store the local state in s_min */
init_em(fplog,
mdlog,
state_global,
top_global,
s_min,
- &top,
+ top,
nrnb,
fr,
mdAtoms,
mdrunOptions,
cr,
outputProvider,
- mdModulesNotifier,
+ mdModulesNotifiers,
inputrec,
top_global,
nullptr,
false,
StartingBehavior::NewSimulation,
simulationsShareState,
- mdModulesNotifier);
+ mdModulesNotifiers);
/* Print to log file */
print_em_start(fplog, cr, walltime_accounting, wcycle, CG);
sp_header(fplog, CG, inputrec->em_tol, number_steps);
}
- EnergyEvaluator energyEvaluator{ fplog, mdlog, cr, ms, top_global, &top,
- inputrec, imdSession, pull_work, nrnb, wcycle, gstat,
- vsite, constr, mdAtoms, fr, runScheduleWork, enerd };
+ EnergyEvaluator energyEvaluator{ fplog,
+ mdlog,
+ cr,
+ ms,
+ top_global,
+ top,
+ inputrec,
+ imdSession,
+ pull_work,
+ nrnb,
+ wcycle,
+ gstat,
+ &observablesReducer,
+ vsite,
+ constr,
+ mdAtoms,
+ fr,
+ runScheduleWork,
+ enerd,
+ -1,
+ {} };
/* Call the force routine and some auxiliary (neighboursearching etc.) */
/* do_force always puts the charge groups in the box and shifts again
* We do not unshift, so molecules are always whole in congrad.c
*/
- energyEvaluator.run(s_min, mu_tot, vir, pres, -1, TRUE);
+ energyEvaluator.run(s_min, mu_tot, vir, pres, -1, TRUE, step);
+ observablesReducer.markAsReadyToReduce();
if (MASTER(cr))
{
mdatoms->tmass,
enerd,
nullptr,
- nullptr,
nullBox,
PTCouplingArrays(),
0,
- nullptr,
- nullptr,
vir,
pres,
nullptr,
a = 0.0;
c = a + stepsize; /* reference position along line is zero */
- if (DOMAINDECOMP(cr) && s_min->s.ddp_count < cr->dd->ddp_count)
+ if (haveDDAtomOrdering(*cr) && s_min->s.ddp_count < cr->dd->ddp_count)
{
em_dd_partition_system(fplog,
mdlog,
imdSession,
pull_work,
s_min,
- &top,
+ top,
mdAtoms,
fr,
vsite,
neval++;
/* Calculate energy for the trial step */
- energyEvaluator.run(s_c, mu_tot, vir, pres, -1, FALSE);
+ energyEvaluator.run(s_c, mu_tot, vir, pres, -1, FALSE, step);
+ observablesReducer.markAsReadyToReduce();
/* Calc derivative along line */
const rvec* pc = s_c->s.cg_p.rvec_array();
b = 0.5 * (a + c);
}
- if (DOMAINDECOMP(cr) && s_min->s.ddp_count != cr->dd->ddp_count)
+ if (haveDDAtomOrdering(*cr) && s_min->s.ddp_count != cr->dd->ddp_count)
{
/* Reload the old state */
em_dd_partition_system(fplog,
imdSession,
pull_work,
s_min,
- &top,
+ top,
mdAtoms,
fr,
vsite,
neval++;
/* Calculate energy for the trial step */
- energyEvaluator.run(s_b, mu_tot, vir, pres, -1, FALSE);
+ energyEvaluator.run(s_b, mu_tot, vir, pres, -1, FALSE, step);
+ observablesReducer.markAsReadyToReduce();
/* p does not change within a step, but since the domain decomposition
* might change, we have to use cg_p of s_b here.
mdatoms->tmass,
enerd,
nullptr,
- nullptr,
nullBox,
PTCouplingArrays(),
0,
- nullptr,
- nullptr,
vir,
pres,
nullptr,
* If we have reached machine precision, converged is already set to true.
*/
converged = converged || (s_min->fmax < inputrec->em_tol);
-
+ observablesReducer.markAsReadyToReduce();
} /* End of the loop */
if (converged)
{
static const char* LBFGS = "Low-Memory BFGS Minimizer";
em_state_t ems;
- gmx_localtop_t top(top_global.ffparams);
gmx_global_stat_t gstat;
- auto mdatoms = mdAtoms->mdatoms();
+ auto* mdatoms = mdAtoms->mdatoms();
GMX_LOG(mdlog.info)
.asParagraph()
"be available in a different form in a future version of GROMACS, "
"e.g. gmx minimize and an .mdp option.");
+ if (haveDDAtomOrdering(*cr))
+ {
+ gmx_fatal(FARGS, "L_BFGS is currently not supported");
+ }
if (PAR(cr))
{
gmx_fatal(FARGS, "L-BFGS minimization only supports a single rank");
int step = 0;
int neval = 0;
+ ObservablesReducer observablesReducer = observablesReducerBuilder->build();
+
/* Init em */
init_em(fplog,
mdlog,
state_global,
top_global,
&ems,
- &top,
+ top,
nrnb,
fr,
mdAtoms,
mdrunOptions,
cr,
outputProvider,
- mdModulesNotifier,
+ mdModulesNotifiers,
inputrec,
top_global,
nullptr,
false,
StartingBehavior::NewSimulation,
simulationsShareState,
- mdModulesNotifier);
+ mdModulesNotifiers);
const int start = 0;
const int end = mdatoms->homenr;
* We do not unshift, so molecules are always whole
*/
neval++;
- EnergyEvaluator energyEvaluator{ fplog, mdlog, cr, ms, top_global, &top,
- inputrec, imdSession, pull_work, nrnb, wcycle, gstat,
- vsite, constr, mdAtoms, fr, runScheduleWork, enerd };
+ EnergyEvaluator energyEvaluator{ fplog,
+ mdlog,
+ cr,
+ ms,
+ top_global,
+ top,
+ inputrec,
+ imdSession,
+ pull_work,
+ nrnb,
+ wcycle,
+ gstat,
+ &observablesReducer,
+ vsite,
+ constr,
+ mdAtoms,
+ fr,
+ runScheduleWork,
+ enerd };
rvec mu_tot;
tensor vir;
tensor pres;
- energyEvaluator.run(&ems, mu_tot, vir, pres, -1, TRUE);
+ energyEvaluator.run(&ems, mu_tot, vir, pres, -1, TRUE, step);
if (MASTER(cr))
{
mdatoms->tmass,
enerd,
nullptr,
- nullptr,
nullBox,
PTCouplingArrays(),
0,
- nullptr,
- nullptr,
vir,
pres,
nullptr,
neval++;
// Calculate energy for the trial step in position C
- energyEvaluator.run(sc, mu_tot, vir, pres, step, FALSE);
+ energyEvaluator.run(sc, mu_tot, vir, pres, step, FALSE, step);
// Calc line gradient in position C
real* fc = static_cast<real*>(sc->f.view().force()[0]);
neval++;
// Calculate energy for the trial step in point B
- energyEvaluator.run(sb, mu_tot, vir, pres, step, FALSE);
+ energyEvaluator.run(sb, mu_tot, vir, pres, step, FALSE, step);
fnorm = sb->fnorm;
// Calculate gradient in point B
mdatoms->tmass,
enerd,
nullptr,
- nullptr,
nullBox,
PTCouplingArrays(),
0,
- nullptr,
- nullptr,
vir,
pres,
nullptr,
* If we have reached machine precision, converged is already set to true.
*/
converged = converged || (ems.fmax < inputrec->em_tol);
-
+ observablesReducer.markAsReadyToReduce();
} /* End of the loop */
if (converged)
void LegacySimulator::do_steep()
{
const char* SD = "Steepest Descents";
- gmx_localtop_t top(top_global.ffparams);
gmx_global_stat_t gstat;
real stepsize;
real ustep;
int nsteps;
int count = 0;
int steps_accepted = 0;
- auto mdatoms = mdAtoms->mdatoms();
+ auto* mdatoms = mdAtoms->mdatoms();
GMX_LOG(mdlog.info)
.asParagraph()
em_state_t* s_min = &s0;
em_state_t* s_try = &s1;
+ ObservablesReducer observablesReducer = observablesReducerBuilder->build();
+
/* Init em and store the local state in s_try */
init_em(fplog,
mdlog,
state_global,
top_global,
s_try,
- &top,
+ top,
nrnb,
fr,
mdAtoms,
mdrunOptions,
cr,
outputProvider,
- mdModulesNotifier,
+ mdModulesNotifiers,
inputrec,
top_global,
nullptr,
false,
StartingBehavior::NewSimulation,
simulationsShareState,
- mdModulesNotifier);
+ mdModulesNotifiers);
/* Print to log file */
print_em_start(fplog, cr, walltime_accounting, wcycle, SD);
{
sp_header(fplog, SD, inputrec->em_tol, nsteps);
}
- EnergyEvaluator energyEvaluator{ fplog, mdlog, cr, ms, top_global, &top,
- inputrec, imdSession, pull_work, nrnb, wcycle, gstat,
- vsite, constr, mdAtoms, fr, runScheduleWork, enerd };
+ EnergyEvaluator energyEvaluator{ fplog,
+ mdlog,
+ cr,
+ ms,
+ top_global,
+ top,
+ inputrec,
+ imdSession,
+ pull_work,
+ nrnb,
+ wcycle,
+ gstat,
+ &observablesReducer,
+ vsite,
+ constr,
+ mdAtoms,
+ fr,
+ runScheduleWork,
+ enerd };
/**** HERE STARTS THE LOOP ****
* count is the counter for the number of steps
if (validStep)
{
- energyEvaluator.run(s_try, mu_tot, vir, pres, count, count == 0);
+ energyEvaluator.run(s_try, mu_tot, vir, pres, count, count == 0, count);
}
else
{
mdatoms->tmass,
enerd,
nullptr,
- nullptr,
nullBox,
PTCouplingArrays(),
0,
- nullptr,
- nullptr,
vir,
pres,
nullptr,
/* If energy is not smaller make the step smaller... */
ustep *= 0.5;
- if (DOMAINDECOMP(cr) && s_min->s.ddp_count != cr->dd->ddp_count)
+ if (haveDDAtomOrdering(*cr) && s_min->s.ddp_count != cr->dd->ddp_count)
{
/* Reload the old state */
em_dd_partition_system(fplog,
imdSession,
pull_work,
s_min,
- &top,
+ top,
mdAtoms,
fr,
vsite,
}
count++;
+ observablesReducer.markAsReadyToReduce();
} /* End of the loop */
/* Print some data... */
{
const char* NM = "Normal Mode Analysis";
int nnodes;
- gmx_localtop_t top(top_global.ffparams);
gmx_global_stat_t gstat;
tensor vir, pres;
rvec mu_tot = { 0 };
real* full_matrix = nullptr;
/* added with respect to mdrun */
- int row, col;
- real der_range = 10.0 * std::sqrt(GMX_REAL_EPS);
- real x_min;
- bool bIsMaster = MASTER(cr);
- auto mdatoms = mdAtoms->mdatoms();
+ int row, col;
+ real der_range = 10.0 * std::sqrt(GMX_REAL_EPS);
+ real x_min;
+ bool bIsMaster = MASTER(cr);
+ auto* mdatoms = mdAtoms->mdatoms();
GMX_LOG(mdlog.info)
.asParagraph()
em_state_t state_work{};
+ fr->longRangeNonbondeds->updateAfterPartition(*mdAtoms->mdatoms());
+ ObservablesReducer observablesReducer = observablesReducerBuilder->build();
+
/* Init em and store the local state in state_minimum */
init_em(fplog,
mdlog,
state_global,
top_global,
&state_work,
- &top,
+ top,
nrnb,
fr,
mdAtoms,
mdrunOptions,
cr,
outputProvider,
- mdModulesNotifier,
+ mdModulesNotifiers,
inputrec,
top_global,
nullptr,
/* Make evaluate_energy do a single node force calculation */
cr->nnodes = 1;
- EnergyEvaluator energyEvaluator{ fplog, mdlog, cr, ms, top_global, &top,
- inputrec, imdSession, pull_work, nrnb, wcycle, gstat,
- vsite, constr, mdAtoms, fr, runScheduleWork, enerd };
- energyEvaluator.run(&state_work, mu_tot, vir, pres, -1, TRUE);
+ EnergyEvaluator energyEvaluator{ fplog,
+ mdlog,
+ cr,
+ ms,
+ top_global,
+ top,
+ inputrec,
+ imdSession,
+ pull_work,
+ nrnb,
+ wcycle,
+ gstat,
+ &observablesReducer,
+ vsite,
+ constr,
+ mdAtoms,
+ fr,
+ runScheduleWork,
+ enerd };
+ energyEvaluator.run(&state_work, mu_tot, vir, pres, -1, TRUE, 0);
cr->nnodes = nnodes;
/* if forces are not small, warn user */
pull_work,
bNS,
force_flags,
- &top,
+ top,
constr,
enerd,
state_work.s.natoms,
&state_work.f.view(),
vir,
*mdatoms,
+ fr->longRangeNonbondeds.get(),
nrnb,
wcycle,
shellfc,
}
else
{
- energyEvaluator.run(&state_work, mu_tot, vir, pres, aid * 2 + dx, FALSE);
+ energyEvaluator.run(&state_work, mu_tot, vir, pres, aid * 2 + dx, FALSE, step);
}
cr->nnodes = nnodes;