*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017 The GROMACS development team.
 * Copyright (c) 2018,2019,2020,2021, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#include <ctime>
#include <algorithm>
+#include <limits>
#include <vector>
#include "gromacs/commandline/filenm.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/domdec/mdsetup.h"
#include "gromacs/domdec/partition.h"
-#include "gromacs/ewald/pme.h"
+#include "gromacs/ewald/pme_pp.h"
#include "gromacs/fileio/confio.h"
#include "gromacs/fileio/mtxio.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/imd/imd.h"
#include "gromacs/linearalgebra/sparsematrix.h"
-#include "gromacs/listed_forces/manage_threading.h"
+#include "gromacs/listed_forces/listed_forces.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/constr.h"
+#include "gromacs/mdlib/coupling.h"
#include "gromacs/mdlib/dispersioncorrection.h"
#include "gromacs/mdlib/ebin.h"
#include "gromacs/mdlib/enerdata_utils.h"
#include "gromacs/mdlib/energyoutput.h"
#include "gromacs/mdlib/force.h"
+#include "gromacs/mdlib/force_flags.h"
#include "gromacs/mdlib/forcerec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/md_support.h"
#include "gromacs/mdlib/vsite.h"
#include "gromacs/mdrunutility/handlerestart.h"
#include "gromacs/mdrunutility/printtime.h"
+#include "gromacs/mdtypes/checkpointdata.h"
#include "gromacs/mdtypes/commrec.h"
+#include "gromacs/mdtypes/forcebuffers.h"
+#include "gromacs/mdtypes/forcerec.h"
#include "gromacs/mdtypes/inputrec.h"
+#include "gromacs/mdtypes/interaction_const.h"
#include "gromacs/mdtypes/md_enums.h"
+#include "gromacs/mdtypes/mdatom.h"
#include "gromacs/mdtypes/mdrunoptions.h"
+#include "gromacs/mdtypes/observablesreducer.h"
#include "gromacs/mdtypes/state.h"
-#include "gromacs/pbcutil/mshift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/walltime_accounting.h"
#include "legacysimulator.h"
#include "shellfc.h"
+using gmx::ArrayRef;
using gmx::MdrunScheduleWorkload;
+using gmx::RVec;
+using gmx::VirtualSitesHandler;
//! Utility structure for manipulating states during EM
-typedef struct
+typedef struct em_state
{
//! Copy of the global state
t_state s;
//! Force array
- PaddedHostVector<gmx::RVec> f;
+ gmx::ForceBuffers f;
//! Potential energy
real epot;
//! Norm of the force
static void print_em_start(FILE* fplog,
const t_commrec* cr,
gmx_walltime_accounting_t walltime_accounting,
- gmx_wallcycle_t wcycle,
+ gmx_wallcycle* wcycle,
const char* name)
{
walltime_accounting_start_time(walltime_accounting);
- wallcycle_start(wcycle, ewcRUN);
+ wallcycle_start(wcycle, WallCycleCounter::Run);
print_start(fplog, cr, walltime_accounting, name);
}
//! Stop counting time for EM
-static void em_time_end(gmx_walltime_accounting_t walltime_accounting, gmx_wallcycle_t wcycle)
+static void em_time_end(gmx_walltime_accounting_t walltime_accounting, gmx_wallcycle* wcycle)
{
- wallcycle_stop(wcycle, ewcRUN);
+ wallcycle_stop(wcycle, WallCycleCounter::Run);
walltime_accounting_end_time(walltime_accounting);
}
fprintf(fp,
"\n%s converged to machine precision in %s steps,\n"
"but did not reach the requested Fmax < %g.\n",
- alg, gmx_step_str(count, buf), ftol);
+ alg,
+ gmx_step_str(count, buf),
+ ftol);
}
else
{
- fprintf(fp, "\n%s did not converge to Fmax < %g in %s steps.\n", alg, ftol,
- gmx_step_str(count, buf));
+ fprintf(fp, "\n%s did not converge to Fmax < %g in %s steps.\n", alg, ftol, gmx_step_str(count, buf));
}
#if GMX_DOUBLE
}
//! Compute the norm and max of the force array in parallel
-static void get_f_norm_max(const t_commrec* cr,
- t_grpopts* opts,
- t_mdatoms* mdatoms,
- const rvec* f,
- real* fnorm,
- real* fmax,
- int* a_fmax)
+static void get_f_norm_max(const t_commrec* cr,
+ const t_grpopts* opts,
+ t_mdatoms* mdatoms,
+ gmx::ArrayRef<const gmx::RVec> f,
+ real* fnorm,
+ real* fmax,
+ int* a_fmax)
{
double fnorm2, *sum;
real fmax2, fam;
}
}
- if (la_max >= 0 && DOMAINDECOMP(cr))
+ if (la_max >= 0 && haveDDAtomOrdering(*cr))
{
a_max = cr->dd->globalAtomIndices[la_max];
}
}
//! Compute the norm of the force
-static void get_state_f_norm_max(const t_commrec* cr, t_grpopts* opts, t_mdatoms* mdatoms, em_state_t* ems)
+static void get_state_f_norm_max(const t_commrec* cr, const t_grpopts* opts, t_mdatoms* mdatoms, em_state_t* ems)
{
- get_f_norm_max(cr, opts, mdatoms, ems->f.rvec_array(), &ems->fnorm, &ems->fmax, &ems->a_fmax);
+ get_f_norm_max(cr, opts, mdatoms, ems->f.view().force(), &ems->fnorm, &ems->fmax, &ems->a_fmax);
}
//! Initialize the energy minimization
const gmx::MDLogger& mdlog,
const char* title,
const t_commrec* cr,
- t_inputrec* ir,
+ const t_inputrec* ir,
gmx::ImdSession* imdSession,
pull_t* pull_work,
t_state* state_global,
- gmx_mtop_t* top_global,
+ const gmx_mtop_t& top_global,
em_state_t* ems,
gmx_localtop_t* top,
t_nrnb* nrnb,
t_forcerec* fr,
- t_graph** graph,
gmx::MDAtoms* mdAtoms,
gmx_global_stat_t* gstat,
- gmx_vsite_t* vsite,
+ VirtualSitesHandler* vsite,
gmx::Constraints* constr,
gmx_shellfc_t** shellfc)
{
{
state_global->ngtc = 0;
}
- initialize_lambdas(fplog, *ir, MASTER(cr), &(state_global->fep_state), state_global->lambda, nullptr);
+ int* fep_state = MASTER(cr) ? &state_global->fep_state : nullptr;
+ gmx::ArrayRef<real> lambda = MASTER(cr) ? state_global->lambda : gmx::ArrayRef<real>();
+ initialize_lambdas(fplog,
+ ir->efep,
+ ir->bSimTemp,
+ *ir->fepvals,
+ ir->simtempvals->temperatures,
+ gmx::arrayRefFromArray(ir->opts.ref_t, ir->opts.ngtc),
+ MASTER(cr),
+ fep_state,
+ lambda);
- if (ir->eI == eiNM)
+ if (ir->eI == IntegrationAlgorithm::NM)
{
GMX_ASSERT(shellfc != nullptr, "With NM we always support shells");
- *shellfc = init_shell_flexcon(stdout, top_global, constr ? constr->numFlexibleConstraints() : 0,
- ir->nstcalcenergy, DOMAINDECOMP(cr));
+ *shellfc = init_shell_flexcon(stdout,
+ top_global,
+ constr ? constr->numFlexibleConstraints() : 0,
+ ir->nstcalcenergy,
+ haveDDAtomOrdering(*cr),
+ thisRankHasDuty(cr, DUTY_PME));
}
else
{
}
}
- auto mdatoms = mdAtoms->mdatoms();
- if (DOMAINDECOMP(cr))
+ if (haveDDAtomOrdering(*cr))
{
- top->useInDomainDecomp_ = true;
- dd_init_local_top(*top_global, top);
-
- dd_init_local_state(cr->dd, state_global, &ems->s);
+ // Local state only becomes valid now.
+ dd_init_local_state(*cr->dd, state_global, &ems->s);
/* Distribute the charge groups over the nodes from the master node */
- dd_partition_system(fplog, mdlog, ir->init_step, cr, TRUE, 1, state_global, *top_global, ir,
- imdSession, pull_work, &ems->s, &ems->f, mdAtoms, top, fr, vsite,
- constr, nrnb, nullptr, FALSE);
- dd_store_state(cr->dd, &ems->s);
-
- *graph = nullptr;
+ dd_partition_system(fplog,
+ mdlog,
+ ir->init_step,
+ cr,
+ TRUE,
+ 1,
+ state_global,
+ top_global,
+ *ir,
+ imdSession,
+ pull_work,
+ &ems->s,
+ &ems->f,
+ mdAtoms,
+ top,
+ fr,
+ vsite,
+ constr,
+ nrnb,
+ nullptr,
+ FALSE);
+ dd_store_state(*cr->dd, &ems->s);
}
else
{
/* Just copy the state */
ems->s = *state_global;
state_change_natoms(&ems->s, ems->s.natoms);
- ems->f.resizeWithPadding(ems->s.natoms);
- mdAlgorithmsSetupAtomData(cr, ir, *top_global, top, fr, graph, mdAtoms, constr, vsite,
- shellfc ? *shellfc : nullptr);
-
- if (vsite)
- {
- set_vsite_top(vsite, top, mdatoms);
- }
+ mdAlgorithmsSetupAtomData(
+ cr, *ir, top_global, top, fr, &ems->f, mdAtoms, constr, vsite, shellfc ? *shellfc : nullptr);
}
- update_mdatoms(mdAtoms->mdatoms(), ems->s.lambda[efptMASS]);
+ update_mdatoms(mdAtoms->mdatoms(), ems->s.lambda[FreeEnergyPerturbationCouplingType::Mass]);
if (constr)
{
// TODO how should this cross-module support dependency be managed?
- if (ir->eConstrAlg == econtSHAKE && gmx_mtop_ftype_count(top_global, F_CONSTR) > 0)
+ if (ir->eConstrAlg == ConstraintAlgorithm::Shake && gmx_mtop_ftype_count(top_global, F_CONSTR) > 0)
{
- gmx_fatal(FARGS, "Can not do energy minimization with %s, use %s\n",
- econstr_names[econtSHAKE], econstr_names[econtLINCS]);
+ gmx_fatal(FARGS,
+ "Can not do energy minimization with %s, use %s\n",
+ enumValueToString(ConstraintAlgorithm::Shake),
+ enumValueToString(ConstraintAlgorithm::Lincs));
}
if (!ir->bContinuation)
{
/* Constrain the starting coordinates */
- dvdl_constr = 0;
- constr->apply(TRUE, TRUE, -1, 0, 1.0, ems->s.x.rvec_array(), ems->s.x.rvec_array(),
- nullptr, ems->s.box, ems->s.lambda[efptFEP], &dvdl_constr, nullptr,
- nullptr, gmx::ConstraintVariable::Positions);
+ bool needsLogging = true;
+ bool computeEnergy = true;
+ bool computeVirial = false;
+ dvdl_constr = 0;
+ constr->apply(needsLogging,
+ computeEnergy,
+ -1,
+ 0,
+ 1.0,
+ ems->s.x.arrayRefWithPadding(),
+ ems->s.x.arrayRefWithPadding(),
+ ArrayRef<RVec>(),
+ ems->s.box,
+ ems->s.lambda[FreeEnergyPerturbationCouplingType::Fep],
+ &dvdl_constr,
+ gmx::ArrayRefWithPadding<RVec>(),
+ computeVirial,
+ nullptr,
+ gmx::ConstraintVariable::Positions);
}
}
static void finish_em(const t_commrec* cr,
gmx_mdoutf_t outf,
gmx_walltime_accounting_t walltime_accounting,
- gmx_wallcycle_t wcycle)
+ gmx_wallcycle* wcycle)
{
if (!thisRankHasDuty(cr, DUTY_PME))
{
gmx_bool bX,
gmx_bool bF,
const char* confout,
- gmx_mtop_t* top_global,
- t_inputrec* ir,
+ const gmx_mtop_t& top_global,
+ const t_inputrec* ir,
int64_t step,
em_state_t* state,
t_state* state_global,
mdof_flags |= MDOF_IMD;
}
- mdoutf_write_to_trajectory_files(fplog, cr, outf, mdof_flags, top_global->natoms, step,
- static_cast<double>(step), &state->s, state_global,
- observablesHistory, state->f);
+ gmx::WriteCheckpointDataHolder checkpointDataHolder;
+ mdoutf_write_to_trajectory_files(fplog,
+ cr,
+ outf,
+ mdof_flags,
+ top_global.natoms,
+ step,
+ static_cast<double>(step),
+ &state->s,
+ state_global,
+ observablesHistory,
+ state->f.view().force(),
+ &checkpointDataHolder);
if (confout != nullptr)
{
- if (DOMAINDECOMP(cr))
+ if (haveDDAtomOrdering(*cr))
{
/* If bX=true, x was collected to state_global in the call above */
if (!bX)
{
auto globalXRef = MASTER(cr) ? state_global->x : gmx::ArrayRef<gmx::RVec>();
- dd_collect_vec(cr->dd, &state->s, state->s.x, globalXRef);
+ dd_collect_vec(
+ cr->dd, state->s.ddp_count, state->s.ddp_count_cg_gl, state->s.cg_gl, state->s.x, globalXRef);
}
}
else
if (MASTER(cr))
{
- if (ir->ePBC != epbcNONE && !ir->bPeriodicMols && DOMAINDECOMP(cr))
+ if (ir->pbcType != PbcType::No && !ir->bPeriodicMols && haveDDAtomOrdering(*cr))
{
/* Make molecules whole only for confout writing */
- do_pbc_mtop(ir->ePBC, state->s.box, top_global, state_global->x.rvec_array());
+ do_pbc_mtop(ir->pbcType, state->s.box, &top_global, state_global->x.rvec_array());
}
- write_sto_conf_mtop(confout, *top_global->name, top_global,
- state_global->x.rvec_array(), nullptr, ir->ePBC, state->s.box);
+ write_sto_conf_mtop(confout,
+ *top_global.name,
+ top_global,
+ state_global->x.rvec_array(),
+ nullptr,
+ ir->pbcType,
+ state->s.box);
}
}
}
//! \brief Do one minimization step
//
// \returns true when the step succeeded, false when a constraint error occurred
-static bool do_em_step(const t_commrec* cr,
- t_inputrec* ir,
- t_mdatoms* md,
- em_state_t* ems1,
- real a,
- const PaddedHostVector<gmx::RVec>* force,
- em_state_t* ems2,
- gmx::Constraints* constr,
- int64_t count)
+static bool do_em_step(const t_commrec* cr,
+ const t_inputrec* ir,
+ t_mdatoms* md,
+ em_state_t* ems1,
+ real a,
+ gmx::ArrayRefWithPadding<const gmx::RVec> force,
+ em_state_t* ems2,
+ gmx::Constraints* constr,
+ int64_t count)
{
- t_state *s1, *s2;
- int start, end;
- real dvdl_constr;
+ t_state * s1, *s2;
+ int start, end;
+ real dvdl_constr;
int nthreads gmx_unused;
bool validStep = true;
s1 = &ems1->s;
s2 = &ems2->s;
- if (DOMAINDECOMP(cr) && s1->ddp_count != cr->dd->ddp_count)
+ if (haveDDAtomOrdering(*cr) && s1->ddp_count != cr->dd->ddp_count)
{
gmx_incons("state mismatch in do_em_step");
}
if (s2->natoms != s1->natoms)
{
state_change_natoms(s2, s1->natoms);
- ems2->f.resizeWithPadding(s2->natoms);
+ ems2->f.resize(s2->natoms);
}
- if (DOMAINDECOMP(cr) && s2->cg_gl.size() != s1->cg_gl.size())
+ if (haveDDAtomOrdering(*cr) && s2->cg_gl.size() != s1->cg_gl.size())
{
s2->cg_gl.resize(s1->cg_gl.size());
}
start = 0;
end = md->homenr;
- nthreads = gmx_omp_nthreads_get(emntUpdate);
+ nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Update);
#pragma omp parallel num_threads(nthreads)
{
const rvec* x1 = s1->x.rvec_array();
rvec* x2 = s2->x.rvec_array();
- const rvec* f = force->rvec_array();
+ const rvec* f = as_rvec_array(force.unpaddedArrayRef().data());
int gf = 0;
#pragma omp for schedule(static) nowait
GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
}
- if (s2->flags & (1 << estCGP))
+ if (s2->flags & enumValueToBitMask(StateEntry::Cgp))
{
/* Copy the CG p vector */
const rvec* p1 = s1->cg_p.rvec_array();
}
}
- if (DOMAINDECOMP(cr))
+ if (haveDDAtomOrdering(*cr))
{
/* OpenMP does not supported unsigned loop variables */
#pragma omp for schedule(static) nowait
}
}
- if (DOMAINDECOMP(cr))
+ if (haveDDAtomOrdering(*cr))
{
s2->ddp_count = s1->ddp_count;
s2->ddp_count_cg_gl = s1->ddp_count_cg_gl;
if (constr)
{
dvdl_constr = 0;
- validStep = constr->apply(TRUE, TRUE, count, 0, 1.0, s1->x.rvec_array(), s2->x.rvec_array(),
- nullptr, s2->box, s2->lambda[efptBONDED], &dvdl_constr, nullptr,
- nullptr, gmx::ConstraintVariable::Positions);
+ validStep = constr->apply(TRUE,
+ TRUE,
+ count,
+ 0,
+ 1.0,
+ s1->x.arrayRefWithPadding(),
+ s2->x.arrayRefWithPadding(),
+ ArrayRef<RVec>(),
+ s2->box,
+ s2->lambda[FreeEnergyPerturbationCouplingType::Bonded],
+ &dvdl_constr,
+ gmx::ArrayRefWithPadding<RVec>(),
+ false,
+ nullptr,
+ gmx::ConstraintVariable::Positions);
if (cr->nnodes > 1)
{
}
// We should move this check to the different minimizers
- if (!validStep && ir->eI != eiSteep)
+ if (!validStep && ir->eI != IntegrationAlgorithm::Steep)
{
gmx_fatal(FARGS,
"The coordinates could not be constrained. Minimizer '%s' can not handle "
"constraint failures, use minimizer '%s' before using '%s'.",
- EI(ir->eI), EI(eiSteep), EI(ir->eI));
+ enumValueToString(ir->eI),
+ enumValueToString(IntegrationAlgorithm::Steep),
+ enumValueToString(ir->eI));
}
}
const gmx::MDLogger& mdlog,
int step,
const t_commrec* cr,
- gmx_mtop_t* top_global,
- t_inputrec* ir,
+ const gmx_mtop_t& top_global,
+ const t_inputrec* ir,
gmx::ImdSession* imdSession,
pull_t* pull_work,
em_state_t* ems,
gmx_localtop_t* top,
gmx::MDAtoms* mdAtoms,
t_forcerec* fr,
- gmx_vsite_t* vsite,
+ VirtualSitesHandler* vsite,
gmx::Constraints* constr,
t_nrnb* nrnb,
- gmx_wallcycle_t wcycle)
+ gmx_wallcycle* wcycle)
{
/* Repartition the domain decomposition */
- dd_partition_system(fplog, mdlog, step, cr, FALSE, 1, nullptr, *top_global, ir, imdSession, pull_work,
- &ems->s, &ems->f, mdAtoms, top, fr, vsite, constr, nrnb, wcycle, FALSE);
- dd_store_state(cr->dd, &ems->s);
+ dd_partition_system(fplog,
+ mdlog,
+ step,
+ cr,
+ FALSE,
+ 1,
+ nullptr,
+ top_global,
+ *ir,
+ imdSession,
+ pull_work,
+ &ems->s,
+ &ems->f,
+ mdAtoms,
+ top,
+ fr,
+ vsite,
+ constr,
+ nrnb,
+ wcycle,
+ FALSE);
+ dd_store_state(*cr->dd, &ems->s);
}
namespace
{
+//! Copy coordinates, OpenMP parallelized, from \p refCoords to coords
+void setCoordinates(std::vector<RVec>* coords, ArrayRef<const RVec> refCoords)
+{
+ coords->resize(refCoords.size());
+
+ const int gmx_unused nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Update);
+#pragma omp parallel for num_threads(nthreads) schedule(static)
+ for (int i = 0; i < ssize(refCoords); i++)
+ {
+ (*coords)[i] = refCoords[i];
+ }
+}
+
+//! Returns the maximum difference an atom moved between two coordinate sets, over all ranks
+real maxCoordinateDifference(ArrayRef<const RVec> coords1, ArrayRef<const RVec> coords2, MPI_Comm mpiCommMyGroup)
+{
+ GMX_RELEASE_ASSERT(coords1.size() == coords2.size(), "Coordinate counts should match");
+
+ real maxDiffSquared = 0;
+
+ const int gmx_unused nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Update);
+#pragma omp parallel for reduction(max : maxDiffSquared) num_threads(nthreads) schedule(static)
+ for (int i = 0; i < ssize(coords1); i++)
+ {
+ maxDiffSquared = std::max(maxDiffSquared, gmx::norm2(coords1[i] - coords2[i]));
+ }
+
+#if GMX_MPI
+ int numRanks = 1;
+ if (mpiCommMyGroup != MPI_COMM_NULL)
+ {
+ MPI_Comm_size(mpiCommMyGroup, &numRanks);
+ }
+ if (numRanks > 1)
+ {
+ real maxDiffSquaredReduced;
+ MPI_Allreduce(
+ &maxDiffSquared, &maxDiffSquaredReduced, 1, GMX_DOUBLE ? MPI_DOUBLE : MPI_FLOAT, MPI_MAX, mpiCommMyGroup);
+ maxDiffSquared = maxDiffSquaredReduced;
+ }
+#else
+ GMX_UNUSED_VALUE(mpiCommMyGroup);
+#endif
+
+ return std::sqrt(maxDiffSquared);
+}
+
/*! \brief Class to handle the work of setting and doing an energy evaluation.
*
* This class is a mere aggregate of parameters to pass to evaluate an
* unsuited for aggregate initialization. When the types
* improve, the call signature of this method can be reduced.
*/
- void run(em_state_t* ems, rvec mu_tot, tensor vir, tensor pres, int64_t count, gmx_bool bFirst);
+ void run(em_state_t* ems, rvec mu_tot, tensor vir, tensor pres, int64_t count, gmx_bool bFirst, int64_t step);
//! Handles logging (deprecated).
FILE* fplog;
//! Handles logging.
//! Coordinates multi-simulations.
const gmx_multisim_t* ms;
//! Holds the simulation topology.
- gmx_mtop_t* top_global;
+ const gmx_mtop_t& top_global;
//! Holds the domain topology.
gmx_localtop_t* top;
//! User input options.
- t_inputrec* inputrec;
+ const t_inputrec* inputrec;
//! The Interactive Molecular Dynamics session.
gmx::ImdSession* imdSession;
//! The pull work object.
//! Manages flop accounting.
t_nrnb* nrnb;
//! Manages wall cycle accounting.
- gmx_wallcycle_t wcycle;
- //! Coordinates global reduction.
+ gmx_wallcycle* wcycle;
+ //! Legacy coordinator of global reduction.
gmx_global_stat_t gstat;
+ //! Coordinates reduction for observables
+ gmx::ObservablesReducer* observablesReducer;
//! Handles virtual sites.
- gmx_vsite_t* vsite;
+ VirtualSitesHandler* vsite;
//! Handles constraints.
gmx::Constraints* constr;
- //! Handles strange things.
- t_fcdata* fcd;
- //! Molecular graph for SHAKE.
- t_graph* graph;
//! Per-atom data for this domain.
gmx::MDAtoms* mdAtoms;
//! Handles how to calculate the forces.
MdrunScheduleWorkload* runScheduleWork;
//! Stores the computed energies.
gmx_enerdata_t* enerd;
+ //! The DD partitioning count at which the pair list was generated
+ int ddpCountPairSearch;
+ //! The local coordinates that were used for pair searching, stored for computing displacements
+ std::vector<RVec> pairSearchCoordinates;
};
-void EnergyEvaluator::run(em_state_t* ems, rvec mu_tot, tensor vir, tensor pres, int64_t count, gmx_bool bFirst)
+void EnergyEvaluator::run(em_state_t* ems, rvec mu_tot, tensor vir, tensor pres, int64_t count, gmx_bool bFirst, int64_t step)
{
real t;
gmx_bool bNS;
/* Set the time to the initial time, the time does not change during EM */
t = inputrec->init_t;
- if (bFirst || (DOMAINDECOMP(cr) && ems->s.ddp_count < cr->dd->ddp_count))
+ if (vsite)
+ {
+ vsite->construct(ems->s.x, {}, ems->s.box, gmx::VSiteOperation::Positions);
+ }
+
+ // Compute the buffer size of the pair list
+ const real bufferSize = inputrec->rlist - std::max(inputrec->rcoulomb, inputrec->rvdw);
+
+ if (bFirst || bufferSize <= 0 || (haveDDAtomOrdering(*cr) && ems->s.ddp_count != ddpCountPairSearch))
{
/* This is the first state or an old state used before the last ns */
bNS = TRUE;
}
else
{
- bNS = FALSE;
- if (inputrec->nstlist > 0)
- {
- bNS = TRUE;
- }
+ // We need to generate a new pairlist when one atom moved more than half the buffer size
+ ArrayRef<const RVec> localCoordinates =
+ ArrayRef<const RVec>(ems->s.x).subArray(0, mdAtoms->mdatoms()->homenr);
+ bNS = 2 * maxCoordinateDifference(pairSearchCoordinates, localCoordinates, cr->mpi_comm_mygroup)
+ > bufferSize;
}
- if (vsite)
+ if (haveDDAtomOrdering(*cr) && bNS)
{
- construct_vsites(vsite, ems->s.x.rvec_array(), 1, nullptr, top->idef.iparams, top->idef.il,
- fr->ePBC, fr->bMolPBC, cr, ems->s.box);
+ /* Repartition the domain decomposition */
+ em_dd_partition_system(
+ fplog, mdlog, count, cr, top_global, inputrec, imdSession, pull_work, ems, top, mdAtoms, fr, vsite, constr, nrnb, wcycle);
+ ddpCountPairSearch = cr->dd->ddp_count;
}
- if (DOMAINDECOMP(cr) && bNS)
+ /* Store the local coordinates that will be used in the pair search, after we re-partitioned */
+ if (bufferSize > 0 && bNS)
{
- /* Repartition the domain decomposition */
- em_dd_partition_system(fplog, mdlog, count, cr, top_global, inputrec, imdSession, pull_work,
- ems, top, mdAtoms, fr, vsite, constr, nrnb, wcycle);
+ ArrayRef<const RVec> localCoordinates =
+ constArrayRefFromArray(ems->s.x.data(), mdAtoms->mdatoms()->homenr);
+ setCoordinates(&pairSearchCoordinates, localCoordinates);
}
+ fr->longRangeNonbondeds->updateAfterPartition(*mdAtoms->mdatoms());
+
/* Calc force & energy on new trial position */
/* do_force always puts the charge groups in the box and shifts again
* We do not unshift, so molecules are always whole in congrad.c
*/
- do_force(fplog, cr, ms, inputrec, nullptr, nullptr, imdSession, pull_work, count, nrnb, wcycle,
- top, ems->s.box, ems->s.x.arrayRefWithPadding(), &ems->s.hist,
- ems->f.arrayRefWithPadding(), force_vir, mdAtoms->mdatoms(), enerd, fcd, ems->s.lambda,
- graph, fr, runScheduleWork, vsite, mu_tot, t, nullptr,
+ do_force(fplog,
+ cr,
+ ms,
+ *inputrec,
+ nullptr,
+ nullptr,
+ imdSession,
+ pull_work,
+ count,
+ nrnb,
+ wcycle,
+ top,
+ ems->s.box,
+ ems->s.x.arrayRefWithPadding(),
+ &ems->s.hist,
+ &ems->f.view(),
+ force_vir,
+ mdAtoms->mdatoms(),
+ enerd,
+ ems->s.lambda,
+ fr,
+ runScheduleWork,
+ vsite,
+ mu_tot,
+ t,
+ nullptr,
+ fr->longRangeNonbondeds.get(),
GMX_FORCE_STATECHANGED | GMX_FORCE_ALLFORCES | GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY
| (bNS ? GMX_FORCE_NS : 0),
DDBalanceRegionHandler(cr));
clear_mat(pres);
/* Communicate stuff when parallel */
- if (PAR(cr) && inputrec->eI != eiNM)
+ if (PAR(cr) && inputrec->eI != IntegrationAlgorithm::NM)
{
- wallcycle_start(wcycle, ewcMoveE);
+ wallcycle_start(wcycle, WallCycleCounter::MoveE);
- global_stat(gstat, cr, enerd, force_vir, shake_vir, mu_tot, inputrec, nullptr, nullptr, nullptr,
- 1, &terminate, nullptr, FALSE, CGLO_ENERGY | CGLO_PRESSURE | CGLO_CONSTRAINT);
+ global_stat(*gstat,
+ cr,
+ enerd,
+ force_vir,
+ shake_vir,
+ *inputrec,
+ nullptr,
+ nullptr,
+ std::vector<real>(1, terminate),
+ FALSE,
+ CGLO_ENERGY | CGLO_PRESSURE | CGLO_CONSTRAINT,
+ step,
+ observablesReducer);
- wallcycle_stop(wcycle, ewcMoveE);
+ wallcycle_stop(wcycle, WallCycleCounter::MoveE);
}
if (fr->dispersionCorrection)
{
/* Calculate long range corrections to pressure and energy */
- const DispersionCorrection::Correction correction =
- fr->dispersionCorrection->calculate(ems->s.box, ems->s.lambda[efptVDW]);
+ const DispersionCorrection::Correction correction = fr->dispersionCorrection->calculate(
+ ems->s.box, ems->s.lambda[FreeEnergyPerturbationCouplingType::Vdw]);
enerd->term[F_DISPCORR] = correction.energy;
enerd->term[F_EPOT] += correction.energy;
if (constr)
{
/* Project out the constraint components of the force */
- dvdl_constr = 0;
- rvec* f_rvec = ems->f.rvec_array();
- constr->apply(FALSE, FALSE, count, 0, 1.0, ems->s.x.rvec_array(), f_rvec, f_rvec,
- ems->s.box, ems->s.lambda[efptBONDED], &dvdl_constr, nullptr, &shake_vir,
+ bool needsLogging = false;
+ bool computeEnergy = false;
+ bool computeVirial = true;
+ dvdl_constr = 0;
+ auto f = ems->f.view().forceWithPadding();
+ constr->apply(needsLogging,
+ computeEnergy,
+ count,
+ 0,
+ 1.0,
+ ems->s.x.arrayRefWithPadding(),
+ f,
+ f.unpaddedArrayRef(),
+ ems->s.box,
+ ems->s.lambda[FreeEnergyPerturbationCouplingType::Bonded],
+ &dvdl_constr,
+ gmx::ArrayRefWithPadding<RVec>(),
+ computeVirial,
+ shake_vir,
gmx::ConstraintVariable::ForceDispl);
enerd->term[F_DVDL_CONSTR] += dvdl_constr;
m_add(force_vir, shake_vir, vir);
}
clear_mat(ekin);
- enerd->term[F_PRES] = calc_pres(fr->ePBC, inputrec->nwall, ems->s.box, ekin, vir, pres);
+ enerd->term[F_PRES] = calc_pres(fr->pbcType, inputrec->nwall, ems->s.box, ekin, vir, pres);
- sum_dhdl(enerd, ems->s.lambda, *inputrec->fepvals);
+ if (inputrec->efep != FreeEnergyPerturbationType::No)
+ {
+ accumulateKineticLambdaComponents(enerd, ems->s.lambda, *inputrec->fepvals);
+ }
if (EI_ENERGY_MINIMIZATION(inputrec->eI))
{
} // namespace
//! Parallel utility summing energies and forces
-static double reorder_partsum(const t_commrec* cr,
- t_grpopts* opts,
- gmx_mtop_t* top_global,
- em_state_t* s_min,
- em_state_t* s_b)
+static double reorder_partsum(const t_commrec* cr,
+ const t_grpopts* opts,
+ const gmx_mtop_t& top_global,
+ const em_state_t* s_min,
+ const em_state_t* s_b)
{
if (debug)
{
fprintf(debug, "Doing reorder_partsum\n");
}
- const rvec* fm = s_min->f.rvec_array();
- const rvec* fb = s_b->f.rvec_array();
+ auto fm = s_min->f.view().force();
+ auto fb = s_b->f.view().force();
/* Collect fm in a global vector fmg.
* This conflicts with the spirit of domain decomposition,
* but to fully optimize this a much more complicated algorithm is required.
*/
- const int natoms = top_global->natoms;
+ const int natoms = top_global.natoms;
rvec* fmg;
snew(fmg, natoms);
copy_rvec(fm[i], fmg[a]);
i++;
}
- gmx_sum(top_global->natoms * 3, fmg[0], cr);
+ gmx_sum(top_global.natoms * 3, fmg[0], cr);
/* Now we will determine the part of the sum for the cgs in state s_b */
gmx::ArrayRef<const int> indicesB = s_b->s.cg_gl;
- double partsum = 0;
- i = 0;
- int gf = 0;
- gmx::ArrayRef<unsigned char> grpnrFREEZE =
- top_global->groups.groupNumbers[SimulationAtomGroupType::Freeze];
+ double partsum = 0;
+ i = 0;
+ int gf = 0;
+ gmx::ArrayRef<const unsigned char> grpnrFREEZE =
+ top_global.groups.groupNumbers[SimulationAtomGroupType::Freeze];
for (int a : indicesB)
{
if (!grpnrFREEZE.empty())
}
//! Print some stuff, like beta, whatever that means.
-static real pr_beta(const t_commrec* cr,
- t_grpopts* opts,
- t_mdatoms* mdatoms,
- gmx_mtop_t* top_global,
- em_state_t* s_min,
- em_state_t* s_b)
+static real pr_beta(const t_commrec* cr,
+ const t_grpopts* opts,
+ t_mdatoms* mdatoms,
+ const gmx_mtop_t& top_global,
+ const em_state_t* s_min,
+ const em_state_t* s_b)
{
double sum;
* and might have to sum it in parallel runs.
*/
- if (!DOMAINDECOMP(cr)
+ if (!haveDDAtomOrdering(*cr)
|| (s_min->s.ddp_count == cr->dd->ddp_count && s_b->s.ddp_count == cr->dd->ddp_count))
{
- const rvec* fm = s_min->f.rvec_array();
- const rvec* fb = s_b->f.rvec_array();
- sum = 0;
- int gf = 0;
+ auto fm = s_min->f.view().force();
+ auto fb = s_b->f.view().force();
+ sum = 0;
+ int gf = 0;
/* This part of code can be incorrect with DD,
* since the atom ordering in s_b and s_min might differ.
*/
{
const char* CG = "Polak-Ribiere Conjugate Gradients";
- gmx_localtop_t top;
gmx_global_stat_t gstat;
- t_graph* graph;
double tmp, minstep;
real stepsize;
real a, b, c, beta = 0.0;
tensor vir, pres;
int number_steps, neval = 0, nstcg = inputrec->nstcgsteep;
int m, step, nminstep;
- auto mdatoms = mdAtoms->mdatoms();
+ auto* mdatoms = mdAtoms->mdatoms();
GMX_LOG(mdlog.info)
.asParagraph()
if (MASTER(cr))
{
// In CG, the state is extended with a search direction
- state_global->flags |= (1 << estCGP);
+ state_global->flags |= enumValueToBitMask(StateEntry::Cgp);
// Ensure the extra per-atom state array gets allocated
state_change_natoms(state_global, state_global->natoms);
em_state_t* s_b = &s2;
em_state_t* s_c = &s3;
+ ObservablesReducer observablesReducer = observablesReducerBuilder->build();
+
/* Init em and store the local state in s_min */
- init_em(fplog, mdlog, CG, cr, inputrec, imdSession, pull_work, state_global, top_global, s_min,
- &top, nrnb, fr, &graph, mdAtoms, &gstat, vsite, constr, nullptr);
- gmx_mdoutf* outf =
- init_mdoutf(fplog, nfile, fnm, mdrunOptions, cr, outputProvider, mdModulesNotifier,
- inputrec, top_global, nullptr, wcycle, StartingBehavior::NewSimulation);
- gmx::EnergyOutput energyOutput(mdoutf_get_fp_ene(outf), top_global, inputrec, pull_work, nullptr,
- false, StartingBehavior::NewSimulation, mdModulesNotifier);
+ init_em(fplog,
+ mdlog,
+ CG,
+ cr,
+ inputrec,
+ imdSession,
+ pull_work,
+ state_global,
+ top_global,
+ s_min,
+ top,
+ nrnb,
+ fr,
+ mdAtoms,
+ &gstat,
+ vsite,
+ constr,
+ nullptr);
+ const bool simulationsShareState = false;
+ gmx_mdoutf* outf = init_mdoutf(fplog,
+ nfile,
+ fnm,
+ mdrunOptions,
+ cr,
+ outputProvider,
+ mdModulesNotifiers,
+ inputrec,
+ top_global,
+ nullptr,
+ wcycle,
+ StartingBehavior::NewSimulation,
+ simulationsShareState,
+ ms);
+ gmx::EnergyOutput energyOutput(mdoutf_get_fp_ene(outf),
+ top_global,
+ *inputrec,
+ pull_work,
+ nullptr,
+ false,
+ StartingBehavior::NewSimulation,
+ simulationsShareState,
+ mdModulesNotifiers);
/* Print to log file */
print_em_start(fplog, cr, walltime_accounting, wcycle, CG);
sp_header(fplog, CG, inputrec->em_tol, number_steps);
}
- EnergyEvaluator energyEvaluator{
- fplog, mdlog, cr, ms, top_global, &top, inputrec,
- imdSession, pull_work, nrnb, wcycle, gstat, vsite, constr,
- fcd, graph, mdAtoms, fr, runScheduleWork, enerd
- };
+ EnergyEvaluator energyEvaluator{ fplog,
+ mdlog,
+ cr,
+ ms,
+ top_global,
+ top,
+ inputrec,
+ imdSession,
+ pull_work,
+ nrnb,
+ wcycle,
+ gstat,
+ &observablesReducer,
+ vsite,
+ constr,
+ mdAtoms,
+ fr,
+ runScheduleWork,
+ enerd,
+ -1,
+ {} };
/* Call the force routine and some auxiliary (neighboursearching etc.) */
/* do_force always puts the charge groups in the box and shifts again
* We do not unshift, so molecules are always whole in congrad.c
*/
- energyEvaluator.run(s_min, mu_tot, vir, pres, -1, TRUE);
+ energyEvaluator.run(s_min, mu_tot, vir, pres, -1, TRUE, step);
+ observablesReducer.markAsReadyToReduce();
if (MASTER(cr))
{
/* Copy stuff to the energy bin for easy printing etc. */
matrix nullBox = {};
- energyOutput.addDataAtEnergyStep(false, false, static_cast<double>(step), mdatoms->tmass,
- enerd, nullptr, nullptr, nullptr, nullBox, nullptr,
- nullptr, vir, pres, nullptr, mu_tot, constr);
-
- energyOutput.printHeader(fplog, step, step);
- energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf), TRUE, FALSE, FALSE, fplog, step,
- step, fcd, nullptr);
+ energyOutput.addDataAtEnergyStep(false,
+ false,
+ static_cast<double>(step),
+ mdatoms->tmass,
+ enerd,
+ nullptr,
+ nullBox,
+ PTCouplingArrays(),
+ 0,
+ vir,
+ pres,
+ nullptr,
+ mu_tot,
+ constr);
+
+ EnergyOutput::printHeader(fplog, step, step);
+ energyOutput.printStepToEnergyFile(
+ mdoutf_get_fp_ene(outf), TRUE, FALSE, FALSE, fplog, step, step, fr->fcdata.get(), nullptr);
}
/* Estimate/guess the initial stepsize */
*/
/* Calculate the new direction in p, and the gradient in this direction, gpa */
- rvec* pm = s_min->s.cg_p.rvec_array();
- const rvec* sfm = s_min->f.rvec_array();
- double gpa = 0;
- int gf = 0;
+ gmx::ArrayRef<gmx::RVec> pm = s_min->s.cg_p;
+ gmx::ArrayRef<const gmx::RVec> sfm = s_min->f.view().force();
+ double gpa = 0;
+ int gf = 0;
for (int i = 0; i < mdatoms->homenr; i++)
{
if (mdatoms->cFREEZE)
gmx_sumd(1, &minstep, cr);
}
- minstep = GMX_REAL_EPS / sqrt(minstep / (3 * top_global->natoms));
+ minstep = GMX_REAL_EPS / sqrt(minstep / (3 * top_global.natoms));
if (stepsize < minstep)
{
do_x = do_per_step(step, inputrec->nstxout);
do_f = do_per_step(step, inputrec->nstfout);
- write_em_traj(fplog, cr, outf, do_x, do_f, nullptr, top_global, inputrec, step, s_min,
- state_global, observablesHistory);
+ write_em_traj(
+ fplog, cr, outf, do_x, do_f, nullptr, top_global, inputrec, step, s_min, state_global, observablesHistory);
/* Take a step downhill.
* In theory, we should minimize the function along this direction.
a = 0.0;
c = a + stepsize; /* reference position along line is zero */
- if (DOMAINDECOMP(cr) && s_min->s.ddp_count < cr->dd->ddp_count)
- {
- em_dd_partition_system(fplog, mdlog, step, cr, top_global, inputrec, imdSession,
- pull_work, s_min, &top, mdAtoms, fr, vsite, constr, nrnb, wcycle);
+ if (haveDDAtomOrdering(*cr) && s_min->s.ddp_count < cr->dd->ddp_count)
+ {
+ em_dd_partition_system(fplog,
+ mdlog,
+ step,
+ cr,
+ top_global,
+ inputrec,
+ imdSession,
+ pull_work,
+ s_min,
+ top,
+ mdAtoms,
+ fr,
+ vsite,
+ constr,
+ nrnb,
+ wcycle);
}
/* Take a trial step (new coords in s_c) */
- do_em_step(cr, inputrec, mdatoms, s_min, c, &s_min->s.cg_p, s_c, constr, -1);
+ do_em_step(cr, inputrec, mdatoms, s_min, c, s_min->s.cg_p.constArrayRefWithPadding(), s_c, constr, -1);
neval++;
/* Calculate energy for the trial step */
- energyEvaluator.run(s_c, mu_tot, vir, pres, -1, FALSE);
+ energyEvaluator.run(s_c, mu_tot, vir, pres, -1, FALSE, step);
+ observablesReducer.markAsReadyToReduce();
/* Calc derivative along line */
- const rvec* pc = s_c->s.cg_p.rvec_array();
- const rvec* sfc = s_c->f.rvec_array();
- double gpc = 0;
+ const rvec* pc = s_c->s.cg_p.rvec_array();
+ gmx::ArrayRef<const gmx::RVec> sfc = s_c->f.view().force();
+ double gpc = 0;
for (int i = 0; i < mdatoms->homenr; i++)
{
for (m = 0; m < DIM; m++)
b = 0.5 * (a + c);
}
- if (DOMAINDECOMP(cr) && s_min->s.ddp_count != cr->dd->ddp_count)
+ if (haveDDAtomOrdering(*cr) && s_min->s.ddp_count != cr->dd->ddp_count)
{
/* Reload the old state */
- em_dd_partition_system(fplog, mdlog, -1, cr, top_global, inputrec, imdSession, pull_work,
- s_min, &top, mdAtoms, fr, vsite, constr, nrnb, wcycle);
+ em_dd_partition_system(fplog,
+ mdlog,
+ -1,
+ cr,
+ top_global,
+ inputrec,
+ imdSession,
+ pull_work,
+ s_min,
+ top,
+ mdAtoms,
+ fr,
+ vsite,
+ constr,
+ nrnb,
+ wcycle);
}
/* Take a trial step to this new point - new coords in s_b */
- do_em_step(cr, inputrec, mdatoms, s_min, b, &s_min->s.cg_p, s_b, constr, -1);
+ do_em_step(cr, inputrec, mdatoms, s_min, b, s_min->s.cg_p.constArrayRefWithPadding(), s_b, constr, -1);
neval++;
/* Calculate energy for the trial step */
- energyEvaluator.run(s_b, mu_tot, vir, pres, -1, FALSE);
+ energyEvaluator.run(s_b, mu_tot, vir, pres, -1, FALSE, step);
+ observablesReducer.markAsReadyToReduce();
/* p does not change within a step, but since the domain decomposition
* might change, we have to use cg_p of s_b here.
*/
- const rvec* pb = s_b->s.cg_p.rvec_array();
- const rvec* sfb = s_b->f.rvec_array();
- gpb = 0;
+ const rvec* pb = s_b->s.cg_p.rvec_array();
+ gmx::ArrayRef<const gmx::RVec> sfb = s_b->f.view().force();
+ gpb = 0;
for (int i = 0; i < mdatoms->homenr; i++)
{
for (m = 0; m < DIM; m++)
if (debug)
{
- fprintf(debug, "CGE: EpotA %f EpotB %f EpotC %f gpb %f\n", s_a->epot, s_b->epot,
- s_c->epot, gpb);
+ fprintf(debug, "CGE: EpotA %f EpotB %f EpotC %f gpb %f\n", s_a->epot, s_b->epot, s_c->epot, gpb);
}
epot_repl = s_b->epot;
{
if (debug)
{
- fprintf(debug, "CGE: C (%f) is lower than A (%f), moving C to B\n", s_c->epot,
- s_a->epot);
+ fprintf(debug, "CGE: C (%f) is lower than A (%f), moving C to B\n", s_c->epot, s_a->epot);
}
swap_em_state(&s_b, &s_c);
gpb = gpc;
{
if (debug)
{
- fprintf(debug, "CGE: A (%f) is lower than C (%f), moving A to B\n", s_a->epot,
- s_c->epot);
+ fprintf(debug, "CGE: A (%f) is lower than C (%f), moving A to B\n", s_a->epot, s_c->epot);
}
swap_em_state(&s_b, &s_a);
gpb = gpa;
if (mdrunOptions.verbose)
{
double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
- fprintf(stderr, "\rStep %d, Epot=%12.6e, Fnorm=%9.3e, Fmax=%9.3e (atom %d)\n", step,
- s_min->epot, s_min->fnorm / sqrtNumAtoms, s_min->fmax, s_min->a_fmax + 1);
+ fprintf(stderr,
+ "\rStep %d, Epot=%12.6e, Fnorm=%9.3e, Fmax=%9.3e (atom %d)\n",
+ step,
+ s_min->epot,
+ s_min->fnorm / sqrtNumAtoms,
+ s_min->fmax,
+ s_min->a_fmax + 1);
fflush(stderr);
}
/* Store the new (lower) energies */
matrix nullBox = {};
- energyOutput.addDataAtEnergyStep(false, false, static_cast<double>(step), mdatoms->tmass,
- enerd, nullptr, nullptr, nullptr, nullBox, nullptr,
- nullptr, vir, pres, nullptr, mu_tot, constr);
+ energyOutput.addDataAtEnergyStep(false,
+ false,
+ static_cast<double>(step),
+ mdatoms->tmass,
+ enerd,
+ nullptr,
+ nullBox,
+ PTCouplingArrays(),
+ 0,
+ vir,
+ pres,
+ nullptr,
+ mu_tot,
+ constr);
do_log = do_per_step(step, inputrec->nstlog);
do_ene = do_per_step(step, inputrec->nstenergy);
if (do_log)
{
- energyOutput.printHeader(fplog, step, step);
+ EnergyOutput::printHeader(fplog, step, step);
}
- energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf), do_ene, FALSE, FALSE,
- do_log ? fplog : nullptr, step, step, fcd, nullptr);
+ energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf),
+ do_ene,
+ FALSE,
+ FALSE,
+ do_log ? fplog : nullptr,
+ step,
+ step,
+ fr->fcdata.get(),
+ nullptr);
}
/* Send energies and positions to the IMD client if bIMD is TRUE. */
- if (MASTER(cr) && imdSession->run(step, TRUE, state_global->box, state_global->x.rvec_array(), 0))
+ if (MASTER(cr) && imdSession->run(step, TRUE, state_global->box, state_global->x, 0))
{
imdSession->sendPositionsAndEnergies();
}
* If we have reached machine precision, converged is already set to true.
*/
converged = converged || (s_min->fmax < inputrec->em_tol);
-
+ observablesReducer.markAsReadyToReduce();
} /* End of the loop */
if (converged)
if (!do_log)
{
/* Write final value to log since we didn't do anything the last step */
- energyOutput.printHeader(fplog, step, step);
+ EnergyOutput::printHeader(fplog, step, step);
}
if (!do_ene || !do_log)
{
/* Write final energy file entries */
- energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf), !do_ene, FALSE, FALSE,
- !do_log ? fplog : nullptr, step, step, fcd, nullptr);
+ energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf),
+ !do_ene,
+ FALSE,
+ FALSE,
+ !do_log ? fplog : nullptr,
+ step,
+ step,
+ fr->fcdata.get(),
+ nullptr);
}
}
do_x = !do_per_step(step, inputrec->nstxout);
do_f = (inputrec->nstfout > 0 && !do_per_step(step, inputrec->nstfout));
- write_em_traj(fplog, cr, outf, do_x, do_f, ftp2fn(efSTO, nfile, fnm), top_global, inputrec,
- step, s_min, state_global, observablesHistory);
+ write_em_traj(
+ fplog, cr, outf, do_x, do_f, ftp2fn(efSTO, nfile, fnm), top_global, inputrec, step, s_min, state_global, observablesHistory);
if (MASTER(cr))
{
static const char* LBFGS = "Low-Memory BFGS Minimizer";
em_state_t ems;
- gmx_localtop_t top;
gmx_global_stat_t gstat;
- t_graph* graph;
- int ncorr, nmaxcorr, point, cp, neval, nminstep;
- double stepsize, step_taken, gpa, gpb, gpc, tmp, minstep;
- real * rho, *alpha, *p, *s, **dx, **dg;
- real a, b, c, maxdelta, delta;
- real diag, Epot0;
- real dgdx, dgdg, sq, yr, beta;
- gmx_bool converged;
- rvec mu_tot = { 0 };
- gmx_bool do_log, do_ene, do_x, do_f, foundlower, *frozen;
- tensor vir, pres;
- int start, end, number_steps;
- int i, k, m, n, gf, step;
- int mdof_flags;
- auto mdatoms = mdAtoms->mdatoms();
+ auto* mdatoms = mdAtoms->mdatoms();
GMX_LOG(mdlog.info)
.asParagraph()
"be available in a different form in a future version of GROMACS, "
"e.g. gmx minimize and an .mdp option.");
+ if (haveDDAtomOrdering(*cr))
+ {
+ gmx_fatal(FARGS, "L_BFGS is currently not supported");
+ }
if (PAR(cr))
{
gmx_fatal(FARGS, "L-BFGS minimization only supports a single rank");
"do not use constraints, or use another minimizer (e.g. steepest descent).");
}
- n = 3 * state_global->natoms;
- nmaxcorr = inputrec->nbfgscorr;
+ const int n = 3 * state_global->natoms;
+ const int nmaxcorr = inputrec->nbfgscorr;
- snew(frozen, n);
+ std::vector<real> p(n);
+ std::vector<real> rho(nmaxcorr);
+ std::vector<real> alpha(nmaxcorr);
- snew(p, n);
- snew(rho, nmaxcorr);
- snew(alpha, nmaxcorr);
-
- snew(dx, nmaxcorr);
- for (i = 0; i < nmaxcorr; i++)
+ std::vector<std::vector<real>> dx(nmaxcorr);
+ for (auto& dxCorr : dx)
{
- snew(dx[i], n);
+ dxCorr.resize(n);
}
- snew(dg, nmaxcorr);
- for (i = 0; i < nmaxcorr; i++)
+ std::vector<std::vector<real>> dg(nmaxcorr);
+ for (auto& dgCorr : dg)
{
- snew(dg[i], n);
+ dgCorr.resize(n);
}
- step = 0;
- neval = 0;
+ int step = 0;
+ int neval = 0;
- /* Init em */
- init_em(fplog, mdlog, LBFGS, cr, inputrec, imdSession, pull_work, state_global, top_global,
- &ems, &top, nrnb, fr, &graph, mdAtoms, &gstat, vsite, constr, nullptr);
- gmx_mdoutf* outf =
- init_mdoutf(fplog, nfile, fnm, mdrunOptions, cr, outputProvider, mdModulesNotifier,
- inputrec, top_global, nullptr, wcycle, StartingBehavior::NewSimulation);
- gmx::EnergyOutput energyOutput(mdoutf_get_fp_ene(outf), top_global, inputrec, pull_work, nullptr,
- false, StartingBehavior::NewSimulation, mdModulesNotifier);
+ ObservablesReducer observablesReducer = observablesReducerBuilder->build();
- start = 0;
- end = mdatoms->homenr;
+ /* Init em */
+ init_em(fplog,
+ mdlog,
+ LBFGS,
+ cr,
+ inputrec,
+ imdSession,
+ pull_work,
+ state_global,
+ top_global,
+ &ems,
+ top,
+ nrnb,
+ fr,
+ mdAtoms,
+ &gstat,
+ vsite,
+ constr,
+ nullptr);
+ const bool simulationsShareState = false;
+ gmx_mdoutf* outf = init_mdoutf(fplog,
+ nfile,
+ fnm,
+ mdrunOptions,
+ cr,
+ outputProvider,
+ mdModulesNotifiers,
+ inputrec,
+ top_global,
+ nullptr,
+ wcycle,
+ StartingBehavior::NewSimulation,
+ simulationsShareState,
+ ms);
+ gmx::EnergyOutput energyOutput(mdoutf_get_fp_ene(outf),
+ top_global,
+ *inputrec,
+ pull_work,
+ nullptr,
+ false,
+ StartingBehavior::NewSimulation,
+ simulationsShareState,
+ mdModulesNotifiers);
+
+ const int start = 0;
+ const int end = mdatoms->homenr;
/* We need 4 working states */
em_state_t s0{}, s1{}, s2{}, s3{};
/* Print to log file */
print_em_start(fplog, cr, walltime_accounting, wcycle, LBFGS);
- do_log = do_ene = do_x = do_f = TRUE;
-
/* Max number of steps */
- number_steps = inputrec->nsteps;
+ const int number_steps = inputrec->nsteps;
/* Create a 3*natoms index to tell whether each degree of freedom is frozen */
- gf = 0;
- for (i = start; i < end; i++)
+ std::vector<bool> frozen(n);
+ int gf = 0;
+ for (int i = start; i < end; i++)
{
if (mdatoms->cFREEZE)
{
gf = mdatoms->cFREEZE[i];
}
- for (m = 0; m < DIM; m++)
+ for (int m = 0; m < DIM; m++)
{
frozen[3 * i + m] = (inputrec->opts.nFreeze[gf][m] != 0);
}
if (vsite)
{
- construct_vsites(vsite, state_global->x.rvec_array(), 1, nullptr, top.idef.iparams,
- top.idef.il, fr->ePBC, fr->bMolPBC, cr, state_global->box);
+ vsite->construct(state_global->x, {}, state_global->box, VSiteOperation::Positions);
}
/* Call the force routine and some auxiliary (neighboursearching etc.) */
* We do not unshift, so molecules are always whole
*/
neval++;
- EnergyEvaluator energyEvaluator{
- fplog, mdlog, cr, ms, top_global, &top, inputrec,
- imdSession, pull_work, nrnb, wcycle, gstat, vsite, constr,
- fcd, graph, mdAtoms, fr, runScheduleWork, enerd
- };
- energyEvaluator.run(&ems, mu_tot, vir, pres, -1, TRUE);
+ EnergyEvaluator energyEvaluator{ fplog,
+ mdlog,
+ cr,
+ ms,
+ top_global,
+ top,
+ inputrec,
+ imdSession,
+ pull_work,
+ nrnb,
+ wcycle,
+ gstat,
+ &observablesReducer,
+ vsite,
+ constr,
+ mdAtoms,
+ fr,
+ runScheduleWork,
+ enerd };
+ rvec mu_tot;
+ tensor vir;
+ tensor pres;
+ energyEvaluator.run(&ems, mu_tot, vir, pres, -1, TRUE, step);
if (MASTER(cr))
{
/* Copy stuff to the energy bin for easy printing etc. */
matrix nullBox = {};
- energyOutput.addDataAtEnergyStep(false, false, static_cast<double>(step), mdatoms->tmass,
- enerd, nullptr, nullptr, nullptr, nullBox, nullptr,
- nullptr, vir, pres, nullptr, mu_tot, constr);
-
- energyOutput.printHeader(fplog, step, step);
- energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf), TRUE, FALSE, FALSE, fplog, step,
- step, fcd, nullptr);
+ energyOutput.addDataAtEnergyStep(false,
+ false,
+ static_cast<double>(step),
+ mdatoms->tmass,
+ enerd,
+ nullptr,
+ nullBox,
+ PTCouplingArrays(),
+ 0,
+ vir,
+ pres,
+ nullptr,
+ mu_tot,
+ constr);
+
+ EnergyOutput::printHeader(fplog, step, step);
+ energyOutput.printStepToEnergyFile(
+ mdoutf_get_fp_ene(outf), TRUE, FALSE, FALSE, fplog, step, step, fr->fcdata.get(), nullptr);
}
/* Set the initial step.
}
// Point is an index to the memory of search directions, where 0 is the first one.
- point = 0;
+ int point = 0;
// Set initial search direction to the force (-gradient), or 0 for frozen particles.
- real* fInit = static_cast<real*>(ems.f.rvec_array()[0]);
- for (i = 0; i < n; i++)
+ real* fInit = static_cast<real*>(ems.f.view().force().data()[0]);
+ for (int i = 0; i < n; i++)
{
if (!frozen[i])
{
// (the main efficiency in the algorithm comes from changing directions), but
// we still need an initial value, so estimate it as the inverse of the norm
// so we take small steps where the potential fluctuates a lot.
- stepsize = 1.0 / ems.fnorm;
+ double stepsize = 1.0 / ems.fnorm;
/* Start the loop over BFGS steps.
* Each successful step is counted, and we continue until
* we either converge or reach the max number of steps.
*/
- ncorr = 0;
+ bool do_log = true;
+ bool do_ene = true;
+
+ int ncorr = 0;
/* Set the gradient from the force */
- converged = FALSE;
- for (step = 0; (number_steps < 0 || step <= number_steps) && !converged; step++)
+ bool converged = false;
+ for (int step = 0; (number_steps < 0 || step <= number_steps) && !converged; step++)
{
/* Write coordinates if necessary */
- do_x = do_per_step(step, inputrec->nstxout);
- do_f = do_per_step(step, inputrec->nstfout);
+ const bool do_x = do_per_step(step, inputrec->nstxout);
+ const bool do_f = do_per_step(step, inputrec->nstfout);
- mdof_flags = 0;
+ int mdof_flags = 0;
if (do_x)
{
mdof_flags |= MDOF_X;
mdof_flags |= MDOF_IMD;
}
- mdoutf_write_to_trajectory_files(fplog, cr, outf, mdof_flags, top_global->natoms, step,
- static_cast<real>(step), &ems.s, state_global,
- observablesHistory, ems.f);
+ gmx::WriteCheckpointDataHolder checkpointDataHolder;
+ mdoutf_write_to_trajectory_files(fplog,
+ cr,
+ outf,
+ mdof_flags,
+ top_global.natoms,
+ step,
+ static_cast<real>(step),
+ &ems.s,
+ state_global,
+ observablesHistory,
+ ems.f.view().force(),
+ &checkpointDataHolder);
/* Do the linesearching in the direction dx[point][0..(n-1)] */
/* make s a pointer to current search direction - point=0 first time we get here */
- s = dx[point];
+ gmx::ArrayRef<const real> s = dx[point];
- real* xx = static_cast<real*>(ems.s.x.rvec_array()[0]);
- real* ff = static_cast<real*>(ems.f.rvec_array()[0]);
+ const real* xx = static_cast<real*>(ems.s.x.rvec_array()[0]);
+ const real* ff = static_cast<real*>(ems.f.view().force().data()[0]);
// calculate line gradient in position A
- for (gpa = 0, i = 0; i < n; i++)
+ double gpa = 0;
+ for (int i = 0; i < n; i++)
{
gpa -= s[i] * ff[i];
}
/* Calculate minimum allowed stepsize along the line, before the average (norm)
* relative change in coordinate is smaller than precision
*/
- for (minstep = 0, i = 0; i < n; i++)
+ double minstep = 0;
+ for (int i = 0; i < n; i++)
{
- tmp = fabs(xx[i]);
+ double tmp = fabs(xx[i]);
if (tmp < 1.0)
{
tmp = 1.0;
if (stepsize < minstep)
{
- converged = TRUE;
+ converged = true;
break;
}
// Before taking any steps along the line, store the old position
- *last = ems;
- real* lastx = static_cast<real*>(last->s.x.data()[0]);
- real* lastf = static_cast<real*>(last->f.data()[0]);
- Epot0 = ems.epot;
+ *last = ems;
+ real* lastx = static_cast<real*>(last->s.x.data()[0]);
+ real* lastf = static_cast<real*>(last->f.view().force().data()[0]);
+ const real Epot0 = ems.epot;
*sa = ems;
// State "A" is the first position along the line.
// reference position along line is initially zero
- a = 0.0;
+ real a = 0;
// Check stepsize first. We do not allow displacements
// larger than emstep.
//
+ real c;
+ real maxdelta;
do
{
// Pick a new position C by adding stepsize to A.
// Calculate what the largest change in any individual coordinate
// would be (translation along line * gradient along line)
maxdelta = 0;
- for (i = 0; i < n; i++)
+ for (int i = 0; i < n; i++)
{
- delta = c * s[i];
+ real delta = c * s[i];
if (delta > maxdelta)
{
maxdelta = delta;
// Take a trial step and move the coordinate array xc[] to position C
real* xc = static_cast<real*>(sc->s.x.rvec_array()[0]);
- for (i = 0; i < n; i++)
+ for (int i = 0; i < n; i++)
{
xc[i] = lastx[i] + c * s[i];
}
neval++;
// Calculate energy for the trial step in position C
- energyEvaluator.run(sc, mu_tot, vir, pres, step, FALSE);
+ energyEvaluator.run(sc, mu_tot, vir, pres, step, FALSE, step);
// Calc line gradient in position C
- real* fc = static_cast<real*>(sc->f.rvec_array()[0]);
- for (gpc = 0, i = 0; i < n; i++)
+ real* fc = static_cast<real*>(sc->f.view().force()[0]);
+ double gpc = 0;
+ for (int i = 0; i < n; i++)
{
gpc -= s[i] * fc[i]; /* f is negative gradient, thus the sign */
}
// This is the max amount of increase in energy we tolerate.
// By allowing VERY small changes (close to numerical precision) we
// frequently find even better (lower) final energies.
- tmp = std::sqrt(GMX_REAL_EPS) * fabs(sa->epot);
+ double tmp = std::sqrt(GMX_REAL_EPS) * fabs(sa->epot);
// Accept the step if the energy is lower in the new position C (compared to A),
// or if it is not significantly higher and the line derivative is still negative.
- foundlower = sc->epot < sa->epot || (gpc < 0 && sc->epot < (sa->epot + tmp));
+ bool foundlower = sc->epot < sa->epot || (gpc < 0 && sc->epot < (sa->epot + tmp));
// If true, great, we found a better energy. We no longer try to alter the
// stepsize, but simply accept this new better position. The we select a new
// search direction instead, which will be much more efficient than continuing
// than with the stepsize, so no need to modify it. For the next search direction
// it will be reset to 1/fnorm anyway.
+ double step_taken;
if (!foundlower)
{
// OK, if we didn't find a lower value we will have to locate one now - there must
// I also have a safeguard for potentially really pathological functions so we never
// take more than 20 steps before we give up.
// If we already found a lower value we just skip this step and continue to the update.
- real fnorm = 0;
- nminstep = 0;
+ real fnorm = 0;
+ int nminstep = 0;
do
{
// Select a new trial point B in the interval [A,C].
// If the derivatives at points a & c have different sign we interpolate to zero,
// otherwise just do a bisection since there might be multiple minima/maxima
// inside the interval.
+ real b;
if (gpa < 0 && gpc > 0)
{
b = a + gpa * (a - c) / (gpc - gpa);
// Take a trial step to point B
real* xb = static_cast<real*>(sb->s.x.rvec_array()[0]);
- for (i = 0; i < n; i++)
+ for (int i = 0; i < n; i++)
{
xb[i] = lastx[i] + b * s[i];
}
neval++;
// Calculate energy for the trial step in point B
- energyEvaluator.run(sb, mu_tot, vir, pres, step, FALSE);
+ energyEvaluator.run(sb, mu_tot, vir, pres, step, FALSE, step);
fnorm = sb->fnorm;
// Calculate gradient in point B
- real* fb = static_cast<real*>(sb->f.rvec_array()[0]);
- for (gpb = 0, i = 0; i < n; i++)
+ real* fb = static_cast<real*>(sb->f.view().force()[0]);
+ double gpb = 0;
+ for (int i = 0; i < n; i++)
{
gpb -= s[i] * fb[i]; /* f is negative gradient, thus the sign */
}
if (ncorr == 0)
{
/* Converged */
- converged = TRUE;
+ converged = true;
break;
}
else
/* Reset memory */
ncorr = 0;
/* Search in gradient direction */
- for (i = 0; i < n; i++)
+ for (int i = 0; i < n; i++)
{
dx[point][i] = ff[i];
}
ncorr++;
}
- for (i = 0; i < n; i++)
+ for (int i = 0; i < n; i++)
{
dg[point][i] = lastf[i] - ff[i];
dx[point][i] *= step_taken;
}
- dgdg = 0;
- dgdx = 0;
- for (i = 0; i < n; i++)
+ real dgdg = 0;
+ real dgdx = 0;
+ for (int i = 0; i < n; i++)
{
dgdg += dg[point][i] * dg[point][i];
dgdx += dg[point][i] * dx[point][i];
}
- diag = dgdx / dgdg;
+ const real diag = dgdx / dgdg;
rho[point] = 1.0 / dgdx;
point++;
}
/* Update */
- for (i = 0; i < n; i++)
+ for (int i = 0; i < n; i++)
{
p[i] = ff[i];
}
- cp = point;
+ int cp = point;
/* Recursive update. First go back over the memory points */
- for (k = 0; k < ncorr; k++)
+ for (int k = 0; k < ncorr; k++)
{
cp--;
if (cp < 0)
cp = ncorr - 1;
}
- sq = 0;
- for (i = 0; i < n; i++)
+ real sq = 0;
+ for (int i = 0; i < n; i++)
{
sq += dx[cp][i] * p[i];
}
alpha[cp] = rho[cp] * sq;
- for (i = 0; i < n; i++)
+ for (int i = 0; i < n; i++)
{
p[i] -= alpha[cp] * dg[cp][i];
}
}
- for (i = 0; i < n; i++)
+ for (int i = 0; i < n; i++)
{
p[i] *= diag;
}
/* And then go forward again */
- for (k = 0; k < ncorr; k++)
+ for (int k = 0; k < ncorr; k++)
{
- yr = 0;
- for (i = 0; i < n; i++)
+ real yr = 0;
+ for (int i = 0; i < n; i++)
{
yr += p[i] * dg[cp][i];
}
- beta = rho[cp] * yr;
- beta = alpha[cp] - beta;
+ real beta = rho[cp] * yr;
+ beta = alpha[cp] - beta;
- for (i = 0; i < n; i++)
+ for (int i = 0; i < n; i++)
{
p[i] += beta * dx[cp][i];
}
}
}
- for (i = 0; i < n; i++)
+ for (int i = 0; i < n; i++)
{
if (!frozen[i])
{
if (mdrunOptions.verbose)
{
double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
- fprintf(stderr, "\rStep %d, Epot=%12.6e, Fnorm=%9.3e, Fmax=%9.3e (atom %d)\n", step,
- ems.epot, ems.fnorm / sqrtNumAtoms, ems.fmax, ems.a_fmax + 1);
+ fprintf(stderr,
+ "\rStep %d, Epot=%12.6e, Fnorm=%9.3e, Fmax=%9.3e (atom %d)\n",
+ step,
+ ems.epot,
+ ems.fnorm / sqrtNumAtoms,
+ ems.fmax,
+ ems.a_fmax + 1);
fflush(stderr);
}
/* Store the new (lower) energies */
matrix nullBox = {};
- energyOutput.addDataAtEnergyStep(false, false, static_cast<double>(step), mdatoms->tmass,
- enerd, nullptr, nullptr, nullptr, nullBox, nullptr,
- nullptr, vir, pres, nullptr, mu_tot, constr);
+ energyOutput.addDataAtEnergyStep(false,
+ false,
+ static_cast<double>(step),
+ mdatoms->tmass,
+ enerd,
+ nullptr,
+ nullBox,
+ PTCouplingArrays(),
+ 0,
+ vir,
+ pres,
+ nullptr,
+ mu_tot,
+ constr);
do_log = do_per_step(step, inputrec->nstlog);
do_ene = do_per_step(step, inputrec->nstenergy);
if (do_log)
{
- energyOutput.printHeader(fplog, step, step);
+ EnergyOutput::printHeader(fplog, step, step);
}
- energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf), do_ene, FALSE, FALSE,
- do_log ? fplog : nullptr, step, step, fcd, nullptr);
+ energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf),
+ do_ene,
+ FALSE,
+ FALSE,
+ do_log ? fplog : nullptr,
+ step,
+ step,
+ fr->fcdata.get(),
+ nullptr);
}
/* Send x and E to IMD client, if bIMD is TRUE. */
- if (imdSession->run(step, TRUE, state_global->box, state_global->x.rvec_array(), 0) && MASTER(cr))
+ if (imdSession->run(step, TRUE, state_global->box, state_global->x, 0) && MASTER(cr))
{
imdSession->sendPositionsAndEnergies();
}
* If we have reached machine precision, converged is already set to true.
*/
converged = converged || (ems.fmax < inputrec->em_tol);
-
+ observablesReducer.markAsReadyToReduce();
} /* End of the loop */
if (converged)
*/
if (!do_log) /* Write final value to log since we didn't do anythin last step */
{
- energyOutput.printHeader(fplog, step, step);
+ EnergyOutput::printHeader(fplog, step, step);
}
if (!do_ene || !do_log) /* Write final energy file entries */
{
- energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf), !do_ene, FALSE, FALSE,
- !do_log ? fplog : nullptr, step, step, fcd, nullptr);
+ energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf),
+ !do_ene,
+ FALSE,
+ FALSE,
+ !do_log ? fplog : nullptr,
+ step,
+ step,
+ fr->fcdata.get(),
+ nullptr);
}
/* Print some stuff... */
* However, we should only do it if we did NOT already write this step
* above (which we did if do_x or do_f was true).
*/
- do_x = !do_per_step(step, inputrec->nstxout);
- do_f = !do_per_step(step, inputrec->nstfout);
- write_em_traj(fplog, cr, outf, do_x, do_f, ftp2fn(efSTO, nfile, fnm), top_global, inputrec,
- step, &ems, state_global, observablesHistory);
+ const bool do_x = !do_per_step(step, inputrec->nstxout);
+ const bool do_f = !do_per_step(step, inputrec->nstfout);
+ write_em_traj(
+ fplog, cr, outf, do_x, do_f, ftp2fn(efSTO, nfile, fnm), top_global, inputrec, step, &ems, state_global, observablesHistory);
if (MASTER(cr))
{
void LegacySimulator::do_steep()
{
const char* SD = "Steepest Descents";
- gmx_localtop_t top;
gmx_global_stat_t gstat;
- t_graph* graph;
real stepsize;
real ustep;
gmx_bool bDone, bAbort, do_x, do_f;
int nsteps;
int count = 0;
int steps_accepted = 0;
- auto mdatoms = mdAtoms->mdatoms();
+ auto* mdatoms = mdAtoms->mdatoms();
GMX_LOG(mdlog.info)
.asParagraph()
em_state_t* s_min = &s0;
em_state_t* s_try = &s1;
+ ObservablesReducer observablesReducer = observablesReducerBuilder->build();
+
/* Init em and store the local state in s_try */
- init_em(fplog, mdlog, SD, cr, inputrec, imdSession, pull_work, state_global, top_global, s_try,
- &top, nrnb, fr, &graph, mdAtoms, &gstat, vsite, constr, nullptr);
- gmx_mdoutf* outf =
- init_mdoutf(fplog, nfile, fnm, mdrunOptions, cr, outputProvider, mdModulesNotifier,
- inputrec, top_global, nullptr, wcycle, StartingBehavior::NewSimulation);
- gmx::EnergyOutput energyOutput(mdoutf_get_fp_ene(outf), top_global, inputrec, pull_work, nullptr,
- false, StartingBehavior::NewSimulation, mdModulesNotifier);
+ init_em(fplog,
+ mdlog,
+ SD,
+ cr,
+ inputrec,
+ imdSession,
+ pull_work,
+ state_global,
+ top_global,
+ s_try,
+ top,
+ nrnb,
+ fr,
+ mdAtoms,
+ &gstat,
+ vsite,
+ constr,
+ nullptr);
+ const bool simulationsShareState = false;
+ gmx_mdoutf* outf = init_mdoutf(fplog,
+ nfile,
+ fnm,
+ mdrunOptions,
+ cr,
+ outputProvider,
+ mdModulesNotifiers,
+ inputrec,
+ top_global,
+ nullptr,
+ wcycle,
+ StartingBehavior::NewSimulation,
+ simulationsShareState,
+ ms);
+ gmx::EnergyOutput energyOutput(mdoutf_get_fp_ene(outf),
+ top_global,
+ *inputrec,
+ pull_work,
+ nullptr,
+ false,
+ StartingBehavior::NewSimulation,
+ simulationsShareState,
+ mdModulesNotifiers);
/* Print to log file */
print_em_start(fplog, cr, walltime_accounting, wcycle, SD);
{
sp_header(fplog, SD, inputrec->em_tol, nsteps);
}
- EnergyEvaluator energyEvaluator{
- fplog, mdlog, cr, ms, top_global, &top, inputrec,
- imdSession, pull_work, nrnb, wcycle, gstat, vsite, constr,
- fcd, graph, mdAtoms, fr, runScheduleWork, enerd
- };
+ EnergyEvaluator energyEvaluator{ fplog,
+ mdlog,
+ cr,
+ ms,
+ top_global,
+ top,
+ inputrec,
+ imdSession,
+ pull_work,
+ nrnb,
+ wcycle,
+ gstat,
+ &observablesReducer,
+ vsite,
+ constr,
+ mdAtoms,
+ fr,
+ runScheduleWork,
+ enerd };
/**** HERE STARTS THE LOOP ****
* count is the counter for the number of steps
bool validStep = true;
if (count > 0)
{
- validStep = do_em_step(cr, inputrec, mdatoms, s_min, stepsize, &s_min->f, s_try, constr, count);
+ validStep = do_em_step(
+ cr, inputrec, mdatoms, s_min, stepsize, s_min->f.view().forceWithPadding(), s_try, constr, count);
}
if (validStep)
{
- energyEvaluator.run(s_try, mu_tot, vir, pres, count, count == 0);
+ energyEvaluator.run(s_try, mu_tot, vir, pres, count, count == 0, count);
}
else
{
if (MASTER(cr))
{
- energyOutput.printHeader(fplog, count, count);
+ EnergyOutput::printHeader(fplog, count, count);
}
if (count == 0)
{
if (mdrunOptions.verbose)
{
- fprintf(stderr, "Step=%5d, Dmax= %6.1e nm, Epot= %12.5e Fmax= %11.5e, atom= %d%c",
- count, ustep, s_try->epot, s_try->fmax, s_try->a_fmax + 1,
+ fprintf(stderr,
+ "Step=%5d, Dmax= %6.1e nm, Epot= %12.5e Fmax= %11.5e, atom= %d%c",
+ count,
+ ustep,
+ s_try->epot,
+ s_try->fmax,
+ s_try->a_fmax + 1,
((count == 0) || (s_try->epot < s_min->epot)) ? '\n' : '\r');
fflush(stderr);
}
{
/* Store the new (lower) energies */
matrix nullBox = {};
- energyOutput.addDataAtEnergyStep(false, false, static_cast<double>(count), mdatoms->tmass,
- enerd, nullptr, nullptr, nullptr, nullBox, nullptr,
- nullptr, vir, pres, nullptr, mu_tot, constr);
+ energyOutput.addDataAtEnergyStep(false,
+ false,
+ static_cast<double>(count),
+ mdatoms->tmass,
+ enerd,
+ nullptr,
+ nullBox,
+ PTCouplingArrays(),
+ 0,
+ vir,
+ pres,
+ nullptr,
+ mu_tot,
+ constr);
imdSession->fillEnergyRecord(count, TRUE);
const bool do_dr = do_per_step(steps_accepted, inputrec->nstdisreout);
const bool do_or = do_per_step(steps_accepted, inputrec->nstorireout);
- energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf), TRUE, do_dr, do_or,
- fplog, count, count, fcd, nullptr);
+ energyOutput.printStepToEnergyFile(
+ mdoutf_get_fp_ene(outf), TRUE, do_dr, do_or, fplog, count, count, fr->fcdata.get(), nullptr);
fflush(fplog);
}
}
/* Write to trn, if necessary */
do_x = do_per_step(steps_accepted, inputrec->nstxout);
do_f = do_per_step(steps_accepted, inputrec->nstfout);
- write_em_traj(fplog, cr, outf, do_x, do_f, nullptr, top_global, inputrec, count, s_min,
- state_global, observablesHistory);
+ write_em_traj(
+ fplog, cr, outf, do_x, do_f, nullptr, top_global, inputrec, count, s_min, state_global, observablesHistory);
}
else
{
/* If energy is not smaller make the step smaller... */
ustep *= 0.5;
- if (DOMAINDECOMP(cr) && s_min->s.ddp_count != cr->dd->ddp_count)
+ if (haveDDAtomOrdering(*cr) && s_min->s.ddp_count != cr->dd->ddp_count)
{
/* Reload the old state */
- em_dd_partition_system(fplog, mdlog, count, cr, top_global, inputrec, imdSession,
- pull_work, s_min, &top, mdAtoms, fr, vsite, constr, nrnb, wcycle);
+ em_dd_partition_system(fplog,
+ mdlog,
+ count,
+ cr,
+ top_global,
+ inputrec,
+ imdSession,
+ pull_work,
+ s_min,
+ top,
+ mdAtoms,
+ fr,
+ vsite,
+ constr,
+ nrnb,
+ wcycle);
}
}
}
/* Send IMD energies and positions, if bIMD is TRUE. */
- if (imdSession->run(count, TRUE, state_global->box,
- MASTER(cr) ? state_global->x.rvec_array() : nullptr, 0)
+ if (imdSession->run(count,
+ TRUE,
+ MASTER(cr) ? state_global->box : nullptr,
+ MASTER(cr) ? state_global->x : gmx::ArrayRef<gmx::RVec>(),
+ 0)
&& MASTER(cr))
{
imdSession->sendPositionsAndEnergies();
}
count++;
+ observablesReducer.markAsReadyToReduce();
} /* End of the loop */
/* Print some data... */
{
fprintf(stderr, "\nwriting lowest energy coordinates.\n");
}
- write_em_traj(fplog, cr, outf, TRUE, inputrec->nstfout != 0, ftp2fn(efSTO, nfile, fnm),
- top_global, inputrec, count, s_min, state_global, observablesHistory);
+ write_em_traj(fplog,
+ cr,
+ outf,
+ TRUE,
+ inputrec->nstfout != 0,
+ ftp2fn(efSTO, nfile, fnm),
+ top_global,
+ inputrec,
+ count,
+ s_min,
+ state_global,
+ observablesHistory);
if (MASTER(cr))
{
finish_em(cr, outf, walltime_accounting, wcycle);
/* To print the actual number of steps we needed somewhere */
- inputrec->nsteps = count;
+ {
+ // TODO: Avoid changing inputrec (#3854)
+ auto* nonConstInputrec = const_cast<t_inputrec*>(inputrec);
+ nonConstInputrec->nsteps = count;
+ }
walltime_accounting_set_nsteps_done(walltime_accounting, count);
}
{
const char* NM = "Normal Mode Analysis";
int nnodes;
- gmx_localtop_t top;
gmx_global_stat_t gstat;
- t_graph* graph;
tensor vir, pres;
rvec mu_tot = { 0 };
rvec* dfdx;
real* full_matrix = nullptr;
/* added with respect to mdrun */
- int row, col;
- real der_range = 10.0 * std::sqrt(GMX_REAL_EPS);
- real x_min;
- bool bIsMaster = MASTER(cr);
- auto mdatoms = mdAtoms->mdatoms();
+ int row, col;
+ real der_range = 10.0 * std::sqrt(GMX_REAL_EPS);
+ real x_min;
+ bool bIsMaster = MASTER(cr);
+ auto* mdatoms = mdAtoms->mdatoms();
GMX_LOG(mdlog.info)
.asParagraph()
em_state_t state_work{};
+ fr->longRangeNonbondeds->updateAfterPartition(*mdAtoms->mdatoms());
+ ObservablesReducer observablesReducer = observablesReducerBuilder->build();
+
/* Init em and store the local state in state_minimum */
- init_em(fplog, mdlog, NM, cr, inputrec, imdSession, pull_work, state_global, top_global,
- &state_work, &top, nrnb, fr, &graph, mdAtoms, &gstat, vsite, constr, &shellfc);
- gmx_mdoutf* outf =
- init_mdoutf(fplog, nfile, fnm, mdrunOptions, cr, outputProvider, mdModulesNotifier,
- inputrec, top_global, nullptr, wcycle, StartingBehavior::NewSimulation);
+ init_em(fplog,
+ mdlog,
+ NM,
+ cr,
+ inputrec,
+ imdSession,
+ pull_work,
+ state_global,
+ top_global,
+ &state_work,
+ top,
+ nrnb,
+ fr,
+ mdAtoms,
+ &gstat,
+ vsite,
+ constr,
+ &shellfc);
+ const bool simulationsShareState = false;
+ gmx_mdoutf* outf = init_mdoutf(fplog,
+ nfile,
+ fnm,
+ mdrunOptions,
+ cr,
+ outputProvider,
+ mdModulesNotifiers,
+ inputrec,
+ top_global,
+ nullptr,
+ wcycle,
+ StartingBehavior::NewSimulation,
+ simulationsShareState,
+ ms);
std::vector<int> atom_index = get_atom_index(top_global);
std::vector<gmx::RVec> fneg(atom_index.size(), { 0, 0, 0 });
print_em_start(fplog, cr, walltime_accounting, wcycle, NM);
/* fudge nr of steps to nr of atoms */
- inputrec->nsteps = atom_index.size() * 2;
+ {
+ // TODO: Avoid changing inputrec (#3854)
+ auto* nonConstInputrec = const_cast<t_inputrec*>(inputrec);
+ nonConstInputrec->nsteps = atom_index.size() * 2;
+ }
if (bIsMaster)
{
- fprintf(stderr, "starting normal mode calculation '%s'\n%" PRId64 " steps.\n\n",
- *(top_global->name), inputrec->nsteps);
+ fprintf(stderr,
+ "starting normal mode calculation '%s'\n%" PRId64 " steps.\n\n",
+ *(top_global.name),
+ inputrec->nsteps);
}
nnodes = cr->nnodes;
/* Make evaluate_energy do a single node force calculation */
cr->nnodes = 1;
- EnergyEvaluator energyEvaluator{
- fplog, mdlog, cr, ms, top_global, &top, inputrec,
- imdSession, pull_work, nrnb, wcycle, gstat, vsite, constr,
- fcd, graph, mdAtoms, fr, runScheduleWork, enerd
- };
- energyEvaluator.run(&state_work, mu_tot, vir, pres, -1, TRUE);
+ EnergyEvaluator energyEvaluator{ fplog,
+ mdlog,
+ cr,
+ ms,
+ top_global,
+ top,
+ inputrec,
+ imdSession,
+ pull_work,
+ nrnb,
+ wcycle,
+ gstat,
+ &observablesReducer,
+ vsite,
+ constr,
+ mdAtoms,
+ fr,
+ runScheduleWork,
+ enerd };
+ energyEvaluator.run(&state_work, mu_tot, vir, pres, -1, TRUE, 0);
cr->nnodes = nnodes;
/* if forces are not small, warn user */
/* Steps are divided one by one over the nodes */
bool bNS = true;
auto state_work_x = makeArrayRef(state_work.s.x);
- auto state_work_f = makeArrayRef(state_work.f);
+ auto state_work_f = state_work.f.view().force();
for (index aid = cr->nodeid; aid < ssize(atom_index); aid += nnodes)
{
size_t atom = atom_index[aid];
if (shellfc)
{
/* Now is the time to relax the shells */
- relax_shell_flexcon(fplog, cr, ms, mdrunOptions.verbose, nullptr, step, inputrec,
- imdSession, pull_work, bNS, force_flags, &top, constr, enerd,
- fcd, state_work.s.natoms, state_work.s.x.arrayRefWithPadding(),
- state_work.s.v.arrayRefWithPadding(), state_work.s.box,
- state_work.s.lambda, &state_work.s.hist,
- state_work.f.arrayRefWithPadding(), vir, mdatoms, nrnb,
- wcycle, graph, shellfc, fr, runScheduleWork, t, mu_tot,
- vsite, DDBalanceRegionHandler(nullptr));
+ relax_shell_flexcon(fplog,
+ cr,
+ ms,
+ mdrunOptions.verbose,
+ nullptr,
+ step,
+ inputrec,
+ imdSession,
+ pull_work,
+ bNS,
+ force_flags,
+ top,
+ constr,
+ enerd,
+ state_work.s.natoms,
+ state_work.s.x.arrayRefWithPadding(),
+ state_work.s.v.arrayRefWithPadding(),
+ state_work.s.box,
+ state_work.s.lambda,
+ &state_work.s.hist,
+ &state_work.f.view(),
+ vir,
+ *mdatoms,
+ fr->longRangeNonbondeds.get(),
+ nrnb,
+ wcycle,
+ shellfc,
+ fr,
+ runScheduleWork,
+ t,
+ mu_tot,
+ vsite,
+ DDBalanceRegionHandler(nullptr));
bNS = false;
step++;
}
else
{
- energyEvaluator.run(&state_work, mu_tot, vir, pres, aid * 2 + dx, FALSE);
+ energyEvaluator.run(&state_work, mu_tot, vir, pres, aid * 2 + dx, FALSE, step);
}
cr->nnodes = nnodes;
if (dx == 0)
{
- std::copy(state_work_f.begin(), state_work_f.begin() + atom_index.size(),
- fneg.begin());
+ std::copy(state_work_f.begin(), state_work_f.begin() + atom_index.size(), fneg.begin());
}
}
{
#if GMX_MPI
# define mpi_type GMX_MPI_REAL
- MPI_Send(dfdx[0], atom_index.size() * DIM, mpi_type, MASTER(cr), cr->nodeid,
- cr->mpi_comm_mygroup);
+ MPI_Send(dfdx[0], atom_index.size() * DIM, mpi_type, MASTER(cr), cr->nodeid, cr->mpi_comm_mygroup);
#endif
}
else
{
#if GMX_MPI
MPI_Status stat;
- MPI_Recv(dfdx[0], atom_index.size() * DIM, mpi_type, node, node,
- cr->mpi_comm_mygroup, &stat);
+ MPI_Recv(dfdx[0], atom_index.size() * DIM, mpi_type, node, node, cr->mpi_comm_mygroup, &stat);
# undef mpi_type
#endif
}
/* write progress */
if (bIsMaster && mdrunOptions.verbose)
{
- fprintf(stderr, "\rFinished step %d out of %td",
- std::min<int>(atom + nnodes, atom_index.size()), ssize(atom_index));
+ fprintf(stderr,
+ "\rFinished step %d out of %td",
+ std::min<int>(atom + nnodes, atom_index.size()),
+ ssize(atom_index));
fflush(stderr);
}
}