#include <memory>
#include "gromacs/domdec/domdec_struct.h"
-#include "gromacs/fileio/confio.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/listed_forces/disre.h"
#include "gromacs/listed_forces/orires.h"
#include "gromacs/math/functions.h"
-#include "gromacs/math/invertmatrix.h"
#include "gromacs/math/paddedvector.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
-#include "gromacs/math/vecdump.h"
#include "gromacs/mdlib/boxdeformation.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
-#include "gromacs/mdlib/mdatoms.h"
#include "gromacs/mdlib/stat.h"
#include "gromacs/mdlib/tgroup.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/group.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
-#include "gromacs/mdtypes/mdatom.h"
#include "gromacs/mdtypes/state.h"
-#include "gromacs/pbcutil/boxutilities.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/random/tabulatednormaldistribution.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/futil.h"
#include "gromacs/utility/gmxassert.h"
-#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/smalloc.h"
using namespace gmx; // TODO: Remove when this file is moved into gmx namespace
void update_coords(const t_inputrec& inputRecord,
int64_t step,
- const t_mdatoms* md,
+ int homenr,
+ bool havePartiallyFrozenAtoms,
+ gmx::ArrayRef<const ParticleType> ptype,
+ gmx::ArrayRef<const real> invMass,
+ gmx::ArrayRef<const rvec> invMassPerDim,
t_state* state,
const gmx::ArrayRefWithPadding<const gmx::RVec>& f,
- const t_fcdata& fcdata,
+ t_fcdata* fcdata,
const gmx_ekindata_t* ekind,
const matrix M,
int UpdatePart,
const t_commrec* cr,
bool haveConstraints);
- void finish_update(const t_inputrec& inputRecord,
- const t_mdatoms* md,
- t_state* state,
- gmx_wallcycle_t wcycle,
- bool haveConstraints);
-
- void update_sd_second_half(const t_inputrec& inputRecord,
- int64_t step,
- real* dvdlambda,
- const t_mdatoms* md,
- t_state* state,
- const t_commrec* cr,
- t_nrnb* nrnb,
- gmx_wallcycle_t wcycle,
- gmx::Constraints* constr,
- bool do_log,
- bool do_ene);
-
- void update_for_constraint_virial(const t_inputrec& inputRecord,
- const t_mdatoms& md,
- const t_state& state,
+ void finish_update(const t_inputrec& inputRecord,
+ bool havePartiallyFrozenAtoms,
+ int homenr,
+ gmx::ArrayRef<const unsigned short> cFREEZE,
+ t_state* state,
+ gmx_wallcycle* wcycle,
+ bool haveConstraints);
+
+ void update_sd_second_half(const t_inputrec& inputRecord,
+ int64_t step,
+ real* dvdlambda,
+ int homenr,
+ gmx::ArrayRef<const ParticleType> ptype,
+ gmx::ArrayRef<const real> invMass,
+ t_state* state,
+ const t_commrec* cr,
+ t_nrnb* nrnb,
+ gmx_wallcycle* wcycle,
+ gmx::Constraints* constr,
+ bool do_log,
+ bool do_ene);
+
+ void update_for_constraint_virial(const t_inputrec& inputRecord,
+ int homenr,
+ bool havePartiallyFrozenAtoms,
+ gmx::ArrayRef<const real> invmass,
+ gmx::ArrayRef<const rvec> invMassPerDim,
+ const t_state& state,
const gmx::ArrayRefWithPadding<const gmx::RVec>& f,
const gmx_ekindata_t& ekind);
BoxDeformation* deform() const { return deform_; }
+ //! Group index for freezing
+ gmx::ArrayRef<const unsigned short> cFREEZE_;
+ //! Group index for temperature coupling
+ gmx::ArrayRef<const unsigned short> cTC_;
+ //! Group index for acceleration groups
+ gmx::ArrayRef<const unsigned short> cAcceleration_;
+
private:
//! stochastic dynamics struct
gmx_stochd_t sd_;
return impl_->deform();
}
-void Update::update_coords(const t_inputrec& inputRecord,
- int64_t step,
- const t_mdatoms* md,
- t_state* state,
+void Update::update_coords(const t_inputrec& inputRecord,
+ int64_t step,
+ const int homenr,
+ const bool havePartiallyFrozenAtoms,
+ gmx::ArrayRef<const ParticleType> ptype,
+ gmx::ArrayRef<const real> invMass,
+ gmx::ArrayRef<const rvec> invMassPerDim,
+ t_state* state,
const gmx::ArrayRefWithPadding<const gmx::RVec>& f,
- const t_fcdata& fcdata,
+ t_fcdata* fcdata,
const gmx_ekindata_t* ekind,
const matrix M,
int updatePart,
const t_commrec* cr,
const bool haveConstraints)
{
- return impl_->update_coords(
- inputRecord, step, md, state, f, fcdata, ekind, M, updatePart, cr, haveConstraints);
+ return impl_->update_coords(inputRecord,
+ step,
+ homenr,
+ havePartiallyFrozenAtoms,
+ ptype,
+ invMass,
+ invMassPerDim,
+ state,
+ f,
+ fcdata,
+ ekind,
+ M,
+ updatePart,
+ cr,
+ haveConstraints);
}
void Update::finish_update(const t_inputrec& inputRecord,
- const t_mdatoms* md,
+ const bool havePartiallyFrozenAtoms,
+ const int homenr,
t_state* state,
- gmx_wallcycle_t wcycle,
+ gmx_wallcycle* wcycle,
const bool haveConstraints)
{
- return impl_->finish_update(inputRecord, md, state, wcycle, haveConstraints);
+ return impl_->finish_update(
+ inputRecord, havePartiallyFrozenAtoms, homenr, impl_->cFREEZE_, state, wcycle, haveConstraints);
}
-void Update::update_sd_second_half(const t_inputrec& inputRecord,
- int64_t step,
- real* dvdlambda,
- const t_mdatoms* md,
- t_state* state,
- const t_commrec* cr,
- t_nrnb* nrnb,
- gmx_wallcycle_t wcycle,
- gmx::Constraints* constr,
- bool do_log,
- bool do_ene)
+void Update::update_sd_second_half(const t_inputrec& inputRecord,
+ int64_t step,
+ real* dvdlambda,
+ const int homenr,
+ gmx::ArrayRef<const ParticleType> ptype,
+ gmx::ArrayRef<const real> invMass,
+ t_state* state,
+ const t_commrec* cr,
+ t_nrnb* nrnb,
+ gmx_wallcycle* wcycle,
+ gmx::Constraints* constr,
+ bool do_log,
+ bool do_ene)
{
return impl_->update_sd_second_half(
- inputRecord, step, dvdlambda, md, state, cr, nrnb, wcycle, constr, do_log, do_ene);
+ inputRecord, step, dvdlambda, homenr, ptype, invMass, state, cr, nrnb, wcycle, constr, do_log, do_ene);
}
-void Update::update_for_constraint_virial(const t_inputrec& inputRecord,
- const t_mdatoms& md,
- const t_state& state,
+void Update::update_for_constraint_virial(const t_inputrec& inputRecord,
+ const int homenr,
+ const bool havePartiallyFrozenAtoms,
+ gmx::ArrayRef<const real> invmass,
+ gmx::ArrayRef<const rvec> invMassPerDim,
+ const t_state& state,
const gmx::ArrayRefWithPadding<const gmx::RVec>& f,
const gmx_ekindata_t& ekind)
{
- return impl_->update_for_constraint_virial(inputRecord, md, state, f, ekind);
+ return impl_->update_for_constraint_virial(
+ inputRecord, homenr, havePartiallyFrozenAtoms, invmass, invMassPerDim, state, f, ekind);
}
void Update::update_temperature_constants(const t_inputrec& inputRecord)
return impl_->update_temperature_constants(inputRecord);
}
-/*! \brief Sets the velocities of virtual sites to zero */
-static void clearVsiteVelocities(int start, int nrend, const unsigned short* particleType, rvec* gmx_restrict v)
-{
- for (int a = start; a < nrend; a++)
- {
- if (particleType[a] == eptVSite)
- {
- clear_rvec(v[a]);
- }
- }
-}
-
/*! \brief Sets whether we store the updated velocities */
enum class StoreUpdatedVelocities
{
* \param[in] pRVScaleMatrixDiagonal Parrinello-Rahman v-scale matrix diagonal
* \param[in] x Input coordinates
* \param[out] xprime Updated coordinates
- * \param[inout] v Velocities
+ * \param[inout] v Velocities, type either rvec* or const rvec*
* \param[in] f Forces
*
* We expect this template to get good SIMD acceleration by most compilers,
* Note that we might get even better SIMD acceleration when we introduce
* aligned (and padded) memory, possibly with some hints for the compilers.
*/
-template<StoreUpdatedVelocities storeUpdatedVelocities, NumTempScaleValues numTempScaleValues, ApplyParrinelloRahmanVScaling applyPRVScaling>
-static void updateMDLeapfrogSimple(int start,
- int nrend,
- real dt,
- real dtPressureCouple,
- const rvec* gmx_restrict invMassPerDim,
- gmx::ArrayRef<const t_grp_tcstat> tcstat,
- const unsigned short* cTC,
- const rvec pRVScaleMatrixDiagonal,
- const rvec* gmx_restrict x,
- rvec* gmx_restrict xprime,
- rvec* gmx_restrict v,
- const rvec* gmx_restrict f)
+template<StoreUpdatedVelocities storeUpdatedVelocities, NumTempScaleValues numTempScaleValues, ApplyParrinelloRahmanVScaling applyPRVScaling, typename VelocityType>
+static std::enable_if_t<std::is_same<VelocityType, rvec>::value || std::is_same<VelocityType, const rvec>::value, void>
+updateMDLeapfrogSimple(int start,
+ int nrend,
+ real dt,
+ real dtPressureCouple,
+ gmx::ArrayRef<const rvec> invMassPerDim,
+ gmx::ArrayRef<const t_grp_tcstat> tcstat,
+ gmx::ArrayRef<const unsigned short> cTC,
+ const rvec pRVScaleMatrixDiagonal,
+ const rvec* gmx_restrict x,
+ rvec* gmx_restrict xprime,
+ VelocityType* gmx_restrict v,
+ const rvec* gmx_restrict f)
{
real lambdaGroup;
{
vNew -= dtPressureCouple * pRVScaleMatrixDiagonal[d] * v[a][d];
}
- if (storeUpdatedVelocities == StoreUpdatedVelocities::yes)
+ if constexpr (storeUpdatedVelocities == StoreUpdatedVelocities::yes)
{
v[a][d] = vNew;
}
+ // NOLINTNEXTLINE(readability-misleading-indentation) remove when clang-tidy-13 is required
xprime[a][d] = x[a][d] + vNew * dt;
}
}
* \param[in] tcstat Temperature coupling information
* \param[in] x Input coordinates
* \param[out] xprime Updated coordinates
- * \param[inout] v Velocities
+ * \param[inout] v Velocities, type either rvec* or const rvec*
* \param[in] f Forces
*/
-template<StoreUpdatedVelocities storeUpdatedVelocities>
-static void updateMDLeapfrogSimpleSimd(int start,
- int nrend,
- real dt,
- const real* gmx_restrict invMass,
- gmx::ArrayRef<const t_grp_tcstat> tcstat,
- const rvec* gmx_restrict x,
- rvec* gmx_restrict xprime,
- rvec* gmx_restrict v,
- const rvec* gmx_restrict f)
+template<StoreUpdatedVelocities storeUpdatedVelocities, typename VelocityType>
+static std::enable_if_t<std::is_same<VelocityType, rvec>::value || std::is_same<VelocityType, const rvec>::value, void>
+updateMDLeapfrogSimpleSimd(int start,
+ int nrend,
+ real dt,
+ gmx::ArrayRef<const real> invMass,
+ gmx::ArrayRef<const t_grp_tcstat> tcstat,
+ const rvec* gmx_restrict x,
+ rvec* gmx_restrict xprime,
+ VelocityType* gmx_restrict v,
+ const rvec* gmx_restrict f)
{
SimdReal timestep(dt);
SimdReal lambdaSystem(tcstat[0].lambda);
/* We declare variables here, since code is often slower when declaring them inside the loop */
/* Note: We should implement a proper PaddedVector, so we don't need this check */
- GMX_ASSERT(isSimdAligned(invMass), "invMass should be aligned");
+ GMX_ASSERT(isSimdAligned(invMass.data()), "invMass should be aligned");
for (int a = start; a < nrend; a += GMX_SIMD_REAL_WIDTH)
{
SimdReal invMass0, invMass1, invMass2;
- expandScalarsToTriplets(simdLoad(invMass + a), &invMass0, &invMass1, &invMass2);
+ expandScalarsToTriplets(simdLoad(invMass.data() + a), &invMass0, &invMass1, &invMass2);
SimdReal v0, v1, v2;
SimdReal f0, f1, f2;
v1 = fma(f1 * invMass1, timestep, lambdaSystem * v1);
v2 = fma(f2 * invMass2, timestep, lambdaSystem * v2);
- if (storeUpdatedVelocities == StoreUpdatedVelocities::yes)
+ if constexpr (storeUpdatedVelocities == StoreUpdatedVelocities::yes)
{
simdStoreRvecs(v, a, v0, v1, v2);
}
+ // NOLINTNEXTLINE(readability-misleading-indentation) remove when clang-tidy-13 is required
SimdReal x0, x1, x2;
simdLoadRvecs(x, a, &x0, &x1, &x2);
enum class AccelerationType
{
none,
+ group,
cosine
};
* \param[in] dt The time step.
* \param[in] dtPressureCouple Time step for pressure coupling, is 0 when no pressure
* coupling should be applied at this step.
- * \param[in] md Atom properties.
+ * \param[in] cTC Temperature coupling group indices
+ * \param[in] cAcceleration Acceleration group indices
+ * \param[in] acceleration Acceleration per group.
+ * \param[in] invMassPerDim Inverse mass per dimension
* \param[in] ekind Kinetic energy data.
* \param[in] box The box dimensions.
* \param[in] x Input coordinates.
* \param[in] M Parrinello-Rahman scaling matrix.
*/
template<AccelerationType accelerationType>
-static void updateMDLeapfrogGeneral(int start,
- int nrend,
- bool doNoseHoover,
- real dt,
- real dtPressureCouple,
- const t_mdatoms* md,
- const gmx_ekindata_t* ekind,
- const matrix box,
- const rvec* gmx_restrict x,
- rvec* gmx_restrict xprime,
- rvec* gmx_restrict v,
- const rvec* gmx_restrict f,
- const double* gmx_restrict nh_vxi,
- const int nsttcouple,
- const matrix M)
+static void updateMDLeapfrogGeneral(int start,
+ int nrend,
+ bool doNoseHoover,
+ real dt,
+ real dtPressureCouple,
+ gmx::ArrayRef<const unsigned short> cTC,
+ gmx::ArrayRef<const unsigned short> cAcceleration,
+ const rvec* gmx_restrict acceleration,
+ gmx::ArrayRef<const rvec> invMassPerDim,
+ const gmx_ekindata_t* ekind,
+ const matrix box,
+ const rvec* gmx_restrict x,
+ rvec* gmx_restrict xprime,
+ rvec* gmx_restrict v,
+ const rvec* gmx_restrict f,
+ const double* gmx_restrict nh_vxi,
+ const int nsttcouple,
+ const matrix M)
{
/* This is a version of the leap-frog integrator that supports
* all combinations of T-coupling, P-coupling and NEMD.
*/
gmx::ArrayRef<const t_grp_tcstat> tcstat = ekind->tcstat;
- const unsigned short* cTC = md->cTC;
-
- const rvec* gmx_restrict invMassPerDim = md->invMassPerDim;
/* Initialize group values, changed later when multiple groups are used */
+ int ga = 0;
int gt = 0;
real omega_Z = 2 * static_cast<real>(M_PI) / box[ZZ][ZZ];
for (int n = start; n < nrend; n++)
{
- if (cTC)
+ if (!cTC.empty())
{
gt = cTC[n];
}
rvec vRel;
real cosineZ, vCosine;
-#ifdef __INTEL_COMPILER
-# pragma warning(disable : 280)
-#endif
switch (accelerationType)
{
case AccelerationType::none: copy_rvec(v[n], vRel); break;
+ case AccelerationType::group:
+ if (!cAcceleration.empty())
+ {
+ ga = cAcceleration[n];
+ }
+ /* With constant acceleration we do scale the velocity of the accelerated groups */
+ copy_rvec(v[n], vRel);
+ break;
case AccelerationType::cosine:
cosineZ = std::cos(x[n][ZZ] * omega_Z);
vCosine = cosineZ * ekind->cosacc.vcos;
switch (accelerationType)
{
case AccelerationType::none: break;
+ case AccelerationType::group:
+ /* Apply the constant acceleration */
+ vNew += acceleration[ga][d] * dt;
+ break;
case AccelerationType::cosine:
if (d == XX)
{
}
/*! \brief Handles the Leap-frog MD x and v integration */
-static void do_update_md(int start,
- int nrend,
- real dt,
- int64_t step,
- const rvec* gmx_restrict x,
- rvec* gmx_restrict xprime,
- rvec* gmx_restrict v,
- const rvec* gmx_restrict f,
- const int etc,
- const int epc,
- const int nsttcouple,
- const int nstpcouple,
- const t_mdatoms* md,
- const gmx_ekindata_t* ekind,
- const matrix box,
- const double* gmx_restrict nh_vxi,
- const matrix M)
+static void do_update_md(int start,
+ int nrend,
+ real dt,
+ int64_t step,
+ const rvec* gmx_restrict x,
+ rvec* gmx_restrict xprime,
+ rvec* gmx_restrict v,
+ const rvec* gmx_restrict f,
+ const TemperatureCoupling etc,
+ const PressureCoupling epc,
+ const int nsttcouple,
+ const int nstpcouple,
+ gmx::ArrayRef<const unsigned short> cTC,
+ const bool useConstantAcceleration,
+ gmx::ArrayRef<const unsigned short> cAcceleration,
+ const rvec* acceleration,
+ gmx::ArrayRef<const real> gmx_unused invmass,
+ gmx::ArrayRef<const rvec> invMassPerDim,
+ const gmx_ekindata_t* ekind,
+ const matrix box,
+ const double* gmx_restrict nh_vxi,
+ const matrix M,
+ bool gmx_unused havePartiallyFrozenAtoms)
{
GMX_ASSERT(nrend == start || xprime != x,
"For SIMD optimization certain compilers need to have xprime != x");
/* Note: Berendsen pressure scaling is handled after do_update_md() */
- bool doTempCouple = (etc != etcNO && do_per_step(step + nsttcouple - 1, nsttcouple));
- bool doNoseHoover = (etc == etcNOSEHOOVER && doTempCouple);
- bool doParrinelloRahman =
- (epc == epcPARRINELLORAHMAN && do_per_step(step + nstpcouple - 1, nstpcouple));
+ bool doTempCouple =
+ (etc != TemperatureCoupling::No && do_per_step(step + nsttcouple - 1, nsttcouple));
+ bool doNoseHoover = (etc == TemperatureCoupling::NoseHoover && doTempCouple);
+ bool doParrinelloRahman = (epc == PressureCoupling::ParrinelloRahman
+ && do_per_step(step + nstpcouple - 1, nstpcouple));
bool doPROffDiagonal = (doParrinelloRahman && (M[YY][XX] != 0 || M[ZZ][XX] != 0 || M[ZZ][YY] != 0));
real dtPressureCouple = (doParrinelloRahman ? nstpcouple * dt : 0);
- /* NEMD cosine acceleration is applied in updateMDLeapFrogGeneral */
- bool doAcceleration = (ekind->cosacc.cos_accel != 0);
+ /* NEMD (also cosine) acceleration is applied in updateMDLeapFrogGeneral */
+ const bool doAcceleration = (useConstantAcceleration || ekind->cosacc.cos_accel != 0);
if (doNoseHoover || doPROffDiagonal || doAcceleration)
{
if (!doAcceleration)
{
- updateMDLeapfrogGeneral<AccelerationType::none>(
- start, nrend, doNoseHoover, dt, dtPressureCouple, md, ekind, box, x, xprime, v, f, nh_vxi, nsttcouple, stepM);
+ updateMDLeapfrogGeneral<AccelerationType::none>(start,
+ nrend,
+ doNoseHoover,
+ dt,
+ dtPressureCouple,
+ cTC,
+ cAcceleration,
+ acceleration,
+ invMassPerDim,
+ ekind,
+ box,
+ x,
+ xprime,
+ v,
+ f,
+ nh_vxi,
+ nsttcouple,
+ stepM);
+ }
+ else if (useConstantAcceleration)
+ {
+ updateMDLeapfrogGeneral<AccelerationType::group>(start,
+ nrend,
+ doNoseHoover,
+ dt,
+ dtPressureCouple,
+ cTC,
+ cAcceleration,
+ acceleration,
+ invMassPerDim,
+ ekind,
+ box,
+ x,
+ xprime,
+ v,
+ f,
+ nh_vxi,
+ nsttcouple,
+ stepM);
}
else
{
- updateMDLeapfrogGeneral<AccelerationType::cosine>(
- start, nrend, doNoseHoover, dt, dtPressureCouple, md, ekind, box, x, xprime, v, f, nh_vxi, nsttcouple, stepM);
+ updateMDLeapfrogGeneral<AccelerationType::cosine>(start,
+ nrend,
+ doNoseHoover,
+ dt,
+ dtPressureCouple,
+ cTC,
+ cAcceleration,
+ acceleration,
+ invMassPerDim,
+ ekind,
+ box,
+ x,
+ xprime,
+ v,
+ f,
+ nh_vxi,
+ nsttcouple,
+ stepM);
}
}
else
{
- /* Use a simple and thus more efficient integration loop. */
- /* The simple loop does not check for particle type (so it can
- * be vectorized), which means we need to clear the velocities
- * of virtual sites in advance, when present. Note that vsite
- * velocities are computed after update and constraints from
- * their displacement.
- */
- if (md->haveVsites)
- {
- /* Note: The overhead of this loop is completely neligible */
- clearVsiteVelocities(start, nrend, md->ptype, v);
- }
-
/* We determine if we have a single T-coupling lambda value for all
* atoms. That allows for better SIMD acceleration in the template.
* If we do not do temperature coupling (in the run or this step),
bool haveSingleTempScaleValue = (!doTempCouple || ekind->ngtc == 1);
/* Extract some pointers needed by all cases */
- const unsigned short* cTC = md->cTC;
- gmx::ArrayRef<const t_grp_tcstat> tcstat = ekind->tcstat;
- const rvec* invMassPerDim = md->invMassPerDim;
+ gmx::ArrayRef<const t_grp_tcstat> tcstat = ekind->tcstat;
if (doParrinelloRahman)
{
*/
#if GMX_HAVE_SIMD_UPDATE
/* Check if we can use invmass instead of invMassPerDim */
- if (!md->havePartiallyFrozenAtoms)
+ if (!havePartiallyFrozenAtoms)
{
updateMDLeapfrogSimpleSimd<StoreUpdatedVelocities::yes>(
- start, nrend, dt, md->invmass, tcstat, x, xprime, v, f);
+ start, nrend, dt, invmass, tcstat, x, xprime, v, f);
}
else
#endif
}
}
/*! \brief Handles the Leap-frog MD x and v integration */
-static void doUpdateMDDoNotUpdateVelocities(int start,
- int nrend,
- real dt,
+static void doUpdateMDDoNotUpdateVelocities(int start,
+ int nrend,
+ real dt,
const rvec* gmx_restrict x,
- rvec* gmx_restrict xprime,
- rvec* gmx_restrict v,
+ rvec* gmx_restrict xprime,
+ const rvec* gmx_restrict v,
const rvec* gmx_restrict f,
- const t_mdatoms& md,
- const gmx_ekindata_t& ekind)
+ bool gmx_unused havePartiallyFrozenAtoms,
+ gmx::ArrayRef<const real> gmx_unused invmass,
+ gmx::ArrayRef<const rvec> invMassPerDim,
+ const gmx_ekindata_t& ekind)
{
GMX_ASSERT(nrend == start || xprime != x,
"For SIMD optimization certain compilers need to have xprime != x");
/* Check if we can use invmass instead of invMassPerDim */
#if GMX_HAVE_SIMD_UPDATE
- if (!md.havePartiallyFrozenAtoms)
+ if (!havePartiallyFrozenAtoms)
{
updateMDLeapfrogSimpleSimd<StoreUpdatedVelocities::no>(
- start, nrend, dt, md.invmass, tcstat, x, xprime, v, f);
+ start, nrend, dt, invmass, tcstat, x, xprime, v, f);
}
else
#endif
{
updateMDLeapfrogSimple<StoreUpdatedVelocities::no, NumTempScaleValues::single, ApplyParrinelloRahmanVScaling::no>(
- start, nrend, dt, dt, md.invMassPerDim, tcstat, nullptr, nullptr, x, xprime, v, f);
+ start, nrend, dt, dt, invMassPerDim, tcstat, gmx::ArrayRef<const unsigned short>(), nullptr, x, xprime, v, f);
}
}
-static void do_update_vv_vel(int start,
- int nrend,
- real dt,
- const ivec nFreeze[],
- const real invmass[],
- const unsigned short ptype[],
- const unsigned short cFREEZE[],
- rvec v[],
- const rvec f[],
- gmx_bool bExtended,
- real veta,
- real alpha)
+static void do_update_vv_vel(int start,
+ int nrend,
+ real dt,
+ gmx::ArrayRef<const ivec> nFreeze,
+ gmx::ArrayRef<const unsigned short> cAcceleration,
+ const rvec* acceleration,
+ gmx::ArrayRef<const real> invmass,
+ gmx::ArrayRef<const ParticleType> ptype,
+ gmx::ArrayRef<const unsigned short> cFREEZE,
+ rvec v[],
+ const rvec f[],
+ gmx_bool bExtended,
+ real veta,
+ real alpha)
{
- int gf = 0;
+ int gf = 0, ga = 0;
int n, d;
real g, mv1, mv2;
for (n = start; n < nrend; n++)
{
real w_dt = invmass[n] * dt;
- if (cFREEZE)
+ if (!cFREEZE.empty())
{
gf = cFREEZE[n];
}
+ if (!cAcceleration.empty())
+ {
+ ga = cAcceleration[n];
+ }
for (d = 0; d < DIM; d++)
{
- if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
+ if ((ptype[n] != ParticleType::Shell) && !nFreeze[gf][d])
{
- v[n][d] = mv1 * (mv1 * v[n][d] + 0.5 * (w_dt * mv2 * f[n][d]));
+ v[n][d] = mv1 * (mv1 * v[n][d] + 0.5 * (w_dt * mv2 * f[n][d]))
+ + 0.5 * acceleration[ga][d] * dt;
}
else
{
}
} /* do_update_vv_vel */
-static void do_update_vv_pos(int start,
- int nrend,
- real dt,
- const ivec nFreeze[],
- const unsigned short ptype[],
- const unsigned short cFREEZE[],
- const rvec x[],
- rvec xprime[],
- const rvec v[],
- gmx_bool bExtended,
- real veta)
+static void do_update_vv_pos(int start,
+ int nrend,
+ real dt,
+ gmx::ArrayRef<const ivec> nFreeze,
+ gmx::ArrayRef<const ParticleType> ptype,
+ gmx::ArrayRef<const unsigned short> cFREEZE,
+ const rvec x[],
+ rvec xprime[],
+ const rvec v[],
+ gmx_bool bExtended,
+ real veta)
{
int gf = 0;
int n, d;
for (n = start; n < nrend; n++)
{
- if (cFREEZE)
+ if (!cFREEZE.empty())
{
gf = cFREEZE[n];
}
for (d = 0; d < DIM; d++)
{
- if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
+ if ((ptype[n] != ParticleType::Shell) && !nFreeze[gf][d])
{
xprime[n][d] = mr1 * (mr1 * x[n][d] + mr2 * dt * v[n][d]);
}
const t_grpopts* opts = &inputRecord.opts;
const int ngtc = opts->ngtc;
- if (inputRecord.eI == eiBD)
+ if (inputRecord.eI == IntegrationAlgorithm::BD)
{
bd_rf.resize(ngtc);
}
&& (reft > 0)) /* tau_t or ref_t = 0 means that no randomization is done */
{
randomize_group[gt] = true;
- boltzfac[gt] = BOLTZ * opts->ref_t[gt];
+ boltzfac[gt] = gmx::c_boltz * opts->ref_t[gt];
}
else
{
void Update::Impl::update_temperature_constants(const t_inputrec& inputRecord)
{
- if (inputRecord.eI == eiBD)
+ if (inputRecord.eI == IntegrationAlgorithm::BD)
{
if (inputRecord.bd_fric != 0)
{
for (int gt = 0; gt < inputRecord.opts.ngtc; gt++)
{
- sd_.bd_rf[gt] = std::sqrt(2.0 * BOLTZ * inputRecord.opts.ref_t[gt]
+ sd_.bd_rf[gt] = std::sqrt(2.0 * gmx::c_boltz * inputRecord.opts.ref_t[gt]
/ (inputRecord.bd_fric * inputRecord.delta_t));
}
}
{
for (int gt = 0; gt < inputRecord.opts.ngtc; gt++)
{
- sd_.bd_rf[gt] = std::sqrt(2.0 * BOLTZ * inputRecord.opts.ref_t[gt]);
+ sd_.bd_rf[gt] = std::sqrt(2.0 * gmx::c_boltz * inputRecord.opts.ref_t[gt]);
}
}
}
- if (inputRecord.eI == eiSD1)
+ if (inputRecord.eI == IntegrationAlgorithm::SD1)
{
for (int gt = 0; gt < inputRecord.opts.ngtc; gt++)
{
- real kT = BOLTZ * inputRecord.opts.ref_t[gt];
+ real kT = gmx::c_boltz * inputRecord.opts.ref_t[gt];
/* The mass is accounted for later, since this differs per atom */
sd_.sdsig[gt].V = std::sqrt(kT * (1 - sd_.sdc[gt].em * sd_.sdc[gt].em));
}
}
Update::Impl::Impl(const t_inputrec& inputRecord, BoxDeformation* boxDeformation) :
- sd_(inputRecord),
- deform_(boxDeformation)
+ sd_(inputRecord), deform_(boxDeformation)
{
update_temperature_constants(inputRecord);
xp_.resizeWithPadding(0);
}
-void Update::setNumAtoms(int numAtoms)
+void Update::updateAfterPartition(int numAtoms,
+ gmx::ArrayRef<const unsigned short> cFREEZE,
+ gmx::ArrayRef<const unsigned short> cTC,
+ gmx::ArrayRef<const unsigned short> cAcceleration)
{
impl_->xp()->resizeWithPadding(numAtoms);
+ impl_->cFREEZE_ = cFREEZE;
+ impl_->cTC_ = cTC;
+ impl_->cAcceleration_ = cAcceleration;
}
/*! \brief Sets the SD update type */
* Thus three instantiations of this templated function will be made,
* two with only one contribution, and one with both contributions. */
template<SDUpdate updateType>
-static void doSDUpdateGeneral(const gmx_stochd_t& sd,
- int start,
- int nrend,
- real dt,
- const ivec nFreeze[],
- const real invmass[],
- const unsigned short ptype[],
- const unsigned short cFREEZE[],
- const unsigned short cTC[],
- const rvec x[],
- rvec xprime[],
- rvec v[],
- const rvec f[],
- int64_t step,
- int seed,
- const int* gatindex)
+static void doSDUpdateGeneral(const gmx_stochd_t& sd,
+ int start,
+ int nrend,
+ real dt,
+ gmx::ArrayRef<const ivec> nFreeze,
+ gmx::ArrayRef<const real> invmass,
+ gmx::ArrayRef<const ParticleType> ptype,
+ gmx::ArrayRef<const unsigned short> cFREEZE,
+ gmx::ArrayRef<const unsigned short> cTC,
+ gmx::ArrayRef<const unsigned short> cAcceleration,
+ const rvec* acceleration,
+ const rvec x[],
+ rvec xprime[],
+ rvec v[],
+ const rvec f[],
+ int64_t step,
+ int seed,
+ const int* gatindex)
{
- // cTC and cFREEZE can be nullptr any time, but various
+ // cTC, cAcceleration and cFREEZE can be nullptr any time, but various
// instantiations do not make sense with particular pointer
// values.
if (updateType == SDUpdate::ForcesOnly)
{
GMX_ASSERT(f != nullptr, "SD update with only forces requires forces");
- GMX_ASSERT(cTC == nullptr, "SD update with only forces cannot handle temperature groups");
+ GMX_ASSERT(cTC.empty(), "SD update with only forces cannot handle temperature groups");
}
if (updateType == SDUpdate::FrictionAndNoiseOnly)
{
GMX_ASSERT(f == nullptr, "SD update with only noise cannot handle forces");
+ GMX_ASSERT(cAcceleration.empty(),
+ "SD update with only noise cannot handle acceleration groups");
}
if (updateType == SDUpdate::Combined)
{
real inverseMass = invmass[n];
real invsqrtMass = std::sqrt(inverseMass);
- int freezeGroup = cFREEZE ? cFREEZE[n] : 0;
- int temperatureGroup = cTC ? cTC[n] : 0;
+ int freezeGroup = !cFREEZE.empty() ? cFREEZE[n] : 0;
+ int accelerationGroup = !cAcceleration.empty() ? cAcceleration[n] : 0;
+ int temperatureGroup = !cTC.empty() ? cTC[n] : 0;
for (int d = 0; d < DIM; d++)
{
- if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[freezeGroup][d])
+ if ((ptype[n] != ParticleType::Shell) && !nFreeze[freezeGroup][d])
{
if (updateType == SDUpdate::ForcesOnly)
{
- real vn = v[n][d] + inverseMass * f[n][d] * dt;
+ real vn = v[n][d] + (inverseMass * f[n][d] + acceleration[accelerationGroup][d]) * dt;
v[n][d] = vn;
// Simple position update.
xprime[n][d] = x[n][d] + v[n][d] * dt;
}
else
{
- real vn = v[n][d] + inverseMass * f[n][d] * dt;
+ real vn = v[n][d] + (inverseMass * f[n][d] + acceleration[accelerationGroup][d]) * dt;
v[n][d] = (vn * sd.sdc[temperatureGroup].em
+ invsqrtMass * sd.sdsig[temperatureGroup].V * dist(rng));
// Here we include half of the friction+noise
}
}
-static void do_update_sd(int start,
- int nrend,
- real dt,
- int64_t step,
- const rvec* gmx_restrict x,
- rvec* gmx_restrict xprime,
- rvec* gmx_restrict v,
- const rvec* gmx_restrict f,
- const ivec nFreeze[],
- const real invmass[],
- const unsigned short ptype[],
- const unsigned short cFREEZE[],
- const unsigned short cTC[],
- int seed,
- const t_commrec* cr,
- const gmx_stochd_t& sd,
- bool haveConstraints)
+static void do_update_sd(int start,
+ int nrend,
+ real dt,
+ int64_t step,
+ const rvec* gmx_restrict x,
+ rvec* gmx_restrict xprime,
+ rvec* gmx_restrict v,
+ const rvec* gmx_restrict f,
+ gmx::ArrayRef<const ivec> nFreeze,
+ gmx::ArrayRef<const real> invmass,
+ gmx::ArrayRef<const ParticleType> ptype,
+ gmx::ArrayRef<const unsigned short> cFREEZE,
+ gmx::ArrayRef<const unsigned short> cTC,
+ gmx::ArrayRef<const unsigned short> cAcceleration,
+ const rvec* acceleration,
+ int seed,
+ const t_commrec* cr,
+ const gmx_stochd_t& sd,
+ bool haveConstraints)
{
if (haveConstraints)
{
// With constraints, the SD update is done in 2 parts
- doSDUpdateGeneral<SDUpdate::ForcesOnly>(
- sd, start, nrend, dt, nFreeze, invmass, ptype, cFREEZE, nullptr, x, xprime, v, f, step, seed, nullptr);
+ doSDUpdateGeneral<SDUpdate::ForcesOnly>(sd,
+ start,
+ nrend,
+ dt,
+ nFreeze,
+ invmass,
+ ptype,
+ cFREEZE,
+ gmx::ArrayRef<const unsigned short>(),
+ cAcceleration,
+ acceleration,
+ x,
+ xprime,
+ v,
+ f,
+ step,
+ seed,
+ nullptr);
}
else
{
- doSDUpdateGeneral<SDUpdate::Combined>(sd,
- start,
- nrend,
- dt,
- nFreeze,
- invmass,
- ptype,
- cFREEZE,
- cTC,
- x,
- xprime,
- v,
- f,
- step,
- seed,
- DOMAINDECOMP(cr) ? cr->dd->globalAtomIndices.data() : nullptr);
+ doSDUpdateGeneral<SDUpdate::Combined>(
+ sd,
+ start,
+ nrend,
+ dt,
+ nFreeze,
+ invmass,
+ ptype,
+ cFREEZE,
+ cTC,
+ cAcceleration,
+ acceleration,
+ x,
+ xprime,
+ v,
+ f,
+ step,
+ seed,
+ haveDDAtomOrdering(*cr) ? cr->dd->globalAtomIndices.data() : nullptr);
}
}
-static void do_update_bd(int start,
- int nrend,
- real dt,
- int64_t step,
- const rvec* gmx_restrict x,
- rvec* gmx_restrict xprime,
- rvec* gmx_restrict v,
- const rvec* gmx_restrict f,
- const ivec nFreeze[],
- const real invmass[],
- const unsigned short ptype[],
- const unsigned short cFREEZE[],
- const unsigned short cTC[],
- real friction_coefficient,
- const real* rf,
- int seed,
- const int* gatindex)
+static void do_update_bd(int start,
+ int nrend,
+ real dt,
+ int64_t step,
+ const rvec* gmx_restrict x,
+ rvec* gmx_restrict xprime,
+ rvec* gmx_restrict v,
+ const rvec* gmx_restrict f,
+ gmx::ArrayRef<const ivec> nFreeze,
+ gmx::ArrayRef<const real> invmass,
+ gmx::ArrayRef<const ParticleType> ptype,
+ gmx::ArrayRef<const unsigned short> cFREEZE,
+ gmx::ArrayRef<const unsigned short> cTC,
+ real friction_coefficient,
+ const real* rf,
+ int seed,
+ const int* gatindex)
{
/* note -- these appear to be full step velocities . . . */
int gf = 0, gt = 0;
rng.restart(step, ng);
dist.reset();
- if (cFREEZE)
+ if (!cFREEZE.empty())
{
gf = cFREEZE[n];
}
- if (cTC)
+ if (!cTC.empty())
{
gt = cTC[n];
}
for (d = 0; (d < DIM); d++)
{
- if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
+ if ((ptype[n] != ParticleType::Shell) && !nFreeze[gf][d])
{
if (friction_coefficient != 0)
{
}
}
-void Update::Impl::update_sd_second_half(const t_inputrec& inputRecord,
- int64_t step,
- real* dvdlambda,
- const t_mdatoms* md,
- t_state* state,
- const t_commrec* cr,
- t_nrnb* nrnb,
- gmx_wallcycle_t wcycle,
- gmx::Constraints* constr,
- bool do_log,
- bool do_ene)
+// Performs the second half of the stochastic-dynamics (SD1) update for the
+// home atoms, then constrains the resulting coordinates in xp_ for half a
+// time step. Only needed when constraints are in use; returns immediately
+// when constr is null.
+void Update::Impl::update_sd_second_half(const t_inputrec& inputRecord,
+ int64_t step,
+ real* dvdlambda,
+ int homenr,
+ gmx::ArrayRef<const ParticleType> ptype,
+ gmx::ArrayRef<const real> invMass,
+ t_state* state,
+ const t_commrec* cr,
+ t_nrnb* nrnb,
+ gmx_wallcycle* wcycle,
+ gmx::Constraints* constr,
+ bool do_log,
+ bool do_ene)
{
if (!constr)
{
return;
}
- if (inputRecord.eI == eiSD1)
+ if (inputRecord.eI == IntegrationAlgorithm::SD1)
{
- int homenr = md->homenr;
/* Cast delta_t from double to real to make the integrators faster.
* The only reason for having delta_t double is to get accurate values
*/
real dt = inputRecord.delta_t;
- wallcycle_start(wcycle, ewcUPDATE);
+ wallcycle_start(wcycle, WallCycleCounter::Update);
- int nth = gmx_omp_nthreads_get(emntUpdate);
+ int nth = gmx_omp_nthreads_get(ModuleMultiThread::Update);
#pragma omp parallel for num_threads(nth) schedule(static)
for (int th = 0; th < nth; th++)
start_th,
end_th,
dt,
- inputRecord.opts.nFreeze,
- md->invmass,
- md->ptype,
- md->cFREEZE,
- md->cTC,
+ gmx::arrayRefFromArray(inputRecord.opts.nFreeze, inputRecord.opts.ngfrz),
+ invMass,
+ ptype,
+ cFREEZE_,
+ cTC_,
+ cAcceleration_,
+ inputRecord.opts.acceleration,
state->x.rvec_array(),
xp_.rvec_array(),
state->v.rvec_array(),
nullptr,
step,
inputRecord.ld_seed,
- DOMAINDECOMP(cr) ? cr->dd->globalAtomIndices.data() : nullptr);
+ haveDDAtomOrdering(*cr) ? cr->dd->globalAtomIndices.data() : nullptr);
}
GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
}
inc_nrnb(nrnb, eNR_UPDATE, homenr);
- wallcycle_stop(wcycle, ewcUPDATE);
+ wallcycle_stop(wcycle, WallCycleCounter::Update);
/* Constrain the coordinates upd->xp for half a time step */
bool computeVirial = false;
xp_.arrayRefWithPadding(),
ArrayRef<RVec>(),
state->box,
- state->lambda[efptBONDED],
+ state->lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Bonded)],
dvdlambda,
state->v.arrayRefWithPadding(),
computeVirial,
}
}
-void Update::Impl::finish_update(const t_inputrec& inputRecord,
- const t_mdatoms* md,
- t_state* state,
- gmx_wallcycle_t wcycle,
- const bool haveConstraints)
+// Copies the integrated coordinates from the internal buffer xp_ back into
+// state->x for the home atoms. When some atoms are frozen only along certain
+// dimensions and constraints are active, the frozen dimensions are restored
+// from the pre-update coordinates instead of copied.
+void Update::Impl::finish_update(const t_inputrec& inputRecord,
+ const bool havePartiallyFrozenAtoms,
+ const int homenr,
+ gmx::ArrayRef<const unsigned short> cFREEZE,
+ t_state* state,
+ gmx_wallcycle* wcycle,
+ const bool haveConstraints)
{
/* NOTE: Currently we always integrate to a temporary buffer and
* then copy the results back here.
*/
- wallcycle_start_nocount(wcycle, ewcUPDATE);
+ wallcycle_start_nocount(wcycle, WallCycleCounter::Update);
- const int homenr = md->homenr;
- auto xp = makeConstArrayRef(xp_).subArray(0, homenr);
- auto x = makeArrayRef(state->x).subArray(0, homenr);
+ // Only the home-atom range of the buffers is touched here.
+ auto xp = makeConstArrayRef(xp_).subArray(0, homenr);
+ auto x = makeArrayRef(state->x).subArray(0, homenr);
- if (md->havePartiallyFrozenAtoms && haveConstraints)
+ if (havePartiallyFrozenAtoms && haveConstraints)
{
/* We have atoms that are frozen along some, but not all dimensions,
* then constraints will have moved them also along the frozen dimensions.
for (int i = 0; i < homenr; i++)
{
- const int g = md->cFREEZE[i];
+ const int g = cFREEZE[i];
for (int d = 0; d < DIM; d++)
{
/* We have no frozen atoms or fully frozen atoms which have not
* been moved by the update, so we can simply copy all coordinates.
*/
- int gmx_unused nth = gmx_omp_nthreads_get(emntUpdate);
+ int gmx_unused nth = gmx_omp_nthreads_get(ModuleMultiThread::Update);
#pragma omp parallel for num_threads(nth) schedule(static)
for (int i = 0; i < homenr; i++)
{
}
}
- wallcycle_stop(wcycle, ewcUPDATE);
+ wallcycle_stop(wcycle, WallCycleCounter::Update);
}
-void Update::Impl::update_coords(const t_inputrec& inputRecord,
- int64_t step,
- const t_mdatoms* md,
- t_state* state,
+// Integrates velocities and/or positions for one step, dispatching on the
+// integrator chosen in inputRecord.eI (MD leap-frog, SD1, BD, VV/VVAK).
+// Also updates the NMR restraint (disre/orires) histories when time
+// averaging is enabled.
+void Update::Impl::update_coords(const t_inputrec& inputRecord,
+ int64_t step,
+ int homenr,
+ bool havePartiallyFrozenAtoms,
+ gmx::ArrayRef<const ParticleType> ptype,
+ gmx::ArrayRef<const real> invMass,
+ gmx::ArrayRef<const rvec> invMassPerDim,
+ t_state* state,
const gmx::ArrayRefWithPadding<const gmx::RVec>& f,
- const t_fcdata& fcdata,
+ t_fcdata* fcdata,
const gmx_ekindata_t* ekind,
const matrix M,
int updatePart,
gmx_incons("update_coords called for velocity without VV integrator");
}
- int homenr = md->homenr;
-
/* Cast to real for faster code, no loss in precision (see comment above) */
real dt = inputRecord.delta_t;
/* We need to update the NMR restraint history when time averaging is used */
- if (state->flags & (1 << estDISRE_RM3TAV))
+ if (state->flags & enumValueToBitMask(StateEntry::DisreRm3Tav))
{
- update_disres_history(*fcdata.disres, &state->hist);
+ // NOTE(review): fcdata is dereferenced here without the GMX_ASSERT used in
+ // the orires branch below — consider adding the same assertion here.
+ update_disres_history(*fcdata->disres, &state->hist);
}
- if (state->flags & (1 << estORIRE_DTAV))
+ if (state->flags & enumValueToBitMask(StateEntry::OrireDtav))
{
- update_orires_history(*fcdata.orires, &state->hist);
+ GMX_ASSERT(fcdata, "Need valid fcdata");
+ fcdata->orires->updateHistory();
}
/* ############# START The update of velocities and positions ######### */
- int nth = gmx_omp_nthreads_get(emntUpdate);
+ int nth = gmx_omp_nthreads_get(ModuleMultiThread::Update);
#pragma omp parallel for num_threads(nth) schedule(static)
for (int th = 0; th < nth; th++)
switch (inputRecord.eI)
{
- case (eiMD):
+ case (IntegrationAlgorithm::MD):
do_update_md(start_th,
end_th,
dt,
inputRecord.epc,
inputRecord.nsttcouple,
inputRecord.nstpcouple,
- md,
+ cTC_,
+ inputRecord.useConstantAcceleration,
+ cAcceleration_,
+ inputRecord.opts.acceleration,
+ invMass,
+ invMassPerDim,
ekind,
state->box,
state->nosehoover_vxi.data(),
- M);
+ M,
+ havePartiallyFrozenAtoms);
break;
- case (eiSD1):
+ case (IntegrationAlgorithm::SD1):
do_update_sd(start_th,
end_th,
dt,
xp_rvec,
v_rvec,
f_rvec,
- inputRecord.opts.nFreeze,
- md->invmass,
- md->ptype,
- md->cFREEZE,
- md->cTC,
+ gmx::arrayRefFromArray(inputRecord.opts.nFreeze, inputRecord.opts.ngfrz),
+ invMass,
+ ptype,
+ cFREEZE_,
+ cTC_,
+ cAcceleration_,
+ inputRecord.opts.acceleration,
inputRecord.ld_seed,
cr,
sd_,
haveConstraints);
break;
- case (eiBD):
+ case (IntegrationAlgorithm::BD):
do_update_bd(start_th,
end_th,
dt,
xp_rvec,
v_rvec,
f_rvec,
- inputRecord.opts.nFreeze,
- md->invmass,
- md->ptype,
- md->cFREEZE,
- md->cTC,
+ gmx::arrayRefFromArray(inputRecord.opts.nFreeze, inputRecord.opts.ngfrz),
+ invMass,
+ ptype,
+ cFREEZE_,
+ cTC_,
inputRecord.bd_fric,
sd_.bd_rf.data(),
inputRecord.ld_seed,
- DOMAINDECOMP(cr) ? cr->dd->globalAtomIndices.data() : nullptr);
+ haveDDAtomOrdering(*cr) ? cr->dd->globalAtomIndices.data() : nullptr);
break;
- case (eiVV):
- case (eiVVAK):
+ case (IntegrationAlgorithm::VV):
+ case (IntegrationAlgorithm::VVAK):
{
- gmx_bool bExtended = (inputRecord.etc == etcNOSEHOOVER || inputRecord.epc == epcPARRINELLORAHMAN
- || inputRecord.epc == epcMTTK);
+ gmx_bool bExtended = (inputRecord.etc == TemperatureCoupling::NoseHoover
+ || inputRecord.epc == PressureCoupling::ParrinelloRahman
+ || inputRecord.epc == PressureCoupling::Mttk);
/* assuming barostat coupled to group 0 */
real alpha = 1.0 + DIM / static_cast<real>(inputRecord.opts.nrdf[0]);
do_update_vv_vel(start_th,
end_th,
dt,
- inputRecord.opts.nFreeze,
- md->invmass,
- md->ptype,
- md->cFREEZE,
+ gmx::arrayRefFromArray(inputRecord.opts.nFreeze,
+ inputRecord.opts.ngfrz),
+ cAcceleration_,
+ inputRecord.opts.acceleration,
+ invMass,
+ ptype,
+ cFREEZE_,
v_rvec,
f_rvec,
bExtended,
do_update_vv_pos(start_th,
end_th,
dt,
- inputRecord.opts.nFreeze,
- md->ptype,
- md->cFREEZE,
+ gmx::arrayRefFromArray(inputRecord.opts.nFreeze,
+ inputRecord.opts.ngfrz),
+ ptype,
+ cFREEZE_,
x_rvec,
xp_rvec,
v_rvec,
}
}
-void Update::Impl::update_for_constraint_virial(const t_inputrec& inputRecord,
- const t_mdatoms& md,
- const t_state& state,
+// Leap-frog position-only update into xp_ (velocities are read but not
+// modified), used to obtain coordinates for the constraint-virial
+// calculation. Only valid for the leap-frog integrators (MD, SD1).
+void Update::Impl::update_for_constraint_virial(const t_inputrec& inputRecord,
+ int homenr,
+ bool havePartiallyFrozenAtoms,
+ gmx::ArrayRef<const real> invMass,
+ gmx::ArrayRef<const rvec> invMassPerDim,
+ const t_state& state,
const gmx::ArrayRefWithPadding<const gmx::RVec>& f,
const gmx_ekindata_t& ekind)
{
- GMX_ASSERT(inputRecord.eI == eiMD || inputRecord.eI == eiSD1,
+ GMX_ASSERT(inputRecord.eI == IntegrationAlgorithm::MD || inputRecord.eI == IntegrationAlgorithm::SD1,
"Only leap-frog is supported here");
// Cast to real for faster code, no loss in precision
const real dt = inputRecord.delta_t;
- const int nth = gmx_omp_nthreads_get(emntUpdate);
+ const int nth = gmx_omp_nthreads_get(ModuleMultiThread::Update);
#pragma omp parallel for num_threads(nth) schedule(static)
for (int th = 0; th < nth; th++)
try
{
int start_th, end_th;
- getThreadAtomRange(nth, th, md.homenr, &start_th, &end_th);
+ getThreadAtomRange(nth, th, homenr, &start_th, &end_th);
const rvec* x_rvec = state.x.rvec_array();
rvec* xp_rvec = xp_.rvec_array();
- rvec* v_rvec = const_cast<rvec*>(state.v.rvec_array());
+ // Velocities are not modified here, so keep them const (no const_cast).
+ const rvec* v_rvec = state.v.rvec_array();
const rvec* f_rvec = as_rvec_array(f.unpaddedConstArrayRef().data());
doUpdateMDDoNotUpdateVelocities(
- start_th, end_th, dt, x_rvec, xp_rvec, v_rvec, f_rvec, md, ekind);
+ start_th, end_th, dt, x_rvec, xp_rvec, v_rvec, f_rvec, havePartiallyFrozenAtoms, invMass, invMassPerDim, ekind);
}
GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
}