* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
* Copyright (c) 2013,2014,2015,2016,2017 by the GROMACS development team.
- * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
+ * Copyright (c) 2018,2019,2020,2021, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#include <memory>
#include "gromacs/domdec/domdec_struct.h"
-#include "gromacs/fileio/confio.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/listed_forces/disre.h"
#include "gromacs/listed_forces/orires.h"
#include "gromacs/math/functions.h"
-#include "gromacs/math/invertmatrix.h"
#include "gromacs/math/paddedvector.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
-#include "gromacs/math/vecdump.h"
#include "gromacs/mdlib/boxdeformation.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
-#include "gromacs/mdlib/mdatoms.h"
#include "gromacs/mdlib/stat.h"
#include "gromacs/mdlib/tgroup.h"
#include "gromacs/mdtypes/commrec.h"
+#include "gromacs/mdtypes/fcdata.h"
#include "gromacs/mdtypes/group.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
-#include "gromacs/mdtypes/mdatom.h"
#include "gromacs/mdtypes/state.h"
-#include "gromacs/pbcutil/boxutilities.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/random/tabulatednormaldistribution.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/futil.h"
#include "gromacs/utility/gmxassert.h"
-#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/smalloc.h"
using namespace gmx; // TODO: Remove when this file is moved into gmx namespace
void update_coords(const t_inputrec& inputRecord,
int64_t step,
- const t_mdatoms* md,
+ int homenr,
+ bool havePartiallyFrozenAtoms,
+ gmx::ArrayRef<const ParticleType> ptype,
+ gmx::ArrayRef<const real> invMass,
+ gmx::ArrayRef<const rvec> invMassPerDim,
t_state* state,
const gmx::ArrayRefWithPadding<const gmx::RVec>& f,
- const t_fcdata* fcd,
+ t_fcdata* fcdata,
const gmx_ekindata_t* ekind,
const matrix M,
int UpdatePart,
const t_commrec* cr,
bool haveConstraints);
- void finish_update(const t_inputrec& inputRecord,
- const t_mdatoms* md,
- t_state* state,
- gmx_wallcycle_t wcycle,
- bool haveConstraints);
-
- void update_sd_second_half(const t_inputrec& inputRecord,
- int64_t step,
- real* dvdlambda,
- const t_mdatoms* md,
- t_state* state,
- const t_commrec* cr,
- t_nrnb* nrnb,
- gmx_wallcycle_t wcycle,
- gmx::Constraints* constr,
- bool do_log,
- bool do_ene);
+ void finish_update(const t_inputrec& inputRecord,
+ bool havePartiallyFrozenAtoms,
+ int homenr,
+ gmx::ArrayRef<const unsigned short> cFREEZE,
+ t_state* state,
+ gmx_wallcycle* wcycle,
+ bool haveConstraints);
+
+ void update_sd_second_half(const t_inputrec& inputRecord,
+ int64_t step,
+ real* dvdlambda,
+ int homenr,
+ gmx::ArrayRef<const ParticleType> ptype,
+ gmx::ArrayRef<const real> invMass,
+ t_state* state,
+ const t_commrec* cr,
+ t_nrnb* nrnb,
+ gmx_wallcycle* wcycle,
+ gmx::Constraints* constr,
+ bool do_log,
+ bool do_ene);
+
+ void update_for_constraint_virial(const t_inputrec& inputRecord,
+ int homenr,
+ bool havePartiallyFrozenAtoms,
+ gmx::ArrayRef<const real> invmass,
+ gmx::ArrayRef<const rvec> invMassPerDim,
+ const t_state& state,
+ const gmx::ArrayRefWithPadding<const gmx::RVec>& f,
+ const gmx_ekindata_t& ekind);
void update_temperature_constants(const t_inputrec& inputRecord);
BoxDeformation* deform() const { return deform_; }
+ //! Group index for freezing
+ gmx::ArrayRef<const unsigned short> cFREEZE_;
+ //! Group index for temperature coupling
+ gmx::ArrayRef<const unsigned short> cTC_;
+ //! Group index for acceleration groups
+ gmx::ArrayRef<const unsigned short> cAcceleration_;
+
private:
//! stochastic dynamics struct
gmx_stochd_t sd_;
return impl_->deform();
}
-void Update::update_coords(const t_inputrec& inputRecord,
- int64_t step,
- const t_mdatoms* md,
- t_state* state,
+void Update::update_coords(const t_inputrec& inputRecord,
+ int64_t step,
+ const int homenr,
+ const bool havePartiallyFrozenAtoms,
+ gmx::ArrayRef<const ParticleType> ptype,
+ gmx::ArrayRef<const real> invMass,
+ gmx::ArrayRef<const rvec> invMassPerDim,
+ t_state* state,
const gmx::ArrayRefWithPadding<const gmx::RVec>& f,
- const t_fcdata* fcd,
+ t_fcdata* fcdata,
const gmx_ekindata_t* ekind,
const matrix M,
int updatePart,
const t_commrec* cr,
const bool haveConstraints)
{
- return impl_->update_coords(inputRecord, step, md, state, f, fcd, ekind, M, updatePart, cr,
+ return impl_->update_coords(inputRecord,
+ step,
+ homenr,
+ havePartiallyFrozenAtoms,
+ ptype,
+ invMass,
+ invMassPerDim,
+ state,
+ f,
+ fcdata,
+ ekind,
+ M,
+ updatePart,
+ cr,
haveConstraints);
}
void Update::finish_update(const t_inputrec& inputRecord,
- const t_mdatoms* md,
+ const bool havePartiallyFrozenAtoms,
+ const int homenr,
t_state* state,
- gmx_wallcycle_t wcycle,
+ gmx_wallcycle* wcycle,
const bool haveConstraints)
{
- return impl_->finish_update(inputRecord, md, state, wcycle, haveConstraints);
+ return impl_->finish_update(
+ inputRecord, havePartiallyFrozenAtoms, homenr, impl_->cFREEZE_, state, wcycle, haveConstraints);
+}
+
+void Update::update_sd_second_half(const t_inputrec& inputRecord,
+ int64_t step,
+ real* dvdlambda,
+ const int homenr,
+ gmx::ArrayRef<const ParticleType> ptype,
+ gmx::ArrayRef<const real> invMass,
+ t_state* state,
+ const t_commrec* cr,
+ t_nrnb* nrnb,
+ gmx_wallcycle* wcycle,
+ gmx::Constraints* constr,
+ bool do_log,
+ bool do_ene)
+{
+ return impl_->update_sd_second_half(
+ inputRecord, step, dvdlambda, homenr, ptype, invMass, state, cr, nrnb, wcycle, constr, do_log, do_ene);
}
-void Update::update_sd_second_half(const t_inputrec& inputRecord,
- int64_t step,
- real* dvdlambda,
- const t_mdatoms* md,
- t_state* state,
- const t_commrec* cr,
- t_nrnb* nrnb,
- gmx_wallcycle_t wcycle,
- gmx::Constraints* constr,
- bool do_log,
- bool do_ene)
+void Update::update_for_constraint_virial(const t_inputrec& inputRecord,
+ const int homenr,
+ const bool havePartiallyFrozenAtoms,
+ gmx::ArrayRef<const real> invmass,
+ gmx::ArrayRef<const rvec> invMassPerDim,
+ const t_state& state,
+ const gmx::ArrayRefWithPadding<const gmx::RVec>& f,
+ const gmx_ekindata_t& ekind)
{
- return impl_->update_sd_second_half(inputRecord, step, dvdlambda, md, state, cr, nrnb, wcycle,
- constr, do_log, do_ene);
+ return impl_->update_for_constraint_virial(
+ inputRecord, homenr, havePartiallyFrozenAtoms, invmass, invMassPerDim, state, f, ekind);
}
void Update::update_temperature_constants(const t_inputrec& inputRecord)
return impl_->update_temperature_constants(inputRecord);
}
-/*! \brief Sets the velocities of virtual sites to zero */
-static void clearVsiteVelocities(int start, int nrend, const unsigned short* particleType, rvec* gmx_restrict v)
+/*! \brief Sets whether we store the updated velocities */
+enum class StoreUpdatedVelocities
{
- for (int a = start; a < nrend; a++)
- {
- if (particleType[a] == eptVSite)
- {
- clear_rvec(v[a]);
- }
- }
-}
+ yes, //!< Store the updated velocities
+ no //!< Do not store the updated velocities
+};
/*! \brief Sets the number of different temperature coupling values */
enum class NumTempScaleValues
/*! \brief Integrate using leap-frog with T-scaling and optionally diagonal Parrinello-Rahman p-coupling
*
+ * \tparam storeUpdatedVelocities Tells whether we should store the updated velocities
* \tparam numTempScaleValues The number of different T-couple values
* \tparam applyPRVScaling Apply Parrinello-Rahman velocity scaling
* \param[in] start Index of first atom to update
* \param[in] pRVScaleMatrixDiagonal Parrinello-Rahman v-scale matrix diagonal
* \param[in] x Input coordinates
* \param[out] xprime Updated coordinates
- * \param[inout] v Velocities
+ * \param[inout] v Velocities, type either rvec* or const rvec*
* \param[in] f Forces
*
* We expect this template to get good SIMD acceleration by most compilers,
* Note that we might get even better SIMD acceleration when we introduce
* aligned (and padded) memory, possibly with some hints for the compilers.
*/
-template<NumTempScaleValues numTempScaleValues, ApplyParrinelloRahmanVScaling applyPRVScaling>
-static void updateMDLeapfrogSimple(int start,
- int nrend,
- real dt,
- real dtPressureCouple,
- const rvec* gmx_restrict invMassPerDim,
- gmx::ArrayRef<const t_grp_tcstat> tcstat,
- const unsigned short* cTC,
- const rvec pRVScaleMatrixDiagonal,
- const rvec* gmx_restrict x,
- rvec* gmx_restrict xprime,
- rvec* gmx_restrict v,
- const rvec* gmx_restrict f)
+template<StoreUpdatedVelocities storeUpdatedVelocities, NumTempScaleValues numTempScaleValues, ApplyParrinelloRahmanVScaling applyPRVScaling, typename VelocityType>
+static std::enable_if_t<std::is_same<VelocityType, rvec>::value || std::is_same<VelocityType, const rvec>::value, void>
+updateMDLeapfrogSimple(int start,
+ int nrend,
+ real dt,
+ real dtPressureCouple,
+ gmx::ArrayRef<const rvec> invMassPerDim,
+ gmx::ArrayRef<const t_grp_tcstat> tcstat,
+ gmx::ArrayRef<const unsigned short> cTC,
+ const rvec pRVScaleMatrixDiagonal,
+ const rvec* gmx_restrict x,
+ rvec* gmx_restrict xprime,
+ VelocityType* gmx_restrict v,
+ const rvec* gmx_restrict f)
{
real lambdaGroup;
{
vNew -= dtPressureCouple * pRVScaleMatrixDiagonal[d] * v[a][d];
}
- v[a][d] = vNew;
+ if constexpr (storeUpdatedVelocities == StoreUpdatedVelocities::yes)
+ {
+ v[a][d] = vNew;
+ }
+ // NOLINTNEXTLINE(readability-misleading-indentation) remove when clang-tidy-13 is required
xprime[a][d] = x[a][d] + vNew * dt;
}
}
/*! \brief Integrate using leap-frog with single group T-scaling and SIMD
*
+ * \tparam storeUpdatedVelocities Tells whether we should store the updated velocities
* \param[in] start Index of first atom to update
* \param[in] nrend Last atom to update: \p nrend - 1
* \param[in] dt The time step
* \param[in] tcstat Temperature coupling information
* \param[in] x Input coordinates
* \param[out] xprime Updated coordinates
- * \param[inout] v Velocities
+ * \param[inout] v Velocities, type either rvec* or const rvec*
* \param[in] f Forces
*/
-static void updateMDLeapfrogSimpleSimd(int start,
- int nrend,
- real dt,
- const real* gmx_restrict invMass,
- gmx::ArrayRef<const t_grp_tcstat> tcstat,
- const rvec* gmx_restrict x,
- rvec* gmx_restrict xprime,
- rvec* gmx_restrict v,
- const rvec* gmx_restrict f)
+template<StoreUpdatedVelocities storeUpdatedVelocities, typename VelocityType>
+static std::enable_if_t<std::is_same<VelocityType, rvec>::value || std::is_same<VelocityType, const rvec>::value, void>
+updateMDLeapfrogSimpleSimd(int start,
+ int nrend,
+ real dt,
+ gmx::ArrayRef<const real> invMass,
+ gmx::ArrayRef<const t_grp_tcstat> tcstat,
+ const rvec* gmx_restrict x,
+ rvec* gmx_restrict xprime,
+ VelocityType* gmx_restrict v,
+ const rvec* gmx_restrict f)
{
SimdReal timestep(dt);
SimdReal lambdaSystem(tcstat[0].lambda);
/* We declare variables here, since code is often slower when declaring them inside the loop */
/* Note: We should implement a proper PaddedVector, so we don't need this check */
- GMX_ASSERT(isSimdAligned(invMass), "invMass should be aligned");
+ GMX_ASSERT(isSimdAligned(invMass.data()), "invMass should be aligned");
for (int a = start; a < nrend; a += GMX_SIMD_REAL_WIDTH)
{
SimdReal invMass0, invMass1, invMass2;
- expandScalarsToTriplets(simdLoad(invMass + a), &invMass0, &invMass1, &invMass2);
+ expandScalarsToTriplets(simdLoad(invMass.data() + a), &invMass0, &invMass1, &invMass2);
SimdReal v0, v1, v2;
SimdReal f0, f1, f2;
v1 = fma(f1 * invMass1, timestep, lambdaSystem * v1);
v2 = fma(f2 * invMass2, timestep, lambdaSystem * v2);
- simdStoreRvecs(v, a, v0, v1, v2);
+ if constexpr (storeUpdatedVelocities == StoreUpdatedVelocities::yes)
+ {
+ simdStoreRvecs(v, a, v0, v1, v2);
+ }
+ // NOLINTNEXTLINE(readability-misleading-indentation) remove when clang-tidy-13 is required
SimdReal x0, x1, x2;
simdLoadRvecs(x, a, &x0, &x1, &x2);
* \param[in] dt The time step.
* \param[in] dtPressureCouple Time step for pressure coupling, is 0 when no pressure
* coupling should be applied at this step.
- * \param[in] accel Acceleration per group.
- * \param[in] md Atom properties.
+ * \param[in] cTC Temperature coupling group indices
+ * \param[in] cAcceleration Acceleration group indices
+ * \param[in] acceleration Acceleration per group.
+ * \param[in] invMassPerDim Inverse mass per dimension
* \param[in] ekind Kinetic energy data.
* \param[in] box The box dimensions.
* \param[in] x Input coordinates.
* \param[in] M Parrinello-Rahman scaling matrix.
*/
template<AccelerationType accelerationType>
-static void updateMDLeapfrogGeneral(int start,
- int nrend,
- bool doNoseHoover,
- real dt,
- real dtPressureCouple,
- const rvec* accel,
- const t_mdatoms* md,
- const gmx_ekindata_t* ekind,
- const matrix box,
- const rvec* gmx_restrict x,
- rvec* gmx_restrict xprime,
- rvec* gmx_restrict v,
- const rvec* gmx_restrict f,
- const double* gmx_restrict nh_vxi,
- const int nsttcouple,
- const matrix M)
+static void updateMDLeapfrogGeneral(int start,
+ int nrend,
+ bool doNoseHoover,
+ real dt,
+ real dtPressureCouple,
+ gmx::ArrayRef<const unsigned short> cTC,
+ gmx::ArrayRef<const unsigned short> cAcceleration,
+ const rvec* gmx_restrict acceleration,
+ gmx::ArrayRef<const rvec> invMassPerDim,
+ const gmx_ekindata_t* ekind,
+ const matrix box,
+ const rvec* gmx_restrict x,
+ rvec* gmx_restrict xprime,
+ rvec* gmx_restrict v,
+ const rvec* gmx_restrict f,
+ const double* gmx_restrict nh_vxi,
+ const int nsttcouple,
+ const matrix M)
{
/* This is a version of the leap-frog integrator that supports
* all combinations of T-coupling, P-coupling and NEMD.
* Holian et al. Phys Rev E 52(3) : 2338, 1995
*/
- gmx::ArrayRef<const t_grp_tcstat> tcstat = ekind->tcstat;
- gmx::ArrayRef<const t_grp_acc> grpstat = ekind->grpstat;
- const unsigned short* cTC = md->cTC;
- const unsigned short* cACC = md->cACC;
-
- const rvec* gmx_restrict invMassPerDim = md->invMassPerDim;
+ gmx::ArrayRef<const t_grp_tcstat> tcstat = ekind->tcstat;
/* Initialize group values, changed later when multiple groups are used */
int ga = 0;
for (int n = start; n < nrend; n++)
{
- if (cTC)
+ if (!cTC.empty())
{
gt = cTC[n];
}
rvec vRel;
real cosineZ, vCosine;
-#ifdef __INTEL_COMPILER
-# pragma warning(disable : 280)
-#endif
switch (accelerationType)
{
case AccelerationType::none: copy_rvec(v[n], vRel); break;
case AccelerationType::group:
- if (cACC)
+ if (!cAcceleration.empty())
{
- ga = cACC[n];
+ ga = cAcceleration[n];
}
- /* Avoid scaling the group velocity */
- rvec_sub(v[n], grpstat[ga].u, vRel);
+ /* With constant acceleration we do scale the velocity of the accelerated groups */
+ copy_rvec(v[n], vRel);
break;
case AccelerationType::cosine:
cosineZ = std::cos(x[n][ZZ] * omega_Z);
{
case AccelerationType::none: break;
case AccelerationType::group:
- /* Add back the mean velocity and apply acceleration */
- vNew += grpstat[ga].u[d] + accel[ga][d] * dt;
+ /* Apply the constant acceleration */
+ vNew += acceleration[ga][d] * dt;
break;
case AccelerationType::cosine:
if (d == XX)
}
/*! \brief Handles the Leap-frog MD x and v integration */
-static void do_update_md(int start,
- int nrend,
- real dt,
- int64_t step,
- const rvec* gmx_restrict x,
- rvec* gmx_restrict xprime,
- rvec* gmx_restrict v,
- const rvec* gmx_restrict f,
- const rvec* gmx_restrict accel,
- const int etc,
- const int epc,
- const int nsttcouple,
- const int nstpcouple,
- const t_mdatoms* md,
- const gmx_ekindata_t* ekind,
- const matrix box,
- const double* gmx_restrict nh_vxi,
- const matrix M)
+static void do_update_md(int start,
+ int nrend,
+ real dt,
+ int64_t step,
+ const rvec* gmx_restrict x,
+ rvec* gmx_restrict xprime,
+ rvec* gmx_restrict v,
+ const rvec* gmx_restrict f,
+ const TemperatureCoupling etc,
+ const PressureCoupling epc,
+ const int nsttcouple,
+ const int nstpcouple,
+ gmx::ArrayRef<const unsigned short> cTC,
+ const bool useConstantAcceleration,
+ gmx::ArrayRef<const unsigned short> cAcceleration,
+ const rvec* acceleration,
+ gmx::ArrayRef<const real> gmx_unused invmass,
+ gmx::ArrayRef<const rvec> invMassPerDim,
+ const gmx_ekindata_t* ekind,
+ const matrix box,
+ const double* gmx_restrict nh_vxi,
+ const matrix M,
+ bool gmx_unused havePartiallyFrozenAtoms)
{
GMX_ASSERT(nrend == start || xprime != x,
"For SIMD optimization certain compilers need to have xprime != x");
/* Note: Berendsen pressure scaling is handled after do_update_md() */
- bool doTempCouple = (etc != etcNO && do_per_step(step + nsttcouple - 1, nsttcouple));
- bool doNoseHoover = (etc == etcNOSEHOOVER && doTempCouple);
- bool doParrinelloRahman =
- (epc == epcPARRINELLORAHMAN && do_per_step(step + nstpcouple - 1, nstpcouple));
+ bool doTempCouple =
+ (etc != TemperatureCoupling::No && do_per_step(step + nsttcouple - 1, nsttcouple));
+ bool doNoseHoover = (etc == TemperatureCoupling::NoseHoover && doTempCouple);
+ bool doParrinelloRahman = (epc == PressureCoupling::ParrinelloRahman
+ && do_per_step(step + nstpcouple - 1, nstpcouple));
bool doPROffDiagonal = (doParrinelloRahman && (M[YY][XX] != 0 || M[ZZ][XX] != 0 || M[ZZ][YY] != 0));
real dtPressureCouple = (doParrinelloRahman ? nstpcouple * dt : 0);
/* NEMD (also cosine) acceleration is applied in updateMDLeapFrogGeneral */
- bool doAcceleration = (ekind->bNEMD || ekind->cosacc.cos_accel != 0);
+ const bool doAcceleration = (useConstantAcceleration || ekind->cosacc.cos_accel != 0);
if (doNoseHoover || doPROffDiagonal || doAcceleration)
{
if (!doAcceleration)
{
- updateMDLeapfrogGeneral<AccelerationType::none>(start, nrend, doNoseHoover, dt,
- dtPressureCouple, accel, md, ekind, box, x,
- xprime, v, f, nh_vxi, nsttcouple, stepM);
+ updateMDLeapfrogGeneral<AccelerationType::none>(start,
+ nrend,
+ doNoseHoover,
+ dt,
+ dtPressureCouple,
+ cTC,
+ cAcceleration,
+ acceleration,
+ invMassPerDim,
+ ekind,
+ box,
+ x,
+ xprime,
+ v,
+ f,
+ nh_vxi,
+ nsttcouple,
+ stepM);
}
- else if (ekind->bNEMD)
+ else if (useConstantAcceleration)
{
- updateMDLeapfrogGeneral<AccelerationType::group>(
- start, nrend, doNoseHoover, dt, dtPressureCouple, accel, md, ekind, box, x,
- xprime, v, f, nh_vxi, nsttcouple, stepM);
+ updateMDLeapfrogGeneral<AccelerationType::group>(start,
+ nrend,
+ doNoseHoover,
+ dt,
+ dtPressureCouple,
+ cTC,
+ cAcceleration,
+ acceleration,
+ invMassPerDim,
+ ekind,
+ box,
+ x,
+ xprime,
+ v,
+ f,
+ nh_vxi,
+ nsttcouple,
+ stepM);
}
else
{
- updateMDLeapfrogGeneral<AccelerationType::cosine>(
- start, nrend, doNoseHoover, dt, dtPressureCouple, accel, md, ekind, box, x,
- xprime, v, f, nh_vxi, nsttcouple, stepM);
+ updateMDLeapfrogGeneral<AccelerationType::cosine>(start,
+ nrend,
+ doNoseHoover,
+ dt,
+ dtPressureCouple,
+ cTC,
+ cAcceleration,
+ acceleration,
+ invMassPerDim,
+ ekind,
+ box,
+ x,
+ xprime,
+ v,
+ f,
+ nh_vxi,
+ nsttcouple,
+ stepM);
}
}
else
{
- /* Use a simple and thus more efficient integration loop. */
- /* The simple loop does not check for particle type (so it can
- * be vectorized), which means we need to clear the velocities
- * of virtual sites in advance, when present. Note that vsite
- * velocities are computed after update and constraints from
- * their displacement.
- */
- if (md->haveVsites)
- {
- /* Note: The overhead of this loop is completely neligible */
- clearVsiteVelocities(start, nrend, md->ptype, v);
- }
-
/* We determine if we have a single T-coupling lambda value for all
* atoms. That allows for better SIMD acceleration in the template.
* If we do not do temperature coupling (in the run or this step),
bool haveSingleTempScaleValue = (!doTempCouple || ekind->ngtc == 1);
/* Extract some pointers needed by all cases */
- const unsigned short* cTC = md->cTC;
- gmx::ArrayRef<const t_grp_tcstat> tcstat = ekind->tcstat;
- const rvec* invMassPerDim = md->invMassPerDim;
+ gmx::ArrayRef<const t_grp_tcstat> tcstat = ekind->tcstat;
if (doParrinelloRahman)
{
if (haveSingleTempScaleValue)
{
- updateMDLeapfrogSimple<NumTempScaleValues::single, ApplyParrinelloRahmanVScaling::diagonal>(
- start, nrend, dt, dtPressureCouple, invMassPerDim, tcstat, cTC, diagM, x,
- xprime, v, f);
+ updateMDLeapfrogSimple<StoreUpdatedVelocities::yes, NumTempScaleValues::single, ApplyParrinelloRahmanVScaling::diagonal>(
+ start, nrend, dt, dtPressureCouple, invMassPerDim, tcstat, cTC, diagM, x, xprime, v, f);
}
else
{
- updateMDLeapfrogSimple<NumTempScaleValues::multiple, ApplyParrinelloRahmanVScaling::diagonal>(
- start, nrend, dt, dtPressureCouple, invMassPerDim, tcstat, cTC, diagM, x,
- xprime, v, f);
+ updateMDLeapfrogSimple<StoreUpdatedVelocities::yes, NumTempScaleValues::multiple, ApplyParrinelloRahmanVScaling::diagonal>(
+ start, nrend, dt, dtPressureCouple, invMassPerDim, tcstat, cTC, diagM, x, xprime, v, f);
}
}
else
*/
#if GMX_HAVE_SIMD_UPDATE
/* Check if we can use invmass instead of invMassPerDim */
- if (!md->havePartiallyFrozenAtoms)
+ if (!havePartiallyFrozenAtoms)
{
- updateMDLeapfrogSimpleSimd(start, nrend, dt, md->invmass, tcstat, x, xprime, v, f);
+ updateMDLeapfrogSimpleSimd<StoreUpdatedVelocities::yes>(
+ start, nrend, dt, invmass, tcstat, x, xprime, v, f);
}
else
#endif
{
- updateMDLeapfrogSimple<NumTempScaleValues::single, ApplyParrinelloRahmanVScaling::no>(
- start, nrend, dt, dtPressureCouple, invMassPerDim, tcstat, cTC, nullptr,
- x, xprime, v, f);
+ updateMDLeapfrogSimple<StoreUpdatedVelocities::yes, NumTempScaleValues::single, ApplyParrinelloRahmanVScaling::no>(
+ start, nrend, dt, dtPressureCouple, invMassPerDim, tcstat, cTC, nullptr, x, xprime, v, f);
}
}
else
{
- updateMDLeapfrogSimple<NumTempScaleValues::multiple, ApplyParrinelloRahmanVScaling::no>(
- start, nrend, dt, dtPressureCouple, invMassPerDim, tcstat, cTC, nullptr, x,
- xprime, v, f);
+ updateMDLeapfrogSimple<StoreUpdatedVelocities::yes, NumTempScaleValues::multiple, ApplyParrinelloRahmanVScaling::no>(
+ start, nrend, dt, dtPressureCouple, invMassPerDim, tcstat, cTC, nullptr, x, xprime, v, f);
}
}
}
}
+/*! \brief Handles the Leap-frog MD x and v integration */
+static void doUpdateMDDoNotUpdateVelocities(int start,
+ int nrend,
+ real dt,
+ const rvec* gmx_restrict x,
+ rvec* gmx_restrict xprime,
+ const rvec* gmx_restrict v,
+ const rvec* gmx_restrict f,
+ bool gmx_unused havePartiallyFrozenAtoms,
+ gmx::ArrayRef<const real> gmx_unused invmass,
+ gmx::ArrayRef<const rvec> invMassPerDim,
+ const gmx_ekindata_t& ekind)
+{
+ GMX_ASSERT(nrend == start || xprime != x,
+ "For SIMD optimization certain compilers need to have xprime != x");
+
+ gmx::ArrayRef<const t_grp_tcstat> tcstat = ekind.tcstat;
-static void do_update_vv_vel(int start,
- int nrend,
- real dt,
- const rvec accel[],
- const ivec nFreeze[],
- const real invmass[],
- const unsigned short ptype[],
- const unsigned short cFREEZE[],
- const unsigned short cACC[],
- rvec v[],
- const rvec f[],
- gmx_bool bExtended,
- real veta,
- real alpha)
+ /* Check if we can use invmass instead of invMassPerDim */
+#if GMX_HAVE_SIMD_UPDATE
+ if (!havePartiallyFrozenAtoms)
+ {
+ updateMDLeapfrogSimpleSimd<StoreUpdatedVelocities::no>(
+ start, nrend, dt, invmass, tcstat, x, xprime, v, f);
+ }
+ else
+#endif
+ {
+ updateMDLeapfrogSimple<StoreUpdatedVelocities::no, NumTempScaleValues::single, ApplyParrinelloRahmanVScaling::no>(
+ start, nrend, dt, dt, invMassPerDim, tcstat, gmx::ArrayRef<const unsigned short>(), nullptr, x, xprime, v, f);
+ }
+}
+
+static void do_update_vv_vel(int start,
+ int nrend,
+ real dt,
+ gmx::ArrayRef<const ivec> nFreeze,
+ gmx::ArrayRef<const unsigned short> cAcceleration,
+ const rvec* acceleration,
+ gmx::ArrayRef<const real> invmass,
+ gmx::ArrayRef<const ParticleType> ptype,
+ gmx::ArrayRef<const unsigned short> cFREEZE,
+ rvec v[],
+ const rvec f[],
+ gmx_bool bExtended,
+ real veta,
+ real alpha)
{
int gf = 0, ga = 0;
int n, d;
for (n = start; n < nrend; n++)
{
real w_dt = invmass[n] * dt;
- if (cFREEZE)
+ if (!cFREEZE.empty())
{
gf = cFREEZE[n];
}
- if (cACC)
+ if (!cAcceleration.empty())
{
- ga = cACC[n];
+ ga = cAcceleration[n];
}
for (d = 0; d < DIM; d++)
{
- if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
+ if ((ptype[n] != ParticleType::Shell) && !nFreeze[gf][d])
{
- v[n][d] = mv1 * (mv1 * v[n][d] + 0.5 * (w_dt * mv2 * f[n][d])) + 0.5 * accel[ga][d] * dt;
+ v[n][d] = mv1 * (mv1 * v[n][d] + 0.5 * (w_dt * mv2 * f[n][d]))
+ + 0.5 * acceleration[ga][d] * dt;
}
else
{
}
} /* do_update_vv_vel */
-static void do_update_vv_pos(int start,
- int nrend,
- real dt,
- const ivec nFreeze[],
- const unsigned short ptype[],
- const unsigned short cFREEZE[],
- const rvec x[],
- rvec xprime[],
- const rvec v[],
- gmx_bool bExtended,
- real veta)
+static void do_update_vv_pos(int start,
+ int nrend,
+ real dt,
+ gmx::ArrayRef<const ivec> nFreeze,
+ gmx::ArrayRef<const ParticleType> ptype,
+ gmx::ArrayRef<const unsigned short> cFREEZE,
+ const rvec x[],
+ rvec xprime[],
+ const rvec v[],
+ gmx_bool bExtended,
+ real veta)
{
int gf = 0;
int n, d;
for (n = start; n < nrend; n++)
{
- if (cFREEZE)
+ if (!cFREEZE.empty())
{
gf = cFREEZE[n];
}
for (d = 0; d < DIM; d++)
{
- if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
+ if ((ptype[n] != ParticleType::Shell) && !nFreeze[gf][d])
{
xprime[n][d] = mr1 * (mr1 * x[n][d] + mr2 * dt * v[n][d]);
}
const t_grpopts* opts = &inputRecord.opts;
const int ngtc = opts->ngtc;
- if (inputRecord.eI == eiBD)
+ if (inputRecord.eI == IntegrationAlgorithm::BD)
{
bd_rf.resize(ngtc);
}
&& (reft > 0)) /* tau_t or ref_t = 0 means that no randomization is done */
{
randomize_group[gt] = true;
- boltzfac[gt] = BOLTZ * opts->ref_t[gt];
+ boltzfac[gt] = gmx::c_boltz * opts->ref_t[gt];
}
else
{
void Update::Impl::update_temperature_constants(const t_inputrec& inputRecord)
{
- if (inputRecord.eI == eiBD)
+ if (inputRecord.eI == IntegrationAlgorithm::BD)
{
if (inputRecord.bd_fric != 0)
{
for (int gt = 0; gt < inputRecord.opts.ngtc; gt++)
{
- sd_.bd_rf[gt] = std::sqrt(2.0 * BOLTZ * inputRecord.opts.ref_t[gt]
+ sd_.bd_rf[gt] = std::sqrt(2.0 * gmx::c_boltz * inputRecord.opts.ref_t[gt]
/ (inputRecord.bd_fric * inputRecord.delta_t));
}
}
{
for (int gt = 0; gt < inputRecord.opts.ngtc; gt++)
{
- sd_.bd_rf[gt] = std::sqrt(2.0 * BOLTZ * inputRecord.opts.ref_t[gt]);
+ sd_.bd_rf[gt] = std::sqrt(2.0 * gmx::c_boltz * inputRecord.opts.ref_t[gt]);
}
}
}
- if (inputRecord.eI == eiSD1)
+ if (inputRecord.eI == IntegrationAlgorithm::SD1)
{
for (int gt = 0; gt < inputRecord.opts.ngtc; gt++)
{
- real kT = BOLTZ * inputRecord.opts.ref_t[gt];
+ real kT = gmx::c_boltz * inputRecord.opts.ref_t[gt];
/* The mass is accounted for later, since this differs per atom */
sd_.sdsig[gt].V = std::sqrt(kT * (1 - sd_.sdc[gt].em * sd_.sdc[gt].em));
}
}
Update::Impl::Impl(const t_inputrec& inputRecord, BoxDeformation* boxDeformation) :
- sd_(inputRecord),
- deform_(boxDeformation)
+ sd_(inputRecord), deform_(boxDeformation)
{
update_temperature_constants(inputRecord);
xp_.resizeWithPadding(0);
}
-void Update::setNumAtoms(int numAtoms)
+void Update::updateAfterPartition(int numAtoms,
+ gmx::ArrayRef<const unsigned short> cFREEZE,
+ gmx::ArrayRef<const unsigned short> cTC,
+ gmx::ArrayRef<const unsigned short> cAcceleration)
{
impl_->xp()->resizeWithPadding(numAtoms);
+ impl_->cFREEZE_ = cFREEZE;
+ impl_->cTC_ = cTC;
+ impl_->cAcceleration_ = cAcceleration;
}
/*! \brief Sets the SD update type */
* Thus three instantiations of this templated function will be made,
* two with only one contribution, and one with both contributions. */
template<SDUpdate updateType>
-static void doSDUpdateGeneral(const gmx_stochd_t& sd,
- int start,
- int nrend,
- real dt,
- const rvec accel[],
- const ivec nFreeze[],
- const real invmass[],
- const unsigned short ptype[],
- const unsigned short cFREEZE[],
- const unsigned short cACC[],
- const unsigned short cTC[],
- const rvec x[],
- rvec xprime[],
- rvec v[],
- const rvec f[],
- int64_t step,
- int seed,
- const int* gatindex)
+static void doSDUpdateGeneral(const gmx_stochd_t& sd,
+ int start,
+ int nrend,
+ real dt,
+ gmx::ArrayRef<const ivec> nFreeze,
+ gmx::ArrayRef<const real> invmass,
+ gmx::ArrayRef<const ParticleType> ptype,
+ gmx::ArrayRef<const unsigned short> cFREEZE,
+ gmx::ArrayRef<const unsigned short> cTC,
+ gmx::ArrayRef<const unsigned short> cAcceleration,
+ const rvec* acceleration,
+ const rvec x[],
+ rvec xprime[],
+ rvec v[],
+ const rvec f[],
+ int64_t step,
+ int seed,
+ const int* gatindex)
{
// cTC, cACC and cFREEZE can be nullptr any time, but various
// instantiations do not make sense with particular pointer
if (updateType == SDUpdate::ForcesOnly)
{
GMX_ASSERT(f != nullptr, "SD update with only forces requires forces");
- GMX_ASSERT(cTC == nullptr, "SD update with only forces cannot handle temperature groups");
+ GMX_ASSERT(cTC.empty(), "SD update with only forces cannot handle temperature groups");
}
if (updateType == SDUpdate::FrictionAndNoiseOnly)
{
GMX_ASSERT(f == nullptr, "SD update with only noise cannot handle forces");
- GMX_ASSERT(cACC == nullptr, "SD update with only noise cannot handle acceleration groups");
+ GMX_ASSERT(cAcceleration.empty(),
+ "SD update with only noise cannot handle acceleration groups");
}
if (updateType == SDUpdate::Combined)
{
real inverseMass = invmass[n];
real invsqrtMass = std::sqrt(inverseMass);
- int freezeGroup = cFREEZE ? cFREEZE[n] : 0;
- int accelerationGroup = cACC ? cACC[n] : 0;
- int temperatureGroup = cTC ? cTC[n] : 0;
+ int freezeGroup = !cFREEZE.empty() ? cFREEZE[n] : 0;
+ int accelerationGroup = !cAcceleration.empty() ? cAcceleration[n] : 0;
+ int temperatureGroup = !cTC.empty() ? cTC[n] : 0;
for (int d = 0; d < DIM; d++)
{
- if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[freezeGroup][d])
+ if ((ptype[n] != ParticleType::Shell) && !nFreeze[freezeGroup][d])
{
if (updateType == SDUpdate::ForcesOnly)
{
- real vn = v[n][d] + (inverseMass * f[n][d] + accel[accelerationGroup][d]) * dt;
+ real vn = v[n][d] + (inverseMass * f[n][d] + acceleration[accelerationGroup][d]) * dt;
v[n][d] = vn;
// Simple position update.
xprime[n][d] = x[n][d] + v[n][d] * dt;
}
else
{
- real vn = v[n][d] + (inverseMass * f[n][d] + accel[accelerationGroup][d]) * dt;
+ real vn = v[n][d] + (inverseMass * f[n][d] + acceleration[accelerationGroup][d]) * dt;
v[n][d] = (vn * sd.sdc[temperatureGroup].em
+ invsqrtMass * sd.sdsig[temperatureGroup].V * dist(rng));
// Here we include half of the friction+noise
}
}
-static void do_update_sd(int start,
- int nrend,
- real dt,
- int64_t step,
- const rvec* gmx_restrict x,
- rvec* gmx_restrict xprime,
- rvec* gmx_restrict v,
- const rvec* gmx_restrict f,
- const rvec accel[],
- const ivec nFreeze[],
- const real invmass[],
- const unsigned short ptype[],
- const unsigned short cFREEZE[],
- const unsigned short cACC[],
- const unsigned short cTC[],
- int seed,
- const t_commrec* cr,
- const gmx_stochd_t& sd,
- bool haveConstraints)
+static void do_update_sd(int start,
+ int nrend,
+ real dt,
+ int64_t step,
+ const rvec* gmx_restrict x,
+ rvec* gmx_restrict xprime,
+ rvec* gmx_restrict v,
+ const rvec* gmx_restrict f,
+ gmx::ArrayRef<const ivec> nFreeze,
+ gmx::ArrayRef<const real> invmass,
+ gmx::ArrayRef<const ParticleType> ptype,
+ gmx::ArrayRef<const unsigned short> cFREEZE,
+ gmx::ArrayRef<const unsigned short> cTC,
+ gmx::ArrayRef<const unsigned short> cAcceleration,
+ const rvec* acceleration,
+ int seed,
+ const t_commrec* cr,
+ const gmx_stochd_t& sd,
+ bool haveConstraints)
{
if (haveConstraints)
{
// With constraints, the SD update is done in 2 parts
- doSDUpdateGeneral<SDUpdate::ForcesOnly>(sd, start, nrend, dt, accel, nFreeze, invmass,
- ptype, cFREEZE, cACC, nullptr, x, xprime, v, f,
- step, seed, nullptr);
+ doSDUpdateGeneral<SDUpdate::ForcesOnly>(sd,
+ start,
+ nrend,
+ dt,
+ nFreeze,
+ invmass,
+ ptype,
+ cFREEZE,
+ gmx::ArrayRef<const unsigned short>(),
+ cAcceleration,
+ acceleration,
+ x,
+ xprime,
+ v,
+ f,
+ step,
+ seed,
+ nullptr);
}
else
{
doSDUpdateGeneral<SDUpdate::Combined>(
- sd, start, nrend, dt, accel, nFreeze, invmass, ptype, cFREEZE, cACC, cTC, x, xprime,
- v, f, step, seed, DOMAINDECOMP(cr) ? cr->dd->globalAtomIndices.data() : nullptr);
+ sd,
+ start,
+ nrend,
+ dt,
+ nFreeze,
+ invmass,
+ ptype,
+ cFREEZE,
+ cTC,
+ cAcceleration,
+ acceleration,
+ x,
+ xprime,
+ v,
+ f,
+ step,
+ seed,
+ haveDDAtomOrdering(*cr) ? cr->dd->globalAtomIndices.data() : nullptr);
}
}
-static void do_update_bd(int start,
- int nrend,
- real dt,
- int64_t step,
- const rvec* gmx_restrict x,
- rvec* gmx_restrict xprime,
- rvec* gmx_restrict v,
- const rvec* gmx_restrict f,
- const ivec nFreeze[],
- const real invmass[],
- const unsigned short ptype[],
- const unsigned short cFREEZE[],
- const unsigned short cTC[],
- real friction_coefficient,
- const real* rf,
- int seed,
- const int* gatindex)
+static void do_update_bd(int start,
+ int nrend,
+ real dt,
+ int64_t step,
+ const rvec* gmx_restrict x,
+ rvec* gmx_restrict xprime,
+ rvec* gmx_restrict v,
+ const rvec* gmx_restrict f,
+ gmx::ArrayRef<const ivec> nFreeze,
+ gmx::ArrayRef<const real> invmass,
+ gmx::ArrayRef<const ParticleType> ptype,
+ gmx::ArrayRef<const unsigned short> cFREEZE,
+ gmx::ArrayRef<const unsigned short> cTC,
+ real friction_coefficient,
+ const real* rf,
+ int seed,
+ const int* gatindex)
{
/* note -- these appear to be full step velocities . . . */
int gf = 0, gt = 0;
rng.restart(step, ng);
dist.reset();
- if (cFREEZE)
+ if (!cFREEZE.empty())
{
gf = cFREEZE[n];
}
- if (cTC)
+ if (!cTC.empty())
{
gt = cTC[n];
}
for (d = 0; (d < DIM); d++)
{
- if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
+ if ((ptype[n] != ParticleType::Shell) && !nFreeze[gf][d])
{
if (friction_coefficient != 0)
{
gmx_bcast(sizeof(n), &n, cr->mpi_comm_mygroup);
for (i = 0; i < n; i++)
{
- gmx_bcast(DIM * DIM * sizeof(ekind->tcstat[i].ekinh[0][0]), ekind->tcstat[i].ekinh[0],
+ gmx_bcast(DIM * DIM * sizeof(ekind->tcstat[i].ekinh[0][0]),
+ ekind->tcstat[i].ekinh[0],
cr->mpi_comm_mygroup);
- gmx_bcast(DIM * DIM * sizeof(ekind->tcstat[i].ekinf[0][0]), ekind->tcstat[i].ekinf[0],
+ gmx_bcast(DIM * DIM * sizeof(ekind->tcstat[i].ekinf[0][0]),
+ ekind->tcstat[i].ekinf[0],
cr->mpi_comm_mygroup);
gmx_bcast(DIM * DIM * sizeof(ekind->tcstat[i].ekinh_old[0][0]),
- ekind->tcstat[i].ekinh_old[0], cr->mpi_comm_mygroup);
-
- gmx_bcast(sizeof(ekind->tcstat[i].ekinscalef_nhc), &(ekind->tcstat[i].ekinscalef_nhc),
+ ekind->tcstat[i].ekinh_old[0],
cr->mpi_comm_mygroup);
- gmx_bcast(sizeof(ekind->tcstat[i].ekinscaleh_nhc), &(ekind->tcstat[i].ekinscaleh_nhc),
+
+ gmx_bcast(sizeof(ekind->tcstat[i].ekinscalef_nhc),
+ &(ekind->tcstat[i].ekinscalef_nhc),
cr->mpi_comm_mygroup);
- gmx_bcast(sizeof(ekind->tcstat[i].vscale_nhc), &(ekind->tcstat[i].vscale_nhc),
+ gmx_bcast(sizeof(ekind->tcstat[i].ekinscaleh_nhc),
+ &(ekind->tcstat[i].ekinscaleh_nhc),
cr->mpi_comm_mygroup);
+ gmx_bcast(sizeof(ekind->tcstat[i].vscale_nhc), &(ekind->tcstat[i].vscale_nhc), cr->mpi_comm_mygroup);
}
gmx_bcast(DIM * DIM * sizeof(ekind->ekin[0][0]), ekind->ekin[0], cr->mpi_comm_mygroup);
}
}
-void update_tcouple(int64_t step,
- const t_inputrec* inputrec,
- t_state* state,
- gmx_ekindata_t* ekind,
- const t_extmass* MassQ,
- const t_mdatoms* md)
-
-{
- // This condition was explicitly checked in previous version, but should have never been satisfied
- GMX_ASSERT(!(EI_VV(inputrec->eI)
- && (inputrecNvtTrotter(inputrec) || inputrecNptTrotter(inputrec)
- || inputrecNphTrotter(inputrec))),
- "Temperature coupling was requested with velocity verlet and trotter");
-
- bool doTemperatureCoupling = false;
-
- // For VV temperature coupling parameters are updated on the current
- // step, for the others - one step before.
- if (inputrec->etc == etcNO)
- {
- doTemperatureCoupling = false;
- }
- else if (EI_VV(inputrec->eI))
- {
- doTemperatureCoupling = do_per_step(step, inputrec->nsttcouple);
- }
- else
- {
- doTemperatureCoupling = do_per_step(step + inputrec->nsttcouple - 1, inputrec->nsttcouple);
- }
-
- if (doTemperatureCoupling)
- {
- real dttc = inputrec->nsttcouple * inputrec->delta_t;
-
- // TODO: berendsen_tcoupl(...), nosehoover_tcoupl(...) and vrescale_tcoupl(...) update
- // temperature coupling parameters, which should be reflected in the name of these
- // subroutines
- switch (inputrec->etc)
- {
- case etcNO: break;
- case etcBERENDSEN:
- berendsen_tcoupl(inputrec, ekind, dttc, state->therm_integral);
- break;
- case etcNOSEHOOVER:
- nosehoover_tcoupl(&(inputrec->opts), ekind, dttc, state->nosehoover_xi.data(),
- state->nosehoover_vxi.data(), MassQ);
- break;
- case etcVRESCALE:
- vrescale_tcoupl(inputrec, step, ekind, dttc, state->therm_integral.data());
- break;
- }
- /* rescale in place here */
- if (EI_VV(inputrec->eI))
- {
- rescale_velocities(ekind, md, 0, md->homenr, state->v.rvec_array());
- }
- }
- else
- {
- // Set the T scaling lambda to 1 to have no scaling
- // TODO: Do we have to do it on every non-t-couple step?
- for (int i = 0; (i < inputrec->opts.ngtc); i++)
- {
- ekind->tcstat[i].lambda = 1.0;
- }
- }
-}
-
void getThreadAtomRange(int numThreads, int threadIndex, int numAtoms, int* startAtom, int* endAtom)
{
#if GMX_HAVE_SIMD_UPDATE
}
}
-void update_pcouple_before_coordinates(FILE* fplog,
- int64_t step,
- const t_inputrec* inputrec,
- t_state* state,
- matrix parrinellorahmanMu,
- matrix M,
- gmx_bool bInitStep)
-{
- /* Berendsen P-coupling is completely handled after the coordinate update.
- * Trotter P-coupling is handled by separate calls to trotter_update().
- */
- if (inputrec->epc == epcPARRINELLORAHMAN
- && do_per_step(step + inputrec->nstpcouple - 1, inputrec->nstpcouple))
- {
- real dtpc = inputrec->nstpcouple * inputrec->delta_t;
-
- parrinellorahman_pcoupl(fplog, step, inputrec, dtpc, state->pres_prev, state->box,
- state->box_rel, state->boxv, M, parrinellorahmanMu, bInitStep);
- }
-}
-
-void Update::Impl::update_sd_second_half(const t_inputrec& inputRecord,
- int64_t step,
- real* dvdlambda,
- const t_mdatoms* md,
- t_state* state,
- const t_commrec* cr,
- t_nrnb* nrnb,
- gmx_wallcycle_t wcycle,
- gmx::Constraints* constr,
- bool do_log,
- bool do_ene)
+void Update::Impl::update_sd_second_half(const t_inputrec& inputRecord,
+ int64_t step,
+ real* dvdlambda,
+ int homenr,
+ gmx::ArrayRef<const ParticleType> ptype,
+ gmx::ArrayRef<const real> invMass,
+ t_state* state,
+ const t_commrec* cr,
+ t_nrnb* nrnb,
+ gmx_wallcycle* wcycle,
+ gmx::Constraints* constr,
+ bool do_log,
+ bool do_ene)
{
if (!constr)
{
return;
}
- if (inputRecord.eI == eiSD1)
+ if (inputRecord.eI == IntegrationAlgorithm::SD1)
{
- int homenr = md->homenr;
/* Cast delta_t from double to real to make the integrators faster.
* The only reason for having delta_t double is to get accurate values
*/
real dt = inputRecord.delta_t;
- wallcycle_start(wcycle, ewcUPDATE);
+ wallcycle_start(wcycle, WallCycleCounter::Update);
- int nth = gmx_omp_nthreads_get(emntUpdate);
+ int nth = gmx_omp_nthreads_get(ModuleMultiThread::Update);
#pragma omp parallel for num_threads(nth) schedule(static)
for (int th = 0; th < nth; th++)
getThreadAtomRange(nth, th, homenr, &start_th, &end_th);
doSDUpdateGeneral<SDUpdate::FrictionAndNoiseOnly>(
- sd_, start_th, end_th, dt, inputRecord.opts.acc, inputRecord.opts.nFreeze,
- md->invmass, md->ptype, md->cFREEZE, nullptr, md->cTC, state->x.rvec_array(),
- xp_.rvec_array(), state->v.rvec_array(), nullptr, step, inputRecord.ld_seed,
- DOMAINDECOMP(cr) ? cr->dd->globalAtomIndices.data() : nullptr);
+ sd_,
+ start_th,
+ end_th,
+ dt,
+ gmx::arrayRefFromArray(inputRecord.opts.nFreeze, inputRecord.opts.ngfrz),
+ invMass,
+ ptype,
+ cFREEZE_,
+ cTC_,
+ cAcceleration_,
+ inputRecord.opts.acceleration,
+ state->x.rvec_array(),
+ xp_.rvec_array(),
+ state->v.rvec_array(),
+ nullptr,
+ step,
+ inputRecord.ld_seed,
+ haveDDAtomOrdering(*cr) ? cr->dd->globalAtomIndices.data() : nullptr);
}
GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
}
inc_nrnb(nrnb, eNR_UPDATE, homenr);
- wallcycle_stop(wcycle, ewcUPDATE);
+ wallcycle_stop(wcycle, WallCycleCounter::Update);
/* Constrain the coordinates upd->xp for half a time step */
bool computeVirial = false;
- constr->apply(do_log, do_ene, step, 1, 0.5, state->x.arrayRefWithPadding(),
- xp_.arrayRefWithPadding(), ArrayRef<RVec>(), state->box,
- state->lambda[efptBONDED], dvdlambda, state->v.arrayRefWithPadding(),
- computeVirial, nullptr, ConstraintVariable::Positions);
+ constr->apply(do_log,
+ do_ene,
+ step,
+ 1,
+ 0.5,
+ state->x.arrayRefWithPadding(),
+ xp_.arrayRefWithPadding(),
+ ArrayRef<RVec>(),
+ state->box,
+ state->lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Bonded)],
+ dvdlambda,
+ state->v.arrayRefWithPadding(),
+ computeVirial,
+ nullptr,
+ ConstraintVariable::Positions);
}
}
-void Update::Impl::finish_update(const t_inputrec& inputRecord,
- const t_mdatoms* md,
- t_state* state,
- gmx_wallcycle_t wcycle,
- const bool haveConstraints)
+void Update::Impl::finish_update(const t_inputrec& inputRecord,
+ const bool havePartiallyFrozenAtoms,
+ const int homenr,
+ gmx::ArrayRef<const unsigned short> cFREEZE,
+ t_state* state,
+ gmx_wallcycle* wcycle,
+ const bool haveConstraints)
{
/* NOTE: Currently we always integrate to a temporary buffer and
* then copy the results back here.
*/
- wallcycle_start_nocount(wcycle, ewcUPDATE);
+ wallcycle_start_nocount(wcycle, WallCycleCounter::Update);
- const int homenr = md->homenr;
- auto xp = makeConstArrayRef(xp_).subArray(0, homenr);
- auto x = makeArrayRef(state->x).subArray(0, homenr);
+ auto xp = makeConstArrayRef(xp_).subArray(0, homenr);
+ auto x = makeArrayRef(state->x).subArray(0, homenr);
- if (md->havePartiallyFrozenAtoms && haveConstraints)
+ if (havePartiallyFrozenAtoms && haveConstraints)
{
/* We have atoms that are frozen along some, but not all dimensions,
* then constraints will have moved them also along the frozen dimensions.
for (int i = 0; i < homenr; i++)
{
- const int g = md->cFREEZE[i];
+ const int g = cFREEZE[i];
for (int d = 0; d < DIM; d++)
{
/* We have no frozen atoms or fully frozen atoms which have not
* been moved by the update, so we can simply copy all coordinates.
*/
- int gmx_unused nth = gmx_omp_nthreads_get(emntUpdate);
+ int gmx_unused nth = gmx_omp_nthreads_get(ModuleMultiThread::Update);
#pragma omp parallel for num_threads(nth) schedule(static)
for (int i = 0; i < homenr; i++)
{
}
}
- wallcycle_stop(wcycle, ewcUPDATE);
+ wallcycle_stop(wcycle, WallCycleCounter::Update);
}
-void update_pcouple_after_coordinates(FILE* fplog,
- int64_t step,
- const t_inputrec* inputrec,
- const t_mdatoms* md,
- const matrix pressure,
- const matrix forceVirial,
- const matrix constraintVirial,
- matrix pressureCouplingMu,
- t_state* state,
- t_nrnb* nrnb,
- gmx::BoxDeformation* boxDeformation,
- const bool scaleCoordinates)
-{
- int start = 0;
- int homenr = md->homenr;
-
- /* Cast to real for faster code, no loss in precision (see comment above) */
- real dt = inputrec->delta_t;
-
-
- /* now update boxes */
- switch (inputrec->epc)
- {
- case (epcNO): break;
- case (epcBERENDSEN):
- if (do_per_step(step, inputrec->nstpcouple))
- {
- real dtpc = inputrec->nstpcouple * dt;
- berendsen_pcoupl(fplog, step, inputrec, dtpc, pressure, state->box, forceVirial,
- constraintVirial, pressureCouplingMu, &state->baros_integral);
- berendsen_pscale(inputrec, pressureCouplingMu, state->box, state->box_rel, start,
- homenr, state->x.rvec_array(), md->cFREEZE, nrnb, scaleCoordinates);
- }
- break;
- case (epcPARRINELLORAHMAN):
- if (do_per_step(step + inputrec->nstpcouple - 1, inputrec->nstpcouple))
- {
- /* The box velocities were updated in do_pr_pcoupl,
- * but we dont change the box vectors until we get here
- * since we need to be able to shift/unshift above.
- */
- real dtpc = inputrec->nstpcouple * dt;
- for (int i = 0; i < DIM; i++)
- {
- for (int m = 0; m <= i; m++)
- {
- state->box[i][m] += dtpc * state->boxv[i][m];
- }
- }
- preserve_box_shape(inputrec, state->box_rel, state->box);
-
- /* Scale the coordinates */
- if (scaleCoordinates)
- {
- auto x = state->x.rvec_array();
- for (int n = start; n < start + homenr; n++)
- {
- tmvmul_ur0(pressureCouplingMu, x[n], x[n]);
- }
- }
- }
- break;
- case (epcMTTK):
- switch (inputrec->epct)
- {
- case (epctISOTROPIC):
- /* DIM * eta = ln V. so DIM*eta_new = DIM*eta_old + DIM*dt*veta =>
- ln V_new = ln V_old + 3*dt*veta => V_new = V_old*exp(3*dt*veta) =>
- Side length scales as exp(veta*dt) */
-
- msmul(state->box, std::exp(state->veta * dt), state->box);
-
- /* Relate veta to boxv. veta = d(eta)/dT = (1/DIM)*1/V dV/dT.
- o If we assume isotropic scaling, and box length scaling
- factor L, then V = L^DIM (det(M)). So dV/dt = DIM
- L^(DIM-1) dL/dt det(M), and veta = (1/L) dL/dt. The
- determinant of B is L^DIM det(M), and the determinant
- of dB/dt is (dL/dT)^DIM det (M). veta will be
- (det(dB/dT)/det(B))^(1/3). Then since M =
- B_new*(vol_new)^(1/3), dB/dT_new = (veta_new)*B(new). */
-
- msmul(state->box, state->veta, state->boxv);
- break;
- default: break;
- }
- break;
- default: break;
- }
-
- if (boxDeformation)
- {
- auto localX = makeArrayRef(state->x).subArray(start, homenr);
- boxDeformation->apply(localX, state->box, step);
- }
-}
-
-void Update::Impl::update_coords(const t_inputrec& inputRecord,
- int64_t step,
- const t_mdatoms* md,
- t_state* state,
+void Update::Impl::update_coords(const t_inputrec& inputRecord,
+ int64_t step,
+ int homenr,
+ bool havePartiallyFrozenAtoms,
+ gmx::ArrayRef<const ParticleType> ptype,
+ gmx::ArrayRef<const real> invMass,
+ gmx::ArrayRef<const rvec> invMassPerDim,
+ t_state* state,
const gmx::ArrayRefWithPadding<const gmx::RVec>& f,
- const t_fcdata* fcd,
+ t_fcdata* fcdata,
const gmx_ekindata_t* ekind,
const matrix M,
int updatePart,
gmx_incons("update_coords called for velocity without VV integrator");
}
- int homenr = md->homenr;
-
/* Cast to real for faster code, no loss in precision (see comment above) */
real dt = inputRecord.delta_t;
/* We need to update the NMR restraint history when time averaging is used */
- if (state->flags & (1 << estDISRE_RM3TAV))
+ if (state->flags & enumValueToBitMask(StateEntry::DisreRm3Tav))
{
- update_disres_history(fcd, &state->hist);
+ update_disres_history(*fcdata->disres, &state->hist);
}
- if (state->flags & (1 << estORIRE_DTAV))
+ if (state->flags & enumValueToBitMask(StateEntry::OrireDtav))
{
- update_orires_history(fcd, &state->hist);
+ GMX_ASSERT(fcdata, "Need valid fcdata");
+ fcdata->orires->updateHistory();
}
/* ############# START The update of velocities and positions ######### */
- int nth = gmx_omp_nthreads_get(emntUpdate);
+ int nth = gmx_omp_nthreads_get(ModuleMultiThread::Update);
#pragma omp parallel for num_threads(nth) schedule(static)
for (int th = 0; th < nth; th++)
switch (inputRecord.eI)
{
- case (eiMD):
- do_update_md(start_th, end_th, dt, step, x_rvec, xp_rvec, v_rvec, f_rvec,
- inputRecord.opts.acc, inputRecord.etc, inputRecord.epc,
- inputRecord.nsttcouple, inputRecord.nstpcouple, md, ekind,
- state->box, state->nosehoover_vxi.data(), M);
+ case (IntegrationAlgorithm::MD):
+ do_update_md(start_th,
+ end_th,
+ dt,
+ step,
+ x_rvec,
+ xp_rvec,
+ v_rvec,
+ f_rvec,
+ inputRecord.etc,
+ inputRecord.epc,
+ inputRecord.nsttcouple,
+ inputRecord.nstpcouple,
+ cTC_,
+ inputRecord.useConstantAcceleration,
+ cAcceleration_,
+ inputRecord.opts.acceleration,
+ invMass,
+ invMassPerDim,
+ ekind,
+ state->box,
+ state->nosehoover_vxi.data(),
+ M,
+ havePartiallyFrozenAtoms);
break;
- case (eiSD1):
- do_update_sd(start_th, end_th, dt, step, x_rvec, xp_rvec, v_rvec, f_rvec,
- inputRecord.opts.acc, inputRecord.opts.nFreeze, md->invmass,
- md->ptype, md->cFREEZE, md->cACC, md->cTC, inputRecord.ld_seed, cr,
- sd_, haveConstraints);
+ case (IntegrationAlgorithm::SD1):
+ do_update_sd(start_th,
+ end_th,
+ dt,
+ step,
+ x_rvec,
+ xp_rvec,
+ v_rvec,
+ f_rvec,
+ gmx::arrayRefFromArray(inputRecord.opts.nFreeze, inputRecord.opts.ngfrz),
+ invMass,
+ ptype,
+ cFREEZE_,
+ cTC_,
+ cAcceleration_,
+ inputRecord.opts.acceleration,
+ inputRecord.ld_seed,
+ cr,
+ sd_,
+ haveConstraints);
break;
- case (eiBD):
- do_update_bd(start_th, end_th, dt, step, x_rvec, xp_rvec, v_rvec, f_rvec,
- inputRecord.opts.nFreeze, md->invmass, md->ptype, md->cFREEZE,
- md->cTC, inputRecord.bd_fric, sd_.bd_rf.data(), inputRecord.ld_seed,
- DOMAINDECOMP(cr) ? cr->dd->globalAtomIndices.data() : nullptr);
+ case (IntegrationAlgorithm::BD):
+ do_update_bd(start_th,
+ end_th,
+ dt,
+ step,
+ x_rvec,
+ xp_rvec,
+ v_rvec,
+ f_rvec,
+ gmx::arrayRefFromArray(inputRecord.opts.nFreeze, inputRecord.opts.ngfrz),
+ invMass,
+ ptype,
+ cFREEZE_,
+ cTC_,
+ inputRecord.bd_fric,
+ sd_.bd_rf.data(),
+ inputRecord.ld_seed,
+ haveDDAtomOrdering(*cr) ? cr->dd->globalAtomIndices.data() : nullptr);
break;
- case (eiVV):
- case (eiVVAK):
+ case (IntegrationAlgorithm::VV):
+ case (IntegrationAlgorithm::VVAK):
{
- gmx_bool bExtended = (inputRecord.etc == etcNOSEHOOVER || inputRecord.epc == epcPARRINELLORAHMAN
- || inputRecord.epc == epcMTTK);
+ gmx_bool bExtended = (inputRecord.etc == TemperatureCoupling::NoseHoover
+ || inputRecord.epc == PressureCoupling::ParrinelloRahman
+ || inputRecord.epc == PressureCoupling::Mttk);
/* assuming barostat coupled to group 0 */
real alpha = 1.0 + DIM / static_cast<real>(inputRecord.opts.nrdf[0]);
{
case etrtVELOCITY1:
case etrtVELOCITY2:
- do_update_vv_vel(start_th, end_th, dt, inputRecord.opts.acc,
- inputRecord.opts.nFreeze, md->invmass, md->ptype, md->cFREEZE,
- md->cACC, v_rvec, f_rvec, bExtended, state->veta, alpha);
+ do_update_vv_vel(start_th,
+ end_th,
+ dt,
+ gmx::arrayRefFromArray(inputRecord.opts.nFreeze,
+ inputRecord.opts.ngfrz),
+ cAcceleration_,
+ inputRecord.opts.acceleration,
+ invMass,
+ ptype,
+ cFREEZE_,
+ v_rvec,
+ f_rvec,
+ bExtended,
+ state->veta,
+ alpha);
break;
case etrtPOSITION:
- do_update_vv_pos(start_th, end_th, dt, inputRecord.opts.nFreeze,
- md->ptype, md->cFREEZE, x_rvec, xp_rvec, v_rvec,
- bExtended, state->veta);
+ do_update_vv_pos(start_th,
+ end_th,
+ dt,
+ gmx::arrayRefFromArray(inputRecord.opts.nFreeze,
+ inputRecord.opts.ngfrz),
+ ptype,
+ cFREEZE_,
+ x_rvec,
+ xp_rvec,
+ v_rvec,
+ bExtended,
+ state->veta);
break;
}
break;
}
}
-extern gmx_bool update_randomize_velocities(const t_inputrec* ir,
- int64_t step,
- const t_commrec* cr,
- const t_mdatoms* md,
- gmx::ArrayRef<gmx::RVec> v,
- const Update* upd,
- const gmx::Constraints* constr)
+void Update::Impl::update_for_constraint_virial(const t_inputrec& inputRecord,
+ int homenr,
+ bool havePartiallyFrozenAtoms,
+ gmx::ArrayRef<const real> invmass,
+ gmx::ArrayRef<const rvec> invMassPerDim,
+ const t_state& state,
+ const gmx::ArrayRefWithPadding<const gmx::RVec>& f,
+ const gmx_ekindata_t& ekind)
{
+ GMX_ASSERT(inputRecord.eI == IntegrationAlgorithm::MD || inputRecord.eI == IntegrationAlgorithm::SD1,
+ "Only leap-frog is supported here");
- real rate = (ir->delta_t) / ir->opts.tau_t[0];
+ // Cast to real for faster code, no loss in precision
+ const real dt = inputRecord.delta_t;
- if (ir->etc == etcANDERSEN && constr != nullptr)
- {
- /* Currently, Andersen thermostat does not support constrained
- systems. Functionality exists in the andersen_tcoupl
- function in GROMACS 4.5.7 to allow this combination. That
- code could be ported to the current random-number
- generation approach, but has not yet been done because of
- lack of time and resources. */
- gmx_fatal(FARGS,
- "Normal Andersen is currently not supported with constraints, use massive "
- "Andersen instead");
- }
+ const int nth = gmx_omp_nthreads_get(ModuleMultiThread::Update);
- /* proceed with andersen if 1) it's fixed probability per
- particle andersen or 2) it's massive andersen and it's tau_t/dt */
- if ((ir->etc == etcANDERSEN) || do_per_step(step, roundToInt(1.0 / rate)))
+#pragma omp parallel for num_threads(nth) schedule(static)
+ for (int th = 0; th < nth; th++)
{
- andersen_tcoupl(ir, step, cr, md, v, rate, upd->getAndersenRandomizeGroup(),
- upd->getBoltzmanFactor());
- return TRUE;
+ try
+ {
+ int start_th, end_th;
+ getThreadAtomRange(nth, th, homenr, &start_th, &end_th);
+
+ const rvec* x_rvec = state.x.rvec_array();
+ rvec* xp_rvec = xp_.rvec_array();
+ const rvec* v_rvec = state.v.rvec_array();
+ const rvec* f_rvec = as_rvec_array(f.unpaddedConstArrayRef().data());
+
+ doUpdateMDDoNotUpdateVelocities(
+ start_th, end_th, dt, x_rvec, xp_rvec, v_rvec, f_rvec, havePartiallyFrozenAtoms, invmass, invMassPerDim, ekind);
+ }
+ GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
}
- return FALSE;
}