* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
* Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
- * Copyright (c) 2017,2018,2019,2020, by the GROMACS development team, led by
+ * Copyright (c) 2017,2018,2019,2020,2021, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
gmx::AlignedAllocationPolicy::free(mdatoms_->invmass);
sfree(mdatoms_->invMassPerDim);
sfree(mdatoms_->typeA);
- sfree(mdatoms_->chargeB);
sfree(mdatoms_->typeB);
+ /* mdatoms->chargeA and mdatoms->chargeB are non-owning views of
+ * chargeA_.data() and chargeB_.data(); their storage is released
+ * automatically when the owning vectors are destroyed, so no sfree here. */
sfree(mdatoms_->sqrt_c6A);
sfree(mdatoms_->sigmaA);
sfree(mdatoms_->sigma3A);
sfree(mdatoms_->bPerturbed);
sfree(mdatoms_->cU1);
sfree(mdatoms_->cU2);
- sfree(mdatoms_->bQM);
}
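/* A minimal sketch of the ownership pattern above (names illustrative):
 * the C++ vector owns the storage, and the legacy t_mdatoms pointer is a
 * non-owning view, which is why sfree() must not be called on it.
 *
 *   gmx::PaddedHostVector<real> owner;   // owns the allocation
 *   owner.resizeWithPadding(n);
 *   real* view = owner.data();           // legacy non-owning view
 *   // sfree(view) would double-free once `owner` is destroyed.
 */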
-void MDAtoms::resize(int newSize)
+void MDAtoms::resizeChargeA(const int newSize)
{
chargeA_.resizeWithPadding(newSize);
mdatoms_->chargeA = chargeA_.data();
}
-void MDAtoms::reserve(int newCapacity)
+void MDAtoms::resizeChargeB(const int newSize)
+{
+ chargeB_.resizeWithPadding(newSize);
+ mdatoms_->chargeB = chargeB_.data();
+}
+
+void MDAtoms::reserveChargeA(const int newCapacity)
{
chargeA_.reserveWithPadding(newCapacity);
mdatoms_->chargeA = chargeA_.data();
}
+void MDAtoms::reserveChargeB(const int newCapacity)
+{
+ chargeB_.reserveWithPadding(newCapacity);
+ mdatoms_->chargeB = chargeB_.data();
+}
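/* Usage sketch for the split accessors (caller and sizes hypothetical):
 * reserve to the known capacity first, then resize to the live atom count,
 * mirroring the legacy nalloc/nr split:
 *
 *   mdAtoms->reserveChargeA(estimatedCapacity);
 *   mdAtoms->resizeChargeA(numHomeAtoms);
 *
 * Both calls refresh md->chargeA, since growing the PaddedVector can
 * reallocate and invalidate any previously taken view. */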
+
std::unique_ptr<MDAtoms> makeMDAtoms(FILE* fp, const gmx_mtop_t& mtop, const t_inputrec& ir, const bool rankHasPmeGpuTask)
{
auto mdAtoms = std::make_unique<MDAtoms>();
if (rankHasPmeGpuTask)
{
changePinningPolicy(&mdAtoms->chargeA_, pme_get_pinning_policy());
+ changePinningPolicy(&mdAtoms->chargeB_, pme_get_pinning_policy());
}
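/* Rationale sketch: changePinningPolicy() requests page-locked host
 * memory, which GPU transfers need in order to run asynchronously.
 * Pinning chargeB_ as well assumes B-state charges are also uploaded
 * when a free-energy run has a PME task on this rank. */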
t_mdatoms* md;
snew(md, 1);
double totalMassA = 0.0;
double totalMassB = 0.0;
md->haveVsites = FALSE;
- gmx_mtop_atomloop_block_t aloop = gmx_mtop_atomloop_block_init(&mtop);
+ gmx_mtop_atomloop_block_t aloop = gmx_mtop_atomloop_block_init(mtop);
const t_atom* atom;
int nmol;
while (gmx_mtop_atomloop_block_next(aloop, &atom, &nmol))
totalMassA += nmol * atom->m;
totalMassB += nmol * atom->mB;
- if (atom->ptype == eptVSite)
+ if (atom->ptype == ParticleType::VSite)
{
md->haveVsites = TRUE;
}
- if (ir.efep != efepNO && PERTURBED(*atom))
+ if (ir.efep != FreeEnergyPerturbationType::No && PERTURBED(*atom))
{
md->nPerturbed++;
if (atom->mB != atom->m)
md->tmassA = totalMassA;
md->tmassB = totalMassB;
- if (ir.efep != efepNO && fp)
+ if (ir.efep != FreeEnergyPerturbationType::No && fp)
{
- fprintf(fp, "There are %d atoms and %d charges for free energy perturbation\n",
- md->nPerturbed, md->nChargePerturbed);
+ fprintf(fp,
+ "There are %d atoms and %d charges for free energy perturbation\n",
+ md->nPerturbed,
+ md->nChargePerturbed);
}
md->havePartiallyFrozenAtoms = FALSE;
}
}
- md->bOrires = (gmx_mtop_ftype_count(&mtop, F_ORIRES) != 0);
+ md->bOrires = (gmx_mtop_ftype_count(mtop, F_ORIRES) != 0);
return mdAtoms;
}
} // namespace gmx
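/* Typical call sequence (a sketch, arguments hypothetical): build the
 * container once per rank, then fill it per (sub)domain. For a serial run,
 * nindex = -1 and an empty index select all mtop.natoms atoms:
 *
 *   auto mdAtoms = makeMDAtoms(fplog, mtop, inputrec, rankHasPmeGpuTask);
 *   atoms2md(mtop, inputrec, -1, {}, mtop.natoms, mdAtoms.get());
 */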
-void atoms2md(const gmx_mtop_t* mtop, const t_inputrec* ir, int nindex, const int* index, int homenr, gmx::MDAtoms* mdAtoms)
+void atoms2md(const gmx_mtop_t& mtop,
+ const t_inputrec& inputrec,
+ int nindex,
+ gmx::ArrayRef<int> index,
+ int homenr,
+ gmx::MDAtoms* mdAtoms)
{
gmx_bool bLJPME;
const t_grpopts* opts;
- int nthreads gmx_unused;
+ int nthreads gmx_unused;
- bLJPME = EVDW_PME(ir->vdwtype);
+ bLJPME = EVDW_PME(inputrec.vdwtype);
- opts = &ir->opts;
+ opts = &inputrec.opts;
- const SimulationGroups& groups = mtop->groups;
+ const SimulationGroups& groups = mtop.groups;
- auto md = mdAtoms->mdatoms();
+ auto* md = mdAtoms->mdatoms();
/* nindex>=0 indicates DD where we use an index */
if (nindex >= 0)
{
}
else
{
- md->nr = mtop->natoms;
+ md->nr = mtop.natoms;
}
if (md->nr > md->nalloc)
// everything, but for now the semantics of md->nalloc being
// the capacity are preserved by keeping vectors within
// mdAtoms having the same properties as the other arrays.
- mdAtoms->reserve(md->nalloc);
- mdAtoms->resize(md->nr);
+ mdAtoms->reserveChargeA(md->nalloc);
+ mdAtoms->resizeChargeA(md->nr);
+ if (md->nPerturbed > 0)
+ {
+ mdAtoms->reserveChargeB(md->nalloc);
+ mdAtoms->resizeChargeB(md->nr);
+ }
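/* Stated assumption: when nPerturbed == 0, chargeB_ is never resized, so
 * md->chargeB must not be dereferenced on unperturbed runs; consumers are
 * expected to gate B-state access on md->nPerturbed. */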
srenew(md->typeA, md->nalloc);
if (md->nPerturbed)
{
- srenew(md->chargeB, md->nalloc);
srenew(md->typeB, md->nalloc);
}
if (bLJPME)
/* We always copy cTC with domain decomposition */
}
srenew(md->cENER, md->nalloc);
- if (opts->ngacc > 1)
+ if (inputrec.useConstantAcceleration)
{
srenew(md->cACC, md->nalloc);
}
- if (opts->nFreeze
- && (opts->ngfrz > 1 || opts->nFreeze[0][XX] || opts->nFreeze[0][YY] || opts->nFreeze[0][ZZ]))
+ if (inputrecFrozenAtoms(&inputrec))
{
srenew(md->cFREEZE, md->nalloc);
}
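/* inputrecFrozenAtoms() condenses the old manual test (more than one
 * freeze group, or any frozen dimension in the first group), so cFREEZE
 * is allocated only when some atom is actually frozen. */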
* Therefore, when adding code, the user should use something like:
* gprnrU1 = (md->cU1==NULL ? 0 : md->cU1[localatindex])
*/
- if (!mtop->groups.groupNumbers[SimulationAtomGroupType::User1].empty())
+ if (!mtop.groups.groupNumbers[SimulationAtomGroupType::User1].empty())
{
srenew(md->cU1, md->nalloc);
}
- if (!mtop->groups.groupNumbers[SimulationAtomGroupType::User2].empty())
+ if (!mtop.groups.groupNumbers[SimulationAtomGroupType::User2].empty())
{
srenew(md->cU2, md->nalloc);
}
-
- if (ir->bQMMM)
- {
- srenew(md->bQM, md->nalloc);
- }
}
int molb = 0;
- nthreads = gmx_omp_nthreads_get(emntDefault);
+ nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Default);
#pragma omp parallel for num_threads(nthreads) schedule(static) firstprivate(molb)
for (int i = 0; i < md->nr; i++)
{
real mA, mB, fac;
real c6, c12;
- if (index == nullptr)
+ if (index.empty())
{
ag = i;
}
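/* An empty ArrayRef takes over the role of the former index == nullptr:
 * without a DD index, local atom i maps directly to global atom ag = i. */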
{
md->cFREEZE[i] = getGroupType(groups, SimulationAtomGroupType::Freeze, ag);
}
- if (EI_ENERGY_MINIMIZATION(ir->eI))
+ if (EI_ENERGY_MINIMIZATION(inputrec.eI))
{
/* Displacement is proportional to F, masses used for constraints */
mA = 1.0;
mB = 1.0;
}
- else if (ir->eI == eiBD)
+ else if (inputrec.eI == IntegrationAlgorithm::BD)
{
/* With BD the physical masses are irrelevant.
* To keep the code simple we use most of the normal MD code path
* Thus with BD v*dt will give the displacement and the reported
* temperature can signal bad integration (too large time step).
*/
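/* Worked example (illustrative numbers, not from the source): with
 * delta_t = 0.002 ps and bd_fric = 100 amu/ps, the branch below gives
 * m = 0.5 * 100 * 0.002 = 0.1 amu; with bd_fric = 0 and tau_t = 1 ps,
 * fac = 0.002 / 1 and the effective mass is 0.001 * atom.m. */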
- if (ir->bd_fric > 0)
+ if (inputrec.bd_fric > 0)
{
- mA = 0.5 * ir->bd_fric * ir->delta_t;
- mB = 0.5 * ir->bd_fric * ir->delta_t;
+ mA = 0.5 * inputrec.bd_fric * inputrec.delta_t;
+ mB = 0.5 * inputrec.bd_fric * inputrec.delta_t;
}
else
{
/* The friction coefficient is mass/tau_t */
- fac = ir->delta_t
+ fac = inputrec.delta_t
/ opts->tau_t[md->cTC ? groups.groupNumbers[SimulationAtomGroupType::TemperatureCoupling][ag] : 0];
mA = 0.5 * atom.m * fac;
mB = 0.5 * atom.mB * fac;
else if (md->cFREEZE)
{
g = md->cFREEZE[i];
- GMX_ASSERT(opts->nFreeze != nullptr,
- "Must have freeze groups to initialize masses");
+ GMX_ASSERT(opts->nFreeze != nullptr, "Must have freeze groups to initialize masses");
if (opts->nFreeze[g][XX] && opts->nFreeze[g][YY] && opts->nFreeze[g][ZZ])
{
/* Set the mass of completely frozen particles to ALMOST_ZERO
md->typeA[i] = atom.type;
if (bLJPME)
{
- c6 = mtop->ffparams.iparams[atom.type * (mtop->ffparams.atnr + 1)].lj.c6;
- c12 = mtop->ffparams.iparams[atom.type * (mtop->ffparams.atnr + 1)].lj.c12;
- md->sqrt_c6A[i] = sqrt(c6);
+ c6 = mtop.ffparams.iparams[atom.type * (mtop.ffparams.atnr + 1)].lj.c6;
+ c12 = mtop.ffparams.iparams[atom.type * (mtop.ffparams.atnr + 1)].lj.c12;
+ md->sqrt_c6A[i] = std::sqrt(c6);
if (c6 == 0.0 || c12 == 0)
{
md->sigmaA[i] = 1.0;
md->typeB[i] = atom.typeB;
if (bLJPME)
{
- c6 = mtop->ffparams.iparams[atom.typeB * (mtop->ffparams.atnr + 1)].lj.c6;
- c12 = mtop->ffparams.iparams[atom.typeB * (mtop->ffparams.atnr + 1)].lj.c12;
- md->sqrt_c6B[i] = sqrt(c6);
+ c6 = mtop.ffparams.iparams[atom.typeB * (mtop.ffparams.atnr + 1)].lj.c6;
+ c12 = mtop.ffparams.iparams[atom.typeB * (mtop.ffparams.atnr + 1)].lj.c12;
+ md->sqrt_c6B[i] = std::sqrt(c6);
if (c6 == 0.0 || c12 == 0)
{
md->sigmaB[i] = 1.0;
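/* In both omitted else branches sigma follows from the LJ parameters:
 * c6 = 4*eps*sigma^6 and c12 = 4*eps*sigma^12 give sigma = (c12/c6)^(1/6),
 * with sigma^3 cached in the sigma3 arrays alongside. */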
{
md->cU2[i] = groups.groupNumbers[SimulationAtomGroupType::User2][ag];
}
-
- if (ir->bQMMM)
- {
- if (groups.groupNumbers[SimulationAtomGroupType::QuantumMechanics].empty()
- || groups.groupNumbers[SimulationAtomGroupType::QuantumMechanics][ag]
- < groups.groups[SimulationAtomGroupType::QuantumMechanics].size() - 1)
- {
- md->bQM[i] = TRUE;
- }
- else
- {
- md->bQM[i] = FALSE;
- }
- }
}
GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
}
real L1 = 1 - lambda;
/* Update masses of perturbed atoms for the change in lambda */
- int gmx_unused nthreads = gmx_omp_nthreads_get(emntDefault);
+ int gmx_unused nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Default);
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int i = 0; i < md->nr; i++)
{
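/* The omitted loop body linearly interpolates the A/B-state masses,
 * m(lambda) = (1 - lambda)*mA + lambda*mB, which is why L1 = 1 - lambda
 * is hoisted out of the parallel loop. */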