Reimplement constant acceleration groups
[alexxy/gromacs.git] / src / gromacs / mdlib / mdatoms.cpp
index b9633373c6edb248c65c0972bf12254216376d58..e4ee7b7b57abc81466d0d94f3b80790d1e5917a4 100644
--- a/src/gromacs/mdlib/mdatoms.cpp
+++ b/src/gromacs/mdlib/mdatoms.cpp
@@ -3,7 +3,8 @@
  *
  * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
  * Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
+ * Copyright (c) 2017,2018,2019,2020,2021, by the GROMACS development team, led by
  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
  * and including many others, as listed in the AUTHORS file in the
  * top-level source directory and at http://www.gromacs.org.
 
 #include <memory>
 
-#include "gromacs/compat/make_unique.h"
+#include "gromacs/ewald/pme.h"
 #include "gromacs/gpu_utils/hostallocator.h"
 #include "gromacs/math/functions.h"
 #include "gromacs/mdlib/gmx_omp_nthreads.h"
-#include "gromacs/mdlib/qmmm.h"
 #include "gromacs/mdtypes/inputrec.h"
 #include "gromacs/mdtypes/md_enums.h"
+#include "gromacs/mdtypes/mdatom.h"
 #include "gromacs/topology/mtop_lookup.h"
 #include "gromacs/topology/mtop_util.h"
 #include "gromacs/topology/topology.h"
 namespace gmx
 {
 
-MDAtoms::MDAtoms()
-    : mdatoms_(nullptr), chargeA_()
+MDAtoms::MDAtoms() : mdatoms_(nullptr) {}
+
+MDAtoms::~MDAtoms()
 {
+    if (mdatoms_ == nullptr)
+    {
+        return;
+    }
+    sfree(mdatoms_->massA);
+    sfree(mdatoms_->massB);
+    sfree(mdatoms_->massT);
+    gmx::AlignedAllocationPolicy::free(mdatoms_->invmass);
+    sfree(mdatoms_->invMassPerDim);
+    sfree(mdatoms_->typeA);
+    sfree(mdatoms_->typeB);
+    /* mdatoms->chargeA and mdatoms->chargeB point at chargeA_.data()
+     * and chargeB_.data() respectively. They get freed automatically. */
+    sfree(mdatoms_->sqrt_c6A);
+    sfree(mdatoms_->sigmaA);
+    sfree(mdatoms_->sigma3A);
+    sfree(mdatoms_->sqrt_c6B);
+    sfree(mdatoms_->sigmaB);
+    sfree(mdatoms_->sigma3B);
+    sfree(mdatoms_->ptype);
+    sfree(mdatoms_->cTC);
+    sfree(mdatoms_->cENER);
+    sfree(mdatoms_->cACC);
+    sfree(mdatoms_->cFREEZE);
+    sfree(mdatoms_->cVCM);
+    sfree(mdatoms_->cORF);
+    sfree(mdatoms_->bPerturbed);
+    sfree(mdatoms_->cU1);
+    sfree(mdatoms_->cU2);
 }
 
-void MDAtoms::resize(int newSize)
+void MDAtoms::resizeChargeA(const int newSize)
 {
-    chargeA_.resize(newSize);
+    chargeA_.resizeWithPadding(newSize);
     mdatoms_->chargeA = chargeA_.data();
 }
 
-void MDAtoms::reserve(int newCapacity)
+void MDAtoms::resizeChargeB(const int newSize)
 {
-    chargeA_.reserve(newCapacity);
+    chargeB_.resizeWithPadding(newSize);
+    mdatoms_->chargeB = chargeB_.data();
+}
+
+void MDAtoms::reserveChargeA(const int newCapacity)
+{
+    chargeA_.reserveWithPadding(newCapacity);
     mdatoms_->chargeA = chargeA_.data();
 }
 
-std::unique_ptr<MDAtoms>
-makeMDAtoms(FILE *fp, const gmx_mtop_t &mtop, const t_inputrec &ir,
-            bool useGpuForPme)
+void MDAtoms::reserveChargeB(const int newCapacity)
+{
+    chargeB_.reserveWithPadding(newCapacity);
+    mdatoms_->chargeB = chargeB_.data();
+}
+
+std::unique_ptr<MDAtoms> makeMDAtoms(FILE* fp, const gmx_mtop_t& mtop, const t_inputrec& ir, const bool rankHasPmeGpuTask)
 {
-    auto       mdAtoms = compat::make_unique<MDAtoms>();
-    // GPU transfers want to use the pinning mode.
-    changePinningPolicy(&mdAtoms->chargeA_, useGpuForPme ? PinningPolicy::CanBePinned : PinningPolicy::CannotBePinned);
-    t_mdatoms *md;
+    auto mdAtoms = std::make_unique<MDAtoms>();
+    // GPU transfers may want to use a suitable pinning mode.
+    if (rankHasPmeGpuTask)
+    {
+        changePinningPolicy(&mdAtoms->chargeA_, pme_get_pinning_policy());
+        changePinningPolicy(&mdAtoms->chargeB_, pme_get_pinning_policy());
+    }
+    t_mdatoms* md;
     snew(md, 1);
     mdAtoms->mdatoms_.reset(md);
 
-    md->nenergrp = mtop.groups.grps[egcENER].nr;
-    md->bVCMgrps = (mtop.groups.grps[egcVCM].nr > 1);
+    md->nenergrp = mtop.groups.groups[SimulationAtomGroupType::EnergyOutput].size();
+    md->bVCMgrps = FALSE;
+    for (int i = 0; i < mtop.natoms; i++)
+    {
+        if (getGroupType(mtop.groups, SimulationAtomGroupType::MassCenterVelocityRemoval, i) > 0)
+        {
+            md->bVCMgrps = TRUE;
+        }
+    }
 
     /* Determine the total system mass and perturbed atom counts */
-    double                     totalMassA = 0.0;
-    double                     totalMassB = 0.0;
+    double totalMassA = 0.0;
+    double totalMassB = 0.0;
 
-    md->haveVsites = FALSE;
-    gmx_mtop_atomloop_block_t  aloop = gmx_mtop_atomloop_block_init(&mtop);
-    const t_atom              *atom;
-    int                        nmol;
+    md->haveVsites                  = FALSE;
+    gmx_mtop_atomloop_block_t aloop = gmx_mtop_atomloop_block_init(mtop);
+    const t_atom*             atom;
+    int                       nmol;
     while (gmx_mtop_atomloop_block_next(aloop, &atom, &nmol))
     {
-        totalMassA += nmol*atom->m;
-        totalMassB += nmol*atom->mB;
+        totalMassA += nmol * atom->m;
+        totalMassB += nmol * atom->mB;
 
-        if (atom->ptype == eptVSite)
+        if (atom->ptype == ParticleType::VSite)
         {
             md->haveVsites = TRUE;
         }
 
-        if (ir.efep != efepNO && PERTURBED(*atom))
+        if (ir.efep != FreeEnergyPerturbationType::No && PERTURBED(*atom))
         {
             md->nPerturbed++;
             if (atom->mB != atom->m)
@@ -130,11 +182,12 @@ makeMDAtoms(FILE *fp, const gmx_mtop_t &mtop, const t_inputrec &ir,
     md->tmassA = totalMassA;
     md->tmassB = totalMassB;
 
-    if (ir.efep != efepNO && fp)
+    if (ir.efep != FreeEnergyPerturbationType::No && fp)
     {
         fprintf(fp,
                 "There are %d atoms and %d charges for free energy perturbation\n",
-                md->nPerturbed, md->nChargePerturbed);
+                md->nPerturbed,
+                md->nChargePerturbed);
     }
 
     md->havePartiallyFrozenAtoms = FALSE;
@@ -142,37 +195,38 @@ makeMDAtoms(FILE *fp, const gmx_mtop_t &mtop, const t_inputrec &ir,
     {
         for (int d = YY; d < DIM; d++)
         {
-            if (ir.opts.nFreeze[d] != ir.opts.nFreeze[XX])
+            if (ir.opts.nFreeze[g][d] != ir.opts.nFreeze[g][XX])
             {
                 md->havePartiallyFrozenAtoms = TRUE;
             }
         }
     }
 
-    md->bOrires = gmx_mtop_ftype_count(&mtop, F_ORIRES);
+    md->bOrires = (gmx_mtop_ftype_count(mtop, F_ORIRES) != 0);
 
     return mdAtoms;
 }
 
-} // namespace
+} // namespace gmx
 
-void atoms2md(const gmx_mtop_t *mtop, const t_inputrec *ir,
-              int nindex, const int *index,
-              int homenr,
-              gmx::MDAtoms *mdAtoms)
+void atoms2md(const gmx_mtop_t&  mtop,
+              const t_inputrec&  inputrec,
+              int                nindex,
+              gmx::ArrayRef<int> index,
+              int                homenr,
+              gmx::MDAtoms*      mdAtoms)
 {
-    gmx_bool              bLJPME;
-    const t_grpopts      *opts;
-    const gmx_groups_t   *groups;
-    int                   nthreads gmx_unused;
+    gmx_bool         bLJPME;
+    const t_grpopts* opts;
+    int nthreads     gmx_unused;
 
-    bLJPME = EVDW_PME(ir->vdwtype);
+    bLJPME = EVDW_PME(inputrec.vdwtype);
 
-    opts = &ir->opts;
+    opts = &inputrec.opts;
 
-    groups = &mtop->groups;
+    const SimulationGroups& groups = mtop.groups;
 
-    auto md = mdAtoms->mdatoms();
+    auto* md = mdAtoms->mdatoms();
     /* nindex>=0 indicates DD where we use an index */
     if (nindex >= 0)
     {
@@ -180,7 +234,7 @@ void atoms2md(const gmx_mtop_t *mtop, const t_inputrec *ir,
     }
     else
     {
-        md->nr = mtop->natoms;
+        md->nr = mtop.natoms;
     }
 
     if (md->nr > md->nalloc)
@@ -197,18 +251,23 @@ void atoms2md(const gmx_mtop_t *mtop, const t_inputrec *ir,
          * The padding needs to be with zeros, which we set later below.
          */
         gmx::AlignedAllocationPolicy::free(md->invmass);
-        md->invmass = new(gmx::AlignedAllocationPolicy::malloc((md->nalloc + GMX_REAL_MAX_SIMD_WIDTH)*sizeof(*md->invmass)))real;
+        md->invmass = new (gmx::AlignedAllocationPolicy::malloc(
+                (md->nalloc + GMX_REAL_MAX_SIMD_WIDTH) * sizeof(*md->invmass))) real;
         srenew(md->invMassPerDim, md->nalloc);
         // TODO eventually we will have vectors and just resize
         // everything, but for now the semantics of md->nalloc being
         // the capacity are preserved by keeping vectors within
         // mdAtoms having the same properties as the other arrays.
-        mdAtoms->reserve(md->nalloc);
-        mdAtoms->resize(md->nr);
+        mdAtoms->reserveChargeA(md->nalloc);
+        mdAtoms->resizeChargeA(md->nr);
+        if (md->nPerturbed > 0)
+        {
+            mdAtoms->reserveChargeB(md->nalloc);
+            mdAtoms->resizeChargeB(md->nr);
+        }
         srenew(md->typeA, md->nalloc);
         if (md->nPerturbed)
         {
-            srenew(md->chargeB, md->nalloc);
             srenew(md->typeB, md->nalloc);
         }
         if (bLJPME)
@@ -230,13 +289,11 @@ void atoms2md(const gmx_mtop_t *mtop, const t_inputrec *ir,
             /* We always copy cTC with domain decomposition */
         }
         srenew(md->cENER, md->nalloc);
-        if (opts->ngacc > 1)
+        if (inputrec.useConstantAcceleration)
         {
             srenew(md->cACC, md->nalloc);
         }
-        if (opts->nFreeze &&
-            (opts->ngfrz > 1 ||
-             opts->nFreeze[0][XX] || opts->nFreeze[0][YY] || opts->nFreeze[0][ZZ]))
+        if (inputrecFrozenAtoms(&inputrec))
         {
             srenew(md->cFREEZE, md->nalloc);
         }
@@ -258,35 +315,29 @@ void atoms2md(const gmx_mtop_t *mtop, const t_inputrec *ir,
          * Therefore, when adding code, the user should use something like:
          * gprnrU1 = (md->cU1==NULL ? 0 : md->cU1[localatindex])
          */
-        if (mtop->groups.grpnr[egcUser1] != nullptr)
+        if (!mtop.groups.groupNumbers[SimulationAtomGroupType::User1].empty())
         {
             srenew(md->cU1, md->nalloc);
         }
-        if (mtop->groups.grpnr[egcUser2] != nullptr)
+        if (!mtop.groups.groupNumbers[SimulationAtomGroupType::User2].empty())
         {
             srenew(md->cU2, md->nalloc);
         }
-
-        if (ir->bQMMM)
-        {
-            srenew(md->bQM, md->nalloc);
-        }
     }
 
     int molb = 0;
 
-    // cppcheck-suppress unreadVariable
-    nthreads = gmx_omp_nthreads_get(emntDefault);
+    nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Default);
 #pragma omp parallel for num_threads(nthreads) schedule(static) firstprivate(molb)
     for (int i = 0; i < md->nr; i++)
     {
         try
         {
-            int      g, ag;
-            real     mA, mB, fac;
-            real     c6, c12;
+            int  g, ag;
+            real mA, mB, fac;
+            real c6, c12;
 
-            if (index == nullptr)
+            if (index.empty())
             {
                 ag = i;
             }
@@ -294,19 +345,19 @@ void atoms2md(const gmx_mtop_t *mtop, const t_inputrec *ir,
             {
                 ag = index[i];
             }
-            const t_atom &atom = mtopGetAtomParameters(mtop, ag, &molb);
+            const t_atom& atom = mtopGetAtomParameters(mtop, ag, &molb);
 
             if (md->cFREEZE)
             {
-                md->cFREEZE[i] = ggrpnr(groups, egcFREEZE, ag);
+                md->cFREEZE[i] = getGroupType(groups, SimulationAtomGroupType::Freeze, ag);
             }
-            if (EI_ENERGY_MINIMIZATION(ir->eI))
+            if (EI_ENERGY_MINIMIZATION(inputrec.eI))
             {
                 /* Displacement is proportional to F, masses used for constraints */
                 mA = 1.0;
                 mB = 1.0;
             }
-            else if (ir->eI == eiBD)
+            else if (inputrec.eI == IntegrationAlgorithm::BD)
             {
                 /* With BD the physical masses are irrelevant.
                  * To keep the code simple we use most of the normal MD code path
@@ -318,17 +369,18 @@ void atoms2md(const gmx_mtop_t *mtop, const t_inputrec *ir,
                  * Thus with BD v*dt will give the displacement and the reported
                  * temperature can signal bad integration (too large time step).
                  */
-                if (ir->bd_fric > 0)
+                if (inputrec.bd_fric > 0)
                 {
-                    mA = 0.5*ir->bd_fric*ir->delta_t;
-                    mB = 0.5*ir->bd_fric*ir->delta_t;
+                    mA = 0.5 * inputrec.bd_fric * inputrec.delta_t;
+                    mB = 0.5 * inputrec.bd_fric * inputrec.delta_t;
                 }
                 else
                 {
                     /* The friction coefficient is mass/tau_t */
-                    fac = ir->delta_t/opts->tau_t[md->cTC ? groups->grpnr[egcTC][ag] : 0];
-                    mA  = 0.5*atom.m*fac;
-                    mB  = 0.5*atom.mB*fac;
+                    fac = inputrec.delta_t
+                          / opts->tau_t[md->cTC ? groups.groupNumbers[SimulationAtomGroupType::TemperatureCoupling][ag] : 0];
+                    mA = 0.5 * atom.m * fac;
+                    mB = 0.5 * atom.mB * fac;
                 }
             }
             else
@@ -338,10 +390,10 @@ void atoms2md(const gmx_mtop_t *mtop, const t_inputrec *ir,
             }
             if (md->nMassPerturbed)
             {
-                md->massA[i]  = mA;
-                md->massB[i]  = mB;
+                md->massA[i] = mA;
+                md->massB[i] = mB;
             }
-            md->massT[i]    = mA;
+            md->massT[i] = mA;
 
             if (mA == 0.0)
             {
@@ -353,12 +405,13 @@ void atoms2md(const gmx_mtop_t *mtop, const t_inputrec *ir,
             else if (md->cFREEZE)
             {
                 g = md->cFREEZE[i];
+                GMX_ASSERT(opts->nFreeze != nullptr, "Must have freeze groups to initialize masses");
                 if (opts->nFreeze[g][XX] && opts->nFreeze[g][YY] && opts->nFreeze[g][ZZ])
                 {
                     /* Set the mass of completely frozen particles to ALMOST_ZERO
                      * iso 0 to avoid div by zero in lincs or shake.
                      */
-                    md->invmass[i]  = ALMOST_ZERO;
+                    md->invmass[i] = ALMOST_ZERO;
                 }
                 else
                 {
@@ -366,38 +419,38 @@ void atoms2md(const gmx_mtop_t *mtop, const t_inputrec *ir,
                      * If such particles are constrained, the frozen dimensions
                      * should not be updated with the constrained coordinates.
                      */
-                    md->invmass[i]  = 1.0/mA;
+                    md->invmass[i] = 1.0 / mA;
                 }
                 for (int d = 0; d < DIM; d++)
                 {
-                    md->invMassPerDim[i][d] = (opts->nFreeze[g][d] ? 0 : 1.0/mA);
+                    md->invMassPerDim[i][d] = (opts->nFreeze[g][d] ? 0 : 1.0 / mA);
                 }
             }
             else
             {
-                md->invmass[i]  = 1.0/mA;
+                md->invmass[i] = 1.0 / mA;
                 for (int d = 0; d < DIM; d++)
                 {
-                    md->invMassPerDim[i][d] = 1.0/mA;
+                    md->invMassPerDim[i][d] = 1.0 / mA;
                 }
             }
 
-            md->chargeA[i]      = atom.q;
-            md->typeA[i]        = atom.type;
+            md->chargeA[i] = atom.q;
+            md->typeA[i]   = atom.type;
             if (bLJPME)
             {
-                c6                = mtop->ffparams.iparams[atom.type*(mtop->ffparams.atnr+1)].lj.c6;
-                c12               = mtop->ffparams.iparams[atom.type*(mtop->ffparams.atnr+1)].lj.c12;
-                md->sqrt_c6A[i]   = sqrt(c6);
+                c6  = mtop.ffparams.iparams[atom.type * (mtop.ffparams.atnr + 1)].lj.c6;
+                c12 = mtop.ffparams.iparams[atom.type * (mtop.ffparams.atnr + 1)].lj.c12;
+                md->sqrt_c6A[i] = std::sqrt(c6);
                 if (c6 == 0.0 || c12 == 0)
                 {
                     md->sigmaA[i] = 1.0;
                 }
                 else
                 {
-                    md->sigmaA[i] = gmx::sixthroot(c12/c6);
+                    md->sigmaA[i] = gmx::sixthroot(c12 / c6);
                 }
-                md->sigma3A[i]    = 1/(md->sigmaA[i]*md->sigmaA[i]*md->sigmaA[i]);
+                md->sigma3A[i] = 1 / (md->sigmaA[i] * md->sigmaA[i] * md->sigmaA[i]);
             }
             if (md->nPerturbed)
             {
@@ -406,62 +459,49 @@ void atoms2md(const gmx_mtop_t *mtop, const t_inputrec *ir,
                 md->typeB[i]      = atom.typeB;
                 if (bLJPME)
                 {
-                    c6                = mtop->ffparams.iparams[atom.typeB*(mtop->ffparams.atnr+1)].lj.c6;
-                    c12               = mtop->ffparams.iparams[atom.typeB*(mtop->ffparams.atnr+1)].lj.c12;
-                    md->sqrt_c6B[i]   = sqrt(c6);
+                    c6  = mtop.ffparams.iparams[atom.typeB * (mtop.ffparams.atnr + 1)].lj.c6;
+                    c12 = mtop.ffparams.iparams[atom.typeB * (mtop.ffparams.atnr + 1)].lj.c12;
+                    md->sqrt_c6B[i] = std::sqrt(c6);
                     if (c6 == 0.0 || c12 == 0)
                     {
                         md->sigmaB[i] = 1.0;
                     }
                     else
                     {
-                        md->sigmaB[i] = gmx::sixthroot(c12/c6);
+                        md->sigmaB[i] = gmx::sixthroot(c12 / c6);
                     }
-                    md->sigma3B[i]    = 1/(md->sigmaB[i]*md->sigmaB[i]*md->sigmaB[i]);
+                    md->sigma3B[i] = 1 / (md->sigmaB[i] * md->sigmaB[i] * md->sigmaB[i]);
                 }
             }
-            md->ptype[i]    = atom.ptype;
+            md->ptype[i] = atom.ptype;
             if (md->cTC)
             {
-                md->cTC[i]    = groups->grpnr[egcTC][ag];
+                md->cTC[i] = groups.groupNumbers[SimulationAtomGroupType::TemperatureCoupling][ag];
             }
-            md->cENER[i]    = ggrpnr(groups, egcENER, ag);
+            md->cENER[i] = getGroupType(groups, SimulationAtomGroupType::EnergyOutput, ag);
             if (md->cACC)
             {
-                md->cACC[i]   = groups->grpnr[egcACC][ag];
+                md->cACC[i] = groups.groupNumbers[SimulationAtomGroupType::Acceleration][ag];
             }
             if (md->cVCM)
             {
-                md->cVCM[i]       = groups->grpnr[egcVCM][ag];
+                md->cVCM[i] = groups.groupNumbers[SimulationAtomGroupType::MassCenterVelocityRemoval][ag];
             }
             if (md->cORF)
             {
-                md->cORF[i]       = ggrpnr(groups, egcORFIT, ag);
+                md->cORF[i] = getGroupType(groups, SimulationAtomGroupType::OrientationRestraintsFit, ag);
             }
 
             if (md->cU1)
             {
-                md->cU1[i]        = groups->grpnr[egcUser1][ag];
+                md->cU1[i] = groups.groupNumbers[SimulationAtomGroupType::User1][ag];
             }
             if (md->cU2)
             {
-                md->cU2[i]        = groups->grpnr[egcUser2][ag];
-            }
-
-            if (ir->bQMMM)
-            {
-                if (groups->grpnr[egcQMMM] == nullptr ||
-                    groups->grpnr[egcQMMM][ag] < groups->grps[egcQMMM].nr-1)
-                {
-                    md->bQM[i]      = TRUE;
-                }
-                else
-                {
-                    md->bQM[i]      = FALSE;
-                }
+                md->cU2[i] = groups.groupNumbers[SimulationAtomGroupType::User2][ag];
             }
         }
-        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
+        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
     }
 
     if (md->nr > 0)
@@ -481,30 +521,29 @@ void atoms2md(const gmx_mtop_t *mtop, const t_inputrec *ir,
     md->lambda = 0;
 }
 
-void update_mdatoms(t_mdatoms *md, real lambda)
+void update_mdatoms(t_mdatoms* md, real lambda)
 {
     if (md->nMassPerturbed && lambda != md->lambda)
     {
         real L1 = 1 - lambda;
 
         /* Update masses of perturbed atoms for the change in lambda */
-        // cppcheck-suppress unreadVariable
-        int gmx_unused nthreads = gmx_omp_nthreads_get(emntDefault);
+        int gmx_unused nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Default);
 #pragma omp parallel for num_threads(nthreads) schedule(static)
         for (int i = 0; i < md->nr; i++)
         {
             if (md->bPerturbed[i])
             {
-                md->massT[i] = L1*md->massA[i] + lambda*md->massB[i];
+                md->massT[i] = L1 * md->massA[i] + lambda * md->massB[i];
                 /* Atoms with invmass 0 or ALMOST_ZERO are massless or frozen
                  * and their invmass does not depend on lambda.
                  */
-                if (md->invmass[i] > 1.1*ALMOST_ZERO)
+                if (md->invmass[i] > 1.1 * ALMOST_ZERO)
                 {
-                    md->invmass[i] = 1.0/md->massT[i];
+                    md->invmass[i] = 1.0 / md->massT[i];
                     for (int d = 0; d < DIM; d++)
                     {
-                        if (md->invMassPerDim[i][d] > 1.1*ALMOST_ZERO)
+                        if (md->invMassPerDim[i][d] > 1.1 * ALMOST_ZERO)
                         {
                             md->invMassPerDim[i][d] = md->invmass[i];
                         }
@@ -514,7 +553,7 @@ void update_mdatoms(t_mdatoms *md, real lambda)
         }
 
         /* Update the system mass for the change in lambda */
-        md->tmass  = L1*md->tmassA + lambda*md->tmassB;
+        md->tmass = L1 * md->tmassA + lambda * md->tmassB;
     }
 
     md->lambda = lambda;
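
The behavioral core of this change is visible in atoms2md(): the per-atom acceleration-group array md->cACC is now allocated only when inputrec.useConstantAcceleration is set (replacing the old opts->ngacc > 1 test), and each atom's entry is filled from SimulationAtomGroupType::Acceleration. Below is a minimal, self-contained C++ sketch of that pattern; InputRecSketch, MdAtomsSketch, and assignAccelerationGroups are hypothetical stand-ins, not GROMACS types or functions.

// Minimal sketch, NOT GROMACS code: InputRecSketch, MdAtomsSketch and
// assignAccelerationGroups are hypothetical stand-ins for t_inputrec,
// t_mdatoms and the atoms2md() loop above. It models the change this diff
// makes: cACC is kept only when constant acceleration is enabled, and each
// atom stores its acceleration-group index.
#include <cstdio>
#include <optional>
#include <vector>

struct InputRecSketch
{
    bool useConstantAcceleration = false; // stand-in for inputrec.useConstantAcceleration
};

struct MdAtomsSketch
{
    // Absent when constant acceleration is off, one group index per atom otherwise;
    // this mirrors md->cACC being allocated only inside the new if() above.
    std::optional<std::vector<int>> cACC;
};

void assignAccelerationGroups(const InputRecSketch&   ir,
                              const std::vector<int>& accelerationGroupOfAtom,
                              MdAtomsSketch*          md)
{
    if (!ir.useConstantAcceleration)
    {
        md->cACC.reset(); // no array allocated when constant acceleration is disabled
        return;
    }
    // analogous to md->cACC[i] = groups.groupNumbers[SimulationAtomGroupType::Acceleration][ag]
    md->cACC = accelerationGroupOfAtom;
}

int main()
{
    InputRecSketch ir;
    ir.useConstantAcceleration = true;
    const std::vector<int> groupOfAtom = { 0, 0, 1, 1 }; // per-atom acceleration groups
    MdAtomsSketch md;
    assignAccelerationGroups(ir, groupOfAtom, &md);
    std::printf("atom 2 is in acceleration group %d\n", (*md.cACC)[2]);
    return 0;
}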