Convert vsite structs to C++
author     Berk Hess <hess@kth.se>
Tue, 11 Sep 2018 14:18:00 +0000 (16:18 +0200)
committer  Mark Abraham <mark.j.abraham@gmail.com>
Wed, 12 Sep 2018 16:20:07 +0000 (18:20 +0200)
Change-Id: I164e78e981404df535d1115d5eaab399382924ae

src/gromacs/domdec/domdec_topology.cpp
src/gromacs/mdlib/vsite.cpp
src/gromacs/mdlib/vsite.h
src/gromacs/mdrun/runner.cpp
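
The recurring pattern across these files: manually grown arrays with separate nalloc counters (snew/srenew/over_alloc_large) become standard containers that track their own size, and owning raw pointers become std::unique_ptr. A minimal before/after sketch of the allocation change, mirroring the add_vsite() hunk below (the function name here is illustrative, not part of the change):

    #include <vector>

    // Before (C style, as removed below): explicit capacity bookkeeping.
    //     if (vsi >= vsite_pbc_nalloc[ftype - F_VSITE2])
    //     {
    //         vsite_pbc_nalloc[ftype - F_VSITE2] = over_alloc_large(vsi + 1);
    //         srenew(vsite_pbc[ftype - F_VSITE2], vsite_pbc_nalloc[ftype - F_VSITE2]);
    //     }
    //     vsite_pbc[ftype - F_VSITE2][vsi] = pbc_a_mol;

    // After (C++ style, as introduced below): the vector owns its size.
    static void storePbcAtom(std::vector<int> *vsitePbcFtype, int vsi, int pbcAtom)
    {
        if (static_cast<size_t>(vsi) >= vsitePbcFtype->size())
        {
            vsitePbcFtype->resize(vsi + 1);
        }
        (*vsitePbcFtype)[vsi] = pbcAtom;
    }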

diff --git a/src/gromacs/domdec/domdec_topology.cpp b/src/gromacs/domdec/domdec_topology.cpp
index 51e8beb770caa2d93c15ac0df663647d9c6e29b6..d0cde4d3663ec0305e97218d644c043fdc447416 100644
--- a/src/gromacs/domdec/domdec_topology.cpp
+++ b/src/gromacs/domdec/domdec_topology.cpp
@@ -52,6 +52,7 @@
 #include <algorithm>
 #include <string>
 
+#include "gromacs/compat/make_unique.h"
 #include "gromacs/domdec/domdec.h"
 #include "gromacs/domdec/domdec_network.h"
 #include "gromacs/domdec/ga2la.h"
@@ -107,12 +108,11 @@ struct MolblockIndices
 /*! \brief Struct for thread local work data for local topology generation */
 struct thread_work_t
 {
-    t_idef     idef;             /**< Partial local topology */
-    int      **vsite_pbc;        /**< vsite PBC structure */
-    int       *vsite_pbc_nalloc; /**< Allocation sizes for vsite_pbc */
-    int        nbonded;          /**< The number of bondeds in this struct */
-    t_blocka   excl;             /**< List of exclusions */
-    int        excl_count;       /**< The total exclusion count for \p excl */
+    t_idef                    idef;       /**< Partial local topology */
+    std::unique_ptr<VsitePbc> vsitePbc;   /**< vsite PBC structure */
+    int                       nbonded;    /**< The number of bondeds in this struct */
+    t_blocka                  excl;       /**< List of exclusions */
+    int                       excl_count; /**< The total exclusion count for \p excl */
 };
 
 /*! \brief Struct for the reverse topology: links bonded interactions to atomsx */
@@ -494,7 +494,7 @@ static void count_excls(const t_block *cgs, const t_blocka *excls,
 
 /*! \brief Run the reverse ilist generation and store it in r_il when \p bAssign = TRUE */
 static int low_make_reverse_ilist(const t_ilist *il_mt, const t_atom *atom,
-                                  const int * const * vsite_pbc,
+                                  gmx::ArrayRef < const std::vector < int>> vsitePbc,
                                   int *count,
                                   gmx_bool bConstr, gmx_bool bSettle,
                                   gmx_bool bBCheck,
@@ -574,7 +574,7 @@ static int low_make_reverse_ilist(const t_ilist *il_mt, const t_atom *atom,
                             }
                             /* Store vsite pbc atom in a second extra entry */
                             r_il[r_index[a]+count[a]+2+nral+1] =
-                                (vsite_pbc ? vsite_pbc[ftype-F_VSITE2][i/(1+nral)] : -2);
+                                (!vsitePbc.empty() ? vsitePbc[ftype-F_VSITE2][i/(1+nral)] : -2);
                         }
                     }
                     else
@@ -601,7 +601,7 @@ static int low_make_reverse_ilist(const t_ilist *il_mt, const t_atom *atom,
 /*! \brief Make the reverse ilist: a list of bonded interactions linked to atoms */
 static int make_reverse_ilist(const t_ilist *ilist,
                               const t_atoms *atoms,
-                              const int * const * vsite_pbc,
+                              gmx::ArrayRef < const std::vector < int>> vsitePbc,
                               gmx_bool bConstr, gmx_bool bSettle,
                               gmx_bool bBCheck,
                               gmx_bool bLinkToAllAtoms,
@@ -612,7 +612,7 @@ static int make_reverse_ilist(const t_ilist *ilist,
     /* Count the interactions */
     nat_mt = atoms->nr;
     snew(count, nat_mt);
-    low_make_reverse_ilist(ilist, atoms->atom, vsite_pbc,
+    low_make_reverse_ilist(ilist, atoms->atom, vsitePbc,
                            count,
                            bConstr, bSettle, bBCheck,
                            gmx::EmptyArrayRef(), gmx::EmptyArrayRef(),
@@ -628,7 +628,7 @@ static int make_reverse_ilist(const t_ilist *ilist,
 
     /* Store the interactions */
     nint_mt =
-        low_make_reverse_ilist(ilist, atoms->atom, vsite_pbc,
+        low_make_reverse_ilist(ilist, atoms->atom, vsitePbc,
                                count,
                                bConstr, bSettle, bBCheck,
                                ril_mt->index, ril_mt->il,
@@ -643,7 +643,7 @@ static int make_reverse_ilist(const t_ilist *ilist,
 
 /*! \brief Generate the reverse topology */
 static gmx_reverse_top_t make_reverse_top(const gmx_mtop_t *mtop, gmx_bool bFE,
-                                          const int * const * const * vsite_pbc_molt,
+                                          gmx::ArrayRef<const VsitePbc> vsitePbcPerMoltype,
                                           gmx_bool bConstr, gmx_bool bSettle,
                                           gmx_bool bBCheck, int *nint)
 {
@@ -667,9 +667,13 @@ static gmx_reverse_top_t make_reverse_top(const gmx_mtop_t *mtop, gmx_bool bFE,
         }
 
         /* Make the atom to interaction list for this molecule type */
+        gmx::ArrayRef < const std::vector < int>> vsitePbc;
+        if (!vsitePbcPerMoltype.empty())
+        {
+            vsitePbc = gmx::makeConstArrayRef(vsitePbcPerMoltype[mt]);
+        }
         int numberOfInteractions =
-            make_reverse_ilist(molt.ilist, &molt.atoms,
-                               vsite_pbc_molt ? vsite_pbc_molt[mt] : nullptr,
+            make_reverse_ilist(molt.ilist, &molt.atoms, vsitePbc,
                                rt.bConstr, rt.bSettle, rt.bBCheck, FALSE,
                                &rt.ril_mt[mt]);
         nint_mt.push_back(numberOfInteractions);
@@ -698,7 +702,7 @@ static gmx_reverse_top_t make_reverse_top(const gmx_mtop_t *mtop, gmx_bool bFE,
 
         *nint +=
             make_reverse_ilist(mtop->intermolecular_ilist, &atoms_global,
-                               nullptr,
+                               gmx::EmptyArrayRef(),
                                rt.bConstr, rt.bSettle, rt.bBCheck, FALSE,
                                &rt.ril_intermol);
     }
@@ -728,12 +732,11 @@ static gmx_reverse_top_t make_reverse_top(const gmx_mtop_t *mtop, gmx_bool bFE,
     }
 
     rt.th_work.resize(gmx_omp_nthreads_get(emntDomdec));
-    if (vsite_pbc_molt != nullptr)
+    if (!vsitePbcPerMoltype.empty())
     {
         for (thread_work_t &th_work : rt.th_work)
         {
-            snew(th_work.vsite_pbc,        F_VSITEN - F_VSITE2 + 1);
-            snew(th_work.vsite_pbc_nalloc, F_VSITEN - F_VSITE2 + 1);
+            th_work.vsitePbc = gmx::compat::make_unique<VsitePbc>();
         }
     }
 
@@ -756,10 +759,15 @@ void dd_make_reverse_top(FILE *fplog,
      * the parallel version constraint algorithm(s).
      */
 
+    gmx::ArrayRef<const VsitePbc> vsitePbcPerMoltype;
+    if (vsite)
+    {
+        vsitePbcPerMoltype = gmx::makeConstArrayRef(vsite->vsite_pbc_molt);
+    }
+
     dd->reverse_top  = new gmx_reverse_top_t;
     *dd->reverse_top =
-        make_reverse_top(mtop, ir->efep != efepNO,
-                         vsite ? vsite->vsite_pbc_molt : nullptr,
+        make_reverse_top(mtop, ir->efep != efepNO, vsitePbcPerMoltype,
                          !dd->bInterCGcons, !dd->bInterCGsettles,
                          bBCheck, &dd->nbonded_global);
 
@@ -988,22 +996,23 @@ static void add_vsite(const gmx_ga2la_t &ga2la,
                       int ftype, int nral,
                       gmx_bool bHomeA, int a, int a_gl, int a_mol,
                       const t_iatom *iatoms,
-                      t_idef *idef, int **vsite_pbc, int *vsite_pbc_nalloc)
+                      t_idef *idef,
+                      VsitePbc *vsitePbc)
 {
-    int     k, vsi, pbc_a_mol;
+    int     k, pbc_a_mol;
     t_iatom tiatoms[1+MAXATOMLIST];
     int     j, ftype_r, nral_r;
 
     /* Add this interaction to the local topology */
     add_ifunc_for_vsites(tiatoms, ga2la, nral, bHomeA, a, a_gl, a_mol, iatoms, &idef->il[ftype]);
 
-    if (vsite_pbc)
+    if (vsitePbc)
     {
-        vsi = idef->il[ftype].nr/(1+nral) - 1;
-        if (vsi >= vsite_pbc_nalloc[ftype-F_VSITE2])
+        std::vector<int> &vsitePbcFtype = (*vsitePbc)[ftype - c_ftypeVsiteStart];
+        const int         vsi           = idef->il[ftype].nr/(1+nral) - 1;
+        if (static_cast<size_t>(vsi) >= vsitePbcFtype.size())
         {
-            vsite_pbc_nalloc[ftype-F_VSITE2] = over_alloc_large(vsi+1);
-            srenew(vsite_pbc[ftype-F_VSITE2], vsite_pbc_nalloc[ftype-F_VSITE2]);
+            vsitePbcFtype.resize(vsi + 1);
         }
         if (bHomeA)
         {
@@ -1014,7 +1023,7 @@ static void add_vsite(const gmx_ga2la_t &ga2la,
                  * -2: vsite and all constructing atoms are within the same cg, no pbc
                  * -1: vsite and its first constructing atom are in the same cg, do pbc
                  */
-                vsite_pbc[ftype-F_VSITE2][vsi] = pbc_a_mol;
+                vsitePbcFtype[vsi] = pbc_a_mol;
             }
             else
             {
@@ -1023,7 +1032,7 @@ static void add_vsite(const gmx_ga2la_t &ga2la,
                  * Since the order of the atoms does not change within a charge
                  * group, we do not need the global to local atom index.
                  */
-                vsite_pbc[ftype-F_VSITE2][vsi] = a + pbc_a_mol - iatoms[1];
+                vsitePbcFtype[vsi] = a + pbc_a_mol - iatoms[1];
             }
         }
         else
@@ -1033,7 +1042,7 @@ static void add_vsite(const gmx_ga2la_t &ga2la,
              * But we always turn on full_pbc to assure that higher order
              * recursion works correctly.
              */
-            vsite_pbc[ftype-F_VSITE2][vsi] = -1;
+            vsitePbcFtype[vsi] = -1;
         }
     }
 
@@ -1063,7 +1072,7 @@ static void add_vsite(const gmx_ga2la_t &ga2la,
                         add_vsite(ga2la, index, rtil, ftype_r, nral_r,
                                   FALSE, -1, a_gl+iatoms[k]-iatoms[1], iatoms[k],
                                   rtil.data() + j,
-                                  idef, vsite_pbc, vsite_pbc_nalloc);
+                                  idef, vsitePbc);
                         j += 1 + nral_r + 2;
                     }
                     else
@@ -1172,44 +1181,32 @@ static void combine_idef(t_idef                             *dest,
                 srenew(ild->iatoms, ild->nalloc);
             }
 
-            gmx_bool vpbc;
-            int      nral1 = 0, ftv = 0;
-
-            vpbc = (((interaction_function[ftype].flags & IF_VSITE) != 0u) &&
-                    vsite->vsite_pbc_loc != nullptr);
-            if (vpbc)
-            {
-                nral1 = 1 + NRAL(ftype);
-                ftv   = ftype - F_VSITE2;
-                if ((ild->nr + n)/nral1 > vsite->vsite_pbc_loc_nalloc[ftv])
-                {
-                    vsite->vsite_pbc_loc_nalloc[ftv] =
-                        over_alloc_large((ild->nr + n)/nral1);
-                    srenew(vsite->vsite_pbc_loc[ftv],
-                           vsite->vsite_pbc_loc_nalloc[ftv]);
-                }
-            }
+            const bool vpbc  =
+                (((interaction_function[ftype].flags & IF_VSITE) != 0u) &&
+                 vsite->vsite_pbc_loc);
+            const int  nral1 = 1 + NRAL(ftype);
+            const int  ftv   = ftype - c_ftypeVsiteStart;
 
             for (gmx::index s = 1; s < src.size(); s++)
             {
-                const t_ilist *ils;
-                int            i;
+                const t_ilist &ils = src[s].idef.il[ftype];
 
-                ils = &src[s].idef.il[ftype];
-                for (i = 0; i < ils->nr; i++)
+                for (int i = 0; i < ils.nr; i++)
                 {
-                    ild->iatoms[ild->nr+i] = ils->iatoms[i];
+                    ild->iatoms[ild->nr + i] = ils.iatoms[i];
                 }
                 if (vpbc)
                 {
-                    for (i = 0; i < ils->nr; i += nral1)
+                    const std::vector<int> &pbcSrc  = (*src[s].vsitePbc)[ftv];
+                    std::vector<int>       &pbcDest = (*vsite->vsite_pbc_loc)[ftv];
+                    pbcDest.resize((ild->nr + ils.nr)/nral1);
+                    for (int i = 0; i < ils.nr; i += nral1)
                     {
-                        vsite->vsite_pbc_loc[ftv][(ild->nr+i)/nral1] =
-                            src[s].vsite_pbc[ftv][i/nral1];
+                        pbcDest[(ild->nr + i)/nral1] = pbcSrc[i/nral1];
                     }
                 }
 
-                ild->nr += ils->nr;
+                ild->nr += ils.nr;
             }
 
             /* Position restraints need an additional treatment */
@@ -1269,7 +1266,7 @@ check_assign_interactions_atom(int i, int i_gl,
                                rvec *cg_cm,
                                const t_iparams *ip_in,
                                t_idef *idef,
-                               int **vsite_pbc, int *vsite_pbc_nalloc,
+                               VsitePbc *vsitePbc,
                                int iz,
                                gmx_bool bBCheck,
                                int *nbonded_local)
@@ -1313,7 +1310,7 @@ check_assign_interactions_atom(int i, int i_gl,
             {
                 add_vsite(*dd->ga2la, index, rtil, ftype, nral,
                           TRUE, i, i_gl, i_mol,
-                          iatoms.data(), idef, vsite_pbc, vsite_pbc_nalloc);
+                          iatoms.data(), idef, vsitePbc);
             }
             j += 1 + nral + 2;
         }
@@ -1504,8 +1501,7 @@ static int make_bondeds_zone(gmx_domdec_t *dd,
                              int *la2lc, t_pbc *pbc_null, rvec *cg_cm,
                              const t_iparams *ip_in,
                              t_idef *idef,
-                             int **vsite_pbc,
-                             int *vsite_pbc_nalloc,
+                             VsitePbc *vsitePbc,
                              int izone,
                              gmx::RangePartitioning::Block atomRange)
 {
@@ -1540,7 +1536,7 @@ static int make_bondeds_zone(gmx_domdec_t *dd,
                                        pbc_null,
                                        cg_cm,
                                        ip_in,
-                                       idef, vsite_pbc, vsite_pbc_nalloc,
+                                       idef, vsitePbc,
                                        izone,
                                        bBCheck,
                                        &nbonded_local);
@@ -1563,7 +1559,7 @@ static int make_bondeds_zone(gmx_domdec_t *dd,
                                            pbc_null,
                                            cg_cm,
                                            ip_in,
-                                           idef, vsite_pbc, vsite_pbc_nalloc,
+                                           idef, vsitePbc,
                                            izone,
                                            bBCheck,
                                            &nbonded_local);
@@ -1939,8 +1935,6 @@ static int make_local_bondeds_excls(gmx_domdec_t *dd,
             {
                 int       cg0t, cg1t;
                 t_idef   *idef_t;
-                int     **vsite_pbc;
-                int      *vsite_pbc_nalloc;
                 t_blocka *excl_t;
 
                 cg0t = cg0 + ((cg1 - cg0)* thread   )/numThreads;
@@ -1956,32 +1950,25 @@ static int make_local_bondeds_excls(gmx_domdec_t *dd,
                     clear_idef(idef_t);
                 }
 
+                VsitePbc *vsitePbc = nullptr;
                 if (vsite && vsite->bHaveChargeGroups && vsite->n_intercg_vsite > 0)
                 {
                     if (thread == 0)
                     {
-                        vsite_pbc        = vsite->vsite_pbc_loc;
-                        vsite_pbc_nalloc = vsite->vsite_pbc_loc_nalloc;
+                        vsitePbc = vsite->vsite_pbc_loc.get();
                     }
                     else
                     {
-                        vsite_pbc        = rt->th_work[thread].vsite_pbc;
-                        vsite_pbc_nalloc = rt->th_work[thread].vsite_pbc_nalloc;
+                        vsitePbc = rt->th_work[thread].vsitePbc.get();
                     }
                 }
-                else
-                {
-                    vsite_pbc        = nullptr;
-                    vsite_pbc_nalloc = nullptr;
-                }
 
                 rt->th_work[thread].nbonded =
                     make_bondeds_zone(dd, zones,
                                       mtop->molblock,
                                       bRCheckMB, rcheck, bRCheck2B, rc2,
                                       la2lc, pbc_null, cg_cm, idef->iparams,
-                                      idef_t,
-                                      vsite_pbc, vsite_pbc_nalloc,
+                                      idef_t, vsitePbc,
                                       izone,
                                       dd->atomGrouping().subRange(cg0t, cg1t));
 
@@ -2291,7 +2278,8 @@ t_blocka *make_charge_group_links(const gmx_mtop_t *mtop, gmx_domdec_t *dd,
         atoms.atom = nullptr;
 
         make_reverse_ilist(mtop->intermolecular_ilist, &atoms,
-                           nullptr, FALSE, FALSE, FALSE, TRUE, &ril_intermol);
+                           gmx::EmptyArrayRef(),
+                           FALSE, FALSE, FALSE, TRUE, &ril_intermol);
     }
 
     snew(link, 1);
@@ -2318,8 +2306,8 @@ t_blocka *make_charge_group_links(const gmx_mtop_t *mtop, gmx_domdec_t *dd,
          * The constraints are discarded here.
          */
         reverse_ilist_t ril;
-        make_reverse_ilist(molt.ilist, &molt.atoms,
-                           nullptr, FALSE, FALSE, FALSE, TRUE, &ril);
+        make_reverse_ilist(molt.ilist, &molt.atoms, gmx::EmptyArrayRef(),
+                           FALSE, FALSE, FALSE, TRUE, &ril);
 
         cgi_mb = &cginfo_mb[mb];
 
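
The main pattern in domdec_topology.cpp above: the const int * const * argument with a nullptr sentinel becomes gmx::ArrayRef<const std::vector<int>>, where an empty view plays the role of the old null pointer and no separate size argument is needed. A minimal sketch of the calling convention, assuming the gromacs/utility/arrayref.h header; the function names here are illustrative only:

    #include <vector>

    #include "gromacs/utility/arrayref.h"  // gmx::ArrayRef, gmx::EmptyArrayRef, gmx::makeConstArrayRef

    // Consumer in the style of low_make_reverse_ilist(): an empty ArrayRef
    // replaces the old "vsite_pbc == nullptr" check.
    static int pbcAtomOrSentinel(gmx::ArrayRef<const std::vector<int>> vsitePbc,
                                 int ftypeOffset, int vsiteIndex)
    {
        return !vsitePbc.empty() ? vsitePbc[ftypeOffset][vsiteIndex] : -2;
    }

    // Caller in the style of make_reverse_top(): the per-moltype data is viewed,
    // not copied, and callers without vsite PBC data pass an empty reference.
    static void exampleCaller(const std::vector<std::vector<int>> &vsitePbcOfMoltype)
    {
        gmx::ArrayRef<const std::vector<int>> view = gmx::makeConstArrayRef(vsitePbcOfMoltype);
        pbcAtomOrSentinel(view, 0, 0);                  // data present
        pbcAtomOrSentinel(gmx::EmptyArrayRef(), 0, 0);  // the old nullptr case
    }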
diff --git a/src/gromacs/mdlib/vsite.cpp b/src/gromacs/mdlib/vsite.cpp
index 11d68e01db2d83946f1c7123bb4b8b74328e4059..db5cb53fff5859c0179df2e96e2bfc1f7af987f6 100644
--- a/src/gromacs/mdlib/vsite.cpp
+++ b/src/gromacs/mdlib/vsite.cpp
@@ -43,6 +43,7 @@
 #include <algorithm>
 #include <vector>
 
+#include "gromacs/compat/make_unique.h"
 #include "gromacs/domdec/domdec.h"
 #include "gromacs/domdec/domdec_struct.h"
 #include "gromacs/gmxlib/network.h"
@@ -63,7 +64,6 @@
 #include "gromacs/utility/fatalerror.h"
 #include "gromacs/utility/gmxassert.h"
 #include "gromacs/utility/gmxomp.h"
-#include "gromacs/utility/smalloc.h"
 
 /* The strategy used here for assigning virtual sites to (thread-)tasks
  * is as follows:
@@ -131,7 +131,8 @@ struct InterdependentTask
 };
 
 /*! \brief Vsite thread task data structure */
-struct VsiteThread {
+struct VsiteThread
+{
     //! Start of atom range of this task
     int                rangeStart;
     //! End of atom range of this task
@@ -159,16 +160,6 @@ struct VsiteThread {
     }
 };
 
-
-/* The start and end values of for the vsite indices in the ftype enum.
- * The validity of these values is checked in init_vsite.
- * This is used to avoid loops over all ftypes just to get the vsite entries.
- * (We should replace the fixed ilist array by only the used entries.)
- */
-static const int c_ftypeVsiteStart = F_VSITE2;
-static const int c_ftypeVsiteEnd   = F_VSITEN + 1;
-
-
 /*! \brief Returns the sum of the vsite ilist sizes over all vsite types
  *
  * \param[in] ilist  The interaction list
@@ -473,10 +464,10 @@ static void construct_vsites_thread(const gmx_vsite_t *vsite,
         inv_dt = 1.0;
     }
 
-    const PbcMode  pbcMode   = getPbcMode(pbc_null, vsite);
+    const PbcMode            pbcMode   = getPbcMode(pbc_null, vsite);
     /* We need another pbc pointer, as with charge groups we switch per vsite */
-    const t_pbc   *pbc_null2 = pbc_null;
-    const int     *vsite_pbc = nullptr;
+    const t_pbc             *pbc_null2 = pbc_null;
+    gmx::ArrayRef<const int> vsite_pbc;
 
     for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
     {
@@ -494,7 +485,7 @@ static void construct_vsites_thread(const gmx_vsite_t *vsite,
 
             if (pbcMode == PbcMode::chargeGroup)
             {
-                vsite_pbc = vsite->vsite_pbc_loc[ftype - c_ftypeVsiteStart];
+                vsite_pbc = (*vsite->vsite_pbc_loc)[ftype - c_ftypeVsiteStart];
             }
 
             for (int i = 0; i < nr; )
@@ -1496,10 +1487,10 @@ static void spread_vsite_f_thread(const gmx_vsite_t *vsite,
                                   t_iparams ip[], const t_ilist ilist[],
                                   const t_graph *g, const t_pbc *pbc_null)
 {
-    const PbcMode  pbcMode   = getPbcMode(pbc_null, vsite);
+    const PbcMode            pbcMode   = getPbcMode(pbc_null, vsite);
     /* We need another pbc pointer, as with charge groups we switch per vsite */
-    const t_pbc   *pbc_null2 = pbc_null;
-    const int     *vsite_pbc = nullptr;
+    const t_pbc             *pbc_null2 = pbc_null;
+    gmx::ArrayRef<const int> vsite_pbc;
 
     /* this loop goes backwards to be able to build *
      * higher type vsites from lower types         */
@@ -1523,12 +1514,15 @@ static void spread_vsite_f_thread(const gmx_vsite_t *vsite,
             }
             else if (pbcMode == PbcMode::chargeGroup)
             {
-                vsite_pbc = vsite->vsite_pbc_loc[ftype - c_ftypeVsiteStart];
+                if (vsite->vsite_pbc_loc)
+                {
+                    vsite_pbc = (*vsite->vsite_pbc_loc)[ftype - c_ftypeVsiteStart];
+                }
             }
 
             for (int i = 0; i < nr; )
             {
-                if (vsite_pbc != nullptr)
+                if (!vsite_pbc.empty())
                 {
                     if (vsite_pbc[i/(1 + nra)] > -2)
                     {
@@ -1686,7 +1680,7 @@ void spread_vsite_f(const gmx_vsite_t *vsite,
             try
             {
                 int          thread = gmx_omp_get_thread_num();
-                VsiteThread *tData  = vsite->tData[thread];
+                VsiteThread &tData  = *vsite->tData[thread];
 
                 rvec        *fshift_t;
                 if (thread == 0 || fshift == nullptr)
@@ -1695,7 +1689,7 @@ void spread_vsite_f(const gmx_vsite_t *vsite,
                 }
                 else
                 {
-                    fshift_t = tData->fshift;
+                    fshift_t = tData.fshift;
 
                     for (int i = 0; i < SHIFTS; i++)
                     {
@@ -1704,16 +1698,16 @@ void spread_vsite_f(const gmx_vsite_t *vsite,
                 }
                 if (VirCorr)
                 {
-                    clear_mat(tData->dxdf);
+                    clear_mat(tData.dxdf);
                 }
 
-                if (tData->useInterdependentTask)
+                if (tData.useInterdependentTask)
                 {
                     /* Spread the vsites that spread outside our local range.
                      * This is done using a thread-local force buffer force.
                      * First we need to copy the input vsite forces to force.
                      */
-                    InterdependentTask *idTask = &tData->idTask;
+                    InterdependentTask *idTask = &tData.idTask;
 
                     /* Clear the buffer elements set by our task during
                      * the last call to spread_vsite_f.
@@ -1728,9 +1722,9 @@ void spread_vsite_f(const gmx_vsite_t *vsite,
                     }
                     spread_vsite_f_thread(vsite,
                                           x, as_rvec_array(idTask->force.data()), fshift_t,
-                                          VirCorr, tData->dxdf,
+                                          VirCorr, tData.dxdf,
                                           idef->iparams,
-                                          tData->idTask.ilist,
+                                          tData.idTask.ilist,
                                           g, pbc_null);
 
                     /* We need a barrier before reducing forces below
@@ -1761,18 +1755,18 @@ void spread_vsite_f(const gmx_vsite_t *vsite,
                     /* Clear the vsite forces, both in f and force */
                     for (int i = 0; i < nvsite; i++)
                     {
-                        int ind = tData->idTask.vsite[i];
+                        int ind = tData.idTask.vsite[i];
                         clear_rvec(f[ind]);
-                        clear_rvec(tData->idTask.force[ind]);
+                        clear_rvec(tData.idTask.force[ind]);
                     }
                 }
 
                 /* Spread the vsites that spread locally only */
                 spread_vsite_f_thread(vsite,
                                       x, f, fshift_t,
-                                      VirCorr, tData->dxdf,
+                                      VirCorr, tData.dxdf,
                                       idef->iparams,
-                                      tData->ilist,
+                                      tData.ilist,
                                       g, pbc_null);
             }
             GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
@@ -1873,9 +1867,10 @@ int count_intercg_vsites(const gmx_mtop_t *mtop)
     return n_intercg_vsite;
 }
 
-static int **get_vsite_pbc(const t_iparams *iparams, const t_ilist *ilist,
-                           const t_atom *atom, const t_mdatoms *md,
-                           const t_block &cgs)
+static VsitePbc
+get_vsite_pbc(const t_iparams *iparams, const t_ilist *ilist,
+              const t_atom *atom, const t_mdatoms *md,
+              const t_block &cgs)
 {
     /* Make an atom to charge group index */
     std::vector<int> a2cg = atom2cg(cgs);
@@ -1892,19 +1887,17 @@ static int **get_vsite_pbc(const t_iparams *iparams, const t_ilist *ilist,
         }
     }
 
-    int **vsite_pbc;
-    snew(vsite_pbc, F_VSITEN-F_VSITE2+1);
+    VsitePbc vsite_pbc;
 
     for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
     {
         {   // TODO remove me
-            int            nral = NRAL(ftype);
-            const t_ilist *il   = &ilist[ftype];
-            const t_iatom *ia   = il->iatoms;
-            int           *vsite_pbc_f;
+            int               nral = NRAL(ftype);
+            const t_ilist    *il   = &ilist[ftype];
+            const t_iatom    *ia   = il->iatoms;
 
-            snew(vsite_pbc[ftype-F_VSITE2], il->nr/(1 + nral));
-            vsite_pbc_f = vsite_pbc[ftype-F_VSITE2];
+            std::vector<int> &vsite_pbc_f = vsite_pbc[ftype - F_VSITE2];
+            vsite_pbc_f.resize(il->nr/(1 + nral));
 
             int i = 0;
             while (i < il->nr)
@@ -2010,11 +2003,14 @@ static int **get_vsite_pbc(const t_iparams *iparams, const t_ilist *ilist,
 }
 
 
-gmx_vsite_t *initVsite(const gmx_mtop_t &mtop,
-                       const t_commrec  *cr)
+std::unique_ptr<gmx_vsite_t>
+initVsite(const gmx_mtop_t &mtop,
+          const t_commrec  *cr)
 {
     GMX_RELEASE_ASSERT(cr != nullptr, "We need a valid commrec");
 
+    std::unique_ptr<gmx_vsite_t> vsite;
+
     /* check if there are vsites */
     int nvsite = 0;
     for (int ftype = 0; ftype < F_NRE; ftype++)
@@ -2033,10 +2029,10 @@ gmx_vsite_t *initVsite(const gmx_mtop_t &mtop,
 
     if (nvsite == 0)
     {
-        return nullptr;
+        return vsite;
     }
 
-    gmx_vsite_t *vsite = new(gmx_vsite_t);
+    vsite = gmx::compat::make_unique<gmx_vsite_t>();
 
     vsite->n_intercg_vsite   = count_intercg_vsites(&mtop);
 
@@ -2055,8 +2051,7 @@ gmx_vsite_t *initVsite(const gmx_mtop_t &mtop,
         vsite->n_intercg_vsite > 0 &&
         DOMAINDECOMP(cr))
     {
-        vsite->nvsite_pbc_molt = mtop.moltype.size();
-        snew(vsite->vsite_pbc_molt, vsite->nvsite_pbc_molt);
+        vsite->vsite_pbc_molt.resize(mtop.moltype.size());
         for (size_t mt = 0; mt < mtop.moltype.size(); mt++)
         {
             const gmx_moltype_t &molt = mtop.moltype[mt];
@@ -2066,13 +2061,7 @@ gmx_vsite_t *initVsite(const gmx_mtop_t &mtop,
                                                       molt.cgs);
         }
 
-        snew(vsite->vsite_pbc_loc_nalloc, c_ftypeVsiteEnd - c_ftypeVsiteStart);
-        snew(vsite->vsite_pbc_loc,        c_ftypeVsiteEnd - c_ftypeVsiteStart);
-    }
-    else
-    {
-        vsite->vsite_pbc_molt = nullptr;
-        vsite->vsite_pbc_loc  = nullptr;
+        vsite->vsite_pbc_loc = gmx::compat::make_unique<VsitePbc>();
     }
 
     vsite->nthreads = gmx_omp_nthreads_get(emntVSITE);
@@ -2080,32 +2069,37 @@ gmx_vsite_t *initVsite(const gmx_mtop_t &mtop,
     if (vsite->nthreads > 1)
     {
         /* We need one extra thread data structure for the overlap vsites */
-        snew(vsite->tData, vsite->nthreads + 1);
+        vsite->tData.resize(vsite->nthreads + 1);
 #pragma omp parallel for num_threads(vsite->nthreads) schedule(static)
         for (int thread = 0; thread < vsite->nthreads; thread++)
         {
             try
             {
-                vsite->tData[thread] = new VsiteThread;
+                vsite->tData[thread] = gmx::compat::make_unique<VsiteThread>();
 
-                InterdependentTask *idTask = &vsite->tData[thread]->idTask;
-                idTask->nuse               = 0;
-                idTask->atomIndex.resize(vsite->nthreads);
+                InterdependentTask &idTask = vsite->tData[thread]->idTask;
+                idTask.nuse                = 0;
+                idTask.atomIndex.resize(vsite->nthreads);
             }
             GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
         }
         if (vsite->nthreads > 1)
         {
-            vsite->tData[vsite->nthreads] = new VsiteThread;
+            vsite->tData[vsite->nthreads] = gmx::compat::make_unique<VsiteThread>();
         }
     }
 
-    vsite->taskIndex       = nullptr;
-    vsite->taskIndexNalloc = 0;
-
     return vsite;
 }
 
+gmx_vsite_t::gmx_vsite_t()
+{
+}
+
+gmx_vsite_t::~gmx_vsite_t()
+{
+}
+
 static inline void flagAtom(InterdependentTask *idTask, int atom,
                             int thread, int nthread, int natperthread)
 {
@@ -2135,7 +2129,7 @@ static void assignVsitesToThread(VsiteThread           *tData,
                                  int                    thread,
                                  int                    nthread,
                                  int                    natperthread,
-                                 int                   *taskIndex,
+                                 gmx::ArrayRef<int>     taskIndex,
                                  const t_ilist         *ilist,
                                  const t_iparams       *ip,
                                  const unsigned short  *ptype)
@@ -2276,11 +2270,11 @@ static void assignVsitesToThread(VsiteThread           *tData,
 }
 
 /*! \brief Assign all vsites with taskIndex[]==task to task tData */
-static void assignVsitesToSingleTask(VsiteThread     *tData,
-                                     int              task,
-                                     const int       *taskIndex,
-                                     const t_ilist   *ilist,
-                                     const t_iparams *ip)
+static void assignVsitesToSingleTask(VsiteThread              *tData,
+                                     int                       task,
+                                     gmx::ArrayRef<const int>  taskIndex,
+                                     const t_ilist            *ilist,
+                                     const t_iparams          *ip)
 {
     for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
     {
@@ -2407,17 +2401,13 @@ void split_vsites_over_threads(const t_ilist   *ilist,
     /* To simplify the vsite assignment, we make an index which tells us
      * to which task particles, both non-vsites and vsites, are assigned.
      */
-    if (mdatoms->nr > vsite->taskIndexNalloc)
-    {
-        vsite->taskIndexNalloc = over_alloc_large(mdatoms->nr);
-        srenew(vsite->taskIndex, vsite->taskIndexNalloc);
-    }
+    vsite->taskIndex.resize(mdatoms->nr);
 
     /* Initialize the task index array. Here we assign the non-vsite
      * particles to task=thread, so we easily figure out if vsites
      * depend on local and/or non-local particles in assignVsitesToThread.
      */
-    int *taskIndex = vsite->taskIndex;
+    gmx::ArrayRef<int> taskIndex = vsite->taskIndex;
     {
         int  thread = 0;
         for (int i = 0; i < mdatoms->nr; i++)
@@ -2444,31 +2434,31 @@ void split_vsites_over_threads(const t_ilist   *ilist,
         try
         {
             int          thread = gmx_omp_get_thread_num();
-            VsiteThread *tData  = vsite->tData[thread];
+            VsiteThread &tData  = *vsite->tData[thread];
 
             /* Clear the buffer use flags that were set before */
-            if (tData->useInterdependentTask)
+            if (tData.useInterdependentTask)
             {
-                InterdependentTask *idTask = &tData->idTask;
+                InterdependentTask &idTask = tData.idTask;
 
                 /* To avoid an extra OpenMP barrier in spread_vsite_f,
                  * we clear the force buffer at the next step,
                  * so we need to do it here as well.
                  */
-                clearTaskForceBufferUsedElements(idTask);
+                clearTaskForceBufferUsedElements(&idTask);
 
-                idTask->vsite.resize(0);
+                idTask.vsite.resize(0);
                 for (int t = 0; t < vsite->nthreads; t++)
                 {
-                    AtomIndex *atomIndex = &idTask->atomIndex[t];
-                    int        natom     = atomIndex->atom.size();
+                    AtomIndex &atomIndex = idTask.atomIndex[t];
+                    int        natom     = atomIndex.atom.size();
                     for (int i = 0; i < natom; i++)
                     {
-                        idTask->use[atomIndex->atom[i]] = false;
+                        idTask.use[atomIndex.atom[i]] = false;
                     }
-                    atomIndex->atom.resize(0);
+                    atomIndex.atom.resize(0);
                 }
-                idTask->nuse = 0;
+                idTask.nuse = 0;
             }
 
             /* To avoid large f_buf allocations of #threads*vsite_atom_range
@@ -2477,40 +2467,40 @@ void split_vsites_over_threads(const t_ilist   *ilist,
              * vsites will end up in the separate task.
              * Note that useTask2 should be the same for all threads.
              */
-            tData->useInterdependentTask = (vsite_atom_range <= 200000);
-            if (tData->useInterdependentTask)
+            tData.useInterdependentTask = (vsite_atom_range <= 200000);
+            if (tData.useInterdependentTask)
             {
                 size_t              natoms_use_in_vsites = vsite_atom_range;
-                InterdependentTask *idTask               = &tData->idTask;
+                InterdependentTask &idTask               = tData.idTask;
                 /* To avoid resizing and re-clearing every nstlist steps,
                  * we never down size the force buffer.
                  */
-                if (natoms_use_in_vsites > idTask->force.size() ||
-                    natoms_use_in_vsites > idTask->use.size())
+                if (natoms_use_in_vsites > idTask.force.size() ||
+                    natoms_use_in_vsites > idTask.use.size())
                 {
-                    idTask->force.resize(natoms_use_in_vsites, { 0, 0, 0 });
-                    idTask->use.resize(natoms_use_in_vsites, false);
+                    idTask.force.resize(natoms_use_in_vsites, { 0, 0, 0 });
+                    idTask.use.resize(natoms_use_in_vsites, false);
                 }
             }
 
             /* Assign all vsites that can execute independently on threads */
-            tData->rangeStart     =  thread     *natperthread;
+            tData.rangeStart     =  thread     *natperthread;
             if (thread < vsite->nthreads - 1)
             {
-                tData->rangeEnd   = (thread + 1)*natperthread;
+                tData.rangeEnd   = (thread + 1)*natperthread;
             }
             else
             {
                 /* The last thread should cover up to the end of the range */
-                tData->rangeEnd   = mdatoms->nr;
+                tData.rangeEnd   = mdatoms->nr;
             }
-            assignVsitesToThread(tData,
+            assignVsitesToThread(&tData,
                                  thread, vsite->nthreads,
                                  natperthread,
                                  taskIndex,
                                  ilist, ip, mdatoms->ptype);
 
-            if (tData->useInterdependentTask)
+            if (tData.useInterdependentTask)
             {
                 /* In the worst case, all tasks write to force ranges of
                  * all other tasks, leading to #tasks^2 scaling (this is only
@@ -2519,24 +2509,24 @@ void split_vsites_over_threads(const t_ilist   *ilist,
                  * scaling at high thread counts we therefore construct
                  * an index to only loop over the actually affected tasks.
                  */
-                InterdependentTask *idTask = &tData->idTask;
+                InterdependentTask &idTask = tData.idTask;
 
                 /* Ensure assignVsitesToThread finished on other threads */
 #pragma omp barrier
 
-                idTask->spreadTask.resize(0);
-                idTask->reduceTask.resize(0);
+                idTask.spreadTask.resize(0);
+                idTask.reduceTask.resize(0);
                 for (int t = 0; t < vsite->nthreads; t++)
                 {
                     /* Do we write to the force buffer of task t? */
-                    if (!idTask->atomIndex[t].atom.empty())
+                    if (!idTask.atomIndex[t].atom.empty())
                     {
-                        idTask->spreadTask.push_back(t);
+                        idTask.spreadTask.push_back(t);
                     }
                     /* Does task t write to our force buffer? */
                     if (!vsite->tData[t]->idTask.atomIndex[thread].atom.empty())
                     {
-                        idTask->reduceTask.push_back(t);
+                        idTask.reduceTask.push_back(t);
                     }
                 }
             }
@@ -2546,7 +2536,7 @@ void split_vsites_over_threads(const t_ilist   *ilist,
     /* Assign all remaining vsites, that will have taskIndex[]=2*vsite->nthreads,
      * to a single task that will not run in parallel with other tasks.
      */
-    assignVsitesToSingleTask(vsite->tData[vsite->nthreads],
+    assignVsitesToSingleTask(vsite->tData[vsite->nthreads].get(),
                              2*vsite->nthreads,
                              taskIndex,
                              ilist, ip);
@@ -2597,9 +2587,10 @@ void set_vsite_top(gmx_vsite_t          *vsite,
 {
     if (vsite->n_intercg_vsite > 0 && vsite->bHaveChargeGroups)
     {
-        vsite->vsite_pbc_loc = get_vsite_pbc(top->idef.iparams,
-                                             top->idef.il, nullptr, md,
-                                             top->cgs);
+        vsite->vsite_pbc_loc  = gmx::compat::make_unique<VsitePbc>();
+        *vsite->vsite_pbc_loc = get_vsite_pbc(top->idef.iparams,
+                                              top->idef.il, nullptr, md,
+                                              top->cgs);
     }
 
     if (vsite->nthreads > 1)
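
initVsite() above now returns std::unique_ptr<gmx_vsite_t> instead of a raw pointer, with an empty pointer taking over the old "return nullptr" when the system has no virtual sites, so callers can keep their if (vsite) tests. A minimal sketch of that factory shape, using std::make_unique where the code above uses the C++11 helper gmx::compat::make_unique (the types here are illustrative):

    #include <memory>

    struct Topology     { int numVsites = 0; };
    struct VsiteHandler { int n_intercg_vsite = 0; };

    // Factory in the style of initVsite(): engaged only when there is work to do.
    static std::unique_ptr<VsiteHandler> makeVsiteHandler(const Topology &top)
    {
        std::unique_ptr<VsiteHandler> handler;
        if (top.numVsites == 0)
        {
            return handler;                        // empty; was "return nullptr"
        }
        handler = std::make_unique<VsiteHandler>();
        handler->n_intercg_vsite = top.numVsites;
        return handler;
    }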
diff --git a/src/gromacs/mdlib/vsite.h b/src/gromacs/mdlib/vsite.h
index 8b1d2132f85ad2b8668da787c7e4b294a948b9bb..98252a0f5a6b6f3cda24e579641824d0cc384cb7 100644
--- a/src/gromacs/mdlib/vsite.h
+++ b/src/gromacs/mdlib/vsite.h
@@ -37,6 +37,8 @@
 #ifndef GMX_MDLIB_VSITE_H
 #define GMX_MDLIB_VSITE_H
 
+#include <memory>
+
 #include "gromacs/math/vectypes.h"
 #include "gromacs/pbcutil/ishift.h"
 #include "gromacs/topology/idef.h"
@@ -52,21 +54,37 @@ struct t_ilist;
 struct t_mdatoms;
 struct t_nrnb;
 struct gmx_wallcycle;
+struct VsiteThread;
+
+/* The start and end values of for the vsite indices in the ftype enum.
+ * The validity of these values is checked in init_vsite.
+ * This is used to avoid loops over all ftypes just to get the vsite entries.
+ * (We should replace the fixed ilist array by only the used entries.)
+ */
+static constexpr int c_ftypeVsiteStart = F_VSITE2;
+static constexpr int c_ftypeVsiteEnd   = F_VSITEN + 1;
+
+/* Type for storing PBC atom information for all vsite types in the system */
+typedef std::array<std::vector<int>, c_ftypeVsiteEnd - c_ftypeVsiteStart> VsitePbc;
+
+/* Data for handling vsites, needed with OpenMP threading or with charge-groups and PBC */
+struct gmx_vsite_t
+{
+    /* Constructor */
+    gmx_vsite_t();
+
+    /* Destructor */
+    ~gmx_vsite_t();
 
-typedef struct gmx_vsite_t {
-    gmx_bool             bHaveChargeGroups;    /* Do we have charge groups?               */
-    int                  n_intercg_vsite;      /* The number of inter charge group vsites */
-    int                  nvsite_pbc_molt;      /* The array size of vsite_pbc_molt        */
-    int               ***vsite_pbc_molt;       /* The pbc atoms for intercg vsites        */
-    int                **vsite_pbc_loc;        /* The local pbc atoms                     */
-    int                 *vsite_pbc_loc_nalloc; /* Sizes of vsite_pbc_loc                  */
-    int                  nthreads;             /* Number of threads used for vsites       */
-    struct VsiteThread **tData;                /* Thread local vsites and work structs    */
-    int                 *taskIndex;            /* Work array                              */
-    int                  taskIndexNalloc;      /* Size of taskIndex                       */
-    bool                 useDomdec;            /* Tells whether we use domain decomposition with more than 1 DD rank */
-
-} gmx_vsite_t;
+    gmx_bool                  bHaveChargeGroups;         /* Do we have charge groups?               */
+    int                       n_intercg_vsite;           /* The number of inter charge group vsites */
+    std::vector<VsitePbc>     vsite_pbc_molt;            /* The pbc atoms for intercg vsites        */
+    std::unique_ptr<VsitePbc> vsite_pbc_loc;             /* The local pbc atoms                     */
+    int                       nthreads;                  /* Number of threads used for vsites       */
+    std::vector < std::unique_ptr < VsiteThread>> tData; /* Thread local vsites and work structs    */
+    std::vector<int>          taskIndex;                 /* Work array                              */
+    bool                      useDomdec;                 /* Tells whether we use domain decomposition with more than 1 DD rank */
+};
 
 /*! \brief Create positions of vsite atoms based for the local system
  *
@@ -121,8 +139,9 @@ int count_intercg_vsites(const gmx_mtop_t *mtop);
  * \param[in] cr    The communication record
  * \returns A valid vsite struct or nullptr when there are no virtual sites
  */
-gmx_vsite_t *initVsite(const gmx_mtop_t &mtop,
-                       const t_commrec  *cr);
+std::unique_ptr<gmx_vsite_t>
+initVsite(const gmx_mtop_t &mtop,
+          const t_commrec  *cr);
 
 void split_vsites_over_threads(const t_ilist   *ilist,
                                const t_iparams *ip,
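
vsite.h above now gives gmx_vsite_t owning members (std::unique_ptr<VsitePbc> and std::vector<std::unique_ptr<VsiteThread>>) while only forward-declaring VsiteThread; that is why the struct gains an explicitly declared constructor and destructor, defined in vsite.cpp where VsiteThread is complete. A standalone sketch of the idiom, with illustrative names rather than GROMACS types:

    // --- header part (like vsite.h) ---
    #include <memory>
    #include <vector>

    struct Worker;                                     // incomplete here, like VsiteThread

    struct Owner
    {
        Owner();                                       // defined out of line on purpose
        ~Owner();                                      // an implicit inline destructor would need Worker to be complete
        std::vector<std::unique_ptr<Worker>> workers;  // like gmx_vsite_t::tData
    };

    // --- source part (like vsite.cpp) ---
    struct Worker { int rangeStart = 0; };             // full definition available here

    Owner::Owner()  = default;
    Owner::~Owner() = default;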
diff --git a/src/gromacs/mdrun/runner.cpp b/src/gromacs/mdrun/runner.cpp
index 59cadf9613d9f6692bcbd3169fc5359de19ae8b2..82c96097f4250bb3071738d61a90d3822d985be7 100644
--- a/src/gromacs/mdrun/runner.cpp
+++ b/src/gromacs/mdrun/runner.cpp
@@ -386,7 +386,6 @@ int Mdrunner::mdrunner()
     t_fcdata                 *fcd              = nullptr;
     real                      ewaldcoeff_q     = 0;
     real                      ewaldcoeff_lj    = 0;
-    gmx_vsite_t              *vsite            = nullptr;
     int                       nChargePerturbed = -1, nTypePerturbed = 0;
     gmx_wallcycle_t           wcycle;
     gmx_walltime_accounting_t walltime_accounting = nullptr;
@@ -1113,7 +1112,8 @@ int Mdrunner::mdrunner()
         membed = init_membed(fplog, nfile, fnm, &mtop, inputrec, globalState.get(), cr, &mdrunOptions.checkpointOptions.period);
     }
 
-    std::unique_ptr<MDAtoms> mdAtoms;
+    std::unique_ptr<MDAtoms>     mdAtoms;
+    std::unique_ptr<gmx_vsite_t> vsite;
 
     snew(nrnb, 1);
     if (thisRankHasDuty(cr, DUTY_PP))
@@ -1299,7 +1299,7 @@ int Mdrunner::mdrunner()
             /* This call is not included in init_domain_decomposition mainly
              * because fr->cginfo_mb is set later.
              */
-            dd_init_bondeds(fplog, cr->dd, &mtop, vsite, inputrec,
+            dd_init_bondeds(fplog, cr->dd, &mtop, vsite.get(), inputrec,
                             domdecOptions.checkBondedInteractions,
                             fr->cginfo_mb);
         }
@@ -1309,7 +1309,7 @@ int Mdrunner::mdrunner()
             fplog, cr, ms, mdlog, nfile, fnm,
             oenv,
             mdrunOptions,
-            vsite, constr.get(),
+            vsite.get(), constr.get(),
             enforcedRotation ? enforcedRotation->getLegacyEnfrot() : nullptr,
             deform.get(),
             mdModules->outputProvider(),
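
In runner.cpp above, ownership of the vsite data moves into a std::unique_ptr local to Mdrunner::mdrunner(), and the unchanged raw-pointer interfaces are served with .get(). A minimal sketch of that bridging, with illustrative names:

    #include <memory>

    struct LegacyVsite { int nthreads = 1; };

    // Legacy interface kept as-is: it observes the object but does not own it.
    static void legacyConsumer(const LegacyVsite *vsite)
    {
        if (vsite != nullptr)
        {
            // ... use vsite->nthreads ...
        }
    }

    int main()
    {
        std::unique_ptr<LegacyVsite> vsite;      // stays empty when there are no vsites
        vsite = std::make_unique<LegacyVsite>();
        legacyConsumer(vsite.get());             // borrow; ownership stays with the caller
        return 0;                                // the unique_ptr frees the object here
    }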