Merge remote-tracking branch 'origin/release-2021' into merge-2021-into-master
author Andrey Alekseenko <al42and@gmail.com>
Thu, 19 Aug 2021 09:51:13 +0000 (12:51 +0300)
committer Andrey Alekseenko <al42and@gmail.com>
Thu, 19 Aug 2021 10:10:25 +0000 (13:10 +0300)
Resolved conflicts:
 - cmake/gmxVersionInfo.cmake
 - docs/CMakeLists.txt
 - python_packaging/src/gmxapi/export_system.cpp
 - python_packaging/src/setup.py
 - src/gromacs/domdec/domdec.cpp
 - src/gromacs/gmxana/gmx_chi.cpp

12 files changed:
cmake/gmxVersionInfo.cmake
docs/CMakeLists.txt
docs/release-notes/index.rst
python_packaging/src/CMakeLists.txt
python_packaging/src/gmxapi/export_system.cpp
python_packaging/src/setup.py
src/gromacs/domdec/domdec.cpp
src/gromacs/domdec/domdec.h
src/gromacs/domdec/domdec_internal.h
src/gromacs/gmxana/anadih.cpp
src/gromacs/gmxana/gmx_chi.cpp
src/gromacs/mdrun/runner.cpp

Simple merge
index 468c8ed31a43d6c7bcc1a145c76d69f5a9b37591,c43d24b279bee367e8b0a56f446a0a80b3e6093d..6eefe6ed5f2788a59b00f363389633587d196dec
@@@ -364,16 -364,7 +364,17 @@@ if (SPHINX_FOUND
          how-to/visualize.rst
          install-guide/index.rst
          release-notes/index.rst
 +        release-notes/2022/major/highlights.rst
 +        release-notes/2022/major/features.rst
 +        release-notes/2022/major/performance.rst
 +        release-notes/2022/major/tools.rst
 +        release-notes/2022/major/bugs-fixed.rst
 +        release-notes/2022/major/removed-functionality.rst
 +        release-notes/2022/major/deprecated-functionality.rst
 +        release-notes/2022/major/portability.rst
 +        release-notes/2022/major/miscellaneous.rst
 +        release-notes/2022/major/api.rst
+         release-notes/2021/2021.4.rst
          release-notes/2021/2021.3.rst
          release-notes/2021/2021.2.rst
          release-notes/2021/2021.1.rst
Simple merge
Simple merge
index b504ccb9e74599b62bc3af6d1c91e722ecd7b33f,273ad5087afe05000f2157bb144404c78b1958ce..39eeb91d78c3e0b5c0818aeb2e3a394b896290c0
@@@ -81,18 -81,10 +82,11 @@@ void export_system(py::module& m
  
      // Export system container class
      py::class_<System, std::shared_ptr<System>> system(m, "MDSystem");
-     system.def(
-             "launch",
-             [](System* system, std::shared_ptr<PyContext> context) {
-                 auto work       = gmxapi::getWork(*system->get());
-                 auto newSession = context->launch(*work);
-                 return newSession;
-             },
-             "Launch the configured workflow in the provided context.");
+     system.def("launch", &launch, "Launch the configured workflow in the provided context.");
  
      // Module-level function
 -    m.def("from_tpr", &gmxpy::from_tpr,
 +    m.def("from_tpr",
 +          &gmxpy::from_tpr,
            "Return a system container initialized from the given input record.");
  }
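The 2022 side of this conflict binds "launch" to a free function in place of the inline lambda removed above; that free function is defined elsewhere in export_system.cpp and is not shown in this hunk. Purely as an illustration of the binding pattern, here is a minimal, self-contained pybind11 sketch using hypothetical dummy types rather than the real gmxapi classes:

    // Illustration only: bind a free function (first parameter = the class type)
    // in place of an inline lambda, mirroring the hunk above. Dummy types, not gmxapi.
    #include <memory>
    #include <pybind11/pybind11.h>

    namespace py = pybind11;

    struct Session {};
    struct Context {};
    struct System {};

    // Free function with the same shape as the removed lambda: the bound object
    // is passed as the first argument.
    std::shared_ptr<Session> launch(System* /*system*/, std::shared_ptr<Context> /*context*/)
    {
        return std::make_shared<Session>();
    }

    PYBIND11_MODULE(example, m)
    {
        py::class_<Session, std::shared_ptr<Session>>(m, "Session");
        py::class_<Context, std::shared_ptr<Context>>(m, "Context");
        py::class_<System, std::shared_ptr<System>> system(m, "MDSystem");
        // Same pattern as the resolved line: a pointer to a free function
        // plus a docstring, instead of an inline lambda.
        system.def("launch", &launch, "Launch the configured workflow.");
    }
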
  
index 27f3408f4cb31e52e04e1b2486fd1b639160fe4c,b5826a16566451fe46c5c182a5eaaa3ae7a3ea46..025aea1c68199c4bef79eda3cd299083d90e225a
@@@ -87,7 -87,7 +87,7 @@@ exist in the same location (with differ
  used when guessing a toolchain, because setup.py does not know which corresponds
  to the gmxapi support library.
  
--If specifying GMXTOOLCHAINDIR and gmxapi_DIR, the tool chain directory must be 
++If specifying GMXTOOLCHAINDIR and gmxapi_DIR, the tool chain directory must be
  located within a subdirectory of gmxapi_DIR.
  
  Refer to project web site for complete documentation.
index 3324ca2feae691fa90c8fd2a96446acf9645ea3a,d75345d5927642325fd1ad0414ba74aaad046e64..3627ea02d6dba4cf196572d380554b653764b384
@@@ -706,9 -721,10 +706,10 @@@ static int ddcoord2simnodeid(const t_co
          }
          else
          {
-             if (cr->dd->comm->ddRankSetup.usePmeOnlyRanks)
+             const DDRankSetup& rankSetup = cr->dd->comm->ddRankSetup;
+             if (rankSetup.rankOrder != DdRankOrder::pp_pme && rankSetup.usePmeOnlyRanks)
              {
 -                nodeid = ddindex + gmx_ddcoord2pmeindex(cr, x, y, z);
 +                nodeid = ddindex + gmx_ddcoord2pmeindex(*cr->dd, x, y, z);
              }
              else
              {
@@@ -2970,11 -3012,11 +2974,12 @@@ DomainDecompositionBuilder::Impl::Impl(
  
      cr_->npmenodes = ddGridSetup_.numPmeOnlyRanks;
  
-     ddRankSetup_ = getDDRankSetup(mdlog_, cr_->sizeOfDefaultCommunicator, ddGridSetup_, ir_);
 -    ddRankSetup_ = getDDRankSetup(mdlog_, cr_->sizeOfDefaultCommunicator, options_.rankOrder,
 -                                  ddGridSetup_, ir_);
++    ddRankSetup_ = getDDRankSetup(
++            mdlog_, cr_->sizeOfDefaultCommunicator, options_.rankOrder, ddGridSetup_, ir_);
  
      /* Generate the group communicator, also decides the duty of each rank */
 -    cartSetup_ = makeGroupCommunicators(mdlog_, ddSettings_, ddRankSetup_, cr_, ddCellIndex_, &pmeRanks_);
 +    cartSetup_ = makeGroupCommunicators(
 +            mdlog_, ddSettings_, options_.rankOrder, ddRankSetup_, cr_, ddCellIndex_, &pmeRanks_);
  }
  
  gmx_domdec_t* DomainDecompositionBuilder::Impl::build(LocalAtomSetManager* atomSets)
@@@ -3212,31 -3222,41 +3217,70 @@@ void communicateGpuHaloForces(const t_c
      }
  }
  
 -        const auto& updateGrouping = dd.comm->systemInfo.updateGroupingPerMoleculetype[molblock.type];
 +const gmx::LocalTopologyChecker& dd_localTopologyChecker(const gmx_domdec_t& dd)
 +{
 +    return *dd.localTopologyChecker;
 +}
 +
 +gmx::LocalTopologyChecker* dd_localTopologyChecker(gmx_domdec_t* dd)
 +{
 +    return dd->localTopologyChecker.get();
 +}
 +
 +void dd_init_local_state(const gmx_domdec_t& dd, const t_state* state_global, t_state* state_local)
 +{
 +    std::array<int, 5> buf;
 +
 +    if (DDMASTER(dd))
 +    {
 +        buf[0] = state_global->flags;
 +        buf[1] = state_global->ngtc;
 +        buf[2] = state_global->nnhpres;
 +        buf[3] = state_global->nhchainlength;
 +        buf[4] = state_global->dfhist ? state_global->dfhist->nlambda : 0;
 +    }
 +    dd_bcast(&dd, buf.size() * sizeof(int), buf.data());
 +
 +    init_gtc_state(state_local, buf[1], buf[2], buf[3]);
 +    init_dfhist_state(state_local, buf[4]);
 +    state_local->flags = buf[0];
 +}
++
+ void putUpdateGroupAtomsInSamePeriodicImage(const gmx_domdec_t&      dd,
+                                             const gmx_mtop_t&        mtop,
+                                             const matrix             box,
+                                             gmx::ArrayRef<gmx::RVec> positions)
+ {
+     int atomOffset = 0;
+     for (const gmx_molblock_t& molblock : mtop.molblock)
+     {
++        const auto& updateGrouping = dd.comm->systemInfo.updateGroupingsPerMoleculeType[molblock.type];
+         for (int mol = 0; mol < molblock.nmol; mol++)
+         {
+             for (int g = 0; g < updateGrouping.numBlocks(); g++)
+             {
+                 const auto& block     = updateGrouping.block(g);
+                 const int   atomBegin = atomOffset + block.begin();
+                 const int   atomEnd   = atomOffset + block.end();
+                 for (int a = atomBegin + 1; a < atomEnd; a++)
+                 {
+                     // Make sure that atoms in the same update group
+                     // are in the same periodic image after restarts.
+                     for (int d = DIM - 1; d >= 0; d--)
+                     {
+                         while (positions[a][d] - positions[atomBegin][d] > 0.5_real * box[d][d])
+                         {
+                             positions[a] -= box[d];
+                         }
+                         while (positions[a][d] - positions[atomBegin][d] < -0.5_real * box[d][d])
+                         {
+                             positions[a] += box[d];
+                         }
+                     }
+                 }
+             }
+             atomOffset += updateGrouping.fullRange().end();
+         }
+     }
+ }
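The new putUpdateGroupAtomsInSamePeriodicImage above shifts each atom of an update group by whole box vectors until it lies in the same periodic image as the group's first atom. As an illustration only, here is a minimal, self-contained sketch of that wrapping rule for a single coordinate in one dimension of a rectangular box (the real code handles triclinic boxes and loops over all dimensions):

    // Illustration only: shift x by whole box lengths until it lies within half
    // a box length of the reference coordinate (rectangular box, one dimension).
    #include <cstdio>

    static double wrapToReference(double x, double xRef, double boxLength)
    {
        while (x - xRef > 0.5 * boxLength)
        {
            x -= boxLength;
        }
        while (x - xRef < -0.5 * boxLength)
        {
            x += boxLength;
        }
        return x;
    }

    int main()
    {
        // An atom at 9.8 nm, whose update-group reference atom is at 0.2 nm in a
        // 10 nm box, is mapped to -0.2 nm, i.e. into the same periodic image.
        std::printf("%g\n", wrapToReference(9.8, 0.2, 10.0));
        return 0;
    }
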
Simple merge
Simple merge
Simple merge
index 0abd978d194e4b0f3c4d20f97105441204848110,d049f74b8830713520d80f0c227db4e62bd04455..deed54bfdf0fb50b9b931a4e6d3ff918c0994a5a
@@@ -1680,20 -1607,8 +1681,20 @@@ int gmx_chi(int argc, char* argv[]
          }
          mk_chi_lookup(chi_lookup, maxchi, nlist, dlist);
  
 -        get_chi_product_traj(dih, nf, nlist, maxchi, dlist, time, chi_lookup, multiplicity, FALSE,
 -                             bNormHisto, core_frac, bAll, opt2fn("-cp", NFILE, fnm), oenv);
 +        get_chi_product_traj(dih,
 +                             nf,
-                              nactdih,
++                             nlist,
 +                             maxchi,
 +                             dlist,
 +                             time,
 +                             chi_lookup,
 +                             multiplicity,
 +                             FALSE,
 +                             bNormHisto,
 +                             core_frac,
 +                             bAll,
 +                             opt2fn("-cp", NFILE, fnm),
 +                             oenv);
  
          for (i = 0; i < nlist; i++)
          {
index e3eb7ff8f227766936c457fa2636eca9d310aa18,8d19c598c2a03a6baf8aa6a3b940af7ea1badd16..2462285bef078b77394308accdff5101000d2387
@@@ -1389,9 -1232,22 +1389,22 @@@ int Mdrunner::mdrunner(
          ddBuilder.reset(nullptr);
          // Note that local state still does not exist yet.
      }
+     // Ensure that all atoms within the same update group are in the
+     // same periodic image. Otherwise, a simulation that did not use
+     // update groups (e.g. a single-rank simulation) cannot always be
+     // correctly restarted in a way that does use update groups
+     // (e.g. a multi-rank simulation).
+     if (isSimulationMasterRank)
+     {
+         const bool useUpdateGroups = cr->dd ? ddUsesUpdateGroups(*cr->dd) : false;
+         if (useUpdateGroups)
+         {
+             putUpdateGroupAtomsInSamePeriodicImage(*cr->dd, mtop, globalState->box, globalState->x);
+         }
+     }
  
      // The GPU update is decided here because we need to know whether the constraints or
 -    // SETTLEs can span accross the domain borders (i.e. whether or not update groups are
 +    // SETTLEs can span across the domain borders (i.e. whether or not update groups are
      // defined). This is only known after DD is initialized, hence decision on using GPU
      // update is done so late.
      try