Use enum class for gmx_omp_nthreads
author     Joe Jordan <ejjordan12@gmail.com>
           Tue, 20 Apr 2021 09:58:52 +0000 (09:58 +0000)
committer  Joe Jordan <ejjordan12@gmail.com>
           Tue, 20 Apr 2021 09:58:52 +0000 (09:58 +0000)
46 files changed:
api/nblib/gmxsetup.cpp
src/gromacs/domdec/domdec.cpp
src/gromacs/domdec/domdec_constraints.cpp
src/gromacs/domdec/domdec_internal.h
src/gromacs/domdec/domdec_topology.cpp
src/gromacs/domdec/partition.cpp
src/gromacs/domdec/redistribute.cpp
src/gromacs/domdec/tests/.clang-tidy [new file with mode: 0644]
src/gromacs/ewald/pme_gpu.cpp
src/gromacs/ewald/pme_pp.cpp
src/gromacs/mdlib/calcmu.cpp
src/gromacs/mdlib/calcvir.cpp
src/gromacs/mdlib/constr.cpp
src/gromacs/mdlib/coupling.cpp
src/gromacs/mdlib/forcerec.cpp
src/gromacs/mdlib/gmx_omp_nthreads.cpp
src/gromacs/mdlib/gmx_omp_nthreads.h
src/gromacs/mdlib/lincs.cpp
src/gromacs/mdlib/md_support.cpp
src/gromacs/mdlib/mdatoms.cpp
src/gromacs/mdlib/sim_util.cpp
src/gromacs/mdlib/tests/constrtestrunners.cpp
src/gromacs/mdlib/tests/leapfrogtestrunners.cpp
src/gromacs/mdlib/update.cpp
src/gromacs/mdlib/vcm.cpp
src/gromacs/mdlib/vsite.cpp
src/gromacs/mdrun/minimize.cpp
src/gromacs/mdrun/runner.cpp
src/gromacs/mdrun/shellfc.cpp
src/gromacs/mdrun/tpi.cpp
src/gromacs/modularsimulator/propagator.cpp
src/gromacs/modularsimulator/statepropagatordata.cpp
src/gromacs/nbnxm/atomdata.cpp
src/gromacs/nbnxm/benchmark/bench_setup.cpp
src/gromacs/nbnxm/grid.cpp
src/gromacs/nbnxm/gridset.cpp
src/gromacs/nbnxm/kerneldispatch.cpp
src/gromacs/nbnxm/nbnxm_setup.cpp
src/gromacs/nbnxm/pairlist.cpp
src/gromacs/nbnxm/prunekerneldispatch.cpp
src/gromacs/pulling/pull.cpp
src/gromacs/pulling/pull_rotation.cpp
src/gromacs/pulling/tests/.clang-tidy [new file with mode: 0644]
src/gromacs/taskassignment/decidegpuusage.cpp
src/gromacs/taskassignment/resourcedivision.cpp
src/gromacs/taskassignment/tests/.clang-tidy [new file with mode: 0644]

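The change replaces the old C-style module_nth_t constants (emntDefault, emntPME, ...) with the scoped enum class ModuleMultiThread, whose trailing Count member lets the name tables be declared as gmx::EnumerationArray lookups instead of raw arrays indexed by int. The following minimal sketch is not part of the commit; it only illustrates that pattern, with std::array standing in for gmx::EnumerationArray and a reduced set of enumerators:

    #include <array>
    #include <cstdio>

    // Scoped replacement for the old emnt* constants; Count sizes the lookup tables.
    enum class ModuleMultiThread : int
    {
        Default,
        PME,
        Update,
        Count
    };

    // Same shape as the enumValueToString() helpers added in this commit,
    // with std::array standing in for gmx::EnumerationArray.
    static const char* enumValueToString(ModuleMultiThread enumValue)
    {
        static constexpr std::array<const char*, static_cast<int>(ModuleMultiThread::Count)> names = {
            "default", "PME", "update"
        };
        return names[static_cast<int>(enumValue)];
    }

    int main()
    {
        // Call sites now name the module explicitly, e.g.
        // gmx_omp_nthreads_get(ModuleMultiThread::Update) instead of gmx_omp_nthreads_get(emntUpdate).
        std::printf("module: %s\n", enumValueToString(ModuleMultiThread::Update));
        return 0;
    }
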
diff --git a/api/nblib/gmxsetup.cpp b/api/nblib/gmxsetup.cpp
index 007e0cf3dbefdd72707b3e4b7efbc2cf95012cef..955b8cc1db35d10a50f875b18aba964ae7051798 100644 (file)
@@ -103,8 +103,8 @@ NbvSetupUtil::NbvSetupUtil() : gmxForceCalculator_(std::make_unique<GmxForceCalc
 void NbvSetupUtil::setExecutionContext(const NBKernelOptions& options)
 {
     // Todo: find a more general way to initialize hardware
-    gmx_omp_nthreads_set(emntPairsearch, options.numOpenMPThreads);
-    gmx_omp_nthreads_set(emntNonbonded, options.numOpenMPThreads);
+    gmx_omp_nthreads_set(ModuleMultiThread::Pairsearch, options.numOpenMPThreads);
+    gmx_omp_nthreads_set(ModuleMultiThread::Nonbonded, options.numOpenMPThreads);
 }
 
 Nbnxm::KernelSetup NbvSetupUtil::getKernelSetup(const NBKernelOptions& options)
diff --git a/src/gromacs/domdec/domdec.cpp b/src/gromacs/domdec/domdec.cpp
index 4f7cab78622be62aed00e2b0406a9b1a1f61644d..be76b4852db58fdd571c70b2c5c118542b6de58b 100644 (file)
@@ -92,6 +92,7 @@
 #include "gromacs/utility/basedefinitions.h"
 #include "gromacs/utility/basenetwork.h"
 #include "gromacs/utility/cstringutil.h"
+#include "gromacs/utility/enumerationhelpers.h"
 #include "gromacs/utility/exceptions.h"
 #include "gromacs/utility/fatalerror.h"
 #include "gromacs/utility/gmxmpi.h"
@@ -119,7 +120,13 @@ using gmx::DdRankOrder;
 using gmx::DlbOption;
 using gmx::DomdecOptions;
 
-static const char* edlbs_names[int(DlbState::nr)] = { "off", "off", "auto", "locked", "on", "on" };
+static const char* enumValueToString(DlbState enumValue)
+{
+    static constexpr gmx::EnumerationArray<DlbState, const char*> dlbStateNames = {
+        "off", "off", "auto", "locked", "on", "on"
+    };
+    return dlbStateNames[enumValue];
+}
 
 /* The size per atom group of the cggl_flag buffer in gmx_domdec_comm_t */
 #define DD_CGIBS 2
@@ -1912,14 +1919,9 @@ static gmx_domdec_comm_t* init_dd_comm()
 /* Returns whether mtop contains constraints and/or vsites */
 static bool systemHasConstraintsOrVsites(const gmx_mtop_t& mtop)
 {
-    for (const auto ilists : IListRange(mtop))
-    {
-        if (!extractILists(ilists.list(), IF_CONSTRAINT | IF_VSITE).empty())
-        {
-            return true;
-        }
-    }
-    return false;
+    return std::any_of(IListRange(mtop).begin(), IListRange(mtop).end(), [](const auto ilist) {
+        return !extractILists(ilist.list(), IF_CONSTRAINT | IF_VSITE).empty();
+    });
 }
 
 static void setupUpdateGroups(const gmx::MDLogger& mdlog,
@@ -2880,7 +2882,7 @@ static DDSettings getDDSettings(const gmx::MDLogger&     mdlog,
             determineInitialDlbState(mdlog, options.dlbOption, ddSettings.recordLoad, mdrunOptions, ir);
     GMX_LOG(mdlog.info)
             .appendTextFormatted("Dynamic load balancing: %s",
-                                 edlbs_names[static_cast<int>(ddSettings.initialDlbState)]);
+                                 enumValueToString(ddSettings.initialDlbState));
 
     return ddSettings;
 }
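The systemHasConstraintsOrVsites() rewrite above is a plain loop-to-algorithm conversion. A self-contained sketch of the same pattern, with a toy Block type standing in for the IListRange entries:

    #include <algorithm>
    #include <vector>

    // Toy stand-in for the per-molecule-block interaction lists in gmx_mtop_t.
    struct Block
    {
        bool hasConstraintsOrVsites;
    };

    // The explicit "return true on first match" loop becomes std::any_of,
    // with the old loop body moved into the predicate.
    static bool systemHasConstraintsOrVsites(const std::vector<Block>& blocks)
    {
        return std::any_of(blocks.begin(), blocks.end(),
                           [](const Block& b) { return b.hasConstraintsOrVsites; });
    }
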
diff --git a/src/gromacs/domdec/domdec_constraints.cpp b/src/gromacs/domdec/domdec_constraints.cpp
index 9bc5a5378d87ff00ea18bca7cf274492b3cdd988..1ba64f5c9f6f1b0451af6a561ef10f11ca33a058 100644 (file)
@@ -597,7 +597,7 @@ void init_domdec_constraints(gmx_domdec_t* dd, const gmx_mtop_t& mtop)
     int numKeysEstimate = std::min(mtop.natoms / 20, mtop.natoms / (2 * dd->nnodes));
     dc->ga2la           = std::make_unique<gmx::HashedMap<int>>(numKeysEstimate);
 
-    dc->nthread = gmx_omp_nthreads_get(emntDomdec);
+    dc->nthread = gmx_omp_nthreads_get(ModuleMultiThread::Domdec);
     dc->ils.resize(dc->nthread);
 
     dd->constraint_comm = new gmx_domdec_specat_comm_t;
diff --git a/src/gromacs/domdec/domdec_internal.h b/src/gromacs/domdec/domdec_internal.h
index 41730c5c1b346842e10585ea02717727cac5d758..c0fe8f1bb17c7c083953b2f27a002f02dea4f848 100644 (file)
@@ -280,7 +280,7 @@ enum class DlbState
     offTemporarilyLocked, /**< DLB is off and temporarily can't turn on */
     onCanTurnOff,         /**< DLB is on and can turn off when slow */
     onUser,               /**< DLB is permanently on per user request */
-    nr                    /**< The number of DLB states */
+    Count                 /**< The number of DLB states */
 };
 
 /*! \brief The PME domain decomposition for one dimension */
diff --git a/src/gromacs/domdec/domdec_topology.cpp b/src/gromacs/domdec/domdec_topology.cpp
index 2380a813e83311e05df29b45d4952b78065aef1d..992573a7eb8f68ee5b4a92df62448c0cc74e13f8 100644 (file)
@@ -767,7 +767,7 @@ gmx_reverse_top_t::Impl::Impl(const gmx_mtop_t&        mtop,
         mbi.push_back(mbiMolblock);
     }
 
-    for (int th = 0; th < gmx_omp_nthreads_get(emntDomdec); th++)
+    for (int th = 0; th < gmx_omp_nthreads_get(ModuleMultiThread::Domdec); th++)
     {
         th_work.emplace_back(mtop.ffparams);
     }
diff --git a/src/gromacs/domdec/partition.cpp b/src/gromacs/domdec/partition.cpp
index 2aa7c35c971ab31db63640950601ffcbbc185bb2..6cd38c9f0161ca1306e8ddc9fe81e7800b82e512 100644 (file)
@@ -570,7 +570,7 @@ static void check_index_consistency(const gmx_domdec_t* dd, int natoms_sys, cons
     int ngl = 0;
     for (int i = 0; i < natoms_sys; i++)
     {
-        if (const auto entry = dd->ga2la->find(i))
+        if (const auto* entry = dd->ga2la->find(i))
         {
             const int a = entry->la;
             if (a >= numAtomsInZones)
@@ -1847,7 +1847,7 @@ static void setup_dd_communication(gmx_domdec_t* dd, matrix box, gmx_ddbox_t* dd
          * This can not be done in init_domain_decomposition,
          * as the numbers of threads is determined later.
          */
-        int numThreads = gmx_omp_nthreads_get(emntDomdec);
+        int numThreads = gmx_omp_nthreads_get(ModuleMultiThread::Domdec);
         comm->dth.resize(numThreads);
     }
 
@@ -3280,7 +3280,7 @@ void dd_partition_system(FILE*                     fplog,
     /* Update atom data for mdatoms and several algorithms */
     mdAlgorithmsSetupAtomData(cr, inputrec, top_global, top_local, fr, f, mdAtoms, constr, vsite, nullptr);
 
-    auto mdatoms = mdAtoms->mdatoms();
+    auto* mdatoms = mdAtoms->mdatoms();
     if (!thisRankHasDuty(cr, DUTY_PME))
     {
         /* Send the charges and/or c6/sigmas to our PME only node */
diff --git a/src/gromacs/domdec/redistribute.cpp b/src/gromacs/domdec/redistribute.cpp
index 53d5514c9481d7c47ee8d9073b16051b3e766beb..d47243969bd898afb7f154371e8744623d2a4a24 100644 (file)
@@ -617,7 +617,7 @@ void dd_redistribute_cg(FILE*         fplog,
     matrix tcm;
     make_tric_corr_matrix(npbcdim, state->box, tcm);
 
-    const int nthread = gmx_omp_nthreads_get(emntDomdec);
+    const int nthread = gmx_omp_nthreads_get(ModuleMultiThread::Domdec);
 
     /* Compute the center of geometry for all home charge groups
      * and put them in the box and determine where they should go.
diff --git a/src/gromacs/domdec/tests/.clang-tidy b/src/gromacs/domdec/tests/.clang-tidy
new file mode 100644 (file)
index 0000000..0adf51e
--- /dev/null
@@ -0,0 +1,91 @@
+# List of rationales for check suppressions (where known).
+# These have to precede the list because inline comments are not
+# supported by clang-tidy.
+#
+#         -cppcoreguidelines-non-private-member-variables-in-classes,
+#         -misc-non-private-member-variables-in-classes,
+# We intend a gradual transition to conform to this guideline, but it
+# is not practical to implement yet.
+#
+#         -readability-isolate-declaration,
+# Declarations like "int a, b;" are readable. Some forms are not, and
+# those might reasonably be suggested against during code review.
+#
+#         -cppcoreguidelines-avoid-c-arrays,
+# C arrays are still necessary in many places with legacy code
+#
+#         -cppcoreguidelines-avoid-magic-numbers,
+#         -readability-magic-numbers,
+# We have many legitimate use cases for magic numbers
+#
+#         -cppcoreguidelines-macro-usage,
+# We do use too many macros, and we should fix many of them, but there
+# is no reasonable way to suppress the check e.g. in src/config.h and
+# configuring the build is a major legitimate use of macros.
+#
+#         -cppcoreguidelines-narrowing-conversions,
+#         -bugprone-narrowing-conversions
+# We have many cases where int is converted to float and we don't care
+# enough about such potential loss of precision to use explicit casts
+# in large numbers of places.
+#
+#         -google-readability-avoid-underscore-in-googletest-name
+# We need to use underscores for readability for our legacy types
+# and command-line parameter names
+#
+#         -misc-no-recursion
+# We have way too many functions and methods relying on recursion
+#
+#         -cppcoreguidelines-avoid-non-const-global-variables
+# There are quite a lot of static variables in the test code that
+# can not be replaced.
+#
+#         -modernize-avoid-bind
+# Some code needs to use std::bind and can't be modernized quickly.
+Checks:  clang-diagnostic-*,-clang-analyzer-*,-clang-analyzer-security.insecureAPI.strcpy,
+         bugprone-*,misc-*,readability-*,performance-*,mpi-*,
+         -readability-inconsistent-declaration-parameter-name,
+         -readability-function-size,-readability-else-after-return,
+         modernize-use-nullptr,modernize-use-emplace,
+         modernize-make-unique,modernize-make-shared,
+         modernize-avoid-bind,
+         modernize-use-override,
+         modernize-redundant-void-arg,modernize-use-bool-literals,
+         cppcoreguidelines-*,-cppcoreguidelines-pro-*,-cppcoreguidelines-owning-memory,
+         -cppcoreguidelines-no-malloc,-cppcoreguidelines-special-member-functions,
+         -cppcoreguidelines-avoid-goto,
+         google-*,-google-build-using-namespace,-google-explicit-constructor,
+         -google-readability-function-size,-google-readability-todo,-google-runtime-int,
+         -cppcoreguidelines-non-private-member-variables-in-classes,
+         -misc-non-private-member-variables-in-classes,
+         -readability-isolate-declaration,
+         -cppcoreguidelines-avoid-c-arrays,
+         -cppcoreguidelines-avoid-magic-numbers,
+         -readability-magic-numbers,
+         -cppcoreguidelines-macro-usage,
+         -cppcoreguidelines-narrowing-conversions,
+         -bugprone-narrowing-conversions,
+         -google-readability-avoid-underscore-in-googletest-name,
+         -cppcoreguidelines-init-variables,
+         -misc-no-recursion,
+         -cppcoreguidelines-avoid-non-const-global-variables,
+         -modernize-avoid-bind
+HeaderFilterRegex: .*
+CheckOptions:
+  - key:           cppcoreguidelines-special-member-functions.AllowSoleDefaultDtor
+    value:         1
+  - key:           modernize-make-unique.IncludeStyle
+    value:         google
+  - key:           modernize-make-shared.IncludeStyle
+    value:         google
+  - key:           readability-implicit-bool-conversion.AllowIntegerConditions
+    value:         1
+  - key:           readability-implicit-bool-conversion.AllowPointerConditions
+    value:         1
+  - key:           bugprone-dangling-handle.HandleClasses
+    value:         std::basic_string_view; nonstd::sv_lite::basic_string_view
+# Permit passing shared pointers by value for sink parameters
+  - key:           performance-unnecessary-copy-initialization.AllowedTypes
+    value:         shared_ptr
+  - key:           performance-unnecessary-value-param.AllowedTypes
+    value:         shared_ptr
diff --git a/src/gromacs/ewald/pme_gpu.cpp b/src/gromacs/ewald/pme_gpu.cpp
index 564e213af9a7918fa4cc29992d040dd47685ff59..45c6da6b83f826d78dbb8b9305d6ab8b04035e65 100644 (file)
@@ -294,7 +294,7 @@ static void sum_forces(gmx::ArrayRef<gmx::RVec> f, gmx::ArrayRef<const gmx::RVec
 {
     const int end = forceToAdd.size();
 
-    int gmx_unused nt = gmx_omp_nthreads_get(emntPME);
+    int gmx_unused nt = gmx_omp_nthreads_get(ModuleMultiThread::PME);
 #pragma omp parallel for num_threads(nt) schedule(static)
     for (int i = 0; i < end; i++)
     {
diff --git a/src/gromacs/ewald/pme_pp.cpp b/src/gromacs/ewald/pme_pp.cpp
index 11ca5faf720ce072b7c85911a94d9204bf20cd72..dfed70034dcc9624f78f0f8077baafd9311fb0ce 100644 (file)
@@ -553,7 +553,7 @@ void gmx_pme_receive_f(gmx::PmePpCommGpu*    pmePpCommGpu,
     void* recvptr = reinterpret_cast<void*>(buffer.data());
     recvFFromPme(pmePpCommGpu, recvptr, natoms, cr, useGpuPmePpComms, receivePmeForceToGpu);
 
-    int nt = gmx_omp_nthreads_get_simple_rvec_task(emntDefault, natoms);
+    int nt = gmx_omp_nthreads_get_simple_rvec_task(ModuleMultiThread::Default, natoms);
 
     gmx::ArrayRef<gmx::RVec> f = forceWithVirial->force_;
 
diff --git a/src/gromacs/mdlib/calcmu.cpp b/src/gromacs/mdlib/calcmu.cpp
index 07b27aad0048a020d041d2777f5b9495add4abb4..8d7b897d34696ec37eaed4f65dd916617209d261 100644 (file)
@@ -65,7 +65,7 @@ void calc_mu(int                            start,
 
     mu_x = mu_y = mu_z = 0.0;
 #pragma omp parallel for reduction(+: mu_x, mu_y, mu_z) schedule(static) \
-    num_threads(gmx_omp_nthreads_get(emntDefault))
+    num_threads(gmx_omp_nthreads_get(ModuleMultiThread::Default))
     for (int i = start; i < end; i++)
     {
         // Trivial OpenMP region that cannot throw
@@ -86,7 +86,7 @@ void calc_mu(int                            start,
     {
         mu_x = mu_y = mu_z = 0.0;
 #pragma omp parallel for reduction(+: mu_x, mu_y, mu_z) schedule(static) \
-        num_threads(gmx_omp_nthreads_get(emntDefault))
+        num_threads(gmx_omp_nthreads_get(ModuleMultiThread::Default))
         for (int i = start; i < end; i++)
         {
             // Trivial OpenMP region that cannot throw
diff --git a/src/gromacs/mdlib/calcvir.cpp b/src/gromacs/mdlib/calcvir.cpp
index 42076ab2929ffcb48849099775fdb3b590b02068..6a9f1903fbad3215524837144d82752f60b1f547 100644 (file)
@@ -104,7 +104,7 @@ void calc_vir(int nxf, const rvec x[], const rvec f[], tensor vir, bool bScrewPB
 {
     matrix x_times_f;
 
-    int nthreads = gmx_omp_nthreads_get_simple_rvec_task(emntDefault, nxf * 9);
+    int nthreads = gmx_omp_nthreads_get_simple_rvec_task(ModuleMultiThread::Default, nxf * 9);
 
     GMX_ASSERT(nthreads >= 1, "Avoids uninitialized x_times_f (warning)");
 
diff --git a/src/gromacs/mdlib/constr.cpp b/src/gromacs/mdlib/constr.cpp
index dca3379ce84772ce7516badb877449754012460e..70559e6d35dd55377279bbe34c3da5146ace32a7 100644 (file)
@@ -459,7 +459,7 @@ bool Constraints::Impl::apply(bool                      bLog,
 
     if (nsettle > 0)
     {
-        nth = gmx_omp_nthreads_get(emntSETTLE);
+        nth = gmx_omp_nthreads_get(ModuleMultiThread::SETTLE);
     }
     else
     {
@@ -797,7 +797,7 @@ bool Constraints::Impl::apply(bool                      bLog,
             vRef = v.unpaddedArrayRef();
         }
 
-        int gmx_unused numThreads = gmx_omp_nthreads_get(emntUpdate);
+        int gmx_unused numThreads = gmx_omp_nthreads_get(ModuleMultiThread::Update);
 
 #pragma omp parallel for num_threads(numThreads) schedule(static)
         for (int i = 0; i < numHomeAtoms_; i++)
@@ -1200,7 +1200,7 @@ Constraints::Impl::Impl(const gmx_mtop_t&     mtop_p,
         }
 
         /* Allocate thread-local work arrays */
-        int nthreads = gmx_omp_nthreads_get(emntSETTLE);
+        int nthreads = gmx_omp_nthreads_get(ModuleMultiThread::SETTLE);
         if (nthreads > 1 && threadConstraintsVirial == nullptr)
         {
             snew(threadConstraintsVirial, nthreads);
diff --git a/src/gromacs/mdlib/coupling.cpp b/src/gromacs/mdlib/coupling.cpp
index 005376d1227899ecede350d47dc1874c83317f08..4a5bd1e606473280bdf6ab73b559c931e10990d9 100644 (file)
@@ -93,7 +93,8 @@ static const double sy_const_5[] = { 0.2967324292201065,
                                      0.2967324292201065,
                                      0.2967324292201065 };
 
-static const double* sy_const[] = { nullptr, sy_const_1, nullptr, sy_const_3, nullptr, sy_const_5 };
+static constexpr std::array<const double*, 6> sy_const = { nullptr,    sy_const_1, nullptr,
+                                                           sy_const_3, nullptr,    sy_const_5 };
 
 
 void update_tcouple(int64_t           step,
@@ -292,7 +293,7 @@ void update_pcouple_after_coordinates(FILE*                fplog,
                 /* Scale the coordinates */
                 if (scaleCoordinates)
                 {
-                    auto x = state->x.rvec_array();
+                    auto* x = state->x.rvec_array();
                     for (int n = start; n < start + homenr; n++)
                     {
                         tmvmul_ur0(pressureCouplingMu, x[n], x[n]);
@@ -1132,7 +1133,7 @@ void pressureCouplingScaleBoxAndCoordinates(const t_inputrec*    ir,
     /* Scale the positions and the velocities */
     if (scaleCoordinates)
     {
-        const int gmx_unused numThreads = gmx_omp_nthreads_get(emntUpdate);
+        const int gmx_unused numThreads = gmx_omp_nthreads_get(ModuleMultiThread::Update);
 #pragma omp parallel for num_threads(numThreads) schedule(static)
         for (int n = start; n < start + nr_atoms; n++)
         {
diff --git a/src/gromacs/mdlib/forcerec.cpp b/src/gromacs/mdlib/forcerec.cpp
index ad147cf911f32d7df327d998609b704b2c55349d..871e7cdc338ee523803c6626be63891072c43f94 100644 (file)
@@ -1025,7 +1025,7 @@ void init_forcerec(FILE*                            fplog,
             forcerec->listedForces.emplace_back(
                     mtop.ffparams,
                     mtop.groups.groups[SimulationAtomGroupType::EnergyOutput].size(),
-                    gmx_omp_nthreads_get(emntBonded),
+                    gmx_omp_nthreads_get(ModuleMultiThread::Bonded),
                     interactionSelection,
                     fplog);
         }
@@ -1036,7 +1036,7 @@ void init_forcerec(FILE*                            fplog,
         forcerec->listedForces.emplace_back(
                 mtop.ffparams,
                 mtop.groups.groups[SimulationAtomGroupType::EnergyOutput].size(),
-                gmx_omp_nthreads_get(emntBonded),
+                gmx_omp_nthreads_get(ModuleMultiThread::Bonded),
                 ListedForces::interactionSelectionAll(),
                 fplog);
     }
@@ -1061,7 +1061,7 @@ void init_forcerec(FILE*                            fplog,
 
     forcerec->print_force = print_force;
 
-    forcerec->nthread_ewc = gmx_omp_nthreads_get(emntBonded);
+    forcerec->nthread_ewc = gmx_omp_nthreads_get(ModuleMultiThread::Bonded);
     forcerec->ewc_t.resize(forcerec->nthread_ewc);
 
     if (inputrec.eDispCorr != DispersionCorrectionType::No)
diff --git a/src/gromacs/mdlib/gmx_omp_nthreads.cpp b/src/gromacs/mdlib/gmx_omp_nthreads.cpp
index d1979af8a3358e2719c53df5b7701dda78a80e95..881ea7c676b8a159b958dd1f2dd82d975c6fd6c4 100644 (file)
@@ -2,7 +2,7 @@
  * This file is part of the GROMACS molecular simulation package.
  *
  * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
- * Copyright (c) 2017,2018,2019,2020, by the GROMACS development team, led by
+ * Copyright (c) 2017,2018,2019,2020,2021, by the GROMACS development team, led by
  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
  * and including many others, as listed in the AUTHORS file in the
  * top-level source directory and at http://www.gromacs.org.
@@ -47,6 +47,7 @@
 #include "gromacs/gmxlib/network.h"
 #include "gromacs/mdtypes/commrec.h"
 #include "gromacs/utility/cstringutil.h"
+#include "gromacs/utility/enumerationhelpers.h"
 #include "gromacs/utility/fatalerror.h"
 #include "gromacs/utility/gmxassert.h"
 #include "gromacs/utility/gmxomp.h"
@@ -60,31 +61,43 @@ typedef struct
     int gnth;     /**< Global num. of threads per PP or PP+PME process/tMPI thread. */
     int gnth_pme; /**< Global num. of threads per PME only process/tMPI thread. */
 
-    int      nth[emntNR]; /**< Number of threads for each module, indexed with module_nth_t */
-    gmx_bool initialized; /**< TRUE if the module as been initialized. */
+    gmx::EnumerationArray<ModuleMultiThread, int> nth; /**< Number of threads for each module, indexed with ModuleMultiThread */
+    bool initialized; /**< True if the module has been initialized. */
 } omp_module_nthreads_t;
 
 /** Names of environment variables to set the per module number of threads.
  *
  *  Indexed with the values of module_nth_t.
  * */
-static const char* modth_env_var[emntNR] = { "GMX_DEFAULT_NUM_THREADS should never be set",
-                                             "GMX_DOMDEC_NUM_THREADS",
-                                             "GMX_PAIRSEARCH_NUM_THREADS",
-                                             "GMX_NONBONDED_NUM_THREADS",
-                                             "GMX_LISTED_FORCES_NUM_THREADS",
-                                             "GMX_PME_NUM_THREADS",
-                                             "GMX_UPDATE_NUM_THREADS",
-                                             "GMX_VSITE_NUM_THREADS",
-                                             "GMX_LINCS_NUM_THREADS",
-                                             "GMX_SETTLE_NUM_THREADS" };
+static const char* enumValueToEnvVariableString(ModuleMultiThread enumValue)
+{
+    constexpr gmx::EnumerationArray<ModuleMultiThread, const char*> moduleMultiThreadEnvVariableNames = {
+        "GMX_DEFAULT_NUM_THREADS should never be set",
+        "GMX_DOMDEC_NUM_THREADS",
+        "GMX_PAIRSEARCH_NUM_THREADS",
+        "GMX_NONBONDED_NUM_THREADS",
+        "GMX_LISTED_FORCES_NUM_THREADS",
+        "GMX_PME_NUM_THREADS",
+        "GMX_UPDATE_NUM_THREADS",
+        "GMX_VSITE_NUM_THREADS",
+        "GMX_LINCS_NUM_THREADS",
+        "GMX_SETTLE_NUM_THREADS"
+    };
+    return moduleMultiThreadEnvVariableNames[enumValue];
+}
 
 /** Names of the modules. */
-static const char* mod_name[emntNR] = { "default",     "domain decomposition",
-                                        "pair search", "non-bonded",
-                                        "bonded",      "PME",
-                                        "update",      "LINCS",
-                                        "SETTLE" };
+static const char* enumValueToString(ModuleMultiThread enumValue)
+{
+    constexpr gmx::EnumerationArray<ModuleMultiThread, const char*> moduleMultiThreadNames = {
+        "default",     "domain decomposition",
+        "pair search", "non-bonded",
+        "bonded",      "PME",
+        "update",      "LINCS",
+        "SETTLE"
+    };
+    return moduleMultiThreadNames[enumValue];
+}
 
 /** Number of threads for each algorithmic module.
  *
@@ -94,6 +107,7 @@ static const char* mod_name[emntNR] = { "default",     "domain decomposition",
  *  All fields are initialized to 0 which should result in errors if
  *  the init call is omitted.
  * */
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 static omp_module_nthreads_t modth = { 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0 }, FALSE };
 
 
@@ -106,7 +120,7 @@ static omp_module_nthreads_t modth = { 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0 }, FALS
  *  GMX_*_NUM_THERADS env var is set, case in which its value overrides
  *  the default.
  */
-static void pick_module_nthreads(const gmx::MDLogger& mdlog, int m, gmx_bool bSepPME)
+static void pick_module_nthreads(const gmx::MDLogger& mdlog, ModuleMultiThread m, gmx_bool bSepPME)
 {
     char* env;
     int   nth;
@@ -115,20 +129,20 @@ static void pick_module_nthreads(const gmx::MDLogger& mdlog, int m, gmx_bool bSe
 
     /* The default should never be set through a GMX_*_NUM_THREADS env var
      * as it's always equal with gnth. */
-    if (m == emntDefault)
+    if (m == ModuleMultiThread::Default)
     {
         return;
     }
 
     /* check the environment variable */
-    if ((env = getenv(modth_env_var[m])) != nullptr)
+    if ((env = getenv(enumValueToEnvVariableString(m))) != nullptr)
     {
         sscanf(env, "%d", &nth);
 
         if (!bOMP)
         {
             gmx_warning("%s=%d is set, but %s is compiled without OpenMP!",
-                        modth_env_var[m],
+                        enumValueToEnvVariableString(m),
                         nth,
                         gmx::getProgramContext().displayName());
         }
@@ -140,26 +154,26 @@ static void pick_module_nthreads(const gmx::MDLogger& mdlog, int m, gmx_bool bSe
             gmx_warning(
                     "%s=%d is set, the default number of threads also "
                     "needs to be set with OMP_NUM_THREADS!",
-                    modth_env_var[m],
+                    enumValueToEnvVariableString(m),
                     nth);
         }
 
         /* only babble if we are really overriding with a different value */
-        if ((bSepPME && m == emntPME && nth != modth.gnth_pme) || (nth != modth.gnth))
+        if ((bSepPME && m == ModuleMultiThread::PME && nth != modth.gnth_pme) || (nth != modth.gnth))
         {
             GMX_LOG(mdlog.warning)
                     .asParagraph()
                     .appendTextFormatted("%s=%d set, overriding the default number of %s threads",
-                                         modth_env_var[m],
+                                         enumValueToEnvVariableString(m),
                                          nth,
-                                         mod_name[m]);
+                                         enumValueToString(m));
         }
     }
     else
     {
         /* pick the global PME node nthreads if we are setting the number
          * of threads in separate PME nodes  */
-        nth = (bSepPME && m == emntPME) ? modth.gnth_pme : modth.gnth;
+        nth = (bSepPME && m == ModuleMultiThread::PME) ? modth.gnth_pme : modth.gnth;
     }
 
     gmx_omp_nthreads_set(m, nth);
@@ -335,16 +349,16 @@ static void manage_number_of_openmp_threads(const gmx::MDLogger& mdlog,
     }
 
     /* now set the per-module values */
-    modth.nth[emntDefault] = modth.gnth;
-    pick_module_nthreads(mdlog, emntDomdec, bSepPME);
-    pick_module_nthreads(mdlog, emntPairsearch, bSepPME);
-    pick_module_nthreads(mdlog, emntNonbonded, bSepPME);
-    pick_module_nthreads(mdlog, emntBonded, bSepPME);
-    pick_module_nthreads(mdlog, emntPME, bSepPME);
-    pick_module_nthreads(mdlog, emntUpdate, bSepPME);
-    pick_module_nthreads(mdlog, emntVSITE, bSepPME);
-    pick_module_nthreads(mdlog, emntLINCS, bSepPME);
-    pick_module_nthreads(mdlog, emntSETTLE, bSepPME);
+    modth.nth[ModuleMultiThread::Default] = modth.gnth;
+    pick_module_nthreads(mdlog, ModuleMultiThread::Domdec, bSepPME);
+    pick_module_nthreads(mdlog, ModuleMultiThread::Pairsearch, bSepPME);
+    pick_module_nthreads(mdlog, ModuleMultiThread::Nonbonded, bSepPME);
+    pick_module_nthreads(mdlog, ModuleMultiThread::Bonded, bSepPME);
+    pick_module_nthreads(mdlog, ModuleMultiThread::PME, bSepPME);
+    pick_module_nthreads(mdlog, ModuleMultiThread::Update, bSepPME);
+    pick_module_nthreads(mdlog, ModuleMultiThread::VSITE, bSepPME);
+    pick_module_nthreads(mdlog, ModuleMultiThread::LINCS, bSepPME);
+    pick_module_nthreads(mdlog, ModuleMultiThread::SETTLE, bSepPME);
 
     /* set the number of threads globally */
     if (bOMP)
@@ -477,9 +491,9 @@ void gmx_omp_nthreads_init(const gmx::MDLogger& mdlog,
     reportOpenmpSettings(mdlog, cr, bOMP, bSepPME);
 }
 
-int gmx_omp_nthreads_get(int mod)
+int gmx_omp_nthreads_get(ModuleMultiThread mod)
 {
-    if (mod < 0 || mod >= emntNR)
+    if (mod < ModuleMultiThread::Default || mod >= ModuleMultiThread::Count)
     {
         /* invalid module queried */
         return -1;
@@ -490,11 +504,12 @@ int gmx_omp_nthreads_get(int mod)
     }
 }
 
-void gmx_omp_nthreads_set(int mod, int nthreads)
+void gmx_omp_nthreads_set(ModuleMultiThread mod, int nthreads)
 {
     /* Catch an attempt to set the number of threads on an invalid
      * OpenMP module. */
-    GMX_RELEASE_ASSERT(mod >= 0 && mod < emntNR, "Trying to set nthreads on invalid OpenMP module");
+    GMX_RELEASE_ASSERT(mod >= ModuleMultiThread::Default && mod < ModuleMultiThread::Count,
+                       "Trying to set nthreads on invalid OpenMP module");
 
     modth.nth[mod] = nthreads;
 }
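As the doc comment on pick_module_nthreads() above describes, each module inherits the global thread count unless its GMX_*_NUM_THREADS environment variable overrides it. A simplified standalone sketch of that resolution logic (the warnings, separate-PME handling, and logging of the real function are omitted; the helper name is made up for illustration):

    #include <cstdio>
    #include <cstdlib>

    // Simplified stand-in for pick_module_nthreads(): use the per-module
    // environment variable when set, otherwise fall back to the global default.
    static int pickModuleThreadCount(const char* envVariableName, int globalDefault)
    {
        if (const char* env = std::getenv(envVariableName))
        {
            int nth = 0;
            if (std::sscanf(env, "%d", &nth) == 1 && nth > 0)
            {
                std::printf("%s=%d set, overriding the default of %d threads\n",
                            envVariableName, nth, globalDefault);
                return nth;
            }
        }
        return globalDefault;
    }

    int main()
    {
        // Running with GMX_PME_NUM_THREADS=4 in the environment reports an override.
        std::printf("PME threads: %d\n", pickModuleThreadCount("GMX_PME_NUM_THREADS", 8));
        return 0;
    }
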
diff --git a/src/gromacs/mdlib/gmx_omp_nthreads.h b/src/gromacs/mdlib/gmx_omp_nthreads.h
index 11b77a681bf1600f099d102ec6c7fc6b33033832..e42e4874eab2c01d15d28a1b6ee35db2ac097f3d 100644 (file)
@@ -2,7 +2,7 @@
  * This file is part of the GROMACS molecular simulation package.
  *
  * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
- * Copyright (c) 2017,2018,2019,2020, by the GROMACS development team, led by
+ * Copyright (c) 2017,2018,2019,2020,2021, by the GROMACS development team, led by
  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
  * and including many others, as listed in the AUTHORS file in the
  * top-level source directory and at http://www.gromacs.org.
@@ -49,22 +49,22 @@ class MDLogger;
 }
 
 /** Enum values corresponding to multithreaded algorithmic modules. */
-typedef enum module_nth
+enum class ModuleMultiThread : int
 {
     /* Default is meant to be used in OMP regions outside the named
      * algorithmic modules listed below. */
-    emntDefault,
-    emntDomdec,
-    emntPairsearch,
-    emntNonbonded,
-    emntBonded,
-    emntPME,
-    emntUpdate,
-    emntVSITE,
-    emntLINCS,
-    emntSETTLE,
-    emntNR
-} module_nth_t;
+    Default,
+    Domdec,
+    Pairsearch,
+    Nonbonded,
+    Bonded,
+    PME,
+    Update,
+    VSITE,
+    LINCS,
+    SETTLE,
+    Count
+};
 
 /*! \brief
  * Initializes the per-module thread count.
@@ -82,7 +82,7 @@ void gmx_omp_nthreads_init(const gmx::MDLogger& fplog,
 
 /*! \brief
  * Returns the number of threads to be used in the given module \p mod. */
-int gmx_omp_nthreads_get(int mod);
+int gmx_omp_nthreads_get(ModuleMultiThread mod);
 
 /*! \brief
  * Returns the number of threads to be used in the given module \p mod for simple rvec operations.
@@ -92,7 +92,7 @@ int gmx_omp_nthreads_get(int mod);
  * the reduction in computional cost due to parallelization. This routine
  * returns 1 when the overhead is expected to be higher than the gain.
  */
-static inline int gmx_omp_nthreads_get_simple_rvec_task(int mod, int nrvec)
+static inline int gmx_omp_nthreads_get_simple_rvec_task(ModuleMultiThread mod, int nrvec)
 {
     /* There can be a relatively large overhead to an OpenMP parallel for loop.
      * This overhead increases, slowly, with the numbe of threads used.
@@ -121,7 +121,7 @@ static inline int gmx_omp_nthreads_get_simple_rvec_task(int mod, int nrvec)
 /*! \brief Sets the number of threads to be used in module.
  *
  * Intended for use in testing. */
-void gmx_omp_nthreads_set(int mod, int nthreads);
+void gmx_omp_nthreads_set(ModuleMultiThread mod, int nthreads);
 
 /*! \brief
  * Read the OMP_NUM_THREADS env. var. and check against the value set on the
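The header documents the query side of the API; the call-site pattern that the rest of the commit converts is to fetch the per-module count and hand it to an OpenMP region. A short sketch of that pattern (not from the commit), assuming gmx_omp_nthreads_init() has already run and the header above is on the include path:

    #include "gromacs/mdlib/gmx_omp_nthreads.h"

    // Sketch of the recurring call-site pattern in this commit: query the
    // per-module thread count and use it to size an OpenMP parallel region.
    static void scaleCoordinatesSketch(float* x, int n, float factor)
    {
        const int nth = gmx_omp_nthreads_get(ModuleMultiThread::Update);
    #pragma omp parallel for num_threads(nth) schedule(static)
        for (int i = 0; i < n; i++)
        {
            x[i] *= factor; // Trivial OpenMP region that cannot throw.
        }
    }
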
diff --git a/src/gromacs/mdlib/lincs.cpp b/src/gromacs/mdlib/lincs.cpp
index 1bbfd8330e955c636c3c5dceb2e4b535f954f83d..24b6a63a4310c9667ee01292a16d7e44946891a5 100644 (file)
@@ -1505,7 +1505,7 @@ Lincs* init_lincs(FILE*                            fplog,
      * The current constraint to task assignment code can create independent
      * tasks only when not more than two constraints are connected sequentially.
      */
-    li->ntask    = gmx_omp_nthreads_get(emntLINCS);
+    li->ntask    = gmx_omp_nthreads_get(ModuleMultiThread::LINCS);
     li->bTaskDep = (li->ntask > 1 && bMoreThanTwoSeq);
     if (debug)
     {
diff --git a/src/gromacs/mdlib/md_support.cpp b/src/gromacs/mdlib/md_support.cpp
index b991293a5661b8daa4577e8c8b685c7617f30913..31ba183021bba8eac1d6fa9b5b51f5c76fafe8b1 100644 (file)
@@ -111,7 +111,7 @@ static void calc_ke_part_normal(gmx::ArrayRef<const gmx::RVec> v,
         }
     }
     ekind->dekindl_old = ekind->dekindl;
-    int nthread        = gmx_omp_nthreads_get(emntUpdate);
+    int nthread        = gmx_omp_nthreads_get(ModuleMultiThread::Update);
 
 #pragma omp parallel for num_threads(nthread) schedule(static)
     for (int thread = 0; thread < nthread; thread++)
diff --git a/src/gromacs/mdlib/mdatoms.cpp b/src/gromacs/mdlib/mdatoms.cpp
index 025634b9d8ec829fc5d01f8ed10924da2bf0a6b1..82ecd24b4b0937e50c0d021c4dbf8e26bae06c22 100644 (file)
@@ -225,7 +225,7 @@ void atoms2md(const gmx_mtop_t&  mtop,
 
     const SimulationGroups& groups = mtop.groups;
 
-    auto md = mdAtoms->mdatoms();
+    auto* md = mdAtoms->mdatoms();
     /* nindex>=0 indicates DD where we use an index */
     if (nindex >= 0)
     {
@@ -322,7 +322,7 @@ void atoms2md(const gmx_mtop_t&  mtop,
 
     int molb = 0;
 
-    nthreads = gmx_omp_nthreads_get(emntDefault);
+    nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Default);
 #pragma omp parallel for num_threads(nthreads) schedule(static) firstprivate(molb)
     for (int i = 0; i < md->nr; i++)
     {
@@ -519,7 +519,7 @@ void update_mdatoms(t_mdatoms* md, real lambda)
         real L1 = 1 - lambda;
 
         /* Update masses of perturbed atoms for the change in lambda */
-        int gmx_unused nthreads = gmx_omp_nthreads_get(emntDefault);
+        int gmx_unused nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Default);
 #pragma omp parallel for num_threads(nthreads) schedule(static)
         for (int i = 0; i < md->nr; i++)
         {
diff --git a/src/gromacs/mdlib/sim_util.cpp b/src/gromacs/mdlib/sim_util.cpp
index 105245ebfdd6c037b8d3c7c331507bdfb11d5e07..58a405608ccf6f1c7828bcd8e56a1e5aa43e602f 100644 (file)
@@ -146,7 +146,7 @@ static void sum_forces(ArrayRef<RVec> f, ArrayRef<const RVec> forceToAdd)
     GMX_ASSERT(f.size() >= forceToAdd.size(), "Accumulation buffer should be sufficiently large");
     const int end = forceToAdd.size();
 
-    int gmx_unused nt = gmx_omp_nthreads_get(emntDefault);
+    int gmx_unused nt = gmx_omp_nthreads_get(ModuleMultiThread::Default);
 #pragma omp parallel for num_threads(nt) schedule(static)
     for (int i = 0; i < end; i++)
     {
@@ -449,7 +449,7 @@ static void do_nb_verlet(t_forcerec*                fr,
 
 static inline void clearRVecs(ArrayRef<RVec> v, const bool useOpenmpThreading)
 {
-    int nth = gmx_omp_nthreads_get_simple_rvec_task(emntDefault, v.ssize());
+    int nth = gmx_omp_nthreads_get_simple_rvec_task(ModuleMultiThread::Default, v.ssize());
 
     /* Note that we would like to avoid this conditional by putting it
      * into the omp pragma instead, but then we still take the full
@@ -1087,7 +1087,7 @@ static void combineMtsForces(const int      numAtoms,
                              ArrayRef<RVec> forceMts,
                              const real     mtsFactor)
 {
-    const int gmx_unused numThreads = gmx_omp_nthreads_get(emntDefault);
+    const int gmx_unused numThreads = gmx_omp_nthreads_get(ModuleMultiThread::Default);
 #pragma omp parallel for num_threads(numThreads) schedule(static)
     for (int i = 0; i < numAtoms; i++)
     {
@@ -1261,7 +1261,7 @@ void do_force(FILE*                               fplog,
             put_atoms_in_box_omp(fr->pbcType,
                                  box,
                                  x.unpaddedArrayRef().subArray(0, mdatoms->homenr),
-                                 gmx_omp_nthreads_get(emntDefault));
+                                 gmx_omp_nthreads_get(ModuleMultiThread::Default));
             inc_nrnb(nrnb, eNR_SHIFTX, mdatoms->homenr);
         }
     }
@@ -1273,10 +1273,10 @@ void do_force(FILE*                               fplog,
     const bool reinitGpuPmePpComms =
             GMX_MPI && simulationWork.useGpuPmePpCommunication && (stepWork.doNeighborSearch);
 
-    const auto localXReadyOnDevice = (useGpuPmeOnThisRank || simulationWork.useGpuBufferOps)
-                                             ? stateGpu->getCoordinatesReadyOnDeviceEvent(
-                                                       AtomLocality::Local, simulationWork, stepWork)
-                                             : nullptr;
+    auto* localXReadyOnDevice = (useGpuPmeOnThisRank || simulationWork.useGpuBufferOps)
+                                              ? stateGpu->getCoordinatesReadyOnDeviceEvent(
+                                                        AtomLocality::Local, simulationWork, stepWork)
+                                              : nullptr;
 
     // Copy coordinate from the GPU if update is on the GPU and there
     // are forces to be computed on the CPU, or for the computation of
diff --git a/src/gromacs/mdlib/tests/constrtestrunners.cpp b/src/gromacs/mdlib/tests/constrtestrunners.cpp
index 32ebfbeb5541b175845c4013938f785ca877c53f..e2fa3db58aa32d67c7f7fc26d337fcfccf61389d 100644 (file)
@@ -111,7 +111,7 @@ void LincsConstraintsRunner::applyConstraints(ConstraintsTestData* testData, t_p
     Lincs* lincsd;
     int    maxwarn         = 100;
     int    warncount_lincs = 0;
-    gmx_omp_nthreads_set(emntLINCS, 1);
+    gmx_omp_nthreads_set(ModuleMultiThread::LINCS, 1);
 
     // Communication record
     t_commrec cr;
diff --git a/src/gromacs/mdlib/tests/leapfrogtestrunners.cpp b/src/gromacs/mdlib/tests/leapfrogtestrunners.cpp
index 8bc5f7f807f29e27b7db59ef6187228dd3d7a501..e2a38f88eb047062e458cce624ff6a79d2f9c8c4 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This file is part of the GROMACS molecular simulation package.
  *
- * Copyright (c) 2019,2020, by the GROMACS development team, led by
+ * Copyright (c) 2019,2020,2021, by the GROMACS development team, led by
  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
  * and including many others, as listed in the AUTHORS file in the
  * top-level source directory and at http://www.gromacs.org.
@@ -67,7 +67,7 @@ void LeapFrogHostTestRunner::integrate(LeapFrogTestData* testData, int numSteps)
         testData->state_.v[i] = testData->v_[i];
     }
 
-    gmx_omp_nthreads_set(emntUpdate, 1);
+    gmx_omp_nthreads_set(ModuleMultiThread::Update, 1);
 
     for (int step = 0; step < numSteps; step++)
     {
diff --git a/src/gromacs/mdlib/update.cpp b/src/gromacs/mdlib/update.cpp
index a2876a3123240d047142a32b83da41582d48cce3..e24ef4aeafb6af8465f3fc70847ad8c4a8b022ee 100644 (file)
@@ -1417,7 +1417,7 @@ void Update::Impl::update_sd_second_half(const t_inputrec&                 input
 
         wallcycle_start(wcycle, WallCycleCounter::Update);
 
-        int nth = gmx_omp_nthreads_get(emntUpdate);
+        int nth = gmx_omp_nthreads_get(ModuleMultiThread::Update);
 
 #pragma omp parallel for num_threads(nth) schedule(static)
         for (int th = 0; th < nth; th++)
@@ -1512,7 +1512,7 @@ void Update::Impl::finish_update(const t_inputrec& inputRecord,
         /* We have no frozen atoms or fully frozen atoms which have not
          * been moved by the update, so we can simply copy all coordinates.
          */
-        int gmx_unused nth = gmx_omp_nthreads_get(emntUpdate);
+        int gmx_unused nth = gmx_omp_nthreads_get(ModuleMultiThread::Update);
 #pragma omp parallel for num_threads(nth) schedule(static)
         for (int i = 0; i < homenr; i++)
         {
@@ -1560,7 +1560,7 @@ void Update::Impl::update_coords(const t_inputrec&                 inputRecord,
     }
 
     /* ############# START The update of velocities and positions ######### */
-    int nth = gmx_omp_nthreads_get(emntUpdate);
+    int nth = gmx_omp_nthreads_get(ModuleMultiThread::Update);
 
 #pragma omp parallel for num_threads(nth) schedule(static)
     for (int th = 0; th < nth; th++)
@@ -1703,7 +1703,7 @@ void Update::Impl::update_for_constraint_virial(const t_inputrec&   inputRecord,
     // Cast to real for faster code, no loss in precision
     const real dt = inputRecord.delta_t;
 
-    const int nth = gmx_omp_nthreads_get(emntUpdate);
+    const int nth = gmx_omp_nthreads_get(ModuleMultiThread::Update);
 
 #pragma omp parallel for num_threads(nth) schedule(static)
     for (int th = 0; th < nth; th++)
diff --git a/src/gromacs/mdlib/vcm.cpp b/src/gromacs/mdlib/vcm.cpp
index da4ee32808f5239af56b0f9800a539111cc5006a..a8f6f0edbadcf50b127fdfdb827c050a2b45e706 100644 (file)
@@ -97,7 +97,7 @@ t_vcm::t_vcm(const SimulationGroups& groups, const t_inputrec& ir) :
                     *groups.groupNames[groups.groups[SimulationAtomGroupType::MassCenterVelocityRemoval][g]];
         }
 
-        thread_vcm.resize(gmx_omp_nthreads_get(emntDefault) * stride);
+        thread_vcm.resize(gmx_omp_nthreads_get(ModuleMultiThread::Default) * stride);
     }
 
     nFreeze = ir.opts.nFreeze;
@@ -160,7 +160,7 @@ void calc_vcm_grp(const t_mdatoms&               md,
     {
         return;
     }
-    int nthreads = gmx_omp_nthreads_get(emntDefault);
+    int nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Default);
 
     {
 #pragma omp parallel num_threads(nthreads) default(none) shared(x, v, vcm, md)
@@ -364,7 +364,7 @@ static void do_stopcm_grp(const t_mdatoms&         mdatoms,
         const int             homenr   = mdatoms.homenr;
         const unsigned short* group_id = mdatoms.cVCM;
 
-        int gmx_unused nth = gmx_omp_nthreads_get(emntDefault);
+        int gmx_unused nth = gmx_omp_nthreads_get(ModuleMultiThread::Default);
         // homenr could be shared, but gcc-8 & gcc-9 don't agree how to write that...
         // https://www.gnu.org/software/gcc/gcc-9/porting_to.html -> OpenMP data sharing
 #pragma omp parallel num_threads(nth) default(none) shared(x, v, vcm, group_id, mdatoms) \
diff --git a/src/gromacs/mdlib/vsite.cpp b/src/gromacs/mdlib/vsite.cpp
index 9be178c243ddc82bbde88639c1c54b1df3f651ad..0fed027437d7e69d0f9d557f2baa42568544db91 100644 (file)
@@ -2606,7 +2606,7 @@ std::unique_ptr<VirtualSitesHandler> makeVirtualSitesHandler(const gmx_mtop_t& m
     return std::make_unique<VirtualSitesHandler>(mtop, cr->dd, pbcType);
 }
 
-ThreadingInfo::ThreadingInfo() : numThreads_(gmx_omp_nthreads_get(emntVSITE))
+ThreadingInfo::ThreadingInfo() : numThreads_(gmx_omp_nthreads_get(ModuleMultiThread::VSITE))
 {
     if (numThreads_ > 1)
     {
diff --git a/src/gromacs/mdrun/minimize.cpp b/src/gromacs/mdrun/minimize.cpp
index 2725353d5c0cf8bba0a6db93c12450eb1e4e2b72..b115acdbec8a0f8740e15461362d531e2109186a 100644 (file)
@@ -670,7 +670,7 @@ static bool do_em_step(const t_commrec*                          cr,
     start = 0;
     end   = md->homenr;
 
-    nthreads = gmx_omp_nthreads_get(emntUpdate);
+    nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Update);
 #pragma omp parallel num_threads(nthreads)
     {
         const rvec* x1 = s1->x.rvec_array();
@@ -828,7 +828,7 @@ void setCoordinates(std::vector<RVec>* coords, ArrayRef<const RVec> refCoords)
 {
     coords->resize(refCoords.size());
 
-    const int gmx_unused nthreads = gmx_omp_nthreads_get(emntUpdate);
+    const int gmx_unused nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Update);
 #pragma omp parallel for num_threads(nthreads) schedule(static)
     for (int i = 0; i < ssize(refCoords); i++)
     {
@@ -843,7 +843,7 @@ real maxCoordinateDifference(ArrayRef<const RVec> coords1, ArrayRef<const RVec>
 
     real maxDiffSquared = 0;
 
-    const int gmx_unused nthreads = gmx_omp_nthreads_get(emntUpdate);
+    const int gmx_unused nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Update);
 #pragma omp parallel for reduction(max : maxDiffSquared) num_threads(nthreads) schedule(static)
     for (int i = 0; i < ssize(coords1); i++)
     {
@@ -1243,7 +1243,7 @@ void LegacySimulator::do_cg()
     tensor            vir, pres;
     int               number_steps, neval = 0, nstcg = inputrec->nstcgsteep;
     int               m, step, nminstep;
-    auto              mdatoms = mdAtoms->mdatoms();
+    auto*             mdatoms = mdAtoms->mdatoms();
 
     GMX_LOG(mdlog.info)
             .asParagraph()
@@ -1935,7 +1935,7 @@ void LegacySimulator::do_lbfgs()
     em_state_t         ems;
     gmx_localtop_t     top(top_global.ffparams);
     gmx_global_stat_t  gstat;
-    auto               mdatoms = mdAtoms->mdatoms();
+    auto*              mdatoms = mdAtoms->mdatoms();
 
     GMX_LOG(mdlog.info)
             .asParagraph()
@@ -2746,7 +2746,7 @@ void LegacySimulator::do_steep()
     int               nsteps;
     int               count          = 0;
     int               steps_accepted = 0;
-    auto              mdatoms        = mdAtoms->mdatoms();
+    auto*             mdatoms        = mdAtoms->mdatoms();
 
     GMX_LOG(mdlog.info)
             .asParagraph()
@@ -3064,11 +3064,11 @@ void LegacySimulator::do_nm()
     real*               full_matrix   = nullptr;
 
     /* added with respect to mdrun */
-    int  row, col;
-    real der_range = 10.0 * std::sqrt(GMX_REAL_EPS);
-    real x_min;
-    bool bIsMaster = MASTER(cr);
-    auto mdatoms   = mdAtoms->mdatoms();
+    int   row, col;
+    real  der_range = 10.0 * std::sqrt(GMX_REAL_EPS);
+    real  x_min;
+    bool  bIsMaster = MASTER(cr);
+    auto* mdatoms   = mdAtoms->mdatoms();
 
     GMX_LOG(mdlog.info)
             .asParagraph()
diff --git a/src/gromacs/mdrun/runner.cpp b/src/gromacs/mdrun/runner.cpp
index 0c8a98250955db256bf0fe79b775103d1da5474f..21876ff0c7d7520ef3b0289e090853706d7fd5bc 100644 (file)
@@ -439,7 +439,7 @@ static void mdrunner_start_fn(const void* arg)
 {
     try
     {
-        auto masterMdrunner = reinterpret_cast<const gmx::Mdrunner*>(arg);
+        const auto* masterMdrunner = reinterpret_cast<const gmx::Mdrunner*>(arg);
         /* copy the arg list to make sure that it's thread-local. This
            doesn't copy pointed-to items, of course; fnm, cr and fplog
            are reset in the call below, all others should be const. */
@@ -752,15 +752,15 @@ static void finish_run(FILE*                     fplog,
      * to the code that handled the thread region, so that there's a
      * mechanism to keep cycle counting working during the transition
      * to task parallelism. */
-    int nthreads_pp  = gmx_omp_nthreads_get(emntNonbonded);
-    int nthreads_pme = gmx_omp_nthreads_get(emntPME);
+    int nthreads_pp  = gmx_omp_nthreads_get(ModuleMultiThread::Nonbonded);
+    int nthreads_pme = gmx_omp_nthreads_get(ModuleMultiThread::PME);
     wallcycle_scale_by_num_threads(
             wcycle, thisRankHasDuty(cr, DUTY_PME) && !thisRankHasDuty(cr, DUTY_PP), nthreads_pp, nthreads_pme);
     auto cycle_sum(wallcycle_sum(cr, wcycle));
 
     if (printReport)
     {
-        auto nbnxn_gpu_timings =
+        auto* nbnxn_gpu_timings =
                 (nbv != nullptr && nbv->useGpu()) ? Nbnxm::gpu_get_timings(nbv->gpu_nbv) : nullptr;
         gmx_wallclock_gpu_pme_t pme_gpu_timings = {};
 
@@ -1563,8 +1563,9 @@ int Mdrunner::mdrunner()
        PME: env variable should be read only on one node to make sure it is
        identical everywhere;
      */
-    const int numThreadsOnThisRank = thisRankHasDuty(cr, DUTY_PP) ? gmx_omp_nthreads_get(emntNonbonded)
-                                                                  : gmx_omp_nthreads_get(emntPME);
+    const int numThreadsOnThisRank = thisRankHasDuty(cr, DUTY_PP)
+                                             ? gmx_omp_nthreads_get(ModuleMultiThread::Nonbonded)
+                                             : gmx_omp_nthreads_get(ModuleMultiThread::PME);
     checkHardwareOversubscription(
             numThreadsOnThisRank, cr->nodeid, *hwinfo_->hardwareTopology, physicalNodeComm, mdlog);
 
@@ -1837,7 +1838,7 @@ int Mdrunner::mdrunner()
                                        mdrunOptions.reproducible,
                                        ewaldcoeff_q,
                                        ewaldcoeff_lj,
-                                       gmx_omp_nthreads_get(emntPME),
+                                       gmx_omp_nthreads_get(ModuleMultiThread::PME),
                                        pmeRunMode,
                                        nullptr,
                                        deviceContext,
@@ -1864,7 +1865,7 @@ int Mdrunner::mdrunner()
     if (thisRankHasDuty(cr, DUTY_PP))
     {
         /* Assumes uniform use of the number of OpenMP threads */
-        walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(emntDefault));
+        walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(ModuleMultiThread::Default));
 
         if (inputrec->bPull)
         {
@@ -1934,7 +1935,9 @@ int Mdrunner::mdrunner()
                            "cos_acceleration is only supported by integrator=md");
 
         /* Kinetic energy data */
-        gmx_ekindata_t ekind(inputrec->opts.ngtc, inputrec->cos_accel, gmx_omp_nthreads_get(emntUpdate));
+        gmx_ekindata_t ekind(inputrec->opts.ngtc,
+                             inputrec->cos_accel,
+                             gmx_omp_nthreads_get(ModuleMultiThread::Update));
 
         /* Set up interactive MD (IMD) */
         auto imdSession = makeImdSession(inputrec.get(),
@@ -2041,7 +2044,7 @@ int Mdrunner::mdrunner()
     {
         GMX_RELEASE_ASSERT(pmedata, "pmedata was NULL while cr->duty was not DUTY_PP");
         /* do PME only */
-        walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(emntPME));
+        walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(ModuleMultiThread::PME));
         gmx_pmeonly(pmedata,
                     cr,
                     &nrnb,
diff --git a/src/gromacs/mdrun/shellfc.cpp b/src/gromacs/mdrun/shellfc.cpp
index af47d79566dfe08425692341b5a65f473dd0146b..3c000e1addacb8d365d04d18ba986c2f0f078a25 100644 (file)
@@ -558,7 +558,7 @@ void gmx::make_local_shells(const t_commrec* cr, const t_mdatoms& md, gmx_shellf
 
     std::vector<t_shell>& shells = shfc->shells;
     shells.clear();
-    auto ptype = md.ptype;
+    auto* ptype = md.ptype;
     for (int i = a0; i < a1; i++)
     {
         if (ptype[i] == ParticleType::Shell)
@@ -848,9 +848,9 @@ static void init_adir(gmx_shellfc_t*            shfc,
     rvec* x_old = as_rvec_array(xOld.paddedArrayRef().data());
     rvec* x     = as_rvec_array(xCurrent.paddedArrayRef().data());
 
-    auto ptype   = md.ptype;
-    auto invmass = gmx::arrayRefFromArray(md.invmass, md.nr);
-    dt           = ir->delta_t;
+    auto* ptype   = md.ptype;
+    auto  invmass = gmx::arrayRefFromArray(md.invmass, md.nr);
+    dt            = ir->delta_t;
 
     /* Does NOT work with freeze groups (yet) */
     for (n = 0; n < end; n++)
@@ -1024,7 +1024,8 @@ void relax_shell_flexcon(FILE*                         fplog,
          * before do_force is called, which normally puts all
          * charge groups in the box.
          */
-        put_atoms_in_box_omp(fr->pbcType, box, x.subArray(0, md.homenr), gmx_omp_nthreads_get(emntDefault));
+        put_atoms_in_box_omp(
+                fr->pbcType, box, x.subArray(0, md.homenr), gmx_omp_nthreads_get(ModuleMultiThread::Default));
     }
 
     if (nflexcon)
diff --git a/src/gromacs/mdrun/tpi.cpp b/src/gromacs/mdrun/tpi.cpp
index 758e2ae6917d9adc9162469b77a7b86724538b5c..687908ebe5195fa2587dde30af1905ada73a5dfa 100644 (file)
@@ -163,7 +163,8 @@ namespace gmx
 // TODO: Convert to use the nbnxm kernels by putting the system and the teset molecule on two separate search grids
 void LegacySimulator::do_tpi()
 {
-    GMX_RELEASE_ASSERT(gmx_omp_nthreads_get(emntDefault) == 1, "TPI does not support OpenMP");
+    GMX_RELEASE_ASSERT(gmx_omp_nthreads_get(ModuleMultiThread::Default) == 1,
+                       "TPI does not support OpenMP");
 
     gmx_localtop_t    top(top_global.ffparams);
     gmx::ForceBuffers f;
@@ -191,7 +192,7 @@ void LegacySimulator::do_tpi()
     double            invbinw, *bin, refvolshift, logV, bUlogV;
     gmx_bool          bEnergyOutOfBounds;
     const char*       tpid_leg[2] = { "direct", "reweighted" };
-    auto              mdatoms     = mdAtoms->mdatoms();
+    auto*             mdatoms     = mdAtoms->mdatoms();
 
     GMX_UNUSED_VALUE(outputProvider);
 
diff --git a/src/gromacs/modularsimulator/propagator.cpp b/src/gromacs/modularsimulator/propagator.cpp
index 2c08f9788348096e813fb73deead23893112e6d2..476d74c3a3ec1161a17ebd0f8b7c8e03aa3f888f 100644 (file)
@@ -196,7 +196,7 @@ void Propagator<IntegrationStage::PositionsOnly>::run()
     auto x  = as_rvec_array(statePropagatorData_->constPositionsView().paddedArrayRef().data());
     auto v  = as_rvec_array(statePropagatorData_->constVelocitiesView().paddedArrayRef().data());
 
-    int nth    = gmx_omp_nthreads_get(emntUpdate);
+    int nth    = gmx_omp_nthreads_get(ModuleMultiThread::Update);
     int homenr = mdAtoms_->mdatoms()->homenr;
 
 #pragma omp parallel for num_threads(nth) schedule(static) default(none) shared(nth, homenr, x, xp, v)
@@ -232,7 +232,7 @@ void Propagator<IntegrationStage::ScalePositions>::run()
     const real lambda =
             (numPositionScalingValues == NumPositionScalingValues::Single) ? positionScaling_[0] : 1.0;
 
-    int nth    = gmx_omp_nthreads_get(emntUpdate);
+    int nth    = gmx_omp_nthreads_get(ModuleMultiThread::Update);
     int homenr = mdAtoms_->mdatoms()->homenr;
 
 #pragma omp parallel for num_threads(nth) schedule(static) default(none) shared(nth, homenr, x) \
@@ -283,7 +283,7 @@ void Propagator<IntegrationStage::VelocitiesOnly>::run()
     const bool isFullScalingMatrixDiagonal =
             diagonalizePRMatrix<parrinelloRahmanVelocityScaling>(matrixPR_, diagPR_);
 
-    const int nth    = gmx_omp_nthreads_get(emntUpdate);
+    const int nth    = gmx_omp_nthreads_get(ModuleMultiThread::Update);
     const int homenr = mdAtoms_->mdatoms()->homenr;
 
 // const variables could be shared, but gcc-8 & gcc-9 don't agree how to write that...
@@ -366,7 +366,7 @@ void Propagator<IntegrationStage::LeapFrog>::run()
     const bool isFullScalingMatrixDiagonal =
             diagonalizePRMatrix<parrinelloRahmanVelocityScaling>(matrixPR_, diagPR_);
 
-    const int nth    = gmx_omp_nthreads_get(emntUpdate);
+    const int nth    = gmx_omp_nthreads_get(ModuleMultiThread::Update);
     const int homenr = mdAtoms_->mdatoms()->homenr;
 
 // const variables could be shared, but gcc-8 & gcc-9 don't agree how to write that...
@@ -451,7 +451,7 @@ void Propagator<IntegrationStage::VelocityVerletPositionsAndVelocities>::run()
     const bool isFullScalingMatrixDiagonal =
             diagonalizePRMatrix<parrinelloRahmanVelocityScaling>(matrixPR_, diagPR_);
 
-    const int nth    = gmx_omp_nthreads_get(emntUpdate);
+    const int nth    = gmx_omp_nthreads_get(ModuleMultiThread::Update);
     const int homenr = mdAtoms_->mdatoms()->homenr;
 
 // const variables could be shared, but gcc-8 & gcc-9 don't agree how to write that...
@@ -530,7 +530,7 @@ void Propagator<IntegrationStage::ScaleVelocities>::run()
                                      ? startVelocityScaling_[0]
                                      : 1.0;
 
-    const int nth    = gmx_omp_nthreads_get(emntUpdate);
+    const int nth    = gmx_omp_nthreads_get(ModuleMultiThread::Update);
     const int homenr = mdAtoms_->mdatoms()->homenr;
 
 // const variables could be shared, but gcc-8 & gcc-9 don't agree how to write that...
diff --git a/src/gromacs/modularsimulator/statepropagatordata.cpp b/src/gromacs/modularsimulator/statepropagatordata.cpp
index 6c1d1a6ad92f41218407d62251b5b25f0ebcfd4d..4dd6a899f27f8bd358a543a8e2a4ecec5f22eaea 100644 (file)
@@ -309,7 +309,7 @@ ForceBuffers* StatePropagatorData::forcePointer()
 
 void StatePropagatorData::copyPosition()
 {
-    int nth = gmx_omp_nthreads_get(emntUpdate);
+    int nth = gmx_omp_nthreads_get(ModuleMultiThread::Update);
 
 #pragma omp parallel for num_threads(nth) schedule(static) default(none) shared(nth)
     for (int th = 0; th < nth; th++)
diff --git a/src/gromacs/nbnxm/atomdata.cpp b/src/gromacs/nbnxm/atomdata.cpp
index d2e497b6dafe07dab86faae6aed5f15729223af3..d74467f1634ebdf44e8942819532fc1803a8a8d5 100644 (file)
@@ -990,7 +990,7 @@ void nbnxn_atomdata_copy_x_to_nbat_x(const Nbnxm::GridSet&   gridSet,
     int gridEnd   = 0;
     getAtomRanges(gridSet, locality, &gridBegin, &gridEnd);
 
-    const int nth = gmx_omp_nthreads_get(emntPairsearch);
+    const int nth = gmx_omp_nthreads_get(ModuleMultiThread::Pairsearch);
 #pragma omp parallel for num_threads(nth) schedule(static)
     for (int th = 0; th < nth; th++)
     {
@@ -1246,7 +1246,7 @@ void reduceForces(nbnxn_atomdata_t* nbat, const gmx::AtomLocality locality, cons
         return;
     }
 
-    int nth = gmx_omp_nthreads_get(emntNonbonded);
+    int nth = gmx_omp_nthreads_get(ModuleMultiThread::Nonbonded);
 
     if (nbat->out.size() > 1)
     {
diff --git a/src/gromacs/nbnxm/benchmark/bench_setup.cpp b/src/gromacs/nbnxm/benchmark/bench_setup.cpp
index 90bec691ab707ddfdcd06bc964073520e2666a81..929625fc4151b02f5f11b4b5bdd61a706dc8c9f0 100644 (file)
@@ -444,8 +444,8 @@ static void setupAndRunInstance(const gmx::BenchmarkSystem& system,
 void bench(const int sizeFactor, const KernelBenchOptions& options)
 {
     // We don't want to call gmx_omp_nthreads_init(), so we init what we need
-    gmx_omp_nthreads_set(emntPairsearch, options.numThreads);
-    gmx_omp_nthreads_set(emntNonbonded, options.numThreads);
+    gmx_omp_nthreads_set(ModuleMultiThread::Pairsearch, options.numThreads);
+    gmx_omp_nthreads_set(ModuleMultiThread::Nonbonded, options.numThreads);
 
     const gmx::BenchmarkSystem system(sizeFactor, options.outputFile);
 
diff --git a/src/gromacs/nbnxm/grid.cpp b/src/gromacs/nbnxm/grid.cpp
index 97d7b797a4797070f739c1bd551045c9779a04a6..56584071e7c69816a57ebaadcf660b1cb123d0b3 100644 (file)
@@ -1378,7 +1378,7 @@ void Grid::setCellIndices(int                            ddZone,
     srcAtomBegin_ = *atomRange.begin();
     srcAtomEnd_   = *atomRange.end();
 
-    const int nthread = gmx_omp_nthreads_get(emntPairsearch);
+    const int nthread = gmx_omp_nthreads_get(ModuleMultiThread::Pairsearch);
 
     const int numAtomsPerCell = geometry_.numAtomsPerCell;
 
diff --git a/src/gromacs/nbnxm/gridset.cpp b/src/gromacs/nbnxm/gridset.cpp
index 6b6b84ef97f26b3ed28512635b1d143c22538732..3070568e74fd2ec58e510964585cd58d2b7b6e8b 100644 (file)
@@ -206,7 +206,7 @@ void GridSet::putOnGrid(const matrix                   box,
     /* Make space for the new cell indices */
     gridSetData_.cells.resize(*atomRange.end());
 
-    const int nthread = gmx_omp_nthreads_get(emntPairsearch);
+    const int nthread = gmx_omp_nthreads_get(ModuleMultiThread::Pairsearch);
     GMX_ASSERT(nthread > 0, "We expect the OpenMP thread count to be set");
 
 #pragma omp parallel for num_threads(nthread) schedule(static)
index fbd58b0af744de2aa01304a411a02cf805e50cdf..52667776a0b05993c801c9408d282b33ce74b236 100644 (file)
@@ -256,9 +256,9 @@ static void nbnxn_kernel_cpu(const PairlistSet&             pairlistSet,
 
     gmx::ArrayRef<const NbnxnPairlistCpu> pairlists = pairlistSet.cpuLists();
 
-    auto shiftVecPointer = as_rvec_array(shiftVectors.data());
+    const auto* shiftVecPointer = as_rvec_array(shiftVectors.data());
 
-    int gmx_unused nthreads = gmx_omp_nthreads_get(emntNonbonded);
+    int gmx_unused nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Nonbonded);
     wallcycle_sub_start(wcycle, WallCycleSubCounter::NonbondedClear);
 #pragma omp parallel for schedule(static) num_threads(nthreads)
     for (gmx::index nb = 0; nb < pairlists.ssize(); nb++)
@@ -544,7 +544,7 @@ void nonbonded_verlet_t::dispatchFreeEnergyKernel(gmx::InteractionLocality
     gmx::ArrayRef<real> energygrp_elec = enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::CoulombSR];
     gmx::ArrayRef<real> energygrp_vdw = enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::LJSR];
 
-    GMX_ASSERT(gmx_omp_nthreads_get(emntNonbonded) == nbl_fep.ssize(),
+    GMX_ASSERT(gmx_omp_nthreads_get(ModuleMultiThread::Nonbonded) == nbl_fep.ssize(),
                "Number of lists should be same as number of NB threads");
 
     wallcycle_sub_start(wcycle_, WallCycleSubCounter::NonbondedFep);
index 279a385ed49b448a9a156b16685f5d49ae0af08a..a1185ea2cc150b20c2a15ca90d94dd43bc3dbac0 100644 (file)
@@ -429,14 +429,15 @@ std::unique_ptr<nonbonded_verlet_t> init_nb_verlet(const gmx::MDLogger& mdlog,
          */
         mimimumNumEnergyGroupNonbonded = 1;
     }
-    nbnxn_atomdata_init(mdlog,
-                        nbat.get(),
-                        kernelSetup.kernelType,
-                        enbnxninitcombrule,
-                        forcerec.ntype,
-                        forcerec.nbfp,
-                        mimimumNumEnergyGroupNonbonded,
-                        (useGpuForNonbonded || emulateGpu) ? 1 : gmx_omp_nthreads_get(emntNonbonded));
+    nbnxn_atomdata_init(
+            mdlog,
+            nbat.get(),
+            kernelSetup.kernelType,
+            enbnxninitcombrule,
+            forcerec.ntype,
+            forcerec.nbfp,
+            mimimumNumEnergyGroupNonbonded,
+            (useGpuForNonbonded || emulateGpu) ? 1 : gmx_omp_nthreads_get(ModuleMultiThread::Nonbonded));
 
     NbnxmGpu* gpu_nbv                          = nullptr;
     int       minimumIlistCountForGpuBalancing = 0;
@@ -463,7 +464,7 @@ std::unique_ptr<nonbonded_verlet_t> init_nb_verlet(const gmx::MDLogger& mdlog,
                                          DOMAINDECOMP(commrec) ? domdec_zones(commrec->dd) : nullptr,
                                          pairlistParams.pairlistType,
                                          bFEP_NonBonded,
-                                         gmx_omp_nthreads_get(emntPairsearch),
+                                         gmx_omp_nthreads_get(ModuleMultiThread::Pairsearch),
                                          pinPolicy);
 
     return std::make_unique<nonbonded_verlet_t>(
index 5f9ea050561e10a32e412b03c524d0a0c4c28851..3d061b149f9dfc7c3ed29e08bac6990eee6c6108 100644 (file)
@@ -661,7 +661,7 @@ PairlistSet::PairlistSet(const PairlistParams& pairlistParams) :
     isCpuType_(!sc_isGpuPairListType[pairlistParams.pairlistType])
 {
 
-    const int numLists = gmx_omp_nthreads_get(emntNonbonded);
+    const int numLists = gmx_omp_nthreads_get(ModuleMultiThread::Nonbonded);
 
     if (!combineLists_ && numLists > NBNXN_BUFFERFLAG_MAX_THREADS)
     {
@@ -2600,7 +2600,7 @@ static void combine_nblists(gmx::ArrayRef<const NbnxnPairlistGpu> nbls, NbnxnPai
     /* Each thread should copy its own data to the combined arrays,
      * as otherwise data will go back and forth between different caches.
      */
-    const int gmx_unused nthreads = gmx_omp_nthreads_get(emntPairsearch);
+    const int gmx_unused nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Pairsearch);
 
 #pragma omp parallel for num_threads(nthreads) schedule(static)
     for (gmx::index n = 0; n < nbls.ssize(); n++)
@@ -2673,7 +2673,7 @@ static void balance_fep_lists(gmx::ArrayRef<std::unique_ptr<t_nblist>> fepLists,
 
     const int nrj_target = (nrj_tot + numLists - 1) / numLists;
 
-    GMX_ASSERT(gmx_omp_nthreads_get(emntNonbonded) == numLists,
+    GMX_ASSERT(gmx_omp_nthreads_get(ModuleMultiThread::Nonbonded) == numLists,
                "We should have as many work objects as FEP lists");
 
 #pragma omp parallel for schedule(static) num_threads(numLists)
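
Several of the assertions in these nbnxm hunks encode the same invariant: the per-thread pair/FEP lists are sized by the Nonbonded thread count, so each OpenMP thread owns exactly one list. A minimal illustration of that pattern, with WorkList and processList as placeholders rather than GROMACS types.

    #include <vector>
    #include "gromacs/mdlib/gmx_omp_nthreads.h"

    struct WorkList {};                           // placeholder per-thread list
    static void processList(WorkList* /*wl*/) {}  // placeholder kernel

    static void runPerThreadLists()
    {
        const int numLists = gmx_omp_nthreads_get(ModuleMultiThread::Nonbonded);
        std::vector<WorkList> lists(numLists); // one list per nonbonded thread

    #pragma omp parallel for num_threads(numLists) schedule(static)
        for (int i = 0; i < numLists; i++)
        {
            processList(&lists[i]); // thread i touches only its own list
        }
    }
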
index 98d05e13b1884e57c90d5d9f3f91c0ff14b74532..0c7e29970b3238f7641b60c03489ff1bed44c2a5 100644 (file)
@@ -64,7 +64,7 @@ void PairlistSet::dispatchPruneKernel(const nbnxn_atomdata_t* nbat, gmx::ArrayRe
     GMX_ASSERT(cpuLists_[0].ciOuter.size() >= cpuLists_[0].ci.size(),
                "Here we should either have an empty ci list or ciOuter should be >= ci");
 
-    int gmx_unused nthreads = gmx_omp_nthreads_get(emntNonbonded);
+    int gmx_unused nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Nonbonded);
     GMX_ASSERT(nthreads == static_cast<gmx::index>(cpuLists_.size()),
                "The number of threads should match the number of lists");
 #pragma omp parallel for schedule(static) num_threads(nthreads)
index e56fb9761a758574e2d31ccda3b2dcc1da654d5a..e40cecf28c11063bdd62df118f84902ac499f880 100644 (file)
@@ -1425,6 +1425,7 @@ static PullCoordVectorForces calculateVectorForces(const pull_coord_work_t& pcrd
  * We could use a different, local mutex for each pull object, but the overhead
  * is extremely small here and registration is only done during initialization.
  */
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 static std::mutex registrationMutex;
 
 using Lock = std::lock_guard<std::mutex>;
@@ -1990,7 +1991,7 @@ struct pull_t* init_pull(FILE*                     fplog,
     pull->params = *pull_params;
 
     /* The gmx_omp_nthreads module might not be initialized here, so max(1,) */
-    const int maxNumThreads = std::max(1, gmx_omp_nthreads_get(emntDefault));
+    const int maxNumThreads = std::max(1, gmx_omp_nthreads_get(ModuleMultiThread::Default));
 
     for (int i = 0; i < pull_params->ngroup; ++i)
     {
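
The NOLINTNEXTLINE added above suppresses cppcoreguidelines-avoid-non-const-global-variables for the registration mutex, which the preceding comment argues is acceptable because registration only happens during initialization. A self-contained sketch of that pattern; registerProvider() and the registry are illustrative and not the pull API.

    #include <mutex>
    #include <vector>

    // A single process-wide mutex is enough when registration is rare and
    // happens only at initialization time.
    // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
    static std::mutex registrationMutex;
    // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
    static std::vector<int> registry;

    void registerProvider(int providerId)
    {
        const std::lock_guard<std::mutex> lock(registrationMutex);
        registry.push_back(providerId);
    }
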
index 27adb6c158a5a59d2cd2c525a98ccd36f5ec7cba..f4e0ba526af8436cdd0ea69d604c32ae8ac3532d 100644 (file)
@@ -80,7 +80,7 @@
 #include "gromacs/utility/pleasecite.h"
 #include "gromacs/utility/smalloc.h"
 
-static char const* RotStr = { "Enforced rotation:" };
+static const std::string RotStr = { "Enforced rotation:" };
 
 /* Set the minimum weight for the determination of the slab centers */
 #define WEIGHT_MIN (10 * GMX_FLOAT_MIN)
@@ -496,7 +496,7 @@ static void reduce_output(const t_commrec* cr, gmx_enfrot* er, real t, int64_t s
         }
         if (count > er->mpi_bufsize)
         {
-            gmx_fatal(FARGS, "%s MPI buffer overflow, please report this error.", RotStr);
+            gmx_fatal(FARGS, "%s MPI buffer overflow, please report this error.", RotStr.c_str());
         }
 
 #if GMX_MPI
@@ -2593,13 +2593,19 @@ static void get_firstlast_slab_check(gmx_enfrotgrp* erg, /* The rotation group (
     /* Check whether we have reference data to compare against */
     if (erg->slab_first < erg->slab_first_ref)
     {
-        gmx_fatal(FARGS, "%s No reference data for first slab (n=%d), unable to proceed.", RotStr, erg->slab_first);
+        gmx_fatal(FARGS,
+                  "%s No reference data for first slab (n=%d), unable to proceed.",
+                  RotStr.c_str(),
+                  erg->slab_first);
     }
 
     /* Check whether we have reference data to compare against */
     if (erg->slab_last > erg->slab_last_ref)
     {
-        gmx_fatal(FARGS, "%s No reference data for last slab (n=%d), unable to proceed.", RotStr, erg->slab_last);
+        gmx_fatal(FARGS,
+                  "%s No reference data for last slab (n=%d), unable to proceed.",
+                  RotStr.c_str(),
+                  erg->slab_last);
     }
 }
 
@@ -3343,7 +3349,7 @@ static void allocate_slabs(gmx_enfrotgrp* erg, FILE* fplog, gmx_bool bVerbose)
     {
         fprintf(fplog,
                 "%s allocating memory to store data for %d slabs (rotation group %d).\n",
-                RotStr,
+                RotStr.c_str(),
                 nslabs,
                 erg->groupIndex);
     }
@@ -3698,7 +3704,7 @@ std::unique_ptr<gmx::EnforcedRotation> init_rot(FILE*                       fplo
 
     if (MASTER(cr) && mdrunOptions.verbose)
     {
-        fprintf(stdout, "%s Initializing ...\n", RotStr);
+        fprintf(stdout, "%s Initializing ...\n", RotStr.c_str());
     }
 
     auto        enforcedRotation = std::make_unique<gmx::EnforcedRotation>();
@@ -3720,7 +3726,7 @@ std::unique_ptr<gmx::EnforcedRotation> init_rot(FILE*                       fplo
     {
         if (nullptr != fplog)
         {
-            fprintf(fplog, "%s rerun - will write rotation output every available step.\n", RotStr);
+            fprintf(fplog, "%s rerun - will write rotation output every available step.\n", RotStr.c_str());
         }
         er->nstrout = 1;
         er->nstsout = 1;
@@ -3765,7 +3771,7 @@ std::unique_ptr<gmx::EnforcedRotation> init_rot(FILE*                       fplo
         {
             fprintf(fplog,
                     "%s group %d type '%s'\n",
-                    RotStr,
+                    RotStr.c_str(),
                     groupIndex,
                     enumValueToString(erg->rotg->eType));
         }
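
Turning RotStr into a std::string requires the .c_str() calls seen in the hunks above: a std::string must not be passed through a C varargs interface such as fprintf() or gmx_fatal(), where %s expects a const char*. A minimal, compilable illustration:

    #include <cstdio>
    #include <string>

    static const std::string RotStr = "Enforced rotation:";

    int main()
    {
        // %s expects a const char*, so the std::string must be converted
        // explicitly; passing RotStr itself would be undefined behavior.
        std::fprintf(stdout, "%s Initializing ...\n", RotStr.c_str());
        return 0;
    }
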
diff --git a/src/gromacs/pulling/tests/.clang-tidy b/src/gromacs/pulling/tests/.clang-tidy
new file mode 100644 (file)
index 0000000..0adf51e
--- /dev/null
@@ -0,0 +1,91 @@
+# List of rationales for check suppressions (where known).
+# These have to precede the list because inline comments are not
+# supported by clang-tidy.
+#
+#         -cppcoreguidelines-non-private-member-variables-in-classes,
+#         -misc-non-private-member-variables-in-classes,
+# We intend a gradual transition to conform to this guideline, but it
+# is not practical to implement yet.
+#
+#         -readability-isolate-declaration,
+# Declarations like "int a, b;" are readable. Some forms are not, and
+# those might reasonably be suggested against during code review.
+#
+#         -cppcoreguidelines-avoid-c-arrays,
+# C arrays are still necessary in many places with legacy code
+#
+#         -cppcoreguidelines-avoid-magic-numbers,
+#         -readability-magic-numbers,
+# We have many legitimate use cases for magic numbers
+#
+#         -cppcoreguidelines-macro-usage,
+# We do use too many macros, and we should fix many of them, but there
+# is no reasonable way to suppress the check e.g. in src/config.h and
+# configuring the build is a major legitimate use of macros.
+#
+#         -cppcoreguidelines-narrowing-conversions,
+#         -bugprone-narrowing-conversions
+# We have many cases where int is converted to float and we don't care
+# enough about such potential loss of precision to use explicit casts
+# in large numbers of places.
+#
+#         -google-readability-avoid-underscore-in-googletest-name
+# We need to use underscores for readability for our legacy types
+# and command-line parameter names
+#
+#         -misc-no-recursion
+# We have way too many functions and methods relying on recursion
+#
+#         -cppcoreguidelines-avoid-non-const-global-variables
+# There are quite a lot of static variables in the test code that
+# can not be replaced.
+#
+#         -modernize-avoid-bind
+# Some code needs to use std::bind and can't be modernized quickly.
+Checks:  clang-diagnostic-*,-clang-analyzer-*,-clang-analyzer-security.insecureAPI.strcpy,
+         bugprone-*,misc-*,readability-*,performance-*,mpi-*,
+         -readability-inconsistent-declaration-parameter-name,
+         -readability-function-size,-readability-else-after-return,
+         modernize-use-nullptr,modernize-use-emplace,
+         modernize-make-unique,modernize-make-shared,
+         modernize-avoid-bind,
+         modernize-use-override,
+         modernize-redundant-void-arg,modernize-use-bool-literals,
+         cppcoreguidelines-*,-cppcoreguidelines-pro-*,-cppcoreguidelines-owning-memory,
+         -cppcoreguidelines-no-malloc,-cppcoreguidelines-special-member-functions,
+         -cppcoreguidelines-avoid-goto,
+         google-*,-google-build-using-namespace,-google-explicit-constructor,
+         -google-readability-function-size,-google-readability-todo,-google-runtime-int,
+         -cppcoreguidelines-non-private-member-variables-in-classes,
+         -misc-non-private-member-variables-in-classes,
+         -readability-isolate-declaration,
+         -cppcoreguidelines-avoid-c-arrays,
+         -cppcoreguidelines-avoid-magic-numbers,
+         -readability-magic-numbers,
+         -cppcoreguidelines-macro-usage,
+         -cppcoreguidelines-narrowing-conversions,
+         -bugprone-narrowing-conversions,
+         -google-readability-avoid-underscore-in-googletest-name,
+         -cppcoreguidelines-init-variables,
+         -misc-no-recursion,
+         -cppcoreguidelines-avoid-non-const-global-variables,
+         -modernize-avoid-bind
+HeaderFilterRegex: .*
+CheckOptions:
+  - key:           cppcoreguidelines-special-member-functions.AllowSoleDefaultDtor
+    value:         1
+  - key:           modernize-make-unique.IncludeStyle
+    value:         google
+  - key:           modernize-make-shared.IncludeStyle
+    value:         google
+  - key:           readability-implicit-bool-conversion.AllowIntegerConditions
+    value:         1
+  - key:           readability-implicit-bool-conversion.AllowPointerConditions
+    value:         1
+  - key:           bugprone-dangling-handle.HandleClasses
+    value:         std::basic_string_view; nonstd::sv_lite::basic_string_view
+# Permit passing shared pointers by value for sink parameters
+  - key:           performance-unnecessary-copy-initialization.AllowedTypes
+    value:         shared_ptr
+  - key:           performance-unnecessary-value-param.AllowedTypes
+    value:         shared_ptr
index b4d02f8213f26135a945c87a2f1fd6d96166648c..d6a0b36e5f337d018d5791cd18df6f69f79b7141 100644 (file)
@@ -83,7 +83,7 @@ namespace
 {
 
 //! Helper variable to localise the text of an often repeated message.
-const char* g_specifyEverythingFormatString =
+const char* const g_specifyEverythingFormatString =
         "When you use mdrun -gputasks, %s must be set to non-default "
         "values, so that the device IDs can be interpreted correctly."
 #if GMX_GPU
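
The extra const above makes the pointer itself immutable, which is what cppcoreguidelines-avoid-non-const-global-variables asks of global string constants. A minimal illustration with hypothetical names:

    // Still flagged: the chars are const, but the global pointer g_message
    // itself can be reassigned.
    const char* g_message = "hello";

    // Satisfies the check: both the chars and the pointer are const.
    const char* const g_messageFixed = "hello";
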
index 388406bf792450ba4c017ac14231931d33bd88b8..42c5b64a06b8fe793d9abddda2279ecd7807903e 100644 (file)
@@ -582,10 +582,10 @@ void check_resource_division_efficiency(const gmx_hw_info_t* hwinfo,
         GMX_RELEASE_ASSERT(nthreads_omp_faster_default >= nthreads_omp_mpi_ok_max,
                            "Inconsistent OpenMP thread count default values");
     }
-    GMX_RELEASE_ASSERT(gmx_omp_nthreads_get(emntDefault) >= 1,
+    GMX_RELEASE_ASSERT(gmx_omp_nthreads_get(ModuleMultiThread::Default) >= 1,
                        "Must have at least one OpenMP thread");
 
-    nth_omp_max = gmx_omp_nthreads_get(emntDefault);
+    nth_omp_max = gmx_omp_nthreads_get(ModuleMultiThread::Default);
 
     bool anyRankIsUsingGpus = willUsePhysicalGpu;
     /* Thread-MPI seems to have a bug with reduce on 1 node, so use a cond. */
diff --git a/src/gromacs/taskassignment/tests/.clang-tidy b/src/gromacs/taskassignment/tests/.clang-tidy
new file mode 100644 (file)
index 0000000..0adf51e
--- /dev/null
@@ -0,0 +1,91 @@
+# List of rationales for check suppressions (where known).
+# These have to precede the list because inline comments are not
+# supported by clang-tidy.
+#
+#         -cppcoreguidelines-non-private-member-variables-in-classes,
+#         -misc-non-private-member-variables-in-classes,
+# We intend a gradual transition to conform to this guideline, but it
+# is not practical to implement yet.
+#
+#         -readability-isolate-declaration,
+# Declarations like "int a, b;" are readable. Some forms are not, and
+# those might reasonably be suggested against during code review.
+#
+#         -cppcoreguidelines-avoid-c-arrays,
+# C arrays are still necessary in many places with legacy code
+#
+#         -cppcoreguidelines-avoid-magic-numbers,
+#         -readability-magic-numbers,
+# We have many legitimate use cases for magic numbers
+#
+#         -cppcoreguidelines-macro-usage,
+# We do use too many macros, and we should fix many of them, but there
+# is no reasonable way to suppress the check e.g. in src/config.h and
+# configuring the build is a major legitimate use of macros.
+#
+#         -cppcoreguidelines-narrowing-conversions,
+#         -bugprone-narrowing-conversions
+# We have many cases where int is converted to float and we don't care
+# enough about such potential loss of precision to use explicit casts
+# in large numbers of places.
+#
+#         -google-readability-avoid-underscore-in-googletest-name
+# We need to use underscores for readability for our legacy types
+# and command-line parameter names
+#
+#         -misc-no-recursion
+# We have way too many functions and methods relying on recursion
+#
+#         -cppcoreguidelines-avoid-non-const-global-variables
+# There are quite a lot of static variables in the test code that
+# can not be replaced.
+#
+#         -modernize-avoid-bind
+# Some code needs to use std::bind and can't be modernized quickly.
+Checks:  clang-diagnostic-*,-clang-analyzer-*,-clang-analyzer-security.insecureAPI.strcpy,
+         bugprone-*,misc-*,readability-*,performance-*,mpi-*,
+         -readability-inconsistent-declaration-parameter-name,
+         -readability-function-size,-readability-else-after-return,
+         modernize-use-nullptr,modernize-use-emplace,
+         modernize-make-unique,modernize-make-shared,
+         modernize-avoid-bind,
+         modernize-use-override,
+         modernize-redundant-void-arg,modernize-use-bool-literals,
+         cppcoreguidelines-*,-cppcoreguidelines-pro-*,-cppcoreguidelines-owning-memory,
+         -cppcoreguidelines-no-malloc,-cppcoreguidelines-special-member-functions,
+         -cppcoreguidelines-avoid-goto,
+         google-*,-google-build-using-namespace,-google-explicit-constructor,
+         -google-readability-function-size,-google-readability-todo,-google-runtime-int,
+         -cppcoreguidelines-non-private-member-variables-in-classes,
+         -misc-non-private-member-variables-in-classes,
+         -readability-isolate-declaration,
+         -cppcoreguidelines-avoid-c-arrays,
+         -cppcoreguidelines-avoid-magic-numbers,
+         -readability-magic-numbers,
+         -cppcoreguidelines-macro-usage,
+         -cppcoreguidelines-narrowing-conversions,
+         -bugprone-narrowing-conversions,
+         -google-readability-avoid-underscore-in-googletest-name,
+         -cppcoreguidelines-init-variables,
+         -misc-no-recursion,
+         -cppcoreguidelines-avoid-non-const-global-variables,
+         -modernize-avoid-bind
+HeaderFilterRegex: .*
+CheckOptions:
+  - key:           cppcoreguidelines-special-member-functions.AllowSoleDefaultDtor
+    value:         1
+  - key:           modernize-make-unique.IncludeStyle
+    value:         google
+  - key:           modernize-make-shared.IncludeStyle
+    value:         google
+  - key:           readability-implicit-bool-conversion.AllowIntegerConditions
+    value:         1
+  - key:           readability-implicit-bool-conversion.AllowPointerConditions
+    value:         1
+  - key:           bugprone-dangling-handle.HandleClasses
+    value:         std::basic_string_view; nonstd::sv_lite::basic_string_view
+# Permit passing shared pointers by value for sink parameters
+  - key:           performance-unnecessary-copy-initialization.AllowedTypes
+    value:         shared_ptr
+  - key:           performance-unnecessary-value-param.AllowedTypes
+    value:         shared_ptr