void NbvSetupUtil::setExecutionContext(const NBKernelOptions& options)
{
// Todo: find a more general way to initialize hardware
+    // Pair search and the nonbonded kernels share the same OpenMP thread
+    // count, taken from the caller-supplied options.
-    gmx_omp_nthreads_set(emntPairsearch, options.numOpenMPThreads);
-    gmx_omp_nthreads_set(emntNonbonded, options.numOpenMPThreads);
+    gmx_omp_nthreads_set(ModuleMultiThread::Pairsearch, options.numOpenMPThreads);
+    gmx_omp_nthreads_set(ModuleMultiThread::Nonbonded, options.numOpenMPThreads);
}
Nbnxm::KernelSetup NbvSetupUtil::getKernelSetup(const NBKernelOptions& options)
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/basenetwork.h"
#include "gromacs/utility/cstringutil.h"
+#include "gromacs/utility/enumerationhelpers.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxmpi.h"
using gmx::DlbOption;
using gmx::DomdecOptions;
-static const char* edlbs_names[int(DlbState::nr)] = { "off", "off", "auto", "locked", "on", "on" };
+/*! \brief Returns the printable name of a DlbState value.
+ *
+ * Both "off"-like states print as "off" and both "on"-like states print
+ * as "on"; the table is indexed directly by DlbState, so its entries must
+ * follow the enum's declaration order. */
+static const char* enumValueToString(DlbState enumValue)
+{
+    static constexpr gmx::EnumerationArray<DlbState, const char*> dlbStateNames = {
+        "off", "off", "auto", "locked", "on", "on"
+    };
+    return dlbStateNames[enumValue];
+}
/* The size per atom group of the cggl_flag buffer in gmx_domdec_comm_t */
#define DD_CGIBS 2
/* Returns whether mtop contains constraints and/or vsites */
static bool systemHasConstraintsOrVsites(const gmx_mtop_t& mtop)
{
-    for (const auto ilists : IListRange(mtop))
-    {
-        if (!extractILists(ilists.list(), IF_CONSTRAINT | IF_VSITE).empty())
-        {
-            return true;
-        }
-    }
-    return false;
+    // True when any molecule type's interaction lists contain constraint
+    // or virtual-site interactions.
+    // NOTE(review): std::any_of needs <algorithm>; confirm this file
+    // includes it (the hunk does not show the include block).
+    return std::any_of(IListRange(mtop).begin(), IListRange(mtop).end(), [](const auto ilist) {
+        return !extractILists(ilist.list(), IF_CONSTRAINT | IF_VSITE).empty();
+    });
}
static void setupUpdateGroups(const gmx::MDLogger& mdlog,
determineInitialDlbState(mdlog, options.dlbOption, ddSettings.recordLoad, mdrunOptions, ir);
GMX_LOG(mdlog.info)
.appendTextFormatted("Dynamic load balancing: %s",
- edlbs_names[static_cast<int>(ddSettings.initialDlbState)]);
+ enumValueToString(ddSettings.initialDlbState));
return ddSettings;
}
int numKeysEstimate = std::min(mtop.natoms / 20, mtop.natoms / (2 * dd->nnodes));
dc->ga2la = std::make_unique<gmx::HashedMap<int>>(numKeysEstimate);
- dc->nthread = gmx_omp_nthreads_get(emntDomdec);
+ dc->nthread = gmx_omp_nthreads_get(ModuleMultiThread::Domdec);
dc->ils.resize(dc->nthread);
dd->constraint_comm = new gmx_domdec_specat_comm_t;
offTemporarilyLocked, /**< DLB is off and temporarily can't turn on */
onCanTurnOff, /**< DLB is on and can turn off when slow */
onUser, /**< DLB is permanently on per user request */
- nr /**< The number of DLB states */
+ Count /**< The number of DLB states */
};
/*! \brief The PME domain decomposition for one dimension */
mbi.push_back(mbiMolblock);
}
- for (int th = 0; th < gmx_omp_nthreads_get(emntDomdec); th++)
+ for (int th = 0; th < gmx_omp_nthreads_get(ModuleMultiThread::Domdec); th++)
{
th_work.emplace_back(mtop.ffparams);
}
int ngl = 0;
for (int i = 0; i < natoms_sys; i++)
{
- if (const auto entry = dd->ga2la->find(i))
+ if (const auto* entry = dd->ga2la->find(i))
{
const int a = entry->la;
if (a >= numAtomsInZones)
* This can not be done in init_domain_decomposition,
* as the numbers of threads is determined later.
*/
- int numThreads = gmx_omp_nthreads_get(emntDomdec);
+ int numThreads = gmx_omp_nthreads_get(ModuleMultiThread::Domdec);
comm->dth.resize(numThreads);
}
/* Update atom data for mdatoms and several algorithms */
mdAlgorithmsSetupAtomData(cr, inputrec, top_global, top_local, fr, f, mdAtoms, constr, vsite, nullptr);
- auto mdatoms = mdAtoms->mdatoms();
+ auto* mdatoms = mdAtoms->mdatoms();
if (!thisRankHasDuty(cr, DUTY_PME))
{
/* Send the charges and/or c6/sigmas to our PME only node */
matrix tcm;
make_tric_corr_matrix(npbcdim, state->box, tcm);
- const int nthread = gmx_omp_nthreads_get(emntDomdec);
+ const int nthread = gmx_omp_nthreads_get(ModuleMultiThread::Domdec);
/* Compute the center of geometry for all home charge groups
* and put them in the box and determine where they should go.
--- /dev/null
+# List of rationales for check suppressions (where known).
+# These rationales have to precede the list because clang-tidy does not
+# support inline comments.
+#
+# -cppcoreguidelines-non-private-member-variables-in-classes,
+# -misc-non-private-member-variables-in-classes,
+# We intend a gradual transition to conform to this guideline, but it
+# is not practical to implement yet.
+#
+# -readability-isolate-declaration,
+# Declarations like "int a, b;" are readable. Some forms are not, and
+# those might reasonably be suggested against during code review.
+#
+# -cppcoreguidelines-avoid-c-arrays,
+# C arrays are still necessary in many places with legacy code
+#
+# -cppcoreguidelines-avoid-magic-numbers,
+# -readability-magic-numbers,
+# We have many legitimate use cases for magic numbers
+#
+# -cppcoreguidelines-macro-usage,
+# We do use too many macros, and we should fix many of them, but there
+# is no reasonable way to suppress the check e.g. in src/config.h and
+# configuring the build is a major legitimate use of macros.
+#
+# -cppcoreguidelines-narrowing-conversions,
+# -bugprone-narrowing-conversions
+# We have many cases where int is converted to float and we don't care
+# enough about such potential loss of precision to use explicit casts
+# in large numbers of places.
+#
+# -google-readability-avoid-underscore-in-googletest-name
+# We need to use underscores for readability for our legacy types
+# and command-line parameter names
+#
+# -misc-no-recursion
+# We have way too many functions and methods relying on recursion
+#
+# -cppcoreguidelines-avoid-non-const-global-variables
+# There are quite a lot of static variables in the test code that
+# can not be replaced.
+#
+# -modernize-avoid-bind
+# Some code needs to use std::bind and can't be modernized quickly.
+Checks: clang-diagnostic-*,-clang-analyzer-*,-clang-analyzer-security.insecureAPI.strcpy,
+ bugprone-*,misc-*,readability-*,performance-*,mpi-*,
+ -readability-inconsistent-declaration-parameter-name,
+ -readability-function-size,-readability-else-after-return,
+ modernize-use-nullptr,modernize-use-emplace,
+ modernize-make-unique,modernize-make-shared,
+ modernize-avoid-bind,
+ modernize-use-override,
+ modernize-redundant-void-arg,modernize-use-bool-literals,
+ cppcoreguidelines-*,-cppcoreguidelines-pro-*,-cppcoreguidelines-owning-memory,
+ -cppcoreguidelines-no-malloc,-cppcoreguidelines-special-member-functions,
+ -cppcoreguidelines-avoid-goto,
+ google-*,-google-build-using-namespace,-google-explicit-constructor,
+ -google-readability-function-size,-google-readability-todo,-google-runtime-int,
+ -cppcoreguidelines-non-private-member-variables-in-classes,
+ -misc-non-private-member-variables-in-classes,
+ -readability-isolate-declaration,
+ -cppcoreguidelines-avoid-c-arrays,
+ -cppcoreguidelines-avoid-magic-numbers,
+ -readability-magic-numbers,
+ -cppcoreguidelines-macro-usage,
+ -cppcoreguidelines-narrowing-conversions,
+ -bugprone-narrowing-conversions,
+ -google-readability-avoid-underscore-in-googletest-name,
+ -cppcoreguidelines-init-variables,
+ -misc-no-recursion,
+ -cppcoreguidelines-avoid-non-const-global-variables,
+ -modernize-avoid-bind
+HeaderFilterRegex: .*
+CheckOptions:
+ - key: cppcoreguidelines-special-member-functions.AllowSoleDefaultDtor
+ value: 1
+ - key: modernize-make-unique.IncludeStyle
+ value: google
+ - key: modernize-make-shared.IncludeStyle
+ value: google
+ - key: readability-implicit-bool-conversion.AllowIntegerConditions
+ value: 1
+ - key: readability-implicit-bool-conversion.AllowPointerConditions
+ value: 1
+ - key: bugprone-dangling-handle.HandleClasses
+ value: std::basic_string_view; nonstd::sv_lite::basic_string_view
+# Permit passing shared pointers by value for sink parameters
+ - key: performance-unnecessary-copy-initialization.AllowedTypes
+ value: shared_ptr
+ - key: performance-unnecessary-value-param.AllowedTypes
+ value: shared_ptr
{
const int end = forceToAdd.size();
- int gmx_unused nt = gmx_omp_nthreads_get(emntPME);
+ int gmx_unused nt = gmx_omp_nthreads_get(ModuleMultiThread::PME);
#pragma omp parallel for num_threads(nt) schedule(static)
for (int i = 0; i < end; i++)
{
void* recvptr = reinterpret_cast<void*>(buffer.data());
recvFFromPme(pmePpCommGpu, recvptr, natoms, cr, useGpuPmePpComms, receivePmeForceToGpu);
- int nt = gmx_omp_nthreads_get_simple_rvec_task(emntDefault, natoms);
+ int nt = gmx_omp_nthreads_get_simple_rvec_task(ModuleMultiThread::Default, natoms);
gmx::ArrayRef<gmx::RVec> f = forceWithVirial->force_;
mu_x = mu_y = mu_z = 0.0;
#pragma omp parallel for reduction(+: mu_x, mu_y, mu_z) schedule(static) \
- num_threads(gmx_omp_nthreads_get(emntDefault))
+ num_threads(gmx_omp_nthreads_get(ModuleMultiThread::Default))
for (int i = start; i < end; i++)
{
// Trivial OpenMP region that cannot throw
{
mu_x = mu_y = mu_z = 0.0;
#pragma omp parallel for reduction(+: mu_x, mu_y, mu_z) schedule(static) \
- num_threads(gmx_omp_nthreads_get(emntDefault))
+ num_threads(gmx_omp_nthreads_get(ModuleMultiThread::Default))
for (int i = start; i < end; i++)
{
// Trivial OpenMP region that cannot throw
{
matrix x_times_f;
- int nthreads = gmx_omp_nthreads_get_simple_rvec_task(emntDefault, nxf * 9);
+ int nthreads = gmx_omp_nthreads_get_simple_rvec_task(ModuleMultiThread::Default, nxf * 9);
GMX_ASSERT(nthreads >= 1, "Avoids uninitialized x_times_f (warning)");
if (nsettle > 0)
{
- nth = gmx_omp_nthreads_get(emntSETTLE);
+ nth = gmx_omp_nthreads_get(ModuleMultiThread::SETTLE);
}
else
{
vRef = v.unpaddedArrayRef();
}
- int gmx_unused numThreads = gmx_omp_nthreads_get(emntUpdate);
+ int gmx_unused numThreads = gmx_omp_nthreads_get(ModuleMultiThread::Update);
#pragma omp parallel for num_threads(numThreads) schedule(static)
for (int i = 0; i < numHomeAtoms_; i++)
}
/* Allocate thread-local work arrays */
- int nthreads = gmx_omp_nthreads_get(emntSETTLE);
+ int nthreads = gmx_omp_nthreads_get(ModuleMultiThread::SETTLE);
if (nthreads > 1 && threadConstraintsVirial == nullptr)
{
snew(threadConstraintsVirial, nthreads);
0.2967324292201065,
0.2967324292201065 };
-static const double* sy_const[] = { nullptr, sy_const_1, nullptr, sy_const_3, nullptr, sy_const_5 };
+static constexpr std::array<const double*, 6> sy_const = { nullptr, sy_const_1, nullptr,
+ sy_const_3, nullptr, sy_const_5 };
void update_tcouple(int64_t step,
/* Scale the coordinates */
if (scaleCoordinates)
{
- auto x = state->x.rvec_array();
+ auto* x = state->x.rvec_array();
for (int n = start; n < start + homenr; n++)
{
tmvmul_ur0(pressureCouplingMu, x[n], x[n]);
/* Scale the positions and the velocities */
if (scaleCoordinates)
{
- const int gmx_unused numThreads = gmx_omp_nthreads_get(emntUpdate);
+ const int gmx_unused numThreads = gmx_omp_nthreads_get(ModuleMultiThread::Update);
#pragma omp parallel for num_threads(numThreads) schedule(static)
for (int n = start; n < start + nr_atoms; n++)
{
forcerec->listedForces.emplace_back(
mtop.ffparams,
mtop.groups.groups[SimulationAtomGroupType::EnergyOutput].size(),
- gmx_omp_nthreads_get(emntBonded),
+ gmx_omp_nthreads_get(ModuleMultiThread::Bonded),
interactionSelection,
fplog);
}
forcerec->listedForces.emplace_back(
mtop.ffparams,
mtop.groups.groups[SimulationAtomGroupType::EnergyOutput].size(),
- gmx_omp_nthreads_get(emntBonded),
+ gmx_omp_nthreads_get(ModuleMultiThread::Bonded),
ListedForces::interactionSelectionAll(),
fplog);
}
forcerec->print_force = print_force;
- forcerec->nthread_ewc = gmx_omp_nthreads_get(emntBonded);
+ forcerec->nthread_ewc = gmx_omp_nthreads_get(ModuleMultiThread::Bonded);
forcerec->ewc_t.resize(forcerec->nthread_ewc);
if (inputrec.eDispCorr != DispersionCorrectionType::No)
* This file is part of the GROMACS molecular simulation package.
*
* Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
- * Copyright (c) 2017,2018,2019,2020, by the GROMACS development team, led by
+ * Copyright (c) 2017,2018,2019,2020,2021, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#include "gromacs/gmxlib/network.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/utility/cstringutil.h"
+#include "gromacs/utility/enumerationhelpers.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxomp.h"
int gnth; /**< Global num. of threads per PP or PP+PME process/tMPI thread. */
int gnth_pme; /**< Global num. of threads per PME only process/tMPI thread. */
- int nth[emntNR]; /**< Number of threads for each module, indexed with module_nth_t */
- gmx_bool initialized; /**< TRUE if the module as been initialized. */
+    gmx::EnumerationArray<ModuleMultiThread, int> nth; /**< Number of threads for each module, indexed with ModuleMultiThread */
+    bool initialized; /**< TRUE if the module has been initialized. */
} omp_module_nthreads_t;
/** Names of environment variables to set the per module number of threads.
 *
 * Indexed with the values of ModuleMultiThread.
 * */
-static const char* modth_env_var[emntNR] = { "GMX_DEFAULT_NUM_THREADS should never be set",
- "GMX_DOMDEC_NUM_THREADS",
- "GMX_PAIRSEARCH_NUM_THREADS",
- "GMX_NONBONDED_NUM_THREADS",
- "GMX_LISTED_FORCES_NUM_THREADS",
- "GMX_PME_NUM_THREADS",
- "GMX_UPDATE_NUM_THREADS",
- "GMX_VSITE_NUM_THREADS",
- "GMX_LINCS_NUM_THREADS",
- "GMX_SETTLE_NUM_THREADS" };
+/*! \brief Returns the GMX_*_NUM_THREADS environment-variable name for
+ * module \p enumValue.
+ *
+ * The table is indexed by ModuleMultiThread, so its entries must follow
+ * the enum's declaration order. */
+static const char* enumValueToEnvVariableString(ModuleMultiThread enumValue)
+{
+    constexpr gmx::EnumerationArray<ModuleMultiThread, const char*> moduleMultiThreadEnvVariableNames = {
+        "GMX_DEFAULT_NUM_THREADS should never be set",
+        "GMX_DOMDEC_NUM_THREADS",
+        "GMX_PAIRSEARCH_NUM_THREADS",
+        "GMX_NONBONDED_NUM_THREADS",
+        "GMX_LISTED_FORCES_NUM_THREADS",
+        "GMX_PME_NUM_THREADS",
+        "GMX_UPDATE_NUM_THREADS",
+        "GMX_VSITE_NUM_THREADS",
+        "GMX_LINCS_NUM_THREADS",
+        "GMX_SETTLE_NUM_THREADS"
+    };
+    return moduleMultiThreadEnvVariableNames[enumValue];
+}
/** Names of the modules. */
-static const char* mod_name[emntNR] = { "default", "domain decomposition",
- "pair search", "non-bonded",
- "bonded", "PME",
- "update", "LINCS",
- "SETTLE" };
+/*! \brief Returns the human-readable name of multithreaded module \p enumValue.
+ *
+ * The table is indexed by ModuleMultiThread, so its entries must follow
+ * the enum's declaration order (Default..SETTLE, 10 entries). */
+static const char* enumValueToString(ModuleMultiThread enumValue)
+{
+    // Bug fix: the table was missing the "vsite" entry, which shifted the
+    // names of VSITE, LINCS and SETTLE by one and left SETTLE's slot as a
+    // null pointer (undefined behaviour when printed via %s).
+    constexpr gmx::EnumerationArray<ModuleMultiThread, const char*> moduleMultiThreadNames = {
+        "default", "domain decomposition",
+        "pair search", "non-bonded",
+        "bonded", "PME",
+        "update", "vsite",
+        "LINCS", "SETTLE"
+    };
+    return moduleMultiThreadNames[enumValue];
+}
/** Number of threads for each algorithmic module.
*
* All fields are initialized to 0 which should result in errors if
* the init call is omitted.
* */
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
static omp_module_nthreads_t modth = { 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0 }, FALSE };
 * GMX_*_NUM_THREADS env var is set, case in which its value overrides
 * the default.
 */
-static void pick_module_nthreads(const gmx::MDLogger& mdlog, int m, gmx_bool bSepPME)
+static void pick_module_nthreads(const gmx::MDLogger& mdlog, ModuleMultiThread m, gmx_bool bSepPME)
{
char* env;
int nth;
/* The default should never be set through a GMX_*_NUM_THREADS env var
* as it's always equal with gnth. */
- if (m == emntDefault)
+ if (m == ModuleMultiThread::Default)
{
return;
}
/* check the environment variable */
- if ((env = getenv(modth_env_var[m])) != nullptr)
+ if ((env = getenv(enumValueToEnvVariableString(m))) != nullptr)
{
sscanf(env, "%d", &nth);
if (!bOMP)
{
gmx_warning("%s=%d is set, but %s is compiled without OpenMP!",
- modth_env_var[m],
+ enumValueToEnvVariableString(m),
nth,
gmx::getProgramContext().displayName());
}
gmx_warning(
"%s=%d is set, the default number of threads also "
"needs to be set with OMP_NUM_THREADS!",
- modth_env_var[m],
+ enumValueToEnvVariableString(m),
nth);
}
/* only babble if we are really overriding with a different value */
-    if ((bSepPME && m == emntPME && nth != modth.gnth_pme) || (nth != modth.gnth))
+    if ((bSepPME && m == ModuleMultiThread::PME && nth != modth.gnth_pme) || (nth != modth.gnth))
    {
        GMX_LOG(mdlog.warning)
                .asParagraph()
+                // Bug fix: the first %s is the environment-variable name and
+                // the last %s is the module name, matching the removed
+                // modth_env_var[m] / mod_name[m] usage; the two replacement
+                // calls had been swapped.
                .appendTextFormatted("%s=%d set, overriding the default number of %s threads",
-                                     modth_env_var[m],
+                                     enumValueToEnvVariableString(m),
                                     nth,
-                                     mod_name[m]);
+                                     enumValueToString(m));
    }
}
else
{
/* pick the global PME node nthreads if we are setting the number
* of threads in separate PME nodes */
- nth = (bSepPME && m == emntPME) ? modth.gnth_pme : modth.gnth;
+ nth = (bSepPME && m == ModuleMultiThread::PME) ? modth.gnth_pme : modth.gnth;
}
gmx_omp_nthreads_set(m, nth);
}
/* now set the per-module values */
- modth.nth[emntDefault] = modth.gnth;
- pick_module_nthreads(mdlog, emntDomdec, bSepPME);
- pick_module_nthreads(mdlog, emntPairsearch, bSepPME);
- pick_module_nthreads(mdlog, emntNonbonded, bSepPME);
- pick_module_nthreads(mdlog, emntBonded, bSepPME);
- pick_module_nthreads(mdlog, emntPME, bSepPME);
- pick_module_nthreads(mdlog, emntUpdate, bSepPME);
- pick_module_nthreads(mdlog, emntVSITE, bSepPME);
- pick_module_nthreads(mdlog, emntLINCS, bSepPME);
- pick_module_nthreads(mdlog, emntSETTLE, bSepPME);
+ modth.nth[ModuleMultiThread::Default] = modth.gnth;
+ pick_module_nthreads(mdlog, ModuleMultiThread::Domdec, bSepPME);
+ pick_module_nthreads(mdlog, ModuleMultiThread::Pairsearch, bSepPME);
+ pick_module_nthreads(mdlog, ModuleMultiThread::Nonbonded, bSepPME);
+ pick_module_nthreads(mdlog, ModuleMultiThread::Bonded, bSepPME);
+ pick_module_nthreads(mdlog, ModuleMultiThread::PME, bSepPME);
+ pick_module_nthreads(mdlog, ModuleMultiThread::Update, bSepPME);
+ pick_module_nthreads(mdlog, ModuleMultiThread::VSITE, bSepPME);
+ pick_module_nthreads(mdlog, ModuleMultiThread::LINCS, bSepPME);
+ pick_module_nthreads(mdlog, ModuleMultiThread::SETTLE, bSepPME);
/* set the number of threads globally */
if (bOMP)
reportOpenmpSettings(mdlog, cr, bOMP, bSepPME);
}
-int gmx_omp_nthreads_get(int mod)
+int gmx_omp_nthreads_get(ModuleMultiThread mod)
{
- if (mod < 0 || mod >= emntNR)
+ if (mod < ModuleMultiThread::Default || mod >= ModuleMultiThread::Count)
{
/* invalid module queried */
return -1;
}
}
+// Overrides the stored per-module thread count. Intended for use in tests;
+// normal initialization goes through the pick_module_nthreads path.
-void gmx_omp_nthreads_set(int mod, int nthreads)
+void gmx_omp_nthreads_set(ModuleMultiThread mod, int nthreads)
{
/* Catch an attempt to set the number of threads on an invalid
 * OpenMP module. */
-    GMX_RELEASE_ASSERT(mod >= 0 && mod < emntNR, "Trying to set nthreads on invalid OpenMP module");
+    GMX_RELEASE_ASSERT(mod >= ModuleMultiThread::Default && mod < ModuleMultiThread::Count,
+                       "Trying to set nthreads on invalid OpenMP module");
modth.nth[mod] = nthreads;
}
* This file is part of the GROMACS molecular simulation package.
*
* Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
- * Copyright (c) 2017,2018,2019,2020, by the GROMACS development team, led by
+ * Copyright (c) 2017,2018,2019,2020,2021, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
}
/** Enum values corresponding to multithreaded algorithmic modules. */
-typedef enum module_nth
+enum class ModuleMultiThread : int
{
/* Default is meant to be used in OMP regions outside the named
 * algorithmic modules listed below. */
-    emntDefault,
-    emntDomdec,
-    emntPairsearch,
-    emntNonbonded,
-    emntBonded,
-    emntPME,
-    emntUpdate,
-    emntVSITE,
-    emntLINCS,
-    emntSETTLE,
-    emntNR
-} module_nth_t;
+    // Any table indexed by this enum (module names, GMX_*_NUM_THREADS
+    // environment-variable names) must list its entries in this order.
+    Default,
+    Domdec,
+    Pairsearch,
+    Nonbonded,
+    Bonded,
+    PME,
+    Update,
+    VSITE,
+    LINCS,
+    SETTLE,
+    Count //!< Number of modules; must remain last.
+};
/*! \brief
* Initializes the per-module thread count.
/*! \brief
* Returns the number of threads to be used in the given module \p mod. */
-int gmx_omp_nthreads_get(int mod);
+int gmx_omp_nthreads_get(ModuleMultiThread mod);
/*! \brief
* Returns the number of threads to be used in the given module \p mod for simple rvec operations.
* the reduction in computional cost due to parallelization. This routine
* returns 1 when the overhead is expected to be higher than the gain.
*/
-static inline int gmx_omp_nthreads_get_simple_rvec_task(int mod, int nrvec)
+static inline int gmx_omp_nthreads_get_simple_rvec_task(ModuleMultiThread mod, int nrvec)
{
/* There can be a relatively large overhead to an OpenMP parallel for loop.
* This overhead increases, slowly, with the numbe of threads used.
/*! \brief Sets the number of threads to be used in module.
*
* Intended for use in testing. */
-void gmx_omp_nthreads_set(int mod, int nthreads);
+void gmx_omp_nthreads_set(ModuleMultiThread mod, int nthreads);
/*! \brief
* Read the OMP_NUM_THREADS env. var. and check against the value set on the
* The current constraint to task assignment code can create independent
* tasks only when not more than two constraints are connected sequentially.
*/
- li->ntask = gmx_omp_nthreads_get(emntLINCS);
+ li->ntask = gmx_omp_nthreads_get(ModuleMultiThread::LINCS);
li->bTaskDep = (li->ntask > 1 && bMoreThanTwoSeq);
if (debug)
{
}
}
ekind->dekindl_old = ekind->dekindl;
- int nthread = gmx_omp_nthreads_get(emntUpdate);
+ int nthread = gmx_omp_nthreads_get(ModuleMultiThread::Update);
#pragma omp parallel for num_threads(nthread) schedule(static)
for (int thread = 0; thread < nthread; thread++)
const SimulationGroups& groups = mtop.groups;
- auto md = mdAtoms->mdatoms();
+ auto* md = mdAtoms->mdatoms();
/* nindex>=0 indicates DD where we use an index */
if (nindex >= 0)
{
int molb = 0;
- nthreads = gmx_omp_nthreads_get(emntDefault);
+ nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Default);
#pragma omp parallel for num_threads(nthreads) schedule(static) firstprivate(molb)
for (int i = 0; i < md->nr; i++)
{
real L1 = 1 - lambda;
/* Update masses of perturbed atoms for the change in lambda */
- int gmx_unused nthreads = gmx_omp_nthreads_get(emntDefault);
+ int gmx_unused nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Default);
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int i = 0; i < md->nr; i++)
{
GMX_ASSERT(f.size() >= forceToAdd.size(), "Accumulation buffer should be sufficiently large");
const int end = forceToAdd.size();
- int gmx_unused nt = gmx_omp_nthreads_get(emntDefault);
+ int gmx_unused nt = gmx_omp_nthreads_get(ModuleMultiThread::Default);
#pragma omp parallel for num_threads(nt) schedule(static)
for (int i = 0; i < end; i++)
{
static inline void clearRVecs(ArrayRef<RVec> v, const bool useOpenmpThreading)
{
- int nth = gmx_omp_nthreads_get_simple_rvec_task(emntDefault, v.ssize());
+ int nth = gmx_omp_nthreads_get_simple_rvec_task(ModuleMultiThread::Default, v.ssize());
/* Note that we would like to avoid this conditional by putting it
* into the omp pragma instead, but then we still take the full
ArrayRef<RVec> forceMts,
const real mtsFactor)
{
- const int gmx_unused numThreads = gmx_omp_nthreads_get(emntDefault);
+ const int gmx_unused numThreads = gmx_omp_nthreads_get(ModuleMultiThread::Default);
#pragma omp parallel for num_threads(numThreads) schedule(static)
for (int i = 0; i < numAtoms; i++)
{
put_atoms_in_box_omp(fr->pbcType,
box,
x.unpaddedArrayRef().subArray(0, mdatoms->homenr),
- gmx_omp_nthreads_get(emntDefault));
+ gmx_omp_nthreads_get(ModuleMultiThread::Default));
inc_nrnb(nrnb, eNR_SHIFTX, mdatoms->homenr);
}
}
const bool reinitGpuPmePpComms =
GMX_MPI && simulationWork.useGpuPmePpCommunication && (stepWork.doNeighborSearch);
- const auto localXReadyOnDevice = (useGpuPmeOnThisRank || simulationWork.useGpuBufferOps)
- ? stateGpu->getCoordinatesReadyOnDeviceEvent(
- AtomLocality::Local, simulationWork, stepWork)
- : nullptr;
+ auto* localXReadyOnDevice = (useGpuPmeOnThisRank || simulationWork.useGpuBufferOps)
+ ? stateGpu->getCoordinatesReadyOnDeviceEvent(
+ AtomLocality::Local, simulationWork, stepWork)
+ : nullptr;
// Copy coordinate from the GPU if update is on the GPU and there
// are forces to be computed on the CPU, or for the computation of
Lincs* lincsd;
int maxwarn = 100;
int warncount_lincs = 0;
- gmx_omp_nthreads_set(emntLINCS, 1);
+ gmx_omp_nthreads_set(ModuleMultiThread::LINCS, 1);
// Communication record
t_commrec cr;
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2019,2020, by the GROMACS development team, led by
+ * Copyright (c) 2019,2020,2021, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
testData->state_.v[i] = testData->v_[i];
}
- gmx_omp_nthreads_set(emntUpdate, 1);
+ gmx_omp_nthreads_set(ModuleMultiThread::Update, 1);
for (int step = 0; step < numSteps; step++)
{
wallcycle_start(wcycle, WallCycleCounter::Update);
- int nth = gmx_omp_nthreads_get(emntUpdate);
+ int nth = gmx_omp_nthreads_get(ModuleMultiThread::Update);
#pragma omp parallel for num_threads(nth) schedule(static)
for (int th = 0; th < nth; th++)
/* We have no frozen atoms or fully frozen atoms which have not
* been moved by the update, so we can simply copy all coordinates.
*/
- int gmx_unused nth = gmx_omp_nthreads_get(emntUpdate);
+ int gmx_unused nth = gmx_omp_nthreads_get(ModuleMultiThread::Update);
#pragma omp parallel for num_threads(nth) schedule(static)
for (int i = 0; i < homenr; i++)
{
}
/* ############# START The update of velocities and positions ######### */
- int nth = gmx_omp_nthreads_get(emntUpdate);
+ int nth = gmx_omp_nthreads_get(ModuleMultiThread::Update);
#pragma omp parallel for num_threads(nth) schedule(static)
for (int th = 0; th < nth; th++)
// Cast to real for faster code, no loss in precision
const real dt = inputRecord.delta_t;
- const int nth = gmx_omp_nthreads_get(emntUpdate);
+ const int nth = gmx_omp_nthreads_get(ModuleMultiThread::Update);
#pragma omp parallel for num_threads(nth) schedule(static)
for (int th = 0; th < nth; th++)
*groups.groupNames[groups.groups[SimulationAtomGroupType::MassCenterVelocityRemoval][g]];
}
- thread_vcm.resize(gmx_omp_nthreads_get(emntDefault) * stride);
+ thread_vcm.resize(gmx_omp_nthreads_get(ModuleMultiThread::Default) * stride);
}
nFreeze = ir.opts.nFreeze;
{
return;
}
- int nthreads = gmx_omp_nthreads_get(emntDefault);
+ int nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Default);
{
#pragma omp parallel num_threads(nthreads) default(none) shared(x, v, vcm, md)
const int homenr = mdatoms.homenr;
const unsigned short* group_id = mdatoms.cVCM;
- int gmx_unused nth = gmx_omp_nthreads_get(emntDefault);
+ int gmx_unused nth = gmx_omp_nthreads_get(ModuleMultiThread::Default);
// homenr could be shared, but gcc-8 & gcc-9 don't agree how to write that...
// https://www.gnu.org/software/gcc/gcc-9/porting_to.html -> OpenMP data sharing
#pragma omp parallel num_threads(nth) default(none) shared(x, v, vcm, group_id, mdatoms) \
return std::make_unique<VirtualSitesHandler>(mtop, cr->dd, pbcType);
}
-ThreadingInfo::ThreadingInfo() : numThreads_(gmx_omp_nthreads_get(emntVSITE))
+ThreadingInfo::ThreadingInfo() : numThreads_(gmx_omp_nthreads_get(ModuleMultiThread::VSITE))
{
if (numThreads_ > 1)
{
start = 0;
end = md->homenr;
- nthreads = gmx_omp_nthreads_get(emntUpdate);
+ nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Update);
#pragma omp parallel num_threads(nthreads)
{
const rvec* x1 = s1->x.rvec_array();
{
coords->resize(refCoords.size());
- const int gmx_unused nthreads = gmx_omp_nthreads_get(emntUpdate);
+ const int gmx_unused nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Update);
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int i = 0; i < ssize(refCoords); i++)
{
real maxDiffSquared = 0;
- const int gmx_unused nthreads = gmx_omp_nthreads_get(emntUpdate);
+ const int gmx_unused nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Update);
#pragma omp parallel for reduction(max : maxDiffSquared) num_threads(nthreads) schedule(static)
for (int i = 0; i < ssize(coords1); i++)
{
tensor vir, pres;
int number_steps, neval = 0, nstcg = inputrec->nstcgsteep;
int m, step, nminstep;
- auto mdatoms = mdAtoms->mdatoms();
+ auto* mdatoms = mdAtoms->mdatoms();
GMX_LOG(mdlog.info)
.asParagraph()
em_state_t ems;
gmx_localtop_t top(top_global.ffparams);
gmx_global_stat_t gstat;
- auto mdatoms = mdAtoms->mdatoms();
+ auto* mdatoms = mdAtoms->mdatoms();
GMX_LOG(mdlog.info)
.asParagraph()
int nsteps;
int count = 0;
int steps_accepted = 0;
- auto mdatoms = mdAtoms->mdatoms();
+ auto* mdatoms = mdAtoms->mdatoms();
GMX_LOG(mdlog.info)
.asParagraph()
real* full_matrix = nullptr;
/* added with respect to mdrun */
- int row, col;
- real der_range = 10.0 * std::sqrt(GMX_REAL_EPS);
- real x_min;
- bool bIsMaster = MASTER(cr);
- auto mdatoms = mdAtoms->mdatoms();
+ int row, col;
+ real der_range = 10.0 * std::sqrt(GMX_REAL_EPS);
+ real x_min;
+ bool bIsMaster = MASTER(cr);
+ auto* mdatoms = mdAtoms->mdatoms();
GMX_LOG(mdlog.info)
.asParagraph()
{
try
{
- auto masterMdrunner = reinterpret_cast<const gmx::Mdrunner*>(arg);
+ const auto* masterMdrunner = reinterpret_cast<const gmx::Mdrunner*>(arg);
/* copy the arg list to make sure that it's thread-local. This
doesn't copy pointed-to items, of course; fnm, cr and fplog
are reset in the call below, all others should be const. */
* to the code that handled the thread region, so that there's a
* mechanism to keep cycle counting working during the transition
* to task parallelism. */
- int nthreads_pp = gmx_omp_nthreads_get(emntNonbonded);
- int nthreads_pme = gmx_omp_nthreads_get(emntPME);
+ int nthreads_pp = gmx_omp_nthreads_get(ModuleMultiThread::Nonbonded);
+ int nthreads_pme = gmx_omp_nthreads_get(ModuleMultiThread::PME);
wallcycle_scale_by_num_threads(
wcycle, thisRankHasDuty(cr, DUTY_PME) && !thisRankHasDuty(cr, DUTY_PP), nthreads_pp, nthreads_pme);
auto cycle_sum(wallcycle_sum(cr, wcycle));
if (printReport)
{
- auto nbnxn_gpu_timings =
+ auto* nbnxn_gpu_timings =
(nbv != nullptr && nbv->useGpu()) ? Nbnxm::gpu_get_timings(nbv->gpu_nbv) : nullptr;
gmx_wallclock_gpu_pme_t pme_gpu_timings = {};
PME: env variable should be read only on one node to make sure it is
identical everywhere;
*/
- const int numThreadsOnThisRank = thisRankHasDuty(cr, DUTY_PP) ? gmx_omp_nthreads_get(emntNonbonded)
- : gmx_omp_nthreads_get(emntPME);
+ const int numThreadsOnThisRank = thisRankHasDuty(cr, DUTY_PP)
+ ? gmx_omp_nthreads_get(ModuleMultiThread::Nonbonded)
+ : gmx_omp_nthreads_get(ModuleMultiThread::PME);
checkHardwareOversubscription(
numThreadsOnThisRank, cr->nodeid, *hwinfo_->hardwareTopology, physicalNodeComm, mdlog);
mdrunOptions.reproducible,
ewaldcoeff_q,
ewaldcoeff_lj,
- gmx_omp_nthreads_get(emntPME),
+ gmx_omp_nthreads_get(ModuleMultiThread::PME),
pmeRunMode,
nullptr,
deviceContext,
if (thisRankHasDuty(cr, DUTY_PP))
{
/* Assumes uniform use of the number of OpenMP threads */
- walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(emntDefault));
+ walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(ModuleMultiThread::Default));
if (inputrec->bPull)
{
"cos_acceleration is only supported by integrator=md");
/* Kinetic energy data */
- gmx_ekindata_t ekind(inputrec->opts.ngtc, inputrec->cos_accel, gmx_omp_nthreads_get(emntUpdate));
+ gmx_ekindata_t ekind(inputrec->opts.ngtc,
+ inputrec->cos_accel,
+ gmx_omp_nthreads_get(ModuleMultiThread::Update));
/* Set up interactive MD (IMD) */
auto imdSession = makeImdSession(inputrec.get(),
{
GMX_RELEASE_ASSERT(pmedata, "pmedata was NULL while cr->duty was not DUTY_PP");
/* do PME only */
- walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(emntPME));
+ walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(ModuleMultiThread::PME));
gmx_pmeonly(pmedata,
cr,
&nrnb,
std::vector<t_shell>& shells = shfc->shells;
shells.clear();
- auto ptype = md.ptype;
+ auto* ptype = md.ptype;
for (int i = a0; i < a1; i++)
{
if (ptype[i] == ParticleType::Shell)
rvec* x_old = as_rvec_array(xOld.paddedArrayRef().data());
rvec* x = as_rvec_array(xCurrent.paddedArrayRef().data());
- auto ptype = md.ptype;
- auto invmass = gmx::arrayRefFromArray(md.invmass, md.nr);
- dt = ir->delta_t;
+ auto* ptype = md.ptype;
+ auto invmass = gmx::arrayRefFromArray(md.invmass, md.nr);
+ dt = ir->delta_t;
/* Does NOT work with freeze groups (yet) */
for (n = 0; n < end; n++)
* before do_force is called, which normally puts all
* charge groups in the box.
*/
- put_atoms_in_box_omp(fr->pbcType, box, x.subArray(0, md.homenr), gmx_omp_nthreads_get(emntDefault));
+ put_atoms_in_box_omp(
+ fr->pbcType, box, x.subArray(0, md.homenr), gmx_omp_nthreads_get(ModuleMultiThread::Default));
}
if (nflexcon)
// TODO: Convert to use the nbnxm kernels by putting the system and the test molecule on two separate search grids
void LegacySimulator::do_tpi()
{
- GMX_RELEASE_ASSERT(gmx_omp_nthreads_get(emntDefault) == 1, "TPI does not support OpenMP");
+ GMX_RELEASE_ASSERT(gmx_omp_nthreads_get(ModuleMultiThread::Default) == 1,
+ "TPI does not support OpenMP");
gmx_localtop_t top(top_global.ffparams);
gmx::ForceBuffers f;
double invbinw, *bin, refvolshift, logV, bUlogV;
gmx_bool bEnergyOutOfBounds;
const char* tpid_leg[2] = { "direct", "reweighted" };
- auto mdatoms = mdAtoms->mdatoms();
+ auto* mdatoms = mdAtoms->mdatoms();
GMX_UNUSED_VALUE(outputProvider);
auto x = as_rvec_array(statePropagatorData_->constPositionsView().paddedArrayRef().data());
auto v = as_rvec_array(statePropagatorData_->constVelocitiesView().paddedArrayRef().data());
- int nth = gmx_omp_nthreads_get(emntUpdate);
+ int nth = gmx_omp_nthreads_get(ModuleMultiThread::Update);
int homenr = mdAtoms_->mdatoms()->homenr;
#pragma omp parallel for num_threads(nth) schedule(static) default(none) shared(nth, homenr, x, xp, v)
const real lambda =
(numPositionScalingValues == NumPositionScalingValues::Single) ? positionScaling_[0] : 1.0;
- int nth = gmx_omp_nthreads_get(emntUpdate);
+ int nth = gmx_omp_nthreads_get(ModuleMultiThread::Update);
int homenr = mdAtoms_->mdatoms()->homenr;
#pragma omp parallel for num_threads(nth) schedule(static) default(none) shared(nth, homenr, x) \
const bool isFullScalingMatrixDiagonal =
diagonalizePRMatrix<parrinelloRahmanVelocityScaling>(matrixPR_, diagPR_);
- const int nth = gmx_omp_nthreads_get(emntUpdate);
+ const int nth = gmx_omp_nthreads_get(ModuleMultiThread::Update);
const int homenr = mdAtoms_->mdatoms()->homenr;
// const variables could be shared, but gcc-8 & gcc-9 don't agree how to write that...
const bool isFullScalingMatrixDiagonal =
diagonalizePRMatrix<parrinelloRahmanVelocityScaling>(matrixPR_, diagPR_);
- const int nth = gmx_omp_nthreads_get(emntUpdate);
+ const int nth = gmx_omp_nthreads_get(ModuleMultiThread::Update);
const int homenr = mdAtoms_->mdatoms()->homenr;
// const variables could be shared, but gcc-8 & gcc-9 don't agree how to write that...
const bool isFullScalingMatrixDiagonal =
diagonalizePRMatrix<parrinelloRahmanVelocityScaling>(matrixPR_, diagPR_);
- const int nth = gmx_omp_nthreads_get(emntUpdate);
+ const int nth = gmx_omp_nthreads_get(ModuleMultiThread::Update);
const int homenr = mdAtoms_->mdatoms()->homenr;
// const variables could be shared, but gcc-8 & gcc-9 don't agree how to write that...
? startVelocityScaling_[0]
: 1.0;
- const int nth = gmx_omp_nthreads_get(emntUpdate);
+ const int nth = gmx_omp_nthreads_get(ModuleMultiThread::Update);
const int homenr = mdAtoms_->mdatoms()->homenr;
// const variables could be shared, but gcc-8 & gcc-9 don't agree how to write that...
void StatePropagatorData::copyPosition()
{
- int nth = gmx_omp_nthreads_get(emntUpdate);
+ int nth = gmx_omp_nthreads_get(ModuleMultiThread::Update);
#pragma omp parallel for num_threads(nth) schedule(static) default(none) shared(nth)
for (int th = 0; th < nth; th++)
int gridEnd = 0;
getAtomRanges(gridSet, locality, &gridBegin, &gridEnd);
- const int nth = gmx_omp_nthreads_get(emntPairsearch);
+ const int nth = gmx_omp_nthreads_get(ModuleMultiThread::Pairsearch);
#pragma omp parallel for num_threads(nth) schedule(static)
for (int th = 0; th < nth; th++)
{
return;
}
- int nth = gmx_omp_nthreads_get(emntNonbonded);
+ int nth = gmx_omp_nthreads_get(ModuleMultiThread::Nonbonded);
if (nbat->out.size() > 1)
{
void bench(const int sizeFactor, const KernelBenchOptions& options)
{
// We don't want to call gmx_omp_nthreads_init(), so we init what we need
- gmx_omp_nthreads_set(emntPairsearch, options.numThreads);
- gmx_omp_nthreads_set(emntNonbonded, options.numThreads);
+ gmx_omp_nthreads_set(ModuleMultiThread::Pairsearch, options.numThreads);
+ gmx_omp_nthreads_set(ModuleMultiThread::Nonbonded, options.numThreads);
const gmx::BenchmarkSystem system(sizeFactor, options.outputFile);
srcAtomBegin_ = *atomRange.begin();
srcAtomEnd_ = *atomRange.end();
- const int nthread = gmx_omp_nthreads_get(emntPairsearch);
+ const int nthread = gmx_omp_nthreads_get(ModuleMultiThread::Pairsearch);
const int numAtomsPerCell = geometry_.numAtomsPerCell;
/* Make space for the new cell indices */
gridSetData_.cells.resize(*atomRange.end());
- const int nthread = gmx_omp_nthreads_get(emntPairsearch);
+ const int nthread = gmx_omp_nthreads_get(ModuleMultiThread::Pairsearch);
GMX_ASSERT(nthread > 0, "We expect the OpenMP thread count to be set");
#pragma omp parallel for num_threads(nthread) schedule(static)
gmx::ArrayRef<const NbnxnPairlistCpu> pairlists = pairlistSet.cpuLists();
- auto shiftVecPointer = as_rvec_array(shiftVectors.data());
+ const auto* shiftVecPointer = as_rvec_array(shiftVectors.data());
- int gmx_unused nthreads = gmx_omp_nthreads_get(emntNonbonded);
+ int gmx_unused nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Nonbonded);
wallcycle_sub_start(wcycle, WallCycleSubCounter::NonbondedClear);
#pragma omp parallel for schedule(static) num_threads(nthreads)
for (gmx::index nb = 0; nb < pairlists.ssize(); nb++)
gmx::ArrayRef<real> energygrp_elec = enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::CoulombSR];
gmx::ArrayRef<real> energygrp_vdw = enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::LJSR];
- GMX_ASSERT(gmx_omp_nthreads_get(emntNonbonded) == nbl_fep.ssize(),
+ GMX_ASSERT(gmx_omp_nthreads_get(ModuleMultiThread::Nonbonded) == nbl_fep.ssize(),
"Number of lists should be same as number of NB threads");
wallcycle_sub_start(wcycle_, WallCycleSubCounter::NonbondedFep);
*/
mimimumNumEnergyGroupNonbonded = 1;
}
- nbnxn_atomdata_init(mdlog,
- nbat.get(),
- kernelSetup.kernelType,
- enbnxninitcombrule,
- forcerec.ntype,
- forcerec.nbfp,
- mimimumNumEnergyGroupNonbonded,
- (useGpuForNonbonded || emulateGpu) ? 1 : gmx_omp_nthreads_get(emntNonbonded));
+ nbnxn_atomdata_init(
+ mdlog,
+ nbat.get(),
+ kernelSetup.kernelType,
+ enbnxninitcombrule,
+ forcerec.ntype,
+ forcerec.nbfp,
+ mimimumNumEnergyGroupNonbonded,
+ (useGpuForNonbonded || emulateGpu) ? 1 : gmx_omp_nthreads_get(ModuleMultiThread::Nonbonded));
NbnxmGpu* gpu_nbv = nullptr;
int minimumIlistCountForGpuBalancing = 0;
DOMAINDECOMP(commrec) ? domdec_zones(commrec->dd) : nullptr,
pairlistParams.pairlistType,
bFEP_NonBonded,
- gmx_omp_nthreads_get(emntPairsearch),
+ gmx_omp_nthreads_get(ModuleMultiThread::Pairsearch),
pinPolicy);
return std::make_unique<nonbonded_verlet_t>(
isCpuType_(!sc_isGpuPairListType[pairlistParams.pairlistType])
{
- const int numLists = gmx_omp_nthreads_get(emntNonbonded);
+ const int numLists = gmx_omp_nthreads_get(ModuleMultiThread::Nonbonded);
if (!combineLists_ && numLists > NBNXN_BUFFERFLAG_MAX_THREADS)
{
/* Each thread should copy its own data to the combined arrays,
* as otherwise data will go back and forth between different caches.
*/
- const int gmx_unused nthreads = gmx_omp_nthreads_get(emntPairsearch);
+ const int gmx_unused nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Pairsearch);
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (gmx::index n = 0; n < nbls.ssize(); n++)
const int nrj_target = (nrj_tot + numLists - 1) / numLists;
- GMX_ASSERT(gmx_omp_nthreads_get(emntNonbonded) == numLists,
+ GMX_ASSERT(gmx_omp_nthreads_get(ModuleMultiThread::Nonbonded) == numLists,
"We should have as many work objects as FEP lists");
#pragma omp parallel for schedule(static) num_threads(numLists)
GMX_ASSERT(cpuLists_[0].ciOuter.size() >= cpuLists_[0].ci.size(),
"Here we should either have an empty ci list or ciOuter should be >= ci");
- int gmx_unused nthreads = gmx_omp_nthreads_get(emntNonbonded);
+ int gmx_unused nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Nonbonded);
GMX_ASSERT(nthreads == static_cast<gmx::index>(cpuLists_.size()),
"The number of threads should match the number of lists");
#pragma omp parallel for schedule(static) num_threads(nthreads)
* We could use a different, local mutex for each pull object, but the overhead
* is extremely small here and registration is only done during initialization.
*/
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
static std::mutex registrationMutex;
using Lock = std::lock_guard<std::mutex>;
pull->params = *pull_params;
/* The gmx_omp_nthreads module might not be initialized here, so max(1,) */
- const int maxNumThreads = std::max(1, gmx_omp_nthreads_get(emntDefault));
+ const int maxNumThreads = std::max(1, gmx_omp_nthreads_get(ModuleMultiThread::Default));
for (int i = 0; i < pull_params->ngroup; ++i)
{
#include "gromacs/utility/pleasecite.h"
#include "gromacs/utility/smalloc.h"
-static char const* RotStr = { "Enforced rotation:" };
+static const std::string RotStr = { "Enforced rotation:" };
/* Set the minimum weight for the determination of the slab centers */
#define WEIGHT_MIN (10 * GMX_FLOAT_MIN)
}
if (count > er->mpi_bufsize)
{
- gmx_fatal(FARGS, "%s MPI buffer overflow, please report this error.", RotStr);
+ gmx_fatal(FARGS, "%s MPI buffer overflow, please report this error.", RotStr.c_str());
}
#if GMX_MPI
/* Check whether we have reference data to compare against */
if (erg->slab_first < erg->slab_first_ref)
{
- gmx_fatal(FARGS, "%s No reference data for first slab (n=%d), unable to proceed.", RotStr, erg->slab_first);
+ gmx_fatal(FARGS,
+ "%s No reference data for first slab (n=%d), unable to proceed.",
+ RotStr.c_str(),
+ erg->slab_first);
}
/* Check whether we have reference data to compare against */
if (erg->slab_last > erg->slab_last_ref)
{
- gmx_fatal(FARGS, "%s No reference data for last slab (n=%d), unable to proceed.", RotStr, erg->slab_last);
+ gmx_fatal(FARGS,
+ "%s No reference data for last slab (n=%d), unable to proceed.",
+ RotStr.c_str(),
+ erg->slab_last);
}
}
{
fprintf(fplog,
"%s allocating memory to store data for %d slabs (rotation group %d).\n",
- RotStr,
+ RotStr.c_str(),
nslabs,
erg->groupIndex);
}
if (MASTER(cr) && mdrunOptions.verbose)
{
- fprintf(stdout, "%s Initializing ...\n", RotStr);
+ fprintf(stdout, "%s Initializing ...\n", RotStr.c_str());
}
auto enforcedRotation = std::make_unique<gmx::EnforcedRotation>();
{
if (nullptr != fplog)
{
- fprintf(fplog, "%s rerun - will write rotation output every available step.\n", RotStr);
+ fprintf(fplog, "%s rerun - will write rotation output every available step.\n", RotStr.c_str());
}
er->nstrout = 1;
er->nstsout = 1;
{
fprintf(fplog,
"%s group %d type '%s'\n",
- RotStr,
+ RotStr.c_str(),
groupIndex,
enumValueToString(erg->rotg->eType));
}
--- /dev/null
+# List of rationales for check suppressions (where known).
+# These have to precede the list because inline comments are not
+# supported by clang-tidy.
+#
+# -cppcoreguidelines-non-private-member-variables-in-classes,
+# -misc-non-private-member-variables-in-classes,
+# We intend a gradual transition to conform to this guideline, but it
+# is not practical to implement yet.
+#
+# -readability-isolate-declaration,
+# Declarations like "int a, b;" are readable. Some forms are not, and
+# those might reasonably be suggested against during code review.
+#
+# -cppcoreguidelines-avoid-c-arrays,
+# C arrays are still necessary in many places with legacy code
+#
+# -cppcoreguidelines-avoid-magic-numbers,
+# -readability-magic-numbers,
+# We have many legitimate use cases for magic numbers
+#
+# -cppcoreguidelines-macro-usage,
+# We do use too many macros, and we should fix many of them, but there
+# is no reasonable way to suppress the check e.g. in src/config.h and
+# configuring the build is a major legitimate use of macros.
+#
+# -cppcoreguidelines-narrowing-conversions,
+# -bugprone-narrowing-conversions
+# We have many cases where int is converted to float and we don't care
+# enough about such potential loss of precision to use explicit casts
+# in large numbers of places.
+#
+# -google-readability-avoid-underscore-in-googletest-name
+# We need to use underscores for readability for our legacy types
+# and command-line parameter names
+#
+# -misc-no-recursion
+# We have way too many functions and methods relying on recursion
+#
+# -cppcoreguidelines-avoid-non-const-global-variables
+# There are quite a lot of static variables in the test code that
+# can not be replaced.
+#
+# -modernize-avoid-bind
+# Some code needs to use std::bind and can't be modernized quickly.
+Checks: clang-diagnostic-*,-clang-analyzer-*,-clang-analyzer-security.insecureAPI.strcpy,
+ bugprone-*,misc-*,readability-*,performance-*,mpi-*,
+ -readability-inconsistent-declaration-parameter-name,
+ -readability-function-size,-readability-else-after-return,
+ modernize-use-nullptr,modernize-use-emplace,
+ modernize-make-unique,modernize-make-shared,
+ modernize-avoid-bind,
+ modernize-use-override,
+ modernize-redundant-void-arg,modernize-use-bool-literals,
+ cppcoreguidelines-*,-cppcoreguidelines-pro-*,-cppcoreguidelines-owning-memory,
+ -cppcoreguidelines-no-malloc,-cppcoreguidelines-special-member-functions,
+ -cppcoreguidelines-avoid-goto,
+ google-*,-google-build-using-namespace,-google-explicit-constructor,
+ -google-readability-function-size,-google-readability-todo,-google-runtime-int,
+ -cppcoreguidelines-non-private-member-variables-in-classes,
+ -misc-non-private-member-variables-in-classes,
+ -readability-isolate-declaration,
+ -cppcoreguidelines-avoid-c-arrays,
+ -cppcoreguidelines-avoid-magic-numbers,
+ -readability-magic-numbers,
+ -cppcoreguidelines-macro-usage,
+ -cppcoreguidelines-narrowing-conversions,
+ -bugprone-narrowing-conversions,
+ -google-readability-avoid-underscore-in-googletest-name,
+ -cppcoreguidelines-init-variables,
+ -misc-no-recursion,
+ -cppcoreguidelines-avoid-non-const-global-variables,
+ -modernize-avoid-bind
+HeaderFilterRegex: .*
+CheckOptions:
+ - key: cppcoreguidelines-special-member-functions.AllowSoleDefaultDtor
+ value: 1
+ - key: modernize-make-unique.IncludeStyle
+ value: google
+ - key: modernize-make-shared.IncludeStyle
+ value: google
+ - key: readability-implicit-bool-conversion.AllowIntegerConditions
+ value: 1
+ - key: readability-implicit-bool-conversion.AllowPointerConditions
+ value: 1
+ - key: bugprone-dangling-handle.HandleClasses
+ value: std::basic_string_view; nonstd::sv_lite::basic_string_view
+# Permit passing shared pointers by value for sink parameters
+ - key: performance-unnecessary-copy-initialization.AllowedTypes
+ value: shared_ptr
+ - key: performance-unnecessary-value-param.AllowedTypes
+ value: shared_ptr
{
//! Helper variable to localise the text of an often repeated message.
-const char* g_specifyEverythingFormatString =
+const char* const g_specifyEverythingFormatString =
"When you use mdrun -gputasks, %s must be set to non-default "
"values, so that the device IDs can be interpreted correctly."
#if GMX_GPU
GMX_RELEASE_ASSERT(nthreads_omp_faster_default >= nthreads_omp_mpi_ok_max,
"Inconsistent OpenMP thread count default values");
}
- GMX_RELEASE_ASSERT(gmx_omp_nthreads_get(emntDefault) >= 1,
+ GMX_RELEASE_ASSERT(gmx_omp_nthreads_get(ModuleMultiThread::Default) >= 1,
"Must have at least one OpenMP thread");
- nth_omp_max = gmx_omp_nthreads_get(emntDefault);
+ nth_omp_max = gmx_omp_nthreads_get(ModuleMultiThread::Default);
bool anyRankIsUsingGpus = willUsePhysicalGpu;
/* Thread-MPI seems to have a bug with reduce on 1 node, so use a cond. */
--- /dev/null
+# List of rationales for check suppressions (where known).
+# These have to precede the list because inline comments are not
+# supported by clang-tidy.
+#
+# -cppcoreguidelines-non-private-member-variables-in-classes,
+# -misc-non-private-member-variables-in-classes,
+# We intend a gradual transition to conform to this guideline, but it
+# is not practical to implement yet.
+#
+# -readability-isolate-declaration,
+# Declarations like "int a, b;" are readable. Some forms are not, and
+# those might reasonably be suggested against during code review.
+#
+# -cppcoreguidelines-avoid-c-arrays,
+# C arrays are still necessary in many places with legacy code
+#
+# -cppcoreguidelines-avoid-magic-numbers,
+# -readability-magic-numbers,
+# We have many legitimate use cases for magic numbers
+#
+# -cppcoreguidelines-macro-usage,
+# We do use too many macros, and we should fix many of them, but there
+# is no reasonable way to suppress the check e.g. in src/config.h and
+# configuring the build is a major legitimate use of macros.
+#
+# -cppcoreguidelines-narrowing-conversions,
+# -bugprone-narrowing-conversions
+# We have many cases where int is converted to float and we don't care
+# enough about such potential loss of precision to use explicit casts
+# in large numbers of places.
+#
+# -google-readability-avoid-underscore-in-googletest-name
+# We need to use underscores for readability for our legacy types
+# and command-line parameter names
+#
+# -misc-no-recursion
+# We have way too many functions and methods relying on recursion
+#
+# -cppcoreguidelines-avoid-non-const-global-variables
+# There are quite a lot of static variables in the test code that
+# can not be replaced.
+#
+# -modernize-avoid-bind
+# Some code needs to use std::bind and can't be modernized quickly.
+Checks: clang-diagnostic-*,-clang-analyzer-*,-clang-analyzer-security.insecureAPI.strcpy,
+ bugprone-*,misc-*,readability-*,performance-*,mpi-*,
+ -readability-inconsistent-declaration-parameter-name,
+ -readability-function-size,-readability-else-after-return,
+ modernize-use-nullptr,modernize-use-emplace,
+ modernize-make-unique,modernize-make-shared,
+ modernize-avoid-bind,
+ modernize-use-override,
+ modernize-redundant-void-arg,modernize-use-bool-literals,
+ cppcoreguidelines-*,-cppcoreguidelines-pro-*,-cppcoreguidelines-owning-memory,
+ -cppcoreguidelines-no-malloc,-cppcoreguidelines-special-member-functions,
+ -cppcoreguidelines-avoid-goto,
+ google-*,-google-build-using-namespace,-google-explicit-constructor,
+ -google-readability-function-size,-google-readability-todo,-google-runtime-int,
+ -cppcoreguidelines-non-private-member-variables-in-classes,
+ -misc-non-private-member-variables-in-classes,
+ -readability-isolate-declaration,
+ -cppcoreguidelines-avoid-c-arrays,
+ -cppcoreguidelines-avoid-magic-numbers,
+ -readability-magic-numbers,
+ -cppcoreguidelines-macro-usage,
+ -cppcoreguidelines-narrowing-conversions,
+ -bugprone-narrowing-conversions,
+ -google-readability-avoid-underscore-in-googletest-name,
+ -cppcoreguidelines-init-variables,
+ -misc-no-recursion,
+ -cppcoreguidelines-avoid-non-const-global-variables,
+ -modernize-avoid-bind
+HeaderFilterRegex: .*
+CheckOptions:
+ - key: cppcoreguidelines-special-member-functions.AllowSoleDefaultDtor
+ value: 1
+ - key: modernize-make-unique.IncludeStyle
+ value: google
+ - key: modernize-make-shared.IncludeStyle
+ value: google
+ - key: readability-implicit-bool-conversion.AllowIntegerConditions
+ value: 1
+ - key: readability-implicit-bool-conversion.AllowPointerConditions
+ value: 1
+ - key: bugprone-dangling-handle.HandleClasses
+ value: std::basic_string_view; nonstd::sv_lite::basic_string_view
+# Permit passing shared pointers by value for sink parameters
+ - key: performance-unnecessary-copy-initialization.AllowedTypes
+ value: shared_ptr
+ - key: performance-unnecessary-value-param.AllowedTypes
+ value: shared_ptr