GMX_ASSERT(f.size() >= forceToAdd.size(), "Accumulation buffer should be sufficiently large");
const int end = forceToAdd.size();
- int gmx_unused nt = gmx_omp_nthreads_get(emntDefault);
+ int gmx_unused nt = gmx_omp_nthreads_get(ModuleMultiThread::Default);
#pragma omp parallel for num_threads(nt) schedule(static)
for (int i = 0; i < end; i++)
{
@@ ... @@
static inline void clearRVecs(ArrayRef<RVec> v, const bool useOpenmpThreading)
{
- int nth = gmx_omp_nthreads_get_simple_rvec_task(emntDefault, v.ssize());
+ int nth = gmx_omp_nthreads_get_simple_rvec_task(ModuleMultiThread::Default, v.ssize());
/* Note that we would like to avoid this conditional by putting it
* into the omp pragma instead, but then we still take the full
@@ ... @@
ArrayRef<RVec> forceMts,
const real mtsFactor)
{
- const int gmx_unused numThreads = gmx_omp_nthreads_get(emntDefault);
+ const int gmx_unused numThreads = gmx_omp_nthreads_get(ModuleMultiThread::Default);
#pragma omp parallel for num_threads(numThreads) schedule(static)
for (int i = 0; i < numAtoms; i++)
{
@@ ... @@
put_atoms_in_box_omp(fr->pbcType,
box,
x.unpaddedArrayRef().subArray(0, mdatoms->homenr),
- gmx_omp_nthreads_get(emntDefault));
+ gmx_omp_nthreads_get(ModuleMultiThread::Default));
inc_nrnb(nrnb, eNR_SHIFTX, mdatoms->homenr);
}
}
@@ ... @@
const bool reinitGpuPmePpComms =
GMX_MPI && simulationWork.useGpuPmePpCommunication && (stepWork.doNeighborSearch);
- const auto localXReadyOnDevice = (useGpuPmeOnThisRank || simulationWork.useGpuBufferOps)
- ? stateGpu->getCoordinatesReadyOnDeviceEvent(
- AtomLocality::Local, simulationWork, stepWork)
- : nullptr;
+ auto* localXReadyOnDevice = (useGpuPmeOnThisRank || simulationWork.useGpuBufferOps)
+ ? stateGpu->getCoordinatesReadyOnDeviceEvent(
+ AtomLocality::Local, simulationWork, stepWork)
+ : nullptr;
// Copy coordinates from the GPU if the update is on the GPU and there
// are forces to be computed on the CPU, or for the computation of