/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief Defines utility functionality for dividing resources and
 * checking for consistency and usefulness.
 *
 * \author Mark Abraham <mark.j.abraham@gmail.com>
 * \ingroup module_taskassignment
 */
#include "gmxpre.h"

#include "resourcedivision.h"

#include "config.h"

#include <cstdio>
#include <cstdlib>
#include <cstring>

#include <algorithm>
#include <string>
#include <vector>

#include "gromacs/ewald/pme.h"
#include "gromacs/hardware/cpuinfo.h"
#include "gromacs/hardware/detecthardware.h"
#include "gromacs/hardware/hardwaretopology.h"
#include "gromacs/hardware/hw_info.h"
#include "gromacs/math/functions.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdrunutility/multisim.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/topology/mtop_util.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/baseversion.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/physicalnodecommunicator.h"
#include "gromacs/utility/stringutil.h"

/* DISCLAIMER: All the atom count and thread numbers below are heuristic.
 * The real switching points will depend on the simulated system,
 * the algorithms used and the hardware it's running on, as well as on whether
 * other jobs are running on the same machine. We try to take into account
 * factors that have a large influence, such as recent Intel CPUs being
 * much better at wide multi-threading. The remaining factors should
 * (hopefully) have a small influence, such that the performance just before
 * and after a switch point doesn't change too much.
 */

/*! \brief The minimum number of atoms per thread-MPI thread when GPUs
 * are present. With fewer atoms than this, the number of thread-MPI
 * ranks will get lowered.
 */
static constexpr int min_atoms_per_mpi_thread = 90;
/*! \brief The minimum number of atoms per GPU with thread-MPI
 * active. With fewer atoms than this, the number of thread-MPI ranks
 * will get lowered.
 */
static constexpr int min_atoms_per_gpu = 900;
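
/* Illustration of the limits above (heuristic numbers, not hard guarantees):
 * for a 9000-atom system, the automated rank choice below allows at most
 * 9000 / min_atoms_per_mpi_thread = 100 thread-MPI ranks, or at most
 * 9000 / min_atoms_per_gpu = 10 ranks when the run uses GPUs.
 */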

/*! \brief Constants for implementing default divisions of threads */

/* TODO choose nthreads_omp based on hardware topology
   when we have a hardware topology detection library */
/* First we consider the case of no MPI (1 MPI rank).
 * In general, when running up to 8 threads, OpenMP should be faster.
 * Note: on AMD Bulldozer we should avoid running OpenMP over two dies.
 * On Intel >= Nehalem running OpenMP on a single CPU is always faster,
 * even on two CPUs it's usually faster (but with many OpenMP threads
 * it could be faster not to use HT; currently we always use HT).
 * On Nehalem/Westmere we want to avoid running 16 threads over
 * two CPUs with HT, so we need a limit < 16; thus we use 12.
 * A reasonable limit for Intel Sandy Bridge and Ivy Bridge,
 * not knowing the topology, is 16 threads.
 * Below we check for Intel and AVX, which for now includes
 * Sandy/Ivy Bridge and Haswell/Broadwell. By checking for AVX instead of
 * model numbers we ensure that future Intel CPUs are also covered.
 */
constexpr int nthreads_omp_faster_default   = 8;
constexpr int nthreads_omp_faster_Nehalem   = 12;
constexpr int nthreads_omp_faster_Intel_AVX = 16;
constexpr int nthreads_omp_faster_AMD_Ryzen = 16;
/* For CPU-only runs the fastest options are usually MPI or OpenMP only.
 * With one GPU, using MPI only is almost never optimal, so we need to
 * compare running pure OpenMP with combined MPI+OpenMP. This means higher
 * OpenMP thread counts can still be ok. Multiplying the numbers above
 * by a factor of 2 seems to be a good estimate.
 */
constexpr int nthreads_omp_faster_gpu_fac = 2;

/* This is the case with MPI (2 or more MPI PP ranks).
 * By default we will terminate with a fatal error when more than 8
 * OpenMP threads are (indirectly) requested, since using fewer threads
 * nearly always results in better performance.
 * With thread-MPI and multiple GPUs or one GPU and too many threads
 * we first try 6 OpenMP threads and then fewer until the number of MPI ranks
 * is divisible by the number of GPUs.
 */
constexpr int nthreads_omp_mpi_ok_max     = 8;
constexpr int nthreads_omp_mpi_ok_min_cpu = 1;
constexpr int nthreads_omp_mpi_ok_min_gpu = 2;
constexpr int nthreads_omp_mpi_target_max = 6;
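
/* Illustration of how these limits are used below (heuristic): with 2 or more
 * thread-MPI PP ranks, more than nthreads_omp_mpi_ok_max = 8 OpenMP threads per
 * rank can trigger a fatal error unless the thread count was set explicitly,
 * while the automated rank division aims for at most
 * nthreads_omp_mpi_target_max = 6 threads per rank (and, with GPUs, at least
 * nthreads_omp_mpi_ok_min_gpu = 2).
 */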

/*! \brief Returns the maximum OpenMP thread count for which using a single MPI rank
 * should be faster than using multiple ranks with the same total thread count.
 */
static int nthreads_omp_faster(const gmx::CpuInfo& cpuInfo, gmx_bool bUseGPU)
{
    int nth;
    if (cpuInfo.vendor() == gmx::CpuInfo::Vendor::Intel && cpuInfo.feature(gmx::CpuInfo::Feature::X86_Avx))
    {
        nth = nthreads_omp_faster_Intel_AVX;
    }
    else if (gmx::cpuIsX86Nehalem(cpuInfo))
    {
        nth = nthreads_omp_faster_Nehalem;
    }
    else if ((cpuInfo.vendor() == gmx::CpuInfo::Vendor::Amd && cpuInfo.family() >= 23)
             || cpuInfo.vendor() == gmx::CpuInfo::Vendor::Hygon)
    {
        // AMD Ryzen || Hygon Dhyana
        nth = nthreads_omp_faster_AMD_Ryzen;
    }
    else
    {
        nth = nthreads_omp_faster_default;
    }
    if (bUseGPU)
    {
        nth *= nthreads_omp_faster_gpu_fac;
    }
    return std::min(nth, GMX_OPENMP_MAX_THREADS);
}

/*! \brief Returns the maximum OpenMP thread count that passes the efficiency check */
gmx_unused static int nthreads_omp_efficient_max(int gmx_unused nrank, const gmx::CpuInfo& cpuInfo, gmx_bool bUseGPU)
{
    if (GMX_OPENMP && GMX_MPI && (nrank > 1))
    {
        return nthreads_omp_mpi_ok_max;
    }
    return nthreads_omp_faster(cpuInfo, bUseGPU);
}

/*! \brief Return the number of thread-MPI ranks to use.
 * This is chosen such that we can always obey our own efficiency checks.
 */
gmx_unused static int get_tmpi_omp_thread_division(const gmx_hw_info_t* hwinfo,
                                                   const gmx_hw_opt_t&  hw_opt,
                                                   int                  nthreads_tot,
                                                   int                  ngpu)
const gmx::CpuInfo& cpuInfo = *hwinfo->cpuInfo;

GMX_RELEASE_ASSERT(nthreads_tot > 0, "There must be at least one thread per rank");

/* There are no separate PME nodes here, as we ensured in
 * check_and_update_hw_opt that nthreads_tmpi>0 with PME nodes
 * and a conditional ensures we would not have ended up here.
 * Note that separate PME nodes might be switched on later.
 */

if (hw_opt.nthreads_omp > 0)
/* In this case it is unclear if we should use 1 rank per GPU
 * or more or fewer, so we require also setting the number of ranks.
 */
gmx_fatal(FARGS,
          "When using GPUs, setting the number of OpenMP threads without specifying the number "
          "of ranks can lead to conflicting demands. Please specify the number of thread-MPI "
          "ranks as well (option -ntmpi).");

/* When the user sets nthreads_omp, we can end up oversubscribing CPU cores
 * if we simply start as many ranks as GPUs. To avoid this, we start as few
 * tMPI ranks as necessary to avoid oversubscription and instead leave GPUs idle.
 * If the user does not set the number of OpenMP threads, nthreads_omp==0 and
 * this code has no effect.
 */
GMX_RELEASE_ASSERT(hw_opt.nthreads_omp >= 0,
                   "nthreads_omp is negative, but previous checks should "
                   "have prevented this");
while (nrank * hw_opt.nthreads_omp > hwinfo->nthreads_hw_avail && nrank > 1)

if (nthreads_tot < nrank)
/* #thread < #gpu is very unlikely, but if so: waste gpu(s) */
nrank = nthreads_tot;
else if (nthreads_tot > nthreads_omp_faster(cpuInfo, ngpu > 0)
         || (ngpu > 1 && nthreads_tot / ngpu > nthreads_omp_mpi_target_max))
/* The high OpenMP thread count will likely result in sub-optimal
 * performance. Increase the rank count to reduce the thread count
 * per rank. This will lead to GPU sharing by MPI ranks/threads.
 */

/* Increase the rank count as long as we have more than 6 OpenMP
 * threads per rank or the number of hardware threads is not
 * divisible by the rank count. Don't go below 2 OpenMP threads.
 */
nrank = ngpu * nshare;
} while (nthreads_tot / nrank > nthreads_omp_mpi_target_max
         || (nthreads_tot / (ngpu * (nshare + 1)) >= nthreads_omp_mpi_ok_min_gpu
             && nthreads_tot % nrank != 0));
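/* Worked example of the loop above (illustrative numbers only): with
 * nthreads_tot = 24, ngpu = 1 and nthreads_omp_mpi_target_max = 6, the share
 * count nshare is increased until nrank = ngpu * nshare = 4, at which point
 * 24/4 = 6 threads per rank no longer exceeds the target and 24 is divisible
 * by the rank count, so 4 ranks share the single GPU.
 */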
else if (hw_opt.nthreads_omp > 0)
/* Here we could oversubscribe; when we do, we issue a warning later */
nrank = std::max(1, nthreads_tot / hw_opt.nthreads_omp);

if (nthreads_tot <= nthreads_omp_faster(cpuInfo, ngpu > 0))
/* Use pure OpenMP parallelization */
nrank = 1;
else
/* Don't use OpenMP parallelization */
nrank = nthreads_tot;

return nrank;

//! Return whether hyperthreading is enabled.
static bool gmxSmtIsEnabled(const gmx::HardwareTopology& hwTop)
{
    return (hwTop.supportLevel() >= gmx::HardwareTopology::SupportLevel::Basic
            && hwTop.machine().sockets[0].cores[0].hwThreads.size() > 1);
}

//! Handles checks for algorithms that must use a single rank.
class SingleRankChecker
{
public:
    SingleRankChecker() : value_(false) {}
    /*! \brief Call this function for each possible condition
        under which a single rank is required, along with a string
        describing the constraint when it is applied. */
    void applyConstraint(bool condition, const char* description)
    {
        if (condition)
        {
            value_ = true;
            reasons_.push_back(gmx::formatString("%s only supports a single rank.", description));
        }
    }
    //! After applying any conditions, is a single rank required?
    bool mustUseOneRank() const { return value_; }
    /*! \brief Return a formatted string to use when writing a
        message when a single rank is required (or empty if no
        constraint exists). */
    std::string getMessage() const
    {
        return formatAndJoin(reasons_, "\n", gmx::IdentityFormatter());
    }

private:
    bool                     value_;
    std::vector<std::string> reasons_;
};

/* Get the number of MPI ranks to use for thread-MPI based on how many
 * were requested, which algorithms we're using,
 * and how many particles there are.
 * At this point we have already called check_and_update_hw_opt.
 * Thus all options should be internally consistent and consistent
 * with the hardware, except that ntmpi could be larger than the number of GPUs.
 */
int get_nthreads_mpi(const gmx_hw_info_t*    hwinfo,
                     gmx_hw_opt_t*           hw_opt,
                     const std::vector<int>& gpuIdsToUse,
                     bool                    nonbondedOnGpu,
                     const t_inputrec*       inputrec,
                     const gmx_mtop_t*       mtop,
                     const gmx::MDLogger&    mdlog,
                     bool                    doMembed)
int nthreads_hw, nthreads_tot_max, nrank, ngpu;
int min_atoms_per_mpi_rank;

const gmx::CpuInfo&          cpuInfo = *hwinfo->cpuInfo;
const gmx::HardwareTopology& hwTop   = *hwinfo->hardwareTopology;

GMX_RELEASE_ASSERT((EEL_PME(inputrec->coulombtype) || EVDW_PME(inputrec->vdwtype))
                           && pme_gpu_supports_build(nullptr)
                           && pme_gpu_supports_hardware(*hwinfo, nullptr)
                           && pme_gpu_supports_input(*inputrec, *mtop, nullptr),
                   "PME can't be on GPUs unless we are using PME");

// PME on GPUs supports a single PME rank with PP running on the same or a few other ranks.
// For now, let's treat a separate PME GPU rank as opt-in.
if (hw_opt->nthreads_tmpi < 1)
return 1;

/* Check if an algorithm does not support parallel simulation. */
// TODO This might work better if e.g. implemented algorithms
// had to define a function that returns such requirements,
// and a description string.
SingleRankChecker checker;
checker.applyConstraint(inputrec->eI == eiLBFGS, "L-BFGS minimization");
checker.applyConstraint(inputrec->coulombtype == eelEWALD, "Plain Ewald electrostatics");
checker.applyConstraint(doMembed, "Membrane embedding");
bool useOrientationRestraints = (gmx_mtop_ftype_count(mtop, F_ORIRES) > 0);
checker.applyConstraint(useOrientationRestraints, "Orientation restraints");
if (checker.mustUseOneRank())

std::string message = checker.getMessage();
if (hw_opt->nthreads_tmpi > 1)
gmx_fatal(FARGS,
          "%s However, you asked for more than 1 thread-MPI rank, so mdrun cannot "
          "continue. Choose a single rank, or a different algorithm.",
          message.c_str());

GMX_LOG(mdlog.warning)
        .asParagraph()
        .appendTextFormatted("%s Choosing to use only a single thread-MPI rank.",
                             message.c_str());

if (hw_opt->nthreads_tmpi > 0)
/* Trivial, return the user's choice right away */
return hw_opt->nthreads_tmpi;

// Now implement automatic selection of the number of thread-MPI ranks
nthreads_hw = hwinfo->nthreads_hw_avail;

if (nthreads_hw <= 0)
/* This should normally not happen, but if it does, we handle it */
gmx_fatal(FARGS,
          "The number of available hardware threads cannot be detected, please specify "
          "the number of MPI ranks and the number of OpenMP threads (if supported) manually "
          "with options -ntmpi and -ntomp, respectively");

/* How many total (#tMPI*#OpenMP) threads can we start? */
if (hw_opt->nthreads_tot > 0)
nthreads_tot_max = hw_opt->nthreads_tot;
else
nthreads_tot_max = nthreads_hw;

/* nonbondedOnGpu might be false e.g. because this simulation
 * is a rerun with energy groups. */
ngpu = (nonbondedOnGpu ? gmx::ssize(gpuIdsToUse) : 0);

nrank = get_tmpi_omp_thread_division(hwinfo, *hw_opt, nthreads_tot_max, ngpu);

if (inputrec->eI == eiNM || EI_TPI(inputrec->eI))
/* Dims/steps are divided over the nodes instead of splitting the atoms.
 * With NM we can't have more ranks than #atoms*#dim. With TPI it's
 * unlikely we have fewer atoms than ranks, and if so, communication
 * would become a bottleneck, so we set the limit to 1 atom/rank.
 */
min_atoms_per_mpi_rank = 1;
else if (ngpu >= 1)
min_atoms_per_mpi_rank = min_atoms_per_gpu;
else
min_atoms_per_mpi_rank = min_atoms_per_mpi_thread;

if (mtop->natoms / nrank < min_atoms_per_mpi_rank)
int nrank_new;
/* The rank number was chosen automatically, but there are too few
 * atoms per rank, so we need to reduce the rank count.
 */
nrank_new = std::max(1, mtop->natoms / min_atoms_per_mpi_rank);

/* Avoid partial use of Hyper-Threading */
if (gmxSmtIsEnabled(hwTop) && nrank_new > nthreads_hw / 2 && nrank_new < nthreads_hw)
nrank_new = nthreads_hw / 2;
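/* Example of the clamping above (assuming SMT provides 2 hardware threads per
 * core): with nthreads_hw = 20, any nrank_new in the range 11..19 would use the
 * second hardware thread on only some cores, so it is reduced to 10, i.e. one
 * rank per physical core.
 */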

/* If the user specified the total thread count, ensure this is
 * divisible by the number of ranks.
 * It is quite likely that we have too many total threads compared
 * to the size of the system, but if the user asked for this many
 * threads we should respect that.
 */
while (hw_opt->nthreads_tot > 0 && hw_opt->nthreads_tot % nrank_new != 0)
nrank_new--;

/* Avoid large prime numbers in the rank count */
/* Use only 6, 8 or 10 with additional factors of 2 */
int fac = 2;
while (3 * fac * 2 <= nrank_new)
fac *= 2;

nrank_new = (nrank_new / fac) * fac;
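/* Example of the rounding above (illustrative): nrank_new = 11 keeps fac = 2 and
 * rounds down to 10 ranks; nrank_new = 26 grows fac to 8 and rounds down to 24,
 * so the automated choice avoids rank counts that are large primes.
 */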
/* Avoid 5, since a small system won't fit 5 domains along
 * a dimension. This might lead to wasting some cores, but it
 * will have a small impact in this regime of very small systems.
 */

/* We reduced the number of tMPI ranks, which means we might violate
 * our own efficiency checks if we simply use all hardware threads.
 */
if (GMX_OPENMP && hw_opt->nthreads_omp <= 0 && hw_opt->nthreads_tot <= 0)
/* The user set neither the total nor the OpenMP thread count,
 * we should use all hardware threads, unless we will violate
 * our own efficiency limitation on the thread count.
 */
int nt_omp_max;

nt_omp_max = nthreads_omp_efficient_max(nrank, cpuInfo, ngpu >= 1);

if (nrank * nt_omp_max < hwinfo->nthreads_hw_avail)
/* Limit the number of OpenMP threads to start */
hw_opt->nthreads_omp = nt_omp_max;

fprintf(stderr, "\n");
fprintf(stderr, "NOTE: Parallelization is limited by the small number of atoms,\n");
fprintf(stderr, "      only starting %d thread-MPI ranks.\n", nrank);
fprintf(stderr,
        "      You can use the -nt and/or -ntmpi option to optimize the number of "
        "threads.\n");

return nrank;

void check_resource_division_efficiency(const gmx_hw_info_t* hwinfo,
                                        bool                 willUsePhysicalGpu,
                                        gmx_bool             bNtOmpOptionSet,
                                        t_commrec*           cr,
                                        const gmx::MDLogger& mdlog)
#if GMX_OPENMP && GMX_MPI
GMX_UNUSED_VALUE(hwinfo);

int         nth_omp_min, nth_omp_max;
const char* mpi_option = GMX_THREAD_MPI ? " (option -ntmpi)" : "";

/* This function should be called after thread-MPI (when configured) and
 * OpenMP have been initialized. Check that here.
 */
GMX_RELEASE_ASSERT(nthreads_omp_faster_default >= nthreads_omp_mpi_ok_max,
                   "Inconsistent OpenMP thread count default values");
GMX_RELEASE_ASSERT(gmx_omp_nthreads_get(emntDefault) >= 1,
                   "Must have at least one OpenMP thread");

nth_omp_min = gmx_omp_nthreads_get(emntDefault);
nth_omp_max = gmx_omp_nthreads_get(emntDefault);

bool anyRankIsUsingGpus = willUsePhysicalGpu;
/* Thread-MPI seems to have a bug with reduce on 1 node, so use a conditional */
int count[3], count_max[3];

count[0] = -nth_omp_min;
count[1] = nth_omp_max;
count[2] = int(willUsePhysicalGpu);
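/* Note the sign trick above: reducing the negated minimum with MPI_MAX yields the
 * minimum over all ranks, so one Allreduce collects the minimum and maximum thread
 * counts and whether any rank uses a GPU.
 */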
MPI_Allreduce(count, count_max, 3, MPI_INT, MPI_MAX, cr->mpi_comm_mysim);

/* In case of an inhomogeneous run setup we use the maximum counts */
nth_omp_min        = -count_max[0];
nth_omp_max        = count_max[1];
anyRankIsUsingGpus = count_max[2] > 0;

int nthreads_omp_mpi_ok_min;

if (!anyRankIsUsingGpus)
nthreads_omp_mpi_ok_min = nthreads_omp_mpi_ok_min_cpu;
else
/* With GPUs we set the minimum number of OpenMP threads to 2 to catch
 * cases where the user specifies #ranks == #cores.
 */
nthreads_omp_mpi_ok_min = nthreads_omp_mpi_ok_min_gpu;

if (DOMAINDECOMP(cr))
if (nth_omp_max < nthreads_omp_mpi_ok_min || nth_omp_max > nthreads_omp_mpi_ok_max)
/* Note that we print target_max here, not ok_max */
sprintf(buf,
        "Your choice of number of MPI ranks and amount of resources results in using %d "
        "OpenMP threads per rank, which is most likely inefficient. The optimum is usually "
        "between %d and %d threads per rank.",
        nth_omp_max, nthreads_omp_mpi_ok_min, nthreads_omp_mpi_target_max);

if (bNtOmpOptionSet)
GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted("NOTE: %s", buf);
else
/* This fatal error, and the one below, is nasty, but it's
 * probably the only way to ensure that all users don't waste
 * a lot of resources, since many users don't read logs/stderr.
 */
gmx_fatal(FARGS,
          "%s If you want to run with this setup, specify the -ntomp option. But we suggest "
          "changing the number of MPI ranks%s.",
          buf, mpi_option);

#else // !GMX_OPENMP || ! GMX_MPI
GMX_UNUSED_VALUE(bNtOmpOptionSet);
GMX_UNUSED_VALUE(willUsePhysicalGpu);
GMX_UNUSED_VALUE(cr);
GMX_UNUSED_VALUE(nthreads_omp_mpi_ok_max);
GMX_UNUSED_VALUE(nthreads_omp_mpi_ok_min_cpu);
/* Check if we have more than 1 physical core, if detected,
 * or more than 1 hardware thread if physical cores were not detected.
 */
if (!GMX_OPENMP && !GMX_MPI && hwinfo->hardwareTopology->numberOfCores() > 1)
GMX_LOG(mdlog.warning)
        .asParagraph()
        .appendText(
                "NOTE: GROMACS was compiled without OpenMP and (thread-)MPI support, can "
                "only use a single CPU core");
#endif // end GMX_OPENMP && GMX_MPI

//! Dump a \c hw_opt to \c fp.
static void print_hw_opt(FILE* fp, const gmx_hw_opt_t* hw_opt)
{
    fprintf(fp, "hw_opt: nt %d ntmpi %d ntomp %d ntomp_pme %d gpu_id '%s' gputasks '%s'\n",
            hw_opt->nthreads_tot, hw_opt->nthreads_tmpi, hw_opt->nthreads_omp, hw_opt->nthreads_omp_pme,
            hw_opt->gpuIdsAvailable.c_str(), hw_opt->userGpuTaskAssignment.c_str());
}

void checkAndUpdateHardwareOptions(const gmx::MDLogger& mdlog,
                                   gmx_hw_opt_t*        hw_opt,
                                   const bool           isSimulationMasterRank,
                                   const int            nPmeRanks,
                                   const t_inputrec*    inputrec)
/* Currently hw_opt only contains default settings or settings supplied
 * by the user on the command line.
 */
if (hw_opt->nthreads_omp < 0)
gmx_fatal(FARGS,
          "The number of OpenMP threads supplied on the command line is %d, which is "
          "negative and not supported",
          hw_opt->nthreads_omp);

/* Check for OpenMP settings stored in environment variables, which can
 * potentially be different on different MPI ranks.
 */
gmx_omp_nthreads_read_env(mdlog, &hw_opt->nthreads_omp);

/* Check restrictions on the user-supplied options before modifying them.
 * TODO: Put the user values in a const struct and preserve them.
 */
if (hw_opt->nthreads_tot > 0)
gmx_fatal(FARGS,
          "Setting the total number of threads is only supported with thread-MPI, and GROMACS "
          "was compiled without thread-MPI");

if (hw_opt->nthreads_tmpi > 0)
gmx_fatal(FARGS,
          "Setting the number of thread-MPI ranks is only supported with thread-MPI, and "
          "GROMACS was compiled without thread-MPI");

/* With thread-MPI we need to handle TPI and #OpenMP-threads=auto early,
 * so we can parallelize using MPI only. The general check is done later.
 */
if (GMX_THREAD_MPI && isSimulationMasterRank)
GMX_RELEASE_ASSERT(inputrec, "Expect a valid inputrec");
if (EI_TPI(inputrec->eI) && hw_opt->nthreads_omp == 0)
hw_opt->nthreads_omp = 1;

/* With thread-MPI the master thread sets hw_opt->totNumThreadsIsAuto.
 * The other threads receive a partially processed hw_opt from the master
 * thread and should not set hw_opt->totNumThreadsIsAuto again.
 */
if (!GMX_THREAD_MPI || isSimulationMasterRank)
/* Check if mdrun is free to choose the total number of threads */
hw_opt->totNumThreadsIsAuto = (hw_opt->nthreads_omp == 0 && hw_opt->nthreads_omp_pme == 0
                               && hw_opt->nthreads_tot == 0);

/* Check restrictions on PME thread related options set by the user */

if (hw_opt->nthreads_omp_pme > 0 && hw_opt->nthreads_omp <= 0)
gmx_fatal(FARGS, "You need to specify -ntomp in addition to -ntomp_pme");

if (hw_opt->nthreads_omp_pme >= 1 && hw_opt->nthreads_omp_pme != hw_opt->nthreads_omp)
/* This can result in a fatal error on many MPI ranks,
 * but since the thread count can differ per rank,
 * we can't easily avoid this.
 */
gmx_fatal(FARGS,
          "You need to explicitly specify the number of PME ranks (-npme) when using "
          "different numbers of OpenMP threads for PP and PME ranks");

/* GROMACS was configured without OpenMP support */
if (hw_opt->nthreads_omp > 1 || hw_opt->nthreads_omp_pme > 1)
gmx_fatal(FARGS,
          "More than 1 OpenMP thread requested, but GROMACS was compiled without "
          "OpenMP support");
hw_opt->nthreads_omp     = 1;
hw_opt->nthreads_omp_pme = 1;

if (hw_opt->nthreads_tot > 0 && hw_opt->nthreads_omp_pme <= 0)
/* We have the same number of OpenMP threads for PP and PME ranks,
 * thus we can perform several consistency checks.
 */
if (hw_opt->nthreads_tmpi > 0 && hw_opt->nthreads_omp > 0
    && hw_opt->nthreads_tot != hw_opt->nthreads_tmpi * hw_opt->nthreads_omp)
gmx_fatal(FARGS,
          "The total number of threads requested (%d) does not match the thread-MPI "
          "ranks (%d) times the OpenMP threads (%d) requested",
          hw_opt->nthreads_tot, hw_opt->nthreads_tmpi, hw_opt->nthreads_omp);

if (hw_opt->nthreads_tmpi > 0 && hw_opt->nthreads_tot % hw_opt->nthreads_tmpi != 0)
gmx_fatal(FARGS,
          "The total number of threads requested (%d) is not divisible by the number "
          "of thread-MPI ranks requested (%d)",
          hw_opt->nthreads_tot, hw_opt->nthreads_tmpi);

if (hw_opt->nthreads_omp > 0 && hw_opt->nthreads_tot % hw_opt->nthreads_omp != 0)
gmx_fatal(FARGS,
          "The total number of threads requested (%d) is not divisible by the number "
          "of OpenMP threads requested (%d)",
          hw_opt->nthreads_tot, hw_opt->nthreads_omp);

if (hw_opt->nthreads_tot > 0)
if (hw_opt->nthreads_omp > hw_opt->nthreads_tot)
gmx_fatal(FARGS,
          "You requested %d OpenMP threads with %d total threads. Choose a total "
          "number of threads that is a multiple of the number of OpenMP threads.",
          hw_opt->nthreads_omp, hw_opt->nthreads_tot);

if (hw_opt->nthreads_tmpi > hw_opt->nthreads_tot)
gmx_fatal(FARGS,
          "You requested %d thread-MPI ranks with %d total threads. Choose a total "
          "number of threads that is a multiple of the number of thread-MPI ranks.",
          hw_opt->nthreads_tmpi, hw_opt->nthreads_tot);

if (GMX_THREAD_MPI && nPmeRanks > 0 && hw_opt->nthreads_tmpi <= 0)
gmx_fatal(FARGS,
          "You need to explicitly specify the number of MPI threads (-ntmpi) when using "
          "separate PME ranks");

if (debug)
print_hw_opt(debug, hw_opt);

/* Asserting this simplifies the hardware resource division later on. */
GMX_RELEASE_ASSERT(
        !(hw_opt->nthreads_omp_pme >= 1 && hw_opt->nthreads_omp <= 0),
        "PME thread count should only be set when the normal thread count is also set");

void checkAndUpdateRequestedNumOpenmpThreads(gmx_hw_opt_t*         hw_opt,
                                             const gmx_hw_info_t&  hwinfo,
                                             const t_commrec*      cr,
                                             const gmx_multisim_t* ms,
                                             int                   numRanksOnThisNode,
                                             PmeRunMode            pmeRunMode,
                                             const gmx_mtop_t&     mtop,
                                             const t_inputrec&     inputrec)
if (EI_TPI(inputrec.eI))
if (hw_opt->nthreads_omp > 1)
gmx_fatal(FARGS, "You requested OpenMP parallelization, which is not supported with TPI.");
hw_opt->nthreads_omp = 1;

GMX_RELEASE_ASSERT(hw_opt->nthreads_tmpi >= 1, "Must have at least one thread-MPI rank");

/* If the user set the total number of threads on the command line
 * and did not specify the number of OpenMP threads, set the latter here.
 */
if (hw_opt->nthreads_tot > 0 && hw_opt->nthreads_omp <= 0)
hw_opt->nthreads_omp = hw_opt->nthreads_tot / hw_opt->nthreads_tmpi;

if (!GMX_OPENMP && hw_opt->nthreads_omp > 1)
gmx_fatal(FARGS,
          "You (indirectly) asked for OpenMP threads by setting -nt > -ntmpi, but GROMACS was "
          "compiled without OpenMP support");

/* With both non-bonded and PME on GPU, the work left on the CPU is often
 * (much) slower with SMT than without SMT. This is mostly the case with
 * few atoms per core. Thus, if the number of threads is set to auto,
 * we turn off SMT in that case. Note that PME on GPU implies that also
 * the non-bonded interactions are computed on the GPU.
 * We only need to do this when the number of hardware threads is larger
 * than the number of cores. Note that a queuing system could limit
 * the number of hardware threads available, but we are not trying to be
 * too smart here in that case.
 */
/* The thread reduction and synchronization costs go up roughly quadratically
 * with the thread count, so we apply a threshold quadratic in #cores.
 * Also more cores per GPU usually means the CPU gets faster than the GPU.
 * A threshold of 1000 atoms per core^2 is reasonable
 * for Intel x86 and AMD Threadripper.
 */
constexpr int c_numAtomsPerCoreSquaredSmtThreshold = 1000;
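
/* For example (illustrative only): a single rank on a 16-core CPU gives a threshold
 * of 1000 * 16^2 = 256000 atoms; with fewer atoms than that and both non-bonded and
 * PME on the GPU, the automated choice below uses one OpenMP thread per physical
 * core instead of one per hardware thread.
 */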

/* Prepare conditions for deciding if we should disable SMT.
 * We currently only limit SMT for simulations using a single rank.
 * TODO: Consider limiting also for multi-rank simulations.
 */
bool canChooseNumOpenmpThreads = (GMX_OPENMP && hw_opt->nthreads_omp <= 0);
bool haveSmtSupport =
        (hwinfo.hardwareTopology->supportLevel() >= gmx::HardwareTopology::SupportLevel::Basic
         && hwinfo.hardwareTopology->machine().logicalProcessorCount
                    > hwinfo.hardwareTopology->numberOfCores());
bool simRunsSingleRankNBAndPmeOnGpu = (cr->nnodes == 1 && pmeRunMode == PmeRunMode::GPU);

if (canChooseNumOpenmpThreads && haveSmtSupport && simRunsSingleRankNBAndPmeOnGpu)
/* Note that the queuing system might have limited us from using
 * all detected ncore_tot physical cores. We are currently not
 * checking for that here.
 */
int numRanksTot     = cr->nnodes * (isMultiSim(ms) ? ms->nsim : 1);
int numAtomsPerRank = mtop.natoms / cr->nnodes;
int numCoresPerRank = hwinfo.ncore_tot / numRanksTot;
if (numAtomsPerRank < c_numAtomsPerCoreSquaredSmtThreshold * gmx::square(numCoresPerRank))
/* Choose one OpenMP thread per physical core */
hw_opt->nthreads_omp =
        std::max(1, hwinfo.hardwareTopology->numberOfCores() / numRanksOnThisNode);

GMX_RELEASE_ASSERT(GMX_OPENMP || hw_opt->nthreads_omp == 1,
                   "Without OpenMP support, only one thread per rank can be used");

/* We are done with updating nthreads_omp, we can set nthreads_omp_pme */
if (hw_opt->nthreads_omp_pme <= 0 && hw_opt->nthreads_omp > 0)
hw_opt->nthreads_omp_pme = hw_opt->nthreads_omp;

if (debug)
print_hw_opt(debug, hw_opt);

void checkHardwareOversubscription(int                             numThreadsOnThisRank,
                                   int                             rank,
                                   const HardwareTopology&         hwTop,
                                   const PhysicalNodeCommunicator& comm,
                                   const MDLogger&                 mdlog)
if (hwTop.supportLevel() < HardwareTopology::SupportLevel::LogicalProcessorCount)
/* There is nothing we can check */
return;

int numRanksOnThisNode   = comm.size_;
int numThreadsOnThisNode = numThreadsOnThisRank;
/* Avoid MPI calls with uninitialized thread-MPI communicators */

/* Count the threads within this physical node */
MPI_Allreduce(&numThreadsOnThisRank, &numThreadsOnThisNode, 1, MPI_INT, MPI_SUM, comm.comm_);

if (numThreadsOnThisNode > hwTop.machine().logicalProcessorCount)
std::string mesg = "WARNING: ";

mesg += formatString("On rank %d: o", rank);

mesg += formatString("versubscribing the available %d logical CPU cores",
                     hwTop.machine().logicalProcessorCount);

mesg += formatString(" with %d ", numThreadsOnThisNode);
if (numRanksOnThisNode == numThreadsOnThisNode)

mesg += "thread-MPI threads.";

mesg += "MPI processes.";

mesg += "\n This will cause considerable performance loss.";
/* Note that only the master rank logs to stderr and only ranks
 * with an open log file write to log.
 * TODO: When we have a proper parallel logging framework,
 * the framework should add the rank and node numbers.
 */
GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted("%s", mesg.c_str());