1 /* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
4 * This source code is part of
8 * GROningen MAchine for Chemical Simulations
10 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
11 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
12 * Copyright (c) 2001-2010, The GROMACS development team,
13 * check out http://www.gromacs.org for more information.
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version 2
18 * of the License, or (at your option) any later version.
20 * If you want to redistribute modifications, please consider that
21 * scientific software is very special. Version control is crucial -
22 * bugs must be traceable. We will be happy to consider code for
23 * inclusion in the official distribution, but derived work must not
24 * be called official GROMACS. Details are found in the README & COPYING
25 * files - if they are missing, get the official version at www.gromacs.org.
27 * To help us fund GROMACS development, we humbly ask that you cite
28 * the papers on the package - you can find them in the top README file.
30 * For more info, check our website at http://www.gromacs.org
33 * Gallium Rubidium Oxygen Manganese Argon Carbon Silicon
45 #include "gmx_fatal.h"
51 #include "gmx_omp_nthreads.h"
52 #include "md_logging.h"
/** Structure with the number of threads for each OpenMP multi-threaded
 *  algorithmic module in mdrun.
 *
 *  NOTE(review): the opening "typedef struct {" line is not visible in this
 *  excerpt; only the fields and the closing typedef name are shown.
 */
    int      gnth;          /**< Global num. of threads per PP or PP+PME process/tMPI thread. */
    int      gnth_pme;      /**< Global num. of threads per PME only process/tMPI thread. */
    int      nth[emntNR];   /**< Number of threads for each module, indexed with module_nth_t */
    gmx_bool initialized;   /**< TRUE if the module has been initialized. */
} omp_module_nthreads_t;
/** Names of environment variables to set the per module number of threads.
 *
 *  Indexed with the values of module_nth_t.
 *  NOTE(review): the '{' opening this initializer is not visible in this
 *  excerpt.
 */
static const char *modth_env_var[emntNR] =
    /* emntDefault: the default count is never taken from a per-module env var */
    "GMX_DEFAULT_NUM_THREADS should never be set",
    "GMX_DOMDEC_NUM_THREADS", "GMX_PAIRSEARCH_NUM_THREADS",
    "GMX_NONBONDED_NUM_THREADS", "GMX_BONDED_NUM_THREADS",
    "GMX_PME_NUM_THREADS", "GMX_UPDATE_NUM_THREADS",
    "GMX_VSITE_NUM_THREADS",
    "GMX_LINCS_NUM_THREADS", "GMX_SETTLE_NUM_THREADS"
/** Names of the modules, for user-facing messages.
 *
 *  Indexed with the values of module_nth_t; must stay in sync with
 *  modth_env_var above. NOTE(review): the '{' opening this initializer is
 *  not visible in this excerpt; "vsite" also appears to be missing here
 *  although GMX_VSITE_NUM_THREADS exists above — verify against the full file.
 */
static const char *mod_name[emntNR] =
    "default", "domain decomposition", "pair search", "non-bonded",
    "bonded", "PME", "update", "LINCS", "SETTLE"
/** Number of threads for each algorithmic module.
 *
 *  File-scope global variable that gets set once in gmx_omp_nthreads_init
 *  and queried via gmx_omp_nthreads_get.
 *
 *  All fields are initialized to 0 which should result in errors if
 *  the init call is omitted.
 */
static omp_module_nthreads_t modth = { 0, 0, {0, 0, 0, 0, 0, 0, 0, 0, 0}, FALSE};
97 /** Determine the number of threads for module \mod.
99 * \m takes values form the module_nth_t enum and maps these to the
100 * corresponding value in modth_env_var.
102 * Each number of threads per module takes the default value unless
103 * GMX_*_NUM_THERADS env var is set, case in which its value overrides
106 * The "group" scheme supports OpenMP only in PME and in thise case all but
107 * the PME nthread values default to 1.
109 static int pick_module_nthreads(FILE *fplog, int m,
111 gmx_bool bFullOmpSupport,
123 #endif /* GMX_OPENMP */
125 /* The default should never be set through a GMX_*_NUM_THREADS env var
126 * as it's always equal with gnth. */
127 if (m == emntDefault)
129 return modth.nth[emntDefault];
132 /* check the environment variable */
133 if ((env = getenv(modth_env_var[m])) != NULL)
135 sscanf(env, "%d", &nth);
139 gmx_warning("%s=%d is set, but %s is compiled without OpenMP!",
140 modth_env_var[m], nth, ShortProgram());
143 /* with the verlet codepath, when any GMX_*_NUM_THREADS env var is set,
144 * OMP_NUM_THREADS also has to be set */
145 if (bFullOmpSupport && getenv("OMP_NUM_THREADS") == NULL)
147 gmx_fatal(FARGS, "%s=%d is set, the default number of threads also "
148 "needs to be set with OMP_NUM_THREADS!",
149 modth_env_var[m], nth);
152 /* with the group scheme warn if any env var except PME is set */
153 if (!bFullOmpSupport)
157 gmx_warning("%s=%d is set, but OpenMP multithreading is not "
159 modth_env_var[m], nth, mod_name[m]);
164 /* only babble if we are really overriding with a different value */
165 if ((bSepPME && m == emntPME && nth != modth.gnth_pme) || (nth != modth.gnth))
167 sprintf(sbuf, "%s=%d set, overriding the default number of %s threads",
168 modth_env_var[m], nth, mod_name[m]);
171 fprintf(stderr, "\n%s\n", sbuf);
175 fprintf(fplog, "%s\n", sbuf);
181 /* pick the global PME node nthreads if we are setting the number
182 * of threads in separate PME nodes */
183 nth = (bSepPME && m == emntPME) ? modth.gnth_pme : modth.gnth;
186 return modth.nth[m] = nth;
189 void gmx_omp_nthreads_read_env(int *nthreads_omp,
190 gmx_bool bIsSimMaster)
193 gmx_bool bCommandLineSetNthreadsOMP = *nthreads_omp > 0;
196 assert(nthreads_omp);
198 if ((env = getenv("OMP_NUM_THREADS")) != NULL)
202 sscanf(env, "%d", &nt_omp);
205 gmx_fatal(FARGS, "OMP_NUM_THREADS is invalid: '%s'", env);
208 if (bCommandLineSetNthreadsOMP && nt_omp != *nthreads_omp)
210 gmx_fatal(FARGS, "Environment variable OMP_NUM_THREADS (%d) and the number of threads requested on the command line (%d) have different values. Either omit one, or set them both to the same value.", nt_omp, *nthreads_omp);
213 /* Setting the number of OpenMP threads. */
214 *nthreads_omp = nt_omp;
216 /* Output the results */
218 "The number of OpenMP threads was set by environment variable OMP_NUM_THREADS to %d%s\n",
220 bCommandLineSetNthreadsOMP ? " (and the command-line setting agreed with that)" : "");
223 /* This prints once per simulation for multi-simulations,
224 * which might help diagnose issues with inhomogenous
226 fputs(buffer, stderr);
230 /* This prints once per process for real MPI (i.e. once
231 * per debug file), and once per simulation for thread MPI
232 * (because of logic in the calling function). */
233 fputs(buffer, debug);
/* Initialize the per-module OpenMP thread counts in the file-scope `modth`
 * and apply the global setting via gmx_omp_set_num_threads().
 *
 * NOTE(review): this excerpt is missing lines (opening braces, some
 * declarations, #ifdef/#else halves); only visible code is kept below,
 * byte-identical, with comments repaired. Verify against the full file.
 */
void gmx_omp_nthreads_init(FILE *fplog, t_commrec *cr,
                           int nthreads_hw_avail,
                           int omp_nthreads_req,
                           int omp_nthreads_pme_req,
                           gmx_bool bThisNodePMEOnly,
                           gmx_bool bFullOmpSupport)
    int nth, nth_pmeonly, gmx_maxth, nppn;
    gmx_bool bSepPME, bOMP;
#endif /* GMX_OPENMP */

    /* number of MPI processes/threads per physical node */
    nppn = cr->nrank_intranode;

    /* TRUE when this rank does only PP or only PME work (separate PME
     * ranks); FALSE when it does both. */
    bSepPME = ( (cr->duty & DUTY_PP) && !(cr->duty & DUTY_PME)) ||
              (!(cr->duty & DUTY_PP) && (cr->duty & DUTY_PME));

#ifdef GMX_THREAD_MPI
    /* modth is shared among tMPI threads, so for thread safety the
     * detection is done on the master only. It is not thread-safe with
     * multiple simulations, but that's anyway not supported by tMPI. */
    /* just return if the initialization has already been done */
    if (modth.initialized)

    /* With full OpenMP support (verlet scheme) set the number of threads
     * per process / default:
     * - 1 if not compiled with OpenMP or
     * - OMP_NUM_THREADS if the env. var is set, or
     * - omp_nthreads_req = #of threads requested by the user on the mdrun
     *   command line, otherwise
     * - take the max number of available threads and distribute them
     *   on the processes/tMPI threads.
     * ~ The GMX_*_NUM_THREADS env var overrides the number of threads of
     *   the respective module and it has to be used in conjunction with
     *   OMP_NUM_THREADS.
     *
     * With the group scheme OpenMP multithreading is only supported in PME,
     * for all other modules nthreads is set to 1.
     * The number of PME threads is equal to:
     * - 1 if not compiled with OpenMP or
     * - GMX_PME_NUM_THREADS if defined, otherwise
     * - OMP_NUM_THREADS if defined, otherwise
     * - 1 (presumably — the final default line is not visible here)
     */
    if ((env = getenv("OMP_NUM_THREADS")) != NULL)
        /* a non-"1" OMP_NUM_THREADS without OpenMP support deserves a warning */
        if (!bOMP && (strncmp(env, "1", 1) != 0))
            gmx_warning("OMP_NUM_THREADS is set, but %s was compiled without OpenMP support!",
            nth = gmx_omp_get_max_threads();
    else if (omp_nthreads_req > 0)
        nth = omp_nthreads_req;
    else if (bFullOmpSupport && bOMP)
        /* max available threads per node */
        nth = nthreads_hw_avail;
        /* divide the threads among the MPI processes/tMPI threads */

    /* now we have the global values, set them:
     * - 1 if not compiled with OpenMP and for the group scheme
     * - nth for the verlet scheme when compiled with OpenMP
     */
    if (bFullOmpSupport && bOMP)

    if (omp_nthreads_pme_req > 0)
        modth.gnth_pme = omp_nthreads_pme_req;
        modth.gnth_pme = nth;

    /* now set the per-module values; each call consults its
     * GMX_*_NUM_THREADS env var via pick_module_nthreads() */
    modth.nth[emntDefault] = modth.gnth;
    pick_module_nthreads(fplog, emntDomdec, SIMMASTER(cr), bFullOmpSupport, bSepPME);
    pick_module_nthreads(fplog, emntPairsearch, SIMMASTER(cr), bFullOmpSupport, bSepPME);
    pick_module_nthreads(fplog, emntNonbonded, SIMMASTER(cr), bFullOmpSupport, bSepPME);
    pick_module_nthreads(fplog, emntBonded, SIMMASTER(cr), bFullOmpSupport, bSepPME);
    pick_module_nthreads(fplog, emntPME, SIMMASTER(cr), bFullOmpSupport, bSepPME);
    pick_module_nthreads(fplog, emntUpdate, SIMMASTER(cr), bFullOmpSupport, bSepPME);
    pick_module_nthreads(fplog, emntVSITE, SIMMASTER(cr), bFullOmpSupport, bSepPME);
    pick_module_nthreads(fplog, emntLINCS, SIMMASTER(cr), bFullOmpSupport, bSepPME);
    pick_module_nthreads(fplog, emntSETTLE, SIMMASTER(cr), bFullOmpSupport, bSepPME);

    /* set the number of threads globally */
#ifndef GMX_THREAD_MPI
    if (bThisNodePMEOnly)
        gmx_omp_set_num_threads(modth.gnth_pme);
#endif /* GMX_THREAD_MPI */
        gmx_omp_set_num_threads(nth);
        gmx_omp_set_num_threads(1);

    modth.initialized = TRUE;

#ifdef GMX_THREAD_MPI
    /* Non-master threads have to wait for the detection to be done. */
    MPI_Barrier(cr->mpi_comm_mysim);

    /* inform the user about the settings */
#ifdef GMX_THREAD_MPI
    const char *mpi_str = "per tMPI thread";
    const char *mpi_str = "per MPI process";

    /* for group scheme we print PME threads info only */
    md_print_info(cr, fplog, "Using %d OpenMP thread%s %s\n",
                  modth.gnth, modth.gnth > 1 ? "s" : "",
                  cr->nnodes > 1 ? mpi_str : "");
    if (bSepPME && modth.gnth_pme != modth.gnth)
        md_print_info(cr, fplog, "Using %d OpenMP thread%s %s for PME\n",
                      modth.gnth_pme, modth.gnth_pme > 1 ? "s" : "",
                      cr->nnodes > 1 ? mpi_str : "");

    /* detect and warn about oversubscription
     * TODO: enable this for separate PME nodes as well! */
    if (!bSepPME && cr->rank_pp_intranode == 0)
        char sbuf[STRLEN], sbuf1[STRLEN], sbuf2[STRLEN];

        if (modth.gnth*nppn > nthreads_hw_avail)
            /* thread-MPI: threads of one process oversubscribe */
            sprintf(sbuf, "threads");
#ifdef GMX_THREAD_MPI
            sprintf(sbuf, "thread-MPI threads");
            sprintf(sbuf, "MPI processes");
            sprintf(sbuf1, " per node");
            /* "o" + "versubscribing" from the format string below */
            sprintf(sbuf2, "On node %d: o", cr->sim_nodeid);
            md_print_warn(cr, fplog,
                          "WARNING: %sversubscribing the available %d logical CPU cores%s with %d %s.\n"
                          " This will cause considerable performance loss!",
                          sbuf2, nthreads_hw_avail, sbuf1, nppn*modth.gnth, sbuf);
455 int gmx_omp_nthreads_get(int mod)
457 if (mod < 0 || mod >= emntNR)
459 /* invalid module queried */
464 return modth.nth[mod];