/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#if defined(HAVE_SCHED_H) && defined(HAVE_SCHED_GETAFFINITY)
#define _GNU_SOURCE
#include <sched.h>
#include <sys/syscall.h>
#endif
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <stdio.h>

#include "typedefs.h"
#include "types/commrec.h"
#include "types/hw_info.h"
#include "copyrite.h"
#include "gmx_cpuid.h"
#include "gmx_omp_nthreads.h"
#include "md_logging.h"
#include "gmx_thread_affinity.h"

#include "thread_mpi/threads.h"
#include "gromacs/utility/gmxomp.h"
static int
get_thread_affinity_layout(FILE *fplog,
                           const t_commrec *cr,
                           const gmx_hw_info_t *hwinfo,
                           int nthreads,
                           int pin_offset, int *pin_stride,
                           const int **locality_order)
{
    int         nhwthreads, npkg, ncores, nhwthreads_per_core, rc;
    const int  *pkg_id;
    const int  *core_id;
    const int  *hwthread_id;
    gmx_bool    bPickPinStride;
    if (pin_offset < 0)
    {
        gmx_fatal(FARGS, "Negative thread pinning offset requested");
    }
    if (*pin_stride < 0)
    {
        gmx_fatal(FARGS, "Negative thread pinning stride requested");
    }
    rc = gmx_cpuid_topology(hwinfo->cpuid_info, &nhwthreads, &npkg, &ncores,
                            &nhwthreads_per_core,
                            &pkg_id, &core_id, &hwthread_id, locality_order);

    if (rc != 0)
    {
        /* topology information not available or invalid, ignore it */
        nhwthreads      = hwinfo->nthreads_hw_avail;
        *locality_order = NULL;

        if (nhwthreads <= 0)
        {
            /* We don't know anything about the hardware, don't pin */
            md_print_warn(cr, fplog,
                          "NOTE: We don't know how many logical cores we have, will not pin threads");

            return -1;
        }
    }
    if (nthreads > nhwthreads)
    {
        /* We are oversubscribing, don't pin */
        md_print_warn(NULL, fplog,
                      "WARNING: Oversubscribing the CPU, will not pin threads");

        return -1;
    }

    if (pin_offset + nthreads > nhwthreads)
    {
        /* We are oversubscribing, don't pin */
        md_print_warn(NULL, fplog,
                      "WARNING: The requested pin offset is too large for the available logical cores,\n"
                      "         will not pin threads");

        return -1;
    }
    /* do we need to choose the pinning stride? */
    bPickPinStride = (*pin_stride == 0);

    if (bPickPinStride)
    {
        if (rc == 0 && pin_offset + nthreads*nhwthreads_per_core <= nhwthreads)
        {
            /* Put one thread on each physical core */
            *pin_stride = nhwthreads_per_core;
        }
        else
        {
            /* We don't know if we have SMT, and if we do, we don't know
             * whether hw threads in the same physical core are consecutive.
             * Without SMT the pinning layout should not matter too much,
             * so we assume a consecutive layout and maximally spread out
             * the threads, with an equal number of threads per core.
             * Note that IBM is the major non-x86 case with cpuid support
             * and probably threads are already pinned by the queuing system,
             * so we wouldn't end up here in the first place.
             */
            *pin_stride = (nhwthreads - pin_offset)/nthreads;
        }
    }
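    /* Illustrative arithmetic for the two branches above (the numbers are
     * hypothetical, not taken from any real topology): with nhwthreads = 16,
     * nhwthreads_per_core = 2, nthreads = 8 and pin_offset = 0, the first
     * branch applies (8*2 <= 16) and one thread lands on each physical core
     * (stride 2: hw threads 0,2,...,14). Without topology information and
     * with nthreads = 4, the fallback gives stride = (16 - 0)/4 = 4, i.e.
     * hw threads 0,4,8,12.
     */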
    else
    {
        /* Check the placement of the thread with the largest index to make sure
         * that the offset & stride don't cause pinning beyond the last hardware thread. */
        if (pin_offset + (nthreads-1)*(*pin_stride) >= nhwthreads)
        {
            /* We are oversubscribing, don't pin */
            md_print_warn(NULL, fplog,
                          "WARNING: The requested pinning stride is too large for the available logical cores,\n"
                          "         will not pin threads");

            return -1;
        }
    }
    if (fplog != NULL)
    {
        fprintf(fplog, "Pinning threads with a%s logical core stride of %d\n",
                bPickPinStride ? "n auto-selected" : " user-specified",
                *pin_stride);
    }

    return 0;
}
/* Set CPU affinity. Can be important for performance.
   On some systems (e.g. Cray), CPU affinity is set by default.
   But the default assignment does not work (well) when only some
   ranks have threads, which causes very low performance.
   External tools have cumbersome syntax for setting affinity
   in the case that only some ranks have threads.
   Thus it is important that GROMACS sets the affinity internally
   when only PME is using threads.
 */
void
gmx_set_thread_affinity(FILE                *fplog,
                        const t_commrec     *cr,
                        gmx_hw_opt_t        *hw_opt,
                        const gmx_hw_info_t *hwinfo)
{
    int        nth_affinity_set, thread0_id_node,
               nthread_local, nthread_node, nthread_hw_max, nphyscore;
    int        offset;
    const int *locality_order;
    int        rc;
    if (hw_opt->thread_affinity == threadaffOFF)
    {
        /* Nothing to do */
        return;
    }
    /* If the tMPI thread affinity setting is not supported, encourage the user
     * to report it, as it's either a bug or an exotic platform which we might
     * want to support. */
    if (tMPI_Thread_setaffinity_support() != TMPI_SETAFFINITY_SUPPORT_YES)
    {
#ifndef __APPLE__
        /* we know Mac OS doesn't support setting thread affinity, so there's
           no point in warning the user in that case. In any other case
           the user might be able to do something about it. */
        md_print_warn(NULL, fplog,
                      "Cannot set thread affinities on the current platform. On NUMA systems this\n"
                      "can cause performance degradation. If you think your platform should support\n"
                      "setting affinities, contact the GROMACS developers.");
#endif  /* __APPLE__ */
        return;
    }
    /* threads on this MPI process or TMPI thread */
    if (cr->duty & DUTY_PP)
    {
        nthread_local = gmx_omp_nthreads_get(emntNonbonded);
    }
    else
    {
        nthread_local = gmx_omp_nthreads_get(emntPME);
    }
    /* map the current process to cores */
    thread0_id_node = 0;
    nthread_node    = nthread_local;
#ifdef GMX_MPI
    if (PAR(cr) || MULTISIM(cr))
    {
        /* We need to determine a scan of the thread counts in this
         * compute node.
         */
        MPI_Comm comm_intra;

        MPI_Comm_split(MPI_COMM_WORLD, gmx_hostname_num(), cr->rank_intranode,
                       &comm_intra);
        MPI_Scan(&nthread_local, &thread0_id_node, 1, MPI_INT, MPI_SUM, comm_intra);
        /* MPI_Scan is inclusive, but here we need exclusive */
        thread0_id_node -= nthread_local;
        /* Get the total number of threads on this physical node */
        MPI_Allreduce(&nthread_local, &nthread_node, 1, MPI_INT, MPI_SUM, comm_intra);
        MPI_Comm_free(&comm_intra);
    }
#endif
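    /* A sketch of the scan above with hypothetical ranks: three ranks on a
     * node running 4, 2 and 2 threads get inclusive MPI_Scan results 4, 6
     * and 8; subtracting nthread_local turns these into the exclusive
     * values 0, 4 and 6, i.e. the node-local id of each rank's first
     * thread, while the Allreduce yields nthread_node = 8.
     */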
    if (hw_opt->thread_affinity == threadaffAUTO &&
        nthread_node != hwinfo->nthreads_hw_avail)
    {
        if (nthread_node > 1 && nthread_node < hwinfo->nthreads_hw_avail)
        {
            md_print_warn(cr, fplog,
                          "NOTE: The number of threads is not equal to the number of (logical) cores\n"
                          "      and the -pin option is set to auto: will not pin threads to cores.\n"
                          "      This can lead to significant performance degradation.\n"
                          "      Consider using -pin on (and -pinoffset in case you run multiple jobs).\n");
        }

        return;
    }
    offset = 0;
    if (hw_opt->core_pinning_offset != 0)
    {
        offset = hw_opt->core_pinning_offset;
        md_print_info(cr, fplog, "Applying core pinning offset %d\n", offset);
    }

    rc = get_thread_affinity_layout(fplog, cr, hwinfo,
                                    nthread_node,
                                    offset, &hw_opt->core_pinning_stride,
                                    &locality_order);

    if (rc != 0)
    {
        /* Incompatible layout, don't pin, warning was already issued */
        return;
    }
    /* Set the per-thread affinity. In order to be able to check the success
     * of affinity settings, we will set nth_affinity_set to 1 on threads
     * where the affinity setting succeeded and to 0 where it failed.
     * Reducing these 0/1 values over the threads will give the total number
     * of threads on which we succeeded.
     */
    nth_affinity_set = 0;
#pragma omp parallel num_threads(nthread_local) reduction(+:nth_affinity_set)
    {
        int      thread_id, thread_id_node;
        int      index, core;
        gmx_bool setaffinity_ret;

        thread_id      = gmx_omp_get_thread_num();
        thread_id_node = thread0_id_node + thread_id;
        index          = offset + thread_id_node*hw_opt->core_pinning_stride;
        if (locality_order != NULL)
        {
            core = locality_order[index];
        }
        else
        {
            core = index;
        }

        setaffinity_ret = tMPI_Thread_setaffinity_single(tMPI_Thread_self(), core);

        /* store the per-thread success-values of the setaffinity */
        nth_affinity_set = (setaffinity_ret == 0);

        if (debug)
        {
            fprintf(debug, "On rank %2d, thread %2d, index %2d, core %2d the affinity setting returned %d\n",
                    cr->nodeid, gmx_omp_get_thread_num(), index, core, setaffinity_ret);
        }
    }
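    /* Illustrative mapping with hypothetical values: offset = 0, stride = 2
     * and thread0_id_node = 4 give the local thread with thread_id = 2 an
     * index of (4 + 2)*2 = 12; it is pinned to locality_order[12] when a
     * locality table is available, otherwise directly to logical core 12.
     */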
    if (nth_affinity_set > nthread_local)
    {
        char msg[STRLEN];

        sprintf(msg, "Looks like we have set affinity for more threads than "
                "we have (%d > %d)!\n", nth_affinity_set, nthread_local);
        gmx_incons(msg);
    }
    else
    {
        /* check & warn if some threads failed to set their affinities */
        if (nth_affinity_set != nthread_local)
        {
            char sbuf1[STRLEN], sbuf2[STRLEN];

            /* sbuf1 contains rank info, while sbuf2 OpenMP thread info */
            sbuf1[0] = sbuf2[0] = '\0';
            /* Only add rank info if we have more than one rank. */
            if (cr->nnodes > 1)
            {
#ifdef GMX_MPI
#ifdef GMX_THREAD_MPI
                sprintf(sbuf1, "In tMPI thread #%d: ", cr->nodeid);
#else           /* GMX_LIB_MPI */
                sprintf(sbuf1, "In MPI process #%d: ", cr->nodeid);
#endif
#endif          /* GMX_MPI */
            }

            if (nthread_local > 1)
            {
                sprintf(sbuf2, "for %d/%d thread%s ",
                        nthread_local - nth_affinity_set, nthread_local,
                        nthread_local > 1 ? "s" : "");
            }

            md_print_warn(NULL, fplog,
                          "WARNING: %sAffinity setting %sfailed.\n"
                          "         This can cause performance degradation! If you think your settings are\n"
                          "         correct, contact the GROMACS developers.",
                          sbuf1, sbuf2);
        }
    }
}
/* Check the process affinity mask; if it is found to be non-default,
 * honor it and disable mdrun's internal thread affinity setting.
 * Note that this will only work on Linux, as we use a GNU feature.
 */
void
gmx_check_thread_affinity_set(FILE            gmx_unused *fplog,
                              const t_commrec gmx_unused *cr,
                              gmx_hw_opt_t    gmx_unused *hw_opt,
                              int             gmx_unused  ncpus,
                              gmx_bool        gmx_unused  bAfterOpenmpInit)
{
#ifdef HAVE_SCHED_GETAFFINITY
    cpu_set_t mask_current;
    int       i, ret, cpu_count, cpu_set;
    gmx_bool  bAllSet;

    assert(hw_opt);
    if (hw_opt->thread_affinity == threadaffOFF)
    {
        /* internal affinity setting is off, don't bother checking process affinity */
        return;
    }

    CPU_ZERO(&mask_current);
    if ((ret = sched_getaffinity(0, sizeof(cpu_set_t), &mask_current)) != 0)
    {
        /* failed to query affinity mask, will just return */
        if (debug)
        {
            fprintf(debug, "Failed to query affinity mask (error %d)\n", ret);
        }
        return;
    }
    /* Before proceeding with the actual check, make sure that the number of
     * detected CPUs is >= the CPUs in the current set.
     * We need to check for CPU_COUNT as it was added only in glibc 2.6. */
#ifdef CPU_COUNT
    if (ncpus < CPU_COUNT(&mask_current))
    {
        if (debug)
        {
            fprintf(debug, "%d CPUs detected, but %d was returned by CPU_COUNT\n",
                    ncpus, CPU_COUNT(&mask_current));
        }
        return;
    }
#endif /* CPU_COUNT */
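    /* On glibc older than 2.6, CPU_COUNT is unavailable and the check above
     * is compiled out. A manual count would be possible with only the older
     * CPU_ISSET/CPU_SETSIZE macros, e.g. (a sketch, not compiled here):
     *
     *     int count = 0;
     *     for (i = 0; i < CPU_SETSIZE; i++)
     *     {
     *         count += (CPU_ISSET(i, &mask_current) != 0);
     *     }
     */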
    bAllSet = TRUE;
    for (i = 0; (i < ncpus && i < CPU_SETSIZE); i++)
    {
        bAllSet = bAllSet && (CPU_ISSET(i, &mask_current) != 0);
    }
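    /* Example of an external restriction (hypothetical invocation): starting
     * mdrun under "taskset -c 0-3" on an 8-CPU machine leaves CPUs 4-7 unset
     * in the mask, so bAllSet ends up FALSE and the external mask is honored
     * below.
     */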
    if (!bAllSet)
    {
        if (hw_opt->thread_affinity == threadaffAUTO)
        {
            if (!bAfterOpenmpInit)
            {
                md_print_warn(cr, fplog,
                              "Non-default thread affinity set, disabling internal thread affinity");
            }
            else
            {
                md_print_warn(cr, fplog,
                              "Non-default thread affinity set probably by the OpenMP library,\n"
                              "disabling internal thread affinity");
            }
            hw_opt->thread_affinity = threadaffOFF;
        }
        else
        {
            /* Only warn once, at the last check (bAfterOpenmpInit==TRUE) */
            if (bAfterOpenmpInit)
            {
                md_print_warn(cr, fplog,
                              "Overriding thread affinity set outside %s\n",
                              ShortProgram());
            }
        }

        if (debug)
        {
            fprintf(debug, "Non-default affinity mask found\n");
        }
    }
    else
    {
        if (debug)
        {
            fprintf(debug, "Default affinity mask found\n");
        }
    }
#endif /* HAVE_SCHED_GETAFFINITY */
}