/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#if defined(HAVE_SCHED_H) && defined(HAVE_SCHED_GETAFFINITY)
#define _GNU_SOURCE
#include <sched.h>
#include <sys/syscall.h>
#endif
#include <stdio.h>
#include <string.h>
#include <assert.h>

#include "typedefs.h"
#include "types/commrec.h"
#include "types/hw_info.h"
#include "gmx_cpuid.h"
#include "gmx_omp_nthreads.h"
#include "md_logging.h"
#include "gmx_fatal.h"    /* gmx_fatal(), gmx_incons() */
#include "statutil.h"     /* ShortProgram() */
#include "gmx_thread_affinity.h"

#include "thread_mpi/threads.h"
#include "gromacs/utility/gmxomp.h"
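
/* Determine the pinning layout for nthreads threads: validates the
 * requested pin_offset and, when *pin_stride is 0, auto-selects a stride
 * from the hardware topology. Returns 0 on success, having set *pin_stride
 * and *locality_order; returns -1 (after printing a note or warning) when
 * the threads should not be pinned.
 */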
static int
get_thread_affinity_layout(FILE *fplog,
                           const t_commrec *cr,
                           const gmx_hw_info_t *hwinfo,
                           int nthreads,
                           int pin_offset, int *pin_stride,
                           const int **locality_order)
{
    int        nhwthreads, npkg, ncores, nhwthreads_per_core, rc;
    const int *pkg_id;
    const int *core_id;
    const int *hwthread_id;
    gmx_bool   bPickPinStride;

    if (pin_offset < 0)
    {
        gmx_fatal(FARGS, "Negative thread pinning offset requested");
    }
    if (*pin_stride < 0)
    {
        gmx_fatal(FARGS, "Negative thread pinning stride requested");
    }

    rc = gmx_cpuid_topology(hwinfo->cpuid_info, &nhwthreads, &npkg, &ncores,
                            &nhwthreads_per_core,
                            &pkg_id, &core_id, &hwthread_id, locality_order);

    if (rc != 0)
    {
        /* topology information not available or invalid, ignore it */
        nhwthreads      = hwinfo->nthreads_hw_avail;
        *locality_order = NULL;

        if (nhwthreads <= 0)
        {
            /* We don't know anything about the hardware, don't pin */
            md_print_warn(cr, fplog,
                          "NOTE: We don't know how many logical cores we have, will not pin threads");
            return -1;
        }
    }

    if (nthreads > nhwthreads)
    {
        /* We are oversubscribing, don't pin */
        md_print_warn(NULL, fplog,
                      "WARNING: Oversubscribing the CPU, will not pin threads");
        return -1;
    }

    if (pin_offset + nthreads > nhwthreads)
    {
        /* The requested offset plus the thread count exceeds the number of
         * available hw threads, don't pin */
        md_print_warn(NULL, fplog,
                      "WARNING: The requested pin offset is too large for the available logical cores,\n"
                      "         will not pin threads");
        return -1;
    }

    /* do we need to choose the pinning stride? */
    bPickPinStride = (*pin_stride == 0);

    if (bPickPinStride)
    {
        if (rc == 0 && pin_offset + nthreads*nhwthreads_per_core <= nhwthreads)
        {
            /* Put one thread on each physical core */
            *pin_stride = nhwthreads_per_core;
        }
        else
        {
            /* We don't know if we have SMT, and if we do, we don't know
             * whether hw threads in the same physical core are consecutive.
             * Without SMT the pinning layout should not matter too much,
             * so we assume a consecutive layout and maximally spread out
             * the threads at equal threads per core.
             * Note that IBM is the major non-x86 case with cpuid support,
             * and there threads are probably already pinned by the queuing
             * system, so we probably wouldn't end up here in the first place.
             */
            *pin_stride = (nhwthreads - pin_offset)/nthreads;
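            /* Worked example (values for illustration only): on a node with
             * nhwthreads = 16 and nhwthreads_per_core = 2, running
             * nthreads = 8 with pin_offset = 0 gives 0 + 8*2 <= 16, so the
             * stride becomes 2 and each thread lands on its own physical
             * core. Without topology information (rc != 0), the fallback
             * (16 - 0)/8 = 2 happens to give the same spread under the
             * consecutive-layout assumption. */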
        }
    }
    else
    {
        /* Check the placement of the thread with the largest index to make
         * sure that the offset & stride don't cause pinning beyond the last
         * hardware thread. */
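        /* For example (illustrative numbers): pin_offset = 4, stride = 2 and
         * nthreads = 8 would place the last thread on hw thread
         * 4 + 7*2 = 18, which does not exist on a 16-thread node, so we
         * refuse to pin. */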
        if (pin_offset + (nthreads-1)*(*pin_stride) >= nhwthreads)
        {
            /* We are oversubscribing, don't pin */
            md_print_warn(NULL, fplog,
                          "WARNING: The requested pinning stride is too large for the available logical cores,\n"
                          "         will not pin threads");
            return -1;
        }
    }

    if (fplog != NULL)
    {
        fprintf(fplog, "Pinning threads with a%s logical core stride of %d\n",
                bPickPinStride ? "n auto-selected" : " user-specified",
                *pin_stride);
    }

    return 0;
}

/* Set CPU affinity. Can be important for performance.
   On some systems (e.g. Cray) CPU affinity is set by default.
   But default assignment doesn't work (well) with only some ranks
   having threads. This causes very low performance.
   External tools have cumbersome syntax for setting affinity
   in the case that only some ranks have threads.
   Thus it is important that GROMACS sets the affinity internally
   if only PME is using threads.
 */
void
gmx_set_thread_affinity(FILE                *fplog,
                        const t_commrec     *cr,
                        gmx_hw_opt_t        *hw_opt,
                        const gmx_hw_info_t *hwinfo)
{
    int        nth_affinity_set, thread0_id_node,
               nthread_local, nthread_node, nthread_hw_max, nphyscore;
    int        offset;
    const int *locality_order;
    int        rc;

    if (hw_opt->thread_affinity == threadaffOFF)
    {
        /* Nothing to do */
        return;
    }

    /* If the tMPI thread affinity setting is not supported encourage the user
     * to report it as it's either a bug or an exotic platform which we might
     * want to support. */
    if (tMPI_Thread_setaffinity_support() != TMPI_SETAFFINITY_SUPPORT_YES)
    {
#ifndef __APPLE__
        /* We know Mac OS does not support setting thread affinity, so there's
           no point in warning the user in that case. In any other case
           the user might be able to do something about it. */
        md_print_warn(NULL, fplog,
                      "Cannot set thread affinities on the current platform. On NUMA systems this\n"
                      "can cause performance degradation. If you think your platform should support\n"
                      "setting affinities, contact the GROMACS developers.");
#endif  /* __APPLE__ */
        return;
    }

    /* threads on this MPI process or TMPI thread */
    if (cr->duty & DUTY_PP)
    {
        nthread_local = gmx_omp_nthreads_get(emntNonbonded);
    }
    else
    {
        nthread_local = gmx_omp_nthreads_get(emntPME);
    }

    /* map the current process to cores */
    thread0_id_node = 0;
    nthread_node    = nthread_local;
#ifdef GMX_MPI
    if (PAR(cr) || MULTISIM(cr))
    {
        /* We need to determine a scan of the thread counts in this
         * compute node.
         */
        MPI_Comm comm_intra;

        MPI_Comm_split(MPI_COMM_WORLD, gmx_hostname_num(), cr->rank_intranode,
                       &comm_intra);
        MPI_Scan(&nthread_local, &thread0_id_node, 1, MPI_INT, MPI_SUM, comm_intra);
        /* MPI_Scan is inclusive, but here we need exclusive */
        thread0_id_node -= nthread_local;
        /* Get the total number of threads on this physical node */
        MPI_Allreduce(&nthread_local, &nthread_node, 1, MPI_INT, MPI_SUM, comm_intra);
        MPI_Comm_free(&comm_intra);
    }
#endif
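
    /* Worked example of the exclusive-scan trick above (illustrative
     * numbers): three ranks on one node with 4, 2 and 2 threads get
     * inclusive-scan results 4, 6 and 8; subtracting the local count
     * yields first-thread ids 0, 4 and 6, and the Allreduce gives
     * nthread_node = 8 on every rank. */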

    if (hw_opt->thread_affinity == threadaffAUTO &&
        nthread_node != hwinfo->nthreads_hw_avail)
    {
        if (nthread_node > 1 && nthread_node < hwinfo->nthreads_hw_avail)
        {
            md_print_warn(cr, fplog,
                          "NOTE: The number of threads is not equal to the number of (logical) cores\n"
                          "      and the -pin option is set to auto: will not pin threads to cores.\n"
                          "      This can lead to significant performance degradation.\n"
                          "      Consider using -pin on (and -pinoffset in case you run multiple jobs).\n");
        }

        return;
    }

    offset = 0;
    if (hw_opt->core_pinning_offset != 0)
    {
        offset = hw_opt->core_pinning_offset;
        md_print_info(cr, fplog, "Applying core pinning offset %d\n", offset);
    }

    rc = get_thread_affinity_layout(fplog, cr, hwinfo,
                                    nthread_node,
                                    offset, &hw_opt->core_pinning_stride,
                                    &locality_order);
    if (rc != 0)
    {
        /* Incompatible layout, don't pin, warning was already issued */
        return;
    }

    /* Set the per-thread affinity. In order to be able to check the success
     * of affinity settings, we will set nth_affinity_set to 1 on threads
     * where the affinity setting succeeded and to 0 where it failed.
     * Reducing these 0/1 values over the threads will give the total number
     * of threads on which we succeeded.
     */
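    /* For example, if 7 of 8 threads manage to set their affinity, the
     * reduction below yields nth_affinity_set == 7 != nthread_local and
     * the warning further down is triggered. */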
    nth_affinity_set = 0;
#pragma omp parallel num_threads(nthread_local) reduction(+:nth_affinity_set)
    {
        int      thread_id, thread_id_node;
        int      index, core;
        gmx_bool setaffinity_ret;

        thread_id      = gmx_omp_get_thread_num();
        thread_id_node = thread0_id_node + thread_id;
        index          = offset + thread_id_node*hw_opt->core_pinning_stride;
        if (locality_order != NULL)
        {
            core = locality_order[index];
        }
        else
        {
            core = index;
        }

        setaffinity_ret = tMPI_Thread_setaffinity_single(tMPI_Thread_self(), core);

        /* store the per-thread success-values of the setaffinity */
        nth_affinity_set = (setaffinity_ret == 0);

        if (debug)
        {
            fprintf(debug, "On rank %2d, thread %2d, index %2d, core %2d the affinity setting returned %d\n",
                    cr->nodeid, gmx_omp_get_thread_num(), index, core, setaffinity_ret);
        }
    }

    if (nth_affinity_set > nthread_local)
    {
        char msg[STRLEN];

        sprintf(msg, "Looks like we have set affinity for more threads than "
                "we have (%d > %d)!\n", nth_affinity_set, nthread_local);
        gmx_incons(msg);
    }
    else
    {
        /* check & warn if some threads failed to set their affinities */
        if (nth_affinity_set != nthread_local)
        {
            char sbuf1[STRLEN], sbuf2[STRLEN];

            /* sbuf1 contains rank info, while sbuf2 OpenMP thread info */
            sbuf1[0] = sbuf2[0] = '\0';
            /* Only add rank info if we have more than one rank. */
            if (cr->nnodes > 1)
            {
#ifdef GMX_MPI
#ifdef GMX_THREAD_MPI
                sprintf(sbuf1, "In tMPI thread #%d: ", cr->nodeid);
#else           /* GMX_LIB_MPI */
                sprintf(sbuf1, "In MPI process #%d: ", cr->nodeid);
#endif
#endif          /* GMX_MPI */
            }

            if (nthread_local > 1)
            {
                sprintf(sbuf2, "for %d/%d thread%s ",
                        nthread_local - nth_affinity_set, nthread_local,
                        nthread_local > 1 ? "s" : "");
            }

            md_print_warn(NULL, fplog,
                          "WARNING: %sAffinity setting %sfailed.\n"
                          "         This can cause performance degradation! If you think your settings are\n"
                          "         correct, contact the GROMACS developers.",
                          sbuf1, sbuf2);
        }
    }
}

/* Check the process affinity mask and, if it is found to be non-default,
 * honor it and disable mdrun's internal affinity setting.
 * Note that this will only work on Linux, as we use a GNU feature.
 */
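/* (When HAVE_SCHED_GETAFFINITY is not defined, the body below compiles
 * to a no-op, which is why all parameters are marked gmx_unused.) */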
void
gmx_check_thread_affinity_set(FILE gmx_unused            *fplog,
                              const t_commrec gmx_unused *cr,
                              gmx_hw_opt_t gmx_unused    *hw_opt,
                              int gmx_unused              ncpus,
                              gmx_bool gmx_unused         bAfterOpenmpInit)
{
#ifdef HAVE_SCHED_GETAFFINITY
    cpu_set_t mask_current;
    int       i, ret, cpu_count, cpu_set;
    gmx_bool  bAllSet;

    assert(hw_opt);
    if (hw_opt->thread_affinity == threadaffOFF)
    {
        /* internal affinity setting is off, don't bother checking process affinity */
        return;
    }

    CPU_ZERO(&mask_current);
    if ((ret = sched_getaffinity(0, sizeof(cpu_set_t), &mask_current)) != 0)
    {
        /* failed to query affinity mask, will just return */
        if (debug)
        {
            fprintf(debug, "Failed to query affinity mask (error %d)", ret);
        }
        return;
    }

    /* Before proceeding with the actual check, make sure that the number of
     * detected CPUs is >= the CPUs in the current set.
     * We need to check for CPU_COUNT as it was added only in glibc 2.6. */
#ifdef CPU_COUNT
    if (ncpus < CPU_COUNT(&mask_current))
    {
        if (debug)
        {
            fprintf(debug, "%d CPUs detected, but CPU_COUNT returned %d",
                    ncpus, CPU_COUNT(&mask_current));
        }
        return;
    }
#endif /* CPU_COUNT */
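    /* On glibc older than 2.6, CPU_COUNT is unavailable and the size check
     * above is simply skipped. A hypothetical fallback (sketch only, not
     * part of this code) could count the set bits directly:
     *     int n = 0;
     *     for (i = 0; i < CPU_SETSIZE; i++)
     *     {
     *         n += (CPU_ISSET(i, &mask_current) != 0);
     *     }
     */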

    bAllSet = TRUE;
    for (i = 0; (i < ncpus && i < CPU_SETSIZE); i++)
    {
        bAllSet = bAllSet && (CPU_ISSET(i, &mask_current) != 0);
    }

    if (!bAllSet)
    {
        if (hw_opt->thread_affinity == threadaffAUTO)
        {
            if (!bAfterOpenmpInit)
            {
                md_print_warn(cr, fplog,
                              "Non-default thread affinity set, disabling internal thread affinity");
            }
            else
            {
                md_print_warn(cr, fplog,
                              "Non-default thread affinity set probably by the OpenMP library,\n"
                              "disabling internal thread affinity");
            }
            hw_opt->thread_affinity = threadaffOFF;
        }
        else
        {
            /* Only warn once, at the last check (bAfterOpenmpInit==TRUE) */
            if (bAfterOpenmpInit)
            {
                md_print_warn(cr, fplog,
                              "Overriding thread affinity set outside %s\n",
                              ShortProgram());
            }
        }

        if (debug)
        {
            fprintf(debug, "Non-default affinity mask found\n");
        }
    }
    else
    {
        if (debug)
        {
            fprintf(debug, "Default affinity mask found\n");
        }
    }
#endif /* HAVE_SCHED_GETAFFINITY */
}