/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012, by the GROMACS development team, led by
 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
 * others, as listed in the AUTHORS file in the top-level source
 * directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#if defined(HAVE_SCHED_H) && defined(HAVE_SCHED_GETAFFINITY)
#include <sched.h>
#include <sys/syscall.h>
#endif

#include "types/commrec.h"
#include "types/hw_info.h"
#include "gmx_cpuid.h"
#include "gmx_omp_nthreads.h"
#include "md_logging.h"
#include "gmx_thread_affinity.h"

#include "thread_mpi/threads.h"
static int
get_thread_affinity_layout(FILE *fplog,
                           const t_commrec *cr,
                           const gmx_hw_info_t *hwinfo,
                           int nthreads,
                           int pin_offset, int *pin_stride,
                           const int **locality_order)
{
    int        nhwthreads, npkg, ncores, nhwthreads_per_core, rc;
    const int *pkg_id;
    const int *core_id;
    const int *hwthread_id;
    gmx_bool   bPickPinStride;
    if (pin_offset < 0)
        gmx_fatal(FARGS, "Negative thread pinning offset requested");
    if (*pin_stride < 0)
        gmx_fatal(FARGS, "Negative thread pinning stride requested");
    rc = gmx_cpuid_topology(hwinfo->cpuid_info, &nhwthreads, &npkg, &ncores,
                            &nhwthreads_per_core,
                            &pkg_id, &core_id, &hwthread_id, locality_order);
        /* topology information not available or invalid, ignore it */
        nhwthreads      = hwinfo->nthreads_hw_avail;
        *locality_order = NULL;
        /* We don't know anything about the hardware, don't pin */
        md_print_warn(cr, fplog,
                      "NOTE: We don't know how many logical cores we have, will not pin threads");
    if (nthreads > nhwthreads)
    {
        /* We are oversubscribing, don't pin */
        md_print_warn(NULL, fplog,
                      "WARNING: Oversubscribing the CPU, will not pin threads");
    }
    if (pin_offset + nthreads > nhwthreads)
    {
        /* We are oversubscribing, don't pin */
        md_print_warn(NULL, fplog,
                      "WARNING: The requested pin offset is too large for the available logical cores,\n"
                      "         will not pin threads");
    }
    /* do we need to choose the pinning stride? */
    bPickPinStride = (*pin_stride == 0);
        if (rc == 0 && pin_offset + nthreads*nhwthreads_per_core <= nhwthreads)
        {
            /* Put one thread on each physical core */
            *pin_stride = nhwthreads_per_core;
        }
        /* We don't know if we have SMT, and if we do, we don't know
         * whether the hardware threads in the same physical core are
         * consecutive. Without SMT the pinning layout should not matter
         * too much, so we assume a consecutive layout and spread the
         * threads out maximally, with an equal number of threads per core.
         * Note that IBM is the major non-x86 case with cpuid support,
         * and there the threads are probably already pinned by the
         * queuing system, so we wouldn't end up here in the first place.
         */
        *pin_stride = (nhwthreads - pin_offset)/nthreads;
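        /* For example, with 16 available hardware threads, a zero pin offset
         * and 6 program threads, this integer division gives a stride of 2,
         * which places the threads on hardware threads 0, 2, 4, 6, 8 and 10. */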
    /* Check the placement of the thread with the largest index to make sure
     * that the offset & stride don't cause pinning beyond the last hardware thread. */
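    /* For example, with a pin offset of 2, a stride of 4 and 4 threads, the
     * last thread would land on hardware thread 2 + 3*4 = 14, so at least
     * 15 hardware threads are needed; with fewer we refuse to pin. */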
    if (pin_offset + (nthreads-1)*(*pin_stride) >= nhwthreads)
    {
        /* We are oversubscribing, don't pin */
        md_print_warn(NULL, fplog,
                      "WARNING: The requested pinning stride is too large for the available logical cores,\n"
                      "         will not pin threads");
    }
    fprintf(fplog, "Pinning threads with a%s logical core stride of %d\n",
            bPickPinStride ? "n auto-selected" : " user-specified",
            *pin_stride);
/* Set CPU affinity. Can be important for performance.
   On some systems (e.g. Cray) CPU affinity is set by default.
   But the default assignment does not work (well) when only some of the
   ranks have threads, which causes very low performance.
   External tools have cumbersome syntax for setting affinity
   in the case that only some ranks have threads.
   Thus it is important that GROMACS sets the affinity internally
   when only PME is using threads.
 */
void
gmx_set_thread_affinity(FILE                *fplog,
                        const t_commrec     *cr,
                        gmx_hw_opt_t        *hw_opt,
                        const gmx_hw_info_t *hwinfo)
{
    int        nth_affinity_set, thread0_id_node,
               nthread_local, nthread_node, nthread_hw_max, nphyscore;
    int        offset, rc;
    const int *locality_order;
    if (hw_opt->thread_affinity == threadaffOFF)
    {
        /* Nothing to do */
        return;
    }

    /* If the tMPI thread affinity setting is not supported encourage the user
     * to report it as it's either a bug or an exotic platform which we might
     * want to support. */
    if (tMPI_Thread_setaffinity_support() != TMPI_SETAFFINITY_SUPPORT_YES)
    {
#ifndef __APPLE__
        /* we know Mac OS doesn't support setting thread affinity, so there's
           no point in warning the user in that case. In any other case
           the user might be able to do something about it. */
        md_print_warn(NULL, fplog,
                      "Cannot set thread affinities on the current platform. On NUMA systems this\n"
                      "can cause performance degradation. If you think your platform should support\n"
                      "setting affinities, contact the GROMACS developers.");
#endif /* __APPLE__ */
        return;
    }
    /* threads on this MPI process or TMPI thread */
    if (cr->duty & DUTY_PP)
    {
        nthread_local = gmx_omp_nthreads_get(emntNonbonded);
    }
    else
    {
        nthread_local = gmx_omp_nthreads_get(emntPME);
    }
    /* map the current process to cores */
    thread0_id_node = 0;
    nthread_node    = nthread_local;
    if (PAR(cr) || MULTISIM(cr))
    {
        /* We need to determine a scan of the thread counts in this
         * physical node to find the node-local id of our first thread. */
        MPI_Comm comm_intra;

        MPI_Comm_split(MPI_COMM_WORLD, gmx_hostname_num(), cr->rank_intranode,
                       &comm_intra);
        MPI_Scan(&nthread_local, &thread0_id_node, 1, MPI_INT, MPI_SUM, comm_intra);
        /* MPI_Scan is inclusive, but here we need exclusive */
        thread0_id_node -= nthread_local;
        /* Get the total number of threads on this physical node */
        MPI_Allreduce(&nthread_local, &nthread_node, 1, MPI_INT, MPI_SUM, comm_intra);
        MPI_Comm_free(&comm_intra);
    }
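    /* For example, with three ranks on a node running 4, 2 and 4 threads,
     * the inclusive scan gives 4, 6 and 10; subtracting the local count
     * turns this into the exclusive scan 0, 4 and 6, i.e. the node-local
     * id of each rank's first thread. */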
    if (hw_opt->thread_affinity == threadaffAUTO &&
        nthread_node != hwinfo->nthreads_hw_avail)
    {
        if (nthread_node > 1 && nthread_node < hwinfo->nthreads_hw_avail)
        {
            md_print_warn(cr, fplog,
                          "NOTE: The number of threads is not equal to the number of (logical) cores\n"
                          "      and the -pin option is set to auto: will not pin threads to cores.\n"
                          "      This can lead to significant performance degradation.\n"
                          "      Consider using -pin on (and -pinoffset in case you run multiple jobs).\n");
        }
    }
    offset = 0;
    if (hw_opt->core_pinning_offset != 0)
    {
        offset = hw_opt->core_pinning_offset;
        md_print_info(cr, fplog, "Applying core pinning offset %d\n", offset);
    }
    rc = get_thread_affinity_layout(fplog, cr, hwinfo, nthread_node,
                                    offset, &hw_opt->core_pinning_stride,
                                    &locality_order);

    /* rc != 0 means an incompatible layout: don't pin, a warning was already issued */
    /* Set the per-thread affinity. In order to be able to check the success
     * of affinity settings, we will set nth_affinity_set to 1 on threads
     * where the affinity setting succeeded and to 0 where it failed.
     * Reducing these 0/1 values over the threads will give the total number
     * of threads on which we succeeded.
     */
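    /* For example, if 3 of 4 threads manage to set their affinity, the
     * reduction gives nth_affinity_set == 3, which differs from
     * nthread_local and triggers the warning further below. */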
    nth_affinity_set = 0;
#pragma omp parallel num_threads(nthread_local) reduction(+:nth_affinity_set)
    {
        int      thread_id, thread_id_node;
        int      index, core;
        gmx_bool setaffinity_ret;

        thread_id      = gmx_omp_get_thread_num();
        thread_id_node = thread0_id_node + thread_id;
        index          = offset + thread_id_node*hw_opt->core_pinning_stride;
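        /* For example, with offset 0, a stride of 2 and node-local thread
         * ids 0-3, the threads map to logical cores 0, 2, 4 and 6 (or to
         * their locality-reordered equivalents chosen below). */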
        if (locality_order != NULL)
        {
            core = locality_order[index];
        }
        else
        {
            core = index;
        }
        setaffinity_ret = tMPI_Thread_setaffinity_single(tMPI_Thread_self(), core);
        /* store the per-thread success-values of the setaffinity */
        nth_affinity_set = (setaffinity_ret == 0);
        fprintf(debug, "On rank %2d, thread %2d, index %2d, core %2d the affinity setting returned %d\n",
                cr->nodeid, gmx_omp_get_thread_num(), index, core, setaffinity_ret);
    }
    if (nth_affinity_set > nthread_local)
    {
        char msg[STRLEN];
        sprintf(msg, "Looks like we have set affinity for more threads than "
                "we have (%d > %d)!\n", nth_affinity_set, nthread_local);
        gmx_incons(msg);
    }
    /* check & warn if some threads failed to set their affinities */
    if (nth_affinity_set != nthread_local)
    {
        char sbuf1[STRLEN], sbuf2[STRLEN];

        /* sbuf1 contains rank info, while sbuf2 contains OpenMP thread info */
        sbuf1[0] = sbuf2[0] = '\0';
        /* Only add rank info if we have more than one rank. */
        if (cr->nnodes > 1)
        {
#ifdef GMX_THREAD_MPI
            sprintf(sbuf1, "In tMPI thread #%d: ", cr->nodeid);
#else       /* GMX_LIB_MPI */
            sprintf(sbuf1, "In MPI process #%d: ", cr->nodeid);
#endif
        }
        if (nthread_local > 1)
        {
            sprintf(sbuf2, "for %d/%d thread%s ",
                    nthread_local - nth_affinity_set, nthread_local,
                    nthread_local > 1 ? "s" : "");
        }
        md_print_warn(NULL, fplog,
                      "WARNING: %sAffinity setting %sfailed.\n"
                      "         This can cause performance degradation! If you think your settings are\n"
                      "         correct, contact the GROMACS developers.",
                      sbuf1, sbuf2);
    }
/* Check the process affinity mask and, if it is found to be non-default
 * (i.e. not including all logical cores), honor it and disable mdrun's
 * internal thread affinity setting.
 * Note that this will only work on Linux as we use a GNU feature.
 */
void
gmx_check_thread_affinity_set(FILE *fplog, const t_commrec *cr,
                              gmx_hw_opt_t *hw_opt, int ncpus,
                              gmx_bool bAfterOpenmpInit)
{
#ifdef HAVE_SCHED_GETAFFINITY
    cpu_set_t mask_current;
    int       i, ret, cpu_count, cpu_set;
    gmx_bool  bAllSet;
    if (hw_opt->thread_affinity == threadaffOFF)
    {
        /* internal affinity setting is off, don't bother checking process affinity */
        return;
    }
    CPU_ZERO(&mask_current);
    if ((ret = sched_getaffinity(0, sizeof(cpu_set_t), &mask_current)) != 0)
    {
        /* failed to query affinity mask, will just return */
        fprintf(debug, "Failed to query affinity mask (error %d)", ret);
        return;
    }
    /* Before proceeding with the actual check, make sure that the number of
     * detected CPUs is >= the CPUs in the current set.
     * We need to check for CPU_COUNT as it was added only in glibc 2.6. */
#ifdef CPU_COUNT
    if (ncpus < CPU_COUNT(&mask_current))
    {
        fprintf(debug, "%d CPUs detected, but CPU_COUNT returned %d",
                ncpus, CPU_COUNT(&mask_current));
        return;
    }
#endif /* CPU_COUNT */
    bAllSet = TRUE;
    for (i = 0; (i < ncpus && i < CPU_SETSIZE); i++)
    {
        bAllSet = bAllSet && (CPU_ISSET(i, &mask_current) != 0);
    }
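    /* For example, if the queuing system or a tool like taskset restricted
     * this process to cores 0-3 on an 8-core node, CPU_ISSET() is false for
     * cores 4-7, bAllSet ends up FALSE, and the externally set mask is
     * honored below by switching the internal affinity setting off. */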
        if (hw_opt->thread_affinity == threadaffAUTO)
        {
            if (!bAfterOpenmpInit)
            {
                md_print_warn(cr, fplog,
                              "Non-default thread affinity set, disabling internal thread affinity");
            }
            else
            {
                md_print_warn(cr, fplog,
                              "Non-default thread affinity set probably by the OpenMP library,\n"
                              "disabling internal thread affinity");
            }
            hw_opt->thread_affinity = threadaffOFF;
        }
        /* Only warn once, at the last check (bAfterOpenmpInit==TRUE) */
        if (bAfterOpenmpInit)
        {
            md_print_warn(cr, fplog,
                          "Overriding thread affinity set outside %s\n",
                          ShortProgram());
        }
        fprintf(debug, "Non-default affinity mask found\n");
        fprintf(debug, "Default affinity mask found\n");
#endif /* HAVE_SCHED_GETAFFINITY */