/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012, by the GROMACS development team, led by
 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
 * others, as listed in the AUTHORS file in the top-level source
 * directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#if defined(HAVE_SCHED_H) && defined(HAVE_SCHED_GETAFFINITY)
#define _GNU_SOURCE     /* sched_getaffinity() and the CPU_* macros are GNU extensions */
#include <sched.h>
#include <sys/syscall.h>
#endif

#include <stdio.h>

#include "typedefs.h"
#include "types/commrec.h"
#include "types/hw_info.h"
#include "copyrite.h"
#include "gmx_cpuid.h"
#include "gmx_omp.h"
#include "gmx_omp_nthreads.h"
#include "gmx_fatal.h"
#include "md_logging.h"
#include "network.h"
#include "gmx_thread_affinity.h"

#include "thread_mpi/threads.h"
static int
get_thread_affinity_layout(FILE *fplog,
                           const t_commrec *cr,
                           const gmx_hw_info_t *hwinfo,
                           int nthreads,
                           int pin_offset, int *pin_stride,
                           const int **locality_order)
{
    int         nhwthreads, npkg, ncores, nhwthreads_per_core, rc;
    const int  *pkg_id;
    const int  *core_id;
    const int  *hwthread_id;
    gmx_bool    bPickPinStride;
    if (pin_offset < 0)
    {
        gmx_fatal(FARGS, "Negative thread pinning offset requested");
    }
    if (*pin_stride < 0)
    {
        gmx_fatal(FARGS, "Negative thread pinning stride requested");
    }

    rc = gmx_cpuid_topology(hwinfo->cpuid_info, &nhwthreads, &npkg, &ncores,
                            &nhwthreads_per_core,
                            &pkg_id, &core_id, &hwthread_id, locality_order);

    if (rc != 0)
    {
        /* topology information not available or invalid, ignore it */
        nhwthreads      = hwinfo->nthreads_hw_avail;
        *locality_order = NULL;

        if (nhwthreads <= 0)
        {
            /* We don't know anything about the hardware, don't pin */
            md_print_warn(cr, fplog,
                          "NOTE: We don't know how many logical cores we have, will not pin threads");
            return -1;
        }
    }
    if (nthreads > nhwthreads)
    {
        /* We are oversubscribing, don't pin */
        md_print_warn(NULL, fplog,
                      "WARNING: Oversubscribing the CPU, will not pin threads");
        return -1;
    }

    if (pin_offset + nthreads > nhwthreads)
    {
        /* We are oversubscribing, don't pin */
        md_print_warn(NULL, fplog,
                      "WARNING: The requested pin offset is too large for the available logical cores,\n"
                      "         will not pin threads");
        return -1;
    }
    /* do we need to choose the pinning stride? */
    bPickPinStride = (*pin_stride == 0);

    if (bPickPinStride)
    {
        if (rc == 0 && pin_offset + nthreads*nhwthreads_per_core <= nhwthreads)
        {
            /* Put one thread on each physical core */
            *pin_stride = nhwthreads_per_core;
        }
        else
        {
            /* We don't know if we have SMT, and if we do, we don't know
             * if hw threads in the same physical core are consecutive.
             * Without SMT the pinning layout should not matter too much,
             * so we assume a consecutive layout and maximally spread out
             * the threads at equal threads per core.
             * Note that IBM is the major non-x86 case with cpuid support
             * and probably threads are already pinned by the queuing system,
             * so we wouldn't end up here in the first place.
             */
            *pin_stride = (nhwthreads - pin_offset)/nthreads;
        }
    }
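    /* Worked example (illustrative numbers only, not from any specific
     * machine): with nhwthreads = 16, nhwthreads_per_core = 2, pin_offset = 0
     * and nthreads = 8, the first branch applies (0 + 8*2 <= 16) and the
     * stride becomes 2, i.e. one thread per physical core. With rc != 0 and
     * pin_offset = 4 instead, the fallback gives a stride of (16 - 4)/8 = 1. */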
    /* Check the placement of the thread with the largest index to make sure
     * that the offset & stride don't cause pinning beyond the last hardware thread. */
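    /* E.g. (hypothetical numbers): pin_offset = 2, *pin_stride = 2,
     * nthreads = 8 and nhwthreads = 16 would place the last thread at
     * 2 + 7*2 = 16, past the last logical core (15), so we refuse to pin. */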
    if (pin_offset + (nthreads-1)*(*pin_stride) >= nhwthreads)
    {
        /* We are oversubscribing, don't pin */
        md_print_warn(NULL, fplog,
                      "WARNING: The requested pinning stride is too large for the available logical cores,\n"
                      "         will not pin threads");
        return -1;
    }
    if (fplog != NULL)
    {
        fprintf(fplog, "Pinning threads with a%s logical core stride of %d\n",
                bPickPinStride ? "n auto-selected" : " user-specified",
                *pin_stride);
    }

    return 0;
}
/* Set CPU affinity. Can be important for performance.
   On some systems (e.g. Cray) CPU affinity is set by default.
   But the default assignment does not work (well) when only some of the
   ranks have threads, and this causes very low performance.
   External tools have cumbersome syntax for setting affinity
   in the case that only some ranks have threads.
   Thus it is important that GROMACS sets the affinity internally
   when only the PME ranks use threads.
 */
void
gmx_set_thread_affinity(FILE *fplog,
                        const t_commrec *cr,
                        gmx_hw_opt_t *hw_opt,
                        const gmx_hw_info_t *hwinfo,
                        const t_inputrec *inputrec)
{
    int        nth_affinity_set, thread_id_node,
               nthread_local, nthread_node, nthread_hw_max, nphyscore;
    int        offset, rc;
    const int *locality_order;

    if (hw_opt->thread_affinity == threadaffOFF)
    {
        /* Nothing to do */
        return;
    }
#ifndef __APPLE__
    /* If the tMPI thread affinity setting is not supported encourage the user
     * to report it as it's either a bug or an exotic platform which we might
     * want to support. */
    if (tMPI_Thread_setaffinity_support() != TMPI_SETAFFINITY_SUPPORT_YES)
    {
        md_print_warn(NULL, fplog,
                      "Cannot set thread affinities on the current platform. On NUMA systems this\n"
                      "can cause performance degradation. If you think your platform should support\n"
                      "setting affinities, contact the GROMACS developers.");
        return;
    }
#endif /* __APPLE__ */
    /* threads on this MPI process or tMPI thread */
    if (cr->duty & DUTY_PP)
    {
        nthread_local = gmx_omp_nthreads_get(emntNonbonded);
    }
    else
    {
        nthread_local = gmx_omp_nthreads_get(emntPME);
    }

    /* map the current process to cores */
    thread_id_node = 0;
    nthread_node   = nthread_local;
#ifdef GMX_MPI
    if (PAR(cr) || MULTISIM(cr))
    {
        /* We need to determine a scan of the thread counts in this
         * physical node.
         */
        MPI_Comm comm_intra;

        MPI_Comm_split(MPI_COMM_WORLD, gmx_hostname_num(), cr->rank_intranode,
                       &comm_intra);
        MPI_Scan(&nthread_local, &thread_id_node, 1, MPI_INT, MPI_SUM, comm_intra);
        /* MPI_Scan is inclusive, but here we need exclusive */
        thread_id_node -= nthread_local;
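        /* Example (hypothetical counts): two ranks on a node with 4 threads
         * each get inclusive scans of 4 and 8; subtracting the local count
         * yields 0 and 4, the index of each rank's first thread on the node. */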
        /* Get the total number of threads on this physical node */
        MPI_Allreduce(&nthread_local, &nthread_node, 1, MPI_INT, MPI_SUM, comm_intra);
        MPI_Comm_free(&comm_intra);
    }
#endif
    if (hw_opt->thread_affinity == threadaffAUTO &&
        nthread_node != hwinfo->nthreads_hw_avail)
    {
        if (nthread_node > 1 && nthread_node < hwinfo->nthreads_hw_avail)
        {
            md_print_warn(cr, fplog,
                          "NOTE: The number of threads is not equal to the number of (logical) cores\n"
                          "      and the -pin option is set to auto: will not pin threads to cores.\n"
                          "      This can lead to significant performance degradation.\n"
                          "      Consider using -pin on (and -pinoffset in case you run multiple jobs).\n");
        }

        return;
    }
    offset = 0;
    if (hw_opt->core_pinning_offset != 0)
    {
        offset = hw_opt->core_pinning_offset;
        md_print_info(cr, fplog, "Applying core pinning offset %d\n", offset);
    }

    rc = get_thread_affinity_layout(fplog, cr, hwinfo, nthread_node,
                                    offset, &hw_opt->core_pinning_stride,
                                    &locality_order);
    if (rc != 0)
    {
        /* Incompatible layout, don't pin, warning was already issued */
        return;
    }
    /* Set the per-thread affinity. In order to be able to check the success
     * of affinity settings, we will set nth_affinity_set to 1 on threads
     * where the affinity setting succeeded and to 0 where it failed.
     * Reducing these 0/1 values over the threads will give the total number
     * of threads on which we succeeded.
     */
    nth_affinity_set = 0;
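    /* Worked example (hypothetical): with 4 local threads of which one fails
     * to pin, the reduction gives nth_affinity_set = 1+1+0+1 = 3, which the
     * checks below compare against nthread_local = 4. */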
#pragma omp parallel firstprivate(thread_id_node) num_threads(nthread_local) \
    reduction(+:nth_affinity_set)
    {
        int      thread_id, index, core;   /* thread-private copies */
        gmx_bool setaffinity_ret;

        thread_id       = gmx_omp_get_thread_num();
        thread_id_node += thread_id;
        index           = offset + thread_id_node*hw_opt->core_pinning_stride;
        if (locality_order != NULL)
        {
            core = locality_order[index];
        }
        else
        {
            core = index;
        }
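        /* Example with hypothetical values: offset = 0, stride = 2 and
         * thread_id_node = 3 give index = 6; with a locality order of
         * {0,2,4,6,1,3,5,7} the thread is pinned to logical core 5,
         * and without one simply to logical core 6. */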
        setaffinity_ret = tMPI_Thread_setaffinity_single(tMPI_Thread_self(), core);

        /* store the per-thread success-values of the setaffinity */
        nth_affinity_set = (setaffinity_ret == 0);

        if (debug)
        {
            fprintf(debug, "On rank %2d, thread %2d, core %2d the affinity setting returned %d\n",
                    cr->nodeid, gmx_omp_get_thread_num(), core, setaffinity_ret);
        }
    }
    if (nth_affinity_set > nthread_local)
    {
        char msg[STRLEN];

        sprintf(msg, "Looks like we have set affinity for more threads than "
                "we have (%d > %d)!\n", nth_affinity_set, nthread_local);
        gmx_incons(msg);
    }
    /* check & warn if some threads failed to set their affinities */
    if (nth_affinity_set != nthread_local)
    {
        char sbuf1[STRLEN], sbuf2[STRLEN];

        /* sbuf1 contains rank info, while sbuf2 OpenMP thread info */
        sbuf1[0] = sbuf2[0] = '\0';
        /* Only add rank info if we have more than one rank. */
        if (cr->nnodes > 1)
        {
#ifdef GMX_THREAD_MPI
            sprintf(sbuf1, "In tMPI thread #%d: ", cr->nodeid);
#else  /* GMX_LIB_MPI */
            sprintf(sbuf1, "In MPI process #%d: ", cr->nodeid);
#endif
        }

        if (nthread_local > 1)
        {
            sprintf(sbuf2, "for %d/%d thread%s ",
                    nthread_local - nth_affinity_set, nthread_local,
                    nthread_local - nth_affinity_set > 1 ? "s" : "");
        }

        md_print_warn(NULL, fplog,
                      "WARNING: %sAffinity setting %sfailed.\n"
                      "         This can cause performance degradation! If you think your settings are\n"
                      "         correct, contact the GROMACS developers.",
                      sbuf1, sbuf2);
    }
}
/* Check the process affinity mask and, if it is found to be non-default,
 * honor it by disabling mdrun's internal thread affinity setting.
 * Note that this will only work on Linux, as we use a GNU feature.
 */
void
gmx_check_thread_affinity_set(FILE *fplog, const t_commrec *cr,
                              gmx_hw_opt_t *hw_opt, int ncpus,
                              gmx_bool bAfterOpenmpInit)
{
#ifdef HAVE_SCHED_GETAFFINITY
    cpu_set_t mask_current;
    int       i, ret, cpu_count, cpu_set;
    gmx_bool  bAllSet;

    if (hw_opt->thread_affinity == threadaffOFF)
    {
        /* internal affinity setting is off, don't bother checking process affinity */
        return;
    }
    CPU_ZERO(&mask_current);
    if ((ret = sched_getaffinity(0, sizeof(cpu_set_t), &mask_current)) != 0)
    {
        /* failed to query affinity mask, will just return */
        if (debug)
        {
            fprintf(debug, "Failed to query affinity mask (error %d)\n", ret);
        }
        return;
    }
    /* Before proceeding with the actual check, make sure that the number of
     * detected CPUs is >= the CPUs in the current set.
     * We need to check for CPU_COUNT as it was added only in glibc 2.6. */
#ifdef CPU_COUNT
    if (ncpus < CPU_COUNT(&mask_current))
    {
        if (debug)
        {
            fprintf(debug, "%d CPUs detected, but CPU_COUNT returned %d\n",
                    ncpus, CPU_COUNT(&mask_current));
        }
        return;
    }
#endif /* CPU_COUNT */
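    /* If CPU_COUNT is unavailable (glibc < 2.6), the same count can be
     * obtained by hand; a minimal sketch under that assumption:
     *
     *     int n = 0;
     *     for (i = 0; i < CPU_SETSIZE; i++)
     *     {
     *         n += (CPU_ISSET(i, &mask_current) != 0);
     *     }
     */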
    bAllSet = TRUE;
    for (i = 0; (i < ncpus && i < CPU_SETSIZE); i++)
    {
        bAllSet = bAllSet && (CPU_ISSET(i, &mask_current) != 0);
    }
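    /* Illustration (hypothetical): on an 8-CPU machine a mask with only
     * CPUs 0-3 set leaves bAllSet FALSE, i.e. the affinity was restricted
     * externally and we should honor it below. */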
    if (!bAllSet)
    {
        if (hw_opt->thread_affinity == threadaffAUTO)
        {
            if (!bAfterOpenmpInit)
            {
                md_print_warn(cr, fplog,
                              "Non-default thread affinity set, disabling internal thread affinity");
            }
            else
            {
                md_print_warn(cr, fplog,
                              "Non-default thread affinity set probably by the OpenMP library,\n"
                              "disabling internal thread affinity");
            }
            hw_opt->thread_affinity = threadaffOFF;
        }
        else
        {
            /* Only warn once, at the last check (bAfterOpenmpInit==TRUE) */
            if (bAfterOpenmpInit)
            {
                md_print_warn(cr, fplog,
                              "Overriding thread affinity set outside %s\n",
                              ShortProgram());
            }
        }

        if (debug)
        {
            fprintf(debug, "Non-default affinity mask found\n");
        }
    }
    else if (debug)
    {
        fprintf(debug, "Default affinity mask found\n");
    }
#endif /* HAVE_SCHED_GETAFFINITY */
}