- rc = get_thread_affinity_layout(fplog, cr, hwinfo,
- nthread_node,
- offset, &hw_opt->core_pinning_stride,
- &locality_order);
+ /* hw_opt is shared among tMPI threads, so for thread safety we need to do
+ * the layout detection only on master as core_pinning_stride is an in-out
+ * parameter and gets auto-set depending on its initial value.
+ * This is not thread-safe with multi-simulations, but that's anyway not
+ * supported by tMPI. */
+ if (SIMMASTER(cr))
+ {
+ int ret;
+ int i;
+
+ ret = tMPI_Thread_mutex_lock(&locality_order_mtx);
+ if (ret != 0)
+ {
+ goto locality_order_err;
+ }
+ rc = get_thread_affinity_layout(fplog, cr, hwinfo,
+ nthread_node,
+ offset, &hw_opt->core_pinning_stride,
+ &locality_order);
+ have_locality_order = TRUE;
+ ret = tMPI_Thread_cond_broadcast(&locality_order_cond);
+ if (ret != 0)
+ {
+ tMPI_Thread_mutex_unlock(&locality_order_mtx);
+ goto locality_order_err;
+ }
+ ret = tMPI_Thread_mutex_unlock(&locality_order_mtx);
+ if (ret != 0)
+ {
+ goto locality_order_err;
+ }
+ }
+ else
+ {
+ int ret;
+ /* all other threads wait for the locality order data. */
+ ret = tMPI_Thread_mutex_lock(&locality_order_mtx);
+ if (ret != 0)
+ {
+ goto locality_order_err;
+ }
+
+ while (!have_locality_order)
+ {
+ ret = tMPI_Thread_cond_wait(&locality_order_cond,
+ &locality_order_mtx);
+ if (ret != 0)
+ {
+ tMPI_Thread_mutex_unlock(&locality_order_mtx);
+ goto locality_order_err;
+ }
+ }
+ ret = tMPI_Thread_mutex_unlock(&locality_order_mtx);
+ if (ret != 0)
+ {
+ goto locality_order_err;
+ }
+ }