Apply re-formatting to C++ in src/ tree.
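
The hunks below show the mechanical effect of the re-formatting on one file, src/gromacs/mdrunutility/threadaffinity.cpp: calls that exceed the column limit are rewrapped so that each argument sits on its own line aligned under the first argument, while a few very long calls are instead broken immediately after the opening parenthesis with the arguments packed onto the continuation line. As a minimal sketch (the function and variable names below are made up for illustration and are not part of this patch), this is the layout clang-format produces when argument bin-packing is disabled, e.g. BinPackArguments: false; the exact formatting configuration used for the tree is not shown in this diff:

#include <cstdio>

// Hypothetical example only: a call that no longer fits on one line is
// wrapped with one argument per line, aligned after the opening parenthesis.
static void reportAffinity(int rank, int thread, int index, int core, bool ok)
{
    // Before re-formatting (arguments packed onto as few lines as possible):
    //     fprintf(stderr, "rank %2d thread %2d index %2d core %2d ok %d\n",
    //             rank, thread, index, core, ok ? 1 : 0);
    // After re-formatting (the style applied throughout this patch):
    fprintf(stderr,
            "rank %2d thread %2d index %2d core %2d ok %d\n",
            rank,
            thread,
            index,
            core,
            ok ? 1 : 0);
}

int main()
{
    reportAffinity(0, 0, 0, 0, true);
    return 0;
}
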
diff --git a/src/gromacs/mdrunutility/threadaffinity.cpp b/src/gromacs/mdrunutility/threadaffinity.cpp
index 8cf377acbf160ab0ee2c720f86bea938eecc880b..1324cf7c79c3dc4bb8528e1cd3d49588049bd22b 100644
@@ -262,7 +262,8 @@ static bool get_thread_affinity_layout(const gmx::MDLogger&         mdlog,
     {
         GMX_LOG(mdlog.info)
                 .appendTextFormatted("Pinning threads with a%s logical core stride of %d",
-                                     bPickPinStride ? "n auto-selected" : " user-specified", *pin_stride);
+                                     bPickPinStride ? "n auto-selected" : " user-specified",
+                                     *pin_stride);
     }
 
     *issuedWarning = alreadyWarned;
@@ -317,7 +318,11 @@ static bool set_affinity(const t_commrec*            cr,
                 fprintf(debug,
                         "On rank %2d, thread %2d, index %2d, core %2d the affinity setting "
                         "returned %d\n",
-                        cr->nodeid, gmx_omp_get_thread_num(), index, core, ret ? 1 : 0);
+                        cr->nodeid,
+                        gmx_omp_get_thread_num(),
+                        index,
+                        core,
+                        ret ? 1 : 0);
             }
         }
         GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
@@ -330,7 +335,8 @@ static bool set_affinity(const t_commrec*            cr,
         sprintf(msg,
                 "Looks like we have set affinity for more threads than "
                 "we have (%d > %d)!\n",
-                nth_affinity_set, nthread_local);
+                nth_affinity_set,
+                nthread_local);
         gmx_incons(msg);
     }
 
@@ -356,7 +362,10 @@ static bool set_affinity(const t_commrec*            cr,
 
         if (nthread_local > 1)
         {
-            sprintf(sbuf2, "for %d/%d thread%s ", nthread_local - nth_affinity_set, nthread_local,
+            sprintf(sbuf2,
+                    "for %d/%d thread%s ",
+                    nthread_local - nth_affinity_set,
+                    nthread_local,
                     nthread_local > 1 ? "s" : "");
         }
 
@@ -382,8 +391,8 @@ void analyzeThreadsOnThisNode(const gmx::PhysicalNodeCommunicator& physicalNodeC
         /* MPI_Scan is inclusive, but here we need exclusive */
         *intraNodeThreadOffset -= numThreadsOnThisRank;
         /* Get the total number of threads on this physical node */
-        MPI_Allreduce(&numThreadsOnThisRank, numThreadsOnThisNode, 1, MPI_INT, MPI_SUM,
-                      physicalNodeComm.comm_);
+        MPI_Allreduce(
+                &numThreadsOnThisRank, numThreadsOnThisNode, 1, MPI_INT, MPI_SUM, physicalNodeComm.comm_);
     }
 #else
     GMX_UNUSED_VALUE(physicalNodeComm);
@@ -446,17 +455,23 @@ void gmx_set_thread_affinity(const gmx::MDLogger&         mdlog,
 
     bool affinityIsAutoAndNumThreadsIsNotAuto =
             (hw_opt->threadAffinity == ThreadAffinity::Auto && !hw_opt->totNumThreadsIsAuto);
-    bool issuedWarning;
-    bool validLayout = get_thread_affinity_layout(
-            mdlog, cr, hwTop, numThreadsOnThisNode, affinityIsAutoAndNumThreadsIsNotAuto, offset,
-            &core_pinning_stride, &localityOrder, &issuedWarning);
+    bool                   issuedWarning;
+    bool                   validLayout = get_thread_affinity_layout(mdlog,
+                                                  cr,
+                                                  hwTop,
+                                                  numThreadsOnThisNode,
+                                                  affinityIsAutoAndNumThreadsIsNotAuto,
+                                                  offset,
+                                                  &core_pinning_stride,
+                                                  &localityOrder,
+                                                  &issuedWarning);
     const gmx::sfree_guard localityOrderGuard(localityOrder);
 
     bool allAffinitiesSet;
     if (validLayout)
     {
-        allAffinitiesSet = set_affinity(cr, numThreadsOnThisRank, intraNodeThreadOffset, offset,
-                                        core_pinning_stride, localityOrder, affinityAccess);
+        allAffinitiesSet = set_affinity(
+                cr, numThreadsOnThisRank, intraNodeThreadOffset, offset, core_pinning_stride, localityOrder, affinityAccess);
     }
     else
     {
@@ -502,8 +517,10 @@ static bool detectDefaultAffinityMask(const int nthreads_hw_avail)
     {
         if (debug)
         {
-            fprintf(debug, "%d hardware threads detected, but %d was returned by CPU_COUNT",
-                    nthreads_hw_avail, CPU_COUNT(&mask_current));
+            fprintf(debug,
+                    "%d hardware threads detected, but %d was returned by CPU_COUNT",
+                    nthreads_hw_avail,
+                    CPU_COUNT(&mask_current));
         }
         detectedDefaultAffinityMask = false;
     }