/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2008, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017 by the GROMACS development team.
 * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "wallcycle.h"

#include "gromacs/math/functions.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/timing/wallcyclereporting.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/snprintf.h"

static const bool useCycleSubcounters = GMX_CYCLE_SUBCOUNTERS;
/*! \brief Enables consistency checking for the counters.
 *
 * If the macro is set to 1, the code checks that the counter you stop
 * is the same one that was started last, and that counters are not
 * nested too deeply.
 */
# define DEBUG_WCYCLE 0

//! Whether wallcycle debugging is enabled
constexpr bool gmx_unused enableWallcycleDebug = (DEBUG_WCYCLE != 0);
//! True if only the master rank should print debugging output
constexpr bool gmx_unused onlyMasterDebugPrints = true;
//! True if cycle counter nesting depth debugging prints are enabled
constexpr bool gmx_unused debugPrintDepth = false /* enableWallcycleDebug */;
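
/* Example (a sketch, not part of the original sources): to diagnose
 * mismatched start/stop pairs, rebuild with
 *     #define DEBUG_WCYCLE 1
 * above; debug_start_check() and debug_stop_check() below will then abort
 * via gmx_fatal() on a mismatched or too deeply nested counter. Setting
 * debugPrintDepth to true additionally prints an indented nesting trace
 * to stderr. */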
# include "gromacs/utility/fatalerror.h"
    /* did we detect one or more invalid cycle counts */
    gmx_bool haveInvalidCount;
    /* variables for testing/debugging */
    int          counterlist[DEPTH_MAX];
    gmx_cycles_t cycle_prev;
    int64_t      reset_counters;
    MPI_Comm     mpi_comm_mygroup;
/* Each name should not exceed 19 printing characters
   (i.e. the terminating null can be the twentieth) */
static const char* wcn[ewcNR] = { "Run",
                                  "Wait + Recv. PME F",
                                  "Wait PME GPU spread",
                                  "PME solve", /* the strings for FFT/solve are repeated here for mixed mode counters */
                                  "Wait PME GPU gather",
                                  "Wait GPU NB nonloc.",
                                  "Wait GPU state copy",
                                  "NB X/F buffer ops.",
static const char* wcsn[ewcsNR] = {
    "NS search non-loc.",
    "Listed buffer ops.",
    "Nonbonded F kernel",
    "Launch NB GPU tasks",
    "Launch Bonded GPU tasks",
    "Launch PME GPU tasks",
    "Ewald F correction",
    "Clear force buffer",
    "Launch GPU NB X buffer ops.",
    "Launch GPU NB F buffer ops.",
    "Launch GPU Comm. coord.",
    "Launch GPU Comm. force.",
/* PME GPU timing events' names - correspond to the enum in gpu_timing.h */
static const char* PMEStageNames[] = {
    "PME spline", "PME spread", "PME spline + spread", "PME 3D-FFT r2c",
    "PME solve",  "PME 3D-FFT c2r", "PME gather",
gmx_bool wallcycle_have_counter()
{
    return gmx_cycles_have_counter();
}
gmx_wallcycle_t wallcycle_init(FILE* fplog, int resetstep, t_commrec gmx_unused* cr)
    if (!wallcycle_have_counter())
    wc->haveInvalidCount = FALSE;
    wc->wc_barrier       = FALSE;
    wc->wcc_all          = nullptr;
    wc->reset_counters   = resetstep;
    if (PAR(cr) && getenv("GMX_CYCLE_BARRIER") != nullptr)
        fprintf(fplog, "\nWill call MPI_Barrier before each cycle start/stop call\n\n");
        wc->wc_barrier       = TRUE;
        wc->mpi_comm_mygroup = cr->mpi_comm_mygroup;
    snew(wc->wcc, ewcNR);
    if (getenv("GMX_CYCLE_ALL") != nullptr)
        fprintf(fplog, "\nWill time all the code during the run\n\n");
        snew(wc->wcc_all, ewcNR * ewcNR);
    if (useCycleSubcounters)
        snew(wc->wcsc, ewcsNR);
    wc->isMasterRank = MASTER(cr);
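
/* Usage sketch (hypothetical caller, not part of this file): one counter
 * set is created per rank and then passed to every timed code path, e.g.
 *
 *     gmx_wallcycle_t wc = wallcycle_init(fplog, 0, cr); // 0 = resetstep
 *     wallcycle_start(wc, ewcRUN);
 *     // ... simulation work ...
 *     wallcycle_stop(wc, ewcRUN);
 *     wallcycle_destroy(wc);
 */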
void wallcycle_destroy(gmx_wallcycle_t wc)
    if (wc->wcc != nullptr)
    if (wc->wcc_all != nullptr)
    if (wc->wcsc != nullptr)
static void wallcycle_all_start(gmx_wallcycle_t wc, int ewc, gmx_cycles_t cycle)
    wc->cycle_prev = cycle;

static void wallcycle_all_stop(gmx_wallcycle_t wc, int ewc, gmx_cycles_t cycle)
    wc->wcc_all[wc->ewc_prev * ewcNR + ewc].n += 1;
    wc->wcc_all[wc->ewc_prev * ewcNR + ewc].c += cycle - wc->cycle_prev;
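
/* Note: wcc_all is a flat ewcNR x ewcNR matrix. Entry [ewc_prev][ewc]
 * accumulates how often counter ewc_prev was followed by counter ewc and
 * how many cycles elapsed in between; this is what the GMX_CYCLE_ALL
 * table later prints as wcn[i]/wcn[j] name pairs. */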
static void debug_start_check(gmx_wallcycle_t wc, int ewc)
    if (wc->count_depth < 0 || wc->count_depth >= DEPTH_MAX)
        gmx_fatal(FARGS, "wallcycle counter depth out of range: %d", wc->count_depth + 1);
    wc->counterlist[wc->count_depth] = ewc;
    if (debugPrintDepth && (!onlyMasterDebugPrints || wc->isMasterRank))
        std::string indentStr(4 * wc->count_depth, ' ');
        fprintf(stderr, "%swcycle_start depth %d, %s\n", indentStr.c_str(), wc->count_depth, wcn[ewc]);
static void debug_stop_check(gmx_wallcycle_t wc, int ewc)
    if (debugPrintDepth && (!onlyMasterDebugPrints || wc->isMasterRank))
        std::string indentStr(4 * wc->count_depth, ' ');
        fprintf(stderr, "%swcycle_stop depth %d, %s\n", indentStr.c_str(), wc->count_depth, wcn[ewc]);
    if (wc->count_depth < 0)
        gmx_fatal(FARGS, "wallcycle counter depth out of range when stopping %s: %d", wcn[ewc], wc->count_depth);
    if (wc->counterlist[wc->count_depth] != ewc)
        gmx_fatal(FARGS,
                  "wallcycle mismatch at stop, start %s, stop %s",
                  wcn[wc->counterlist[wc->count_depth]],
                  wcn[ewc]);
void wallcycle_start(gmx_wallcycle_t wc, int ewc)
        MPI_Barrier(wc->mpi_comm_mygroup);
    debug_start_check(wc, ewc);
    cycle              = gmx_cycles_read();
    wc->wcc[ewc].start = cycle;
    if (wc->wcc_all != nullptr)
            wallcycle_all_start(wc, ewc, cycle);
        else if (wc->wc_depth == 3)
            wallcycle_all_stop(wc, ewc, cycle);
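
/* The wcc_all bookkeeping above is updated only at specific values of the
 * nesting depth wc->wc_depth (e.g. depth 3 here, with wallcycle_stop()
 * mirroring it at depth 2), so more deeply nested counters leave the
 * transition matrix untouched. */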
void wallcycle_increment_event_count(gmx_wallcycle_t wc, int ewc)

void wallcycle_start_nocount(gmx_wallcycle_t wc, int ewc)
    wallcycle_start(wc, ewc);
double wallcycle_stop(gmx_wallcycle_t wc, int ewc)
    gmx_cycles_t cycle, last;
        MPI_Barrier(wc->mpi_comm_mygroup);
    debug_stop_check(wc, ewc);
    /* When processes or threads migrate between cores, the cycle counting
     * can get messed up if the cycle counters on different cores are not
     * synchronized. When this happens we expect both large negative and
     * positive cycle differences. We can detect negative cycle differences.
     * Detecting too-large positive counts is difficult, since counts can be
     * large, especially for ewcRUN. If we detect a negative count,
     * we will not print the cycle accounting table.
     */
    cycle = gmx_cycles_read();
    if (cycle >= wc->wcc[ewc].start)
        last = cycle - wc->wcc[ewc].start;
        wc->haveInvalidCount = TRUE;
    wc->wcc[ewc].c += last;
        wallcycle_all_stop(wc, ewc, cycle);
    else if (wc->wc_depth == 2)
        wallcycle_all_start(wc, ewc, cycle);
void wallcycle_get(gmx_wallcycle_t wc, int ewc, int* n, double* c)
    *c = static_cast<double>(wc->wcc[ewc].c);

void wallcycle_reset_all(gmx_wallcycle_t wc)
    for (i = 0; i < ewcNR; i++)
    wc->haveInvalidCount = FALSE;
    for (i = 0; i < ewcNR * ewcNR; i++)
        wc->wcc_all[i].n = 0;
        wc->wcc_all[i].c = 0;
    for (i = 0; i < ewcsNR; i++)
static gmx_bool is_pme_counter(int ewc)
    return (ewc >= ewcPMEMESH && ewc <= ewcPMEWAITCOMM);

static gmx_bool is_pme_subcounter(int ewc)
    return (ewc >= ewcPME_REDISTXF && ewc < ewcPMEWAITCOMM);
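
/* Both range checks assume that the ewc enum lays out all PME mesh
 * (sub)counters contiguously between ewcPMEMESH and ewcPMEWAITCOMM;
 * reordering that enum would silently break this classification. */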
/* Subtract counter ewc_sub timed inside a timing block for ewc_main */
static void subtract_cycles(wallcc_t* wcc, int ewc_main, int ewc_sub)
    if (wcc[ewc_sub].n > 0)
        if (wcc[ewc_main].c >= wcc[ewc_sub].c)
            wcc[ewc_main].c -= wcc[ewc_sub].c;
            /* Something is wrong with the cycle counting */
void wallcycle_scale_by_num_threads(gmx_wallcycle_t wc, bool isPmeRank, int nthreads_pp, int nthreads_pme)
    for (int i = 0; i < ewcNR; i++)
        if (is_pme_counter(i) || (i == ewcRUN && isPmeRank))
            wc->wcc[i].c *= nthreads_pme;
            for (int j = 0; j < ewcNR; j++)
                wc->wcc_all[i * ewcNR + j].c *= nthreads_pme;
            wc->wcc[i].c *= nthreads_pp;
            for (int j = 0; j < ewcNR; j++)
                wc->wcc_all[i * ewcNR + j].c *= nthreads_pp;
    if (useCycleSubcounters && wc->wcsc && !isPmeRank)
        for (int i = 0; i < ewcsNR; i++)
            wc->wcsc[i].c *= nthreads_pp;
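
/* Worked example (hedged): the counters are read on the master thread of
 * each rank only, so with 4 OpenMP threads per PP rank a counter that
 * measured 1e9 cycles is scaled to 4e9 here, making the final table
 * report aggregate core-cycles across all threads of the rank. */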
/* TODO Make an object for this function to return, containing some
 * vectors of something like wallcc_t for the summed wcc, wcc_all and
 * wcsc, AND the original wcc for rank 0.
 *
 * The GPU timing is reported only for rank 0, so we want to preserve
 * the original wcycle on that rank. Rank 0 also reports the global
 * counts before that, so needs something to contain the global data
 * without over-writing the rank-0 data. The current implementation
 * uses cycles_sum to manage this, which works OK now because wcsc and
 * wcc_all are unused by the GPU reporting, but it is not satisfactory
 * for the future. Also, there's no need for MPI_Allreduce, since
 * only MASTERRANK uses any of the results. */
WallcycleCounts wallcycle_sum(const t_commrec* cr, gmx_wallcycle_t wc)
    WallcycleCounts cycles_sum;
    double          cycles[int(ewcNR) + int(ewcsNR)];
    double          cycles_n[int(ewcNR) + int(ewcsNR) + 1];
    /* Default construction of std::array of non-class T can leave
       the values indeterminate, just like a C array, and icc
    subtract_cycles(wcc, ewcDOMDEC, ewcDDCOMMLOAD);
    subtract_cycles(wcc, ewcDOMDEC, ewcDDCOMMBOUND);
    subtract_cycles(wcc, ewcPME_FFT, ewcPME_FFTCOMM);
    if (cr->npmenodes == 0)
        /* All nodes do PME (or no PME at all) */
        subtract_cycles(wcc, ewcFORCE, ewcPMEMESH);
        /* There are PME-only nodes */
        if (wcc[ewcPMEMESH].n > 0)
            /* This must be a PME-only node; calculate the Wait + Comm. time */
            GMX_ASSERT(wcc[ewcRUN].c >= wcc[ewcPMEMESH].c,
                       "Total run ticks must be greater than PME-only ticks");
            wcc[ewcPMEWAITCOMM].c = wcc[ewcRUN].c - wcc[ewcPMEMESH].c;
    /* Store the cycles in a double buffer for summing */
    for (i = 0; i < ewcNR; i++)
        cycles_n[i] = static_cast<double>(wcc[i].n);
        cycles[i]   = static_cast<double>(wcc[i].c);
    for (i = 0; i < ewcsNR; i++)
        cycles_n[ewcNR + i] = static_cast<double>(wc->wcsc[i].n);
        cycles[ewcNR + i]   = static_cast<double>(wc->wcsc[i].c);
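
    /* Layout of the reduction buffers: indices [0, ewcNR) carry the main
     * counters, [ewcNR, ewcNR + ewcsNR) the subcounters, and cycles_n has
     * one extra trailing slot for the haveInvalidCount flag (see below). */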
    double buf[int(ewcNR) + int(ewcsNR) + 1];
    // TODO this code is used only at the end of the run, so we
    // can just do a simple reduce of haveInvalidCount in
    // wallcycle_print, and avoid bugs
    cycles_n[nsum] = (wc->haveInvalidCount ? 1 : 0);
    // TODO Use MPI_Reduce
    MPI_Allreduce(cycles_n, buf, nsum + 1, MPI_DOUBLE, MPI_MAX, cr->mpi_comm_mysim);
    for (i = 0; i < ewcNR; i++)
        wcc[i].n = gmx::roundToInt(buf[i]);
    wc->haveInvalidCount = (buf[nsum] > 0);
    for (i = 0; i < ewcsNR; i++)
        wc->wcsc[i].n = gmx::roundToInt(buf[ewcNR + i]);
    // TODO Use MPI_Reduce
    MPI_Allreduce(cycles, cycles_sum.data(), nsum, MPI_DOUBLE, MPI_SUM, cr->mpi_comm_mysim);
    if (wc->wcc_all != nullptr)
        double *buf_all, *cyc_all;
        snew(cyc_all, ewcNR * ewcNR);
        snew(buf_all, ewcNR * ewcNR);
        for (i = 0; i < ewcNR * ewcNR; i++)
            cyc_all[i] = wc->wcc_all[i].c;
        // TODO Use MPI_Reduce
        MPI_Allreduce(cyc_all, buf_all, ewcNR * ewcNR, MPI_DOUBLE, MPI_SUM, cr->mpi_comm_mysim);
        for (i = 0; i < ewcNR * ewcNR; i++)
            wc->wcc_all[i].c = static_cast<gmx_cycles_t>(buf_all[i]);
    for (i = 0; i < nsum; i++)
        cycles_sum[i] = cycles[i];
static void
print_cycles(FILE* fplog, double c2t, const char* name, int nnodes, int nthreads, int ncalls, double c_sum, double tot)
    char nnodes_str[STRLEN];
    char nthreads_str[STRLEN];
    char ncalls_str[STRLEN];

    double percentage = (tot > 0.) ? (100. * c_sum / tot) : 0.;
        snprintf(ncalls_str, sizeof(ncalls_str), "%10d", ncalls);
        snprintf(nnodes_str, sizeof(nnodes_str), "N/A");
        snprintf(nnodes_str, sizeof(nnodes_str), "%4d", nnodes);
        snprintf(nthreads_str, sizeof(nthreads_str), "N/A");
        snprintf(nthreads_str, sizeof(nthreads_str), "%4d", nthreads);
    /* Convert the cycle count to wallclock time for this task */
    fprintf(fplog,
            " %-19.19s %4s %4s %10s %10.3f %14.3f %5.1f\n",
static void print_gputimes(FILE* fplog, const char* name, int n, double t, double tot_t)
        snprintf(num, sizeof(num), "%10d", n);
        snprintf(avg_perf, sizeof(avg_perf), "%10.3f", t / n);
        sprintf(avg_perf, " ");
    if (t != tot_t && tot_t > 0)
        fprintf(fplog, " %-29s %10s%12.3f   %s   %5.1f\n", name, num, t / 1000, avg_perf, 100 * t / tot_t);
        fprintf(fplog, " %-29s %10s%12.3f   %s   %5.1f\n", name, "", t / 1000, avg_perf, 100.0);
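
/* Note: the GPU event times t and tot_t arrive here in milliseconds, hence
 * t / 1000 for the "Wall t (s)" column and t / n for the ms/step column of
 * the GPU timings table printed below. */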
static void print_header(FILE* fplog, int nrank_pp, int nth_pp, int nrank_pme, int nth_pme)
    int nrank_tot = nrank_pp + nrank_pme;
        fprintf(fplog, "On %d MPI rank%s", nrank_tot, nrank_tot == 1 ? "" : "s");
            fprintf(fplog, ", each using %d OpenMP threads", nth_pp);
        /* Don't report doing PP+PME, because we can't tell here if
         * this is RF, etc. */
        fprintf(fplog, "On %d MPI rank%s doing PP", nrank_pp, nrank_pp == 1 ? "" : "s");
            fprintf(fplog, ",%s using %d OpenMP threads", nrank_pp > 1 ? " each" : "", nth_pp);
        fprintf(fplog, ", and\non %d MPI rank%s doing PME", nrank_pme, nrank_pme == 1 ? "" : "s");
            fprintf(fplog, ",%s using %d OpenMP threads", nrank_pme > 1 ? " each" : "", nth_pme);
    fprintf(fplog, "\n\n");
    fprintf(fplog, " Computing:          Num   Num      Call    Wall time         Giga-Cycles\n");
    fprintf(fplog, "                     Ranks Threads  Count      (s)         total sum    %%\n");
void wallcycle_print(FILE*                            fplog,
                     const gmx::MDLogger&             mdlog,
                     const WallcycleCounts&           cyc_sum,
                     const gmx_wallclock_gpu_nbnxn_t* gpu_nbnxn_t,
                     const gmx_wallclock_gpu_pme_t*   gpu_pme_t)
    double tot, tot_for_pp, tot_for_rest, tot_cpu_overlap, gpu_cpu_ratio;
    double c2t, c2t_pp, c2t_pme = 0;
    int    i, j, npp, nth_tot;
    const char* hline =
            "-----------------------------------------------------------------------------";

    GMX_ASSERT(nth_pp > 0, "Number of particle-particle threads must be >0");
    GMX_ASSERT(nth_pme > 0, "Number of PME threads must be >0");
    GMX_ASSERT(nnodes > 0, "Number of nodes must be >0");
    GMX_ASSERT(npme >= 0, "Number of PME nodes cannot be negative");
    /* npme is the number of PME-only ranks used, and we always do PP work */
    GMX_ASSERT(npp > 0, "Number of particle-particle nodes must be >0");

    nth_tot = npp * nth_pp + npme * nth_pme;

    /* When using PME-only nodes, the next line is valid for both
       PP-only and PME-only nodes because they started ewcRUN at the
       same time. */
    tot = cyc_sum[ewcRUN];
    /* TODO This is heavy-handed, but until someone reworks the
       code so that it is provably robust with respect to
       non-positive values for all possible timer and cycle
       counters, less value is gained from printing whatever
       timing data might still be sensible for some non-Jenkins
       run than is lost from diagnosing Jenkins FP exceptions on
       runs about whose execution time we don't care. */
    GMX_LOG(mdlog.warning)
            .asParagraph()
            .appendTextFormatted(
                    "WARNING: A total of %f CPU cycles was recorded, so mdrun cannot print a "
    if (wc->haveInvalidCount)
        GMX_LOG(mdlog.warning)
                .asParagraph()
                .appendText(
                        "NOTE: Detected invalid cycle counts, probably because threads moved "
                        "between CPU cores that do not have synchronized cycle counters. Will not "
                        "print the cycle accounting.");

    /* Conversion factor from cycles to seconds */
    c2t    = realtime / tot;
    c2t_pp = c2t * nth_tot / static_cast<double>(npp * nth_pp);
        c2t_pme = c2t * nth_tot / static_cast<double>(npme * nth_pme);
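
    /* Worked example (hedged): with realtime = 100 s and tot = 2e12 cycles,
     * c2t = 5e-11 s/cycle; on 4 PP ranks x 2 threads and no PME ranks,
     * nth_tot = 8 and c2t_pp = c2t * 8 / 8 = c2t, so a counter that summed
     * 1e12 cycles prints as 50 s of wall time. */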
    fprintf(fplog, "\n R E A L   C Y C L E   A N D   T I M E   A C C O U N T I N G\n\n");

    print_header(fplog, npp, nth_pp, npme, nth_pme);

    fprintf(fplog, "%s\n", hline);
    for (i = ewcPPDURINGPME + 1; i < ewcNR; i++)
        if (is_pme_subcounter(i))
            /* Do not count these at all */
        else if (npme > 0 && is_pme_counter(i))
            /* Print timing information for PME-only nodes, but add an
             * asterisk so the reader of the table knows that the
             * walltimes are not meant to add up. The asterisk still
             * fits in the required maximum of 19 characters. */
            snprintf(buffer, STRLEN, "%s *", wcn[i]);
            print_cycles(fplog, c2t_pme, buffer, npme, nth_pme, wc->wcc[i].n, cyc_sum[i], tot);
            /* Print timing information when it is for a PP or PP+PME
               node */
            print_cycles(fplog, c2t_pp, wcn[i], npp, nth_pp, wc->wcc[i].n, cyc_sum[i], tot);
            tot_for_pp += cyc_sum[i];
    if (wc->wcc_all != nullptr)
        for (i = 0; i < ewcNR; i++)
            for (j = 0; j < ewcNR; j++)
                snprintf(buf, 20, "%-9.9s %-9.9s", wcn[i], wcn[j]);
                print_cycles(fplog,
                             c2t_pp,
                             buf,
                             npp,
                             nth_pp,
                             wc->wcc_all[i * ewcNR + j].n,
                             wc->wcc_all[i * ewcNR + j].c,
                             tot);
    tot_for_rest = tot * npp * nth_pp / static_cast<double>(nth_tot);
    print_cycles(fplog, c2t_pp, "Rest", npp, nth_pp, -1, tot_for_rest - tot_for_pp, tot);
    fprintf(fplog, "%s\n", hline);
    print_cycles(fplog, c2t, "Total", npp, nth_pp, -1, tot, tot);
    fprintf(fplog, "%s\n", hline);

        fprintf(fplog,
                "(*) Note that with separate PME ranks, the walltime column actually sums to\n"
                "    twice the total reported, but the cycle count total and %% are correct.\n"
    if (wc->wcc[ewcPMEMESH].n > 0)
        // A workaround to not print the breakdown when no subcounters were recorded.
        // TODO: figure out and record PME GPU counters (what to do with the waiting ones?)
        std::vector<int> validPmeSubcounterIndices;
        for (i = ewcPPDURINGPME + 1; i < ewcNR; i++)
            if (is_pme_subcounter(i) && wc->wcc[i].n > 0)
                validPmeSubcounterIndices.push_back(i);
        if (!validPmeSubcounterIndices.empty())
            fprintf(fplog, " Breakdown of PME mesh computation\n");
            fprintf(fplog, "%s\n", hline);
            for (auto i : validPmeSubcounterIndices)
                print_cycles(fplog,
                             npme > 0 ? c2t_pme : c2t_pp,
                             wcn[i],
                             npme > 0 ? npme : npp,
                             npme > 0 ? nth_pme : nth_pp,
                             wc->wcc[i].n,
                             cyc_sum[i],
                             tot);
            fprintf(fplog, "%s\n", hline);
    if (useCycleSubcounters && wc->wcsc)
        fprintf(fplog, " Breakdown of PP computation\n");
        fprintf(fplog, "%s\n", hline);
        for (i = 0; i < ewcsNR; i++)
            print_cycles(fplog, c2t_pp, wcsn[i], npp, nth_pp, wc->wcsc[i].n, cyc_sum[ewcNR + i], tot);
        fprintf(fplog, "%s\n", hline);
    /* Print the GPU timing summary */
    double tot_gpu = 0.0;
        for (size_t k = 0; k < gtPME_EVENT_COUNT; k++)
            tot_gpu += gpu_pme_t->timing[k].t;
        const char* k_log_str[2][2] = { { "Nonbonded F kernel", "Nonbonded F+ene k." },
                                        { "Nonbonded F+prune k.", "Nonbonded F+ene+prune k." } };
        tot_gpu += gpu_nbnxn_t->pl_h2d_t + gpu_nbnxn_t->nb_h2d_t + gpu_nbnxn_t->nb_d2h_t;

        /* Add up the kernel timings */
        for (i = 0; i < 2; i++)
            for (j = 0; j < 2; j++)
                tot_gpu += gpu_nbnxn_t->ktime[i][j].t;
        tot_gpu += gpu_nbnxn_t->pruneTime.t;

        tot_cpu_overlap = wc->wcc[ewcFORCE].c;
        if (wc->wcc[ewcPMEMESH].n > 0)
            tot_cpu_overlap += wc->wcc[ewcPMEMESH].c;
        tot_cpu_overlap *= realtime * 1000 / tot; /* convert s to ms */

        fprintf(fplog, "\n GPU timings\n%s\n", hline);
        fprintf(fplog, " Computing:                         Count  Wall t (s)    ms/step       %c\n", '%');
        fprintf(fplog, "%s\n", hline);
        print_gputimes(fplog, "Pair list H2D", gpu_nbnxn_t->pl_h2d_c, gpu_nbnxn_t->pl_h2d_t, tot_gpu);
        print_gputimes(fplog, "X / q H2D", gpu_nbnxn_t->nb_c, gpu_nbnxn_t->nb_h2d_t, tot_gpu);
        for (i = 0; i < 2; i++)
            for (j = 0; j < 2; j++)
                if (gpu_nbnxn_t->ktime[i][j].c)
                    print_gputimes(fplog,
                                   k_log_str[i][j],
                                   gpu_nbnxn_t->ktime[i][j].c,
                                   gpu_nbnxn_t->ktime[i][j].t,
                                   tot_gpu);
        for (size_t k = 0; k < gtPME_EVENT_COUNT; k++)
            if (gpu_pme_t->timing[k].c)
                print_gputimes(
                        fplog, PMEStageNames[k], gpu_pme_t->timing[k].c, gpu_pme_t->timing[k].t, tot_gpu);
        if (gpu_nbnxn_t->pruneTime.c)
            print_gputimes(fplog, "Pruning kernel", gpu_nbnxn_t->pruneTime.c, gpu_nbnxn_t->pruneTime.t, tot_gpu);
        print_gputimes(fplog, "F D2H", gpu_nbnxn_t->nb_c, gpu_nbnxn_t->nb_d2h_t, tot_gpu);
        fprintf(fplog, "%s\n", hline);
        print_gputimes(fplog, "Total ", gpu_nbnxn_t->nb_c, tot_gpu, tot_gpu);
        fprintf(fplog, "%s\n", hline);
        if (gpu_nbnxn_t->dynamicPruneTime.c)
            /* We print the dynamic pruning kernel timings after a separator
             * and avoid adding it to tot_gpu as this is not in the force
             * overlap. We print the fraction as relative to the rest.
             */
            print_gputimes(fplog,
                           gpu_nbnxn_t->dynamicPruneTime.c,
                           gpu_nbnxn_t->dynamicPruneTime.t,
            fprintf(fplog, "%s\n", hline);
        gpu_cpu_ratio = tot_gpu / tot_cpu_overlap;
        if (gpu_nbnxn_t->nb_c > 0 && wc->wcc[ewcFORCE].n > 0)
            fprintf(fplog,
                    "\nAverage per-step force GPU/CPU evaluation time ratio: %.3f ms/%.3f ms = %.3f\n",
                    tot_gpu / gpu_nbnxn_t->nb_c,
                    tot_cpu_overlap / wc->wcc[ewcFORCE].n,
                    gpu_cpu_ratio);
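
        /* The ratio compares the average per-step GPU task time with the
         * CPU force (plus PME mesh) time it overlaps with; as the note
         * below says, values far from 1 mean one resource idles while the
         * other works. */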
        /* Only print notes related to CPU-GPU load balance with PME */
        if (wc->wcc[ewcPMEMESH].n > 0)
            fprintf(fplog, "For optimal resource utilization this ratio should be close to 1\n");

            /* Print a note if the imbalance is high with PME, in which case
             * CPU-GPU load balancing is possible */
            if (gpu_cpu_ratio < 0.8 || gpu_cpu_ratio > 1.25)
                /* Only the sim master calls this function, so always print to stderr */
                if (gpu_cpu_ratio < 0.8)
                        /* The user could have used -notunepme,
                         * but we currently can't check that here.
                         */
                        GMX_LOG(mdlog.warning)
                                .asParagraph()
                                .appendText(
                                        "NOTE: The CPU has >25% more load than the GPU. This "
                                        "imbalance wastes\n"
                                        "      GPU resources. Maybe the domain decomposition "
                                        "limits the PME tuning.\n"
                                        "      In that case, try setting the DD grid manually "
                                        "(-dd) or lowering -dds.");
                        /* We should not end up here, unless the box is
                         * too small for increasing the cut-off for PME tuning.
                         */
                        GMX_LOG(mdlog.warning)
                                .asParagraph()
                                .appendText(
                                        "NOTE: The CPU has >25% more load than the GPU. This "
                                        "imbalance wastes\n"
                if (gpu_cpu_ratio > 1.25)
                    GMX_LOG(mdlog.warning)
                            .asParagraph()
                            .appendText(
                                    "NOTE: The GPU has >25% more load than the CPU. This imbalance "

    if (wc->wc_barrier)
        GMX_LOG(mdlog.warning)
                .asParagraph()
                .appendText(
                        "MPI_Barrier was called before each cycle start/stop\n"
                        "call, so timings are not those of real runs.");
    if (wc->wcc[ewcNB_XF_BUF_OPS].n > 0 && (cyc_sum[ewcDOMDEC] > tot * 0.1 || cyc_sum[ewcNS] > tot * 0.1))
        /* Only the sim master calls this function, so always print to stderr */
        if (wc->wcc[ewcDOMDEC].n == 0)
            GMX_LOG(mdlog.warning)
                    .asParagraph()
                    .appendTextFormatted(
                            "NOTE: %d %% of the run time was spent in pair search,\n"
                            "      you might want to increase nstlist (this has no effect on accuracy)\n",
                            gmx::roundToInt(100 * cyc_sum[ewcNS] / tot));
            GMX_LOG(mdlog.warning)
                    .asParagraph()
                    .appendTextFormatted(
                            "NOTE: %d %% of the run time was spent in domain decomposition,\n"
                            "      %d %% of the run time was spent in pair search,\n"
                            "      you might want to increase nstlist (this has no effect on accuracy)\n",
                            gmx::roundToInt(100 * cyc_sum[ewcDOMDEC] / tot),
                            gmx::roundToInt(100 * cyc_sum[ewcNS] / tot));
    if (cyc_sum[ewcMoveE] > tot * 0.05)
        GMX_LOG(mdlog.warning)
                .asParagraph()
                .appendTextFormatted(
                        "NOTE: %d %% of the run time was spent communicating energies,\n"
                        "      you might want to increase some nst* mdp options\n",
                        gmx::roundToInt(100 * cyc_sum[ewcMoveE] / tot));
extern int64_t wcycle_get_reset_counters(gmx_wallcycle_t wc)
    return wc->reset_counters;

extern void wcycle_set_reset_counters(gmx_wallcycle_t wc, int64_t reset_counters)
    wc->reset_counters = reset_counters;
void wallcycle_sub_start(gmx_wallcycle_t wc, int ewcs)
    if (useCycleSubcounters && wc != nullptr)
        wc->wcsc[ewcs].start = gmx_cycles_read();

void wallcycle_sub_start_nocount(gmx_wallcycle_t wc, int ewcs)
    if (useCycleSubcounters && wc != nullptr)
        wallcycle_sub_start(wc, ewcs);

void wallcycle_sub_stop(gmx_wallcycle_t wc, int ewcs)
    if (useCycleSubcounters && wc != nullptr)
        wc->wcsc[ewcs].c += gmx_cycles_read() - wc->wcsc[ewcs].start;
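
/* Usage sketch (hedged; the subcounter index name below is illustrative,
 * see the ewcs enum declaration for the real names): because of the
 * useCycleSubcounters guard, callers can bracket hot code unconditionally:
 *
 *     wallcycle_sub_start(wc, ewcsNONBONDED_KERNEL); // hypothetical index
 *     // ... kernel work ...
 *     wallcycle_sub_stop(wc, ewcsNONBONDED_KERNEL);
 *
 * When GMX_CYCLE_SUBCOUNTERS is off, the guard is a compile-time-constant
 * false and these calls reduce to no-ops. */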