* This file is part of the GROMACS molecular simulation package.
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2008, The GROMACS development team.
* Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
* GROMACS is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either version 2.1
* of the License, or (at your option) any later version.
* GROMACS is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
* You should have received a copy of the GNU Lesser General Public
* License along with GROMACS; if not, see
* http://www.gnu.org/licenses, or write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* If you want to redistribute modifications to GROMACS, please
* consider that scientific software is very special. Version
* control is crucial - bugs must be traceable. We will be happy to
* consider code for inclusion in the official distribution, but
* derived work must not be called official GROMACS. Details are found
* in the README & COPYING files - if they are missing, get the
* official version at http://www.gromacs.org.
* To help us fund GROMACS development, we humbly ask that you cite
* the research papers on the package. Check out http://www.gromacs.org.
#include "wallcycle.h"
#include "gromacs/math/functions.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/timing/wallcyclereporting.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/snprintf.h"
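/* Whether the finer-grained cycle subcounters are compiled in; this is set at
 * configure time through the GMX_CYCLE_SUBCOUNTERS CMake option and is false
 * in a default build, in which case the subcounter calls below do nothing. */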
static const bool useCycleSubcounters = GMX_CYCLE_SUBCOUNTERS;
/* DEBUG_WCYCLE adds consistency checking for the counters.
* It checks if you stop a counter different from the last
* one that was opened and if you nest too deep.
*/
/* #define DEBUG_WCYCLE */
#include "gromacs/utility/fatalerror.h"
/* did we detect one or more invalid cycle counts */
gmx_bool haveInvalidCount;
/* variables for testing/debugging */
int counterlist[DEPTH_MAX];
gmx_cycles_t cycle_prev;
int64_t reset_counters;
MPI_Comm mpi_comm_mygroup;
/* Each name should not exceed 19 printing characters
(i.e. the terminating null can be the twentieth) */
static const char *wcn[ewcNR] =
"Run", "Step", "PP during PME", "Domain decomp.", "DD comm. load",
"DD comm. bounds", "Vsite constr.", "Send X to PME", "Neighbor search", "Launch GPU ops.",
"Comm. coord.", "Force", "Wait + Comm. F", "PME mesh",
"PME redist. X/F", "PME spread", "PME gather", "PME 3D-FFT", "PME 3D-FFT Comm.", "PME solve LJ", "PME solve Elec",
"PME wait for PP", "Wait + Recv. PME F",
"Wait PME GPU spread", "PME 3D-FFT", "PME solve", /* the strings for FFT/solve are repeated here for mixed mode counters */
"Wait PME GPU gather", "Wait Bonded GPU", "Reduce GPU PME F",
"Wait GPU NB nonloc.", "Wait GPU NB local", "NB X/F buffer ops.",
"Vsite spread", "COM pull force", "AWH",
"Write traj.", "Update", "Constraints", "Comm. energies",
"Enforced rotation", "Add rot. forces", "Position swapping", "IMD", "Test"
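/* Names of the cycle subcounters, used for the "Breakdown of PP computation"
 * table; these counters are only collected when useCycleSubcounters is true. */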
static const char *wcsn[ewcsNR] =
"DD redist.", "DD NS grid + sort", "DD setup comm.",
"DD make top.", "DD make constr.", "DD top. other",
"NS grid local", "NS grid non-loc.", "NS search local", "NS search non-loc.",
"Listed buffer ops.",
"NB F kernel", "NB F clear buf",
"Launch NB GPU tasks",
"Launch Bonded GPU tasks",
"Launch PME GPU tasks",
"Ewald F correction",
"Clear force buffer",
/* PME GPU timing events' names - correspond to the enum in gpu_timing.h */
static const char *PMEStageNames[] =
"PME spline + spread",
gmx_bool wallcycle_have_counter()
return gmx_cycles_have_counter();
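/* Set up the wall-cycle data structure; returns nullptr when this platform has
 * no usable cycle counter. The GMX_CYCLE_BARRIER environment variable makes
 * every start/stop call issue an MPI_Barrier first, and GMX_CYCLE_ALL
 * additionally allocates the all-pairs counter matrix wcc_all (see below). */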
gmx_wallcycle_t wallcycle_init(FILE *fplog, int resetstep, t_commrec gmx_unused *cr)
if (!wallcycle_have_counter())
wc->haveInvalidCount = FALSE;
wc->wc_barrier = FALSE;
wc->wcc_all = nullptr;
wc->reset_counters = resetstep;
if (PAR(cr) && getenv("GMX_CYCLE_BARRIER") != nullptr)
fprintf(fplog, "\nWill call MPI_Barrier before each cycle start/stop call\n\n");
wc->wc_barrier = TRUE;
wc->mpi_comm_mygroup = cr->mpi_comm_mygroup;
snew(wc->wcc, ewcNR);
if (getenv("GMX_CYCLE_ALL") != nullptr)
fprintf(fplog, "\nWill time all the code during the run\n\n");
snew(wc->wcc_all, ewcNR*ewcNR);
if (useCycleSubcounters)
snew(wc->wcsc, ewcsNR);
void wallcycle_destroy(gmx_wallcycle_t wc)
if (wc->wcc != nullptr)
if (wc->wcc_all != nullptr)
if (wc->wcsc != nullptr)
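/* When GMX_CYCLE_ALL timing is active, wcc_all is an ewcNR x ewcNR matrix
 * indexed by (previously active counter, current counter): each counter event
 * records the number of such transitions and the cycles elapsed since the
 * previous event, so the time spent between any pair of main counters can be
 * reported. */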
static void wallcycle_all_start(gmx_wallcycle_t wc, int ewc, gmx_cycles_t cycle)
wc->cycle_prev = cycle;
static void wallcycle_all_stop(gmx_wallcycle_t wc, int ewc, gmx_cycles_t cycle)
wc->wcc_all[wc->ewc_prev*ewcNR+ewc].n += 1;
wc->wcc_all[wc->ewc_prev*ewcNR+ewc].c += cycle - wc->cycle_prev;
static void debug_start_check(gmx_wallcycle_t wc, int ewc)
/* fprintf(stderr,"wcycle_start depth %d, %s\n",wc->count_depth,wcn[ewc]); */
if (wc->count_depth < 0 || wc->count_depth >= DEPTH_MAX)
gmx_fatal(FARGS, "wallcycle counter depth out of range: %d",
wc->counterlist[wc->count_depth] = ewc;
static void debug_stop_check(gmx_wallcycle_t wc, int ewc)
/* fprintf(stderr,"wcycle_stop depth %d, %s\n",wc->count_depth,wcn[ewc]); */
if (wc->count_depth < 0)
gmx_fatal(FARGS, "wallcycle counter depth out of range when stopping %s: %d", wcn[ewc], wc->count_depth);
if (wc->counterlist[wc->count_depth] != ewc)
gmx_fatal(FARGS, "wallcycle mismatch at stop, start %s, stop %s",
wcn[wc->counterlist[wc->count_depth]], wcn[ewc]);
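/* Illustrative use of the main counters (a sketch, not copied from a specific
 * caller; ewcFORCE is one of the real counter ids, whose name in wcn is "Force"):
 *
 *   wallcycle_start(wc, ewcFORCE);
 *   // ... work belonging to the "Force" region ...
 *   wallcycle_stop(wc, ewcFORCE);
 *
 * Counters may be nested; with DEBUG_WCYCLE defined, the checks above verify
 * that each stop matches the most recently started counter and that the
 * nesting never exceeds DEPTH_MAX. */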
void wallcycle_start(gmx_wallcycle_t wc, int ewc)
MPI_Barrier(wc->mpi_comm_mygroup);
debug_start_check(wc, ewc);
cycle = gmx_cycles_read();
wc->wcc[ewc].start = cycle;
if (wc->wcc_all != nullptr)
wallcycle_all_start(wc, ewc, cycle);
else if (wc->wc_depth == 3)
wallcycle_all_stop(wc, ewc, cycle);
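/* As wallcycle_start(), but without incrementing the call count of the counter. */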
void wallcycle_start_nocount(gmx_wallcycle_t wc, int ewc)
wallcycle_start(wc, ewc);
double wallcycle_stop(gmx_wallcycle_t wc, int ewc)
gmx_cycles_t cycle, last;
MPI_Barrier(wc->mpi_comm_mygroup);
debug_stop_check(wc, ewc);
/* When processes or threads migrate between cores, the cycle counting
* can get messed up if the cycle counters on different cores are not
* synchronized. When this happens we expect both large negative and
* positive cycle differences. We can detect negative cycle differences.
* Detecting too large positive counts is difficult, since the count can be
* large, especially for ewcRUN. If we detect a negative count,
* we will not print the cycle accounting table.
*/
cycle = gmx_cycles_read();
if (cycle >= wc->wcc[ewc].start)
last = cycle - wc->wcc[ewc].start;
wc->haveInvalidCount = TRUE;
wc->wcc[ewc].c += last;
wallcycle_all_stop(wc, ewc, cycle);
else if (wc->wc_depth == 2)
wallcycle_all_start(wc, ewc, cycle);
void wallcycle_get(gmx_wallcycle_t wc, int ewc, int *n, double *c)
*c = static_cast<double>(wc->wcc[ewc].c);
void wallcycle_reset_all(gmx_wallcycle_t wc)
for (i = 0; i < ewcNR; i++)
wc->haveInvalidCount = FALSE;
for (i = 0; i < ewcNR*ewcNR; i++)
wc->wcc_all[i].n = 0;
wc->wcc_all[i].c = 0;
for (i = 0; i < ewcsNR; i++)
static gmx_bool is_pme_counter(int ewc)
return (ewc >= ewcPMEMESH && ewc <= ewcPMEWAITCOMM);
static gmx_bool is_pme_subcounter(int ewc)
return (ewc >= ewcPME_REDISTXF && ewc < ewcPMEWAITCOMM);
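/* These range checks rely on the ordering of the ewc enum: all PME counters
 * form a contiguous block from ewcPMEMESH up to ewcPMEWAITCOMM, and the PME
 * mesh subcounters start at ewcPME_REDISTXF. */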
/* Subtract counter ewc_sub timed inside a timing block for ewc_main */
static void subtract_cycles(wallcc_t *wcc, int ewc_main, int ewc_sub)
if (wcc[ewc_sub].n > 0)
if (wcc[ewc_main].c >= wcc[ewc_sub].c)
wcc[ewc_main].c -= wcc[ewc_sub].c;
/* Something is wrong with the cycle counting */
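/* The counters are started and stopped on one thread per rank, so for
 * reporting the cycle counts are scaled by the rank's OpenMP thread count;
 * this way the printed totals approximately account for all cores used. */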
void wallcycle_scale_by_num_threads(gmx_wallcycle_t wc, bool isPmeRank, int nthreads_pp, int nthreads_pme)
for (int i = 0; i < ewcNR; i++)
if (is_pme_counter(i) || (i == ewcRUN && isPmeRank))
wc->wcc[i].c *= nthreads_pme;
for (int j = 0; j < ewcNR; j++)
wc->wcc_all[i*ewcNR+j].c *= nthreads_pme;
wc->wcc[i].c *= nthreads_pp;
for (int j = 0; j < ewcNR; j++)
wc->wcc_all[i*ewcNR+j].c *= nthreads_pp;
if (useCycleSubcounters && wc->wcsc && !isPmeRank)
for (int i = 0; i < ewcsNR; i++)
wc->wcsc[i].c *= nthreads_pp;
/* TODO Make an object for this function to return, containing some
* vectors of something like wallcc_t for the summed wcc, wcc_all and
* wcsc, AND the original wcc for rank 0.
*
* The GPU timing is reported only for rank 0, so we want to preserve
* the original wcycle on that rank. Rank 0 also reports the global
* counts before that, so needs something to contain the global data
* without over-writing the rank-0 data. The current implementation
* uses cycles_sum to manage this, which works OK now because wcsc and
* wcc_all are unused by the GPU reporting, but it is not satisfactory
* for the future. Also, there's no need for MPI_Allreduce, since
* only MASTERRANK uses any of the results. */
WallcycleCounts wallcycle_sum(const t_commrec *cr, gmx_wallcycle_t wc)
WallcycleCounts cycles_sum;
double cycles[ewcNR+ewcsNR];
double cycles_n[ewcNR+ewcsNR+1];
/* Default construction of std::array of non-class T can leave
the values indeterminate, just like a C array, and icc
subtract_cycles(wcc, ewcDOMDEC, ewcDDCOMMLOAD);
subtract_cycles(wcc, ewcDOMDEC, ewcDDCOMMBOUND);
subtract_cycles(wcc, ewcPME_FFT, ewcPME_FFTCOMM);
if (cr->npmenodes == 0)
/* All nodes do PME (or no PME at all) */
subtract_cycles(wcc, ewcFORCE, ewcPMEMESH);
/* There are PME-only nodes */
if (wcc[ewcPMEMESH].n > 0)
/* This must be a PME-only node, calculate the Wait + Comm. time */
GMX_ASSERT(wcc[ewcRUN].c >= wcc[ewcPMEMESH].c, "Total run ticks must be greater than PME-only ticks");
wcc[ewcPMEWAITCOMM].c = wcc[ewcRUN].c - wcc[ewcPMEMESH].c;
/* Store the cycles in a double buffer for summing */
for (i = 0; i < ewcNR; i++)
cycles_n[i] = static_cast<double>(wcc[i].n);
cycles[i] = static_cast<double>(wcc[i].c);
for (i = 0; i < ewcsNR; i++)
cycles_n[ewcNR+i] = static_cast<double>(wc->wcsc[i].n);
cycles[ewcNR+i] = static_cast<double>(wc->wcsc[i].c);
double buf[ewcNR+ewcsNR+1];
// TODO this code is used only at the end of the run, so we
// can just do a simple reduce of haveInvalidCount in
// wallcycle_print, and avoid bugs
cycles_n[nsum] = (wc->haveInvalidCount ? 1 : 0);
// TODO Use MPI_Reduce
MPI_Allreduce(cycles_n, buf, nsum + 1, MPI_DOUBLE, MPI_MAX,
for (i = 0; i < ewcNR; i++)
wcc[i].n = gmx::roundToInt(buf[i]);
wc->haveInvalidCount = (buf[nsum] > 0);
for (i = 0; i < ewcsNR; i++)
wc->wcsc[i].n = gmx::roundToInt(buf[ewcNR+i]);
// TODO Use MPI_Reduce
MPI_Allreduce(cycles, cycles_sum.data(), nsum, MPI_DOUBLE, MPI_SUM,
if (wc->wcc_all != nullptr)
double *buf_all, *cyc_all;
snew(cyc_all, ewcNR*ewcNR);
snew(buf_all, ewcNR*ewcNR);
for (i = 0; i < ewcNR*ewcNR; i++)
cyc_all[i] = wc->wcc_all[i].c;
// TODO Use MPI_Reduce
MPI_Allreduce(cyc_all, buf_all, ewcNR*ewcNR, MPI_DOUBLE, MPI_SUM,
for (i = 0; i < ewcNR*ewcNR; i++)
wc->wcc_all[i].c = static_cast<gmx_cycles_t>(buf_all[i]);
for (i = 0; i < nsum; i++)
cycles_sum[i] = cycles[i];
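/* Print one row of the cycle accounting table: the summed cycle count is
 * converted to wall-clock time with the cycles-to-seconds factor c2t, listed
 * in giga-cycles, and expressed as a percentage of the total count tot. */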
static void print_cycles(FILE *fplog, double c2t, const char *name,
int nnodes, int nthreads,
int ncalls, double c_sum, double tot)
char nnodes_str[STRLEN];
char nthreads_str[STRLEN];
char ncalls_str[STRLEN];
double percentage = (tot > 0.) ? (100. * c_sum / tot) : 0.;
snprintf(ncalls_str, sizeof(ncalls_str), "%10d", ncalls);
snprintf(nnodes_str, sizeof(nnodes_str), "N/A");
snprintf(nnodes_str, sizeof(nnodes_str), "%4d", nnodes);
snprintf(nthreads_str, sizeof(nthreads_str), "N/A");
snprintf(nthreads_str, sizeof(nthreads_str), "%4d", nthreads);
/* Convert the cycle count to wallclock time for this task */
fprintf(fplog, " %-19.19s %4s %4s %10s %10.3f %14.3f %5.1f\n",
name, nnodes_str, nthreads_str, ncalls_str, wallt,
c_sum*1e-9, percentage);
static void print_gputimes(FILE *fplog, const char *name,
int n, double t, double tot_t)
snprintf(num, sizeof(num), "%10d", n);
snprintf(avg_perf, sizeof(avg_perf), "%10.3f", t/n);
sprintf(avg_perf, " ");
if (t != tot_t && tot_t > 0)
fprintf(fplog, " %-29s %10s%12.3f %s %5.1f\n",
name, num, t/1000, avg_perf, 100 * t/tot_t);
fprintf(fplog, " %-29s %10s%12.3f %s %5.1f\n",
name, "", t/1000, avg_perf, 100.0);
static void print_header(FILE *fplog, int nrank_pp, int nth_pp, int nrank_pme, int nth_pme)
int nrank_tot = nrank_pp + nrank_pme;
fprintf(fplog, "On %d MPI rank%s", nrank_tot, nrank_tot == 1 ? "" : "s");
fprintf(fplog, ", each using %d OpenMP threads", nth_pp);
/* Don't report doing PP+PME, because we can't tell here if
* this is RF, etc. */
fprintf(fplog, "On %d MPI rank%s doing PP", nrank_pp, nrank_pp == 1 ? "" : "s");
fprintf(fplog, ",%s using %d OpenMP threads", nrank_pp > 1 ? " each" : "", nth_pp);
fprintf(fplog, ", and\non %d MPI rank%s doing PME", nrank_pme, nrank_pme == 1 ? "" : "s");
fprintf(fplog, ",%s using %d OpenMP threads", nrank_pme > 1 ? " each" : "", nth_pme);
fprintf(fplog, "\n\n");
fprintf(fplog, " Computing:          Num   Num      Call    Wall time         Giga-Cycles\n");
fprintf(fplog, "                     Ranks Threads  Count      (s)         total sum    %%\n");
void wallcycle_print(FILE *fplog, const gmx::MDLogger &mdlog, int nnodes, int npme,
int nth_pp, int nth_pme, double realtime,
gmx_wallcycle_t wc, const WallcycleCounts &cyc_sum,
const gmx_wallclock_gpu_nbnxn_t *gpu_nbnxn_t,
const gmx_wallclock_gpu_pme_t *gpu_pme_t)
double tot, tot_for_pp, tot_for_rest, tot_cpu_overlap, gpu_cpu_ratio;
double c2t, c2t_pp, c2t_pme = 0;
int i, j, npp, nth_tot;
const char *hline = "-----------------------------------------------------------------------------";
GMX_ASSERT(nth_pp > 0, "Number of particle-particle threads must be >0");
GMX_ASSERT(nth_pme > 0, "Number of PME threads must be >0");
GMX_ASSERT(nnodes > 0, "Number of nodes must be >0");
GMX_ASSERT(npme >= 0, "Number of PME nodes cannot be negative");
/* npme is the number of PME-only ranks used, and we always do PP work */
GMX_ASSERT(npp > 0, "Number of particle-particle nodes must be >0");
nth_tot = npp*nth_pp + npme*nth_pme;
/* When using PME-only nodes, the next line is valid for both
PP-only and PME-only nodes because they started ewcRUN at the
same time. */
tot = cyc_sum[ewcRUN];
/* TODO This is heavy handed, but until someone reworks the
code so that it is provably robust with respect to
non-positive values for all possible timer and cycle
counters, there is less value gained from printing whatever
timing data might still be sensible for some non-Jenkins
run, than is lost from diagnosing Jenkins FP exceptions on
runs about whose execution time we don't care. */
GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted(
"WARNING: A total of %f CPU cycles was recorded, so mdrun cannot print a time accounting",
if (wc->haveInvalidCount)
GMX_LOG(mdlog.warning).asParagraph().appendText("NOTE: Detected invalid cycle counts, probably because threads moved between CPU cores that do not have synchronized cycle counters. Will not print the cycle accounting.");
/* Conversion factor from cycles to seconds */
c2t_pp = c2t * nth_tot / static_cast<double>(npp*nth_pp);
c2t_pme = c2t * nth_tot / static_cast<double>(npme*nth_pme);
fprintf(fplog, "\n R E A L   C Y C L E   A N D   T I M E   A C C O U N T I N G\n\n");
print_header(fplog, npp, nth_pp, npme, nth_pme);
fprintf(fplog, "%s\n", hline);
for (i = ewcPPDURINGPME+1; i < ewcNR; i++)
if (is_pme_subcounter(i))
/* Do not count these at all */
else if (npme > 0 && is_pme_counter(i))
/* Print timing information for PME-only nodes, but add an
* asterisk so the reader of the table can know that the
* walltimes are not meant to add up. The asterisk still
* fits in the required maximum of 19 characters. */
snprintf(buffer, STRLEN, "%s *", wcn[i]);
print_cycles(fplog, c2t_pme, buffer,
wc->wcc[i].n, cyc_sum[i], tot);
/* Print timing information when it is for a PP or PP+PME
node */
print_cycles(fplog, c2t_pp, wcn[i],
wc->wcc[i].n, cyc_sum[i], tot);
tot_for_pp += cyc_sum[i];
if (wc->wcc_all != nullptr)
for (i = 0; i < ewcNR; i++)
for (j = 0; j < ewcNR; j++)
snprintf(buf, 20, "%-9.9s %-9.9s", wcn[i], wcn[j]);
print_cycles(fplog, c2t_pp, buf,
wc->wcc_all[i*ewcNR+j].n,
wc->wcc_all[i*ewcNR+j].c,
tot_for_rest = tot * npp * nth_pp / static_cast<double>(nth_tot);
print_cycles(fplog, c2t_pp, "Rest",
-1, tot_for_rest - tot_for_pp, tot);
fprintf(fplog, "%s\n", hline);
print_cycles(fplog, c2t, "Total",
fprintf(fplog, "%s\n", hline);
"(*) Note that with separate PME ranks, the walltime column actually sums to\n"
" twice the total reported, but the cycle count total and %% are correct.\n"
if (wc->wcc[ewcPMEMESH].n > 0)
// A workaround to not print breakdown when no subcounters were recorded.
// TODO: figure out and record PME GPU counters (what to do with the waiting ones?)
std::vector<int> validPmeSubcounterIndices;
for (i = ewcPPDURINGPME+1; i < ewcNR; i++)
if (is_pme_subcounter(i) && wc->wcc[i].n > 0)
validPmeSubcounterIndices.push_back(i);
if (!validPmeSubcounterIndices.empty())
fprintf(fplog, " Breakdown of PME mesh computation\n");
fprintf(fplog, "%s\n", hline);
for (auto i : validPmeSubcounterIndices)
print_cycles(fplog, npme > 0 ? c2t_pme : c2t_pp, wcn[i],
npme > 0 ? npme : npp, nth_pme,
wc->wcc[i].n, cyc_sum[i], tot);
fprintf(fplog, "%s\n", hline);
if (useCycleSubcounters && wc->wcsc)
fprintf(fplog, " Breakdown of PP computation\n");
fprintf(fplog, "%s\n", hline);
for (i = 0; i < ewcsNR; i++)
print_cycles(fplog, c2t_pp, wcsn[i],
wc->wcsc[i].n, cyc_sum[ewcNR+i], tot);
fprintf(fplog, "%s\n", hline);
/* print GPU timing summary */
double tot_gpu = 0.0;
for (size_t k = 0; k < gtPME_EVENT_COUNT; k++)
tot_gpu += gpu_pme_t->timing[k].t;
const char *k_log_str[2][2] = {
{"Nonbonded F kernel", "Nonbonded F+ene k."},
{"Nonbonded F+prune k.", "Nonbonded F+ene+prune k."}
tot_gpu += gpu_nbnxn_t->pl_h2d_t + gpu_nbnxn_t->nb_h2d_t + gpu_nbnxn_t->nb_d2h_t;
/* add up the kernel timings */
for (i = 0; i < 2; i++)
for (j = 0; j < 2; j++)
tot_gpu += gpu_nbnxn_t->ktime[i][j].t;
tot_gpu += gpu_nbnxn_t->pruneTime.t;
tot_cpu_overlap = wc->wcc[ewcFORCE].c;
if (wc->wcc[ewcPMEMESH].n > 0)
tot_cpu_overlap += wc->wcc[ewcPMEMESH].c;
tot_cpu_overlap *= realtime*1000/tot; /* convert s to ms */
fprintf(fplog, "\n GPU timings\n%s\n", hline);
fprintf(fplog, " Computing: Count Wall t (s) ms/step %c\n", '%');
fprintf(fplog, "%s\n", hline);
print_gputimes(fplog, "Pair list H2D",
gpu_nbnxn_t->pl_h2d_c, gpu_nbnxn_t->pl_h2d_t, tot_gpu);
print_gputimes(fplog, "X / q H2D",
gpu_nbnxn_t->nb_c, gpu_nbnxn_t->nb_h2d_t, tot_gpu);
for (i = 0; i < 2; i++)
for (j = 0; j < 2; j++)
if (gpu_nbnxn_t->ktime[i][j].c)
print_gputimes(fplog, k_log_str[i][j],
gpu_nbnxn_t->ktime[i][j].c, gpu_nbnxn_t->ktime[i][j].t, tot_gpu);
for (size_t k = 0; k < gtPME_EVENT_COUNT; k++)
if (gpu_pme_t->timing[k].c)
print_gputimes(fplog, PMEStageNames[k],
gpu_pme_t->timing[k].c,
gpu_pme_t->timing[k].t,
if (gpu_nbnxn_t->pruneTime.c)
print_gputimes(fplog, "Pruning kernel", gpu_nbnxn_t->pruneTime.c, gpu_nbnxn_t->pruneTime.t, tot_gpu);
print_gputimes(fplog, "F D2H", gpu_nbnxn_t->nb_c, gpu_nbnxn_t->nb_d2h_t, tot_gpu);
fprintf(fplog, "%s\n", hline);
print_gputimes(fplog, "Total ", gpu_nbnxn_t->nb_c, tot_gpu, tot_gpu);
fprintf(fplog, "%s\n", hline);
if (gpu_nbnxn_t->dynamicPruneTime.c)
/* We print the dynamic pruning kernel timings after a separator
* and avoid adding it to tot_gpu as this is not in the force
* overlap. We print the fraction as relative to the rest.
*/
print_gputimes(fplog, "*Dynamic pruning", gpu_nbnxn_t->dynamicPruneTime.c, gpu_nbnxn_t->dynamicPruneTime.t, tot_gpu);
fprintf(fplog, "%s\n", hline);
gpu_cpu_ratio = tot_gpu/tot_cpu_overlap;
if (gpu_nbnxn_t->nb_c > 0 && wc->wcc[ewcFORCE].n > 0)
fprintf(fplog, "\nAverage per-step force GPU/CPU evaluation time ratio: %.3f ms/%.3f ms = %.3f\n",
tot_gpu/gpu_nbnxn_t->nb_c, tot_cpu_overlap/wc->wcc[ewcFORCE].n,
/* only print notes related to CPU-GPU load balance with PME */
if (wc->wcc[ewcPMEMESH].n > 0)
fprintf(fplog, "For optimal resource utilization this ratio should be close to 1\n");
/* print note if the imbalance is high with PME case in which
* CPU-GPU load balancing is possible */
if (gpu_cpu_ratio < 0.8 || gpu_cpu_ratio > 1.25)
/* Only the sim master calls this function, so always print to stderr */
if (gpu_cpu_ratio < 0.8)
/* The user could have used -notunepme,
* but we currently can't check that here.
*/
GMX_LOG(mdlog.warning).asParagraph().appendText(
"NOTE: The CPU has >25% more load than the GPU. This imbalance wastes\n"
" GPU resources. Maybe the domain decomposition limits the PME tuning.\n"
" In that case, try setting the DD grid manually (-dd) or lowering -dds.");
/* We should not end up here, unless the box is
* too small for increasing the cut-off for PME tuning.
*/
GMX_LOG(mdlog.warning).asParagraph().appendText(
"NOTE: The CPU has >25% more load than the GPU. This imbalance wastes\n"
if (gpu_cpu_ratio > 1.25)
GMX_LOG(mdlog.warning).asParagraph().appendText(
"NOTE: The GPU has >25% more load than the CPU. This imbalance wastes\n"
GMX_LOG(mdlog.warning).asParagraph().appendText(
"MPI_Barrier was called before each cycle start/stop\n"
"call, so timings are not those of real runs.");
if (wc->wcc[ewcNB_XF_BUF_OPS].n > 0 &&
(cyc_sum[ewcDOMDEC] > tot*0.1 ||
cyc_sum[ewcNS] > tot*0.1))
/* Only the sim master calls this function, so always print to stderr */
if (wc->wcc[ewcDOMDEC].n == 0)
GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted(
"NOTE: %d %% of the run time was spent in pair search,\n"
" you might want to increase nstlist (this has no effect on accuracy)\n",
gmx::roundToInt(100*cyc_sum[ewcNS]/tot));
GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted(
"NOTE: %d %% of the run time was spent in domain decomposition,\n"
" %d %% of the run time was spent in pair search,\n"
" you might want to increase nstlist (this has no effect on accuracy)\n",
gmx::roundToInt(100*cyc_sum[ewcDOMDEC]/tot),
gmx::roundToInt(100*cyc_sum[ewcNS]/tot));
if (cyc_sum[ewcMoveE] > tot*0.05)
GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted(
"NOTE: %d %% of the run time was spent communicating energies,\n"
" you might want to increase some nst* mdp options\n",
gmx::roundToInt(100*cyc_sum[ewcMoveE]/tot));
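/* reset_counters holds the step at which the cycle counters are reset mid-run
 * (for example via the mdrun -resetstep or -resethway options), so that
 * start-up and load-balancing effects can be excluded from the timings. */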
extern int64_t wcycle_get_reset_counters(gmx_wallcycle_t wc)
return wc->reset_counters;
extern void wcycle_set_reset_counters(gmx_wallcycle_t wc, int64_t reset_counters)
wc->reset_counters = reset_counters;
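/* The subcounter interface mirrors the main start/stop calls. Illustrative
 * sketch only; the subcounter id and the work shown are placeholders:
 *
 *   wallcycle_sub_start(wc, ewcsSomeSubcounter);
 *   // ... the region being timed ...
 *   wallcycle_sub_stop(wc, ewcsSomeSubcounter);
 *
 * All of these calls do nothing when useCycleSubcounters is false. */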
void wallcycle_sub_start(gmx_wallcycle_t wc, int ewcs)
if (useCycleSubcounters && wc != nullptr)
wc->wcsc[ewcs].start = gmx_cycles_read();
void wallcycle_sub_start_nocount(gmx_wallcycle_t wc, int ewcs)
if (useCycleSubcounters && wc != nullptr)
wallcycle_sub_start(wc, ewcs);
void wallcycle_sub_stop(gmx_wallcycle_t wc, int ewcs)
if (useCycleSubcounters && wc != nullptr)
wc->wcsc[ewcs].c += gmx_cycles_read() - wc->wcsc[ewcs].start;