/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 *
 * This source code is part of
 *
 * GROningen MAchine for Chemical Simulations
 *
 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2008, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * If you want to redistribute modifications, please consider that
 * scientific software is very special. Version control is crucial -
 * bugs must be traceable. We will be happy to consider code for
 * inclusion in the official distribution, but derived work must not
 * be called official GROMACS. Details are found in the README & COPYING
 * files - if they are missing, get the official version at www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the papers on the package - you can find them in the top README file.
 *
 * For more info, check our website at http://www.gromacs.org
 *
 * Gallium Rubidium Oxygen Manganese Argon Carbon Silicon
 */

#include "gromacs/timing/wallcycle.h"

#include "gmx_fatal.h"
#include "md_logging.h"

#include "gromacs/timing/cyclecounter.h"
#include "gromacs/utility/gmxmpi.h"
/* DEBUG_WCYCLE adds consistency checking for the counters.
 * It checks if you stop a counter different from the last
 * one that was opened and if you nest too deep.
 */
/* #define DEBUG_WCYCLE */
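
/* Illustrative sketch (added, not code from this file): with DEBUG_WCYCLE
 * defined, an unbalanced sequence such as
 *
 *     wallcycle_start(wc, ewcFORCE);
 *     wallcycle_start(wc, ewcUPDATE);
 *     wallcycle_stop(wc, ewcFORCE);   // mismatch: ewcUPDATE was opened last
 *
 * aborts with gmx_fatal() in debug_stop_check(), and nesting counters more
 * than DEPTH_MAX levels deep is caught in debug_start_check().
 */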
typedef struct gmx_wallcycle
{
    /* variables for testing/debugging */
    int              counterlist[DEPTH_MAX];
    gmx_cycles_t     cycle_prev;
    gmx_large_int_t  reset_counters;
    MPI_Comm         mpi_comm_mygroup;
#ifdef GMX_CYCLE_SUBCOUNTERS
/* Each name should not exceed 19 printing characters
   (i.e. the terminating null can be the twentieth) */
static const char *wcn[ewcNR] =
{
    "Run", "Step", "PP during PME", "Domain decomp.", "DD comm. load",
    "DD comm. bounds", "Vsite constr.", "Send X to PME", "Neighbor search", "Launch GPU ops.",
    "Comm. coord.", "Born radii", "Force", "Wait + Comm. F", "PME mesh",
    "PME redist. X/F", "PME spread/gather", "PME 3D-FFT", "PME 3D-FFT Comm.", "PME solve",
    "PME wait for PP", "Wait + Recv. PME F", "Wait GPU nonlocal", "Wait GPU local", "NB X/F buffer ops.",
    "Vsite spread", "Write traj.", "Update", "Constraints", "Comm. energies",
    "Enforced rotation", "Add rot. forces", "Test"
};
static const char *wcsn[ewcsNR] =
{
    "DD redist.", "DD NS grid + sort", "DD setup comm.",
    "DD make top.", "DD make constr.", "DD top. other",
    "NS grid local", "NS grid non-loc.", "NS search local", "NS search non-loc.",
    "Bonded F", "Nonbonded F", "Ewald F correction",
    "NB X buffer ops.", "NB F buffer ops."
};
gmx_bool wallcycle_have_counter(void)
{
    return gmx_cycles_have_counter();
}
gmx_wallcycle_t wallcycle_init(FILE *fplog, int resetstep, t_commrec *cr,
                               int nthreads_pp, int nthreads_pme)
    if (!wallcycle_have_counter())

    wc->wc_barrier     = FALSE;
    wc->reset_counters = resetstep;
    wc->nthreads_pp    = nthreads_pp;
    wc->nthreads_pme   = nthreads_pme;
    wc->cycles_sum     = NULL;

    if (PAR(cr) && getenv("GMX_CYCLE_BARRIER") != NULL)
        fprintf(fplog, "\nWill call MPI_Barrier before each cycle start/stop call\n\n");
        wc->wc_barrier       = TRUE;
        wc->mpi_comm_mygroup = cr->mpi_comm_mygroup;

    snew(wc->wcc, ewcNR);
    if (getenv("GMX_CYCLE_ALL") != NULL)
        fprintf(fplog, "\nWill time all the code during the run\n\n");
        snew(wc->wcc_all, ewcNR*ewcNR);

#ifdef GMX_CYCLE_SUBCOUNTERS
    snew(wc->wcsc, ewcsNR);
void wallcycle_destroy(gmx_wallcycle_t wc)
    if (wc->wcc_all != NULL)
#ifdef GMX_CYCLE_SUBCOUNTERS
    if (wc->wcsc != NULL)
static void wallcycle_all_start(gmx_wallcycle_t wc, int ewc, gmx_cycles_t cycle)
    wc->cycle_prev = cycle;

static void wallcycle_all_stop(gmx_wallcycle_t wc, int ewc, gmx_cycles_t cycle)
    wc->wcc_all[wc->ewc_prev*ewcNR+ewc].n += 1;
    wc->wcc_all[wc->ewc_prev*ewcNR+ewc].c += cycle - wc->cycle_prev;
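
/* Illustrative note (added): with GMX_CYCLE_ALL, wcc_all is used as a
 * flattened ewcNR x ewcNR matrix in row-major order; the entry for
 * (previous counter, current counter) accumulates the cycles spent between
 * consecutive counter events, e.g.
 *
 *     i = wc->ewc_prev*ewcNR + ewc;                // row: prev, column: ewc
 *     wc->wcc_all[i].n += 1;                       // one more transition
 *     wc->wcc_all[i].c += cycle - wc->cycle_prev;  // cycles since last event
 */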
static void debug_start_check(gmx_wallcycle_t wc, int ewc)
    /* fprintf(stderr,"wcycle_start depth %d, %s\n",wc->count_depth,wcn[ewc]); */

    if (wc->count_depth < 0 || wc->count_depth >= DEPTH_MAX)
        gmx_fatal(FARGS, "wallcycle counter depth out of range: %d",
                  wc->count_depth);
    wc->counterlist[wc->count_depth] = ewc;

static void debug_stop_check(gmx_wallcycle_t wc, int ewc)
    /* fprintf(stderr,"wcycle_stop depth %d, %s\n",wc->count_depth,wcn[ewc]); */

    if (wc->count_depth < 0)
        gmx_fatal(FARGS, "wallcycle counter depth out of range when stopping %s: %d", wcn[ewc], wc->count_depth);
    if (wc->counterlist[wc->count_depth] != ewc)
        gmx_fatal(FARGS, "wallcycle mismatch at stop, start %s, stop %s",
                  wcn[wc->counterlist[wc->count_depth]], wcn[ewc]);
void wallcycle_start(gmx_wallcycle_t wc, int ewc)
        MPI_Barrier(wc->mpi_comm_mygroup);

    debug_start_check(wc, ewc);

    cycle              = gmx_cycles_read();
    wc->wcc[ewc].start = cycle;
    if (wc->wcc_all != NULL)
            wallcycle_all_start(wc, ewc, cycle);
        else if (wc->wc_depth == 3)
            wallcycle_all_stop(wc, ewc, cycle);
void wallcycle_start_nocount(gmx_wallcycle_t wc, int ewc)
    wallcycle_start(wc, ewc);
double wallcycle_stop(gmx_wallcycle_t wc, int ewc)
    gmx_cycles_t cycle, last;

        MPI_Barrier(wc->mpi_comm_mygroup);

    debug_stop_check(wc, ewc);

    cycle           = gmx_cycles_read();
    last            = cycle - wc->wcc[ewc].start;
    wc->wcc[ewc].c += last;
            wallcycle_all_stop(wc, ewc, cycle);
        else if (wc->wc_depth == 2)
            wallcycle_all_start(wc, ewc, cycle);
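
/* Usage sketch (illustrative, not code from this file): a region is timed by
 * bracketing it with a matching start/stop pair on the same counter, e.g.
 *
 *     wallcycle_start(wc, ewcFORCE);
 *     ...                                      // the work being timed
 *     cycles = wallcycle_stop(wc, ewcFORCE);   // cycles spent in this call
 *
 * Setting GMX_CYCLE_BARRIER adds an MPI_Barrier before each start/stop so the
 * ranks are synchronized first, and GMX_CYCLE_ALL additionally fills the
 * wcc_all matrix with the cycles between consecutive counter events.
 */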
void wallcycle_reset_all(gmx_wallcycle_t wc)
    for (i = 0; i < ewcNR; i++)

    for (i = 0; i < ewcNR*ewcNR; i++)
        wc->wcc_all[i].n = 0;
        wc->wcc_all[i].c = 0;
#ifdef GMX_CYCLE_SUBCOUNTERS
    for (i = 0; i < ewcsNR; i++)
static gmx_bool is_pme_counter(int ewc)
{
    return (ewc >= ewcPMEMESH && ewc <= ewcPMEWAITCOMM);
}

static gmx_bool is_pme_subcounter(int ewc)
{
    return (ewc >= ewcPME_REDISTXF && ewc < ewcPMEWAITCOMM);
}
void wallcycle_sum(t_commrec *cr, gmx_wallcycle_t wc)
    double cycles_n[ewcNR+ewcsNR], buf[ewcNR+ewcsNR], *cyc_all, *buf_all;

    snew(wc->cycles_sum, ewcNR+ewcsNR);
    cycles = wc->cycles_sum;

    for (i = 0; i < ewcNR; i++)
        if (is_pme_counter(i) || (i == ewcRUN && cr->duty == DUTY_PME))
            wcc[i].c *= wc->nthreads_pme;
            for (j = 0; j < ewcNR; j++)
                wc->wcc_all[i*ewcNR+j].c *= wc->nthreads_pme;
            wcc[i].c *= wc->nthreads_pp;
            for (j = 0; j < ewcNR; j++)
                wc->wcc_all[i*ewcNR+j].c *= wc->nthreads_pp;
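
    /* Clarifying note (added, not in the original): the counters are read on
     * one thread per rank, so scaling by nthreads_pme/nthreads_pp above
     * approximates the total thread-cycles of that rank.  E.g. with
     * nthreads_pp = 4 and wcc[ewcFORCE].c = 1.0e9 cycles measured on the
     * main thread, the scaled value 4.0e9 is what the percentages printed by
     * wallcycle_print() are based on.
     */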
    if (wcc[ewcDDCOMMLOAD].n > 0)
        wcc[ewcDOMDEC].c -= wcc[ewcDDCOMMLOAD].c;
    if (wcc[ewcDDCOMMBOUND].n > 0)
        wcc[ewcDOMDEC].c -= wcc[ewcDDCOMMBOUND].c;
    if (wcc[ewcPME_FFTCOMM].n > 0)
        wcc[ewcPME_FFT].c -= wcc[ewcPME_FFTCOMM].c;

    if (cr->npmenodes == 0)
        /* All nodes do PME (or no PME at all) */
        if (wcc[ewcPMEMESH].n > 0)
            wcc[ewcFORCE].c -= wcc[ewcPMEMESH].c;

        /* There are PME-only nodes */
        if (wcc[ewcPMEMESH].n > 0)
            /* This must be a PME-only node, calculate the Wait + Comm. time */
            wcc[ewcPMEWAITCOMM].c = wcc[ewcRUN].c - wcc[ewcPMEMESH].c;
    /* Store the cycles in a double buffer for summing */
    for (i = 0; i < ewcNR; i++)
        cycles_n[i] = (double)wcc[i].n;
        cycles[i]   = (double)wcc[i].c;
#ifdef GMX_CYCLE_SUBCOUNTERS
    for (i = 0; i < ewcsNR; i++)
        wc->wcsc[i].c    *= wc->nthreads_pp;
        cycles_n[ewcNR+i] = (double)wc->wcsc[i].n;
        cycles[ewcNR+i]   = (double)wc->wcsc[i].c;
        MPI_Allreduce(cycles_n, buf, nsum, MPI_DOUBLE, MPI_MAX,
        for (i = 0; i < ewcNR; i++)
            wcc[i].n = (int)(buf[i] + 0.5);
#ifdef GMX_CYCLE_SUBCOUNTERS
        for (i = 0; i < ewcsNR; i++)
            wc->wcsc[i].n = (int)(buf[ewcNR+i] + 0.5);

        MPI_Allreduce(cycles, buf, nsum, MPI_DOUBLE, MPI_SUM,
        for (i = 0; i < nsum; i++)

        if (wc->wcc_all != NULL)
            snew(cyc_all, ewcNR*ewcNR);
            snew(buf_all, ewcNR*ewcNR);
            for (i = 0; i < ewcNR*ewcNR; i++)
                cyc_all[i] = wc->wcc_all[i].c;
            MPI_Allreduce(cyc_all, buf_all, ewcNR*ewcNR, MPI_DOUBLE, MPI_SUM,
            for (i = 0; i < ewcNR*ewcNR; i++)
                wc->wcc_all[i].c = buf_all[i];
static void print_cycles(FILE *fplog, double c2t, const char *name,
                         int nnodes, int nthreads,
                         int n, double c, double tot)
        snprintf(num, sizeof(num), "%10d", n);
        snprintf(thstr, sizeof(thstr), "N/A");
        snprintf(thstr, sizeof(thstr), "%4d", nthreads);

        /* Convert the cycle count to wallclock time for this task */
        /* Cycle count has been multiplied by the thread count,
         * correct for the number of threads used.
         */
        wallt = c*c2t*nthreads_tot/(double)(nnodes*nthreads);

    /* nthreads=-1 signals total run time, no correction required */

    fprintf(fplog, " %-19.19s %4d %4s %10s %10.3f %12.3f %5.1f\n",
            name, nnodes, thstr, num, wallt, c*1e-9, 100*c/tot);
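
    /* Worked example (added for illustration): with c2t = realtime/tot =
     * 100 s / 3.2e12 thread-cycles, nthreads_tot = 16, and a counter that
     * accumulated c = 8.0e11 thread-cycles on nnodes = 2 ranks with
     * nthreads = 4 threads each:
     *
     *     wallt = c*c2t*nthreads_tot/(nnodes*nthreads)
     *           = 8.0e11 * 3.125e-11 * 16/8 = 50 s
     *
     * i.e. the wall-clock time those ranks spent inside this counter.
     */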
static void print_gputimes(FILE *fplog, const char *name,
                           int n, double t, double tot_t)
        snprintf(num, sizeof(num), "%10d", n);
        snprintf(avg_perf, sizeof(avg_perf), "%10.3f", t/n);

        sprintf(avg_perf, "          ");

        fprintf(fplog, " %-29s %10s%12.3f %s %5.1f\n",
                name, num, t/1000, avg_perf, 100 * t/tot_t);

        fprintf(fplog, " %-29s %10s%12.3f %s %5.1f\n",
                name, "", t/1000, avg_perf, 100.0);
void wallcycle_print(FILE *fplog, int nnodes, int npme, double realtime,
                     gmx_wallcycle_t wc, wallclock_gpu_t *gpu_t)
    double      c2t, tot, tot_gpu, tot_cpu_overlap, gpu_cpu_ratio, sum, tot_k;
    int         i, j, npp, nth_pp, nth_pme, nth_tot;
    const char *hline = "-----------------------------------------------------------------------------";

    nth_pp  = wc->nthreads_pp;
    nth_pme = wc->nthreads_pme;

    cycles = wc->cycles_sum;

        nth_tot = npp*nth_pp + npme*nth_pme;
        nth_tot = npp*nth_pp;

    tot = cycles[ewcRUN];

    /* Conversion factor from cycles to seconds */

    fprintf(fplog, "\n R E A L   C Y C L E   A N D   T I M E   A C C O U N T I N G\n\n");

    fprintf(fplog, " Computing:         Nodes   Th.     Count  Wall t (s)     G-Cycles       %c\n", '%');
    fprintf(fplog, "%s\n", hline);
    for (i = ewcPPDURINGPME+1; i < ewcNR; i++)
        if (!is_pme_subcounter(i))
            print_cycles(fplog, c2t, wcn[i], nth_tot,
                         is_pme_counter(i) ? npme : npp,
                         is_pme_counter(i) ? nth_pme : nth_pp,
                         wc->wcc[i].n, cycles[i], tot);

    if (wc->wcc_all != NULL)
        for (i = 0; i < ewcNR; i++)
            for (j = 0; j < ewcNR; j++)
                snprintf(buf, 20, "%-9.9s %-9.9s", wcn[i], wcn[j]);
                print_cycles(fplog, c2t, buf, nth_tot,
                             is_pme_counter(i) ? npme : npp,
                             is_pme_counter(i) ? nth_pme : nth_pp,
                             wc->wcc_all[i*ewcNR+j].n,
                             wc->wcc_all[i*ewcNR+j].c,
                             tot);

    print_cycles(fplog, c2t, "Rest", nth_tot, npp, -1, 0, tot-sum, tot);
    fprintf(fplog, "%s\n", hline);
    print_cycles(fplog, c2t, "Total", nth_tot, nnodes, -1, 0, tot, tot);
    fprintf(fplog, "%s\n", hline);
    if (wc->wcc[ewcPMEMESH].n > 0)
        fprintf(fplog, "%s\n", hline);
        for (i = ewcPPDURINGPME+1; i < ewcNR; i++)
            if (is_pme_subcounter(i))
                print_cycles(fplog, c2t, wcn[i], nth_tot,
                             is_pme_counter(i) ? npme : npp,
                             is_pme_counter(i) ? nth_pme : nth_pp,
                             wc->wcc[i].n, cycles[i], tot);
        fprintf(fplog, "%s\n", hline);

#ifdef GMX_CYCLE_SUBCOUNTERS
    fprintf(fplog, "%s\n", hline);
    for (i = 0; i < ewcsNR; i++)
        print_cycles(fplog, c2t, wcsn[i], nth_tot, npp, nth_pp,
                     wc->wcsc[i].n, cycles[ewcNR+i], tot);
    fprintf(fplog, "%s\n", hline);
    /* print GPU timing summary */
        const char *k_log_str[2][2] = {
            {"Nonbonded F kernel", "Nonbonded F+ene k."},
            {"Nonbonded F+prune k.", "Nonbonded F+ene+prune k."}
        };

        tot_gpu = gpu_t->pl_h2d_t + gpu_t->nb_h2d_t + gpu_t->nb_d2h_t;

        /* add up the kernel timings */
        for (i = 0; i < 2; i++)
            for (j = 0; j < 2; j++)
                tot_k += gpu_t->ktime[i][j].t;

        tot_cpu_overlap = wc->wcc[ewcFORCE].c;
        if (wc->wcc[ewcPMEMESH].n > 0)
            tot_cpu_overlap += wc->wcc[ewcPMEMESH].c;
        tot_cpu_overlap *= c2t * 1000; /* convert the cycle count to ms */
        fprintf(fplog, "\n GPU timings\n%s\n", hline);
        fprintf(fplog, " Computing:                         Count  Wall t (s)      ms/step       %c\n", '%');
        fprintf(fplog, "%s\n", hline);
        print_gputimes(fplog, "Pair list H2D",
                       gpu_t->pl_h2d_c, gpu_t->pl_h2d_t, tot_gpu);
        print_gputimes(fplog, "X / q H2D",
                       gpu_t->nb_c, gpu_t->nb_h2d_t, tot_gpu);

        for (i = 0; i < 2; i++)
            for (j = 0; j < 2; j++)
                if (gpu_t->ktime[i][j].c)
                    print_gputimes(fplog, k_log_str[i][j],
                                   gpu_t->ktime[i][j].c, gpu_t->ktime[i][j].t, tot_gpu);

        print_gputimes(fplog, "F D2H", gpu_t->nb_c, gpu_t->nb_d2h_t, tot_gpu);
        fprintf(fplog, "%s\n", hline);
        print_gputimes(fplog, "Total ", gpu_t->nb_c, tot_gpu, tot_gpu);
        fprintf(fplog, "%s\n", hline);
        gpu_cpu_ratio = tot_gpu/tot_cpu_overlap;
        fprintf(fplog, "\nForce evaluation time GPU/CPU: %.3f ms/%.3f ms = %.3f\n",
                tot_gpu/gpu_t->nb_c, tot_cpu_overlap/wc->wcc[ewcFORCE].n,
                gpu_cpu_ratio);

        /* only print notes related to CPU-GPU load balance with PME */
        if (wc->wcc[ewcPMEMESH].n > 0)
            fprintf(fplog, "For optimal performance this ratio should be close to 1!\n");

            /* print a note if the imbalance is high with PME, in which case
             * CPU-GPU load balancing is possible */
            if (gpu_cpu_ratio < 0.75 || gpu_cpu_ratio > 1.2)
                /* Only the sim master calls this function, so always print to stderr */
                if (gpu_cpu_ratio < 0.75)
                    /* The user could have used -notunepme,
                     * but we currently can't check that here.
                     */
                    md_print_warn(NULL, fplog,
                                  "\nNOTE: The GPU has >25%% less load than the CPU. This imbalance causes\n"
                                  "      performance loss. Maybe the domain decomposition limits the PME tuning.\n"
                                  "      In that case, try setting the DD grid manually (-dd) or lowering -dds.");

                    /* We should not end up here, unless the box is
                     * too small for increasing the cut-off for PME tuning.
                     */
                    md_print_warn(NULL, fplog,
                                  "\nNOTE: The GPU has >25%% less load than the CPU. This imbalance causes\n"
                                  "      performance loss.");

                if (gpu_cpu_ratio > 1.2)
                    md_print_warn(NULL, fplog,
                                  "\nNOTE: The GPU has >20%% more load than the CPU. This imbalance causes\n"
                                  "      performance loss, consider using a shorter cut-off and a finer PME grid.");
    if (wc->wcc[ewcNB_XF_BUF_OPS].n > 0 &&
        (cycles[ewcDOMDEC] > tot*0.1 ||
         cycles[ewcNS] > tot*0.1))
        /* Only the sim master calls this function, so always print to stderr */
        if (wc->wcc[ewcDOMDEC].n == 0)
            md_print_warn(NULL, fplog,
                          "NOTE: %d %% of the run time was spent in pair search,\n"
                          "      you might want to increase nstlist (this has no effect on accuracy)\n",
                          (int)(100*cycles[ewcNS]/tot+0.5));

            md_print_warn(NULL, fplog,
                          "NOTE: %d %% of the run time was spent in domain decomposition,\n"
                          "      %d %% of the run time was spent in pair search,\n"
                          "      you might want to increase nstlist (this has no effect on accuracy)\n",
                          (int)(100*cycles[ewcDOMDEC]/tot+0.5),
                          (int)(100*cycles[ewcNS]/tot+0.5));
    if (cycles[ewcMoveE] > tot*0.05)
        /* Only the sim master calls this function, so always print to stderr */
        md_print_warn(NULL, fplog,
                      "NOTE: %d %% of the run time was spent communicating energies,\n"
                      "      you might want to use the -gcom option of mdrun\n",
                      (int)(100*cycles[ewcMoveE]/tot+0.5));
extern gmx_large_int_t wcycle_get_reset_counters(gmx_wallcycle_t wc)
    return wc->reset_counters;

extern void wcycle_set_reset_counters(gmx_wallcycle_t wc, gmx_large_int_t reset_counters)
    wc->reset_counters = reset_counters;
#ifdef GMX_CYCLE_SUBCOUNTERS

void wallcycle_sub_start(gmx_wallcycle_t wc, int ewcs)
    wc->wcsc[ewcs].start = gmx_cycles_read();

void wallcycle_sub_stop(gmx_wallcycle_t wc, int ewcs)
    wc->wcsc[ewcs].c += gmx_cycles_read() - wc->wcsc[ewcs].start;

#else

void wallcycle_sub_start(gmx_wallcycle_t gmx_unused wc, int gmx_unused ewcs) {}
void wallcycle_sub_stop(gmx_wallcycle_t gmx_unused wc, int gmx_unused ewcs) {}

#endif /* GMX_CYCLE_SUBCOUNTERS */