/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 *
 * This source code is part of
 *
 * GROningen MAchine for Chemical Simulations
 *
 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2008, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * If you want to redistribute modifications, please consider that
 * scientific software is very special. Version control is crucial -
 * bugs must be traceable. We will be happy to consider code for
 * inclusion in the official distribution, but derived work must not
 * be called official GROMACS. Details are found in the README & COPYING
 * files - if they are missing, get the official version at www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the papers on the package - you can find them in the top README file.
 *
 * For more info, check our website at http://www.gromacs.org
 *
 * Gallium Rubidium Oxygen Manganese Argon Carbon Silicon
 */
#include "gmx_wallcycle.h"
#include "gmx_cyclecounter.h"
#include "smalloc.h"   /* for snew(), used below; assumed include path for this GROMACS version */
#include "gmx_fatal.h"
/* DEBUG_WCYCLE adds consistency checking for the counters.
 * It checks whether you stop a counter other than the one that was
 * started last, and whether the counters are nested too deeply.
 */
/* #define DEBUG_WCYCLE */
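/* For illustration only (a hypothetical caller, not code from this file), the
 * kind of misuse these checks catch is a mismatched start/stop pair such as
 *
 *     wallcycle_start(wc, ewcFORCE);
 *     ...
 *     wallcycle_stop(wc, ewcUPDATE);   <-- with DEBUG_WCYCLE, debug_stop_check()
 *                                          calls gmx_fatal: "wallcycle mismatch
 *                                          at stop, start Force, stop Update"
 *
 * Without DEBUG_WCYCLE the mismatch is not detected and can silently corrupt
 * the affected cycle counts.
 */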
typedef struct gmx_wallcycle
    /* variables for testing/debugging */
    int             counterlist[DEPTH_MAX];
    gmx_cycles_t    cycle_prev;
    gmx_large_int_t reset_counters;
    MPI_Comm        mpi_comm_mygroup;
#ifdef GMX_CYCLE_SUBCOUNTERS

/* Each name should not exceed 19 characters */
static const char *wcn[ewcNR] =
{ "Run", "Step", "PP during PME", "Domain decomp.", "DD comm. load",
  "DD comm. bounds", "Vsite constr.", "Send X to PME", "Neighbor search", "Launch GPU ops.",
  "Comm. coord.", "Born radii", "Force", "Wait + Comm. F", "PME mesh",
  "PME redist. X/F", "PME spread/gather", "PME 3D-FFT", "PME 3D-FFT Comm.", "PME solve",
  "PME wait for PP", "Wait + Recv. PME F", "Wait GPU nonlocal", "Wait GPU local", "NB X/F buffer ops.",
  "Vsite spread", "Write traj.", "Update", "Constraints", "Comm. energies",
  "Enforced rotation", "Add rot. forces", "Test" };

static const char *wcsn[ewcsNR] =
{ "DD redist.", "DD NS grid + sort", "DD setup comm.",
  "DD make top.", "DD make constr.", "DD top. other",
  "NS grid local", "NS grid non-loc.", "NS search local", "NS search non-loc.",
  "Bonded F", "Nonbonded F", "Ewald F correction",
  "NB X buffer ops.", "NB F buffer ops."
};
gmx_bool wallcycle_have_counter(void)
    return gmx_cycles_have_counter();

gmx_wallcycle_t wallcycle_init(FILE *fplog,int resetstep,t_commrec *cr,
                               int nthreads_pp, int nthreads_pme)
    if (!wallcycle_have_counter())

    wc->wc_barrier = FALSE;

    wc->reset_counters = resetstep;
    wc->nthreads_pp = nthreads_pp;
    wc->nthreads_pme = nthreads_pme;
    wc->cycles_sum = NULL;

    if (PAR(cr) && getenv("GMX_CYCLE_BARRIER") != NULL)
        fprintf(fplog,"\nWill call MPI_Barrier before each cycle start/stop call\n\n");
        wc->wc_barrier = TRUE;
        wc->mpi_comm_mygroup = cr->mpi_comm_mygroup;

    if (getenv("GMX_CYCLE_ALL") != NULL)
        fprintf(fplog,"\nWill time all the code during the run\n\n");
        snew(wc->wcc_all,ewcNR*ewcNR);

#ifdef GMX_CYCLE_SUBCOUNTERS
    snew(wc->wcsc,ewcsNR);

void wallcycle_destroy(gmx_wallcycle_t wc)
    if (wc->wcc_all != NULL)
#ifdef GMX_CYCLE_SUBCOUNTERS
    if (wc->wcsc != NULL)

static void wallcycle_all_start(gmx_wallcycle_t wc,int ewc,gmx_cycles_t cycle)
    wc->cycle_prev = cycle;

static void wallcycle_all_stop(gmx_wallcycle_t wc,int ewc,gmx_cycles_t cycle)
    wc->wcc_all[wc->ewc_prev*ewcNR+ewc].n += 1;
    wc->wcc_all[wc->ewc_prev*ewcNR+ewc].c += cycle - wc->cycle_prev;

static void debug_start_check(gmx_wallcycle_t wc, int ewc)
    /* fprintf(stderr,"wcycle_start depth %d, %s\n",wc->count_depth,wcn[ewc]); */
    if (wc->count_depth < 0 || wc->count_depth >= DEPTH_MAX)
        gmx_fatal(FARGS,"wallcycle counter depth out of range: %d",
    wc->counterlist[wc->count_depth] = ewc;

static void debug_stop_check(gmx_wallcycle_t wc, int ewc)
    /* fprintf(stderr,"wcycle_stop depth %d, %s\n",wc->count_depth,wcn[ewc]); */
    if (wc->count_depth < 0)
        gmx_fatal(FARGS,"wallcycle counter depth out of range when stopping %s: %d",wcn[ewc],wc->count_depth);
    if (wc->counterlist[wc->count_depth] != ewc)
        gmx_fatal(FARGS,"wallcycle mismatch at stop, start %s, stop %s",
                  wcn[wc->counterlist[wc->count_depth]],wcn[ewc]);

void wallcycle_start(gmx_wallcycle_t wc, int ewc)
        MPI_Barrier(wc->mpi_comm_mygroup);
    debug_start_check(wc,ewc);
    cycle = gmx_cycles_read();
    wc->wcc[ewc].start = cycle;
    if (wc->wcc_all != NULL)
            wallcycle_all_start(wc,ewc,cycle);
        else if (wc->wc_depth == 3)
            wallcycle_all_stop(wc,ewc,cycle);

void wallcycle_start_nocount(gmx_wallcycle_t wc, int ewc)
    wallcycle_start(wc, ewc);

double wallcycle_stop(gmx_wallcycle_t wc, int ewc)
    gmx_cycles_t cycle,last;
        MPI_Barrier(wc->mpi_comm_mygroup);
    debug_stop_check(wc,ewc);
    cycle = gmx_cycles_read();
    last = cycle - wc->wcc[ewc].start;
    wc->wcc[ewc].c += last;
            wallcycle_all_stop(wc,ewc,cycle);
        else if (wc->wc_depth == 2)
            wallcycle_all_start(wc,ewc,cycle);
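/* Usage sketch (illustrative, not code from this file): counters are meant to
 * be strictly paired and properly nested around the region they time, e.g. in
 * a hypothetical force routine:
 *
 *     wallcycle_start(wcycle, ewcFORCE);
 *     ... compute forces ...
 *     wallcycle_stop(wcycle, ewcFORCE);
 *
 * wallcycle_start_nocount() re-opens a counter without incrementing its call
 * count, which callers can use when re-entering a region that was already
 * counted for this step. The value returned by wallcycle_stop() is the cycle
 * count of the interval just closed.
 */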
void wallcycle_reset_all(gmx_wallcycle_t wc)
    for(i=0; i<ewcNR; i++)
        for(i=0; i<ewcNR*ewcNR; i++)
            wc->wcc_all[i].n = 0;
            wc->wcc_all[i].c = 0;
#ifdef GMX_CYCLE_SUBCOUNTERS
    for (i=0; i<ewcsNR; i++)

static gmx_bool is_pme_counter(int ewc)
    return (ewc >= ewcPMEMESH && ewc <= ewcPMEWAITCOMM);

static gmx_bool is_pme_subcounter(int ewc)
    return (ewc >= ewcPME_REDISTXF && ewc < ewcPMEWAITCOMM);
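/* Note: both range checks above rely on the ordering of the ewc* enum in
 * gmx_wallcycle.h: ewcPMEMESH..ewcPMEWAITCOMM are treated as the counters that
 * run on PME ranks, and ewcPME_REDISTXF up to (but not including)
 * ewcPMEWAITCOMM as the PME mesh sub-counters. If that enum is reordered,
 * these helpers and the report layout in wallcycle_print() silently break.
 */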
void wallcycle_sum(t_commrec *cr, gmx_wallcycle_t wc)
    double cycles_n[ewcNR+ewcsNR],buf[ewcNR+ewcsNR],*cyc_all,*buf_all;

    snew(wc->cycles_sum,ewcNR+ewcsNR);
    cycles = wc->cycles_sum;

    for(i=0; i<ewcNR; i++)
        if (is_pme_counter(i) || (i==ewcRUN && cr->duty == DUTY_PME))
            wcc[i].c *= wc->nthreads_pme;
                for(j=0; j<ewcNR; j++)
                    wc->wcc_all[i*ewcNR+j].c *= wc->nthreads_pme;
            wcc[i].c *= wc->nthreads_pp;
                for(j=0; j<ewcNR; j++)
                    wc->wcc_all[i*ewcNR+j].c *= wc->nthreads_pp;

    if (wcc[ewcDDCOMMLOAD].n > 0)
        wcc[ewcDOMDEC].c -= wcc[ewcDDCOMMLOAD].c;
    if (wcc[ewcDDCOMMBOUND].n > 0)
        wcc[ewcDOMDEC].c -= wcc[ewcDDCOMMBOUND].c;
    if (wcc[ewcPME_FFTCOMM].n > 0)
        wcc[ewcPME_FFT].c -= wcc[ewcPME_FFTCOMM].c;

    if (cr->npmenodes == 0)
        /* All nodes do PME (or no PME at all) */
        if (wcc[ewcPMEMESH].n > 0)
            wcc[ewcFORCE].c -= wcc[ewcPMEMESH].c;
        /* There are PME-only nodes */
        if (wcc[ewcPMEMESH].n > 0)
            /* This must be a PME-only node, calculate the Wait + Comm. time */
            wcc[ewcPMEWAITCOMM].c = wcc[ewcRUN].c - wcc[ewcPMEMESH].c;
    /* Store the cycles in a double buffer for summing */
    for(i=0; i<ewcNR; i++)
        cycles_n[i] = (double)wcc[i].n;
        cycles[i] = (double)wcc[i].c;
#ifdef GMX_CYCLE_SUBCOUNTERS
    for(i=0; i<ewcsNR; i++)
        wc->wcsc[i].c *= wc->nthreads_pp;
        cycles_n[ewcNR+i] = (double)wc->wcsc[i].n;
        cycles[ewcNR+i] = (double)wc->wcsc[i].c;

    MPI_Allreduce(cycles_n,buf,nsum,MPI_DOUBLE,MPI_MAX,
    for(i=0; i<ewcNR; i++)
        wcc[i].n = (int)(buf[i] + 0.5);
#ifdef GMX_CYCLE_SUBCOUNTERS
    for(i=0; i<ewcsNR; i++)
        wc->wcsc[i].n = (int)(buf[ewcNR+i] + 0.5);

    MPI_Allreduce(cycles,buf,nsum,MPI_DOUBLE,MPI_SUM,
    for(i=0; i<nsum; i++)

    if (wc->wcc_all != NULL)
        snew(cyc_all,ewcNR*ewcNR);
        snew(buf_all,ewcNR*ewcNR);
        for(i=0; i<ewcNR*ewcNR; i++)
            cyc_all[i] = wc->wcc_all[i].c;
        MPI_Allreduce(cyc_all,buf_all,ewcNR*ewcNR,MPI_DOUBLE,MPI_SUM,
        for(i=0; i<ewcNR*ewcNR; i++)
            wc->wcc_all[i].c = buf_all[i];

static void print_cycles(FILE *fplog, double c2t, const char *name,
                         int nnodes_tot,int nnodes, int nthreads,
                         int n, double c, double tot)
        sprintf(num,"%10d",n);
            sprintf(thstr, "N/A");
            sprintf(thstr, "%4d", nthreads);
    wallt = c*c2t*nnodes_tot/(double)nnodes;
    fprintf(fplog," %-19s %4d %4s %10s %10.3f %12.3f %5.1f\n",
            name,nnodes,thstr,num,wallt,c*1e-9,100*c/tot);

static void print_gputimes(FILE *fplog, const char *name,
                           int n, double t, double tot_t)
        sprintf(num, "%10d", n);
        sprintf(avg_perf, "%10.3f", t/n);
        sprintf(avg_perf," ");
        fprintf(fplog, " %-29s %10s%12.3f %s %5.1f\n",
                name, num, t/1000, avg_perf, 100 * t/tot_t);
        fprintf(fplog, " %-29s %10s%12.3f %s %5.1f\n",
                name, "", t/1000, avg_perf, 100.0);

void wallcycle_print(FILE *fplog, int nnodes, int npme, double realtime,
                     gmx_wallcycle_t wc, wallclock_gpu_t *gpu_t)
    double c2t,tot,tot_gpu,tot_cpu_overlap,gpu_cpu_ratio,sum,tot_k;
    int i,j,npp,nth_pp,nth_pme;
    const char *hline = "-----------------------------------------------------------------------------";

    nth_pp = wc->nthreads_pp;
    nth_pme = wc->nthreads_pme;

    cycles = wc->cycles_sum;

    tot = cycles[ewcRUN];
    /* Conversion factor from cycles to seconds */
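    /* Sketch of the conversion (the assignment itself is elided here):
     * print_cycles() reports wall time as
     *
     *     wallt = c * c2t * nnodes_tot / nnodes
     *
     * so c2t is presumably realtime divided by the total run cycles (tot),
     * i.e. seconds per cycle summed over all nodes. Treat this as an
     * assumption about the elided line, not a quote of it.
     */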
    fprintf(fplog,"\n     R E A L   C Y C L E   A N D   T I M E   A C C O U N T I N G\n\n");

    fprintf(fplog," Computing:         Nodes     Th.     Count  Wall t (s)     G-Cycles       %c\n",'%');
    fprintf(fplog,"%s\n",hline);
    for(i=ewcPPDURINGPME+1; i<ewcNR; i++)
        if (!is_pme_subcounter(i))
            print_cycles(fplog,c2t,wcn[i],nnodes,
                         is_pme_counter(i) ? npme : npp,
                         is_pme_counter(i) ? nth_pme : nth_pp,
                         wc->wcc[i].n,cycles[i],tot);

    if (wc->wcc_all != NULL)
        for(i=0; i<ewcNR; i++)
            for(j=0; j<ewcNR; j++)
                sprintf(buf,"%-9s",wcn[i]);
                sprintf(buf+10,"%-9s",wcn[j]);
                print_cycles(fplog,c2t,buf,nnodes,
                             is_pme_counter(i) ? npme : npp,
                             is_pme_counter(i) ? nth_pme : nth_pp,
                             wc->wcc_all[i*ewcNR+j].n,
                             wc->wcc_all[i*ewcNR+j].c,
    print_cycles(fplog,c2t,"Rest",npp,npp,-1,0,tot-sum,tot);
    fprintf(fplog,"%s\n",hline);
    print_cycles(fplog,c2t,"Total",nnodes,nnodes,-1,0,tot,tot);
    fprintf(fplog,"%s\n",hline);

    if (wc->wcc[ewcPMEMESH].n > 0)
        fprintf(fplog,"%s\n",hline);
        for(i=ewcPPDURINGPME+1; i<ewcNR; i++)
            if (is_pme_subcounter(i))
                print_cycles(fplog,c2t,wcn[i],nnodes,
                             is_pme_counter(i) ? npme : npp,
                             is_pme_counter(i) ? nth_pme : nth_pp,
                             wc->wcc[i].n,cycles[i],tot);
        fprintf(fplog,"%s\n",hline);

#ifdef GMX_CYCLE_SUBCOUNTERS
    fprintf(fplog,"%s\n",hline);
    for(i=0; i<ewcsNR; i++)
        print_cycles(fplog,c2t,wcsn[i],nnodes,npp,nth_pp,
                     wc->wcsc[i].n,cycles[ewcNR+i],tot);
    fprintf(fplog,"%s\n",hline);

    /* print GPU timing summary */
        const char *k_log_str[2][2] = {
            {"Nonbonded F kernel", "Nonbonded F+ene k."},
            {"Nonbonded F+prune k.", "Nonbonded F+ene+prune k."}};

        tot_gpu = gpu_t->pl_h2d_t + gpu_t->nb_h2d_t + gpu_t->nb_d2h_t;

        /* add up the kernel timings */
        for (i = 0; i < 2; i++)
            for(j = 0; j < 2; j++)
                tot_k += gpu_t->ktime[i][j].t;

        tot_cpu_overlap = wc->wcc[ewcFORCE].c;
        if (wc->wcc[ewcPMEMESH].n > 0)
            tot_cpu_overlap += wc->wcc[ewcPMEMESH].c;
        tot_cpu_overlap *= c2t * 1000; /* convert cycles to ms (c2t gives s, the factor 1000 gives ms) */
        fprintf(fplog, "\n GPU timings\n%s\n", hline);
        fprintf(fplog," Computing:                         Count  Wall t (s)      ms/step       %c\n",'%');
        fprintf(fplog, "%s\n", hline);
        print_gputimes(fplog, "Pair list H2D",
                       gpu_t->pl_h2d_c, gpu_t->pl_h2d_t, tot_gpu);
        print_gputimes(fplog, "X / q H2D",
                       gpu_t->nb_c, gpu_t->nb_h2d_t, tot_gpu);

        for (i = 0; i < 2; i++)
            for(j = 0; j < 2; j++)
                if (gpu_t->ktime[i][j].c)
                    print_gputimes(fplog, k_log_str[i][j],
                                   gpu_t->ktime[i][j].c, gpu_t->ktime[i][j].t, tot_gpu);

        print_gputimes(fplog, "F D2H", gpu_t->nb_c, gpu_t->nb_d2h_t, tot_gpu);
        fprintf(fplog, "%s\n", hline);
        print_gputimes(fplog, "Total ", gpu_t->nb_c, tot_gpu, tot_gpu);
        fprintf(fplog, "%s\n", hline);

        gpu_cpu_ratio = tot_gpu/tot_cpu_overlap;
        fprintf(fplog, "\n Force evaluation time GPU/CPU: %.3f ms/%.3f ms = %.3f\n",
                tot_gpu/gpu_t->nb_c, tot_cpu_overlap/wc->wcc[ewcFORCE].n,

        /* only print notes related to CPU-GPU load balance with PME */
        if (wc->wcc[ewcPMEMESH].n > 0)
            fprintf(fplog, "For optimal performance this ratio should be close to 1!\n");
            /* Print a note if the imbalance is high; in the PME case
             * CPU-GPU load balancing is possible. */
            if (gpu_cpu_ratio < 0.75 || gpu_cpu_ratio > 1.2)
                if (gpu_cpu_ratio < 0.75)
                    sprintf(buf, "NOTE: The GPU has >25%% less load than the CPU. This imbalance causes\n"
                                 "      performance loss, consider turning on PME tuning (-tunepme).");
                if (gpu_cpu_ratio > 1.2)
                    sprintf(buf, "NOTE: The GPU has >20%% more load than the CPU. This imbalance causes\n"
                                 "      performance loss, consider using a shorter cut-off.");
                fprintf(fplog,"\n%s\n",buf);
                fprintf(stderr,"\n\n%s\n",buf);

    if (wc->wcc[ewcNB_XF_BUF_OPS].n > 0 &&
        (cycles[ewcDOMDEC] > tot*0.1 ||
         cycles[ewcNS] > tot*0.1))
        if (wc->wcc[ewcDOMDEC].n == 0)
                    "NOTE: %d %% of the run time was spent in pair search,\n"
                    "      you might want to increase nstlist (this has no effect on accuracy)\n",
                    (int)(100*cycles[ewcNS]/tot+0.5));
                    "NOTE: %d %% of the run time was spent in domain decomposition,\n"
                    "      %d %% of the run time was spent in pair search,\n"
                    "      you might want to increase nstlist (this has no effect on accuracy)\n",
                    (int)(100*cycles[ewcDOMDEC]/tot+0.5),
                    (int)(100*cycles[ewcNS]/tot+0.5));
        fprintf(fplog,"\n%s\n",buf);
        /* Only the sim master calls this function, so always print to stderr */
        fprintf(stderr,"\n%s\n",buf);

    if (cycles[ewcMoveE] > tot*0.05)
                "NOTE: %d %% of the run time was spent communicating energies,\n"
                "      you might want to use the -gcom option of mdrun\n",
                (int)(100*cycles[ewcMoveE]/tot+0.5));
        fprintf(fplog,"\n%s\n",buf);
        /* Only the sim master calls this function, so always print to stderr */
        fprintf(stderr,"\n%s\n",buf);

extern gmx_large_int_t wcycle_get_reset_counters(gmx_wallcycle_t wc)
    return wc->reset_counters;

extern void wcycle_set_reset_counters(gmx_wallcycle_t wc, gmx_large_int_t reset_counters)
    wc->reset_counters = reset_counters;

#ifdef GMX_CYCLE_SUBCOUNTERS

void wallcycle_sub_start(gmx_wallcycle_t wc, int ewcs)
    wc->wcsc[ewcs].start = gmx_cycles_read();

void wallcycle_sub_stop(gmx_wallcycle_t wc, int ewcs)
    wc->wcsc[ewcs].c += gmx_cycles_read() - wc->wcsc[ewcs].start;

#endif /* GMX_CYCLE_SUBCOUNTERS */
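/* Usage sketch (illustrative, not code from this file): a caller brackets a
 * hot region with the sub-counter API the same way the main counters are used,
 * e.g.
 *
 *     wallcycle_sub_start(wcycle, ewcsNONBONDED);
 *     ... nonbonded kernel ...
 *     wallcycle_sub_stop(wcycle, ewcsNONBONDED);
 *
 * ewcsNONBONDED is assumed here to be the enum value behind the "Nonbonded F"
 * entry of wcsn[]; see gmx_wallcycle.h for the actual enum names. When
 * GMX_CYCLE_SUBCOUNTERS is not defined these calls are expected to compile
 * away, since the functions above are only built in that case.
 */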