/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2008, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 * Copyright (c) 2012, by the GROMACS development team, led by
 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
 * others, as listed in the AUTHORS file in the top-level source
 * directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>   /* FILE, fprintf, snprintf */
#include <stdlib.h>  /* getenv */
#include <string.h>

#include "gmx_wallcycle.h"
#include "gmx_cyclecounter.h"
#include "smalloc.h"     /* snew()/sfree() used throughout this file */
#include "gmx_fatal.h"
#include "md_logging.h"
#include "string2.h"     /* STRLEN */

#ifdef GMX_LIB_MPI
#include <mpi.h>
#endif
#ifdef GMX_THREAD_MPI
#include "tmpi.h"
#endif

/* DEBUG_WCYCLE adds consistency checking for the counters.
 * It checks whether you stop a counter different from the last
 * one that was opened, and whether you nest too deeply.
 */
/* #define DEBUG_WCYCLE */

typedef struct
{
    int          n;     /* number of completed start/stop pairs     */
    gmx_cycles_t c;     /* accumulated cycle count                  */
    gmx_cycles_t start; /* cycle count at the most recent start     */
} wallcc_t;

typedef struct gmx_wallcycle
{
    wallcc_t        *wcc;
    /* variables for testing/debugging */
    gmx_bool         wc_barrier;
    wallcc_t        *wcc_all;
    int              wc_depth;
#ifdef DEBUG_WCYCLE
#define DEPTH_MAX 6
    int              counterlist[DEPTH_MAX];
    int              count_depth;
#endif
    int              ewc_prev;
    gmx_cycles_t     cycle_prev;
    gmx_large_int_t  reset_counters;
#ifdef GMX_MPI
    MPI_Comm         mpi_comm_mygroup;
#endif
    int              nthreads_pp;
    int              nthreads_pme;
#ifdef GMX_CYCLE_SUBCOUNTERS
    wallcc_t        *wcsc;
#endif
    double          *cycles_sum;
} gmx_wallcycle_t_t;
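
/* Layout note for wcc_all (only allocated when GMX_CYCLE_ALL is set):
 * it is an ewcNR x ewcNR matrix stored row-major and indexed as
 *
 *     wcc_all[prev*ewcNR + current]
 *
 * so each cell accumulates the cycles spent in counter `current` while
 * `prev` was the enclosing counter; see wallcycle_all_start/stop below.
 */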

/* Each name should not exceed 19 characters */
static const char *wcn[ewcNR] =
{ "Run", "Step", "PP during PME", "Domain decomp.", "DD comm. load",
  "DD comm. bounds", "Vsite constr.", "Send X to PME", "Neighbor search", "Launch GPU ops.",
  "Comm. coord.", "Born radii", "Force", "Wait + Comm. F", "PME mesh",
  "PME redist. X/F", "PME spread/gather", "PME 3D-FFT", "PME 3D-FFT Comm.", "PME solve",
  "PME wait for PP", "Wait + Recv. PME F", "Wait GPU nonlocal", "Wait GPU local", "NB X/F buffer ops.",
  "Vsite spread", "Write traj.", "Update", "Constraints", "Comm. energies",
  "Enforced rotation", "Add rot. forces", "Test" };

static const char *wcsn[ewcsNR] =
{ "DD redist.", "DD NS grid + sort", "DD setup comm.",
  "DD make top.", "DD make constr.", "DD top. other",
  "NS grid local", "NS grid non-loc.", "NS search local", "NS search non-loc.",
  "Bonded F", "Nonbonded F", "Ewald F correction",
  "NB X buffer ops.", "NB F buffer ops." };

gmx_bool wallcycle_have_counter(void)
{
    return gmx_cycles_have_counter();
}

gmx_wallcycle_t wallcycle_init(FILE *fplog, int resetstep, t_commrec *cr,
                               int nthreads_pp, int nthreads_pme)
{
    gmx_wallcycle_t wc;

    if (!wallcycle_have_counter())
    {
        return NULL;
    }

    snew(wc, 1);

    wc->wc_barrier     = FALSE;
    wc->wcc_all        = NULL;
    wc->wc_depth       = 0;
    wc->ewc_prev       = -1;
    wc->reset_counters = resetstep;
    wc->nthreads_pp    = nthreads_pp;
    wc->nthreads_pme   = nthreads_pme;
    wc->cycles_sum     = NULL;

#ifdef GMX_MPI
    if (PAR(cr) && getenv("GMX_CYCLE_BARRIER") != NULL)
    {
        if (fplog)
        {
            fprintf(fplog, "\nWill call MPI_Barrier before each cycle start/stop call\n\n");
        }
        wc->wc_barrier       = TRUE;
        wc->mpi_comm_mygroup = cr->mpi_comm_mygroup;
    }
#endif

    snew(wc->wcc, ewcNR);
    if (getenv("GMX_CYCLE_ALL") != NULL)
    {
        if (fplog)
        {
            fprintf(fplog, "\nWill time all the code during the run\n\n");
        }
        snew(wc->wcc_all, ewcNR*ewcNR);
    }

#ifdef GMX_CYCLE_SUBCOUNTERS
    snew(wc->wcsc, ewcsNR);
#endif

    return wc;
}
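
/* Illustrative call sequence (a sketch; the real driver code in mdrun is
 * more involved):
 *
 *     gmx_wallcycle_t wc = wallcycle_init(fplog, 0, cr, nth_pp, nth_pme);
 *     wallcycle_start(wc, ewcRUN);
 *     ...                              (the MD loop, with nested counters)
 *     wallcycle_stop(wc, ewcRUN);
 *     wallcycle_sum(cr, wc);
 *     wallcycle_print(fplog, nnodes, npme, realtime, wc, NULL);
 *     wallcycle_destroy(wc);
 *
 * The environment variables GMX_CYCLE_BARRIER and GMX_CYCLE_ALL checked
 * above enable the MPI barrier and the all-pairs timing matrix.
 */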

void wallcycle_destroy(gmx_wallcycle_t wc)
{
    if (wc == NULL)
    {
        return;
    }

    if (wc->wcc != NULL)
    {
        sfree(wc->wcc);
    }
    if (wc->wcc_all != NULL)
    {
        sfree(wc->wcc_all);
    }
#ifdef GMX_CYCLE_SUBCOUNTERS
    if (wc->wcsc != NULL)
    {
        sfree(wc->wcsc);
    }
#endif
    sfree(wc);
}

static void wallcycle_all_start(gmx_wallcycle_t wc, int ewc, gmx_cycles_t cycle)
{
    wc->ewc_prev   = ewc;
    wc->cycle_prev = cycle;
}

static void wallcycle_all_stop(gmx_wallcycle_t wc, int ewc, gmx_cycles_t cycle)
{
    wc->wcc_all[wc->ewc_prev*ewcNR+ewc].n += 1;
    wc->wcc_all[wc->ewc_prev*ewcNR+ewc].c += cycle - wc->cycle_prev;
}

#ifdef DEBUG_WCYCLE
static void debug_start_check(gmx_wallcycle_t wc, int ewc)
{
    /* fprintf(stderr,"wcycle_start depth %d, %s\n",wc->count_depth,wcn[ewc]); */

    if (wc->count_depth < 0 || wc->count_depth >= DEPTH_MAX)
    {
        gmx_fatal(FARGS, "wallcycle counter depth out of range: %d",
                  wc->count_depth);
    }
    wc->counterlist[wc->count_depth] = ewc;
    wc->count_depth++;
}

static void debug_stop_check(gmx_wallcycle_t wc, int ewc)
{
    wc->count_depth--;

    /* fprintf(stderr,"wcycle_stop depth %d, %s\n",wc->count_depth,wcn[ewc]); */

    if (wc->count_depth < 0)
    {
        gmx_fatal(FARGS, "wallcycle counter depth out of range when stopping %s: %d",
                  wcn[ewc], wc->count_depth);
    }
    if (wc->counterlist[wc->count_depth] != ewc)
    {
        gmx_fatal(FARGS, "wallcycle mismatch at stop, start %s, stop %s",
                  wcn[wc->counterlist[wc->count_depth]], wcn[ewc]);
    }
}
#endif
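
/* Example of the bug class these checks catch (illustrative sketch,
 * requires DEBUG_WCYCLE to be defined):
 *
 *     wallcycle_start(wc, ewcFORCE);
 *     wallcycle_stop(wc, ewcUPDATE);   triggers "wallcycle mismatch at stop"
 *
 * i.e. stopping a counter other than the innermost one currently open.
 */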

void wallcycle_start(gmx_wallcycle_t wc, int ewc)
{
    gmx_cycles_t cycle;

    if (wc == NULL)
    {
        return;
    }

#ifdef GMX_MPI
    if (wc->wc_barrier)
    {
        MPI_Barrier(wc->mpi_comm_mygroup);
    }
#endif

#ifdef DEBUG_WCYCLE
    debug_start_check(wc, ewc);
#endif

    cycle              = gmx_cycles_read();
    wc->wcc[ewc].start = cycle;
    if (wc->wcc_all != NULL)
    {
        wc->wc_depth++;
        if (ewc == ewcRUN)
        {
            wallcycle_all_start(wc, ewc, cycle);
        }
        else if (wc->wc_depth == 3)
        {
            wallcycle_all_stop(wc, ewc, cycle);
        }
    }
}

void wallcycle_start_nocount(gmx_wallcycle_t wc, int ewc)
{
    if (wc == NULL)
    {
        return;
    }

    /* Start the counter, then cancel out the n++ that the matching
     * wallcycle_stop() will do, so the call count is not increased. */
    wallcycle_start(wc, ewc);
    wc->wcc[ewc].n--;
}

double wallcycle_stop(gmx_wallcycle_t wc, int ewc)
{
    gmx_cycles_t cycle, last;

    if (wc == NULL)
    {
        return 0;
    }

#ifdef GMX_MPI
    if (wc->wc_barrier)
    {
        MPI_Barrier(wc->mpi_comm_mygroup);
    }
#endif

#ifdef DEBUG_WCYCLE
    debug_stop_check(wc, ewc);
#endif

    cycle           = gmx_cycles_read();
    last            = cycle - wc->wcc[ewc].start;
    wc->wcc[ewc].c += last;
    wc->wcc[ewc].n++;
    if (wc->wcc_all != NULL)
    {
        wc->wc_depth--;
        if (ewc == ewcRUN)
        {
            wallcycle_all_stop(wc, ewc, cycle);
        }
        else if (wc->wc_depth == 2)
        {
            wallcycle_all_start(wc, ewc, cycle);
        }
    }

    return last;
}
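
/* Typical paired use around a code region (illustrative sketch, not code
 * from this file):
 *
 *     wallcycle_start(wc, ewcFORCE);
 *     ...                            (compute forces)
 *     double cycles = wallcycle_stop(wc, ewcFORCE);
 *
 * wallcycle_stop() returns the cycle count of this start/stop interval,
 * and every start must be matched by a stop of the same counter.
 */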

void wallcycle_reset_all(gmx_wallcycle_t wc)
{
    int i;

    if (wc == NULL)
    {
        return;
    }

    for (i = 0; i < ewcNR; i++)
    {
        wc->wcc[i].n = 0;
        wc->wcc[i].c = 0;
    }
    if (wc->wcc_all != NULL)
    {
        for (i = 0; i < ewcNR*ewcNR; i++)
        {
            wc->wcc_all[i].n = 0;
            wc->wcc_all[i].c = 0;
        }
    }
#ifdef GMX_CYCLE_SUBCOUNTERS
    for (i = 0; i < ewcsNR; i++)
    {
        wc->wcsc[i].n = 0;
        wc->wcsc[i].c = 0;
    }
#endif
}

static gmx_bool is_pme_counter(int ewc)
{
    return (ewc >= ewcPMEMESH && ewc <= ewcPMEWAITCOMM);
}

static gmx_bool is_pme_subcounter(int ewc)
{
    return (ewc >= ewcPME_REDISTXF && ewc < ewcPMEWAITCOMM);
}
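
/* Both helpers above rely on the ordering of the ewc* enum in
 * gmx_wallcycle.h: all PME counters must lie contiguously between
 * ewcPMEMESH and ewcPMEWAITCOMM, with the subcounters starting at
 * ewcPME_REDISTXF. Reordering that enum silently breaks these tests.
 */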

void wallcycle_sum(t_commrec *cr, gmx_wallcycle_t wc)
{
    wallcc_t *wcc;
    double   *cycles;
    double   cycles_n[ewcNR+ewcsNR], buf[ewcNR+ewcsNR], *cyc_all, *buf_all;
    int      i, j;
    int      nsum;

    if (wc == NULL)
    {
        return;
    }

    snew(wc->cycles_sum, ewcNR+ewcsNR);
    cycles = wc->cycles_sum;

    wcc = wc->wcc;

    for (i = 0; i < ewcNR; i++)
    {
        if (is_pme_counter(i) || (i == ewcRUN && cr->duty == DUTY_PME))
        {
            wcc[i].c *= wc->nthreads_pme;

            if (wc->wcc_all)
            {
                for (j = 0; j < ewcNR; j++)
                {
                    wc->wcc_all[i*ewcNR+j].c *= wc->nthreads_pme;
                }
            }
        }
        else
        {
            wcc[i].c *= wc->nthreads_pp;

            if (wc->wcc_all)
            {
                for (j = 0; j < ewcNR; j++)
                {
                    wc->wcc_all[i*ewcNR+j].c *= wc->nthreads_pp;
                }
            }
        }
    }

    if (wcc[ewcDDCOMMLOAD].n > 0)
    {
        wcc[ewcDOMDEC].c -= wcc[ewcDDCOMMLOAD].c;
    }
    if (wcc[ewcDDCOMMBOUND].n > 0)
    {
        wcc[ewcDOMDEC].c -= wcc[ewcDDCOMMBOUND].c;
    }
    if (wcc[ewcPME_FFTCOMM].n > 0)
    {
        wcc[ewcPME_FFT].c -= wcc[ewcPME_FFTCOMM].c;
    }

    if (cr->npmenodes == 0)
    {
        /* All nodes do PME (or no PME at all) */
        if (wcc[ewcPMEMESH].n > 0)
        {
            wcc[ewcFORCE].c -= wcc[ewcPMEMESH].c;
        }
    }
    else
    {
        /* There are PME-only nodes */
        if (wcc[ewcPMEMESH].n > 0)
        {
            /* This must be a PME-only node, calculate the Wait + Comm. time */
            wcc[ewcPMEWAITCOMM].c = wcc[ewcRUN].c - wcc[ewcPMEMESH].c;
        }
    }

    /* Store the cycles in a double buffer for summing */
    for (i = 0; i < ewcNR; i++)
    {
        cycles_n[i] = (double)wcc[i].n;
        cycles[i]   = (double)wcc[i].c;
    }
    nsum = ewcNR;
#ifdef GMX_CYCLE_SUBCOUNTERS
    for (i = 0; i < ewcsNR; i++)
    {
        wc->wcsc[i].c    *= wc->nthreads_pp;
        cycles_n[ewcNR+i] = (double)wc->wcsc[i].n;
        cycles[ewcNR+i]   = (double)wc->wcsc[i].c;
    }
    nsum += ewcsNR;
#endif

#ifdef GMX_MPI
    if (cr->nnodes > 1)
    {
        MPI_Allreduce(cycles_n, buf, nsum, MPI_DOUBLE, MPI_MAX,
                      cr->mpi_comm_mysim);
        for (i = 0; i < ewcNR; i++)
        {
            wcc[i].n = (int)(buf[i] + 0.5);
        }
#ifdef GMX_CYCLE_SUBCOUNTERS
        for (i = 0; i < ewcsNR; i++)
        {
            wc->wcsc[i].n = (int)(buf[ewcNR+i] + 0.5);
        }
#endif

        MPI_Allreduce(cycles, buf, nsum, MPI_DOUBLE, MPI_SUM,
                      cr->mpi_comm_mysim);
        for (i = 0; i < nsum; i++)
        {
            cycles[i] = buf[i];
        }

        if (wc->wcc_all != NULL)
        {
            snew(cyc_all, ewcNR*ewcNR);
            snew(buf_all, ewcNR*ewcNR);
            for (i = 0; i < ewcNR*ewcNR; i++)
            {
                cyc_all[i] = wc->wcc_all[i].c;
            }
            MPI_Allreduce(cyc_all, buf_all, ewcNR*ewcNR, MPI_DOUBLE, MPI_SUM,
                          cr->mpi_comm_mysim);
            for (i = 0; i < ewcNR*ewcNR; i++)
            {
                wc->wcc_all[i].c = buf_all[i];
            }
            sfree(buf_all);
            sfree(cyc_all);
        }
    }
#endif
}
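
/* Note on the reductions above: the call counts are combined with MPI_MAX,
 * so each counter reports a representative per-rank count rather than the
 * sum over ranks, while the cycle totals are combined with MPI_SUM so the
 * table printed by wallcycle_print() reflects all ranks together.
 */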

static void print_cycles(FILE *fplog, double c2t, const char *name,
                         int nnodes_tot, int nnodes, int nthreads,
                         int n, double c, double tot)
{
    char   num[11], thstr[6];
    double wallt;

    if (c > 0)
    {
        if (n > 0)
        {
            snprintf(num, sizeof(num), "%10d", n);
            if (nthreads < 0)
                snprintf(thstr, sizeof(thstr), "N/A");
            else
                snprintf(thstr, sizeof(thstr), "%4d", nthreads);
        }
        else
        {
            sprintf(num, "          ");
            sprintf(thstr, "    ");
        }
        wallt = c*c2t*nnodes_tot/(double)nnodes;
        fprintf(fplog, " %-19s %4d %4s %10s %10.3f %12.3f %5.1f\n",
                name, nnodes, thstr, num, wallt, c*1e-9, 100*c/tot);
    }
}

static void print_gputimes(FILE *fplog, const char *name,
                           int n, double t, double tot_t)
{
    char num[11];
    char avg_perf[11];

    if (n > 0)
    {
        snprintf(num, sizeof(num), "%10d", n);
        snprintf(avg_perf, sizeof(avg_perf), "%10.3f", t/n);
    }
    else
    {
        sprintf(num, "          ");
        sprintf(avg_perf, "          ");
    }
    if (t != tot_t)
    {
        fprintf(fplog, " %-29s %10s%12.3f   %s   %5.1f\n",
                name, num, t/1000, avg_perf, 100 * t/tot_t);
    }
    else
    {
        /* The totals row */
        fprintf(fplog, " %-29s %10s%12.3f   %s   %5.1f\n",
                name, "", t/1000, avg_perf, 100.0);
    }
}

void wallcycle_print(FILE *fplog, int nnodes, int npme, double realtime,
                     gmx_wallcycle_t wc, wallclock_gpu_t *gpu_t)
{
    double     *cycles;
    double      c2t, tot, tot_gpu, tot_cpu_overlap, gpu_cpu_ratio, sum, tot_k;
    int         i, j, npp, nth_pp, nth_pme;
    char        buf[STRLEN];
    const char *hline = "-----------------------------------------------------------------------------";

    if (wc == NULL)
    {
        return;
    }

    nth_pp  = wc->nthreads_pp;
    nth_pme = wc->nthreads_pme;

    cycles = wc->cycles_sum;

    if (npme > 0)
    {
        npp = nnodes - npme;
    }
    else
    {
        npp  = nnodes;
        npme = nnodes;
    }
    tot = cycles[ewcRUN];

    /* Conversion factor from cycles to seconds */
    if (tot > 0)
    {
        c2t = realtime/tot;
    }
    else
    {
        c2t = 0;
    }

    fprintf(fplog, "\n     R E A L   C Y C L E   A N D   T I M E   A C C O U N T I N G\n\n");

    fprintf(fplog, " Computing:         Nodes     Th.     Count  Wall t (s)     G-Cycles       %c\n", '%');
    fprintf(fplog, "%s\n", hline);
    sum = 0;
    for (i = ewcPPDURINGPME+1; i < ewcNR; i++)
    {
        if (!is_pme_subcounter(i))
        {
            print_cycles(fplog, c2t, wcn[i], nnodes,
                         is_pme_counter(i) ? npme : npp,
                         is_pme_counter(i) ? nth_pme : nth_pp,
                         wc->wcc[i].n, cycles[i], tot);
            sum += cycles[i];
        }
    }
    if (wc->wcc_all != NULL)
    {
        for (i = 0; i < ewcNR; i++)
        {
            for (j = 0; j < ewcNR; j++)
            {
                snprintf(buf, 9, "%-9s", wcn[i]);
                buf[9]  = ' ';
                snprintf(buf+10, 9, "%-9s", wcn[j]);
                buf[19] = '\0';
                print_cycles(fplog, c2t, buf, nnodes,
                             is_pme_counter(i) ? npme : npp,
                             is_pme_counter(i) ? nth_pme : nth_pp,
                             wc->wcc_all[i*ewcNR+j].n,
                             wc->wcc_all[i*ewcNR+j].c,
                             tot);
            }
        }
    }
    print_cycles(fplog, c2t, "Rest", npp, npp, -1, 0, tot-sum, tot);
    fprintf(fplog, "%s\n", hline);
    print_cycles(fplog, c2t, "Total", nnodes, nnodes, -1, 0, tot, tot);
    fprintf(fplog, "%s\n", hline);

    if (wc->wcc[ewcPMEMESH].n > 0)
    {
        fprintf(fplog, "%s\n", hline);
        for (i = ewcPPDURINGPME+1; i < ewcNR; i++)
        {
            if (is_pme_subcounter(i))
            {
                print_cycles(fplog, c2t, wcn[i], nnodes,
                             is_pme_counter(i) ? npme : npp,
                             is_pme_counter(i) ? nth_pme : nth_pp,
                             wc->wcc[i].n, cycles[i], tot);
            }
        }
        fprintf(fplog, "%s\n", hline);
    }

#ifdef GMX_CYCLE_SUBCOUNTERS
    fprintf(fplog, "%s\n", hline);
    for (i = 0; i < ewcsNR; i++)
    {
        print_cycles(fplog, c2t, wcsn[i], nnodes, npp, nth_pp,
                     wc->wcsc[i].n, cycles[ewcNR+i], tot);
    }
    fprintf(fplog, "%s\n", hline);
#endif

    /* print GPU timing summary */
    if (gpu_t)
    {
        const char *k_log_str[2][2] = {
            {"Nonbonded F kernel", "Nonbonded F+ene k."},
            {"Nonbonded F+prune k.", "Nonbonded F+ene+prune k."}};

        tot_gpu = gpu_t->pl_h2d_t + gpu_t->nb_h2d_t + gpu_t->nb_d2h_t;

        /* add up the kernel timings */
        tot_k = 0.0;
        for (i = 0; i < 2; i++)
        {
            for (j = 0; j < 2; j++)
            {
                tot_k += gpu_t->ktime[i][j].t;
            }
        }
        tot_gpu += tot_k;

        tot_cpu_overlap = wc->wcc[ewcFORCE].c;
        if (wc->wcc[ewcPMEMESH].n > 0)
        {
            tot_cpu_overlap += wc->wcc[ewcPMEMESH].c;
        }
        tot_cpu_overlap *= c2t * 1000; /* convert s to ms */

        fprintf(fplog, "\n GPU timings\n%s\n", hline);
        fprintf(fplog, " Computing:                         Count  Wall t (s)      ms/step       %c\n", '%');
        fprintf(fplog, "%s\n", hline);
        print_gputimes(fplog, "Pair list H2D",
                       gpu_t->pl_h2d_c, gpu_t->pl_h2d_t, tot_gpu);
        print_gputimes(fplog, "X / q H2D",
                       gpu_t->nb_c, gpu_t->nb_h2d_t, tot_gpu);

        for (i = 0; i < 2; i++)
        {
            for (j = 0; j < 2; j++)
            {
                if (gpu_t->ktime[i][j].c)
                {
                    print_gputimes(fplog, k_log_str[i][j],
                                   gpu_t->ktime[i][j].c, gpu_t->ktime[i][j].t, tot_gpu);
                }
            }
        }

        print_gputimes(fplog, "F D2H", gpu_t->nb_c, gpu_t->nb_d2h_t, tot_gpu);
        fprintf(fplog, "%s\n", hline);
        print_gputimes(fplog, "Total ", gpu_t->nb_c, tot_gpu, tot_gpu);
        fprintf(fplog, "%s\n", hline);

        gpu_cpu_ratio = tot_gpu/tot_cpu_overlap;
        fprintf(fplog, "\nForce evaluation time GPU/CPU: %.3f ms/%.3f ms = %.3f\n",
                tot_gpu/gpu_t->nb_c, tot_cpu_overlap/wc->wcc[ewcFORCE].n,
                gpu_cpu_ratio);

        /* only print notes related to CPU-GPU load balance with PME */
        if (wc->wcc[ewcPMEMESH].n > 0)
        {
            fprintf(fplog, "For optimal performance this ratio should be close to 1!\n");

            /* print note if the imbalance is high with PME case in which
             * CPU-GPU load balancing is possible */
            if (gpu_cpu_ratio < 0.75 || gpu_cpu_ratio > 1.2)
            {
                /* Only the sim master calls this function, so always print to stderr */
                if (gpu_cpu_ratio < 0.75)
                {
                    if (npp > 1)
                    {
                        /* The user could have used -notunepme,
                         * but we currently can't check that here.
                         */
                        md_print_warn(NULL, fplog,
                                      "\nNOTE: The GPU has >25%% less load than the CPU. This imbalance causes\n"
                                      "      performance loss. Maybe the domain decomposition limits the PME tuning.\n"
                                      "      In that case, try setting the DD grid manually (-dd) or lowering -dds.");
                    }
                    else
                    {
                        /* We should not end up here, unless the box is
                         * too small for increasing the cut-off for PME tuning.
                         */
                        md_print_warn(NULL, fplog,
                                      "\nNOTE: The GPU has >25%% less load than the CPU. This imbalance causes\n"
                                      "      performance loss.");
                    }
                }
                if (gpu_cpu_ratio > 1.2)
                {
                    md_print_warn(NULL, fplog,
                                  "\nNOTE: The GPU has >20%% more load than the CPU. This imbalance causes\n"
                                  "      performance loss, consider using a shorter cut-off and a finer PME grid.");
                }
            }
        }
    }

    if (wc->wcc[ewcNB_XF_BUF_OPS].n > 0 &&
        (cycles[ewcDOMDEC] > tot*0.1 ||
         cycles[ewcNS] > tot*0.1))
    {
        /* Only the sim master calls this function, so always print to stderr */
        if (wc->wcc[ewcDOMDEC].n == 0)
        {
            md_print_warn(NULL, fplog,
                          "NOTE: %d %% of the run time was spent in pair search,\n"
                          "      you might want to increase nstlist (this has no effect on accuracy)\n",
                          (int)(100*cycles[ewcNS]/tot+0.5));
        }
        else
        {
            md_print_warn(NULL, fplog,
                          "NOTE: %d %% of the run time was spent in domain decomposition,\n"
                          "      %d %% of the run time was spent in pair search,\n"
                          "      you might want to increase nstlist (this has no effect on accuracy)\n",
                          (int)(100*cycles[ewcDOMDEC]/tot+0.5),
                          (int)(100*cycles[ewcNS]/tot+0.5));
        }
    }

    if (cycles[ewcMoveE] > tot*0.05)
    {
        /* Only the sim master calls this function, so always print to stderr */
        md_print_warn(NULL, fplog,
                      "NOTE: %d %% of the run time was spent communicating energies,\n"
                      "      you might want to use the -gcom option of mdrun\n",
                      (int)(100*cycles[ewcMoveE]/tot+0.5));
    }
}

extern gmx_large_int_t wcycle_get_reset_counters(gmx_wallcycle_t wc)
{
    if (wc == NULL)
    {
        return -1;
    }

    return wc->reset_counters;
}

extern void wcycle_set_reset_counters(gmx_wallcycle_t wc, gmx_large_int_t reset_counters)
{
    if (wc == NULL)
        return;

    wc->reset_counters = reset_counters;
}

#ifdef GMX_CYCLE_SUBCOUNTERS

void wallcycle_sub_start(gmx_wallcycle_t wc, int ewcs)
{
    if (wc != NULL)
    {
        wc->wcsc[ewcs].start = gmx_cycles_read();
    }
}

void wallcycle_sub_stop(gmx_wallcycle_t wc, int ewcs)
{
    if (wc != NULL)
    {
        wc->wcsc[ewcs].c += gmx_cycles_read() - wc->wcsc[ewcs].start;
        wc->wcsc[ewcs].n++;
    }
}
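
/* Illustrative subcounter use (a sketch; only compiled with
 * GMX_CYCLE_SUBCOUNTERS, and assuming a subcounter id such as
 * ewcsBONDED from gmx_wallcycle.h):
 *
 *     wallcycle_sub_start(wc, ewcsBONDED);
 *     ...                                  (bonded force kernels)
 *     wallcycle_sub_stop(wc, ewcsBONDED);
 *
 * Unlike the main counters, subcounters skip the MPI barrier and the
 * DEBUG_WCYCLE nesting checks, so they are cheap enough for inner loops.
 */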

#endif /* GMX_CYCLE_SUBCOUNTERS */