/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2008, The GROMACS development team.
 * Copyright (c) 2013,2014,2015, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "wallcycle.h"

#include <stdio.h>
#include <stdlib.h>

#include "gromacs/legacyheaders/md_logging.h"
#include "gromacs/legacyheaders/types/commrec.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/snprintf.h"
/* DEBUG_WCYCLE adds consistency checking for the counters.
 * It checks whether you stop a counter other than the last
 * one that was opened and whether you nest too deeply.
 */
/* #define DEBUG_WCYCLE */

#ifdef DEBUG_WCYCLE
#include "gromacs/utility/fatalerror.h"
#endif
/* One counter: the number of times it was started (n), the accumulated
 * cycle count (c), and the cycle reading at the most recent start. */
typedef struct
{
    int          n;
    gmx_cycles_t c;
    gmx_cycles_t start;
} wallcc_t;

typedef struct gmx_wallcycle
{
    wallcc_t        *wcc;
    /* variables for testing/debugging */
    gmx_bool         wc_barrier;
    wallcc_t        *wcc_all;
    int              wc_depth;
#ifdef DEBUG_WCYCLE
#define DEPTH_MAX 6
    int              counterlist[DEPTH_MAX];
    int              count_depth;
#endif
    int              ewc_prev;
    gmx_cycles_t     cycle_prev;
    gmx_int64_t      reset_counters;
    MPI_Comm         mpi_comm_mygroup;
    int              nthreads_pp;
    int              nthreads_pme;
#ifdef GMX_CYCLE_SUBCOUNTERS
    wallcc_t        *wcsc;
#endif
    double          *cycles_sum;
} gmx_wallcycle_t_t;
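
/* GMX_CYCLE_SUBCOUNTERS is a build-time option: when it is defined, the
 * wcsc array above is allocated and the finer-grained PP subcounters
 * (named in wcsn below) are timed in addition to the main counters. */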
/* Each name should not exceed 19 printing characters
   (i.e. the terminating null can be the twentieth) */
static const char *wcn[ewcNR] =
{
    "Run", "Step", "PP during PME", "Domain decomp.", "DD comm. load",
    "DD comm. bounds", "Vsite constr.", "Send X to PME", "Neighbor search", "Launch GPU ops.",
    "Comm. coord.", "Born radii", "Force", "Wait + Comm. F", "PME mesh",
    "PME redist. X/F", "PME spread/gather", "PME 3D-FFT", "PME 3D-FFT Comm.", "PME solve LJ", "PME solve Elec",
    "PME wait for PP", "Wait + Recv. PME F", "Wait GPU nonlocal", "Wait GPU local", "Wait GPU loc. est.", "NB X/F buffer ops.",
    "Vsite spread", "COM pull force",
    "Write traj.", "Update", "Constraints", "Comm. energies",
    "Enforced rotation", "Add rot. forces", "Coordinate swapping", "IMD", "Test"
};
static const char *wcsn[ewcsNR] =
{
    "DD redist.", "DD NS grid + sort", "DD setup comm.",
    "DD make top.", "DD make constr.", "DD top. other",
    "NS grid local", "NS grid non-loc.", "NS search local", "NS search non-loc.",
    "Bonded F",
    "Bonded-FEP F",
    "Restraints F",
    "Listed buffer ops.",
    "Nonbonded F",
    "Ewald F correction",
    "NB X buffer ops.",
    "NB F buffer ops."
};
gmx_bool wallcycle_have_counter(void)
{
    return gmx_cycles_have_counter();
}
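
/* If the platform provides no usable cycle counter, wallcycle_init() below
 * returns NULL, and most other entry points check for and silently accept
 * the NULL handle. */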
gmx_wallcycle_t wallcycle_init(FILE *fplog, int resetstep, t_commrec gmx_unused *cr,
                               int nthreads_pp, int nthreads_pme)
{
    gmx_wallcycle_t wc;

    if (!wallcycle_have_counter())
    {
        return NULL;
    }

    snew(wc, 1);

    wc->wc_barrier     = FALSE;
    wc->wcc_all        = NULL;
    wc->wc_depth       = 0;
    wc->ewc_prev       = -1;
    wc->reset_counters = resetstep;
    wc->nthreads_pp    = nthreads_pp;
    wc->nthreads_pme   = nthreads_pme;
    wc->cycles_sum     = NULL;

    if (PAR(cr) && getenv("GMX_CYCLE_BARRIER") != NULL)
    {
        if (fplog)
        {
            fprintf(fplog, "\nWill call MPI_Barrier before each cycle start/stop call\n\n");
        }
        wc->wc_barrier       = TRUE;
        wc->mpi_comm_mygroup = cr->mpi_comm_mygroup;
    }

    snew(wc->wcc, ewcNR);
    if (getenv("GMX_CYCLE_ALL") != NULL)
    {
        if (fplog)
        {
            fprintf(fplog, "\nWill time all the code during the run\n\n");
        }
        snew(wc->wcc_all, ewcNR*ewcNR);
    }

#ifdef GMX_CYCLE_SUBCOUNTERS
    snew(wc->wcsc, ewcsNR);
#endif

#ifdef DEBUG_WCYCLE
    wc->count_depth = 0;
#endif

    return wc;
}
void wallcycle_destroy(gmx_wallcycle_t wc)
{
    if (wc == NULL)
    {
        return;
    }

    if (wc->wcc != NULL)
    {
        sfree(wc->wcc);
    }
    if (wc->wcc_all != NULL)
    {
        sfree(wc->wcc_all);
    }
#ifdef GMX_CYCLE_SUBCOUNTERS
    if (wc->wcsc != NULL)
    {
        sfree(wc->wcsc);
    }
#endif
    sfree(wc);
}
static void wallcycle_all_start(gmx_wallcycle_t wc, int ewc, gmx_cycles_t cycle)
{
    wc->ewc_prev   = ewc;
    wc->cycle_prev = cycle;
}

static void wallcycle_all_stop(gmx_wallcycle_t wc, int ewc, gmx_cycles_t cycle)
{
    wc->wcc_all[wc->ewc_prev*ewcNR+ewc].n += 1;
    wc->wcc_all[wc->ewc_prev*ewcNR+ewc].c += cycle - wc->cycle_prev;
}
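
/* Note on the indexing above: when GMX_CYCLE_ALL is active, wcc_all is an
 * ewcNR x ewcNR matrix in which entry [prev*ewcNR + cur] accumulates the
 * cycles spent between the previous counter event and the current one, so
 * time can be attributed pairwise to transitions between main counters. */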
#ifdef DEBUG_WCYCLE
static void debug_start_check(gmx_wallcycle_t wc, int ewc)
{
    /* fprintf(stderr,"wcycle_start depth %d, %s\n",wc->count_depth,wcn[ewc]); */

    if (wc->count_depth < 0 || wc->count_depth >= DEPTH_MAX)
    {
        gmx_fatal(FARGS, "wallcycle counter depth out of range: %d",
                  wc->count_depth);
    }
    wc->counterlist[wc->count_depth] = ewc;
    wc->count_depth++;
}

static void debug_stop_check(gmx_wallcycle_t wc, int ewc)
{
    wc->count_depth--;

    /* fprintf(stderr,"wcycle_stop depth %d, %s\n",wc->count_depth,wcn[ewc]); */

    if (wc->count_depth < 0)
    {
        gmx_fatal(FARGS, "wallcycle counter depth out of range when stopping %s: %d", wcn[ewc], wc->count_depth);
    }
    if (wc->counterlist[wc->count_depth] != ewc)
    {
        gmx_fatal(FARGS, "wallcycle mismatch at stop, start %s, stop %s",
                  wcn[wc->counterlist[wc->count_depth]], wcn[ewc]);
    }
}
#endif
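
/* These checks enforce a stack (LIFO) discipline on the counters. A sketch
 * of the kind of misuse they catch (not code from this file):
 *
 *     wallcycle_start(wc, ewcFORCE);
 *     wallcycle_start(wc, ewcUPDATE);
 *     wallcycle_stop(wc, ewcFORCE);    // fatal: ewcUPDATE is still open
 */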
void wallcycle_start(gmx_wallcycle_t wc, int ewc)
{
    gmx_cycles_t cycle;

    if (wc == NULL)
    {
        return;
    }

    if (wc->wc_barrier)
    {
        MPI_Barrier(wc->mpi_comm_mygroup);
    }

#ifdef DEBUG_WCYCLE
    debug_start_check(wc, ewc);
#endif

    cycle              = gmx_cycles_read();
    wc->wcc[ewc].start = cycle;
    if (wc->wcc_all != NULL)
    {
        wc->wc_depth++;
        if (ewc == ewcRUN)
        {
            wallcycle_all_start(wc, ewc, cycle);
        }
        else if (wc->wc_depth == 3)
        {
            wallcycle_all_stop(wc, ewc, cycle);
        }
    }
}
void wallcycle_start_nocount(gmx_wallcycle_t wc, int ewc)
{
    if (wc == NULL)
    {
        return;
    }

    wallcycle_start(wc, ewc);
    /* Cancel out the count that the matching wallcycle_stop will add */
    wc->wcc[ewc].n--;
}
double wallcycle_stop(gmx_wallcycle_t wc, int ewc)
{
    gmx_cycles_t cycle, last;

    if (wc == NULL)
    {
        return 0;
    }

    if (wc->wc_barrier)
    {
        MPI_Barrier(wc->mpi_comm_mygroup);
    }

#ifdef DEBUG_WCYCLE
    debug_stop_check(wc, ewc);
#endif

    cycle           = gmx_cycles_read();
    last            = cycle - wc->wcc[ewc].start;
    wc->wcc[ewc].c += last;
    wc->wcc[ewc].n++;
    if (wc->wcc_all != NULL)
    {
        wc->wc_depth--;
        if (ewc == ewcRUN)
        {
            wallcycle_all_stop(wc, ewc, cycle);
        }
        else if (wc->wc_depth == 2)
        {
            wallcycle_all_start(wc, ewc, cycle);
        }
    }

    return last;
}
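
/* Typical usage pattern (a sketch, not code from this file): every
 * wallcycle_start() must be matched by a wallcycle_stop() with the same
 * counter, and nested counters must be stopped before their parent:
 *
 *     wallcycle_start(wc, ewcFORCE);
 *     do_force_calculation();              // hypothetical work function
 *     cycles = wallcycle_stop(wc, ewcFORCE);
 */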
void wallcycle_get(gmx_wallcycle_t wc, int ewc, int *n, double *c)
{
    *n = wc->wcc[ewc].n;
    *c = (double)wc->wcc[ewc].c;
}
void wallcycle_reset_all(gmx_wallcycle_t wc)
{
    int i;

    if (wc == NULL)
    {
        return;
    }

    for (i = 0; i < ewcNR; i++)
    {
        wc->wcc[i].n = 0;
        wc->wcc[i].c = 0;
    }
    if (wc->wcc_all != NULL)
    {
        for (i = 0; i < ewcNR*ewcNR; i++)
        {
            wc->wcc_all[i].n = 0;
            wc->wcc_all[i].c = 0;
        }
    }
#ifdef GMX_CYCLE_SUBCOUNTERS
    for (i = 0; i < ewcsNR; i++)
    {
        wc->wcsc[i].n = 0;
        wc->wcsc[i].c = 0;
    }
#endif
}
static gmx_bool is_pme_counter(int ewc)
{
    return (ewc >= ewcPMEMESH && ewc <= ewcPMEWAITCOMM);
}

static gmx_bool is_pme_subcounter(int ewc)
{
    return (ewc >= ewcPME_REDISTXF && ewc < ewcPMEWAITCOMM);
}
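
/* Both range checks above rely on the enum order in wallcycle.h: the PME
 * counters from ewcPMEMESH through ewcPMEWAITCOMM must stay contiguous,
 * with the subcounters starting at ewcPME_REDISTXF, or these tests break. */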
void wallcycle_sum(t_commrec *cr, gmx_wallcycle_t wc)
{
    wallcc_t *wcc;
    double    cycles[ewcNR+ewcsNR];
    double    cycles_n[ewcNR+ewcsNR], buf[ewcNR+ewcsNR], *cyc_all, *buf_all;
    int       i, j;
    int       nsum;

    if (wc == NULL)
    {
        return;
    }

    snew(wc->cycles_sum, ewcNR+ewcsNR);

    wcc = wc->wcc;

    /* The GPU wait estimate counter is used for load balancing only
     * and will mess up the total due to double counting: clear it.
     */
    wcc[ewcWAIT_GPU_NB_L_EST].n = 0;
    wcc[ewcWAIT_GPU_NB_L_EST].c = 0;

    for (i = 0; i < ewcNR; i++)
    {
        if (is_pme_counter(i) || (i == ewcRUN && cr->duty == DUTY_PME))
        {
            wcc[i].c *= wc->nthreads_pme;

            if (wc->wcc_all != NULL)
            {
                for (j = 0; j < ewcNR; j++)
                {
                    wc->wcc_all[i*ewcNR+j].c *= wc->nthreads_pme;
                }
            }
        }
        else
        {
            wcc[i].c *= wc->nthreads_pp;

            if (wc->wcc_all != NULL)
            {
                for (j = 0; j < ewcNR; j++)
                {
                    wc->wcc_all[i*ewcNR+j].c *= wc->nthreads_pp;
                }
            }
        }
    }

    /* Counters timed inside another counter are subtracted from the
     * enclosing counter, so the table does not double count. */
    if (wcc[ewcDDCOMMLOAD].n > 0)
    {
        wcc[ewcDOMDEC].c -= wcc[ewcDDCOMMLOAD].c;
    }
    if (wcc[ewcDDCOMMBOUND].n > 0)
    {
        wcc[ewcDOMDEC].c -= wcc[ewcDDCOMMBOUND].c;
    }
    if (wcc[ewcPME_FFTCOMM].n > 0)
    {
        wcc[ewcPME_FFT].c -= wcc[ewcPME_FFTCOMM].c;
    }

    if (cr->npmenodes == 0)
    {
        /* All nodes do PME (or no PME at all) */
        if (wcc[ewcPMEMESH].n > 0)
        {
            wcc[ewcFORCE].c -= wcc[ewcPMEMESH].c;
        }
    }
    else
    {
        /* There are PME-only nodes */
        if (wcc[ewcPMEMESH].n > 0)
        {
            /* This must be a PME-only node, calculate the Wait + Comm. time */
            wcc[ewcPMEWAITCOMM].c = wcc[ewcRUN].c - wcc[ewcPMEMESH].c;
        }
    }

    /* Store the cycles in a double buffer for summing */
    for (i = 0; i < ewcNR; i++)
    {
        cycles_n[i] = (double)wcc[i].n;
        cycles[i]   = (double)wcc[i].c;
    }
    nsum = ewcNR;
#ifdef GMX_CYCLE_SUBCOUNTERS
    for (i = 0; i < ewcsNR; i++)
    {
        wc->wcsc[i].c    *= wc->nthreads_pp;
        cycles_n[ewcNR+i] = (double)wc->wcsc[i].n;
        cycles[ewcNR+i]   = (double)wc->wcsc[i].c;
    }
    nsum += ewcsNR;
#endif

    if (cr->nnodes > 1)
    {
        MPI_Allreduce(cycles_n, buf, nsum, MPI_DOUBLE, MPI_MAX,
                      cr->mpi_comm_mysim);
        for (i = 0; i < ewcNR; i++)
        {
            wcc[i].n = (int)(buf[i] + 0.5);
        }
#ifdef GMX_CYCLE_SUBCOUNTERS
        for (i = 0; i < ewcsNR; i++)
        {
            wc->wcsc[i].n = (int)(buf[ewcNR+i] + 0.5);
        }
#endif

        MPI_Allreduce(cycles, wc->cycles_sum, nsum, MPI_DOUBLE, MPI_SUM,
                      cr->mpi_comm_mysim);

        if (wc->wcc_all != NULL)
        {
            snew(cyc_all, ewcNR*ewcNR);
            snew(buf_all, ewcNR*ewcNR);
            for (i = 0; i < ewcNR*ewcNR; i++)
            {
                cyc_all[i] = wc->wcc_all[i].c;
            }
            MPI_Allreduce(cyc_all, buf_all, ewcNR*ewcNR, MPI_DOUBLE, MPI_SUM,
                          cr->mpi_comm_mysim);
            for (i = 0; i < ewcNR*ewcNR; i++)
            {
                wc->wcc_all[i].c = buf_all[i];
            }
            sfree(buf_all);
            sfree(cyc_all);
        }
    }
    else
    {
        for (i = 0; i < nsum; i++)
        {
            wc->cycles_sum[i] = cycles[i];
        }
    }
}
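
/* Reduction rationale: call counts are combined with MPI_MAX, which picks up
 * the count from the ranks that actually used each counter, while cycle
 * counts are combined with MPI_SUM so the table reports the total cost
 * accumulated over all ranks and threads. */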
static void print_cycles(FILE *fplog, double c2t, const char *name,
                         int nnodes, int nthreads,
                         int ncalls, double c_sum, double tot)
{
    char   nnodes_str[6];
    char   nthreads_str[6];
    char   ncalls_str[11];
    double wallt;
    double percentage = (tot > 0.) ? (100. * c_sum / tot) : 0.;

    if (c_sum > 0)
    {
        if (ncalls > 0)
        {
            snprintf(ncalls_str, sizeof(ncalls_str), "%10d", ncalls);
            if (nnodes < 0)
            {
                snprintf(nnodes_str, sizeof(nnodes_str), "N/A");
            }
            else
            {
                snprintf(nnodes_str, sizeof(nnodes_str), "%4d", nnodes);
            }
            if (nthreads < 0)
            {
                snprintf(nthreads_str, sizeof(nthreads_str), "N/A");
            }
            else
            {
                snprintf(nthreads_str, sizeof(nthreads_str), "%4d", nthreads);
            }
        }
        else
        {
            nnodes_str[0]   = 0;
            nthreads_str[0] = 0;
            ncalls_str[0]   = 0;
        }
        /* Convert the cycle count to wallclock time for this task */
        wallt = c_sum*c2t;

        fprintf(fplog, " %-19.19s %4s %4s %10s %10.3f %14.3f %5.1f\n",
                name, nnodes_str, nthreads_str, ncalls_str, wallt,
                c_sum*1e-9, percentage);
    }
}
static void print_gputimes(FILE *fplog, const char *name,
                           int n, double t, double tot_t)
{
    char num[11];
    char avg_perf[11];

    if (n > 0)
    {
        snprintf(num, sizeof(num), "%10d", n);
        snprintf(avg_perf, sizeof(avg_perf), "%10.3f", t/n);
    }
    else
    {
        sprintf(num, "          ");
        sprintf(avg_perf, "          ");
    }
    if (t != tot_t && tot_t > 0)
    {
        fprintf(fplog, " %-29s %10s%12.3f %s %5.1f\n",
                name, num, t/1000, avg_perf, 100 * t/tot_t);
    }
    else
    {
        fprintf(fplog, " %-29s %10s%12.3f %s %5.1f\n",
                name, "", t/1000, avg_perf, 100.0);
    }
}
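
/* Column note: the GPU timers in gpu_t report milliseconds, so t/1000 fills
 * the "Wall t (s)" column, while t/n is the average in ms per step for the
 * ms/step column. */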
static void print_header(FILE *fplog, int nrank_pp, int nth_pp, int nrank_pme, int nth_pme)
{
    int nrank_tot = nrank_pp + nrank_pme;

    if (0 == nrank_pme)
    {
        fprintf(fplog, "On %d MPI rank%s", nrank_tot, nrank_tot == 1 ? "" : "s");
        if (nth_pp > 1)
        {
            fprintf(fplog, ", each using %d OpenMP threads", nth_pp);
        }
        /* Don't report doing PP+PME, because we can't tell here if
         * this is RF, etc. */
    }
    else
    {
        fprintf(fplog, "On %d MPI rank%s doing PP", nrank_pp, nrank_pp == 1 ? "" : "s");
        if (nth_pp > 1)
        {
            fprintf(fplog, ",%s using %d OpenMP threads", nrank_pp > 1 ? " each" : "", nth_pp);
        }
        fprintf(fplog, ", and\non %d MPI rank%s doing PME", nrank_pme, nrank_pme == 1 ? "" : "s");
        if (nth_pme > 1)
        {
            fprintf(fplog, ",%s using %d OpenMP threads", nrank_pme > 1 ? " each" : "", nth_pme);
        }
    }

    fprintf(fplog, "\n\n");
    fprintf(fplog, " Computing:          Num   Num      Call    Wall time         Giga-Cycles\n");
    fprintf(fplog, "                     Ranks Threads  Count      (s)         total sum    %%\n");
}
void wallcycle_print(FILE *fplog, int nnodes, int npme, double realtime,
                     gmx_wallcycle_t wc, struct gmx_wallclock_gpu_t *gpu_t)
{
    double      *cyc_sum;
    double       tot, tot_for_pp, tot_for_rest, tot_gpu, tot_cpu_overlap, gpu_cpu_ratio, tot_k;
    double       c2t, c2t_pp, c2t_pme = 0;
    int          i, j, npp, nth_pp, nth_pme, nth_tot;
    char         buf[STRLEN];
    const char  *hline = "-----------------------------------------------------------------------------";

    if (wc == NULL)
    {
        return;
    }

    nth_pp  = wc->nthreads_pp;
    nth_pme = wc->nthreads_pme;

    cyc_sum = wc->cycles_sum;

    /* npme is the number of PME-only ranks used, and we always do PP work */
    npp     = nnodes - npme;
    nth_tot = npp*nth_pp + npme*nth_pme;

    /* When using PME-only nodes, the next line is valid for both
       PP-only and PME-only nodes because they started ewcRUN at the
       same time. */
    tot        = cyc_sum[ewcRUN];
    tot_for_pp = 0;

    if (tot <= 0.0)
    {
        /* TODO This is heavy handed, but until someone reworks the
           code so that it is provably robust with respect to
           non-positive values for all possible timer and cycle
           counters, there is less value gained from printing whatever
           timing data might still be sensible for some non-Jenkins
           run, than is lost from diagnosing Jenkins FP exceptions on
           runs about whose execution time we don't care. */
        md_print_warn(NULL, fplog,
                      "WARNING: A total of %f CPU cycles was recorded, so mdrun cannot print a time accounting\n",
                      tot);
        return;
    }

    /* Conversion factor from cycles to seconds */
    c2t     = realtime/tot;
    c2t_pp  = c2t * nth_tot / (double) (npp*nth_pp);
    if (npme > 0)
    {
        c2t_pme = c2t * nth_tot / (double) (npme*nth_pme);
    }
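
    /* Worked example of the conversion above (illustrative numbers only):
     * with realtime = 100 s, tot = 3e11 thread-cycles, 4 PP ranks with
     * 2 threads each and no PME ranks, nth_tot = 8 and c2t_pp = c2t =
     * 100/3e11 s per thread-cycle, so a counter that accumulated 3e9
     * cycles is printed as roughly 1 wallclock second for that task. */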
    fprintf(fplog, "\n R E A L   C Y C L E   A N D   T I M E   A C C O U N T I N G\n\n");

    print_header(fplog, npp, nth_pp, npme, nth_pme);

    fprintf(fplog, "%s\n", hline);
    for (i = ewcPPDURINGPME+1; i < ewcNR; i++)
    {
        if (is_pme_subcounter(i))
        {
            /* Do not count these at all */
        }
        else if (npme > 0 && is_pme_counter(i))
        {
            /* Print timing information for PME-only nodes, but add an
             * asterisk so the reader of the table can know that the
             * walltimes are not meant to add up. The asterisk still
             * fits in the required maximum of 19 characters. */
            char buffer[STRLEN];
            snprintf(buffer, STRLEN, "%s *", wcn[i]);
            print_cycles(fplog, c2t_pme, buffer,
                         npme, nth_pme,
                         wc->wcc[i].n, cyc_sum[i], tot);
        }
        else
        {
            /* Print timing information when it is for a PP or PP+PME
               node */
            print_cycles(fplog, c2t_pp, wcn[i],
                         npp, nth_pp,
                         wc->wcc[i].n, cyc_sum[i], tot);
            tot_for_pp += cyc_sum[i];
        }
    }
    if (wc->wcc_all != NULL)
    {
        for (i = 0; i < ewcNR; i++)
        {
            for (j = 0; j < ewcNR; j++)
            {
                snprintf(buf, 20, "%-9.9s %-9.9s", wcn[i], wcn[j]);
                print_cycles(fplog, c2t_pp, buf,
                             npp, nth_pp,
                             wc->wcc_all[i*ewcNR+j].n,
                             wc->wcc_all[i*ewcNR+j].c,
                             tot);
            }
        }
    }
    tot_for_rest = tot * (npp * nth_pp) / (double) nth_tot;
    print_cycles(fplog, c2t_pp, "Rest",
                 npp, nth_pp,
                 -1, tot_for_rest - tot_for_pp, tot);
    fprintf(fplog, "%s\n", hline);
    print_cycles(fplog, c2t, "Total",
                 npp, nth_pp,
                 -1, tot, tot);
    fprintf(fplog, "%s\n", hline);

    if (npme > 0)
    {
        fprintf(fplog,
                "(*) Note that with separate PME ranks, the walltime column actually sums to\n"
                "    twice the total reported, but the cycle count total and %% are correct.\n"
                "%s\n", hline);
    }
    if (wc->wcc[ewcPMEMESH].n > 0)
    {
        fprintf(fplog, " Breakdown of PME mesh computation\n");
        fprintf(fplog, "%s\n", hline);
        for (i = ewcPPDURINGPME+1; i < ewcNR; i++)
        {
            if (is_pme_subcounter(i))
            {
                print_cycles(fplog, npme > 0 ? c2t_pme : c2t_pp, wcn[i],
                             npme > 0 ? npme : npp, nth_pme,
                             wc->wcc[i].n, cyc_sum[i], tot);
            }
        }
        fprintf(fplog, "%s\n", hline);
    }
#ifdef GMX_CYCLE_SUBCOUNTERS
    fprintf(fplog, " Breakdown of PP computation\n");
    fprintf(fplog, "%s\n", hline);
    for (i = 0; i < ewcsNR; i++)
    {
        print_cycles(fplog, c2t_pp, wcsn[i],
                     npp, nth_pp,
                     wc->wcsc[i].n, cyc_sum[ewcNR+i], tot);
    }
    fprintf(fplog, "%s\n", hline);
#endif
    /* print GPU timing summary */
    if (gpu_t)
    {
        const char *k_log_str[2][2] = {
            {"Nonbonded F kernel", "Nonbonded F+ene k."},
            {"Nonbonded F+prune k.", "Nonbonded F+ene+prune k."}
        };

        tot_gpu = gpu_t->pl_h2d_t + gpu_t->nb_h2d_t + gpu_t->nb_d2h_t;

        /* add up the kernel timings */
        tot_k = 0.0;
        for (i = 0; i < 2; i++)
        {
            for (j = 0; j < 2; j++)
            {
                tot_k += gpu_t->ktime[i][j].t;
            }
        }
        tot_gpu += tot_k;

        tot_cpu_overlap = wc->wcc[ewcFORCE].c;
        if (wc->wcc[ewcPMEMESH].n > 0)
        {
            tot_cpu_overlap += wc->wcc[ewcPMEMESH].c;
        }
        tot_cpu_overlap *= realtime*1000/tot; /* convert cycles to ms */

        fprintf(fplog, "\n GPU timings\n%s\n", hline);
        fprintf(fplog, " Computing:                         Count  Wall t (s)      ms/step       %c\n", '%');
        fprintf(fplog, "%s\n", hline);
        print_gputimes(fplog, "Pair list H2D",
                       gpu_t->pl_h2d_c, gpu_t->pl_h2d_t, tot_gpu);
        print_gputimes(fplog, "X / q H2D",
                       gpu_t->nb_c, gpu_t->nb_h2d_t, tot_gpu);

        for (i = 0; i < 2; i++)
        {
            for (j = 0; j < 2; j++)
            {
                if (gpu_t->ktime[i][j].c)
                {
                    print_gputimes(fplog, k_log_str[i][j],
                                   gpu_t->ktime[i][j].c, gpu_t->ktime[i][j].t, tot_gpu);
                }
            }
        }

        print_gputimes(fplog, "F D2H", gpu_t->nb_c, gpu_t->nb_d2h_t, tot_gpu);
        fprintf(fplog, "%s\n", hline);
        print_gputimes(fplog, "Total ", gpu_t->nb_c, tot_gpu, tot_gpu);
        fprintf(fplog, "%s\n", hline);
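
        /* tot_gpu above is the sum of the transfer timers (pair-list H2D,
         * x/q H2D, F D2H) plus the accumulated kernel times, so the %
         * column of the GPU table is relative to all timed GPU activity. */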
        gpu_cpu_ratio = tot_gpu/tot_cpu_overlap;
        if (gpu_t->nb_c > 0 && wc->wcc[ewcFORCE].n > 0)
        {
            fprintf(fplog, "\nForce evaluation time GPU/CPU: %.3f ms/%.3f ms = %.3f\n",
                    tot_gpu/gpu_t->nb_c, tot_cpu_overlap/wc->wcc[ewcFORCE].n,
                    gpu_cpu_ratio);
        }

        /* only print notes related to CPU-GPU load balance with PME */
        if (wc->wcc[ewcPMEMESH].n > 0)
        {
            fprintf(fplog, "For optimal performance this ratio should be close to 1!\n");

            /* print note if the imbalance is high with PME case in which
             * CPU-GPU load balancing is possible */
            if (gpu_cpu_ratio < 0.75 || gpu_cpu_ratio > 1.2)
            {
                /* Only the sim master calls this function, so always print to stderr */
                if (gpu_cpu_ratio < 0.75)
                {
                    if (npp > 1)
                    {
                        /* The user could have used -notunepme,
                         * but we currently can't check that here.
                         */
                        md_print_warn(NULL, fplog,
                                      "\nNOTE: The GPU has >25%% less load than the CPU. This imbalance causes\n"
                                      "      performance loss. Maybe the domain decomposition limits the PME tuning.\n"
                                      "      In that case, try setting the DD grid manually (-dd) or lowering -dds.");
                    }
                    else
                    {
                        /* We should not end up here, unless the box is
                         * too small for increasing the cut-off for PME tuning.
                         */
                        md_print_warn(NULL, fplog,
                                      "\nNOTE: The GPU has >25%% less load than the CPU. This imbalance causes\n"
                                      "      performance loss.");
                    }
                }
                if (gpu_cpu_ratio > 1.2)
                {
                    md_print_warn(NULL, fplog,
                                  "\nNOTE: The GPU has >20%% more load than the CPU. This imbalance causes\n"
                                  "      performance loss, consider using a shorter cut-off and a finer PME grid.");
                }
            }
        }
    }
    if (wc->wc_barrier)
    {
        md_print_warn(NULL, fplog,
                      "MPI_Barrier was called before each cycle start/stop\n"
                      "call, so timings are not those of real runs.\n");
    }
    if (wc->wcc[ewcNB_XF_BUF_OPS].n > 0 &&
        (cyc_sum[ewcDOMDEC] > tot*0.1 ||
         cyc_sum[ewcNS] > tot*0.1))
    {
        /* Only the sim master calls this function, so always print to stderr */
        if (wc->wcc[ewcDOMDEC].n == 0)
        {
            md_print_warn(NULL, fplog,
                          "NOTE: %d %% of the run time was spent in pair search,\n"
                          "      you might want to increase nstlist (this has no effect on accuracy)\n",
                          (int)(100*cyc_sum[ewcNS]/tot+0.5));
        }
        else
        {
            md_print_warn(NULL, fplog,
                          "NOTE: %d %% of the run time was spent in domain decomposition,\n"
                          "      %d %% of the run time was spent in pair search,\n"
                          "      you might want to increase nstlist (this has no effect on accuracy)\n",
                          (int)(100*cyc_sum[ewcDOMDEC]/tot+0.5),
                          (int)(100*cyc_sum[ewcNS]/tot+0.5));
        }
    }

    if (cyc_sum[ewcMoveE] > tot*0.05)
    {
        /* Only the sim master calls this function, so always print to stderr */
        md_print_warn(NULL, fplog,
                      "NOTE: %d %% of the run time was spent communicating energies,\n"
                      "      you might want to use the -gcom option of mdrun\n",
                      (int)(100*cyc_sum[ewcMoveE]/tot+0.5));
    }
}
extern gmx_int64_t wcycle_get_reset_counters(gmx_wallcycle_t wc)
{
    if (wc == NULL)
    {
        return -1;
    }

    return wc->reset_counters;
}

extern void wcycle_set_reset_counters(gmx_wallcycle_t wc, gmx_int64_t reset_counters)
{
    if (wc == NULL)
    {
        return;
    }

    wc->reset_counters = reset_counters;
}
#ifdef GMX_CYCLE_SUBCOUNTERS

void wallcycle_sub_start(gmx_wallcycle_t wc, int ewcs)
{
    if (wc != NULL)
    {
        wc->wcsc[ewcs].start = gmx_cycles_read();
    }
}

void wallcycle_sub_start_nocount(gmx_wallcycle_t wc, int ewcs)
{
    if (wc != NULL)
    {
        wallcycle_sub_start(wc, ewcs);
        /* Cancel out the count that wallcycle_sub_stop will add */
        wc->wcsc[ewcs].n--;
    }
}

void wallcycle_sub_stop(gmx_wallcycle_t wc, int ewcs)
{
    if (wc != NULL)
    {
        wc->wcsc[ewcs].c += gmx_cycles_read() - wc->wcsc[ewcs].start;
        wc->wcsc[ewcs].n++;
    }
}

#else

/* No-op stubs, so callers do not need GMX_CYCLE_SUBCOUNTERS guards */
void wallcycle_sub_start(gmx_wallcycle_t gmx_unused wc, int gmx_unused ewcs)
{
}
void wallcycle_sub_start_nocount(gmx_wallcycle_t gmx_unused wc, int gmx_unused ewcs)
{
}
void wallcycle_sub_stop(gmx_wallcycle_t gmx_unused wc, int gmx_unused ewcs)
{
}

#endif /* GMX_CYCLE_SUBCOUNTERS */