2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2009,2010,2011,2012,2013,2014,2015,2016, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
46 #ifdef HAVE_SYS_TIME_H
50 #include "gromacs/commandline/pargs.h"
51 #include "gromacs/fft/calcgrid.h"
52 #include "gromacs/fileio/checkpoint.h"
53 #include "gromacs/fileio/tpxio.h"
54 #include "gromacs/gmxana/gmx_ana.h"
55 #include "gromacs/math/utilities.h"
56 #include "gromacs/math/vec.h"
57 #include "gromacs/mdlib/perf_est.h"
58 #include "gromacs/mdrunutility/mdmodules.h"
59 #include "gromacs/mdtypes/commrec.h"
60 #include "gromacs/mdtypes/inputrec.h"
61 #include "gromacs/mdtypes/md_enums.h"
62 #include "gromacs/pbcutil/pbc.h"
63 #include "gromacs/timing/walltime_accounting.h"
64 #include "gromacs/topology/topology.h"
65 #include "gromacs/utility/arraysize.h"
66 #include "gromacs/utility/baseversion.h"
67 #include "gromacs/utility/cstringutil.h"
68 #include "gromacs/utility/fatalerror.h"
69 #include "gromacs/utility/futil.h"
70 #include "gromacs/utility/gmxassert.h"
71 #include "gromacs/utility/smalloc.h"
73 /* Enum for situations that can occur during log file parsing, the
74 * corresponding string entries can be found in do_the_tests() in
75 * const char* ParseLog[] */
76 /* TODO clean up CamelCasing of these enum names */
82 eParselogResetProblem,
86 eParselogLargePrimeFactor,
87 eParselogMismatchOfNumberOfPPRanksAndAvailableGPUs,
/* NOTE(review): the fields below belong to the per-benchmark performance
 * record (t_perf, judging by the perfdata-> accesses later in this file) —
 * confirm against the full struct declaration, which is not visible here. */
96 int nPMEnodes; /* number of PME-only nodes used in this test */
97 int nx, ny, nz; /* DD grid */
98 int guessPME; /* if nPMEnodes == -1, this is the guessed number of PME nodes */
99 double *Gcycles; /* This can contain more than one value if doing multiple tests */
103 float *PME_f_load; /* PME mesh/force load average*/
104 float PME_f_load_Av; /* Average average ;) ... */
105 char *mdrun_cmd_line; /* Mdrun command line used for this test */
/* NOTE(review): the fields below hold the per-tpr input settings
 * (t_inputinfo, judging by the info-> accesses later in this file);
 * the arrays are indexed [0...nr_inputfiles]. */
111 int nr_inputfiles; /* The number of tpr and mdp input files */
112 gmx_int64_t orig_sim_steps; /* Number of steps to be done in the real simulation */
113 gmx_int64_t orig_init_step; /* Init step for the real simulation */
114 real *rcoulomb; /* The coulomb radii [0...nr_inputfiles] */
115 real *rvdw; /* The vdW radii */
116 real *rlist; /* Neighbourlist cutoff radius */
117 int *nkx, *nky, *nkz; /* Number of Fourier grid points in x,y,z (see make_benchmark_tprs) */
118 real *fsx, *fsy, *fsz; /* Fourierspacing in x,y,z dimension */
/* Write a horizontal separator line, framed by newlines, to stream fp. */
static void sep_line(FILE *fp)
{
    fprintf(fp, "\n------------------------------------------------------------\n");
}
/* Wrapper for system calls: every shell command this tool spawns goes
 * through here, so there is a single choke point for system(). Returns
 * the raw status reported by system() (0 on success on POSIX). */
static int gmx_system_call(char *command)
{
    return system(command);
}
135 /* Check if string starts with substring */
136 static gmx_bool str_starts(const char *string, const char *substring)
138 return ( std::strncmp(string, substring, std::strlen(substring)) == 0);
/* Reset the performance numbers of one repeat (test_nr) to zero so a
 * failed or aborted benchmark run cannot contribute stale values to the
 * averages computed in analyze_data(). */
142 static void cleandata(t_perf *perfdata, int test_nr)
144     perfdata->Gcycles[test_nr] = 0.0;
145     perfdata->ns_per_day[test_nr] = 0.0;
146     perfdata->PME_f_load[test_nr] = 0.0;
/* Delete file fn if it exists, announcing the deletion on stdout.
 * NOTE(review): the body is truncated in this listing — presumably an
 * existence check and a remove() call surround this message; confirm
 * against the full source. */
152 static void remove_if_exists(const char *fn)
156         fprintf(stdout, "Deleting %s\n", fn);
162 static void finalize(const char *fn_out)
168 fp = fopen(fn_out, "r");
169 fprintf(stdout, "\n\n");
171 while (fgets(buf, STRLEN-1, fp) != NULL)
173 fprintf(stdout, "%s", buf);
176 fprintf(stdout, "\n\n");
/* Scanner states for parse_logfile(); these markers appear in this
 * order in a complete md.log, so the parser advances monotonically. */
181     eFoundNothing, eFoundDDStr, eFoundAccountingStr, eFoundCycleStr
/* Scan one benchmark md.log (and on failure the captured stderr file) and
 * fill perfdata entry test_nr with cycle count, ns/day and PME load.
 * Returns one of the eParselog* codes describing success or the reason
 * for failure. The scanner is a state machine (eFoundNothing ->
 * eFoundDDStr -> eFoundAccountingStr -> eFoundCycleStr) because the
 * matched strings appear in that order in a valid log.
 * NOTE(review): several structural lines (braces, switch statement) are
 * missing from this listing; code below is kept verbatim. */
184 static int parse_logfile(const char *logfile, const char *errfile,
185 t_perf *perfdata, int test_nr, int presteps, gmx_int64_t cpt_steps,
189 char line[STRLEN], dumstring[STRLEN], dumstring2[STRLEN];
190 const char matchstrdd[] = "Domain decomposition grid";
191 const char matchstrcr[] = "resetting all time and cycle counters";
192 const char matchstrbal[] = "Average PME mesh/force load:";
193 const char matchstring[] = "R E A L C Y C L E A N D T I M E A C C O U N T I N G";
194 const char errSIG[] = "signal, stopping at the next";
196 float dum1, dum2, dum3, dum4;
199 gmx_int64_t resetsteps = -1;
200 gmx_bool bFoundResetStr = FALSE;
201 gmx_bool bResetChecked = FALSE;
/* Missing log file: wipe this repeat's data and report it. */
204 if (!gmx_fexist(logfile))
206 fprintf(stderr, "WARNING: Could not find logfile %s.\n", logfile);
207 cleandata(perfdata, test_nr);
208 return eParselogNotFound;
211 fp = fopen(logfile, "r");
/* -1 marks "not found yet" for both values below. */
212 perfdata->PME_f_load[test_nr] = -1.0;
213 perfdata->guessPME = -1;
215 iFound = eFoundNothing;
218 iFound = eFoundDDStr; /* Skip some case statements */
221 while (fgets(line, STRLEN, fp) != NULL)
223 /* Remove leading spaces */
226 /* Check for TERM and INT signals from user: */
227 if (std::strstr(line, errSIG) != NULL)
230 cleandata(perfdata, test_nr);
231 return eParselogTerm;
234 /* Check whether cycle resetting worked */
235 if (presteps > 0 && !bFoundResetStr)
237 if (std::strstr(line, matchstrcr) != NULL)
/* Build the scan format at run time so the 64-bit format macro expands. */
239 sprintf(dumstring, "step %s", "%" GMX_SCNd64);
240 sscanf(line, dumstring, &resetsteps);
241 bFoundResetStr = TRUE;
242 if (resetsteps == presteps+cpt_steps)
244 bResetChecked = TRUE;
248 sprintf(dumstring, "%" GMX_PRId64, resetsteps);
249 sprintf(dumstring2, "%" GMX_PRId64, presteps+cpt_steps);
250 fprintf(stderr, "WARNING: Time step counters were reset at step %s,\n"
251 " though they were supposed to be reset at step %s!\n",
252 dumstring, dumstring2);
257 /* Look for strings that appear in a certain order in the log file: */
261 /* Look for domain decomp grid and separate PME nodes: */
262 if (str_starts(line, matchstrdd))
264 sscanf(line, "Domain decomposition grid %d x %d x %d, separate PME ranks %d",
265 &(perfdata->nx), &(perfdata->ny), &(perfdata->nz), &npme);
/* nPMEnodes == -1 means mdrun chose the PME rank count itself. */
266 if (perfdata->nPMEnodes == -1)
268 perfdata->guessPME = npme;
270 else if (perfdata->nPMEnodes != npme)
272 gmx_fatal(FARGS, "PME ranks from command line and output file are not identical");
274 iFound = eFoundDDStr;
276 /* Catch a few errors that might have occurred: */
277 else if (str_starts(line, "There is no domain decomposition for"))
280 return eParselogNoDDGrid;
282 else if (str_starts(line, "The number of ranks you selected"))
285 return eParselogLargePrimeFactor;
287 else if (str_starts(line, "reading tpx file"))
290 return eParselogTPXVersion;
292 else if (str_starts(line, "The -dd or -npme option request a parallel simulation"))
295 return eParselogNotParallel;
299 /* Even after the "Domain decomposition grid" string was found,
300 * it could be that mdrun had to quit due to some error. */
301 if (str_starts(line, "Incorrect launch configuration: mismatching number of"))
304 return eParselogMismatchOfNumberOfPPRanksAndAvailableGPUs;
306 else if (str_starts(line, "Some of the requested GPUs do not exist"))
309 return eParselogGpuProblem;
311 /* Look for PME mesh/force balance (not necessarily present, though) */
312 else if (str_starts(line, matchstrbal))
314 sscanf(&line[std::strlen(matchstrbal)], "%f", &(perfdata->PME_f_load[test_nr]));
316 /* Look for matchstring */
317 else if (str_starts(line, matchstring))
319 iFound = eFoundAccountingStr;
322 case eFoundAccountingStr:
323 /* Already found matchstring - look for cycle data */
324 if (str_starts(line, "Total "))
326 sscanf(line, "Total %*f %lf", &(perfdata->Gcycles[test_nr]))
327 iFound = eFoundCycleStr;
331 /* Already found cycle data - look for remaining performance info and return */
332 if (str_starts(line, "Performance:"))
334 ndum = sscanf(line, "%s %f %f %f %f", dumstring, &dum1, &dum2, &dum3, &dum4);
335 /* (ns/day) is the second last entry, depending on whether GMX_DETAILED_PERF_STATS was set in print_perf(), nrnb.c */
336 perfdata->ns_per_day[test_nr] = (ndum == 5) ? dum3 : dum1;
338 if (bResetChecked || presteps == 0)
344 return eParselogResetProblem;
351 /* Close the log file */
354 /* Check why there is no performance data in the log file.
355 * Did a fatal error occur? */
356 if (gmx_fexist(errfile))
358 fp = fopen(errfile, "r");
359 while (fgets(line, STRLEN, fp) != NULL)
361 if (str_starts(line, "Fatal error:") )
363 if (fgets(line, STRLEN, fp) != NULL)
365 fprintf(stderr, "\nWARNING: An error occurred during this benchmark:\n"
369 cleandata(perfdata, test_nr);
370 return eParselogFatal;
377 fprintf(stderr, "WARNING: Could not find stderr file %s.\n", errfile);
380 /* Giving up ... we could not find out why there is no performance data in
382 fprintf(stdout, "No performance data in log file.\n");
383 cleandata(perfdata, test_nr);
385 return eParselogNoPerfData;
/* Compute per-setting averages (and std.dev. over repeats) from all
 * benchmark runs, print a summary table to fp, and select the winner:
 * the (tpr, npme) combination with the lowest average Gcycles whose
 * averages are positive. Outputs the winning tpr index and PME rank
 * count through index_tpr / npme_optimal; returns whether the original,
 * unmodified tpr settings can be reused for the production run. */
389 static gmx_bool analyze_data(
398 int *index_tpr, /* OUT: Nr of mdp file with best settings */
399 int *npme_optimal) /* OUT: Optimal number of PME nodes */
402 int line = 0, line_win = -1;
403 int k_win = -1, i_win = -1, winPME;
404 double s = 0.0; /* standard deviation */
407 char str_PME_f_load[13];
408 gmx_bool bCanUseOrigTPR;
409 gmx_bool bRefinedCoul, bRefinedVdW, bRefinedGrid;
415 fprintf(fp, "Summary of successful runs:\n");
416 fprintf(fp, "Line tpr PME ranks Gcycles Av. Std.dev. ns/day PME/f");
419 fprintf(fp, " DD grid");
425 for (k = 0; k < ntprs; k++)
427 for (i = 0; i < ntests; i++)
429 /* Select the right dataset: */
430 pd = &(perfdata[k][i]);
432 pd->Gcycles_Av = 0.0;
433 pd->PME_f_load_Av = 0.0;
434 pd->ns_per_day_Av = 0.0;
/* nPMEnodes == -1: mdrun guessed the PME count; show it in parentheses. */
436 if (pd->nPMEnodes == -1)
438 sprintf(strbuf, "(%3d)", pd->guessPME);
442 sprintf(strbuf, " ");
445 /* Get the average run time of a setting */
446 for (j = 0; j < nrepeats; j++)
448 pd->Gcycles_Av += pd->Gcycles[j];
449 pd->PME_f_load_Av += pd->PME_f_load[j];
451 pd->Gcycles_Av /= nrepeats;
452 pd->PME_f_load_Av /= nrepeats;
454 for (j = 0; j < nrepeats; j++)
456 if (pd->ns_per_day[j] > 0.0)
458 pd->ns_per_day_Av += pd->ns_per_day[j];
462 /* Somehow the performance number was not acquired for this run,
463 * therefore set the average to some negative value: */
464 pd->ns_per_day_Av = -1.0f*nrepeats;
468 pd->ns_per_day_Av /= nrepeats;
/* A negative PME load average means it was never found in the log. */
471 if (pd->PME_f_load_Av > 0.0)
473 sprintf(str_PME_f_load, "%12.3f", pd->PME_f_load_Av);
477 sprintf(str_PME_f_load, "%s", " - ");
481 /* We assume we had a successful run if both averages are positive */
482 if (pd->Gcycles_Av > 0.0 && pd->ns_per_day_Av > 0.0)
484 /* Output statistics if repeats were done */
487 /* Calculate the standard deviation */
489 for (j = 0; j < nrepeats; j++)
491 s += gmx::square( pd->Gcycles[j] - pd->Gcycles_Av );
496 fprintf(fp, "%4d %3d %4d%s %12.3f %12.3f %12.3f %s",
497 line, k, pd->nPMEnodes, strbuf, pd->Gcycles_Av, s,
498 pd->ns_per_day_Av, str_PME_f_load);
501 fprintf(fp, " %3d %3d %3d", pd->nx, pd->ny, pd->nz);
505 /* Store the index of the best run found so far in 'winner': */
506 if ( (k_win == -1) || (pd->Gcycles_Av < perfdata[k_win][i_win].Gcycles_Av) )
519 gmx_fatal(FARGS, "None of the runs was successful! Check %s for problems.", fn);
524 winPME = perfdata[k_win][i_win].nPMEnodes;
528 /* We stuck to a fixed number of PME-only nodes */
529 sprintf(strbuf, "settings No. %d", k_win);
533 /* We have optimized the number of PME-only nodes */
536 sprintf(strbuf, "%s", "the automatic number of PME ranks");
540 sprintf(strbuf, "%d PME ranks", winPME);
543 fprintf(fp, "Best performance was achieved with %s", strbuf);
544 if ((nrepeats > 1) && (ntests > 1))
546 fprintf(fp, " (see line %d)", line_win);
550 /* Only mention settings if they were modified: */
551 bRefinedCoul = !gmx_within_tol(info->rcoulomb[k_win], info->rcoulomb[0], GMX_REAL_EPS);
552 bRefinedVdW = !gmx_within_tol(info->rvdw[k_win], info->rvdw[0], GMX_REAL_EPS);
553 bRefinedGrid = !(info->nkx[k_win] == info->nkx[0] &&
554 info->nky[k_win] == info->nky[0] &&
555 info->nkz[k_win] == info->nkz[0]);
557 if (bRefinedCoul || bRefinedVdW || bRefinedGrid)
559 fprintf(fp, "Optimized PME settings:\n");
560 bCanUseOrigTPR = FALSE;
564 bCanUseOrigTPR = TRUE;
569 fprintf(fp, " New Coulomb radius: %f nm (was %f nm)\n", info->rcoulomb[k_win], info->rcoulomb[0]);
574 fprintf(fp, " New Van der Waals radius: %f nm (was %f nm)\n", info->rvdw[k_win], info->rvdw[0]);
579 fprintf(fp, " New Fourier grid xyz: %d %d %d (was %d %d %d)\n", info->nkx[k_win], info->nky[k_win], info->nkz[k_win],
580 info->nkx[0], info->nky[0], info->nkz[0]);
583 if (bCanUseOrigTPR && ntprs > 1)
585 fprintf(fp, "and original PME settings.\n");
590 /* Return the index of the mdp file that showed the highest performance
591 * and the optimal number of PME nodes */
593 *npme_optimal = winPME;
595 return bCanUseOrigTPR;
599 /* Get the commands we need to set up the runs from environment variables */
/* Fills *cmd_mpirun (from $MPIRUN, default "mpirun"; empty when using
 * thread-MPI) and *cmd_mdrun (from the deprecated $MDRUN fallback).
 * Aborts with gmx_fatal when no mdrun command can be determined.
 * Caller owns the gmx_strdup'ed strings. */
600 static void get_program_paths(gmx_bool bThreads, char *cmd_mpirun[], char *cmd_mdrun[])
603 const char def_mpirun[] = "mpirun";
605 const char empty_mpirun[] = "";
607 /* Get the commands we need to set up the runs from environment variables */
610 if ( (cp = getenv("MPIRUN")) != NULL)
612 *cmd_mpirun = gmx_strdup(cp);
616 *cmd_mpirun = gmx_strdup(def_mpirun);
/* Thread-MPI build: no mpirun launcher is needed at all. */
621 *cmd_mpirun = gmx_strdup(empty_mpirun);
624 if (*cmd_mdrun == NULL)
626 /* The use of MDRUN is deprecated, but made available in 5.1
627 for backward compatibility. It may be removed in a future
629 if ( (cp = getenv("MDRUN" )) != NULL)
631 *cmd_mdrun = gmx_strdup(cp);
635 gmx_fatal(FARGS, "The way to call mdrun must be set in the -mdrun command-line flag.");
640 /* Check that the commands will run mdrun (perhaps via mpirun) by
641 * running a very quick test simulation. Requires MPI environment or
642 * GPU support to be available if applicable. */
643 /* TODO implement feature to parse the log file to get the list of
644 compatible GPUs from mdrun, if the user of gmx tune-pme has not
/* Runs "<mdrun> -version" (optionally via mpirun), captures the output in
 * benchtest.log, and scans it to verify: the executable is mdrun, its
 * MPI flavour matches bThreads, and GPU support is present when
 * bNeedGpuSupport. Aborts with gmx_fatal on any mismatch. */
646 static void check_mdrun_works(gmx_bool bThreads,
647 const char *cmd_mpirun,
649 const char *cmd_mdrun,
650 gmx_bool bNeedGpuSupport)
652 char *command = NULL;
656 const char filename[] = "benchtest.log";
658 /* This string should always be identical to the one in copyrite.c,
659 * gmx_print_version_info() in the GMX_MPI section */
660 const char match_mpi[] = "MPI library: MPI";
661 const char match_mdrun[] = "Executable: ";
662 const char match_gpu[] = "GPU support: enabled";
663 gmx_bool bMdrun = FALSE;
664 gmx_bool bMPI = FALSE;
665 gmx_bool bHaveGpuSupport = FALSE;
667 /* Run a small test to see whether mpirun + mdrun work */
668 fprintf(stdout, "Making sure that mdrun can be executed. ");
671 snew(command, std::strlen(cmd_mdrun) + std::strlen(cmd_np) + std::strlen(filename) + 50);
672 sprintf(command, "%s%s-version -maxh 0.001 1> %s 2>&1", cmd_mdrun, cmd_np, filename);
676 snew(command, std::strlen(cmd_mpirun) + std::strlen(cmd_np) + std::strlen(cmd_mdrun) + std::strlen(filename) + 50);
677 sprintf(command, "%s%s%s -version -maxh 0.001 1> %s 2>&1", cmd_mpirun, cmd_np, cmd_mdrun, filename);
679 fprintf(stdout, "Trying '%s' ... ", command);
680 make_backup(filename);
681 gmx_system_call(command);
683 /* Check if we find the characteristic string in the output: */
684 if (!gmx_fexist(filename))
686 gmx_fatal(FARGS, "Output from test run could not be found.");
689 fp = fopen(filename, "r");
690 /* We need to scan the whole output file, since sometimes the queuing system
691 * also writes stuff to stdout/err */
694 cp = fgets(line, STRLEN, fp);
697 if (str_starts(line, match_mdrun) )
701 if (str_starts(line, match_mpi) )
705 if (str_starts(line, match_gpu) )
707 bHaveGpuSupport = TRUE;
717 gmx_fatal(FARGS, "Need a threaded version of mdrun. This one\n"
719 "seems to have been compiled with MPI instead.",
727 gmx_fatal(FARGS, "Need an MPI-enabled version of mdrun. This one\n"
729 "seems to have been compiled without MPI support.",
736 gmx_fatal(FARGS, "Cannot execute mdrun. Please check %s for problems!",
740 if (bNeedGpuSupport && !bHaveGpuSupport)
742 gmx_fatal(FARGS, "The mdrun executable did not have the expected GPU support.")
745 fprintf(stdout, "passed.\n");
752 /*! \brief Helper struct so we can parse the string with eligible GPU
753 IDs outside do_the_tests. */
754 typedef struct eligible_gpu_ids
756 int n; /**< Length of ids */
757 int *ids; /**< Array of length n. NULL if no GPUs in use */
758 } t_eligible_gpu_ids;
760 /* Handles the no-GPU case by emitting an empty string. */
/* Build the "-gpu_id <digits>" fragment for an mdrun command line:
 * one digit per PP rank, distributing PP ranks as evenly as possible
 * over the eligible GPU ids (extra ranks go to the lowest-indexed GPUs).
 * Returns a snew'ed string the caller must free; empty when gpu_ids->n
 * is 0 or numPmeRanks == -1 (mdrun's default behaviour). */
761 static char *make_gpu_id_command_line(int numRanks, int numPmeRanks, const t_eligible_gpu_ids *gpu_ids)
763 char *command_line, *ptr;
764 const char *flag = "-gpu_id ";
767 /* Reserve enough room for the option name, enough single-digit
768 GPU ids (since that is currently all that is possible to use
769 with mdrun), and a terminating NULL. */
770 flag_length = std::strlen(flag);
771 snew(command_line, flag_length + numRanks + 1);
774 /* If the user has given no eligible GPU IDs, or we're trying the
775 * default behaviour, then there is nothing for g_tune_pme to give
776 * to mdrun -gpu_id */
777 if (gpu_ids->n > 0 && numPmeRanks > -1)
779 int numPpRanks, max_num_ranks_for_each_GPU;
782 /* Write the option flag */
783 std::strcpy(ptr, flag);
786 numPpRanks = numRanks - numPmeRanks;
787 max_num_ranks_for_each_GPU = numPpRanks / gpu_ids->n;
/* Round up when the division does not come out even. */
788 if (max_num_ranks_for_each_GPU * gpu_ids->n != numPpRanks)
790 /* Some GPUs will receive more work than others, which
791 * we choose to be those with the lowest indices */
792 max_num_ranks_for_each_GPU++;
795 /* Loop over all eligible GPU ids */
796 for (gpu_id = 0, rank = 0; gpu_id < gpu_ids->n; gpu_id++)
798 int rank_for_this_GPU;
799 /* Loop over all PP ranks for GPU with ID gpu_id, building the
800 assignment string. */
801 for (rank_for_this_GPU = 0;
802 rank_for_this_GPU < max_num_ranks_for_each_GPU && rank < numPpRanks;
803 rank++, rank_for_this_GPU++)
/* GPU ids are single digits, so '0' + id yields the character. */
805 *ptr = '0' + gpu_ids->ids[gpu_id];
/* Assemble the production mdrun command line with the tuned -npme value
 * and either execute it (bLaunch) or just print it for the user to run
 * manually. The command is also recorded in the general log file fp. */
815 static void launch_simulation(
816 gmx_bool bLaunch, /* Should the simulation be launched? */
817 FILE *fp, /* General log file */
818 gmx_bool bThreads, /* whether to use threads */
819 char *cmd_mpirun, /* Command for mpirun */
820 char *cmd_np, /* Switch for -np or -ntmpi or empty */
821 char *cmd_mdrun, /* Command for mdrun */
822 char *args_for_mdrun, /* Arguments for mdrun */
823 const char *simulation_tpr, /* This tpr will be simulated */
824 int nnodes, /* Number of ranks to use */
825 int nPMEnodes, /* Number of PME ranks to use */
826 const t_eligible_gpu_ids *gpu_ids) /* Struct containing GPU IDs for
827 * constructing mdrun command lines */
829 char *command, *cmd_gpu_ids;
832 /* Make enough space for the system call command,
833 * (200 extra chars for -npme ... etc. options should suffice): */
834 snew(command, std::strlen(cmd_mpirun)+std::strlen(cmd_mdrun)+std::strlen(cmd_np)+std::strlen(args_for_mdrun)+std::strlen(simulation_tpr)+200);
836 cmd_gpu_ids = make_gpu_id_command_line(nnodes, nPMEnodes, gpu_ids);
838 /* Note that the -passall option requires args_for_mdrun to be at the end
839 * of the command line string */
/* Thread-MPI form: no mpirun launcher prefix. */
842 sprintf(command, "%s%s-npme %d -s %s %s %s",
843 cmd_mdrun, cmd_np, nPMEnodes, simulation_tpr, args_for_mdrun, cmd_gpu_ids);
/* Real-MPI form: prefix with mpirun and the rank-count switch. */
847 sprintf(command, "%s%s%s -npme %d -s %s %s %s",
848 cmd_mpirun, cmd_np, cmd_mdrun, nPMEnodes, simulation_tpr, args_for_mdrun, cmd_gpu_ids);
851 fprintf(fp, "%s this command line to launch the simulation:\n\n%s", bLaunch ? "Using" : "Please use", command);
855 /* Now the real thing! */
858 fprintf(stdout, "\nLaunching simulation with best parameters now.\nExecuting '%s'", command);
861 gmx_system_call(command);
/* Copy the best-performing benchmark tpr to the production tpr name,
 * restoring the original nsteps and init_step (which the benchmarks had
 * overridden with short values). */
866 static void modify_PMEsettings(
867 gmx_int64_t simsteps, /* Set this value as number of time steps */
868 gmx_int64_t init_step, /* Set this value as init_step */
869 const char *fn_best_tpr, /* tpr file with the best performance */
870 const char *fn_sim_tpr) /* name of tpr file to be launched */
877 gmx::MDModules mdModules;
878 ir = mdModules.inputrec();
879 read_tpx_state(fn_best_tpr, ir, &state, &mtop);
881 /* Reset nsteps and init_step to the value of the input .tpr file */
882 ir->nsteps = simsteps;
883 ir->init_step = init_step;
885 /* Write the tpr file which will be launched */
/* Build the format string at run time so the 64-bit macro expands. */
886 sprintf(buf, "Writing optimized simulation file %s with nsteps=%s.\n", fn_sim_tpr, "%" GMX_PRId64);
887 fprintf(stdout, buf, ir->nsteps);
889 write_tpx_state(fn_sim_tpr, ir, &state, &mtop);
/* Whether rvdw may be scaled along with rcoulomb for this vdW type.
 * NOTE(review): the return expression is truncated in this listing —
 * further evdw* alternatives presumably follow evdwCUT; confirm against
 * the full source. */
892 static gmx_bool can_scale_rvdw(int vdwtype)
894 return (evdwCUT == vdwtype ||
/* True for the switched/user-switched PME electrostatics variants. */
898 #define EPME_SWITCHED(e) ((e) == eelPMESWITCH || (e) == eelPMEUSERSWITCH)
900 /* Make additional TPR files with more computational load for the
901 * direct space processors: */
/* Writes *ntprs benchmark tpr files derived from fn_sim_tpr: file 0 keeps
 * the input settings (only nsteps/init_step changed), files j>0 scale
 * rcoulomb between rmin and rmax and the Fourier grid inversely, shifting
 * work between real and reciprocal space. Records each file's settings in
 * info and a human-readable table in fp. */
902 static void make_benchmark_tprs(
903 const char *fn_sim_tpr, /* READ : User-provided tpr file */
904 char *fn_bench_tprs[], /* WRITE: Names of benchmark tpr files */
905 gmx_int64_t benchsteps, /* Number of time steps for benchmark runs */
906 gmx_int64_t statesteps, /* Step counter in checkpoint file */
907 real rmin, /* Minimal Coulomb radius */
908 real rmax, /* Maximal Coulomb radius */
909 real bScaleRvdw, /* Scale rvdw along with rcoulomb */
910 int *ntprs, /* No. of TPRs to write, each with a different
911 rcoulomb and fourierspacing */
912 t_inputinfo *info, /* Contains information about mdp file options */
913 FILE *fp) /* Write the output here */
919 real nlist_buffer; /* Thickness of the buffer regions for PME-switch potentials */
922 gmx_bool bNote = FALSE;
923 real add; /* Add this to rcoul for the next test */
924 real fac = 1.0; /* Scaling factor for Coulomb radius */
925 real fourierspacing; /* Basic fourierspacing from tpr */
928 sprintf(buf, "Making benchmark tpr file%s with %s time step%s",
929 *ntprs > 1 ? "s" : "", "%" GMX_PRId64, benchsteps > 1 ? "s" : "");
930 fprintf(stdout, buf, benchsteps);
933 sprintf(buf, " (adding %s steps from checkpoint file)", "%" GMX_PRId64);
934 fprintf(stdout, buf, statesteps);
935 benchsteps += statesteps;
937 fprintf(stdout, ".\n");
939 gmx::MDModules mdModules;
940 ir = mdModules.inputrec();
941 read_tpx_state(fn_sim_tpr, ir, &state, &mtop);
943 /* Check if some kind of PME was chosen */
944 if (EEL_PME(ir->coulombtype) == FALSE)
946 gmx_fatal(FARGS, "Can only do optimizations for simulations with %s electrostatics.",
950 /* Check if rcoulomb == rlist, which is necessary for plain PME. */
951 if ( (ir->cutoff_scheme != ecutsVERLET) &&
952 (eelPME == ir->coulombtype) && !(ir->rcoulomb == ir->rlist))
954 gmx_fatal(FARGS, "%s requires rcoulomb (%f) to be equal to rlist (%f).",
955 EELTYPE(eelPME), ir->rcoulomb, ir->rlist);
957 /* For other PME types, rcoulomb is allowed to be smaller than rlist */
958 else if (ir->rcoulomb > ir->rlist)
960 gmx_fatal(FARGS, "%s requires rcoulomb (%f) to be equal to or smaller than rlist (%f)",
961 EELTYPE(ir->coulombtype), ir->rcoulomb, ir->rlist);
964 if (bScaleRvdw && ir->rvdw != ir->rcoulomb)
966 fprintf(stdout, "NOTE: input rvdw != rcoulomb, will not scale rvdw\n");
970 /* Reduce the number of steps for the benchmarks */
971 info->orig_sim_steps = ir->nsteps;
972 ir->nsteps = benchsteps;
973 /* We must not use init_step from the input tpr file for the benchmarks */
974 info->orig_init_step = ir->init_step;
977 /* For PME-switch potentials, keep the radial distance of the buffer region */
978 nlist_buffer = ir->rlist - ir->rcoulomb;
980 /* Determine length of triclinic box vectors */
981 for (d = 0; d < DIM; d++)
984 for (i = 0; i < DIM; i++)
986 box_size[d] += state.box[d][i]*state.box[d][i];
988 box_size[d] = std::sqrt(box_size[d]);
991 if (ir->fourier_spacing > 0)
993 info->fsx[0] = ir->fourier_spacing;
994 info->fsy[0] = ir->fourier_spacing;
995 info->fsz[0] = ir->fourier_spacing;
999 /* Reconstruct fourierspacing per dimension from the number of grid points and box size */
1000 info->fsx[0] = box_size[XX]/ir->nkx;
1001 info->fsy[0] = box_size[YY]/ir->nky;
1002 info->fsz[0] = box_size[ZZ]/ir->nkz;
1005 /* If no value for the fourierspacing was provided on the command line, we
1006 * use the reconstruction from the tpr file */
1007 if (ir->fourier_spacing > 0)
1009 /* Use the spacing from the tpr */
1010 fourierspacing = ir->fourier_spacing;
1014 /* Use the maximum observed spacing */
1015 fourierspacing = std::max(std::max(info->fsx[0], info->fsy[0]), info->fsz[0]);
1018 fprintf(stdout, "Calculating PME grid points on the basis of a fourierspacing of %f nm\n", fourierspacing);
1020 /* For performance comparisons the number of particles is useful to have */
1021 fprintf(fp, " Number of particles : %d\n", mtop.natoms);
1023 /* Print information about settings of which some are potentially modified: */
1024 fprintf(fp, " Coulomb type : %s\n", EELTYPE(ir->coulombtype));
1025 fprintf(fp, " Grid spacing x y z : %f %f %f\n",
1026 box_size[XX]/ir->nkx, box_size[YY]/ir->nky, box_size[ZZ]/ir->nkz);
1027 fprintf(fp, " Van der Waals type : %s\n", EVDWTYPE(ir->vdwtype));
1028 if (ir_vdw_switched(ir))
1030 fprintf(fp, " rvdw_switch : %f nm\n", ir->rvdw_switch);
1032 if (EPME_SWITCHED(ir->coulombtype))
1034 fprintf(fp, " rlist : %f nm\n", ir->rlist);
1037 /* Print a descriptive line about the tpr settings tested */
1038 fprintf(fp, "\nWill try these real/reciprocal workload settings:\n");
1039 fprintf(fp, " No. scaling rcoulomb");
1040 fprintf(fp, " nkx nky nkz");
1041 fprintf(fp, " spacing");
1042 if (can_scale_rvdw(ir->vdwtype))
1044 fprintf(fp, " rvdw");
1046 if (EPME_SWITCHED(ir->coulombtype))
1048 fprintf(fp, " rlist");
1050 fprintf(fp, " tpr file\n");
1052 /* Loop to create the requested number of tpr input files */
1053 for (j = 0; j < *ntprs; j++)
1055 /* The first .tpr is the provided one, just need to modify nsteps,
1056 * so skip the following block */
1059 /* Determine which Coulomb radii rc to use in the benchmarks */
1060 add = (rmax-rmin)/(*ntprs-1);
/* Three interpolation schemes depending on where the input rcoulomb
 * sits relative to [rmin, rmax]. */
1061 if (gmx_within_tol(rmin, info->rcoulomb[0], GMX_REAL_EPS))
1063 ir->rcoulomb = rmin + j*add;
1065 else if (gmx_within_tol(rmax, info->rcoulomb[0], GMX_REAL_EPS))
1067 ir->rcoulomb = rmin + (j-1)*add;
1071 /* rmin != rcoul != rmax, ergo test between rmin and rmax */
1072 add = (rmax-rmin)/(*ntprs-2);
1073 ir->rcoulomb = rmin + (j-1)*add;
1076 /* Determine the scaling factor fac */
1077 fac = ir->rcoulomb/info->rcoulomb[0];
1079 /* Scale the Fourier grid spacing */
1080 ir->nkx = ir->nky = ir->nkz = 0;
1081 calc_grid(NULL, state.box, fourierspacing*fac, &ir->nkx, &ir->nky, &ir->nkz);
1083 /* Adjust other radii since various conditions need to be fulfilled */
1084 if (eelPME == ir->coulombtype)
1086 /* plain PME, rcoulomb must be equal to rlist TODO only in the group scheme? */
1087 ir->rlist = ir->rcoulomb;
1091 /* rlist must be >= rcoulomb, we keep the size of the buffer region */
1092 ir->rlist = ir->rcoulomb + nlist_buffer;
1095 if (bScaleRvdw && can_scale_rvdw(ir->vdwtype))
1097 if (ecutsVERLET == ir->cutoff_scheme ||
1098 evdwPME == ir->vdwtype)
1100 /* With either the Verlet cutoff-scheme or LJ-PME,
1101 the van der Waals radius must always equal the
1103 ir->rvdw = ir->rcoulomb;
1107 /* For vdw cutoff, rvdw >= rlist */
1108 ir->rvdw = std::max(info->rvdw[0], ir->rlist);
1111 } /* end of "if (j != 0)" */
1113 /* for j==0: Save the original settings
1114 * for j >0: Save modified radii and Fourier grids */
1115 info->rcoulomb[j] = ir->rcoulomb;
1116 info->rvdw[j] = ir->rvdw;
1117 info->nkx[j] = ir->nkx;
1118 info->nky[j] = ir->nky;
1119 info->nkz[j] = ir->nkz;
1120 info->rlist[j] = ir->rlist;
1121 info->fsx[j] = fac*fourierspacing;
1122 info->fsy[j] = fac*fourierspacing;
1123 info->fsz[j] = fac*fourierspacing;
1125 /* Write the benchmark tpr file */
/* Benchmark file name = input name without ".tpr" + "_benchNN.tpr". */
1126 std::strncpy(fn_bench_tprs[j], fn_sim_tpr, std::strlen(fn_sim_tpr)-std::strlen(".tpr"));
1127 sprintf(buf, "_bench%.2d.tpr", j);
1128 std::strcat(fn_bench_tprs[j], buf);
1129 fprintf(stdout, "Writing benchmark tpr %s with nsteps=", fn_bench_tprs[j]);
1130 fprintf(stdout, "%" GMX_PRId64, ir->nsteps);
1133 fprintf(stdout, ", scaling factor %f\n", fac);
1137 fprintf(stdout, ", unmodified settings\n");
1140 write_tpx_state(fn_bench_tprs[j], ir, &state, &mtop);
1142 /* Write information about modified tpr settings to log file */
1143 fprintf(fp, "%4d%10f%10f", j, fac, ir->rcoulomb);
1144 fprintf(fp, "%5d%5d%5d", ir->nkx, ir->nky, ir->nkz);
1145 fprintf(fp, " %9f ", info->fsx[j]);
1146 if (can_scale_rvdw(ir->vdwtype))
1148 fprintf(fp, "%10f", ir->rvdw);
1150 if (EPME_SWITCHED(ir->coulombtype))
1152 fprintf(fp, "%10f", ir->rlist);
1154 fprintf(fp, " %-14s\n", fn_bench_tprs[j]);
1156 /* Make it clear to the user that some additional settings were modified */
1157 if (!gmx_within_tol(ir->rvdw, info->rvdw[0], GMX_REAL_EPS)
1158 || !gmx_within_tol(ir->rlist, info->rlist[0], GMX_REAL_EPS) )
1165 fprintf(fp, "\nNote that in addition to the Coulomb radius and the Fourier grid\n"
1166 "other input settings were also changed (see table above).\n"
1167 "Please check if the modified settings are appropriate.\n");
1174 /* Rename the files we want to keep to some meaningful filename and
1175 * delete the rest */
/* Post-benchmark housekeeping for run k with nnodes ranks / nPMEnodes PME
 * ranks (repeat nr): keep -p, rename -bg logs and (optionally) -err
 * stderr captures to names encoding the run parameters, delete all other
 * benchmark (-b*) files. */
1176 static void cleanup(const t_filenm *fnm, int nfile, int k, int nnodes,
1177 int nPMEnodes, int nr, gmx_bool bKeepStderr)
1179 char numstring[STRLEN];
1180 char newfilename[STRLEN];
1181 const char *fn = NULL;
1186 fprintf(stdout, "Cleaning up, deleting benchmark temp files ...\n");
1188 for (i = 0; i < nfile; i++)
1190 opt = (char *)fnm[i].opt;
1191 if (std::strcmp(opt, "-p") == 0)
1193 /* do nothing; keep this file */
1196 else if (std::strcmp(opt, "-bg") == 0)
1198 /* Give the log file a nice name so one can later see which parameters were used */
1199 numstring[0] = '\0';
1202 sprintf(numstring, "_%d", nr);
1204 sprintf(newfilename, "%s_no%d_np%d_npme%d%s", opt2fn("-bg", nfile, fnm), k, nnodes, nPMEnodes, numstring);
1205 if (gmx_fexist(opt2fn("-bg", nfile, fnm)))
1207 fprintf(stdout, "renaming log file to %s\n", newfilename);
1208 make_backup(newfilename);
1209 rename(opt2fn("-bg", nfile, fnm), newfilename);
1212 else if (std::strcmp(opt, "-err") == 0)
1214 /* This file contains the output of stderr. We want to keep it in
1215 * cases where there have been problems. */
1216 fn = opt2fn(opt, nfile, fnm);
1217 numstring[0] = '\0';
1220 sprintf(numstring, "_%d", nr);
1222 sprintf(newfilename, "%s_no%d_np%d_npme%d%s", fn, k, nnodes, nPMEnodes, numstring);
1227 fprintf(stdout, "Saving stderr output in %s\n", newfilename);
1228 make_backup(newfilename);
1229 rename(fn, newfilename);
1233 fprintf(stdout, "Deleting %s\n", fn);
1238 /* Delete the files which are created for each benchmark run: (options -b*) */
1239 else if ( (0 == std::strncmp(opt, "-b", 2)) && (opt2bSet(opt, nfile, fnm) || !is_optional(&fnm[i])) )
1241 remove_if_exists(opt2fn(opt, nfile, fnm));
1248 eNpmeAuto, eNpmeAll, eNpmeReduced, eNpmeSubset, eNpmeNr
1251 /* Create a list of numbers of PME nodes to test */
/* Builds, in decreasing order from maxPMEnodes, the list of -npme values to
 * benchmark.  Depending on npmevalues_opt ("all", "subset", or "auto"/"range"
 * plus the total rank count) candidates are filtered by requiring a minimum
 * greatest common divisor between the PP and PME rank counts.  Two sentinel
 * entries are always appended: 0 (no separate PME ranks) and -1 (let mdrun
 * choose automatically). */
1252 static void make_npme_list(
1253 const char *npmevalues_opt, /* Make a complete list with all
1254 * possibilities or a short list that keeps only
1255 * reasonable numbers of PME nodes */
1256 int *nentries, /* Number of entries we put in the nPMEnodes list */
1257 int *nPMEnodes[], /* Each entry contains the value for -npme */
1258 int nnodes, /* Total number of nodes to do the tests on */
1259 int minPMEnodes, /* Minimum number of PME nodes */
1260 int maxPMEnodes) /* Maximum number of PME nodes */
1263 int min_factor = 1; /* We request that npp and npme have this minimal
1264 * largest common factor (depends on npp) */
1265 int nlistmax; /* Max. list size */
1266 int nlist; /* Actual number of entries in list */
1270 /* Do we need to check all possible values for -npme or is a reduced list enough? */
1271 if (!std::strcmp(npmevalues_opt, "all") )
1275 else if (!std::strcmp(npmevalues_opt, "subset") )
1277 eNPME = eNpmeSubset;
1279 else /* "auto" or "range" */
/* For "auto", the choice depends on the machine size (small node counts
 * get a fuller sweep; the branch for the smallest counts is not visible) */
1285 else if (nnodes < 128)
1287 eNPME = eNpmeReduced;
1291 eNPME = eNpmeSubset;
1295 /* Calculate how many entries we could possibly have (in case of -npme all) */
/* +3 leaves room for the two sentinel entries (0 and -1) appended below */
1298 nlistmax = maxPMEnodes - minPMEnodes + 3;
1299 if (0 == minPMEnodes)
1309 /* Now make the actual list which is at most of size nlist */
1310 snew(*nPMEnodes, nlistmax);
1311 nlist = 0; /* start counting again, now the real entries in the list */
1312 for (i = 0; i < nlistmax - 2; i++)
1314 npme = maxPMEnodes - i;
1325 /* For 2d PME we want a common largest factor of at least the cube
1326 * root of the number of PP nodes */
1327 min_factor = static_cast<int>(std::cbrt(npp));
1330 gmx_fatal(FARGS, "Unknown option for eNPME in make_npme_list");
/* Accept the candidate only if PP and PME rank counts share a large
 * enough common divisor, so a sensible domain decomposition exists */
1333 if (gmx_greatest_common_divisor(npp, npme) >= min_factor)
1335 (*nPMEnodes)[nlist] = npme;
1339 /* We always test 0 PME nodes and the automatic number */
1340 *nentries = nlist + 2;
1341 (*nPMEnodes)[nlist ] = 0;
1342 (*nPMEnodes)[nlist+1] = -1;
1344 fprintf(stderr, "Will try the following %d different values for -npme:\n", *nentries);
1345 for (i = 0; i < *nentries-1; i++)
1347 fprintf(stderr, "%d, ", (*nPMEnodes)[i]);
1349 fprintf(stderr, "and %d (auto).\n", (*nPMEnodes)[*nentries-1]);
1353 /* Allocate memory to store the performance data */
/* For each of the 'ntprs' tpr files, allocates 'datasets' t_perf records and,
 * for each record, 'repeats'-sized arrays for the measured cycle counts,
 * ns/day performance and PME force-load numbers.
 * NOTE(review): the innermost loop over 'j' appears to initialize the
 * per-repeat entries; its body lines are not visible here — confirm the
 * snew() calls sit outside that loop in the full source. */
1354 static void init_perfdata(t_perf *perfdata[], int ntprs, int datasets, int repeats)
1359 for (k = 0; k < ntprs; k++)
1361 snew(perfdata[k], datasets);
1362 for (i = 0; i < datasets; i++)
1364 for (j = 0; j < repeats; j++)
1366 snew(perfdata[k][i].Gcycles, repeats)
1367 snew(perfdata[k][i].ns_per_day, repeats);
1368 snew(perfdata[k][i].PME_f_load, repeats);
1375 /* Check that the benchmark mdrun command line works at all by running a
 * single MD step (-nsteps 1) before the real benchmarks start */
1376 static void make_sure_it_runs(char *mdrun_cmd_line, int length, FILE *fp,
1377 const t_filenm *fnm, int nfile)
1379 char *command, *msg;
/* +15 covers the appended " -nsteps 1 -quiet"; +500 covers the fixed
 * explanatory text added to 'msg' below */
1382 snew(command, length + 15);
1383 snew(msg, length + 500);
1385 fprintf(stdout, "Making sure the benchmarks can be executed by running just 1 step...\n");
1386 sprintf(command, "%s -nsteps 1 -quiet", mdrun_cmd_line);
1387 fprintf(stdout, "Executing '%s' ...\n", command);
1388 ret = gmx_system_call(command);
/* On failure (ret != 0, guard not visible here) the message is written to
 * both stderr and the tune_pme output file, then the tool exits */
1392 /* To prevent confusion, do not again issue a gmx_fatal here since we already
1393 * get the error message from mdrun itself */
1395 "Cannot run the first benchmark simulation! Please check the error message of\n"
1396 "mdrun for the source of the problem. Did you provide a command line\n"
1397 "argument that neither gmx tune_pme nor mdrun understands? If you're\n"
1398 "sure your command line should work, you can bypass this check with \n"
1399 "gmx tune_pme -nocheck. The failing command was:\n"
1400 "\n%s\n\n", command);
1402 fprintf(stderr, "%s", msg);
1404 fprintf(fp, "%s", msg);
1408 fprintf(stdout, "Benchmarks can be executed!\n");
1410 /* Clean up the benchmark output files we just created */
1411 fprintf(stdout, "Cleaning up ...\n");
1412 remove_if_exists(opt2fn("-bc", nfile, fnm));
1413 remove_if_exists(opt2fn("-be", nfile, fnm));
1414 remove_if_exists(opt2fn("-bcpo", nfile, fnm));
1415 remove_if_exists(opt2fn("-bg", nfile, fnm));
1416 remove_if_exists(opt2fn("-bo", nfile, fnm));
1417 remove_if_exists(opt2fn("-bx", nfile, fnm));
/* Core benchmark driver: for every tpr file and every candidate -npme value,
 * runs mdrun 'repeats' times, parses each run's log/stderr for timings, writes
 * the results into 'perfdata' and the tune_pme output file 'fp', and cleans up
 * the per-run temporary files.  Optionally first verifies (make_sure_it_runs)
 * that the constructed mdrun command line works at all. */
1423 static void do_the_tests(
1424 FILE *fp, /* General g_tune_pme output file */
1425 char **tpr_names, /* Filenames of the input files to test */
1426 int maxPMEnodes, /* Max fraction of nodes to use for PME */
1427 int minPMEnodes, /* Min fraction of nodes to use for PME */
1428 int npme_fixed, /* If >= -1, test fixed number of PME
1430 const char *npmevalues_opt, /* Which -npme values should be tested */
1431 t_perf **perfdata, /* Here the performance data is stored */
1432 int *pmeentries, /* Entries in the nPMEnodes list */
1433 int repeats, /* Repeat each test this often */
1434 int nnodes, /* Total number of nodes = nPP + nPME */
1435 int nr_tprs, /* Total number of tpr files to test */
1436 gmx_bool bThreads, /* Threads or MPI? */
1437 char *cmd_mpirun, /* mpirun command string */
1438 char *cmd_np, /* "-np", "-n", whatever mpirun needs */
1439 char *cmd_mdrun, /* mdrun command string */
1440 char *cmd_args_bench, /* arguments for mdrun in a string */
1441 const t_filenm *fnm, /* List of filenames from command line */
1442 int nfile, /* Number of files specified on the cmdl. */
1443 int presteps, /* DLB equilibration steps, is checked */
1444 gmx_int64_t cpt_steps, /* Time step counter in the checkpoint */
1445 gmx_bool bCheck, /* Check whether benchmark mdrun works */
1446 const t_eligible_gpu_ids *gpu_ids) /* Struct containing GPU IDs for
1447 * constructing mdrun command lines */
1449 int i, nr, k, ret, count = 0, totaltests;
1450 int *nPMEnodes = NULL;
1453 char *command, *cmd_stub;
1455 gmx_bool bResetProblem = FALSE;
1456 gmx_bool bFirst = TRUE;
1458 /* This string array corresponds to the eParselog enum type at the start
1460 const char* ParseLog[] = {
1462 "Logfile not found!",
1463 "No timings, logfile truncated?",
1464 "Run was terminated.",
1465 "Counters were not reset properly.",
1466 "No DD grid found for these settings.",
1467 "TPX version conflict!",
1468 "mdrun was not started in parallel!",
1469 "Number of PP ranks has a prime factor that is too large.",
1470 "The number of PP ranks did not suit the number of GPUs.",
1471 "Some GPUs were not detected or are incompatible.",
1472 "An error occurred."
1474 char str_PME_f_load[13];
1477 /* Allocate space for the mdrun command line. 100 extra characters should
1478 be more than enough for the -npme etcetera arguments */
1479 cmdline_length = std::strlen(cmd_mpirun)
1480 + std::strlen(cmd_np)
1481 + std::strlen(cmd_mdrun)
1482 + std::strlen(cmd_args_bench)
1483 + std::strlen(tpr_names[0]) + 100;
1484 snew(command, cmdline_length);
1485 snew(cmd_stub, cmdline_length);
1487 /* Construct the part of the command line that stays the same for all tests: */
/* Thread-MPI case: no external mpirun launcher needed */
1490 sprintf(cmd_stub, "%s%s", cmd_mdrun, cmd_np);
/* Real-MPI case: prepend the mpirun command and its rank-count option */
1494 sprintf(cmd_stub, "%s%s%s ", cmd_mpirun, cmd_np, cmd_mdrun);
1497 /* Create a list of numbers of PME nodes to test */
/* npme_fixed < -1 means "not fixed": sweep over candidate -npme values */
1498 if (npme_fixed < -1)
1500 make_npme_list(npmevalues_opt, pmeentries, &nPMEnodes,
1501 nnodes, minPMEnodes, maxPMEnodes);
1507 nPMEnodes[0] = npme_fixed;
1508 fprintf(stderr, "Will use a fixed number of %d PME-only ranks.\n", nPMEnodes[0]);
/* repeats == 0: only the tpr files were requested, no benchmarking */
1513 fprintf(fp, "\nNo benchmarks done since number of repeats (-r) is 0.\n");
1515 finalize(opt2fn("-p", nfile, fnm));
1519 /* Allocate one dataset for each tpr input file: */
1520 init_perfdata(perfdata, nr_tprs, *pmeentries, repeats);
1522 /*****************************************/
1523 /* Main loop over all tpr files to test: */
1524 /*****************************************/
1525 totaltests = nr_tprs*(*pmeentries)*repeats;
1526 for (k = 0; k < nr_tprs; k++)
1528 fprintf(fp, "\nIndividual timings for input file %d (%s):\n", k, tpr_names[k]);
1529 fprintf(fp, "PME ranks Gcycles ns/day PME/f Remark\n");
1530 /* Loop over various numbers of PME nodes: */
1531 for (i = 0; i < *pmeentries; i++)
1533 char *cmd_gpu_ids = NULL;
1535 pd = &perfdata[k][i];
1537 cmd_gpu_ids = make_gpu_id_command_line(nnodes, nPMEnodes[i], gpu_ids);
1539 /* Loop over the repeats for each scenario: */
1540 for (nr = 0; nr < repeats; nr++)
1542 pd->nPMEnodes = nPMEnodes[i];
1544 /* Add -npme and -s to the command line and save it. Note that
1545 * the -passall (if set) options requires cmd_args_bench to be
1546 * at the end of the command line string */
1547 snew(pd->mdrun_cmd_line, cmdline_length);
1548 sprintf(pd->mdrun_cmd_line, "%s-npme %d -s %s %s %s",
1549 cmd_stub, pd->nPMEnodes, tpr_names[k], cmd_args_bench, cmd_gpu_ids);
1551 /* To prevent that all benchmarks fail due to a show-stopper argument
1552 * on the mdrun command line, we make a quick check first.
1553 * This check can be turned off in cases where the automatically chosen
1554 * number of PME-only ranks leads to a number of PP ranks for which no
1555 * decomposition can be found (e.g. for large prime numbers) */
1556 if (bFirst && bCheck)
1558 /* TODO This check is really for a functional
1559 * .tpr, and if we need it, it should take place
1560 * for every .tpr, and the logic for it should be
1561 * immediately inside the loop over k, not in
1562 * this inner loop. */
1563 char *temporary_cmd_line;
1565 snew(temporary_cmd_line, cmdline_length);
1566 /* TODO -npme 0 is more likely to succeed at low
1567 parallelism than the default of -npme -1, but
1568 is more likely to fail at the scaling limit
1569 when the PP domains may be too small. "mpirun
1570 -np 1 mdrun" is probably a reasonable thing to
1571 do for this check, but it'll be easier to
1572 implement that after some refactoring of how
1573 the number of MPI ranks is managed. */
1574 sprintf(temporary_cmd_line, "%s-npme 0 -nb cpu -s %s %s",
1575 cmd_stub, tpr_names[k], cmd_args_bench);
1576 make_sure_it_runs(temporary_cmd_line, cmdline_length, fp, fnm, nfile);
1580 /* Do a benchmark simulation: */
/* 'buf' carries an optional ", pass x/y" suffix for the progress line */
1583 sprintf(buf, ", pass %d/%d", nr+1, repeats);
1589 fprintf(stdout, "\n=== Progress %2.0f%%, tpr %d/%d, run %d/%d%s:\n",
1590 (100.0*count)/totaltests,
1591 k+1, nr_tprs, i+1, *pmeentries, buf);
1592 make_backup(opt2fn("-err", nfile, fnm));
/* Discard mdrun's stdout; capture stderr into the -err file for parsing */
1593 sprintf(command, "%s 1> /dev/null 2>%s", pd->mdrun_cmd_line, opt2fn("-err", nfile, fnm));
1594 fprintf(stdout, "%s\n", pd->mdrun_cmd_line);
1595 gmx_system_call(command);
1597 /* Collect the performance data from the log file; also check stderr
1598 * for fatal errors */
1599 ret = parse_logfile(opt2fn("-bg", nfile, fnm), opt2fn("-err", nfile, fnm),
1600 pd, nr, presteps, cpt_steps, nnodes);
1601 if ((presteps > 0) && (ret == eParselogResetProblem))
1603 bResetProblem = TRUE;
/* -npme -1 means mdrun chose the PME rank count itself; show its guess */
1606 if (-1 == pd->nPMEnodes)
1608 sprintf(buf, "(%3d)", pd->guessPME);
1616 if (pd->PME_f_load[nr] > 0.0)
1618 sprintf(str_PME_f_load, "%12.3f", pd->PME_f_load[nr]);
1622 sprintf(str_PME_f_load, "%s", " - ");
1625 /* Write the data we got to disk */
1626 fprintf(fp, "%4d%s %12.3f %12.3f %s %s", pd->nPMEnodes,
1627 buf, pd->Gcycles[nr], pd->ns_per_day[nr], str_PME_f_load, ParseLog[ret]);
1628 if (!(ret == eParselogOK || ret == eParselogNoDDGrid || ret == eParselogNotFound) )
1630 fprintf(fp, " Check %s file for problems.", ret == eParselogFatal ? "err" : "log");
1636 /* Do some cleaning up and delete the files we do not need any more */
1637 cleanup(fnm, nfile, k, nnodes, pd->nPMEnodes, nr, ret == eParselogFatal);
1639 /* If the first run with this number of processors already failed, do not try again: */
1640 if (pd->Gcycles[0] <= 0.0 && repeats > 1)
1642 fprintf(stdout, "Skipping remaining passes of unsuccessful setting, see log file for details.\n");
1643 count += repeats-(nr+1);
1646 } /* end of repeats loop */
1648 } /* end of -npme loop */
1649 } /* end of tpr file loop */
/* bResetProblem was latched above when counter resetting failed */
1654 fprintf(fp, "WARNING: The cycle and time step counters could not be reset properly. ");
/* Validate all user-supplied tuning parameters before any work is done:
 * input file existence, repeat/rank counts, the rmin <= rcoulomb <= rmax
 * Coulomb-radius scaling interval (adjusting *ntprs as needed), PME rank
 * fraction limits, benchmark step counts, and the -fix fixed-PME-rank
 * settings.  Calls gmx_fatal() on hard errors, prints warnings otherwise. */
1662 static void check_input(
1669 real maxPMEfraction,
1670 real minPMEfraction,
1672 gmx_int64_t bench_nsteps,
1673 const t_filenm *fnm,
1683 /* Make sure the input file exists */
1684 if (!gmx_fexist(opt2fn("-s", nfile, fnm)))
1686 gmx_fatal(FARGS, "File %s not found.", opt2fn("-s", nfile, fnm));
1689 /* Make sure that the checkpoint file is not overwritten during benchmarking */
1690 if ( (0 == std::strcmp(opt2fn("-cpi", nfile, fnm), opt2fn("-bcpo", nfile, fnm)) ) && (sim_part > 1) )
1692 gmx_fatal(FARGS, "Checkpoint input (-cpi) and benchmark checkpoint output (-bcpo) files must not be identical.\n"
1693 "The checkpoint input file must not be overwritten during the benchmarks.\n");
1696 /* Make sure that repeats is >= 0 (if == 0, only write tpr files) */
1699 gmx_fatal(FARGS, "Number of repeats < 0!");
1702 /* Check number of nodes */
1705 gmx_fatal(FARGS, "Number of ranks/threads must be a positive integer.");
1708 /* Automatically choose -ntpr if not set */
1718 /* Set a reasonable scaling factor for rcoulomb */
/* Default upper Coulomb radius when the user did not give -rmax */
1721 *rmax = rcoulomb * 1.2;
1724 fprintf(stderr, "Will test %d tpr file%s.\n", *ntprs, *ntprs == 1 ? "" : "s");
1730 fprintf(stderr, "Note: Choose ntpr>1 to shift PME load between real and reciprocal space.\n");
1734 /* Make sure that rmin <= rcoulomb <= rmax */
1743 if (!(*rmin <= *rmax) )
1745 gmx_fatal(FARGS, "Please choose the Coulomb radii such that rmin <= rmax.\n"
1746 "rmin = %g, rmax = %g, actual rcoul from .tpr file = %g\n", *rmin, *rmax, rcoulomb);
1748 /* Add test scenarios if rmin or rmax were set */
1751 if (!gmx_within_tol(*rmin, rcoulomb, GMX_REAL_EPS) && (*ntprs == 1) )
1754 fprintf(stderr, "NOTE: Setting -rmin to %g changed -ntpr to %d\n",
1757 if (!gmx_within_tol(*rmax, rcoulomb, GMX_REAL_EPS) && (*ntprs == 1) )
1760 fprintf(stderr, "NOTE: Setting -rmax to %g changed -ntpr to %d\n",
1765 /* If one of rmin, rmax is set, we need 2 tpr files at minimum */
1766 if (!gmx_within_tol(*rmax, rcoulomb, GMX_REAL_EPS) || !gmx_within_tol(*rmin, rcoulomb, GMX_REAL_EPS) )
1768 *ntprs = std::max(*ntprs, 2);
1771 /* If both rmin, rmax are set, we need 3 tpr files at minimum */
1772 if (!gmx_within_tol(*rmax, rcoulomb, GMX_REAL_EPS) && !gmx_within_tol(*rmin, rcoulomb, GMX_REAL_EPS) )
1774 *ntprs = std::max(*ntprs, 3);
1779 fprintf(stderr, "NOTE: Your rmin, rmax setting changed -ntpr to %d\n", *ntprs);
1784 if (gmx_within_tol(*rmin, rcoulomb, GMX_REAL_EPS) && gmx_within_tol(rcoulomb, *rmax, GMX_REAL_EPS)) /* We have just a single rc */
1786 fprintf(stderr, "WARNING: Resetting -ntpr to 1 since no Coulomb radius scaling is requested.\n"
1787 "Please set rmin < rmax to test Coulomb radii in the [rmin, rmax] interval\n"
1788 "with correspondingly adjusted PME grid settings\n");
1793 /* Check whether max and min fraction are within required values */
1794 if (maxPMEfraction > 0.5 || maxPMEfraction < 0)
1796 gmx_fatal(FARGS, "-max must be between 0 and 0.5");
1798 if (minPMEfraction > 0.5 || minPMEfraction < 0)
1800 gmx_fatal(FARGS, "-min must be between 0 and 0.5");
1802 if (maxPMEfraction < minPMEfraction)
1804 gmx_fatal(FARGS, "-max must be larger or equal to -min");
1807 /* Check whether the number of steps - if it was set - has a reasonable value */
1808 if (bench_nsteps < 0)
1810 gmx_fatal(FARGS, "Number of steps must be positive.");
1813 if (bench_nsteps > 10000 || bench_nsteps < 100)
1815 fprintf(stderr, "WARNING: steps=");
1816 fprintf(stderr, "%" GMX_PRId64, bench_nsteps);
1817 fprintf(stderr, ". Are you sure you want to perform so %s steps for each benchmark?\n", (bench_nsteps < 100) ? "few" : "many");
1822 gmx_fatal(FARGS, "Cannot have a negative number of presteps.\n");
1825 /* Check for rcoulomb scaling if more than one .tpr file is tested */
1828 if (*rmin/rcoulomb < 0.75 || *rmax/rcoulomb > 1.25)
1830 fprintf(stderr, "WARNING: Applying extreme scaling factor. I hope you know what you are doing.\n");
1834 /* If a fixed number of PME nodes is set we do rcoulomb and PME grid tuning
1835 * only. We need to check whether the requested number of PME-only nodes
1837 if (npme_fixed > -1)
1839 /* No more than 50% of all nodes can be assigned as PME-only nodes. */
1840 if (2*npme_fixed > nnodes)
1842 gmx_fatal(FARGS, "Cannot have more than %d PME-only ranks for a total of %d ranks (you chose %d).\n",
1843 nnodes/2, nnodes, npme_fixed);
1845 if ((npme_fixed > 0) && (5*npme_fixed < nnodes))
1847 fprintf(stderr, "WARNING: Only %g percent of the ranks are assigned as PME-only ranks.\n",
1848 (100.0*npme_fixed)/nnodes);
1850 if (opt2parg_bSet("-min", npargs, pa) || opt2parg_bSet("-max", npargs, pa))
1852 fprintf(stderr, "NOTE: The -min, -max, and -npme options have no effect when a\n"
1853 " fixed number of PME-only ranks is requested with -fix.\n");
1859 /* Returns TRUE when "opt" is needed at launch time */
/* -swap gets special handling first; after that, benchmark-only (-b*),
 * input-tpr (-s*), stderr (-err) and performance (-p*) options are excluded
 * from the launch command line.  The actual return statements are not
 * visible in this view; presumably the first branch returns bSet and the
 * second FALSE, everything else TRUE — confirm against full source. */
1860 static gmx_bool is_launch_file(char *opt, gmx_bool bSet)
1862 if (0 == std::strncmp(opt, "-swap", 5))
1867 /* Apart from the input .tpr and the output log files we need all options that
1868 * were set on the command line and that do not start with -b */
1869 if (0 == std::strncmp(opt, "-b", 2) || 0 == std::strncmp(opt, "-s", 2)
1870 || 0 == std::strncmp(opt, "-err", 4) || 0 == std::strncmp(opt, "-p", 2) )
1879 /* Returns TRUE when "opt" defines a file which is needed for the benchmark runs */
/* Classification: -s is always needed; other -b*/-s* options are needed
 * unless they are optional and unset; remaining set options (extra inputs
 * such as -cpi, -ei) are also carried over.  The return statements between
 * these guards are not visible in this view — confirm against full source. */
1880 static gmx_bool is_bench_file(char *opt, gmx_bool bSet, gmx_bool bOptional, gmx_bool bIsOutput)
1882 /* Apart from the input .tpr, all files starting with "-b" are for
1883 * _b_enchmark files exclusively */
1884 if (0 == std::strncmp(opt, "-s", 2))
1889 if (0 == std::strncmp(opt, "-b", 2) || 0 == std::strncmp(opt, "-s", 2))
1891 if (!bOptional || bSet)
1908 if (bSet) /* These are additional input files like -cpi -ei */
1921 /* Adds 'buf' to 'str' */
1922 static void add_to_string(char **str, const char *buf)
1927 len = std::strlen(*str) + std::strlen(buf) + 1;
1929 std::strcat(*str, buf);
1933 /* Create the command line for the benchmark as well as for the real run */
/* Builds two argument strings: *cmd_args_bench for the benchmark mdrun
 * invocations and *cmd_args_launch for the final production run.  Both are
 * snew()-allocated here and grown via add_to_string(); the caller owns and
 * must eventually free them.  Launch-only switches (-deffnm, -noappend,
 * -cpnum, -resethway) go only into the launch string; per-file options are
 * routed by is_bench_file()/is_launch_file(), with the "b" stripped from
 * -b* option names for the benchmark command line. */
1934 static void create_command_line_snippets(
1935 gmx_bool bAppendFiles,
1936 gmx_bool bKeepAndNumCPT,
1937 gmx_bool bResetHWay,
1941 char *cmd_args_bench[], /* command line arguments for benchmark runs */
1942 char *cmd_args_launch[], /* command line arguments for simulation run */
1943 char extra_args[], /* Add this to the end of the command line */
1944 char *deffnm) /* Default file names, or NULL if not set */
1949 char strbuf[STRLEN];
1952 /* strlen needs at least '\0' as a string: */
1953 snew(*cmd_args_bench, 1);
1954 snew(*cmd_args_launch, 1);
1955 *cmd_args_launch[0] = '\0';
1956 *cmd_args_bench[0] = '\0';
1959 /*******************************************/
1960 /* 1. Process other command line arguments */
1961 /*******************************************/
1964 /* Add equilibration steps to benchmark options */
1965 sprintf(strbuf, "-resetstep %d ", presteps);
1966 add_to_string(cmd_args_bench, strbuf);
1968 /* These switches take effect only at launch time */
/* -deffnm is added only when the user set it (guard not visible here) */
1971 sprintf(strbuf, "-deffnm %s ", deffnm);
1972 add_to_string(cmd_args_launch, strbuf);
1974 if (FALSE == bAppendFiles)
1976 add_to_string(cmd_args_launch, "-noappend ");
1980 add_to_string(cmd_args_launch, "-cpnum ");
1984 add_to_string(cmd_args_launch, "-resethway ");
1987 /********************/
1988 /* 2. Process files */
1989 /********************/
1990 for (i = 0; i < nfile; i++)
1992 opt = (char *)fnm[i].opt;
1993 name = opt2fn(opt, nfile, fnm);
1995 /* Strbuf contains the options, now let's sort out where we need that */
1996 sprintf(strbuf, "%s %s ", opt, name);
1998 if (is_bench_file(opt, opt2bSet(opt, nfile, fnm), is_optional(&fnm[i]), is_output(&fnm[i])) )
2000 /* All options starting with -b* need the 'b' removed,
2001 * therefore overwrite strbuf */
2002 if (0 == std::strncmp(opt, "-b", 2))
2004 sprintf(strbuf, "-%s %s ", &opt[2], name);
2007 add_to_string(cmd_args_bench, strbuf);
2010 if (is_launch_file(opt, opt2bSet(opt, nfile, fnm)) )
2012 add_to_string(cmd_args_launch, strbuf);
/* Pass-through arguments the user supplied go at the very end of both */
2016 add_to_string(cmd_args_bench, extra_args);
2017 add_to_string(cmd_args_launch, extra_args);
2021 /* Set option opt */
2022 static void setopt(const char *opt, int nfile, t_filenm fnm[])
2026 for (i = 0; (i < nfile); i++)
2028 if (std::strcmp(opt, fnm[i].opt) == 0)
2030 fnm[i].flag |= ffSET;
2036 /* This routine inspects the tpr file and ...
2037 * 1. checks for output files that get triggered by a tpr option. These output
2038 * files are marked as 'set' to allow for a proper cleanup after each
2040 * 2. returns the PME:PP load ratio
2041 * 3. returns rcoulomb from the tpr */
/* Note: the setopt() calls below are each guarded by the corresponding
 * feature flag (pull/bFree/bTpi/bNM/bSwap); the guard lines are not
 * visible in this view. */
2042 static float inspect_tpr(int nfile, t_filenm fnm[], real *rcoulomb)
2044 gmx_bool bTpi; /* Is test particle insertion requested? */
2045 gmx_bool bFree; /* Is a free energy simulation requested? */
2046 gmx_bool bNM; /* Is a normal mode analysis requested? */
2047 gmx_bool bSwap; /* Is water/ion position swapping requested? */
2053 /* Check tpr file for options that trigger extra output files */
2054 gmx::MDModules mdModules;
2055 ir = mdModules.inputrec();
2056 read_tpx_state(opt2fn("-s", nfile, fnm), ir, &state, &mtop);
2057 bFree = (efepNO != ir->efep );
2058 bNM = (eiNM == ir->eI );
2059 bSwap = (eswapNO != ir->eSwapCoords);
2060 bTpi = EI_TPI(ir->eI);
2062 /* Set these output files on the tuning command-line */
2065 setopt("-pf", nfile, fnm);
2066 setopt("-px", nfile, fnm);
2070 setopt("-dhdl", nfile, fnm);
2074 setopt("-tpi", nfile, fnm);
2075 setopt("-tpid", nfile, fnm);
2079 setopt("-mtx", nfile, fnm);
2083 setopt("-swap", nfile, fnm);
2086 *rcoulomb = ir->rcoulomb;
2088 /* Return the estimate for the number of PME nodes */
2089 float npme = pme_load_estimate(&mtop, ir, state.box);
/* Couple the normal and benchmark variants of each optional file option:
 * if the user set e.g. -eo, mark the matching -beo as set too, and vice
 * versa, so both command lines stay consistent. */
2094 static void couple_files_options(int nfile, t_filenm fnm[])
2097 gmx_bool bSet, bBench;
2102 for (i = 0; i < nfile; i++)
2104 opt = (char *)fnm[i].opt;
2105 bSet = ((fnm[i].flag & ffSET) != 0);
2106 bBench = (0 == std::strncmp(opt, "-b", 2));
2108 /* Check optional files */
2109 /* If e.g. -eo is set, then -beo also needs to be set */
2110 if (is_optional(&fnm[i]) && bSet && !bBench)
/* Insert 'b' after the dash: "-eo" -> "-beo" */
2112 sprintf(buf, "-b%s", &opt[1]);
2113 setopt(buf, nfile, fnm);
2115 /* If -beo is set, then -eo also needs to be! */
2116 if (is_optional(&fnm[i]) && bSet && bBench)
/* Strip the 'b': "-beo" -> "-eo" */
2118 sprintf(buf, "-%s", &opt[2]);
2119 setopt(buf, nfile, fnm);
2125 #define BENCHSTEPS (1000)
2127 int gmx_tune_pme(int argc, char *argv[])
2129 const char *desc[] = {
2130 "For a given number [TT]-np[tt] or [TT]-ntmpi[tt] of ranks, [THISMODULE] systematically",
2131 "times [gmx-mdrun] with various numbers of PME-only ranks and determines",
2132 "which setting is fastest. It will also test whether performance can",
2133 "be enhanced by shifting load from the reciprocal to the real space",
2134 "part of the Ewald sum. ",
2135 "Simply pass your [REF].tpr[ref] file to [THISMODULE] together with other options",
2136 "for [gmx-mdrun] as needed.[PAR]",
2137 "[THISMODULE] needs to call [gmx-mdrun] and so requires that you",
2138 "specify how to call mdrun with the argument to the [TT]-mdrun[tt]",
2139 "parameter. Depending how you have built GROMACS, values such as",
2140 "'gmx mdrun', 'gmx_d mdrun', or 'mdrun_mpi' might be needed.[PAR]",
2141 "The program that runs MPI programs can be set in the environment variable",
2142 "MPIRUN (defaults to 'mpirun'). Note that for certain MPI frameworks,",
2143 "you need to provide a machine- or hostfile. This can also be passed",
2144 "via the MPIRUN variable, e.g.[PAR]",
2145 "[TT]export MPIRUN=\"/usr/local/mpirun -machinefile hosts\"[tt]",
2146 "Note that in such cases it is normally necessary to compile",
2147 "and/or run [THISMODULE] without MPI support, so that it can call",
2148 "the MPIRUN program.[PAR]",
2149 "Before doing the actual benchmark runs, [THISMODULE] will do a quick",
2150 "check whether [gmx-mdrun] works as expected with the provided parallel settings",
2151 "if the [TT]-check[tt] option is activated (the default).",
2152 "Please call [THISMODULE] with the normal options you would pass to",
2153 "[gmx-mdrun] and add [TT]-np[tt] for the number of ranks to perform the",
2154 "tests on, or [TT]-ntmpi[tt] for the number of threads. You can also add [TT]-r[tt]",
2155 "to repeat each test several times to get better statistics. [PAR]",
2156 "[THISMODULE] can test various real space / reciprocal space workloads",
2157 "for you. With [TT]-ntpr[tt] you control how many extra [REF].tpr[ref] files will be",
2158 "written with enlarged cutoffs and smaller Fourier grids respectively.",
2159 "Typically, the first test (number 0) will be with the settings from the input",
2160 "[REF].tpr[ref] file; the last test (number [TT]ntpr[tt]) will have the Coulomb cutoff",
2161 "specified by [TT]-rmax[tt] with a somewhat smaller PME grid at the same time. ",
2162 "In this last test, the Fourier spacing is multiplied with [TT]rmax[tt]/rcoulomb. ",
2163 "The remaining [REF].tpr[ref] files will have equally-spaced Coulomb radii (and Fourier "
2164 "spacings) between these extremes. [BB]Note[bb] that you can set [TT]-ntpr[tt] to 1",
2165 "if you just seek the optimal number of PME-only ranks; in that case",
2166 "your input [REF].tpr[ref] file will remain unchanged.[PAR]",
2167 "For the benchmark runs, the default of 1000 time steps should suffice for most",
2168 "MD systems. The dynamic load balancing needs about 100 time steps",
2169 "to adapt to local load imbalances, therefore the time step counters",
2170 "are by default reset after 100 steps. For large systems (>1M atoms), as well as ",
2171 "for a higher accuracy of the measurements, you should set [TT]-resetstep[tt] to a higher value.",
2172 "From the 'DD' load imbalance entries in the md.log output file you",
2173 "can tell after how many steps the load is sufficiently balanced. Example call:[PAR]"
2174 "[TT]gmx tune_pme -np 64 -s protein.tpr -launch[tt][PAR]",
2175 "After calling [gmx-mdrun] several times, detailed performance information",
2176 "is available in the output file [TT]perf.out[tt].",
2177 "[BB]Note[bb] that during the benchmarks, a couple of temporary files are written",
2178 "(options [TT]-b*[tt]), these will be automatically deleted after each test.[PAR]",
2179 "If you want the simulation to be started automatically with the",
2180 "optimized parameters, use the command line option [TT]-launch[tt].[PAR]",
2181 "Basic support for GPU-enabled [TT]mdrun[tt] exists. Give a string containing the IDs",
2182 "of the GPUs that you wish to use in the optimization in the [TT]-gpu_id[tt]",
2183 "command-line argument. Unlike [TT]mdrun -gpu_id[tt], this does not imply a mapping",
2184 "but merely the eligible set. [TT]g_tune_pme[tt] will construct calls to",
2185 "mdrun that use this set appropriately, assuming that PP ranks with low indices",
2186 "should map to GPUs with low indices, and increasing both monotonically",
2187 "over the respective sets.[PAR]",
2192 int pmeentries = 0; /* How many values for -npme do we actually test for each tpr file */
2193 real maxPMEfraction = 0.50;
2194 real minPMEfraction = 0.25;
2195 int maxPMEnodes, minPMEnodes;
2196 float guessPMEratio; /* guessed PME:PP ratio based on the tpr file */
2197 float guessPMEnodes;
2198 int npme_fixed = -2; /* If >= -1, use only this number
2199 * of PME-only nodes */
2201 real rmin = 0.0, rmax = 0.0; /* min and max value for rcoulomb if scaling is requested */
2202 real rcoulomb = -1.0; /* Coulomb radius as set in .tpr file */
2203 gmx_bool bScaleRvdw = TRUE;
2204 gmx_int64_t bench_nsteps = BENCHSTEPS;
2205 gmx_int64_t new_sim_nsteps = -1; /* -1 indicates: not set by the user */
2206 gmx_int64_t cpt_steps = 0; /* Step counter in .cpt input file */
2207 int presteps = 100; /* Do a full cycle reset after presteps steps */
2208 gmx_bool bOverwrite = FALSE, bKeepTPR;
2209 gmx_bool bLaunch = FALSE;
2210 char *ExtraArgs = NULL;
2211 char **tpr_names = NULL;
2212 const char *simulation_tpr = NULL;
2213 char *deffnm = NULL;
2214 int best_npme, best_tpr;
2215 int sim_part = 1; /* For benchmarks with checkpoint files */
2219 /* Default program names if nothing else is found */
2220 char *cmd_mpirun = NULL, *cmd_mdrun = NULL;
2221 char *cmd_args_bench, *cmd_args_launch;
2222 char *cmd_np = NULL;
2224 /* IDs of GPUs that are eligible for computation */
2225 char *eligible_gpu_ids = NULL;
2226 t_eligible_gpu_ids *gpu_ids = NULL;
2228 t_perf **perfdata = NULL;
2233 /* Print out how long the tuning took */
2236 static t_filenm fnm[] = {
2238 { efOUT, "-p", "perf", ffWRITE },
2239 { efLOG, "-err", "bencherr", ffWRITE },
2240 { efTPR, "-so", "tuned", ffWRITE },
2242 { efTPR, NULL, NULL, ffREAD },
2243 { efTRN, "-o", NULL, ffWRITE },
2244 { efCOMPRESSED, "-x", NULL, ffOPTWR },
2245 { efCPT, "-cpi", NULL, ffOPTRD },
2246 { efCPT, "-cpo", NULL, ffOPTWR },
2247 { efSTO, "-c", "confout", ffWRITE },
2248 { efEDR, "-e", "ener", ffWRITE },
2249 { efLOG, "-g", "md", ffWRITE },
2250 { efXVG, "-dhdl", "dhdl", ffOPTWR },
2251 { efXVG, "-field", "field", ffOPTWR },
2252 { efXVG, "-table", "table", ffOPTRD },
2253 { efXVG, "-tablep", "tablep", ffOPTRD },
2254 { efXVG, "-tableb", "table", ffOPTRD },
2255 { efTRX, "-rerun", "rerun", ffOPTRD },
2256 { efXVG, "-tpi", "tpi", ffOPTWR },
2257 { efXVG, "-tpid", "tpidist", ffOPTWR },
2258 { efEDI, "-ei", "sam", ffOPTRD },
2259 { efXVG, "-eo", "edsam", ffOPTWR },
2260 { efXVG, "-devout", "deviatie", ffOPTWR },
2261 { efXVG, "-runav", "runaver", ffOPTWR },
2262 { efXVG, "-px", "pullx", ffOPTWR },
2263 { efXVG, "-pf", "pullf", ffOPTWR },
2264 { efXVG, "-ro", "rotation", ffOPTWR },
2265 { efLOG, "-ra", "rotangles", ffOPTWR },
2266 { efLOG, "-rs", "rotslabs", ffOPTWR },
2267 { efLOG, "-rt", "rottorque", ffOPTWR },
2268 { efMTX, "-mtx", "nm", ffOPTWR },
2269 { efXVG, "-swap", "swapions", ffOPTWR },
2270 /* Output files that are deleted after each benchmark run */
2271 { efTRN, "-bo", "bench", ffWRITE },
2272 { efXTC, "-bx", "bench", ffWRITE },
2273 { efCPT, "-bcpo", "bench", ffWRITE },
2274 { efSTO, "-bc", "bench", ffWRITE },
2275 { efEDR, "-be", "bench", ffWRITE },
2276 { efLOG, "-bg", "bench", ffWRITE },
2277 { efXVG, "-beo", "benchedo", ffOPTWR },
2278 { efXVG, "-bdhdl", "benchdhdl", ffOPTWR },
2279 { efXVG, "-bfield", "benchfld", ffOPTWR },
2280 { efXVG, "-btpi", "benchtpi", ffOPTWR },
2281 { efXVG, "-btpid", "benchtpid", ffOPTWR },
2282 { efXVG, "-bdevout", "benchdev", ffOPTWR },
2283 { efXVG, "-brunav", "benchrnav", ffOPTWR },
2284 { efXVG, "-bpx", "benchpx", ffOPTWR },
2285 { efXVG, "-bpf", "benchpf", ffOPTWR },
2286 { efXVG, "-bro", "benchrot", ffOPTWR },
2287 { efLOG, "-bra", "benchrota", ffOPTWR },
2288 { efLOG, "-brs", "benchrots", ffOPTWR },
2289 { efLOG, "-brt", "benchrott", ffOPTWR },
2290 { efMTX, "-bmtx", "benchn", ffOPTWR },
2291 { efNDX, "-bdn", "bench", ffOPTWR },
2292 { efXVG, "-bswap", "benchswp", ffOPTWR }
2295 gmx_bool bThreads = FALSE;
2299 const char *procstring[] =
2300 { NULL, "np", "n", "none", NULL };
2301 const char *npmevalues_opt[] =
2302 { NULL, "auto", "all", "subset", NULL };
2304 gmx_bool bAppendFiles = TRUE;
2305 gmx_bool bKeepAndNumCPT = FALSE;
2306 gmx_bool bResetCountersHalfWay = FALSE;
2307 gmx_bool bBenchmark = TRUE;
2308 gmx_bool bCheck = TRUE;
2310 gmx_output_env_t *oenv = NULL;
2313 /***********************/
2314 /* g_tune_pme options: */
2315 /***********************/
2316 { "-mdrun", FALSE, etSTR, {&cmd_mdrun},
2317 "Command line to run a simulation, e.g. 'gmx mdrun' or 'mdrun_mpi'" },
2318 { "-np", FALSE, etINT, {&nnodes},
2319 "Number of ranks to run the tests on (must be > 2 for separate PME ranks)" },
2320 { "-npstring", FALSE, etENUM, {procstring},
2321 "Name of the [TT]$MPIRUN[tt] option that specifies the number of ranks to use ('np', or 'n'; use 'none' if there is no such option)" },
2322 { "-ntmpi", FALSE, etINT, {&nthreads},
2323 "Number of MPI-threads to run the tests on (turns MPI & mpirun off)"},
2324 { "-r", FALSE, etINT, {&repeats},
2325 "Repeat each test this often" },
2326 { "-max", FALSE, etREAL, {&maxPMEfraction},
2327 "Max fraction of PME ranks to test with" },
2328 { "-min", FALSE, etREAL, {&minPMEfraction},
2329 "Min fraction of PME ranks to test with" },
2330 { "-npme", FALSE, etENUM, {npmevalues_opt},
2331 "Within -min and -max, benchmark all possible values for [TT]-npme[tt], or just a reasonable subset. "
2332 "Auto neglects -min and -max and chooses reasonable values around a guess for npme derived from the .tpr"},
2333 { "-fix", FALSE, etINT, {&npme_fixed},
2334 "If >= -1, do not vary the number of PME-only ranks, instead use this fixed value and only vary rcoulomb and the PME grid spacing."},
2335 { "-rmax", FALSE, etREAL, {&rmax},
2336 "If >0, maximal rcoulomb for -ntpr>1 (rcoulomb upscaling results in fourier grid downscaling)" },
2337 { "-rmin", FALSE, etREAL, {&rmin},
2338 "If >0, minimal rcoulomb for -ntpr>1" },
2339 { "-scalevdw", FALSE, etBOOL, {&bScaleRvdw},
2340 "Scale rvdw along with rcoulomb"},
2341 { "-ntpr", FALSE, etINT, {&ntprs},
2342 "Number of [REF].tpr[ref] files to benchmark. Create this many files with different rcoulomb scaling factors depending on -rmin and -rmax. "
2343 "If < 1, automatically choose the number of [REF].tpr[ref] files to test" },
2344 { "-steps", FALSE, etINT64, {&bench_nsteps},
2345 "Take timings for this many steps in the benchmark runs" },
2346 { "-resetstep", FALSE, etINT, {&presteps},
2347 "Let dlb equilibrate this many steps before timings are taken (reset cycle counters after this many steps)" },
2348 { "-nsteps", FALSE, etINT64, {&new_sim_nsteps},
2349 "If non-negative, perform this many steps in the real run (overwrites nsteps from [REF].tpr[ref], add [REF].cpt[ref] steps)" },
2350 { "-launch", FALSE, etBOOL, {&bLaunch},
2351 "Launch the real simulation after optimization" },
2352 { "-bench", FALSE, etBOOL, {&bBenchmark},
2353 "Run the benchmarks or just create the input [REF].tpr[ref] files?" },
2354 { "-check", FALSE, etBOOL, {&bCheck},
2355 "Before the benchmark runs, check whether mdrun works in parallel" },
2356 { "-gpu_id", FALSE, etSTR, {&eligible_gpu_ids},
2357 "List of GPU device id-s that are eligible for use (unlike mdrun, does not imply any mapping)" },
2358 /******************/
2359 /* mdrun options: */
2360 /******************/
2361 /* We let g_tune_pme parse and understand these options, because we need to
2362 * prevent that they appear on the mdrun command line for the benchmarks */
2363 { "-append", FALSE, etBOOL, {&bAppendFiles},
2364 "Append to previous output files when continuing from checkpoint instead of adding the simulation part number to all file names (for launch only)" },
2365 { "-cpnum", FALSE, etBOOL, {&bKeepAndNumCPT},
2366 "Keep and number checkpoint files (launch only)" },
2367 { "-deffnm", FALSE, etSTR, {&deffnm},
2368 "Set the default filenames (launch only)" },
2369 { "-resethway", FALSE, etBOOL, {&bResetCountersHalfWay},
2370 "HIDDENReset the cycle counters after half the number of steps or halfway [TT]-maxh[tt] (launch only)" }
2373 #define NFILE asize(fnm)
2375 seconds = gmx_gettime();
2377 if (!parse_common_args(&argc, argv, PCA_NOEXIT_ON_ARGS,
2378 NFILE, fnm, asize(pa), pa, asize(desc), desc,
2384 // procstring[0] is used inside two different conditionals further down
2385 GMX_RELEASE_ASSERT(procstring[0] != NULL, "Options inconsistency; procstring[0]Â is NULL");
2387 /* Store the remaining unparsed command line entries in a string which
2388 * is then attached to the mdrun command line */
2390 ExtraArgs[0] = '\0';
2391 for (i = 1; i < argc; i++) /* argc will now be 1 if everything was understood */
2393 add_to_string(&ExtraArgs, argv[i]);
2394 add_to_string(&ExtraArgs, " ");
2397 if (opt2parg_bSet("-ntmpi", asize(pa), pa))
2400 if (opt2parg_bSet("-npstring", asize(pa), pa))
2402 fprintf(stderr, "WARNING: -npstring has no effect when using threads.\n");
2407 gmx_fatal(FARGS, "Can't run multi-threaded MPI simulation yet!");
2409 /* and now we just set this; a bit of an ugly hack*/
2412 /* Check for PME:PP ratio and whether tpr triggers additional output files */
2413 guessPMEratio = inspect_tpr(NFILE, fnm, &rcoulomb);
2415 /* Automatically set -beo options if -eo is set etc. */
2416 couple_files_options(NFILE, fnm);
2418 /* Construct the command line arguments for benchmark runs
2419 * as well as for the simulation run */
2422 sprintf(bbuf, " -ntmpi %d ", nthreads);
2426 /* This string will be used for MPI runs and will appear after the
2427 * mpirun command. */
2428 if (std::strcmp(procstring[0], "none") != 0)
2430 sprintf(bbuf, " -%s %d ", procstring[0], nnodes);
2440 create_command_line_snippets(bAppendFiles, bKeepAndNumCPT, bResetCountersHalfWay, presteps,
2441 NFILE, fnm, &cmd_args_bench, &cmd_args_launch, ExtraArgs, deffnm);
2443 /* Prepare to use checkpoint file if requested */
2445 if (opt2bSet("-cpi", NFILE, fnm))
2447 const char *filename = opt2fn("-cpi", NFILE, fnm);
2449 read_checkpoint_part_and_step(filename,
2450 &cpt_sim_part, &cpt_steps);
2451 if (cpt_sim_part == 0)
2453 gmx_fatal(FARGS, "Checkpoint file %s could not be read!", filename);
2455 /* tune_pme will run the next part of the simulation */
2456 sim_part = cpt_sim_part + 1;
2459 /* Open performance output file and write header info */
2460 fp = gmx_ffopen(opt2fn("-p", NFILE, fnm), "w");
2462 /* Make a quick consistency check of command line parameters */
2463 check_input(nnodes, repeats, &ntprs, &rmin, rcoulomb, &rmax,
2464 maxPMEfraction, minPMEfraction, npme_fixed,
2465 bench_nsteps, fnm, NFILE, sim_part, presteps,
2467 /* Check any GPU IDs passed make sense, and fill the data structure for them */
2469 parse_digits_from_string(eligible_gpu_ids, &gpu_ids->n, &gpu_ids->ids);
2471 /* Determine the maximum and minimum number of PME nodes to test,
2472 * the actual list of settings is build in do_the_tests(). */
2473 if ((nnodes > 2) && (npme_fixed < -1))
2475 if (0 == std::strcmp(npmevalues_opt[0], "auto"))
2477 /* Determine the npme range automatically based on the PME:PP load guess */
2478 if (guessPMEratio > 1.0)
2480 /* More PME than PP work, probably we do not need separate PME nodes at all! */
2481 maxPMEnodes = nnodes/2;
2482 minPMEnodes = nnodes/2;
2486 /* PME : PP load is in the range 0..1, let's test around the guess */
2487 guessPMEnodes = static_cast<int>(nnodes/(1.0 + 1.0/guessPMEratio));
2488 minPMEnodes = static_cast<int>(std::floor(0.7*guessPMEnodes));
2489 maxPMEnodes = static_cast<int>(std::ceil(1.6*guessPMEnodes));
2490 maxPMEnodes = std::min(maxPMEnodes, nnodes/2);
2495 /* Determine the npme range based on user input */
2496 maxPMEnodes = static_cast<int>(std::floor(maxPMEfraction*nnodes));
2497 minPMEnodes = std::max(static_cast<int>(std::floor(minPMEfraction*nnodes)), 0);
2498 fprintf(stdout, "Will try runs with %d ", minPMEnodes);
2499 if (maxPMEnodes != minPMEnodes)
2501 fprintf(stdout, "- %d ", maxPMEnodes);
2503 fprintf(stdout, "PME-only ranks.\n Note that the automatic number of PME-only ranks and no separate PME ranks are always tested.\n");
2512 /* Get the commands we need to set up the runs from environment variables */
2513 get_program_paths(bThreads, &cmd_mpirun, &cmd_mdrun);
2514 if (bBenchmark && repeats > 0)
2516 check_mdrun_works(bThreads, cmd_mpirun, cmd_np, cmd_mdrun, NULL != eligible_gpu_ids);
2519 /* Print some header info to file */
2521 fprintf(fp, "\n P E R F O R M A N C E R E S U L T S\n");
2523 fprintf(fp, "%s for GROMACS %s\n", output_env_get_program_display_name(oenv),
2527 fprintf(fp, "Number of ranks : %d\n", nnodes);
2528 fprintf(fp, "The mpirun command is : %s\n", cmd_mpirun);
2529 if (std::strcmp(procstring[0], "none") != 0)
2531 fprintf(fp, "Passing # of ranks via : -%s\n", procstring[0]);
2535 fprintf(fp, "Not setting number of ranks in system call\n");
2540 fprintf(fp, "Number of threads : %d\n", nnodes);
2543 fprintf(fp, "The mdrun command is : %s\n", cmd_mdrun);
2544 fprintf(fp, "mdrun args benchmarks : %s\n", cmd_args_bench);
2545 fprintf(fp, "Benchmark steps : ");
2546 fprintf(fp, "%" GMX_PRId64, bench_nsteps);
2548 fprintf(fp, "dlb equilibration steps : %d\n", presteps);
2551 fprintf(fp, "Checkpoint time step : ");
2552 fprintf(fp, "%" GMX_PRId64, cpt_steps);
2555 fprintf(fp, "mdrun args at launchtime: %s\n", cmd_args_launch);
2557 if (new_sim_nsteps >= 0)
2560 fprintf(stderr, "Note: Simulation input file %s will have ", opt2fn("-so", NFILE, fnm));
2561 fprintf(stderr, "%" GMX_PRId64, new_sim_nsteps+cpt_steps);
2562 fprintf(stderr, " steps.\n");
2563 fprintf(fp, "Simulation steps : ");
2564 fprintf(fp, "%" GMX_PRId64, new_sim_nsteps);
2569 fprintf(fp, "Repeats for each test : %d\n", repeats);
2572 if (npme_fixed >= -1)
2574 fprintf(fp, "Fixing -npme at : %d\n", npme_fixed);
2577 fprintf(fp, "Input file : %s\n", opt2fn("-s", NFILE, fnm));
2578 fprintf(fp, " PME/PP load estimate : %g\n", guessPMEratio);
2580 /* Allocate memory for the inputinfo struct: */
2582 info->nr_inputfiles = ntprs;
2583 for (i = 0; i < ntprs; i++)
2585 snew(info->rcoulomb, ntprs);
2586 snew(info->rvdw, ntprs);
2587 snew(info->rlist, ntprs);
2588 snew(info->nkx, ntprs);
2589 snew(info->nky, ntprs);
2590 snew(info->nkz, ntprs);
2591 snew(info->fsx, ntprs);
2592 snew(info->fsy, ntprs);
2593 snew(info->fsz, ntprs);
2595 /* Make alternative tpr files to test: */
2596 snew(tpr_names, ntprs);
2597 for (i = 0; i < ntprs; i++)
2599 snew(tpr_names[i], STRLEN);
2602 /* It can be that ntprs is reduced by make_benchmark_tprs if not enough
2603 * different grids could be found. */
2604 make_benchmark_tprs(opt2fn("-s", NFILE, fnm), tpr_names, bench_nsteps+presteps,
2605 cpt_steps, rmin, rmax, bScaleRvdw, &ntprs, info, fp);
2607 /********************************************************************************/
2608 /* Main loop over all scenarios we need to test: tpr files, PME nodes, repeats */
2609 /********************************************************************************/
2610 snew(perfdata, ntprs);
2613 GMX_RELEASE_ASSERT(npmevalues_opt[0] != NULL, "Options inconsistency; npmevalues_opt[0] is NULL");
2614 do_the_tests(fp, tpr_names, maxPMEnodes, minPMEnodes, npme_fixed, npmevalues_opt[0], perfdata, &pmeentries,
2615 repeats, nnodes, ntprs, bThreads, cmd_mpirun, cmd_np, cmd_mdrun,
2616 cmd_args_bench, fnm, NFILE, presteps, cpt_steps, bCheck, gpu_ids);
2618 fprintf(fp, "\nTuning took%8.1f minutes.\n", (gmx_gettime()-seconds)/60.0);
2620 /* Analyse the results and give a suggestion for optimal settings: */
2621 bKeepTPR = analyze_data(fp, opt2fn("-p", NFILE, fnm), perfdata, nnodes, ntprs, pmeentries,
2622 repeats, info, &best_tpr, &best_npme);
2624 /* Take the best-performing tpr file and enlarge nsteps to original value */
2625 if (bKeepTPR && !bOverwrite)
2627 simulation_tpr = opt2fn("-s", NFILE, fnm);
2631 simulation_tpr = opt2fn("-so", NFILE, fnm);
2632 modify_PMEsettings(bOverwrite ? (new_sim_nsteps+cpt_steps) : info->orig_sim_steps,
2633 info->orig_init_step, tpr_names[best_tpr], simulation_tpr);
2636 /* Let's get rid of the temporary benchmark input files */
2637 for (i = 0; i < ntprs; i++)
2639 fprintf(stdout, "Deleting temporary benchmark input file %s\n", tpr_names[i]);
2640 remove(tpr_names[i]);
2643 /* Now start the real simulation if the user requested it ... */
2644 launch_simulation(bLaunch, fp, bThreads, cmd_mpirun, cmd_np, cmd_mdrun,
2645 cmd_args_launch, simulation_tpr, nnodes, best_npme, gpu_ids);
2649 /* ... or simply print the performance results to screen: */
2652 finalize(opt2fn("-p", NFILE, fnm));