include(CheckLibraryExists)
check_library_exists(m sqrt "" HAVE_LIBM)
-check_library_exists(rt clock_gettime "" HAVE_LIBRT)
+check_library_exists(rt clock_gettime "" HAVE_CLOCK_GETTIME)
include(CheckTypeSize)
endif(GMX_LOAD_PLUGINS)
set(VMD_QUIETLY TRUE CACHE INTERNAL "")
-# Link real-time library for POSIX timers
-if(HAVE_TIME_H AND HAVE_UNISTD_H AND HAVE_LIBRT)
+# Link real-time library for POSIX timers. The check for clock_gettime
+# confirms the linkability of rt.
+if(HAVE_TIME_H AND HAVE_UNISTD_H AND HAVE_CLOCK_GETTIME)
list(APPEND GMX_EXTRA_LIBRARIES rt)
endif()
/* Define to 1 if you have the MSVC _aligned_malloc() function. */
#cmakedefine HAVE__ALIGNED_MALLOC
+/* Define to 1 if you have the clock_gettime() function. */
+#cmakedefine HAVE_CLOCK_GETTIME
+
/* Define to 1 if you have the gettimeofday() function. */
#cmakedefine HAVE_GETTIMEOFDAY
add_subdirectory(linearalgebra)
add_subdirectory(onlinehelp)
add_subdirectory(options)
+add_subdirectory(timing)
add_subdirectory(utility)
if (NOT GMX_BUILD_MDRUN_ONLY)
add_subdirectory(legacyheaders)
{ "-start", FALSE, etBOOL, {&bStart},
"Call mdrun with membed options" },
{ "-stepout", FALSE, etINT, {&nstepout},
- "HIDDENFrequency of writing the remaining runtime" },
+ "HIDDENFrequency of writing the remaining wall clock time for the run" },
{ "-v", FALSE, etBOOL, {&bVerbose},
"Be loud and noisy" },
{ "-mdrun_path", FALSE, etSTR, {&mdrun_path},
#include "gmx_ana.h"
#include "names.h"
#include "perf_est.h"
-#include "sim_util.h"
+#include "gromacs/timing/walltime_accounting.h"
/* Enum for situations that can occur during log file parsing, the
gmx_large_int_t nsteps, real delta_t,
double nbfs, double mflop)
{
- real runtime;
+ real wallclocktime;
fprintf(out, "\n");
}
if (delta_t > 0)
{
- mflop = mflop/time_per_node;
- runtime = nsteps*delta_t;
+ mflop = mflop/time_per_node;
+ wallclocktime = nsteps*delta_t;
if (getenv("GMX_DETAILED_PERF_STATS") == NULL)
{
fprintf(out, "%12s %12s %12s\n",
"", "(ns/day)", "(hour/ns)");
fprintf(out, "%12s %12.3f %12.3f\n", "Performance:",
- runtime*24*3.6/time_per_node, 1000*time_per_node/(3600*runtime));
+ wallclocktime*24*3.6/time_per_node, 1000*time_per_node/(3600*wallclocktime));
}
else
{
"(ns/day)", "(hour/ns)");
fprintf(out, "%12s %12.3f %12.3f %12.3f %12.3f\n", "Performance:",
nbfs/time_per_node, (mflop > 1000) ? (mflop/1000) : mflop,
- runtime*24*3.6/time_per_node, 1000*time_per_node/(3600*runtime));
+ wallclocktime*24*3.6/time_per_node, 1000*time_per_node/(3600*wallclocktime));
}
}
else
real cpt_period, real max_hours,
const char *deviceOptions,
unsigned long Flags,
- gmx_runtime_t *runtime);
+ gmx_walltime_accounting_t walltime_accounting);
/* ROUTINES from md.c */
int gmx_pmeonly(gmx_pme_t pme,
t_commrec *cr, t_nrnb *mynrnb,
gmx_wallcycle_t wcycle,
- gmx_runtime_t *runtime,
+ gmx_walltime_accounting_t walltime_accounting,
real ewaldcoeff,
t_inputrec *ir);
/* Called on the nodes that do PME exclusively (as slaves)
#ifndef _sim_util_h
#define _sim_util_h
-#include <time.h>
#include "typedefs.h"
#include "enxio.h"
#include "mdebin.h"
#include "update.h"
#include "vcm.h"
+#include "gromacs/timing/walltime_accounting.h"
#ifdef __cplusplus
extern "C" {
typedef struct gmx_global_stat *gmx_global_stat_t;
-/*! /brief Manages measuring wall clock times for simulations */
-typedef struct {
- double start_time_stamp; //!< Seconds since the epoch recorded at the start of the simulation
- double start_time_stamp_per_thread; //!< Seconds since the epoch recorded at the start of the simulation for this thread
- double elapsed_run_time; //!< Total seconds elapsed over the simulation
- double elapsed_run_time_per_thread; //!< Total seconds elapsed over the simulation running this thread
- gmx_large_int_t nsteps_done; //!< Used by integrators to report the amount of work they did
-} gmx_runtime_t;
-
-
void do_pbc_first(FILE *log, matrix box, t_forcerec *fr,
t_graph *graph, rvec x[]);
/* ROUTINES from sim_util.c */
-double gmx_gettime();
-
-void print_time(FILE *out, gmx_runtime_t *runtime,
+void print_time(FILE *out, gmx_walltime_accounting_t walltime_accounting,
gmx_large_int_t step, t_inputrec *ir, t_commrec *cr);
-void runtime_start(gmx_runtime_t *runtime);
-
-void runtime_end(gmx_runtime_t *runtime);
-
-double runtime_get_elapsed_time(gmx_runtime_t *runtime);
-
void print_date_and_time(FILE *log, int pid, const char *title,
- const gmx_runtime_t *runtime);
+ const gmx_walltime_accounting_t walltime_accounting);
void finish_run(FILE *log, t_commrec *cr,
t_inputrec *inputrec,
t_nrnb nrnb[], gmx_wallcycle_t wcycle,
- gmx_runtime_t *runtime,
+ gmx_walltime_accounting_t walltime_accounting,
wallclock_gpu_t *gputimes,
gmx_bool bWriteStat);
#include "gromacs/linearalgebra/mtxio.h"
#include "gromacs/linearalgebra/sparsematrix.h"
+#include "gromacs/timing/walltime_accounting.h"
typedef struct {
t_state s;
return ems;
}
-static void print_em_start(FILE *fplog, t_commrec *cr, gmx_runtime_t *runtime,
- gmx_wallcycle_t wcycle,
- const char *name)
+static void print_em_start(FILE *fplog,
+ t_commrec *cr,
+ gmx_walltime_accounting_t walltime_accounting,
+ gmx_wallcycle_t wcycle,
+ const char *name)
{
char buf[STRLEN];
- runtime_start(runtime);
+ walltime_accounting_start(walltime_accounting);
sprintf(buf, "Started %s", name);
print_date_and_time(fplog, cr->nodeid, buf, NULL);
wallcycle_start(wcycle, ewcRUN);
}
-static void em_time_end(gmx_runtime_t *runtime,
- gmx_wallcycle_t wcycle)
+static void em_time_end(gmx_walltime_accounting_t walltime_accounting,
+ gmx_wallcycle_t wcycle)
{
wallcycle_stop(wcycle, ewcRUN);
- runtime_end(runtime);
+ walltime_accounting_end(walltime_accounting);
}
static void sp_header(FILE *out, const char *minimizer, real ftol, int nsteps)
}
static void finish_em(t_commrec *cr, gmx_mdoutf_t *outf,
- gmx_runtime_t *runtime, gmx_wallcycle_t wcycle)
+ gmx_walltime_accounting_t walltime_accounting,
+ gmx_wallcycle_t wcycle)
{
if (!(cr->duty & DUTY_PME))
{
done_mdoutf(outf);
- em_time_end(runtime, wcycle);
+ em_time_end(walltime_accounting, wcycle);
}
static void swap_em_state(em_state_t *ems1, em_state_t *ems2)
real gmx_unused cpt_period, real gmx_unused max_hours,
const char gmx_unused *deviceOptions,
unsigned long gmx_unused Flags,
- gmx_runtime_t *runtime)
+ gmx_walltime_accounting_t walltime_accounting)
{
const char *CG = "Polak-Ribiere Conjugate Gradients";
nfile, fnm, &outf, &mdebin);
/* Print to log file */
- print_em_start(fplog, cr, runtime, wcycle, CG);
+ print_em_start(fplog, cr, walltime_accounting, wcycle, CG);
/* Max number of steps */
number_steps = inputrec->nsteps;
fprintf(fplog, "\nPerformed %d energy evaluations in total.\n", neval);
}
- finish_em(cr, outf, runtime, wcycle);
+ finish_em(cr, outf, walltime_accounting, wcycle);
/* To print the actual number of steps we needed somewhere */
- runtime->nsteps_done = step;
+ walltime_accounting_set_nsteps_done(walltime_accounting, step);
return 0;
} /* That's all folks */
real gmx_unused cpt_period, real gmx_unused max_hours,
const char gmx_unused *deviceOptions,
unsigned long gmx_unused Flags,
- gmx_runtime_t *runtime)
+ gmx_walltime_accounting_t walltime_accounting)
{
static const char *LBFGS = "Low-Memory BFGS Minimizer";
em_state_t ems;
end = mdatoms->homenr + start;
/* Print to log file */
- print_em_start(fplog, cr, runtime, wcycle, LBFGS);
+ print_em_start(fplog, cr, walltime_accounting, wcycle, LBFGS);
do_log = do_ene = do_x = do_f = TRUE;
fprintf(fplog, "\nPerformed %d energy evaluations in total.\n", neval);
}
- finish_em(cr, outf, runtime, wcycle);
+ finish_em(cr, outf, walltime_accounting, wcycle);
/* To print the actual number of steps we needed somewhere */
- runtime->nsteps_done = step;
+ walltime_accounting_set_nsteps_done(walltime_accounting, step);
return 0;
} /* That's all folks */
real gmx_unused cpt_period, real gmx_unused max_hours,
const char gmx_unused *deviceOptions,
unsigned long gmx_unused Flags,
- gmx_runtime_t *runtime)
+ gmx_walltime_accounting_t walltime_accounting)
{
const char *SD = "Steepest Descents";
em_state_t *s_min, *s_try;
nfile, fnm, &outf, &mdebin);
/* Print to log file */
- print_em_start(fplog, cr, runtime, wcycle, SD);
+ print_em_start(fplog, cr, walltime_accounting, wcycle, SD);
/* Set variables for stepsize (in nm). This is the largest
* step that we are going to make in any direction.
s_min->epot, s_min->fmax, s_min->a_fmax, fnormn);
}
- finish_em(cr, outf, runtime, wcycle);
+ finish_em(cr, outf, walltime_accounting, wcycle);
/* To print the actual number of steps we needed somewhere */
inputrec->nsteps = count;
- runtime->nsteps_done = count;
+ walltime_accounting_set_nsteps_done(walltime_accounting, count);
return 0;
} /* That's all folks */
real gmx_unused cpt_period, real gmx_unused max_hours,
const char gmx_unused *deviceOptions,
unsigned long gmx_unused Flags,
- gmx_runtime_t *runtime)
+ gmx_walltime_accounting_t walltime_accounting)
{
const char *NM = "Normal Mode Analysis";
gmx_mdoutf_t *outf;
where();
/* Write start time and temperature */
- print_em_start(fplog, cr, runtime, wcycle, NM);
+ print_em_start(fplog, cr, walltime_accounting, wcycle, NM);
/* fudge nr of steps to nr of atoms */
inputrec->nsteps = natoms*2;
gmx_mtxio_write(ftp2fn(efMTX, nfile, fnm), sz, sz, full_matrix, sparse_matrix);
}
- finish_em(cr, outf, runtime, wcycle);
+ finish_em(cr, outf, walltime_accounting, wcycle);
- runtime->nsteps_done = natoms*2;
+ walltime_accounting_set_nsteps_done(walltime_accounting, natoms*2);
return 0;
}
static void reset_pmeonly_counters(gmx_wallcycle_t wcycle,
- gmx_runtime_t *runtime,
+ gmx_walltime_accounting_t walltime_accounting,
t_nrnb *nrnb, t_inputrec *ir,
gmx_large_int_t step)
{
}
ir->init_step = step;
wallcycle_start(wcycle, ewcRUN);
- runtime_start(runtime);
+ walltime_accounting_start(walltime_accounting);
}
int gmx_pmeonly(gmx_pme_t pme,
t_commrec *cr, t_nrnb *nrnb,
gmx_wallcycle_t wcycle,
- gmx_runtime_t *runtime,
+ gmx_walltime_accounting_t walltime_accounting,
real ewaldcoeff,
t_inputrec *ir)
{
if (ret == pmerecvqxRESETCOUNTERS)
{
/* Reset the cycle and flop counters */
- reset_pmeonly_counters(wcycle, runtime, nrnb, ir, step);
+ reset_pmeonly_counters(wcycle, walltime_accounting, nrnb, ir, step);
}
}
while (ret == pmerecvqxSWITCHGRID || ret == pmerecvqxRESETCOUNTERS);
if (count == 0)
{
wallcycle_start(wcycle, ewcRUN);
- runtime_start(runtime);
+ walltime_accounting_start(walltime_accounting);
}
wallcycle_start(wcycle, ewcPMEMESH);
} /***** end of quasi-loop, we stop with the break above */
while (TRUE);
- runtime_end(runtime);
+ walltime_accounting_end(walltime_accounting);
return 0;
}
#endif
#include <stdio.h>
-#include <time.h>
-#ifdef HAVE_UNISTD_H
-#include <unistd.h>
-#endif
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include "nbnxn_kernels/nbnxn_kernel_gpu_ref.h"
#include "gromacs/utility/gmxmpi.h"
+#include "gromacs/timing/walltime_accounting.h"
#include "adress.h"
#include "qmmm.h"
#include "nbnxn_cuda_data_mgmt.h"
#include "nbnxn_cuda/nbnxn_cuda.h"
-double
-gmx_gettime()
-{
-#if _POSIX_TIMERS > 0
- /* Mac and Windows do not support this */
- struct timespec t;
- double seconds;
-
- clock_gettime(CLOCK_REALTIME, &t);
- seconds = (double) t.tv_sec + 1e-9*(double)t.tv_nsec;
- return seconds;
-#elif defined HAVE_GETTIMEOFDAY
- struct timeval t;
- double seconds;
-
- gettimeofday(&t, NULL);
- seconds = (double) t.tv_sec + 1e-6*(double)t.tv_usec;
-
- return seconds;
-#else
- double seconds;
-
- seconds = time(NULL);
-
- return seconds;
-#endif
-}
-
-double
-gmx_gettime_per_thread()
-{
-#if _POSIX_THREAD_CPUTIME > 0
- struct timespec t;
- double seconds;
-
- clock_gettime(CLOCK_THREAD_CPUTIME_ID, &t);
- seconds = (double) t.tv_sec + 1e-9*(double)t.tv_nsec;
- return seconds;
-#else
- return gmx_gettime();
-#endif
-}
-
-void print_time(FILE *out, gmx_runtime_t *runtime, gmx_large_int_t step,
- t_inputrec *ir, t_commrec gmx_unused *cr)
+void print_time(FILE *out,
+ gmx_walltime_accounting_t walltime_accounting,
+ gmx_large_int_t step,
+ t_inputrec *ir,
+ t_commrec gmx_unused *cr)
{
time_t finish;
char timebuf[STRLEN];
- double dt, time_per_step;
+ double dt, elapsed_seconds, time_per_step;
char buf[48];
#ifndef GMX_THREAD_MPI
if ((step >= ir->nstlist))
{
double seconds_since_epoch = gmx_gettime();
- dt = seconds_since_epoch - runtime->start_time_stamp;
- time_per_step = dt/(step - ir->init_step + 1);
-
- dt = (ir->nsteps + ir->init_step - step) * time_per_step;
+ elapsed_seconds = seconds_since_epoch - walltime_accounting_get_start_time_stamp(walltime_accounting);
+ time_per_step = elapsed_seconds/(step - ir->init_step + 1);
+ dt = (ir->nsteps + ir->init_step - step) * time_per_step;
if (ir->nsteps >= 0)
{
}
else
{
- fprintf(out, ", remaining runtime: %5d s ", (int)dt);
+ fprintf(out, ", remaining wall clock time: %5d s ", (int)dt);
}
}
else
fflush(out);
}
-void runtime_start(gmx_runtime_t *runtime)
-{
- runtime->start_time_stamp = gmx_gettime();
- runtime->start_time_stamp_per_thread = gmx_gettime_per_thread();
- runtime->elapsed_run_time = 0;
-}
-
-void runtime_end(gmx_runtime_t *runtime)
-{
- double now, now_per_thread;
-
- now = gmx_gettime();
- now_per_thread = gmx_gettime_per_thread();
-
- runtime->elapsed_run_time = now - runtime->start_time_stamp;
- runtime->elapsed_run_time_per_thread = now_per_thread - runtime->start_time_stamp_per_thread;
-}
-
-double runtime_get_elapsed_time(gmx_runtime_t *runtime)
-{
- return gmx_gettime() - runtime->start_time_stamp;
-}
-
void print_date_and_time(FILE *fplog, int nodeid, const char *title,
- const gmx_runtime_t *runtime)
+ const gmx_walltime_accounting_t walltime_accounting)
{
int i;
char timebuf[STRLEN];
if (fplog)
{
- if (runtime != NULL)
+ if (walltime_accounting != NULL)
{
- tmptime = (time_t) runtime->start_time_stamp;
+ tmptime = (time_t) walltime_accounting_get_start_time_stamp(walltime_accounting);
gmx_ctime_r(&tmptime, timebuf, STRLEN);
}
else
void finish_run(FILE *fplog, t_commrec *cr,
t_inputrec *inputrec,
t_nrnb nrnb[], gmx_wallcycle_t wcycle,
- gmx_runtime_t *runtime,
+ gmx_walltime_accounting_t walltime_accounting,
wallclock_gpu_t *gputimes,
gmx_bool bWriteStat)
{
t_nrnb *nrnb_tot = NULL;
real delta_t;
double nbfs, mflop;
- double elapsed_run_time_over_all_ranks = 0;
- double elapsed_run_time_per_thread_over_all_ranks = 0;
+ double elapsed_time,
+ elapsed_time_over_all_ranks,
+ elapsed_time_over_all_threads,
+ elapsed_time_over_all_threads_over_all_ranks;
wallcycle_sum(cr, wcycle);
if (cr->nnodes > 1)
nrnb_tot = nrnb;
}
+ elapsed_time = walltime_accounting_get_elapsed_time(walltime_accounting);
+ elapsed_time_over_all_ranks = elapsed_time;
+ elapsed_time_over_all_threads = walltime_accounting_get_elapsed_time_over_all_threads(walltime_accounting);
+ elapsed_time_over_all_threads_over_all_ranks = elapsed_time_over_all_threads;
#ifdef GMX_MPI
if (cr->nnodes > 1)
{
- /* reduce elapsed_run_time over all MPI ranks in the current simulation */
- MPI_Allreduce(&runtime->elapsed_run_time,
- &elapsed_run_time_over_all_ranks,
+ /* reduce elapsed_time over all MPI ranks in the current simulation */
+ MPI_Allreduce(&elapsed_time,
+ &elapsed_time_over_all_ranks,
1, MPI_DOUBLE, MPI_SUM,
cr->mpi_comm_mysim);
- elapsed_run_time_over_all_ranks /= cr->nnodes;
- /* reduce elapsed_run_time_per_thread over all MPI ranks in the current simulation */
- MPI_Allreduce(&runtime->elapsed_run_time_per_thread,
- &elapsed_run_time_per_thread_over_all_ranks,
+ elapsed_time_over_all_ranks /= cr->nnodes;
+ /* Reduce elapsed_time_over_all_threads over all MPI ranks in the
+ * current simulation. */
+ MPI_Allreduce(&elapsed_time_over_all_threads,
+ &elapsed_time_over_all_threads_over_all_ranks,
1, MPI_DOUBLE, MPI_SUM,
cr->mpi_comm_mysim);
}
- else
#endif
- {
- elapsed_run_time_over_all_ranks = runtime->elapsed_run_time;
- elapsed_run_time_per_thread_over_all_ranks = runtime->elapsed_run_time_per_thread;
- }
if (SIMMASTER(cr))
{
if (SIMMASTER(cr))
{
- wallcycle_print(fplog, cr->nnodes, cr->npmenodes, runtime->elapsed_run_time,
+ wallcycle_print(fplog, cr->nnodes, cr->npmenodes,
+ elapsed_time_over_all_ranks,
wcycle, gputimes);
if (EI_DYNAMICS(inputrec->eI))
if (fplog)
{
- print_perf(fplog, elapsed_run_time_per_thread_over_all_ranks,
- elapsed_run_time_over_all_ranks,
- runtime->nsteps_done, delta_t, nbfs, mflop);
+ print_perf(fplog, elapsed_time_over_all_threads_over_all_ranks,
+ elapsed_time_over_all_ranks,
+ walltime_accounting_get_nsteps_done(walltime_accounting),
+ delta_t, nbfs, mflop);
}
if (bWriteStat)
{
- print_perf(stderr, elapsed_run_time_per_thread_over_all_ranks,
- elapsed_run_time_over_all_ranks,
- runtime->nsteps_done, delta_t, nbfs, mflop);
+ print_perf(stderr, elapsed_time_over_all_threads_over_all_ranks,
+ elapsed_time_over_all_ranks,
+ walltime_accounting_get_nsteps_done(walltime_accounting),
+ delta_t, nbfs, mflop);
}
}
}
#include "gmxfio.h"
#include "pme.h"
#include "gbutil.h"
+#include "gromacs/timing/walltime_accounting.h"
#ifdef GMX_X86_SSE2
#include "gmx_x86_sse2.h"
real gmx_unused cpt_period, real gmx_unused max_hours,
const char gmx_unused *deviceOptions,
unsigned long gmx_unused Flags,
- gmx_runtime_t *runtime)
+ gmx_walltime_accounting_t walltime_accounting)
{
const char *TPI = "Test Particle Insertion";
gmx_localtop_t *top;
snew(f, top_global->natoms);
/* Print to log file */
- runtime_start(runtime);
+ walltime_accounting_start(walltime_accounting);
print_date_and_time(fplog, cr->nodeid,
- "Started Test Particle Insertion", runtime);
+ "Started Test Particle Insertion",
+ walltime_accounting);
wallcycle_start(wcycle, ewcRUN);
/* The last charge group is the group to be inserted */
bNotLastFrame = read_next_frame(oenv, status, &rerun_fr);
} /* End of the loop */
- runtime_end(runtime);
+ walltime_accounting_end(walltime_accounting);
close_trj(status);
sfree(sum_UgembU);
- runtime->nsteps_done = frame*inputrec->nsteps;
+ walltime_accounting_set_nsteps_done(walltime_accounting, frame*inputrec->nsteps);
return 0;
}
--- /dev/null
+#
+# This file is part of the GROMACS molecular simulation package.
+#
+# Copyright (c) 2013, by the GROMACS development team, led by
+# David van der Spoel, Berk Hess, Erik Lindahl, and including many
+# others, as listed in the AUTHORS file in the top-level source
+# directory and at http://www.gromacs.org.
+#
+# GROMACS is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public License
+# as published by the Free Software Foundation; either version 2.1
+# of the License, or (at your option) any later version.
+#
+# GROMACS is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with GROMACS; if not, see
+# http://www.gnu.org/licenses, or write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# If you want to redistribute modifications to GROMACS, please
+# consider that scientific software is very special. Version
+# control is crucial - bugs must be traceable. We will be happy to
+# consider code for inclusion in the official distribution, but
+# derived work must not be called official GROMACS. Details are found
+# in the README & COPYING files - if they are missing, get the
+# official version at http://www.gromacs.org.
+#
+# To help us fund GROMACS development, we humbly ask that you cite
+# the research papers on the package. Check out http://www.gromacs.org.
+
+file(GLOB TIMING_SOURCES *.cpp *.c)
+set(LIBGROMACS_SOURCES ${LIBGROMACS_SOURCES} ${TIMING_SOURCES} PARENT_SCOPE)
+
+# No installed headers for this module
+
+if (BUILD_TESTING)
+# add_subdirectory(tests)
+endif (BUILD_TESTING)
--- /dev/null
+/*
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * GROningen MAchine for Chemical Simulations
+ *
+ * VERSION 3.2.0
+ * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
+ * Copyright (c) 2013, The GROMACS development team,
+ * check out http://www.gromacs.org for more information.
+
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * If you want to redistribute modifications, please consider that
+ * scientific software is very special. Version control is crucial -
+ * bugs must be traceable. We will be happy to consider code for
+ * inclusion in the official distribution, but derived work must not
+ * be called official GROMACS. Details are found in the README & COPYING
+ * files - if they are missing, get the official version at www.gromacs.org.
+ *
+ * To help us fund GROMACS development, we humbly ask that you cite
+ * the papers on the package - you can find them in the top README file.
+ *
+ * For more info, check our website at http://www.gromacs.org
+ *
+ * And Hey:
+ * GROwing Monsters And Cloning Shrimps
+ */
+#include "gromacs/timing/walltime_accounting.h"
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include "gromacs/legacyheaders/smalloc.h"
+#include "gromacs/legacyheaders/types/simple.h"
+
+#include <time.h>
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+/* TODO in future: convert gmx_walltime_accounting to a class,
+ * resolve who should have responsibility for recording the number of
+ * steps done, consider whether parts of finish_time, print_perf,
+ * wallcycle_print belong in this module.
+ *
+ * If/when any kind of task parallelism is implemented (even OpenMP
+ * regions simultaneously assigned to different tasks), consider
+ * whether this data structure (and/or cycle counters) should be
+ * maintained on a per-OpenMP-thread basis. */
+
+/*! \brief Manages caching wall-clock time measurements for
+ * simulations */
+typedef struct gmx_walltime_accounting {
+    //! Seconds since the epoch recorded at the start of the simulation
+    double start_time_stamp;
+    //! Seconds since the epoch recorded at the start of the simulation for this thread
+    double start_time_stamp_per_thread;
+    //! Total seconds elapsed over the simulation (cached by walltime_accounting_end())
+    double elapsed_time;
+    //! Total seconds elapsed over the simulation running this thread (cached by walltime_accounting_end())
+    double elapsed_time_over_all_threads;
+    /*! \brief Number of OpenMP threads that will be launched by this
+     * MPI rank.
+     *
+     * This is used to scale elapsed_time_over_all_threads so
+     * that any combination of real MPI, thread MPI and OpenMP (even
+     * mdrun -ntomp_pme) processes/threads would (when run at maximum
+     * efficiency) return values such that the sum of
+     * elapsed_time_over_all_threads over all threads was constant
+     * with respect to parallelism implementation. */
+    int numOpenMPThreads;
+    //! Set by integrators to report the amount of work they did
+    gmx_large_int_t nsteps_done;
+} t_gmx_walltime_accounting;
+
+/*! \brief Calls system timing routines (e.g. clock_gettime) to get
+ * the (fractional) number of seconds elapsed since the epoch when
+ * this thread was executing.
+ *
+ * This can be used to measure system load. This can be unreliable if
+ * threads migrate between sockets. If thread-specific timers are not
+ * supported by the OS (e.g. if the OS is not POSIX-compliant), this
+ * function is implemented by gmx_gettime. */
+static double gmx_gettime_per_thread();
+
+// TODO In principle, all this should get protected by checks that
+// walltime_accounting is not null. In practice, that NULL condition
+// does not happen, and future refactoring will likely enforce it by
+// having the gmx_walltime_accounting_t object be owned by the runner
+// object. When these become member functions, existence will be
+// guaranteed.
+
+/*! Allocates and zero-initializes a walltime accounting structure.
+ *
+ * numOpenMPThreads is stored so that walltime_accounting_end() can
+ * scale the per-thread elapsed time over all OpenMP threads of this
+ * rank. The caller owns the returned object and must free it with
+ * walltime_accounting_destroy(). */
+gmx_walltime_accounting_t
+walltime_accounting_init(int numOpenMPThreads)
+{
+    gmx_walltime_accounting_t walltime_accounting;
+
+    snew(walltime_accounting, 1);
+    walltime_accounting->start_time_stamp = 0;
+    walltime_accounting->start_time_stamp_per_thread = 0;
+    walltime_accounting->elapsed_time = 0;
+    /* Initialize explicitly rather than relying on snew() zeroing,
+     * for consistency with the other fields. */
+    walltime_accounting->elapsed_time_over_all_threads = 0;
+    walltime_accounting->nsteps_done = 0;
+    walltime_accounting->numOpenMPThreads = numOpenMPThreads;
+
+    return walltime_accounting;
+}
+
+/*! Frees a walltime accounting structure allocated by
+ * walltime_accounting_init(). */
+void
+walltime_accounting_destroy(gmx_walltime_accounting_t walltime_accounting)
+{
+    sfree(walltime_accounting);
+}
+
+/*! Records the wall-clock and per-thread time stamps, and resets the
+ * cached elapsed time and step count, starting a new measurement
+ * interval. */
+void
+walltime_accounting_start(gmx_walltime_accounting_t walltime_accounting)
+{
+    walltime_accounting->start_time_stamp = gmx_gettime();
+    walltime_accounting->start_time_stamp_per_thread = gmx_gettime_per_thread();
+    walltime_accounting->elapsed_time = 0;
+    walltime_accounting->nsteps_done = 0;
+}
+
+/*! Measures and caches the elapsed time since
+ * walltime_accounting_start(), both for this rank (elapsed_time) and
+ * scaled by the number of OpenMP threads
+ * (elapsed_time_over_all_threads). */
+void
+walltime_accounting_end(gmx_walltime_accounting_t walltime_accounting)
+{
+    double now, now_per_thread;
+
+    now = gmx_gettime();
+    now_per_thread = gmx_gettime_per_thread();
+
+    walltime_accounting->elapsed_time = now - walltime_accounting->start_time_stamp;
+    walltime_accounting->elapsed_time_over_all_threads = now_per_thread - walltime_accounting->start_time_stamp_per_thread;
+    /* For thread-MPI, the per-thread CPU timer makes this just
+     * work. For OpenMP threads, the per-thread CPU timer measurement
+     * needs to be multiplied by the number of OpenMP threads used,
+     * under the current assumption that all regions ever opened
+     * within a process are of the same size, and each thread should
+     * keep one core busy.
+     */
+    walltime_accounting->elapsed_time_over_all_threads *= walltime_accounting->numOpenMPThreads;
+}
+
+/*! Returns the wall-clock time elapsed since
+ * walltime_accounting_start(), measured at the moment of the call
+ * (not the value cached by walltime_accounting_end()). */
+double
+walltime_accounting_get_current_elapsed_time(gmx_walltime_accounting_t walltime_accounting)
+{
+    return gmx_gettime() - walltime_accounting->start_time_stamp;
+}
+
+/*! Returns the elapsed wall-clock time cached by
+ * walltime_accounting_end(); zero if walltime_accounting_end() has
+ * not been called since the last walltime_accounting_start(). */
+double
+walltime_accounting_get_elapsed_time(gmx_walltime_accounting_t walltime_accounting)
+{
+    return walltime_accounting->elapsed_time;
+}
+
+/*! Returns the elapsed time over all threads of this rank, as cached
+ * and scaled by walltime_accounting_end(). */
+double
+walltime_accounting_get_elapsed_time_over_all_threads(gmx_walltime_accounting_t walltime_accounting)
+{
+    return walltime_accounting->elapsed_time_over_all_threads;
+}
+
+/*! Returns the seconds-since-epoch time stamp recorded by the last
+ * call to walltime_accounting_start(). */
+double
+walltime_accounting_get_start_time_stamp(gmx_walltime_accounting_t walltime_accounting)
+{
+    return walltime_accounting->start_time_stamp;
+}
+
+/*! Returns the number of integration steps recorded via
+ * walltime_accounting_set_nsteps_done().
+ *
+ * NOTE(review): the gmx_large_int_t step count is returned as a
+ * double, which can lose precision for very large step counts;
+ * consider returning gmx_large_int_t (requires the matching change
+ * to the declaration in walltime_accounting.h). */
+double
+walltime_accounting_get_nsteps_done(gmx_walltime_accounting_t walltime_accounting)
+{
+    return walltime_accounting->nsteps_done;
+}
+
+/*! Records the number of integration steps performed, for later
+ * retrieval by walltime_accounting_get_nsteps_done(). */
+void
+walltime_accounting_set_nsteps_done(gmx_walltime_accounting_t walltime_accounting,
+                                    gmx_large_int_t nsteps_done)
+{
+    walltime_accounting->nsteps_done = nsteps_done;
+}
+
+/*! Returns the (fractional) number of seconds since the epoch.
+ *
+ * Uses clock_gettime() where available, falling back first to
+ * gettimeofday() and then to time(), so the resolution ranges from
+ * nanoseconds down to whole seconds depending on the platform. */
+double
+gmx_gettime(void)
+{
+#if defined HAVE_CLOCK_GETTIME && _POSIX_TIMERS >= 0
+    /* Mac and Windows do not support this. For added fun, Windows
+     * defines _POSIX_TIMERS without actually providing the
+     * implementation. */
+    struct timespec t;
+    double seconds;
+
+    clock_gettime(CLOCK_REALTIME, &t);
+    seconds = (double) t.tv_sec + 1e-9*(double)t.tv_nsec;
+
+    return seconds;
+#elif defined HAVE_GETTIMEOFDAY
+    // Note that gettimeofday() is deprecated by POSIX, but since Mac
+    // and Windows do not yet support POSIX, we are still stuck.
+    struct timeval t;
+    double seconds;
+
+    gettimeofday(&t, NULL);
+    seconds = (double) t.tv_sec + 1e-6*(double)t.tv_usec;
+
+    return seconds;
+#else
+    /* Whole-second fallback when no better timer is available. */
+    double seconds;
+
+    seconds = (double) time(NULL);
+
+    return seconds;
+#endif
+}
+
+/*! Returns per-thread CPU time in seconds via
+ * CLOCK_THREAD_CPUTIME_ID where the OS supports it; otherwise falls
+ * back to wall-clock time from gmx_gettime(). */
+static double
+gmx_gettime_per_thread(void)
+{
+#if defined HAVE_CLOCK_GETTIME && _POSIX_THREAD_CPUTIME >= 0
+    struct timespec t;
+    double seconds;
+
+    clock_gettime(CLOCK_THREAD_CPUTIME_ID, &t);
+    seconds = (double) t.tv_sec + 1e-9*(double)t.tv_nsec;
+
+    return seconds;
+#else
+    return gmx_gettime();
+#endif
+}
--- /dev/null
+/*
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * GROningen MAchine for Chemical Simulations
+ *
+ * VERSION 3.2.0
+ * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
+ * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
+ * Copyright (c) 2001-2004, The GROMACS development team,
+ * check out http://www.gromacs.org for more information.
+
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * If you want to redistribute modifications, please consider that
+ * scientific software is very special. Version control is crucial -
+ * bugs must be traceable. We will be happy to consider code for
+ * inclusion in the official distribution, but derived work must not
+ * be called official GROMACS. Details are found in the README & COPYING
+ * files - if they are missing, get the official version at www.gromacs.org.
+ *
+ * To help us fund GROMACS development, we humbly ask that you cite
+ * the papers on the package - you can find them in the top README file.
+ *
+ * For more info, check our website at http://www.gromacs.org
+ *
+ * And Hey:
+ * Gromacs Runs On Most of All Computer Systems
+ */
+
+#ifndef GMX_TIMING_WALLTIME_ACCOUNTING_H
+#define GMX_TIMING_WALLTIME_ACCOUNTING_H
+
+#include "gromacs/legacyheaders/types/simple.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if 0
+}
+#endif
+
+/*! Contains per-process and per-thread data about elapsed wall-clock
+ * times and integration steps performed. */
+typedef struct gmx_walltime_accounting *gmx_walltime_accounting_t;
+
+//! Constructor
+gmx_walltime_accounting_t
+walltime_accounting_init(int numOpenMPThreads);
+
+//! Destructor
+void
+walltime_accounting_destroy(gmx_walltime_accounting_t walltime_accounting);
+
+/*! Record initial time stamps, e.g. at run start or counter
+ * re-initialization time */
+void
+walltime_accounting_start(gmx_walltime_accounting_t walltime_accounting);
+
+/*! Measure and cache the elapsed wall-clock time since
+ * walltime_accounting_start */
+void
+walltime_accounting_end(gmx_walltime_accounting_t walltime_accounting);
+
+/*! Measure and return the elapsed wall-clock time since
+ * walltime_accounting_start */
+double
+walltime_accounting_get_current_elapsed_time(gmx_walltime_accounting_t walltime_accounting);
+
+//! Get the cached wall-clock time for this node
+double
+walltime_accounting_get_elapsed_time(gmx_walltime_accounting_t walltime_accounting);
+
+//! Get the cached wall-clock time, multiplied by the number of OpenMP threads
+double
+walltime_accounting_get_elapsed_time_over_all_threads(gmx_walltime_accounting_t walltime_accounting);
+
+//! Get the cached initial time stamp for this node
+double
+walltime_accounting_get_start_time_stamp(gmx_walltime_accounting_t walltime_accounting);
+
+//! Get the number of integration steps done
+double
+walltime_accounting_get_nsteps_done(gmx_walltime_accounting_t walltime_accounting);
+
+/*! Set the number of integration steps done
+ *
+ * TODO consider whether this should get done in walltime_accounting_end */
+void
+walltime_accounting_set_nsteps_done(gmx_walltime_accounting_t walltime_accounting,
+ gmx_large_int_t nsteps_done);
+
+/*! \brief Calls system timing routines (e.g. clock_gettime) to get the
+ * (fractional) number of seconds elapsed since the epoch.
+ *
+ * Resolution is implementation-dependent, but typically nanoseconds
+ * or microseconds. */
+double gmx_gettime();
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GMX_TIMING_WALLTIME_ACCOUNTING_H */
#include "nbnxn_cuda_data_mgmt.h"
#include "gromacs/utility/gmxmpi.h"
+#include "gromacs/timing/walltime_accounting.h"
#ifdef GMX_FAHCORE
#include "corewrap.h"
gmx_large_int_t step,
gmx_large_int_t *step_rel, t_inputrec *ir,
gmx_wallcycle_t wcycle, t_nrnb *nrnb,
- gmx_runtime_t *runtime,
+ gmx_walltime_accounting_t walltime_accounting,
nbnxn_cuda_ptr_t cu_nbv)
{
char sbuf[STEPSTRSIZE];
ir->nsteps -= *step_rel;
*step_rel = 0;
wallcycle_start(wcycle, ewcRUN);
- runtime_start(runtime);
- print_date_and_time(fplog, cr->nodeid, "Restarted time", runtime);
+ walltime_accounting_start(walltime_accounting);
+ print_date_and_time(fplog, cr->nodeid, "Restarted time", walltime_accounting);
}
double do_md(FILE *fplog, t_commrec *cr, int nfile, const t_filenm fnm[],
real cpt_period, real max_hours,
const char gmx_unused *deviceOptions,
unsigned long Flags,
- gmx_runtime_t *runtime)
+ gmx_walltime_accounting_t walltime_accounting)
{
gmx_mdoutf_t *outf;
gmx_large_int_t step, step_rel;
- double elapsed_run_time;
+ double elapsed_time;
double t, t0, lam0[efptNR];
gmx_bool bGStatEveryStep, bGStat, bCalcVir, bCalcEner;
gmx_bool bNS, bNStList, bSimAnn, bStopCM, bRerunMD, bNotLastFrame = FALSE,
}
/* Set and write start time */
- runtime_start(runtime);
- print_date_and_time(fplog, cr->nodeid, "Started mdrun", runtime);
+ walltime_accounting_start(walltime_accounting);
+ print_date_and_time(fplog, cr->nodeid, "Started mdrun", walltime_accounting);
wallcycle_start(wcycle, ewcRUN);
if (fplog)
{
copy_mat(state->fvir_prev, force_vir);
}
- elapsed_run_time = runtime_get_elapsed_time(runtime);
+ elapsed_time = walltime_accounting_get_current_elapsed_time(walltime_accounting);
/* Check whether everything is still allright */
if (((int)gmx_get_stop_condition() > handled_stop_condition)
handled_stop_condition = (int)gmx_get_stop_condition();
}
else if (MASTER(cr) && (bNS || ir->nstlist <= 0) &&
- (max_hours > 0 && elapsed_run_time > max_hours*60.0*60.0*0.99) &&
+ (max_hours > 0 && elapsed_time > max_hours*60.0*60.0*0.99) &&
gs.sig[eglsSTOPCOND] == 0 && gs.set[eglsSTOPCOND] == 0)
{
/* Signal to terminate the run */
}
if (bResetCountersHalfMaxH && MASTER(cr) &&
- elapsed_run_time > max_hours*60.0*60.0*0.495)
+ elapsed_time > max_hours*60.0*60.0*0.495)
{
gs.sig[eglsRESETCOUNTERS] = 1;
}
if (MASTER(cr) && ((bGStat || !PAR(cr)) &&
cpt_period >= 0 &&
(cpt_period == 0 ||
- elapsed_run_time >= nchkpt*cpt_period*60.0)) &&
+ elapsed_time >= nchkpt*cpt_period*60.0)) &&
gs.set[eglsCHKPT] == 0)
{
gs.sig[eglsCHKPT] = 1;
/* Gets written into the state at the beginning of next loop*/
state->fep_state = lamnew;
}
-
- /* Remaining runtime */
+ /* Print the remaining wall clock time for the run */
if (MULTIMASTER(cr) && (do_verbose || gmx_got_usr_signal()) && !bPMETuneRunning)
{
if (shellfc)
{
fprintf(stderr, "\n");
}
- print_time(stderr, runtime, step, ir, cr);
+ print_time(stderr, walltime_accounting, step, ir, cr);
}
/* Replica exchange */
gs.set[eglsRESETCOUNTERS] != 0)
{
/* Reset all the counters related to performance over the run */
- reset_all_counters(fplog, cr, step, &step_rel, ir, wcycle, nrnb, runtime,
+ reset_all_counters(fplog, cr, step, &step_rel, ir, wcycle, nrnb, walltime_accounting,
fr->nbv != NULL && fr->nbv->bUseGPU ? fr->nbv->cu_nbv : NULL);
wcycle_set_reset_counters(wcycle, -1);
if (!(cr->duty & DUTY_PME))
gmx_pme_send_resetcounters(cr, step);
}
/* Correct max_hours for the elapsed time */
- max_hours -= elapsed_run_time/(60.0*60.0);
+ max_hours -= elapsed_time/(60.0*60.0);
bResetCountersHalfMaxH = FALSE;
gs.set[eglsRESETCOUNTERS] = 0;
}
/* End of main MD loop */
debug_gmx();
- /* Stop the time */
- runtime_end(runtime);
+ /* Stop measuring walltime */
+ walltime_accounting_end(walltime_accounting);
if (bRerunMD && MASTER(cr))
{
print_replica_exchange_statistics(fplog, repl_ex);
}
- runtime->nsteps_done = step_rel;
+ walltime_accounting_set_nsteps_done(walltime_accounting, step_rel);
return 0;
}
{ "-confout", FALSE, etBOOL, {&bConfout},
"HIDDENWrite the last configuration with [TT]-c[tt] and force checkpointing at the last step" },
{ "-stepout", FALSE, etINT, {&nstepout},
- "HIDDENFrequency of writing the remaining runtime" },
+ "HIDDENFrequency of writing the remaining wall clock time for the run" },
{ "-resetstep", FALSE, etINT, {&resetstep},
"HIDDENReset cycle counters after these many time steps" },
{ "-resethway", FALSE, etBOOL, {&bResetCountersHalfWay},
int repl_ex_seed, real pforce, real cpt_period, real max_hours,
const char *deviceOptions, unsigned long Flags)
{
- gmx_bool bForceUseGPU, bTryUseGPU;
- double nodetime = 0, realtime;
- t_inputrec *inputrec;
- t_state *state = NULL;
- matrix box;
- gmx_ddbox_t ddbox = {0};
- int npme_major, npme_minor;
- real tmpr1, tmpr2;
- t_nrnb *nrnb;
- gmx_mtop_t *mtop = NULL;
- t_mdatoms *mdatoms = NULL;
- t_forcerec *fr = NULL;
- t_fcdata *fcd = NULL;
- real ewaldcoeff = 0;
- gmx_pme_t *pmedata = NULL;
- gmx_vsite_t *vsite = NULL;
- gmx_constr_t constr;
- int i, m, nChargePerturbed = -1, status, nalloc;
- char *gro;
- gmx_wallcycle_t wcycle;
- gmx_bool bReadRNG, bReadEkin;
- int list;
- gmx_runtime_t runtime;
- int rc;
- gmx_large_int_t reset_counters;
- gmx_edsam_t ed = NULL;
- t_commrec *cr_old = cr;
- int nthreads_pme = 1;
- int nthreads_pp = 1;
- gmx_membed_t membed = NULL;
- gmx_hw_info_t *hwinfo = NULL;
- master_inf_t minf = {-1, FALSE};
+ gmx_bool bForceUseGPU, bTryUseGPU;
+ double nodetime = 0, realtime;
+ t_inputrec *inputrec;
+ t_state *state = NULL;
+ matrix box;
+ gmx_ddbox_t ddbox = {0};
+ int npme_major, npme_minor;
+ real tmpr1, tmpr2;
+ t_nrnb *nrnb;
+ gmx_mtop_t *mtop = NULL;
+ t_mdatoms *mdatoms = NULL;
+ t_forcerec *fr = NULL;
+ t_fcdata *fcd = NULL;
+ real ewaldcoeff = 0;
+ gmx_pme_t *pmedata = NULL;
+ gmx_vsite_t *vsite = NULL;
+ gmx_constr_t constr;
+ int i, m, nChargePerturbed = -1, status, nalloc;
+ char *gro;
+ gmx_wallcycle_t wcycle;
+ gmx_bool bReadRNG, bReadEkin;
+ int list;
+ gmx_walltime_accounting_t walltime_accounting = NULL;
+ int rc;
+ gmx_large_int_t reset_counters;
+ gmx_edsam_t ed = NULL;
+ t_commrec *cr_old = cr;
+ int nthreads_pme = 1;
+ int nthreads_pp = 1;
+ gmx_membed_t membed = NULL;
+ gmx_hw_info_t *hwinfo = NULL;
+ master_inf_t minf = {-1, FALSE};
/* CAUTION: threads may be started later on in this function, so
cr doesn't reflect the final parallel state right now */
if (cr->duty & DUTY_PP)
{
+ /* Assumes uniform use of the number of OpenMP threads */
+ walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(emntDefault));
+
if (inputrec->ePull != epullNO)
{
/* Initialize pull code */
cpt_period, max_hours,
deviceOptions,
Flags,
- &runtime);
+ walltime_accounting);
if (inputrec->ePull != epullNO)
{
else
{
/* do PME only */
- gmx_pmeonly(*pmedata, cr, nrnb, wcycle, &runtime, ewaldcoeff, inputrec);
+ walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(emntPME));
+ gmx_pmeonly(*pmedata, cr, nrnb, wcycle, walltime_accounting, ewaldcoeff, inputrec);
}
wallcycle_stop(wcycle, ewcRUN);
* if rerunMD, don't write last frame again
*/
finish_run(fplog, cr,
- inputrec, nrnb, wcycle, &runtime,
+ inputrec, nrnb, wcycle, walltime_accounting,
fr != NULL && fr->nbv != NULL && fr->nbv->bUseGPU ?
nbnxn_cuda_get_timings(fr->nbv->cu_nbv) : NULL,
EI_DYNAMICS(inputrec->eI) && !MULTISIM(cr));
gmx_hardware_info_free(hwinfo);
/* Does what it says */
- print_date_and_time(fplog, cr->nodeid, "Finished mdrun", &runtime);
+ print_date_and_time(fplog, cr->nodeid, "Finished mdrun", walltime_accounting);
+ walltime_accounting_destroy(walltime_accounting);
/* Close logfile already here if we were appending to it */
if (MASTER(cr) && (Flags & MD_APPENDFILES))