# Machinery for running the external project
set(EXTERNAL_FFTW_VERSION 3.3.3)
+set(GMX_BUILD_OWN_FFTW_URL "http://www.fftw.org/fftw-${EXTERNAL_FFTW_VERSION}.tar.gz" CACHE PATH "URL from which to download FFTW (use an absolute path when offline)")
+mark_as_advanced(GMX_BUILD_OWN_FFTW_URL)
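+# To build offline, point the cache variable at a local tarball, e.g.
+# (the path is illustrative):
+#   cmake -DGMX_BUILD_OWN_FFTW=ON \
+#         -DGMX_BUILD_OWN_FFTW_URL=/path/to/fftw-3.3.3.tar.gz <source-dir>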
set(EXTERNAL_FFTW_MD5SUM 0a05ca9c7b3bfddc8278e7c40791a1c2)
set(EXTERNAL_FFTW_BUILD_TARGET fftwBuild)
include(ExternalProject)
# (i.e. newer than version 2.8.11.2), consider reverting to using an
# md5sum check to avoid needing the above warning
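# A sketch of what that check could look like (URL_MD5 is a standard
# ExternalProject_Add keyword; EXTERNAL_FFTW_MD5SUM is defined above):
#   ExternalProject_Add(${EXTERNAL_FFTW_BUILD_TARGET}
#                       URL     "${GMX_BUILD_OWN_FFTW_URL}"
#                       URL_MD5 ${EXTERNAL_FFTW_MD5SUM})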
ExternalProject_add(${EXTERNAL_FFTW_BUILD_TARGET}
- URL "http://www.fftw.org/fftw-${EXTERNAL_FFTW_VERSION}.tar.gz"
+ URL "${GMX_BUILD_OWN_FFTW_URL}"
CONFIGURE_COMMAND <SOURCE_DIR>/configure --prefix=<INSTALL_DIR> --libdir=<INSTALL_DIR>/lib --disable-fortran
${GMX_BUILD_OWN_FFTW_SHARED_FLAG} ${GMX_BUILD_OWN_FFTW_OPTIMIZATION_CONFIGURATION}
${GMX_BUILD_OWN_FFTW_PREC}
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
int nbmin, int nbmax)
{
int i, j;
- double vvhh, vv, v, h, hh2, vv2, varv, hh, varh, tt, cv, cp, alpha, kappa, dcp, et, varet;
+ double vv, v, h, varv, hh, varh, tt, cv, cp, alpha, kappa, dcp, et, varet;
double NANO3;
enum {
eVol, eEnth, eTemp, eEtot, eNR
fprintf(fp,"Found %s data.\n",my_ener[i]);
*/ }
/* Compute it all! */
- vvhh = alpha = kappa = cp = dcp = cv = NOTSET;
+ alpha = kappa = cp = dcp = cv = NOTSET;
/* Temperature */
tt = NOTSET;
/* Alpha, dcp */
if ((ii[eVol] < nset) && (ii[eEnth] < nset) && (ii[eTemp] < nset))
{
- vvhh = 0;
+ double v_sum, h_sum, vh_sum, v_aver, h_aver, vh_aver;
+ vh_sum = v_sum = h_sum = 0;
for (j = 0; (j < edat->nframes); j++)
{
- v = edat->s[ii[eVol]].ener[j]*NANO3;
- h = KILO*edat->s[ii[eEnth]].ener[j]/AVOGADRO;
- vvhh += (v*h);
+ v = edat->s[ii[eVol]].ener[j]*NANO3;
+ h = KILO*edat->s[ii[eEnth]].ener[j]/AVOGADRO;
+ v_sum += v;
+ h_sum += h;
+ vh_sum += (v*h);
}
- vvhh /= edat->nframes;
- alpha = (vvhh-vv*hh)/(vv*BOLTZMANN*tt*tt);
- dcp = (vv*AVOGADRO/nmol)*tt*sqr(alpha)/(kappa);
+ vh_aver = vh_sum / edat->nframes;
+ v_aver = v_sum / edat->nframes;
+ h_aver = h_sum / edat->nframes;
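+ /* Fluctuation formula: the V-H covariance yields the thermal expansion
+  * coefficient, alpha = (<VH> - <V><H>) / (<V> kB T^2); dcp then follows
+  * from alpha, T, the molar volume and the (separately computed)
+  * compressibility kappa. */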
+ alpha = (vh_aver-v_aver*h_aver)/(v_aver*BOLTZMANN*tt*tt);
+ dcp = (v_aver*AVOGADRO/nmol)*tt*sqr(alpha)/(kappa);
}
if (tt != NOTSET)
{
fprintf(fp, "varv = %10g (m^6)\n", varv*AVOGADRO/nmol);
}
- if (vvhh != NOTSET)
- {
- fprintf(fp, "vvhh = %10g (m^3 J)\n", vvhh);
- }
}
if (vv != NOTSET)
{
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
void print_date_and_time(FILE *log, int pid, const char *title,
const gmx_walltime_accounting_t walltime_accounting);
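+/* Prints a "Started <name>" banner with date and time to the log file;
+ * shared by the MD, EM and TPI runners so the start-up message is
+ * produced in one place. */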
+void print_start(FILE *fplog, t_commrec *cr,
+ gmx_walltime_accounting_t walltime_accounting,
+ const char *name);
+
void finish_run(FILE *log, t_commrec *cr,
t_inputrec *inputrec,
t_nrnb nrnb[], gmx_wallcycle_t wcycle,
#include "force.h"
#include "mdrun.h"
#include "md_support.h"
+#include "sim_util.h"
#include "domdec.h"
#include "partdec.h"
#include "mdatoms.h"
gmx_wallcycle_t wcycle,
const char *name)
{
- char buf[STRLEN];
-
walltime_accounting_start(walltime_accounting);
-
- sprintf(buf, "Started %s", name);
- print_date_and_time(fplog, cr->nodeid, buf, NULL);
-
wallcycle_start(wcycle, ewcRUN);
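+ /* Report the start banner via the shared helper */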
+ print_start(fplog, cr, walltime_accounting, name);
}
static void em_time_end(gmx_walltime_accounting_t walltime_accounting,
gmx_wallcycle_t wcycle)
}
}
+void print_start(FILE *fplog, t_commrec *cr,
+ gmx_walltime_accounting_t walltime_accounting,
+ const char *name)
+{
+ char buf[STRLEN];
+
+ sprintf(buf, "Started %s", name);
+ print_date_and_time(fplog, cr->nodeid, buf, walltime_accounting);
+}
+
static void sum_forces(int start, int end, rvec f[], rvec flr[])
{
int i;
/* Print to log file */
walltime_accounting_start(walltime_accounting);
- print_date_and_time(fplog, cr->nodeid,
- "Started Test Particle Insertion",
- walltime_accounting);
wallcycle_start(wcycle, ewcRUN);
+ print_start(fplog, cr, walltime_accounting, "Test Particle Insertion");
/* The last charge group is the group to be inserted */
cg_tp = top->cgs.nr - 1;
fprintf(fplog, "\n");
}
- /* Set and write start time */
walltime_accounting_start(walltime_accounting);
- print_date_and_time(fplog, cr->nodeid, "Started mdrun", walltime_accounting);
wallcycle_start(wcycle, ewcRUN);
- if (fplog)
- {
- fprintf(fplog, "\n");
- }
+ print_start(fplog, cr, walltime_accounting, "mdrun");
/* safest point to do file checkpointing is here. More general point would be immediately before integrator call */
#ifdef GMX_FAHCORE
"With thread-MPI there are additional options [TT]-nt[tt], which sets",
"the total number of threads, and [TT]-ntmpi[tt], which sets the number",
"of thread-MPI threads.",
- "Note that using combined MPI+OpenMP parallelization is almost always",
- "slower than single parallelization, except at the scaling limit, where",
- "especially OpenMP parallelization of PME reduces the communication cost.",
- "OpenMP-only parallelization is much faster than MPI-only parallelization",
+ "The number of OpenMP threads used by [TT]mdrun[tt] can also be set with",
+ "the standard environment variable, [TT]OMP_NUM_THREADS[tt].",
+ "The [TT]GMX_PME_NUM_THREADS[tt] environment variable can be used to specify",
+ "the number of threads used by the PME-only processes.[PAR]",
+ "Note that combined MPI+OpenMP parallelization is in many cases",
+ "slower than either on its own. However, at high parallelization, using the",
+ "combination is often beneficial as it reduces the number of domains and/or",
+ "the number of MPI ranks. (Less and larger domains can improve scaling,",
+ "with separate PME processes fewer MPI ranks reduces communication cost.)",
+ "OpenMP-only parallelization is typically faster than MPI-only parallelization",
"on a single CPU(-die). Since we currently don't have proper hardware",
"topology detection, [TT]mdrun[tt] compiled with thread-MPI will only",
"automatically use OpenMP-only parallelization when you use up to 4",