Merge "Merge branch 'release-4-6'"
author    Teemu Murtola <teemu.murtola@gmail.com>
          Wed, 29 Jan 2014 05:14:11 +0000 (06:14 +0100)
committer Gerrit Code Review <gerrit@gerrit.gromacs.org>
          Wed, 29 Jan 2014 05:14:11 +0000 (06:14 +0100)
src/contrib/fftw/CMakeLists.txt
src/gromacs/gmxana/gmx_energy.c
src/gromacs/legacyheaders/sim_util.h
src/gromacs/mdlib/minimize.c
src/gromacs/mdlib/sim_util.c
src/gromacs/mdlib/tpi.c
src/programs/mdrun/md.c
src/programs/mdrun/mdrun.cpp

src/contrib/fftw/CMakeLists.txt
index d3bd2a9c7a637d50af832db23fd0a4fb0c524440..e02a0c1ac112795627f2ab4aaf57905a7e7ddd78 100644
@@ -67,6 +67,8 @@ endif()
 
 # Machinery for running the external project
 set(EXTERNAL_FFTW_VERSION 3.3.3)
+set(GMX_BUILD_OWN_FFTW_URL "http://www.fftw.org/fftw-${EXTERNAL_FFTW_VERSION}.tar.gz" CACHE PATH "URL from which to download fftw (use an absolute path when offline)")
+mark_as_advanced(GMX_BUILD_OWN_FFTW_URL)
 set(EXTERNAL_FFTW_MD5SUM 0a05ca9c7b3bfddc8278e7c40791a1c2)
 set (EXTERNAL_FFTW_BUILD_TARGET fftwBuild)
 include(ExternalProject)
@@ -78,7 +80,7 @@ message(WARNING "The GROMACS build will download FFTW ${EXTERNAL_FFTW_VERSION} a
 # (ie. at least version > 2.8.11.2), consider reverting to using an
 # md5sum check to avoid needing the above warning
     ExternalProject_add(${EXTERNAL_FFTW_BUILD_TARGET}
-        URL "http://www.fftw.org/fftw-${EXTERNAL_FFTW_VERSION}.tar.gz"
+        URL "${GMX_BUILD_OWN_FFTW_URL}"
         CONFIGURE_COMMAND <SOURCE_DIR>/configure --prefix=<INSTALL_DIR> --libdir=<INSTALL_DIR>/lib --disable-fortran
         ${GMX_BUILD_OWN_FFTW_SHARED_FLAG} ${GMX_BUILD_OWN_FFTW_OPTIMIZATION_CONFIGURATION}
         ${GMX_BUILD_OWN_FFTW_PREC}
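
With the URL now a cache variable, a machine without network access can
point the FFTW download at a local tarball. A minimal sketch of the
intended usage; the -DGMX_BUILD_OWN_FFTW=ON switch that enables this
machinery is assumed from the surrounding build files:

    cmake .. -DGMX_BUILD_OWN_FFTW=ON \
             -DGMX_BUILD_OWN_FFTW_URL=/path/to/fftw-3.3.3.tar.gz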

src/gromacs/gmxana/gmx_energy.c
index 8192741d06050c483735c1eafd9e2fc48347e878..6ea86f10809d0177c13178ac926f9082cc93eb0a 100644
@@ -3,7 +3,7 @@
  *
  * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
  * Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014, by the GROMACS development team, led by
  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
  * and including many others, as listed in the AUTHORS file in the
  * top-level source directory and at http://www.gromacs.org.
@@ -980,7 +980,7 @@ static void calc_fluctuation_props(FILE *fp,
                                    int nbmin, int nbmax)
 {
     int    i, j;
-    double vvhh, vv, v, h, hh2, vv2, varv, hh, varh, tt, cv, cp, alpha, kappa, dcp, et, varet;
+    double vv, v, h, varv, hh, varh, tt, cv, cp, alpha, kappa, dcp, et, varet;
     double NANO3;
     enum {
         eVol, eEnth, eTemp, eEtot, eNR
@@ -1010,7 +1010,7 @@ static void calc_fluctuation_props(FILE *fp,
             fprintf(fp,"Found %s data.\n",my_ener[i]);
  */ }
     /* Compute it all! */
-    vvhh = alpha = kappa = cp = dcp = cv = NOTSET;
+    alpha = kappa = cp = dcp = cv = NOTSET;
 
     /* Temperature */
     tt = NOTSET;
@@ -1048,16 +1048,21 @@ static void calc_fluctuation_props(FILE *fp,
     /* Alpha, dcp */
     if ((ii[eVol] < nset) && (ii[eEnth] < nset) && (ii[eTemp] < nset))
     {
-        vvhh = 0;
+        double v_sum, h_sum, vh_sum, v_aver, h_aver, vh_aver;
+        vh_sum = v_sum = h_sum = 0;
         for (j = 0; (j < edat->nframes); j++)
         {
-            v     = edat->s[ii[eVol]].ener[j]*NANO3;
-            h     = KILO*edat->s[ii[eEnth]].ener[j]/AVOGADRO;
-            vvhh += (v*h);
+            v       = edat->s[ii[eVol]].ener[j]*NANO3;
+            h       = KILO*edat->s[ii[eEnth]].ener[j]/AVOGADRO;
+            v_sum  += v;
+            h_sum  += h;
+            vh_sum += (v*h);
         }
-        vvhh /= edat->nframes;
-        alpha = (vvhh-vv*hh)/(vv*BOLTZMANN*tt*tt);
-        dcp   = (vv*AVOGADRO/nmol)*tt*sqr(alpha)/(kappa);
+        vh_aver = vh_sum / edat->nframes;
+        v_aver  = v_sum  / edat->nframes;
+        h_aver  = h_sum  / edat->nframes;
+        alpha   = (vh_aver-v_aver*h_aver)/(v_aver*BOLTZMANN*tt*tt);
+        dcp     = (v_aver*AVOGADRO/nmol)*tt*sqr(alpha)/(kappa);
     }
 
     if (tt != NOTSET)
@@ -1080,10 +1085,6 @@ static void calc_fluctuation_props(FILE *fp,
             {
                 fprintf(fp, "varv  =  %10g (m^6)\n", varv*AVOGADRO/nmol);
             }
-            if (vvhh != NOTSET)
-            {
-                fprintf(fp, "vvhh  =  %10g (m^3 J)\n", vvhh);
-            }
         }
         if (vv != NOTSET)
         {
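
This hunk accumulates <V>, <H>, and <VH> over the same set of frames
instead of pairing a frame-wise <VH> with averages obtained elsewhere, so
alpha is the consistent covariance estimate
(<VH> - <V><H>) / (<V> kB T^2). A minimal standalone sketch of the
accumulation pattern, using a hypothetical helper that is not part of the
GROMACS API:

    /* One-pass covariance of paired samples, mirroring the loop added
     * above: cov(v,h) = <v*h> - <v>*<h>. Illustration only. */
    static double covariance(const double v[], const double h[], int n)
    {
        double v_sum = 0, h_sum = 0, vh_sum = 0;
        int    i;

        for (i = 0; i < n; i++)
        {
            v_sum  += v[i];
            h_sum  += h[i];
            vh_sum += v[i]*h[i];
        }
        return vh_sum/n - (v_sum/n)*(h_sum/n);
    }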

src/gromacs/legacyheaders/sim_util.h
index d515c7b19a62e4899535e103e0225ceedab1e78d..47d68cb2717f501a649976f272aeda20f6308ca0 100644
@@ -3,7 +3,7 @@
  *
  * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
  * Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014, by the GROMACS development team, led by
  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
  * and including many others, as listed in the AUTHORS file in the
  * top-level source directory and at http://www.gromacs.org.
@@ -90,6 +90,10 @@ void print_time(FILE *out, gmx_walltime_accounting_t walltime_accounting,
 void print_date_and_time(FILE *log, int pid, const char *title,
                          const gmx_walltime_accounting_t walltime_accounting);
 
+void print_start(FILE *fplog, t_commrec *cr,
+                 gmx_walltime_accounting_t walltime_accounting,
+                 const char *name);
+
 void finish_run(FILE *log, t_commrec *cr,
                 t_inputrec *inputrec,
                 t_nrnb nrnb[], gmx_wallcycle_t wcycle,

src/gromacs/mdlib/minimize.c
index 3f605e6808aaa564d8b353e334a7284c6740ad75..c9b00c98b69b81ac9fbb16e89d6d0d31053e40fd 100644
@@ -63,6 +63,7 @@
 #include "force.h"
 #include "mdrun.h"
 #include "md_support.h"
+#include "sim_util.h"
 #include "domdec.h"
 #include "partdec.h"
 #include "mdatoms.h"
@@ -107,14 +108,9 @@ static void print_em_start(FILE                     *fplog,
                            gmx_wallcycle_t           wcycle,
                            const char               *name)
 {
-    char buf[STRLEN];
-
     walltime_accounting_start(walltime_accounting);
-
-    sprintf(buf, "Started %s", name);
-    print_date_and_time(fplog, cr->nodeid, buf, NULL);
-
     wallcycle_start(wcycle, ewcRUN);
+    print_start(fplog, cr, walltime_accounting, name);
 }
 static void em_time_end(gmx_walltime_accounting_t walltime_accounting,
                         gmx_wallcycle_t           wcycle)

src/gromacs/mdlib/sim_util.c
index e5b968a8735ce30fe7cd9697cff12ab8f649074c..251f64e284e1ed77c6b379a8854edb7da49f69eb 100644
@@ -180,6 +180,16 @@ void print_date_and_time(FILE *fplog, int nodeid, const char *title,
     }
 }
 
+void print_start(FILE *fplog, t_commrec *cr,
+                 gmx_walltime_accounting_t walltime_accounting,
+                 const char *name)
+{
+    char buf[STRLEN];
+
+    sprintf(buf, "Started %s", name);
+    print_date_and_time(fplog, cr->nodeid, buf, walltime_accounting);
+}
+
 static void sum_forces(int start, int end, rvec f[], rvec flr[])
 {
     int i;

src/gromacs/mdlib/tpi.c
index 190a6cf287461451f4784f18616861c31f5de406..489a2c958c739937bd15e9230cef472ac062a84f 100644
@@ -256,10 +256,8 @@ double do_tpi(FILE *fplog, t_commrec *cr,
 
     /* Print to log file  */
     walltime_accounting_start(walltime_accounting);
-    print_date_and_time(fplog, cr->nodeid,
-                        "Started Test Particle Insertion",
-                        walltime_accounting);
     wallcycle_start(wcycle, ewcRUN);
+    print_start(fplog, cr, walltime_accounting, "Test Particle Insertion");
 
     /* The last charge group is the group to be inserted */
     cg_tp = top->cgs.nr - 1;

src/programs/mdrun/md.c
index 3173bea653ece43cfd347bb8d32976ca7ac79ee4..1ab75d2ddc5d770df9f14ce5e2d5bd52fa078e18 100644
@@ -668,14 +668,9 @@ double do_md(FILE *fplog, t_commrec *cr, int nfile, const t_filenm fnm[],
         fprintf(fplog, "\n");
     }
 
-    /* Set and write start time */
     walltime_accounting_start(walltime_accounting);
-    print_date_and_time(fplog, cr->nodeid, "Started mdrun", walltime_accounting);
     wallcycle_start(wcycle, ewcRUN);
-    if (fplog)
-    {
-        fprintf(fplog, "\n");
-    }
+    print_start(fplog, cr, walltime_accounting, "mdrun");
 
     /* safest point to do file checkpointing is here.  More general point would be immediately before integrator call */
 #ifdef GMX_FAHCORE

src/programs/mdrun/mdrun.cpp
index bc2ec331c535b27b4a987ee9c0c64b836224bf80..61cc64db84ce9149a911a6960abffda9f8c4743b 100644
@@ -97,10 +97,16 @@ int gmx_mdrun(int argc, char *argv[])
         "With thread-MPI there are additional options [TT]-nt[tt], which sets",
         "the total number of threads, and [TT]-ntmpi[tt], which sets the number",
         "of thread-MPI threads.",
-        "Note that using combined MPI+OpenMP parallelization is almost always",
-        "slower than single parallelization, except at the scaling limit, where",
-        "especially OpenMP parallelization of PME reduces the communication cost.",
-        "OpenMP-only parallelization is much faster than MPI-only parallelization",
+        "The number of OpenMP threads used by [TT]mdrun[tt] can also be set with",
+        "the standard environment variable [TT]OMP_NUM_THREADS[tt].",
+        "The [TT]GMX_PME_NUM_THREADS[tt] environment variable can be used to specify",
+        "the number of threads used by the PME-only processes.[PAR]",
+        "Note that combined MPI+OpenMP parallelization is in many cases",
+        "slower than either on its own. However, at high degrees of parallelization",
+        "the combination is often beneficial, as it reduces the number of domains",
+        "and/or the number of MPI ranks. (Fewer, larger domains can improve scaling;",
+        "with separate PME processes, fewer MPI ranks reduce the communication cost.)",
+        "OpenMP-only parallelization is typically faster than MPI-only parallelization",
         "on a single CPU(-die). Since we currently don't have proper hardware",
         "topology detection, [TT]mdrun[tt] compiled with thread-MPI will only",
         "automatically use OpenMP-only parallelization when you use up to 4",
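
The thread-control options described above compose as in the following
illustrative invocations (thread counts are examples only, and mdrun_mpi
is a common but not universal name for a real-MPI build):

    # thread-MPI build: 8 threads in total, 4 of them thread-MPI ranks
    mdrun -nt 8 -ntmpi 4 -s topol.tpr

    # real-MPI build: OpenMP width from the standard environment variable
    OMP_NUM_THREADS=2 mpirun -np 8 mdrun_mpi -s topol.tpr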