Merge branch 'master' into pygromacs
diff --git a/src/gromacs/mdlib/expanded.c b/src/gromacs/mdlib/expanded.cpp
similarity index 88%
rename from src/gromacs/mdlib/expanded.c
rename to src/gromacs/mdlib/expanded.cpp
index 402662de4c31989a0eb6c13f3a657c3fe60cd7cd..6c53c76b1bc8c99340164f71edf6081d7368fb40 100644
  */
 #include "gmxpre.h"
 
-#include <math.h>
 #include <stdio.h>
 
+#include <cmath>
+
+#include <algorithm>
+
 #include "gromacs/domdec/domdec.h"
 #include "gromacs/fileio/confio.h"
 #include "gromacs/fileio/gmxfio.h"
@@ -104,12 +107,12 @@ static void GenerateGibbsProbabilities(real *ene, double *p_k, double *pks, int
     /* find the denominator */
     for (i = minfep; i <= maxfep; i++)
     {
-        *pks += exp(ene[i]-maxene);
+        *pks += std::exp(ene[i]-maxene);
     }
     /*numerators*/
     for (i = minfep; i <= maxfep; i++)
     {
-        p_k[i] = exp(ene[i]-maxene) / *pks;
+        p_k[i] = std::exp(ene[i]-maxene) / *pks;
     }
 }
 
@@ -128,11 +131,11 @@ static void GenerateWeightedGibbsProbabilities(real *ene, double *p_k, double *p
         {
             /* add the delta, since we need to make sure it's greater than zero, and
                we need a non-arbitrary number? */
-            nene[i] = ene[i] + log(nvals[i]+delta);
+            nene[i] = ene[i] + std::log(nvals[i]+delta);
         }
         else
         {
-            nene[i] = ene[i] + log(nvals[i]);
+            nene[i] = ene[i] + std::log(nvals[i]);
         }
     }
 
@@ -155,13 +158,13 @@ static void GenerateWeightedGibbsProbabilities(real *ene, double *p_k, double *p
     /* find the denominator */
     for (i = 0; i < nlim; i++)
     {
-        *pks += exp(nene[i]);
+        *pks += std::exp(nene[i]);
     }
 
     /*numerators*/
     for (i = 0; i < nlim; i++)
     {
-        p_k[i] = exp(nene[i]) / *pks;
+        p_k[i] = std::exp(nene[i]) / *pks;
     }
     sfree(nene);
 }
@@ -180,18 +183,18 @@ real do_logsum(int N, real *a_n)
     maxarg = a_n[0];
     for (i = 1; i < N; i++)
     {
-        maxarg = max(maxarg, a_n[i]);
+        maxarg = std::max(maxarg, a_n[i]);
     }
 
     /* compute sum of exp(a_n - maxarg) */
     sum = 0.0;
     for (i = 0; i < N; i++)
     {
-        sum = sum + exp(a_n[i] - maxarg);
+        sum = sum + std::exp(a_n[i] - maxarg);
     }
 
     /*     compute log sum */
-    logsum = log(sum) + maxarg;
+    logsum = std::log(sum) + maxarg;
     return logsum;
 }
 
@@ -256,95 +259,102 @@ static gmx_bool CheckIfDoneEquilibrating(int nlim, t_expanded *expand, df_histor
     gmx_bool bDoneEquilibrating = TRUE;
     gmx_bool bIfFlat;
 
-    /* assume we have equilibrated the weights, then check to see if any of the conditions are not met */
-
-    /* calculate the total number of samples */
-    switch (expand->elmceq)
+    /* If we are doing slow growth to get initial values, we haven't finished equilibrating */
+    if (expand->lmc_forced_nstart > 0)
     {
-        case elmceqNO:
-            /* We have not equilibrated, and won't, ever. */
-            return FALSE;
-        case elmceqYES:
-            /* we have equilibrated -- we're done */
-            return TRUE;
-        case elmceqSTEPS:
-            /* first, check if we are equilibrating by steps, if we're still under */
-            if (step < expand->equil_steps)
+        for (i = 0; i < nlim; i++)
+        {
+            if (dfhist->n_at_lam[i] < expand->lmc_forced_nstart) /* we are still doing the initial sweep, so we're definitely not
+                                                                    done equilibrating*/
             {
                 bDoneEquilibrating = FALSE;
+                break;
             }
-            break;
-        case elmceqSAMPLES:
-            totalsamples = 0;
-            for (i = 0; i < nlim; i++)
-            {
-                totalsamples += dfhist->n_at_lam[i];
-            }
-            if (totalsamples < expand->equil_samples)
-            {
+        }
+    }
+    else
+    {
+        /* assume we have equilibrated the weights, then check to see if any of the conditions are not met */
+        bDoneEquilibrating = TRUE;
+
+        /* calculate the total number of samples */
+        switch (expand->elmceq)
+        {
+            case elmceqNO:
+                /* We have not equilibrated, and won't, ever. */
                 bDoneEquilibrating = FALSE;
-            }
-            break;
-        case elmceqNUMATLAM:
-            for (i = 0; i < nlim; i++)
-            {
-                if (dfhist->n_at_lam[i] < expand->equil_n_at_lam) /* we are still doing the initial sweep, so we're definitely not
-                                                                     done equilibrating*/
+                break;
+            case elmceqYES:
+                /* we have equilibrated -- we're done */
+                bDoneEquilibrating = TRUE;
+                break;
+            case elmceqSTEPS:
+                /* first, check if we are equilibrating by steps, if we're still under */
+                if (step < expand->equil_steps)
                 {
-                    bDoneEquilibrating  = FALSE;
-                    break;
+                    bDoneEquilibrating = FALSE;
                 }
-            }
-            break;
-        case elmceqWLDELTA:
-            if (EWL(expand->elamstats)) /* This check is in readir as well, but
-                                           just to be sure */
-            {
-                if (dfhist->wl_delta > expand->equil_wl_delta)
+                break;
+            case elmceqSAMPLES:
+                totalsamples = 0;
+                for (i = 0; i < nlim; i++)
+                {
+                    totalsamples += dfhist->n_at_lam[i];
+                }
+                if (totalsamples < expand->equil_samples)
                 {
                     bDoneEquilibrating = FALSE;
                 }
-            }
-            break;
-        case elmceqRATIO:
-            /* we can use the flatness as a judge of good weights, as long as
-               we're not doing minvar, or Wang-Landau.
-               But turn off for now until we figure out exactly how we do this.
-             */
-
-            if (!(EWL(expand->elamstats) || expand->elamstats == elamstatsMINVAR))
-            {
-                /* we want to use flatness -avoiding- the forced-through samples.  Plus, we need to convert to
-                   floats for this histogram function. */
-
-                real *modhisto;
-                snew(modhisto, nlim);
+                break;
+            case elmceqNUMATLAM:
                 for (i = 0; i < nlim; i++)
                 {
-                    modhisto[i] = 1.0*(dfhist->n_at_lam[i]-expand->lmc_forced_nstart);
+                    if (dfhist->n_at_lam[i] < expand->equil_n_at_lam) /* we are still doing the initial sweep, so we're definitely not
+                                                                         done equilibrating*/
+                    {
+                        bDoneEquilibrating  = FALSE;
+                        break;
+                    }
                 }
-                bIfFlat = CheckHistogramRatios(nlim, modhisto, expand->equil_ratio);
-                sfree(modhisto);
-                if (!bIfFlat)
+                break;
+            case elmceqWLDELTA:
+                if (EWL(expand->elamstats)) /* This check is in readir as well, but
+                                               just to be sure */
                 {
-                    bDoneEquilibrating = FALSE;
+                    if (dfhist->wl_delta > expand->equil_wl_delta)
+                    {
+                        bDoneEquilibrating = FALSE;
+                    }
                 }
-            }
-        default:
-            bDoneEquilibrating = TRUE;
-    }
-    /* one last case to go though, if we are doing slow growth to get initial values, we haven't finished equilibrating */
+                break;
+            case elmceqRATIO:
+                /* we can use the flatness as a judge of good weights, as long as
+                   we're not doing minvar, or Wang-Landau.
+                   But turn off for now until we figure out exactly how we do this.
+                 */
 
-    if (expand->lmc_forced_nstart > 0)
-    {
-        for (i = 0; i < nlim; i++)
-        {
-            if (dfhist->n_at_lam[i] < expand->lmc_forced_nstart) /* we are still doing the initial sweep, so we're definitely not
-                                                                    done equilibrating*/
-            {
-                bDoneEquilibrating = FALSE;
+                if (!(EWL(expand->elamstats) || expand->elamstats == elamstatsMINVAR))
+                {
+                    /* we want to use flatness -avoiding- the forced-through samples.  Plus, we need to convert to
+                       floats for this histogram function. */
+
+                    real *modhisto;
+                    snew(modhisto, nlim);
+                    for (i = 0; i < nlim; i++)
+                    {
+                        modhisto[i] = 1.0*(dfhist->n_at_lam[i]-expand->lmc_forced_nstart);
+                    }
+                    bIfFlat = CheckHistogramRatios(nlim, modhisto, expand->equil_ratio);
+                    sfree(modhisto);
+                    if (!bIfFlat)
+                    {
+                        bDoneEquilibrating = FALSE;
+                    }
+                }
+                break;
+            default:
+                bDoneEquilibrating = TRUE;
                 break;
-            }
         }
     }
     return bDoneEquilibrating;
@@ -353,21 +363,18 @@ static gmx_bool CheckIfDoneEquilibrating(int nlim, t_expanded *expand, df_histor
 static gmx_bool UpdateWeights(int nlim, t_expanded *expand, df_history_t *dfhist,
                               int fep_state, real *scaled_lamee, real *weighted_lamee, gmx_int64_t step)
 {
-    real     maxdiff = 0.000000001;
-    gmx_bool bSufficientSamples;
-    int      i, k, n, nz, indexi, indexk, min_n, max_n, totali;
-    int      n0, np1, nm1, nval, min_nvalm, min_nvalp, maxc;
-    real     omega_m1_0, omega_p1_m1, omega_m1_p1, omega_p1_0, clam_osum;
-    real     de, de_function, dr, denom, maxdr;
-    real     min_val, cnval, zero_sum_weights;
-    real    *omegam_array, *weightsm_array, *omegap_array, *weightsp_array, *varm_array, *varp_array, *dwp_array, *dwm_array;
-    real     clam_varm, clam_varp, clam_weightsm, clam_weightsp, clam_minvar;
-    real    *lam_weights, *lam_minvar_corr, *lam_variance, *lam_dg;
-    double  *p_k;
-    double   pks = 0;
-    real    *numweighted_lamee, *logfrac;
-    int     *nonzero;
-    real     chi_m1_0, chi_p1_0, chi_m2_0, chi_p2_0, chi_p1_m1, chi_p2_m1, chi_m1_p1, chi_m2_p1;
+    gmx_bool  bSufficientSamples;
+    int       i;
+    int       n0, np1, nm1, nval, min_nvalm, min_nvalp, maxc;
+    real      omega_m1_0, omega_p1_m1, omega_m1_p1, omega_p1_0, clam_osum;
+    real      de, de_function;
+    real      cnval, zero_sum_weights;
+    real     *omegam_array, *weightsm_array, *omegap_array, *weightsp_array, *varm_array, *varp_array, *dwp_array, *dwm_array;
+    real      clam_varm, clam_varp, clam_weightsm, clam_weightsp, clam_minvar;
+    real     *lam_variance, *lam_dg;
+    double   *p_k;
+    double    pks = 0;
+    real      chi_m1_0, chi_p1_0, chi_m2_0, chi_p2_0, chi_p1_m1, chi_p2_m1, chi_m1_p1, chi_m2_p1;
 
     /* if we have equilibrated the weights, exit now */
     if (dfhist->bEquil)
@@ -459,7 +466,7 @@ static gmx_bool UpdateWeights(int nlim, t_expanded *expand, df_history_t *dfhist
         for (i = 0; i < nlim-1; i++)
         {   /* only through the second to last */
             lam_dg[i]       = dfhist->sum_dg[i+1] - dfhist->sum_dg[i];
-            lam_variance[i] = pow(dfhist->sum_variance[i+1], 2) - pow(dfhist->sum_variance[i], 2);
+            lam_variance[i] = sqr(dfhist->sum_variance[i+1]) - sqr(dfhist->sum_variance[i]);
         }
 
         /* accumulate running averages */
@@ -470,7 +477,7 @@ static gmx_bool UpdateWeights(int nlim, t_expanded *expand, df_history_t *dfhist
             /* actually, should be able to rewrite it w/o exponential, for better numerical stability */
             if (fep_state > 0)
             {
-                de = exp(cnval - (scaled_lamee[fep_state]-scaled_lamee[fep_state-1]));
+                de = std::exp(cnval - (scaled_lamee[fep_state]-scaled_lamee[fep_state-1]));
                 if (expand->elamstats == elamstatsBARKER || expand->elamstats == elamstatsMINVAR)
                 {
                     de_function = 1.0/(1.0+de);
@@ -492,7 +499,7 @@ static gmx_bool UpdateWeights(int nlim, t_expanded *expand, df_history_t *dfhist
 
             if (fep_state < nlim-1)
             {
-                de = exp(-cnval + (scaled_lamee[fep_state+1]-scaled_lamee[fep_state]));
+                de = std::exp(-cnval + (scaled_lamee[fep_state+1]-scaled_lamee[fep_state]));
                 if (expand->elamstats == elamstatsBARKER || expand->elamstats == elamstatsMINVAR)
                 {
                     de_function = 1.0/(1.0+de);
@@ -574,7 +581,7 @@ static gmx_bool UpdateWeights(int nlim, t_expanded *expand, df_history_t *dfhist
                 }
                 if ((n0 > 0) && (nm1 > 0))
                 {
-                    clam_weightsm = (log(chi_m1_0) - log(chi_p1_m1)) + cnval;
+                    clam_weightsm = (std::log(chi_m1_0) - std::log(chi_p1_m1)) + cnval;
                     clam_varm     = (1.0/n0)*(omega_m1_0) + (1.0/nm1)*(omega_p1_m1);
                 }
             }
@@ -591,7 +598,7 @@ static gmx_bool UpdateWeights(int nlim, t_expanded *expand, df_history_t *dfhist
                 }
                 if ((n0 > 0) && (np1 > 0))
                 {
-                    clam_weightsp = (log(chi_m1_p1) - log(chi_p1_0)) + cnval;
+                    clam_weightsp = (std::log(chi_m1_p1) - std::log(chi_p1_0)) + cnval;
                     clam_varp     = (1.0/np1)*(omega_m1_p1) + (1.0/n0)*(omega_p1_0);
                 }
             }
@@ -608,7 +615,7 @@ static gmx_bool UpdateWeights(int nlim, t_expanded *expand, df_history_t *dfhist
             varm_array[nval]               = clam_varm;
             if (nm1 > 0)
             {
-                dwm_array[nval]  = fabs( (cnval + log((1.0*n0)/nm1)) - lam_dg[fep_state-1] );
+                dwm_array[nval]  = fabs( (cnval + std::log((1.0*n0)/nm1)) - lam_dg[fep_state-1] );
             }
             else
             {
@@ -627,7 +634,7 @@ static gmx_bool UpdateWeights(int nlim, t_expanded *expand, df_history_t *dfhist
             varp_array[nval]               = clam_varp;
             if ((np1 > 0) && (n0 > 0))
             {
-                dwp_array[nval]  = fabs( (cnval + log((1.0*np1)/n0)) - lam_dg[fep_state] );
+                dwp_array[nval]  = fabs( (cnval + std::log((1.0*np1)/n0)) - lam_dg[fep_state] );
             }
             else
             {
@@ -652,7 +659,7 @@ static gmx_bool UpdateWeights(int nlim, t_expanded *expand, df_history_t *dfhist
         clam_minvar = 0;
         if (clam_osum > 0)
         {
-            clam_minvar = 0.5*log(clam_osum);
+            clam_minvar = 0.5*std::log(clam_osum);
         }
 
         if (fep_state > 0)
@@ -705,7 +712,7 @@ static gmx_bool UpdateWeights(int nlim, t_expanded *expand, df_history_t *dfhist
         for (i = 1; i < nlim; i++)
         {
             dfhist->sum_dg[i]       = lam_dg[i-1] + dfhist->sum_dg[i-1];
-            dfhist->sum_variance[i] = sqrt(lam_variance[i-1] + pow(dfhist->sum_variance[i-1], 2));
+            dfhist->sum_variance[i] = std::sqrt(lam_variance[i-1] + sqr(dfhist->sum_variance[i-1]));
             dfhist->sum_weights[i]  = dfhist->sum_dg[i] + dfhist->sum_minvar[i];
         }
 
@@ -730,13 +737,11 @@ static int ChooseNewLambda(int nlim, t_expanded *expand, df_history_t *dfhist, i
 {
     /* Choose new lambda value, and update transition matrix */
 
-    int      i, ifep, jfep, minfep, maxfep, lamnew, lamtrial, starting_fep_state;
-    real     r1, r2, de_old, de_new, de, trialprob, tprob = 0;
-    real   **Tij;
+    int      i, ifep, minfep, maxfep, lamnew, lamtrial, starting_fep_state;
+    real     r1, r2, de, trialprob, tprob = 0;
     double  *propose, *accept, *remainder;
     double   pks;
-    real     sum, pnorm;
-    gmx_bool bRestricted;
+    real     pnorm;
 
     starting_fep_state = fep_state;
     lamnew             = fep_state; /* so that there is a default setting -- stays the same */
@@ -784,13 +789,11 @@ static int ChooseNewLambda(int nlim, t_expanded *expand, df_history_t *dfhist, i
 
         if ((expand->elmcmove == elmcmoveGIBBS) || (expand->elmcmove == elmcmoveMETGIBBS))
         {
-            bRestricted = TRUE;
             /* use the Gibbs sampler, with restricted range */
             if (expand->gibbsdeltalam < 0)
             {
                 minfep      = 0;
                 maxfep      = nlim-1;
-                bRestricted = FALSE;
             }
             else
             {
@@ -968,7 +971,7 @@ static int ChooseNewLambda(int nlim, t_expanded *expand, df_history_t *dfhist, i
             if (expand->elmcmove == elmcmoveMETROPOLIS)
             {
                 tprob     = 1.0;
-                trialprob = exp(de);
+                trialprob = std::exp(de);
                 if (trialprob < tprob)
                 {
                     tprob = trialprob;
@@ -981,7 +984,7 @@ static int ChooseNewLambda(int nlim, t_expanded *expand, df_history_t *dfhist, i
             }
             else if (expand->elmcmove == elmcmoveBARKER)
             {
-                tprob = 1.0/(1.0+exp(-de));
+                tprob = 1.0/(1.0+std::exp(-de));
 
                 propose[fep_state] = (1-tprob);
                 propose[lamtrial] += tprob; /* we add, to account for the fact that at the end, they might be the same point */
@@ -1022,8 +1025,7 @@ extern void PrintFreeEnergyInfoToFile(FILE *outfile, t_lambda *fep, t_expanded *
                                       int fep_state, int frequency, gmx_int64_t step)
 {
     int         nlim, i, ifep, jfep;
-    real        dw, dg, dv, dm, Tprint;
-    real       *temps;
+    real        dw, dg, dv, Tprint;
     const char *print_names[efptNR] = {" FEPL", "MassL", "CoulL", " VdwL", "BondL", "RestT", "Temp.(K)"};
     gmx_bool    bSimTemp            = FALSE;
 
@@ -1033,7 +1035,7 @@ extern void PrintFreeEnergyInfoToFile(FILE *outfile, t_lambda *fep, t_expanded *
         bSimTemp = TRUE;
     }
 
-    if (mod(step, frequency) == 0)
+    if (step % frequency == 0)
     {
         fprintf(outfile, "             MC-lambda information\n");
         if (EWL(expand->elamstats) && (!(dfhist->bEquil)))
@@ -1068,15 +1070,12 @@ extern void PrintFreeEnergyInfoToFile(FILE *outfile, t_lambda *fep, t_expanded *
                 dw = 0.0;
                 dg = 0.0;
                 dv = 0.0;
-                dm = 0.0;
             }
             else
             {
                 dw = dfhist->sum_weights[ifep+1] - dfhist->sum_weights[ifep];
                 dg = dfhist->sum_dg[ifep+1] - dfhist->sum_dg[ifep];
-                dv = sqrt(pow(dfhist->sum_variance[ifep+1], 2) - pow(dfhist->sum_variance[ifep], 2));
-                dm = dfhist->sum_minvar[ifep+1] - dfhist->sum_minvar[ifep];
-
+                dv = std::sqrt(sqr(dfhist->sum_variance[ifep+1]) - sqr(dfhist->sum_variance[ifep]));
             }
             fprintf(outfile, "%3d", (ifep+1));
             for (i = 0; i < efptNR; i++)
@@ -1124,7 +1123,7 @@ extern void PrintFreeEnergyInfoToFile(FILE *outfile, t_lambda *fep, t_expanded *
         }
         fprintf(outfile, "\n");
 
-        if ((mod(step, expand->nstTij) == 0) && (expand->nstTij > 0) && (step > 0))
+        if ((step % expand->nstTij == 0) && (expand->nstTij > 0) && (step > 0))
         {
             fprintf(outfile, "                     Transition Matrix\n");
             for (ifep = 0; ifep < nlim; ifep++)
@@ -1202,7 +1201,6 @@ extern int ExpandedEnsembleDynamics(FILE *log, t_inputrec *ir, gmx_enerdata_t *e
     real        oneovert, maxscaled = 0, maxweighted = 0;
     t_expanded *expand;
     t_simtemp  *simtemp;
-    double     *temperature_lambdas;
     gmx_bool    bIfReset, bSwitchtoOneOverT, bDoneEquilibrating = FALSE;
 
     expand  = ir->expandedvals;
@@ -1322,7 +1320,7 @@ extern int ExpandedEnsembleDynamics(FILE *log, t_inputrec *ir, gmx_enerdata_t *e
             {
                 told              = ir->opts.ref_t[i];
                 ir->opts.ref_t[i] =  simtemp->temperatures[lamnew];
-                buf_ngtc[i]       = sqrt(ir->opts.ref_t[i]/told); /* using the buffer as temperature scaling */
+                buf_ngtc[i]       = std::sqrt(ir->opts.ref_t[i]/told); /* using the buffer as temperature scaling */
             }
         }