2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012-2018, The GROMACS development team.
5 * Copyright (c) 2019, by the GROMACS development team, led by
6 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
7 * and including many others, as listed in the AUTHORS file in the
8 * top-level source directory and at http://www.gromacs.org.
10 * GROMACS is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public License
12 * as published by the Free Software Foundation; either version 2.1
13 * of the License, or (at your option) any later version.
15 * GROMACS is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with GROMACS; if not, see
22 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
23 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
25 * If you want to redistribute modifications to GROMACS, please
26 * consider that scientific software is very special. Version
27 * control is crucial - bugs must be traceable. We will be happy to
28 * consider code for inclusion in the official distribution, but
29 * derived work must not be called official GROMACS. Details are found
30 * in the README & COPYING files - if they are missing, get the
31 * official version at http://www.gromacs.org.
33 * To help us fund GROMACS development, we humbly ask that you cite
34 * the research papers on the package. Check out http://www.gromacs.org.
45 #include "gromacs/domdec/domdec.h"
46 #include "gromacs/fileio/confio.h"
47 #include "gromacs/fileio/gmxfio.h"
48 #include "gromacs/fileio/xtcio.h"
49 #include "gromacs/gmxlib/network.h"
50 #include "gromacs/gmxlib/nrnb.h"
51 #include "gromacs/listed_forces/disre.h"
52 #include "gromacs/listed_forces/orires.h"
53 #include "gromacs/math/functions.h"
54 #include "gromacs/math/units.h"
55 #include "gromacs/math/vec.h"
56 #include "gromacs/mdlib/calcmu.h"
57 #include "gromacs/mdlib/constr.h"
58 #include "gromacs/mdlib/force.h"
59 #include "gromacs/mdlib/update.h"
60 #include "gromacs/mdtypes/enerdata.h"
61 #include "gromacs/mdtypes/forcerec.h"
62 #include "gromacs/mdtypes/inputrec.h"
63 #include "gromacs/mdtypes/md_enums.h"
64 #include "gromacs/mdtypes/mdatom.h"
65 #include "gromacs/mdtypes/state.h"
66 #include "gromacs/random/threefry.h"
67 #include "gromacs/random/uniformrealdistribution.h"
68 #include "gromacs/timing/wallcycle.h"
69 #include "gromacs/utility/fatalerror.h"
70 #include "gromacs/utility/gmxmpi.h"
71 #include "gromacs/utility/smalloc.h"
73 static void init_df_history_weights(df_history_t* dfhist, const t_expanded* expand, int nlim)
76 dfhist->wl_delta = expand->init_wl_delta;
77 for (i = 0; i < nlim; i++)
79 dfhist->sum_weights[i] = expand->init_lambda_weights[i];
80 dfhist->sum_dg[i] = expand->init_lambda_weights[i];
84 /* Eventually should contain all the functions needed to initialize expanded ensemble
85 before the md loop starts */
86 void init_expanded_ensemble(gmx_bool bStateFromCP, const t_inputrec* ir, df_history_t* dfhist)
90 init_df_history_weights(dfhist, ir->expandedvals, ir->fepvals->n_lambda);
94 static void GenerateGibbsProbabilities(const real* ene, double* p_k, double* pks, int minfep, int maxfep)
101 maxene = ene[minfep];
102 /* find the maximum value */
103 for (i = minfep; i <= maxfep; i++)
110 /* find the denominator */
111 for (i = minfep; i <= maxfep; i++)
113 *pks += std::exp(ene[i] - maxene);
116 for (i = minfep; i <= maxfep; i++)
118 p_k[i] = std::exp(ene[i] - maxene) / *pks;
123 GenerateWeightedGibbsProbabilities(const real* ene, double* p_k, double* pks, int nlim, real* nvals, real delta)
132 for (i = 0; i < nlim; i++)
136 /* add the delta, since we need to make sure it's greater than zero, and
137 we need a non-arbitrary number? */
138 nene[i] = ene[i] + std::log(nvals[i] + delta);
142 nene[i] = ene[i] + std::log(nvals[i]);
146 /* find the maximum value */
148 for (i = 0; i < nlim; i++)
150 if (nene[i] > maxene)
156 /* subtract off the maximum, avoiding overflow */
157 for (i = 0; i < nlim; i++)
162 /* find the denominator */
163 for (i = 0; i < nlim; i++)
165 *pks += std::exp(nene[i]);
169 for (i = 0; i < nlim; i++)
171 p_k[i] = std::exp(nene[i]) / *pks;
176 static int FindMinimum(const real* min_metric, int N)
183 min_val = min_metric[0];
185 for (nval = 0; nval < N; nval++)
187 if (min_metric[nval] < min_val)
189 min_val = min_metric[nval];
196 static gmx_bool CheckHistogramRatios(int nhisto, const real* histo, real ratio)
204 for (i = 0; i < nhisto; i++)
211 /* no samples! is bad!*/
215 nmean /= static_cast<real>(nhisto);
218 for (i = 0; i < nhisto; i++)
220 /* make sure that all points are in the ratio < x < 1/ratio range */
221 if (!((histo[i] / nmean < 1.0 / ratio) && (histo[i] / nmean > ratio)))
/* Decide whether the expanded-ensemble weights can be considered equilibrated.
 *
 * Returns TRUE when the equilibration criterion selected by expand->elmceq is
 * satisfied (never / yes / step count / sample count / samples-per-lambda /
 * WL-delta threshold / histogram flatness), FALSE otherwise.  During the
 * forced initial sweep (lmc_forced_nstart) equilibration is always denied.
 *
 * NOTE(review): this span is an elided excerpt -- braces, switch-case labels
 * and some statements between the visible fragments are missing from this
 * view; the code below is left byte-identical. */
230 static gmx_bool CheckIfDoneEquilibrating(int nlim, const t_expanded* expand, const df_history_t* dfhist, int64_t step)
234 gmx_bool bDoneEquilibrating = TRUE;
237 /* If we are doing slow growth to get initial values, we haven't finished equilibrating */
238 if (expand->lmc_forced_nstart > 0)
240 for (i = 0; i < nlim; i++)
242 if (dfhist->n_at_lam[i]
243 < expand->lmc_forced_nstart) /* we are still doing the initial sweep, so we're
244 definitely not done equilibrating*/
246 bDoneEquilibrating = FALSE;
253 /* assume we have equilibrated the weights, then check to see if any of the conditions are not met */
254 bDoneEquilibrating = TRUE;
256 /* calculate the total number of samples */
/* Dispatch on the user-selected equilibration criterion (elmceq enum). */
257 switch (expand->elmceq)
260 /* We have not equilibrated, and won't, ever. */
261 bDoneEquilibrating = FALSE;
264 /* we have equilibrated -- we're done */
265 bDoneEquilibrating = TRUE;
268 /* first, check if we are equilibrating by steps, if we're still under */
269 if (step < expand->equil_steps)
271 bDoneEquilibrating = FALSE;
/* Criterion: total number of samples across all lambda states. */
276 for (i = 0; i < nlim; i++)
278 totalsamples += dfhist->n_at_lam[i];
280 if (totalsamples < expand->equil_samples)
282 bDoneEquilibrating = FALSE;
/* Criterion: minimum number of samples at every individual lambda state. */
286 for (i = 0; i < nlim; i++)
288 if (dfhist->n_at_lam[i]
289 < expand->equil_n_at_lam) /* we are still doing the initial sweep, so we're
290 definitely not done equilibrating*/
292 bDoneEquilibrating = FALSE;
/* Criterion: Wang-Landau incrementor has decayed below the threshold.
   Only meaningful for WL-type weight-update schemes. */
298 if (EWL(expand->elamstats)) /* This check is in readir as well, but
301 if (dfhist->wl_delta > expand->equil_wl_delta)
303 bDoneEquilibrating = FALSE;
308 /* we can use the flatness as a judge of good weights, as long as
309 we're not doing minvar, or Wang-Landau.
310 But turn off for now until we figure out exactly how we do this.
313 if (!(EWL(expand->elamstats) || expand->elamstats == elamstatsMINVAR))
315 /* we want to use flatness -avoiding- the forced-through samples. Plus, we need
316 to convert to floats for this histogram function. */
319 snew(modhisto, nlim);
320 for (i = 0; i < nlim; i++)
322 modhisto[i] = 1.0 * (dfhist->n_at_lam[i] - expand->lmc_forced_nstart);
324 bIfFlat = CheckHistogramRatios(nlim, modhisto, expand->equil_ratio);
328 bDoneEquilibrating = FALSE;
332 default: bDoneEquilibrating = TRUE; break;
335 return bDoneEquilibrating;
/* Update the expanded-ensemble free-energy weights for the current step.
 *
 * Depending on expand->elamstats this applies either a (weighted) Wang-Landau
 * increment, or accumulates Barker/Metropolis/minimum-variance transition
 * statistics and re-derives the per-state free energies from them.  Returns
 * the result of CheckIfDoneEquilibrating(); once TRUE, weights are frozen
 * (dfhist->bEquil set) and the visit counts are reset.
 *
 * NOTE(review): this span is an elided excerpt -- braces, some declarations
 * and statements between the visible fragments are missing from this view;
 * the code below is left byte-identical. */
338 static gmx_bool UpdateWeights(int nlim,
340 df_history_t* dfhist,
342 const real* scaled_lamee,
343 const real* weighted_lamee,
346 gmx_bool bSufficientSamples;
348 int n0, np1, nm1, nval, min_nvalm, min_nvalp, maxc;
349 real omega_m1_0, omega_p1_0, clam_osum;
350 real de, de_function;
351 real cnval, zero_sum_weights;
/* Scratch arrays indexed over the candidate-constant range [0, maxc):
   omega*/variance/weight/distance estimates for moves to the lower (m)
   and higher (p) neighboring lambda states. */
352 real *omegam_array, *weightsm_array, *omegap_array, *weightsp_array, *varm_array, *varp_array,
353 *dwp_array, *dwm_array;
354 real clam_varm, clam_varp, clam_weightsm, clam_weightsp, clam_minvar;
355 real * lam_variance, *lam_dg;
358 real chi_m1_0, chi_p1_0, chi_m2_0, chi_p2_0, chi_p1_m1, chi_p2_m1, chi_m1_p1, chi_m2_p1;
360 /* if we have equilibrated the weights, exit now */
366 if (CheckIfDoneEquilibrating(nlim, expand, dfhist, step))
368 dfhist->bEquil = TRUE;
369 /* zero out the visited states so we know how many equilibrated states we have
371 for (i = 0; i < nlim; i++)
373 dfhist->n_at_lam[i] = 0;
378 /* If we reached this far, we have not equilibrated yet, keep on
379 going resetting the weights */
/* --- Wang-Landau family of weight updates --- */
381 if (EWL(expand->elamstats))
383 if (expand->elamstats == elamstatsWL) /* Standard Wang-Landau */
385 dfhist->sum_weights[fep_state] -= dfhist->wl_delta;
386 dfhist->wl_histo[fep_state] += 1.0;
388 else if (expand->elamstats == elamstatsWWL) /* Weighted Wang-Landau */
392 /* first increment count */
393 GenerateGibbsProbabilities(weighted_lamee, p_k, &pks, 0, nlim - 1);
394 for (i = 0; i < nlim; i++)
396 dfhist->wl_histo[i] += static_cast<real>(p_k[i]);
399 /* then increment weights (uses count) */
401 GenerateWeightedGibbsProbabilities(weighted_lamee, p_k, &pks, nlim, dfhist->wl_histo,
404 for (i = 0; i < nlim; i++)
406 dfhist->sum_weights[i] -= dfhist->wl_delta * static_cast<real>(p_k[i]);
408 /* Alternate definition, using logarithms. Shouldn't make very much difference! */
413 di = (real)1.0 + dfhist->wl_delta*(real)p_k[i];
414 dfhist->sum_weights[i] -= log(di);
/* Re-anchor all weights so state 0 has weight zero (only differences matter). */
420 zero_sum_weights = dfhist->sum_weights[0];
421 for (i = 0; i < nlim; i++)
423 dfhist->sum_weights[i] -= zero_sum_weights;
/* --- Acceptance-ratio-based estimators (Barker / Metropolis / minvar) --- */
427 if (expand->elamstats == elamstatsBARKER || expand->elamstats == elamstatsMETROPOLIS
428 || expand->elamstats == elamstatsMINVAR)
431 de_function = 0; /* to get rid of warnings, but this value will not be used because of the logic */
432 maxc = 2 * expand->c_range + 1;
435 snew(lam_variance, nlim);
437 snew(omegap_array, maxc);
438 snew(weightsp_array, maxc);
439 snew(varp_array, maxc);
440 snew(dwp_array, maxc);
442 snew(omegam_array, maxc);
443 snew(weightsm_array, maxc);
444 snew(varm_array, maxc);
445 snew(dwm_array, maxc);
447 /* unpack the current lambdas -- we will only update 2 of these */
449 for (i = 0; i < nlim - 1; i++)
450 { /* only through the second to last */
451 lam_dg[i] = dfhist->sum_dg[i + 1] - dfhist->sum_dg[i];
453 gmx::square(dfhist->sum_variance[i + 1]) - gmx::square(dfhist->sum_variance[i]);
456 /* accumulate running averages */
/* Scan over candidate shift constants C in [-c_range, +c_range]. */
457 for (nval = 0; nval < maxc; nval++)
459 /* constants for later use */
460 cnval = static_cast<real>(nval - expand->c_range);
461 /* actually, should be able to rewrite it w/o exponential, for better numerical stability */
/* Energy difference toward the lower neighbor (fep_state - 1). */
464 de = std::exp(cnval - (scaled_lamee[fep_state] - scaled_lamee[fep_state - 1]));
465 if (expand->elamstats == elamstatsBARKER || expand->elamstats == elamstatsMINVAR)
467 de_function = 1.0 / (1.0 + de);
469 else if (expand->elamstats == elamstatsMETROPOLIS)
477 de_function = 1.0 / de;
480 dfhist->accum_m[fep_state][nval] += de_function;
481 dfhist->accum_m2[fep_state][nval] += de_function * de_function;
/* Energy difference toward the higher neighbor (fep_state + 1). */
484 if (fep_state < nlim - 1)
486 de = std::exp(-cnval + (scaled_lamee[fep_state + 1] - scaled_lamee[fep_state]));
487 if (expand->elamstats == elamstatsBARKER || expand->elamstats == elamstatsMINVAR)
489 de_function = 1.0 / (1.0 + de);
491 else if (expand->elamstats == elamstatsMETROPOLIS)
499 de_function = 1.0 / de;
502 dfhist->accum_p[fep_state][nval] += de_function;
503 dfhist->accum_p2[fep_state][nval] += de_function * de_function;
506 /* Metropolis transition and Barker transition (unoptimized Bennett) acceptance weight determination */
508 n0 = dfhist->n_at_lam[fep_state];
511 nm1 = dfhist->n_at_lam[fep_state - 1];
517 if (fep_state < nlim - 1)
519 np1 = dfhist->n_at_lam[fep_state + 1];
526 /* logic SHOULD keep these all set correctly whatever the logic, but apparently it can't figure it out. */
527 chi_m1_0 = chi_p1_0 = chi_m2_0 = chi_p2_0 = chi_p1_m1 = chi_p2_m1 = chi_m1_p1 = chi_m2_p1 = 0;
/* Average (chi) and mean-square (chi2) acceptance functions at the current
   state, normalized by the visit count. */
531 chi_m1_0 = dfhist->accum_m[fep_state][nval] / n0;
532 chi_p1_0 = dfhist->accum_p[fep_state][nval] / n0;
533 chi_m2_0 = dfhist->accum_m2[fep_state][nval] / n0;
534 chi_p2_0 = dfhist->accum_p2[fep_state][nval] / n0;
537 if ((fep_state > 0) && (nm1 > 0))
539 chi_p1_m1 = dfhist->accum_p[fep_state - 1][nval] / nm1;
540 chi_p2_m1 = dfhist->accum_p2[fep_state - 1][nval] / nm1;
543 if ((fep_state < nlim - 1) && (np1 > 0))
545 chi_m1_p1 = dfhist->accum_m[fep_state + 1][nval] / np1;
546 chi_m2_p1 = dfhist->accum_m2[fep_state + 1][nval] / np1;
/* omega = relative variance of the acceptance function; combines into the
   variance of the free-energy estimate for the downward (m) move. */
560 omega_m1_0 = chi_m2_0 / (chi_m1_0 * chi_m1_0) - 1.0;
563 real omega_p1_m1 = chi_p2_m1 / (chi_p1_m1 * chi_p1_m1) - 1.0;
564 clam_weightsm = (std::log(chi_m1_0) - std::log(chi_p1_m1)) + cnval;
565 clam_varm = (1.0 / n0) * (omega_m1_0) + (1.0 / nm1) * (omega_p1_m1);
/* Same quantities for the upward (p) move. */
570 if (fep_state < nlim - 1)
574 omega_p1_0 = chi_p2_0 / (chi_p1_0 * chi_p1_0) - 1.0;
577 real omega_m1_p1 = chi_m2_p1 / (chi_m1_p1 * chi_m1_p1) - 1.0;
578 clam_weightsp = (std::log(chi_m1_p1) - std::log(chi_p1_0)) + cnval;
579 clam_varp = (1.0 / np1) * (omega_m1_p1) + (1.0 / n0) * (omega_p1_0);
/* Record estimates for this candidate constant; dw* measures distance of the
   candidate from the previous free-energy difference. */
586 omegam_array[nval] = omega_m1_0;
590 omegam_array[nval] = 0;
592 weightsm_array[nval] = clam_weightsm;
593 varm_array[nval] = clam_varm;
596 dwm_array[nval] = fabs((cnval + std::log((1.0 * n0) / nm1)) - lam_dg[fep_state - 1]);
600 dwm_array[nval] = std::fabs(cnval - lam_dg[fep_state - 1]);
605 omegap_array[nval] = omega_p1_0;
609 omegap_array[nval] = 0;
611 weightsp_array[nval] = clam_weightsp;
612 varp_array[nval] = clam_varp;
613 if ((np1 > 0) && (n0 > 0))
615 dwp_array[nval] = fabs((cnval + std::log((1.0 * np1) / n0)) - lam_dg[fep_state]);
619 dwp_array[nval] = std::fabs(cnval - lam_dg[fep_state]);
623 /* find the C's closest to the old weights value */
625 min_nvalm = FindMinimum(dwm_array, maxc);
626 omega_m1_0 = omegam_array[min_nvalm];
627 clam_weightsm = weightsm_array[min_nvalm];
628 clam_varm = varm_array[min_nvalm];
630 min_nvalp = FindMinimum(dwp_array, maxc);
631 omega_p1_0 = omegap_array[min_nvalp];
632 clam_weightsp = weightsp_array[min_nvalp];
633 clam_varp = varp_array[min_nvalp];
635 clam_osum = omega_m1_0 + omega_p1_0;
639 clam_minvar = 0.5 * std::log(clam_osum);
/* Store the selected estimates for the two adjacent intervals. */
644 lam_dg[fep_state - 1] = clam_weightsm;
645 lam_variance[fep_state - 1] = clam_varm;
648 if (fep_state < nlim - 1)
650 lam_dg[fep_state] = clam_weightsp;
651 lam_variance[fep_state] = clam_varp;
/* --- Minimum-variance correction term --- */
654 if (expand->elamstats == elamstatsMINVAR)
656 bSufficientSamples = TRUE;
657 /* make sure they are all past a threshold */
658 for (i = 0; i < nlim; i++)
660 if (dfhist->n_at_lam[i] < expand->minvarmin)
662 bSufficientSamples = FALSE;
665 if (bSufficientSamples)
667 dfhist->sum_minvar[fep_state] = clam_minvar;
670 for (i = 0; i < nlim; i++)
672 dfhist->sum_minvar[i] += (expand->minvar_const - clam_minvar);
674 expand->minvar_const = clam_minvar;
675 dfhist->sum_minvar[fep_state] = 0.0;
679 dfhist->sum_minvar[fep_state] -= expand->minvar_const;
684 /* we need to rezero minvar now, since it could change at fep_state = 0 */
/* Rebuild the cumulative free energies/variances/weights from the interval
   estimates, anchored at state 0. */
685 dfhist->sum_dg[0] = 0.0;
686 dfhist->sum_variance[0] = 0.0;
687 dfhist->sum_weights[0] = dfhist->sum_dg[0] + dfhist->sum_minvar[0]; /* should be zero */
689 for (i = 1; i < nlim; i++)
691 dfhist->sum_dg[i] = lam_dg[i - 1] + dfhist->sum_dg[i - 1];
692 dfhist->sum_variance[i] =
693 std::sqrt(lam_variance[i - 1] + gmx::square(dfhist->sum_variance[i - 1]));
694 dfhist->sum_weights[i] = dfhist->sum_dg[i] + dfhist->sum_minvar[i];
701 sfree(weightsm_array);
706 sfree(weightsp_array);
/* Choose the next lambda state by Monte Carlo and update the transition matrices.
 *
 * Supports Gibbs / Metropolized-Gibbs sampling over a (possibly restricted)
 * state range, and nearest-neighbor Metropolis / Barker moves.  Also handles
 * the initial forced "marching" phase (lmc_forced_nstart).  Both the
 * theoretical (Tij) and empirical (Tij_empirical) transition matrices are
 * accumulated.  Returns the newly selected state index.
 *
 * NOTE(review): this span is an elided excerpt -- braces, RNG draws and some
 * statements between the visible fragments are missing from this view; the
 * code below is left byte-identical. */
713 static int ChooseNewLambda(int nlim,
714 const t_expanded* expand,
715 df_history_t* dfhist,
717 const real* weighted_lamee,
722 /* Choose new lambda value, and update transition matrix */
724 int i, ifep, minfep, maxfep, lamnew, lamtrial, starting_fep_state;
725 real r1, r2, de, trialprob, tprob = 0;
726 double * propose, *accept, *remainder;
/* Counter-based RNG keyed on (seed, step, repeat) for reproducible draws. */
729 gmx::ThreeFry2x64<0> rng(
730 seed, gmx::RandomDomain::ExpandedEnsemble); // We only draw once, so zero bits internal counter is fine
731 gmx::UniformRealDistribution<real> dist;
733 starting_fep_state = fep_state;
734 lamnew = fep_state; /* so that there is a default setting -- stays the same */
736 if (!EWL(expand->elamstats)) /* ignore equilibrating the weights if using WL */
738 if ((expand->lmc_forced_nstart > 0) && (dfhist->n_at_lam[nlim - 1] <= expand->lmc_forced_nstart))
740 /* Use a marching method to run through the lambdas and get preliminary free energy data,
741 before starting 'free' sampling. We start free sampling when we have enough at each lambda */
743 /* if we have enough at this lambda, move on to the next one */
745 if (dfhist->n_at_lam[fep_state] == expand->lmc_forced_nstart)
747 lamnew = fep_state + 1;
748 if (lamnew == nlim) /* whoops, stepped too far! */
763 snew(remainder, nlim);
765 for (i = 0; i < expand->lmc_repeats; i++)
767 rng.restart(step, i);
/* Reset per-repeat proposal/acceptance bookkeeping for all states. */
770 for (ifep = 0; ifep < nlim; ifep++)
776 if ((expand->elmcmove == elmcmoveGIBBS) || (expand->elmcmove == elmcmoveMETGIBBS))
778 /* use the Gibbs sampler, with restricted range */
779 if (expand->gibbsdeltalam < 0)
786 minfep = fep_state - expand->gibbsdeltalam;
787 maxfep = fep_state + expand->gibbsdeltalam;
/* Clamp the window to valid state indices. */
792 if (maxfep > nlim - 1)
798 GenerateGibbsProbabilities(weighted_lamee, p_k, &pks, minfep, maxfep);
800 if (expand->elmcmove == elmcmoveGIBBS)
802 for (ifep = minfep; ifep <= maxfep; ifep++)
804 propose[ifep] = p_k[ifep];
/* Draw the new state from the cumulative Gibbs distribution. */
809 for (lamnew = minfep; lamnew <= maxfep; lamnew++)
811 if (r1 <= p_k[lamnew])
818 else if (expand->elmcmove == elmcmoveMETGIBBS)
821 /* Metropolized Gibbs sampling */
822 for (ifep = minfep; ifep <= maxfep; ifep++)
824 remainder[ifep] = 1 - p_k[ifep];
827 /* find the proposal probabilities */
829 if (remainder[fep_state] == 0)
831 /* only the current state has any probability */
832 /* we have to stay at the current state */
837 for (ifep = minfep; ifep <= maxfep; ifep++)
839 if (ifep != fep_state)
841 propose[ifep] = p_k[ifep] / remainder[fep_state];
/* Select a trial state proportional to p_k excluding the current state. */
850 for (lamtrial = minfep; lamtrial <= maxfep; lamtrial++)
852 pnorm = p_k[lamtrial] / remainder[fep_state];
853 if (lamtrial != fep_state)
863 /* we have now selected lamtrial according to p(lamtrial)/1-p(fep_state) */
865 /* trial probability is min{1,\frac{1 - p(old)}{1-p(new)} MRS 1/8/2008 */
866 trialprob = (remainder[fep_state]) / (remainder[lamtrial]);
867 if (trialprob < tprob)
882 /* now figure out the acceptance probability for each */
883 for (ifep = minfep; ifep <= maxfep; ifep++)
886 if (remainder[ifep] != 0)
888 trialprob = (remainder[fep_state]) / (remainder[ifep]);
892 trialprob = 1.0; /* this state is the only choice! */
894 if (trialprob < tprob)
898 /* probability for fep_state=0, but that's fine, it's never proposed! */
899 accept[ifep] = tprob;
/* Sanity check: the drawn state must fall inside the window. */
905 /* it's possible some rounding is failing */
906 if (gmx_within_tol(remainder[fep_state], 0, 50 * GMX_DOUBLE_EPS))
908 /* numerical rounding error -- no state other than the original has weight */
913 /* probably not a numerical issue */
915 int nerror = 200 + (maxfep - minfep + 1) * 60;
917 snew(errorstr, nerror);
918 /* if its greater than maxfep, then something went wrong -- probably underflow
919 in the calculation of sum weights. Generated detailed info for failure */
922 "Something wrong in choosing new lambda state with a Gibbs move -- "
923 "probably underflow in weight determination.\nDenominator is: "
924 "%3d%17.10e\n  i                dE        numerator          weights\n",
926 for (ifep = minfep; ifep <= maxfep; ifep++)
928 loc += sprintf(&errorstr[loc], "%3d %17.10e%17.10e%17.10e\n", ifep,
929 weighted_lamee[ifep], p_k[ifep], dfhist->sum_weights[ifep]);
931 gmx_fatal(FARGS, "%s", errorstr);
935 else if ((expand->elmcmove == elmcmoveMETROPOLIS) || (expand->elmcmove == elmcmoveBARKER))
937 /* use the metropolis sampler with trial +/- 1 */
/* Pick the trial neighbor; reflect at the two ends of the lambda ladder. */
943 lamtrial = fep_state;
947 lamtrial = fep_state - 1;
952 if (fep_state == nlim - 1)
954 lamtrial = fep_state;
958 lamtrial = fep_state + 1;
962 de = weighted_lamee[lamtrial] - weighted_lamee[fep_state];
963 if (expand->elmcmove == elmcmoveMETROPOLIS)
966 trialprob = std::exp(de);
967 if (trialprob < tprob)
971 propose[fep_state] = 0;
972 propose[lamtrial] = 1.0; /* note that this overwrites the above line if fep_state = ntrial, which only occurs at the ends */
974 1.0; /* doesn't actually matter, never proposed unless fep_state = ntrial, in which case it's 1.0 anyway */
975 accept[lamtrial] = tprob;
977 else if (expand->elmcmove == elmcmoveBARKER)
/* Barker acceptance: A = 1/(1+exp(-dE)). */
979 tprob = 1.0 / (1.0 + std::exp(-de));
981 propose[fep_state] = (1 - tprob);
983 tprob; /* we add, to account for the fact that at the end, they might be the same point */
984 accept[fep_state] = 1.0;
985 accept[lamtrial] = 1.0;
/* Accumulate the theoretical transition matrix from proposal x acceptance;
   rejected mass stays on the diagonal. */
999 for (ifep = 0; ifep < nlim; ifep++)
1001 dfhist->Tij[fep_state][ifep] += propose[ifep] * accept[ifep];
1002 dfhist->Tij[fep_state][fep_state] += propose[ifep] * (1.0 - accept[ifep]);
1007 dfhist->Tij_empirical[starting_fep_state][lamnew] += 1.0;
1016 /* print out the weights to the log, along with current state */
/* Write the MC-lambda information table (per-state counts, weights, free
 * energies, and optionally WL histogram / simulated-tempering temperatures)
 * to outfile every 'frequency' steps, plus the symmetrized or raw transition
 * matrices every nstTij steps.
 *
 * NOTE(review): this span is an elided excerpt -- braces and some statements
 * between the visible fragments are missing from this view; the code below
 * is left byte-identical. */
1017 void PrintFreeEnergyInfoToFile(FILE* outfile,
1018 const t_lambda* fep,
1019 const t_expanded* expand,
1020 const t_simtemp* simtemp,
1021 const df_history_t* dfhist,
1026 int nlim, i, ifep, jfep;
1027 real dw, dg, dv, Tprint;
/* Column labels for each separately-coupled lambda component (efpt enum order). */
1028 const char* print_names[efptNR] = { " FEPL", "MassL", "CoulL", " VdwL",
1029 "BondL", "RestT", "Temp.(K)" };
1030 gmx_bool bSimTemp = FALSE;
1032 nlim = fep->n_lambda;
1033 if (simtemp != nullptr)
/* Only print on the requested output interval. */
1038 if (step % frequency == 0)
1040 fprintf(outfile, "             MC-lambda information\n");
1041 if (EWL(expand->elamstats) && (!(dfhist->bEquil)))
1043 fprintf(outfile, "  Wang-Landau incrementor is: %11.5g\n", dfhist->wl_delta);
1045 fprintf(outfile, "  N");
/* Header row: one column per active lambda component. */
1046 for (i = 0; i < efptNR; i++)
1048 if (fep->separate_dvdl[i])
1050 fprintf(outfile, "%7s", print_names[i]);
1052 else if ((i == efptTEMPERATURE) && bSimTemp)
1054 fprintf(outfile, "%10s", print_names[i]); /* more space for temperature formats */
1057 fprintf(outfile, "    Count   ");
1058 if (expand->elamstats == elamstatsMINVAR)
1060 fprintf(outfile, "W(in kT)   G(in kT)  dG(in kT)  dV(in kT)\n");
1064 fprintf(outfile, "G(in kT)  dG(in kT)\n");
/* One data row per lambda state; differences use the next state, so the
   last state is handled specially. */
1066 for (ifep = 0; ifep < nlim; ifep++)
1068 if (ifep == nlim - 1)
1076 dw = dfhist->sum_weights[ifep + 1] - dfhist->sum_weights[ifep];
1077 dg = dfhist->sum_dg[ifep + 1] - dfhist->sum_dg[ifep];
1078 dv = std::sqrt(gmx::square(dfhist->sum_variance[ifep + 1])
1079 - gmx::square(dfhist->sum_variance[ifep]));
1081 fprintf(outfile, "%3d", (ifep + 1));
1082 for (i = 0; i < efptNR; i++)
1084 if (fep->separate_dvdl[i])
1086 fprintf(outfile, "%7.3f", fep->all_lambda[i][ifep]);
1088 else if (i == efptTEMPERATURE && bSimTemp)
1090 fprintf(outfile, "%9.3f", simtemp->temperatures[ifep]);
1093 if (EWL(expand->elamstats)
1094 && (!(dfhist->bEquil))) /* if performing WL and still haven't equilibrated */
1096 if (expand->elamstats == elamstatsWL)
1098 fprintf(outfile, " %8d", static_cast<int>(dfhist->wl_histo[ifep]));
1102 fprintf(outfile, " %8.3f", dfhist->wl_histo[ifep]);
1105 else /* we have equilibrated weights */
1107 fprintf(outfile, " %8d", dfhist->n_at_lam[ifep]);
1109 if (expand->elamstats == elamstatsMINVAR)
1111 fprintf(outfile, " %10.5f %10.5f %10.5f %10.5f", dfhist->sum_weights[ifep],
1112 dfhist->sum_dg[ifep], dg, dv);
1116 fprintf(outfile, " %10.5f %10.5f", dfhist->sum_weights[ifep], dw);
/* Mark the currently occupied state. */
1118 if (ifep == fep_state)
1120 fprintf(outfile, " <<\n");
1124 fprintf(outfile, "   \n");
1127 fprintf(outfile, "\n");
/* Transition-matrix output block, printed every nstTij steps. */
1129 if ((step % expand->nstTij == 0) && (expand->nstTij > 0) && (step > 0))
1131 fprintf(outfile, "                     Transition Matrix\n");
1132 for (ifep = 0; ifep < nlim; ifep++)
1134 fprintf(outfile, "%12d", (ifep + 1));
1136 fprintf(outfile, "\n");
1137 for (ifep = 0; ifep < nlim; ifep++)
1139 for (jfep = 0; jfep < nlim; jfep++)
1141 if (dfhist->n_at_lam[ifep] > 0)
1143 if (expand->bSymmetrizedTMatrix)
1145 Tprint = (dfhist->Tij[ifep][jfep] + dfhist->Tij[jfep][ifep])
1146 / (dfhist->n_at_lam[ifep] + dfhist->n_at_lam[jfep]);
1150 Tprint = (dfhist->Tij[ifep][jfep]) / (dfhist->n_at_lam[ifep]);
1157 fprintf(outfile, "%12.8f", Tprint);
1159 fprintf(outfile, "%3d\n", (ifep + 1));
/* Same layout for the empirically observed transitions. */
1162 fprintf(outfile, "                  Empirical Transition Matrix\n");
1163 for (ifep = 0; ifep < nlim; ifep++)
1165 fprintf(outfile, "%12d", (ifep + 1));
1167 fprintf(outfile, "\n");
1168 for (ifep = 0; ifep < nlim; ifep++)
1170 for (jfep = 0; jfep < nlim; jfep++)
1172 if (dfhist->n_at_lam[ifep] > 0)
1174 if (expand->bSymmetrizedTMatrix)
1176 Tprint = (dfhist->Tij_empirical[ifep][jfep] + dfhist->Tij_empirical[jfep][ifep])
1177 / (dfhist->n_at_lam[ifep] + dfhist->n_at_lam[jfep]);
1181 Tprint = dfhist->Tij_empirical[ifep][jfep] / (dfhist->n_at_lam[ifep]);
1188 fprintf(outfile, "%12.8f", Tprint);
1190 fprintf(outfile, "%3d\n", (ifep + 1));
/* Main per-step driver of expanded-ensemble dynamics.
 *
 * Updates the visit count for the current state, computes reduced energies
 * for all lambda states (including the simulated-tempering temperature term
 * when active), updates the weights via UpdateWeights(), chooses a new state
 * via ChooseNewLambda(), rescales velocities and thermostat variables if the
 * temperature changed (simulated tempering), and applies the Wang-Landau
 * histogram-reset / 1/t-incrementor logic.  Returns the new lambda state.
 *
 * NOTE(review): this span is an elided excerpt -- braces, some declarations
 * and statements between the visible fragments are missing from this view;
 * the code below is left byte-identical. */
1196 int ExpandedEnsembleDynamics(FILE* log,
1197 const t_inputrec* ir,
1198 const gmx_enerdata_t* enerd,
1202 df_history_t* dfhist,
1205 const t_mdatoms* mdatoms)
1206 /* Note that the state variable is only needed for simulated tempering, not
1207 Hamiltonian expanded ensemble. May be able to remove it after integrator refactoring. */
1209 real * pfep_lamee, *scaled_lamee, *weighted_lamee;
1211 int i, nlim, lamnew, totalsamples;
1212 real oneovert, maxscaled = 0, maxweighted = 0;
1215 gmx_bool bIfReset, bSwitchtoOneOverT, bDoneEquilibrating = FALSE;
1217 expand = ir->expandedvals;
1218 simtemp = ir->simtempvals;
1219 nlim = ir->fepvals->n_lambda;
1221 snew(scaled_lamee, nlim);
1222 snew(weighted_lamee, nlim);
1223 snew(pfep_lamee, nlim);
1226 /* update the count at the current lambda*/
1227 dfhist->n_at_lam[fep_state]++;
1229 /* need to calculate the PV term somewhere, but not needed here? Not until there's a lambda
1230 state that's pressure controlled.*/
1233 where does this PV term go?
1234 for (i=0;i<nlim;i++)
1236 fep_lamee[i] += pVTerm;
1240 /* determine the minimum value to avoid overflow. Probably a better way to do this */
1241 /* we don't need to include the pressure term, since the volume is the same between the two.
1242 is there some term we are neglecting, however? */
/* Reduced (dimensionless) energies of all lambda states relative to state 0;
   simulated tempering adds a potential-energy term from the temperature shift. */
1244 if (ir->efep != efepNO)
1246 for (i = 0; i < nlim; i++)
1250 /* Note -- this assumes no mass changes, since kinetic energy is not added . . . */
1251 scaled_lamee[i] = (enerd->enerpart_lambda[i + 1] - enerd->enerpart_lambda[0])
1252 / (simtemp->temperatures[i] * BOLTZ)
1253 + enerd->term[F_EPOT]
1254 * (1.0 / (simtemp->temperatures[i])
1255 - 1.0 / (simtemp->temperatures[fep_state]))
1260 scaled_lamee[i] = (enerd->enerpart_lambda[i + 1] - enerd->enerpart_lambda[0])
1261 / (expand->mc_temp * BOLTZ);
1262 /* mc_temp is currently set to the system reft unless otherwise defined */
1265 /* save these energies for printing, so they don't get overwritten by the next step */
1266 /* they aren't overwritten in the non-free energy case, but we always print with these
1274 for (i = 0; i < nlim; i++)
1278 * (1.0 / simtemp->temperatures[i] - 1.0 / simtemp->temperatures[fep_state]) / BOLTZ;
/* Form the MC sampling weights and track the maxima of both the scaled
   energies and the weighted energies for later overflow-safe shifting. */
1283 for (i = 0; i < nlim; i++)
1285 pfep_lamee[i] = scaled_lamee[i];
1287 weighted_lamee[i] = dfhist->sum_weights[i] - scaled_lamee[i];
1290 maxscaled = scaled_lamee[i];
1291 maxweighted = weighted_lamee[i];
1295 if (scaled_lamee[i] > maxscaled)
1297 maxscaled = scaled_lamee[i];
1299 if (weighted_lamee[i] > maxweighted)
1301 maxweighted = weighted_lamee[i];
1306 for (i = 0; i < nlim; i++)
1308 scaled_lamee[i] -= maxscaled;
1309 weighted_lamee[i] -= maxweighted;
1312 /* update weights - we decide whether or not to actually do this inside */
1314 bDoneEquilibrating =
1315 UpdateWeights(nlim, expand, dfhist, fep_state, scaled_lamee, weighted_lamee, step);
1316 if (bDoneEquilibrating)
1320 fprintf(log, "\nStep %" PRId64 ": Weights have equilibrated, using criteria: %s\n",
1321 step, elmceq_names[expand->elmceq]);
1325 lamnew = ChooseNewLambda(nlim, expand, dfhist, fep_state, weighted_lamee, p_k,
1326 ir->expandedvals->lmc_seed, step);
1327 /* if using simulated tempering, we need to adjust the temperatures */
1328 if (ir->bSimTemp && (lamnew != fep_state)) /* only need to change the temperatures if we change the state */
1333 int nstart, nend, gt;
1335 snew(buf_ngtc, ir->opts.ngtc);
/* Per-tc-group scale factor sqrt(T_new/T_old) for velocity rescaling. */
1337 for (i = 0; i < ir->opts.ngtc; i++)
1339 if (ir->opts.ref_t[i] > 0)
1341 told = ir->opts.ref_t[i];
1342 ir->opts.ref_t[i] = simtemp->temperatures[lamnew];
1343 buf_ngtc[i] = std::sqrt(ir->opts.ref_t[i] / told); /* using the buffer as temperature scaling */
1347 /* we don't need to manipulate the ekind information, as it isn't due to be reset until the next step anyway */
/* Rescale velocities of the home atoms by their group's factor. */
1350 nend = mdatoms->homenr;
1351 for (n = nstart; n < nend; n++)
1356 gt = mdatoms->cTC[n];
1358 for (d = 0; d < DIM; d++)
1360 v[n][d] *= buf_ngtc[gt];
/* Trotter-style thermostats/barostats need their masses and chain
   velocities updated for the new temperature as well. */
1364 if (inputrecNptTrotter(ir) || inputrecNphTrotter(ir) || inputrecNvtTrotter(ir))
1366 /* we need to recalculate the masses if the temperature has changed */
1367 init_npt_masses(ir, state, MassQ, FALSE);
1368 for (i = 0; i < state->nnhpres; i++)
1370 for (j = 0; j < ir->opts.nhchainlength; j++)
1372 state->nhpres_vxi[i + j] *= buf_ngtc[i];
1375 for (i = 0; i < ir->opts.ngtc; i++)
1377 for (j = 0; j < ir->opts.nhchainlength; j++)
1379 state->nosehoover_vxi[i + j] *= buf_ngtc[i];
1386 /* now check on the Wang-Landau updating critera */
1388 if (EWL(expand->elamstats))
1390 bSwitchtoOneOverT = FALSE;
1391 if (expand->bWLoneovert)
1394 for (i = 0; i < nlim; i++)
1396 totalsamples += dfhist->n_at_lam[i];
1398 oneovert = (1.0 * nlim) / totalsamples;
1399 /* oneovert has decreasd by a bit since last time, so we actually make sure its within one of this number */
1400 /* switch to 1/t incrementing when wl_delta has decreased at least once, and wl_delta is now less than 1/t */
1401 if ((dfhist->wl_delta <= ((totalsamples) / (totalsamples - 1.00001)) * oneovert)
1402 && (dfhist->wl_delta < expand->init_wl_delta))
1404 bSwitchtoOneOverT = TRUE;
1407 if (bSwitchtoOneOverT)
1410 oneovert; /* now we reduce by this each time, instead of only at flatness */
/* Standard WL: on a flat histogram, reset it and shrink the incrementor. */
1414 bIfReset = CheckHistogramRatios(nlim, dfhist->wl_histo, expand->wl_ratio);
1417 for (i = 0; i < nlim; i++)
1419 dfhist->wl_histo[i] = 0;
1421 dfhist->wl_delta *= expand->wl_scale;
1424 fprintf(log, "\nStep %d: weights are now:", static_cast<int>(step));
1425 for (i = 0; i < nlim; i++)
1427 fprintf(log, " %.5f", dfhist->sum_weights[i]);
1435 sfree(scaled_lamee);
1436 sfree(weighted_lamee);