src/gromacs/mdlib/expanded.cpp (commit 2d82013c81c06cca3893d0812b8fc41ba23d5bbb)
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2012-2018, The GROMACS development team.
5  * Copyright (c) 2019, by the GROMACS development team, led by
6  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
7  * and including many others, as listed in the AUTHORS file in the
8  * top-level source directory and at http://www.gromacs.org.
9  *
10  * GROMACS is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public License
12  * as published by the Free Software Foundation; either version 2.1
13  * of the License, or (at your option) any later version.
14  *
15  * GROMACS is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with GROMACS; if not, see
22  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
23  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
24  *
25  * If you want to redistribute modifications to GROMACS, please
26  * consider that scientific software is very special. Version
27  * control is crucial - bugs must be traceable. We will be happy to
28  * consider code for inclusion in the official distribution, but
29  * derived work must not be called official GROMACS. Details are found
30  * in the README & COPYING files - if they are missing, get the
31  * official version at http://www.gromacs.org.
32  *
33  * To help us fund GROMACS development, we humbly ask that you cite
34  * the research papers on the package. Check out http://www.gromacs.org.
35  */
36 #include "gmxpre.h"
37
38 #include "expanded.h"
39
40 #include <cmath>
41 #include <cstdio>
42
43 #include <algorithm>
44
45 #include "gromacs/domdec/domdec.h"
46 #include "gromacs/fileio/confio.h"
47 #include "gromacs/fileio/gmxfio.h"
48 #include "gromacs/fileio/xtcio.h"
49 #include "gromacs/gmxlib/network.h"
50 #include "gromacs/gmxlib/nrnb.h"
51 #include "gromacs/listed_forces/disre.h"
52 #include "gromacs/listed_forces/orires.h"
53 #include "gromacs/math/functions.h"
54 #include "gromacs/math/units.h"
55 #include "gromacs/math/vec.h"
56 #include "gromacs/mdlib/calcmu.h"
57 #include "gromacs/mdlib/constr.h"
58 #include "gromacs/mdlib/force.h"
59 #include "gromacs/mdlib/update.h"
60 #include "gromacs/mdtypes/enerdata.h"
61 #include "gromacs/mdtypes/forcerec.h"
62 #include "gromacs/mdtypes/inputrec.h"
63 #include "gromacs/mdtypes/md_enums.h"
64 #include "gromacs/mdtypes/mdatom.h"
65 #include "gromacs/mdtypes/state.h"
66 #include "gromacs/random/threefry.h"
67 #include "gromacs/random/uniformrealdistribution.h"
68 #include "gromacs/timing/wallcycle.h"
69 #include "gromacs/utility/fatalerror.h"
70 #include "gromacs/utility/gmxmpi.h"
71 #include "gromacs/utility/smalloc.h"
72
73 static void init_df_history_weights(df_history_t* dfhist, const t_expanded* expand, int nlim)
74 {
75     int i;
76     dfhist->wl_delta = expand->init_wl_delta;
77     for (i = 0; i < nlim; i++)
78     {
79         dfhist->sum_weights[i] = expand->init_lambda_weights[i];
80         dfhist->sum_dg[i]      = expand->init_lambda_weights[i];
81     }
82 }
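
/* The initial weights filled in above come straight from the mdp input.  As a minimal
   sketch only (mdp option names follow recent GROMACS versions; check the manual for
   your release), a run that pre-seeds the weights might contain:

       ; expanded-ensemble settings (illustrative values, not a validated input)
       nstexpanded         = 100           ; attempt a lambda move every 100 steps
       lmc-stats           = wang-landau   ; weight-update scheme (elamstats)
       init-lambda-weights = 0.0 1.3 2.1 2.8 3.2   ; one value per lambda state, in kT
 */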
83
84 /* Eventually this should contain all the functions needed to initialize the
85    expanded ensemble before the MD loop starts */
86 void init_expanded_ensemble(gmx_bool bStateFromCP, const t_inputrec* ir, df_history_t* dfhist)
87 {
88     if (!bStateFromCP)
89     {
90         init_df_history_weights(dfhist, ir->expandedvals, ir->fepvals->n_lambda);
91     }
92 }
93
94 static void GenerateGibbsProbabilities(const real* ene, double* p_k, double* pks, int minfep, int maxfep)
95 {
96
97     int  i;
98     real maxene;
99
100     *pks   = 0.0;
101     maxene = ene[minfep];
102     /* find the maximum value */
103     for (i = minfep; i <= maxfep; i++)
104     {
105         if (ene[i] > maxene)
106         {
107             maxene = ene[i];
108         }
109     }
110     /* find the denominator */
111     for (i = minfep; i <= maxfep; i++)
112     {
113         *pks += std::exp(ene[i] - maxene);
114     }
115     /* numerators */
116     for (i = minfep; i <= maxfep; i++)
117     {
118         p_k[i] = std::exp(ene[i] - maxene) / *pks;
119     }
120 }
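
/* Numerical note: subtracting the maximum before exponentiating is the usual
   log-sum-exp trick; it leaves the probabilities unchanged, since
       p_k = exp(e_k - emax) / sum_i exp(e_i - emax) = exp(e_k) / sum_i exp(e_i),
   but it keeps every exponent <= 0 and so avoids overflow.  A minimal illustration with
   made-up numbers (not taken from an actual run):

       real   ene[3] = { 700.0, 701.0, 699.0 };  // raw exp() of these would overflow
       double p_k[3];
       double pks = 0.0;
       GenerateGibbsProbabilities(ene, p_k, &pks, 0, 2);
       // p_k is now roughly { 0.245, 0.665, 0.090 } and sums to 1, with no overflow
 */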
121
122 static void
123 GenerateWeightedGibbsProbabilities(const real* ene, double* p_k, double* pks, int nlim, real* nvals, real delta)
124 {
125
126     int   i;
127     real  maxene;
128     real* nene;
129     *pks = 0.0;
130
131     snew(nene, nlim);
132     for (i = 0; i < nlim; i++)
133     {
134         if (nvals[i] == 0)
135         {
136             /* add the delta so that the argument of the log is greater than
137                zero when this state has not been sampled yet */
138             nene[i] = ene[i] + std::log(nvals[i] + delta);
139         }
140         else
141         {
142             nene[i] = ene[i] + std::log(nvals[i]);
143         }
144     }
145
146     /* find the maximum value */
147     maxene = nene[0];
148     for (i = 0; i < nlim; i++)
149     {
150         if (nene[i] > maxene)
151         {
152             maxene = nene[i];
153         }
154     }
155
156     /* subtract off the maximum, avoiding overflow */
157     for (i = 0; i < nlim; i++)
158     {
159         nene[i] -= maxene;
160     }
161
162     /* find the denominator */
163     for (i = 0; i < nlim; i++)
164     {
165         *pks += std::exp(nene[i]);
166     }
167
168     /* numerators */
169     for (i = 0; i < nlim; i++)
170     {
171         p_k[i] = std::exp(nene[i]) / *pks;
172     }
173     sfree(nene);
174 }
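
/* Note: adding log(nvals[i]) above is the same as weighting each Boltzmann factor by the
   number of visits to that state, i.e. p_k is proportional to N_k * exp(e_k) (with N_k
   replaced by the small constant delta when N_k == 0, so the log stays finite).  This is
   the form expected by the weighted Wang-Landau update in UpdateWeights(). */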
175
176 static int FindMinimum(const real* min_metric, int N)
177 {
178
179     real min_val;
180     int  min_nval, nval;
181
182     min_nval = 0;
183     min_val  = min_metric[0];
184
185     for (nval = 0; nval < N; nval++)
186     {
187         if (min_metric[nval] < min_val)
188         {
189             min_val  = min_metric[nval];
190             min_nval = nval;
191         }
192     }
193     return min_nval;
194 }
195
196 static gmx_bool CheckHistogramRatios(int nhisto, const real* histo, real ratio)
197 {
198
199     int      i;
200     real     nmean;
201     gmx_bool bIfFlat;
202
203     nmean = 0;
204     for (i = 0; i < nhisto; i++)
205     {
206         nmean += histo[i];
207     }
208
209     if (nmean == 0)
210     {
211         /* no samples yet -- the histogram cannot be considered flat */
212         bIfFlat = FALSE;
213         return bIfFlat;
214     }
215     nmean /= static_cast<real>(nhisto);
216
217     bIfFlat = TRUE;
218     for (i = 0; i < nhisto; i++)
219     {
220         /* make sure that all points are in the ratio < x < 1/ratio range */
221         if (!((histo[i] / nmean < 1.0 / ratio) && (histo[i] / nmean > ratio)))
222         {
223             bIfFlat = FALSE;
224             break;
225         }
226     }
227     return bIfFlat;
228 }
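
/* As an illustration with assumed values: with ratio = 0.8 every bin must lie within a
   factor of 0.8 to 1/0.8 = 1.25 of the mean count.

       real flat[4] = { 95, 110, 100, 98 };  // mean 100.75; all bins within 0.80-1.25 x mean
       CheckHistogramRatios(4, flat, 0.8);   // returns TRUE  -> counts as flat

       real skew[4] = { 70, 110, 100, 98 };  // mean 94.5; 70/94.5 = 0.74 < 0.8
       CheckHistogramRatios(4, skew, 0.8);   // returns FALSE -> not yet flat
 */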
229
230 static gmx_bool CheckIfDoneEquilibrating(int nlim, const t_expanded* expand, const df_history_t* dfhist, int64_t step)
231 {
232
233     int      i, totalsamples;
234     gmx_bool bDoneEquilibrating = TRUE;
235     gmx_bool bIfFlat;
236
237     /* If we are doing slow growth to get initial values, we haven't finished equilibrating */
238     if (expand->lmc_forced_nstart > 0)
239     {
240         for (i = 0; i < nlim; i++)
241         {
242             if (dfhist->n_at_lam[i]
243                 < expand->lmc_forced_nstart) /* we are still doing the initial sweep, so we're
244                                                 definitely not done equilibrating*/
245             {
246                 bDoneEquilibrating = FALSE;
247                 break;
248             }
249         }
250     }
251     else
252     {
253         /* assume we have equilibrated the weights, then check to see if any of the conditions are not met */
254         bDoneEquilibrating = TRUE;
255
256         /* calculate the total number of samples */
257         switch (expand->elmceq)
258         {
259             case elmceqNO:
260                 /* We have not equilibrated, and won't, ever. */
261                 bDoneEquilibrating = FALSE;
262                 break;
263             case elmceqYES:
264                 /* we have equilibrated -- we're done */
265                 bDoneEquilibrating = TRUE;
266                 break;
267             case elmceqSTEPS:
268                 /* equilibrating by steps: if we are still under the step threshold, we are not done */
269                 if (step < expand->equil_steps)
270                 {
271                     bDoneEquilibrating = FALSE;
272                 }
273                 break;
274             case elmceqSAMPLES:
275                 totalsamples = 0;
276                 for (i = 0; i < nlim; i++)
277                 {
278                     totalsamples += dfhist->n_at_lam[i];
279                 }
280                 if (totalsamples < expand->equil_samples)
281                 {
282                     bDoneEquilibrating = FALSE;
283                 }
284                 break;
285             case elmceqNUMATLAM:
286                 for (i = 0; i < nlim; i++)
287                 {
288                     if (dfhist->n_at_lam[i]
289                         < expand->equil_n_at_lam) /* we are still doing the initial sweep, so we're
290                                                      definitely not done equilibrating*/
291                     {
292                         bDoneEquilibrating = FALSE;
293                         break;
294                     }
295                 }
296                 break;
297             case elmceqWLDELTA:
298                 if (EWL(expand->elamstats)) /* This check is in readir as well, but
299                                                just to be sure */
300                 {
301                     if (dfhist->wl_delta > expand->equil_wl_delta)
302                     {
303                         bDoneEquilibrating = FALSE;
304                     }
305                 }
306                 break;
307             case elmceqRATIO:
308                 /* we can use the flatness as a judge of good weights, as long as
309                    we're not doing minvar, or Wang-Landau.
310                    But turn off for now until we figure out exactly how we do this.
311                  */
312
313                 if (!(EWL(expand->elamstats) || expand->elamstats == elamstatsMINVAR))
314                 {
315                     /* we want to use flatness -avoiding- the forced-through samples.  Plus, we need
316                        to convert to floats for this histogram function. */
317
318                     real* modhisto;
319                     snew(modhisto, nlim);
320                     for (i = 0; i < nlim; i++)
321                     {
322                         modhisto[i] = 1.0 * (dfhist->n_at_lam[i] - expand->lmc_forced_nstart);
323                     }
324                     bIfFlat = CheckHistogramRatios(nlim, modhisto, expand->equil_ratio);
325                     sfree(modhisto);
326                     if (!bIfFlat)
327                     {
328                         bDoneEquilibrating = FALSE;
329                     }
330                 }
331                 break;
332             default: bDoneEquilibrating = TRUE; break;
333         }
334     }
335     return bDoneEquilibrating;
336 }
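
/* A minimal sketch of how the criteria above are usually selected in the mdp file
   (option names as in recent GROMACS versions; consult the manual for your release):

       lmc-weights-equil     = wl-delta   ; stop updating weights once wl_delta is small
       weight-equil-wl-delta = 0.001      ; threshold tested in the elmceqWLDELTA branch

   The other switch cases correspond to number-steps / weight-equil-number-steps,
   number-samples / weight-equil-number-samples, number-all-lambda /
   weight-equil-number-all-lambda, and count-ratio / weight-equil-count-ratio. */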
337
338 static gmx_bool UpdateWeights(int           nlim,
339                               t_expanded*   expand,
340                               df_history_t* dfhist,
341                               int           fep_state,
342                               const real*   scaled_lamee,
343                               const real*   weighted_lamee,
344                               int64_t       step)
345 {
346     gmx_bool bSufficientSamples;
347     int      i;
348     int      n0, np1, nm1, nval, min_nvalm, min_nvalp, maxc;
349     real     omega_m1_0, omega_p1_0, clam_osum;
350     real     de, de_function;
351     real     cnval, zero_sum_weights;
352     real *omegam_array, *weightsm_array, *omegap_array, *weightsp_array, *varm_array, *varp_array,
353             *dwp_array, *dwm_array;
354     real    clam_varm, clam_varp, clam_weightsm, clam_weightsp, clam_minvar;
355     real *  lam_variance, *lam_dg;
356     double* p_k;
357     double  pks = 0;
358     real    chi_m1_0, chi_p1_0, chi_m2_0, chi_p2_0, chi_p1_m1, chi_p2_m1, chi_m1_p1, chi_m2_p1;
359
360     /* if we have equilibrated the weights, exit now */
361     if (dfhist->bEquil)
362     {
363         return FALSE;
364     }
365
366     if (CheckIfDoneEquilibrating(nlim, expand, dfhist, step))
367     {
368         dfhist->bEquil = TRUE;
369         /* zero out the visited states so we know how many equilibrated states we have
370            from here on out.*/
371         for (i = 0; i < nlim; i++)
372         {
373             dfhist->n_at_lam[i] = 0;
374         }
375         return TRUE;
376     }
377
378     /* If we reached this far, we have not equilibrated yet; keep on
379        updating the weights */
380
381     if (EWL(expand->elamstats))
382     {
383         if (expand->elamstats == elamstatsWL) /* Standard Wang-Landau */
384         {
385             dfhist->sum_weights[fep_state] -= dfhist->wl_delta;
386             dfhist->wl_histo[fep_state] += 1.0;
387         }
388         else if (expand->elamstats == elamstatsWWL) /* Weighted Wang-Landau */
389         {
390             snew(p_k, nlim);
391
392             /* first increment count */
393             GenerateGibbsProbabilities(weighted_lamee, p_k, &pks, 0, nlim - 1);
394             for (i = 0; i < nlim; i++)
395             {
396                 dfhist->wl_histo[i] += static_cast<real>(p_k[i]);
397             }
398
399             /* then increment weights (uses count) */
400             pks = 0.0;
401             GenerateWeightedGibbsProbabilities(weighted_lamee, p_k, &pks, nlim, dfhist->wl_histo,
402                                                dfhist->wl_delta);
403
404             for (i = 0; i < nlim; i++)
405             {
406                 dfhist->sum_weights[i] -= dfhist->wl_delta * static_cast<real>(p_k[i]);
407             }
408             /* Alternate definition, using logarithms. Shouldn't make very much difference! */
409             /*
410                real di;
411                for (i=0;i<nlim;i++)
412                {
413                 di = (real)1.0 + dfhist->wl_delta*(real)p_k[i];
414                 dfhist->sum_weights[i] -= log(di);
415                }
416              */
417             sfree(p_k);
418         }
419
420         zero_sum_weights = dfhist->sum_weights[0];
421         for (i = 0; i < nlim; i++)
422         {
423             dfhist->sum_weights[i] -= zero_sum_weights;
424         }
425     }
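
    /* A worked example with assumed numbers for the standard Wang-Landau branch above:
       with wl_delta = 1.0 kT and the walker at fep_state = 2, the update does

           sum_weights[2] -= 1.0;  // visiting a state lowers its weight, pushing the walker away
           wl_histo[2]    += 1.0;  // and records the visit for the flatness check

       Once wl_histo is flat (CheckHistogramRatios), the histogram is zeroed and wl_delta
       is scaled down by wl_scale in ExpandedEnsembleDynamics(). */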
426
427     if (expand->elamstats == elamstatsBARKER || expand->elamstats == elamstatsMETROPOLIS
428         || expand->elamstats == elamstatsMINVAR)
429     {
430
431         de_function = 0; /* initialized only to silence warnings; the logic below always assigns it before use */
432         maxc = 2 * expand->c_range + 1;
433
434         snew(lam_dg, nlim);
435         snew(lam_variance, nlim);
436
437         snew(omegap_array, maxc);
438         snew(weightsp_array, maxc);
439         snew(varp_array, maxc);
440         snew(dwp_array, maxc);
441
442         snew(omegam_array, maxc);
443         snew(weightsm_array, maxc);
444         snew(varm_array, maxc);
445         snew(dwm_array, maxc);
446
447         /* unpack the current lambdas -- we will only update 2 of these */
448
449         for (i = 0; i < nlim - 1; i++)
450         { /* only through the second to last */
451             lam_dg[i] = dfhist->sum_dg[i + 1] - dfhist->sum_dg[i];
452             lam_variance[i] =
453                     gmx::square(dfhist->sum_variance[i + 1]) - gmx::square(dfhist->sum_variance[i]);
454         }
455
456         /* accumulate running averages */
457         for (nval = 0; nval < maxc; nval++)
458         {
459             /* constants for later use */
460             cnval = static_cast<real>(nval - expand->c_range);
461             /* actually, should be able to rewrite it w/o exponential, for better numerical stability */
462             if (fep_state > 0)
463             {
464                 de = std::exp(cnval - (scaled_lamee[fep_state] - scaled_lamee[fep_state - 1]));
465                 if (expand->elamstats == elamstatsBARKER || expand->elamstats == elamstatsMINVAR)
466                 {
467                     de_function = 1.0 / (1.0 + de);
468                 }
469                 else if (expand->elamstats == elamstatsMETROPOLIS)
470                 {
471                     if (de < 1.0)
472                     {
473                         de_function = 1.0;
474                     }
475                     else
476                     {
477                         de_function = 1.0 / de;
478                     }
479                 }
480                 dfhist->accum_m[fep_state][nval] += de_function;
481                 dfhist->accum_m2[fep_state][nval] += de_function * de_function;
482             }
483
484             if (fep_state < nlim - 1)
485             {
486                 de = std::exp(-cnval + (scaled_lamee[fep_state + 1] - scaled_lamee[fep_state]));
487                 if (expand->elamstats == elamstatsBARKER || expand->elamstats == elamstatsMINVAR)
488                 {
489                     de_function = 1.0 / (1.0 + de);
490                 }
491                 else if (expand->elamstats == elamstatsMETROPOLIS)
492                 {
493                     if (de < 1.0)
494                     {
495                         de_function = 1.0;
496                     }
497                     else
498                     {
499                         de_function = 1.0 / de;
500                     }
501                 }
502                 dfhist->accum_p[fep_state][nval] += de_function;
503                 dfhist->accum_p2[fep_state][nval] += de_function * de_function;
504             }
505
506             /* Metropolis transition and Barker transition (unoptimized Bennett) acceptance weight determination */
507
508             n0 = dfhist->n_at_lam[fep_state];
509             if (fep_state > 0)
510             {
511                 nm1 = dfhist->n_at_lam[fep_state - 1];
512             }
513             else
514             {
515                 nm1 = 0;
516             }
517             if (fep_state < nlim - 1)
518             {
519                 np1 = dfhist->n_at_lam[fep_state + 1];
520             }
521             else
522             {
523                 np1 = 0;
524             }
525
526             /* the branch logic below should set all of these before use, but the compiler cannot prove it, so zero them here */
527             chi_m1_0 = chi_p1_0 = chi_m2_0 = chi_p2_0 = chi_p1_m1 = chi_p2_m1 = chi_m1_p1 = chi_m2_p1 = 0;
528
529             if (n0 > 0)
530             {
531                 chi_m1_0 = dfhist->accum_m[fep_state][nval] / n0;
532                 chi_p1_0 = dfhist->accum_p[fep_state][nval] / n0;
533                 chi_m2_0 = dfhist->accum_m2[fep_state][nval] / n0;
534                 chi_p2_0 = dfhist->accum_p2[fep_state][nval] / n0;
535             }
536
537             if ((fep_state > 0) && (nm1 > 0))
538             {
539                 chi_p1_m1 = dfhist->accum_p[fep_state - 1][nval] / nm1;
540                 chi_p2_m1 = dfhist->accum_p2[fep_state - 1][nval] / nm1;
541             }
542
543             if ((fep_state < nlim - 1) && (np1 > 0))
544             {
545                 chi_m1_p1 = dfhist->accum_m[fep_state + 1][nval] / np1;
546                 chi_m2_p1 = dfhist->accum_m2[fep_state + 1][nval] / np1;
547             }
548
549             omega_m1_0    = 0;
550             omega_p1_0    = 0;
551             clam_weightsm = 0;
552             clam_weightsp = 0;
553             clam_varm     = 0;
554             clam_varp     = 0;
555
556             if (fep_state > 0)
557             {
558                 if (n0 > 0)
559                 {
560                     omega_m1_0 = chi_m2_0 / (chi_m1_0 * chi_m1_0) - 1.0;
561                     if (nm1 > 0)
562                     {
563                         real omega_p1_m1 = chi_p2_m1 / (chi_p1_m1 * chi_p1_m1) - 1.0;
564                         clam_weightsm    = (std::log(chi_m1_0) - std::log(chi_p1_m1)) + cnval;
565                         clam_varm        = (1.0 / n0) * (omega_m1_0) + (1.0 / nm1) * (omega_p1_m1);
566                     }
567                 }
568             }
569
570             if (fep_state < nlim - 1)
571             {
572                 if (n0 > 0)
573                 {
574                     omega_p1_0 = chi_p2_0 / (chi_p1_0 * chi_p1_0) - 1.0;
575                     if (np1 > 0)
576                     {
577                         real omega_m1_p1 = chi_m2_p1 / (chi_m1_p1 * chi_m1_p1) - 1.0;
578                         clam_weightsp    = (std::log(chi_m1_p1) - std::log(chi_p1_0)) + cnval;
579                         clam_varp        = (1.0 / np1) * (omega_m1_p1) + (1.0 / n0) * (omega_p1_0);
580                     }
581                 }
582             }
583
584             if (n0 > 0)
585             {
586                 omegam_array[nval] = omega_m1_0;
587             }
588             else
589             {
590                 omegam_array[nval] = 0;
591             }
592             weightsm_array[nval] = clam_weightsm;
593             varm_array[nval]     = clam_varm;
594             if (nm1 > 0)
595             {
596                 dwm_array[nval] = std::fabs((cnval + std::log((1.0 * n0) / nm1)) - lam_dg[fep_state - 1]);
597             }
598             else
599             {
600                 dwm_array[nval] = std::fabs(cnval - lam_dg[fep_state - 1]);
601             }
602
603             if (n0 > 0)
604             {
605                 omegap_array[nval] = omega_p1_0;
606             }
607             else
608             {
609                 omegap_array[nval] = 0;
610             }
611             weightsp_array[nval] = clam_weightsp;
612             varp_array[nval]     = clam_varp;
613             if ((np1 > 0) && (n0 > 0))
614             {
615                 dwp_array[nval] = std::fabs((cnval + std::log((1.0 * np1) / n0)) - lam_dg[fep_state]);
616             }
617             else
618             {
619                 dwp_array[nval] = std::fabs(cnval - lam_dg[fep_state]);
620             }
621         }
622
623         /* find the C's closest to the old weights value */
624
625         min_nvalm     = FindMinimum(dwm_array, maxc);
626         omega_m1_0    = omegam_array[min_nvalm];
627         clam_weightsm = weightsm_array[min_nvalm];
628         clam_varm     = varm_array[min_nvalm];
629
630         min_nvalp     = FindMinimum(dwp_array, maxc);
631         omega_p1_0    = omegap_array[min_nvalp];
632         clam_weightsp = weightsp_array[min_nvalp];
633         clam_varp     = varp_array[min_nvalp];
634
635         clam_osum   = omega_m1_0 + omega_p1_0;
636         clam_minvar = 0;
637         if (clam_osum > 0)
638         {
639             clam_minvar = 0.5 * std::log(clam_osum);
640         }
641
642         if (fep_state > 0)
643         {
644             lam_dg[fep_state - 1]       = clam_weightsm;
645             lam_variance[fep_state - 1] = clam_varm;
646         }
647
648         if (fep_state < nlim - 1)
649         {
650             lam_dg[fep_state]       = clam_weightsp;
651             lam_variance[fep_state] = clam_varp;
652         }
653
654         if (expand->elamstats == elamstatsMINVAR)
655         {
656             bSufficientSamples = TRUE;
657             /* make sure they are all past a threshold */
658             for (i = 0; i < nlim; i++)
659             {
660                 if (dfhist->n_at_lam[i] < expand->minvarmin)
661                 {
662                     bSufficientSamples = FALSE;
663                 }
664             }
665             if (bSufficientSamples)
666             {
667                 dfhist->sum_minvar[fep_state] = clam_minvar;
668                 if (fep_state == 0)
669                 {
670                     for (i = 0; i < nlim; i++)
671                     {
672                         dfhist->sum_minvar[i] += (expand->minvar_const - clam_minvar);
673                     }
674                     expand->minvar_const          = clam_minvar;
675                     dfhist->sum_minvar[fep_state] = 0.0;
676                 }
677                 else
678                 {
679                     dfhist->sum_minvar[fep_state] -= expand->minvar_const;
680                 }
681             }
682         }
683
684         /* we need to rezero minvar now, since it could change at fep_state = 0 */
685         dfhist->sum_dg[0]       = 0.0;
686         dfhist->sum_variance[0] = 0.0;
687         dfhist->sum_weights[0]  = dfhist->sum_dg[0] + dfhist->sum_minvar[0]; /* should be zero */
688
689         for (i = 1; i < nlim; i++)
690         {
691             dfhist->sum_dg[i] = lam_dg[i - 1] + dfhist->sum_dg[i - 1];
692             dfhist->sum_variance[i] =
693                     std::sqrt(lam_variance[i - 1] + gmx::square(dfhist->sum_variance[i - 1]));
694             dfhist->sum_weights[i] = dfhist->sum_dg[i] + dfhist->sum_minvar[i];
695         }
696
697         sfree(lam_dg);
698         sfree(lam_variance);
699
700         sfree(omegam_array);
701         sfree(weightsm_array);
702         sfree(varm_array);
703         sfree(dwm_array);
704
705         sfree(omegap_array);
706         sfree(weightsp_array);
707         sfree(varp_array);
708         sfree(dwp_array);
709     }
710     return FALSE;
711 }
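
/* A short sketch of the two acceptance functions accumulated in UpdateWeights() above,
   where de is the exponential of the (shifted) reduced energy difference used in the
   loop (the numbers here are illustrative only):

       double de         = std::exp(2.0);                 // e.g. a 2 kT uphill move
       double barker     = 1.0 / (1.0 + de);              // ~0.12 (also used for min-variance)
       double metropolis = (de < 1.0) ? 1.0 : 1.0 / de;   // ~0.14, i.e. min(1, 1/de)
 */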
712
713 static int ChooseNewLambda(int               nlim,
714                            const t_expanded* expand,
715                            df_history_t*     dfhist,
716                            int               fep_state,
717                            const real*       weighted_lamee,
718                            double*           p_k,
719                            int64_t           seed,
720                            int64_t           step)
721 {
722     /* Choose new lambda value, and update transition matrix */
723
724     int                  i, ifep, minfep, maxfep, lamnew, lamtrial, starting_fep_state;
725     real                 r1, r2, de, trialprob, tprob = 0;
726     double *             propose, *accept, *remainder;
727     double               pks;
728     real                 pnorm;
729     gmx::ThreeFry2x64<0> rng(
730             seed, gmx::RandomDomain::ExpandedEnsemble); // We only draw once, so zero bits internal counter is fine
731     gmx::UniformRealDistribution<real> dist;
732
733     starting_fep_state = fep_state;
734     lamnew             = fep_state; /* so that there is a default setting -- stays the same */
735
736     if (!EWL(expand->elamstats)) /* the forced initial sampling sweep below is not used with Wang-Landau */
737     {
738         if ((expand->lmc_forced_nstart > 0) && (dfhist->n_at_lam[nlim - 1] <= expand->lmc_forced_nstart))
739         {
740             /* Use a marching method to run through the lambdas and get preliminary free energy data,
741                before starting 'free' sampling.  We start free sampling when we have enough at each lambda */
742
743             /* if we have enough at this lambda, move on to the next one */
744
745             if (dfhist->n_at_lam[fep_state] == expand->lmc_forced_nstart)
746             {
747                 lamnew = fep_state + 1;
748                 if (lamnew == nlim) /* whoops, stepped too far! */
749                 {
750                     lamnew -= 1;
751                 }
752             }
753             else
754             {
755                 lamnew = fep_state;
756             }
757             return lamnew;
758         }
759     }
760
761     snew(propose, nlim);
762     snew(accept, nlim);
763     snew(remainder, nlim);
764
765     for (i = 0; i < expand->lmc_repeats; i++)
766     {
767         rng.restart(step, i);
768         dist.reset();
769
770         for (ifep = 0; ifep < nlim; ifep++)
771         {
772             propose[ifep] = 0;
773             accept[ifep]  = 0;
774         }
775
776         if ((expand->elmcmove == elmcmoveGIBBS) || (expand->elmcmove == elmcmoveMETGIBBS))
777         {
778             /* use the Gibbs sampler, with restricted range */
779             if (expand->gibbsdeltalam < 0)
780             {
781                 minfep = 0;
782                 maxfep = nlim - 1;
783             }
784             else
785             {
786                 minfep = fep_state - expand->gibbsdeltalam;
787                 maxfep = fep_state + expand->gibbsdeltalam;
788                 if (minfep < 0)
789                 {
790                     minfep = 0;
791                 }
792                 if (maxfep > nlim - 1)
793                 {
794                     maxfep = nlim - 1;
795                 }
796             }
797
798             GenerateGibbsProbabilities(weighted_lamee, p_k, &pks, minfep, maxfep);
799
800             if (expand->elmcmove == elmcmoveGIBBS)
801             {
802                 for (ifep = minfep; ifep <= maxfep; ifep++)
803                 {
804                     propose[ifep] = p_k[ifep];
805                     accept[ifep]  = 1.0;
806                 }
807                 /* Gibbs sampling */
808                 r1 = dist(rng);
809                 for (lamnew = minfep; lamnew <= maxfep; lamnew++)
810                 {
811                     if (r1 <= p_k[lamnew])
812                     {
813                         break;
814                     }
815                     r1 -= p_k[lamnew];
816                 }
817             }
818             else if (expand->elmcmove == elmcmoveMETGIBBS)
819             {
820
821                 /* Metropolized Gibbs sampling */
822                 for (ifep = minfep; ifep <= maxfep; ifep++)
823                 {
824                     remainder[ifep] = 1 - p_k[ifep];
825                 }
826
827                 /* find the proposal probabilities */
828
829                 if (remainder[fep_state] == 0)
830                 {
831                     /* only the current state has any probability */
832                     /* we have to stay at the current state */
833                     lamnew = fep_state;
834                 }
835                 else
836                 {
837                     for (ifep = minfep; ifep <= maxfep; ifep++)
838                     {
839                         if (ifep != fep_state)
840                         {
841                             propose[ifep] = p_k[ifep] / remainder[fep_state];
842                         }
843                         else
844                         {
845                             propose[ifep] = 0;
846                         }
847                     }
848
849                     r1 = dist(rng);
850                     for (lamtrial = minfep; lamtrial <= maxfep; lamtrial++)
851                     {
852                         pnorm = p_k[lamtrial] / remainder[fep_state];
853                         if (lamtrial != fep_state)
854                         {
855                             if (r1 <= pnorm)
856                             {
857                                 break;
858                             }
859                             r1 -= pnorm;
860                         }
861                     }
862
863                     /* we have now selected lamtrial according to p(lamtrial)/(1 - p(fep_state)) */
864                     tprob = 1.0;
865                     /* trial probability is min{1, [1 - p(old)] / [1 - p(new)]}  MRS 1/8/2008 */
866                     trialprob = (remainder[fep_state]) / (remainder[lamtrial]);
867                     if (trialprob < tprob)
868                     {
869                         tprob = trialprob;
870                     }
871                     r2 = dist(rng);
872                     if (r2 < tprob)
873                     {
874                         lamnew = lamtrial;
875                     }
876                     else
877                     {
878                         lamnew = fep_state;
879                     }
880                 }
881
882                 /* now figure out the acceptance probability for each */
883                 for (ifep = minfep; ifep <= maxfep; ifep++)
884                 {
885                     tprob = 1.0;
886                     if (remainder[ifep] != 0)
887                     {
888                         trialprob = (remainder[fep_state]) / (remainder[ifep]);
889                     }
890                     else
891                     {
892                         trialprob = 1.0; /* this state is the only choice! */
893                     }
894                     if (trialprob < tprob)
895                     {
896                         tprob = trialprob;
897                     }
898                     /* probability for fep_state=0, but that's fine, it's never proposed! */
899                     accept[ifep] = tprob;
900                 }
901             }
902
903             if (lamnew > maxfep)
904             {
905                 /* it's possible that rounding error is the cause */
906                 if (gmx_within_tol(remainder[fep_state], 0, 50 * GMX_DOUBLE_EPS))
907                 {
908                     /* numerical rounding error -- no state other than the original has weight */
909                     lamnew = fep_state;
910                 }
911                 else
912                 {
913                     /* probably not a numerical issue */
914                     int   loc    = 0;
915                     int   nerror = 200 + (maxfep - minfep + 1) * 60;
916                     char* errorstr;
917                     snew(errorstr, nerror);
918                     /* if it's greater than maxfep, then something went wrong -- probably underflow
919                        in the calculation of the sum of weights. Generate detailed info for the failure */
920                     loc += sprintf(
921                             errorstr,
922                             "Something wrong in choosing new lambda state with a Gibbs move -- "
923                             "probably underflow in weight determination.\nDenominator is: "
924                             "%3d%17.10e\n  i                dE        numerator          weights\n",
925                             0, pks);
926                     for (ifep = minfep; ifep <= maxfep; ifep++)
927                     {
928                         loc += sprintf(&errorstr[loc], "%3d %17.10e%17.10e%17.10e\n", ifep,
929                                        weighted_lamee[ifep], p_k[ifep], dfhist->sum_weights[ifep]);
930                     }
931                     gmx_fatal(FARGS, "%s", errorstr);
932                 }
933             }
934         }
935         else if ((expand->elmcmove == elmcmoveMETROPOLIS) || (expand->elmcmove == elmcmoveBARKER))
936         {
937             /* use the metropolis sampler with trial +/- 1 */
938             r1 = dist(rng);
939             if (r1 < 0.5)
940             {
941                 if (fep_state == 0)
942                 {
943                     lamtrial = fep_state;
944                 }
945                 else
946                 {
947                     lamtrial = fep_state - 1;
948                 }
949             }
950             else
951             {
952                 if (fep_state == nlim - 1)
953                 {
954                     lamtrial = fep_state;
955                 }
956                 else
957                 {
958                     lamtrial = fep_state + 1;
959                 }
960             }
961
962             de = weighted_lamee[lamtrial] - weighted_lamee[fep_state];
963             if (expand->elmcmove == elmcmoveMETROPOLIS)
964             {
965                 tprob     = 1.0;
966                 trialprob = std::exp(de);
967                 if (trialprob < tprob)
968                 {
969                     tprob = trialprob;
970                 }
971                 propose[fep_state] = 0;
972                 propose[lamtrial]  = 1.0; /* note that this overwrites the above line if fep_state = ntrial, which only occurs at the ends */
973                 accept[fep_state] =
974                         1.0; /* doesn't actually matter, never proposed unless fep_state = ntrial, in which case it's 1.0 anyway */
975                 accept[lamtrial] = tprob;
976             }
977             else if (expand->elmcmove == elmcmoveBARKER)
978             {
979                 tprob = 1.0 / (1.0 + std::exp(-de));
980
981                 propose[fep_state] = (1 - tprob);
982                 propose[lamtrial] +=
983                         tprob; /* we add, to account for the fact that at the end, they might be the same point */
984                 accept[fep_state] = 1.0;
985                 accept[lamtrial]  = 1.0;
986             }
987
988             r2 = dist(rng);
989             if (r2 < tprob)
990             {
991                 lamnew = lamtrial;
992             }
993             else
994             {
995                 lamnew = fep_state;
996             }
997         }
998
999         for (ifep = 0; ifep < nlim; ifep++)
1000         {
1001             dfhist->Tij[fep_state][ifep] += propose[ifep] * accept[ifep];
1002             dfhist->Tij[fep_state][fep_state] += propose[ifep] * (1.0 - accept[ifep]);
1003         }
1004         fep_state = lamnew;
1005     }
1006
1007     dfhist->Tij_empirical[starting_fep_state][lamnew] += 1.0;
1008
1009     sfree(propose);
1010     sfree(accept);
1011     sfree(remainder);
1012
1013     return lamnew;
1014 }
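
/* A minimal sketch of how the move type used above is selected in the mdp file (option
   names as in recent GROMACS versions; treat the values as illustrative only):

       lmc-move       = metropolized-gibbs  ; the elmcmoveMETGIBBS branch above
       lmc-gibbsdelta = 1                   ; restrict Gibbs proposals to +/- 1 lambda state
       lmc-seed       = -1                  ; -1 lets mdrun generate a random seed
       lmc-repeats    = 1                   ; MC proposals per attempted lambda change
 */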
1015
1016 /* print out the weights to the log, along with current state */
1017 void PrintFreeEnergyInfoToFile(FILE*               outfile,
1018                                const t_lambda*     fep,
1019                                const t_expanded*   expand,
1020                                const t_simtemp*    simtemp,
1021                                const df_history_t* dfhist,
1022                                int                 fep_state,
1023                                int                 frequency,
1024                                int64_t             step)
1025 {
1026     int         nlim, i, ifep, jfep;
1027     real        dw, dg, dv, Tprint;
1028     const char* print_names[efptNR] = { " FEPL", "MassL", "CoulL",   " VdwL",
1029                                         "BondL", "RestT", "Temp.(K)" };
1030     gmx_bool    bSimTemp            = FALSE;
1031
1032     nlim = fep->n_lambda;
1033     if (simtemp != nullptr)
1034     {
1035         bSimTemp = TRUE;
1036     }
1037
1038     if (step % frequency == 0)
1039     {
1040         fprintf(outfile, "             MC-lambda information\n");
1041         if (EWL(expand->elamstats) && (!(dfhist->bEquil)))
1042         {
1043             fprintf(outfile, "  Wang-Landau incrementor is: %11.5g\n", dfhist->wl_delta);
1044         }
1045         fprintf(outfile, "  N");
1046         for (i = 0; i < efptNR; i++)
1047         {
1048             if (fep->separate_dvdl[i])
1049             {
1050                 fprintf(outfile, "%7s", print_names[i]);
1051             }
1052             else if ((i == efptTEMPERATURE) && bSimTemp)
1053             {
1054                 fprintf(outfile, "%10s", print_names[i]); /* more space for temperature formats */
1055             }
1056         }
1057         fprintf(outfile, "    Count   ");
1058         if (expand->elamstats == elamstatsMINVAR)
1059         {
1060             fprintf(outfile, "W(in kT)   G(in kT)  dG(in kT)  dV(in kT)\n");
1061         }
1062         else
1063         {
1064             fprintf(outfile, "G(in kT)  dG(in kT)\n");
1065         }
1066         for (ifep = 0; ifep < nlim; ifep++)
1067         {
1068             if (ifep == nlim - 1)
1069             {
1070                 dw = 0.0;
1071                 dg = 0.0;
1072                 dv = 0.0;
1073             }
1074             else
1075             {
1076                 dw = dfhist->sum_weights[ifep + 1] - dfhist->sum_weights[ifep];
1077                 dg = dfhist->sum_dg[ifep + 1] - dfhist->sum_dg[ifep];
1078                 dv = std::sqrt(gmx::square(dfhist->sum_variance[ifep + 1])
1079                                - gmx::square(dfhist->sum_variance[ifep]));
1080             }
1081             fprintf(outfile, "%3d", (ifep + 1));
1082             for (i = 0; i < efptNR; i++)
1083             {
1084                 if (fep->separate_dvdl[i])
1085                 {
1086                     fprintf(outfile, "%7.3f", fep->all_lambda[i][ifep]);
1087                 }
1088                 else if (i == efptTEMPERATURE && bSimTemp)
1089                 {
1090                     fprintf(outfile, "%9.3f", simtemp->temperatures[ifep]);
1091                 }
1092             }
1093             if (EWL(expand->elamstats)
1094                 && (!(dfhist->bEquil))) /* if performing WL and still haven't equilibrated */
1095             {
1096                 if (expand->elamstats == elamstatsWL)
1097                 {
1098                     fprintf(outfile, " %8d", static_cast<int>(dfhist->wl_histo[ifep]));
1099                 }
1100                 else
1101                 {
1102                     fprintf(outfile, " %8.3f", dfhist->wl_histo[ifep]);
1103                 }
1104             }
1105             else /* we have equilibrated weights */
1106             {
1107                 fprintf(outfile, " %8d", dfhist->n_at_lam[ifep]);
1108             }
1109             if (expand->elamstats == elamstatsMINVAR)
1110             {
1111                 fprintf(outfile, " %10.5f %10.5f %10.5f %10.5f", dfhist->sum_weights[ifep],
1112                         dfhist->sum_dg[ifep], dg, dv);
1113             }
1114             else
1115             {
1116                 fprintf(outfile, " %10.5f %10.5f", dfhist->sum_weights[ifep], dw);
1117             }
1118             if (ifep == fep_state)
1119             {
1120                 fprintf(outfile, " <<\n");
1121             }
1122             else
1123             {
1124                 fprintf(outfile, "   \n");
1125             }
1126         }
1127         fprintf(outfile, "\n");
1128
1129         if ((step % expand->nstTij == 0) && (expand->nstTij > 0) && (step > 0))
1130         {
1131             fprintf(outfile, "                     Transition Matrix\n");
1132             for (ifep = 0; ifep < nlim; ifep++)
1133             {
1134                 fprintf(outfile, "%12d", (ifep + 1));
1135             }
1136             fprintf(outfile, "\n");
1137             for (ifep = 0; ifep < nlim; ifep++)
1138             {
1139                 for (jfep = 0; jfep < nlim; jfep++)
1140                 {
1141                     if (dfhist->n_at_lam[ifep] > 0)
1142                     {
1143                         if (expand->bSymmetrizedTMatrix)
1144                         {
1145                             Tprint = (dfhist->Tij[ifep][jfep] + dfhist->Tij[jfep][ifep])
1146                                      / (dfhist->n_at_lam[ifep] + dfhist->n_at_lam[jfep]);
1147                         }
1148                         else
1149                         {
1150                             Tprint = (dfhist->Tij[ifep][jfep]) / (dfhist->n_at_lam[ifep]);
1151                         }
1152                     }
1153                     else
1154                     {
1155                         Tprint = 0.0;
1156                     }
1157                     fprintf(outfile, "%12.8f", Tprint);
1158                 }
1159                 fprintf(outfile, "%3d\n", (ifep + 1));
1160             }
1161
1162             fprintf(outfile, "                  Empirical Transition Matrix\n");
1163             for (ifep = 0; ifep < nlim; ifep++)
1164             {
1165                 fprintf(outfile, "%12d", (ifep + 1));
1166             }
1167             fprintf(outfile, "\n");
1168             for (ifep = 0; ifep < nlim; ifep++)
1169             {
1170                 for (jfep = 0; jfep < nlim; jfep++)
1171                 {
1172                     if (dfhist->n_at_lam[ifep] > 0)
1173                     {
1174                         if (expand->bSymmetrizedTMatrix)
1175                         {
1176                             Tprint = (dfhist->Tij_empirical[ifep][jfep] + dfhist->Tij_empirical[jfep][ifep])
1177                                      / (dfhist->n_at_lam[ifep] + dfhist->n_at_lam[jfep]);
1178                         }
1179                         else
1180                         {
1181                             Tprint = dfhist->Tij_empirical[ifep][jfep] / (dfhist->n_at_lam[ifep]);
1182                         }
1183                     }
1184                     else
1185                     {
1186                         Tprint = 0.0;
1187                     }
1188                     fprintf(outfile, "%12.8f", Tprint);
1189                 }
1190                 fprintf(outfile, "%3d\n", (ifep + 1));
1191             }
1192         }
1193     }
1194 }
1195
1196 int ExpandedEnsembleDynamics(FILE*                 log,
1197                              const t_inputrec*     ir,
1198                              const gmx_enerdata_t* enerd,
1199                              t_state*              state,
1200                              t_extmass*            MassQ,
1201                              int                   fep_state,
1202                              df_history_t*         dfhist,
1203                              int64_t               step,
1204                              rvec*                 v,
1205                              const t_mdatoms*      mdatoms)
1206 /* Note that the state variable is only needed for simulated tempering, not
1207    Hamiltonian expanded ensemble.  May be able to remove it after integrator refactoring. */
1208 {
1209     real *      pfep_lamee, *scaled_lamee, *weighted_lamee;
1210     double*     p_k;
1211     int         i, nlim, lamnew, totalsamples;
1212     real        oneovert, maxscaled = 0, maxweighted = 0;
1213     t_expanded* expand;
1214     t_simtemp*  simtemp;
1215     gmx_bool    bIfReset, bSwitchtoOneOverT, bDoneEquilibrating = FALSE;
1216
1217     expand  = ir->expandedvals;
1218     simtemp = ir->simtempvals;
1219     nlim    = ir->fepvals->n_lambda;
1220
1221     snew(scaled_lamee, nlim);
1222     snew(weighted_lamee, nlim);
1223     snew(pfep_lamee, nlim);
1224     snew(p_k, nlim);
1225
1226     /* update the count at the current lambda */
1227     dfhist->n_at_lam[fep_state]++;
1228
1229     /* need to calculate the PV term somewhere, but not needed here? Not until there's a lambda
1230        state that's pressure controlled.*/
1231     /*
1232        pVTerm = 0;
1233        where does this PV term go?
1234        for (i=0;i<nlim;i++)
1235        {
1236        fep_lamee[i] += pVTerm;
1237        }
1238      */
1239
1240     /* determine the maximum value to subtract off, to avoid overflow.  Probably a better way to do this */
1241     /* we don't need to include the pressure term, since the volume is the same between the two.
1242        is there some term we are neglecting, however? */
1243
1244     if (ir->efep != efepNO)
1245     {
1246         for (i = 0; i < nlim; i++)
1247         {
1248             if (ir->bSimTemp)
1249             {
1250                 /* Note -- this assumes no mass changes, since kinetic energy is not added  . . . */
1251                 scaled_lamee[i] = (enerd->enerpart_lambda[i + 1] - enerd->enerpart_lambda[0])
1252                                           / (simtemp->temperatures[i] * BOLTZ)
1253                                   + enerd->term[F_EPOT]
1254                                             * (1.0 / (simtemp->temperatures[i])
1255                                                - 1.0 / (simtemp->temperatures[fep_state]))
1256                                             / BOLTZ;
1257             }
1258             else
1259             {
1260                 scaled_lamee[i] = (enerd->enerpart_lambda[i + 1] - enerd->enerpart_lambda[0])
1261                                   / (expand->mc_temp * BOLTZ);
1262                 /* mc_temp is currently set to the system reference temperature (ref_t) unless otherwise defined */
1263             }
1264
1265             /* save these energies for printing, so they don't get overwritten by the next step */
1266             /* they aren't overwritten in the non-free energy case, but we always print with these
1267                for simplicity */
1268         }
1269     }
1270     else
1271     {
1272         if (ir->bSimTemp)
1273         {
1274             for (i = 0; i < nlim; i++)
1275             {
1276                 scaled_lamee[i] =
1277                         enerd->term[F_EPOT]
1278                         * (1.0 / simtemp->temperatures[i] - 1.0 / simtemp->temperatures[fep_state]) / BOLTZ;
1279             }
1280         }
1281     }
1282
1283     for (i = 0; i < nlim; i++)
1284     {
1285         pfep_lamee[i] = scaled_lamee[i];
1286
1287         weighted_lamee[i] = dfhist->sum_weights[i] - scaled_lamee[i];
1288         if (i == 0)
1289         {
1290             maxscaled   = scaled_lamee[i];
1291             maxweighted = weighted_lamee[i];
1292         }
1293         else
1294         {
1295             if (scaled_lamee[i] > maxscaled)
1296             {
1297                 maxscaled = scaled_lamee[i];
1298             }
1299             if (weighted_lamee[i] > maxweighted)
1300             {
1301                 maxweighted = weighted_lamee[i];
1302             }
1303         }
1304     }
1305
1306     for (i = 0; i < nlim; i++)
1307     {
1308         scaled_lamee[i] -= maxscaled;
1309         weighted_lamee[i] -= maxweighted;
1310     }
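
    /* At this point, up to the constant shifts removed above (which cancel on
       normalization), scaled_lamee[i] is the reduced potential u_i/kT of state i and
       weighted_lamee[i] = g_i - u_i/kT, with g_i the current weight estimate; the Gibbs
       probabilities computed from weighted_lamee are therefore proportional to
       exp(g_i - u_i/kT). */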
1311
1312     /* update weights - we decide whether or not to actually do this inside */
1313
1314     bDoneEquilibrating =
1315             UpdateWeights(nlim, expand, dfhist, fep_state, scaled_lamee, weighted_lamee, step);
1316     if (bDoneEquilibrating)
1317     {
1318         if (log)
1319         {
1320             fprintf(log, "\nStep %" PRId64 ": Weights have equilibrated, using criteria: %s\n",
1321                     step, elmceq_names[expand->elmceq]);
1322         }
1323     }
1324
1325     lamnew = ChooseNewLambda(nlim, expand, dfhist, fep_state, weighted_lamee, p_k,
1326                              ir->expandedvals->lmc_seed, step);
1327     /* if using simulated tempering, we need to adjust the temperatures */
1328     if (ir->bSimTemp && (lamnew != fep_state)) /* only need to change the temperatures if we change the state */
1329     {
1330         int   i, j, n, d;
1331         real* buf_ngtc;
1332         real  told;
1333         int   nstart, nend, gt;
1334
1335         snew(buf_ngtc, ir->opts.ngtc);
1336
1337         for (i = 0; i < ir->opts.ngtc; i++)
1338         {
1339             if (ir->opts.ref_t[i] > 0)
1340             {
1341                 told              = ir->opts.ref_t[i];
1342                 ir->opts.ref_t[i] = simtemp->temperatures[lamnew];
1343                 buf_ngtc[i]       = std::sqrt(ir->opts.ref_t[i] / told); /* using the buffer as temperature scaling */
1344             }
1345         }
1346
1347         /* we don't need to manipulate the ekind information, as it isn't due to be reset until the next step anyway */
1348
1349         nstart = 0;
1350         nend   = mdatoms->homenr;
1351         for (n = nstart; n < nend; n++)
1352         {
1353             gt = 0;
1354             if (mdatoms->cTC)
1355             {
1356                 gt = mdatoms->cTC[n];
1357             }
1358             for (d = 0; d < DIM; d++)
1359             {
1360                 v[n][d] *= buf_ngtc[gt];
1361             }
1362         }
1363
1364         if (inputrecNptTrotter(ir) || inputrecNphTrotter(ir) || inputrecNvtTrotter(ir))
1365         {
1366             /* we need to recalculate the masses if the temperature has changed */
1367             init_npt_masses(ir, state, MassQ, FALSE);
1368             for (i = 0; i < state->nnhpres; i++)
1369             {
1370                 for (j = 0; j < ir->opts.nhchainlength; j++)
1371                 {
1372                     state->nhpres_vxi[i + j] *= buf_ngtc[i];
1373                 }
1374             }
1375             for (i = 0; i < ir->opts.ngtc; i++)
1376             {
1377                 for (j = 0; j < ir->opts.nhchainlength; j++)
1378                 {
1379                     state->nosehoover_vxi[i + j] *= buf_ngtc[i];
1380                 }
1381             }
1382         }
1383         sfree(buf_ngtc);
1384     }
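
    /* A worked example with assumed temperatures for the rescaling above: if simulated
       tempering moves the system from ref_t = 300 K to 330 K, every velocity component is
       multiplied by sqrt(330/300) ~= 1.0488, so the kinetic energy immediately matches the
       new target temperature; the Nose-Hoover/NPT masses are rebuilt for the same reason. */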
1385
1386     /* now check on the Wang-Landau updating criteria */
1387
1388     if (EWL(expand->elamstats))
1389     {
1390         bSwitchtoOneOverT = FALSE;
1391         if (expand->bWLoneovert)
1392         {
1393             totalsamples = 0;
1394             for (i = 0; i < nlim; i++)
1395             {
1396                 totalsamples += dfhist->n_at_lam[i];
1397             }
1398             oneovert = (1.0 * nlim) / totalsamples;
1399             /* oneovert has decreased by a bit since last time, so we actually make sure it's within one of this number */
1400             /* switch to 1/t incrementing when wl_delta has decreased at least once, and wl_delta is now less than 1/t */
1401             if ((dfhist->wl_delta <= ((totalsamples) / (totalsamples - 1.00001)) * oneovert)
1402                 && (dfhist->wl_delta < expand->init_wl_delta))
1403             {
1404                 bSwitchtoOneOverT = TRUE;
1405             }
1406         }
1407         if (bSwitchtoOneOverT)
1408         {
1409             dfhist->wl_delta =
1410                     oneovert; /* now we reduce by this each time, instead of only at flatness */
1411         }
1412         else
1413         {
1414             bIfReset = CheckHistogramRatios(nlim, dfhist->wl_histo, expand->wl_ratio);
1415             if (bIfReset)
1416             {
1417                 for (i = 0; i < nlim; i++)
1418                 {
1419                     dfhist->wl_histo[i] = 0;
1420                 }
1421                 dfhist->wl_delta *= expand->wl_scale;
1422                 if (log)
1423                 {
1424                     fprintf(log, "\nStep %" PRId64 ": weights are now:", step);
1425                     for (i = 0; i < nlim; i++)
1426                     {
1427                         fprintf(log, " %.5f", dfhist->sum_weights[i]);
1428                     }
1429                     fprintf(log, "\n");
1430                 }
1431             }
1432         }
1433     }
1434     sfree(pfep_lamee);
1435     sfree(scaled_lamee);
1436     sfree(weighted_lamee);
1437     sfree(p_k);
1438
1439     return lamnew;
1440 }