[alexxy/gromacs.git] / src / gromacs / mdlib / expanded.cpp
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2012,2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35 #include "gmxpre.h"
36
37 #include "expanded.h"
38
39 #include <cmath>
40 #include <cstdio>
41
42 #include <algorithm>
43
44 #include "gromacs/domdec/domdec.h"
45 #include "gromacs/fileio/confio.h"
46 #include "gromacs/fileio/gmxfio.h"
47 #include "gromacs/fileio/xtcio.h"
48 #include "gromacs/gmxlib/chargegroup.h"
49 #include "gromacs/gmxlib/network.h"
50 #include "gromacs/gmxlib/nrnb.h"
51 #include "gromacs/listed_forces/disre.h"
52 #include "gromacs/listed_forces/orires.h"
53 #include "gromacs/math/functions.h"
54 #include "gromacs/math/units.h"
55 #include "gromacs/math/vec.h"
56 #include "gromacs/mdlib/calcmu.h"
57 #include "gromacs/mdlib/constr.h"
58 #include "gromacs/mdlib/force.h"
59 #include "gromacs/mdlib/update.h"
60 #include "gromacs/mdtypes/enerdata.h"
61 #include "gromacs/mdtypes/forcerec.h"
62 #include "gromacs/mdtypes/inputrec.h"
63 #include "gromacs/mdtypes/md_enums.h"
64 #include "gromacs/mdtypes/mdatom.h"
65 #include "gromacs/mdtypes/state.h"
66 #include "gromacs/random/threefry.h"
67 #include "gromacs/random/uniformrealdistribution.h"
68 #include "gromacs/timing/wallcycle.h"
69 #include "gromacs/utility/fatalerror.h"
70 #include "gromacs/utility/gmxmpi.h"
71 #include "gromacs/utility/smalloc.h"
72
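/* Initialize the Wang-Landau incrementor and the weight/free-energy accumulators
   from the initial lambda weights given in the expanded-ensemble input. */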
73 static void init_df_history_weights(df_history_t *dfhist, const t_expanded *expand, int nlim)
74 {
75     int i;
76     dfhist->wl_delta = expand->init_wl_delta;
77     for (i = 0; i < nlim; i++)
78     {
79         dfhist->sum_weights[i] = expand->init_lambda_weights[i];
80         dfhist->sum_dg[i]      = expand->init_lambda_weights[i];
81     }
82 }
83
84 /* Eventually should contain all the functions needed to initialize expanded ensemble
85    before the md loop starts */
86 void init_expanded_ensemble(gmx_bool bStateFromCP, const t_inputrec *ir, df_history_t *dfhist)
87 {
88     if (!bStateFromCP)
89     {
90         init_df_history_weights(dfhist, ir->expandedvals, ir->fepvals->n_lambda);
91     }
92 }
93
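/* Compute normalized Gibbs-sampling probabilities p_k[i] proportional to exp(ene[i])
   over the index range [minfep, maxfep]. The maximum energy is subtracted before
   exponentiating to avoid overflow; *pks returns the resulting (shifted) normalization sum. */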
94 static void GenerateGibbsProbabilities(const real *ene, double *p_k, double *pks, int minfep, int maxfep)
95 {
96
97     int  i;
98     real maxene;
99
100     *pks   = 0.0;
101     maxene = ene[minfep];
102     /* find the maximum value */
103     for (i = minfep; i <= maxfep; i++)
104     {
105         if (ene[i] > maxene)
106         {
107             maxene = ene[i];
108         }
109     }
110     /* find the denominator */
111     for (i = minfep; i <= maxfep; i++)
112     {
113         *pks += std::exp(ene[i]-maxene);
114     }
115     /*numerators*/
116     for (i = minfep; i <= maxfep; i++)
117     {
118         p_k[i] = std::exp(ene[i]-maxene) / *pks;
119     }
120 }
121
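/* As GenerateGibbsProbabilities, but over all nlim states and with each energy augmented
   by log(nvals[i]), the histogram count at that state, so that more frequently visited
   states carry more weight; delta keeps the log argument positive for unvisited states. */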
122 static void GenerateWeightedGibbsProbabilities(const real *ene, double *p_k, double *pks, int nlim, real *nvals, real delta)
123 {
124
125     int   i;
126     real  maxene;
127     real *nene;
128     *pks = 0.0;
129
130     snew(nene, nlim);
131     for (i = 0; i < nlim; i++)
132     {
133         if (nvals[i] == 0)
134         {
135             /* this state has no samples yet; add delta so the argument of the
136                log stays strictly positive */
137             nene[i] = ene[i] + std::log(nvals[i]+delta);
138         }
139         else
140         {
141             nene[i] = ene[i] + std::log(nvals[i]);
142         }
143     }
144
145     /* find the maximum value */
146     maxene = nene[0];
147     for (i = 0; i < nlim; i++)
148     {
149         if (nene[i] > maxene)
150         {
151             maxene = nene[i];
152         }
153     }
154
155     /* subtract off the maximum, avoiding overflow */
156     for (i = 0; i < nlim; i++)
157     {
158         nene[i] -= maxene;
159     }
160
161     /* find the denominator */
162     for (i = 0; i < nlim; i++)
163     {
164         *pks += std::exp(nene[i]);
165     }
166
167     /*numerators*/
168     for (i = 0; i < nlim; i++)
169     {
170         p_k[i] = std::exp(nene[i]) / *pks;
171     }
172     sfree(nene);
173 }
174
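/* Return the index of the smallest element of min_metric[0..N-1]. */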
175 static int FindMinimum(const real *min_metric, int N)
176 {
177
178     real min_val;
179     int  min_nval, nval;
180
181     min_nval = 0;
182     min_val  = min_metric[0];
183
184     for (nval = 0; nval < N; nval++)
185     {
186         if (min_metric[nval] < min_val)
187         {
188             min_val  = min_metric[nval];
189             min_nval = nval;
190         }
191     }
192     return min_nval;
193 }
194
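/* Return TRUE if every histogram entry lies within a factor of ratio ... 1/ratio of the
   mean (i.e. the histogram is acceptably flat); return FALSE if there are no samples. */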
195 static gmx_bool CheckHistogramRatios(int nhisto, const real *histo, real ratio)
196 {
197
198     int      i;
199     real     nmean;
200     gmx_bool bIfFlat;
201
202     nmean = 0;
203     for (i = 0; i < nhisto; i++)
204     {
205         nmean += histo[i];
206     }
207
208     if (nmean == 0)
209     {
210         /* no samples at all -- the histogram cannot be considered flat */
211         bIfFlat = FALSE;
212         return bIfFlat;
213     }
214     nmean /= static_cast<real>(nhisto);
215
216     bIfFlat = TRUE;
217     for (i = 0; i < nhisto; i++)
218     {
219         /* make sure that all points are in the ratio < x <  1/ratio range  */
220         if (!((histo[i]/nmean < 1.0/ratio) && (histo[i]/nmean > ratio)))
221         {
222             bIfFlat = FALSE;
223             break;
224         }
225     }
226     return bIfFlat;
227 }
228
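/* Decide whether the expanded-ensemble weights can be considered equilibrated. With a
   forced initial sweep (lmc-forced-nstart > 0) this requires every state to have been
   visited lmc_forced_nstart times; otherwise the equilibration criterion chosen in the
   input is applied (never/always, step count, total samples, samples per state,
   Wang-Landau incrementor size, or histogram flatness). */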
229 static gmx_bool CheckIfDoneEquilibrating(int nlim, const t_expanded *expand, const df_history_t *dfhist, int64_t step)
230 {
231
232     int      i, totalsamples;
233     gmx_bool bDoneEquilibrating = TRUE;
234     gmx_bool bIfFlat;
235
236     /* If we are doing slow growth to get initial values, we haven't finished equilibrating */
237     if (expand->lmc_forced_nstart > 0)
238     {
239         for (i = 0; i < nlim; i++)
240         {
241             if (dfhist->n_at_lam[i] < expand->lmc_forced_nstart) /* we are still doing the initial sweep, so we're definitely not
242                                                                     done equilibrating*/
243             {
244                 bDoneEquilibrating = FALSE;
245                 break;
246             }
247         }
248     }
249     else
250     {
251         /* assume we have equilibrated the weights, then check to see if any of the conditions are not met */
252         bDoneEquilibrating = TRUE;
253
254         /* calculate the total number of samples */
255         switch (expand->elmceq)
256         {
257             case elmceqNO:
258                 /* We have not equilibrated, and won't, ever. */
259                 bDoneEquilibrating = FALSE;
260                 break;
261             case elmceqYES:
262                 /* we have equilibrated -- we're done */
263                 bDoneEquilibrating = TRUE;
264                 break;
265             case elmceqSTEPS:
266                 /* equilibrating by step count: we are not done while still under equil_steps */
267                 if (step < expand->equil_steps)
268                 {
269                     bDoneEquilibrating = FALSE;
270                 }
271                 break;
272             case elmceqSAMPLES:
273                 totalsamples = 0;
274                 for (i = 0; i < nlim; i++)
275                 {
276                     totalsamples += dfhist->n_at_lam[i];
277                 }
278                 if (totalsamples < expand->equil_samples)
279                 {
280                     bDoneEquilibrating = FALSE;
281                 }
282                 break;
283             case elmceqNUMATLAM:
284                 for (i = 0; i < nlim; i++)
285                 {
286                     if (dfhist->n_at_lam[i] < expand->equil_n_at_lam) /* we are still doing the initial sweep, so we're definitely not
287                                                                          done equilibrating*/
288                     {
289                         bDoneEquilibrating  = FALSE;
290                         break;
291                     }
292                 }
293                 break;
294             case elmceqWLDELTA:
295                 if (EWL(expand->elamstats)) /* This check is in readir as well, but
296                                                just to be sure */
297                 {
298                     if (dfhist->wl_delta > expand->equil_wl_delta)
299                     {
300                         bDoneEquilibrating = FALSE;
301                     }
302                 }
303                 break;
304             case elmceqRATIO:
305                 /* we can use the flatness as a judge of good weights, as long as
306                    we're not doing minvar, or Wang-Landau.
307                    But turn off for now until we figure out exactly how we do this.
308                  */
309
310                 if (!(EWL(expand->elamstats) || expand->elamstats == elamstatsMINVAR))
311                 {
312                     /* we want to use flatness -avoiding- the forced-through samples.  Plus, we need to convert to
313                        floats for this histogram function. */
314
315                     real *modhisto;
316                     snew(modhisto, nlim);
317                     for (i = 0; i < nlim; i++)
318                     {
319                         modhisto[i] = 1.0*(dfhist->n_at_lam[i]-expand->lmc_forced_nstart);
320                     }
321                     bIfFlat = CheckHistogramRatios(nlim, modhisto, expand->equil_ratio);
322                     sfree(modhisto);
323                     if (!bIfFlat)
324                     {
325                         bDoneEquilibrating = FALSE;
326                     }
327                 }
328                 break;
329             default:
330                 bDoneEquilibrating = TRUE;
331                 break;
332         }
333     }
334     return bDoneEquilibrating;
335 }
336
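/* Update the free-energy weight estimates at the current lambda state with the estimator
   selected in the input (Wang-Landau, weighted Wang-Landau, Barker, Metropolis or
   minimum-variance). Returns TRUE on the step at which the weights are first detected to
   be equilibrated (after which they are no longer updated), FALSE otherwise. */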
337 static gmx_bool UpdateWeights(int nlim, t_expanded *expand, df_history_t *dfhist,
338                               int fep_state, const real *scaled_lamee, const real *weighted_lamee, int64_t step)
339 {
340     gmx_bool  bSufficientSamples;
341     int       i;
342     int       n0, np1, nm1, nval, min_nvalm, min_nvalp, maxc;
343     real      omega_m1_0, omega_p1_0, clam_osum;
344     real      de, de_function;
345     real      cnval, zero_sum_weights;
346     real     *omegam_array, *weightsm_array, *omegap_array, *weightsp_array, *varm_array, *varp_array, *dwp_array, *dwm_array;
347     real      clam_varm, clam_varp, clam_weightsm, clam_weightsp, clam_minvar;
348     real     *lam_variance, *lam_dg;
349     double   *p_k;
350     double    pks = 0;
351     real      chi_m1_0, chi_p1_0, chi_m2_0, chi_p2_0, chi_p1_m1, chi_p2_m1, chi_m1_p1, chi_m2_p1;
352
353     /* if we have equilibrated the weights, exit now */
354     if (dfhist->bEquil)
355     {
356         return FALSE;
357     }
358
359     if (CheckIfDoneEquilibrating(nlim, expand, dfhist, step))
360     {
361         dfhist->bEquil = TRUE;
362         /* zero out the visited states so we know how many equilibrated states we have
363            from here on out.*/
364         for (i = 0; i < nlim; i++)
365         {
366             dfhist->n_at_lam[i] = 0;
367         }
368         return TRUE;
369     }
370
371     /* If we got this far, the weights have not equilibrated yet; keep on
372        updating them */
373
374     if (EWL(expand->elamstats))
375     {
376         if (expand->elamstats == elamstatsWL)  /* Standard Wang-Landau */
377         {
378             dfhist->sum_weights[fep_state] -= dfhist->wl_delta;
379             dfhist->wl_histo[fep_state]    += 1.0;
380         }
381         else if (expand->elamstats == elamstatsWWL) /* Weighted Wang-Landau */
382         {
383             snew(p_k, nlim);
384
385             /* first increment count */
386             GenerateGibbsProbabilities(weighted_lamee, p_k, &pks, 0, nlim-1);
387             for (i = 0; i < nlim; i++)
388             {
389                 dfhist->wl_histo[i] += static_cast<real>(p_k[i]);
390             }
391
392             /* then increment weights (uses count) */
393             pks = 0.0;
394             GenerateWeightedGibbsProbabilities(weighted_lamee, p_k, &pks, nlim, dfhist->wl_histo, dfhist->wl_delta);
395
396             for (i = 0; i < nlim; i++)
397             {
398                 dfhist->sum_weights[i] -= dfhist->wl_delta*static_cast<real>(p_k[i]);
399             }
400             /* Alternate definition, using logarithms. Shouldn't make very much difference! */
401             /*
402                real di;
403                for (i=0;i<nlim;i++)
404                {
405                 di = (real)1.0 + dfhist->wl_delta*(real)p_k[i];
406                 dfhist->sum_weights[i] -= log(di);
407                }
408              */
409             sfree(p_k);
410         }
411
412         zero_sum_weights =  dfhist->sum_weights[0];
413         for (i = 0; i < nlim; i++)
414         {
415             dfhist->sum_weights[i] -= zero_sum_weights;
416         }
417     }
418
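    /* For the Barker, Metropolis and minimum-variance estimators: accumulate acceptance
       statistics for transitions to the neighbouring states over a grid of 2*c-range+1
       trial constants, then (below) pick for each neighbour the constant whose implied
       free-energy difference is closest to the current estimate. */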
419     if (expand->elamstats == elamstatsBARKER || expand->elamstats == elamstatsMETROPOLIS || expand->elamstats == elamstatsMINVAR)
420     {
421
422         de_function = 0;  /* initialized only to silence compiler warnings; the logic below always sets it before use */
423         maxc        = 2*expand->c_range+1;
424
425         snew(lam_dg, nlim);
426         snew(lam_variance, nlim);
427
428         snew(omegap_array, maxc);
429         snew(weightsp_array, maxc);
430         snew(varp_array, maxc);
431         snew(dwp_array, maxc);
432
433         snew(omegam_array, maxc);
434         snew(weightsm_array, maxc);
435         snew(varm_array, maxc);
436         snew(dwm_array, maxc);
437
438         /* unpack the current lambdas -- we will only update 2 of these */
439
440         for (i = 0; i < nlim-1; i++)
441         {   /* only through the second to last */
442             lam_dg[i]       = dfhist->sum_dg[i+1] - dfhist->sum_dg[i];
443             lam_variance[i] = gmx::square(dfhist->sum_variance[i+1]) - gmx::square(dfhist->sum_variance[i]);
444         }
445
446         /* accumulate running averages */
447         for (nval = 0; nval < maxc; nval++)
448         {
449             /* constants for later use */
450             cnval = static_cast<real>(nval-expand->c_range);
451             /* actually, should be able to rewrite it w/o exponential, for better numerical stability */
452             if (fep_state > 0)
453             {
454                 de = std::exp(cnval - (scaled_lamee[fep_state]-scaled_lamee[fep_state-1]));
455                 if (expand->elamstats == elamstatsBARKER || expand->elamstats == elamstatsMINVAR)
456                 {
457                     de_function = 1.0/(1.0+de);
458                 }
459                 else if (expand->elamstats == elamstatsMETROPOLIS)
460                 {
461                     if (de < 1.0)
462                     {
463                         de_function = 1.0;
464                     }
465                     else
466                     {
467                         de_function = 1.0/de;
468                     }
469                 }
470                 dfhist->accum_m[fep_state][nval]  += de_function;
471                 dfhist->accum_m2[fep_state][nval] += de_function*de_function;
472             }
473
474             if (fep_state < nlim-1)
475             {
476                 de = std::exp(-cnval + (scaled_lamee[fep_state+1]-scaled_lamee[fep_state]));
477                 if (expand->elamstats == elamstatsBARKER || expand->elamstats == elamstatsMINVAR)
478                 {
479                     de_function = 1.0/(1.0+de);
480                 }
481                 else if (expand->elamstats == elamstatsMETROPOLIS)
482                 {
483                     if (de < 1.0)
484                     {
485                         de_function = 1.0;
486                     }
487                     else
488                     {
489                         de_function = 1.0/de;
490                     }
491                 }
492                 dfhist->accum_p[fep_state][nval]  += de_function;
493                 dfhist->accum_p2[fep_state][nval] += de_function*de_function;
494             }
495
496             /* Metropolis transition and Barker transition (unoptimized Bennett) acceptance weight determination */
497
498             n0  = dfhist->n_at_lam[fep_state];
499             if (fep_state > 0)
500             {
501                 nm1 = dfhist->n_at_lam[fep_state-1];
502             }
503             else
504             {
505                 nm1 = 0;
506             }
507             if (fep_state < nlim-1)
508             {
509                 np1 = dfhist->n_at_lam[fep_state+1];
510             }
511             else
512             {
513                 np1 = 0;
514             }
515
516             /* the logic below should set all of these before they are used, but the compiler cannot verify that, so zero them here */
517             chi_m1_0 = chi_p1_0 = chi_m2_0 = chi_p2_0 = chi_p1_m1 = chi_p2_m1 = chi_m1_p1 = chi_m2_p1 = 0;
518
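            /* chi_* are running averages of the acceptance function (and of its square)
               accumulated at this state and at its neighbours; they are essentially the
               ingredients of an (unoptimized) Bennett-style estimate of the free-energy
               difference and its variance, evaluated at the trial constant cnval. */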
519             if (n0 > 0)
520             {
521                 chi_m1_0 = dfhist->accum_m[fep_state][nval]/n0;
522                 chi_p1_0 = dfhist->accum_p[fep_state][nval]/n0;
523                 chi_m2_0 = dfhist->accum_m2[fep_state][nval]/n0;
524                 chi_p2_0 = dfhist->accum_p2[fep_state][nval]/n0;
525             }
526
527             if ((fep_state > 0 ) && (nm1 > 0))
528             {
529                 chi_p1_m1 = dfhist->accum_p[fep_state-1][nval]/nm1;
530                 chi_p2_m1 = dfhist->accum_p2[fep_state-1][nval]/nm1;
531             }
532
533             if ((fep_state < nlim-1) && (np1 > 0))
534             {
535                 chi_m1_p1 = dfhist->accum_m[fep_state+1][nval]/np1;
536                 chi_m2_p1 = dfhist->accum_m2[fep_state+1][nval]/np1;
537             }
538
539             omega_m1_0    = 0;
540             omega_p1_0    = 0;
541             clam_weightsm = 0;
542             clam_weightsp = 0;
543             clam_varm     = 0;
544             clam_varp     = 0;
545
546             if (fep_state > 0)
547             {
548                 if (n0 > 0)
549                 {
550                     omega_m1_0 = chi_m2_0/(chi_m1_0*chi_m1_0) - 1.0;
551                     if (nm1 > 0)
552                     {
553                         real omega_p1_m1 = chi_p2_m1/(chi_p1_m1*chi_p1_m1) - 1.0;
554                         clam_weightsm = (std::log(chi_m1_0) - std::log(chi_p1_m1)) + cnval;
555                         clam_varm     = (1.0/n0)*(omega_m1_0) + (1.0/nm1)*(omega_p1_m1);
556                     }
557                 }
558             }
559
560             if (fep_state < nlim-1)
561             {
562                 if (n0 > 0)
563                 {
564                     omega_p1_0 = chi_p2_0/(chi_p1_0*chi_p1_0) - 1.0;
565                     if (np1 > 0)
566                     {
567                         real omega_m1_p1 = chi_m2_p1/(chi_m1_p1*chi_m1_p1) - 1.0;
568                         clam_weightsp = (std::log(chi_m1_p1) - std::log(chi_p1_0)) + cnval;
569                         clam_varp     = (1.0/np1)*(omega_m1_p1) + (1.0/n0)*(omega_p1_0);
570                     }
571                 }
572             }
573
574             if (n0 > 0)
575             {
576                 omegam_array[nval]             = omega_m1_0;
577             }
578             else
579             {
580                 omegam_array[nval]             = 0;
581             }
582             weightsm_array[nval]           = clam_weightsm;
583             varm_array[nval]               = clam_varm;
584             if (nm1 > 0)
585             {
586                 dwm_array[nval]  = std::fabs( (cnval + std::log((1.0*n0)/nm1)) - lam_dg[fep_state-1] );
587             }
588             else
589             {
590                 dwm_array[nval]  = std::fabs( cnval - lam_dg[fep_state-1] );
591             }
592
593             if (n0 > 0)
594             {
595                 omegap_array[nval]             = omega_p1_0;
596             }
597             else
598             {
599                 omegap_array[nval]             = 0;
600             }
601             weightsp_array[nval]           = clam_weightsp;
602             varp_array[nval]               = clam_varp;
603             if ((np1 > 0) && (n0 > 0))
604             {
605                 dwp_array[nval]  = std::fabs( (cnval + std::log((1.0*np1)/n0)) - lam_dg[fep_state] );
606             }
607             else
608             {
609                 dwp_array[nval]  = std::fabs( cnval - lam_dg[fep_state] );
610             }
611
612         }
613
614         /* find the C's closest to the old weights value */
615
616         min_nvalm     = FindMinimum(dwm_array, maxc);
617         omega_m1_0    = omegam_array[min_nvalm];
618         clam_weightsm = weightsm_array[min_nvalm];
619         clam_varm     = varm_array[min_nvalm];
620
621         min_nvalp     = FindMinimum(dwp_array, maxc);
622         omega_p1_0    = omegap_array[min_nvalp];
623         clam_weightsp = weightsp_array[min_nvalp];
624         clam_varp     = varp_array[min_nvalp];
625
626         clam_osum   = omega_m1_0 + omega_p1_0;
627         clam_minvar = 0;
628         if (clam_osum > 0)
629         {
630             clam_minvar = 0.5*std::log(clam_osum);
631         }
632
633         if (fep_state > 0)
634         {
635             lam_dg[fep_state-1]       = clam_weightsm;
636             lam_variance[fep_state-1] = clam_varm;
637         }
638
639         if (fep_state < nlim-1)
640         {
641             lam_dg[fep_state]       = clam_weightsp;
642             lam_variance[fep_state] = clam_varp;
643         }
644
645         if (expand->elamstats == elamstatsMINVAR)
646         {
647             bSufficientSamples = TRUE;
648             /* make sure they are all past a threshold */
649             for (i = 0; i < nlim; i++)
650             {
651                 if (dfhist->n_at_lam[i] < expand->minvarmin)
652                 {
653                     bSufficientSamples = FALSE;
654                 }
655             }
656             if (bSufficientSamples)
657             {
658                 dfhist->sum_minvar[fep_state] = clam_minvar;
659                 if (fep_state == 0)
660                 {
661                     for (i = 0; i < nlim; i++)
662                     {
663                         dfhist->sum_minvar[i] += (expand->minvar_const-clam_minvar);
664                     }
665                     expand->minvar_const          = clam_minvar;
666                     dfhist->sum_minvar[fep_state] = 0.0;
667                 }
668                 else
669                 {
670                     dfhist->sum_minvar[fep_state] -= expand->minvar_const;
671                 }
672             }
673         }
674
675         /* we need to rezero minvar now, since it could change at fep_state = 0 */
676         dfhist->sum_dg[0]       = 0.0;
677         dfhist->sum_variance[0] = 0.0;
678         dfhist->sum_weights[0]  = dfhist->sum_dg[0] + dfhist->sum_minvar[0]; /* should be zero */
679
680         for (i = 1; i < nlim; i++)
681         {
682             dfhist->sum_dg[i]       = lam_dg[i-1] + dfhist->sum_dg[i-1];
683             dfhist->sum_variance[i] = std::sqrt(lam_variance[i-1] + gmx::square(dfhist->sum_variance[i-1]));
684             dfhist->sum_weights[i]  = dfhist->sum_dg[i] + dfhist->sum_minvar[i];
685         }
686
687         sfree(lam_dg);
688         sfree(lam_variance);
689
690         sfree(omegam_array);
691         sfree(weightsm_array);
692         sfree(varm_array);
693         sfree(dwm_array);
694
695         sfree(omegap_array);
696         sfree(weightsp_array);
697         sfree(varp_array);
698         sfree(dwp_array);
699     }
700     return FALSE;
701 }
702
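/* Perform the Monte Carlo move in lambda space: starting from fep_state, propose and
   accept/reject a new lambda state with the move type selected in the input (Gibbs,
   Metropolized Gibbs, Metropolis +/-1 or Barker +/-1), update the proposal/acceptance
   transition matrices in dfhist, and return the new state index. */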
703 static int ChooseNewLambda(int nlim, const t_expanded *expand, df_history_t *dfhist, int fep_state,
704                            const real *weighted_lamee, double *p_k,
705                            int64_t seed, int64_t step)
706 {
707     /* Choose new lambda value, and update transition matrix */
708
709     int                                  i, ifep, minfep, maxfep, lamnew, lamtrial, starting_fep_state;
710     real                                 r1, r2, de, trialprob, tprob = 0;
711     double                              *propose, *accept, *remainder;
712     double                               pks;
713     real                                 pnorm;
714     gmx::ThreeFry2x64<0>                 rng(seed, gmx::RandomDomain::ExpandedEnsemble); // We only draw once, so zero bits internal counter is fine
715     gmx::UniformRealDistribution<real>   dist;
716
717     starting_fep_state = fep_state;
718     lamnew             = fep_state; /* so that there is a default setting -- stays the same */
719
720     if (!EWL(expand->elamstats))    /* ignore equilibrating the weights if using WL */
721     {
722         if ((expand->lmc_forced_nstart > 0) && (dfhist->n_at_lam[nlim-1] <= expand->lmc_forced_nstart))
723         {
724             /* Use a marching method to run through the lambdas and get preliminary free energy data,
725                before starting 'free' sampling.  We start free sampling when we have enough at each lambda */
726
727             /* if we have enough at this lambda, move on to the next one */
728
729             if (dfhist->n_at_lam[fep_state] == expand->lmc_forced_nstart)
730             {
731                 lamnew = fep_state+1;
732                 if (lamnew == nlim)  /* whoops, stepped too far! */
733                 {
734                     lamnew -= 1;
735                 }
736             }
737             else
738             {
739                 lamnew = fep_state;
740             }
741             return lamnew;
742         }
743     }
744
745     snew(propose, nlim);
746     snew(accept, nlim);
747     snew(remainder, nlim);
748
749     for (i = 0; i < expand->lmc_repeats; i++)
750     {
751         rng.restart(step, i);
752         dist.reset();
753
754         for (ifep = 0; ifep < nlim; ifep++)
755         {
756             propose[ifep] = 0;
757             accept[ifep]  = 0;
758         }
759
760         if ((expand->elmcmove == elmcmoveGIBBS) || (expand->elmcmove == elmcmoveMETGIBBS))
761         {
762             /* use the Gibbs sampler, with restricted range */
763             if (expand->gibbsdeltalam < 0)
764             {
765                 minfep      = 0;
766                 maxfep      = nlim-1;
767             }
768             else
769             {
770                 minfep = fep_state - expand->gibbsdeltalam;
771                 maxfep = fep_state + expand->gibbsdeltalam;
772                 if (minfep < 0)
773                 {
774                     minfep = 0;
775                 }
776                 if (maxfep > nlim-1)
777                 {
778                     maxfep = nlim-1;
779                 }
780             }
781
782             GenerateGibbsProbabilities(weighted_lamee, p_k, &pks, minfep, maxfep);
783
784             if (expand->elmcmove == elmcmoveGIBBS)
785             {
786                 for (ifep = minfep; ifep <= maxfep; ifep++)
787                 {
788                     propose[ifep] = p_k[ifep];
789                     accept[ifep]  = 1.0;
790                 }
791                 /* Gibbs sampling */
792                 r1 = dist(rng);
793                 for (lamnew = minfep; lamnew <= maxfep; lamnew++)
794                 {
795                     if (r1 <= p_k[lamnew])
796                     {
797                         break;
798                     }
799                     r1 -= p_k[lamnew];
800                 }
801             }
802             else if (expand->elmcmove == elmcmoveMETGIBBS)
803             {
804
805                 /* Metropolized Gibbs sampling */
806                 for (ifep = minfep; ifep <= maxfep; ifep++)
807                 {
808                     remainder[ifep] = 1 - p_k[ifep];
809                 }
810
811                 /* find the proposal probabilities */
812
813                 if (remainder[fep_state] == 0)
814                 {
815                     /* only the current state has any probability */
816                     /* we have to stay at the current state */
817                     lamnew = fep_state;
818                 }
819                 else
820                 {
821                     for (ifep = minfep; ifep <= maxfep; ifep++)
822                     {
823                         if (ifep != fep_state)
824                         {
825                             propose[ifep] = p_k[ifep]/remainder[fep_state];
826                         }
827                         else
828                         {
829                             propose[ifep] = 0;
830                         }
831                     }
832
833                     r1 = dist(rng);
834                     for (lamtrial = minfep; lamtrial <= maxfep; lamtrial++)
835                     {
836                         pnorm = p_k[lamtrial]/remainder[fep_state];
837                         if (lamtrial != fep_state)
838                         {
839                             if (r1 <= pnorm)
840                             {
841                                 break;
842                             }
843                             r1 -= pnorm;
844                         }
845                     }
846
847                     /* we have now selected lamtrial according to p(lamtrial)/(1-p(fep_state)) */
848                     tprob = 1.0;
849                     /* trial probability is min{1, (1 - p(old))/(1 - p(new))} MRS 1/8/2008 */
850                     trialprob = (remainder[fep_state])/(remainder[lamtrial]);
851                     if (trialprob < tprob)
852                     {
853                         tprob = trialprob;
854                     }
855                     r2 = dist(rng);
856                     if (r2 < tprob)
857                     {
858                         lamnew = lamtrial;
859                     }
860                     else
861                     {
862                         lamnew = fep_state;
863                     }
864                 }
865
866                 /* now figure out the acceptance probability for each */
867                 for (ifep = minfep; ifep <= maxfep; ifep++)
868                 {
869                     tprob = 1.0;
870                     if (remainder[ifep] != 0)
871                     {
872                         trialprob = (remainder[fep_state])/(remainder[ifep]);
873                     }
874                     else
875                     {
876                         trialprob = 1.0; /* this state is the only choice! */
877                     }
878                     if (trialprob < tprob)
879                     {
880                         tprob = trialprob;
881                     }
882                     /* this also sets a value for ifep == fep_state, whose proposal probability is 0 -- that's fine, it's never proposed */
883                     accept[ifep] = tprob;
884                 }
885             }
886
887             if (lamnew > maxfep)
888             {
889                 /* check whether this is just a numerical rounding problem */
890                 if (gmx_within_tol(remainder[fep_state], 0, 50*GMX_DOUBLE_EPS))
891                 {
892                     /* numerical rounding error -- no state other than the original has weight */
893                     lamnew = fep_state;
894                 }
895                 else
896                 {
897                     /* probably not a numerical issue */
898                     int   loc    = 0;
899                     int   nerror = 200+(maxfep-minfep+1)*60;
900                     char *errorstr;
901                     snew(errorstr, nerror);
902                     /* if it's greater than maxfep, then something went wrong -- probably underflow in the calculation
903                        of sum weights. Generate detailed info for the failure */
904                     loc += sprintf(errorstr, "Something wrong in choosing new lambda state with a Gibbs move -- probably underflow in weight determination.\nDenominator is: %3d%17.10e\n  i                dE        numerator          weights\n", 0, pks);
905                     for (ifep = minfep; ifep <= maxfep; ifep++)
906                     {
907                         loc += sprintf(&errorstr[loc], "%3d %17.10e%17.10e%17.10e\n", ifep, weighted_lamee[ifep], p_k[ifep], dfhist->sum_weights[ifep]);
908                     }
909                     gmx_fatal(FARGS, "%s", errorstr);
910                 }
911             }
912         }
913         else if ((expand->elmcmove == elmcmoveMETROPOLIS) || (expand->elmcmove == elmcmoveBARKER))
914         {
915             /* use the metropolis sampler with trial +/- 1 */
916             r1 = dist(rng);
917             if (r1 < 0.5)
918             {
919                 if (fep_state == 0)
920                 {
921                     lamtrial = fep_state;
922                 }
923                 else
924                 {
925                     lamtrial = fep_state-1;
926                 }
927             }
928             else
929             {
930                 if (fep_state == nlim-1)
931                 {
932                     lamtrial = fep_state;
933                 }
934                 else
935                 {
936                     lamtrial = fep_state+1;
937                 }
938             }
939
940             de = weighted_lamee[lamtrial] - weighted_lamee[fep_state];
941             if (expand->elmcmove == elmcmoveMETROPOLIS)
942             {
943                 tprob     = 1.0;
944                 trialprob = std::exp(de);
945                 if (trialprob < tprob)
946                 {
947                     tprob = trialprob;
948                 }
949                 propose[fep_state] = 0;
950                 propose[lamtrial]  = 1.0; /* note that this overwrites the above line if fep_state = lamtrial, which only occurs at the ends */
951                 accept[fep_state]  = 1.0; /* doesn't actually matter, never proposed unless fep_state = lamtrial, in which case it's 1.0 anyway */
952                 accept[lamtrial]   = tprob;
953
954             }
955             else if (expand->elmcmove == elmcmoveBARKER)
956             {
957                 tprob = 1.0/(1.0+std::exp(-de));
958
959                 propose[fep_state] = (1-tprob);
960                 propose[lamtrial] += tprob; /* we add, to account for the fact that at the end, they might be the same point */
961                 accept[fep_state]  = 1.0;
962                 accept[lamtrial]   = 1.0;
963             }
964
965             r2 = dist(rng);
966             if (r2 < tprob)
967             {
968                 lamnew = lamtrial;
969             }
970             else
971             {
972                 lamnew = fep_state;
973             }
974         }
975
976         for (ifep = 0; ifep < nlim; ifep++)
977         {
978             dfhist->Tij[fep_state][ifep]      += propose[ifep]*accept[ifep];
979             dfhist->Tij[fep_state][fep_state] += propose[ifep]*(1.0-accept[ifep]);
980         }
981         fep_state = lamnew;
982     }
983
984     dfhist->Tij_empirical[starting_fep_state][lamnew] += 1.0;
985
986     sfree(propose);
987     sfree(accept);
988     sfree(remainder);
989
990     return lamnew;
991 }
992
993 /* print out the weights to the log, along with current state */
994 void PrintFreeEnergyInfoToFile(FILE *outfile, const t_lambda *fep, const t_expanded *expand,
995                                const t_simtemp *simtemp, const df_history_t *dfhist,
996                                int fep_state, int frequency, int64_t step)
997 {
998     int         nlim, i, ifep, jfep;
999     real        dw, dg, dv, Tprint;
1000     const char *print_names[efptNR] = {" FEPL", "MassL", "CoulL", " VdwL", "BondL", "RestT", "Temp.(K)"};
1001     gmx_bool    bSimTemp            = FALSE;
1002
1003     nlim = fep->n_lambda;
1004     if (simtemp != nullptr)
1005     {
1006         bSimTemp = TRUE;
1007     }
1008
1009     if (step % frequency == 0)
1010     {
1011         fprintf(outfile, "             MC-lambda information\n");
1012         if (EWL(expand->elamstats) && (!(dfhist->bEquil)))
1013         {
1014             fprintf(outfile, "  Wang-Landau incrementor is: %11.5g\n", dfhist->wl_delta);
1015         }
1016         fprintf(outfile, "  N");
1017         for (i = 0; i < efptNR; i++)
1018         {
1019             if (fep->separate_dvdl[i])
1020             {
1021                 fprintf(outfile, "%7s", print_names[i]);
1022             }
1023             else if ((i == efptTEMPERATURE) && bSimTemp)
1024             {
1025                 fprintf(outfile, "%10s", print_names[i]); /* more space for temperature formats */
1026             }
1027         }
1028         fprintf(outfile, "    Count   ");
1029         if (expand->elamstats == elamstatsMINVAR)
1030         {
1031             fprintf(outfile, "W(in kT)   G(in kT)  dG(in kT)  dV(in kT)\n");
1032         }
1033         else
1034         {
1035             fprintf(outfile, "G(in kT)  dG(in kT)\n");
1036         }
1037         for (ifep = 0; ifep < nlim; ifep++)
1038         {
1039             if (ifep == nlim-1)
1040             {
1041                 dw = 0.0;
1042                 dg = 0.0;
1043                 dv = 0.0;
1044             }
1045             else
1046             {
1047                 dw = dfhist->sum_weights[ifep+1] - dfhist->sum_weights[ifep];
1048                 dg = dfhist->sum_dg[ifep+1] - dfhist->sum_dg[ifep];
1049                 dv = std::sqrt(gmx::square(dfhist->sum_variance[ifep+1]) - gmx::square(dfhist->sum_variance[ifep]));
1050             }
1051             fprintf(outfile, "%3d", (ifep+1));
1052             for (i = 0; i < efptNR; i++)
1053             {
1054                 if (fep->separate_dvdl[i])
1055                 {
1056                     fprintf(outfile, "%7.3f", fep->all_lambda[i][ifep]);
1057                 }
1058                 else if (i == efptTEMPERATURE && bSimTemp)
1059                 {
1060                     fprintf(outfile, "%9.3f", simtemp->temperatures[ifep]);
1061                 }
1062             }
1063             if (EWL(expand->elamstats) && (!(dfhist->bEquil)))  /* if performing WL and still haven't equilibrated */
1064             {
1065                 if (expand->elamstats == elamstatsWL)
1066                 {
1067                     fprintf(outfile, " %8d", static_cast<int>(dfhist->wl_histo[ifep]));
1068                 }
1069                 else
1070                 {
1071                     fprintf(outfile, " %8.3f", dfhist->wl_histo[ifep]);
1072                 }
1073             }
1074             else   /* we have equilibrated weights */
1075             {
1076                 fprintf(outfile, " %8d", dfhist->n_at_lam[ifep]);
1077             }
1078             if (expand->elamstats == elamstatsMINVAR)
1079             {
1080                 fprintf(outfile, " %10.5f %10.5f %10.5f %10.5f", dfhist->sum_weights[ifep], dfhist->sum_dg[ifep], dg, dv);
1081             }
1082             else
1083             {
1084                 fprintf(outfile, " %10.5f %10.5f", dfhist->sum_weights[ifep], dw);
1085             }
1086             if (ifep == fep_state)
1087             {
1088                 fprintf(outfile, " <<\n");
1089             }
1090             else
1091             {
1092                 fprintf(outfile, "   \n");
1093             }
1094         }
1095         fprintf(outfile, "\n");
1096
1097         if ((step % expand->nstTij == 0) && (expand->nstTij > 0) && (step > 0))
1098         {
1099             fprintf(outfile, "                     Transition Matrix\n");
1100             for (ifep = 0; ifep < nlim; ifep++)
1101             {
1102                 fprintf(outfile, "%12d", (ifep+1));
1103             }
1104             fprintf(outfile, "\n");
1105             for (ifep = 0; ifep < nlim; ifep++)
1106             {
1107                 for (jfep = 0; jfep < nlim; jfep++)
1108                 {
1109                     if (dfhist->n_at_lam[ifep] > 0)
1110                     {
1111                         if (expand->bSymmetrizedTMatrix)
1112                         {
1113                             Tprint = (dfhist->Tij[ifep][jfep]+dfhist->Tij[jfep][ifep])/(dfhist->n_at_lam[ifep]+dfhist->n_at_lam[jfep]);
1114                         }
1115                         else
1116                         {
1117                             Tprint = (dfhist->Tij[ifep][jfep])/(dfhist->n_at_lam[ifep]);
1118                         }
1119                     }
1120                     else
1121                     {
1122                         Tprint = 0.0;
1123                     }
1124                     fprintf(outfile, "%12.8f", Tprint);
1125                 }
1126                 fprintf(outfile, "%3d\n", (ifep+1));
1127             }
1128
1129             fprintf(outfile, "                  Empirical Transition Matrix\n");
1130             for (ifep = 0; ifep < nlim; ifep++)
1131             {
1132                 fprintf(outfile, "%12d", (ifep+1));
1133             }
1134             fprintf(outfile, "\n");
1135             for (ifep = 0; ifep < nlim; ifep++)
1136             {
1137                 for (jfep = 0; jfep < nlim; jfep++)
1138                 {
1139                     if (dfhist->n_at_lam[ifep] > 0)
1140                     {
1141                         if (expand->bSymmetrizedTMatrix)
1142                         {
1143                             Tprint = (dfhist->Tij_empirical[ifep][jfep]+dfhist->Tij_empirical[jfep][ifep])/(dfhist->n_at_lam[ifep]+dfhist->n_at_lam[jfep]);
1144                         }
1145                         else
1146                         {
1147                             Tprint = dfhist->Tij_empirical[ifep][jfep]/(dfhist->n_at_lam[ifep]);
1148                         }
1149                     }
1150                     else
1151                     {
1152                         Tprint = 0.0;
1153                     }
1154                     fprintf(outfile, "%12.8f", Tprint);
1155                 }
1156                 fprintf(outfile, "%3d\n", (ifep+1));
1157             }
1158         }
1159     }
1160 }
1161
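/* Driver for one expanded-ensemble step: build the reduced energies of all lambda states,
   update the weight estimates, choose a new lambda state, rescale velocities and
   thermostat variables if simulated tempering changes the reference temperature, and
   update the Wang-Landau incrementor. Returns the index of the new lambda state. */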
1162 int ExpandedEnsembleDynamics(FILE *log, const t_inputrec *ir, const gmx_enerdata_t *enerd,
1163                              t_state *state, t_extmass *MassQ, int fep_state, df_history_t *dfhist,
1164                              int64_t step,
1165                              rvec *v, const t_mdatoms *mdatoms)
1166 /* Note that the state variable is only needed for simulated tempering, not
1167    Hamiltonian expanded ensemble.  May be able to remove it after integrator refactoring. */
1168 {
1169     real       *pfep_lamee, *scaled_lamee, *weighted_lamee;
1170     double     *p_k;
1171     int         i, nlim, lamnew, totalsamples;
1172     real        oneovert, maxscaled = 0, maxweighted = 0;
1173     t_expanded *expand;
1174     t_simtemp  *simtemp;
1175     gmx_bool    bIfReset, bSwitchtoOneOverT, bDoneEquilibrating = FALSE;
1176
1177     expand  = ir->expandedvals;
1178     simtemp = ir->simtempvals;
1179     nlim    = ir->fepvals->n_lambda;
1180
1181     snew(scaled_lamee, nlim);
1182     snew(weighted_lamee, nlim);
1183     snew(pfep_lamee, nlim);
1184     snew(p_k, nlim);
1185
1186     /* update the count at the current lambda*/
1187     dfhist->n_at_lam[fep_state]++;
1188
1189     /* a PV term would need to be calculated somewhere, but it is not needed here until there is a lambda state that is
1190        pressure controlled. */
1191     /*
1192        pVTerm = 0;
1193        where does this PV term go?
1194        for (i=0;i<nlim;i++)
1195        {
1196        fep_lamee[i] += pVTerm;
1197        }
1198      */
1199
1200     /* determine the maximum value, which is subtracted below to avoid overflow.  Probably a better way to do this */
1201     /* we don't need to include the pressure term, since the volume is the same between the two.
1202        is there some term we are neglecting, however? */
1203
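    /* scaled_lamee[i] is the reduced (dimensionless) energy of lambda state i relative to
       the current state, in units of kT of the MC temperature (or of the per-state
       simulated-tempering temperatures, including the potential-energy reweighting term). */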
1204     if (ir->efep != efepNO)
1205     {
1206         for (i = 0; i < nlim; i++)
1207         {
1208             if (ir->bSimTemp)
1209             {
1210                 /* Note -- this assumes no mass changes, since kinetic energy is not added  . . . */
1211                 scaled_lamee[i] = (enerd->enerpart_lambda[i+1]-enerd->enerpart_lambda[0])/(simtemp->temperatures[i]*BOLTZ)
1212                     + enerd->term[F_EPOT]*(1.0/(simtemp->temperatures[i])- 1.0/(simtemp->temperatures[fep_state]))/BOLTZ;
1213             }
1214             else
1215             {
1216                 scaled_lamee[i] = (enerd->enerpart_lambda[i+1]-enerd->enerpart_lambda[0])/(expand->mc_temp*BOLTZ);
1217                 /* mc_temp is currently set to the system reference temperature (ref_t) unless otherwise defined */
1218             }
1219
1220             /* save these energies for printing, so they don't get overwritten by the next step */
1221             /* they aren't overwritten in the non-free energy case, but we always print with these
1222                for simplicity */
1223         }
1224     }
1225     else
1226     {
1227         if (ir->bSimTemp)
1228         {
1229             for (i = 0; i < nlim; i++)
1230             {
1231                 scaled_lamee[i] = enerd->term[F_EPOT]*(1.0/simtemp->temperatures[i] - 1.0/simtemp->temperatures[fep_state])/BOLTZ;
1232             }
1233         }
1234     }
1235
1236     for (i = 0; i < nlim; i++)
1237     {
1238         pfep_lamee[i] = scaled_lamee[i];
1239
1240         weighted_lamee[i] = dfhist->sum_weights[i] - scaled_lamee[i];
1241         if (i == 0)
1242         {
1243             maxscaled   = scaled_lamee[i];
1244             maxweighted = weighted_lamee[i];
1245         }
1246         else
1247         {
1248             if (scaled_lamee[i] > maxscaled)
1249             {
1250                 maxscaled = scaled_lamee[i];
1251             }
1252             if (weighted_lamee[i] > maxweighted)
1253             {
1254                 maxweighted = weighted_lamee[i];
1255             }
1256         }
1257     }
1258
1259     for (i = 0; i < nlim; i++)
1260     {
1261         scaled_lamee[i]   -= maxscaled;
1262         weighted_lamee[i] -= maxweighted;
1263     }
1264
1265     /* update weights - we decide whether or not to actually do this inside */
1266
1267     bDoneEquilibrating = UpdateWeights(nlim, expand, dfhist, fep_state, scaled_lamee, weighted_lamee, step);
1268     if (bDoneEquilibrating)
1269     {
1270         if (log)
1271         {
1272             fprintf(log, "\nStep %" PRId64 ": Weights have equilibrated, using criteria: %s\n", step, elmceq_names[expand->elmceq]);
1273         }
1274     }
1275
1276     lamnew = ChooseNewLambda(nlim, expand, dfhist, fep_state, weighted_lamee, p_k,
1277                              ir->expandedvals->lmc_seed, step);
1278     /* if using simulated tempering, we need to adjust the temperatures */
1279     if (ir->bSimTemp && (lamnew != fep_state)) /* only need to change the temperatures if we change the state */
1280     {
1281         int   i, j, n, d;
1282         real *buf_ngtc;
1283         real  told;
1284         int   nstart, nend, gt;
1285
1286         snew(buf_ngtc, ir->opts.ngtc);
1287
1288         for (i = 0; i < ir->opts.ngtc; i++)
1289         {
1290             if (ir->opts.ref_t[i] > 0)
1291             {
1292                 told              = ir->opts.ref_t[i];
1293                 ir->opts.ref_t[i] =  simtemp->temperatures[lamnew];
1294                 buf_ngtc[i]       = std::sqrt(ir->opts.ref_t[i]/told); /* using the buffer as temperature scaling */
1295             }
1296         }
1297
1298         /* we don't need to manipulate the ekind information, as it isn't due to be reset until the next step anyway */
1299
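        /* rescale the velocities of all home atoms by sqrt(T_new/T_old) of their
           temperature-coupling group */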
1300         nstart = 0;
1301         nend   = mdatoms->homenr;
1302         for (n = nstart; n < nend; n++)
1303         {
1304             gt = 0;
1305             if (mdatoms->cTC)
1306             {
1307                 gt = mdatoms->cTC[n];
1308             }
1309             for (d = 0; d < DIM; d++)
1310             {
1311                 v[n][d] *= buf_ngtc[gt];
1312             }
1313         }
1314
1315         if (inputrecNptTrotter(ir) || inputrecNphTrotter(ir) || inputrecNvtTrotter(ir))
1316         {
1317             /* we need to recalculate the masses if the temperature has changed */
1318             init_npt_masses(ir, state, MassQ, FALSE);
1319             for (i = 0; i < state->nnhpres; i++)
1320             {
1321                 for (j = 0; j < ir->opts.nhchainlength; j++)
1322                 {
1323                     state->nhpres_vxi[i+j] *= buf_ngtc[i];
1324                 }
1325             }
1326             for (i = 0; i < ir->opts.ngtc; i++)
1327             {
1328                 for (j = 0; j < ir->opts.nhchainlength; j++)
1329                 {
1330                     state->nosehoover_vxi[i+j] *= buf_ngtc[i];
1331                 }
1332             }
1333         }
1334         sfree(buf_ngtc);
1335     }
1336
1337     /* now check on the Wang-Landau updating criteria */
1338
1339     if (EWL(expand->elamstats))
1340     {
1341         bSwitchtoOneOverT = FALSE;
1342         if (expand->bWLoneovert)
1343         {
1344             totalsamples = 0;
1345             for (i = 0; i < nlim; i++)
1346             {
1347                 totalsamples += dfhist->n_at_lam[i];
1348             }
1349             oneovert = (1.0*nlim)/totalsamples;
1350             /* oneovert has decreased by a bit since last time, so we actually make sure it's within one of this number */
1351             /* switch to 1/t incrementing when wl_delta has decreased at least once, and wl_delta is now less than 1/t */
1352             if ((dfhist->wl_delta <= ((totalsamples)/(totalsamples-1.00001))*oneovert) &&
1353                 (dfhist->wl_delta < expand->init_wl_delta))
1354             {
1355                 bSwitchtoOneOverT = TRUE;
1356             }
1357         }
1358         if (bSwitchtoOneOverT)
1359         {
1360             dfhist->wl_delta = oneovert; /* now we reduce by this each time, instead of only at flatness */
1361         }
1362         else
1363         {
1364             bIfReset = CheckHistogramRatios(nlim, dfhist->wl_histo, expand->wl_ratio);
1365             if (bIfReset)
1366             {
1367                 for (i = 0; i < nlim; i++)
1368                 {
1369                     dfhist->wl_histo[i] = 0;
1370                 }
1371                 dfhist->wl_delta *= expand->wl_scale;
1372                 if (log)
1373                 {
1374                     fprintf(log, "\nStep %" PRId64 ": weights are now:", step);
1375                     for (i = 0; i < nlim; i++)
1376                     {
1377                         fprintf(log, " %.5f", dfhist->sum_weights[i]);
1378                     }
1379                     fprintf(log, "\n");
1380                 }
1381             }
1382         }
1383     }
1384     sfree(pfep_lamee);
1385     sfree(scaled_lamee);
1386     sfree(weighted_lamee);
1387     sfree(p_k);
1388
1389     return lamnew;
1390 }