src/gromacs/mdlib/expanded.cpp
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35 #include "gmxpre.h"
36
37 #include "expanded.h"
38
39 #include <stdio.h>
40
41 #include <cmath>
42
43 #include <algorithm>
44
45 #include "gromacs/domdec/domdec.h"
46 #include "gromacs/fileio/confio.h"
47 #include "gromacs/fileio/gmxfio.h"
48 #include "gromacs/fileio/xtcio.h"
49 #include "gromacs/gmxlib/chargegroup.h"
50 #include "gromacs/gmxlib/network.h"
51 #include "gromacs/gmxlib/nrnb.h"
52 #include "gromacs/listed-forces/disre.h"
53 #include "gromacs/listed-forces/orires.h"
54 #include "gromacs/math/functions.h"
55 #include "gromacs/math/units.h"
56 #include "gromacs/math/vec.h"
57 #include "gromacs/mdlib/calcmu.h"
58 #include "gromacs/mdlib/constr.h"
59 #include "gromacs/mdlib/force.h"
60 #include "gromacs/mdlib/update.h"
61 #include "gromacs/mdtypes/enerdata.h"
62 #include "gromacs/mdtypes/forcerec.h"
63 #include "gromacs/mdtypes/inputrec.h"
64 #include "gromacs/mdtypes/md_enums.h"
65 #include "gromacs/mdtypes/mdatom.h"
66 #include "gromacs/mdtypes/state.h"
67 #include "gromacs/random/threefry.h"
68 #include "gromacs/random/uniformrealdistribution.h"
69 #include "gromacs/timing/wallcycle.h"
70 #include "gromacs/utility/fatalerror.h"
71 #include "gromacs/utility/gmxmpi.h"
72 #include "gromacs/utility/smalloc.h"
73
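/* Initialize the free-energy weight history from the input record: copy the initial
   Wang-Landau delta and the user-supplied initial lambda weights into the running
   sums of weights and free-energy differences. */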
74 static void init_df_history_weights(df_history_t *dfhist, t_expanded *expand, int nlim)
75 {
76     int i;
77     dfhist->wl_delta = expand->init_wl_delta;
78     for (i = 0; i < nlim; i++)
79     {
80         dfhist->sum_weights[i] = expand->init_lambda_weights[i];
81         dfhist->sum_dg[i]      = expand->init_lambda_weights[i];
82     }
83 }
84
85 /* Eventually this should contain all the functions needed to initialize the expanded
86    ensemble before the MD loop starts */
87 void init_expanded_ensemble(gmx_bool bStateFromCP, t_inputrec *ir, df_history_t *dfhist)
88 {
89     if (!bStateFromCP)
90     {
91         init_df_history_weights(dfhist, ir->expandedvals, ir->fepvals->n_lambda);
92     }
93 }
94
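/* Fill p_k[minfep..maxfep] with normalized Gibbs (Boltzmann-like) probabilities computed
   from the reduced energies ene[], subtracting off the maximum energy first to avoid
   overflow in the exponentials.  *pks returns the normalization sum of the shifted terms. */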
95 static void GenerateGibbsProbabilities(real *ene, double *p_k, double *pks, int minfep, int maxfep)
96 {
97
98     int  i;
99     real maxene;
100
101     *pks   = 0.0;
102     maxene = ene[minfep];
103     /* find the maximum value */
104     for (i = minfep; i <= maxfep; i++)
105     {
106         if (ene[i] > maxene)
107         {
108             maxene = ene[i];
109         }
110     }
111     /* find the denominator */
112     for (i = minfep; i <= maxfep; i++)
113     {
114         *pks += std::exp(ene[i]-maxene);
115     }
116     /*numerators*/
117     for (i = minfep; i <= maxfep; i++)
118     {
119         p_k[i] = std::exp(ene[i]-maxene) / *pks;
120     }
121 }
122
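/* As GenerateGibbsProbabilities, but each state's reduced energy is augmented by the log
   of its histogram count (nvals), as used for the weighted Wang-Landau update; delta keeps
   the argument of the log positive for states with zero count. */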
123 static void GenerateWeightedGibbsProbabilities(const real *ene, double *p_k, double *pks, int nlim, real *nvals, real delta)
124 {
125
126     int   i;
127     real  maxene;
128     real *nene;
129     *pks = 0.0;
130
131     snew(nene, nlim);
132     for (i = 0; i < nlim; i++)
133     {
134         if (nvals[i] == 0)
135         {
136             /* add delta so the argument of the log is greater than zero when this
137                state has not been visited yet */
138             nene[i] = ene[i] + std::log(nvals[i]+delta);
139         }
140         else
141         {
142             nene[i] = ene[i] + std::log(nvals[i]);
143         }
144     }
145
146     /* find the maximum value */
147     maxene = nene[0];
148     for (i = 0; i < nlim; i++)
149     {
150         if (nene[i] > maxene)
151         {
152             maxene = nene[i];
153         }
154     }
155
156     /* subtract off the maximum, avoiding overflow */
157     for (i = 0; i < nlim; i++)
158     {
159         nene[i] -= maxene;
160     }
161
162     /* find the denominator */
163     for (i = 0; i < nlim; i++)
164     {
165         *pks += std::exp(nene[i]);
166     }
167
168     /*numerators*/
169     for (i = 0; i < nlim; i++)
170     {
171         p_k[i] = std::exp(nene[i]) / *pks;
172     }
173     sfree(nene);
174 }
175
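/* Return the index of the smallest entry in min_metric[0..N-1]. */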
176 static int FindMinimum(const real *min_metric, int N)
177 {
178
179     real min_val;
180     int  min_nval, nval;
181
182     min_nval = 0;
183     min_val  = min_metric[0];
184
185     for (nval = 0; nval < N; nval++)
186     {
187         if (min_metric[nval] < min_val)
188         {
189             min_val  = min_metric[nval];
190             min_nval = nval;
191         }
192     }
193     return min_nval;
194 }
195
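/* Return TRUE if every histogram bin lies within a factor of ratio of the mean,
   i.e. ratio < histo[i]/mean < 1/ratio for all i; return FALSE if there are no samples. */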
196 static gmx_bool CheckHistogramRatios(int nhisto, const real *histo, real ratio)
197 {
198
199     int      i;
200     real     nmean;
201     gmx_bool bIfFlat;
202
203     nmean = 0;
204     for (i = 0; i < nhisto; i++)
205     {
206         nmean += histo[i];
207     }
208
209     if (nmean == 0)
210     {
211                 /* no samples yet -- the histogram cannot be judged flat */
212         bIfFlat = FALSE;
213         return bIfFlat;
214     }
215     nmean /= (real)nhisto;
216
217     bIfFlat = TRUE;
218     for (i = 0; i < nhisto; i++)
219     {
220         /* make sure that all points are in the ratio < x <  1/ratio range  */
221         if (!((histo[i]/nmean < 1.0/ratio) && (histo[i]/nmean > ratio)))
222         {
223             bIfFlat = FALSE;
224             break;
225         }
226     }
227     return bIfFlat;
228 }
229
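/* Decide whether the expanded-ensemble weights are equilibrated, according to the
   criterion selected in expand->elmceq: never, always, after a given number of steps or
   total samples, a minimum number of samples at each lambda, a small enough Wang-Landau
   delta, or a sufficiently flat histogram.  During the initial forced-sampling phase
   (lmc_forced_nstart) the weights are never considered equilibrated. */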
230 static gmx_bool CheckIfDoneEquilibrating(int nlim, t_expanded *expand, df_history_t *dfhist, gmx_int64_t step)
231 {
232
233     int      i, totalsamples;
234     gmx_bool bDoneEquilibrating = TRUE;
235     gmx_bool bIfFlat;
236
237     /* If we are doing slow growth to get initial values, we haven't finished equilibrating */
238     if (expand->lmc_forced_nstart > 0)
239     {
240         for (i = 0; i < nlim; i++)
241         {
242             if (dfhist->n_at_lam[i] < expand->lmc_forced_nstart) /* we are still doing the initial sweep, so we're definitely not
243                                                                     done equilibrating*/
244             {
245                 bDoneEquilibrating = FALSE;
246                 break;
247             }
248         }
249     }
250     else
251     {
252         /* assume we have equilibrated the weights, then check to see if any of the conditions are not met */
253         bDoneEquilibrating = TRUE;
254
255         /* calculate the total number of samples */
256         switch (expand->elmceq)
257         {
258             case elmceqNO:
259                 /* We have not equilibrated, and won't, ever. */
260                 bDoneEquilibrating = FALSE;
261                 break;
262             case elmceqYES:
263                 /* we have equilibrated -- we're done */
264                 bDoneEquilibrating = TRUE;
265                 break;
266             case elmceqSTEPS:
267                 /* when equilibrating by a fixed number of steps, we are not done while still below that count */
268                 if (step < expand->equil_steps)
269                 {
270                     bDoneEquilibrating = FALSE;
271                 }
272                 break;
273             case elmceqSAMPLES:
274                 totalsamples = 0;
275                 for (i = 0; i < nlim; i++)
276                 {
277                     totalsamples += dfhist->n_at_lam[i];
278                 }
279                 if (totalsamples < expand->equil_samples)
280                 {
281                     bDoneEquilibrating = FALSE;
282                 }
283                 break;
284             case elmceqNUMATLAM:
285                 for (i = 0; i < nlim; i++)
286                 {
287                     if (dfhist->n_at_lam[i] < expand->equil_n_at_lam) /* we are still doing the initial sweep, so we're definitely not
288                                                                          done equilibrating*/
289                     {
290                         bDoneEquilibrating  = FALSE;
291                         break;
292                     }
293                 }
294                 break;
295             case elmceqWLDELTA:
296                 if (EWL(expand->elamstats)) /* This check is in readir as well, but
297                                                just to be sure */
298                 {
299                     if (dfhist->wl_delta > expand->equil_wl_delta)
300                     {
301                         bDoneEquilibrating = FALSE;
302                     }
303                 }
304                 break;
305             case elmceqRATIO:
306                 /* we can use the flatness as a judge of good weights, as long as
307                    we're not doing minvar, or Wang-Landau.
308                    But turn off for now until we figure out exactly how we do this.
309                  */
310
311                 if (!(EWL(expand->elamstats) || expand->elamstats == elamstatsMINVAR))
312                 {
313                     /* we want to use flatness -avoiding- the forced-through samples.  Plus, we need to convert to
314                        floats for this histogram function. */
315
316                     real *modhisto;
317                     snew(modhisto, nlim);
318                     for (i = 0; i < nlim; i++)
319                     {
320                         modhisto[i] = 1.0*(dfhist->n_at_lam[i]-expand->lmc_forced_nstart);
321                     }
322                     bIfFlat = CheckHistogramRatios(nlim, modhisto, expand->equil_ratio);
323                     sfree(modhisto);
324                     if (!bIfFlat)
325                     {
326                         bDoneEquilibrating = FALSE;
327                     }
328                 }
329                 break;
330             default:
331                 bDoneEquilibrating = TRUE;
332                 break;
333         }
334     }
335     return bDoneEquilibrating;
336 }
337
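/* Update the free-energy weights at the current lambda state using the chosen estimator
   (Wang-Landau, weighted Wang-Landau, Barker, Metropolis or minimum variance).  Returns
   TRUE when the weights have just been declared equilibrated, FALSE otherwise. */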
338 static gmx_bool UpdateWeights(int nlim, t_expanded *expand, df_history_t *dfhist,
339                               int fep_state, real *scaled_lamee, real *weighted_lamee, gmx_int64_t step)
340 {
341     gmx_bool  bSufficientSamples;
342     int       i;
343     int       n0, np1, nm1, nval, min_nvalm, min_nvalp, maxc;
344     real      omega_m1_0, omega_p1_0, clam_osum;
345     real      de, de_function;
346     real      cnval, zero_sum_weights;
347     real     *omegam_array, *weightsm_array, *omegap_array, *weightsp_array, *varm_array, *varp_array, *dwp_array, *dwm_array;
348     real      clam_varm, clam_varp, clam_weightsm, clam_weightsp, clam_minvar;
349     real     *lam_variance, *lam_dg;
350     double   *p_k;
351     double    pks = 0;
352     real      chi_m1_0, chi_p1_0, chi_m2_0, chi_p2_0, chi_p1_m1, chi_p2_m1, chi_m1_p1, chi_m2_p1;
353
354     /* if we have equilibrated the weights, exit now */
355     if (dfhist->bEquil)
356     {
357         return FALSE;
358     }
359
360     if (CheckIfDoneEquilibrating(nlim, expand, dfhist, step))
361     {
362         dfhist->bEquil = TRUE;
363         /* zero out the visited states so we know how many equilibrated states we have
364            from here on out.*/
365         for (i = 0; i < nlim; i++)
366         {
367             dfhist->n_at_lam[i] = 0;
368         }
369         return TRUE;
370     }
371
372     /* If we got this far, we have not equilibrated yet, so keep on
373        updating the weights */
374
375     if (EWL(expand->elamstats))
376     {
377         if (expand->elamstats == elamstatsWL)  /* Standard Wang-Landau */
378         {
379             dfhist->sum_weights[fep_state] -= dfhist->wl_delta;
380             dfhist->wl_histo[fep_state]    += 1.0;
381         }
382         else if (expand->elamstats == elamstatsWWL) /* Weighted Wang-Landau */
383         {
384             snew(p_k, nlim);
385
386             /* first increment count */
387             GenerateGibbsProbabilities(weighted_lamee, p_k, &pks, 0, nlim-1);
388             for (i = 0; i < nlim; i++)
389             {
390                 dfhist->wl_histo[i] += (real)p_k[i];
391             }
392
393             /* then increment weights (uses count) */
394             pks = 0.0;
395             GenerateWeightedGibbsProbabilities(weighted_lamee, p_k, &pks, nlim, dfhist->wl_histo, dfhist->wl_delta);
396
397             for (i = 0; i < nlim; i++)
398             {
399                 dfhist->sum_weights[i] -= dfhist->wl_delta*(real)p_k[i];
400             }
401             /* Alternate definition, using logarithms. Shouldn't make very much difference! */
402             /*
403                real di;
404                for (i=0;i<nlim;i++)
405                {
406                 di = (real)1.0 + dfhist->wl_delta*(real)p_k[i];
407                 dfhist->sum_weights[i] -= log(di);
408                }
409              */
410             sfree(p_k);
411         }
412
413         zero_sum_weights =  dfhist->sum_weights[0];
414         for (i = 0; i < nlim; i++)
415         {
416             dfhist->sum_weights[i] -= zero_sum_weights;
417         }
418     }
419
420     if (expand->elamstats == elamstatsBARKER || expand->elamstats == elamstatsMETROPOLIS || expand->elamstats == elamstatsMINVAR)
421     {
422
423         de_function = 0;  /* to get rid of warnings, but this value will not be used because of the logic */
424         maxc        = 2*expand->c_range+1;
425
426         snew(lam_dg, nlim);
427         snew(lam_variance, nlim);
428
429         snew(omegap_array, maxc);
430         snew(weightsp_array, maxc);
431         snew(varp_array, maxc);
432         snew(dwp_array, maxc);
433
434         snew(omegam_array, maxc);
435         snew(weightsm_array, maxc);
436         snew(varm_array, maxc);
437         snew(dwm_array, maxc);
438
439         /* unpack the current lambdas -- we will only update 2 of these */
440
441         for (i = 0; i < nlim-1; i++)
442         {   /* only through the second to last */
443             lam_dg[i]       = dfhist->sum_dg[i+1] - dfhist->sum_dg[i];
444             lam_variance[i] = gmx::square(dfhist->sum_variance[i+1]) - gmx::square(dfhist->sum_variance[i]);
445         }
446
447         /* accumulate running averages */
448         for (nval = 0; nval < maxc; nval++)
449         {
450             /* constants for later use */
451             cnval = (real)(nval-expand->c_range);
452             /* actually, should be able to rewrite it w/o exponential, for better numerical stability */
453             if (fep_state > 0)
454             {
455                 de = std::exp(cnval - (scaled_lamee[fep_state]-scaled_lamee[fep_state-1]));
456                 if (expand->elamstats == elamstatsBARKER || expand->elamstats == elamstatsMINVAR)
457                 {
458                     de_function = 1.0/(1.0+de);
459                 }
460                 else if (expand->elamstats == elamstatsMETROPOLIS)
461                 {
462                     if (de < 1.0)
463                     {
464                         de_function = 1.0;
465                     }
466                     else
467                     {
468                         de_function = 1.0/de;
469                     }
470                 }
471                 dfhist->accum_m[fep_state][nval]  += de_function;
472                 dfhist->accum_m2[fep_state][nval] += de_function*de_function;
473             }
474
475             if (fep_state < nlim-1)
476             {
477                 de = std::exp(-cnval + (scaled_lamee[fep_state+1]-scaled_lamee[fep_state]));
478                 if (expand->elamstats == elamstatsBARKER || expand->elamstats == elamstatsMINVAR)
479                 {
480                     de_function = 1.0/(1.0+de);
481                 }
482                 else if (expand->elamstats == elamstatsMETROPOLIS)
483                 {
484                     if (de < 1.0)
485                     {
486                         de_function = 1.0;
487                     }
488                     else
489                     {
490                         de_function = 1.0/de;
491                     }
492                 }
493                 dfhist->accum_p[fep_state][nval]  += de_function;
494                 dfhist->accum_p2[fep_state][nval] += de_function*de_function;
495             }
496
497             /* Metropolis transition and Barker transition (unoptimized Bennett) acceptance weight determination */
498
499             n0  = dfhist->n_at_lam[fep_state];
500             if (fep_state > 0)
501             {
502                 nm1 = dfhist->n_at_lam[fep_state-1];
503             }
504             else
505             {
506                 nm1 = 0;
507             }
508             if (fep_state < nlim-1)
509             {
510                 np1 = dfhist->n_at_lam[fep_state+1];
511             }
512             else
513             {
514                 np1 = 0;
515             }
516
517             /* the branches below should set all of these, but the compiler cannot prove it, so initialize them here to avoid warnings */
518             chi_m1_0 = chi_p1_0 = chi_m2_0 = chi_p2_0 = chi_p1_m1 = chi_p2_m1 = chi_m1_p1 = chi_m2_p1 = 0;
519
520             if (n0 > 0)
521             {
522                 chi_m1_0 = dfhist->accum_m[fep_state][nval]/n0;
523                 chi_p1_0 = dfhist->accum_p[fep_state][nval]/n0;
524                 chi_m2_0 = dfhist->accum_m2[fep_state][nval]/n0;
525                 chi_p2_0 = dfhist->accum_p2[fep_state][nval]/n0;
526             }
527
528             if ((fep_state > 0 ) && (nm1 > 0))
529             {
530                 chi_p1_m1 = dfhist->accum_p[fep_state-1][nval]/nm1;
531                 chi_p2_m1 = dfhist->accum_p2[fep_state-1][nval]/nm1;
532             }
533
534             if ((fep_state < nlim-1) && (np1 > 0))
535             {
536                 chi_m1_p1 = dfhist->accum_m[fep_state+1][nval]/np1;
537                 chi_m2_p1 = dfhist->accum_m2[fep_state+1][nval]/np1;
538             }
539
540             omega_m1_0    = 0;
541             omega_p1_0    = 0;
542             clam_weightsm = 0;
543             clam_weightsp = 0;
544             clam_varm     = 0;
545             clam_varp     = 0;
546
547             if (fep_state > 0)
548             {
549                 if (n0 > 0)
550                 {
551                     omega_m1_0 = chi_m2_0/(chi_m1_0*chi_m1_0) - 1.0;
552                     if (nm1 > 0)
553                     {
554                         real omega_p1_m1 = chi_p2_m1/(chi_p1_m1*chi_p1_m1) - 1.0;
555                         clam_weightsm = (std::log(chi_m1_0) - std::log(chi_p1_m1)) + cnval;
556                         clam_varm     = (1.0/n0)*(omega_m1_0) + (1.0/nm1)*(omega_p1_m1);
557                     }
558                 }
559             }
560
561             if (fep_state < nlim-1)
562             {
563                 if (n0 > 0)
564                 {
565                     omega_p1_0 = chi_p2_0/(chi_p1_0*chi_p1_0) - 1.0;
566                     if (np1 > 0)
567                     {
568                         real omega_m1_p1 = chi_m2_p1/(chi_m1_p1*chi_m1_p1) - 1.0;
569                         clam_weightsp = (std::log(chi_m1_p1) - std::log(chi_p1_0)) + cnval;
570                         clam_varp     = (1.0/np1)*(omega_m1_p1) + (1.0/n0)*(omega_p1_0);
571                     }
572                 }
573             }
574
575             if (n0 > 0)
576             {
577                 omegam_array[nval]             = omega_m1_0;
578             }
579             else
580             {
581                 omegam_array[nval]             = 0;
582             }
583             weightsm_array[nval]           = clam_weightsm;
584             varm_array[nval]               = clam_varm;
585             if (nm1 > 0)
586             {
587                 dwm_array[nval]  = std::fabs( (cnval + std::log((1.0*n0)/nm1)) - lam_dg[fep_state-1] );
588             }
589             else
590             {
591                 dwm_array[nval]  = std::fabs( cnval - lam_dg[fep_state-1] );
592             }
593
594             if (n0 > 0)
595             {
596                 omegap_array[nval]             = omega_p1_0;
597             }
598             else
599             {
600                 omegap_array[nval]             = 0;
601             }
602             weightsp_array[nval]           = clam_weightsp;
603             varp_array[nval]               = clam_varp;
604             if ((np1 > 0) && (n0 > 0))
605             {
606                 dwp_array[nval]  = std::fabs( (cnval + std::log((1.0*np1)/n0)) - lam_dg[fep_state] );
607             }
608             else
609             {
610                 dwp_array[nval]  = std::fabs( cnval - lam_dg[fep_state] );
611             }
612
613         }
614
615         /* find the C's closest to the old weights value */
616
617         min_nvalm     = FindMinimum(dwm_array, maxc);
618         omega_m1_0    = omegam_array[min_nvalm];
619         clam_weightsm = weightsm_array[min_nvalm];
620         clam_varm     = varm_array[min_nvalm];
621
622         min_nvalp     = FindMinimum(dwp_array, maxc);
623         omega_p1_0    = omegap_array[min_nvalp];
624         clam_weightsp = weightsp_array[min_nvalp];
625         clam_varp     = varp_array[min_nvalp];
626
627         clam_osum   = omega_m1_0 + omega_p1_0;
628         clam_minvar = 0;
629         if (clam_osum > 0)
630         {
631             clam_minvar = 0.5*std::log(clam_osum);
632         }
633
634         if (fep_state > 0)
635         {
636             lam_dg[fep_state-1]       = clam_weightsm;
637             lam_variance[fep_state-1] = clam_varm;
638         }
639
640         if (fep_state < nlim-1)
641         {
642             lam_dg[fep_state]       = clam_weightsp;
643             lam_variance[fep_state] = clam_varp;
644         }
645
646         if (expand->elamstats == elamstatsMINVAR)
647         {
648             bSufficientSamples = TRUE;
649             /* make sure they are all past a threshold */
650             for (i = 0; i < nlim; i++)
651             {
652                 if (dfhist->n_at_lam[i] < expand->minvarmin)
653                 {
654                     bSufficientSamples = FALSE;
655                 }
656             }
657             if (bSufficientSamples)
658             {
659                 dfhist->sum_minvar[fep_state] = clam_minvar;
660                 if (fep_state == 0)
661                 {
662                     for (i = 0; i < nlim; i++)
663                     {
664                         dfhist->sum_minvar[i] += (expand->minvar_const-clam_minvar);
665                     }
666                     expand->minvar_const          = clam_minvar;
667                     dfhist->sum_minvar[fep_state] = 0.0;
668                 }
669                 else
670                 {
671                     dfhist->sum_minvar[fep_state] -= expand->minvar_const;
672                 }
673             }
674         }
675
676         /* we need to rezero minvar now, since it could change at fep_state = 0 */
677         dfhist->sum_dg[0]       = 0.0;
678         dfhist->sum_variance[0] = 0.0;
679         dfhist->sum_weights[0]  = dfhist->sum_dg[0] + dfhist->sum_minvar[0]; /* should be zero */
680
681         for (i = 1; i < nlim; i++)
682         {
683             dfhist->sum_dg[i]       = lam_dg[i-1] + dfhist->sum_dg[i-1];
684             dfhist->sum_variance[i] = std::sqrt(lam_variance[i-1] + gmx::square(dfhist->sum_variance[i-1]));
685             dfhist->sum_weights[i]  = dfhist->sum_dg[i] + dfhist->sum_minvar[i];
686         }
687
688         sfree(lam_dg);
689         sfree(lam_variance);
690
691         sfree(omegam_array);
692         sfree(weightsm_array);
693         sfree(varm_array);
694         sfree(dwm_array);
695
696         sfree(omegap_array);
697         sfree(weightsp_array);
698         sfree(varp_array);
699         sfree(dwp_array);
700     }
701     return FALSE;
702 }
703
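/* Pick the next lambda state by Monte Carlo, using the move type selected in
   expand->elmcmove (Gibbs, Metropolized Gibbs, Metropolis or Barker), and update the
   expected and empirical transition matrices.  During the initial forced-sampling phase
   (lmc_forced_nstart) it simply marches through the states in order.  Returns the new
   state index. */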
704 static int ChooseNewLambda(int nlim, t_expanded *expand, df_history_t *dfhist, int fep_state, real *weighted_lamee, double *p_k,
705                            gmx_int64_t seed, gmx_int64_t step)
706 {
707     /* Choose new lambda value, and update transition matrix */
708
709     int                                  i, ifep, minfep, maxfep, lamnew, lamtrial, starting_fep_state;
710     real                                 r1, r2, de, trialprob, tprob = 0;
711     double                              *propose, *accept, *remainder;
712     double                               pks;
713     real                                 pnorm;
714     gmx::ThreeFry2x64<0>                 rng(seed, gmx::RandomDomain::ExpandedEnsemble); // We only draw once, so zero bits internal counter is fine
715     gmx::UniformRealDistribution<real>   dist;
716
717     starting_fep_state = fep_state;
718     lamnew             = fep_state; /* so that there is a default setting -- stays the same */
719
720     if (!EWL(expand->elamstats))    /* ignore equilibrating the weights if using WL */
721     {
722         if ((expand->lmc_forced_nstart > 0) && (dfhist->n_at_lam[nlim-1] <= expand->lmc_forced_nstart))
723         {
724             /* Use a marching method to run through the lambdas and get preliminary free energy data,
725                before starting 'free' sampling.  We start free sampling when we have enough at each lambda */
726
727             /* if we have enough at this lambda, move on to the next one */
728
729             if (dfhist->n_at_lam[fep_state] == expand->lmc_forced_nstart)
730             {
731                 lamnew = fep_state+1;
732                 if (lamnew == nlim)  /* whoops, stepped too far! */
733                 {
734                     lamnew -= 1;
735                 }
736             }
737             else
738             {
739                 lamnew = fep_state;
740             }
741             return lamnew;
742         }
743     }
744
745     snew(propose, nlim);
746     snew(accept, nlim);
747     snew(remainder, nlim);
748
749     for (i = 0; i < expand->lmc_repeats; i++)
750     {
751         rng.restart(step, i);
752         dist.reset();
753
754         for (ifep = 0; ifep < nlim; ifep++)
755         {
756             propose[ifep] = 0;
757             accept[ifep]  = 0;
758         }
759
760         if ((expand->elmcmove == elmcmoveGIBBS) || (expand->elmcmove == elmcmoveMETGIBBS))
761         {
762             /* use the Gibbs sampler, with restricted range */
763             if (expand->gibbsdeltalam < 0)
764             {
765                 minfep      = 0;
766                 maxfep      = nlim-1;
767             }
768             else
769             {
770                 minfep = fep_state - expand->gibbsdeltalam;
771                 maxfep = fep_state + expand->gibbsdeltalam;
772                 if (minfep < 0)
773                 {
774                     minfep = 0;
775                 }
776                 if (maxfep > nlim-1)
777                 {
778                     maxfep = nlim-1;
779                 }
780             }
781
782             GenerateGibbsProbabilities(weighted_lamee, p_k, &pks, minfep, maxfep);
783
784             if (expand->elmcmove == elmcmoveGIBBS)
785             {
786                 for (ifep = minfep; ifep <= maxfep; ifep++)
787                 {
788                     propose[ifep] = p_k[ifep];
789                     accept[ifep]  = 1.0;
790                 }
791                 /* Gibbs sampling */
792                 r1 = dist(rng);
793                 for (lamnew = minfep; lamnew <= maxfep; lamnew++)
794                 {
795                     if (r1 <= p_k[lamnew])
796                     {
797                         break;
798                     }
799                     r1 -= p_k[lamnew];
800                 }
801             }
802             else if (expand->elmcmove == elmcmoveMETGIBBS)
803             {
804
805                 /* Metropolized Gibbs sampling */
806                 for (ifep = minfep; ifep <= maxfep; ifep++)
807                 {
808                     remainder[ifep] = 1 - p_k[ifep];
809                 }
810
811                 /* find the proposal probabilities */
812
813                 if (remainder[fep_state] == 0)
814                 {
815                     /* only the current state has any probability */
816                     /* we have to stay at the current state */
817                     lamnew = fep_state;
818                 }
819                 else
820                 {
821                     for (ifep = minfep; ifep <= maxfep; ifep++)
822                     {
823                         if (ifep != fep_state)
824                         {
825                             propose[ifep] = p_k[ifep]/remainder[fep_state];
826                         }
827                         else
828                         {
829                             propose[ifep] = 0;
830                         }
831                     }
832
833                     r1 = dist(rng);
834                     for (lamtrial = minfep; lamtrial <= maxfep; lamtrial++)
835                     {
836                         pnorm = p_k[lamtrial]/remainder[fep_state];
837                         if (lamtrial != fep_state)
838                         {
839                             if (r1 <= pnorm)
840                             {
841                                 break;
842                             }
843                             r1 -= pnorm;
844                         }
845                     }
846
847                     /* we have now selected lamtrial according to p(lamtrial)/(1-p(fep_state)) */
848                     tprob = 1.0;
849                     /* trial probability is min{1, (1-p(old))/(1-p(new))} MRS 1/8/2008 */
850                     trialprob = (remainder[fep_state])/(remainder[lamtrial]);
851                     if (trialprob < tprob)
852                     {
853                         tprob = trialprob;
854                     }
855                     r2 = dist(rng);
856                     if (r2 < tprob)
857                     {
858                         lamnew = lamtrial;
859                     }
860                     else
861                     {
862                         lamnew = fep_state;
863                     }
864                 }
865
866                 /* now figure out the acceptance probability for each */
867                 for (ifep = minfep; ifep <= maxfep; ifep++)
868                 {
869                     tprob = 1.0;
870                     if (remainder[ifep] != 0)
871                     {
872                         trialprob = (remainder[fep_state])/(remainder[ifep]);
873                     }
874                     else
875                     {
876                         trialprob = 1.0; /* this state is the only choice! */
877                     }
878                     if (trialprob < tprob)
879                     {
880                         tprob = trialprob;
881                     }
882                     /* this also assigns an acceptance for ifep == fep_state, but that's fine, the current state is never proposed! */
883                     accept[ifep] = tprob;
884                 }
885             }
886
887             if (lamnew > maxfep)
888             {
889                 /* it's possible some rounding is failing */
890                 if (gmx_within_tol(remainder[fep_state], 0, 50*GMX_DOUBLE_EPS))
891                 {
892                     /* numerical rounding error -- no state other than the original has weight */
893                     lamnew = fep_state;
894                 }
895                 else
896                 {
897                     /* probably not a numerical issue */
898                     int   loc    = 0;
899                     int   nerror = 200+(maxfep-minfep+1)*60;
900                     char *errorstr;
901                     snew(errorstr, nerror);
902                     /* if it's greater than maxfep, then something went wrong -- probably underflow in the calculation
903                        of the sum of weights. Generate detailed info for the failure. */
904                     loc += sprintf(errorstr, "Something wrong in choosing new lambda state with a Gibbs move -- probably underflow in weight determination.\nDenominator is: %3d%17.10e\n  i                dE        numerator          weights\n", 0, pks);
905                     for (ifep = minfep; ifep <= maxfep; ifep++)
906                     {
907                         loc += sprintf(&errorstr[loc], "%3d %17.10e%17.10e%17.10e\n", ifep, weighted_lamee[ifep], p_k[ifep], dfhist->sum_weights[ifep]);
908                     }
909                     gmx_fatal(FARGS, "%s", errorstr);
910                 }
911             }
912         }
913         else if ((expand->elmcmove == elmcmoveMETROPOLIS) || (expand->elmcmove == elmcmoveBARKER))
914         {
915             /* use the metropolis sampler with trial +/- 1 */
916             r1 = dist(rng);
917             if (r1 < 0.5)
918             {
919                 if (fep_state == 0)
920                 {
921                     lamtrial = fep_state;
922                 }
923                 else
924                 {
925                     lamtrial = fep_state-1;
926                 }
927             }
928             else
929             {
930                 if (fep_state == nlim-1)
931                 {
932                     lamtrial = fep_state;
933                 }
934                 else
935                 {
936                     lamtrial = fep_state+1;
937                 }
938             }
939
940             de = weighted_lamee[lamtrial] - weighted_lamee[fep_state];
941             if (expand->elmcmove == elmcmoveMETROPOLIS)
942             {
943                 tprob     = 1.0;
944                 trialprob = std::exp(de);
945                 if (trialprob < tprob)
946                 {
947                     tprob = trialprob;
948                 }
949                 propose[fep_state] = 0;
950                 propose[lamtrial]  = 1.0; /* note that this overwrites the above line if fep_state = ntrial, which only occurs at the ends */
951                 accept[fep_state]  = 1.0; /* doesn't actually matter, never proposed unless fep_state = ntrial, in which case it's 1.0 anyway */
952                 accept[lamtrial]   = tprob;
953
954             }
955             else if (expand->elmcmove == elmcmoveBARKER)
956             {
957                 tprob = 1.0/(1.0+std::exp(-de));
958
959                 propose[fep_state] = (1-tprob);
960                 propose[lamtrial] += tprob; /* we add, to account for the fact that at the end, they might be the same point */
961                 accept[fep_state]  = 1.0;
962                 accept[lamtrial]   = 1.0;
963             }
964
965             r2 = dist(rng);
966             if (r2 < tprob)
967             {
968                 lamnew = lamtrial;
969             }
970             else
971             {
972                 lamnew = fep_state;
973             }
974         }
975
976         for (ifep = 0; ifep < nlim; ifep++)
977         {
978             dfhist->Tij[fep_state][ifep]      += propose[ifep]*accept[ifep];
979             dfhist->Tij[fep_state][fep_state] += propose[ifep]*(1.0-accept[ifep]);
980         }
981         fep_state = lamnew;
982     }
983
984     dfhist->Tij_empirical[starting_fep_state][lamnew] += 1.0;
985
986     sfree(propose);
987     sfree(accept);
988     sfree(remainder);
989
990     return lamnew;
991 }
992
993 /* print out the weights to the log, along with current state */
994 void PrintFreeEnergyInfoToFile(FILE *outfile, t_lambda *fep, t_expanded *expand, t_simtemp *simtemp, df_history_t *dfhist,
995                                int fep_state, int frequency, gmx_int64_t step)
996 {
997     int         nlim, i, ifep, jfep;
998     real        dw, dg, dv, Tprint;
999     const char *print_names[efptNR] = {" FEPL", "MassL", "CoulL", " VdwL", "BondL", "RestT", "Temp.(K)"};
1000     gmx_bool    bSimTemp            = FALSE;
1001
1002     nlim = fep->n_lambda;
1003     if (simtemp != nullptr)
1004     {
1005         bSimTemp = TRUE;
1006     }
1007
1008     if (step % frequency == 0)
1009     {
1010         fprintf(outfile, "             MC-lambda information\n");
1011         if (EWL(expand->elamstats) && (!(dfhist->bEquil)))
1012         {
1013             fprintf(outfile, "  Wang-Landau incrementor is: %11.5g\n", dfhist->wl_delta);
1014         }
1015         fprintf(outfile, "  N");
1016         for (i = 0; i < efptNR; i++)
1017         {
1018             if (fep->separate_dvdl[i])
1019             {
1020                 fprintf(outfile, "%7s", print_names[i]);
1021             }
1022             else if ((i == efptTEMPERATURE) && bSimTemp)
1023             {
1024                 fprintf(outfile, "%10s", print_names[i]); /* more space for temperature formats */
1025             }
1026         }
1027         fprintf(outfile, "    Count   ");
1028         if (expand->elamstats == elamstatsMINVAR)
1029         {
1030             fprintf(outfile, "W(in kT)   G(in kT)  dG(in kT)  dV(in kT)\n");
1031         }
1032         else
1033         {
1034             fprintf(outfile, "G(in kT)  dG(in kT)\n");
1035         }
1036         for (ifep = 0; ifep < nlim; ifep++)
1037         {
1038             if (ifep == nlim-1)
1039             {
1040                 dw = 0.0;
1041                 dg = 0.0;
1042                 dv = 0.0;
1043             }
1044             else
1045             {
1046                 dw = dfhist->sum_weights[ifep+1] - dfhist->sum_weights[ifep];
1047                 dg = dfhist->sum_dg[ifep+1] - dfhist->sum_dg[ifep];
1048                 dv = std::sqrt(gmx::square(dfhist->sum_variance[ifep+1]) - gmx::square(dfhist->sum_variance[ifep]));
1049             }
1050             fprintf(outfile, "%3d", (ifep+1));
1051             for (i = 0; i < efptNR; i++)
1052             {
1053                 if (fep->separate_dvdl[i])
1054                 {
1055                     fprintf(outfile, "%7.3f", fep->all_lambda[i][ifep]);
1056                 }
1057                 else if (i == efptTEMPERATURE && bSimTemp)
1058                 {
1059                     fprintf(outfile, "%9.3f", simtemp->temperatures[ifep]);
1060                 }
1061             }
1062             if (EWL(expand->elamstats) && (!(dfhist->bEquil)))  /* if performing WL and still haven't equilibrated */
1063             {
1064                 if (expand->elamstats == elamstatsWL)
1065                 {
1066                     fprintf(outfile, " %8d", (int)dfhist->wl_histo[ifep]);
1067                 }
1068                 else
1069                 {
1070                     fprintf(outfile, " %8.3f", dfhist->wl_histo[ifep]);
1071                 }
1072             }
1073             else   /* we have equilibrated weights */
1074             {
1075                 fprintf(outfile, " %8d", dfhist->n_at_lam[ifep]);
1076             }
1077             if (expand->elamstats == elamstatsMINVAR)
1078             {
1079                 fprintf(outfile, " %10.5f %10.5f %10.5f %10.5f", dfhist->sum_weights[ifep], dfhist->sum_dg[ifep], dg, dv);
1080             }
1081             else
1082             {
1083                 fprintf(outfile, " %10.5f %10.5f", dfhist->sum_weights[ifep], dw);
1084             }
1085             if (ifep == fep_state)
1086             {
1087                 fprintf(outfile, " <<\n");
1088             }
1089             else
1090             {
1091                 fprintf(outfile, "   \n");
1092             }
1093         }
1094         fprintf(outfile, "\n");
1095
1096         if ((step % expand->nstTij == 0) && (expand->nstTij > 0) && (step > 0))
1097         {
1098             fprintf(outfile, "                     Transition Matrix\n");
1099             for (ifep = 0; ifep < nlim; ifep++)
1100             {
1101                 fprintf(outfile, "%12d", (ifep+1));
1102             }
1103             fprintf(outfile, "\n");
1104             for (ifep = 0; ifep < nlim; ifep++)
1105             {
1106                 for (jfep = 0; jfep < nlim; jfep++)
1107                 {
1108                     if (dfhist->n_at_lam[ifep] > 0)
1109                     {
1110                         if (expand->bSymmetrizedTMatrix)
1111                         {
1112                             Tprint = (dfhist->Tij[ifep][jfep]+dfhist->Tij[jfep][ifep])/(dfhist->n_at_lam[ifep]+dfhist->n_at_lam[jfep]);
1113                         }
1114                         else
1115                         {
1116                             Tprint = (dfhist->Tij[ifep][jfep])/(dfhist->n_at_lam[ifep]);
1117                         }
1118                     }
1119                     else
1120                     {
1121                         Tprint = 0.0;
1122                     }
1123                     fprintf(outfile, "%12.8f", Tprint);
1124                 }
1125                 fprintf(outfile, "%3d\n", (ifep+1));
1126             }
1127
1128             fprintf(outfile, "                  Empirical Transition Matrix\n");
1129             for (ifep = 0; ifep < nlim; ifep++)
1130             {
1131                 fprintf(outfile, "%12d", (ifep+1));
1132             }
1133             fprintf(outfile, "\n");
1134             for (ifep = 0; ifep < nlim; ifep++)
1135             {
1136                 for (jfep = 0; jfep < nlim; jfep++)
1137                 {
1138                     if (dfhist->n_at_lam[ifep] > 0)
1139                     {
1140                         if (expand->bSymmetrizedTMatrix)
1141                         {
1142                             Tprint = (dfhist->Tij_empirical[ifep][jfep]+dfhist->Tij_empirical[jfep][ifep])/(dfhist->n_at_lam[ifep]+dfhist->n_at_lam[jfep]);
1143                         }
1144                         else
1145                         {
1146                             Tprint = dfhist->Tij_empirical[ifep][jfep]/(dfhist->n_at_lam[ifep]);
1147                         }
1148                     }
1149                     else
1150                     {
1151                         Tprint = 0.0;
1152                     }
1153                     fprintf(outfile, "%12.8f", Tprint);
1154                 }
1155                 fprintf(outfile, "%3d\n", (ifep+1));
1156             }
1157         }
1158     }
1159 }
1160
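/* Main expanded-ensemble driver, called from the MD loop: increment the count at the
   current lambda, update the weights, choose a new lambda state, rescale velocities and
   thermostat variables if simulated tempering changed the reference temperature, and
   manage the Wang-Landau incrementor (1/t switching, histogram reset and scaling).
   Returns the new lambda state index. */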
1161 int ExpandedEnsembleDynamics(FILE *log, t_inputrec *ir, gmx_enerdata_t *enerd,
1162                              t_state *state, t_extmass *MassQ, int fep_state, df_history_t *dfhist,
1163                              gmx_int64_t step,
1164                              rvec *v, t_mdatoms *mdatoms)
1165 /* Note that the state variable is only needed for simulated tempering, not
1166    Hamiltonian expanded ensemble.  May be able to remove it after integrator refactoring. */
1167 {
1168     real       *pfep_lamee, *scaled_lamee, *weighted_lamee;
1169     double     *p_k;
1170     int         i, nlim, lamnew, totalsamples;
1171     real        oneovert, maxscaled = 0, maxweighted = 0;
1172     t_expanded *expand;
1173     t_simtemp  *simtemp;
1174     gmx_bool    bIfReset, bSwitchtoOneOverT, bDoneEquilibrating = FALSE;
1175
1176     expand  = ir->expandedvals;
1177     simtemp = ir->simtempvals;
1178     nlim    = ir->fepvals->n_lambda;
1179
1180     snew(scaled_lamee, nlim);
1181     snew(weighted_lamee, nlim);
1182     snew(pfep_lamee, nlim);
1183     snew(p_k, nlim);
1184
1185     /* update the count at the current lambda*/
1186     dfhist->n_at_lam[fep_state]++;
1187
1188     /* need to calculate the PV term somewhere, but not needed here? Not until there's a lambda state that's
1189        pressure controlled.*/
1190     /*
1191        pVTerm = 0;
1192        where does this PV term go?
1193        for (i=0;i<nlim;i++)
1194        {
1195        fep_lamee[i] += pVTerm;
1196        }
1197      */
1198
1199     /* determine the maximum value, which is subtracted off below to avoid overflow.  Probably a better way to do this */
1200     /* we don't need to include the pressure term, since the volume is the same between the two.
1201        is there some term we are neglecting, however? */
1202
1203     if (ir->efep != efepNO)
1204     {
1205         for (i = 0; i < nlim; i++)
1206         {
1207             if (ir->bSimTemp)
1208             {
1209                 /* Note -- this assumes no mass changes, since kinetic energy is not added  . . . */
1210                 scaled_lamee[i] = (enerd->enerpart_lambda[i+1]-enerd->enerpart_lambda[0])/(simtemp->temperatures[i]*BOLTZ)
1211                     + enerd->term[F_EPOT]*(1.0/(simtemp->temperatures[i])- 1.0/(simtemp->temperatures[fep_state]))/BOLTZ;
1212             }
1213             else
1214             {
1215                 scaled_lamee[i] = (enerd->enerpart_lambda[i+1]-enerd->enerpart_lambda[0])/(expand->mc_temp*BOLTZ);
1216                 /* mc_temp is currently set to the system reference temperature (ref_t) unless otherwise defined */
1217             }
1218
1219             /* save these energies for printing, so they don't get overwritten by the next step */
1220             /* they aren't overwritten in the non-free energy case, but we always print with these
1221                for simplicity */
1222         }
1223     }
1224     else
1225     {
1226         if (ir->bSimTemp)
1227         {
1228             for (i = 0; i < nlim; i++)
1229             {
1230                 scaled_lamee[i] = enerd->term[F_EPOT]*(1.0/simtemp->temperatures[i] - 1.0/simtemp->temperatures[fep_state])/BOLTZ;
1231             }
1232         }
1233     }
1234
1235     for (i = 0; i < nlim; i++)
1236     {
1237         pfep_lamee[i] = scaled_lamee[i];
1238
1239         weighted_lamee[i] = dfhist->sum_weights[i] - scaled_lamee[i];
1240         if (i == 0)
1241         {
1242             maxscaled   = scaled_lamee[i];
1243             maxweighted = weighted_lamee[i];
1244         }
1245         else
1246         {
1247             if (scaled_lamee[i] > maxscaled)
1248             {
1249                 maxscaled = scaled_lamee[i];
1250             }
1251             if (weighted_lamee[i] > maxweighted)
1252             {
1253                 maxweighted = weighted_lamee[i];
1254             }
1255         }
1256     }
1257
1258     for (i = 0; i < nlim; i++)
1259     {
1260         scaled_lamee[i]   -= maxscaled;
1261         weighted_lamee[i] -= maxweighted;
1262     }
1263
1264     /* update weights - we decide whether or not to actually do this inside */
1265
1266     bDoneEquilibrating = UpdateWeights(nlim, expand, dfhist, fep_state, scaled_lamee, weighted_lamee, step);
1267     if (bDoneEquilibrating)
1268     {
1269         if (log)
1270         {
1271             fprintf(log, "\nStep %d: Weights have equilibrated, using criteria: %s\n", (int)step, elmceq_names[expand->elmceq]);
1272         }
1273     }
1274
1275     lamnew = ChooseNewLambda(nlim, expand, dfhist, fep_state, weighted_lamee, p_k,
1276                              ir->expandedvals->lmc_seed, step);
1277     /* if using simulated tempering, we need to adjust the temperatures */
1278     if (ir->bSimTemp && (lamnew != fep_state)) /* only need to change the temperatures if we change the state */
1279     {
1280         int   i, j, n, d;
1281         real *buf_ngtc;
1282         real  told;
1283         int   nstart, nend, gt;
1284
1285         snew(buf_ngtc, ir->opts.ngtc);
1286
1287         for (i = 0; i < ir->opts.ngtc; i++)
1288         {
1289             if (ir->opts.ref_t[i] > 0)
1290             {
1291                 told              = ir->opts.ref_t[i];
1292                 ir->opts.ref_t[i] =  simtemp->temperatures[lamnew];
1293                 buf_ngtc[i]       = std::sqrt(ir->opts.ref_t[i]/told); /* using the buffer as temperature scaling */
1294             }
1295         }
1296
1297         /* we don't need to manipulate the ekind information, as it isn't due to be reset until the next step anyway */
1298
1299         nstart = 0;
1300         nend   = mdatoms->homenr;
1301         for (n = nstart; n < nend; n++)
1302         {
1303             gt = 0;
1304             if (mdatoms->cTC)
1305             {
1306                 gt = mdatoms->cTC[n];
1307             }
1308             for (d = 0; d < DIM; d++)
1309             {
1310                 v[n][d] *= buf_ngtc[gt];
1311             }
1312         }
1313
1314         if (inputrecNptTrotter(ir) || inputrecNphTrotter(ir) || inputrecNvtTrotter(ir))
1315         {
1316             /* we need to recalculate the masses if the temperature has changed */
1317             init_npt_masses(ir, state, MassQ, FALSE);
1318             for (i = 0; i < state->nnhpres; i++)
1319             {
1320                 for (j = 0; j < ir->opts.nhchainlength; j++)
1321                 {
1322                     state->nhpres_vxi[i+j] *= buf_ngtc[i];
1323                 }
1324             }
1325             for (i = 0; i < ir->opts.ngtc; i++)
1326             {
1327                 for (j = 0; j < ir->opts.nhchainlength; j++)
1328                 {
1329                     state->nosehoover_vxi[i+j] *= buf_ngtc[i];
1330                 }
1331             }
1332         }
1333         sfree(buf_ngtc);
1334     }
1335
1336     /* now check on the Wang-Landau updating criteria */
1337
1338     if (EWL(expand->elamstats))
1339     {
1340         bSwitchtoOneOverT = FALSE;
1341         if (expand->bWLoneovert)
1342         {
1343             totalsamples = 0;
1344             for (i = 0; i < nlim; i++)
1345             {
1346                 totalsamples += dfhist->n_at_lam[i];
1347             }
1348             oneovert = (1.0*nlim)/totalsamples;
1349             /* oneovert has decreased by a bit since last time, so we actually make sure it's within one sample of this number */
1350             /* switch to 1/t incrementing when wl_delta has decreased at least once, and wl_delta is now less than 1/t */
1351             if ((dfhist->wl_delta <= ((totalsamples)/(totalsamples-1.00001))*oneovert) &&
1352                 (dfhist->wl_delta < expand->init_wl_delta))
1353             {
1354                 bSwitchtoOneOverT = TRUE;
1355             }
1356         }
1357         if (bSwitchtoOneOverT)
1358         {
1359             dfhist->wl_delta = oneovert; /* now we reduce by this each time, instead of only at flatness */
1360         }
1361         else
1362         {
1363             bIfReset = CheckHistogramRatios(nlim, dfhist->wl_histo, expand->wl_ratio);
1364             if (bIfReset)
1365             {
1366                 for (i = 0; i < nlim; i++)
1367                 {
1368                     dfhist->wl_histo[i] = 0;
1369                 }
1370                 dfhist->wl_delta *= expand->wl_scale;
1371                 if (log)
1372                 {
1373                     fprintf(log, "\nStep %d: weights are now:", (int)step);
1374                     for (i = 0; i < nlim; i++)
1375                     {
1376                         fprintf(log, " %.5f", dfhist->sum_weights[i]);
1377                     }
1378                     fprintf(log, "\n");
1379                 }
1380             }
1381         }
1382     }
1383     sfree(pfep_lamee);
1384     sfree(scaled_lamee);
1385     sfree(weighted_lamee);
1386     sfree(p_k);
1387
1388     return lamnew;
1389 }