1 /* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
2  *
3  *
4  *                This source code is part of
5  *
6  *                 G   R   O   M   A   C   S
7  *
8  *          GROningen MAchine for Chemical Simulations
9  *
10  * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
11  * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
12  * Copyright (c) 2001-2012, The GROMACS development team,
13  * check out http://www.gromacs.org for more information.
14  *
15  * This program is free software; you can redistribute it and/or
16  * modify it under the terms of the GNU General Public License
17  * as published by the Free Software Foundation; either version 2
18  * of the License, or (at your option) any later version.
19  *
20  * If you want to redistribute modifications, please consider that
21  * scientific software is very special. Version control is crucial -
22  * bugs must be traceable. We will be happy to consider code for
23  * inclusion in the official distribution, but derived work must not
24  * be called official GROMACS. Details are found in the README & COPYING
25  * files - if they are missing, get the official version at www.gromacs.org.
26  *
27  * To help us fund GROMACS development, we humbly ask that you cite
28  * the papers on the package - you can find them in the top README file.
29  *
30  * For more info, check our website at http://www.gromacs.org
31  *
32  * And Hey:
33  * GROwing Monsters And Cloning Shrimps
34  */
35 #ifdef HAVE_CONFIG_H
36 #include <config.h>
37 #endif
38
39 #ifdef GMX_CRAY_XT3
40 #include<catamount/dclock.h>
41 #endif
42
43
44 #include <stdio.h>
45 #include <time.h>
46 #ifdef HAVE_SYS_TIME_H
47 #include <sys/time.h>
48 #endif
49 #include <math.h>
50 #include "typedefs.h"
51 #include "string2.h"
52 #include "gmxfio.h"
53 #include "smalloc.h"
54 #include "names.h"
55 #include "confio.h"
56 #include "mvdata.h"
57 #include "txtdump.h"
58 #include "pbc.h"
59 #include "chargegroup.h"
60 #include "vec.h"
61 #include "nrnb.h"
62 #include "mshift.h"
63 #include "mdrun.h"
64 #include "update.h"
65 #include "physics.h"
66 #include "main.h"
67 #include "mdatoms.h"
68 #include "force.h"
69 #include "bondf.h"
70 #include "pme.h"
71 #include "disre.h"
72 #include "orires.h"
73 #include "network.h"
74 #include "calcmu.h"
75 #include "constr.h"
76 #include "xvgr.h"
77 #include "trnio.h"
78 #include "xtcio.h"
79 #include "copyrite.h"
80 #include "gmx_random.h"
81 #include "domdec.h"
82 #include "partdec.h"
83 #include "gmx_wallcycle.h"
84 #include "macros.h"
85
86 #ifdef GMX_LIB_MPI
87 #include <mpi.h>
88 #endif
89 #ifdef GMX_THREADS
90 #include "tmpi.h"
91 #endif
92
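/* Compute normalized Gibbs weights p_k[i] = exp(ene[i]) / sum_{j=minfep}^{maxfep} exp(ene[j])
 * over the (inclusive) index range [minfep, maxfep].  The maximum of ene[] is subtracted
 * before exponentiating to avoid overflow; *pks returns the resulting (shifted) normalization
 * sum.  ene[] is assumed to already be in reduced (dimensionless, kT) units. */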
93 void GenerateGibbsProbabilities(real *ene, real *p_k, real *pks, int minfep, int maxfep) {
94
95     int i;
96     real maxene;
97
98     *pks = 0.0;
99     maxene = ene[minfep];
100     /* find the maximum value */
101     for (i=minfep;i<=maxfep;i++)
102     {
103         if (ene[i]>maxene)
104         {
105             maxene = ene[i];
106         }
107     }
108     /* find the denominator */
109     for (i=minfep;i<=maxfep;i++)
110     {
111         *pks += exp(ene[i]-maxene);
112     }
113     /*numerators*/
114     for (i=minfep;i<=maxfep;i++)
115     {
116         p_k[i] = exp(ene[i]-maxene) / *pks;
117     }
118 }
119
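/* As GenerateGibbsProbabilities(), but each state's log-weight is first augmented by the log of
 * its accumulated count nvals[i] (with the small offset delta for states that have no counts yet,
 * so the logarithm stays defined).  This produces the histogram-weighted probabilities used by
 * the weighted Wang-Landau (WWL) update below. */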
120 void GenerateWeightedGibbsProbabilities(real *ene, real *p_k, real *pks, int nlim, real *nvals,real delta) {
121
122     int i;
123     real maxene;
124     real *nene;
125     *pks = 0.0;
126
127     snew(nene,nlim);
128     for (i=0;i<nlim;i++) {
129         if (nvals[i] == 0) {
130             /* add delta to the count so that the argument of the log stays greater than zero;
131                delta provides a small, well-defined offset for states that have not yet been visited */
132             nene[i] = ene[i] + log(nvals[i]+delta);
133         } else {
134             nene[i] = ene[i] + log(nvals[i]);
135         }
136     }
137
138     /* find the maximum value */
139     maxene = nene[0];
140     for (i=0;i<nlim;i++)
141     {
142         if (nene[i] > maxene) {
143             maxene = nene[i];
144         }
145     }
146
147     /* subtract off the maximum, avoiding overflow */
148     for (i=0;i<nlim;i++)
149     {
150         nene[i] -= maxene;
151     }
152
153     /* find the denominator */
154     for (i=0;i<nlim;i++)
155     {
156         *pks += exp(nene[i]);
157     }
158
159     /*numerators*/
160     for (i=0;i<nlim;i++)
161     {
162         p_k[i] = exp(nene[i]) / *pks;
163     }
164     sfree(nene);
165 }
166
167 real do_logsum(int N, real *a_n) {
168
169     /*     RETURN VALUE */
170     /* log(\sum_{i=0}^(N-1) exp[a_n]) */
171     real maxarg;
172     real sum;
173     int i;
174     real logsum;
175     /*     compute maximum argument to exp(.) */
176
177     maxarg = a_n[0];
178     for(i=1;i<N;i++)
179     {
180         maxarg = max(maxarg,a_n[i]);
181     }
182
183     /* compute sum of exp(a_n - maxarg) */
184     sum = 0.0;
185     for (i=0;i<N;i++)
186     {
187         sum = sum + exp(a_n[i] - maxarg);
188     }
189
190     /*     compute log sum */
191     logsum = log(sum) + maxarg;
192     return logsum;
193 }
194
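/* Return the index of the smallest element of min_metric[0..N-1]. */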
195 int FindMinimum(real *min_metric, int N) {
196
197     real min_val;
198     int min_nval,nval;
199
200     min_nval = 0;
201     min_val = min_metric[0];
202
203     for (nval=0; nval<N; nval++)
204     {
205         if (min_metric[nval] < min_val)
206         {
207             min_val = min_metric[nval];
208             min_nval = nval;
209         }
210     }
211     return min_nval;
212 }
213
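/* Flatness test: return TRUE only if every one of the nhisto histogram bins lies within a factor
 * of 'ratio' of the mean bin count (i.e. ratio < histo[i]/mean < 1/ratio), and FALSE if there are
 * no samples at all.  Used both for the Wang-Landau histogram reset and for the equil-ratio
 * equilibration criterion. */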
214 static gmx_bool CheckHistogramRatios(int nhisto, real *histo, real ratio)
215 {
216
217     int i;
218     real nmean;
219     gmx_bool bIfFlat;
220
221     nmean = 0;
222     for (i=0;i<nhisto;i++)
223     {
224         nmean += histo[i];
225     }
226
227     if (nmean == 0)
228     {
229         /* no samples! is bad!*/
230         bIfFlat = FALSE;
231         return bIfFlat;
232     }
233     nmean /= (real)nhisto;
234
235     bIfFlat = TRUE;
236     for (i=0;i<nhisto;i++)
237     {
238         /* make sure that all points are in the ratio < x <  1/ratio range  */
239         if (!((histo[i]/nmean < 1.0/ratio) && (histo[i]/nmean > ratio)))
240         {
241             bIfFlat = FALSE;
242             break;
243         }
244     }
245     return bIfFlat;
246 }
247
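/* Decide whether the expanded-ensemble weights are done equilibrating, according to the
 * criterion selected in expand->elmceq (never/always, a fixed number of steps or samples,
 * a minimum number of visits per lambda, a Wang-Landau delta threshold, or histogram flatness).
 * The forced initial sweep (lmc_forced_nstart), if requested, must also be complete before
 * TRUE is returned. */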
248 static gmx_bool CheckIfDoneEquilibrating(int nlim, t_expanded *expand, df_history_t *dfhist, gmx_large_int_t step)
249 {
250
251     int i,totalsamples;
252     gmx_bool bDoneEquilibrating=TRUE;
253     gmx_bool bIfFlat;
254
255     /* assume we have equilibrated the weights, then check to see if any of the conditions are not met */
256
257     /* calculate the total number of samples */
258     switch (expand->elmceq)
259     {
260     case elmceqNO:
261         /* We have not equilibrated, and won't, ever. */
262         return FALSE;
263     case elmceqYES:
264         /* we have equilibrated -- we're done */
265         return TRUE;
266     case elmceqSTEPS:
267         /* first, check if we are equilibrating by steps, if we're still under */
268         if (step < expand->equil_steps)
269         {
270             bDoneEquilibrating = FALSE;
271         }
272         break;
273     case elmceqSAMPLES:
274         totalsamples = 0;
275         for (i=0;i<nlim;i++)
276         {
277             totalsamples += dfhist->n_at_lam[i];
278         }
279         if (totalsamples < expand->equil_samples)
280         {
281             bDoneEquilibrating = FALSE;
282         }
283         break;
284     case elmceqNUMATLAM:
285         for (i=0;i<nlim;i++)
286         {
287             if (dfhist->n_at_lam[i] < expand->equil_n_at_lam) /* not enough samples yet at each lambda,
288                                                                  so we are not done equilibrating */
289             {
290                 bDoneEquilibrating  = FALSE;
291                 break;
292             }
293         }
294         break;
295     case elmceqWLDELTA:
296         if (EWL(expand->elamstats)) /* This check is in readir as well, but
297                                     just to be sure */
298         {
299             if (dfhist->wl_delta > expand->equil_wl_delta)
300             {
301                 bDoneEquilibrating = FALSE;
302             }
303         }
304         break;
305     case elmceqRATIO:
306         /* we can use the flatness as a judge of good weights, as long as
307            we're not doing minvar, or Wang-Landau.
308            But turn off for now until we figure out exactly how we do this.
309         */
310
311         if (!(EWL(expand->elamstats) || expand->elamstats==elamstatsMINVAR))
312         {
313             /* we want to use flatness -avoiding- the forced-through samples.  Plus, we need to convert to
314                floats for this histogram function. */
315
316             real *modhisto;
317             snew(modhisto,nlim);
318             for (i=0;i<nlim;i++)
319             {
320                 modhisto[i] = 1.0*(dfhist->n_at_lam[i]-expand->lmc_forced_nstart);
321             }
322             bIfFlat = CheckHistogramRatios(nlim,modhisto,expand->equil_ratio);
323             sfree(modhisto);
324             if (!bIfFlat)
325             {
326                 bDoneEquilibrating = FALSE;
327             }
328         }
            break; /* without this break, control falls through to the default case below and the
                      flatness result is overwritten with TRUE */
329     default:
330         bDoneEquilibrating = TRUE;
331     }
332     /* one last case to go though, if we are doing slow growth to get initial values, we haven't finished equilibrating */
333
334     if (expand->lmc_forced_nstart > 0)
335     {
336         for (i=0;i<nlim;i++)
337         {
338             if (dfhist->n_at_lam[i] < expand->lmc_forced_nstart) /* we are still doing the initial sweep, so we're definitely not
339                                                                     done equilibrating*/
340             {
341                 bDoneEquilibrating = FALSE;
342                 break;
343             }
344         }
345     }
346     return bDoneEquilibrating;
347 }
348
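/* Update the lambda-state weights in dfhist using the statistics method selected in
 * expand->elamstats: Wang-Landau or weighted Wang-Landau incrementing, or the acceptance-based
 * Barker/Metropolis/minimum-variance free-energy estimates accumulated below.  Returns TRUE
 * exactly once, at the step on which the weights are detected to have equilibrated, and FALSE
 * otherwise (including every step after equilibration). */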
349 static gmx_bool UpdateWeights(int nlim, t_expanded *expand, df_history_t *dfhist,
350                               int fep_state, real *scaled_lamee, real *weighted_lamee, gmx_large_int_t step)
351 {
352     real maxdiff = 0.000000001;
353     gmx_bool bSufficientSamples;
354     int i, k, n, nz, indexi, indexk, min_n, max_n, nlam, totali;
355     int n0,np1,nm1,nval,min_nvalm,min_nvalp,maxc;
356     real omega_m1_0,omega_p1_m1,omega_m1_p1,omega_p1_0,clam_osum;
357     real de,de_function,dr,denom,maxdr,pks=0;
358     real min_val,cnval,zero_sum_weights;
359     real *omegam_array, *weightsm_array, *omegap_array, *weightsp_array, *varm_array, *varp_array, *dwp_array, *dwm_array;
360     real clam_varm, clam_varp, clam_weightsm, clam_weightsp, clam_minvar;
361     real *lam_weights, *lam_minvar_corr, *lam_variance, *lam_dg, *p_k;
362     real *numweighted_lamee, *logfrac;
363     int *nonzero;
364     real chi_m1_0,chi_p1_0,chi_m2_0,chi_p2_0,chi_p1_m1,chi_p2_m1,chi_m1_p1,chi_m2_p1;
365
366     /* if we have equilibrated the weights, exit now */
367     if (dfhist->bEquil)
368     {
369         return FALSE;
370     }
371
372     if (CheckIfDoneEquilibrating(nlim,expand,dfhist,step))
373     {
374         dfhist->bEquil = TRUE;
375         /* zero out the visited states so we know how many equilibrated states we have
376            from here on out.*/
377         for (i=0;i<nlim;i++)
378         {
379             dfhist->n_at_lam[i] = 0;
380         }
381         return TRUE;
382     }
383
384     /* If we reached this far, we have not equilibrated yet, keep on
385        going resetting the weights */
386
387     if (EWL(expand->elamstats))
388     {
389         if (expand->elamstats==elamstatsWL)  /* Standard Wang-Landau */
390         {
391             dfhist->sum_weights[fep_state] -= dfhist->wl_delta;
392             dfhist->wl_histo[fep_state] += 1.0;
393         }
394         else if (expand->elamstats==elamstatsWWL) /* Weighted Wang-Landau */
395         {
396             snew(p_k,nlim);
397
398             /* first increment count */
399             GenerateGibbsProbabilities(weighted_lamee,p_k,&pks,0,nlim-1);
400             for (i=0;i<nlim;i++) {
401                 dfhist->wl_histo[i] += p_k[i];
402             }
403
404             /* then increment weights (uses count) */
405             pks = 0.0;
406             GenerateWeightedGibbsProbabilities(weighted_lamee,p_k,&pks,nlim,dfhist->wl_histo,dfhist->wl_delta);
407
408             for (i=0;i<nlim;i++)
409             {
410                 dfhist->sum_weights[i] -= dfhist->wl_delta*p_k[i];
411             }
412             /* Alternate definition, using logarithms. Shouldn't make very much difference! */
413             /*
414               real di;
415               for (i=0;i<nlim;i++)
416               {
417                 di = 1+dfhist->wl_delta*p_k[i];
418                 dfhist->sum_weights[i] -= log(di);
419               }
420             */
421             sfree(p_k);
422         }
423
424         zero_sum_weights =  dfhist->sum_weights[0];
425         for (i=0;i<nlim;i++)
426         {
427             dfhist->sum_weights[i] -= zero_sum_weights;
428         }
429     }
430
431     if (expand->elamstats==elamstatsBARKER || expand->elamstats==elamstatsMETROPOLIS || expand->elamstats==elamstatsMINVAR) {
432
433         de_function = 0;  /* to get rid of warnings, but this value will not be used because of the logic */
434         maxc = 2*expand->c_range+1;
435
436         snew(lam_dg,nlim);
437         snew(lam_variance,nlim);
438
439         snew(omegap_array,maxc);
440         snew(weightsp_array,maxc);
441         snew(varp_array,maxc);
442         snew(dwp_array,maxc);
443
444         snew(omegam_array,maxc);
445         snew(weightsm_array,maxc);
446         snew(varm_array,maxc);
447         snew(dwm_array,maxc);
448
449         /* unpack the current lambdas -- we will only update 2 of these */
450
451         for (i=0;i<nlim-1;i++)
452         { /* only through the second to last */
453             lam_dg[i] = dfhist->sum_dg[i+1] - dfhist->sum_dg[i];
454             lam_variance[i] = pow(dfhist->sum_variance[i+1],2) - pow(dfhist->sum_variance[i],2);
455         }
456
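        /* For each candidate constant C = cnval in [-c_range, +c_range], accumulate the
           Metropolis or Barker acceptance averages to the neighboring states and form the
           corresponding free-energy difference and variance estimates.  The candidate whose
           estimate lies closest to the current weight (smallest dw) appears to be the one
           selected afterwards with FindMinimum(); see the selection step below. */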
457         /* accumulate running averages */
458         for (nval = 0; nval<maxc; nval++)
459         {
460             /* constants for later use */
461             cnval = (real)(nval-expand->c_range);
462             /* actually, should be able to rewrite it w/o exponential, for better numerical stability */
463             if (fep_state > 0)
464             {
465                 de = exp(cnval - (scaled_lamee[fep_state]-scaled_lamee[fep_state-1]));
466                 if (expand->elamstats==elamstatsBARKER || expand->elamstats==elamstatsMINVAR)
467                 {
468                     de_function = 1.0/(1.0+de);
469                 }
470                 else if (expand->elamstats==elamstatsMETROPOLIS)
471                 {
472                     if (de < 1.0)
473                     {
474                         de_function = 1.0;
475                     }
476                     else
477                     {
478                         de_function = 1.0/de;
479                     }
480                 }
481                 dfhist->accum_m[fep_state][nval] += de_function;
482                 dfhist->accum_m2[fep_state][nval] += de_function*de_function;
483             }
484
485             if (fep_state < nlim-1)
486             {
487                 de = exp(-cnval + (scaled_lamee[fep_state+1]-scaled_lamee[fep_state]));
488                 if (expand->elamstats==elamstatsBARKER || expand->elamstats==elamstatsMINVAR)
489                 {
490                     de_function = 1.0/(1.0+de);
491                 }
492                 else if (expand->elamstats==elamstatsMETROPOLIS)
493                 {
494                     if (de < 1.0)
495                     {
496                         de_function = 1.0;
497                     }
498                     else
499                     {
500                         de_function = 1.0/de;
501                     }
502                 }
503                 dfhist->accum_p[fep_state][nval] += de_function;
504                 dfhist->accum_p2[fep_state][nval] += de_function*de_function;
505             }
506
507             /* Metropolis transition and Barker transition (unoptimized Bennett) acceptance weight determination */
508
509             n0  = dfhist->n_at_lam[fep_state];
510             if (fep_state > 0) {nm1 = dfhist->n_at_lam[fep_state-1];} else {nm1 = 0;}
511             if (fep_state < nlim-1) {np1 = dfhist->n_at_lam[fep_state+1];} else {np1 = 0;}
512
513             /* the logic should keep these set correctly in every branch, but initialize them to zero anyway to be safe (and to silence compiler warnings) */
514             chi_m1_0=chi_p1_0=chi_m2_0=chi_p2_0=chi_p1_m1=chi_p2_m1=chi_m1_p1=chi_m2_p1=0;
515
516             if (n0 > 0)
517             {
518                 chi_m1_0 = dfhist->accum_m[fep_state][nval]/n0;
519                 chi_p1_0 = dfhist->accum_p[fep_state][nval]/n0;
520                 chi_m2_0 = dfhist->accum_m2[fep_state][nval]/n0;
521                 chi_p2_0 = dfhist->accum_p2[fep_state][nval]/n0;
522             }
523
524             if ((fep_state > 0 ) && (nm1 > 0))
525             {
526                 chi_p1_m1 = dfhist->accum_p[fep_state-1][nval]/nm1;
527                 chi_p2_m1 = dfhist->accum_p2[fep_state-1][nval]/nm1;
528             }
529
530             if ((fep_state < nlim-1) && (np1 > 0))
531             {
532                 chi_m1_p1 = dfhist->accum_m[fep_state+1][nval]/np1;
533                 chi_m2_p1 = dfhist->accum_m2[fep_state+1][nval]/np1;
534             }
535
536             omega_m1_0 = 0;
537             omega_p1_0 = 0;
538             clam_weightsm = 0;
539             clam_weightsp = 0;
540             clam_varm = 0;
541             clam_varp = 0;
542
543             if (fep_state > 0)
544             {
545                 if (n0 > 0)
546                 {
547                     omega_m1_0 = chi_m2_0/(chi_m1_0*chi_m1_0) - 1.0;
548                 }
549                 if (nm1 > 0)
550                 {
551                     omega_p1_m1 = chi_p2_m1/(chi_p1_m1*chi_p1_m1) - 1.0;
552                 }
553                 if ((n0 > 0) && (nm1 > 0))
554                 {
555                     clam_weightsm = (log(chi_m1_0) - log(chi_p1_m1)) + cnval;
556                     clam_varm = (1.0/n0)*(omega_m1_0) + (1.0/nm1)*(omega_p1_m1);
557                 }
558             }
559
560             if (fep_state < nlim-1)
561             {
562                 if (n0 > 0)
563                 {
564                     omega_p1_0 = chi_p2_0/(chi_p1_0*chi_p1_0) - 1.0;
565                 }
566                 if (np1 > 0)
567                 {
568                     omega_m1_p1 = chi_m2_p1/(chi_m1_p1*chi_m1_p1) - 1.0;
569                 }
570                 if ((n0 > 0) && (np1 > 0))
571                 {
572                     clam_weightsp = (log(chi_m1_p1) - log(chi_p1_0)) + cnval;
573                     clam_varp = (1.0/np1)*(omega_m1_p1) + (1.0/n0)*(omega_p1_0);
574                 }
575             }
576
577             if (n0 > 0)
578             {
579                 omegam_array[nval]             = omega_m1_0;
580             }
581             else
582             {
583                 omegam_array[nval]             = 0;
584             }
585             weightsm_array[nval]           = clam_weightsm;
586             varm_array[nval]               = clam_varm;
587             if (nm1 > 0)
588             {
589                 dwm_array[nval]  = fabs( (cnval + log((1.0*n0)/nm1)) - lam_dg[fep_state-1] );
590             }
591             else
592             {
593                 dwm_array[nval]  = fabs( cnval - lam_dg[fep_state-1] );
594             }
595
596             if (n0 > 0)
597             {
598                 omegap_array[nval]             = omega_p1_0;
599             }
600             else
601             {
602                 omegap_array[nval]             = 0;
603             }
604             weightsp_array[nval]           = clam_weightsp;
605             varp_array[nval]               = clam_varp;
606             if ((np1 > 0) && (n0 > 0))
607             {
608                 dwp_array[nval]  = fabs( (cnval + log((1.0*np1)/n0)) - lam_dg[fep_state] );
609             }
610             else
611             {
612                 dwp_array[nval]  = fabs( cnval - lam_dg[fep_state] );
613             }
614
615         }
616
617         /* find the C's closest to the old weights value */
618
619         min_nvalm = FindMinimum(dwm_array,maxc);
620         omega_m1_0    = omegam_array[min_nvalm];
621         clam_weightsm = weightsm_array[min_nvalm];
622         clam_varm     = varm_array[min_nvalm];
623
624         min_nvalp = FindMinimum(dwp_array,maxc);
625         omega_p1_0    = omegap_array[min_nvalp];
626         clam_weightsp = weightsp_array[min_nvalp];
627         clam_varp     = varp_array[min_nvalp];
628
629         clam_osum = omega_m1_0 + omega_p1_0;
630         clam_minvar = 0;
631         if (clam_osum > 0)
632         {
633             clam_minvar = 0.5*log(clam_osum);
634         }
635
636         if (fep_state > 0)
637         {
638             lam_dg[fep_state-1] = clam_weightsm;
639             lam_variance[fep_state-1] = clam_varm;
640         }
641
642         if (fep_state < nlim-1)
643         {
644             lam_dg[fep_state] = clam_weightsp;
645             lam_variance[fep_state] = clam_varp;
646         }
647
648         if (expand->elamstats==elamstatsMINVAR)
649         {
650             bSufficientSamples = TRUE;
651             /* make sure they are all past a threshold */
652             for (i=0;i<nlim;i++)
653             {
654                 if (dfhist->n_at_lam[i] < expand->minvarmin)
655                 {
656                     bSufficientSamples = FALSE;
657                 }
658             }
659             if (bSufficientSamples)
660             {
661                 dfhist->sum_minvar[fep_state] = clam_minvar;
662                 if (fep_state==0)
663                 {
664                     for (i=0;i<nlim;i++)
665                     {
666                         dfhist->sum_minvar[i]+=(expand->minvar_const-clam_minvar);
667                     }
668                     expand->minvar_const = clam_minvar;
669                     dfhist->sum_minvar[fep_state] = 0.0;
670                 }
671                 else
672                 {
673                     dfhist->sum_minvar[fep_state] -= expand->minvar_const;
674                 }
675             }
676         }
677
678         /* we need to rezero minvar now, since it could change at fep_state = 0 */
679         dfhist->sum_dg[0] = 0.0;
680         dfhist->sum_variance[0] = 0.0;
681         dfhist->sum_weights[0] = dfhist->sum_dg[0] + dfhist->sum_minvar[0]; /* should be zero */
682
683         for (i=1;i<nlim;i++)
684         {
685             dfhist->sum_dg[i] = lam_dg[i-1] + dfhist->sum_dg[i-1];
686             dfhist->sum_variance[i] = sqrt(lam_variance[i-1] + pow(dfhist->sum_variance[i-1],2));
687             dfhist->sum_weights[i] = dfhist->sum_dg[i] + dfhist->sum_minvar[i];
688         }
689
690         sfree(lam_dg);
691         sfree(lam_variance);
692
693         sfree(omegam_array);
694         sfree(weightsm_array);
695         sfree(varm_array);
696         sfree(dwm_array);
697
698         sfree(omegap_array);
699         sfree(weightsp_array);
700         sfree(varp_array);
701         sfree(dwp_array);
702     }
703     return FALSE;
704 }
705
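/* Choose a new lambda state index, starting from fep_state, by performing expand->lmc_repeats
 * Monte Carlo moves in state space using the move type selected in expand->elmcmove
 * (Gibbs, Metropolized Gibbs, Metropolis +/-1, or Barker).  Proposal and acceptance statistics
 * are accumulated into dfhist->Tij, the realized transition into dfhist->Tij_empirical, and the
 * index of the final state is returned. */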
706 static int ChooseNewLambda(FILE *log, int nlim, t_expanded *expand, df_history_t *dfhist, int fep_state, real *weighted_lamee, real *p_k, gmx_rng_t rng)
707 {
708     /* Choose new lambda value, and update transition matrix */
709
710     int i,ifep,jfep,minfep,maxfep,lamnew,lamtrial,starting_fep_state;
711     real r1,r2,pks,de_old,de_new,de,trialprob,tprob=0;
712     real **Tij;
713     real *propose,*accept,*remainder;
714     real sum,pnorm;
715     gmx_bool bRestricted;
716
717     starting_fep_state = fep_state;
718     lamnew = fep_state; /* so that there is a default setting -- stays the same */
719
720     if (!EWL(expand->elamstats))   /* ignore equilibrating the weights if using WL */
721     {
722         if ((expand->lmc_forced_nstart > 0) && (dfhist->n_at_lam[nlim-1] <= expand->lmc_forced_nstart))
723         {
724             /* Use a marching method to run through the lambdas and get preliminary free energy data,
725                before starting 'free' sampling.  We start free sampling when we have enough at each lambda */
726
727             /* if we have enough at this lambda, move on to the next one */
728
729             if (dfhist->n_at_lam[fep_state] == expand->lmc_forced_nstart)
730             {
731                 lamnew = fep_state+1;
732                 if (lamnew == nlim)  /* whoops, stepped too far! */
733                 {
734                     lamnew -= 1;
735                 }
736             }
737             else
738             {
739                 lamnew = fep_state;
740             }
741             return lamnew;
742         }
743     }
744
745     snew(propose,nlim);
746     snew(accept,nlim);
747     snew(remainder,nlim);
748
749     for (i=0;i<expand->lmc_repeats;i++)
750     {
751
752         for(ifep=0;ifep<nlim;ifep++)
753         {
754             propose[ifep] = 0;
755             accept[ifep] = 0;
756         }
757
758         if ((expand->elmcmove==elmcmoveGIBBS) || (expand->elmcmove==elmcmoveMETGIBBS))
759         {
760             bRestricted = TRUE;
761             /* use the Gibbs sampler, with restricted range */
762             if (expand->gibbsdeltalam < 0)
763             {
764                 minfep = 0;
765                 maxfep = nlim-1;
766                 bRestricted = FALSE;
767             }
768             else
769             {
770                 minfep = fep_state - expand->gibbsdeltalam;
771                 maxfep = fep_state + expand->gibbsdeltalam;
772                 if (minfep < 0)
773                 {
774                     minfep = 0;
775                 }
776                 if (maxfep > nlim-1)
777                 {
778                     maxfep = nlim-1;
779                 }
780             }
781
782             GenerateGibbsProbabilities(weighted_lamee,p_k,&pks,minfep,maxfep);
783
784             if (expand->elmcmove == elmcmoveGIBBS)
785             {
786                 for (ifep=minfep;ifep<=maxfep;ifep++)
787                 {
788                     propose[ifep] = p_k[ifep];
789                     accept[ifep] = 1.0;
790                 }
791                 /* Gibbs sampling */
792                 r1 = gmx_rng_uniform_real(rng);
793                 for (lamnew=minfep;lamnew<=maxfep;lamnew++)
794                 {
795                     if (r1 <= p_k[lamnew])
796                     {
797                         break;
798                     }
799                     r1 -= p_k[lamnew];
800                 }
801             }
802             else if (expand->elmcmove==elmcmoveMETGIBBS)
803             {
804
805                 /* Metropolized Gibbs sampling */
806                 for (ifep=minfep;ifep<=maxfep;ifep++)
807                 {
808                     remainder[ifep] = 1 - p_k[ifep];
809                 }
810
811                 /* find the proposal probabilities */
812
813                 if (remainder[fep_state] == 0) {
814                     /* only the current state has any probability */
815                     /* we have to stay at the current state */
816                     lamnew=fep_state;
817                 } else {
818                     for (ifep=minfep;ifep<=maxfep;ifep++)
819                     {
820                         if (ifep != fep_state)
821                         {
822                             propose[ifep] = p_k[ifep]/remainder[fep_state];
823                         }
824                         else
825                         {
826                             propose[ifep] = 0;
827                         }
828                     }
829
830                     r1 = gmx_rng_uniform_real(rng);
831                     for (lamtrial=minfep;lamtrial<=maxfep;lamtrial++)
832                     {
833                         pnorm = p_k[lamtrial]/remainder[fep_state];
834                         if (lamtrial!=fep_state)
835                         {
836                             if (r1 <= pnorm)
837                             {
838                                 break;
839                             }
840                             r1 -= pnorm;
841                         }
842                     }
843
844                     /* we have now selected lamtrial according to p(lamtrial)/1-p(fep_state) */
845                     tprob = 1.0;
846                     /* trial probability is min{1, [1 - p(old)]/[1 - p(new)]}  MRS 1/8/2008 */
847                     trialprob = (remainder[fep_state])/(remainder[lamtrial]);
848                     if (trialprob < tprob)
849                     {
850                         tprob = trialprob;
851                     }
852                     r2 = gmx_rng_uniform_real(rng);
853                     if (r2 < tprob)
854                     {
855                         lamnew = lamtrial;
856                     }
857                     else
858                     {
859                         lamnew = fep_state;
860                     }
861                 }
862
863                 /* now figure out the acceptance probability for each */
864                 for (ifep=minfep;ifep<=maxfep;ifep++)
865                 {
866                     tprob = 1.0;
867                     if (remainder[ifep] != 0) {
868                         trialprob = (remainder[fep_state])/(remainder[ifep]);
869                     }
870                     else
871                     {
872                         trialprob = 1.0; /* this state is the only choice! */
873                     }
874                     if (trialprob < tprob)
875                     {
876                         tprob = trialprob;
877                     }
878                     /* an acceptance probability is also stored for ifep == fep_state, but that's fine -- the current state is never proposed! */
879                     accept[ifep] = tprob;
880                 }
881             }
882
883             if (lamnew > maxfep)
884             {
885                 /* it's possible some rounding is failing */
886                 if (remainder[fep_state] < 2.0e-15)
887                 {
888                     /* probably numerical rounding error -- no state other than the original has weight */
889                     lamnew = fep_state;
890                 }
891                 else
892                 {
893                     /* probably not a numerical issue */
894                     int loc=0;
895                     int nerror = 200+(maxfep-minfep+1)*60;
896                     char *errorstr;
897                     snew(errorstr,nerror);
898                     /* if it is greater than maxfep, then something went wrong -- probably underflow in the calculation
899                        of the sum of weights.  Generate detailed info for the failure. */
900                     loc += sprintf(errorstr,"Something wrong in choosing new lambda state with a Gibbs move -- probably underflow in weight determination.\nDenominator is: %3d%17.10e\n  i                dE        numerator          weights\n",0,pks);
901                     for (ifep=minfep;ifep<=maxfep;ifep++)
902                     {
903                         loc += sprintf(&errorstr[loc],"%3d %17.10e%17.10e%17.10e\n",ifep,weighted_lamee[ifep],p_k[ifep],dfhist->sum_weights[ifep]);
904                     }
905                     gmx_fatal(FARGS,"%s",errorstr);
906                 }
907             }
908         }
909         else if ((expand->elmcmove==elmcmoveMETROPOLIS) || (expand->elmcmove==elmcmoveBARKER))
910         {
911             /* use the metropolis sampler with trial +/- 1 */
912             r1 = gmx_rng_uniform_real(rng);
913             if (r1 < 0.5)
914             {
915                 if (fep_state == 0) {
916                     lamtrial = fep_state;
917                 }
918                 else
919                 {
920                     lamtrial = fep_state-1;
921                 }
922             }
923             else
924             {
925                 if (fep_state == nlim-1) {
926                     lamtrial = fep_state;
927                 }
928                 else
929                 {
930                     lamtrial = fep_state+1;
931                 }
932             }
933
934             de = weighted_lamee[lamtrial] - weighted_lamee[fep_state];
935             if (expand->elmcmove==elmcmoveMETROPOLIS)
936             {
937                 tprob = 1.0;
938                 trialprob = exp(de);
939                 if (trialprob < tprob)
940                 {
941                     tprob = trialprob;
942                 }
943                 propose[fep_state] = 0;
944                 propose[lamtrial] = 1.0; /* note that this overwrites the above line if fep_state = lamtrial, which only occurs at the ends */
945                 accept[fep_state] = 1.0; /* doesn't actually matter, never proposed unless fep_state = lamtrial, in which case it's 1.0 anyway */
946                 accept[lamtrial] = tprob;
947
948             }
949             else if (expand->elmcmove==elmcmoveBARKER)
950             {
951                 tprob = 1.0/(1.0+exp(-de));
952
953                 propose[fep_state] = (1-tprob);
954                 propose[lamtrial] += tprob; /* we add, to account for the fact that at the end, they might be the same point */
955                 accept[fep_state] = 1.0;
956                 accept[lamtrial] = 1.0;
957             }
958
959             r2 = gmx_rng_uniform_real(rng);
960             if (r2 < tprob) {
961                 lamnew = lamtrial;
962             } else {
963                 lamnew = fep_state;
964             }
965         }
966
967         for (ifep=0;ifep<nlim;ifep++)
968         {
969             dfhist->Tij[fep_state][ifep] += propose[ifep]*accept[ifep];
970             dfhist->Tij[fep_state][fep_state] += propose[ifep]*(1.0-accept[ifep]);
971         }
972         fep_state = lamnew;
973     }
974
975     dfhist->Tij_empirical[starting_fep_state][lamnew] += 1.0;
976
977     sfree(propose);
978     sfree(accept);
979     sfree(remainder);
980
981     return lamnew;
982 }
983
984 /* print out the weights to the log, along with current state */
985 extern void PrintFreeEnergyInfoToFile(FILE *outfile, t_lambda *fep, t_expanded *expand, t_simtemp *simtemp, df_history_t *dfhist,
986                                       int nlam, int frequency, gmx_large_int_t step)
987 {
988     int nlim,i,ifep,jfep;
989     real dw,dg,dv,dm,Tprint;
990     real *temps;
991     const char *print_names[efptNR] = {" FEPL","MassL","CoulL"," VdwL","BondL","RestT","Temp.(K)"};
992     gmx_bool bSimTemp = FALSE;
993
994     nlim = fep->n_lambda;
995     if (simtemp != NULL) {
996         bSimTemp = TRUE;
997     }
998
999     if (mod(step,frequency)==0)
1000     {
1001         fprintf(outfile,"             MC-lambda information\n");
1002         if (EWL(expand->elamstats) && (!(dfhist->bEquil))) {
1003             fprintf(outfile,"  Wang-Landau incrementor is: %11.5g\n",dfhist->wl_delta);
1004         }
1005         fprintf(outfile,"  N");
1006         for (i=0;i<efptNR;i++)
1007         {
1008             if (fep->separate_dvdl[i])
1009             {
1010                 fprintf(outfile,"%7s",print_names[i]);
1011             }
1012             else if ((i == efptTEMPERATURE) && bSimTemp)
1013             {
1014                 fprintf(outfile,"%10s",print_names[i]); /* more space for temperature formats */
1015             }
1016         }
1017         fprintf(outfile,"    Count   ");
1018         if (expand->elamstats==elamstatsMINVAR)
1019         {
1020             fprintf(outfile,"W(in kT)   G(in kT)  dG(in kT)  dV(in kT)\n");
1021         }
1022         else
1023         {
1024             fprintf(outfile,"G(in kT)  dG(in kT)\n");
1025         }
1026         for (ifep=0;ifep<nlim;ifep++)
1027         {
1028             if (ifep==nlim-1)
1029             {
1030                 dw=0.0;
1031                 dg=0.0;
1032                 dv=0.0;
1033                 dm=0.0;
1034             }
1035             else
1036             {
1037                 dw = dfhist->sum_weights[ifep+1] - dfhist->sum_weights[ifep];
1038                 dg = dfhist->sum_dg[ifep+1] - dfhist->sum_dg[ifep];
1039                 dv = sqrt(pow(dfhist->sum_variance[ifep+1],2) - pow(dfhist->sum_variance[ifep],2));
1040                 dm = dfhist->sum_minvar[ifep+1] - dfhist->sum_minvar[ifep];
1041
1042             }
1043             fprintf(outfile,"%3d",(ifep+1));
1044             for (i=0;i<efptNR;i++)
1045             {
1046                 if (fep->separate_dvdl[i])
1047                 {
1048                     fprintf(outfile,"%7.3f",fep->all_lambda[i][ifep]);
1049                 } else if (i == efptTEMPERATURE && bSimTemp)
1050                 {
1051                     fprintf(outfile,"%9.3f",simtemp->temperatures[ifep]);
1052                 }
1053             }
1054             if (EWL(expand->elamstats) && (!(dfhist->bEquil)))  /* if performing WL and still haven't equilibrated */
1055             {
1056                 if (expand->elamstats == elamstatsWL)
1057                 {
1058                     fprintf(outfile," %8d",(int)dfhist->wl_histo[ifep]);
1059                 } else {
1060                     fprintf(outfile," %8.3f",dfhist->wl_histo[ifep]);
1061                 }
1062             }
1063             else   /* we have equilibrated weights */
1064             {
1065                 fprintf(outfile," %8d",dfhist->n_at_lam[ifep]);
1066             }
1067             if (expand->elamstats==elamstatsMINVAR)
1068             {
1069                 fprintf(outfile," %10.5f %10.5f %10.5f %10.5f",dfhist->sum_weights[ifep],dfhist->sum_dg[ifep],dg,dv);
1070             }
1071             else
1072             {
1073                 fprintf(outfile," %10.5f %10.5f",dfhist->sum_weights[ifep],dw);
1074             }
1075             if (ifep == nlam) {
1076                 fprintf(outfile," <<\n");
1077             }
1078             else
1079             {
1080                 fprintf(outfile,"   \n");
1081             }
1082         }
1083         fprintf(outfile,"\n");
1084
1085         if ((mod(step,expand->nstTij)==0) && (expand->nstTij > 0) && (step > 0))
1086         {
1087             fprintf(outfile,"                     Transition Matrix\n");
1088             for (ifep=0;ifep<nlim;ifep++)
1089             {
1090                 fprintf(outfile,"%12d",(ifep+1));
1091             }
1092             fprintf(outfile,"\n");
1093             for (ifep=0;ifep<nlim;ifep++)
1094             {
1095                 for (jfep=0;jfep<nlim;jfep++)
1096                 {
1097                     if (dfhist->n_at_lam[ifep] > 0)
1098                     {
1099                         if (expand->bSymmetrizedTMatrix)
1100                         {
1101                             Tprint = (dfhist->Tij[ifep][jfep]+dfhist->Tij[jfep][ifep])/(dfhist->n_at_lam[ifep]+dfhist->n_at_lam[jfep]);
1102                         } else {
1103                             Tprint = (dfhist->Tij[ifep][jfep])/(dfhist->n_at_lam[ifep]);
1104                         }
1105                     }
1106                     else
1107                     {
1108                         Tprint = 0.0;
1109                     }
1110                     fprintf(outfile,"%12.8f",Tprint);
1111                 }
1112                 fprintf(outfile,"%3d\n",(ifep+1));
1113             }
1114
1115             fprintf(outfile,"                  Empirical Transition Matrix\n");
1116             for (ifep=0;ifep<nlim;ifep++)
1117             {
1118                 fprintf(outfile,"%12d",(ifep+1));
1119             }
1120             fprintf(outfile,"\n");
1121             for (ifep=0;ifep<nlim;ifep++)
1122             {
1123                 for (jfep=0;jfep<nlim;jfep++)
1124                 {
1125                     if (dfhist->n_at_lam[ifep] > 0)
1126                     {
1127                         if (expand->bSymmetrizedTMatrix)
1128                         {
1129                             Tprint = (dfhist->Tij_empirical[ifep][jfep]+dfhist->Tij_empirical[jfep][ifep])/(dfhist->n_at_lam[ifep]+dfhist->n_at_lam[jfep]);
1130                         } else {
1131                             Tprint = dfhist->Tij_empirical[ifep][jfep]/(dfhist->n_at_lam[ifep]);
1132                         }
1133                     }
1134                     else
1135                     {
1136                         Tprint = 0.0;
1137                     }
1138                     fprintf(outfile,"%12.8f",Tprint);
1139                 }
1140                 fprintf(outfile,"%3d\n",(ifep+1));
1141             }
1142         }
1143     }
1144 }
1145
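/* Save and restore the Monte Carlo random-number-generator state in t_state, presumably so the
 * lambda-space MC stream survives checkpointing and restarts. */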
1146 extern void get_mc_state(gmx_rng_t rng,t_state *state)
1147 {
1148     gmx_rng_get_state(rng,state->mc_rng,state->mc_rngi);
1149 }
1150
1151 extern void set_mc_state(gmx_rng_t rng,t_state *state)
1152 {
1153     gmx_rng_set_state(rng,state->mc_rng,state->mc_rngi[0]);
1154 }
1155
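/* Main expanded-ensemble driver, presumably called from the MD loop at each nstexpanded interval:
 * convert the accumulated energy differences to reduced (kT) units for every lambda (including
 * the simulated-tempering temperature term when applicable), update the weight histories, pick a
 * new lambda state with ChooseNewLambda(), rescale velocities and thermostat variables if
 * simulated tempering changed the reference temperature, and update the Wang-Landau incrementor.
 * Returns the index of the new lambda state. */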
1156 extern int ExpandedEnsembleDynamics(FILE *log,t_inputrec *ir, gmx_enerdata_t *enerd,
1157                                     t_state *state, t_extmass *MassQ, df_history_t *dfhist,
1158                                     gmx_large_int_t step, gmx_rng_t mcrng,
1159                                     rvec *v, t_mdatoms *mdatoms)
1160 {
1161     real *pfep_lamee,*p_k, *scaled_lamee, *weighted_lamee;
1162     int i,nlam,nlim,lamnew,totalsamples;
1163     real oneovert,maxscaled=0,maxweighted=0;
1164     t_expanded *expand;
1165     t_simtemp *simtemp;
1166     double *temperature_lambdas;
1167     gmx_bool bIfReset,bSwitchtoOneOverT,bDoneEquilibrating=FALSE;
1168
1169     expand = ir->expandedvals;
1170     simtemp = ir->simtempvals;
1171     nlim = ir->fepvals->n_lambda;
1172     nlam = state->fep_state;
1173
1174     snew(scaled_lamee,nlim);
1175     snew(weighted_lamee,nlim);
1176     snew(pfep_lamee,nlim);
1177     snew(p_k,nlim);
1178
1179     if (expand->bInit_weights)  /* if initialized weights, we need to fill them in */
1180     {
1181         dfhist->wl_delta = expand->init_wl_delta;  /* MRS -- this would fit better somewhere else? */
1182         for (i=0;i<nlim;i++) {
1183             dfhist->sum_weights[i] = expand->init_lambda_weights[i];
1184             dfhist->sum_dg[i] = expand->init_lambda_weights[i];
1185         }
1186         expand->bInit_weights = FALSE;
1187     }
1188
1189     /* update the count at the current lambda */
1190     dfhist->n_at_lam[nlam]++;
1191
1192     /* need to calculate the PV term somewhere, but not needed here? Not until there's a lambda state that's
1193        pressure controlled.*/
1194     /*
1195       pVTerm = 0;
1196       where does this PV term go?
1197       for (i=0;i<nlim;i++)
1198       {
1199       fep_lamee[i] += pVTerm;
1200       }
1201     */
1202
1203     /* determine the maximum of the scaled and weighted energies so we can subtract it off, avoiding overflow in the exponentials */
1204     /* we don't need to include the pressure term, since the volume is the same between the two states.
1205        is there some term we are neglecting, however? */
1206
1207     if (ir->efep != efepNO)
1208     {
1209         for (i=0;i<nlim;i++)
1210         {
1211             if (ir->bSimTemp)
1212             {
1213                 /* Note -- this assumes no mass changes, since kinetic energy is not added  . . . */
1214                 scaled_lamee[i] = (enerd->enerpart_lambda[i+1]-enerd->enerpart_lambda[0])/(simtemp->temperatures[i]*BOLTZ)
1215                     + enerd->term[F_EPOT]*(1.0/(simtemp->temperatures[i])- 1.0/(simtemp->temperatures[nlam]))/BOLTZ;
1216             }
1217             else
1218             {
1219                 scaled_lamee[i] = (enerd->enerpart_lambda[i+1]-enerd->enerpart_lambda[0])/(expand->mc_temp*BOLTZ);
1220                 /* mc_temp is currently set to the system reference temperature (ref_t) unless otherwise defined */
1221             }
1222
1223             /* save these energies for printing, so they don't get overwritten by the next step */
1224             /* they aren't overwritten in the non-free energy case, but we always print with these
1225                for simplicity */
1226         }
1227     } else {
1228         if (ir->bSimTemp) {
1229             for (i=0;i<nlim;i++) {
1230                 scaled_lamee[i] = enerd->term[F_EPOT]*(1.0/simtemp->temperatures[i] - 1.0/simtemp->temperatures[nlam])/BOLTZ;
1231             }
1232         }
1233     }
1234
1235     for (i=0;i<nlim;i++)
1236     {
1237         pfep_lamee[i] = scaled_lamee[i];
1238         weighted_lamee[i] = dfhist->sum_weights[i] - scaled_lamee[i];
1239         if (i==0)
1240         {
1241             maxscaled = scaled_lamee[i];
1242             maxweighted = weighted_lamee[i];
1243         }
1244         else
1245         {
1246             if (scaled_lamee[i] > maxscaled)
1247             {
1248                 maxscaled = scaled_lamee[i];
1249             }
1250             if (weighted_lamee[i] > maxweighted)
1251             {
1252                 maxweighted = weighted_lamee[i];
1253             }
1254         }
1255     }
1256
1257     for (i=0;i<nlim;i++)
1258     {
1259         scaled_lamee[i] -= maxscaled;
1260         weighted_lamee[i] -= maxweighted;
1261     }
1262
1263     /* update weights - we decide whether or not to actually do this inside */
1264
1265     bDoneEquilibrating = UpdateWeights(nlim,expand,dfhist,nlam,scaled_lamee,weighted_lamee,step);
1266     if (bDoneEquilibrating)
1267     {
1268         if (log) {
1269             fprintf(log,"\nStep %d: Weights have equilibrated, using criteria: %s\n",(int)step,elmceq_names[expand->elmceq]);
1270         }
1271     }
1272
1273     lamnew = ChooseNewLambda(log,nlim,expand,dfhist,nlam,weighted_lamee,p_k,mcrng);
1274     /* if using simulated tempering, we need to adjust the temperatures */
1275     if (ir->bSimTemp && (lamnew != nlam)) /* only need to change the temperatures if we change the state */
1276     {
1277         int i, j, n, d;
1278         real *buf_ngtc;
1279         real told;
1280         int nstart, nend, gt;
1281
1282         snew(buf_ngtc,ir->opts.ngtc);
1283
1284         for (i=0;i<ir->opts.ngtc;i++) {
1285             if (ir->opts.ref_t[i] > 0) {
1286                 told = ir->opts.ref_t[i];
1287                 ir->opts.ref_t[i] =  simtemp->temperatures[lamnew];
1288                 buf_ngtc[i] = sqrt(ir->opts.ref_t[i]/told); /* using the buffer as temperature scaling */
1289             }
1290         }
1291
1292         /* we don't need to manipulate the ekind information, as it isn't due to be reset until the next step anyway */
1293
1294         nstart = mdatoms->start;
1295         nend   = nstart + mdatoms->homenr;
1296         for (n=nstart; n<nend; n++)
1297         {
1298             gt = 0;
1299             if (mdatoms->cTC)
1300             {
1301                 gt = mdatoms->cTC[n];
1302             }
1303             for(d=0; d<DIM; d++)
1304             {
1305                  v[n][d] *= buf_ngtc[gt];
1306             }
1307         }
1308
1309         if (IR_NPT_TROTTER(ir) || IR_NPH_TROTTER(ir) || IR_NVT_TROTTER(ir)) {
1310             /* we need to recalculate the masses if the temperature has changed */
1311             init_npt_masses(ir,state,MassQ,FALSE);
1312             for (i=0;i<state->nnhpres;i++)
1313             {
1314                 for (j=0;j<ir->opts.nhchainlength;j++)
1315                 {
1316                     state->nhpres_vxi[i+j] *= buf_ngtc[i];
1317                 }
1318             }
1319             for (i=0;i<ir->opts.ngtc;i++)
1320             {
1321                 for (j=0;j<ir->opts.nhchainlength;j++)
1322                 {
1323                     state->nosehoover_vxi[i+j] *= buf_ngtc[i];
1324                 }
1325             }
1326         }
1327         sfree(buf_ngtc);
1328     }
1329
1330     /* now check on the Wang-Landau updating criteria */
1331
1332     if (EWL(expand->elamstats))
1333     {
1334         bSwitchtoOneOverT = FALSE;
1335         if (expand->bWLoneovert) {
1336             totalsamples = 0;
1337             for (i=0;i<nlim;i++)
1338             {
1339                 totalsamples += dfhist->n_at_lam[i];
1340             }
1341             oneovert = (1.0*nlim)/totalsamples;
1342             /* oneovert has decreased by a bit since last time, so we actually make sure it is within one sample of this number */
1343             /* switch to 1/t incrementing when wl_delta has decreased at least once, and wl_delta is now less than 1/t */
1344             if ((dfhist->wl_delta <= ((totalsamples)/(totalsamples-1.00001))*oneovert) &&
1345                 (dfhist->wl_delta < expand->init_wl_delta))
1346             {
1347                 bSwitchtoOneOverT = TRUE;
1348             }
1349         }
1350         if (bSwitchtoOneOverT) {
1351             dfhist->wl_delta = oneovert; /* now we reduce by this each time, instead of only at flatness */
1352         } else {
1353             bIfReset = CheckHistogramRatios(nlim,dfhist->wl_histo,expand->wl_ratio);
1354             if (bIfReset)
1355             {
1356                 for (i=0;i<nlim;i++)
1357                 {
1358                     dfhist->wl_histo[i] = 0;
1359                 }
1360                 dfhist->wl_delta *= expand->wl_scale;
1361                 if (log) {
1362                     fprintf(log,"\nStep %d: weights are now:",(int)step);
1363                     for (i=0;i<nlim;i++)
1364                     {
1365                         fprintf(log," %.5f",dfhist->sum_weights[i]);
1366                     }
1367                     fprintf(log,"\n");
1368                 }
1369             }
1370         }
1371     }
1372     sfree(scaled_lamee);
1373     sfree(weighted_lamee);
1374     sfree(p_k);
    sfree(pfep_lamee); /* pfep_lamee is allocated above but never freed; free it here to avoid leaking it on every call */
1375
1376     return lamnew;
1377 }
1378
1379