[alexxy/gromacs.git] / src / mdlib / expanded.c
1 /* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
2  *
3  *
4  *                This source code is part of
5  *
6  *                 G   R   O   M   A   C   S
7  *
8  *          GROningen MAchine for Chemical Simulations
9  *
10  * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
11  * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
12  * Copyright (c) 2001-2012, The GROMACS development team,
13  * check out http://www.gromacs.org for more information.
14  *
15  * This program is free software; you can redistribute it and/or
16  * modify it under the terms of the GNU General Public License
17  * as published by the Free Software Foundation; either version 2
18  * of the License, or (at your option) any later version.
19  *
20  * If you want to redistribute modifications, please consider that
21  * scientific software is very special. Version control is crucial -
22  * bugs must be traceable. We will be happy to consider code for
23  * inclusion in the official distribution, but derived work must not
24  * be called official GROMACS. Details are found in the README & COPYING
25  * files - if they are missing, get the official version at www.gromacs.org.
26  *
27  * To help us fund GROMACS development, we humbly ask that you cite
28  * the papers on the package - you can find them in the top README file.
29  *
30  * For more info, check our website at http://www.gromacs.org
31  *
32  * And Hey:
33  * GROwing Monsters And Cloning Shrimps
34  */
35 #ifdef HAVE_CONFIG_H
36 #include <config.h>
37 #endif
38
39 #ifdef GMX_CRAY_XT3
40 #include <catamount/dclock.h>
41 #endif
42
43
44 #include <stdio.h>
45 #include <time.h>
46 #ifdef HAVE_SYS_TIME_H
47 #include <sys/time.h>
48 #endif
49 #include <math.h>
50 #include "typedefs.h"
51 #include "string2.h"
52 #include "gmxfio.h"
53 #include "smalloc.h"
54 #include "names.h"
55 #include "confio.h"
56 #include "mvdata.h"
57 #include "txtdump.h"
58 #include "pbc.h"
59 #include "chargegroup.h"
60 #include "vec.h"
61 #include "nrnb.h"
62 #include "mshift.h"
63 #include "mdrun.h"
64 #include "update.h"
65 #include "physics.h"
66 #include "main.h"
67 #include "mdatoms.h"
68 #include "force.h"
69 #include "bondf.h"
70 #include "pme.h"
71 #include "disre.h"
72 #include "orires.h"
73 #include "network.h"
74 #include "calcmu.h"
75 #include "constr.h"
76 #include "xvgr.h"
77 #include "trnio.h"
78 #include "xtcio.h"
79 #include "copyrite.h"
80 #include "gmx_random.h"
81 #include "domdec.h"
82 #include "partdec.h"
83 #include "gmx_wallcycle.h"
84
85 #ifdef GMX_LIB_MPI
86 #include <mpi.h>
87 #endif
88 #ifdef GMX_THREADS
89 #include "tmpi.h"
90 #endif
91
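/* Compute normalized probabilities p_k[i] proportional to exp(ene[i]) over the
   states minfep..maxfep (inclusive).  The maximum of ene[] is subtracted before
   exponentiating to avoid overflow; *pks returns the normalization sum of the
   shifted exponentials. */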
92 void GenerateGibbsProbabilities(real *ene, real *p_k, real *pks, int minfep, int maxfep) {
93
94     int i;
95     real maxene;
96
97     *pks = 0.0;
98     maxene = ene[minfep];
99     /* find the maximum value */
100     for (i=minfep;i<=maxfep;i++)
101     {
102         if (ene[i]>maxene)
103         {
104             maxene = ene[i];
105         }
106     }
107     /* find the denominator */
108     for (i=minfep;i<=maxfep;i++)
109     {
110         *pks += exp(ene[i]-maxene);
111     }
112     /*numerators*/
113     for (i=minfep;i<=maxfep;i++)
114     {
115         p_k[i] = exp(ene[i]-maxene) / *pks;
116     }
117 }
118
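/* As GenerateGibbsProbabilities, but each state is additionally weighted by its
   histogram count: p_k[i] is proportional to nvals[i]*exp(ene[i]).  For states
   with a zero count, delta is added inside the logarithm so that the argument
   stays positive; in the weighted Wang-Landau scheme below this is called with
   the Wang-Landau histogram and incrementor. */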
119 void GenerateWeightedGibbsProbabilities(real *ene, real *p_k, real *pks, int nlim, real *nvals,real delta) {
120
121     int i;
122     real maxene;
123     real *nene;
124     *pks = 0.0;
125
126     snew(nene,nlim);
127     for (i=0;i<nlim;i++) {
128         if (nvals[i] == 0) {
129             /* the count is zero: add delta so that the argument of the log stays
130                positive; delta then acts as a minimum effective count */
131             nene[i] = ene[i] + log(nvals[i]+delta);
132         } else {
133             nene[i] = ene[i] + log(nvals[i]);
134         }
135     }
136
137     /* find the maximum value */
138     maxene = nene[0];
139     for (i=0;i<nlim;i++)
140     {
141         if (nene[i] > maxene) {
142             maxene = nene[i];
143         }
144     }
145
146     /* subtract off the maximum, avoiding overflow */
147     for (i=0;i<nlim;i++)
148     {
149         nene[i] -= maxene;
150     }
151
152     /* find the denominator */
153     for (i=0;i<nlim;i++)
154     {
155         *pks += exp(nene[i]);
156     }
157
158     /*numerators*/
159     for (i=0;i<nlim;i++)
160     {
161         p_k[i] = exp(nene[i]) / *pks;
162     }
163     sfree(nene);
164 }
165
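/* Numerically stable log-sum-exp: returns log(\sum_{i=0}^{N-1} exp(a_n[i])) by
   factoring out the largest argument before exponentiating. */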
166 real do_logsum(int N, real *a_n) {
167
168     /*     RETURN VALUE */
169     /* log(\sum_{i=0}^(N-1) exp[a_n]) */
170     real maxarg;
171     real sum;
172     int i;
173     real logsum;
174     /*     compute maximum argument to exp(.) */
175
176     maxarg = a_n[0];
177     for(i=1;i<N;i++)
178     {
179         maxarg = max(maxarg,a_n[i]);
180     }
181
182     /* compute sum of exp(a_n - maxarg) */
183     sum = 0.0;
184     for (i=0;i<N;i++)
185     {
186         sum = sum + exp(a_n[i] - maxarg);
187     }
188
189     /*     compute log sum */
190     logsum = log(sum) + maxarg;
191     return logsum;
192 }
193
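/* Return the index of the smallest element of min_metric[0..N-1]. */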
194 int FindMinimum(real *min_metric, int N) {
195
196     real min_val;
197     int min_nval,nval;
198
199     min_nval = 0;
200     min_val = min_metric[0];
201
202     for (nval=0; nval<N; nval++)
203     {
204         if (min_metric[nval] < min_val)
205         {
206             min_val = min_metric[nval];
207             min_nval = nval;
208         }
209     }
210     return min_nval;
211 }
212
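/* Flatness test for a histogram: returns TRUE only if every bin lies within a
   factor of the mean, i.e. ratio < histo[i]/mean < 1/ratio for all i, and FALSE
   if there are no samples at all. */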
213 static gmx_bool CheckHistogramRatios(int nhisto, real *histo, real ratio)
214 {
215
216     int i;
217     real nmean;
218     gmx_bool bIfFlat;
219
220     nmean = 0;
221     for (i=0;i<nhisto;i++)
222     {
223         nmean += histo[i];
224     }
225
226     if (nmean == 0)
227     {
228         /* no samples! is bad!*/
229         bIfFlat = FALSE;
230         return bIfFlat;
231     }
232     nmean /= (real)nhisto;
233
234     bIfFlat = TRUE;
235     for (i=0;i<nhisto;i++)
236     {
237         /* make sure that every bin satisfies ratio < histo[i]/nmean < 1/ratio */
238         if (!((histo[i]/nmean < 1.0/ratio) && (histo[i]/nmean > ratio)))
239         {
240             bIfFlat = FALSE;
241             break;
242         }
243     }
244     return bIfFlat;
245 }
246
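/* Decide whether the expanded-ensemble weights can be considered equilibrated,
   according to the chosen criterion (expand->elmceq): never/always, a number of
   steps or total samples, a minimum number of samples at each lambda, a small
   enough Wang-Landau incrementor, or a flat enough histogram.  In addition, any
   forced initial sweep (lmc_forced_nstart) must have been completed. */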
247 static gmx_bool CheckIfDoneEquilibrating(int nlim, t_expanded *expand, df_history_t *dfhist, gmx_large_int_t step)
248 {
249
250     int i,totalsamples;
251     gmx_bool bDoneEquilibrating=TRUE;
252     gmx_bool bIfFlat;
253
254     /* assume we have equilibrated the weights, then check to see if any of the conditions are not met */
255
256     /* calculate the total number of samples */
257     switch (expand->elmceq)
258     {
259     case elmceqNO:
260         /* We have not equilibrated, and won't, ever. */
261         return FALSE;
262     case elmceqYES:
263         /* we have equilibrated -- we're done */
264         return TRUE;
265     case elmceqSTEPS:
266         /* first, check if we are equilibrating by steps, if we're still under */
267         if (step < expand->equil_steps)
268         {
269             bDoneEquilibrating = FALSE;
270         }
271         break;
272     case elmceqSAMPLES:
273         totalsamples = 0;
274         for (i=0;i<nlim;i++)
275         {
276             totalsamples += dfhist->n_at_lam[i];
277         }
278         if (totalsamples < expand->equil_samples)
279         {
280             bDoneEquilibrating = FALSE;
281         }
282         break;
283     case elmceqNUMATLAM:
284         for (i=0;i<nlim;i++)
285         {
286             if (dfhist->n_at_lam[i] < expand->equil_n_at_lam) /* we have not yet collected enough samples at
287                                                                  this lambda, so we're not done equilibrating */
288             {
289                 bDoneEquilibrating  = FALSE;
290                 break;
291             }
292         }
293         break;
294     case elmceqWLDELTA:
295         if (EWL(expand->elamstats)) /* This check is in readir as well, but
296                                     just to be sure */
297         {
298             if (dfhist->wl_delta > expand->equil_wl_delta)
299             {
300                 bDoneEquilibrating = FALSE;
301             }
302         }
303         break;
304     case elmceqRATIO:
305         /* we can use the flatness as a judge of good weights, as long as
306            we're not doing minvar, or Wang-Landau.
307            But turn off for now until we figure out exactly how we do this.
308         */
309
310         if (!(EWL(expand->elamstats) || expand->elamstats==elamstatsMINVAR))
311         {
312             /* we want to judge flatness -excluding- the samples forced through at the start.  Also, the
313                histogram function takes reals, so convert the integer counts. */
314
315             real *modhisto;
316             snew(modhisto,nlim);
317             for (i=0;i<nlim;i++)
318             {
319                 modhisto[i] = 1.0*(dfhist->n_at_lam[i]-expand->lmc_forced_nstart);
320             }
321             bIfFlat = CheckHistogramRatios(nlim,modhisto,expand->equil_ratio);
322             sfree(modhisto);
323             if (!bIfFlat)
324             {
325                 bDoneEquilibrating = FALSE;
326             }
327             }
        break;
328     default:
329         bDoneEquilibrating = TRUE;
330     }
331     /* one last case to go through: if we are still forcing samples through the initial sweep to get starting values, we haven't finished equilibrating */
332
333     if (expand->lmc_forced_nstart > 0)
334     {
335         for (i=0;i<nlim;i++)
336         {
337             if (dfhist->n_at_lam[i] < expand->lmc_forced_nstart) /* we are still doing the initial sweep, so we're definitely not
338                                                                     done equilibrating*/
339             {
340                 bDoneEquilibrating = FALSE;
341                 break;
342             }
343         }
344     }
345     return bDoneEquilibrating;
346 }
347
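/* Update the estimated lambda weights with the data from the current step, using the
   selected scheme: standard or weighted Wang-Landau (histogram-based incrementing of
   sum_weights), or the Barker/Metropolis/minimum-variance schemes that accumulate
   acceptance-function averages and re-estimate the free energy differences between
   neighboring states.  Returns TRUE on the step at which the weights are first found
   to be equilibrated, FALSE otherwise. */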
348 static gmx_bool UpdateWeights(int nlim, t_expanded *expand, df_history_t *dfhist,
349                               int fep_state, real *scaled_lamee, real *weighted_lamee, gmx_large_int_t step)
350 {
351     real maxdiff = 0.000000001;
352     gmx_bool bSufficientSamples;
353     int i, k, n, nz, indexi, indexk, min_n, max_n, nlam, totali;
354     int n0,np1,nm1,nval,min_nvalm,min_nvalp,maxc;
355     real omega_m1_0,omega_p1_m1,omega_m1_p1,omega_p1_0,clam_osum;
356     real de,de_function,dr,denom,maxdr,pks=0;
357     real min_val,cnval,zero_sum_weights;
358     real *omegam_array, *weightsm_array, *omegap_array, *weightsp_array, *varm_array, *varp_array, *dwp_array, *dwm_array;
359     real clam_varm, clam_varp, clam_weightsm, clam_weightsp, clam_minvar;
360     real *lam_weights, *lam_minvar_corr, *lam_variance, *lam_dg, *p_k;
361     real *numweighted_lamee, *logfrac;
362     int *nonzero;
363     real chi_m1_0,chi_p1_0,chi_m2_0,chi_p2_0,chi_p1_m1,chi_p2_m1,chi_m1_p1,chi_m2_p1;
364
365     /* if we have equilibrated the weights, exit now */
366     if (dfhist->bEquil)
367     {
368         return FALSE;
369     }
370
371     if (CheckIfDoneEquilibrating(nlim,expand,dfhist,step))
372     {
373         dfhist->bEquil = TRUE;
374         /* zero out the visited-state counts, so that from here on they only count
375            samples collected with equilibrated weights */
376         for (i=0;i<nlim;i++)
377         {
378             dfhist->n_at_lam[i] = 0;
379         }
380         return TRUE;
381     }
382
383     /* If we reached this far, we have not equilibrated yet; keep on
384        updating the weights */
385
386     if (EWL(expand->elamstats))
387     {
388         if (expand->elamstats==elamstatsWL)  /* Standard Wang-Landau */
389         {
390             dfhist->sum_weights[fep_state] -= dfhist->wl_delta;
391             dfhist->wl_histo[fep_state] += 1.0;
392         }
393         else if (expand->elamstats==elamstatsWWL) /* Weighted Wang-Landau */
394         {
395             snew(p_k,nlim);
396
397             /* first increment count */
398             GenerateGibbsProbabilities(weighted_lamee,p_k,&pks,0,nlim-1);
399             for (i=0;i<nlim;i++) {
400                 dfhist->wl_histo[i] += p_k[i];
401             }
402
403             /* then increment weights (uses count) */
404             pks = 0.0;
405             GenerateWeightedGibbsProbabilities(weighted_lamee,p_k,&pks,nlim,dfhist->wl_histo,dfhist->wl_delta);
406
407             for (i=0;i<nlim;i++)
408             {
409                 dfhist->sum_weights[i] -= dfhist->wl_delta*p_k[i];
410             }
411             /* Alternate definition, using logarithms. Shouldn't make very much difference! */
412             /*
413               real di;
414               for (i=0;i<nlim;i++)
415               {
416                 di = 1+dfhist->wl_delta*p_k[i];
417                 dfhist->sum_weights[i] -= log(di);
418               }
419             */
420             sfree(p_k);
421         }
422
423         zero_sum_weights =  dfhist->sum_weights[0];
424         for (i=0;i<nlim;i++)
425         {
426             dfhist->sum_weights[i] -= zero_sum_weights;
427         }
428     }
429
430     if (expand->elamstats==elamstatsBARKER || expand->elamstats==elamstatsMETROPOLIS || expand->elamstats==elamstatsMINVAR) {
431
432         de_function = 0;  /* to get rid of warnings, but this value will not be used because of the logic */
433         maxc = 2*expand->c_range+1;
434
435         snew(lam_dg,nlim);
436         snew(lam_variance,nlim);
437
438         snew(omegap_array,maxc);
439         snew(weightsp_array,maxc);
440         snew(varp_array,maxc);
441         snew(dwp_array,maxc);
442
443         snew(omegam_array,maxc);
444         snew(weightsm_array,maxc);
445         snew(varm_array,maxc);
446         snew(dwm_array,maxc);
447
448         /* unpack the current free energy differences between adjacent lambdas -- we will only update 2 of these */
449
450         for (i=0;i<nlim-1;i++)
451         { /* only through the second to last */
452             lam_dg[i] = dfhist->sum_dg[i+1] - dfhist->sum_dg[i];
453             lam_variance[i] = pow(dfhist->sum_variance[i+1],2) - pow(dfhist->sum_variance[i],2);
454         }
455
456         /* accumulate running averages */
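        /* For each trial constant C = cnval in [-c_range, c_range], accumulate the running
           average of the acceptance function for moves to the neighboring states (Barker/minvar:
           1/(1+de), Metropolis: min(1, 1/de), with de the exponential computed just below),
           together with its square for variance estimates.  These averages give, for each C,
           an estimate of the free energy difference to each neighbor; further below, the C
           whose estimate is closest to the currently stored difference is selected. */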
457         for (nval = 0; nval<maxc; nval++)
458         {
459             /* constants for later use */
460             cnval = (real)(nval-expand->c_range);
461             /* actually, should be able to rewrite it w/o exponential, for better numerical stability */
462             if (fep_state > 0)
463             {
464                 de = exp(cnval - (scaled_lamee[fep_state]-scaled_lamee[fep_state-1]));
465                 if (expand->elamstats==elamstatsBARKER || expand->elamstats==elamstatsMINVAR)
466                 {
467                     de_function = 1.0/(1.0+de);
468                 }
469                 else if (expand->elamstats==elamstatsMETROPOLIS)
470                 {
471                     if (de < 1.0)
472                     {
473                         de_function = 1.0;
474                     }
475                     else
476                     {
477                         de_function = 1.0/de;
478                     }
479                 }
480                 dfhist->accum_m[fep_state][nval] += de_function;
481                 dfhist->accum_m2[fep_state][nval] += de_function*de_function;
482             }
483
484             if (fep_state < nlim-1)
485             {
486                 de = exp(-cnval + (scaled_lamee[fep_state+1]-scaled_lamee[fep_state]));
487                 if (expand->elamstats==elamstatsBARKER || expand->elamstats==elamstatsMINVAR)
488                 {
489                     de_function = 1.0/(1.0+de);
490                 }
491                 else if (expand->elamstats==elamstatsMETROPOLIS)
492                 {
493                     if (de < 1.0)
494                     {
495                         de_function = 1.0;
496                     }
497                     else
498                     {
499                         de_function = 1.0/de;
500                     }
501                 }
502                 dfhist->accum_p[fep_state][nval] += de_function;
503                 dfhist->accum_p2[fep_state][nval] += de_function*de_function;
504             }
505
506             /* Metropolis transition and Barker transition (unoptimized Bennett) acceptance weight determination */
507
508             n0  = dfhist->n_at_lam[fep_state];
509             if (fep_state > 0) {nm1 = dfhist->n_at_lam[fep_state-1];} else {nm1 = 0;}
510             if (fep_state < nlim-1) {np1 = dfhist->n_at_lam[fep_state+1];} else {np1 = 0;}
511
512             /* the branches below should always set these before they are used, but the compiler cannot verify that, so initialize them to avoid warnings. */
513             chi_m1_0=chi_p1_0=chi_m2_0=chi_p2_0=chi_p1_m1=chi_p2_m1=chi_m1_p1=chi_m2_p1=0;
514
515             if (n0 > 0)
516             {
517                 chi_m1_0 = dfhist->accum_m[fep_state][nval]/n0;
518                 chi_p1_0 = dfhist->accum_p[fep_state][nval]/n0;
519                 chi_m2_0 = dfhist->accum_m2[fep_state][nval]/n0;
520                 chi_p2_0 = dfhist->accum_p2[fep_state][nval]/n0;
521             }
522
523             if ((fep_state > 0 ) && (nm1 > 0))
524             {
525                 chi_p1_m1 = dfhist->accum_p[fep_state-1][nval]/nm1;
526                 chi_p2_m1 = dfhist->accum_p2[fep_state-1][nval]/nm1;
527             }
528
529             if ((fep_state < nlim-1) && (np1 > 0))
530             {
531                 chi_m1_p1 = dfhist->accum_m[fep_state+1][nval]/np1;
532                 chi_m2_p1 = dfhist->accum_m2[fep_state+1][nval]/np1;
533             }
534
535             omega_m1_0 = 0;
536             omega_p1_0 = 0;
537             clam_weightsm = 0;
538             clam_weightsp = 0;
539             clam_varm = 0;
540             clam_varp = 0;
541
542             if (fep_state > 0)
543             {
544                 if (n0 > 0)
545                 {
546                     omega_m1_0 = chi_m2_0/(chi_m1_0*chi_m1_0) - 1.0;
547                 }
548                 if (nm1 > 0)
549                 {
550                     omega_p1_m1 = chi_p2_m1/(chi_p1_m1*chi_p1_m1) - 1.0;
551                 }
552                 if ((n0 > 0) && (nm1 > 0))
553                 {
554                     clam_weightsm = (log(chi_m1_0) - log(chi_p1_m1)) + cnval;
555                     clam_varm = (1.0/n0)*(omega_m1_0) + (1.0/nm1)*(omega_p1_m1);
556                 }
557             }
558
559             if (fep_state < nlim-1)
560             {
561                 if (n0 > 0)
562                 {
563                     omega_p1_0 = chi_p2_0/(chi_p1_0*chi_p1_0) - 1.0;
564                 }
565                 if (np1 > 0)
566                 {
567                     omega_m1_p1 = chi_m2_p1/(chi_m1_p1*chi_m1_p1) - 1.0;
568                 }
569                 if ((n0 > 0) && (np1 > 0))
570                 {
571                     clam_weightsp = (log(chi_m1_p1) - log(chi_p1_0)) + cnval;
572                     clam_varp = (1.0/np1)*(omega_m1_p1) + (1.0/n0)*(omega_p1_0);
573                 }
574             }
575
576             if (n0 > 0)
577             {
578                 omegam_array[nval]             = omega_m1_0;
579             }
580             else
581             {
582                 omegam_array[nval]             = 0;
583             }
584             weightsm_array[nval]           = clam_weightsm;
585             varm_array[nval]               = clam_varm;
586             if (nm1 > 0)
587             {
588                 dwm_array[nval]  = fabs( (cnval + log((1.0*n0)/nm1)) - lam_dg[fep_state-1] );
589             }
590             else
591             {
592                 dwm_array[nval]  = fabs( cnval - lam_dg[fep_state-1] );
593             }
594
595             if (n0 > 0)
596             {
597                 omegap_array[nval]             = omega_p1_0;
598             }
599             else
600             {
601                 omegap_array[nval]             = 0;
602             }
603             weightsp_array[nval]           = clam_weightsp;
604             varp_array[nval]               = clam_varp;
605             if ((np1 > 0) && (n0 > 0))
606             {
607                 dwp_array[nval]  = fabs( (cnval + log((1.0*np1)/n0)) - lam_dg[fep_state] );
608             }
609             else
610             {
611                 dwp_array[nval]  = fabs( cnval - lam_dg[fep_state] );
612             }
613
614         }
615
616         /* pick, for each direction, the C whose free energy estimate is closest to the currently stored weight difference */
617
618         min_nvalm = FindMinimum(dwm_array,maxc);
619         omega_m1_0    = omegam_array[min_nvalm];
620         clam_weightsm = weightsm_array[min_nvalm];
621         clam_varm     = varm_array[min_nvalm];
622
623         min_nvalp = FindMinimum(dwp_array,maxc);
624         omega_p1_0    = omegap_array[min_nvalp];
625         clam_weightsp = weightsp_array[min_nvalp];
626         clam_varp     = varp_array[min_nvalp];
627
628         clam_osum = omega_m1_0 + omega_p1_0;
629         clam_minvar = 0;
630         if (clam_osum > 0)
631         {
632             clam_minvar = 0.5*log(clam_osum);
633         }
634
635         if (fep_state > 0)
636         {
637             lam_dg[fep_state-1] = clam_weightsm;
638             lam_variance[fep_state-1] = clam_varm;
639         }
640
641         if (fep_state < nlim-1)
642         {
643             lam_dg[fep_state] = clam_weightsp;
644             lam_variance[fep_state] = clam_varp;
645         }
646
647         if (expand->elamstats==elamstatsMINVAR)
648         {
649             bSufficientSamples = TRUE;
650             /* make sure they are all past a threshold */
651             for (i=0;i<nlim;i++)
652             {
653                 if (dfhist->n_at_lam[i] < expand->minvarmin)
654                 {
655                     bSufficientSamples = FALSE;
656                 }
657             }
658             if (bSufficientSamples)
659             {
660                 dfhist->sum_minvar[fep_state] = clam_minvar;
661                 if (fep_state==0)
662                 {
663                     for (i=0;i<nlim;i++)
664                     {
665                         dfhist->sum_minvar[i]+=(expand->minvar_const-clam_minvar);
666                     }
667                     expand->minvar_const = clam_minvar;
668                     dfhist->sum_minvar[fep_state] = 0.0;
669                 }
670                 else
671                 {
672                     dfhist->sum_minvar[fep_state] -= expand->minvar_const;
673                 }
674             }
675         }
676
677         /* we need to rezero minvar now, since it could change at fep_state = 0 */
678         dfhist->sum_dg[0] = 0.0;
679         dfhist->sum_variance[0] = 0.0;
680         dfhist->sum_weights[0] = dfhist->sum_dg[0] + dfhist->sum_minvar[0]; /* should be zero */
681
682         for (i=1;i<nlim;i++)
683         {
684             dfhist->sum_dg[i] = lam_dg[i-1] + dfhist->sum_dg[i-1];
685             dfhist->sum_variance[i] = sqrt(lam_variance[i-1] + pow(dfhist->sum_variance[i-1],2));
686             dfhist->sum_weights[i] = dfhist->sum_dg[i] + dfhist->sum_minvar[i];
687         }
688
689         sfree(lam_dg);
690         sfree(lam_variance);
691
692         sfree(omegam_array);
693         sfree(weightsm_array);
694         sfree(varm_array);
695         sfree(dwm_array);
696
697         sfree(omegap_array);
698         sfree(weightsp_array);
699         sfree(varp_array);
700         sfree(dwp_array);
701     }
702     return FALSE;
703 }
704
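/* Pick the next lambda state given the current weighted (dimensionless) energies.
   Depending on the move type this uses: a forced march through the states during the
   initial lmc_forced_nstart sweep, Gibbs sampling over a (possibly restricted) range
   of states, metropolized Gibbs sampling, or a nearest-neighbor Metropolis/Barker
   move.  Proposal and acceptance probabilities are accumulated into dfhist->Tij, and
   the move is repeated lmc_repeats times before the final state is returned. */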
705 static int ChooseNewLambda(FILE *log, int nlim, t_expanded *expand, df_history_t *dfhist, int fep_state, real *weighted_lamee, real *p_k, gmx_rng_t rng)
706 {
707     /* Choose new lambda value, and update transition matrix */
708
709     int i,ifep,jfep,minfep,maxfep,lamnew,lamtrial,starting_fep_state;
710     real r1,r2,pks,de_old,de_new,de,trialprob,tprob=0;
711     real **Tij;
712     real *propose,*accept,*remainder;
713     real sum,pnorm;
714     gmx_bool bRestricted;
715
716     starting_fep_state = fep_state;
717     lamnew = fep_state; /* so that there is a default setting -- stays the same */
718
719     if (!EWL(expand->elamstats))   /* the forced initial sweep below is only used for non-Wang-Landau methods */
720     {
721         if ((expand->lmc_forced_nstart > 0) && (dfhist->n_at_lam[nlim-1] <= expand->lmc_forced_nstart))
722         {
723             /* Use a marching method to run through the lambdas and get preliminary free energy data,
724                before starting 'free' sampling.  We start free sampling when we have enough at each lambda */
725
726             /* if we have enough at this lambda, move on to the next one */
727
728             if (dfhist->n_at_lam[fep_state] == expand->lmc_forced_nstart)
729             {
730                 lamnew = fep_state+1;
731                 if (lamnew == nlim)  /* whoops, stepped too far! */
732                 {
733                     lamnew -= 1;
734                 }
735             }
736             else
737             {
738                 lamnew = fep_state;
739             }
740             return lamnew;
741         }
742     }
743
744     snew(propose,nlim);
745     snew(accept,nlim);
746     snew(remainder,nlim);
747
748     for (i=0;i<expand->lmc_repeats;i++)
749     {
750
751         for(ifep=0;ifep<nlim;ifep++)
752         {
753             propose[ifep] = 0;
754             accept[ifep] = 0;
755         }
756
757         if ((expand->elmcmove==elmcmoveGIBBS) || (expand->elmcmove==elmcmoveMETGIBBS))
758         {
759             bRestricted = TRUE;
760             /* use the Gibbs sampler, with restricted range */
761             if (expand->gibbsdeltalam < 0)
762             {
763                 minfep = 0;
764                 maxfep = nlim-1;
765                 bRestricted = FALSE;
766             }
767             else
768             {
769                 minfep = fep_state - expand->gibbsdeltalam;
770                 maxfep = fep_state + expand->gibbsdeltalam;
771                 if (minfep < 0)
772                 {
773                     minfep = 0;
774                 }
775                 if (maxfep > nlim-1)
776                 {
777                     maxfep = nlim-1;
778                 }
779             }
780
781             GenerateGibbsProbabilities(weighted_lamee,p_k,&pks,minfep,maxfep);
782
783             if (expand->elmcmove == elmcmoveGIBBS)
784             {
785                 for (ifep=minfep;ifep<=maxfep;ifep++)
786                 {
787                     propose[ifep] = p_k[ifep];
788                     accept[ifep] = 1.0;
789                 }
790                 /* Gibbs sampling */
791                 r1 = gmx_rng_uniform_real(rng);
792                 for (lamnew=minfep;lamnew<=maxfep;lamnew++)
793                 {
794                     if (r1 <= p_k[lamnew])
795                     {
796                         break;
797                     }
798                     r1 -= p_k[lamnew];
799                 }
800             }
801             else if (expand->elmcmove==elmcmoveMETGIBBS)
802             {
803
804                 /* Metropolized Gibbs sampling */
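                /* Propose a state j != fep_state with probability p_j/(1 - p_i), and accept
                   it with probability min(1, (1 - p_i)/(1 - p_j)), where i is the current
                   state.  This metropolization of the Gibbs sampler is intended to reduce
                   the probability of staying in the current state relative to plain Gibbs
                   sampling. */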
805                 for (ifep=minfep;ifep<=maxfep;ifep++)
806                 {
807                     remainder[ifep] = 1 - p_k[ifep];
808                 }
809
810                 /* find the proposal probabilities */
811
812                 if (remainder[fep_state] == 0) {
813                     /* only the current state has any probability */
814                     /* we have to stay at the current state */
815                     lamnew=fep_state;
816                 } else {
817                     for (ifep=minfep;ifep<=maxfep;ifep++)
818                     {
819                         if (ifep != fep_state)
820                         {
821                             propose[ifep] = p_k[ifep]/remainder[fep_state];
822                         }
823                         else
824                         {
825                             propose[ifep] = 0;
826                         }
827                     }
828
829                     r1 = gmx_rng_uniform_real(rng);
830                     for (lamtrial=minfep;lamtrial<=maxfep;lamtrial++)
831                     {
832                         pnorm = p_k[lamtrial]/remainder[fep_state];
833                         if (lamtrial!=fep_state)
834                         {
835                             if (r1 <= pnorm)
836                             {
837                                 break;
838                             }
839                             r1 -= pnorm;
840                         }
841                     }
842
843                     /* we have now selected lamtrial according to p(lamtrial)/(1-p(fep_state)) */
844                     tprob = 1.0;
845                     /* trial acceptance probability is min{1, (1-p(old))/(1-p(new))} MRS 1/8/2008 */
846                     trialprob = (remainder[fep_state])/(remainder[lamtrial]);
847                     if (trialprob < tprob)
848                     {
849                         tprob = trialprob;
850                     }
851                     r2 = gmx_rng_uniform_real(rng);
852                     if (r2 < tprob)
853                     {
854                         lamnew = lamtrial;
855                     }
856                     else
857                     {
858                         lamnew = fep_state;
859                     }
860                 }
861
862                 /* now figure out the acceptance probability for each */
863                 for (ifep=minfep;ifep<=maxfep;ifep++)
864                 {
865                     tprob = 1.0;
866                     if (remainder[ifep] != 0) {
867                         trialprob = (remainder[fep_state])/(remainder[ifep]);
868                     }
869                     else
870                     {
871                         trialprob = 1.0; /* this state is the only choice! */
872                     }
873                     if (trialprob < tprob)
874                     {
875                         tprob = trialprob;
876                     }
877                     /* this also sets a value for ifep == fep_state, whose proposal probability is zero, but that's fine -- it's never proposed! */
878                     accept[ifep] = tprob;
879                 }
880             }
881
882             if (lamnew > maxfep)
883             {
884                 /* it's possible some rounding is failing */
885                 if (remainder[fep_state] < 2.0e-15)
886                 {
887                     /* probably numerical rounding error -- no state other than the original has weight */
888                     lamnew = fep_state;
889                 }
890                 else
891                 {
892                     /* probably not a numerical issue */
893                     int loc=0;
894                     int nerror = 200+(maxfep-minfep+1)*60;
895                     char *errorstr;
896                     snew(errorstr,nerror);
897                     /* if it's greater than maxfep, then something went wrong -- probably underflow in the calculation
898                        of sum weights. Generate detailed info for the failure */
899                     loc += sprintf(errorstr,"Something wrong in choosing new lambda state with a Gibbs move -- probably underflow in weight determination.\nDenominator is: %3d%17.10e\n  i                dE        numerator          weights\n",0,pks);
900                     for (ifep=minfep;ifep<=maxfep;ifep++)
901                     {
902                         loc += sprintf(&errorstr[loc],"%3d %17.10e%17.10e%17.10e\n",ifep,weighted_lamee[ifep],p_k[ifep],dfhist->sum_weights[ifep]);
903                     }
904                     gmx_fatal(FARGS,errorstr);
905                 }
906             }
907         }
908         else if ((expand->elmcmove==elmcmoveMETROPOLIS) || (expand->elmcmove==elmcmoveBARKER))
909         {
910             /* use the metropolis sampler with trial +/- 1 */
911             r1 = gmx_rng_uniform_real(rng);
912             if (r1 < 0.5)
913             {
914                 if (fep_state == 0) {
915                     lamtrial = fep_state;
916                 }
917                 else
918                 {
919                     lamtrial = fep_state-1;
920                 }
921             }
922             else
923             {
924                 if (fep_state == nlim-1) {
925                     lamtrial = fep_state;
926                 }
927                 else
928                 {
929                     lamtrial = fep_state+1;
930                 }
931             }
932
933             de = weighted_lamee[lamtrial] - weighted_lamee[fep_state];
934             if (expand->elmcmove==elmcmoveMETROPOLIS)
935             {
936                 tprob = 1.0;
937                 trialprob = exp(de);
938                 if (trialprob < tprob)
939                 {
940                     tprob = trialprob;
941                 }
942                 propose[fep_state] = 0;
943                 propose[lamtrial] = 1.0; /* note that this overwrites the above line if fep_state = lamtrial, which only occurs at the ends */
944                 accept[fep_state] = 1.0; /* doesn't actually matter, never proposed unless fep_state = lamtrial, in which case it's 1.0 anyway */
945                 accept[lamtrial] = tprob;
946
947             }
948             else if (expand->elmcmove==elmcmoveBARKER)
949             {
950                 tprob = 1.0/(1.0+exp(-de));
951
952                 propose[fep_state] = (1-tprob);
953                 propose[lamtrial] += tprob; /* we add, since at the end states (first or last) lamtrial can equal fep_state */
954                 accept[fep_state] = 1.0;
955                 accept[lamtrial] = 1.0;
956             }
957
958             r2 = gmx_rng_uniform_real(rng);
959             if (r2 < tprob) {
960                 lamnew = lamtrial;
961             } else {
962                 lamnew = fep_state;
963             }
964         }
965
966         for (ifep=0;ifep<nlim;ifep++)
967         {
968             dfhist->Tij[fep_state][ifep] += propose[ifep]*accept[ifep];
969             dfhist->Tij[fep_state][fep_state] += propose[ifep]*(1.0-accept[ifep]);
970         }
971         fep_state = lamnew;
972     }
973
974     dfhist->Tij_empirical[starting_fep_state][lamnew] += 1.0;
975
976     sfree(propose);
977     sfree(accept);
978     sfree(remainder);
979
980     return lamnew;
981 }
982
983 /* print out the weights to the log, along with current state */
984 extern void PrintFreeEnergyInfoToFile(FILE *outfile, t_lambda *fep, t_expanded *expand, t_simtemp *simtemp, df_history_t *dfhist,
985                                       int nlam, int frequency, gmx_large_int_t step)
986 {
987     int nlim,i,ifep,jfep;
988     real dw,dg,dv,dm,Tprint;
989     real *temps;
990     const char *print_names[efptNR] = {" FEPL","MassL","CoulL"," VdwL","BondL","RestT","Temp.(K)"};
991     gmx_bool bSimTemp = FALSE;
992
993     nlim = fep->n_lambda;
994     if (simtemp != NULL) {
995         bSimTemp = TRUE;
996     }
997
998     if (mod(step,frequency)==0)
999     {
1000         fprintf(outfile,"             MC-lambda information\n");
1001         if (EWL(expand->elamstats) && (!(dfhist->bEquil))) {
1002             fprintf(outfile,"  Wang-Landau incrementor is: %11.5g\n",dfhist->wl_delta);
1003         }
1004         fprintf(outfile,"  N");
1005         for (i=0;i<efptNR;i++)
1006         {
1007             if (fep->separate_dvdl[i])
1008             {
1009                 fprintf(outfile,"%7s",print_names[i]);
1010             }
1011             else if ((i == efptTEMPERATURE) && bSimTemp)
1012             {
1013                 fprintf(outfile,"%10s",print_names[i]); /* more space for temperature formats */
1014             }
1015         }
1016         fprintf(outfile,"    Count   ");
1017         if (expand->elamstats==elamstatsMINVAR)
1018         {
1019             fprintf(outfile,"W(in kT)   G(in kT)  dG(in kT)  dV(in kT)\n");
1020         }
1021         else
1022         {
1023             fprintf(outfile,"G(in kT)  dG(in kT)\n");
1024         }
1025         for (ifep=0;ifep<nlim;ifep++)
1026         {
1027             if (ifep==nlim-1)
1028             {
1029                 dw=0.0;
1030                 dg=0.0;
1031                 dv=0.0;
1032                 dm=0.0;
1033             }
1034             else
1035             {
1036                 dw = dfhist->sum_weights[ifep+1] - dfhist->sum_weights[ifep];
1037                 dg = dfhist->sum_dg[ifep+1] - dfhist->sum_dg[ifep];
1038                 dv = sqrt(pow(dfhist->sum_variance[ifep+1],2) - pow(dfhist->sum_variance[ifep],2));
1039                 dm = dfhist->sum_minvar[ifep+1] - dfhist->sum_minvar[ifep];
1040
1041             }
1042             fprintf(outfile,"%3d",(ifep+1));
1043             for (i=0;i<efptNR;i++)
1044             {
1045                 if (fep->separate_dvdl[i])
1046                 {
1047                     fprintf(outfile,"%7.3f",fep->all_lambda[i][ifep]);
1048                 } else if (i == efptTEMPERATURE && bSimTemp)
1049                 {
1050                     fprintf(outfile,"%9.3f",simtemp->temperatures[ifep]);
1051                 }
1052             }
1053             if (EWL(expand->elamstats) && (!(dfhist->bEquil)))  /* if performing WL and still haven't equilibrated */
1054             {
1055                 if (expand->elamstats == elamstatsWL)
1056                 {
1057                     fprintf(outfile," %8d",(int)dfhist->wl_histo[ifep]);
1058                 } else {
1059                     fprintf(outfile," %8.3f",dfhist->wl_histo[ifep]);
1060                 }
1061             }
1062             else   /* we have equilibrated weights */
1063             {
1064                 fprintf(outfile," %8d",dfhist->n_at_lam[ifep]);
1065             }
1066             if (expand->elamstats==elamstatsMINVAR)
1067             {
1068                 fprintf(outfile," %10.5f %10.5f %10.5f %10.5f",dfhist->sum_weights[ifep],dfhist->sum_dg[ifep],dg,dv);
1069             }
1070             else
1071             {
1072                 fprintf(outfile," %10.5f %10.5f",dfhist->sum_weights[ifep],dw);
1073             }
1074             if (ifep == nlam) {
1075                 fprintf(outfile," <<\n");
1076             }
1077             else
1078             {
1079                 fprintf(outfile,"   \n");
1080             }
1081         }
1082         fprintf(outfile,"\n");
1083
1084         if ((mod(step,expand->nstTij)==0) && (expand->nstTij > 0) && (step > 0))
1085         {
1086             fprintf(outfile,"                     Transition Matrix\n");
1087             for (ifep=0;ifep<nlim;ifep++)
1088             {
1089                 fprintf(outfile,"%12d",(ifep+1));
1090             }
1091             fprintf(outfile,"\n");
1092             for (ifep=0;ifep<nlim;ifep++)
1093             {
1094                 for (jfep=0;jfep<nlim;jfep++)
1095                 {
1096                     if (dfhist->n_at_lam[ifep] > 0)
1097                     {
1098                         if (expand->bSymmetrizedTMatrix)
1099                         {
1100                             Tprint = (dfhist->Tij[ifep][jfep]+dfhist->Tij[jfep][ifep])/(dfhist->n_at_lam[ifep]+dfhist->n_at_lam[jfep]);
1101                         } else {
1102                             Tprint = (dfhist->Tij[ifep][jfep])/(dfhist->n_at_lam[ifep]);
1103                         }
1104                     }
1105                     else
1106                     {
1107                         Tprint = 0.0;
1108                     }
1109                     fprintf(outfile,"%12.8f",Tprint);
1110                 }
1111                 fprintf(outfile,"%3d\n",(ifep+1));
1112             }
1113
1114             fprintf(outfile,"                  Empirical Transition Matrix\n");
1115             for (ifep=0;ifep<nlim;ifep++)
1116             {
1117                 fprintf(outfile,"%12d",(ifep+1));
1118             }
1119             fprintf(outfile,"\n");
1120             for (ifep=0;ifep<nlim;ifep++)
1121             {
1122                 for (jfep=0;jfep<nlim;jfep++)
1123                 {
1124                     if (dfhist->n_at_lam[ifep] > 0)
1125                     {
1126                         if (expand->bSymmetrizedTMatrix)
1127                         {
1128                             Tprint = (dfhist->Tij_empirical[ifep][jfep]+dfhist->Tij_empirical[jfep][ifep])/(dfhist->n_at_lam[ifep]+dfhist->n_at_lam[jfep]);
1129                         } else {
1130                             Tprint = dfhist->Tij_empirical[ifep][jfep]/(dfhist->n_at_lam[ifep]);
1131                         }
1132                     }
1133                     else
1134                     {
1135                         Tprint = 0.0;
1136                     }
1137                     fprintf(outfile,"%12.8f",Tprint);
1138                 }
1139                 fprintf(outfile,"%3d\n",(ifep+1));
1140             }
1141         }
1142     }
1143 }
1144
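/* Save and restore the Monte Carlo random number generator state to/from the
   t_state structure, so that it can be carried along with the rest of the
   simulation state (e.g. in checkpoints). */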
1145 extern void get_mc_state(gmx_rng_t rng,t_state *state)
1146 {
1147     gmx_rng_get_state(rng,state->mc_rng,state->mc_rngi);
1148 }
1149
1150 extern void set_mc_state(gmx_rng_t rng,t_state *state)
1151 {
1152     gmx_rng_set_state(rng,state->mc_rng,state->mc_rngi[0]);
1153 }
1154
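/* Main driver for an expanded-ensemble / simulated-tempering step.  It converts the
   accumulated potential energy differences into reduced (dimensionless) energies for
   every lambda state, updates the histogram and weights (UpdateWeights), chooses a new
   lambda state (ChooseNewLambda), and, when simulated tempering changes the state,
   rescales the reference temperatures, the velocities (by sqrt(Tnew/Told)) and the
   Nose-Hoover thermostat/barostat variables and masses accordingly.  Finally the
   Wang-Landau incrementor is reduced, either when the histogram is sufficiently flat
   or via the 1/t scheme.  Returns the index of the new lambda state. */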
1155 extern int ExpandedEnsembleDynamics(FILE *log,t_inputrec *ir, gmx_enerdata_t *enerd,
1156                                     t_state *state, t_extmass *MassQ, df_history_t *dfhist,
1157                                     gmx_large_int_t step, gmx_rng_t mcrng,
1158                                     rvec *v, t_mdatoms *mdatoms)
1159 {
1160     real *pfep_lamee,*p_k, *scaled_lamee, *weighted_lamee;
1161     int i,nlam,nlim,lamnew,totalsamples;
1162     real oneovert,maxscaled=0,maxweighted=0;
1163     t_expanded *expand;
1164     t_simtemp *simtemp;
1165     double *temperature_lambdas;
1166     gmx_bool bIfReset,bSwitchtoOneOverT,bDoneEquilibrating=FALSE;
1167
1168     expand = ir->expandedvals;
1169     simtemp = ir->simtempvals;
1170     nlim = ir->fepvals->n_lambda;
1171     nlam = state->fep_state;
1172
1173     snew(scaled_lamee,nlim);
1174     snew(weighted_lamee,nlim);
1175     snew(pfep_lamee,nlim);
1176     snew(p_k,nlim);
1177
1178     if (expand->bInit_weights)  /* if initialized weights, we need to fill them in */
1179     {
1180         dfhist->wl_delta = expand->init_wl_delta;  /* MRS -- this would fit better somewhere else? */
1181         for (i=0;i<nlim;i++) {
1182             dfhist->sum_weights[i] = expand->init_lambda_weights[i];
1183             dfhist->sum_dg[i] = expand->init_lambda_weights[i];
1184         }
1185         expand->bInit_weights = FALSE;
1186     }
1187
1188     /* update the count at the current lambda */
1189     dfhist->n_at_lam[nlam]++;
1190
1191     /* need to calculate the PV term somewhere, but not needed here? Not until there's a lambda state that's
1192        pressure controlled.*/
1193     /*
1194       pVTerm = 0;
1195       where does this PV term go?
1196       for (i=0;i<nlim;i++)
1197       {
1198       fep_lamee[i] += pVTerm;
1199       }
1200     */
1201
1202     /* determine the maximum value and subtract it off, to avoid overflow in the exponentials.  There is probably a better way to do this */
1203     /* we don't need to include the pressure term, since the volume is the same between the two states.
1204        Is there some term we are neglecting, however? */
1205
1206     if (ir->efep != efepNO)
1207     {
1208         for (i=0;i<nlim;i++)
1209         {
1210             if (ir->bSimTemp)
1211             {
1212                 /* Note -- this assumes no mass changes, since kinetic energy is not added  . . . */
1213                 scaled_lamee[i] = (enerd->enerpart_lambda[i+1]-enerd->enerpart_lambda[0])/(simtemp->temperatures[i]*BOLTZ)
1214                     + enerd->term[F_EPOT]*(1.0/(simtemp->temperatures[i])- 1.0/(simtemp->temperatures[nlam]))/BOLTZ;
1215             }
1216             else
1217             {
1218                 scaled_lamee[i] = (enerd->enerpart_lambda[i+1]-enerd->enerpart_lambda[0])/(expand->mc_temp*BOLTZ);
1219                 /* mc_temp is currently set to the system reft unless otherwise defined */
1220                 /* mc_temp is currently set to the system reference temperature (ref_t) unless otherwise defined */
1221
1222             /* save these energies for printing, so they don't get overwritten by the next step */
1223             /* they aren't overwritten in the non-free energy case, but we always print with these
1224                for simplicity */
1225         }
1226     } else {
1227         if (ir->bSimTemp) {
1228             for (i=0;i<nlim;i++) {
1229                 scaled_lamee[i] = enerd->term[F_EPOT]*(1.0/simtemp->temperatures[i] - 1.0/simtemp->temperatures[nlam])/BOLTZ;
1230             }
1231         }
1232     }
1233
1234     for (i=0;i<nlim;i++) {
1235         pfep_lamee[i] = scaled_lamee[i];
1236
1237         weighted_lamee[i] = dfhist->sum_weights[i] - scaled_lamee[i];
1238         if (i==0)
1239         {
1240             maxscaled = scaled_lamee[i];
1241             maxweighted = weighted_lamee[i];
1242         }
1243         else
1244         {
1245             if (scaled_lamee[i] > maxscaled)
1246             {
1247                 maxscaled = scaled_lamee[i];
1248             }
1249             if (weighted_lamee[i] > maxweighted)
1250             {
1251                 maxweighted = weighted_lamee[i];
1252             }
1253         }
1254     }
1255
1256     for (i=0;i<nlim;i++)
1257     {
1258         scaled_lamee[i] -= maxscaled;
1259         weighted_lamee[i] -= maxweighted;
1260     }
1261
1262     /* update weights -- whether to actually do the update is decided inside UpdateWeights */
1263
1264     bDoneEquilibrating = UpdateWeights(nlim,expand,dfhist,nlam,scaled_lamee,weighted_lamee,step);
1265     if (bDoneEquilibrating)
1266     {
1267         if (log) {
1268             fprintf(log,"\nStep %d: Weights have equilibrated, using criteria: %s\n",(int)step,elmceq_names[expand->elmceq]);
1269         }
1270     }
1271
1272     lamnew = ChooseNewLambda(log,nlim,expand,dfhist,nlam,weighted_lamee,p_k,mcrng);
1273     /* if using simulated tempering, we need to adjust the temperatures */
1274     if (ir->bSimTemp && (lamnew != nlam)) /* only need to change the temperatures if we change the state */
1275     {
1276         int i, j, n, d;
1277         real *buf_ngtc;
1278         real told;
1279         int nstart, nend, gt;
1280
1281         snew(buf_ngtc,ir->opts.ngtc);
1282
1283         for (i=0;i<ir->opts.ngtc;i++) {
1284             if (ir->opts.ref_t[i] > 0) {
1285                 told = ir->opts.ref_t[i];
1286                 ir->opts.ref_t[i] =  simtemp->temperatures[lamnew];
1287                 buf_ngtc[i] = sqrt(ir->opts.ref_t[i]/told); /* reuse the buffer to hold the velocity scaling factor sqrt(Tnew/Told) */
1288             }
1289         }
1290
1291         /* we don't need to manipulate the ekind information, as it isn't due to be reset until the next step anyway */
1292
1293         nstart = mdatoms->start;
1294         nend   = nstart + mdatoms->homenr;
1295         for (n=nstart; n<nend; n++)
1296         {
1297             gt = 0;
1298             if (mdatoms->cTC)
1299             {
1300                 gt = mdatoms->cTC[n];
1301             }
1302             for(d=0; d<DIM; d++)
1303             {
1304                  v[n][d] *= buf_ngtc[gt];
1305             }
1306         }
1307
1308         if (IR_NPT_TROTTER(ir) || IR_NPH_TROTTER(ir) || IR_NVT_TROTTER(ir)) {
1309             /* we need to recalculate the masses if the temperature has changed */
1310             init_npt_masses(ir,state,MassQ,FALSE);
1311             for (i=0;i<state->nnhpres;i++)
1312             {
1313                 for (j=0;j<ir->opts.nhchainlength;j++)
1314                 {
1315                     state->nhpres_vxi[i+j] *= buf_ngtc[i];
1316                 }
1317             }
1318             for (i=0;i<ir->opts.ngtc;i++)
1319             {
1320                 for (j=0;j<ir->opts.nhchainlength;j++)
1321                 {
1322                     state->nosehoover_vxi[i+j] *= buf_ngtc[i];
1323                 }
1324             }
1325         }
1326         sfree(buf_ngtc);
1327     }
1328
1329     /* now check on the Wang-Landau updating criteria */
1330
1331     if (EWL(expand->elamstats))
1332     {
1333         bSwitchtoOneOverT = FALSE;
1334         if (expand->bWLoneovert) {
1335             totalsamples = 0;
1336             for (i=0;i<nlim;i++)
1337             {
1338                 totalsamples += dfhist->n_at_lam[i];
1339             }
1340             oneovert = (1.0*nlim)/totalsamples;
1341             /* oneovert has decreased by a bit since last time, so we actually make sure it's within one sample's worth of this number */
1342             /* switch to 1/t incrementing when wl_delta has decreased at least once, and wl_delta is now less than 1/t */
1343             if ((dfhist->wl_delta <= ((totalsamples)/(totalsamples-1.00001))*oneovert) &&
1344                 (dfhist->wl_delta < expand->init_wl_delta))
1345             {
1346                 bSwitchtoOneOverT = TRUE;
1347             }
1348         }
1349         if (bSwitchtoOneOverT) {
1350             dfhist->wl_delta = oneovert; /* now we reduce by this each time, instead of only at flatness */
1351         } else {
1352             bIfReset = CheckHistogramRatios(nlim,dfhist->wl_histo,expand->wl_ratio);
1353             if (bIfReset)
1354             {
1355                 for (i=0;i<nlim;i++)
1356                 {
1357                     dfhist->wl_histo[i] = 0;
1358                 }
1359                 dfhist->wl_delta *= expand->wl_scale;
1360                 if (log) {
1361                     fprintf(log,"\nStep %d: weights are now:",(int)step);
1362                     for (i=0;i<nlim;i++)
1363                     {
1364                         fprintf(log," %.5f",dfhist->sum_weights[i]);
1365                     }
1366                     fprintf(log,"\n");
1367                 }
1368             }
1369         }
1370     }
1371     sfree(scaled_lamee);
1372     sfree(weighted_lamee);
1373     sfree(p_k);
1374
1375     return lamnew;
1376 }
1377
1378