2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2015,2016,2017,2018,2019, The GROMACS development team.
5 * Copyright (c) 2020,2021, by the GROMACS development team, led by
6 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
7 * and including many others, as listed in the AUTHORS file in the
8 * top-level source directory and at http://www.gromacs.org.
10 * GROMACS is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public License
12 * as published by the Free Software Foundation; either version 2.1
13 * of the License, or (at your option) any later version.
15 * GROMACS is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with GROMACS; if not, see
22 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
23 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
25 * If you want to redistribute modifications to GROMACS, please
26 * consider that scientific software is very special. Version
27 * control is crucial - bugs must be traceable. We will be happy to
28 * consider code for inclusion in the official distribution, but
29 * derived work must not be called official GROMACS. Details are found
30 * in the README & COPYING files - if they are missing, get the
31 * official version at http://www.gromacs.org.
33 * To help us fund GROMACS development, we humbly ask that you cite
34 * the research papers on the package. Check out http://www.gromacs.org.
39 * Implements the BiasState class.
41 * \author Viveca Lindahl
42 * \author Berk Hess <hess@kth.se>
48 #include "biasstate.h"
59 #include "gromacs/fileio/gmxfio.h"
60 #include "gromacs/fileio/xvgr.h"
61 #include "gromacs/gmxlib/network.h"
62 #include "gromacs/math/units.h"
63 #include "gromacs/math/utilities.h"
64 #include "gromacs/mdrunutility/multisim.h"
65 #include "gromacs/mdtypes/awh_history.h"
66 #include "gromacs/mdtypes/awh_params.h"
67 #include "gromacs/mdtypes/commrec.h"
68 #include "gromacs/simd/simd.h"
69 #include "gromacs/simd/simd_math.h"
70 #include "gromacs/utility/arrayref.h"
71 #include "gromacs/utility/exceptions.h"
72 #include "gromacs/utility/gmxassert.h"
73 #include "gromacs/utility/smalloc.h"
74 #include "gromacs/utility/stringutil.h"
77 #include "biassharing.h"
78 #include "pointstate.h"
/* Copies the current PMF estimate into \p pmf, in units of kT.
 * The PMF is the negative log of the sampled PMF histogram; points outside
 * the target region get GMX_FLOAT_MAX as a "no data" sentinel. */
83 void BiasState::getPmf(gmx::ArrayRef<float> pmf) const
85     GMX_ASSERT(pmf.size() == points_.size(), "pmf should have the size of the bias grid");
87     /* The PMF is just the negative of the log of the sampled PMF histogram.
88      * Points with zero target weight are ignored, they will mostly contain noise.
90     for (size_t i = 0; i < points_.size(); i++)
92         pmf[i] = points_[i].inTargetRegion() ? -points_[i].logPmfSum() : GMX_FLOAT_MAX;
100  * Sum PMF over multiple simulations, when requested.
102  * \param[in,out] pointState         The state of the points in the bias.
103  * \param[in]     numSharedUpdate    The number of biases sharing the histogram.
104  * \param[in]     biasSharing        Object for sharing bias data over multiple simulations
105  * \param[in]     biasIndex          Index of this bias in the total list of biases in this simulation
107 void sumPmf(gmx::ArrayRef<PointState> pointState, int numSharedUpdate, const BiasSharing* biasSharing, const int biasIndex)
/* With a single contributing bias there is nothing to sum: the local PMF
   histogram already is the full result, so return early. */
109     if (numSharedUpdate == 1)
113     GMX_ASSERT(biasSharing != nullptr
114                        && numSharedUpdate % biasSharing->numSharingSimulations(biasIndex) == 0,
115                "numSharedUpdate should be a multiple of multiSimComm->numSimulations_");
116     GMX_ASSERT(numSharedUpdate == biasSharing->numSharingSimulations(biasIndex),
117                "Sharing within a simulation is not implemented (yet)");
119     std::vector<double> buffer(pointState.size());
121     /* Need to temporarily exponentiate the log weights to sum over simulations */
122     for (size_t i = 0; i < buffer.size(); i++)
/* Non-target points contribute 0 so they stay out of the cross-simulation sum. */
124         buffer[i] = pointState[i].inTargetRegion() ? std::exp(pointState[i].logPmfSum()) : 0;
/* Element-wise reduction of the linear-space histogram over all sharing simulations. */
127     biasSharing->sumOverSharingSimulations(gmx::ArrayRef<double>(buffer), biasIndex);
129     /* Take log again to get (non-normalized) PMF */
130     double normFac = 1.0 / numSharedUpdate;
131     for (gmx::index i = 0; i < pointState.ssize(); i++)
133         if (pointState[i].inTargetRegion())
135             pointState[i].setLogPmfSum(std::log(buffer[i] * normFac));
141  * Find the minimum free energy value.
143  * \param[in] pointState  The state of the points.
144  * \returns the minimum free energy value.
/* Only points inside the target region are considered; if no such point
   exists the GMX_FLOAT_MAX initializer is returned unchanged. */
146 double freeEnergyMinimumValue(gmx::ArrayRef<const PointState> pointState)
148     double fMin = GMX_FLOAT_MAX;
150     for (auto const& ps : pointState)
152         if (ps.inTargetRegion() && ps.freeEnergy() < fMin)
154             fMin = ps.freeEnergy();
162  * Find and return the log of the probability weight of a point given a coordinate value.
164  * The unnormalized weight is given by
165  * w(point|value) = exp(bias(point) - U(value,point)),
166  * where U is a harmonic umbrella potential.
168  * \param[in] dimParams              The bias dimensions parameters
169  * \param[in] points                 The point state.
170  * \param[in] grid                   The grid.
171  * \param[in] pointIndex             Point to evaluate probability weight for.
172  * \param[in] pointBias              Bias for the point (as a log weight).
173  * \param[in] value                  Coordinate value.
174  * \param[in] neighborLambdaEnergies The energy of the system in neighboring lambdas states. Can be
175  * empty when there are no free energy lambda state dimensions.
176  * \param[in] gridpointIndex         The index of the current grid point.
177  * \returns the log of the biased probability weight.
179 double biasedLogWeightFromPoint(ArrayRef<const DimParams>  dimParams,
180                                 ArrayRef<const PointState> points,
181                                 const BiasGrid&            grid,
/* NOTE(review): the pointIndex/pointBias/gridpointIndex parameter lines are
 * not visible in this extraction of the file. */
184                                 const awh_dvec             value,
185                                 ArrayRef<const double>     neighborLambdaEnergies,
/* Start from the "effectively zero weight" floor; exp() of this is ~0. */
188     double logWeight = detail::c_largeNegativeExponent;
190     /* Only points in the target region have non-zero weight */
191     if (points[pointIndex].inTargetRegion())
193         logWeight = pointBias;
195         /* Add potential for all parameter dimensions */
196         for (size_t d = 0; d < dimParams.size(); d++)
198             if (dimParams[d].isFepLambdaDimension())
200                 /* If this is not a sampling step or if this function is called from
201                  * calcConvolvedBias(), when writing energy subblocks, neighborLambdaEnergies will
202                  * be empty. No convolution is required along the lambda dimension. */
203                 if (!neighborLambdaEnergies.empty())
/* Along a lambda axis the coordinate value is the (integer) lambda state index. */
205                     const int pointLambdaIndex     = grid.point(pointIndex).coordValue[d];
206                     const int gridpointLambdaIndex = grid.point(gridpointIndex).coordValue[d];
/* Weight difference comes from the energy gap between the two lambda states,
 * scaled by beta = 1/kT for this dimension. */
207                     logWeight -= dimParams[d].fepDimParams().beta
208                                  * (neighborLambdaEnergies[pointLambdaIndex]
209                                     - neighborLambdaEnergies[gridpointLambdaIndex]);
/* Pull dimensions: subtract the harmonic umbrella potential 0.5*betak*dev^2
 * (in units of kT, since betak = beta*k). */
214                 double dev = getDeviationFromPointAlongGridAxis(grid, d, pointIndex, value[d]);
215                 logWeight -= 0.5 * dimParams[d].pullDimParams().betak * dev * dev;
223  * Calculates the marginal distribution (marginal probability) for each value along
224  * a free energy lambda axis.
225  * The marginal distribution of one coordinate dimension value is the sum of the probability
226  * distribution of all values (herein all neighbor values) with the same value in the dimension
228  * \param[in] grid               The bias grid.
229  * \param[in] neighbors          The points to use for the calculation of the marginal distribution.
230  * \param[in] probWeightNeighbor Probability weights of the neighbors.
231  * \returns The calculated marginal distribution in a 1D array with
232  * as many elements as there are points along the axis of interest.
234 std::vector<double> calculateFELambdaMarginalDistribution(const BiasGrid&          grid,
235                                                           ArrayRef<const int>      neighbors,
236                                                           ArrayRef<const double>   probWeightNeighbor)
238     const std::optional<int> lambdaAxisIndex = grid.lambdaAxisIndex();
239     GMX_RELEASE_ASSERT(lambdaAxisIndex,
240                        "There must be a free energy lambda axis in order to calculate the free "
241                        "energy lambda marginal distribution.");
242     const int           numFepLambdaStates = grid.numFepLambdaStates();
/* One accumulator per lambda state, zero-initialized. */
243     std::vector<double> lambdaMarginalDistribution(numFepLambdaStates, 0);
245     for (size_t i = 0; i < neighbors.size(); i++)
247         const int neighbor    = neighbors[i];
/* Along the lambda axis the coordinate value is the lambda state index itself. */
248         const int lambdaState = grid.point(neighbor).coordValue[lambdaAxisIndex.value()];
249         lambdaMarginalDistribution[lambdaState] += probWeightNeighbor[i];
251     return lambdaMarginalDistribution;
/* Computes the PMF convolved with the umbrella weights of neighboring points.
 * For each grid point the weights of all neighbors (excluding neighbors that
 * differ in lambda state) are summed in linear space and the negative log of
 * the sum is stored in \p convolvedPmf. */
256 void BiasState::calcConvolvedPmf(ArrayRef<const DimParams> dimParams,
257                                  const BiasGrid&           grid,
258                                  std::vector<float>*       convolvedPmf) const
260     size_t numPoints = grid.numPoints();
262     convolvedPmf->resize(numPoints);
264     /* Get the PMF to convolve. */
265     std::vector<float> pmf(numPoints);
268     for (size_t m = 0; m < numPoints; m++)
270         double           freeEnergyWeights = 0;
271         const GridPoint& point             = grid.point(m);
272         for (const auto& neighbor : point.neighbor)
274             /* Do not convolve the bias along a lambda axis - only use the pmf from the current point */
275             if (!pointsHaveDifferentLambda(grid, m, neighbor))
277                 /* The negative PMF is a positive bias. */
278                 double biasNeighbor = -pmf[neighbor];
280                 /* Add the convolved PMF weights for the neighbors of this point.
281                 Note that this function only adds point within the target > 0 region.
282                 Sum weights, take the logarithm last to get the free energy. */
/* Empty neighborLambdaEnergies ({}) => no lambda convolution inside the call. */
283                 double logWeight = biasedLogWeightFromPoint(
284                         dimParams, points_, grid, neighbor, biasNeighbor, point.coordValue, {}, m);
285                 freeEnergyWeights += std::exp(logWeight);
289         GMX_RELEASE_ASSERT(freeEnergyWeights > 0,
290                            "Attempting to do log(<= 0) in AWH convolved PMF calculation.");
291         (*convolvedPmf)[m] = -std::log(static_cast<float>(freeEnergyWeights));
299  * Updates the target distribution for all points.
301  * The target distribution is always updated for all points
304  * \param[in,out] pointState  The state of all points.
305  * \param[in]     params      The bias parameters.
307 void updateTargetDistribution(ArrayRef<PointState> pointState, const BiasParams& params)
/* For the cutoff target type, weights above minimum-free-energy + cutoff are
 * suppressed; other target types ignore this value. */
309     double freeEnergyCutoff = 0;
310     if (params.eTarget == AwhTargetType::Cutoff)
312         freeEnergyCutoff = freeEnergyMinimumValue(pointState) + params.freeEnergyCutoffInKT;
/* First pass: compute the unnormalized target weight of each point and its sum. */
315     double sumTarget = 0;
316     for (PointState& ps : pointState)
318         sumTarget += ps.updateTargetWeight(params, freeEnergyCutoff);
320     GMX_RELEASE_ASSERT(sumTarget > 0, "We should have a non-zero distribution");
/* Second pass: normalize so the target weights sum to 1. */
323     double invSum = 1.0 / sumTarget;
324     for (PointState& ps : pointState)
326         ps.scaleTarget(invSum);
331  * Puts together a string describing a grid point.
333  * \param[in] grid   The grid.
334  * \param[in] point  BiasGrid point index.
335  * \returns a string for the point.
337 std::string gridPointValueString(const BiasGrid& grid, int point)
339     std::string pointString;
/* Append the coordinate value of each dimension; the branch below presumably
 * inserts a separator between dimensions — the separator line and the final
 * return are not visible in this extraction. */
343     for (int d = 0; d < grid.numDimensions(); d++)
345         pointString += gmx::formatString("%g", grid.point(point).coordValue[d]);
346         if (d < grid.numDimensions() - 1)
/* Scans the grid for points where the actual visit distribution is much lower
 * than the target (reference) distribution and prints warnings to the log.
 * Returns the number of warnings issued (declaration of the counter is not
 * visible in this extraction). */
361 int BiasState::warnForHistogramAnomalies(const BiasGrid& grid, int biasIndex, double t, FILE* fplog, int maxNumWarnings) const
363     GMX_ASSERT(fplog != nullptr, "Warnings can only be issued if there is log file.");
364     const double maxHistogramRatio = 0.5; /* Tolerance for printing a warning about the histogram ratios */
366     /* Sum up the histograms and get their normalization */
367     double sumVisits  = 0;
368     double sumWeights = 0;
369     for (const auto& pointState : points_)
371         if (pointState.inTargetRegion())
373             sumVisits += pointState.numVisitsTot();
374             sumWeights += pointState.weightSumTot();
377     GMX_RELEASE_ASSERT(sumVisits > 0, "We should have visits");
378     GMX_RELEASE_ASSERT(sumWeights > 0, "We should have weight");
379     double invNormVisits = 1.0 / sumVisits;
380     double invNormWeight = 1.0 / sumWeights;
382     /* Check all points for warnings */
384     size_t numPoints = grid.numPoints();
385     for (size_t m = 0; m < numPoints; m++)
387         /* Skip points close to boundary or non-target region */
388         const GridPoint& gridPoint = grid.point(m);
389         bool             skipPoint = false;
390         for (size_t n = 0; (n < gridPoint.neighbor.size()) && !skipPoint; n++)
392             int neighbor = gridPoint.neighbor[n];
393             skipPoint    = !points_[neighbor].inTargetRegion();
394             for (int d = 0; (d < grid.numDimensions()) && !skipPoint; d++)
396                 const GridPoint& neighborPoint = grid.point(neighbor);
/* A neighbor on the first/last index of any axis marks this point as boundary-adjacent. */
397                 skipPoint                      = neighborPoint.index[d] == 0
398                             || neighborPoint.index[d] == grid.axis(d).numPoints() - 1;
402         /* Warn if the coordinate distribution is less than the target distribution with a certain fraction somewhere */
403         const double relativeWeight = points_[m].weightSumTot() * invNormWeight;
404         const double relativeVisits = points_[m].numVisitsTot() * invNormVisits;
405         if (!skipPoint && relativeVisits < relativeWeight * maxHistogramRatio)
407             std::string pointValueString = gridPointValueString(grid, m);
408             std::string warningMessage   = gmx::formatString(
410                     "at t = %g ps the obtained coordinate distribution at coordinate value %s "
411                     "is less than a fraction %g of the reference distribution at that point. "
412                     "If you are not certain about your settings you might want to increase your "
413                     "pull force constant or "
414                     "modify your sampling region.\n",
417                     pointValueString.c_str(),
419             gmx::TextLineWrapper wrapper;
420             wrapper.settings().setLineLength(c_linewidth);
421             fprintf(fplog, "%s", wrapper.wrapToString(warningMessage).c_str());
/* Stop scanning once the warning budget is exhausted; numWarnings is
 * presumably incremented where the warning is printed (not visible here). */
425         if (numWarnings >= maxNumWarnings)
/* Computes the umbrella force on each dimension for reference point \p point
 * and accumulates the corresponding harmonic potential (the return statement
 * is not visible in this extraction). For FEP lambda dimensions the "force"
 * is dH/dlambda at the point's lambda state and the potential is unaffected. */
434 double BiasState::calcUmbrellaForceAndPotential(ArrayRef<const DimParams> dimParams,
435                                                 const BiasGrid&           grid,
437                                                 ArrayRef<const double>    neighborLambdaDhdl,
438                                                 ArrayRef<double>          force) const
440     double potential = 0;
441     for (size_t d = 0; d < dimParams.size(); d++)
443         if (dimParams[d].isFepLambdaDimension())
445             if (!neighborLambdaDhdl.empty())
/* Along a lambda axis the coordinate value is the lambda state index. */
447                 const int coordpointLambdaIndex = grid.point(point).coordValue[d];
448                 force[d]                        = neighborLambdaDhdl[coordpointLambdaIndex];
449                 /* The potential should not be affected by the lambda dimension. */
455                     getDeviationFromPointAlongGridAxis(grid, d, point, coordState_.coordValue()[d]);
456             double k         = dimParams[d].pullDimParams().k;
458             /* Force from harmonic potential 0.5*k*dev^2 */
459             force[d] = -k * deviation;
460             potential += 0.5 * k * deviation * deviation;
/* Computes the convolved bias force: the probability-weighted average of the
 * umbrella forces from all neighbors of the current grid point.
 * \p forceWorkBuffer is scratch space sized for one force vector. */
467 void BiasState::calcConvolvedForce(ArrayRef<const DimParams> dimParams,
468                                    const BiasGrid&           grid,
469                                    ArrayRef<const double>    probWeightNeighbor,
470                                    ArrayRef<const double>    neighborLambdaDhdl,
471                                    ArrayRef<double>          forceWorkBuffer,
472                                    ArrayRef<double>          force) const
/* Presumably zero-initializes force here; the statement is not visible in
 * this extraction. */
474     for (size_t d = 0; d < dimParams.size(); d++)
479     /* Only neighboring points have non-negligible contribution. */
480     const std::vector<int>& neighbor          = grid.point(coordState_.gridpointIndex()).neighbor;
481     gmx::ArrayRef<double>   forceFromNeighbor = forceWorkBuffer;
482     for (size_t n = 0; n < neighbor.size(); n++)
484         double weightNeighbor = probWeightNeighbor[n];
485         int    indexNeighbor  = neighbor[n];
487         /* Get the umbrella force from this point. The returned potential is ignored here. */
488         calcUmbrellaForceAndPotential(dimParams, grid, indexNeighbor, neighborLambdaDhdl, forceFromNeighbor);
490         /* Add the weighted umbrella force to the convolved force. */
491         for (size_t d = 0; d < dimParams.size(); d++)
493             force[d] += forceFromNeighbor[d] * weightNeighbor;
/* Moves the umbrella reference point by sampling a new grid point from the
 * neighbor probability weights, then recomputes the umbrella force and
 * potential at the new reference. Several parameters (step, seed, indexSeed)
 * and the return statement are not visible in this extraction. */
498 double BiasState::moveUmbrella(ArrayRef<const DimParams> dimParams,
499                                const BiasGrid&           grid,
500                                ArrayRef<const double>    probWeightNeighbor,
501                                ArrayRef<const double>    neighborLambdaDhdl,
502                                ArrayRef<double>          biasForce,
506                                bool                      onlySampleUmbrellaGridpoint)
508     /* Generate and set a new coordinate reference value */
509     coordState_.sampleUmbrellaGridpoint(
510             grid, coordState_.gridpointIndex(), probWeightNeighbor, step, seed, indexSeed);
/* Early-out mode: callers that only need the resampled grid point skip the
 * force/potential update below. */
512     if (onlySampleUmbrellaGridpoint)
517     std::vector<double> newForce(dimParams.size());
518     double              newPotential = calcUmbrellaForceAndPotential(
519             dimParams, grid, coordState_.umbrellaGridpoint(), neighborLambdaDhdl, newForce);
521     /*  A modification of the reference value at time t will lead to a different
522         force over t-dt/2 to t and over t to t+dt/2. For high switching rates
523         this means the force and velocity will change signs roughly as often.
524         To avoid any issues we take the average of the previous and new force
525         at steps when the reference value has been moved. E.g. if the ref. value
526         is set every step to (coord dvalue +/- delta) would give zero force.
528     for (gmx::index d = 0; d < biasForce.ssize(); d++)
530         /* Average of the current and new force */
531         biasForce[d] = 0.5 * (biasForce[d] + newForce[d]);
541  * Sets the histogram rescaling factors needed to control the histogram size.
543  * For sake of robustness, the reference weight histogram can grow at a rate
544  * different from the actual sampling rate. Typically this happens for a limited
545  * initial time, alternatively growth is scaled down by a constant factor for all
546  * times. Since the size of the reference histogram sets the size of the free
547  * energy update this should be reflected also in the PMF. Thus the PMF histogram
548  * needs to be rescaled too.
550  * This function should only be called by the bias update function or wrapped by a function that
551  * knows what scale factors should be applied when, e.g,
552  * getSkippedUpdateHistogramScaleFactors().
554  * \param[in]  params             The bias parameters.
555  * \param[in]  newHistogramSize   New reference weight histogram size.
556  * \param[in]  oldHistogramSize   Previous reference weight histogram size (before adding new samples).
557  * \param[out] weightHistScaling  Scaling factor for the reference weight histogram.
558  * \param[out] logPmfSumScaling   Log of the scaling factor for the PMF histogram.
560 void setHistogramUpdateScaleFactors(const BiasParams& params,
561                                     double            newHistogramSize,
562                                     double            oldHistogramSize,
563                                     double*           weightHistScaling,
564                                     double*           logPmfSumScaling)
567     /* The two scaling factors below are slightly different (ignoring the log factor) because the
568        reference and the PMF histogram apply weight scaling differently. The weight histogram
569        applies is locally, i.e. each sample is scaled down meaning all samples get equal weight.
570        It is done this way because that is what target type local Boltzmann (for which
571        target = weight histogram) needs. In contrast, the PMF histogram is rescaled globally
572        by repeatedly scaling down the whole histogram. The reasons for doing it this way are:
573        1) empirically this is necessary for converging the PMF; 2) since the extraction of
574        the PMF is theoretically only valid for a constant bias, new samples should get more
575        weight than old ones for which the bias is fluctuating more. */
/* NOTE(review): the assignment target of the expression below (presumably
 * *weightHistScaling =) is on a line not visible in this extraction. */
577             newHistogramSize / (oldHistogramSize + params.updateWeight * params.localWeightScaling);
578     *logPmfSumScaling = std::log(newHistogramSize / (oldHistogramSize + params.updateWeight));
/* Returns the histogram scale factors to apply for updates that were skipped.
 * In the initial stage the reference histogram size is constant between global
 * updates, so the factors follow from setHistogramUpdateScaleFactors with
 * new == old size; afterwards the factors are trivially 1 (scaling) and 0 (log). */
583 void BiasState::getSkippedUpdateHistogramScaleFactors(const BiasParams& params,
584                                                       double*           weightHistScaling,
585                                                       double*           logPmfSumScaling) const
587     GMX_ASSERT(params.skipUpdates(),
588                "Calling function for skipped updates when skipping updates is not allowed");
590     if (inInitialStage())
592         /* In between global updates the reference histogram size is kept constant so we trivially
593            know what the histogram size was at the time of the skipped update. */
594         double histogramSize = histogramSize_.histogramSize();
595         setHistogramUpdateScaleFactors(
596                 params, histogramSize, histogramSize, weightHistScaling, logPmfSumScaling);
600         /* In the final stage, the reference histogram grows at the sampling rate which gives trivial scale factors. */
601         *weightHistScaling = 1;
602         *logPmfSumScaling  = 0;
/* Applies all previously skipped updates to every point in the bias and
 * refreshes the bias value for points that were actually behind. */
606 void BiasState::doSkippedUpdatesForAllPoints(const BiasParams& params)
608     double weightHistScaling;
609     double logPmfsumScaling;
611     getSkippedUpdateHistogramScaleFactors(params, &weightHistScaling, &logPmfsumScaling);
613     for (auto& pointState : points_)
615         bool didUpdate = pointState.performPreviouslySkippedUpdates(
616                 params, histogramSize_.numUpdates(), weightHistScaling, logPmfsumScaling);
618         /* Update the bias for this point only if there were skipped updates in the past to avoid calculating the log unneccessarily */
621             pointState.updateBias();
/* Same as doSkippedUpdatesForAllPoints() but restricted to the neighborhood
 * of the current coordinate grid point, which is the only region whose state
 * must be current for the local update. */
626 void BiasState::doSkippedUpdatesInNeighborhood(const BiasParams& params, const BiasGrid& grid)
628     double weightHistScaling;
629     double logPmfsumScaling;
631     getSkippedUpdateHistogramScaleFactors(params, &weightHistScaling, &logPmfsumScaling);
633     /* For each neighbor point of the center point, refresh its state by adding the results of all past, skipped updates. */
634     const std::vector<int>& neighbors = grid.point(coordState_.gridpointIndex()).neighbor;
635     for (const auto& neighbor : neighbors)
637         bool didUpdate = points_[neighbor].performPreviouslySkippedUpdates(
638                 params, histogramSize_.numUpdates(), weightHistScaling, logPmfsumScaling);
642             points_[neighbor].updateBias();
651  * Merge update lists from multiple sharing simulations.
653  * \param[in,out] updateList  Update list for this simulation (assumed >= npoints long).
654  * \param[in]     numPoints   Total number of points.
655  * \param[in]     biasSharing Object for sharing bias data over multiple simulations
656  * \param[in]     biasIndex   Index of this bias in the total list of biases in this simulation
658 void mergeSharedUpdateLists(std::vector<int>* updateList,
660                             const BiasSharing& biasSharing,
663     std::vector<int> numUpdatesOfPoint;
665     /* Flag the update points of this sim.
666        TODO: we can probably avoid allocating this array and just use the input array. */
667     numUpdatesOfPoint.resize(numPoints, 0);
668     for (auto& pointIndex : *updateList)
670         numUpdatesOfPoint[pointIndex] = 1;
673     /* Sum over the sims to get all the flagged points */
674     biasSharing.sumOverSharingSimulations(arrayRefFromArray(numUpdatesOfPoint.data(), numPoints), biasIndex);
676     /* Collect the indices of the flagged points in place. The resulting array will be the merged update list.*/
/* NOTE(review): updateList is presumably cleared before being refilled below;
 * that line is not visible in this extraction. */
678     for (int m = 0; m < numPoints; m++)
680         if (numUpdatesOfPoint[m] > 0)
682             updateList->push_back(m);
688  * Generate an update list of points sampled since the last update.
690  * \param[in]     grid              The AWH bias.
691  * \param[in]     points            The point state.
692  * \param[in]     originUpdatelist  The origin of the rectangular region that has been sampled since
694  * \param[in]     endUpdatelist     The end of the rectangular that has been sampled since
696  * \param[in,out] updateList        Local update list to set (assumed >= npoints long).
698 void makeLocalUpdateList(const BiasGrid&            grid,
699                          ArrayRef<const PointState> points,
700                          const awh_ivec             originUpdatelist,
701                          const awh_ivec             endUpdatelist,
702                          std::vector<int>*          updateList)
707     /* Define the update search grid */
708     for (int d = 0; d < grid.numDimensions(); d++)
710         origin[d]    = originUpdatelist[d];
711         numPoints[d] = endUpdatelist[d] - originUpdatelist[d] + 1;
713         /* Because the end_updatelist is unwrapped it can be > (npoints - 1) so that numPoints can be > npoints in grid.
714            This helps for calculating the distance/number of points but should be removed and fixed when the way of
715            updating origin/end updatelist is changed (see sampleProbabilityWeights). */
716         numPoints[d] = std::min(grid.axis(d).numPoints(), numPoints[d]);
719     /* Make the update list */
/* Walk all points in the rectangular subgrid; advancePointInSubgrid advances
 * pointIndex and reports whether a next point exists. */
722     bool pointExists = true;
725         pointExists = advancePointInSubgrid(grid, origin, numPoints, &pointIndex);
727         if (pointExists && points[pointIndex].inTargetRegion())
729             updateList->push_back(pointIndex);
/* Collapses the local update range to just the current grid point, ready to
 * be grown again as new samples arrive. */
736 void BiasState::resetLocalUpdateRange(const BiasGrid& grid)
738     const int gridpointIndex = coordState_.gridpointIndex();
739     for (int d = 0; d < grid.numDimensions(); d++)
741         /* This gives the minimum range consisting only of the current closest point. */
742         originUpdatelist_[d] = grid.point(gridpointIndex).index[d];
743         endUpdatelist_[d]    = grid.point(gridpointIndex).index[d];
751  * Add partial histograms (accumulating between updates) to accumulating histograms.
753  * \param[in,out] pointState         The state of the points in the bias.
754  * \param[in,out] weightSumCovering  The weights for checking covering.
755  * \param[in]     numSharedUpdate    The number of biases sharing the histrogram.
756  * \param[in]     biasSharing        Object for sharing bias data over multiple simulations
757  * \param[in]     biasIndex          Index of this bias in the total list of biases in this
758  * simulation \param[in]     localUpdateList    List of points with data.
760 void sumHistograms(gmx::ArrayRef<PointState> pointState,
761                    gmx::ArrayRef<double>     weightSumCovering,
763                    const BiasSharing*        biasSharing,
765                    const std::vector<int>&   localUpdateList)
767     /* The covering checking histograms are added before summing over simulations, so that the
768        weights from different simulations are kept distinguishable. */
769     for (int globalIndex : localUpdateList)
771         weightSumCovering[globalIndex] += pointState[globalIndex].weightSumIteration();
774     /* Sum histograms over multiple simulations if needed. */
775     if (numSharedUpdate > 1)
777         GMX_ASSERT(numSharedUpdate == biasSharing->numSharingSimulations(biasIndex),
778                    "Sharing within a simulation is not implemented (yet)");
780         /* Collect the weights and counts in linear arrays to be able to use gmx_sumd_sim. */
781         std::vector<double> weightSum;
782         std::vector<double> coordVisits;
784         weightSum.resize(localUpdateList.size());
785         coordVisits.resize(localUpdateList.size());
787         for (size_t localIndex = 0; localIndex < localUpdateList.size(); localIndex++)
789             const PointState& ps = pointState[localUpdateList[localIndex]];
791             weightSum[localIndex]   = ps.weightSumIteration();
792             coordVisits[localIndex] = ps.numVisitsIteration();
/* Element-wise sums across all sharing simulations. */
795         biasSharing->sumOverSharingSimulations(gmx::ArrayRef<double>(weightSum), biasIndex);
796         biasSharing->sumOverSharingSimulations(gmx::ArrayRef<double>(coordVisits), biasIndex);
798         /* Transfer back the result */
799         for (size_t localIndex = 0; localIndex < localUpdateList.size(); localIndex++)
801             PointState& ps = pointState[localUpdateList[localIndex]];
803             ps.setPartialWeightAndCount(weightSum[localIndex], coordVisits[localIndex]);
807     /* Now add the partial counts and weights to the accumulating histograms.
808        Note: we still need to use the weights for the update so we wait
809        with resetting them until the end of the update. */
810     for (int globalIndex : localUpdateList)
812         pointState[globalIndex].addPartialWeightAndCount();
817  * Label points along an axis as covered or not.
819  * A point is covered if it is surrounded by visited points up to a radius = coverRadius.
821  * \param[in]     visited        Visited? For each point.
822  * \param[in]     checkCovering  Check for covering? For each point.
823  * \param[in]     numPoints      The number of grid points along this dimension.
824  * \param[in]     period         Period in number of points.
825  * \param[in]     coverRadius    Cover radius, in points, needed for defining a point as covered.
826  * \param[in,out] covered        In this array elements are 1 for covered points and 0 for
827  * non-covered points, this routine assumes that \p covered has at least size \p numPoints.
829 void labelCoveredPoints(const std::vector<bool>& visited,
830                         const std::vector<bool>& checkCovering,
834                         gmx::ArrayRef<int>       covered)
836     GMX_ASSERT(covered.ssize() >= numPoints, "covered should be at least as large as the grid");
/* Track the first and the bracketing unvisited points while scanning. */
838     bool haveFirstNotVisited = false;
839     int  firstNotVisited     = -1;
840     int  notVisitedLow       = -1;
841     int  notVisitedHigh      = -1;
843     for (int n = 0; n < numPoints; n++)
845         if (checkCovering[n] && !visited[n])
847             if (!haveFirstNotVisited)
851                 haveFirstNotVisited = true;
857                 /* Have now an interval I = [notVisitedLow,notVisitedHigh] of visited points bounded
858                    by unvisited points. The unvisted end points affect the coveredness of the
859                    visited with a reach equal to the cover radius. */
860                 int notCoveredLow  = notVisitedLow + coverRadius;
861                 int notCoveredHigh = notVisitedHigh - coverRadius;
862                 for (int i = notVisitedLow; i <= notVisitedHigh; i++)
864                     covered[i] = static_cast<int>((i > notCoveredLow) && (i < notCoveredHigh));
867                 /* Find a new interval to set covering for. Make the notVisitedHigh of this interval
868                    the notVisitedLow of the next. */
869                 notVisitedLow = notVisitedHigh;
874     /* Have labelled all the internal points. Now take care of the boundary regions. */
875     if (!haveFirstNotVisited)
877         /* No non-visited points <=> all points visited => all points covered. */
879         for (int n = 0; n < numPoints; n++)
886         int lastNotVisited = notVisitedLow;
888         /* For periodic boundaries, non-visited points can influence points
889            on the other side of the boundary so we need to wrap around. */
891         /* Lower end. For periodic boundaries the last upper end not visited point becomes the low-end not visited point.
892            For non-periodic boundaries there is no lower end point so a dummy value is used. */
893         int notVisitedHigh = firstNotVisited;
/* The dummy -(coverRadius + 1) is far enough below 0 to impose no restriction. */
894         int notVisitedLow  = period > 0 ? (lastNotVisited - period) : -(coverRadius + 1);
896         int notCoveredLow  = notVisitedLow + coverRadius;
897         int notCoveredHigh = notVisitedHigh - coverRadius;
899         for (int i = 0; i <= notVisitedHigh; i++)
901             /* For non-periodic boundaries notCoveredLow = -1 will impose no restriction. */
902             covered[i] = static_cast<int>((i > notCoveredLow) && (i < notCoveredHigh));
905         /* Upper end. Same as for lower end but in the other direction. */
906         notVisitedHigh = period > 0 ? (firstNotVisited + period) : (numPoints + coverRadius);
907         notVisitedLow  = lastNotVisited;
909         notCoveredLow  = notVisitedLow + coverRadius;
910         notCoveredHigh = notVisitedHigh - coverRadius;
912         for (int i = notVisitedLow; i <= numPoints - 1; i++)
914             /* For non-periodic boundaries notCoveredHigh = numPoints will impose no restriction. */
915             covered[i] = static_cast<int>((i > notCoveredLow) && (i < notCoveredHigh));
922 bool BiasState::isSamplingRegionCovered(const BiasParams& params,
923 ArrayRef<const DimParams> dimParams,
924 const BiasGrid& grid) const
926 /* Allocate and initialize arrays: one for checking visits along each dimension,
927 one for keeping track of which points to check and one for the covered points.
928 Possibly these could be kept as AWH variables to avoid these allocations. */
931 std::vector<bool> visited;
932 std::vector<bool> checkCovering;
933 // We use int for the covering array since we might use gmx_sumi_sim.
934 std::vector<int> covered;
937 std::vector<CheckDim> checkDim;
938 checkDim.resize(grid.numDimensions());
940 for (int d = 0; d < grid.numDimensions(); d++)
942 const size_t numPoints = grid.axis(d).numPoints();
943 checkDim[d].visited.resize(numPoints, false);
944 checkDim[d].checkCovering.resize(numPoints, false);
945 checkDim[d].covered.resize(numPoints, 0);
948 /* Set visited points along each dimension and which points should be checked for covering.
949 Specifically, points above the free energy cutoff (if there is one) or points outside
950 of the target region are ignored. */
952 /* Set the free energy cutoff */
953 double maxFreeEnergy = GMX_FLOAT_MAX;
955 if (params.eTarget == AwhTargetType::Cutoff)
957 maxFreeEnergy = freeEnergyMinimumValue(points_) + params.freeEnergyCutoffInKT;
960 /* Set the threshold weight for a point to be considered visited. */
961 double weightThreshold = 1;
962 for (int d = 0; d < grid.numDimensions(); d++)
964 if (grid.axis(d).isFepLambdaAxis())
966 /* Do not modify the weight threshold based on a FEP lambda axis. The spread
967 * of the sampling weights is not depending on a Gaussian distribution (like
969 weightThreshold *= 1.0;
973 /* The spacing is proportional to 1/sqrt(betak). The weight threshold will be
974 * approximately (given that the spacing can be modified if the dimension is periodic)
975 * proportional to sqrt(1/(2*pi)). */
976 weightThreshold *= grid.axis(d).spacing()
977 * std::sqrt(dimParams[d].pullDimParams().betak * 0.5 * M_1_PI);
981 /* Project the sampling weights onto each dimension */
982 for (size_t m = 0; m < grid.numPoints(); m++)
984 const PointState& pointState = points_[m];
986 for (int d = 0; d < grid.numDimensions(); d++)
988 int n = grid.point(m).index[d];
990 /* Is visited if it was already visited or if there is enough weight at the current point */
991 checkDim[d].visited[n] = checkDim[d].visited[n] || (weightSumCovering_[m] > weightThreshold);
993 /* Check for covering if there is at least point in this slice that is in the target region and within the cutoff */
994 checkDim[d].checkCovering[n] =
995 checkDim[d].checkCovering[n]
996 || (pointState.inTargetRegion() && pointState.freeEnergy() < maxFreeEnergy);
1000 /* Label each point along each dimension as covered or not. */
1001 for (int d = 0; d < grid.numDimensions(); d++)
1003 labelCoveredPoints(checkDim[d].visited,
1004 checkDim[d].checkCovering,
1005 grid.axis(d).numPoints(),
1006 grid.axis(d).numPointsInPeriod(),
1007 params.coverRadius()[d],
1008 checkDim[d].covered);
1011 /* Now check for global covering. Each dimension needs to be covered separately.
1012 A dimension is covered if each point is covered. Multiple simulations collectively
1013 cover the points, i.e. a point is covered if any of the simulations covered it.
1014 However, visited points are not shared, i.e. if a point is covered or not is
1015 determined by the visits of a single simulation. In general the covering criterion is
1016 all points covered => all points are surrounded by visited points up to a radius = coverRadius.
1017 For 1 simulation, all points covered <=> all points visited. For multiple simulations
1018 however, all points visited collectively !=> all points covered, except for coverRadius = 0.
1019 In the limit of large coverRadius, all points covered => all points visited by at least one
1020 simulation (since no point will be covered until all points have been visited by a
1021 single simulation). Basically coverRadius sets how much "connectedness" (or mixing) a point
1022 needs with surrounding points before sharing covering information with other simulations. */
1024 /* Communicate the covered points between sharing simulations if needed. */
1025 if (params.numSharedUpdate > 1)
1027 /* For multiple dimensions this may not be the best way to do it. */
1028 for (int d = 0; d < grid.numDimensions(); d++)
1030 biasSharing_->sumOverSharingSimulations(
1031 gmx::arrayRefFromArray(checkDim[d].covered.data(), grid.axis(d).numPoints()),
1036 /* Now check if for each dimension all points are covered. Break if not true. */
1037 bool allPointsCovered = true;
1038 for (int d = 0; d < grid.numDimensions() && allPointsCovered; d++)
1040 for (int n = 0; n < grid.axis(d).numPoints() && allPointsCovered; n++)
1042 allPointsCovered = (checkDim[d].covered[n] != 0);
1046 return allPointsCovered;
1050 * Normalizes the free energy and PMF sum.
1052 * \param[in] pointState The state of the points.
1054 static void normalizeFreeEnergyAndPmfSum(std::vector<PointState>* pointState)
1056 double minF = freeEnergyMinimumValue(*pointState);
1058 for (PointState& ps : *pointState)
1060 ps.normalizeFreeEnergyAndPmfSum(minF);
/* Performs the periodic free-energy update: builds the list of points to
 * update, sums the sampled histograms (across sharing simulations when
 * applicable), updates the free energy and reference weight histogram for
 * those points, and finally refreshes the target distribution and bias.
 * NOTE(review): part of the parameter list (at least t, step and fplog are
 * referenced below) lies outside this excerpt -- confirm against the header. */
void BiasState::updateFreeEnergyAndAddSamplesToHistogram(ArrayRef<const DimParams> dimParams,
                                                         const BiasGrid& grid,
                                                         const BiasParams& params,
                                                         std::vector<int>* updateList)
    /* Note that updateList is only used in this scope and is always
     * re-initialized. We do not use a local vector, because that would
     * cause reallocation every time this function is called and the vector
     * can be the size of the whole grid.
     */

    /* Make a list of all local points, i.e. those that could have been touched since
       the last update. These are the points needed for summing histograms below
       (non-local points only add zeros). For local updates, this will also be the
       final update list. */
    makeLocalUpdateList(grid, points_, originUpdatelist_, endUpdatelist_, updateList);
    if (params.numSharedUpdate > 1)
        mergeSharedUpdateLists(updateList, points_.size(), *biasSharing_, params.biasIndex);

    /* Reset the range for the next update */
    resetLocalUpdateRange(grid);

    /* Add samples to histograms for all local points and sync simulations if needed */
    sumHistograms(points_, weightSumCovering_, params.numSharedUpdate, biasSharing_, params.biasIndex, *updateList);

    sumPmf(points_, params.numSharedUpdate, biasSharing_, params.biasIndex);

    /* Renormalize the free energy if values are too large. */
    bool needToNormalizeFreeEnergy = false;
    for (int& globalIndex : *updateList)
        /* We want to keep the absolute value of the free energies to be less than
           c_largePositiveExponent to be able to safely pass these values to exp(). The check below
           ensures this as long as the free energy values grow less than 0.5*c_largePositiveExponent
           in a return time to this neighborhood. For reasonable update sizes it's unlikely that
           this requirement would be broken. */
        if (std::abs(points_[globalIndex].freeEnergy()) > 0.5 * detail::c_largePositiveExponent)
            needToNormalizeFreeEnergy = true;

    /* Update target distribution? */
    bool needToUpdateTargetDistribution =
            (params.eTarget != AwhTargetType::Constant && params.isUpdateTargetStep(step));

    /* In the initial stage, the histogram grows dynamically as a function of the number of coverings. */
    bool detectedCovering = false;
    if (inInitialStage())
        /* NOTE(review): the assignment target of this expression (presumably
         * detectedCovering = ...) lies outside this excerpt. */
        (params.isCheckCoveringStep(step) && isSamplingRegionCovered(params, dimParams, grid));

    /* The weighthistogram size after this update. */
    double newHistogramSize = histogramSize_.newHistogramSize(
            params, t, detectedCovering, points_, weightSumCovering_, fplog);

    /* Make the update list. Usually we try to only update local points,
     * but if the update has non-trivial or non-deterministic effects
     * on non-local points a global update is needed. This is the case when:
     * 1) a covering occurred in the initial stage, leading to non-trivial
     *    histogram rescaling factors; or
     * 2) the target distribution will be updated, since we don't make any
     *    assumption on its form; or
     * 3) the AWH parameters are such that we never attempt to skip non-local
     *    updates; or
     * 4) the free energy values have grown so large that a renormalization
     *    is needed.
     */
    if (needToUpdateTargetDistribution || detectedCovering || !params.skipUpdates() || needToNormalizeFreeEnergy)
        /* Global update, just add all points. */
        updateList->clear();
        for (size_t m = 0; m < points_.size(); m++)
            if (points_[m].inTargetRegion())
                updateList->push_back(m);

    /* Set histogram scale factors. */
    double weightHistScalingSkipped = 0;
    double logPmfsumScalingSkipped = 0;
    if (params.skipUpdates())
        getSkippedUpdateHistogramScaleFactors(params, &weightHistScalingSkipped, &logPmfsumScalingSkipped);
    double weightHistScalingNew;
    double logPmfsumScalingNew;
    setHistogramUpdateScaleFactors(
            params, newHistogramSize, histogramSize_.histogramSize(), &weightHistScalingNew, &logPmfsumScalingNew);

    /* Update free energy and reference weight histogram for points in the update list. */
    for (int pointIndex : *updateList)
        PointState* pointStateToUpdate = &points_[pointIndex];

        /* Do updates from previous update steps that were skipped because this point was at that time non-local. */
        if (params.skipUpdates())
            pointStateToUpdate->performPreviouslySkippedUpdates(
                    params, histogramSize_.numUpdates(), weightHistScalingSkipped, logPmfsumScalingSkipped);

        /* Now do an update with new sampling data. */
        pointStateToUpdate->updateWithNewSampling(
                params, histogramSize_.numUpdates(), weightHistScalingNew, logPmfsumScalingNew);

    /* Only update the histogram size after we are done with the local point updates */
    histogramSize_.setHistogramSize(newHistogramSize, weightHistScalingNew);

    if (needToNormalizeFreeEnergy)
        normalizeFreeEnergyAndPmfSum(&points_);

    if (needToUpdateTargetDistribution)
        /* The target distribution is always updated for all points at once. */
        updateTargetDistribution(points_, params);

    /* Update the bias. The bias is updated separately and last since it is simply a function of
       the free energy and the target distribution and we want to avoid doing extra work. */
    for (int pointIndex : *updateList)
        points_[pointIndex].updateBias();

    /* Increase the update counter. */
    histogramSize_.incrementNumUpdates();
/* Computes the normalized probability weights of all neighbors of the current
 * grid point and returns the convolved bias at the current coordinate value.
 * NOTE(review): preprocessor lines (the #else/#endif of the SIMD branch) and
 * some arguments of biasedLogWeightFromPoint lie outside this excerpt. */
double BiasState::updateProbabilityWeightsAndConvolvedBias(ArrayRef<const DimParams> dimParams,
                                                           const BiasGrid& grid,
                                                           ArrayRef<const double> neighborLambdaEnergies,
                                                           std::vector<double, AlignedAllocator<double>>* weight) const
    /* Only neighbors of the current coordinate value will have a non-negligible chance of getting sampled */
    const std::vector<int>& neighbors = grid.point(coordState_.gridpointIndex()).neighbor;

#if GMX_SIMD_HAVE_DOUBLE
    typedef SimdDouble PackType;
    constexpr int packSize = GMX_SIMD_DOUBLE_WIDTH;
    /* Scalar fallback when SIMD doubles are not available. */
    typedef double PackType;
    constexpr int packSize = 1;
    /* Round the size of the weight array up to packSize */
    const int weightSize = ((neighbors.size() + packSize - 1) / packSize) * packSize;
    weight->resize(weightSize);

    double* gmx_restrict weightData = weight->data();
    PackType weightSumPack(0.0);
    for (size_t i = 0; i < neighbors.size(); i += packSize)
        for (size_t n = i; n < i + packSize; n++)
            if (n < neighbors.size())
                const int neighbor = neighbors[n];
                (*weight)[n] = biasedLogWeightFromPoint(dimParams,
                                                        points_[neighbor].bias(),
                                                        coordState_.coordValue(),
                                                        neighborLambdaEnergies,
                                                        coordState_.gridpointIndex());
                /* Pad with values that don't affect the result */
                (*weight)[n] = detail::c_largeNegativeExponent;
        /* Turn the log weights of this pack into weights and accumulate their sum. */
        PackType weightPack = load<PackType>(weightData + i);
        weightPack = gmx::exp(weightPack);
        weightSumPack = weightSumPack + weightPack;
        store(weightData + i, weightPack);
    /* Sum of probability weights */
    double weightSum = reduce(weightSumPack);
    GMX_RELEASE_ASSERT(weightSum > 0,
                       "zero probability weight when updating AWH probability weights.");

    /* Normalize probabilities to sum to 1 */
    double invWeightSum = 1 / weightSum;

    /* When there is a free energy lambda state axis remove the convolved contributions along that
     * axis from the total bias. This must be done after calculating invWeightSum (since weightSum
     * will be modified), but before normalizing the weights (below). */
    if (grid.hasLambdaAxis())
        /* If there is only one axis the bias will not be convolved in any dimension. */
        if (grid.axis().size() == 1)
            weightSum = gmx::exp(points_[coordState_.gridpointIndex()].bias());
            for (size_t i = 0; i < neighbors.size(); i++)
                const int neighbor = neighbors[i];
                if (pointsHaveDifferentLambda(grid, coordState_.gridpointIndex(), neighbor))
                    weightSum -= weightData[i];

    /* Normalize the weights in place using invWeightSum computed above.
     * NOTE(review): the loop body (presumably w *= invWeightSum;) lies
     * outside this excerpt. */
    for (double& w : *weight)

    /* Return the convolved bias */
    return std::log(weightSum);
/* Calculates the convolved bias for the given coordinate value by summing
 * the probability weights over the neighborhood of the nearest grid point. */
double BiasState::calcConvolvedBias(ArrayRef<const DimParams> dimParams,
                                    const BiasGrid& grid,
                                    const awh_dvec& coordValue) const
    int point = grid.nearestIndex(coordValue);
    const GridPoint& gridPoint = grid.point(point);

    /* Sum the probability weights from the neighborhood of the given point */
    double weightSum = 0;
    for (int neighbor : gridPoint.neighbor)
        /* No convolution is required along the lambda dimension.
         * NOTE(review): the statement skipping such neighbors (presumably
         * continue;) lies outside this excerpt. */
        if (pointsHaveDifferentLambda(grid, point, neighbor))
        double logWeight = biasedLogWeightFromPoint(
                dimParams, points_, grid, neighbor, points_[neighbor].bias(), coordValue, {}, point);
        weightSum += std::exp(logWeight);

    /* Returns -GMX_FLOAT_MAX if no neighboring points were in the target region. */
    return (weightSum > 0) ? std::log(weightSum) : -GMX_FLOAT_MAX;
/* Saves the current neighborhood probability weights for the next update and
 * grows the local update range to contain the current neighborhood. */
void BiasState::sampleProbabilityWeights(const BiasGrid& grid, gmx::ArrayRef<const double> probWeightNeighbor)
    const std::vector<int>& neighbor = grid.point(coordState_.gridpointIndex()).neighbor;

    /* Save weights for next update */
    for (size_t n = 0; n < neighbor.size(); n++)
        points_[neighbor[n]].increaseWeightSumIteration(probWeightNeighbor[n]);

    /* Update the local update range. Two corner points define this rectangular
     * domain. We need to choose two new corner points such that the new domain
     * contains both the old update range and the current neighborhood.
     * In the simplest case when an update is performed every sample,
     * the update range would simply equal the current neighborhood.
     */
    int neighborStart = neighbor[0];
    int neighborLast = neighbor[neighbor.size() - 1];
    for (int d = 0; d < grid.numDimensions(); d++)
        int origin = grid.point(neighborStart).index[d];
        int last = grid.point(neighborLast).index[d];

        /* NOTE(review): the condition guarding the unwrap below (presumably
         * checking origin > last for a wrapped periodic interval) lies outside
         * this excerpt. */
        /* Unwrap if wrapped around the boundary (only happens for periodic
         * boundaries). This has already been done for the stored index interval.
         */
        /* TODO: what we want to do is to find the smallest update
         * interval that contains all points that need to be updated.
         * This amounts to combining two intervals, the current
         * [origin, end] update interval and the new touched neighborhood
         * into a new interval that contains all points from both the old
         * intervals.
         *
         * For periodic boundaries it becomes slightly more complicated
         * than for closed boundaries because then it needs not be
         * true that origin < end (so one can't simply relate the origin/end
         * in the min()/max() below). The strategy here is to choose the
         * origin closest to a reference point (index 0) and then unwrap
         * the end index if needed and choose the largest end index.
         * This ensures that both intervals are in the new interval
         * but it's not necessarily the smallest.
         * Currently we solve this by going through each possibility
         * and checking them.
         */
        last += grid.axis(d).numPointsInPeriod();

        originUpdatelist_[d] = std::min(originUpdatelist_[d], origin);
        endUpdatelist_[d] = std::max(endUpdatelist_[d], last);
/* Samples the current coordinate value into the PMF histogram (reweighted by
 * the convolved bias) and saves the probability weights for the next update. */
void BiasState::sampleCoordAndPmf(const std::vector<DimParams>& dimParams,
                                  const BiasGrid& grid,
                                  gmx::ArrayRef<const double> probWeightNeighbor,
                                  double convolvedBias)
    /* Sampling-based deconvolution extracting the PMF.
     * Update the PMF histogram with the current coordinate value.
     *
     * Because of the finite width of the harmonic potential, the free energy
     * defined for each coordinate point does not exactly equal that of the
     * actual coordinate, the PMF. However, the PMF can be estimated by applying
     * the relation exp(-PMF) = exp(-bias_convolved)*P_biased/Z, i.e. by keeping a
     * reweighted histogram of the coordinate value. Strictly, this relies on
     * the unknown normalization constant Z being either constant or known. Here,
     * neither is true except in the long simulation time limit. Empirically however,
     * it works (mainly because how the PMF histogram is rescaled).
     */

    const int gridPointIndex = coordState_.gridpointIndex();
    const std::optional<int> lambdaAxisIndex = grid.lambdaAxisIndex();

    /* Update the PMF of points along a lambda axis with their bias. */
    if (lambdaAxisIndex)
        const std::vector<int>& neighbors = grid.point(gridPointIndex).neighbor;

        std::vector<double> lambdaMarginalDistribution =
                calculateFELambdaMarginalDistribution(grid, neighbors, probWeightNeighbor);

        awh_dvec coordValueAlongLambda = { coordState_.coordValue()[0],
                                           coordState_.coordValue()[1],
                                           coordState_.coordValue()[2],
                                           coordState_.coordValue()[3] };
        for (size_t i = 0; i < neighbors.size(); i++)
            const int neighbor = neighbors[i];

            /* NOTE(review): the declaration of the local variable bias assigned
             * below lies outside this excerpt. */
            if (pointsAlongLambdaAxis(grid, gridPointIndex, neighbor))
                const double neighborLambda = grid.point(neighbor).coordValue[lambdaAxisIndex.value()];
                /* At the current point the already computed convolved bias applies;
                 * for other lambda states it is recomputed at that lambda value. */
                if (neighbor == gridPointIndex)
                    bias = convolvedBias;
                    coordValueAlongLambda[lambdaAxisIndex.value()] = neighborLambda;
                    bias = calcConvolvedBias(dimParams, grid, coordValueAlongLambda);

                /* NOTE(review): neighborLambda is a double used here as a vector
                 * index; presumably lambda coordinate values are integer-valued
                 * state indices -- confirm the implicit conversion is intended. */
                const double probWeight = lambdaMarginalDistribution[neighborLambda];
                const double weightedBias = bias - std::log(std::max(probWeight, GMX_DOUBLE_MIN)); // avoid log(0)

                if (neighbor == gridPointIndex && grid.covers(coordState_.coordValue()))
                    points_[neighbor].samplePmf(weightedBias);
                    points_[neighbor].updatePmfUnvisited(weightedBias);

    /* Only save coordinate data that is in range (the given index is always
     * in range even if the coordinate value is not).
     */
    if (grid.covers(coordState_.coordValue()))
        /* Save PMF sum and keep a histogram of the sampled coordinate values */
        points_[gridPointIndex].samplePmf(convolvedBias);

    /* Save probability weights for the update */
    sampleProbabilityWeights(grid, probWeightNeighbor);
1454 void BiasState::initHistoryFromState(AwhBiasHistory* biasHistory) const
1456 biasHistory->pointState.resize(points_.size());
1459 void BiasState::updateHistory(AwhBiasHistory* biasHistory, const BiasGrid& grid) const
1461 GMX_RELEASE_ASSERT(biasHistory->pointState.size() == points_.size(),
1462 "The AWH history setup does not match the AWH state.");
1464 AwhBiasStateHistory* stateHistory = &biasHistory->state;
1465 stateHistory->umbrellaGridpoint = coordState_.umbrellaGridpoint();
1467 for (size_t m = 0; m < biasHistory->pointState.size(); m++)
1469 AwhPointStateHistory* psh = &biasHistory->pointState[m];
1471 points_[m].storeState(psh);
1473 psh->weightsum_covering = weightSumCovering_[m];
1476 histogramSize_.storeState(stateHistory);
1478 stateHistory->origin_index_updatelist = multiDimGridIndexToLinear(grid, originUpdatelist_);
1479 stateHistory->end_index_updatelist = multiDimGridIndexToLinear(grid, endUpdatelist_);
1482 void BiasState::restoreFromHistory(const AwhBiasHistory& biasHistory, const BiasGrid& grid)
1484 const AwhBiasStateHistory& stateHistory = biasHistory.state;
1486 coordState_.restoreFromHistory(stateHistory);
1488 if (biasHistory.pointState.size() != points_.size())
1491 InvalidInputError("Bias grid size in checkpoint and simulation do not match. "
1492 "Likely you provided a checkpoint from a different simulation."));
1494 for (size_t m = 0; m < points_.size(); m++)
1496 points_[m].setFromHistory(biasHistory.pointState[m]);
1499 for (size_t m = 0; m < weightSumCovering_.size(); m++)
1501 weightSumCovering_[m] = biasHistory.pointState[m].weightsum_covering;
1504 histogramSize_.restoreFromHistory(stateHistory);
1506 linearGridindexToMultiDim(grid, stateHistory.origin_index_updatelist, originUpdatelist_);
1507 linearGridindexToMultiDim(grid, stateHistory.end_index_updatelist, endUpdatelist_);
1510 void BiasState::broadcast(const t_commrec* commRecord)
1512 gmx_bcast(sizeof(coordState_), &coordState_, commRecord->mpi_comm_mygroup);
1514 gmx_bcast(points_.size() * sizeof(PointState), points_.data(), commRecord->mpi_comm_mygroup);
1516 gmx_bcast(weightSumCovering_.size() * sizeof(double), weightSumCovering_.data(), commRecord->mpi_comm_mygroup);
1518 gmx_bcast(sizeof(histogramSize_), &histogramSize_, commRecord->mpi_comm_mygroup);
1521 void BiasState::setFreeEnergyToConvolvedPmf(ArrayRef<const DimParams> dimParams, const BiasGrid& grid)
1523 std::vector<float> convolvedPmf;
1525 calcConvolvedPmf(dimParams, grid, &convolvedPmf);
1527 for (size_t m = 0; m < points_.size(); m++)
1529 points_[m].setFreeEnergy(convolvedPmf[m]);
/*! \brief
 * Count trailing data rows containing only zeros.
 *
 * \param[in] data        2D data array.
 * \param[in] numRows     Number of rows in array.
 * \param[in] numColumns  Number of cols in array.
 * \returns the number of trailing zero rows.
 */
static int countTrailingZeroRows(const double* const* data, int numRows, int numColumns)
{
    /* True when every column of the given row holds exactly zero.
       Note that data is stored column-major: data[column][row]. */
    auto rowIsAllZero = [data, numColumns](int row) {
        for (int column = 0; column < numColumns; column++)
        {
            if (data[column][row] != 0)
            {
                return false;
            }
        }
        return true;
    };

    /* Walk upwards from the last row until a row with non-zero data appears. */
    int row = numRows - 1;
    while (row >= 0 && rowIsAllZero(row))
    {
        row--;
    }

    return numRows - 1 - row;
}
/*! \brief
 * Initializes the PMF and target with data read from an input table.
 *
 * \param[in] dimParams The dimension parameters.
 * \param[in] grid The grid.
 * \param[in] filename The filename to read PMF and target from.
 * \param[in] numBias Number of biases.
 * \param[in] biasIndex The index of the bias.
 * \param[in,out] pointState The state of the points in this bias.
 */
static void readUserPmfAndTargetDistribution(ArrayRef<const DimParams> dimParams,
                                             const BiasGrid& grid,
                                             const std::string& filename,
                                             std::vector<PointState>* pointState)
    /* Read the PMF and target distribution.
       From the PMF, the convolved PMF, or the reference value free energy, can be calculated
       based on the force constant. The free energy and target together determine the bias.
     */
    std::string filenameModified(filename);
    /* With multiple biases, the bias index is inserted before the file extension.
     * NOTE(review): the condition guarding this insertion (presumably numBias > 1)
     * lies outside this excerpt. */
        size_t n = filenameModified.rfind('.');
        GMX_RELEASE_ASSERT(n != std::string::npos,
                           "The filename should contain an extension starting with .");
        filenameModified.insert(n, formatString("%d", biasIndex));

    /* Message describing the expected file layout, appended to all input errors below. */
    std::string correctFormatMessage = formatString(
            "%s is expected in the following format. "
            "The first ndim column(s) should contain the coordinate values for each point, "
            "each column containing values of one dimension (in ascending order). "
            "For a multidimensional coordinate, points should be listed "
            "in the order obtained by traversing lower dimensions first. "
            "E.g. for two-dimensional grid of size nxn: "
            "(1, 1), (1, 2),..., (1, n), (2, 1), (2, 2), ..., , (n, n - 1), (n, n). "
            "Column ndim + 1 should contain the PMF value for each coordinate value. "
            "The target distribution values should be in column ndim + 2 or column ndim + 5. "
            "Make sure the input file ends with a new line but has no trailing new lines.",
    gmx::TextLineWrapper wrapper;
    wrapper.settings().setLineLength(c_linewidth);
    correctFormatMessage = wrapper.wrapToString(correctFormatMessage);

    /* NOTE(review): the declarations of data and numColumns lie outside this excerpt. */
    int numRows = read_xvg(filenameModified.c_str(), &data, &numColumns);

    /* Check basic data properties here. BiasGrid takes care of more complicated things. */
        std::string mesg = gmx::formatString(
                "%s is empty!.\n\n%s", filename.c_str(), correctFormatMessage.c_str());
        GMX_THROW(InvalidInputError(mesg));

    /* Less than 2 points is not useful for PMF or target. */
        std::string mesg = gmx::formatString(
                "%s contains too few data points (%d)."
                "The minimum number of points is 2.",
        GMX_THROW(InvalidInputError(mesg));

    /* Make sure there are enough columns of data.

       Two formats are allowed. Either with columns {coords, PMF, target} or
       {coords, PMF, x, y, z, target, ...}. The latter format is allowed since that
       is how AWH output is written (x, y, z being other AWH variables). For this format,
       trailing columns are ignored.
     */
    int columnIndexTarget;
    int numColumnsMin = dimParams.size() + 2;
    int columnIndexPmf = dimParams.size();
    if (numColumns == numColumnsMin)
        columnIndexTarget = columnIndexPmf + 1;
        /* AWH-output format: skip the three extra AWH variable columns. */
        columnIndexTarget = columnIndexPmf + 4;

    if (numColumns < numColumnsMin)
        std::string mesg = gmx::formatString(
                "The number of columns in %s should be at least %d."
                correctFormatMessage.c_str());
        GMX_THROW(InvalidInputError(mesg));

    /* read_xvg can give trailing zero data rows for trailing new lines in the input. We allow 1 zero row,
       since this could be real data. But multiple trailing zero rows cannot correspond to valid data. */
    int numZeroRows = countTrailingZeroRows(data, numRows, numColumns);
    if (numZeroRows > 1)
        std::string mesg = gmx::formatString(
                "Found %d trailing zero data rows in %s. Please remove trailing empty lines and "
        GMX_THROW(InvalidInputError(mesg));

    /* Convert from user units to internal units before sending the data off to the grid. */
    for (size_t d = 0; d < dimParams.size(); d++)
        double scalingFactor = dimParams[d].scaleUserInputToInternal(1);
        if (scalingFactor == 1)
        /* Scale the coordinate column of this dimension. */
        for (size_t m = 0; m < pointState->size(); m++)
            data[d][m] *= scalingFactor;

    /* Get a data point for each AWH grid point so that they all get data. */
    std::vector<int> gridIndexToDataIndex(grid.numPoints());
    mapGridToDataGrid(&gridIndexToDataIndex, data, numRows, filename, grid, correctFormatMessage);

    /* Extract the data for each grid point.
     * We check if the target distribution is zero for all points.
     */
    bool targetDistributionIsZero = true;
    for (size_t m = 0; m < pointState->size(); m++)
        (*pointState)[m].setLogPmfSum(-data[columnIndexPmf][gridIndexToDataIndex[m]]);
        double target = data[columnIndexTarget][gridIndexToDataIndex[m]];

        /* Check if the values are allowed. */
            std::string mesg = gmx::formatString(
                    "Target distribution weight at point %zu (%g) in %s is negative.",
            GMX_THROW(InvalidInputError(mesg));
            /* At least one point has a positive target weight. */
            targetDistributionIsZero = false;
        (*pointState)[m].setTargetConstantWeight(target);

    if (targetDistributionIsZero)
        /* NOTE(review): the declaration of mesg lies outside this excerpt. */
                gmx::formatString("The target weights given in column %d in %s are all 0",
        GMX_THROW(InvalidInputError(mesg));

    /* Free the arrays. */
    for (int m = 0; m < numColumns; m++)
/* Normalizes the PMF estimate consistently with the current histogram size,
 * taking into account the number of sharing simulations. */
void BiasState::normalizePmf(int numSharingSims)
    /* The normalization of the PMF estimate matters because it determines how big effect the next sample has.
       Approximately (for large enough force constant) we should have:
       sum_x(exp(-pmf(x)) = nsamples*sum_xref(exp(-f(xref)).
     */

    /* Calculate the normalization factor, i.e. divide by the pmf sum, multiply by the number of samples and the f sum */
    double expSumPmf = 0;
    /* NOTE(review): the declaration of expSumF (presumably double expSumF = 0;)
     * lies outside this excerpt. */
    for (const PointState& pointState : points_)
        if (pointState.inTargetRegion())
            expSumPmf += std::exp(pointState.logPmfSum());
            expSumF += std::exp(-pointState.freeEnergy());
    /* Per-simulation number of samples when the histogram is shared. */
    double numSamples = histogramSize_.histogramSize() / numSharingSims;

    /* Renormalize */
    double logRenorm = std::log(numSamples * expSumF / expSumPmf);
    for (PointState& pointState : points_)
        if (pointState.inTargetRegion())
            pointState.setLogPmfSum(pointState.logPmfSum() + logRenorm);
/* Initializes the grid point state: optionally reads a user-provided PMF and
 * target distribution, then sets the target, bias and reference weight
 * histogram, and finally normalizes the PMF.
 * NOTE(review): part of the parameter list (at least numBias, referenced
 * below) lies outside this excerpt -- confirm against the header. */
void BiasState::initGridPointState(const AwhBiasParams& awhBiasParams,
                                   ArrayRef<const DimParams> dimParams,
                                   const BiasGrid& grid,
                                   const BiasParams& params,
                                   const std::string& filename,
    /* Modify PMF, free energy and the constant target distribution factor
     * to user input values if there is data given.
     */
    if (awhBiasParams.userPMFEstimate())
        readUserPmfAndTargetDistribution(dimParams, grid, filename, numBias, params.biasIndex, &points_);
        setFreeEnergyToConvolvedPmf(dimParams, grid);

    /* The local Boltzmann distribution is special because the target distribution is updated as a function of the reference weighthistogram. */
    GMX_RELEASE_ASSERT(params.eTarget != AwhTargetType::LocalBoltzmann || points_[0].weightSumRef() != 0,
                       "AWH reference weight histogram not initialized properly with local "
                       "Boltzmann target distribution.");

    updateTargetDistribution(points_, params);

    for (PointState& pointState : points_)
        if (pointState.inTargetRegion())
            pointState.updateBias();
            /* Note that for zero target this is a value that represents -infinity but should not be used for biasing. */
            pointState.setTargetToZero();

    /* Set the initial reference weighthistogram. */
    const double histogramSize = histogramSize_.histogramSize();
    for (auto& pointState : points_)
        pointState.setInitialReferenceWeightHistogram(histogramSize);

    /* Make sure the pmf is normalized consistently with the histogram size.
       Note: the target distribution and free energy need to be set here. */
    normalizePmf(params.numSharedUpdate);
1824 BiasState::BiasState(const AwhBiasParams& awhBiasParams,
1825 double histogramSizeInitial,
1826 ArrayRef<const DimParams> dimParams,
1827 const BiasGrid& grid,
1828 const BiasSharing* biasSharing) :
1829 coordState_(awhBiasParams, dimParams, grid),
1830 points_(grid.numPoints()),
1831 weightSumCovering_(grid.numPoints()),
1832 histogramSize_(awhBiasParams, histogramSizeInitial),
1833 biasSharing_(biasSharing)
1835 /* The minimum and maximum multidimensional point indices that are affected by the next update */
1836 for (size_t d = 0; d < dimParams.size(); d++)
1838 int index = grid.point(coordState_.gridpointIndex()).index[d];
1839 originUpdatelist_[d] = index;
1840 endUpdatelist_[d] = index;