/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2015,2016,2017,2018,2019, The GROMACS development team.
 * Copyright (c) 2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \internal \file
 * \brief
 * Implements the BiasState class.
 *
 * \author Viveca Lindahl
 * \author Berk Hess <hess@kth.se>
 * \ingroup module_awh
 */
#include "gmxpre.h"

#include "biasstate.h"

#include <cassert>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>

#include <algorithm>
#include <optional>

#include "gromacs/fileio/gmxfio.h"
#include "gromacs/fileio/xvgr.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/math/units.h"
#include "gromacs/math/utilities.h"
#include "gromacs/mdrunutility/multisim.h"
#include "gromacs/mdtypes/awh_history.h"
#include "gromacs/mdtypes/awh_params.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/simd/simd.h"
#include "gromacs/simd/simd_math.h"
#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/stringutil.h"

#include "biasgrid.h"
#include "biassharing.h"
#include "pointstate.h"
83 void BiasState::getPmf(gmx::ArrayRef<float> pmf) const
85 GMX_ASSERT(pmf.size() == points_.size(), "pmf should have the size of the bias grid");
87 /* The PMF is just the negative of the log of the sampled PMF histogram.
88 * Points with zero target weight are ignored, they will mostly contain noise.
90 for (size_t i = 0; i < points_.size(); i++)
92 pmf[i] = points_[i].inTargetRegion() ? -points_[i].logPmfSum() : GMX_FLOAT_MAX;
100 * Sum PMF over multiple simulations, when requested.
102 * \param[in,out] pointState The state of the points in the bias.
103 * \param[in] numSharedUpdate The number of biases sharing the histogram.
104 * \param[in] biasSharing Object for sharing bias data over multiple simulations
105 * \param[in] biasIndex Index of this bias in the total list of biases in this simulation
107 void sumPmf(gmx::ArrayRef<PointState> pointState, int numSharedUpdate, const BiasSharing* biasSharing, const int biasIndex)
109 if (numSharedUpdate == 1)
113 GMX_ASSERT(biasSharing != nullptr
114 && numSharedUpdate % biasSharing->numSharingSimulations(biasIndex) == 0,
115 "numSharedUpdate should be a multiple of multiSimComm->numSimulations_");
116 GMX_ASSERT(numSharedUpdate == biasSharing->numSharingSimulations(biasIndex),
117 "Sharing within a simulation is not implemented (yet)");
119 std::vector<double> buffer(pointState.size());
121 /* Need to temporarily exponentiate the log weights to sum over simulations */
122 for (size_t i = 0; i < buffer.size(); i++)
124 buffer[i] = pointState[i].inTargetRegion() ? std::exp(pointState[i].logPmfSum()) : 0;
127 biasSharing->sum(gmx::ArrayRef<double>(buffer), biasIndex);
129 /* Take log again to get (non-normalized) PMF */
130 double normFac = 1.0 / numSharedUpdate;
131 for (gmx::index i = 0; i < pointState.ssize(); i++)
133 if (pointState[i].inTargetRegion())
135 pointState[i].setLogPmfSum(std::log(buffer[i] * normFac));
141 * Find the minimum free energy value.
143 * \param[in] pointState The state of the points.
144 * \returns the minimum free energy value.
146 double freeEnergyMinimumValue(gmx::ArrayRef<const PointState> pointState)
148 double fMin = GMX_FLOAT_MAX;
150 for (auto const& ps : pointState)
152 if (ps.inTargetRegion() && ps.freeEnergy() < fMin)
154 fMin = ps.freeEnergy();
162 * Find and return the log of the probability weight of a point given a coordinate value.
164 * The unnormalized weight is given by
165 * w(point|value) = exp(bias(point) - U(value,point)),
166 * where U is a harmonic umbrella potential.
168 * \param[in] dimParams The bias dimensions parameters
169 * \param[in] points The point state.
170 * \param[in] grid The grid.
171 * \param[in] pointIndex Point to evaluate probability weight for.
172 * \param[in] pointBias Bias for the point (as a log weight).
173 * \param[in] value Coordinate value.
174 * \param[in] neighborLambdaEnergies The energy of the system in neighboring lambdas states. Can be
175 * empty when there are no free energy lambda state dimensions.
176 * \param[in] gridpointIndex The index of the current grid point.
177 * \returns the log of the biased probability weight.
179 double biasedLogWeightFromPoint(ArrayRef<const DimParams> dimParams,
180 ArrayRef<const PointState> points,
181 const BiasGrid& grid,
184 const awh_dvec value,
185 ArrayRef<const double> neighborLambdaEnergies,
188 double logWeight = detail::c_largeNegativeExponent;
190 /* Only points in the target region have non-zero weight */
191 if (points[pointIndex].inTargetRegion())
193 logWeight = pointBias;
195 /* Add potential for all parameter dimensions */
196 for (size_t d = 0; d < dimParams.size(); d++)
198 if (dimParams[d].isFepLambdaDimension())
200 /* If this is not a sampling step or if this function is called from
201 * calcConvolvedBias(), when writing energy subblocks, neighborLambdaEnergies will
202 * be empty. No convolution is required along the lambda dimension. */
203 if (!neighborLambdaEnergies.empty())
205 const int pointLambdaIndex = grid.point(pointIndex).coordValue[d];
206 const int gridpointLambdaIndex = grid.point(gridpointIndex).coordValue[d];
207 logWeight -= dimParams[d].fepDimParams().beta
208 * (neighborLambdaEnergies[pointLambdaIndex]
209 - neighborLambdaEnergies[gridpointLambdaIndex]);
214 double dev = getDeviationFromPointAlongGridAxis(grid, d, pointIndex, value[d]);
215 logWeight -= 0.5 * dimParams[d].pullDimParams().betak * dev * dev;
223 * Calculates the marginal distribution (marginal probability) for each value along
224 * a free energy lambda axis.
225 * The marginal distribution of one coordinate dimension value is the sum of the probability
226 * distribution of all values (herein all neighbor values) with the same value in the dimension
228 * \param[in] grid The bias grid.
229 * \param[in] neighbors The points to use for the calculation of the marginal distribution.
230 * \param[in] probWeightNeighbor Probability weights of the neighbors.
231 * \returns The calculated marginal distribution in a 1D array with
232 * as many elements as there are points along the axis of interest.
234 std::vector<double> calculateFELambdaMarginalDistribution(const BiasGrid& grid,
235 ArrayRef<const int> neighbors,
236 ArrayRef<const double> probWeightNeighbor)
238 const std::optional<int> lambdaAxisIndex = grid.lambdaAxisIndex();
239 GMX_RELEASE_ASSERT(lambdaAxisIndex,
240 "There must be a free energy lambda axis in order to calculate the free "
241 "energy lambda marginal distribution.");
242 const int numFepLambdaStates = grid.numFepLambdaStates();
243 std::vector<double> lambdaMarginalDistribution(numFepLambdaStates, 0);
245 for (size_t i = 0; i < neighbors.size(); i++)
247 const int neighbor = neighbors[i];
248 const int lambdaState = grid.point(neighbor).coordValue[lambdaAxisIndex.value()];
249 lambdaMarginalDistribution[lambdaState] += probWeightNeighbor[i];
251 return lambdaMarginalDistribution;
256 void BiasState::calcConvolvedPmf(ArrayRef<const DimParams> dimParams,
257 const BiasGrid& grid,
258 std::vector<float>* convolvedPmf) const
260 size_t numPoints = grid.numPoints();
262 convolvedPmf->resize(numPoints);
264 /* Get the PMF to convolve. */
265 std::vector<float> pmf(numPoints);
268 for (size_t m = 0; m < numPoints; m++)
270 double freeEnergyWeights = 0;
271 const GridPoint& point = grid.point(m);
272 for (const auto& neighbor : point.neighbor)
274 /* Do not convolve the bias along a lambda axis - only use the pmf from the current point */
275 if (!pointsHaveDifferentLambda(grid, m, neighbor))
277 /* The negative PMF is a positive bias. */
278 double biasNeighbor = -pmf[neighbor];
280 /* Add the convolved PMF weights for the neighbors of this point.
281 Note that this function only adds point within the target > 0 region.
282 Sum weights, take the logarithm last to get the free energy. */
283 double logWeight = biasedLogWeightFromPoint(
284 dimParams, points_, grid, neighbor, biasNeighbor, point.coordValue, {}, m);
285 freeEnergyWeights += std::exp(logWeight);
289 GMX_RELEASE_ASSERT(freeEnergyWeights > 0,
290 "Attempting to do log(<= 0) in AWH convolved PMF calculation.");
291 (*convolvedPmf)[m] = -std::log(static_cast<float>(freeEnergyWeights));
299 * Updates the target distribution for all points.
301 * The target distribution is always updated for all points
304 * \param[in,out] pointState The state of all points.
305 * \param[in] params The bias parameters.
307 void updateTargetDistribution(ArrayRef<PointState> pointState, const BiasParams& params)
309 double freeEnergyCutoff = 0;
310 if (params.eTarget == AwhTargetType::Cutoff)
312 freeEnergyCutoff = freeEnergyMinimumValue(pointState) + params.freeEnergyCutoffInKT;
315 double sumTarget = 0;
316 for (PointState& ps : pointState)
318 sumTarget += ps.updateTargetWeight(params, freeEnergyCutoff);
320 GMX_RELEASE_ASSERT(sumTarget > 0, "We should have a non-zero distribution");
323 double invSum = 1.0 / sumTarget;
324 for (PointState& ps : pointState)
326 ps.scaleTarget(invSum);
331 * Puts together a string describing a grid point.
333 * \param[in] grid The grid.
334 * \param[in] point BiasGrid point index.
335 * \returns a string for the point.
337 std::string gridPointValueString(const BiasGrid& grid, int point)
339 std::string pointString;
343 for (int d = 0; d < grid.numDimensions(); d++)
345 pointString += gmx::formatString("%g", grid.point(point).coordValue[d]);
346 if (d < grid.numDimensions() - 1)
361 int BiasState::warnForHistogramAnomalies(const BiasGrid& grid, int biasIndex, double t, FILE* fplog, int maxNumWarnings) const
363 GMX_ASSERT(fplog != nullptr, "Warnings can only be issued if there is log file.");
364 const double maxHistogramRatio = 0.5; /* Tolerance for printing a warning about the histogram ratios */
366 /* Sum up the histograms and get their normalization */
367 double sumVisits = 0;
368 double sumWeights = 0;
369 for (const auto& pointState : points_)
371 if (pointState.inTargetRegion())
373 sumVisits += pointState.numVisitsTot();
374 sumWeights += pointState.weightSumTot();
377 GMX_RELEASE_ASSERT(sumVisits > 0, "We should have visits");
378 GMX_RELEASE_ASSERT(sumWeights > 0, "We should have weight");
379 double invNormVisits = 1.0 / sumVisits;
380 double invNormWeight = 1.0 / sumWeights;
382 /* Check all points for warnings */
384 size_t numPoints = grid.numPoints();
385 for (size_t m = 0; m < numPoints; m++)
387 /* Skip points close to boundary or non-target region */
388 const GridPoint& gridPoint = grid.point(m);
389 bool skipPoint = false;
390 for (size_t n = 0; (n < gridPoint.neighbor.size()) && !skipPoint; n++)
392 int neighbor = gridPoint.neighbor[n];
393 skipPoint = !points_[neighbor].inTargetRegion();
394 for (int d = 0; (d < grid.numDimensions()) && !skipPoint; d++)
396 const GridPoint& neighborPoint = grid.point(neighbor);
397 skipPoint = neighborPoint.index[d] == 0
398 || neighborPoint.index[d] == grid.axis(d).numPoints() - 1;
402 /* Warn if the coordinate distribution is less than the target distribution with a certain fraction somewhere */
403 const double relativeWeight = points_[m].weightSumTot() * invNormWeight;
404 const double relativeVisits = points_[m].numVisitsTot() * invNormVisits;
405 if (!skipPoint && relativeVisits < relativeWeight * maxHistogramRatio)
407 std::string pointValueString = gridPointValueString(grid, m);
408 std::string warningMessage = gmx::formatString(
410 "at t = %g ps the obtained coordinate distribution at coordinate value %s "
411 "is less than a fraction %g of the reference distribution at that point. "
412 "If you are not certain about your settings you might want to increase your "
413 "pull force constant or "
414 "modify your sampling region.\n",
417 pointValueString.c_str(),
419 gmx::TextLineWrapper wrapper;
420 wrapper.settings().setLineLength(c_linewidth);
421 fprintf(fplog, "%s", wrapper.wrapToString(warningMessage).c_str());
425 if (numWarnings >= maxNumWarnings)
434 double BiasState::calcUmbrellaForceAndPotential(ArrayRef<const DimParams> dimParams,
435 const BiasGrid& grid,
437 ArrayRef<const double> neighborLambdaDhdl,
438 ArrayRef<double> force) const
440 double potential = 0;
441 for (size_t d = 0; d < dimParams.size(); d++)
443 if (dimParams[d].isFepLambdaDimension())
445 if (!neighborLambdaDhdl.empty())
447 const int coordpointLambdaIndex = grid.point(point).coordValue[d];
448 force[d] = neighborLambdaDhdl[coordpointLambdaIndex];
449 /* The potential should not be affected by the lambda dimension. */
455 getDeviationFromPointAlongGridAxis(grid, d, point, coordState_.coordValue()[d]);
456 double k = dimParams[d].pullDimParams().k;
458 /* Force from harmonic potential 0.5*k*dev^2 */
459 force[d] = -k * deviation;
460 potential += 0.5 * k * deviation * deviation;
467 void BiasState::calcConvolvedForce(ArrayRef<const DimParams> dimParams,
468 const BiasGrid& grid,
469 ArrayRef<const double> probWeightNeighbor,
470 ArrayRef<const double> neighborLambdaDhdl,
471 ArrayRef<double> forceWorkBuffer,
472 ArrayRef<double> force) const
474 for (size_t d = 0; d < dimParams.size(); d++)
479 /* Only neighboring points have non-negligible contribution. */
480 const std::vector<int>& neighbor = grid.point(coordState_.gridpointIndex()).neighbor;
481 gmx::ArrayRef<double> forceFromNeighbor = forceWorkBuffer;
482 for (size_t n = 0; n < neighbor.size(); n++)
484 double weightNeighbor = probWeightNeighbor[n];
485 int indexNeighbor = neighbor[n];
487 /* Get the umbrella force from this point. The returned potential is ignored here. */
488 calcUmbrellaForceAndPotential(dimParams, grid, indexNeighbor, neighborLambdaDhdl, forceFromNeighbor);
490 /* Add the weighted umbrella force to the convolved force. */
491 for (size_t d = 0; d < dimParams.size(); d++)
493 force[d] += forceFromNeighbor[d] * weightNeighbor;
498 double BiasState::moveUmbrella(ArrayRef<const DimParams> dimParams,
499 const BiasGrid& grid,
500 ArrayRef<const double> probWeightNeighbor,
501 ArrayRef<const double> neighborLambdaDhdl,
502 ArrayRef<double> biasForce,
506 bool onlySampleUmbrellaGridpoint)
508 /* Generate and set a new coordinate reference value */
509 coordState_.sampleUmbrellaGridpoint(
510 grid, coordState_.gridpointIndex(), probWeightNeighbor, step, seed, indexSeed);
512 if (onlySampleUmbrellaGridpoint)
517 std::vector<double> newForce(dimParams.size());
518 double newPotential = calcUmbrellaForceAndPotential(
519 dimParams, grid, coordState_.umbrellaGridpoint(), neighborLambdaDhdl, newForce);
521 /* A modification of the reference value at time t will lead to a different
522 force over t-dt/2 to t and over t to t+dt/2. For high switching rates
523 this means the force and velocity will change signs roughly as often.
524 To avoid any issues we take the average of the previous and new force
525 at steps when the reference value has been moved. E.g. if the ref. value
526 is set every step to (coord dvalue +/- delta) would give zero force.
528 for (gmx::index d = 0; d < biasForce.ssize(); d++)
530 /* Average of the current and new force */
531 biasForce[d] = 0.5 * (biasForce[d] + newForce[d]);
541 * Sets the histogram rescaling factors needed to control the histogram size.
543 * For sake of robustness, the reference weight histogram can grow at a rate
544 * different from the actual sampling rate. Typically this happens for a limited
545 * initial time, alternatively growth is scaled down by a constant factor for all
546 * times. Since the size of the reference histogram sets the size of the free
547 * energy update this should be reflected also in the PMF. Thus the PMF histogram
548 * needs to be rescaled too.
550 * This function should only be called by the bias update function or wrapped by a function that
551 * knows what scale factors should be applied when, e.g,
552 * getSkippedUpdateHistogramScaleFactors().
554 * \param[in] params The bias parameters.
555 * \param[in] newHistogramSize New reference weight histogram size.
556 * \param[in] oldHistogramSize Previous reference weight histogram size (before adding new samples).
557 * \param[out] weightHistScaling Scaling factor for the reference weight histogram.
558 * \param[out] logPmfSumScaling Log of the scaling factor for the PMF histogram.
560 void setHistogramUpdateScaleFactors(const BiasParams& params,
561 double newHistogramSize,
562 double oldHistogramSize,
563 double* weightHistScaling,
564 double* logPmfSumScaling)
567 /* The two scaling factors below are slightly different (ignoring the log factor) because the
568 reference and the PMF histogram apply weight scaling differently. The weight histogram
569 applies is locally, i.e. each sample is scaled down meaning all samples get equal weight.
570 It is done this way because that is what target type local Boltzmann (for which
571 target = weight histogram) needs. In contrast, the PMF histogram is rescaled globally
572 by repeatedly scaling down the whole histogram. The reasons for doing it this way are:
573 1) empirically this is necessary for converging the PMF; 2) since the extraction of
574 the PMF is theoretically only valid for a constant bias, new samples should get more
575 weight than old ones for which the bias is fluctuating more. */
577 newHistogramSize / (oldHistogramSize + params.updateWeight * params.localWeightScaling);
578 *logPmfSumScaling = std::log(newHistogramSize / (oldHistogramSize + params.updateWeight));
583 void BiasState::getSkippedUpdateHistogramScaleFactors(const BiasParams& params,
584 double* weightHistScaling,
585 double* logPmfSumScaling) const
587 GMX_ASSERT(params.skipUpdates(),
588 "Calling function for skipped updates when skipping updates is not allowed");
590 if (inInitialStage())
592 /* In between global updates the reference histogram size is kept constant so we trivially
593 know what the histogram size was at the time of the skipped update. */
594 double histogramSize = histogramSize_.histogramSize();
595 setHistogramUpdateScaleFactors(
596 params, histogramSize, histogramSize, weightHistScaling, logPmfSumScaling);
600 /* In the final stage, the reference histogram grows at the sampling rate which gives trivial scale factors. */
601 *weightHistScaling = 1;
602 *logPmfSumScaling = 0;
606 void BiasState::doSkippedUpdatesForAllPoints(const BiasParams& params)
608 double weightHistScaling;
609 double logPmfsumScaling;
611 getSkippedUpdateHistogramScaleFactors(params, &weightHistScaling, &logPmfsumScaling);
613 for (auto& pointState : points_)
615 bool didUpdate = pointState.performPreviouslySkippedUpdates(
616 params, histogramSize_.numUpdates(), weightHistScaling, logPmfsumScaling);
618 /* Update the bias for this point only if there were skipped updates in the past to avoid calculating the log unneccessarily */
621 pointState.updateBias();
626 void BiasState::doSkippedUpdatesInNeighborhood(const BiasParams& params, const BiasGrid& grid)
628 double weightHistScaling;
629 double logPmfsumScaling;
631 getSkippedUpdateHistogramScaleFactors(params, &weightHistScaling, &logPmfsumScaling);
633 /* For each neighbor point of the center point, refresh its state by adding the results of all past, skipped updates. */
634 const std::vector<int>& neighbors = grid.point(coordState_.gridpointIndex()).neighbor;
635 for (const auto& neighbor : neighbors)
637 bool didUpdate = points_[neighbor].performPreviouslySkippedUpdates(
638 params, histogramSize_.numUpdates(), weightHistScaling, logPmfsumScaling);
642 points_[neighbor].updateBias();
651 * Merge update lists from multiple sharing simulations.
653 * \param[in,out] updateList Update list for this simulation (assumed >= npoints long).
654 * \param[in] numPoints Total number of points.
655 * \param[in] biasSharing Object for sharing bias data over multiple simulations
656 * \param[in] biasIndex Index of this bias in the total list of biases in this simulation
658 void mergeSharedUpdateLists(std::vector<int>* updateList,
660 const BiasSharing& biasSharing,
663 std::vector<int> numUpdatesOfPoint;
665 /* Flag the update points of this sim.
666 TODO: we can probably avoid allocating this array and just use the input array. */
667 numUpdatesOfPoint.resize(numPoints, 0);
668 for (auto& pointIndex : *updateList)
670 numUpdatesOfPoint[pointIndex] = 1;
673 /* Sum over the sims to get all the flagged points */
674 biasSharing.sum(arrayRefFromArray(numUpdatesOfPoint.data(), numPoints), biasIndex);
676 /* Collect the indices of the flagged points in place. The resulting array will be the merged update list.*/
678 for (int m = 0; m < numPoints; m++)
680 if (numUpdatesOfPoint[m] > 0)
682 updateList->push_back(m);
688 * Generate an update list of points sampled since the last update.
690 * \param[in] grid The AWH bias.
691 * \param[in] points The point state.
692 * \param[in] originUpdatelist The origin of the rectangular region that has been sampled since
693 * last update. \param[in] endUpdatelist The end of the rectangular that has been sampled since
694 * last update. \param[in,out] updateList Local update list to set (assumed >= npoints long).
696 void makeLocalUpdateList(const BiasGrid& grid,
697 ArrayRef<const PointState> points,
698 const awh_ivec originUpdatelist,
699 const awh_ivec endUpdatelist,
700 std::vector<int>* updateList)
705 /* Define the update search grid */
706 for (int d = 0; d < grid.numDimensions(); d++)
708 origin[d] = originUpdatelist[d];
709 numPoints[d] = endUpdatelist[d] - originUpdatelist[d] + 1;
711 /* Because the end_updatelist is unwrapped it can be > (npoints - 1) so that numPoints can be > npoints in grid.
712 This helps for calculating the distance/number of points but should be removed and fixed when the way of
713 updating origin/end updatelist is changed (see sampleProbabilityWeights). */
714 numPoints[d] = std::min(grid.axis(d).numPoints(), numPoints[d]);
717 /* Make the update list */
720 bool pointExists = true;
723 pointExists = advancePointInSubgrid(grid, origin, numPoints, &pointIndex);
725 if (pointExists && points[pointIndex].inTargetRegion())
727 updateList->push_back(pointIndex);
734 void BiasState::resetLocalUpdateRange(const BiasGrid& grid)
736 const int gridpointIndex = coordState_.gridpointIndex();
737 for (int d = 0; d < grid.numDimensions(); d++)
739 /* This gives the minimum range consisting only of the current closest point. */
740 originUpdatelist_[d] = grid.point(gridpointIndex).index[d];
741 endUpdatelist_[d] = grid.point(gridpointIndex).index[d];
749 * Add partial histograms (accumulating between updates) to accumulating histograms.
751 * \param[in,out] pointState The state of the points in the bias.
752 * \param[in,out] weightSumCovering The weights for checking covering.
753 * \param[in] numSharedUpdate The number of biases sharing the histrogram.
754 * \param[in] biasSharing Object for sharing bias data over multiple simulations
755 * \param[in] biasIndex Index of this bias in the total list of biases in this
756 * simulation \param[in] localUpdateList List of points with data.
758 void sumHistograms(gmx::ArrayRef<PointState> pointState,
759 gmx::ArrayRef<double> weightSumCovering,
761 const BiasSharing* biasSharing,
763 const std::vector<int>& localUpdateList)
765 /* The covering checking histograms are added before summing over simulations, so that the
766 weights from different simulations are kept distinguishable. */
767 for (int globalIndex : localUpdateList)
769 weightSumCovering[globalIndex] += pointState[globalIndex].weightSumIteration();
772 /* Sum histograms over multiple simulations if needed. */
773 if (numSharedUpdate > 1)
775 GMX_ASSERT(numSharedUpdate == biasSharing->numSharingSimulations(biasIndex),
776 "Sharing within a simulation is not implemented (yet)");
778 /* Collect the weights and counts in linear arrays to be able to use gmx_sumd_sim. */
779 std::vector<double> weightSum;
780 std::vector<double> coordVisits;
782 weightSum.resize(localUpdateList.size());
783 coordVisits.resize(localUpdateList.size());
785 for (size_t localIndex = 0; localIndex < localUpdateList.size(); localIndex++)
787 const PointState& ps = pointState[localUpdateList[localIndex]];
789 weightSum[localIndex] = ps.weightSumIteration();
790 coordVisits[localIndex] = ps.numVisitsIteration();
793 biasSharing->sum(gmx::ArrayRef<double>(weightSum), biasIndex);
794 biasSharing->sum(gmx::ArrayRef<double>(coordVisits), biasIndex);
796 /* Transfer back the result */
797 for (size_t localIndex = 0; localIndex < localUpdateList.size(); localIndex++)
799 PointState& ps = pointState[localUpdateList[localIndex]];
801 ps.setPartialWeightAndCount(weightSum[localIndex], coordVisits[localIndex]);
805 /* Now add the partial counts and weights to the accumulating histograms.
806 Note: we still need to use the weights for the update so we wait
807 with resetting them until the end of the update. */
808 for (int globalIndex : localUpdateList)
810 pointState[globalIndex].addPartialWeightAndCount();
815 * Label points along an axis as covered or not.
817 * A point is covered if it is surrounded by visited points up to a radius = coverRadius.
819 * \param[in] visited Visited? For each point.
820 * \param[in] checkCovering Check for covering? For each point.
821 * \param[in] numPoints The number of grid points along this dimension.
822 * \param[in] period Period in number of points.
823 * \param[in] coverRadius Cover radius, in points, needed for defining a point as covered.
824 * \param[in,out] covered In this array elements are 1 for covered points and 0 for
825 * non-covered points, this routine assumes that \p covered has at least size \p numPoints.
827 void labelCoveredPoints(const std::vector<bool>& visited,
828 const std::vector<bool>& checkCovering,
832 gmx::ArrayRef<int> covered)
834 GMX_ASSERT(covered.ssize() >= numPoints, "covered should be at least as large as the grid");
836 bool haveFirstNotVisited = false;
837 int firstNotVisited = -1;
838 int notVisitedLow = -1;
839 int notVisitedHigh = -1;
841 for (int n = 0; n < numPoints; n++)
843 if (checkCovering[n] && !visited[n])
845 if (!haveFirstNotVisited)
849 haveFirstNotVisited = true;
855 /* Have now an interval I = [notVisitedLow,notVisitedHigh] of visited points bounded
856 by unvisited points. The unvisted end points affect the coveredness of the
857 visited with a reach equal to the cover radius. */
858 int notCoveredLow = notVisitedLow + coverRadius;
859 int notCoveredHigh = notVisitedHigh - coverRadius;
860 for (int i = notVisitedLow; i <= notVisitedHigh; i++)
862 covered[i] = static_cast<int>((i > notCoveredLow) && (i < notCoveredHigh));
865 /* Find a new interval to set covering for. Make the notVisitedHigh of this interval
866 the notVisitedLow of the next. */
867 notVisitedLow = notVisitedHigh;
872 /* Have labelled all the internal points. Now take care of the boundary regions. */
873 if (!haveFirstNotVisited)
875 /* No non-visited points <=> all points visited => all points covered. */
877 for (int n = 0; n < numPoints; n++)
884 int lastNotVisited = notVisitedLow;
886 /* For periodic boundaries, non-visited points can influence points
887 on the other side of the boundary so we need to wrap around. */
889 /* Lower end. For periodic boundaries the last upper end not visited point becomes the low-end not visited point.
890 For non-periodic boundaries there is no lower end point so a dummy value is used. */
891 int notVisitedHigh = firstNotVisited;
892 int notVisitedLow = period > 0 ? (lastNotVisited - period) : -(coverRadius + 1);
894 int notCoveredLow = notVisitedLow + coverRadius;
895 int notCoveredHigh = notVisitedHigh - coverRadius;
897 for (int i = 0; i <= notVisitedHigh; i++)
899 /* For non-periodic boundaries notCoveredLow = -1 will impose no restriction. */
900 covered[i] = static_cast<int>((i > notCoveredLow) && (i < notCoveredHigh));
903 /* Upper end. Same as for lower end but in the other direction. */
904 notVisitedHigh = period > 0 ? (firstNotVisited + period) : (numPoints + coverRadius);
905 notVisitedLow = lastNotVisited;
907 notCoveredLow = notVisitedLow + coverRadius;
908 notCoveredHigh = notVisitedHigh - coverRadius;
910 for (int i = notVisitedLow; i <= numPoints - 1; i++)
912 /* For non-periodic boundaries notCoveredHigh = numPoints will impose no restriction. */
913 covered[i] = static_cast<int>((i > notCoveredLow) && (i < notCoveredHigh));
920 bool BiasState::isSamplingRegionCovered(const BiasParams& params,
921 ArrayRef<const DimParams> dimParams,
922 const BiasGrid& grid) const
// NOTE(review): this span is the body of the sampling-region covering check
// (BiasState::isSamplingRegionCovered); the function signature lies above this excerpt.
924 /* Allocate and initialize arrays: one for checking visits along each dimension,
925 one for keeping track of which points to check and one for the covered points.
926 Possibly these could be kept as AWH variables to avoid these allocations. */
929 std::vector<bool> visited;
930 std::vector<bool> checkCovering;
931 // We use int for the covering array since we might use gmx_sumi_sim.
932 std::vector<int> covered;
// One CheckDim per grid axis: per-axis visited/checkCovering/covered arrays.
935 std::vector<CheckDim> checkDim;
936 checkDim.resize(grid.numDimensions());
938 for (int d = 0; d < grid.numDimensions(); d++)
940 const size_t numPoints = grid.axis(d).numPoints();
941 checkDim[d].visited.resize(numPoints, false);
942 checkDim[d].checkCovering.resize(numPoints, false);
943 checkDim[d].covered.resize(numPoints, 0);
946 /* Set visited points along each dimension and which points should be checked for covering.
947 Specifically, points above the free energy cutoff (if there is one) or points outside
948 of the target region are ignored. */
950 /* Set the free energy cutoff */
951 double maxFreeEnergy = GMX_FLOAT_MAX;
952 if (params.eTarget == AwhTargetType::Cutoff)
955 maxFreeEnergy = freeEnergyMinimumValue(points_) + params.freeEnergyCutoffInKT;
958 /* Set the threshold weight for a point to be considered visited. */
959 double weightThreshold = 1;
960 for (int d = 0; d < grid.numDimensions(); d++)
962 if (grid.axis(d).isFepLambdaAxis())
964 /* Do not modify the weight threshold based on a FEP lambda axis. The spread
965 * of the sampling weights is not depending on a Gaussian distribution (like
967 weightThreshold *= 1.0;
971 /* The spacing is proportional to 1/sqrt(betak). The weight threshold will be
972 * approximately (given that the spacing can be modified if the dimension is periodic)
973 * proportional to sqrt(1/(2*pi)). */
974 weightThreshold *= grid.axis(d).spacing()
975 * std::sqrt(dimParams[d].pullDimParams().betak * 0.5 * M_1_PI);
979 /* Project the sampling weights onto each dimension */
980 for (size_t m = 0; m < grid.numPoints(); m++)
982 const PointState& pointState = points_[m];
984 for (int d = 0; d < grid.numDimensions(); d++)
986 int n = grid.point(m).index[d];
988 /* Is visited if it was already visited or if there is enough weight at the current point */
989 checkDim[d].visited[n] = checkDim[d].visited[n] || (weightSumCovering_[m] > weightThreshold);
991 /* Check for covering if there is at least one point in this slice that is in the target region and within the cutoff */
992 checkDim[d].checkCovering[n] =
993 checkDim[d].checkCovering[n]
994 || (pointState.inTargetRegion() && pointState.freeEnergy() < maxFreeEnergy);
998 /* Label each point along each dimension as covered or not. */
999 for (int d = 0; d < grid.numDimensions(); d++)
1001 labelCoveredPoints(checkDim[d].visited,
1002 checkDim[d].checkCovering,
1003 grid.axis(d).numPoints(),
1004 grid.axis(d).numPointsInPeriod(),
1005 params.coverRadius()[d],
1006 checkDim[d].covered);
1009 /* Now check for global covering. Each dimension needs to be covered separately.
1010 A dimension is covered if each point is covered. Multiple simulations collectively
1011 cover the points, i.e. a point is covered if any of the simulations covered it.
1012 However, visited points are not shared, i.e. if a point is covered or not is
1013 determined by the visits of a single simulation. In general the covering criterion is
1014 all points covered => all points are surrounded by visited points up to a radius = coverRadius.
1015 For 1 simulation, all points covered <=> all points visited. For multiple simulations
1016 however, all points visited collectively !=> all points covered, except for coverRadius = 0.
1017 In the limit of large coverRadius, all points covered => all points visited by at least one
1018 simulation (since no point will be covered until all points have been visited by a
1019 single simulation). Basically coverRadius sets how much "connectedness" (or mixing) a point
1020 needs with surrounding points before sharing covering information with other simulations. */
1022 /* Communicate the covered points between sharing simulations if needed. */
1023 if (params.numSharedUpdate > 1)
1025 /* For multiple dimensions this may not be the best way to do it. */
1026 for (int d = 0; d < grid.numDimensions(); d++)
1028 biasSharing_->sum(gmx::arrayRefFromArray(checkDim[d].covered.data(), grid.axis(d).numPoints()),
/* The loop conditions below carry "&& allPointsCovered" so the scan
 * short-circuits as soon as a single uncovered point is found. */
1033 /* Now check if for each dimension all points are covered. Break if not true. */
1034 bool allPointsCovered = true;
1035 for (int d = 0; d < grid.numDimensions() && allPointsCovered; d++)
1037 for (int n = 0; n < grid.axis(d).numPoints() && allPointsCovered; n++)
1039 allPointsCovered = (checkDim[d].covered[n] != 0);
1043 return allPointsCovered;
1047 * Normalizes the free energy and PMF sum.
1049 * \param[in] pointState The state of the points.
1051 static void normalizeFreeEnergyAndPmfSum(std::vector<PointState>* pointState)
1053 double minF = freeEnergyMinimumValue(*pointState);
1055 for (PointState& ps : *pointState)
1057 ps.normalizeFreeEnergyAndPmfSum(minF);
// NOTE(review): further parameters (at least t, step and fplog, all used below) are
// declared on original lines elided from this excerpt.
1061 void BiasState::updateFreeEnergyAndAddSamplesToHistogram(ArrayRef<const DimParams> dimParams,
1062 const BiasGrid& grid,
1063 const BiasParams& params,
1067 std::vector<int>* updateList)
1069 /* Note that updateList is only used in this scope and is always
1070 * re-initialized. We do not use a local vector, because that would
1071 * cause reallocation every time this function is called and the vector
1072 * can be the size of the whole grid.
1075 /* Make a list of all local points, i.e. those that could have been touched since
1076 the last update. These are the points needed for summing histograms below
1077 (non-local points only add zeros). For local updates, this will also be the
1078 final update list. */
1079 makeLocalUpdateList(grid, points_, originUpdatelist_, endUpdatelist_, updateList);
1080 if (params.numSharedUpdate > 1)
1082 mergeSharedUpdateLists(updateList, points_.size(), *biasSharing_, params.biasIndex);
1085 /* Reset the range for the next update */
1086 resetLocalUpdateRange(grid);
1088 /* Add samples to histograms for all local points and sync simulations if needed */
1089 sumHistograms(points_, weightSumCovering_, params.numSharedUpdate, biasSharing_, params.biasIndex, *updateList);
1091 sumPmf(points_, params.numSharedUpdate, biasSharing_, params.biasIndex);
1093 /* Renormalize the free energy if values are too large. */
1094 bool needToNormalizeFreeEnergy = false;
1095 for (int& globalIndex : *updateList)
1097 /* We want to keep the absolute value of the free energies to be less
1098 c_largePositiveExponent to be able to safely pass these values to exp(). The check below
1099 ensures this as long as the free energy values grow less than 0.5*c_largePositiveExponent
1100 in a return time to this neighborhood. For reasonable update sizes it's unlikely that
1101 this requirement would be broken. */
1102 if (std::abs(points_[globalIndex].freeEnergy()) > 0.5 * detail::c_largePositiveExponent)
1104 needToNormalizeFreeEnergy = true;
1109 /* Update target distribution? */
1110 bool needToUpdateTargetDistribution =
1111 (params.eTarget != AwhTargetType::Constant && params.isUpdateTargetStep(step));
1113 /* In the initial stage, the histogram grows dynamically as a function of the number of coverings. */
1114 bool detectedCovering = false;
1115 if (inInitialStage())
1118 (params.isCheckCoveringStep(step) && isSamplingRegionCovered(params, dimParams, grid));
1121 /* The weighthistogram size after this update. */
1122 double newHistogramSize = histogramSize_.newHistogramSize(
1123 params, t, detectedCovering, points_, weightSumCovering_, fplog);
1125 /* Make the update list. Usually we try to only update local points,
1126 * but if the update has non-trivial or non-deterministic effects
1127 * on non-local points a global update is needed. This is the case when:
1128 * 1) a covering occurred in the initial stage, leading to non-trivial
1129 * histogram rescaling factors; or
1130 * 2) the target distribution will be updated, since we don't make any
1131 * assumption on its form; or
1132 * 3) the AWH parameters are such that we never attempt to skip non-local
1134 * 4) the free energy values have grown so large that a renormalization
1137 if (needToUpdateTargetDistribution || detectedCovering || !params.skipUpdates() || needToNormalizeFreeEnergy)
1139 /* Global update, just add all points. */
1140 updateList->clear();
1141 for (size_t m = 0; m < points_.size(); m++)
1143 if (points_[m].inTargetRegion())
1145 updateList->push_back(m);
1150 /* Set histogram scale factors. */
1151 double weightHistScalingSkipped = 0;
1152 double logPmfsumScalingSkipped = 0;
1153 if (params.skipUpdates())
1155 getSkippedUpdateHistogramScaleFactors(params, &weightHistScalingSkipped, &logPmfsumScalingSkipped);
1157 double weightHistScalingNew;
1158 double logPmfsumScalingNew;
1159 setHistogramUpdateScaleFactors(
1160 params, newHistogramSize, histogramSize_.histogramSize(), &weightHistScalingNew, &logPmfsumScalingNew);
1162 /* Update free energy and reference weight histogram for points in the update list. */
1163 for (int pointIndex : *updateList)
1165 PointState* pointStateToUpdate = &points_[pointIndex];
1167 /* Do updates from previous update steps that were skipped because this point was at that time non-local. */
1168 if (params.skipUpdates())
1170 pointStateToUpdate->performPreviouslySkippedUpdates(
1171 params, histogramSize_.numUpdates(), weightHistScalingSkipped, logPmfsumScalingSkipped);
1174 /* Now do an update with new sampling data. */
1175 pointStateToUpdate->updateWithNewSampling(
1176 params, histogramSize_.numUpdates(), weightHistScalingNew, logPmfsumScalingNew);
1179 /* Only update the histogram size after we are done with the local point updates */
1180 histogramSize_.setHistogramSize(newHistogramSize, weightHistScalingNew);
1182 if (needToNormalizeFreeEnergy)
1184 normalizeFreeEnergyAndPmfSum(&points_);
1187 if (needToUpdateTargetDistribution)
1189 /* The target distribution is always updated for all points at once. */
1190 updateTargetDistribution(points_, params);
1193 /* Update the bias. The bias is updated separately and last since it is simply a function of
1194 the free energy and the target distribution and we want to avoid doing extra work. */
1195 for (int pointIndex : *updateList)
1197 points_[pointIndex].updateBias();
1200 /* Increase the update counter. */
1201 histogramSize_.incrementNumUpdates();
/* Computes the normalized probability weights of all neighbors of the current
 * grid point and returns the convolved bias, log(sum of unnormalized weights).
 * The weight loop is SIMD-packed when GMX_SIMD_HAVE_DOUBLE is set.
 * NOTE(review): the #else separating the SIMD and scalar typedefs is on an
 * original line elided from this excerpt. */
1204 double BiasState::updateProbabilityWeightsAndConvolvedBias(ArrayRef<const DimParams> dimParams,
1205 const BiasGrid& grid,
1206 ArrayRef<const double> neighborLambdaEnergies,
1207 std::vector<double, AlignedAllocator<double>>* weight) const
1209 /* Only neighbors of the current coordinate value will have a non-negligible chance of getting sampled */
1210 const std::vector<int>& neighbors = grid.point(coordState_.gridpointIndex()).neighbor;
1212 #if GMX_SIMD_HAVE_DOUBLE
1213 typedef SimdDouble PackType;
1214 constexpr int packSize = GMX_SIMD_DOUBLE_WIDTH;
1216 typedef double PackType;
1217 constexpr int packSize = 1;
1219 /* Round the size of the weight array up to packSize */
1220 const int weightSize = ((neighbors.size() + packSize - 1) / packSize) * packSize;
1221 weight->resize(weightSize);
1223 double* gmx_restrict weightData = weight->data();
1224 PackType weightSumPack(0.0);
1225 for (size_t i = 0; i < neighbors.size(); i += packSize)
1227 for (size_t n = i; n < i + packSize; n++)
1229 if (n < neighbors.size())
1231 const int neighbor = neighbors[n];
1232 (*weight)[n] = biasedLogWeightFromPoint(dimParams,
1236 points_[neighbor].bias(),
1237 coordState_.coordValue(),
1238 neighborLambdaEnergies,
1239 coordState_.gridpointIndex());
/* Padding entries get a large negative log-weight so exp() makes them ~0. */
1243 /* Pad with values that don't affect the result */
1244 (*weight)[n] = detail::c_largeNegativeExponent;
1247 PackType weightPack = load<PackType>(weightData + i);
1248 weightPack = gmx::exp(weightPack);
1249 weightSumPack = weightSumPack + weightPack;
1250 store(weightData + i, weightPack);
1252 /* Sum of probability weights */
1253 double weightSum = reduce(weightSumPack);
1254 GMX_RELEASE_ASSERT(weightSum > 0,
1255 "zero probability weight when updating AWH probability weights.");
1257 /* Normalize probabilities to sum to 1 */
1258 double invWeightSum = 1 / weightSum;
1260 /* When there is a free energy lambda state axis remove the convolved contributions along that
1261 * axis from the total bias. This must be done after calculating invWeightSum (since weightSum
1262 * will be modified), but before normalizing the weights (below). */
1263 if (grid.hasLambdaAxis())
1265 /* If there is only one axis the bias will not be convolved in any dimension. */
1266 if (grid.axis().size() == 1)
1268 weightSum = gmx::exp(points_[coordState_.gridpointIndex()].bias());
1272 for (size_t i = 0; i < neighbors.size(); i++)
1274 const int neighbor = neighbors[i];
1275 if (pointsHaveDifferentLambda(grid, coordState_.gridpointIndex(), neighbor))
1277 weightSum -= weightData[i];
/* Normalize each weight by invWeightSum, per the comment above
 * ("Normalize probabilities to sum to 1"); the loop body is on an
 * original line elided from this excerpt. */
1283 for (double& w : *weight)
1288 /* Return the convolved bias */
1289 return std::log(weightSum);
/* Returns the convolved bias, log of the summed neighbor probability weights,
 * for an arbitrary coordinate value (using the nearest grid point's neighborhood). */
1292 double BiasState::calcConvolvedBias(ArrayRef<const DimParams> dimParams,
1293 const BiasGrid& grid,
1294 const awh_dvec& coordValue) const
1296 int point = grid.nearestIndex(coordValue);
1297 const GridPoint& gridPoint = grid.point(point);
1299 /* Sum the probability weights from the neighborhood of the given point */
1300 double weightSum = 0;
1301 for (int neighbor : gridPoint.neighbor)
/* Neighbors at a different lambda state are skipped here (the skip
 * statement is on an original line elided from this excerpt). */
1303 /* No convolution is required along the lambda dimension. */
1304 if (pointsHaveDifferentLambda(grid, point, neighbor))
1308 double logWeight = biasedLogWeightFromPoint(
1309 dimParams, points_, grid, neighbor, points_[neighbor].bias(), coordValue, {}, point);
1310 weightSum += std::exp(logWeight);
1313 /* Returns -GMX_FLOAT_MAX if no neighboring points were in the target region. */
1314 return (weightSum > 0) ? std::log(weightSum) : -GMX_FLOAT_MAX;
/* Accumulates the given neighbor probability weights into the per-point
 * iteration weight sums and grows the local update range to include the
 * current neighborhood. */
1317 void BiasState::sampleProbabilityWeights(const BiasGrid& grid, gmx::ArrayRef<const double> probWeightNeighbor)
1319 const std::vector<int>& neighbor = grid.point(coordState_.gridpointIndex()).neighbor;
1321 /* Save weights for next update */
1322 for (size_t n = 0; n < neighbor.size(); n++)
1324 points_[neighbor[n]].increaseWeightSumIteration(probWeightNeighbor[n]);
1327 /* Update the local update range. Two corner points define this rectangular
1328 * domain. We need to choose two new corner points such that the new domain
1329 * contains both the old update range and the current neighborhood.
1330 * In the simplest case when an update is performed every sample,
1331 * the update range would simply equal the current neighborhood.
1333 int neighborStart = neighbor[0];
1334 int neighborLast = neighbor[neighbor.size() - 1];
1335 for (int d = 0; d < grid.numDimensions(); d++)
1337 int origin = grid.point(neighborStart).index[d];
1338 int last = grid.point(neighborLast).index[d];
/* NOTE(review): the condition guarding the unwrap below (presumably
 * origin > last) is on an original line elided from this excerpt. */
1342 /* Unwrap if wrapped around the boundary (only happens for periodic
1343 * boundaries). This has already been done for the stored index interval.
1345 /* TODO: what we want to do is to find the smallest update
1346 * interval that contains all points that need to be updated.
1347 * This amounts to combining two intervals, the current
1348 * [origin, end] update interval and the new touched neighborhood
1349 * into a new interval that contains all points from both the old
1352 * For periodic boundaries it becomes slightly more complicated
1353 * than for closed boundaries because then it needs not be
1354 * true that origin < end (so one can't simply relate the origin/end
1355 * in the min()/max() below). The strategy here is to choose the
1356 * origin closest to a reference point (index 0) and then unwrap
1357 * the end index if needed and choose the largest end index.
1358 * This ensures that both intervals are in the new interval
1359 * but it's not necessarily the smallest.
1360 * Currently we solve this by going through each possibility
1361 * and checking them.
1363 last += grid.axis(d).numPointsInPeriod();
1366 originUpdatelist_[d] = std::min(originUpdatelist_[d], origin);
1367 endUpdatelist_[d] = std::max(endUpdatelist_[d], last);
1371 void BiasState::sampleCoordAndPmf(const std::vector<DimParams>& dimParams,
1372 const BiasGrid& grid,
1373 gmx::ArrayRef<const double> probWeightNeighbor,
1374 double convolvedBias)
1376 /* Sampling-based deconvolution extracting the PMF.
1377 * Update the PMF histogram with the current coordinate value.
1379 * Because of the finite width of the harmonic potential, the free energy
1380 * defined for each coordinate point does not exactly equal that of the
1381 * actual coordinate, the PMF. However, the PMF can be estimated by applying
1382 * the relation exp(-PMF) = exp(-bias_convolved)*P_biased/Z, i.e. by keeping a
1383 * reweighted histogram of the coordinate value. Strictly, this relies on
1384 * the unknown normalization constant Z being either constant or known. Here,
1385 * neither is true except in the long simulation time limit. Empirically however,
1386 * it works (mainly because how the PMF histogram is rescaled).
1389 const int gridPointIndex = coordState_.gridpointIndex();
1390 const std::optional<int> lambdaAxisIndex = grid.lambdaAxisIndex();
1392 /* Update the PMF of points along a lambda axis with their bias. */
1393 if (lambdaAxisIndex)
1395 const std::vector<int>& neighbors = grid.point(gridPointIndex).neighbor;
1397 std::vector<double> lambdaMarginalDistribution =
1398 calculateFELambdaMarginalDistribution(grid, neighbors, probWeightNeighbor);
1400 awh_dvec coordValueAlongLambda = { coordState_.coordValue()[0],
1401 coordState_.coordValue()[1],
1402 coordState_.coordValue()[2],
1403 coordState_.coordValue()[3] };
1404 for (size_t i = 0; i < neighbors.size(); i++)
1406 const int neighbor = neighbors[i];
/* NOTE(review): a local `bias` declaration, assigned in both branches
 * below, is on an original line elided from this excerpt. */
1408 if (pointsAlongLambdaAxis(grid, gridPointIndex, neighbor))
1410 const double neighborLambda = grid.point(neighbor).coordValue[lambdaAxisIndex.value()];
1411 if (neighbor == gridPointIndex)
1413 bias = convolvedBias;
1417 coordValueAlongLambda[lambdaAxisIndex.value()] = neighborLambda;
1418 bias = calcConvolvedBias(dimParams, grid, coordValueAlongLambda);
/* NOTE(review): neighborLambda (a double) is used as a vector index here;
 * this relies on lambda-axis coordinate values being the integral lambda
 * state indices — confirm against BiasGrid's lambda axis setup. */
1421 const double probWeight = lambdaMarginalDistribution[neighborLambda];
1422 const double weightedBias = bias - std::log(std::max(probWeight, GMX_DOUBLE_MIN)); // avoid log(0)
1424 if (neighbor == gridPointIndex && grid.covers(coordState_.coordValue()))
1426 points_[neighbor].samplePmf(weightedBias);
1430 points_[neighbor].updatePmfUnvisited(weightedBias);
1437 /* Only save coordinate data that is in range (the given index is always
1438 * in range even if the coordinate value is not).
1440 if (grid.covers(coordState_.coordValue()))
1442 /* Save PMF sum and keep a histogram of the sampled coordinate values */
1443 points_[gridPointIndex].samplePmf(convolvedBias);
1447 /* Save probability weights for the update */
1448 sampleProbabilityWeights(grid, probWeightNeighbor);
/* Sizes the history's point-state array to match the current number of
 * bias grid points, so updateHistory() can later fill it in. */
1451 void BiasState::initHistoryFromState(AwhBiasHistory* biasHistory) const
1453 biasHistory->pointState.resize(points_.size());
/* Copies the current bias state (coordinate, per-point states, covering
 * weight sums, histogram size and update-range indices) into the
 * checkpoint history object. */
1456 void BiasState::updateHistory(AwhBiasHistory* biasHistory, const BiasGrid& grid) const
1458 GMX_RELEASE_ASSERT(biasHistory->pointState.size() == points_.size(),
1459 "The AWH history setup does not match the AWH state.");
1461 AwhBiasStateHistory* stateHistory = &biasHistory->state;
1462 stateHistory->umbrellaGridpoint = coordState_.umbrellaGridpoint();
1464 for (size_t m = 0; m < biasHistory->pointState.size(); m++)
1466 AwhPointStateHistory* psh = &biasHistory->pointState[m];
1468 points_[m].storeState(psh);
1470 psh->weightsum_covering = weightSumCovering_[m];
1473 histogramSize_.storeState(stateHistory);
/* The multidimensional update-range corners are stored as linear indices. */
1475 stateHistory->origin_index_updatelist = multiDimGridIndexToLinear(grid, originUpdatelist_);
1476 stateHistory->end_index_updatelist = multiDimGridIndexToLinear(grid, endUpdatelist_);
/* Restores the bias state from a checkpoint history object; throws
 * InvalidInputError if the checkpoint grid size does not match. */
1479 void BiasState::restoreFromHistory(const AwhBiasHistory& biasHistory, const BiasGrid& grid)
1481 const AwhBiasStateHistory& stateHistory = biasHistory.state;
1483 coordState_.restoreFromHistory(stateHistory);
1485 if (biasHistory.pointState.size() != points_.size())
1488 InvalidInputError("Bias grid size in checkpoint and simulation do not match. "
1489 "Likely you provided a checkpoint from a different simulation."));
1491 for (size_t m = 0; m < points_.size(); m++)
1493 points_[m].setFromHistory(biasHistory.pointState[m]);
1496 for (size_t m = 0; m < weightSumCovering_.size(); m++)
1498 weightSumCovering_[m] = biasHistory.pointState[m].weightsum_covering;
1501 histogramSize_.restoreFromHistory(stateHistory);
/* The update-range corners were checkpointed as linear grid indices. */
1503 linearGridindexToMultiDim(grid, stateHistory.origin_index_updatelist, originUpdatelist_);
1504 linearGridindexToMultiDim(grid, stateHistory.end_index_updatelist, endUpdatelist_);
/* Broadcasts the bias state (coordinate state, point states, covering
 * weight sums and histogram size) over the given communication record's
 * mpi_comm_mygroup communicator. Byte-wise broadcast assumes these types
 * are trivially copyable. */
1507 void BiasState::broadcast(const t_commrec* commRecord)
1509 gmx_bcast(sizeof(coordState_), &coordState_, commRecord->mpi_comm_mygroup);
1511 gmx_bcast(points_.size() * sizeof(PointState), points_.data(), commRecord->mpi_comm_mygroup);
1513 gmx_bcast(weightSumCovering_.size() * sizeof(double), weightSumCovering_.data(), commRecord->mpi_comm_mygroup);
1515 gmx_bcast(sizeof(histogramSize_), &histogramSize_, commRecord->mpi_comm_mygroup);
/* Sets each point's free energy to the corresponding value of the
 * convolved PMF computed from the current PMF estimate. */
1518 void BiasState::setFreeEnergyToConvolvedPmf(ArrayRef<const DimParams> dimParams, const BiasGrid& grid)
1520 std::vector<float> convolvedPmf;
1522 calcConvolvedPmf(dimParams, grid, &convolvedPmf);
1524 for (size_t m = 0; m < points_.size(); m++)
1526 points_[m].setFreeEnergy(convolvedPmf[m]);
/*! \brief
 * Count trailing data rows containing only zeros.
 *
 * \param[in] data        2D data array, indexed as data[column][row].
 * \param[in] numRows     Number of rows in array.
 * \param[in] numColumns  Number of cols in array.
 * \returns the number of trailing zero rows.
 */
static int countTrailingZeroRows(const double* const* data, int numRows, int numColumns)
{
    /* Walk upwards from the last row until a row with non-zero data is found. */
    int row = numRows - 1;
    while (row >= 0)
    {
        /* The column scan stops early as soon as a non-zero entry is seen. */
        bool rowIsAllZero = true;
        for (int column = 0; column < numColumns && rowIsAllZero; column++)
        {
            rowIsAllZero = (data[column][row] == 0);
        }
        if (!rowIsAllZero)
        {
            /* At a row with non-zero data; all rows below it were zero. */
            break;
        }
        /* Still at a zero data row, keep checking rows higher up. */
        row--;
    }
    return numRows - 1 - row;
}
1569 * Initializes the PMF and target with data read from an input table.
1571 * \param[in] dimParams The dimension parameters.
1572 * \param[in] grid The grid.
1573 * \param[in] filename The filename to read PMF and target from.
1574 * \param[in] numBias Number of biases.
1575 * \param[in] biasIndex The index of the bias.
1576 * \param[in,out] pointState The state of the points in this bias.
// NOTE(review): the numBias and biasIndex parameters (both documented above and
// used below) are declared on original lines elided from this excerpt, as is the
// declaration of the `data` array filled by read_xvg.
1578 static void readUserPmfAndTargetDistribution(ArrayRef<const DimParams> dimParams,
1579 const BiasGrid& grid,
1580 const std::string& filename,
1583 std::vector<PointState>* pointState)
1585 /* Read the PMF and target distribution.
1586 From the PMF, the convolved PMF, or the reference value free energy, can be calculated
1587 based on the force constant. The free energy and target together determine the bias.
/* With multiple biases, the bias index is inserted before the file extension. */
1589 std::string filenameModified(filename);
1592 size_t n = filenameModified.rfind('.');
1593 GMX_RELEASE_ASSERT(n != std::string::npos,
1594 "The filename should contain an extension starting with .");
1595 filenameModified.insert(n, formatString("%d", biasIndex));
1598 std::string correctFormatMessage = formatString(
1599 "%s is expected in the following format. "
1600 "The first ndim column(s) should contain the coordinate values for each point, "
1601 "each column containing values of one dimension (in ascending order). "
1602 "For a multidimensional coordinate, points should be listed "
1603 "in the order obtained by traversing lower dimensions first. "
1604 "E.g. for two-dimensional grid of size nxn: "
1605 "(1, 1), (1, 2),..., (1, n), (2, 1), (2, 2), ..., , (n, n - 1), (n, n). "
1606 "Column ndim + 1 should contain the PMF value for each coordinate value. "
1607 "The target distribution values should be in column ndim + 2 or column ndim + 5. "
1608 "Make sure the input file ends with a new line but has no trailing new lines.",
1610 gmx::TextLineWrapper wrapper;
1611 wrapper.settings().setLineLength(c_linewidth);
1612 correctFormatMessage = wrapper.wrapToString(correctFormatMessage);
1616 int numRows = read_xvg(filenameModified.c_str(), &data, &numColumns);
1618 /* Check basic data properties here. BiasGrid takes care of more complicated things. */
1622 std::string mesg = gmx::formatString(
1623 "%s is empty!.\n\n%s", filename.c_str(), correctFormatMessage.c_str());
1624 GMX_THROW(InvalidInputError(mesg));
1627 /* Less than 2 points is not useful for PMF or target. */
1630 std::string mesg = gmx::formatString(
1631 "%s contains too few data points (%d)."
1632 "The minimum number of points is 2.",
1635 GMX_THROW(InvalidInputError(mesg));
1638 /* Make sure there are enough columns of data.
1640 Two formats are allowed. Either with columns {coords, PMF, target} or
1641 {coords, PMF, x, y, z, target, ...}. The latter format is allowed since that
1642 is how AWH output is written (x, y, z being other AWH variables). For this format,
1643 trailing columns are ignored.
1645 int columnIndexTarget;
1646 int numColumnsMin = dimParams.size() + 2;
1647 int columnIndexPmf = dimParams.size();
1648 if (numColumns == numColumnsMin)
1650 columnIndexTarget = columnIndexPmf + 1;
1654 columnIndexTarget = columnIndexPmf + 4;
1657 if (numColumns < numColumnsMin)
1659 std::string mesg = gmx::formatString(
1660 "The number of columns in %s should be at least %d."
1664 correctFormatMessage.c_str());
1665 GMX_THROW(InvalidInputError(mesg));
1668 /* read_xvg can give trailing zero data rows for trailing new lines in the input. We allow 1 zero row,
1669 since this could be real data. But multiple trailing zero rows cannot correspond to valid data. */
1670 int numZeroRows = countTrailingZeroRows(data, numRows, numColumns);
1671 if (numZeroRows > 1)
1673 std::string mesg = gmx::formatString(
1674 "Found %d trailing zero data rows in %s. Please remove trailing empty lines and "
1678 GMX_THROW(InvalidInputError(mesg));
1681 /* Convert from user units to internal units before sending the data of to grid. */
1682 for (size_t d = 0; d < dimParams.size(); d++)
1684 double scalingFactor = dimParams[d].scaleUserInputToInternal(1);
/* Skip dimensions whose user and internal units coincide. */
1685 if (scalingFactor == 1)
1689 for (size_t m = 0; m < pointState->size(); m++)
1691 data[d][m] *= scalingFactor;
1695 /* Get a data point for each AWH grid point so that they all get data. */
1696 std::vector<int> gridIndexToDataIndex(grid.numPoints());
1697 mapGridToDataGrid(&gridIndexToDataIndex, data, numRows, filename, grid, correctFormatMessage);
1699 /* Extract the data for each grid point.
1700 * We check if the target distribution is zero for all points.
1702 bool targetDistributionIsZero = true;
1703 for (size_t m = 0; m < pointState->size(); m++)
1705 (*pointState)[m].setLogPmfSum(-data[columnIndexPmf][gridIndexToDataIndex[m]]);
1706 double target = data[columnIndexTarget][gridIndexToDataIndex[m]];
1708 /* Check if the values are allowed. */
1711 std::string mesg = gmx::formatString(
1712 "Target distribution weight at point %zu (%g) in %s is negative.",
1716 GMX_THROW(InvalidInputError(mesg));
1720 targetDistributionIsZero = false;
1722 (*pointState)[m].setTargetConstantWeight(target);
1725 if (targetDistributionIsZero)
1728 gmx::formatString("The target weights given in column %d in %s are all 0",
1731 GMX_THROW(InvalidInputError(mesg));
/* read_xvg allocated one array per column; release them all. */
1734 /* Free the arrays. */
1735 for (int m = 0; m < numColumns; m++)
/* Renormalizes the PMF sums so that the PMF estimate is consistent with the
 * current histogram size divided by the number of sharing simulations.
 * NOTE(review): the declaration of expSumF (accumulated below) is on an
 * original line elided from this excerpt. */
1742 void BiasState::normalizePmf(int numSharingSims)
1744 /* The normalization of the PMF estimate matters because it determines how big effect the next sample has.
1745 Approximately (for large enough force constant) we should have:
1746 sum_x(exp(-pmf(x)) = nsamples*sum_xref(exp(-f(xref)).
1749 /* Calculate the normalization factor, i.e. divide by the pmf sum, multiply by the number of samples and the f sum */
1750 double expSumPmf = 0;
1752 for (const PointState& pointState : points_)
1754 if (pointState.inTargetRegion())
1756 expSumPmf += std::exp(pointState.logPmfSum());
1757 expSumF += std::exp(-pointState.freeEnergy());
/* Each sharing simulation contributes an equal share of the histogram. */
1760 double numSamples = histogramSize_.histogramSize() / numSharingSims;
1763 double logRenorm = std::log(numSamples * expSumF / expSumPmf);
1764 for (PointState& pointState : points_)
1766 if (pointState.inTargetRegion())
1768 pointState.setLogPmfSum(pointState.logPmfSum() + logRenorm);
/* Initializes the grid point states: optionally from a user-provided PMF and
 * target table, then sets the target distribution, biases, the initial
 * reference weight histogram and finally normalizes the PMF.
 * NOTE(review): at least one further parameter (numBias, passed on below) is
 * declared on an original line elided from this excerpt. */
1773 void BiasState::initGridPointState(const AwhBiasParams& awhBiasParams,
1774 ArrayRef<const DimParams> dimParams,
1775 const BiasGrid& grid,
1776 const BiasParams& params,
1777 const std::string& filename,
1780 /* Modify PMF, free energy and the constant target distribution factor
1781 * to user input values if there is data given.
1783 if (awhBiasParams.userPMFEstimate())
1785 readUserPmfAndTargetDistribution(dimParams, grid, filename, numBias, params.biasIndex, &points_);
1786 setFreeEnergyToConvolvedPmf(dimParams, grid);
1789 /* The local Boltzmann distribution is special because the target distribution is updated as a function of the reference weighthistogram. */
1790 GMX_RELEASE_ASSERT(params.eTarget != AwhTargetType::LocalBoltzmann || points_[0].weightSumRef() != 0,
1791 "AWH reference weight histogram not initialized properly with local "
1792 "Boltzmann target distribution.");
1794 updateTargetDistribution(points_, params);
1796 for (PointState& pointState : points_)
1798 if (pointState.inTargetRegion())
1800 pointState.updateBias();
1804 /* Note that for zero target this is a value that represents -infinity but should not be used for biasing. */
1805 pointState.setTargetToZero();
1809 /* Set the initial reference weighthistogram. */
1810 const double histogramSize = histogramSize_.histogramSize();
1811 for (auto& pointState : points_)
1813 pointState.setInitialReferenceWeightHistogram(histogramSize);
1816 /* Make sure the pmf is normalized consistently with the histogram size.
1817 Note: the target distribution and free energy need to be set here. */
1818 normalizePmf(params.numSharedUpdate);
/* BiasState constructor: initializes the coordinate state, one PointState and
 * one covering weight sum per grid point, the histogram size, and the bias
 * sharing handle; the body seeds the local update range at the current grid
 * point. NOTE(review): the constructor body continues past the end of this excerpt. */
1821 BiasState::BiasState(const AwhBiasParams& awhBiasParams,
1822 double histogramSizeInitial,
1823 ArrayRef<const DimParams> dimParams,
1824 const BiasGrid& grid,
1825 const BiasSharing* biasSharing) :
1826 coordState_(awhBiasParams, dimParams, grid),
1827 points_(grid.numPoints()),
1828 weightSumCovering_(grid.numPoints()),
1829 histogramSize_(awhBiasParams, histogramSizeInitial),
1830 biasSharing_(biasSharing)
1832 /* The minimum and maximum multidimensional point indices that are affected by the next update */
1833 for (size_t d = 0; d < dimParams.size(); d++)
1835 int index = grid.point(coordState_.gridpointIndex()).index[d];
1836 originUpdatelist_[d] = index;
1837 endUpdatelist_[d] = index;