# These options need to match the node label in Jenkins and the
# capabilities in releng/agents.py for the agent where the analysis is
# intended to run.
-build_options = ['clang-6', 'clang-static-analyzer-6']
+build_options = ['clang-8', 'clang-static-analyzer-8']
# Policy global variables
use_stdlib_through_env_vars = False
Also, please use the syntax :issue:`number` to reference issues on redmine, without
a space between the colon and the number!
+Fixed bug in gmx order -calcdist
+""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+The reference position for the distance calculation was calculated
+wrongly.
+
int i, j;
int N = tMPI_Comm_N(comm);
volatile tMPI_Comm *newcomm_list;
- volatile int colors[MAX_PREALLOC_THREADS]; /* array with the colors
- of each thread */
+ volatile int colors[MAX_PREALLOC_THREADS] = { 0 }; /* array with the colors
+ of each thread */
volatile int keys[MAX_PREALLOC_THREADS]; /* same for keys (only one of
                                                    the threads actually supplies
these arrays to the comm
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2011,2012,2013,2014,2015,2017, by the GROMACS development team, led by
+ * Copyright (c) 2011,2012,2013,2014,2015,2017,2019, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
if (bParallel)
{
+#ifndef __clang_analyzer__
::testing::Expectation init =
EXPECT_CALL(*this, parallelDataStarted(source, _))
.WillOnce(Return(true));
}
EXPECT_CALL(*this, dataFinished())
.After(framesFinished);
+#endif
}
else
{
BiasCoupledToSystem(Bias bias,
const std::vector<int> &pullCoordIndex);
- Bias bias; /**< The bias. */
- const std::vector<int> pullCoordIndex; /**< The pull coordinates this bias acts on. */
+ Bias bias_; /**< The bias. */
+ const std::vector<int> pullCoordIndex_; /**< The pull coordinates this bias acts on. */
/* Here AWH can be extended to work on other coordinates than pull. */
};
BiasCoupledToSystem::BiasCoupledToSystem(Bias bias,
const std::vector<int> &pullCoordIndex) :
- bias(std::move(bias)),
- pullCoordIndex(pullCoordIndex)
+ bias_(std::move(bias)),
+ pullCoordIndex_(pullCoordIndex)
{
/* We already checked for this in grompp, but check again here. */
- GMX_RELEASE_ASSERT(static_cast<size_t>(bias.ndim()) == pullCoordIndex.size(), "The bias dimensionality should match the number of pull coordinates.");
+ GMX_RELEASE_ASSERT(static_cast<size_t>(bias_.ndim()) == pullCoordIndex_.size(), "The bias dimensionality should match the number of pull coordinates.");
}
Awh::Awh(FILE *fplog,
biasCoupledToSystem_.emplace_back(Bias(k, awhParams, awhParams.awhBiasParams[k], dimParams, beta, inputRecord.delta_t, numSharingSimulations, biasInitFilename, thisRankWillDoIO),
pullCoordIndex);
- biasCoupledToSystem_.back().bias.printInitializationToLog(fplog);
+ biasCoupledToSystem_.back().bias_.printInitializationToLog(fplog);
}
/* Need to register the AWH coordinates to be allowed to apply forces to the pull coordinates. */
std::vector<size_t> pointSize;
for (auto const &biasCts : biasCoupledToSystem_)
{
- pointSize.push_back(biasCts.bias.state().points().size());
+ pointSize.push_back(biasCts.bias_.state().points().size());
}
    /* Ensure that the shared biases are compatible between simulations */
biasesAreCompatibleForSharingBetweenSimulations(awhParams, pointSize, multiSimRecord_);
* pull coordinates.
*/
awh_dvec coordValue = { 0, 0, 0, 0 };
- for (int d = 0; d < biasCts.bias.ndim(); d++)
+ for (int d = 0; d < biasCts.bias_.ndim(); d++)
{
- coordValue[d] = get_pull_coord_value(pull_, biasCts.pullCoordIndex[d], &pbc);
+ coordValue[d] = get_pull_coord_value(pull_, biasCts.pullCoordIndex_[d], &pbc);
}
/* Perform an AWH biasing step: this means, at regular intervals,
     * to support bias sharing within a single simulation.
*/
gmx::ArrayRef<const double> biasForce =
- biasCts.bias.calcForceAndUpdateBias(coordValue,
- &biasPotential, &biasPotentialJump,
- commRecord_,
- multiSimRecord_,
- t, step, seed_, fplog);
+ biasCts.bias_.calcForceAndUpdateBias(coordValue,
+ &biasPotential, &biasPotentialJump,
+ commRecord_,
+ multiSimRecord_,
+ t, step, seed_, fplog);
awhPotential += biasPotential;
* The bias potential is returned at the end of this function,
* so that it can be added externally to the correct energy data block.
*/
- for (int d = 0; d < biasCts.bias.ndim(); d++)
+ for (int d = 0; d < biasCts.bias_.ndim(); d++)
{
- apply_external_pull_coord_force(pull_, biasCts.pullCoordIndex[d],
+ apply_external_pull_coord_force(pull_, biasCts.pullCoordIndex_[d],
biasForce[d], &mdatoms,
forceWithVirial);
}
/* We might have skipped updates for part of the grid points.
* Ensure all points are updated before writing out their data.
*/
- biasCts.bias.doSkippedUpdatesForAllPoints();
+ biasCts.bias_.doSkippedUpdatesForAllPoints();
}
}
for (size_t k = 0; k < awhHistory->bias.size(); k++)
{
- biasCoupledToSystem_[k].bias.initHistoryFromState(&awhHistory->bias[k]);
+ biasCoupledToSystem_[k].bias_.initHistoryFromState(&awhHistory->bias[k]);
}
return awhHistory;
for (size_t k = 0; k < biasCoupledToSystem_.size(); k++)
{
- biasCoupledToSystem_[k].bias.restoreStateFromHistory(awhHistory ? &awhHistory->bias[k] : nullptr, commRecord_);
+ biasCoupledToSystem_[k].bias_.restoreStateFromHistory(awhHistory ? &awhHistory->bias[k] : nullptr, commRecord_);
}
}
for (size_t k = 0; k < awhHistory->bias.size(); k++)
{
- biasCoupledToSystem_[k].bias.updateHistory(&awhHistory->bias[k]);
+ biasCoupledToSystem_[k].bias_.updateHistory(&awhHistory->bias[k]);
}
}
int numSubblocks = 0;
for (auto &biasCoupledToSystem : biasCoupledToSystem_)
{
- numSubblocks += biasCoupledToSystem.bias.numEnergySubblocksToWrite();
+ numSubblocks += biasCoupledToSystem.bias_.numEnergySubblocksToWrite();
}
GMX_ASSERT(numSubblocks > 0, "We should always have data to write");
int energySubblockCount = 0;
for (auto &biasCoupledToSystem : biasCoupledToSystem_)
{
- energySubblockCount += biasCoupledToSystem.bias.writeToEnergySubblocks(&(awhEnergyBlock->sub[energySubblockCount]));
+ energySubblockCount += biasCoupledToSystem.bias_.writeToEnergySubblocks(&(awhEnergyBlock->sub[energySubblockCount]));
}
}
ivec tmp, s;
gmx_domdec_zones_t *zones;
gmx_domdec_ns_ranges_t *izone;
+ GMX_ASSERT(dd->ndim >= 0, "Must have non-negative number of dimensions for DD");
for (d = 0; d < dd->ndim; d++)
{
}
if (dim_ind == 2 && (zonei == 2 || zonei == 3))
{
+ GMX_ASSERT(dim1 >= 0 && dim1 < DIM, "Must have a valid dimension index");
rn[dim1] += cg_cm[cg][dim1] - c->cr1[zone];
tric_sh = 0;
for (i = dim1+1; i < DIM; i++)
if (bDistMB_pulse)
{
clear_rvec(rb);
+ GMX_ASSERT(dim >= 0 && dim < DIM, "Must have a valid dimension index");
rb[dim] += cg_cm[cg][dim] - c->bc[dim_ind] + tric_sh;
if (rb[dim] > 0)
{
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
+#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/smalloc.h"
#include "pme_grid.h"
nthread = pme->nthread;
assert(nthread > 0);
+ GMX_ASSERT(grids != nullptr || !bSpread, "If there's no grid, we cannot be spreading");
#ifdef PME_TIME_THREADS
c1 = omp_cyc_start();
oK1[i] = (KG*i)/P[1];
#endif
}
- for (i = 0; i < P[0]-1; i++)
+ for (i = 0; P[0] > 0 && i < P[0]-1; i++)
{
N0[i] = oN0[i+1]-oN0[i];
M0[i] = oM0[i+1]-oM0[i];
N0[P[0]-1] = NG-oN0[P[0]-1];
M0[P[0]-1] = MG-oM0[P[0]-1];
K0[P[0]-1] = KG-oK0[P[0]-1];
- for (i = 0; i < P[1]-1; i++)
+ for (i = 0; P[1] > 0 && i < P[1]-1; i++)
{
N1[i] = oN1[i+1]-oN1[i];
M1[i] = oM1[i+1]-oM1[i];
       C: contiguous dimension, and nP: number of processors in subcommunicator
for that step */
-
+ GMX_ASSERT(prank[0] < P[0], "Must have valid rank within communicator size");
+ GMX_ASSERT(prank[1] < P[1], "Must have valid rank within communicator size");
pM[0] = M0[prank[0]];
oM[0] = oM0[prank[0]];
pK[0] = K1[prank[1]];
using namespace gmx;
static const char mapper[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*()-_=+{}|;:',<.>/?";
-#define NMAP static_cast<long int>(strlen(mapper))
+#define NMAP static_cast<long int>(sizeof(mapper)/sizeof(mapper[0]))
#define MAX_XPM_LINELENGTH 4096
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
*/
static real optimal_error_estimate(double sigma, const double fitparm[], real tTotal)
{
+ // When sigma is zero, the fitparm data can be uninitialized
+ if (sigma == 0.0)
+ {
+ return 0;
+ }
double ss = fitparm[1]*fitparm[0]+(1-fitparm[1])*fitparm[2];
if ((tTotal <= 0) || (ss <= 0))
{
}
if (sig[s] == 0)
{
- ee = 0;
- a = 1;
- tau1 = 0;
- tau2 = 0;
+ ee = 0;
+ a = 1;
+ tau1 = 0;
+ tau2 = 0;
+ fitparm[0] = 0;
+ fitparm[1] = 0;
+ fitparm[2] = 0;
}
else
{
{
return FALSE;
}
+ GMX_RELEASE_ASSERT((name != nullptr) || (name_length == 0),
+ "If name is empty, the length of the substring to examine within it must be zero");
len = std::strlen(lc->names[index]);
if (len != name_length)
{
return FALSE;
}
+ if (name_length == 0)
+ {
+ // Everything matches a zero-length substring. This branch is
+ // needed because name could in principle be nullptr.
+ return TRUE;
+ }
return std::strncmp(lc->names[index], name, name_length) == 0;
}
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/futil.h"
+#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/smalloc.h"
#define e2d(x) ENM2DEBYE*(x)
gmx_rmpbc_t gpbc = nullptr;
gnx_tot = gnx[0];
- if (ncos > 1)
+ if (ncos == 2)
{
gnx_tot += gnx[1];
}
+ GMX_RELEASE_ASSERT(ncos == 1 || ncos == 2, "Invalid number of groups used with -ncos");
vol_aver = 0.0;
j1 = nx1;
index3 = index1;
}
+ GMX_RELEASE_ASSERT(index1 != nullptr, "Need a valid index for plotting distances");
rmin2 = 1e12;
rmax2 = -1e12;
}
else
{
+ GMX_RELEASE_ASSERT(ng > 1, "Must have more than one group with bMat");
snew(leg, (ng*(ng-1))/2);
for (i = j = 0; (i < ng-1); i++)
{
}
else
{
+ GMX_RELEASE_ASSERT(ng > 1, "Must have more than one group when not using -matrix");
for (i = 1; (i < ng); i++)
{
calc_dist(rcut, bPBC, ePBC, box, x0, gnx[0], gnx[i], index[0], index[i], bGroup,
}
gnx[0] = 1;
}
+ GMX_RELEASE_ASSERT(!bMat || ng > 1, "Must have more than one group with bMat");
if (resfnm)
{
/* calculate the center of mass */
if (!gnx_com.empty())
{
+ GMX_RELEASE_ASSERT(index_com != nullptr, "Center-of-mass removal must have valid index group");
calc_com(bMol, gnx_com[0], index_com[0], xa[cur], xa[prev], box,
&top->atoms, com);
}
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
{
    /* if permolecule = TRUE, order parameters will be calculated per molecule
* and stored in slOrder with #slices = # molecules */
- rvec *x0, /* coordinates with pbc */
- *x1, /* coordinates without pbc */
- dist; /* vector between two atoms */
+ rvec *x0, /* coordinates with pbc */
+ *x1; /* coordinates without pbc */
matrix box; /* box (3x3) */
t_trxstatus *status;
rvec cossum, /* sum of vector angles for three axes */
int *slCount; /* nr. of atoms in one slice */
real sdbangle = 0; /* sum of these angles */
gmx_bool use_unitvector = FALSE; /* use a specified unit vector instead of axis to specify unit normal*/
- rvec direction, com, dref, dvec;
+ rvec direction, com;
int comsize, distsize;
int *comidx = nullptr, *distidx = nullptr;
char *grpname = nullptr;
}
svmul(1.0/comsize, com, com);
}
+ rvec displacementFromReference;
if (distcalc)
{
- dref[XX] = 0.0; dref[YY] = 0.0; dref[ZZ] = 0.0;
+ rvec dref = { 0.0, 0.0, 0.0 };
for (j = 0; j < distsize; j++)
{
- rvec_inc(dist, x1[distidx[j]]);
+ rvec_inc(dref, x1[distidx[j]]);
}
svmul(1.0/distsize, dref, dref);
if (radial)
{
- pbc_dx(&pbc, dref, com, dvec);
- unitv(dvec, dvec);
+ pbc_dx(&pbc, dref, com, displacementFromReference);
+ unitv(displacementFromReference, displacementFromReference);
}
}
if (bUnsat)
{
+ rvec dist;
/* Using convention for unsaturated carbons */
/* first get Sz, the vector from Cn to Cn+1 */
rvec_sub(x1[a[index[i+1]+j]], x1[a[index[i]+j]], dist);
}
else
{
+ rvec dist;
/* get vector dist(Cn-1,Cn+1) for tail atoms */
rvec_sub(x1[a[index[i+1]+j]], x1[a[index[i-1]+j]], dist);
length = norm(dist); /* determine distance between two atoms */
if (radial)
{
/* bin order parameter by arc distance from reference group*/
- arcdist = gmx_angle(dvec, direction);
+ arcdist = gmx_angle(displacementFromReference, direction);
(*distvals)[j][i] += arcdist;
}
else if (i == 1)
tmpdist = trace(box); /* should be max value */
for (k = 0; k < distsize; k++)
{
- pbc_dx(&pbc, x1[distidx[k]], x1[a[index[i]+j]], dvec);
- /* at the moment, just remove dvec[axis] */
- dvec[axis] = 0;
- tmpdist = std::min(tmpdist, norm2(dvec));
+ rvec displacement;
+ pbc_dx(&pbc, x1[distidx[k]], x1[a[index[i]+j]], displacement);
+ /* at the moment, just remove displacement[axis] */
+ displacement[axis] = 0;
+ tmpdist = std::min(tmpdist, norm2(displacement));
}
//fprintf(stderr, "Min dist %f; trace %f\n", tmpdist, trace(box));
(*distvals)[j][i] += std::sqrt(tmpdist);
}
randomArray[ipull] = ipullRandom;
}
- /*for (ipull=0; ipull<nPull; ipull++)
- printf("%d ",randomArray[ipull]); printf("\n"); */
}
/*! \brief Set pull group information of a synthetic histogram
switch (opt->bsMethod)
{
case bsMethod_hist:
- snew(randomArray, nAllPull);
printf("\n\nWhen computing statistical errors by bootstrapping entire histograms:\n");
please_cite(stdout, "Hub2006");
break;
{
case bsMethod_hist:
/* bootstrap complete histograms from given histograms */
+ srenew(randomArray, nAllPull);
getRandomIntArray(nAllPull, opt->histBootStrapBlockLength, randomArray, &opt->rng);
for (i = 0; i < nAllPull; i++)
{
const char *ffdir)
{
#define MAXATOMSPERRESIDUE 16
- int k, m, i0, ni0, whatres, resind, add_shift, nvsite, nadd;
+ int k, m, i0, ni0, whatres, add_shift, nvsite, nadd;
int ai, aj, ak, al;
int nrfound = 0, needed, nrbonds, nrHatoms, Heavy, nrheavies, tpM, tpHeavy;
int Hatoms[4], heavies[4];
int *o2n, *newvsite_type, *newcgnr, ats[MAXATOMSPERRESIDUE];
t_atom *newatom;
char ***newatomname;
- char *resnm = nullptr;
int cmplength;
bool isN, planarN, bFound;
/* generate vsite constructions */
/* loop over all atoms */
- resind = -1;
+ int resind = -1;
for (int i = 0; (i < at->nr); i++)
{
if (at->atom[i].resind != resind)
{
resind = at->atom[i].resind;
- resnm = *(at->resinfo[resind].name);
}
+ const char *resnm = *(at->resinfo[resind].name);
/* first check for aromatics to virtualize */
/* don't waste our effort on DNA, water etc. */
/* Only do the vsite aromatic stuff when we reach the
}
if (bWARNING)
{
- fprintf(stderr,
- "Warning: cannot convert atom %d %s (bound to a heavy atom "
- "%s with \n"
- " %d bonds and %d bound hydrogens atoms) to virtual site\n",
- i+1, *(at->atomname[i]), tpname, nrbonds, nrHatoms);
+ gmx_fatal(FARGS, "Cannot convert atom %d %s (bound to a heavy atom "
+ "%s with \n"
+ " %d bonds and %d bound hydrogens atoms) to virtual site\n",
+ i+1, *(at->atomname[i]), tpname, nrbonds, nrHatoms);
}
if (bAddVsiteParam)
{
}
npbcdim = ePBC2npbcdim(ePBC);
+ GMX_RELEASE_ASSERT(npbcdim <= DIM, "Invalid npbcdim");
clear_rvec(com);
if (rc_scaling != erscNO)
{
if ((i == 0) || (this_chainnumber != prev_chainnumber) || (bWat_ != bPrevWat_))
{
+ GMX_RELEASE_ASSERT(pdba_all.pdbinfo, "Must have pdbinfo from reading a PDB file if chain number is changing");
this_chainstart = pdba_all.atom[i].resind;
bMerged = false;
if (i > 0 && !bWat_)
{
sprintf(err_buf, "nstlog must be non-zero");
CHECK(ir->nstlog == 0);
- sprintf(err_buf, "nst-transition-matrix (%d) must be an integer multiple of nstlog (%d)",
- expand->nstTij, ir->nstlog);
- CHECK((expand->nstTij % ir->nstlog) != 0);
+ // Avoid modulus by zero in the case that already triggered an error exit.
+ if (ir->nstlog != 0)
+ {
+ sprintf(err_buf, "nst-transition-matrix (%d) must be an integer multiple of nstlog (%d)",
+ expand->nstTij, ir->nstlog);
+ CHECK((expand->nstTij % ir->nstlog) != 0);
+ }
}
}
{
ilist_data_t ild[F_NRE];
- assert(bt->nthreads > 0);
+ GMX_ASSERT(bt->nthreads > 0, "Must have positive number of threads");
+ const int numThreads = bt->nthreads;
bt->haveBondeds = false;
int numType = 0;
if (nrToAssignToCpuThreads == 0)
{
/* No interactions, avoid all the integer math below */
- for (int t = 0; t <= bt->nthreads; t++)
+ for (int t = 0; t <= numThreads; t++)
{
bt->workDivision.setBound(fType, t, 0);
}
}
- else if (bt->nthreads <= bt->max_nthread_uniform || fType == F_DISRES)
+ else if (numThreads <= bt->max_nthread_uniform || fType == F_DISRES)
{
/* On up to 4 threads, load balancing the bonded work
* is more important than minimizing the reduction cost.
const int stride = 1 + NRAL(fType);
- for (int t = 0; t <= bt->nthreads; t++)
+ for (int t = 0; t <= numThreads; t++)
{
/* Divide equally over the threads */
- int nr_t = (((nrToAssignToCpuThreads/stride)*t)/bt->nthreads)*stride;
+ int nr_t = (((nrToAssignToCpuThreads/stride)*t)/numThreads)*stride;
if (fType == F_DISRES)
{
int t;
fprintf(debug, "%16s", interaction_function[f].name);
- for (t = 0; t < bt->nthreads; t++)
+ for (t = 0; t < numThreads; t++)
{
fprintf(debug, " %4d",
(bt->workDivision.bound(f, t + 1) -
}
}
- int p_imax;
- real ncons_loc;
- real p_ssd;
- real p_max = 0;
+ int p_imax = 0;
+ real ncons_loc = 0;
+ real p_ssd = 0;
+ real p_max = 0;
if (debug)
{
cconerr(*lincsd, xprime, pbc,
else if (md->cFREEZE)
{
g = md->cFREEZE[i];
+ GMX_ASSERT(opts->nFreeze != nullptr, "Must have freeze groups to initialize masses");
if (opts->nFreeze[g][XX] && opts->nFreeze[g][YY] && opts->nFreeze[g][ZZ])
{
/* Set the mass of completely frozen particles to ALMOST_ZERO
gmx_bool bSumEkinhOld, bDoReplEx, bExchanged, bNeedRepartition;
gmx_bool bTemp, bPres, bTrotter;
real dvdl_constr;
- rvec *cbuf = nullptr;
- int cbuf_nalloc = 0;
+ std::vector<RVec> cbuf;
matrix lastbox;
int lamnew = 0;
/* for FEP */
if (ir->eI == eiVVAK)
{
- /* We probably only need md->homenr, not state->natoms */
- if (state->natoms > cbuf_nalloc)
- {
- cbuf_nalloc = state->natoms;
- srenew(cbuf, cbuf_nalloc);
- }
- copy_rvecn(as_rvec_array(state->x.data()), cbuf, 0, state->natoms);
+ cbuf.resize(state->x.size());
+ std::copy(state->x.begin(), state->x.end(), cbuf.begin());
}
if (c_useGpuUpdateConstrain)
);
wallcycle_start(wcycle, ewcUPDATE);
trotter_update(ir, step, ekind, enerd, state, total_vir, mdatoms, &MassQ, trotter_seq, ettTSEQ4);
- /* now we know the scaling, we can compute the positions again again */
- copy_rvecn(cbuf, as_rvec_array(state->x.data()), 0, state->natoms);
+ /* now we know the scaling, we can compute the positions again */
+ std::copy(cbuf.begin(), cbuf.end(), state->x.begin());
update_coords(step, ir, mdatoms, state, f.arrayRefWithPadding(), fcd,
ekind, M, &upd, etrtPOSITION, cr, constr);
bool doFreeEnergyPerturbation = false;
unsigned int force_flags;
tensor force_vir, shake_vir, total_vir, pres;
- t_trxstatus *status;
+ t_trxstatus *status = nullptr;
rvec mu_tot;
t_trxframe rerun_fr;
gmx_localtop_t top;
#include "gromacs/utility/arraysize.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/fatalerror.h"
+#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/smalloc.h"
typedef struct {
real dt_1, fudge, tm, m1, m2, m3;
rvec *ptr;
+ GMX_RELEASE_ASSERT(mass || mtop, "Must have masses or a way to look them up");
+
/* We introduce a fudge factor for performance reasons: with this choice
* the initial force on the shells is about a factor of two lower than
* without
{
using ::testing::_;
using ::testing::Return;
+#ifndef __clang_analyzer__
ON_CALL(*this, setCurrentThreadAffinityToCore(_))
.WillByDefault(Return(true));
+#endif
}
MockThreadAffinityAccess::~MockThreadAffinityAccess()
#include "gromacs/hardware/hw_info.h"
#include "gromacs/mdrunutility/threadaffinity.h"
+#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/physicalnodecommunicator.h"
#include "gromacs/utility/stringutil.h"
void expectAffinitySetThatFails(int core)
{
using ::testing::Return;
+#ifndef __clang_analyzer__
EXPECT_CALL(affinityAccess_, setCurrentThreadAffinityToCore(core))
.WillOnce(Return(false));
+#else
+ GMX_UNUSED_VALUE(core);
+#endif
}
void expectWarningMatchingRegex(const char *re)
#if NBNXN_SEARCH_SIMD4_FLOAT_X_BB
if (nbat->XFormat == nbatXYZQ)
{
+ GMX_ASSERT(bb_work_aligned != nullptr, "Must have valid aligned work structure");
calc_bounding_box_xxxx_simd4(numAtoms, nbat->x().data() + atomStart*nbat->xstride,
bb_work_aligned, pbb_ptr);
}
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2016,2018, by the GROMACS development team, led by
+ * Copyright (c) 2016,2018,2019, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
// PBC stuff
set_pbc(&pbc, epbc, box);
- GMX_ASSERT(pbc.ndim_ePBC >= 1, "Tests only support PBC along at least x");
+ GMX_ASSERT(pbc.ndim_ePBC >= 1 && pbc.ndim_ePBC <= DIM, "Tests only support PBC along at least x and at most x, y, and z");
real boxSizeZSquared;
if (pbc.ndim_ePBC > ZZ)
#include "gromacs/selection/indexutil.h"
#include "gromacs/selection/selection.h"
#include "gromacs/utility/exceptions.h"
+#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/stringutil.h"
switch (sel->u.boolt)
{
case BOOL_NOT:
+ GMX_ASSERT(g != nullptr, "Need a valid group");
gmx_ana_index_reserve(gmin, g->isize);
gmx_ana_index_reserve(gmax, g->isize);
gmx_ana_index_difference(gmax, g, sel->child->cdata->gmin);
case BOOL_OR:
/* We can assume here that the gmin of children do not overlap
* because of the way _gmx_sel_evaluate_or() works. */
+ GMX_ASSERT(g != nullptr, "Need a valid group");
gmx_ana_index_reserve(gmin, g->isize);
gmx_ana_index_reserve(gmax, g->isize);
gmx_ana_index_copy(gmin, sel->child->cdata->gmin, false);
double t1 = 0.0;
#endif
+ GMX_RELEASE_ASSERT(q != nullptr, "Must have charges");
+
if (seed == 0)
{
seed = static_cast<int>(gmx::makeRandomSeed());
}
if (mode & FLAG_DOTS)
{
+ GMX_RELEASE_ASSERT(nu_dots != nullptr, "Must have valid nu_dots pointer");
*nu_dots = lfnr;
+ GMX_RELEASE_ASSERT(lidots != nullptr, "Must have valid lidots pointer");
*lidots = dots;
}
if (mode & FLAG_ATOM_AREA)
{
+ GMX_RELEASE_ASSERT(at_area != nullptr, "Must have valid at_area pointer");
*at_area = atom_area;
}
*value_of_area = area;