#include "utility.h"
static void distributeVecSendrecv(gmx_domdec_t *dd,
- const t_block *cgs,
gmx::ArrayRef<const gmx::RVec> globalVec,
gmx::ArrayRef<gmx::RVec> localVec)
{
if (DDMASTER(dd))
{
+ const t_block &atomGroups = dd->comm->cgs_gl;
+
std::vector<gmx::RVec> buffer;
for (int rank = 0; rank < dd->nnodes; rank++)
{
const auto &domainGroups = dd->ma->domainGroups[rank];
buffer.resize(domainGroups.numAtoms);
int localAtom = 0;
- for (const int &cg : domainGroups.atomGroups)
+ for (const int &g : domainGroups.atomGroups)
{
- for (int globalAtom = cgs->index[cg]; globalAtom < cgs->index[cg + 1]; globalAtom++)
+ for (int globalAtom = atomGroups.index[g]; globalAtom < atomGroups.index[g + 1]; globalAtom++)
{
buffer[localAtom++] = globalVec[globalAtom];
}
const auto &domainGroups = dd->ma->domainGroups[dd->masterrank];
int localAtom = 0;
- for (const int &cg : domainGroups.atomGroups)
+ for (const int &g : domainGroups.atomGroups)
{
- for (int globalAtom = cgs->index[cg]; globalAtom < cgs->index[cg + 1]; globalAtom++)
+ for (int globalAtom = atomGroups.index[g]; globalAtom < atomGroups.index[g + 1]; globalAtom++)
{
localVec[localAtom++] = globalVec[globalAtom];
}
}
static void distributeVecScatterv(gmx_domdec_t *dd,
- const t_block *cgs,
gmx::ArrayRef<const gmx::RVec> globalVec,
gmx::ArrayRef<gmx::RVec> localVec)
{
get_commbuffer_counts(&ma, &sendCounts, &displacements);
+ const t_block &atomGroups = dd->comm->cgs_gl;
+
gmx::ArrayRef<gmx::RVec> buffer = ma.rvecBuffer;
int localAtom = 0;
for (int rank = 0; rank < dd->nnodes; rank++)
{
const auto &domainGroups = ma.domainGroups[rank];
- for (const int &cg : domainGroups.atomGroups)
+ for (const int &g : domainGroups.atomGroups)
{
- for (int globalAtom = cgs->index[cg]; globalAtom < cgs->index[cg + 1]; globalAtom++)
+ for (int globalAtom = atomGroups.index[g]; globalAtom < atomGroups.index[g + 1]; globalAtom++)
{
buffer[localAtom++] = globalVec[globalAtom];
}
}
static void distributeVec(gmx_domdec_t *dd,
- const t_block *cgs,
gmx::ArrayRef<const gmx::RVec> globalVec,
gmx::ArrayRef<gmx::RVec> localVec)
{
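+    /* A note on the dispatch below: with few ranks, paired send/receive
+     * calls are assumed to be cheaper than a collective scatter, as the
+     * name of the threshold constant suggests.
+     */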
if (dd->nnodes <= c_maxNumRanksUseSendRecvForScatterAndGather)
{
- distributeVecSendrecv(dd, cgs, globalVec, localVec);
+ distributeVecSendrecv(dd, globalVec, localVec);
}
else
{
- distributeVecScatterv(dd, cgs, globalVec, localVec);
+ distributeVecScatterv(dd, globalVec, localVec);
}
}
}
}
-static void dd_distribute_state(gmx_domdec_t *dd, const t_block *cgs,
+static void dd_distribute_state(gmx_domdec_t *dd,
const t_state *state, t_state *state_local,
PaddedRVecVector *f)
{
if (state_local->flags & (1 << estX))
{
- distributeVec(dd, cgs, DDMASTER(dd) ? makeArrayRef(state->x) : gmx::EmptyArrayRef(), state_local->x);
+ distributeVec(dd, DDMASTER(dd) ? makeArrayRef(state->x) : gmx::EmptyArrayRef(), state_local->x);
}
if (state_local->flags & (1 << estV))
{
- distributeVec(dd, cgs, DDMASTER(dd) ? makeArrayRef(state->v) : gmx::EmptyArrayRef(), state_local->v);
+ distributeVec(dd, DDMASTER(dd) ? makeArrayRef(state->v) : gmx::EmptyArrayRef(), state_local->v);
}
if (state_local->flags & (1 << estCGP))
{
- distributeVec(dd, cgs, DDMASTER(dd) ? makeArrayRef(state->cg_p) : gmx::EmptyArrayRef(), state_local->cg_p);
+ distributeVec(dd, DDMASTER(dd) ? makeArrayRef(state->cg_p) : gmx::EmptyArrayRef(), state_local->cg_p);
}
}
static std::vector < std::vector < int>>
getAtomGroupDistribution(FILE *fplog,
const matrix box, const gmx_ddbox_t &ddbox,
- const t_block *cgs, rvec pos[],
+ rvec pos[],
gmx_domdec_t *dd)
{
AtomDistribution &ma = *dd->ma;
const auto cellBoundaries =
set_dd_cell_sizes_slb(dd, &ddbox, setcellsizeslbMASTER, npulse);
- const int *cgindex = cgs->index;
+ const t_block *cgs = &dd->comm->cgs_gl;
+ const int *cgindex = cgs->index;
std::vector < std::vector < int>> indices(dd->nnodes);
}
static void distributeAtomGroups(FILE *fplog, gmx_domdec_t *dd,
- const t_block *cgs,
const matrix box, const gmx_ddbox_t *ddbox,
rvec pos[])
{
check_screw_box(box);
}
- groupIndices = getAtomGroupDistribution(fplog, box, *ddbox, cgs, pos, dd);
+ groupIndices = getAtomGroupDistribution(fplog, box, *ddbox, pos, dd);
for (int rank = 0; rank < dd->nnodes; rank++)
{
dd->ncg_home*sizeof(int), dd->globalAtomGroupIndices.data());
/* Determine the home charge group sizes */
- dd->atomGroups_.index.resize(dd->ncg_home + 1);
- dd->atomGroups_.index[0] = 0;
+ const t_block &globalAtomGroups = dd->comm->cgs_gl;
+ dd->atomGroups_.clear();
for (int i = 0; i < dd->ncg_home; i++)
{
- int cg_gl = dd->globalAtomGroupIndices[i];
- dd->atomGroups_.index[i+1] =
- dd->atomGroups_.index[i] + cgs->index[cg_gl+1] - cgs->index[cg_gl];
+ dd->atomGroups_.appendBlock(globalAtomGroups.blockSize(dd->globalAtomGroupIndices[i]));
}
if (debug)
void distributeState(FILE *fplog,
gmx_domdec_t *dd,
t_state *state_global,
- const t_block &cgs_gl,
const gmx_ddbox_t &ddbox,
t_state *state_local,
PaddedRVecVector *f)
{
rvec *xGlobal = (DDMASTER(dd) ? as_rvec_array(state_global->x.data()) : nullptr);
- distributeAtomGroups(fplog, dd, &cgs_gl,
+ distributeAtomGroups(fplog, dd,
DDMASTER(dd) ? state_global->box : nullptr,
&ddbox, xGlobal);
- dd_distribute_state(dd, &cgs_gl,
- state_global, state_local, f);
+ dd_distribute_state(dd, state_global, state_local, f);
}
void distributeState(FILE *fplog,
gmx_domdec_t *dd,
t_state *state_global,
- const t_block &cgs_gl,
const gmx_ddbox_t &ddbox,
t_state *state_local,
PaddedRVecVector *f);
comm = dd->comm;
- const BlockRanges &atomGroups = dd->atomGroups();
+ const RangePartitioning &atomGroups = dd->atomGroups();
nzone = 1;
nat_tot = comm->atomRanges.numHomeAtoms();
cd = &comm->cd[d];
for (const gmx_domdec_ind_t &ind : cd->ind)
{
- gmx::ArrayRef<const int> index = ind.index;
DDBufferAccess<gmx::RVec> sendBufferAccess(comm->rvecBuffer, ind.nsend[nzone + 1]);
gmx::ArrayRef<gmx::RVec> &sendBuffer = sendBufferAccess.buffer;
int n = 0;
if (!bPBC)
{
- for (int i = 0; i < ind.nsend[nzone]; i++)
+ for (int g : ind.index)
{
- int at0 = atomGroups.index[index[i]];
- int at1 = atomGroups.index[index[i] + 1];
- for (int j = at0; j < at1; j++)
+ for (int j : atomGroups.block(g))
{
sendBuffer[n] = x[j];
n++;
}
else if (!bScrew)
{
- for (int i = 0; i < ind.nsend[nzone]; i++)
+ for (int g : ind.index)
{
- int at0 = atomGroups.index[index[i]];
- int at1 = atomGroups.index[index[i] + 1];
- for (int j = at0; j < at1; j++)
+ for (int j : atomGroups.block(g))
{
/* We need to shift the coordinates */
for (int d = 0; d < DIM; d++)
}
else
{
- for (int i = 0; i < ind.nsend[nzone]; i++)
+ for (int g : ind.index)
{
- int at0 = atomGroups.index[index[i]];
- int at1 = atomGroups.index[index[i]+1];
- for (int j = at0; j < at1; j++)
+ for (int j : atomGroups.block(g))
{
/* Shift x */
sendBuffer[n][XX] = x[j][XX] + shift[XX];
comm = dd->comm;
- const BlockRanges &atomGroups = dd->atomGroups();
+ const RangePartitioning &atomGroups = dd->atomGroups();
nzone = comm->zones.n/2;
nat_tot = comm->atomRanges.end(DDAtomRanges::Type::Zones);
/* Communicate the forces */
ddSendrecv(dd, d, dddirForward,
sendBuffer, receiveBuffer);
- gmx::ArrayRef<const int> index = ind.index;
/* Add the received forces */
- int n = 0;
+ int n = 0;
if (!bShiftForcesNeedPbc)
{
- for (int i = 0; i < ind.nsend[nzone]; i++)
+ for (int g : ind.index)
{
- int at0 = atomGroups.index[index[i]];
- int at1 = atomGroups.index[index[i] + 1];
- for (int j = at0; j < at1; j++)
+ for (int j : atomGroups.block(g))
{
for (int d = 0; d < DIM; d++)
{
/* fshift should always be defined if this function is
* called when bShiftForcesNeedPbc is true */
assert(NULL != fshift);
- for (int i = 0; i < ind.nsend[nzone]; i++)
+ for (int g : ind.index)
{
- int at0 = atomGroups.index[index[i]];
- int at1 = atomGroups.index[index[i] + 1];
- for (int j = at0; j < at1; j++)
+ for (int j : atomGroups.block(g))
{
for (int d = 0; d < DIM; d++)
{
}
else
{
- for (int i = 0; i < ind.nsend[nzone]; i++)
+ for (int g : ind.index)
{
- int at0 = atomGroups.index[index[i]];
- int at1 = atomGroups.index[index[i] + 1];
- for (int j = at0; j < at1; j++)
+ for (int j : atomGroups.block(g))
{
/* Rotate the force */
f[j][XX] += receiveBuffer[n][XX];
comm = dd->comm;
- const BlockRanges &atomGroups = dd->atomGroups();
+ const RangePartitioning &atomGroups = dd->atomGroups();
nzone = 1;
nat_tot = comm->atomRanges.numHomeAtoms();
cd = &comm->cd[d];
for (const gmx_domdec_ind_t &ind : cd->ind)
{
- gmx::ArrayRef<const int> index = ind.index;
-
/* Note: We provision for RVec instead of real, so a factor of 3
* more than needed. The buffer actually already has this size
* and we pass a plain pointer below, so this does not matter.
DDBufferAccess<gmx::RVec> sendBufferAccess(comm->rvecBuffer, ind.nsend[nzone + 1]);
gmx::ArrayRef<real> sendBuffer = realArrayRefFromRvecArrayRef(sendBufferAccess.buffer);
int n = 0;
- for (int i = 0; i < ind.nsend[nzone]; i++)
+ for (int g : ind.index)
{
- int at0 = atomGroups.index[index[i]];
- int at1 = atomGroups.index[index[i] + 1];
- for (int j = at0; j < at1; j++)
+ for (int j : atomGroups.block(g))
{
sendBuffer[n++] = v[j];
}
comm = dd->comm;
- const gmx::BlockRanges &atomGroups = dd->atomGroups();
+ const gmx::RangePartitioning &atomGroups = dd->atomGroups();
nzone = comm->zones.n/2;
nat_tot = comm->atomRanges.end(DDAtomRanges::Type::Zones);
/* Communicate the forces */
ddSendrecv(dd, d, dddirForward,
sendBuffer, receiveBuffer);
- gmx::ArrayRef<const int> index = ind.index;
/* Add the received forces */
- int n = 0;
- for (int i = 0; i < ind.nsend[nzone]; i++)
+ int n = 0;
+ for (int g : ind.index)
{
- int at0 = atomGroups.index[index[i]];
- int at1 = atomGroups.index[index[i] + 1];
- for (int j = at0; j < at1; j++)
+ for (int j : atomGroups.block(g))
{
v[j] += receiveBuffer[n];
n++;
if (i < dd->comm->atomRanges.end(DDAtomRanges::Type::Zones))
{
c = 0;
- while (i >= dd->atomGroups().index[dd->comm->zones.cg_range[c+1]])
+ while (i >= dd->atomGroups().subRange(0, dd->comm->zones.cg_range[c + 1]).end())
{
c++;
}
gmx::ArrayRef<const int> atomGroupsState = state->cg_gl;
std::vector<int> &globalAtomGroupIndices = dd->globalAtomGroupIndices;
- gmx::BlockRanges &atomGroups = dd->atomGroups_;
+ gmx::RangePartitioning &atomGroups = dd->atomGroups_;
globalAtomGroupIndices.resize(atomGroupsState.size());
- atomGroups.index.resize(atomGroupsState.size() + 1);
+ atomGroups.clear();
/* Copy back the global charge group indices from state
* and rebuild the local charge group to atom index.
*/
- int atomIndex = 0;
for (unsigned int i = 0; i < atomGroupsState.size(); i++)
{
- atomGroups.index[i] = atomIndex;
const int atomGroupGlobal = atomGroupsState[i];
+ const int groupSize = gcgs_index[atomGroupGlobal + 1] - gcgs_index[atomGroupGlobal];
globalAtomGroupIndices[i] = atomGroupGlobal;
- atomIndex += gcgs_index[atomGroupGlobal + 1] - gcgs_index[atomGroupGlobal];
+ atomGroups.appendBlock(groupSize);
}
- atomGroups.index[atomGroupsState.size()] = atomIndex;
dd->ncg_home = atomGroupsState.size();
- dd->comm->atomRanges.setEnd(DDAtomRanges::Type::Home, atomIndex);
+ dd->comm->atomRanges.setEnd(DDAtomRanges::Type::Home, atomGroups.fullRange().end());
set_zones_ncg_home(dd);
}
}
/* Make the local to global and global to local atom index */
- int a = dd->atomGroups().index[cg_start];
+ int a = dd->atomGroups().subRange(cg_start, cg_start).begin();
globalAtomIndices.resize(a);
for (int zone = 0; zone < numZones; zone++)
{
}
}
-static void make_cell2at_index(gmx_domdec_comm_dim_t *cd,
- int nzone,
- int cg0,
- const BlockRanges &atomGroups)
+static void make_cell2at_index(gmx_domdec_comm_dim_t *cd,
+ int nzone,
+ int atomGroupStart,
+ const RangePartitioning &atomGroups)
{
/* Store the atom block boundaries for easy copying of communication buffers
*/
- int cg = cg0;
+ int g = atomGroupStart;
for (int zone = 0; zone < nzone; zone++)
{
for (gmx_domdec_ind_t &ind : cd->ind)
{
- ind.cell2at0[zone] = atomGroups.index[cg];
- cg += ind.nrecv[zone];
- ind.cell2at1[zone] = atomGroups.index[cg];
+ const auto range = atomGroups.subRange(g, g + ind.nrecv[zone]);
+ ind.cell2at0[zone] = range.begin();
+ ind.cell2at1[zone] = range.end();
+ g += ind.nrecv[zone];
}
}
}
int zonei, int zone,
int cg0, int cg1,
gmx::ArrayRef<const int> globalAtomGroupIndices,
- const gmx::BlockRanges &atomGroups,
+ const gmx::RangePartitioning &atomGroups,
int dim, int dim_ind,
int dim0, int dim1, int dim2,
real r_comm2, real r_bcomm2,
}
vbuf.emplace_back(posPbc[XX], posPbc[YY], posPbc[ZZ]);
- nat += atomGroups.index[cg+1] - atomGroups.index[cg];
+ nat += atomGroups.block(cg).size();
}
}
/* Make space for the global cg indices */
int numAtomGroupsNew = pos_cg + ind->nrecv[nzone];
dd->globalAtomGroupIndices.resize(numAtomGroupsNew);
- dd->atomGroups_.index.resize(numAtomGroupsNew + 1);
/* Communicate the global cg indices */
gmx::ArrayRef<int> integerBufferRef;
if (cd->receiveInPlace)
cg_gl = dd->globalAtomGroupIndices[pos_cg];
fr->cginfo[pos_cg] = ddcginfo(cginfo_mb, cg_gl);
nrcg = GET_CGINFO_NATOMS(fr->cginfo[pos_cg]);
- dd->atomGroups_.index[pos_cg + 1] = dd->atomGroups_.index[pos_cg] + nrcg;
+ dd->atomGroups_.appendBlock(nrcg);
if (bBondComm)
{
/* Update the charge group presence,
else
{
/* This part of the code is never executed with bBondComm. */
+ std::vector<int> &atomGroupsIndex = dd->atomGroups_.rawIndex();
+ atomGroupsIndex.resize(numAtomGroupsNew + 1);
+
merge_cg_buffers(nzone, cd, p, zone_cg_range,
dd->globalAtomGroupIndices, integerBufferRef.data(),
cg_cm, as_rvec_array(rvecBufferRef.data()),
- dd->atomGroups_.index,
+ atomGroupsIndex,
fr->cginfo_mb, fr->cginfo);
pos_cg += ind->nrecv[nzone];
}
}
}
-static void order_vec_atom(int ncg,
- const gmx::BlockRanges *atomGroups,
- const gmx_cgsort_t *sort,
- gmx::ArrayRef<gmx::RVec> v,
- gmx::ArrayRef<gmx::RVec> buf)
+static void order_vec_atom(int ncg,
+ const gmx::RangePartitioning *atomGroups,
+ const gmx_cgsort_t *sort,
+ gmx::ArrayRef<gmx::RVec> v,
+ gmx::ArrayRef<gmx::RVec> buf)
{
- int a, atot, cg, cg0, cg1, i;
-
if (atomGroups == nullptr)
{
/* Avoid the useless loop of the atoms within a cg */
}
/* Order the data */
- a = 0;
- for (cg = 0; cg < ncg; cg++)
+ int a = 0;
+ for (int g = 0; g < ncg; g++)
{
- cg0 = atomGroups->index[sort[cg].ind];
- cg1 = atomGroups->index[sort[cg].ind+1];
- for (i = cg0; i < cg1; i++)
+ for (int i : atomGroups->block(sort[g].ind))
{
copy_rvec(v[i], buf[a]);
a++;
}
}
- atot = a;
+ int atot = a;
/* Copy back to the original array */
- for (a = 0; a < atot; a++)
+ for (int a = 0; a < atot; a++)
{
copy_rvec(buf[a], v[a]);
}
{
gmx_domdec_sort_t *sort;
gmx_cgsort_t *cgsort;
- int ncg_new, i, *ibuf, cgsize;
+ int ncg_new, i, *ibuf;
sort = dd->comm->sort;
ncg_new = 0;
}
- const gmx::BlockRanges &atomGroups = dd->atomGroups();
+ const gmx::RangePartitioning &atomGroups = dd->atomGroups();
/* We alloc with the old size, since cgindex is still old */
- DDBufferAccess<gmx::RVec> rvecBuffer(dd->comm->rvecBuffer, atomGroups.index[dd->ncg_home]);
+ GMX_ASSERT(atomGroups.numBlocks() == dd->ncg_home, "atomGroups and dd should be consistent");
+ DDBufferAccess<gmx::RVec> rvecBuffer(dd->comm->rvecBuffer, atomGroups.fullRange().end());
- const gmx::BlockRanges *atomGroupsPtr = (dd->comm->bCGs ? &atomGroups : nullptr);
+ const gmx::RangePartitioning *atomGroupsPtr = (dd->comm->bCGs ? &atomGroups : nullptr);
/* Remove the charge groups which are no longer at home here */
dd->ncg_home = ncg_new;
/* Rebuild the local cg index */
if (dd->comm->bCGs)
{
- ibuf[0] = 0;
+ /* We make a new, ordered atomGroups object and assign it to
+ * the old one. This causes some allocation overhead, but saves
+ * a copy back of the whole index.
+ */
+ gmx::RangePartitioning ordered;
for (i = 0; i < dd->ncg_home; i++)
{
- cgsize = atomGroups.index[cgsort[i].ind+1] - atomGroups.index[cgsort[i].ind];
- ibuf[i+1] = ibuf[i] + cgsize;
- }
- for (i = 0; i < dd->ncg_home+1; i++)
- {
- dd->atomGroups_.index[i] = ibuf[i];
+ ordered.appendBlock(atomGroups.block(cgsort[i].ind).size());
}
+ dd->atomGroups_ = ordered;
}
else
{
- for (i = 0; i < dd->ncg_home+1; i++)
- {
- dd->atomGroups_.index[i] = i;
- }
+ dd->atomGroups_.setAllBlocksSizeOne(dd->ncg_home);
}
/* Set the home atom number */
- dd->comm->atomRanges.setEnd(DDAtomRanges::Type::Home, dd->atomGroups().index[dd->ncg_home]);
+ dd->comm->atomRanges.setEnd(DDAtomRanges::Type::Home, dd->atomGroups().fullRange().end());
if (fr->cutoff_scheme == ecutsVERLET)
{
true, xGlobal,
&ddbox);
- distributeState(fplog, dd, state_global, *cgs_gl, ddbox, state_local, f);
+ distributeState(fplog, dd, state_global, ddbox, state_local, f);
dd_make_local_cgs(dd, &top_local->cgs);
{
if (GET_CGINFO_SETTLE(cginfo[cg]))
{
- for (int a = dd->atomGroups().index[cg]; a < dd->atomGroups().index[cg+1]; a++)
+ for (int a : dd->atomGroups().block(cg))
{
int a_gl = dd->globalAtomIndices[a];
int a_mol;
{
if (GET_CGINFO_CONSTR(cginfo[cg]))
{
- for (int a = dd->atomGroups().index[cg]; a < dd->atomGroups().index[cg+1]; a++)
+ for (int a : dd->atomGroups().block(cg))
{
int a_gl = dd->globalAtomIndices[a];
int molnr, a_mol;
gmx_domdec_specat_comm_t *constraint_comm = nullptr;
/* The number of home atom groups */
- int ncg_home = 0;
+ int ncg_home = 0;
/* Global atom group indices for the home and all non-home groups */
- std::vector<int> globalAtomGroupIndices;
+ std::vector<int> globalAtomGroupIndices;
/* The atom groups for the home and all non-home groups, todo: make private */
- gmx::BlockRanges atomGroups_;
- const gmx::BlockRanges &atomGroups() const
+ gmx::RangePartitioning atomGroups_;
+ const gmx::RangePartitioning &atomGroups() const
{
return atomGroups_;
}
}
/*! \brief Build the index that maps each local atom to its local atom group */
-static void makeLocalAtomGroupFromAtom(gmx_domdec_t *dd)
+static void makeLocalAtomGroupsFromAtoms(gmx_domdec_t *dd)
{
- const gmx::BlockRanges &atomGroups = dd->atomGroups();
+ const gmx::RangePartitioning &atomGroups = dd->atomGroups();
dd->localAtomGroupFromAtom.clear();
for (size_t g = 0; g < dd->globalAtomGroupIndices.size(); g++)
{
- for (int a = atomGroups.index[g]; a < atomGroups.index[g + 1]; a++)
+ for (int gmx_unused a : atomGroups.block(g))
{
dd->localAtomGroupFromAtom.push_back(g);
}
int **vsite_pbc,
int *vsite_pbc_nalloc,
int izone,
- int at_start, int at_end)
+ gmx::RangePartitioning::Block atomRange)
{
int mb, mt, mol, i_mol;
int *index, *rtil;
nbonded_local = 0;
- for (int i = at_start; i < at_end; i++)
+ for (int i : atomRange)
{
/* Get the global atom number */
const int i_gl = dd->globalAtomIndices[i];
}
/*! \brief Set the exclusion data for i-zone \p iz for the case of no exclusions */
-static void set_no_exclusions_zone(gmx_domdec_t *dd, gmx_domdec_zones_t *zones,
- int iz, t_blocka *lexcls)
+static void set_no_exclusions_zone(const gmx_domdec_t *dd,
+ const gmx_domdec_zones_t *zones,
+ int iz,
+ t_blocka *lexcls)
{
- int a0 = dd->atomGroups().index[zones->cg_range[iz]];
- int a1 = dd->atomGroups().index[zones->cg_range[iz + 1]];
+ const auto zone = dd->atomGroups().subRange(zones->cg_range[iz],
+ zones->cg_range[iz + 1]);
- for (int a = a0 + 1; a < a1 + 1; a++)
+ for (int a : zone)
{
- lexcls->index[a] = lexcls->nra;
+ lexcls->index[a + 1] = lexcls->nra;
}
}
ga2la = dd->ga2la;
- int jla0 = dd->atomGroups().index[zones->izone[iz].jcg0];
- int jla1 = dd->atomGroups().index[zones->izone[iz].jcg1];
+ const auto jRange = dd->atomGroups().subRange(zones->izone[iz].jcg0,
+ zones->izone[iz].jcg1);
n_excl_at_max = dd->reverse_top->n_excl_at_max;
/* We set the end index, but note that we might not start at zero here */
- lexcls->nr = dd->atomGroups().index[cg_end];
+ lexcls->nr = dd->atomGroups().subRange(0, cg_end).size();
int n = lexcls->nra;
int count = 0;
lexcls->nalloc_a = over_alloc_large(n + (cg_end - cg_start)*n_excl_at_max);
srenew(lexcls->a, lexcls->nalloc_a);
}
- int la0 = dd->atomGroups().index[cg];
- int la1 = dd->atomGroups().index[cg + 1];
+ const auto atomGroup = dd->atomGroups().block(cg);
if (GET_CGINFO_EXCL_INTER(cginfo[cg]) ||
!GET_CGINFO_EXCL_INTRA(cginfo[cg]))
{
/* Copy the exclusions from the global top */
- for (int la = la0; la < la1; la++)
+ for (int la : atomGroup)
{
lexcls->index[la] = n;
int a_gl = dd->globalAtomIndices[la];
int aj_mol = excls->a[j];
/* This computation of jla is only correct intra-cg */
int jla = la + aj_mol - a_mol;
- if (jla >= la0 && jla < la1)
+ if (atomGroup.inRange(jla))
{
/* This is an intra-cg exclusion. We can skip
* the global indexing and distance checking.
count++;
}
}
- else if (jla >= jla0 && jla < jla1 &&
+ else if (jRange.inRange(jla) &&
(!bRCheck ||
dd_dist2(pbc_null, cg_cm, la2lc, la, jla) < rc2))
{
- /* jla > la, since jla0 > la */
+ /* jla > la, since jRange.begin() > la */
lexcls->a[n++] = jla;
count++;
}
*/
if (iz == 0)
{
- for (int la = la0; la < la1; la++)
+ for (int la : atomGroup)
{
lexcls->index[la] = n;
- for (int j = la0; j < la1; j++)
+ for (int j : atomGroup)
{
lexcls->a[n++] = j;
}
}
- count += ((la1 - la0)*(la1 - la0 - 1))/2;
+ count += (atomGroup.size()*(atomGroup.size() - 1))/2;
}
else
{
/* We don't need exclusions for this cg */
- for (int la = la0; la < la1; la++)
+ for (int la : atomGroup)
{
lexcls->index[la] = n;
}
ga2la = dd->ga2la;
- int jla0 = dd->atomGroups().index[zones->izone[iz].jcg0];
- int jla1 = dd->atomGroups().index[zones->izone[iz].jcg1];
+ const auto jRange = dd->atomGroups().subRange(zones->izone[iz].jcg0,
+ zones->izone[iz].jcg1);
n_excl_at_max = dd->reverse_top->n_excl_at_max;
* the number of exclusions in the list, which in turn
* can speed up the pair list construction a bit.
*/
- if (at_j >= jla0 && at_j < jla1)
+ if (jRange.inRange(at_j))
{
lexcls->a[n++] = at_j;
}
static void check_exclusions_alloc(gmx_domdec_t *dd, gmx_domdec_zones_t *zones,
t_blocka *lexcls)
{
- int nr = dd->atomGroups().index[zones->izone[zones->nizone - 1].cg1];
+ int nr = dd->atomGroups().subRange(0, zones->izone[zones->nizone - 1].cg1).size();
check_alloc_index(lexcls, nr);
static void finish_local_exclusions(gmx_domdec_t *dd, gmx_domdec_zones_t *zones,
t_blocka *lexcls)
{
- lexcls->nr = dd->atomGroups().index[zones->izone[zones->nizone - 1].cg1];
+ const auto nonhomeIzonesAtomRange =
+ dd->atomGroups().subRange(zones->izone[0].cg1,
+ zones->izone[zones->nizone - 1].cg1);
if (dd->n_intercg_excl == 0)
{
/* There are no exclusions involving non-home charge groups,
* but we need to set the indices for neighborsearching.
*/
- int la0 = dd->atomGroups().index[zones->izone[0].cg1];
- for (int la = la0; la < lexcls->nr; la++)
+ for (int la : nonhomeIzonesAtomRange)
{
lexcls->index[la] = lexcls->nra;
}
/* nr is only used to loop over the exclusions for Ewald and RF,
* so we can set it to the number of home atoms for efficiency.
*/
- lexcls->nr = dd->atomGroups().index[zones->izone[0].cg1];
+ lexcls->nr = nonhomeIzonesAtomRange.begin();
+ }
+ else
+ {
+ lexcls->nr = nonhomeIzonesAtomRange.end();
}
}
idef_t,
vsite_pbc, vsite_pbc_nalloc,
izone,
- dd->atomGroups().index[cg0t],
- dd->atomGroups().index[cg1t]);
+ dd->atomGroups().subRange(cg0t, cg1t));
if (izone < nzone_excl)
{
excl_t->nra = 0;
}
- int numAtomGroups = dd->globalAtomGroupIndices.size();
- if (dd->atomGroups().index[numAtomGroups] == numAtomGroups &&
+ if (dd->atomGroups().allBlocksHaveSizeOne() &&
!rt->bExclRequired)
{
/* No charge groups and no distance check required */
void dd_make_local_cgs(gmx_domdec_t *dd, t_block *lcgs)
{
lcgs->nr = dd->globalAtomGroupIndices.size();
- lcgs->index = dd->atomGroups_.index.data();
+ lcgs->index = dd->atomGroups_.rawIndex().data();
}
void dd_make_local_top(gmx_domdec_t *dd, gmx_domdec_zones_t *zones,
}
if (bRCheckMB || bRCheck2B)
{
- makeLocalAtomGroupFromAtom(dd);
+ makeLocalAtomGroupsFromAtoms(dd);
if (fr->bMolPBC)
{
pbc_null = set_pbc_dd(&pbc, fr->ePBC, dd->nc, TRUE, box);
#define DD_FLAG_BW(d) (1<<(16+(d)*2+1))
static int compact_and_copy_vec_at(int ncg, int *move,
- const gmx::BlockRanges &atomGroups,
+ const gmx::RangePartitioning &atomGroups,
int nvec, int vec,
rvec *src, gmx_domdec_comm_t *comm,
gmx_bool bCompact)
{
- int m, icg, i, i0, i1, nrcg;
- int home_pos;
- int pos_vec[DIM*2];
+ int home_pos = 0;
+ int pos_vec[DIM*2] = { 0 };
- home_pos = 0;
-
- for (m = 0; m < DIM*2; m++)
- {
- pos_vec[m] = 0;
- }
-
- i0 = 0;
- for (icg = 0; icg < ncg; icg++)
+ for (int g = 0; g < ncg; g++)
{
- i1 = atomGroups.index[icg+1];
- m = move[icg];
+ const auto atomGroup = atomGroups.block(g);
+ int m = move[g];
if (m == -1)
{
if (bCompact)
{
/* Compact the home array in place */
- for (i = i0; i < i1; i++)
+ for (int i : atomGroup)
{
copy_rvec(src[i], src[home_pos++]);
}
else
{
/* Copy to the communication buffer */
- nrcg = i1 - i0;
- pos_vec[m] += 1 + vec*nrcg;
- for (i = i0; i < i1; i++)
+ int numAtoms = atomGroup.size();
+ pos_vec[m] += 1 + vec*numAtoms;
+ for (int i : atomGroup)
{
copy_rvec(src[i], comm->cgcm_state[m][pos_vec[m]++]);
}
- pos_vec[m] += (nvec - vec - 1)*nrcg;
+ pos_vec[m] += (nvec - vec - 1)*numAtoms;
}
if (!bCompact)
{
- home_pos += i1 - i0;
+ home_pos += atomGroup.size();
}
- i0 = i1;
}
return home_pos;
}
static int compact_and_copy_vec_cg(int ncg, int *move,
- const gmx::BlockRanges &atomGroups,
+ const gmx::RangePartitioning &atomGroups,
int nvec, rvec *src, gmx_domdec_comm_t *comm,
gmx_bool bCompact)
{
- int m, icg, i0, i1, nrcg;
- int home_pos;
- int pos_vec[DIM*2];
-
- home_pos = 0;
-
- for (m = 0; m < DIM*2; m++)
- {
- pos_vec[m] = 0;
- }
+ int home_pos = 0;
+ int pos_vec[DIM*2] = { 0 };
- i0 = 0;
- for (icg = 0; icg < ncg; icg++)
+ for (int g = 0; g < ncg; g++)
{
- i1 = atomGroups.index[icg + 1];
- m = move[icg];
+ const auto atomGroup = atomGroups.block(g);
+ int m = move[g];
if (m == -1)
{
if (bCompact)
{
/* Compact the home array in place */
- copy_rvec(src[icg], src[home_pos++]);
+ copy_rvec(src[g], src[home_pos++]);
}
}
else
{
- nrcg = i1 - i0;
/* Copy to the communication buffer */
- copy_rvec(src[icg], comm->cgcm_state[m][pos_vec[m]]);
- pos_vec[m] += 1 + nrcg*nvec;
+ copy_rvec(src[g], comm->cgcm_state[m][pos_vec[m]]);
+ pos_vec[m] += 1 + atomGroup.size()*nvec;
}
- i0 = i1;
}
if (!bCompact)
{
return home_pos;
}
-static int compact_ind(int ncg,
- const int *move,
- gmx::ArrayRef<int> globalAtomGroupIndices,
- gmx::BlockRanges *atomGroups,
- gmx::ArrayRef<int> globalAtomIndices,
- gmx_ga2la_t *ga2la,
- char *bLocalCG,
- int *cginfo)
+static int compact_ind(int numAtomGroups,
+ const int *move,
+ gmx::ArrayRef<int> globalAtomGroupIndices,
+ gmx::RangePartitioning *atomGroups,
+ gmx::ArrayRef<int> globalAtomIndices,
+ gmx_ga2la_t *ga2la,
+ char *bLocalCG,
+ int *cginfo)
{
+ /* Build the compacted partitioning in a new object and assign it at
+ * the end, since the blocks of the old partitioning are still read below.
+ */
+ gmx::RangePartitioning compactedGroups;
int home_pos = 0;
int nat = 0;
- for (int cg = 0; cg < ncg; cg++)
+ for (int g = 0; g < numAtomGroups; g++)
{
- int a0 = atomGroups->index[cg];
- int a1 = atomGroups->index[cg+1];
- if (move[cg] == -1)
+ if (move[g] == -1)
{
/* Compact the home arrays in place.
* Anything that can be done here avoids access to global arrays.
*/
- atomGroups->index[home_pos] = nat;
- for (int a = a0; a < a1; a++)
+ compactedGroups.appendBlock(atomGroups->block(g).size());
+ for (int a : atomGroups->block(g))
{
const int a_gl = globalAtomIndices[a];
globalAtomIndices[nat] = a_gl;
ga2la_change_la(ga2la, a_gl, nat);
nat++;
}
- globalAtomGroupIndices[home_pos] = globalAtomGroupIndices[cg];
- cginfo[home_pos] = cginfo[cg];
+ globalAtomGroupIndices[home_pos] = globalAtomGroupIndices[g];
+ cginfo[home_pos] = cginfo[g];
/* The charge group remains local, so bLocalCG does not change */
home_pos++;
}
else
{
/* Clear the global indices */
- for (int a = a0; a < a1; a++)
+ for (int a : atomGroups->block(g))
{
ga2la_del(ga2la, globalAtomIndices[a]);
}
if (bLocalCG)
{
- bLocalCG[globalAtomGroupIndices[cg]] = FALSE;
+ bLocalCG[globalAtomGroupIndices[g]] = FALSE;
}
}
}
- atomGroups->index[home_pos] = nat;
+ *atomGroups = compactedGroups;
+
+ GMX_ASSERT(atomGroups->numBlocks() == home_pos, "The atom group data and atomGroups should be consistent");
return home_pos;
}
-static void clear_and_mark_ind(int ncg,
- const int *move,
- gmx::ArrayRef<const int> globalAtomGroupIndices,
- const gmx::BlockRanges &atomGroups,
- gmx::ArrayRef<const int> globalAtomIndices,
- gmx_ga2la_t *ga2la,
- char *bLocalCG,
- int *cell_index)
+static void clear_and_mark_ind(int numAtomGroups,
+ const int *move,
+ gmx::ArrayRef<const int> globalAtomGroupIndices,
+ const gmx::RangePartitioning &atomGroups,
+ gmx::ArrayRef<const int> globalAtomIndices,
+ gmx_ga2la_t *ga2la,
+ char *bLocalCG,
+ int *cell_index)
{
- for (int cg = 0; cg < ncg; cg++)
+ for (int g = 0; g < numAtomGroups; g++)
{
- if (move[cg] >= 0)
+ if (move[g] >= 0)
{
- int a0 = atomGroups.index[cg];
- int a1 = atomGroups.index[cg + 1];
/* Clear the global indices */
- for (int a = a0; a < a1; a++)
+ for (int a : atomGroups.block(g))
{
ga2la_del(ga2la, globalAtomIndices[a]);
}
if (bLocalCG)
{
- bLocalCG[globalAtomGroupIndices[cg]] = FALSE;
+ bLocalCG[globalAtomGroupIndices[g]] = FALSE;
}
- /* Signal that this cg has moved using the ns cell index.
+ /* Signal that this group has moved using the ns cell index.
* Here we set it to -1. fill_grid will change it
* from -1 to NSGRID_SIGNAL_MOVED_FAC*grid->ncells.
*/
- cell_index[cg] = -1;
+ cell_index[g] = -1;
}
}
}
{
fprintf(fplog, "%s %d moved more than the distance allowed by the domain decomposition (%f) in direction %c\n",
dd->comm->bCGs ? "The charge group starting at atom" : "Atom",
- ddglatnr(dd, dd->atomGroups().index[cg]), limitd, dim2char(dim));
+ ddglatnr(dd, dd->atomGroups().block(cg).begin()), limitd, dim2char(dim));
}
else
{
/* We don't have a limiting distance available: don't print it */
fprintf(fplog, "%s %d moved more than the distance allowed by the domain decomposition in direction %c\n",
dd->comm->bCGs ? "The charge group starting at atom" : "Atom",
- ddglatnr(dd, dd->atomGroups().index[cg]), dim2char(dim));
+ ddglatnr(dd, dd->atomGroups().block(cg).begin()), dim2char(dim));
}
fprintf(fplog, "distance out of cell %f\n",
dir == 1 ? pos_d - comm->cell_x1[dim] : pos_d - comm->cell_x0[dim]);
return movedBuffer.data();
}
+/* Determine to which domains the atom groups in the range \p cg_start to \p cg_end should be moved.
+ *
+ * Returns in \p move where the groups should go.
+ * Also updates \p cg_cm for jumps over periodic boundaries.
+ *
+ * \todo Rename cg to atomGroup.
+ */
static void calc_cg_move(FILE *fplog, gmx_int64_t step,
gmx_domdec_t *dd,
t_state *state,
ivec tric_dir, matrix tcm,
rvec cell_x0, rvec cell_x1,
rvec limitd, rvec limit0, rvec limit1,
- const gmx::BlockRanges &atomGroups,
+ const gmx::RangePartitioning &atomGroups,
int cg_start, int cg_end,
rvec *cg_cm,
int *move)
{
- int npbcdim;
- int cg, k, k0, k1, d, dim, d2;
- int mc, nrcg;
- int flag;
- gmx_bool bScrew;
- ivec dev;
- real inv_ncg, pos_d;
- rvec cm_new;
-
- npbcdim = dd->npbcdim;
+ const int npbcdim = dd->npbcdim;
- for (cg = cg_start; cg < cg_end; cg++)
+ for (int g = cg_start; g < cg_end; g++)
{
- k0 = atomGroups.index[cg];
- k1 = atomGroups.index[cg + 1];
- nrcg = k1 - k0;
- if (nrcg == 1)
+ const auto atomGroup = atomGroups.block(g);
+ const int numAtoms = atomGroup.size();
+ // TODO: Rename this center of geometry variable to cogNew
+ rvec cm_new;
+ if (numAtoms == 1)
{
- copy_rvec(state->x[k0], cm_new);
+ copy_rvec(state->x[atomGroup.begin()], cm_new);
}
else
{
- inv_ncg = 1.0/nrcg;
-
+ real invNumAtoms = 1/static_cast<real>(numAtoms);
clear_rvec(cm_new);
- for (k = k0; (k < k1); k++)
+ for (int k : atomGroup)
{
rvec_inc(cm_new, state->x[k]);
}
- for (d = 0; (d < DIM); d++)
+ for (int d = 0; d < DIM; d++)
{
- cm_new[d] = inv_ncg*cm_new[d];
+ cm_new[d] = invNumAtoms*cm_new[d];
}
}
- clear_ivec(dev);
+ ivec dev = { 0 };
/* Do pbc and check DD cell boundary crossings */
- for (d = DIM-1; d >= 0; d--)
+ for (int d = DIM - 1; d >= 0; d--)
{
if (dd->nc[d] > 1)
{
- bScrew = (dd->bScrewPBC && d == XX);
+ bool bScrew = (dd->bScrewPBC && d == XX);
/* Determine the location of this cg in lattice coordinates */
- pos_d = cm_new[d];
+ real pos_d = cm_new[d];
if (tric_dir[d])
{
- for (d2 = d+1; d2 < DIM; d2++)
+ for (int d2 = d + 1; d2 < DIM; d2++)
{
pos_d += cm_new[d2]*tcm[d2][d];
}
{
if (pos_d >= limit1[d])
{
- cg_move_error(fplog, dd, step, cg, d, 1,
+ cg_move_error(fplog, dd, step, g, d, 1,
cg_cm != as_rvec_array(state->x.data()), limitd[d],
- cg_cm[cg], cm_new, pos_d);
+ cg_cm[g], cm_new, pos_d);
}
dev[d] = 1;
if (dd->ci[d] == dd->nc[d] - 1)
cm_new[YY] = state->box[YY][YY] - cm_new[YY];
cm_new[ZZ] = state->box[ZZ][ZZ] - cm_new[ZZ];
}
- for (k = k0; (k < k1); k++)
+ for (int k : atomGroup)
{
rvec_dec(state->x[k], state->box[d]);
if (bScrew)
{
if (pos_d < limit0[d])
{
- cg_move_error(fplog, dd, step, cg, d, -1,
+ cg_move_error(fplog, dd, step, g, d, -1,
cg_cm != as_rvec_array(state->x.data()), limitd[d],
- cg_cm[cg], cm_new, pos_d);
+ cg_cm[g], cm_new, pos_d);
}
dev[d] = -1;
if (dd->ci[d] == 0)
cm_new[YY] = state->box[YY][YY] - cm_new[YY];
cm_new[ZZ] = state->box[ZZ][ZZ] - cm_new[ZZ];
}
- for (k = k0; (k < k1); k++)
+ for (int k : atomGroup)
{
rvec_inc(state->x[k], state->box[d]);
if (bScrew)
while (cm_new[d] >= state->box[d][d])
{
rvec_dec(cm_new, state->box[d]);
- for (k = k0; (k < k1); k++)
+ for (int k : atomGroup)
{
rvec_dec(state->x[k], state->box[d]);
}
while (cm_new[d] < 0)
{
rvec_inc(cm_new, state->box[d]);
- for (k = k0; (k < k1); k++)
+ for (int k : atomGroup)
{
rvec_inc(state->x[k], state->box[d]);
}
}
}
- copy_rvec(cm_new, cg_cm[cg]);
+ copy_rvec(cm_new, cg_cm[g]);
/* Determine where this cg should go */
- flag = 0;
- mc = -1;
- for (d = 0; d < dd->ndim; d++)
+ int flag = 0;
+ int mc = -1;
+ for (int d = 0; d < dd->ndim; d++)
{
- dim = dd->dim[d];
+ int dim = dd->dim[d];
if (dev[dim] == 1)
{
flag |= DD_FLAG_FW(d);
}
}
/* Temporarily store the flag in move */
- move[cg] = mc + flag;
+ move[g] = mc + flag;
}
}
make_tric_corr_matrix(npbcdim, state->box, tcm);
- const gmx::BlockRanges &atomGroups = dd->atomGroups();
+ const gmx::RangePartitioning &atomGroups = dd->atomGroups();
nthread = gmx_omp_nthreads_get(emntDomdec);
* and the place where the charge group should go
* in the next 6 bits. This saves some communication volume.
*/
- nrcg = atomGroups.index[cg+1] - atomGroups.index[cg];
+ nrcg = atomGroups.block(cg).size();
cggl_flag[ncg[mc]*DD_CGIBS+1] = nrcg | flag;
ncg[mc] += 1;
nat[mc] += nrcg;
/* Now we can remove the excess global atom-group indices from the list */
dd->globalAtomGroupIndices.resize(home_pos_cg);
- dd->atomGroups_.index.resize(home_pos_cg + 1);
+ dd->atomGroups_.reduceNumBlocks(home_pos_cg);
/* We reuse the intBuffer without reacquiring since we are in the same scope */
DDBufferAccess<int> &flagBuffer = moveBuffer;
/* Set the global charge group index and size */
const int globalAtomGroupIndex = flagBuffer.buffer[cg*DD_CGIBS];
dd->globalAtomGroupIndices.push_back(globalAtomGroupIndex);
- dd->atomGroups_.index.push_back(dd->atomGroups_.index[home_pos_cg] + nrcg);
+ dd->atomGroups_.appendBlock(nrcg);
/* Copy the state from the buffer */
if (fr->cutoff_scheme == ecutsGROUP)
{
tfac = ndf/(3.0*natoms);
}
- gmx::BlockRanges mols;
+ gmx::RangePartitioning mols;
if (bMol)
{
if (ndx)
{
GMX_RELEASE_ASSERT(mols.numBlocks() > 0, "Cannot access index[] from empty mols");
bSame = FALSE;
- for (ii = mols.index[ai]; !bSame && (ii < mols.index[ai+1]); ii++)
+ for (ii = mols.block(ai).begin(); !bSame && ii < mols.block(ai).end(); ii++)
{
- for (jj = mols.index[aj]; !bSame && (jj < mols.index[aj+1]); jj++)
+ for (jj = mols.block(aj).begin(); !bSame && jj < mols.block(aj).end(); jj++)
{
if (bPBC)
{
if (bMol)
{
GMX_RELEASE_ASSERT(mols.numBlocks() > 0, "Cannot access index[] from empty mols");
- for (j = mols.index[i]; (j < mols.index[i+1]); j++)
+ for (int j : mols.block(i))
{
fprintf(fp, "%d\n", j+1);
}
static void init_imd_prepare_mols_in_imdgroup(t_gmx_IMD_setup *IMDsetup, gmx_mtop_t *top_global)
{
int i, ii;
- int gstart, gend, count;
t_block lmols;
int nat;
int *ind;
}
}
- gmx::BlockRanges gmols = gmx_mtop_molecules(*top_global);
+ gmx::RangePartitioning gmols = gmx_mtop_molecules(*top_global);
snew(lmols.index, gmols.numBlocks() + 1);
lmols.index[0] = 0;
for (i = 0; i < gmols.numBlocks(); i++)
{
- gstart = gmols.index[i];
- gend = gmols.index[i+1];
- count = 0;
+ auto mol = gmols.block(i);
+ int count = 0;
for (ii = 0; ii < nat; ii++)
{
- if ((ind[ii] >= gstart) && (ind[ii] < gend))
+ if (mol.inRange(ind[ii]))
{
count += 1;
}
*/
cgs = &mtop->moltype[mtop->molblock.back().type].cgs;
fr->n_tpi = cgs->index[cgs->nr] - cgs->index[cgs->nr-1];
- gmx::BlockRanges molecules = gmx_mtop_molecules(*mtop);
- if (fr->n_tpi != molecules.index[molecules.numBlocks()] - molecules.index[molecules.numBlocks() - 1])
+ gmx::RangePartitioning molecules = gmx_mtop_molecules(*mtop);
+ if (fr->n_tpi != molecules.block(molecules.numBlocks() - 1).size())
{
gmx_fatal(FARGS, "The molecule to insert can not consist of multiple charge groups.\nMake it a single charge group.");
}
int *order;
r_min_rad = probe_rad*probe_rad;
- gmx::BlockRanges molecules = gmx_mtop_molecules(*mtop);
+ gmx::RangePartitioning molecules = gmx_mtop_molecules(*mtop);
snew(rm_p->block, molecules.numBlocks());
nrm = nupper = 0;
nlower = low_up_rm;
{
if (mol_id == mem_p->mol_id[l])
{
- for (k = molecules.index[mol_id]; k < molecules.index[mol_id+1]; k++)
+ for (int k : molecules.block(mol_id))
{
z_lip += r[k][ZZ];
}
- z_lip /= molecules.index[mol_id+1] - molecules.index[mol_id];
+ z_lip /= molecules.block(mol_id).size();
if (z_lip < mem_p->zmed)
{
nlower++;
snew(order, mem_p->nmol);
for (i = 0; i < mem_p->nmol; i++)
{
- at = molecules.index[mem_p->mol_id[i]];
+ at = molecules.block(mem_p->mol_id[i]).begin();
pbc_dx(pbc, r[at], pos_ins->geom_cent[0], dr);
if (pos_ins->pieces > 1)
{
if (bRM)
{
z_lip = 0;
- for (k = molecules.index[mol_id]; k < molecules.index[mol_id+1]; k++)
+ for (int k : molecules.block(mol_id))
{
z_lip += r[k][ZZ];
}
- z_lip /= molecules.index[mol_id+1] - molecules.index[mol_id];
+ z_lip /= molecules.block(mol_id).size();
if (nupper > nlower && z_lip < mem_p->zmed)
{
rm_p->mol[nrm] = mol_id;
int RMmolblock;
/* Construct the molecule range information */
- gmx::BlockRanges molecules = gmx_mtop_molecules(*mtop);
+ gmx::RangePartitioning molecules = gmx_mtop_molecules(*mtop);
snew(list, state->natoms);
n = 0;
for (int i = 0; i < rm_p->nr; i++)
{
mol_id = rm_p->mol[i];
- at = molecules.index[mol_id];
+ at = molecules.block(mol_id).begin();
block = rm_p->block[i];
mtop->molblock[block].nmol--;
for (j = 0; j < mtop->moltype[mtop->molblock[block].type].atoms.nr; j++)
for (zi = 0; zi < dd_zones->n; zi++)
{
- ca1[zi] = dd->atomGroups().index[dd_zones->cg_range[zi + 1]];
+ ca1[zi] = dd->atomGroups().subRange(0, dd_zones->cg_range[zi + 1]).end();
}
i = 0;
for (zi = 0; zi < dd_zones->nizone && zi < dd_zones->n; zi++)
* The atoms in \p g are assumed to be sorted.
*/
bool
-gmx_ana_index_has_full_blocks(const gmx_ana_index_t *g, const gmx::BlockRanges *b)
+gmx_ana_index_has_full_blocks(const gmx_ana_index_t *g,
+ const gmx::RangePartitioning *b)
{
int i, j, bi;
while (i < g->isize)
{
/* Find the block that begins with the first unmatched atom */
- while (bi < b->numBlocks() && b->index[bi] != g->index[i])
+ while (bi < b->numBlocks() && b->block(bi).begin() != g->index[i])
{
++bi;
}
/* If not found, or if too large, return */
- if (bi == b->numBlocks() || i + b->index[bi+1] - b->index[bi] > g->isize)
+ if (bi == b->numBlocks() || i + b->block(bi).size() > g->isize)
{
return false;
}
/* Check that the block matches the index */
- for (j = b->index[bi]; j < b->index[bi+1]; ++j, ++i)
+ for (j = b->block(bi).begin(); j < b->block(bi).end(); ++j, ++i)
{
if (g->index[i] != j)
{
case INDEX_MOL:
{
- gmx::BlockRanges molecules = gmx_mtop_molecules(*top);
+ auto molecules = gmx_mtop_molecules(*top);
return gmx_ana_index_has_full_blocks(g, &molecules);
}
}
e_index_t type, bool bComplete);
/** Checks whether a group consists of full blocks. */
bool
-gmx_ana_index_has_full_blocks(const gmx_ana_index_t *g, const gmx::BlockRanges *b);
+gmx_ana_index_has_full_blocks(const gmx_ana_index_t *g, const gmx::RangePartitioning *b);
/** Checks whether a group consists of full blocks. */
bool
gmx_ana_index_has_full_ablocks(gmx_ana_index_t *g, t_blocka *b);
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/txtdump.h"
+void gmx::RangePartitioning::setAllBlocksSizeOne(int numBlocksToSet)
+{
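+    /* If the partitioning already consists of size-1 blocks, the existing
+     * index can be reused and only grown or shrunk; otherwise start from
+     * scratch.
+     */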
+ if (!allBlocksHaveSizeOne())
+ {
+ clear();
+ }
+ if (numBlocksToSet < numBlocks())
+ {
+ index_.resize(numBlocksToSet + 1);
+ }
+ else if (numBlocksToSet > numBlocks())
+ {
+ for (int b = numBlocks(); b < numBlocksToSet; b++)
+ {
+ appendBlock(1);
+ }
+ }
+}
+
void init_block(t_block *block)
{
block->nr = 0;
/*! \brief Division of a range of indices into consecutive blocks
*
- * A range of consecutive indices 0 to index[numBlocks()] is divided
+ * A range of consecutive indices 0 to fullRange().end() is divided
* into numBlocks() consecutive blocks of consecutive indices.
- * Block b contains indices i for which index[b] <= i < index[b+1].
+ * Block b contains indices i for which block(b).begin() <= i < block(b).end().
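+ *
+ * An illustrative usage sketch:
+ * \code
+ *   RangePartitioning part;
+ *   part.appendBlock(3);            // block 0 covers indices 0,1,2
+ *   part.appendBlock(2);            // block 1 covers indices 3,4
+ *   for (int i : part.block(1)) {}  // iterates over 3 and 4
+ *   int numIndices = part.fullRange().size();  // 5
+ * \endcode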
*/
-struct BlockRanges
+class RangePartitioning
{
- /*! \brief Returns the number of blocks
- *
- * This should only be called on a valid struct. Validy is asserted
- * (only) in debug mode.
- */
- int numBlocks() const
- {
- GMX_ASSERT(index.size() > 0, "numBlocks() should only be called on a valid BlockRanges struct");
- return index.size() - 1;
- }
+ public:
+ /*! \brief Struct for returning the range of a block.
+ *
+ * Can be used in a range loop.
+ */
+ struct Block
+ {
+ public:
+ /*! \brief An iterator that loops over integers */
+ struct iterator
+ {
+ //! Constructor
+ iterator(int value) : value_(value) {}
+ //! Value
+ operator int () const { return value_; }
+ //! Reference
+ operator int &() { return value_; }
+ //! Pointer
+ int operator* () const { return value_; }
+ //! Inequality comparison
+ bool operator!= (const iterator other) { return value_ != other; }
+ //! Increment operator
+ iterator &operator++() { ++value_; return *this; }
+ //! Increment operator
+ iterator operator++(int) { iterator tmp(*this); ++value_; return tmp; }
+ //! The actual value
+ int value_;
+ };
+
+ /*! \brief Constructor, constructs a block with given \p begin and \p end indices */
+ Block(int begin,
+ int end) :
+ begin_(begin),
+ end_(end)
+ {
+ }
+
+ /*! \brief Begin iterator/value */
+ const iterator begin() const { return begin_; }
+ /*! \brief End iterator/value */
+ const iterator end() const { return end_; }
+
+ /*! \brief The number of items in the block */
+ int size() const
+ {
+ return end_ - begin_;
+ }
+
+ /*! \brief Returns whether \p index is within range of the block */
+ bool inRange(int index) const
+ {
+ return (begin_ <= index && index < end_);
+ }
+
+ private:
+ const int begin_; /**< The start index of the block */
+ const int end_; /**< The end index of the block */
+ };
+
+ /*! \brief Returns the number of blocks */
+ int numBlocks() const
+ {
+ return index_.size() - 1;
+ }
+
+ /*! \brief Returns the block with index \p blockIndex */
+ Block block(int blockIndex) const
+ {
+ return Block(index_[blockIndex], index_[blockIndex + 1]);
+ }
+
+ /*! \brief Returns the full range */
+ Block fullRange() const
+ {
+ return Block(index_.front(), index_.back());
+ }
+
+ /*! \brief Returns a range starting at \p blockIndexBegin and ending at \p blockIndexEnd */
+ Block subRange(int blockIndexBegin,
+ int blockIndexEnd) const
+ {
+ return Block(index_[blockIndexBegin], index_[blockIndexEnd]);
+ }
- std::vector<int> index; /**< The list of block begin/end indices */
+ /*! \brief Returns true when all blocks have size 1 or when numBlocks()=0 */
+ bool allBlocksHaveSizeOne() const
+ {
+ return (index_.back() == numBlocks());
+ }
+
+ /*! \brief Appends a block of size \p blockSize at the end of the range
+ *
+ * \note \p blockSize has to be >= 1
+ */
+ void appendBlock(int blockSize)
+ {
+ GMX_ASSERT(blockSize > 0, "block sizes should be >= 1");
+ index_.push_back(index_.back() + blockSize);
+ }
+
+ /*! \brief Removes all blocks */
+ void clear()
+ {
+ index_.resize(1);
+ }
+
+ /*! \brief Reduces the number of blocks to \p newNumBlocks
+ *
+ * \note \p newNumBlocks should be <= numBlocks().
+ */
+ void reduceNumBlocks(int newNumBlocks)
+ {
+ GMX_ASSERT(newNumBlocks <= numBlocks(), "Can only shrink to fewer blocks");
+ index_.resize(newNumBlocks + 1);
+ }
+
+ /*! \brief Sets the partitioning to \p numBlocks blocks each of size 1 */
+ void setAllBlocksSizeOne(int numBlocks);
+
+ /*! \brief Returns the raw block index array, avoid using this */
+ std::vector<int> &rawIndex()
+ {
+ return index_;
+ }
+
+ private:
+ std::vector<int> index_ = { 0 }; /**< The list of block begin/end indices */
};
} // namespace gmx
-/* Deprecated, C-style version of BlockRanges */
+/* Deprecated, C-style version of RangePartitioning */
typedef struct t_block
{
+#ifdef __cplusplus
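+    /*! \brief Returns the size of the block with index \p blockIndex */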
+ int blockSize(int blockIndex) const
+ {
+ GMX_ASSERT(blockIndex < nr, "blockIndex should be in range");
+ return index[blockIndex + 1] - index[blockIndex];
+ }
+#endif // __cplusplus
+
int nr; /* The number of blocks */
int *index; /* Array of indices (dim: nr+1) */
int nalloc_index; /* The allocation size for index */
}
}
-gmx::BlockRanges gmx_mtop_molecules(const gmx_mtop_t &mtop)
+gmx::RangePartitioning gmx_mtop_molecules(const gmx_mtop_t &mtop)
{
- gmx::BlockRanges mols;
+ gmx::RangePartitioning mols;
- mols.index.resize(gmx_mtop_num_molecules(mtop) + 1);
-
- fillMoleculeIndices(mtop, mols.index);
+ for (const gmx_molblock_t &molb : mtop.molblock)
+ {
+ int numAtomsPerMolecule = mtop.moltype[molb.type].atoms.nr;
+ for (int mol = 0; mol < molb.nmol; mol++)
+ {
+ mols.appendBlock(numAtomsPerMolecule);
+ }
+ }
return mols;
}
/*!\brief Creates and returns a struct with begin/end atom indices of all molecules
*
* \param[in] mtop The global topology
- * \returns A struct of type BlockRanges with numBlocks() equal to the number
+ * \returns A RangePartitioning object with numBlocks() equal to the number
* of molecules and atom indices such that molecule m contains atoms a with:
- * index[m] <= a < index[m+1].
+ * block(m).begin() <= a < block(m).end().
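+ *
+ * A hypothetical usage sketch (the \p mtop variable is assumed):
+ * \code
+ *   gmx::RangePartitioning mols = gmx_mtop_molecules(mtop);
+ *   for (int m = 0; m < mols.numBlocks(); m++)
+ *   {
+ *       const int first = mols.block(m).begin();
+ *       const int last  = mols.block(m).end() - 1;
+ *       printf("molecule %d: atoms %d to %d\n", m, first, last);
+ *   }
+ * \endcode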
*/
-gmx::BlockRanges gmx_mtop_molecules(const gmx_mtop_t &mtop);
+gmx::RangePartitioning gmx_mtop_molecules(const gmx_mtop_t &mtop);
/* Converts a gmx_mtop_t struct to t_topology.