From 28806b6e7960fdd9678abc79db74159283d3e7c1 Mon Sep 17 00:00:00 2001 From: Berk Hess Date: Tue, 16 Apr 2019 16:19:01 +0200 Subject: [PATCH] Remove charge groups from domdec and localtop Code explicitly handling charge groups has been removed in the domdec module. Removed the charge groups from gmx_localtop_t. Changed cginfo in forcerec to std::vector, which propagates into the nbnxm module. Parts of the TPI code are now commented out until reimplemented. Change-Id: I9d3bffe26e34ebbb8e31ef97a9a7f1e0bea92306 Todo: Change naming from charge group to atom. Todo: Remove duplicate counters for atoms and charge groups. --- src/gromacs/domdec/distribute.cpp | 8 - src/gromacs/domdec/domdec.cpp | 160 +++++-------- src/gromacs/domdec/domdec.h | 3 - src/gromacs/domdec/domdec_constraints.cpp | 209 +++++++++-------- src/gromacs/domdec/domdec_internal.h | 6 +- src/gromacs/domdec/domdec_setup.cpp | 10 +- src/gromacs/domdec/domdec_struct.h | 10 +- src/gromacs/domdec/domdec_topology.cpp | 193 +++++----------- src/gromacs/domdec/dump.cpp | 4 +- src/gromacs/domdec/partition.cpp | 262 +++++----------------- src/gromacs/domdec/redistribute.cpp | 234 +++++-------------- src/gromacs/domdec/utility.cpp | 26 +-- src/gromacs/mdlib/constr.cpp | 2 +- src/gromacs/mdlib/forcerec.cpp | 32 +-- src/gromacs/mdlib/shake.cpp | 11 +- src/gromacs/mdlib/shake.h | 6 +- src/gromacs/mdlib/sim_util.cpp | 2 +- src/gromacs/mdrun/shellfc.cpp | 16 +- src/gromacs/mdrun/tpi.cpp | 23 +- src/gromacs/mdtypes/forcerec.h | 4 +- src/gromacs/nbnxm/gridset.cpp | 4 +- src/gromacs/nbnxm/gridset.h | 2 +- src/gromacs/nbnxm/nbnxm.cpp | 14 +- src/gromacs/nbnxm/nbnxm.h | 8 +- src/gromacs/nbnxm/pairsearch.h | 4 +- src/gromacs/topology/mtop_util.cpp | 1 - src/gromacs/topology/topology.cpp | 2 - src/gromacs/topology/topology.h | 2 - 28 files changed, 409 insertions(+), 849 deletions(-) diff --git a/src/gromacs/domdec/distribute.cpp b/src/gromacs/domdec/distribute.cpp index e437c5f488..57540a1e14 100644 --- a/src/gromacs/domdec/distribute.cpp +++ b/src/gromacs/domdec/distribute.cpp @@ -556,14 +556,6 @@ static void distributeAtomGroups(const gmx::MDLogger &mdlog, bMaster ? 
ma->atomGroups.data() : nullptr, dd->ncg_home*sizeof(int), dd->globalAtomGroupIndices.data()); - /* Determine the home charge group sizes */ - const t_block &globalAtomGroups = dd->comm->cgs_gl; - dd->atomGrouping_.clear(); - for (int i = 0; i < dd->ncg_home; i++) - { - dd->atomGrouping_.appendBlock(globalAtomGroups.blockSize(dd->globalAtomGroupIndices[i])); - } - if (debug) { fprintf(debug, "Home charge groups:\n"); diff --git a/src/gromacs/domdec/domdec.cpp b/src/gromacs/domdec/domdec.cpp index d46ee6ce00..4ba6cc3484 100644 --- a/src/gromacs/domdec/domdec.cpp +++ b/src/gromacs/domdec/domdec.cpp @@ -320,8 +320,6 @@ void dd_move_x(gmx_domdec_t *dd, comm = dd->comm; - const gmx::RangePartitioning &atomGrouping = dd->atomGrouping(); - nzone = 1; nat_tot = comm->atomRanges.numHomeAtoms(); for (int d = 0; d < dd->ndim; d++) @@ -340,46 +338,37 @@ void dd_move_x(gmx_domdec_t *dd, int n = 0; if (!bPBC) { - for (int g : ind.index) + for (int j : ind.index) { - for (int j : atomGrouping.block(g)) - { - sendBuffer[n] = x[j]; - n++; - } + sendBuffer[n] = x[j]; + n++; } } else if (!bScrew) { - for (int g : ind.index) + for (int j : ind.index) { - for (int j : atomGrouping.block(g)) + /* We need to shift the coordinates */ + for (int d = 0; d < DIM; d++) { - /* We need to shift the coordinates */ - for (int d = 0; d < DIM; d++) - { - sendBuffer[n][d] = x[j][d] + shift[d]; - } - n++; + sendBuffer[n][d] = x[j][d] + shift[d]; } + n++; } } else { - for (int g : ind.index) + for (int j : ind.index) { - for (int j : atomGrouping.block(g)) - { - /* Shift x */ - sendBuffer[n][XX] = x[j][XX] + shift[XX]; - /* Rotate y and z. - * This operation requires a special shift force - * treatment, which is performed in calc_vir. - */ - sendBuffer[n][YY] = box[YY][YY] - x[j][YY]; - sendBuffer[n][ZZ] = box[ZZ][ZZ] - x[j][ZZ]; - n++; - } + /* Shift x */ + sendBuffer[n][XX] = x[j][XX] + shift[XX]; + /* Rotate y and z. + * This operation requires a special shift force + * treatment, which is performed in calc_vir. 
+ */ + sendBuffer[n][YY] = box[YY][YY] - x[j][YY]; + sendBuffer[n][ZZ] = box[ZZ][ZZ] - x[j][ZZ]; + n++; } } @@ -433,8 +422,6 @@ void dd_move_f(gmx_domdec_t *dd, comm = dd->comm; - const gmx::RangePartitioning &atomGrouping = dd->atomGrouping(); - nzone = comm->zones.n/2; nat_tot = comm->atomRanges.end(DDAtomRanges::Type::Zones); for (int d = dd->ndim-1; d >= 0; d--) @@ -487,16 +474,13 @@ void dd_move_f(gmx_domdec_t *dd, int n = 0; if (!bShiftForcesNeedPbc) { - for (int g : ind.index) + for (int j : ind.index) { - for (int j : atomGrouping.block(g)) + for (int d = 0; d < DIM; d++) { - for (int d = 0; d < DIM; d++) - { - f[j][d] += receiveBuffer[n][d]; - } - n++; + f[j][d] += receiveBuffer[n][d]; } + n++; } } else if (!bScrew) @@ -504,43 +488,37 @@ void dd_move_f(gmx_domdec_t *dd, /* fshift should always be defined if this function is * called when bShiftForcesNeedPbc is true */ assert(nullptr != fshift); - for (int g : ind.index) + for (int j : ind.index) { - for (int j : atomGrouping.block(g)) + for (int d = 0; d < DIM; d++) { - for (int d = 0; d < DIM; d++) - { - f[j][d] += receiveBuffer[n][d]; - } - /* Add this force to the shift force */ - for (int d = 0; d < DIM; d++) - { - fshift[is][d] += receiveBuffer[n][d]; - } - n++; + f[j][d] += receiveBuffer[n][d]; + } + /* Add this force to the shift force */ + for (int d = 0; d < DIM; d++) + { + fshift[is][d] += receiveBuffer[n][d]; } + n++; } } else { - for (int g : ind.index) + for (int j : ind.index) { - for (int j : atomGrouping.block(g)) + /* Rotate the force */ + f[j][XX] += receiveBuffer[n][XX]; + f[j][YY] -= receiveBuffer[n][YY]; + f[j][ZZ] -= receiveBuffer[n][ZZ]; + if (fshift) { - /* Rotate the force */ - f[j][XX] += receiveBuffer[n][XX]; - f[j][YY] -= receiveBuffer[n][YY]; - f[j][ZZ] -= receiveBuffer[n][ZZ]; - if (fshift) + /* Add this force to the shift force */ + for (int d = 0; d < DIM; d++) { - /* Add this force to the shift force */ - for (int d = 0; d < DIM; d++) - { - fshift[is][d] += receiveBuffer[n][d]; - } + fshift[is][d] += receiveBuffer[n][d]; } - n++; } + n++; } } } @@ -571,8 +549,6 @@ void dd_atom_spread_real(gmx_domdec_t *dd, real v[]) comm = dd->comm; - const gmx::RangePartitioning &atomGrouping = dd->atomGrouping(); - nzone = 1; nat_tot = comm->atomRanges.numHomeAtoms(); for (int d = 0; d < dd->ndim; d++) @@ -587,12 +563,9 @@ void dd_atom_spread_real(gmx_domdec_t *dd, real v[]) DDBufferAccess sendBufferAccess(comm->rvecBuffer, ind.nsend[nzone + 1]); gmx::ArrayRef sendBuffer = realArrayRefFromRvecArrayRef(sendBufferAccess.buffer); int n = 0; - for (int g : ind.index) + for (int j : ind.index) { - for (int j : atomGrouping.block(g)) - { - sendBuffer[n++] = v[j]; - } + sendBuffer[n++] = v[j]; } DDBufferAccess receiveBufferAccess(comm->rvecBuffer2, cd->receiveInPlace ? 
0 : ind.nrecv[nzone + 1]); @@ -634,8 +607,6 @@ void dd_atom_sum_real(gmx_domdec_t *dd, real v[]) comm = dd->comm; - const gmx::RangePartitioning &atomGrouping = dd->atomGrouping(); - nzone = comm->zones.n/2; nat_tot = comm->atomRanges.end(DDAtomRanges::Type::Zones); for (int d = dd->ndim-1; d >= 0; d--) @@ -677,13 +648,10 @@ void dd_atom_sum_real(gmx_domdec_t *dd, real v[]) sendBuffer, receiveBuffer); /* Add the received forces */ int n = 0; - for (int g : ind.index) + for (int j : ind.index) { - for (int j : atomGrouping.block(g)) - { - v[j] += receiveBuffer[n]; - n++; - } + v[j] += receiveBuffer[n]; + n++; } } nzone /= 2; @@ -699,7 +667,7 @@ real dd_cutoff_multibody(const gmx_domdec_t *dd) comm = dd->comm; r = -1; - if (comm->bInterCGBondeds) + if (comm->haveInterDomainMultiBodyBondeds) { if (comm->cutoff_mbody > 0) { @@ -2187,8 +2155,6 @@ static void set_dd_limits_and_grid(const gmx::MDLogger &mdlog, /* Allocate the charge group/atom sorting struct */ comm->sort = std::make_unique(); - comm->bCGs = (ncg_mtop(mtop) < mtop->natoms); - /* We need to decide on update groups early, as this affects communication distances */ comm->useUpdateGroups = false; if (ir->cutoff_scheme == ecutsVERLET) @@ -2197,16 +2163,10 @@ static void set_dd_limits_and_grid(const gmx::MDLogger &mdlog, setupUpdateGroups(mdlog, *mtop, *ir, cutoffMargin, cr->nnodes, comm); } - comm->bInterCGBondeds = ((ncg_mtop(mtop) > gmx_mtop_num_molecules(*mtop)) || - mtop->bIntermolecularInteractions); - if (comm->bInterCGBondeds) - { - comm->bInterCGMultiBody = (multi_body_bondeds_count(mtop) > 0); - } - else - { - comm->bInterCGMultiBody = FALSE; - } + // TODO: Check whether all bondeds are within update groups + comm->haveInterDomainBondeds = (mtop->natoms > gmx_mtop_num_molecules(*mtop) || + mtop->bIntermolecularInteractions); + comm->haveInterDomainMultiBodyBondeds = (multi_body_bondeds_count(mtop) > 0); if (comm->useUpdateGroups) { @@ -2265,7 +2225,7 @@ static void set_dd_limits_and_grid(const gmx::MDLogger &mdlog, * Note that we would need to improve the pairlist buffer case. 
*/ - if (comm->bInterCGBondeds) + if (comm->haveInterDomainBondeds) { if (options.minimumCommunicationRange > 0) { @@ -2401,7 +2361,7 @@ static void set_dd_limits_and_grid(const gmx::MDLogger &mdlog, !isDlbDisabled(comm), options.dlbScaling, comm->cellsize_limit, comm->cutoff, - comm->bInterCGBondeds); + comm->haveInterDomainBondeds); if (dd->nc[XX] == 0) { @@ -2500,7 +2460,7 @@ static void set_dd_limits_and_grid(const gmx::MDLogger &mdlog, comm->slb_frac[ZZ] = get_slb_frac(mdlog, "z", dd->nc[ZZ], options.cellSizeZ); } - if (comm->bInterCGBondeds && comm->cutoff_mbody == 0) + if (comm->haveInterDomainBondeds && comm->cutoff_mbody == 0) { if (comm->bBondComm || !isDlbDisabled(comm)) { @@ -2669,16 +2629,12 @@ static void writeSettings(gmx::TextWriter *log, const bool haveInterDomainVsites = (countInterUpdategroupVsites(*mtop, comm->updateGroupingPerMoleculetype) != 0); - if (comm->bInterCGBondeds || + if (comm->haveInterDomainBondeds || haveInterDomainVsites || dd->splitConstraints || dd->splitSettles) { std::string decompUnits; - if (comm->bCGs) - { - decompUnits = "charge groups"; - } - else if (comm->useUpdateGroups) + if (comm->useUpdateGroups) { decompUnits = "atom groups"; } @@ -2707,7 +2663,7 @@ static void writeSettings(gmx::TextWriter *log, } } - if (comm->bInterCGBondeds) + if (comm->haveInterDomainBondeds) { log->writeLineFormatted("%40s %-7s %6.3f nm", "two-body bonded interactions", "(-rdd)", @@ -2863,7 +2819,7 @@ gmx_bool dd_bonded_molpbc(const gmx_domdec_t *dd, int ePBC) * or we use domain decomposition for each periodic dimension, * we do not need to take pbc into account for the bonded interactions. */ - return (ePBC != epbcNONE && dd->comm->bInterCGBondeds && + return (ePBC != epbcNONE && dd->comm->haveInterDomainBondeds && !(dd->nc[XX] > 1 && dd->nc[YY] > 1 && (dd->nc[ZZ] > 1 || ePBC == epbcXY))); diff --git a/src/gromacs/domdec/domdec.h b/src/gromacs/domdec/domdec.h index d8aae0b18c..017a6679d5 100644 --- a/src/gromacs/domdec/domdec.h +++ b/src/gromacs/domdec/domdec.h @@ -273,9 +273,6 @@ void dd_make_reverse_top(FILE *fplog, const gmx_vsite_t *vsite, const t_inputrec *ir, gmx_bool bBCheck); -/*! \brief Store the local charge group index in \p lcgs */ -void dd_make_local_cgs(struct gmx_domdec_t *dd, t_block *lcgs); - /*! 
\brief Generate the local topology and virtual site data */ void dd_make_local_top(struct gmx_domdec_t *dd, struct gmx_domdec_zones_t *zones, diff --git a/src/gromacs/domdec/domdec_constraints.cpp b/src/gromacs/domdec/domdec_constraints.cpp index aa5f7b2832..eacf0c7db0 100644 --- a/src/gromacs/domdec/domdec_constraints.cpp +++ b/src/gromacs/domdec/domdec_constraints.cpp @@ -234,68 +234,64 @@ static void atoms_to_settles(gmx_domdec_t *dd, int nral = NRAL(F_SETTLE); int mb = 0; - for (int cg = cg_start; cg < cg_end; cg++) + for (int a = cg_start; a < cg_end; a++) { - if (GET_CGINFO_SETTLE(cginfo[cg])) + if (GET_CGINFO_SETTLE(cginfo[a])) { - for (int a : dd->atomGrouping().block(cg)) - { - int a_gl = dd->globalAtomIndices[a]; - int a_mol; - mtopGetMolblockIndex(mtop, a_gl, &mb, nullptr, &a_mol); + int a_gl = dd->globalAtomIndices[a]; + int a_mol; + mtopGetMolblockIndex(mtop, a_gl, &mb, nullptr, &a_mol); - const gmx_molblock_t *molb = &mtop->molblock[mb]; - int settle = at2settle_mt[molb->type][a_mol]; + const gmx_molblock_t *molb = &mtop->molblock[mb]; + int settle = at2settle_mt[molb->type][a_mol]; - if (settle >= 0) - { - int offset = a_gl - a_mol; + if (settle >= 0) + { + int offset = a_gl - a_mol; - const int *ia1 = mtop->moltype[molb->type].ilist[F_SETTLE].iatoms.data(); + const int *ia1 = mtop->moltype[molb->type].ilist[F_SETTLE].iatoms.data(); - int a_gls[3]; - gmx_bool bAssign = FALSE; - int nlocal = 0; - for (int sa = 0; sa < nral; sa++) + int a_gls[3]; + gmx_bool bAssign = FALSE; + int nlocal = 0; + for (int sa = 0; sa < nral; sa++) + { + int a_glsa = offset + ia1[settle*(1+nral)+1+sa]; + a_gls[sa] = a_glsa; + if (ga2la.findHome(a_glsa)) { - int a_glsa = offset + ia1[settle*(1+nral)+1+sa]; - a_gls[sa] = a_glsa; - if (ga2la.findHome(a_glsa)) + if (nlocal == 0 && a_gl == a_glsa) { - - if (nlocal == 0 && a_gl == a_glsa) - { - bAssign = TRUE; - } - nlocal++; + bAssign = TRUE; } + nlocal++; } + } - if (bAssign) + if (bAssign) + { + if (ils_local->nr+1+nral > ils_local->nalloc) { - if (ils_local->nr+1+nral > ils_local->nalloc) - { - ils_local->nalloc = over_alloc_dd(ils_local->nr+1+nral); - srenew(ils_local->iatoms, ils_local->nalloc); - } + ils_local->nalloc = over_alloc_dd(ils_local->nr+1+nral); + srenew(ils_local->iatoms, ils_local->nalloc); + } - ils_local->iatoms[ils_local->nr++] = ia1[settle*4]; + ils_local->iatoms[ils_local->nr++] = ia1[settle*4]; - for (int sa = 0; sa < nral; sa++) + for (int sa = 0; sa < nral; sa++) + { + if (const int *a_loc = ga2la.findHome(a_gls[sa])) { - if (const int *a_loc = ga2la.findHome(a_gls[sa])) - { - ils_local->iatoms[ils_local->nr++] = *a_loc; - } - else - { - ils_local->iatoms[ils_local->nr++] = -a_gls[sa] - 1; - /* Add this non-home atom to the list */ - ireq->push_back(a_gls[sa]); - /* A check on double atom requests is - * not required for settle. - */ - } + ils_local->iatoms[ils_local->nr++] = *a_loc; + } + else + { + ils_local->iatoms[ils_local->nr++] = -a_gls[sa] - 1; + /* Add this non-home atom to the list */ + ireq->push_back(a_gls[sa]); + /* A check on double atom requests is + * not required for settle. 
+ */ } } } @@ -325,75 +321,72 @@ static void atoms_to_constraints(gmx_domdec_t *dd, int mb = 0; int nhome = 0; - for (int cg = 0; cg < dd->ncg_home; cg++) + for (int a = 0; a < dd->ncg_home; a++) { - if (GET_CGINFO_CONSTR(cginfo[cg])) + if (GET_CGINFO_CONSTR(cginfo[a])) { - for (int a : dd->atomGrouping().block(cg)) + int a_gl = dd->globalAtomIndices[a]; + int molnr, a_mol; + mtopGetMolblockIndex(mtop, a_gl, &mb, &molnr, &a_mol); + + const gmx_molblock_t &molb = mtop->molblock[mb]; + + gmx::ArrayRef ia1 = mtop->moltype[molb.type].ilist[F_CONSTR].iatoms; + gmx::ArrayRef ia2 = mtop->moltype[molb.type].ilist[F_CONSTRNC].iatoms; + + /* Calculate the global constraint number offset for the molecule. + * This is only required for the global index to make sure + * that we use each constraint only once. + */ + con_offset = + dc->molb_con_offset[mb] + molnr*dc->molb_ncon_mol[mb]; + + /* The global atom number offset for this molecule */ + offset = a_gl - a_mol; + at2con = &at2con_mt[molb.type]; + for (i = at2con->index[a_mol]; i < at2con->index[a_mol+1]; i++) { - int a_gl = dd->globalAtomIndices[a]; - int molnr, a_mol; - mtopGetMolblockIndex(mtop, a_gl, &mb, &molnr, &a_mol); - - const gmx_molblock_t &molb = mtop->molblock[mb]; - - gmx::ArrayRef ia1 = mtop->moltype[molb.type].ilist[F_CONSTR].iatoms; - gmx::ArrayRef ia2 = mtop->moltype[molb.type].ilist[F_CONSTRNC].iatoms; - - /* Calculate the global constraint number offset for the molecule. - * This is only required for the global index to make sure - * that we use each constraint only once. - */ - con_offset = - dc->molb_con_offset[mb] + molnr*dc->molb_ncon_mol[mb]; - - /* The global atom number offset for this molecule */ - offset = a_gl - a_mol; - at2con = &at2con_mt[molb.type]; - for (i = at2con->index[a_mol]; i < at2con->index[a_mol+1]; i++) + con = at2con->a[i]; + const int *iap = constr_iatomptr(ia1, ia2, con); + if (a_mol == iap[1]) { - con = at2con->a[i]; - const int *iap = constr_iatomptr(ia1, ia2, con); - if (a_mol == iap[1]) - { - b_mol = iap[2]; - } - else - { - b_mol = iap[1]; - } - if (const int *a_loc = ga2la.findHome(offset + b_mol)) + b_mol = iap[2]; + } + else + { + b_mol = iap[1]; + } + if (const int *a_loc = ga2la.findHome(offset + b_mol)) + { + /* Add this fully home constraint at the first atom */ + if (a_mol < b_mol) { - /* Add this fully home constraint at the first atom */ - if (a_mol < b_mol) + dc->con_gl.push_back(con_offset + con); + dc->con_nlocat.push_back(2); + if (ilc_local->nr + 3 > ilc_local->nalloc) { - dc->con_gl.push_back(con_offset + con); - dc->con_nlocat.push_back(2); - if (ilc_local->nr + 3 > ilc_local->nalloc) - { - ilc_local->nalloc = over_alloc_dd(ilc_local->nr + 3); - srenew(ilc_local->iatoms, ilc_local->nalloc); - } - b_lo = *a_loc; - ilc_local->iatoms[ilc_local->nr++] = iap[0]; - ilc_local->iatoms[ilc_local->nr++] = (a_gl == iap[1] ? a : b_lo); - ilc_local->iatoms[ilc_local->nr++] = (a_gl == iap[1] ? b_lo : a ); - dc->ncon++; - nhome++; + ilc_local->nalloc = over_alloc_dd(ilc_local->nr + 3); + srenew(ilc_local->iatoms, ilc_local->nalloc); } + b_lo = *a_loc; + ilc_local->iatoms[ilc_local->nr++] = iap[0]; + ilc_local->iatoms[ilc_local->nr++] = (a_gl == iap[1] ? a : b_lo); + ilc_local->iatoms[ilc_local->nr++] = (a_gl == iap[1] ? b_lo : a ); + dc->ncon++; + nhome++; } - else - { - /* We need the nrec constraints coupled to this constraint, - * so we need to walk out of the home cell by nrec+1 atoms, - * since already atom bg is not locally present. 
- * Therefore we call walk_out with nrec recursions to go - * after this first call. - */ - walk_out(con, con_offset, b_mol, offset, nrec, - ia1, ia2, at2con, - ga2la, TRUE, dc, dcc, ilc_local, ireq); - } + } + else + { + /* We need the nrec constraints coupled to this constraint, + * so we need to walk out of the home cell by nrec+1 atoms, + * since already atom bg is not locally present. + * Therefore we call walk_out with nrec recursions to go + * after this first call. + */ + walk_out(con, con_offset, b_mol, offset, nrec, + ia1, ia2, at2con, + ga2la, TRUE, dc, dcc, ilc_local, ireq); } } } diff --git a/src/gromacs/domdec/domdec_internal.h b/src/gromacs/domdec/domdec_internal.h index c2d9aef4c7..46a460ee1b 100644 --- a/src/gromacs/domdec/domdec_internal.h +++ b/src/gromacs/domdec/domdec_internal.h @@ -417,10 +417,8 @@ struct gmx_domdec_comm_t // NOLINT (clang-analyzer-optin.performance.Padding) std::unique_ptr updateGroupsCog; /* Are there charge groups? */ - gmx_bool bCGs; /**< True when there are charge groups */ - - gmx_bool bInterCGBondeds; /**< Are there inter-cg bonded interactions? */ - gmx_bool bInterCGMultiBody; /**< Are there inter-cg multi-body interactions? */ + bool haveInterDomainBondeds; /**< Are there inter-domain bonded interactions? */ + bool haveInterDomainMultiBodyBondeds; /**< Are there inter-domain multi-body interactions? */ /* Data for the optional bonded interaction atom communication range */ gmx_bool bBondComm; /**< Only communicate atoms beyond the non-bonded cut-off when they are involved in bonded interactions with non-local atoms */ diff --git a/src/gromacs/domdec/domdec_setup.cpp b/src/gromacs/domdec/domdec_setup.cpp index 8df4070901..14b5373439 100644 --- a/src/gromacs/domdec/domdec_setup.cpp +++ b/src/gromacs/domdec/domdec_setup.cpp @@ -1,7 +1,7 @@ /* * This file is part of the GROMACS molecular simulation package. * - * Copyright (c) 2008,2009,2010,2011,2012,2013,2014,2015,2017,2018, by the GROMACS development team, led by + * Copyright (c) 2008,2009,2010,2011,2012,2013,2014,2015,2017,2018,2019, by the GROMACS development team, led by * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl, * and including many others, as listed in the AUTHORS file in the * top-level source directory and at http://www.gromacs.org. @@ -609,7 +609,7 @@ static real optimize_ncells(const gmx::MDLogger &mdlog, const t_inputrec *ir, gmx_domdec_t *dd, real cellsize_limit, real cutoff, - gmx_bool bInterCGBondeds, + const bool haveInterDomainBondeds, ivec nc) { int npp, npme, d, nmax; @@ -633,7 +633,7 @@ static real optimize_ncells(const gmx::MDLogger &mdlog, npme = 0; } - if (bInterCGBondeds) + if (haveInterDomainBondeds) { /* If we can skip PBC for distance calculations in plain-C bondeds, * we can save some time (e.g. 3D DD with pbc=xyz). 
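The flag renames above (bInterCGBondeds to haveInterDomainBondeds, bInterCGMultiBody to haveInterDomainMultiBodyBondeds) keep the same decision logic; the substantive simplification sits in the communication loops earlier in the patch (dd_move_x(), dd_move_f(), dd_atom_spread_real(), dd_atom_sum_real()). With charge groups gone, each entry in a communication index list refers to a single atom, so the inner loop over atomGrouping.block(g) disappears. A minimal sketch of the recurring before/after pattern, using the names from the surrounding hunks rather than compilable standalone code:

    /* Before: ind.index held charge-group indices; the atoms of each group
     * were reached through the range partitioning. */
    for (int g : ind.index)
    {
        for (int j : atomGrouping.block(g))
        {
            sendBuffer[n++] = x[j];
        }
    }

    /* After: ind.index holds atom indices directly. */
    for (int j : ind.index)
    {
        sendBuffer[n++] = x[j];
    }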
@@ -725,7 +725,7 @@ real dd_choose_grid(const gmx::MDLogger &mdlog, int nPmeRanks, gmx_bool bDynLoadBal, real dlb_scale, real cellsize_limit, real cutoff_dd, - gmx_bool bInterCGBondeds) + const bool haveInterDomainBondeds) { int64_t nnodes_div, ldiv; real limit; @@ -802,7 +802,7 @@ real dd_choose_grid(const gmx::MDLogger &mdlog, bDynLoadBal, dlb_scale, mtop, box, ddbox, ir, dd, cellsize_limit, cutoff_dd, - bInterCGBondeds, + haveInterDomainBondeds, dd->nc); } else diff --git a/src/gromacs/domdec/domdec_struct.h b/src/gromacs/domdec/domdec_struct.h index 85425ff3c6..766089b5b3 100644 --- a/src/gromacs/domdec/domdec_struct.h +++ b/src/gromacs/domdec/domdec_struct.h @@ -3,7 +3,7 @@ * * Copyright (c) 1991-2000, University of Groningen, The Netherlands. * Copyright (c) 2001-2004, The GROMACS development team. - * Copyright (c) 2013,2014,2015,2018, by the GROMACS development team, led by + * Copyright (c) 2013,2014,2015,2018,2019, by the GROMACS development team, led by * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl, * and including many others, as listed in the AUTHORS file in the * top-level source directory and at http://www.gromacs.org. @@ -195,14 +195,6 @@ struct gmx_domdec_t { //NOLINT(clang-analyzer-optin.performance.Padding) int ncg_home = 0; /* Global atom group indices for the home and all non-home groups */ std::vector globalAtomGroupIndices; - /* The atom groups for the home and all non-home groups, todo: make private */ - gmx::RangePartitioning atomGrouping_; - const gmx::RangePartitioning &atomGrouping() const - { - return atomGrouping_; - } - /* Local atom to local atom-group index, only used for checking bondeds */ - std::vector localAtomGroupFromAtom; /* Index from the local atoms to the global atoms, covers home and received zones */ std::vector globalAtomIndices; diff --git a/src/gromacs/domdec/domdec_topology.cpp b/src/gromacs/domdec/domdec_topology.cpp index 79a2b76a0d..c43971003e 100644 --- a/src/gromacs/domdec/domdec_topology.cpp +++ b/src/gromacs/domdec/domdec_topology.cpp @@ -1017,34 +1017,18 @@ static void add_vsite(const gmx_ga2la_t &ga2la, } } -/*! \brief Build the index that maps each local atom to its local atom group */ -static void makeLocalAtomGroupsFromAtoms(gmx_domdec_t *dd) -{ - const gmx::RangePartitioning &atomGrouping = dd->atomGrouping(); - - dd->localAtomGroupFromAtom.clear(); - - for (size_t g = 0; g < dd->globalAtomGroupIndices.size(); g++) - { - for (int gmx_unused a : atomGrouping.block(g)) - { - dd->localAtomGroupFromAtom.push_back(g); - } - } -} - -/*! \brief Returns the squared distance between charge groups \p i and \p j */ -static real dd_dist2(t_pbc *pbc_null, rvec *cg_cm, const int *la2lc, int i, int j) +/*! 
\brief Returns the squared distance between atoms \p i and \p j */ +static real dd_dist2(t_pbc *pbc_null, const rvec *x, const int i, int j) { rvec dx; if (pbc_null) { - pbc_dx_aiuc(pbc_null, cg_cm[la2lc[i]], cg_cm[la2lc[j]], dx); + pbc_dx_aiuc(pbc_null, x[i], x[j], dx); } else { - rvec_sub(cg_cm[la2lc[i]], cg_cm[la2lc[j]], dx); + rvec_sub(x[i], x[j], dx); } return norm2(dx); @@ -1174,7 +1158,6 @@ check_assign_interactions_atom(int i, int i_gl, const gmx_molblock_t *molb, gmx_bool bRCheckMB, const ivec rcheck, gmx_bool bRCheck2B, real rc2, - int *la2lc, t_pbc *pbc_null, rvec *cg_cm, const t_iparams *ip_in, @@ -1277,7 +1260,7 @@ check_assign_interactions_atom(int i, int i_gl, tiatoms[2] = entry->la; /* If necessary check the cgcm distance */ if (bRCheck2B && - dd_dist2(pbc_null, cg_cm, la2lc, + dd_dist2(pbc_null, cg_cm, tiatoms[1], tiatoms[2]) >= rc2) { bUse = FALSE; @@ -1356,7 +1339,7 @@ check_assign_interactions_atom(int i, int i_gl, */ if (rcheck[d] && k_plus[d] && - dd_dist2(pbc_null, cg_cm, la2lc, + dd_dist2(pbc_null, cg_cm, tiatoms[k_zero[d]], tiatoms[k_plus[d]]) >= rc2) { bUse = FALSE; @@ -1392,7 +1375,7 @@ static int make_bondeds_zone(gmx_domdec_t *dd, const std::vector &molb, gmx_bool bRCheckMB, ivec rcheck, gmx_bool bRCheck2B, real rc2, - int *la2lc, t_pbc *pbc_null, rvec *cg_cm, + t_pbc *pbc_null, rvec *cg_cm, const t_iparams *ip_in, t_idef *idef, int izone, @@ -1425,7 +1408,6 @@ static int make_bondeds_zone(gmx_domdec_t *dd, dd, zones, &molb[mb], bRCheckMB, rcheck, bRCheck2B, rc2, - la2lc, pbc_null, cg_cm, ip_in, @@ -1448,7 +1430,6 @@ static int make_bondeds_zone(gmx_domdec_t *dd, dd, zones, &molb[mb], bRCheckMB, rcheck, bRCheck2B, rc2, - la2lc, pbc_null, cg_cm, ip_in, @@ -1463,15 +1444,11 @@ static int make_bondeds_zone(gmx_domdec_t *dd, } /*! 
\brief Set the exclusion data for i-zone \p iz for the case of no exclusions */ -static void set_no_exclusions_zone(const gmx_domdec_t *dd, - const gmx_domdec_zones_t *zones, +static void set_no_exclusions_zone(const gmx_domdec_zones_t *zones, int iz, t_blocka *lexcls) { - const auto zone = dd->atomGrouping().subRange(zones->cg_range[iz], - zones->cg_range[iz + 1]); - - for (int a : zone) + for (int a = zones->cg_range[iz]; a < zones->cg_range[iz + 1]; a++) { lexcls->index[a + 1] = lexcls->nra; } @@ -1486,7 +1463,7 @@ static void set_no_exclusions_zone(const gmx_domdec_t *dd, static int make_exclusions_zone_cg(gmx_domdec_t *dd, gmx_domdec_zones_t *zones, const std::vector &moltype, gmx_bool bRCheck, real rc2, - int *la2lc, t_pbc *pbc_null, rvec *cg_cm, + t_pbc *pbc_null, rvec *cg_cm, const int *cginfo, t_blocka *lexcls, int iz, @@ -1498,117 +1475,78 @@ static int make_exclusions_zone_cg(gmx_domdec_t *dd, gmx_domdec_zones_t *zones, const gmx_ga2la_t &ga2la = *dd->ga2la; - const auto jRange = - dd->atomGrouping().subRange(zones->izone[iz].jcg0, - zones->izone[iz].jcg1); + // TODO: Replace this by a more standard range + const gmx::RangePartitioning::Block jRange(zones->izone[iz].jcg0, + zones->izone[iz].jcg1); n_excl_at_max = dd->reverse_top->n_excl_at_max; /* We set the end index, but note that we might not start at zero here */ - lexcls->nr = dd->atomGrouping().subRange(0, cg_end).size(); + lexcls->nr = cg_end; int n = lexcls->nra; int count = 0; - for (int cg = cg_start; cg < cg_end; cg++) + for (int la = cg_start; la < cg_end; la++) { if (n + (cg_end - cg_start)*n_excl_at_max > lexcls->nalloc_a) { lexcls->nalloc_a = over_alloc_large(n + (cg_end - cg_start)*n_excl_at_max); srenew(lexcls->a, lexcls->nalloc_a); } - const auto atomGroup = dd->atomGrouping().block(cg); - if (GET_CGINFO_EXCL_INTER(cginfo[cg]) || - !GET_CGINFO_EXCL_INTRA(cginfo[cg])) + if (GET_CGINFO_EXCL_INTER(cginfo[la]) || + !GET_CGINFO_EXCL_INTRA(cginfo[la])) { /* Copy the exclusions from the global top */ - for (int la : atomGroup) + lexcls->index[la] = n; + int a_gl = dd->globalAtomIndices[la]; + int a_mol; + global_atomnr_to_moltype_ind(dd->reverse_top, a_gl, &mb, &mt, &mol, &a_mol); + excls = &moltype[mt].excls; + for (int j = excls->index[a_mol]; j < excls->index[a_mol+1]; j++) { - lexcls->index[la] = n; - int a_gl = dd->globalAtomIndices[la]; - int a_mol; - global_atomnr_to_moltype_ind(dd->reverse_top, a_gl, &mb, &mt, &mol, &a_mol); - excls = &moltype[mt].excls; - for (int j = excls->index[a_mol]; j < excls->index[a_mol+1]; j++) + int aj_mol = excls->a[j]; + /* Since exclusions are pair interactions, + * just like non-bonded interactions, + * they can be assigned properly up + * to the DD cutoff (not cutoff_min as + * for the other bonded interactions). + */ + if (const auto *jEntry = ga2la.find(a_gl + aj_mol - a_mol)) { - int aj_mol = excls->a[j]; - /* This computation of jla is only correct intra-cg */ - int jla = la + aj_mol - a_mol; - if (atomGroup.inRange(jla)) + if (iz == 0 && jEntry->cell == 0) { - /* This is an intra-cg exclusion. We can skip - * the global indexing and distance checking. - */ - /* Intra-cg exclusions are only required - * for the home zone. 
- */ - if (iz == 0) + lexcls->a[n++] = jEntry->la; + /* Check to avoid double counts */ + if (jEntry->la > la) { - lexcls->a[n++] = jla; - /* Check to avoid double counts */ - if (jla > la) - { - count++; - } + count++; } } - else + else if (jRange.inRange(jEntry->la) && + (!bRCheck || + dd_dist2(pbc_null, cg_cm, la, jEntry->la) < rc2)) { - /* This is a inter-cg exclusion */ - /* Since exclusions are pair interactions, - * just like non-bonded interactions, - * they can be assigned properly up - * to the DD cutoff (not cutoff_min as - * for the other bonded interactions). - */ - if (const auto *jEntry = ga2la.find(a_gl + aj_mol - a_mol)) - { - if (iz == 0 && jEntry->cell == 0) - { - lexcls->a[n++] = jEntry->la; - /* Check to avoid double counts */ - if (jEntry->la > la) - { - count++; - } - } - else if (jRange.inRange(jEntry->la) && - (!bRCheck || - dd_dist2(pbc_null, cg_cm, la2lc, la, jEntry->la) < rc2)) - { - /* jla > la, since jRange.begin() > la */ - lexcls->a[n++] = jEntry->la; - count++; - } - } + /* jla > la, since jRange.begin() > la */ + lexcls->a[n++] = jEntry->la; + count++; } } } } else { - /* There are no inter-cg excls and this cg is self-excluded. + /* There are no inter-atomic excls and this atom is self-excluded. * These exclusions are only required for zone 0, * since other zones do not see themselves. */ if (iz == 0) { - for (int la : atomGroup) - { - lexcls->index[la] = n; - for (int j : atomGroup) - { - lexcls->a[n++] = j; - } - } - count += (atomGroup.size()*(atomGroup.size() - 1))/2; + lexcls->index[la] = n; + lexcls->a[n++] = la; } else { - /* We don't need exclusions for this cg */ - for (int la : atomGroup) - { - lexcls->index[la] = n; - } + lexcls->index[la] = n; } } } @@ -1630,9 +1568,9 @@ static void make_exclusions_zone(gmx_domdec_t *dd, gmx_domdec_zones_t *zones, const gmx_ga2la_t &ga2la = *dd->ga2la; - const auto jRange = - dd->atomGrouping().subRange(zones->izone[iz].jcg0, - zones->izone[iz].jcg1); + // TODO: Replace this by a more standard range + const gmx::RangePartitioning::Block jRange(zones->izone[iz].jcg0, + zones->izone[iz].jcg1); n_excl_at_max = dd->reverse_top->n_excl_at_max; @@ -1731,7 +1669,7 @@ static void check_alloc_index(t_blocka *ba, int nindex_max) static void check_exclusions_alloc(gmx_domdec_t *dd, gmx_domdec_zones_t *zones, t_blocka *lexcls) { - int nr = dd->atomGrouping().subRange(0, zones->izone[zones->nizone - 1].cg1).size(); + const int nr = zones->izone[zones->nizone - 1].cg1; check_alloc_index(lexcls, nr); @@ -1745,9 +1683,9 @@ static void check_exclusions_alloc(gmx_domdec_t *dd, gmx_domdec_zones_t *zones, static void finish_local_exclusions(gmx_domdec_t *dd, gmx_domdec_zones_t *zones, t_blocka *lexcls) { - const auto nonhomeIzonesAtomRange = - dd->atomGrouping().subRange(zones->izone[0].cg1, - zones->izone[zones->nizone - 1].cg1); + // TODO: Replace this by a more standard range + const gmx::RangePartitioning::Block nonhomeIzonesAtomRange(zones->izone[0].cg1, + zones->izone[zones->nizone - 1].cg1); if (dd->n_intercg_excl == 0) { @@ -1789,7 +1727,7 @@ static int make_local_bondeds_excls(gmx_domdec_t *dd, const int *cginfo, gmx_bool bRCheckMB, ivec rcheck, gmx_bool bRCheck2B, real rc, - int *la2lc, t_pbc *pbc_null, rvec *cg_cm, + t_pbc *pbc_null, rvec *cg_cm, t_idef *idef, t_blocka *lexcls, int *excl_count) { @@ -1869,10 +1807,10 @@ static int make_local_bondeds_excls(gmx_domdec_t *dd, make_bondeds_zone(dd, zones, mtop->molblock, bRCheckMB, rcheck, bRCheck2B, rc2, - la2lc, pbc_null, cg_cm, idef->iparams, + pbc_null, cg_cm, 
idef->iparams, idef_t, izone, - dd->atomGrouping().subRange(cg0t, cg1t)); + gmx::RangePartitioning::Block(cg0t, cg1t)); if (izone < nzone_excl) { @@ -1887,8 +1825,7 @@ static int make_local_bondeds_excls(gmx_domdec_t *dd, excl_t->nra = 0; } - if (dd->atomGrouping().allBlocksHaveSizeOne() && - !rt->bExclRequired) + if (!rt->bExclRequired) { /* No charge groups and no distance check required */ make_exclusions_zone(dd, zones, mtop->moltype, cginfo, @@ -1901,7 +1838,7 @@ static int make_local_bondeds_excls(gmx_domdec_t *dd, rt->th_work[thread].excl_count = make_exclusions_zone_cg(dd, zones, mtop->moltype, bRCheck2B, rc2, - la2lc, pbc_null, cg_cm, cginfo, + pbc_null, cg_cm, cginfo, excl_t, izone, cg0t, cg1t); @@ -1940,7 +1877,7 @@ static int make_local_bondeds_excls(gmx_domdec_t *dd, */ for (izone = nzone_excl; izone < zones->nizone; izone++) { - set_no_exclusions_zone(dd, zones, izone, lexcls); + set_no_exclusions_zone(zones, izone, lexcls); } finish_local_exclusions(dd, zones, lexcls); @@ -1953,12 +1890,6 @@ static int make_local_bondeds_excls(gmx_domdec_t *dd, return nbonded_local; } -void dd_make_local_cgs(gmx_domdec_t *dd, t_block *lcgs) -{ - lcgs->nr = dd->globalAtomGroupIndices.size(); - lcgs->index = dd->atomGrouping_.rawIndex().data(); -} - void dd_make_local_top(gmx_domdec_t *dd, gmx_domdec_zones_t *zones, int npbcdim, matrix box, rvec cellsize_min, const ivec npulse, @@ -1977,8 +1908,6 @@ void dd_make_local_top(gmx_domdec_t *dd, gmx_domdec_zones_t *zones, fprintf(debug, "Making local topology\n"); } - dd_make_local_cgs(dd, <op->cgs); - bRCheckMB = FALSE; bRCheck2B = FALSE; @@ -2021,7 +1950,6 @@ void dd_make_local_top(gmx_domdec_t *dd, gmx_domdec_zones_t *zones, } if (bRCheckMB || bRCheck2B) { - makeLocalAtomGroupsFromAtoms(dd); if (fr->bMolPBC) { pbc_null = set_pbc_dd(&pbc, fr->ePBC, dd->nc, TRUE, box); @@ -2034,9 +1962,8 @@ void dd_make_local_top(gmx_domdec_t *dd, gmx_domdec_zones_t *zones, } dd->nbonded_local = - make_local_bondeds_excls(dd, zones, &mtop, fr->cginfo, + make_local_bondeds_excls(dd, zones, &mtop, fr->cginfo.data(), bRCheckMB, rcheck, bRCheck2B, rc, - dd->localAtomGroupFromAtom.data(), pbc_null, cgcm_or_x, <op->idef, <op->excls, &nexcl); diff --git a/src/gromacs/domdec/dump.cpp b/src/gromacs/domdec/dump.cpp index a2093a5262..5e88a3dc29 100644 --- a/src/gromacs/domdec/dump.cpp +++ b/src/gromacs/domdec/dump.cpp @@ -1,7 +1,7 @@ /* * This file is part of the GROMACS molecular simulation package. * - * Copyright (c) 2018, by the GROMACS development team, led by + * Copyright (c) 2018,2019, by the GROMACS development team, led by * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl, * and including many others, as listed in the AUTHORS file in the * top-level source directory and at http://www.gromacs.org. @@ -176,7 +176,7 @@ void write_dd_pdb(const char *fn, int64_t step, const char *title, if (i < dd->comm->atomRanges.end(DDAtomRanges::Type::Zones)) { c = 0; - while (i >= dd->atomGrouping().subRange(0, dd->comm->zones.cg_range[c + 1]).end()) + while (i >= dd->comm->zones.cg_range[c + 1]) { c++; } diff --git a/src/gromacs/domdec/partition.cpp b/src/gromacs/domdec/partition.cpp index 27ffb535ab..62e2a0741d 100644 --- a/src/gromacs/domdec/partition.cpp +++ b/src/gromacs/domdec/partition.cpp @@ -441,30 +441,25 @@ static void set_zones_ncg_home(gmx_domdec_t *dd) } //! Restore atom groups for the charge groups. 
-static void restoreAtomGroups(gmx_domdec_t *dd, - const int *gcgs_index, const t_state *state) +static void restoreAtomGroups(gmx_domdec_t *dd, + const t_state *state) { gmx::ArrayRef atomGroupsState = state->cg_gl; std::vector &globalAtomGroupIndices = dd->globalAtomGroupIndices; - gmx::RangePartitioning &atomGrouping = dd->atomGrouping_; globalAtomGroupIndices.resize(atomGroupsState.size()); - atomGrouping.clear(); /* Copy back the global charge group indices from state * and rebuild the local charge group to atom index. */ for (gmx::index i = 0; i < atomGroupsState.ssize(); i++) { - const int atomGroupGlobal = atomGroupsState[i]; - const int groupSize = gcgs_index[atomGroupGlobal + 1] - gcgs_index[atomGroupGlobal]; - globalAtomGroupIndices[i] = atomGroupGlobal; - atomGrouping.appendBlock(groupSize); + globalAtomGroupIndices[i] = atomGroupsState[i]; } dd->ncg_home = atomGroupsState.size(); - dd->comm->atomRanges.setEnd(DDAtomRanges::Type::Home, atomGrouping.fullRange().end()); + dd->comm->atomRanges.setEnd(DDAtomRanges::Type::Home, atomGroupsState.ssize()); set_zones_ncg_home(dd); } @@ -473,16 +468,12 @@ static void restoreAtomGroups(gmx_domdec_t *dd, static void dd_set_cginfo(gmx::ArrayRef index_gl, int cg0, int cg1, t_forcerec *fr, char *bLocalCG) { - cginfo_mb_t *cginfo_mb; - int *cginfo; - int cg; - if (fr != nullptr) { - cginfo_mb = fr->cginfo_mb; - cginfo = fr->cginfo; + const cginfo_mb_t *cginfo_mb = fr->cginfo_mb; + gmx::ArrayRef cginfo = fr->cginfo; - for (cg = cg0; cg < cg1; cg++) + for (int cg = cg0; cg < cg1; cg++) { cginfo[cg] = ddcginfo(cginfo_mb, index_gl[cg]); } @@ -490,7 +481,7 @@ static void dd_set_cginfo(gmx::ArrayRef index_gl, int cg0, int cg1, if (bLocalCG != nullptr) { - for (cg = cg0; cg < cg1; cg++) + for (int cg = cg0; cg < cg1; cg++) { bLocalCG[index_gl[cg]] = TRUE; } @@ -499,13 +490,12 @@ static void dd_set_cginfo(gmx::ArrayRef index_gl, int cg0, int cg1, //! Makes the mappings between global and local atom indices during DD repartioning. static void make_dd_indices(gmx_domdec_t *dd, - const int *gcgs_index, int cg_start) + const int atomStart) { const int numZones = dd->comm->zones.n; const int *zone2cg = dd->comm->zones.cg_range; const int *zone_ncg1 = dd->comm->zone_ncg1; gmx::ArrayRef globalAtomGroupIndices = dd->globalAtomGroupIndices; - const gmx_bool bCGs = dd->comm->bCGs; std::vector &globalAtomIndices = dd->globalAtomIndices; gmx_ga2la_t &ga2la = *dd->ga2la; @@ -516,14 +506,14 @@ static void make_dd_indices(gmx_domdec_t *dd, } /* Make the local to global and global to local atom index */ - int a = dd->atomGrouping().subRange(cg_start, cg_start).begin(); + int a = atomStart; globalAtomIndices.resize(a); for (int zone = 0; zone < numZones; zone++) { int cg0; if (zone == 0) { - cg0 = cg_start; + cg0 = atomStart; } else { @@ -541,21 +531,9 @@ static void make_dd_indices(gmx_domdec_t *dd, zone1 += numZones; } int cg_gl = globalAtomGroupIndices[cg]; - if (bCGs) - { - for (int a_gl = gcgs_index[cg_gl]; a_gl < gcgs_index[cg_gl+1]; a_gl++) - { - globalAtomIndices.push_back(a_gl); - ga2la.insert(a_gl, {a, zone1}); - a++; - } - } - else - { - globalAtomIndices.push_back(cg_gl); - ga2la.insert(cg_gl, {a, zone1}); - a++; - } + globalAtomIndices.push_back(cg_gl); + ga2la.insert(cg_gl, {a, zone1}); + a++; } } } @@ -1407,19 +1385,20 @@ void set_dd_dlb_max_cutoff(t_commrec *cr, real cutoff) } } -//! Merges charge-group buffers. +//! Merge atom buffers. 
static void merge_cg_buffers(int ncell, gmx_domdec_comm_dim_t *cd, int pulse, int *ncg_cell, gmx::ArrayRef index_gl, const int *recv_i, - rvec *cg_cm, rvec *recv_vr, - gmx::ArrayRef cgindex, - cginfo_mb_t *cginfo_mb, int *cginfo) + gmx::ArrayRef x, + gmx::ArrayRef recv_vr, + cginfo_mb_t *cginfo_mb, + gmx::ArrayRef cginfo) { gmx_domdec_ind_t *ind, *ind_p; - int p, cell, c, cg, cg0, cg1, cg_gl, nat; - int shift, shift_at; + int p, cell, c, cg, cg0, cg1, cg_gl; + int shift; ind = &cd->ind[pulse]; @@ -1433,13 +1412,11 @@ static void merge_cg_buffers(int ncell, /* Move the cg's present from previous grid pulses */ cg0 = ncg_cell[ncell+cell]; cg1 = ncg_cell[ncell+cell+1]; - cgindex[cg1+shift] = cgindex[cg1]; for (cg = cg1-1; cg >= cg0; cg--) { - index_gl[cg+shift] = index_gl[cg]; - copy_rvec(cg_cm[cg], cg_cm[cg+shift]); - cgindex[cg+shift] = cgindex[cg]; - cginfo[cg+shift] = cginfo[cg]; + index_gl[cg + shift] = index_gl[cg]; + x[cg + shift] = x[cg]; + cginfo[cg + shift] = cginfo[cg]; } /* Correct the already stored send indices for the shift */ for (p = 1; p <= pulse; p++) @@ -1461,32 +1438,20 @@ static void merge_cg_buffers(int ncell, /* Merge in the communicated buffers */ shift = 0; - shift_at = 0; cg0 = 0; for (cell = 0; cell < ncell; cell++) { cg1 = ncg_cell[ncell+cell+1] + shift; - if (shift_at > 0) - { - /* Correct the old cg indices */ - for (cg = ncg_cell[ncell+cell]; cg < cg1; cg++) - { - cgindex[cg+1] += shift_at; - } - } for (cg = 0; cg < ind->nrecv[cell]; cg++) { - /* Copy this charge group from the buffer */ + /* Copy this atom from the buffer */ index_gl[cg1] = recv_i[cg0]; - copy_rvec(recv_vr[cg0], cg_cm[cg1]); - /* Add it to the cgindex */ + x[cg1] = recv_vr[cg0]; + /* Copy information */ cg_gl = index_gl[cg1]; cginfo[cg1] = ddcginfo(cginfo_mb, cg_gl); - nat = GET_CGINFO_NATOMS(cginfo[cg1]); - cgindex[cg1+1] = cgindex[cg1] + nat; cg0++; cg1++; - shift_at += nat; } shift += ind->nrecv[cell]; ncg_cell[ncell+cell+1] = cg1; @@ -1496,8 +1461,7 @@ static void merge_cg_buffers(int ncell, //! 
Makes a range partitioning for the atom groups wthin a cell static void make_cell2at_index(gmx_domdec_comm_dim_t *cd, int nzone, - int atomGroupStart, - const gmx::RangePartitioning &atomGroups) + int atomGroupStart) { /* Store the atom block boundaries for easy copying of communication buffers */ @@ -1506,10 +1470,9 @@ static void make_cell2at_index(gmx_domdec_comm_dim_t *cd, { for (gmx_domdec_ind_t &ind : cd->ind) { - const auto range = atomGroups.subRange(g, g + ind.nrecv[zone]); - ind.cell2at0[zone] = range.begin(); - ind.cell2at1[zone] = range.end(); + ind.cell2at0[zone] = g; g += ind.nrecv[zone]; + ind.cell2at1[zone] = g; } } } @@ -1652,7 +1615,6 @@ get_zone_pulse_cgs(gmx_domdec_t *dd, int zonei, int zone, int cg0, int cg1, gmx::ArrayRef globalAtomGroupIndices, - const gmx::RangePartitioning &atomGroups, int dim, int dim_ind, int dim0, int dim1, int dim2, real r_comm2, real r_bcomm2, @@ -1668,7 +1630,7 @@ get_zone_pulse_cgs(gmx_domdec_t *dd, gmx_bool bDist2B, gmx_bool bDistMB, rvec *cg_cm, - const int *cginfo, + gmx::ArrayRef cginfo, std::vector *localAtomGroups, dd_comm_setup_work_t *work) { @@ -1893,7 +1855,7 @@ get_zone_pulse_cgs(gmx_domdec_t *dd, } vbuf.emplace_back(posPbc[XX], posPbc[YY], posPbc[ZZ]); - nat += atomGroups.block(cg).size(); + nat += 1; } } @@ -1920,7 +1882,7 @@ static void setup_dd_communication(gmx_domdec_t *dd, { int dim_ind, dim, dim0, dim1, dim2, dimd, nat_tot; int nzone, nzone_send, zone, zonei, cg0, cg1; - int c, i, cg, cg_gl, nrcg; + int c, i, cg, cg_gl; int *zone_cg_range, pos_cg; gmx_domdec_comm_t *comm; gmx_domdec_zones_t *zones; @@ -1928,7 +1890,7 @@ static void setup_dd_communication(gmx_domdec_t *dd, cginfo_mb_t *cginfo_mb; gmx_bool bBondComm, bDist2B, bDistMB, bDistBonded; dd_corners_t corners; - rvec *cg_cm, *normal, *v_d, *v_0 = nullptr, *v_1 = nullptr; + rvec *normal, *v_d, *v_0 = nullptr, *v_1 = nullptr; real skew_fac2_d, skew_fac_01; rvec sf2_round; @@ -1949,22 +1911,10 @@ static void setup_dd_communication(gmx_domdec_t *dd, comm->dth.resize(numThreads); } - switch (fr->cutoff_scheme) - { - case ecutsGROUP: - cg_cm = fr->cg_cm; - break; - case ecutsVERLET: - cg_cm = state->x.rvec_array(); - break; - default: - gmx_incons("unimplemented"); - } - bBondComm = comm->bBondComm; /* Do we need to determine extra distances for multi-body bondeds? */ - bDistMB = (comm->bInterCGMultiBody && isDlbOn(dd->comm) && dd->ndim > 1); + bDistMB = (comm->haveInterDomainMultiBodyBondeds && isDlbOn(dd->comm) && dd->ndim > 1); /* Do we need to determine extra distances for only two-body bondeds? */ bDist2B = (bBondComm && !bDistMB); @@ -2131,7 +2081,6 @@ static void setup_dd_communication(gmx_domdec_t *dd, /* Get the cg's for this pulse in this zone */ get_zone_pulse_cgs(dd, zonei, zone, cg0_th, cg1_th, dd->globalAtomGroupIndices, - dd->atomGrouping(), dim, dim_ind, dim0, dim1, dim2, r_comm2, r_bcomm2, box, distanceIsTriclinic, @@ -2139,7 +2088,8 @@ static void setup_dd_communication(gmx_domdec_t *dd, v_d, v_0, v_1, &corners, sf2_round, bDistBonded, bBondComm, bDist2B, bDistMB, - cg_cm, fr->cginfo, + state->x.rvec_array(), + fr->cginfo, th == 0 ? 
&ind->index : &work.localAtomGroupBuffer, &work); } @@ -2214,19 +2164,12 @@ static void setup_dd_communication(gmx_domdec_t *dd, /* Make space for cg_cm */ dd_check_alloc_ncg(fr, state, f, pos_cg + ind->nrecv[nzone]); - if (fr->cutoff_scheme == ecutsGROUP) - { - cg_cm = fr->cg_cm; - } - else - { - cg_cm = state->x.rvec_array(); - } - /* Communicate cg_cm */ + + /* Communicate the coordinates */ gmx::ArrayRef rvecBufferRef; if (cd->receiveInPlace) { - rvecBufferRef = gmx::arrayRefFromArray(reinterpret_cast(cg_cm + pos_cg), ind->nrecv[nzone]); + rvecBufferRef = gmx::makeArrayRef(state->x).subArray(pos_cg, ind->nrecv[nzone]); } else { @@ -2245,8 +2188,6 @@ static void setup_dd_communication(gmx_domdec_t *dd, { cg_gl = dd->globalAtomGroupIndices[pos_cg]; fr->cginfo[pos_cg] = ddcginfo(cginfo_mb, cg_gl); - nrcg = GET_CGINFO_NATOMS(fr->cginfo[pos_cg]); - dd->atomGrouping_.appendBlock(nrcg); if (bBondComm) { /* Update the charge group presence, @@ -2267,13 +2208,9 @@ static void setup_dd_communication(gmx_domdec_t *dd, else { /* This part of the code is never executed with bBondComm. */ - std::vector &atomGroupsIndex = dd->atomGrouping_.rawIndex(); - atomGroupsIndex.resize(numAtomGroupsNew + 1); - merge_cg_buffers(nzone, cd, p, zone_cg_range, dd->globalAtomGroupIndices, integerBufferRef.data(), - cg_cm, as_rvec_array(rvecBufferRef.data()), - atomGroupsIndex, + state->x, rvecBufferRef, fr->cginfo_mb, fr->cginfo); pos_cg += ind->nrecv[nzone]; } @@ -2282,7 +2219,7 @@ static void setup_dd_communication(gmx_domdec_t *dd, if (!cd->receiveInPlace) { /* Store the atom block for easy copying of communication buffers */ - make_cell2at_index(cd, nzone, zone_cg_range[nzone], dd->atomGrouping()); + make_cell2at_index(cd, nzone, zone_cg_range[nzone]); } nzone += nzone; } @@ -2357,7 +2294,7 @@ static void set_zones_size(gmx_domdec_t *dd, zones = &comm->zones; /* Do we need to determine extra distances for multi-body bondeds? */ - bDistMB = (comm->bInterCGMultiBody && isDlbOn(dd->comm) && dd->ndim > 1); + bDistMB = (comm->haveInterDomainMultiBodyBondeds && isDlbOn(dd->comm) && dd->ndim > 1); for (z = zone_start; z < zone_end; z++) { @@ -2627,39 +2564,6 @@ orderVector(gmx::ArrayRef sort, orderVector(sort, vectorToSort, *workVector); } -//! Order vectors of atoms. -static void order_vec_atom(const gmx::RangePartitioning *atomGroups, - gmx::ArrayRef sort, - gmx::ArrayRef v, - gmx::ArrayRef buf) -{ - if (atomGroups == nullptr) - { - /* Avoid the useless loop of the atoms within a cg */ - orderVector(sort, v, buf); - - return; - } - - /* Order the data */ - int a = 0; - for (const gmx_cgsort_t &entry : sort) - { - for (int i : atomGroups->block(entry.ind)) - { - copy_rvec(v[i], buf[a]); - a++; - } - } - int atot = a; - - /* Copy back to the original array */ - for (int a = 0; a < atot; a++) - { - copy_rvec(buf[a], v[a]); - } -} - //! Returns the sorting order for atoms based on the nbnxn grid order in sort static void dd_sort_order_nbnxn(const t_forcerec *fr, std::vector *sort) @@ -2682,26 +2586,20 @@ static void dd_sort_order_nbnxn(const t_forcerec *fr, } //! Returns the sorting state for DD. 
-static void dd_sort_state(gmx_domdec_t *dd, rvec *cgcm, t_forcerec *fr, t_state *state) +static void dd_sort_state(gmx_domdec_t *dd, t_forcerec *fr, t_state *state) { gmx_domdec_sort_t *sort = dd->comm->sort.get(); dd_sort_order_nbnxn(fr, &sort->sorted); - const gmx::RangePartitioning &atomGrouping = dd->atomGrouping(); - /* We alloc with the old size, since cgindex is still old */ - GMX_ASSERT(atomGrouping.numBlocks() == dd->ncg_home, "atomGroups and dd should be consistent"); - DDBufferAccess rvecBuffer(dd->comm->rvecBuffer, atomGrouping.fullRange().end()); - - const gmx::RangePartitioning *atomGroupsPtr = (dd->comm->bCGs ? &atomGrouping : nullptr); + DDBufferAccess rvecBuffer(dd->comm->rvecBuffer, dd->ncg_home); /* Set the new home atom/charge group count */ dd->ncg_home = sort->sorted.size(); if (debug) { - fprintf(debug, "Set the new home %s count to %d\n", - dd->comm->bCGs ? "charge group" : "atom", + fprintf(debug, "Set the new home atom count to %d\n", dd->ncg_home); } @@ -2711,48 +2609,23 @@ static void dd_sort_state(gmx_domdec_t *dd, rvec *cgcm, t_forcerec *fr, t_state if (state->flags & (1 << estX)) { - order_vec_atom(atomGroupsPtr, cgsort, state->x, rvecBuffer.buffer); + orderVector(cgsort, makeArrayRef(state->x), rvecBuffer.buffer); } if (state->flags & (1 << estV)) { - order_vec_atom(atomGroupsPtr, cgsort, state->v, rvecBuffer.buffer); + orderVector(cgsort, makeArrayRef(state->v), rvecBuffer.buffer); } if (state->flags & (1 << estCGP)) { - order_vec_atom(atomGroupsPtr, cgsort, state->cg_p, rvecBuffer.buffer); - } - - if (fr->cutoff_scheme == ecutsGROUP) - { - /* Reorder cgcm */ - gmx::ArrayRef cgcmRef = gmx::arrayRefFromArray(reinterpret_cast(cgcm[0]), cgsort.size()); - orderVector(cgsort, cgcmRef, rvecBuffer.buffer); + orderVector(cgsort, makeArrayRef(state->cg_p), rvecBuffer.buffer); } /* Reorder the global cg index */ orderVector(cgsort, dd->globalAtomGroupIndices, &sort->intBuffer); /* Reorder the cginfo */ - orderVector(cgsort, gmx::arrayRefFromArray(fr->cginfo, cgsort.size()), &sort->intBuffer); - /* Rebuild the local cg index */ - if (dd->comm->bCGs) - { - /* We make a new, ordered atomGroups object and assign it to - * the old one. This causes some allocation overhead, but saves - * a copy back of the whole index. 
- */ - gmx::RangePartitioning ordered; - for (const gmx_cgsort_t &entry : cgsort) - { - ordered.appendBlock(atomGrouping.block(entry.ind).size()); - } - dd->atomGrouping_ = ordered; - } - else - { - dd->atomGrouping_.setAllBlocksSizeOne(dd->ncg_home); - } + orderVector(cgsort, fr->cginfo, &sort->intBuffer); /* Set the home atom number */ - dd->comm->atomRanges.setEnd(DDAtomRanges::Type::Home, dd->atomGrouping().fullRange().end()); + dd->comm->atomRanges.setEnd(DDAtomRanges::Type::Home, dd->ncg_home); /* The atoms are now exactly in grid order, update the grid order */ fr->nbv->setLocalAtomOrder(); @@ -2871,7 +2744,6 @@ void dd_partition_system(FILE *fplog, gmx_domdec_t *dd; gmx_domdec_comm_t *comm; gmx_ddbox_t ddbox = {0}; - t_block *cgs_gl; int64_t step_pcoupl; rvec cell_ns_x0, cell_ns_x1; int ncgindex_set, ncg_moved, nat_f_novirsum; @@ -3080,8 +2952,6 @@ void dd_partition_system(FILE *fplog, comm->n_load_have++; } - cgs_gl = &comm->cgs_gl; - bRedist = FALSE; if (bMasterState) { @@ -3098,17 +2968,9 @@ void dd_partition_system(FILE *fplog, distributeState(mdlog, dd, top_global, state_global, ddbox, state_local, f); - dd_make_local_cgs(dd, &top_local->cgs); - /* Ensure that we have space for the new distribution */ dd_check_alloc_ncg(fr, state_local, f, dd->ncg_home); - if (fr->cutoff_scheme == ecutsGROUP) - { - calc_cgcm(fplog, 0, dd->ncg_home, - &top_local->cgs, state_local->x.rvec_array(), fr->cg_cm); - } - inc_nrnb(nrnb, eNR_CGCM, comm->atomRanges.numHomeAtoms()); dd_set_cginfo(dd->globalAtomGroupIndices, 0, dd->ncg_home, fr, comm->bLocalCG); @@ -3129,17 +2991,10 @@ void dd_partition_system(FILE *fplog, clearDDStateIndices(dd, 0, 0); /* Restore the atom group indices from state_local */ - restoreAtomGroups(dd, cgs_gl->index, state_local); - make_dd_indices(dd, cgs_gl->index, 0); + restoreAtomGroups(dd, state_local); + make_dd_indices(dd, 0); ncgindex_set = dd->ncg_home; - if (fr->cutoff_scheme == ecutsGROUP) - { - /* Redetermine the cg COMs */ - calc_cgcm(fplog, 0, dd->ncg_home, - &top_local->cgs, state_local->x.rvec_array(), fr->cg_cm); - } - inc_nrnb(nrnb, eNR_CGCM, comm->atomRanges.numHomeAtoms()); dd_set_cginfo(dd->globalAtomGroupIndices, 0, dd->ncg_home, fr, comm->bLocalCG); @@ -3219,10 +3074,11 @@ void dd_partition_system(FILE *fplog, wallcycle_sub_stop(wcycle, ewcsDD_REDIST); } + // TODO: Integrate this code in the nbnxm module get_nsgrid_boundaries(ddbox.nboundeddim, state_local->box, dd, &ddbox, &comm->cell_x0, &comm->cell_x1, - dd->ncg_home, fr->cg_cm, + dd->ncg_home, as_rvec_array(state_local->x.data()), cell_ns_x0, cell_ns_x1, &grid_density); if (bBoxChanged) @@ -3266,7 +3122,7 @@ void dd_partition_system(FILE *fplog, fprintf(debug, "Step %s, sorting the %d home charge groups\n", gmx_step_str(step, sbuf), dd->ncg_home); } - dd_sort_state(dd, fr->cg_cm, fr, state_local); + dd_sort_state(dd, fr, state_local); /* After sorting and compacting we set the correct size */ dd_resize_state(state_local, f, comm->atomRanges.numHomeAtoms()); @@ -3302,7 +3158,7 @@ void dd_partition_system(FILE *fplog, setup_dd_communication(dd, state_local->box, &ddbox, fr, state_local, f); /* Set the indices */ - make_dd_indices(dd, cgs_gl->index, ncgindex_set); + make_dd_indices(dd, ncgindex_set); /* Set the charge group boundaries for neighbor searching */ set_cg_boundaries(&comm->zones); @@ -3332,7 +3188,7 @@ void dd_partition_system(FILE *fplog, dd_make_local_top(dd, &comm->zones, dd->npbcdim, state_local->box, comm->cellsize_min, np, fr, - fr->cutoff_scheme == ecutsGROUP ? 
fr->cg_cm : state_local->x.rvec_array(), + state_local->x.rvec_array(), top_global, top_local); wallcycle_sub_stop(wcycle, ewcsDD_MAKETOP); @@ -3356,7 +3212,7 @@ void dd_partition_system(FILE *fplog, if (dd->splitConstraints || dd->splitSettles) { /* Only for inter-cg constraints we need special code */ - n = dd_make_local_constraints(dd, n, &top_global, fr->cginfo, + n = dd_make_local_constraints(dd, n, &top_global, fr->cginfo.data(), constr, ir->nProjOrder, top_local->idef.il); } diff --git a/src/gromacs/domdec/redistribute.cpp b/src/gromacs/domdec/redistribute.cpp index 860138b716..4e5a5ea5f1 100644 --- a/src/gromacs/domdec/redistribute.cpp +++ b/src/gromacs/domdec/redistribute.cpp @@ -84,49 +84,21 @@ static inline int DD_FLAG_BW(int d) static void copyMovedAtomsToBufferPerAtom(gmx::ArrayRef move, - const gmx::RangePartitioning &atomGroups, int nvec, int vec, rvec *src, gmx_domdec_comm_t *comm) { int pos_vec[DIM*2] = { 0 }; - for (int g = 0; g < move.ssize(); g++) - { - const auto atomGroup = atomGroups.block(g); - /* Skip moved atoms */ - int m = move[g]; - if (m >= 0) - { - /* Copy to the communication buffer */ - int numAtoms = atomGroup.size(); - pos_vec[m] += 1 + vec*numAtoms; - for (int i : atomGroup) - { - copy_rvec(src[i], comm->cgcm_state[m][pos_vec[m]++]); - } - pos_vec[m] += (nvec - vec - 1)*numAtoms; - } - } -} - -static void -copyMovedChargeGroupCogs(gmx::ArrayRef move, - const gmx::RangePartitioning &atomGroups, - int nvec, const rvec *src, - gmx_domdec_comm_t *comm) -{ - int pos_vec[DIM*2] = { 0 }; - - for (int g = 0; g < move.ssize(); g++) + for (int i = 0; i < move.ssize(); i++) { - const auto atomGroup = atomGroups.block(g); /* Skip moved atoms */ - int m = move[g]; + const int m = move[i]; if (m >= 0) { /* Copy to the communication buffer */ - copy_rvec(src[g], comm->cgcm_state[m][pos_vec[m]]); - pos_vec[m] += 1 + atomGroup.size()*nvec; + pos_vec[m] += 1 + vec; + copy_rvec(src[i], comm->cgcm_state[m][pos_vec[m]++]); + pos_vec[m] += nvec - vec - 1; } } } @@ -156,30 +128,26 @@ copyMovedUpdateGroupCogs(gmx::ArrayRef move, static void clear_and_mark_ind(gmx::ArrayRef move, gmx::ArrayRef globalAtomGroupIndices, - const gmx::RangePartitioning &atomGroups, gmx::ArrayRef globalAtomIndices, gmx_ga2la_t *ga2la, char *bLocalCG, int *cell_index) { - for (int g = 0; g < move.ssize(); g++) + for (int a = 0; a < move.ssize(); a++) { - if (move[g] >= 0) + if (move[a] >= 0) { /* Clear the global indices */ - for (int a : atomGroups.block(g)) - { - ga2la->erase(globalAtomIndices[a]); - } + ga2la->erase(globalAtomIndices[a]); if (bLocalCG) { - bLocalCG[globalAtomGroupIndices[g]] = FALSE; + bLocalCG[globalAtomGroupIndices[a]] = FALSE; } - /* Signal that this group has moved using the ns cell index. + /* Signal that this atom has moved using the ns cell index. * Here we set it to -1. fill_grid will change it * from -1 to NSGRID_SIGNAL_MOVED_FAC*grid->ncells. 
*/ - cell_index[g] = -1; + cell_index[a] = -1; } } } @@ -195,11 +163,7 @@ static void print_cg_move(FILE *fplog, fprintf(fplog, "\nStep %" PRId64 ":\n", step); - if (comm->bCGs) - { - mesg += "The charge group starting at atom"; - } - else if (comm->useUpdateGroups) + if (comm->useUpdateGroups) { mesg += "The update group starting at atom"; } @@ -208,7 +172,7 @@ static void print_cg_move(FILE *fplog, mesg += "Atom"; } mesg += gmx::formatString(" %d moved more than the distance allowed by the domain decomposition", - ddglatnr(dd, dd->atomGrouping().block(cg).begin())); + ddglatnr(dd, cg)); if (limitd > 0) { mesg += gmx::formatString(" (%f)", limitd); @@ -351,12 +315,9 @@ static int computeMoveFlag(const gmx_domdec_t &dd, return firstMoveDimValue + flag; } -/* Determine to which domains atomGroups in the range \p cg_start, \p cg_end should go. +/* Determine to which atoms in the range \p cg_start, \p cg_end should go. * - * Returns in the move array where the groups should go. - * Also updates \p cg_cm for jumps over periodic boundaries. - * - * \TODO Rename cg to atomGroup. + * Returns in the move array where the atoms should go. */ static void calc_cg_move(FILE *fplog, int64_t step, gmx_domdec_t *dd, @@ -364,37 +325,17 @@ static void calc_cg_move(FILE *fplog, int64_t step, const ivec tric_dir, matrix tcm, const rvec cell_x0, const rvec cell_x1, const MoveLimits &moveLimits, - const gmx::RangePartitioning &atomGroups, int cg_start, int cg_end, - rvec *cg_cm, gmx::ArrayRef move) { const int npbcdim = dd->npbcdim; auto x = makeArrayRef(state->x); - for (int g = cg_start; g < cg_end; g++) + for (int a = cg_start; a < cg_end; a++) { - const auto atomGroup = atomGroups.block(g); - const int numAtoms = atomGroup.size(); // TODO: Rename this center of geometry variable to cogNew rvec cm_new; - if (numAtoms == 1) - { - copy_rvec(x[atomGroup.begin()], cm_new); - } - else - { - real invNumAtoms = 1/static_cast(numAtoms); - clear_rvec(cm_new); - for (int k : atomGroup) - { - rvec_inc(cm_new, x[k]); - } - for (int d = 0; d < DIM; d++) - { - cm_new[d] = invNumAtoms*cm_new[d]; - } - } + copy_rvec(x[a], cm_new); ivec dev = { 0 }; /* Do pbc and check DD cell boundary crossings */ @@ -417,9 +358,9 @@ static void calc_cg_move(FILE *fplog, int64_t step, { if (pos_d >= moveLimits.upper[d]) { - cg_move_error(fplog, dd, step, g, d, 1, - cg_cm != state->x.rvec_array(), moveLimits.distance[d], - cg_cm[g], cm_new, pos_d); + cg_move_error(fplog, dd, step, a, d, 1, + false, moveLimits.distance[d], + cm_new, cm_new, pos_d); } dev[d] = 1; if (dd->ci[d] == dd->nc[d] - 1) @@ -430,13 +371,10 @@ static void calc_cg_move(FILE *fplog, int64_t step, cm_new[YY] = state->box[YY][YY] - cm_new[YY]; cm_new[ZZ] = state->box[ZZ][ZZ] - cm_new[ZZ]; } - for (int k : atomGroup) + rvec_dec(x[a], state->box[d]); + if (bScrew) { - rvec_dec(x[k], state->box[d]); - if (bScrew) - { - rotate_state_atom(state, k); - } + rotate_state_atom(state, a); } } } @@ -444,9 +382,9 @@ static void calc_cg_move(FILE *fplog, int64_t step, { if (pos_d < moveLimits.lower[d]) { - cg_move_error(fplog, dd, step, g, d, -1, - cg_cm != state->x.rvec_array(), moveLimits.distance[d], - cg_cm[g], cm_new, pos_d); + cg_move_error(fplog, dd, step, a, d, -1, + false, moveLimits.distance[d], + cm_new, cm_new, pos_d); } dev[d] = -1; if (dd->ci[d] == 0) @@ -457,13 +395,10 @@ static void calc_cg_move(FILE *fplog, int64_t step, cm_new[YY] = state->box[YY][YY] - cm_new[YY]; cm_new[ZZ] = state->box[ZZ][ZZ] - cm_new[ZZ]; } - for (int k : atomGroup) + rvec_inc(x[a], state->box[d]); 
+                    if (bScrew)
                    {
-                        rvec_inc(x[k], state->box[d]);
-                        if (bScrew)
-                        {
-                            rotate_state_atom(state, k);
-                        }
+                        rotate_state_atom(state, a);
                    }
                }
            }
@@ -474,26 +409,18 @@ static void calc_cg_move(FILE *fplog, int64_t step,
                while (cm_new[d] >= state->box[d][d])
                {
                    rvec_dec(cm_new, state->box[d]);
-                    for (int k : atomGroup)
-                    {
-                        rvec_dec(x[k], state->box[d]);
-                    }
+                    rvec_dec(x[a], state->box[d]);
                }
                while (cm_new[d] < 0)
                {
                    rvec_inc(cm_new, state->box[d]);
-                    for (int k : atomGroup)
-                    {
-                        rvec_inc(x[k], state->box[d]);
-                    }
+                    rvec_inc(x[a], state->box[d]);
                }
            }
        }
 
-        copy_rvec(cm_new, cg_cm[g]);
-
         /* Temporarily store the flag in move */
-        move[g] = computeMoveFlag(*dd, dev);
+        move[a] = computeMoveFlag(*dd, dev);
     }
 }
 
@@ -633,12 +560,6 @@ void dd_redistribute_cg(FILE *fplog, int64_t step,
         check_screw_box(state->box);
     }
 
-    rvec *cg_cm = nullptr;
-    if (fr->cutoff_scheme == ecutsGROUP)
-    {
-        cg_cm = fr->cg_cm;
-    }
-
     // Positions are always present, so there's nothing to flag
     bool bV   = (state->flags & (1 << estV));
     bool bCGP = (state->flags & (1 << estCGP));
 
     make_tric_corr_matrix(npbcdim, state->box, tcm);
 
-    const gmx::RangePartitioning &atomGrouping = dd->atomGrouping();
-
     const int nthread = gmx_omp_nthreads_get(emntDomdec);
 
     /* Compute the center of geometry for all home charge groups
@@ -725,10 +644,8 @@ void dd_redistribute_cg(FILE *fplog, int64_t step,
             /* Here we handle single atoms or charge groups */
             calc_cg_move(fplog, step, dd, state,
                          tric_dir, tcm,
                          cell_x0, cell_x1, moveLimits,
-                         atomGrouping,
                          ( thread   *dd->ncg_home)/nthread,
                          ((thread+1)*dd->ncg_home)/nthread,
-                         fr->cutoff_scheme == ecutsGROUP ? cg_cm : as_rvec_array(state->x.data()),
                          move);
         }
     }
 
@@ -756,11 +673,13 @@ void dd_redistribute_cg(FILE *fplog, int64_t step,
             /* We store the cg size in the lower 16 bits
              * and the place where the charge group should go
              * in the next 6 bits. This saves some communication volume.
+             *
+             * TODO: Remove the size, as it is now always 1.
              */
-            const int nrcg = atomGrouping.block(cg).size();
-            cggl_flag[ncg[mc]*DD_CGIBS+1] = nrcg | flag;
+            const int numAtomsInGroup = 1;
+            cggl_flag[ncg[mc]*DD_CGIBS+1] = numAtomsInGroup | flag;
             ncg[mc] += 1;
-            nat[mc] += nrcg;
+            nat[mc] += numAtomsInGroup;
         }
     }
 
@@ -793,42 +712,28 @@ void dd_redistribute_cg(FILE *fplog, int64_t step,
         }
     }
 
-    switch (fr->cutoff_scheme)
-    {
-        case ecutsGROUP:
-            /* Recalculating cg_cm might be cheaper than communicating,
-             * but that could give rise to rounding issues.
-             */
-            copyMovedChargeGroupCogs(move, dd->atomGrouping(),
-                                     nvec, cg_cm, comm);
-            break;
-        case ecutsVERLET:
-            /* With update groups we send over their COGs.
-             * Without update groups we send the moved atom coordinates
-             * over twice. This is so the code further down can be used
-             * without many conditionals both with and without update groups.
-             */
-            copyMovedUpdateGroupCogs(move, nvec, state->x, comm);
-            break;
-        default:
-            gmx_incons("unimplemented");
-    }
+    /* With update groups we send over their COGs.
+     * Without update groups we send the moved atom coordinates
+     * over twice. This is so the code further down can be used
+     * without many conditionals both with and without update groups.
+     */
+    copyMovedUpdateGroupCogs(move, nvec, state->x, comm);
 
     int vectorIndex = 0;
-    copyMovedAtomsToBufferPerAtom(move, dd->atomGrouping(),
+    copyMovedAtomsToBufferPerAtom(move,
                                   nvec, vectorIndex++,
                                   state->x.rvec_array(),
                                   comm);
     if (bV)
     {
-        copyMovedAtomsToBufferPerAtom(move, dd->atomGrouping(),
+        copyMovedAtomsToBufferPerAtom(move,
                                       nvec, vectorIndex++,
                                       state->v.rvec_array(),
                                       comm);
     }
     if (bCGP)
     {
-        copyMovedAtomsToBufferPerAtom(move, dd->atomGrouping(),
+        copyMovedAtomsToBufferPerAtom(move,
                                       nvec, vectorIndex++,
                                       state->cg_p.rvec_array(),
                                       comm);
@@ -837,13 +742,12 @@ void dd_redistribute_cg(FILE *fplog, int64_t step,
     int *moved = getMovedBuffer(comm, 0, dd->ncg_home);
 
     clear_and_mark_ind(move,
-                       dd->globalAtomGroupIndices, dd->atomGrouping(), dd->globalAtomIndices,
+                       dd->globalAtomGroupIndices, dd->globalAtomIndices,
                        dd->ga2la, comm->bLocalCG,
                        moved);
 
     /* Now we can remove the excess global atom-group indices from the list */
     dd->globalAtomGroupIndices.resize(dd->ncg_home);
-    dd->atomGrouping_.reduceNumBlocks(dd->ncg_home);
 
     /* We reuse the intBuffer without reacquiring since we are in the same scope */
     DDBufferAccess<int> &flagBuffer = moveBuffer;
@@ -852,7 +756,7 @@ void dd_redistribute_cg(FILE *fplog, int64_t step,
     /* Temporarily store atoms passed to our rank at the end of the range */
     int home_pos_cg = dd->ncg_home;
-    int home_pos_at = dd->atomGrouping().subRange(0, dd->ncg_home).end();
+    int home_pos_at = dd->ncg_home;
     for (int d = 0; d < dd->ndim; d++)
     {
         DDBufferAccess<gmx::RVec> rvecBuffer(comm->rvecBuffer, 0);
@@ -893,11 +797,6 @@ void dd_redistribute_cg(FILE *fplog, int64_t step,
         }
 
         dd_check_alloc_ncg(fr, state, f, home_pos_cg + ncg_recv);
-        if (fr->cutoff_scheme == ecutsGROUP)
-        {
-            /* Here we resize to more than necessary and shrink later */
-            dd_resize_state(state, f, home_pos_at + ncg_recv*MAX_CGCGSIZE);
-        }
 
         /* Process the received charge or update groups */
         int buf_pos = 0;
@@ -920,7 +819,7 @@ void dd_redistribute_cg(FILE *fplog, int64_t step,
                     rvec pos = { cog[0], cog[1], cog[2] };
                     cg_move_error(fplog, dd, step, cg, dim,
                                   (flag & DD_FLAG_FW(d)) ? 1 : 0,
-                                  fr->cutoff_scheme == ecutsGROUP, 0,
+                                  false, 0,
                                   pos, pos, pos[dim]);
                 }
             }
@@ -1001,19 +900,14 @@ void dd_redistribute_cg(FILE *fplog, int64_t step,
                 }
             }
 
-            const int nrcg = flag & DD_FLAG_NRCG;
+            GMX_ASSERT((flag & DD_FLAG_NRCG) == 1, "Charge groups are gone, so all groups should have size 1");
+            constexpr int nrcg = 1;
             if (mc == -1)
             {
                 /* Set the global charge group index and size */
                 const int globalAtomGroupIndex = flagBuffer.buffer[cg*DD_CGIBS];
                 dd->globalAtomGroupIndices.push_back(globalAtomGroupIndex);
-                dd->atomGrouping_.appendBlock(nrcg);
-                /* Copy the state from the buffer */
-                if (fr->cutoff_scheme == ecutsGROUP)
-                {
-                    cg_cm = fr->cg_cm;
-                    copy_rvec(cog, cg_cm[home_pos_cg]);
-                }
+                /* Skip the COG entry in the buffer */
                 buf_pos++;
 
                 /* Set the cginfo */
@@ -1082,28 +976,20 @@ void dd_redistribute_cg(FILE *fplog, int64_t step,
      * and ncg_home and nat_home are not the real count, since there are
      * "holes" in the arrays for the charge groups that moved to neighbors.
      */
-    if (fr->cutoff_scheme == ecutsVERLET)
-    {
-        /* We need to clear the moved flags for the received atoms,
-         * because the moved buffer will be passed to the nbnxn gridding call.
-         */
-        int *moved = getMovedBuffer(comm, dd->ncg_home, home_pos_cg);
-        for (int i = dd->ncg_home; i < home_pos_cg; i++)
-        {
-            moved[i] = 0;
-        }
+    /* We need to clear the moved flags for the received atoms,
+     * because the moved buffer will be passed to the nbnxm gridding call.
+     */
+    moved = getMovedBuffer(comm, dd->ncg_home, home_pos_cg);
+
+    for (int i = dd->ncg_home; i < home_pos_cg; i++)
+    {
+        moved[i] = 0;
     }
 
     dd->ncg_home = home_pos_cg;
     comm->atomRanges.setEnd(DDAtomRanges::Type::Home, home_pos_at);
 
-    if (fr->cutoff_scheme == ecutsGROUP)
-    {
-        /* We overallocated before, we need to set the right size here */
-        dd_resize_state(state, f, comm->atomRanges.numHomeAtoms());
-    }
-
     if (debug)
     {
         fprintf(debug,
diff --git a/src/gromacs/domdec/utility.cpp b/src/gromacs/domdec/utility.cpp
index e9e0cd4cbc..75fd9e49be 100644
--- a/src/gromacs/domdec/utility.cpp
+++ b/src/gromacs/domdec/utility.cpp
@@ -1,7 +1,7 @@
 /*
  * This file is part of the GROMACS molecular simulation package.
  *
- * Copyright (c) 2018, by the GROMACS development team, led by
+ * Copyright (c) 2018,2019, by the GROMACS development team, led by
  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
  * and including many others, as listed in the AUTHORS file in the
  * top-level source directory and at http://www.gromacs.org.
@@ -127,24 +127,8 @@ void dd_check_alloc_ncg(t_forcerec              *fr,
                         PaddedVector<gmx::RVec> *f,
                         int                      numChargeGroups)
 {
-    if (numChargeGroups > fr->cg_nalloc)
-    {
-        if (debug)
-        {
-            fprintf(debug, "Reallocating forcerec: currently %d, required %d, allocating %d\n", fr->cg_nalloc, numChargeGroups, over_alloc_dd(numChargeGroups));
-        }
-        fr->cg_nalloc = over_alloc_dd(numChargeGroups);
-        srenew(fr->cginfo, fr->cg_nalloc);
-        if (fr->cutoff_scheme == ecutsGROUP)
-        {
-            srenew(fr->cg_cm, fr->cg_nalloc);
-        }
-    }
-    if (fr->cutoff_scheme == ecutsVERLET)
-    {
-        /* We don't use charge groups, we use x in state to set up
-         * the atom communication.
-         */
-        dd_resize_state(state, f, numChargeGroups);
-    }
+    fr->cginfo.resize(numChargeGroups);
+
+    /* We use x during the setup of the atom communication */
+    dd_resize_state(state, f, numChargeGroups);
 }
diff --git a/src/gromacs/mdlib/constr.cpp b/src/gromacs/mdlib/constr.cpp
index 496a4c827e..fe61663bff 100644
--- a/src/gromacs/mdlib/constr.cpp
+++ b/src/gromacs/mdlib/constr.cpp
@@ -925,7 +925,7 @@ Constraints::Impl::setConstraints(const gmx_localtop_t &top,
     {
         // We are using the local topology, so there are only
         // F_CONSTR constraints.
-        make_shake_sblock_dd(shaked, &idef->il[F_CONSTR], &top.cgs, cr->dd);
+        make_shake_sblock_dd(shaked, &idef->il[F_CONSTR], cr->dd);
     }
     else
     {
diff --git a/src/gromacs/mdlib/forcerec.cpp b/src/gromacs/mdlib/forcerec.cpp
index 4d0033c963..a4d41a14de 100644
--- a/src/gromacs/mdlib/forcerec.cpp
+++ b/src/gromacs/mdlib/forcerec.cpp
@@ -821,15 +821,15 @@ static cginfo_mb_t *init_cginfo_mb(FILE *fplog, const gmx_mtop_t *mtop,
     return cginfo_mb;
 }
 
-static int *cginfo_expand(int nmb, cginfo_mb_t *cgi_mb)
+static std::vector<int> cginfo_expand(const int          nmb,
+                                      const cginfo_mb_t *cgi_mb)
 {
-    int  ncg, mb, cg;
-    int *cginfo;
+    const int ncg = cgi_mb[nmb - 1].cg_end;
 
-    ncg = cgi_mb[nmb-1].cg_end;
-    snew(cginfo, ncg);
-    mb = 0;
-    for (cg = 0; cg < ncg; cg++)
+    std::vector<int> cginfo(ncg);
+
+    int mb = 0;
+    for (int cg = 0; cg < ncg; cg++)
     {
         while (cg >= cgi_mb[mb].cg_end)
         {
@@ -2052,13 +2052,6 @@ void init_forcerec(FILE *fp,
         fr->forceBufferForDirectVirialContributions = new std::vector<gmx::RVec>;
     }
 
-    if (fr->cutoff_scheme == ecutsGROUP &&
-        ncg_mtop(mtop) > fr->cg_nalloc && !DOMAINDECOMP(cr))
-    {
-        /* Count the total number of charge groups */
-        fr->cg_nalloc = ncg_mtop(mtop);
-        srenew(fr->cg_cm, fr->cg_nalloc);
-    }
     if (fr->shift_vec == nullptr)
     {
         snew(fr->shift_vec, SHIFTS);
@@ -2285,11 +2278,7 @@ void init_forcerec(FILE *fp,
     fr->cginfo_mb = init_cginfo_mb(fp, mtop, fr, bNoSolvOpt,
                                    &bFEP_NonBonded,
                                    &fr->bExcl_IntraCGAll_InterCGNone);
-    if (DOMAINDECOMP(cr))
-    {
-        fr->cginfo = nullptr;
-    }
-    else
+    if (!DOMAINDECOMP(cr))
     {
         fr->cginfo = cginfo_expand(mtop->molblock.size(), fr->cginfo_mb);
     }
@@ -2401,11 +2390,6 @@ void done_forcerec(t_forcerec *fr, int numMolBlocks)
         // PME-only ranks don't have a forcerec
         return;
     }
-    // cginfo is dynamically allocated if no domain decomposition
-    if (fr->cginfo != nullptr)
-    {
-        sfree(fr->cginfo);
-    }
     done_cginfo_mb(fr->cginfo_mb, numMolBlocks);
     sfree(fr->nbfp);
     done_interaction_const(fr->ic);
diff --git a/src/gromacs/mdlib/shake.cpp b/src/gromacs/mdlib/shake.cpp
index 6e5c3f4d0b..fa49e80294 100644
--- a/src/gromacs/mdlib/shake.cpp
+++ b/src/gromacs/mdlib/shake.cpp
@@ -3,7 +3,7 @@
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2017,2018, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
@@ -288,9 +288,10 @@ make_shake_sblock_serial(shakedata *shaked,
     resizeLagrangianData(shaked, ncons);
 }
 
+// TODO: Check if this code is useful. It might never be called.
 void
-make_shake_sblock_dd(shakedata *shaked,
-                     const t_ilist *ilcon, const t_block *cgs,
+make_shake_sblock_dd(shakedata          *shaked,
+                     const t_ilist      *ilcon,
                      const gmx_domdec_t *dd)
 {
     int ncons, c, cg;
@@ -308,10 +309,10 @@ make_shake_sblock_dd(shakedata *shaked,
     cg = 0;
     for (c = 0; c < ncons; c++)
     {
-        if (c == 0 || iatom[1] >= cgs->index[cg+1])
+        if (c == 0 || iatom[1] >= cg + 1)
         {
             shaked->sblock[shaked->nblocks++] = 3*c;
-            while (iatom[1] >= cgs->index[cg+1])
+            while (iatom[1] >= cg + 1)
             {
                 cg++;
             }
diff --git a/src/gromacs/mdlib/shake.h b/src/gromacs/mdlib/shake.h
index 46a5b4065b..c1e19b14a6 100644
--- a/src/gromacs/mdlib/shake.h
+++ b/src/gromacs/mdlib/shake.h
@@ -1,7 +1,7 @@
 /*
 * This file is part of the GROMACS molecular simulation package.
 *
- * Copyright (c) 2018, by the GROMACS development team, led by
+ * Copyright (c) 2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
@@ -74,8 +74,8 @@ make_shake_sblock_serial(shakedata *shaked,
 //! Make SHAKE blocks when using DD.
 void
-make_shake_sblock_dd(shakedata *shaked,
-                     const t_ilist *ilcon, const t_block *cgs,
+make_shake_sblock_dd(shakedata          *shaked,
+                     const t_ilist      *ilcon,
                      const gmx_domdec_t *dd);
 
 /*! \brief Shake all the atoms blockwise. It is assumed that all the constraints
diff --git a/src/gromacs/mdlib/sim_util.cpp b/src/gromacs/mdlib/sim_util.cpp
index 3a827652d9..14b2cd4c8c 100644
--- a/src/gromacs/mdlib/sim_util.cpp
+++ b/src/gromacs/mdlib/sim_util.cpp
@@ -980,7 +980,7 @@ void do_force(FILE *fplog,
             wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);
         }
 
-        nbv->setAtomProperties(*mdatoms, *fr->cginfo);
+        nbv->setAtomProperties(*mdatoms, fr->cginfo);
 
         wallcycle_stop(wcycle, ewcNS);
diff --git a/src/gromacs/mdrun/shellfc.cpp b/src/gromacs/mdrun/shellfc.cpp
index 657e28ca32..83613bbe35 100644
--- a/src/gromacs/mdrun/shellfc.cpp
+++ b/src/gromacs/mdrun/shellfc.cpp
@@ -1006,7 +1006,7 @@ void relax_shell_flexcon(FILE *fplog,
     char           sbuf[22];
     gmx_bool       bCont, bInit, bConverged;
     int            nat, dd_ac0, dd_ac1 = 0, i;
-    int            homenr = md->homenr, end = homenr, cg0, cg1;
+    int            homenr = md->homenr, end = homenr;
     int            nflexcon, number_steps, d, Min = 0, count = 0;
 #define  Try (1-Min)             /* At start Try = 1 */
 
@@ -1059,18 +1059,8 @@ void relax_shell_flexcon(FILE *fplog,
          * before do_force is called, which normally puts all
         * charge groups in the box.
         */
-        if (inputrec->cutoff_scheme == ecutsVERLET)
-        {
-            auto xRef = state->x.arrayRefWithPadding().paddedArrayRef();
-            put_atoms_in_box_omp(fr->ePBC, state->box, xRef.subArray(0, md->homenr), gmx_omp_nthreads_get(emntDefault));
-        }
-        else
-        {
-            cg0 = 0;
-            cg1 = top->cgs.nr;
-            put_charge_groups_in_box(fplog, cg0, cg1, fr->ePBC, state->box,
-                                     &(top->cgs), state->x.rvec_array(), fr->cg_cm);
-        }
+        auto xRef = state->x.arrayRefWithPadding().paddedArrayRef();
+        put_atoms_in_box_omp(fr->ePBC, state->box, xRef.subArray(0, md->homenr), gmx_omp_nthreads_get(emntDefault));
 
         if (graph)
         {
diff --git a/src/gromacs/mdrun/tpi.cpp b/src/gromacs/mdrun/tpi.cpp
index 424692afe9..47f863e1f9 100644
--- a/src/gromacs/mdrun/tpi.cpp
+++ b/src/gromacs/mdrun/tpi.cpp
@@ -132,6 +132,7 @@ static void realloc_bins(double **bin, int *nbin, int nbin_new)
 namespace gmx
 {
 
+// TODO: Convert to use the nbnxm kernels by putting the system and the test molecule on two separate search grids
 void
 Integrator::do_tpi()
 {
@@ -143,7 +144,7 @@ Integrator::do_tpi()
     t_trxframe          rerun_fr;
     gmx_bool            bDispCorr, bCharge, bRFExcl, bNotLastFrame, bStateChanged, bNS;
     tensor              force_vir, shake_vir, vir, pres;
-    int                 cg_tp, a_tp0, a_tp1, ngid, gid_tp, nener, e;
+    int                 a_tp0, a_tp1, ngid, gid_tp, nener, e;
     rvec               *x_mol;
     rvec                mu_tot, x_init, dx, x_tp;
     int                 nnodes, frame;
@@ -271,12 +272,12 @@ Integrator::do_tpi()
     print_start(fplog, cr, walltime_accounting, "Test Particle Insertion");
 
     /* The last charge group is the group to be inserted */
-    cg_tp = top.cgs.nr - 1;
-    a_tp0 = top.cgs.index[cg_tp];
-    a_tp1 = top.cgs.index[cg_tp+1];
+    const t_atoms &atomsToInsert = top_global->moltype[top_global->molblock.back().type].atoms;
+    a_tp0 = top_global->natoms - atomsToInsert.nr;
+    a_tp1 = top_global->natoms;
     if (debug)
     {
-        fprintf(debug, "TPI cg %d, atoms %d-%d\n", cg_tp, a_tp0, a_tp1);
+        fprintf(debug, "TPI atoms %d-%d\n", a_tp0, a_tp1);
     }
 
     GMX_RELEASE_ASSERT(inputrec->rcoulomb <= inputrec->rlist && inputrec->rvdw <= inputrec->rlist, "Twin-range interactions are not supported with TPI");
 
@@ -296,6 +297,8 @@ Integrator::do_tpi()
     }
     bRFExcl = (bCharge && EEL_RF(fr->ic->eeltype));
 
+    // TODO: Calculate the center of geometry of the molecule to insert
+#if 0
     calc_cgcm(fplog, cg_tp, cg_tp+1, &(top.cgs), state_global->x.rvec_array(), fr->cg_cm);
     if (bCavity)
     {
@@ -313,6 +316,7 @@ Integrator::do_tpi()
             rvec_dec(x_mol[i], fr->cg_cm[cg_tp]);
         }
     }
+#endif
 
     if (fplog)
     {
@@ -346,7 +350,11 @@ Integrator::do_tpi()
     }
 
     ngid   = groups->groups[SimulationAtomGroupType::EnergyOutput].nr;
+    // TODO: Figure out which energy group to use
+#if 0
     gid_tp = GET_CGINFO_GID(fr->cginfo[cg_tp]);
+#endif
+    gid_tp = 0;
     nener  = 1 + ngid;
     if (bDispCorr)
     {
@@ -623,8 +631,11 @@ Integrator::do_tpi()
         clear_mat(vir);
         clear_mat(pres);
 
-        /* Set the charge group center of mass of the test particle */
+        /* Set the center of geometry of the test molecule */
+        // TODO: Compute and set the COG
+#if 0
         copy_rvec(x_init, fr->cg_cm[top.cgs.nr-1]);
+#endif
 
         /* Calc energy (no forces) on new positions.
          * Since we only need the intermolecular energy
diff --git a/src/gromacs/mdtypes/forcerec.h b/src/gromacs/mdtypes/forcerec.h
index b4bc09534f..838dde224c 100644
--- a/src/gromacs/mdtypes/forcerec.h
+++ b/src/gromacs/mdtypes/forcerec.h
@@ -215,9 +215,7 @@ struct t_forcerec { // NOLINT (clang-analyzer-optin.performance.Padding)
     gmx_bool            bGrid = FALSE;
     gmx_bool            bExcl_IntraCGAll_InterCGNone = FALSE;
     struct cginfo_mb_t *cginfo_mb = nullptr;
-    int                *cginfo = nullptr;
-    rvec               *cg_cm = nullptr;
-    int                 cg_nalloc = 0;
+    std::vector<int>    cginfo;
     rvec               *shift_vec = nullptr;
 
     int cutoff_scheme = 0; /* group- or Verlet-style cutoff */
diff --git a/src/gromacs/nbnxm/gridset.cpp b/src/gromacs/nbnxm/gridset.cpp
index b4685a94a1..168fdd8792 100644
--- a/src/gromacs/nbnxm/gridset.cpp
+++ b/src/gromacs/nbnxm/gridset.cpp
@@ -127,7 +127,7 @@ void GridSet::putOnGrid(const matrix                   box,
                         const int                      atomStart,
                         const int                      atomEnd,
                         real                           atomDensity,
-                        const int                     *atinfo,
+                        gmx::ArrayRef<const int>       atomInfo,
                         gmx::ArrayRef<const gmx::RVec> x,
                         const int                      numAtomsMoved,
                         const int                     *move,
@@ -213,7 +213,7 @@ void GridSet::putOnGrid(const matrix box,
     /* Copy the already computed cell indices to the grid and sort, when needed */
     grid.setCellIndices(ddZone, cellOffset, &gridSetData, gridWork_,
-                        atomStart, atomEnd, atinfo, x, numAtomsMoved, nbat);
+                        atomStart, atomEnd, atomInfo.data(), x, numAtomsMoved, nbat);
 
     if (ddZone == 0)
     {
diff --git a/src/gromacs/nbnxm/gridset.h b/src/gromacs/nbnxm/gridset.h
index f75744aa89..e59a8584c1 100644
--- a/src/gromacs/nbnxm/gridset.h
+++ b/src/gromacs/nbnxm/gridset.h
@@ -113,7 +113,7 @@ class GridSet
                       int                            atomStart,
                       int                            atomEnd,
                       real                           atomDensity,
-                      const int                     *atinfo,
+                      gmx::ArrayRef<const int>       atomInfo,
                      gmx::ArrayRef<const gmx::RVec> x,
                      int                            numAtomsMoved,
                      const int                     *move,
diff --git a/src/gromacs/nbnxm/nbnxm.cpp b/src/gromacs/nbnxm/nbnxm.cpp
index 14037ae9be..688bcacac9 100644
--- a/src/gromacs/nbnxm/nbnxm.cpp
+++ b/src/gromacs/nbnxm/nbnxm.cpp
@@ -63,21 +63,21 @@ void nbnxn_put_on_grid(nonbonded_verlet_t             *nb_verlet,
                        int                            atomStart,
                        int                            atomEnd,
                        real                           atomDensity,
-                       const int                     *atinfo,
+                       gmx::ArrayRef<const int>       atomInfo,
                        gmx::ArrayRef<const gmx::RVec> x,
                        int                            numAtomsMoved,
                        const int                     *move)
 {
     nb_verlet->pairSearch_->putOnGrid(box, ddZone, lowerCorner, upperCorner,
                                       updateGroupsCog, atomStart, atomEnd,
                                       atomDensity,
-                                      atinfo, x, numAtomsMoved, move,
+                                      atomInfo, x, numAtomsMoved, move,
                                      nb_verlet->nbat.get());
 }
 
 /* Calls nbnxn_put_on_grid for all non-local domains */
 void nbnxn_put_on_grid_nonlocal(nonbonded_verlet_t              *nbv,
                                 const struct gmx_domdec_zones_t *zones,
-                                const int                       *atinfo,
+                                gmx::ArrayRef<const int>         atomInfo,
                                 gmx::ArrayRef<const gmx::RVec>   x)
 {
     for (int zone = 1; zone < zones->n; zone++)
     {
@@ -95,7 +95,7 @@ void nbnxn_put_on_grid_nonlocal(nonbonded_verlet_t *nbv,
                           zones->cg_range[zone],
                           zones->cg_range[zone+1],
                           -1,
-                          atinfo,
+                          atomInfo,
                           x,
                           0, nullptr);
     }
 }
@@ -126,10 +126,10 @@ void nonbonded_verlet_t::setLocalAtomOrder()
     pairSearch_->setLocalAtomOrder();
 }
 
-void nonbonded_verlet_t::setAtomProperties(const t_mdatoms &mdatoms,
-                                           const int       &atinfo)
+void nonbonded_verlet_t::setAtomProperties(const t_mdatoms          &mdatoms,
+                                           gmx::ArrayRef<const int>  atomInfo)
 {
-    nbnxn_atomdata_set(nbat.get(), pairSearch_->gridSet(), &mdatoms, &atinfo);
+    nbnxn_atomdata_set(nbat.get(), pairSearch_->gridSet(), &mdatoms, atomInfo.data());
 }
 
 void nonbonded_verlet_t::setCoordinates(const Nbnxm::AtomLocality locality,
diff --git a/src/gromacs/nbnxm/nbnxm.h b/src/gromacs/nbnxm/nbnxm.h
index 706cd46010..abf3cf3205 100644
--- a/src/gromacs/nbnxm/nbnxm.h
+++ b/src/gromacs/nbnxm/nbnxm.h
@@ -237,8 +237,8 @@ struct nonbonded_verlet_t
                           t_nrnb                    *nrnb);
 
     //! Updates all the atom properties in Nbnxm
-    void setAtomProperties(const t_mdatoms &mdatoms,
-                           const int       &atinfo);
+    void setAtomProperties(const t_mdatoms          &mdatoms,
+                           gmx::ArrayRef<const int>  atomInfo);
 
     //! Updates the coordinates in Nbnxm for the given locality
     void setCoordinates(Nbnxm::AtomLocality locality,
@@ -359,7 +359,7 @@ void nbnxn_put_on_grid(nonbonded_verlet_t             *nb_verlet,
                        int                            atomStart,
                        int                            atomEnd,
                        real                           atomDensity,
-                       const int                     *atinfo,
+                       gmx::ArrayRef<const int>       atomInfo,
                        gmx::ArrayRef<const gmx::RVec> x,
                        int                            numAtomsMoved,
                        const int                     *move);
@@ -371,7 +371,7 @@ void nbnxn_put_on_grid(nonbonded_verlet_t *nb_verlet,
 */
 void nbnxn_put_on_grid_nonlocal(nonbonded_verlet_t              *nb_verlet,
                                 const struct gmx_domdec_zones_t *zones,
-                                const int                       *atinfo,
+                                gmx::ArrayRef<const int>         atomInfo,
                                 gmx::ArrayRef<const gmx::RVec>   x);
 
 #endif // GMX_NBNXN_NBNXM_H
diff --git a/src/gromacs/nbnxm/pairsearch.h b/src/gromacs/nbnxm/pairsearch.h
index f67037058f..b4ec14b72c 100644
--- a/src/gromacs/nbnxm/pairsearch.h
+++ b/src/gromacs/nbnxm/pairsearch.h
@@ -183,7 +183,7 @@ class PairSearch
                        int                            atomStart,
                        int                            atomEnd,
                        real                           atomDensity,
-                       const int                     *atinfo,
+                       gmx::ArrayRef<const int>       atomInfo,
                        gmx::ArrayRef<const gmx::RVec> x,
                        int                            numAtomsMoved,
                        const int                     *move,
@@ -193,7 +193,7 @@ class PairSearch
         gridSet_.putOnGrid(box, ddZone, lowerCorner, upperCorner,
                            updateGroupsCog, atomStart, atomEnd,
                            atomDensity,
-                           atinfo, x, numAtomsMoved, move, nbat);
+                           atomInfo, x, numAtomsMoved, move, nbat);
 
         cycleCounting_.stop(enbsCCgrid);
     }
diff --git a/src/gromacs/topology/mtop_util.cpp b/src/gromacs/topology/mtop_util.cpp
index 8711e6953f..12e4a44944 100644
--- a/src/gromacs/topology/mtop_util.cpp
+++ b/src/gromacs/topology/mtop_util.cpp
@@ -1119,7 +1119,6 @@ static void gen_local_top(const gmx_mtop_t &mtop,
 {
     copyAtomtypesFromMtop(mtop, &top->atomtypes);
     copyIdefFromMtop(mtop, &top->idef, freeEnergyInteractionsAtEnd, bMergeConstr);
-    copyCgsFromMtop(mtop, &top->cgs);
     copyExclsFromMtop(mtop, &top->excls);
     if (!mtop.intermolecularExclusionGroup.empty())
     {
diff --git a/src/gromacs/topology/topology.cpp b/src/gromacs/topology/topology.cpp
index e7e7d2eccd..ffbc051011 100644
--- a/src/gromacs/topology/topology.cpp
+++ b/src/gromacs/topology/topology.cpp
@@ -175,7 +175,6 @@ void done_top_mtop(t_topology *top, gmx_mtop_t *mtop)
 
 gmx_localtop_t::gmx_localtop_t()
 {
-    init_block_null(&cgs);
     init_blocka_null(&excls);
     init_idef(&idef);
     init_atomtypes(&atomtypes);
@@ -186,7 +185,6 @@ gmx_localtop_t::~gmx_localtop_t()
     if (!useInDomainDecomp_)
     {
         done_idef(&idef);
-        done_block(&cgs);
         done_blocka(&excls);
         done_atomtypes(&atomtypes);
     }
diff --git a/src/gromacs/topology/topology.h b/src/gromacs/topology/topology.h
index f33727fa3f..31bc288694 100644
--- a/src/gromacs/topology/topology.h
+++ b/src/gromacs/topology/topology.h
@@ -215,8 +215,6 @@ struct gmx_localtop_t
     t_idef        idef;
     //! Atomtype properties
     t_atomtypes   atomtypes;
-    //! The charge groups
-    t_block       cgs;
     //! The exclusions
     t_blocka      excls;
     //! Flag for domain decomposition so we don't free already freed memory.
-- 
2.22.0
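
The sketches below illustrate, outside of the patch itself, a few of the patterns it touches; every identifier not present in the diff above is invented for illustration. First, the flag encoding described in dd_redistribute_cg: the group size sits in the lower 16 bits and the per-dimension move flags in the bits above them. A minimal, self-contained model of that packing (the constants are assumptions mirroring the comment, not the real DD_FLAG_* macros):

    #include <cassert>

    // Assumed layout, following the comment in dd_redistribute_cg:
    // bits 0-15 hold the group size (now always 1), the bits above
    // hold the move flags computed per dimension.
    constexpr int lowBitsMask = 0xFFFF;   // stand-in for DD_FLAG_NRCG

    constexpr int packFlag(int groupSize, int moveBits)
    {
        return (moveBits << 16) | groupSize;
    }

    int main()
    {
        const int flag = packFlag(1, 0x5);   // size 1, two move bits set
        assert((flag & lowBitsMask) == 1);   // the size round-trips intact
        assert((flag >> 16) == 0x5);         // so do the move flags
        return 0;
    }

With charge groups gone the size field is constant, which is why the patch asserts (flag & DD_FLAG_NRCG) == 1 on the receiving side and leaves a TODO to drop the field altogether.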
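
Second, the boundary test that calc_cg_move now performs per atom rather than per group. A toy version under simplifying assumptions (no PBC wrapping, no screw boxes, no triclinic correction; all names invented):

    #include <array>

    // dev[d] = +1 if the position crossed the upper cell boundary in
    // dimension d, -1 if it crossed the lower one, 0 if it stays home.
    std::array<int, 3> moveDirection(const std::array<double, 3> &pos,
                                     const std::array<double, 3> &cellLower,
                                     const std::array<double, 3> &cellUpper)
    {
        std::array<int, 3> dev = { 0, 0, 0 };
        for (int d = 0; d < 3; d++)
        {
            if (pos[d] >= cellUpper[d])
            {
                dev[d] = 1;
            }
            else if (pos[d] < cellLower[d])
            {
                dev[d] = -1;
            }
        }
        return dev;
    }

In the real function the resulting dev vector is folded into a single integer by computeMoveFlag and stored in the move array.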
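
Third, the shape of the new cginfo_expand: per-block metadata flattened into one entry per group, returned as std::vector<int> instead of a snew'd array. The same pattern in isolation (BlockInfo is a simplified stand-in for cginfo_mb_t):

    #include <vector>

    struct BlockInfo
    {
        int end;    // first index past this block, like cginfo_mb_t::cg_end
        int info;   // metadata shared by every entry in the block
    };

    std::vector<int> expand(const std::vector<BlockInfo> &blocks)
    {
        std::vector<int> perEntry(blocks.back().end);
        int mb = 0;
        for (int i = 0; i < static_cast<int>(perEntry.size()); i++)
        {
            while (i >= blocks[mb].end)   // advance to the block containing i
            {
                mb++;
            }
            perEntry[i] = blocks[mb].info;
        }
        return perEntry;
    }

Returning the vector by value also removes the need for the matching sfree in done_forcerec, as the diff shows.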
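
Fourth, the TPI code now carries #if 0 blocks pending a reimplementation of the center-of-geometry handling for the molecule to insert. One plausible shape for that computation, written as a plain average over the atom range [a_tp0, a_tp1) identified above (PBC handling deliberately omitted; this is a hypothetical sketch, not the eventual GROMACS implementation):

    #include <array>
    #include <vector>

    using Rvec = std::array<double, 3>;

    // Average the positions of atoms [a_tp0, a_tp1) to get their COG.
    Rvec centerOfGeometry(const std::vector<Rvec> &x, int a_tp0, int a_tp1)
    {
        Rvec cog = { 0.0, 0.0, 0.0 };
        for (int a = a_tp0; a < a_tp1; a++)
        {
            for (int d = 0; d < 3; d++)
            {
                cog[d] += x[a][d];
            }
        }
        const double invN = 1.0 / (a_tp1 - a_tp0);
        for (int d = 0; d < 3; d++)
        {
            cog[d] *= invN;
        }
        return cog;
    }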
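
Finally, the atinfo changes all follow one migration pattern: a raw const int * plus an implicit length becomes a sized view (gmx::ArrayRef<const int>), with .data() bridging to callees that still take pointers, as the nbnxn_atomdata_set call shows. A generic model of that bridge (the view class is a minimal stand-in, not gmx::ArrayRef):

    #include <cstddef>
    #include <vector>

    // Minimal stand-in for a span-like view such as gmx::ArrayRef<const int>.
    class ConstIntRef
    {
        public:
            ConstIntRef(const std::vector<int> &v) : ptr_(v.data()), size_(v.size()) {}
            const int   *data() const { return ptr_; }
            std::size_t  size() const { return size_; }
        private:
            const int   *ptr_;
            std::size_t  size_;
    };

    // A legacy consumer that still expects a raw pointer.
    int firstEntry(const int *atomInfo)
    {
        return atomInfo[0];
    }

    int main()
    {
        std::vector<int> cginfo = { 7, 8, 9 };
        ConstIntRef      view(cginfo);    // size-carrying view of the vector
        return firstEntry(view.data());   // .data() bridges to the old API
    }

The view keeps callers honest about sizes while leaving the legacy pointer interfaces untouched until they are converted in turn.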