#include "gmxpre.h"
+#include "gromacs/utility/arrayref.h"
#include "partition.h"
#include "config.h"
#include "gromacs/domdec/collect.h"
#include "gromacs/domdec/dlb.h"
#include "gromacs/domdec/dlbtiming.h"
-#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_network.h"
#include "gromacs/domdec/ga2la.h"
#include "gromacs/domdec/localatomsetmanager.h"
+#include "gromacs/domdec/localtopology.h"
+#include "gromacs/domdec/localtopologychecker.h"
#include "gromacs/domdec/mdsetup.h"
+#include "gromacs/domdec/nsgrid.h"
#include "gromacs/ewald/pme_pp.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/mdlib/forcerec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/mdatoms.h"
-#include "gromacs/mdlib/nsgrid.h"
#include "gromacs/mdlib/vsite.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/forcerec.h"
fprintf(fp,
"zone d0 %d d1 %d d2 %d min0 %6.3f max1 %6.3f mch0 %6.3f mch1 %6.3f p1_0 %6.3f p1_1 "
"%6.3f\n",
- d, i, j, zone->min0, zone->max1, zone->mch0, zone->mch0, zone->p1_0, zone->p1_1);
+ d,
+ i,
+ j,
+ zone->min0,
+ zone->max1,
+ zone->mch0,
+ zone->mch0,
+ zone->p1_0,
+ zone->p1_1);
}
/*! \brief Using the home grid size as input in cell_ns_x0 and cell_ns_x1
"Here we expect gmx_ddzone_t to consist of c_ddzoneNumReals reals (only)");
int numReals = numElementsInBuffer * c_ddzoneNumReals;
- ddSendrecv(dd, d, dddirBackward, gmx::arrayRefFromArray(&buf_s[0].min0, numReals),
+ ddSendrecv(dd,
+ d,
+ dddirBackward,
+ gmx::arrayRefFromArray(&buf_s[0].min0, numReals),
gmx::arrayRefFromArray(&buf_r[0].min0, numReals));
rvec dh = { 0 };
cellsizes[d].fracUpperMin = extr_s[d - 1][1];
if (debug)
{
- fprintf(debug, "Cell fraction d %d, max0 %f, min1 %f\n", d, cellsizes[d].fracLowerMax,
+ fprintf(debug,
+ "Cell fraction d %d, max0 %f, min1 %f\n",
+ d,
+ cellsizes[d].fracLowerMax,
cellsizes[d].fracUpperMin);
}
}
}
//! Sets the charge-group zones to be equal to the home zone.
-static void set_zones_ncg_home(gmx_domdec_t* dd)
+static void set_zones_numHomeAtoms(gmx_domdec_t* dd)
{
    gmx_domdec_zones_t* zones;
    int i;
    // NOTE(review): `zones` is used unassigned below; presumably a
    // `zones = &dd->comm->zones;` context line was elided from this hunk — confirm
    // against the full source before applying.
    zones->cg_range[0] = 0;
    // With no communicated atoms yet, every zone range collapses onto the
    // home range: all zone boundaries equal the home atom count.
    for (i = 1; i < zones->n + 1; i++)
    {
-        zones->cg_range[i] = dd->ncg_home;
+        zones->cg_range[i] = dd->numHomeAtoms;
    }
-    /* zone_ncg1[0] should always be equal to ncg_home */
-    dd->comm->zone_ncg1[0] = dd->ncg_home;
+    /* zone_ncg1[0] should always be equal to numHomeAtoms */
+    dd->comm->zone_ncg1[0] = dd->numHomeAtoms;
}
//! Restore atom groups for the charge groups.
globalAtomGroupIndices[i] = atomGroupsState[i];
}
- dd->ncg_home = atomGroupsState.size();
+ dd->numHomeAtoms = atomGroupsState.size();
dd->comm->atomRanges.setEnd(DDAtomRanges::Type::Home, atomGroupsState.ssize());
- set_zones_ncg_home(dd);
+ set_zones_numHomeAtoms(dd);
}
-//! Sets the cginfo structures.
-static void dd_set_cginfo(gmx::ArrayRef<const int> index_gl, int cg0, int cg1, t_forcerec* fr)
+//! Sets the atom info structures for local atoms in the range [cg0, cg1).
+static void dd_set_atominfo(gmx::ArrayRef<const int> index_gl, int cg0, int cg1, t_forcerec* fr)
{
    // fr == nullptr means the atom info is already up to date and this call
    // is a deliberate no-op (the caller passes nullptr for exactly that case).
    if (fr != nullptr)
    {
-        gmx::ArrayRef<cginfo_mb_t> cginfo_mb = fr->cginfo_mb;
-        gmx::ArrayRef<int> cginfo = fr->cginfo;
+        gmx::ArrayRef<gmx::AtomInfoWithinMoleculeBlock> atomInfoForEachMoleculeBlock =
+                fr->atomInfoForEachMoleculeBlock;
+        gmx::ArrayRef<int64_t> atomInfo = fr->atomInfo;
        // Look up each local atom's info by its global index in the
        // per-molecule-block table and cache it in the local array.
        for (int cg = cg0; cg < cg1; cg++)
        {
-            cginfo[cg] = ddcginfo(cginfo_mb, index_gl[cg]);
+            atomInfo[cg] = ddGetAtomInfo(atomInfoForEachMoleculeBlock, index_gl[cg]);
        }
    }
}
static void make_dd_indices(gmx_domdec_t* dd, const int atomStart)
{
const int numZones = dd->comm->zones.n;
- const int* zone2cg = dd->comm->zones.cg_range;
- const int* zone_ncg1 = dd->comm->zone_ncg1;
+ gmx::ArrayRef<const int> zone2cg = dd->comm->zones.cg_range;
+ gmx::ArrayRef<const int> zone_ncg1 = dd->comm->zone_ncg1;
gmx::ArrayRef<const int> globalAtomGroupIndices = dd->globalAtomGroupIndices;
std::vector<int>& globalAtomIndices = dd->globalAtomIndices;
gmx_ga2la_t& ga2la = *dd->ga2la;
- if (zone2cg[1] != dd->ncg_home)
+ if (zone2cg[1] != dd->numHomeAtoms)
{
gmx_incons("dd->ncg_zone is not up to date");
}
int globalAtomIndex = dd->globalAtomIndices[a];
if (have[globalAtomIndex] > 0)
{
- fprintf(stderr, "DD rank %d: global atom %d occurs twice: index %d and %d\n",
- dd->rank, globalAtomIndex + 1, have[globalAtomIndex], a + 1);
+ fprintf(stderr,
+ "DD rank %d: global atom %d occurs twice: index %d and %d\n",
+ dd->rank,
+ globalAtomIndex + 1,
+ have[globalAtomIndex],
+ a + 1);
}
else
{
int ngl = 0;
for (int i = 0; i < natoms_sys; i++)
{
- if (const auto entry = dd->ga2la->find(i))
+ if (const auto* entry = dd->ga2la->find(i))
{
const int a = entry->la;
if (a >= numAtomsInZones)
fprintf(stderr,
"DD rank %d: global atom %d marked as local atom %d, which is larger than "
"nat_tot (%d)\n",
- dd->rank, i + 1, a + 1, numAtomsInZones);
+ dd->rank,
+ i + 1,
+ a + 1,
+ numAtomsInZones);
nerr++;
}
else
fprintf(stderr,
"DD rank %d: global atom %d marked as local atom %d, which has global "
"atom index %d\n",
- dd->rank, i + 1, a + 1, dd->globalAtomIndices[a] + 1);
+ dd->rank,
+ i + 1,
+ a + 1,
+ dd->globalAtomIndices[a] + 1);
nerr++;
}
}
}
if (ngl != numAtomsInZones)
{
- fprintf(stderr, "DD rank %d, %s: %d global atom indices, %d local atoms\n", dd->rank, where,
- ngl, numAtomsInZones);
+ fprintf(stderr, "DD rank %d, %s: %d global atom indices, %d local atoms\n", dd->rank, where, ngl, numAtomsInZones);
}
for (int a = 0; a < numAtomsInZones; a++)
{
if (have[a] == 0)
{
- fprintf(stderr, "DD rank %d, %s: local atom %d, global %d has no global index\n",
- dd->rank, where, a + 1, dd->globalAtomIndices[a] + 1);
+ fprintf(stderr,
+ "DD rank %d, %s: local atom %d, global %d has no global index\n",
+ dd->rank,
+ where,
+ a + 1,
+ dd->globalAtomIndices[a] + 1);
}
}
}
}
-bool check_grid_jump(int64_t step, const gmx_domdec_t* dd, real cutoff, const gmx_ddbox_t* ddbox, gmx_bool bFatal)
-{
- gmx_domdec_comm_t* comm = dd->comm;
- bool invalid = false;
-
- for (int d = 1; d < dd->ndim; d++)
- {
- const DDCellsizesWithDlb& cellsizes = comm->cellsizesWithDlb[d];
- const int dim = dd->dim[d];
- const real limit = grid_jump_limit(comm, cutoff, d);
- real bfac = ddbox->box_size[dim];
- if (ddbox->tric_dir[dim])
- {
- bfac *= ddbox->skew_fac[dim];
- }
- if ((cellsizes.fracUpper - cellsizes.fracLowerMax) * bfac < limit
- || (cellsizes.fracLower - cellsizes.fracUpperMin) * bfac > -limit)
- {
- invalid = true;
-
- if (bFatal)
- {
- char buf[22];
-
- /* This error should never be triggered under normal
- * circumstances, but you never know ...
- */
- gmx_fatal(FARGS,
- "step %s: The domain decomposition grid has shifted too much in the "
- "%c-direction around cell %d %d %d. This should not have happened. "
- "Running with fewer ranks might avoid this issue.",
- gmx_step_str(step, buf), dim2char(dim), dd->ci[XX], dd->ci[YY], dd->ci[ZZ]);
- }
- }
- }
-
- return invalid;
-}
//! Return the duration of force calculations on this rank.
static float dd_force_load(gmx_domdec_comm_t* comm)
{
"step %s: The %c-size (%f) times the triclinic skew factor (%f) is smaller "
"than the smallest allowed cell size (%f) for domain decomposition grid cell "
"%d %d %d",
- gmx_step_str(step, buf), dim2char(dim),
- comm->cell_x1[dim] - comm->cell_x0[dim], ddbox->skew_fac[dim],
- dd->comm->cellsize_min[dim], dd->ci[XX], dd->ci[YY], dd->ci[ZZ]);
+ gmx_step_str(step, buf),
+ dim2char(dim),
+ comm->cell_x1[dim] - comm->cell_x0[dim],
+ ddbox->skew_fac[dim],
+ dd->comm->cellsize_min[dim],
+ dd->ci[XX],
+ dd->ci[YY],
+ dd->ci[ZZ]);
}
}
dd_move_cellx(dd, ddbox, cell_ns_x0, cell_ns_x1);
if (isDlbOn(dd->comm) && dd->ndim > 1)
{
- check_grid_jump(step, dd, dd->comm->systemInfo.cutoff, ddbox, TRUE);
+ gmx::check_grid_jump(step, dd, dd->comm->systemInfo.cutoff, ddbox, TRUE);
}
}
}
//! Compute and communicate to determine the load distribution across PP ranks.
-static void get_load_distribution(gmx_domdec_t* dd, gmx_wallcycle_t wcycle)
+static void get_load_distribution(gmx_domdec_t* dd, gmx_wallcycle* wcycle)
{
gmx_domdec_comm_t* comm;
domdec_load_t* load;
fprintf(debug, "get_load_distribution start\n");
}
- wallcycle_start(wcycle, ewcDDCOMMLOAD);
+ wallcycle_start(wcycle, WallCycleCounter::DDCommLoad);
comm = dd->comm;
comm->load[0].pme = comm->cycl[ddCyclPME];
}
+ // Either we have DLB off, or we have it on and the array is large enough
+ GMX_ASSERT(!isDlbOn(dd->comm) || static_cast<int>(dd->comm->cellsizesWithDlb.size()) == dd->ndim,
+ "DLB cell sizes data not set up properly ");
for (int d = dd->ndim - 1; d >= 0; d--)
{
- const DDCellsizesWithDlb* cellsizes = (isDlbOn(dd->comm) ? &comm->cellsizesWithDlb[d] : nullptr);
- const int dim = dd->dim[d];
+ const int dim = dd->dim[d];
/* Check if we participate in the communication in this dimension */
if (d == dd->ndim - 1 || (dd->ci[dd->dim[d + 1]] == 0 && dd->ci[dd->dim[dd->ndim - 1]] == 0))
{
load = &comm->load[d];
if (isDlbOn(dd->comm))
{
- cell_frac = cellsizes->fracUpper - cellsizes->fracLower;
+ cell_frac = comm->cellsizesWithDlb[d].fracUpper - comm->cellsizesWithDlb[d].fracLower;
}
int pos = 0;
if (d == dd->ndim - 1)
sbuf[pos++] = cell_frac;
if (d > 0)
{
- sbuf[pos++] = cellsizes->fracLowerMax;
- sbuf[pos++] = cellsizes->fracUpperMin;
+ sbuf[pos++] = comm->cellsizesWithDlb[d].fracLowerMax;
+ sbuf[pos++] = comm->cellsizesWithDlb[d].fracUpperMin;
}
}
if (bSepPME)
sbuf[pos++] = comm->load[d + 1].flags;
if (d > 0)
{
- sbuf[pos++] = cellsizes->fracLowerMax;
- sbuf[pos++] = cellsizes->fracUpperMin;
+ sbuf[pos++] = comm->cellsizesWithDlb[d].fracLowerMax;
+ sbuf[pos++] = comm->cellsizesWithDlb[d].fracUpperMin;
}
}
if (bSepPME)
* The communicators are setup such that the root always has rank 0.
*/
#if GMX_MPI
- MPI_Gather(sbuf, load->nload * sizeof(float), MPI_BYTE, load->load,
- load->nload * sizeof(float), MPI_BYTE, 0, comm->mpi_comm_load[d]);
+ MPI_Gather(sbuf,
+ load->nload * sizeof(float),
+ MPI_BYTE,
+ load->load,
+ load->nload * sizeof(float),
+ MPI_BYTE,
+ 0,
+ comm->mpi_comm_load[d]);
#endif
if (dd->ci[dim] == dd->master_ci[dim])
{
if (isDlbOn(comm))
{
- rowMaster = cellsizes->rowMaster.get();
+ rowMaster = comm->cellsizesWithDlb[d].rowMaster.get();
}
load->sum = 0;
load->max = 0;
}
}
- wallcycle_stop(wcycle, ewcDDCOMMLOAD);
+ wallcycle_stop(wcycle, WallCycleCounter::DDCommLoad);
if (debug)
{
sprintf(buf, " Average PME mesh/force load: %5.3f\n", pmeForceRatio);
fprintf(fplog, "%s", buf);
fprintf(stderr, "%s", buf);
- sprintf(buf, " Part of the total run time spent waiting due to PP/PME imbalance: %.1f %%\n",
+ sprintf(buf,
+ " Part of the total run time spent waiting due to PP/PME imbalance: %.1f %%\n",
std::fabs(lossFractionPme) * 100);
fprintf(fplog, "%s", buf);
fprintf(stderr, "%s", buf);
" had %s work to do than the PP ranks.\n"
" You might want to %s the number of PME ranks\n"
" or %s the cut-off and the grid spacing.\n",
- std::fabs(lossFractionPme * 100), (lossFractionPme < 0) ? "less" : "more",
+ std::fabs(lossFractionPme * 100),
+ (lossFractionPme < 0) ? "less" : "more",
(lossFractionPme < 0) ? "decrease" : "increase",
(lossFractionPme < 0) ? "decrease" : "increase");
fprintf(fplog, "%s\n", buf);
"step %s Measured %.1f %% performance loss due to load imbalance, "
"but the minimum cell size is smaller than 1.05 times the cell size limit. "
"Will no longer try dynamic load balancing.",
- gmx::toString(step).c_str(), dd_force_imb_perf_loss(dd) * 100);
+ gmx::toString(step).c_str(),
+ dd_force_imb_perf_loss(dd) * 100);
comm->dlbState = DlbState::offForever;
return;
.appendTextFormatted(
"step %s Turning on dynamic load balancing, because the performance loss due "
"to load imbalance is %.1f %%.",
- gmx::toString(step).c_str(), dd_force_imb_perf_loss(dd) * 100);
+ gmx::toString(step).c_str(),
+ dd_force_imb_perf_loss(dd) * 100);
comm->dlbState = DlbState::onCanTurnOff;
/* Store the non-DLB performance, so we can check if DLB actually
}
//! Merge atom buffers.
-static void merge_cg_buffers(int ncell,
- gmx_domdec_comm_dim_t* cd,
- int pulse,
- int* ncg_cell,
- gmx::ArrayRef<int> index_gl,
- const int* recv_i,
- gmx::ArrayRef<gmx::RVec> x,
- gmx::ArrayRef<const gmx::RVec> recv_vr,
- gmx::ArrayRef<cginfo_mb_t> cginfo_mb,
- gmx::ArrayRef<int> cginfo)
+static void merge_cg_buffers(int ncell,
+ gmx_domdec_comm_dim_t* cd,
+ int pulse,
+ int* ncg_cell,
+ gmx::ArrayRef<int> index_gl,
+ const int* recv_i,
+ gmx::ArrayRef<gmx::RVec> x,
+ gmx::ArrayRef<const gmx::RVec> recv_vr,
+ gmx::ArrayRef<gmx::AtomInfoWithinMoleculeBlock> atomInfoForEachMoleculeBlock,
+ gmx::ArrayRef<int64_t> atomInfo)
{
gmx_domdec_ind_t *ind, *ind_p;
int p, cell, c, cg, cg0, cg1, cg_gl;
{
index_gl[cg + shift] = index_gl[cg];
x[cg + shift] = x[cg];
- cginfo[cg + shift] = cginfo[cg];
+ atomInfo[cg + shift] = atomInfo[cg];
}
/* Correct the already stored send indices for the shift */
for (p = 1; p <= pulse; p++)
index_gl[cg1] = recv_i[cg0];
x[cg1] = recv_vr[cg0];
/* Copy information */
- cg_gl = index_gl[cg1];
- cginfo[cg1] = ddcginfo(cginfo_mb, cg_gl);
+ cg_gl = index_gl[cg1];
+ atomInfo[cg1] = ddGetAtomInfo(atomInfoForEachMoleculeBlock, cg_gl);
cg0++;
cg1++;
}
}
}
-/*! \brief Add the atom groups we need to send in this pulse from this
- * zone to \p localAtomGroups and \p work. */
-static void get_zone_pulse_cgs(gmx_domdec_t* dd,
- int zonei,
- int zone,
- int cg0,
- int cg1,
- gmx::ArrayRef<const int> globalAtomGroupIndices,
- int dim,
- int dim_ind,
- int dim0,
- int dim1,
- int dim2,
- real r_comm2,
- real r_bcomm2,
- matrix box,
- bool distanceIsTriclinic,
- rvec* normal,
- real skew_fac2_d,
- real skew_fac_01,
- rvec* v_d,
- rvec* v_0,
- rvec* v_1,
- const dd_corners_t* c,
- const rvec sf2_round,
- gmx_bool bDistBonded,
- gmx_bool bBondComm,
- gmx_bool bDist2B,
- gmx_bool bDistMB,
- rvec* cg_cm,
- gmx::ArrayRef<const int> cginfo,
- std::vector<int>* localAtomGroups,
- dd_comm_setup_work_t* work)
+/*! \brief Add the atom groups and coordinates we need to send in this
+ * pulse from this zone to \p localAtomGroups and \p work. */
+static void get_zone_pulse_groups(gmx_domdec_t* dd,
+ int zonei,
+ int zone,
+ int cg0,
+ int cg1,
+ gmx::ArrayRef<const int> globalAtomGroupIndices,
+ int dim,
+ int dim_ind,
+ int dim0,
+ int dim1,
+ int dim2,
+ real r_comm2,
+ real r_bcomm2,
+ matrix box,
+ bool distanceIsTriclinic,
+ rvec* normal,
+ real skew_fac2_d,
+ real skew_fac_01,
+ rvec* v_d,
+ rvec* v_0,
+ rvec* v_1,
+ const dd_corners_t* c,
+ const rvec sf2_round,
+ gmx_bool bDistBonded,
+ gmx_bool bBondComm,
+ gmx_bool bDist2B,
+ gmx_bool bDistMB,
+ gmx::ArrayRef<const gmx::RVec> coordinates,
+ gmx::ArrayRef<const int64_t> atomInfo,
+ std::vector<int>* localAtomGroups,
+ dd_comm_setup_work_t* work)
{
gmx_domdec_comm_t* comm;
gmx_bool bScrew;
if (!distanceIsTriclinic)
{
/* Rectangular direction, easy */
- r = cg_cm[cg][dim] - c->c[dim_ind][zone];
+ r = coordinates[cg][dim] - c->c[dim_ind][zone];
if (r > 0)
{
r2 += r * r;
}
if (bDistMB_pulse)
{
- r = cg_cm[cg][dim] - c->bc[dim_ind];
+ r = coordinates[cg][dim] - c->bc[dim_ind];
if (r > 0)
{
rb2 += r * r;
*/
if (dim_ind >= 1 && (zonei == 1 || zonei == 2))
{
- r = cg_cm[cg][dim0] - c->cr0;
+ r = coordinates[cg][dim0] - c->cr0;
/* This is the first dimension, so always r >= 0 */
r2 += r * r;
if (bDistMB_pulse)
}
if (dim_ind == 2 && (zonei == 2 || zonei == 3))
{
- r = cg_cm[cg][dim1] - c->cr1[zone];
+ r = coordinates[cg][dim1] - c->cr1[zone];
if (r > 0)
{
r2 += r * r;
}
if (bDistMB_pulse)
{
- r = cg_cm[cg][dim1] - c->bcr1;
+ r = coordinates[cg][dim1] - c->bcr1;
if (r > 0)
{
rb2 += r * r;
*/
if (dim_ind >= 1 && (zonei == 1 || zonei == 2))
{
- rn[dim0] = cg_cm[cg][dim0] - c->cr0;
+ rn[dim0] = coordinates[cg][dim0] - c->cr0;
for (i = dim0 + 1; i < DIM; i++)
{
- rn[dim0] -= cg_cm[cg][i] * v_0[i][dim0];
+ rn[dim0] -= coordinates[cg][i] * v_0[i][dim0];
}
r2 = rn[dim0] * rn[dim0] * sf2_round[dim0];
if (bDistMB_pulse)
if (dim_ind == 2 && (zonei == 2 || zonei == 3))
{
GMX_ASSERT(dim1 >= 0 && dim1 < DIM, "Must have a valid dimension index");
- rn[dim1] += cg_cm[cg][dim1] - c->cr1[zone];
+ rn[dim1] += coordinates[cg][dim1] - c->cr1[zone];
tric_sh = 0;
for (i = dim1 + 1; i < DIM; i++)
{
- tric_sh -= cg_cm[cg][i] * v_1[i][dim1];
+ tric_sh -= coordinates[cg][i] * v_1[i][dim1];
}
rn[dim1] += tric_sh;
if (rn[dim1] > 0)
}
if (bDistMB_pulse)
{
- rb[dim1] += cg_cm[cg][dim1] - c->bcr1 + tric_sh;
+ rb[dim1] += coordinates[cg][dim1] - c->bcr1 + tric_sh;
if (rb[dim1] > 0)
{
rb2 += rb[dim1] * rb[dim1] * sf2_round[dim1];
}
}
/* The distance along the communication direction */
- rn[dim] += cg_cm[cg][dim] - c->c[dim_ind][zone];
+ rn[dim] += coordinates[cg][dim] - c->c[dim_ind][zone];
tric_sh = 0;
for (i = dim + 1; i < DIM; i++)
{
- tric_sh -= cg_cm[cg][i] * v_d[i][dim];
+ tric_sh -= coordinates[cg][i] * v_d[i][dim];
}
rn[dim] += tric_sh;
if (rn[dim] > 0)
{
clear_rvec(rb);
GMX_ASSERT(dim >= 0 && dim < DIM, "Must have a valid dimension index");
- rb[dim] += cg_cm[cg][dim] - c->bc[dim_ind] + tric_sh;
+ rb[dim] += coordinates[cg][dim] - c->bc[dim_ind] + tric_sh;
if (rb[dim] > 0)
{
rb2 += rb[dim] * rb[dim] * skew_fac2_d;
if (r2 < r_comm2
|| (bDistBonded && ((bDistMB && rb2 < r_bcomm2) || (bDist2B && r2 < r_bcomm2))
&& (!bBondComm
- || (GET_CGINFO_BOND_INTER(cginfo[cg])
+ || ((atomInfo[cg] & gmx::sc_atomInfo_BondCommunication)
&& missing_link(*comm->bondedLinks, globalAtomGroupIndices[cg], *dd->ga2la)))))
{
/* Store the local and global atom group indices and position */
rvec posPbc;
if (dd->ci[dim] == 0)
{
- /* Correct cg_cm for pbc */
- rvec_add(cg_cm[cg], box[dim], posPbc);
+ /* Correct coordinates for pbc */
+ rvec_add(coordinates[cg], box[dim], posPbc);
if (bScrew)
{
posPbc[YY] = box[YY][YY] - posPbc[YY];
}
else
{
- copy_rvec(cg_cm[cg], posPbc);
+ copy_rvec(coordinates[cg], posPbc);
}
vbuf.emplace_back(posPbc[XX], posPbc[YY], posPbc[ZZ]);
* This can not be done in init_domain_decomposition,
* as the numbers of threads is determined later.
*/
- int numThreads = gmx_omp_nthreads_get(emntDomdec);
+ int numThreads = gmx_omp_nthreads_get(ModuleMultiThread::Domdec);
comm->dth.resize(numThreads);
}
v_1 = ddbox->v[dim1];
}
- zone_cg_range = zones->cg_range;
- gmx::ArrayRef<cginfo_mb_t> cginfo_mb = fr->cginfo_mb;
+ zone_cg_range = zones->cg_range.data();
+ gmx::ArrayRef<gmx::AtomInfoWithinMoleculeBlock> atomInfoForEachMoleculeBlock =
+ fr->atomInfoForEachMoleculeBlock;
zone_cg_range[0] = 0;
- zone_cg_range[1] = dd->ncg_home;
- comm->zone_ncg1[0] = dd->ncg_home;
- pos_cg = dd->ncg_home;
+ zone_cg_range[1] = dd->numHomeAtoms;
+ comm->zone_ncg1[0] = dd->numHomeAtoms;
+ pos_cg = dd->numHomeAtoms;
nat_tot = comm->atomRanges.numHomeAtoms();
nzone = 1;
int cg0_th = cg0 + ((cg1 - cg0) * th) / numThreads;
int cg1_th = cg0 + ((cg1 - cg0) * (th + 1)) / numThreads;
- /* Get the cg's for this pulse in this zone */
- get_zone_pulse_cgs(dd, zonei, zone, cg0_th, cg1_th, dd->globalAtomGroupIndices,
- dim, dim_ind, dim0, dim1, dim2, r_comm2, r_bcomm2, box,
- distanceIsTriclinic, normal, skew_fac2_d, skew_fac_01,
- v_d, v_0, v_1, &corners, sf2_round, bDistBonded, bBondComm,
- bDist2B, bDistMB, state->x.rvec_array(), fr->cginfo,
- th == 0 ? &ind->index : &work.localAtomGroupBuffer, &work);
+ /* Get the atom groups and coordinates for this pulse in this zone */
+ get_zone_pulse_groups(dd,
+ zonei,
+ zone,
+ cg0_th,
+ cg1_th,
+ dd->globalAtomGroupIndices,
+ dim,
+ dim_ind,
+ dim0,
+ dim1,
+ dim2,
+ r_comm2,
+ r_bcomm2,
+ box,
+ distanceIsTriclinic,
+ normal,
+ skew_fac2_d,
+ skew_fac_01,
+ v_d,
+ v_0,
+ v_1,
+ &corners,
+ sf2_round,
+ bDistBonded,
+ bBondComm,
+ bDist2B,
+ bDistMB,
+ state->x,
+ fr->atomInfo,
+ th == 0 ? &ind->index : &work.localAtomGroupBuffer,
+ &work);
}
GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
} // END
{
const dd_comm_setup_work_t& dth = comm->dth[th];
- ind->index.insert(ind->index.end(), dth.localAtomGroupBuffer.begin(),
+ ind->index.insert(ind->index.end(),
+ dth.localAtomGroupBuffer.begin(),
dth.localAtomGroupBuffer.end());
- atomGroups.insert(atomGroups.end(), dth.atomGroupBuffer.begin(),
- dth.atomGroupBuffer.end());
- positions.insert(positions.end(), dth.positionBuffer.begin(),
- dth.positionBuffer.end());
+ atomGroups.insert(
+ atomGroups.end(), dth.atomGroupBuffer.begin(), dth.atomGroupBuffer.end());
+ positions.insert(
+ positions.end(), dth.positionBuffer.begin(), dth.positionBuffer.end());
comm->dth[0].nat += dth.nat;
ind->nsend[zone] += dth.nsend_zone;
}
}
ddSendrecv<int>(dd, dim_ind, dddirBackward, work.atomGroupBuffer, integerBufferRef);
- /* Make space for cg_cm */
+ /* Make space for atominfo */
dd_resize_atominfo_and_state(fr, state, pos_cg + ind->nrecv[nzone]);
/* Communicate the coordinates */
for (int i = 0; i < ind->nrecv[zone]; i++)
{
int globalAtomIndex = dd->globalAtomGroupIndices[pos_cg];
- fr->cginfo[pos_cg] = ddcginfo(cginfo_mb, globalAtomIndex);
+ fr->atomInfo[pos_cg] =
+ ddGetAtomInfo(atomInfoForEachMoleculeBlock, globalAtomIndex);
pos_cg++;
}
if (p == 0)
else
{
/* This part of the code is never executed with bBondComm. */
- merge_cg_buffers(nzone, cd, p, zone_cg_range, dd->globalAtomGroupIndices,
- integerBufferRef.data(), state->x, rvecBufferRef, fr->cginfo_mb,
- fr->cginfo);
+ merge_cg_buffers(nzone,
+ cd,
+ p,
+ zone_cg_range,
+ dd->globalAtomGroupIndices,
+ integerBufferRef.data(),
+ state->x,
+ rvecBufferRef,
+ fr->atomInfoForEachMoleculeBlock,
+ fr->atomInfo);
pos_cg += ind->nrecv[nzone];
}
nat_tot += ind->nrecv[nzone + 1];
if (!bBondComm)
{
- /* We don't need to update cginfo, since that was alrady done above.
+ /* We don't need to update atominfo, since that was already done above.
* So we pass NULL for the forcerec.
*/
- dd_set_cginfo(dd->globalAtomGroupIndices, dd->ncg_home, dd->globalAtomGroupIndices.size(), nullptr);
+ dd_set_atominfo(
+ dd->globalAtomGroupIndices, dd->numHomeAtoms, dd->globalAtomGroupIndices.size(), nullptr);
}
if (debug)
{
for (z = zone_start; z < zone_end; z++)
{
- fprintf(debug, "zone %d %6.3f - %6.3f %6.3f - %6.3f %6.3f - %6.3f\n", z,
- zones->size[z].x0[XX], zones->size[z].x1[XX], zones->size[z].x0[YY],
- zones->size[z].x1[YY], zones->size[z].x0[ZZ], zones->size[z].x1[ZZ]);
- fprintf(debug, "zone %d bb %6.3f - %6.3f %6.3f - %6.3f %6.3f - %6.3f\n", z,
- zones->size[z].bb_x0[XX], zones->size[z].bb_x1[XX], zones->size[z].bb_x0[YY],
- zones->size[z].bb_x1[YY], zones->size[z].bb_x0[ZZ], zones->size[z].bb_x1[ZZ]);
+ fprintf(debug,
+ "zone %d %6.3f - %6.3f %6.3f - %6.3f %6.3f - %6.3f\n",
+ z,
+ zones->size[z].x0[XX],
+ zones->size[z].x1[XX],
+ zones->size[z].x0[YY],
+ zones->size[z].x1[YY],
+ zones->size[z].x0[ZZ],
+ zones->size[z].x1[ZZ]);
+ fprintf(debug,
+ "zone %d bb %6.3f - %6.3f %6.3f - %6.3f %6.3f - %6.3f\n",
+ z,
+ zones->size[z].bb_x0[XX],
+ zones->size[z].bb_x1[XX],
+ zones->size[z].bb_x0[YY],
+ zones->size[z].bb_x1[YY],
+ zones->size[z].bb_x0[ZZ],
+ zones->size[z].bb_x1[ZZ]);
}
}
}
dd_sort_order_nbnxn(fr, &sort->sorted);
/* We alloc with the old size, since cgindex is still old */
- DDBufferAccess<gmx::RVec> rvecBuffer(dd->comm->rvecBuffer, dd->ncg_home);
+ DDBufferAccess<gmx::RVec> rvecBuffer(dd->comm->rvecBuffer, dd->numHomeAtoms);
/* Set the new home atom/charge group count */
- dd->ncg_home = sort->sorted.size();
+ dd->numHomeAtoms = sort->sorted.size();
if (debug)
{
- fprintf(debug, "Set the new home atom count to %d\n", dd->ncg_home);
+ fprintf(debug, "Set the new home atom count to %d\n", dd->numHomeAtoms);
}
/* Reorder the state */
gmx::ArrayRef<const gmx_cgsort_t> cgsort = sort->sorted;
- GMX_RELEASE_ASSERT(cgsort.ssize() == dd->ncg_home, "We should sort all the home atom groups");
+ GMX_RELEASE_ASSERT(cgsort.ssize() == dd->numHomeAtoms,
+ "We should sort all the home atom groups");
- if (state->flags & (1 << estX))
+ if (state->flags & enumValueToBitMask(StateEntry::X))
{
orderVector(cgsort, makeArrayRef(state->x), rvecBuffer.buffer);
}
- if (state->flags & (1 << estV))
+ if (state->flags & enumValueToBitMask(StateEntry::V))
{
orderVector(cgsort, makeArrayRef(state->v), rvecBuffer.buffer);
}
- if (state->flags & (1 << estCGP))
+ if (state->flags & enumValueToBitMask(StateEntry::Cgp))
{
orderVector(cgsort, makeArrayRef(state->cg_p), rvecBuffer.buffer);
}
/* Reorder the global cg index */
orderVector<int>(cgsort, dd->globalAtomGroupIndices, &sort->intBuffer);
- /* Reorder the cginfo */
- orderVector<int>(cgsort, fr->cginfo, &sort->intBuffer);
+ /* Reorder the atom info */
+ orderVector<int64_t>(cgsort, fr->atomInfo, &sort->int64Buffer);
/* Set the home atom number */
- dd->comm->atomRanges.setEnd(DDAtomRanges::Type::Home, dd->ncg_home);
+ dd->comm->atomRanges.setEnd(DDAtomRanges::Type::Home, dd->numHomeAtoms);
/* The atoms are now exactly in grid order, update the grid order */
fr->nbv->setLocalAtomOrder();
comm->load_pme = 0;
}
-void print_dd_statistics(const t_commrec* cr, const t_inputrec* ir, FILE* fplog)
+namespace gmx
+{
+
+/*! \brief Checks whether the DLB-staggered cell boundaries of neighboring
+ * domains have drifted too close together for the communication cut-off
+ * \p cutoff to remain valid.
+ *
+ * Returns true when the grid is invalid; with \p bFatal set, an invalid
+ * grid aborts the run via gmx_fatal instead of merely reporting. */
+bool check_grid_jump(int64_t step, const gmx_domdec_t* dd, real cutoff, const gmx_ddbox_t* ddbox, bool bFatal)
+{
+    gmx_domdec_comm_t* comm = dd->comm;
+    bool invalid = false;
+
+    // Dimension index 0 is not staggered, so only dimensions 1..ndim-1 can jump.
+    for (int d = 1; d < dd->ndim; d++)
+    {
+        const DDCellsizesWithDlb& cellsizes = comm->cellsizesWithDlb[d];
+        const int dim = dd->dim[d];
+        const real limit = grid_jump_limit(comm, cutoff, d);
+        real bfac = ddbox->box_size[dim];
+        if (ddbox->tric_dir[dim])
+        {
+            // Triclinic dimension: apply the skew factor so fractions scale
+            // to real-space distances.
+            bfac *= ddbox->skew_fac[dim];
+        }
+        // The gap between this cell's boundary fractions and the extremes of
+        // the neighboring cells' boundaries must be at least `limit` on both
+        // sides; otherwise the decomposition grid has shifted too far.
+        if ((cellsizes.fracUpper - cellsizes.fracLowerMax) * bfac < limit
+            || (cellsizes.fracLower - cellsizes.fracUpperMin) * bfac > -limit)
+        {
+            invalid = true;
+
+            if (bFatal)
+            {
+                char buf[22];
+
+                /* This error should never be triggered under normal
+                 * circumstances, but you never know ...
+                 */
+                gmx_fatal(FARGS,
+                          "step %s: The domain decomposition grid has shifted too much in the "
+                          "%c-direction around cell %d %d %d. This should not have happened. "
+                          "Running with fewer ranks might avoid this issue.",
+                          gmx_step_str(step, buf),
+                          dim2char(dim),
+                          dd->ci[XX],
+                          dd->ci[YY],
+                          dd->ci[ZZ]);
+            }
+        }
+    }
+
+    return invalid;
+}
+
+void print_dd_statistics(const t_commrec* cr, const t_inputrec& inputrec, FILE* fplog)
{
gmx_domdec_comm_t* comm = cr->dd->comm;
case DDAtomRanges::Type::Vsites:
if (cr->dd->vsite_comm)
{
- fprintf(fplog, " av. #atoms communicated per step for vsites: %d x %.1f\n",
- (EEL_PME(ir->coulombtype) || ir->coulombtype == eelEWALD) ? 3 : 2, av);
+ fprintf(fplog,
+ " av. #atoms communicated per step for vsites: %d x %.1f\n",
+ (EEL_PME(inputrec.coulombtype)
+ || inputrec.coulombtype == CoulombInteractionType::Ewald)
+ ? 3
+ : 2,
+ av);
}
break;
case DDAtomRanges::Type::Constraints:
if (cr->dd->constraint_comm)
{
- fprintf(fplog, " av. #atoms communicated per step for LINCS: %d x %.1f\n",
- 1 + ir->nLincsIter, av);
+ fprintf(fplog,
+ " av. #atoms communicated per step for LINCS: %d x %.1f\n",
+ 1 + inputrec.nLincsIter,
+ av);
}
break;
default: gmx_incons(" Unknown type for DD statistics");
}
fprintf(fplog, "\n");
- if (comm->ddSettings.recordLoad && EI_DYNAMICS(ir->eI))
+ if (comm->ddSettings.recordLoad && EI_DYNAMICS(inputrec.eI))
{
print_dd_load_av(fplog, cr->dd);
}
const gmx::MDLogger& mdlog,
int64_t step,
const t_commrec* cr,
- gmx_bool bMasterState,
+ bool bMasterState,
int nstglobalcomm,
t_state* state_global,
const gmx_mtop_t& top_global,
- const t_inputrec* ir,
+ const t_inputrec& inputrec,
gmx::ImdSession* imdSession,
pull_t* pull_work,
t_state* state_local,
gmx::Constraints* constr,
t_nrnb* nrnb,
gmx_wallcycle* wcycle,
- gmx_bool bVerbose)
+ bool bVerbose)
{
- gmx_domdec_t* dd;
- gmx_domdec_comm_t* comm;
- gmx_ddbox_t ddbox = { 0 };
- int64_t step_pcoupl;
- rvec cell_ns_x0, cell_ns_x1;
- int ncgindex_set, ncg_moved, nat_f_novirsum;
- gmx_bool bBoxChanged, bNStGlobalComm, bDoDLB, bCheckWhetherToTurnDlbOn, bLogLoad;
- gmx_bool bRedist;
- ivec np;
- real grid_density;
- char sbuf[22];
-
- wallcycle_start(wcycle, ewcDOMDEC);
-
- dd = cr->dd;
- comm = dd->comm;
+ gmx_ddbox_t ddbox = { 0 };
+ int ncgindex_set;
+ char sbuf[22];
+
+ wallcycle_start(wcycle, WallCycleCounter::Domdec);
+
+ gmx_domdec_t* dd = cr->dd;
+ gmx_domdec_comm_t* comm = dd->comm;
// TODO if the update code becomes accessible here, use
// upd->deform for this logic.
- bBoxChanged = (bMasterState || inputrecDeform(ir));
- if (ir->epc != epcNO)
+ bool bBoxChanged = (bMasterState || inputrecDeform(&inputrec));
+ if (inputrec.epc != PressureCoupling::No)
{
/* With nstpcouple > 1, pressure coupling happens
* one step after calculating the pressure.
* We need to determine the last step in which p-coupling occurred.
* MRS -- need to validate this for vv?
*/
- int n = ir->nstpcouple;
+ int n = inputrec.nstpcouple;
+ int64_t step_pcoupl;
if (n == 1)
{
step_pcoupl = step - 1;
}
if (step_pcoupl >= comm->partition_step)
{
- bBoxChanged = TRUE;
+ bBoxChanged = true;
}
}
- bNStGlobalComm = (step % nstglobalcomm == 0);
-
+ bool bNStGlobalComm = (step % nstglobalcomm == 0);
+ bool bDoDLB;
if (!isDlbOn(comm))
{
- bDoDLB = FALSE;
+ bDoDLB = false;
}
else
{
* Since it requires (possibly expensive) global communication,
* we might want to do DLB less frequently.
*/
- if (bBoxChanged || ir->epc != epcNO)
+ if (bBoxChanged || inputrec.epc != PressureCoupling::No)
{
bDoDLB = bBoxChanged;
}
/* Check if we have recorded loads on the nodes */
if (comm->ddSettings.recordLoad && dd_load_count(comm) > 0)
{
- bCheckWhetherToTurnDlbOn = dd_dlb_get_should_check_whether_to_turn_dlb_on(dd);
+ bool bCheckWhetherToTurnDlbOn = dd_dlb_get_should_check_whether_to_turn_dlb_on(dd);
/* Print load every nstlog, first and last step to the log file */
- bLogLoad = ((ir->nstlog > 0 && step % ir->nstlog == 0) || comm->n_load_collect == 0
- || (ir->nsteps >= 0 && (step + ir->nstlist > ir->init_step + ir->nsteps)));
+ bool bLogLoad = ((inputrec.nstlog > 0 && step % inputrec.nstlog == 0) || comm->n_load_collect == 0
+ || (inputrec.nsteps >= 0
+ && (step + inputrec.nstlist > inputrec.init_step + inputrec.nsteps)));
/* Avoid extra communication due to verbose screen output
* when nstglobalcomm is set.
*/
if (bDoDLB || bLogLoad || bCheckWhetherToTurnDlbOn
- || (bVerbose && (ir->nstlist == 0 || nstglobalcomm <= ir->nstlist)))
+ || (bVerbose && (inputrec.nstlist == 0 || nstglobalcomm <= inputrec.nstlist)))
{
get_load_distribution(dd, wcycle);
if (DDMASTER(dd))
if (comm->dlbState == DlbState::onCanTurnOff
&& dd->comm->n_load_have % c_checkTurnDlbOffInterval == c_checkTurnDlbOffInterval - 1)
{
- gmx_bool turnOffDlb;
+ bool turnOffDlb;
if (DDMASTER(dd))
{
/* If the running averaged cycles with DLB are more
{
/* To turn off DLB, we need to redistribute the atoms */
dd_collect_state(dd, state_local, state_global);
- bMasterState = TRUE;
+ bMasterState = true;
turn_off_dlb(mdlog, dd, step);
}
}
}
else if (bCheckWhetherToTurnDlbOn)
{
- gmx_bool turnOffDlbForever = FALSE;
- gmx_bool turnOnDlb = FALSE;
+ bool turnOffDlbForever = false;
+ bool turnOnDlb = false;
/* Since the timings are node dependent, the master decides */
if (DDMASTER(dd))
if (comm->dlbSlowerPartitioningCount > 0
&& dd->ddp_count < comm->dlbSlowerPartitioningCount + 10 * c_checkTurnDlbOnInterval)
{
- turnOffDlbForever = TRUE;
+ turnOffDlbForever = true;
}
comm->haveTurnedOffDlb = false;
/* Register when we last measured DLB slowdown */
*/
if (comm->ddRankSetup.usePmeOnlyRanks && dd_pme_f_ratio(dd) > 1 - DD_PERF_LOSS_DLB_ON)
{
- turnOnDlb = FALSE;
+ turnOnDlb = false;
}
else
{
}
struct
{
- gmx_bool turnOffDlbForever;
- gmx_bool turnOnDlb;
+ bool turnOffDlbForever;
+ bool turnOnDlb;
} bools{ turnOffDlbForever, turnOnDlb };
dd_bcast(dd, sizeof(bools), &bools);
if (bools.turnOffDlbForever)
else if (bools.turnOnDlb)
{
turn_on_dlb(mdlog, dd, step);
- bDoDLB = TRUE;
+ bDoDLB = true;
}
}
}
comm->n_load_have++;
}
- bRedist = FALSE;
+ bool bRedist = false;
if (bMasterState)
{
/* Clear the old state */
distributeState(mdlog, dd, top_global, state_global, ddbox, state_local);
/* Ensure that we have space for the new distribution */
- dd_resize_atominfo_and_state(fr, state_local, dd->ncg_home);
+ dd_resize_atominfo_and_state(fr, state_local, dd->numHomeAtoms);
inc_nrnb(nrnb, eNR_CGCM, comm->atomRanges.numHomeAtoms());
- dd_set_cginfo(dd->globalAtomGroupIndices, 0, dd->ncg_home, fr);
+ dd_set_atominfo(dd->globalAtomGroupIndices, 0, dd->numHomeAtoms, fr);
}
else if (state_local->ddp_count != dd->ddp_count)
{
gmx_fatal(FARGS,
"Internal inconsistency state_local->ddp_count (%d) > dd->ddp_count (%" PRId64
")",
- state_local->ddp_count, dd->ddp_count);
+ state_local->ddp_count,
+ dd->ddp_count);
}
if (state_local->ddp_count_cg_gl != state_local->ddp_count)
gmx_fatal(FARGS,
"Internal inconsistency state_local->ddp_count_cg_gl (%d) != "
"state_local->ddp_count (%d)",
- state_local->ddp_count_cg_gl, state_local->ddp_count);
+ state_local->ddp_count_cg_gl,
+ state_local->ddp_count);
}
/* Clear the old state */
/* Restore the atom group indices from state_local */
restoreAtomGroups(dd, state_local);
make_dd_indices(dd, 0);
- ncgindex_set = dd->ncg_home;
+ ncgindex_set = dd->numHomeAtoms;
inc_nrnb(nrnb, eNR_CGCM, comm->atomRanges.numHomeAtoms());
- dd_set_cginfo(dd->globalAtomGroupIndices, 0, dd->ncg_home, fr);
+ dd_set_atominfo(dd->globalAtomGroupIndices, 0, dd->numHomeAtoms, fr);
set_ddbox(*dd, bMasterState, state_local->box, true, state_local->x, &ddbox);
}
set_ddbox(*dd, bMasterState, state_local->box, bNStGlobalComm, state_local->x, &ddbox);
- bBoxChanged = TRUE;
- bRedist = TRUE;
+ bBoxChanged = true;
+ bRedist = true;
}
/* Copy needed for dim's without pbc when avoiding communication */
copy_rvec(ddbox.box0, comm->box0);
if (comm->systemInfo.useUpdateGroups)
{
comm->updateGroupsCog->addCogs(
- gmx::arrayRefFromArray(dd->globalAtomGroupIndices.data(), dd->ncg_home), state_local->x);
+ gmx::arrayRefFromArray(dd->globalAtomGroupIndices.data(), dd->numHomeAtoms),
+ state_local->x);
}
/* Check if we should sort the charge groups */
* Thus we need to keep track of how many charge groups will move for
* obtaining correct local charge group / atom counts.
*/
- ncg_moved = 0;
+ int ncg_moved = 0;
if (bRedist)
{
- wallcycle_sub_start(wcycle, ewcsDD_REDIST);
+ wallcycle_sub_start(wcycle, WallCycleSubCounter::DDRedist);
- ncgindex_set = dd->ncg_home;
+ ncgindex_set = dd->numHomeAtoms;
dd_redistribute_cg(fplog, step, dd, ddbox.tric_dir, state_local, fr, nrnb, &ncg_moved);
GMX_RELEASE_ASSERT(bSortCG, "Sorting is required after redistribution");
if (comm->systemInfo.useUpdateGroups)
{
comm->updateGroupsCog->addCogs(
- gmx::arrayRefFromArray(dd->globalAtomGroupIndices.data(), dd->ncg_home),
+ gmx::arrayRefFromArray(dd->globalAtomGroupIndices.data(), dd->numHomeAtoms),
state_local->x);
}
- wallcycle_sub_stop(wcycle, ewcsDD_REDIST);
+ wallcycle_sub_stop(wcycle, WallCycleSubCounter::DDRedist);
}
- // TODO: Integrate this code in the nbnxm module
- get_nsgrid_boundaries(ddbox.nboundeddim, state_local->box, dd, &ddbox, &comm->cell_x0,
- &comm->cell_x1, dd->ncg_home, as_rvec_array(state_local->x.data()),
- cell_ns_x0, cell_ns_x1, &grid_density);
+ RVec cell_ns_x0, cell_ns_x1;
+ get_nsgrid_boundaries(ddbox.nboundeddim,
+ state_local->box,
+ dd,
+ &ddbox,
+ &comm->cell_x0,
+ &comm->cell_x1,
+ dd->numHomeAtoms,
+ as_rvec_array(state_local->x.data()),
+ cell_ns_x0,
+ cell_ns_x1);
if (bBoxChanged)
{
if (bSortCG)
{
- wallcycle_sub_start(wcycle, ewcsDD_GRID);
+ wallcycle_sub_start(wcycle, WallCycleSubCounter::DDGrid);
/* Sort the state on charge group position.
* This enables exact restarts from this step.
/* Fill the ns grid with the home cell,
* so we can sort with the indices.
*/
- set_zones_ncg_home(dd);
+ set_zones_numHomeAtoms(dd);
set_zones_size(dd, state_local->box, &ddbox, 0, 1, ncg_moved);
- nbnxn_put_on_grid(fr->nbv.get(), state_local->box, 0, comm->zones.size[0].bb_x0,
- comm->zones.size[0].bb_x1, comm->updateGroupsCog.get(),
- { 0, dd->ncg_home }, comm->zones.dens_zone0, fr->cginfo, state_local->x,
- ncg_moved, bRedist ? comm->movedBuffer.data() : nullptr);
+ nbnxn_put_on_grid(fr->nbv.get(),
+ state_local->box,
+ 0,
+ comm->zones.size[0].bb_x0,
+ comm->zones.size[0].bb_x1,
+ comm->updateGroupsCog.get(),
+ { 0, dd->numHomeAtoms },
+ comm->zones.dens_zone0,
+ fr->atomInfo,
+ state_local->x,
+ ncg_moved,
+ bRedist ? comm->movedBuffer.data() : nullptr);
if (debug)
{
- fprintf(debug, "Step %s, sorting the %d home charge groups\n", gmx_step_str(step, sbuf),
- dd->ncg_home);
+ fprintf(debug, "Step %s, sorting the %d home charge groups\n", gmx_step_str(step, sbuf), dd->numHomeAtoms);
}
dd_sort_state(dd, fr, state_local);
dd->ga2la->clear(false);
ncgindex_set = 0;
- wallcycle_sub_stop(wcycle, ewcsDD_GRID);
+ wallcycle_sub_stop(wcycle, WallCycleSubCounter::DDGrid);
}
else
{
/* With the group scheme the sorting array is part of the DD state,
* but it just got out of sync, so mark as invalid by emptying it.
*/
- if (ir->cutoff_scheme == ecutsGROUP)
+ if (inputrec.cutoff_scheme == CutoffScheme::Group)
{
comm->sort->sorted.clear();
}
comm->updateGroupsCog->clear();
}
- wallcycle_sub_start(wcycle, ewcsDD_SETUPCOMM);
+ wallcycle_sub_start(wcycle, WallCycleSubCounter::DDSetupComm);
/* Set the indices for the home atoms */
- set_zones_ncg_home(dd);
+ set_zones_numHomeAtoms(dd);
make_dd_indices(dd, ncgindex_set);
/* Setup up the communication and communicate the coordinates */
setup_dd_communication(dd, state_local->box, &ddbox, fr, state_local);
/* Set the indices for the halo atoms */
- make_dd_indices(dd, dd->ncg_home);
+ make_dd_indices(dd, dd->numHomeAtoms);
/* Set the charge group boundaries for neighbor searching */
set_cg_boundaries(&comm->zones);
/* When bSortCG=true, we have already set the size for zone 0 */
set_zones_size(dd, state_local->box, &ddbox, bSortCG ? 1 : 0, comm->zones.n, 0);
- wallcycle_sub_stop(wcycle, ewcsDD_SETUPCOMM);
+ wallcycle_sub_stop(wcycle, WallCycleSubCounter::DDSetupComm);
/*
write_dd_pdb("dd_home",step,"dump",top_global,cr,
-1,state_local->x.rvec_array(),state_local->box);
*/
- wallcycle_sub_start(wcycle, ewcsDD_MAKETOP);
+ wallcycle_sub_start(wcycle, WallCycleSubCounter::DDMakeTop);
/* Extract a local topology from the global topology */
+ IVec numPulses;
for (int i = 0; i < dd->ndim; i++)
{
- np[dd->dim[i]] = comm->cd[i].numPulses();
+ numPulses[dd->dim[i]] = comm->cd[i].numPulses();
}
- dd_make_local_top(dd, &comm->zones, dd->unitCellInfo.npbcdim, state_local->box,
- comm->cellsize_min, np, fr, state_local->x.rvec_array(), top_global, top_local);
+ int numBondedInteractionsToReduce = dd_make_local_top(dd,
+ &comm->zones,
+ dd->unitCellInfo.npbcdim,
+ state_local->box,
+ comm->cellsize_min,
+ numPulses,
+ fr,
+ state_local->x,
+ top_global,
+ fr->atomInfo,
+ top_local);
+ dd->localTopologyChecker->scheduleCheckOfLocalTopology(numBondedInteractionsToReduce);
- wallcycle_sub_stop(wcycle, ewcsDD_MAKETOP);
+ wallcycle_sub_stop(wcycle, WallCycleSubCounter::DDMakeTop);
- wallcycle_sub_start(wcycle, ewcsDD_MAKECONSTR);
+ wallcycle_sub_start(wcycle, WallCycleSubCounter::DDMakeConstr);
/* Set up the special atom communication */
int n = comm->atomRanges.end(DDAtomRanges::Type::Zones);
for (int i = static_cast<int>(DDAtomRanges::Type::Zones) + 1;
- i < static_cast<int>(DDAtomRanges::Type::Number); i++)
+ i < static_cast<int>(DDAtomRanges::Type::Number);
+ i++)
{
auto range = static_cast<DDAtomRanges::Type>(i);
switch (range)
}
break;
case DDAtomRanges::Type::Constraints:
- if (dd->comm->systemInfo.haveSplitConstraints || dd->comm->systemInfo.haveSplitSettles)
+ if (dd->comm->systemInfo.mayHaveSplitConstraints || dd->comm->systemInfo.mayHaveSplitSettles)
{
/* Only for inter-cg constraints we need special code */
- n = dd_make_local_constraints(dd, n, &top_global, fr->cginfo.data(), constr,
- ir->nProjOrder, top_local->idef.il);
+ n = dd_make_local_constraints(dd,
+ n,
+ top_global,
+ fr->atomInfo,
+ constr,
+ inputrec.nProjOrder,
+ top_local->idef.il);
}
break;
default: gmx_incons("Unknown special atom type setup");
comm->atomRanges.setEnd(range, n);
}
- wallcycle_sub_stop(wcycle, ewcsDD_MAKECONSTR);
+ wallcycle_sub_stop(wcycle, WallCycleSubCounter::DDMakeConstr);
- wallcycle_sub_start(wcycle, ewcsDD_TOPOTHER);
+ wallcycle_sub_start(wcycle, WallCycleSubCounter::DDTopOther);
/* Make space for the extra coordinates for virtual site
* or constraint communication.
state_change_natoms(state_local, state_local->natoms);
+ int nat_f_novirsum;
if (vsite && vsite->numInterUpdategroupVirtualSites())
{
nat_f_novirsum = comm->atomRanges.end(DDAtomRanges::Type::Vsites);
}
else
{
- if (EEL_FULL(ir->coulombtype) && dd->haveExclusions)
+ if (EEL_FULL(inputrec.coulombtype) && dd->haveExclusions)
{
nat_f_novirsum = comm->atomRanges.end(DDAtomRanges::Type::Zones);
}
* allocation, zeroing and copying, but this is probably not worth
* the complications and checking.
*/
- forcerec_set_ranges(fr, comm->atomRanges.end(DDAtomRanges::Type::Zones),
- comm->atomRanges.end(DDAtomRanges::Type::Constraints), nat_f_novirsum);
+ forcerec_set_ranges(fr,
+ comm->atomRanges.end(DDAtomRanges::Type::Zones),
+ comm->atomRanges.end(DDAtomRanges::Type::Constraints),
+ nat_f_novirsum);
/* Update atom data for mdatoms and several algorithms */
- mdAlgorithmsSetupAtomData(cr, ir, top_global, top_local, fr, f, mdAtoms, constr, vsite, nullptr);
+ mdAlgorithmsSetupAtomData(cr, inputrec, top_global, top_local, fr, f, mdAtoms, constr, vsite, nullptr);
- auto mdatoms = mdAtoms->mdatoms();
+ auto* mdatoms = mdAtoms->mdatoms();
if (!thisRankHasDuty(cr, DUTY_PME))
{
/* Send the charges and/or c6/sigmas to our PME only node */
- gmx_pme_send_parameters(cr, fr->ic, mdatoms->nChargePerturbed != 0,
- mdatoms->nTypePerturbed != 0, mdatoms->chargeA, mdatoms->chargeB,
- mdatoms->sqrt_c6A, mdatoms->sqrt_c6B, mdatoms->sigmaA,
- mdatoms->sigmaB, dd_pme_maxshift_x(dd), dd_pme_maxshift_y(dd));
+ gmx_pme_send_parameters(
+ cr,
+ *fr->ic,
+ mdatoms->nChargePerturbed != 0,
+ mdatoms->nTypePerturbed != 0,
+ mdatoms->chargeA ? gmx::arrayRefFromArray(mdatoms->chargeA, mdatoms->nr)
+ : gmx::ArrayRef<real>{},
+ mdatoms->chargeB ? gmx::arrayRefFromArray(mdatoms->chargeB, mdatoms->nr)
+ : gmx::ArrayRef<real>{},
+ mdatoms->sqrt_c6A ? gmx::arrayRefFromArray(mdatoms->sqrt_c6A, mdatoms->nr)
+ : gmx::ArrayRef<real>{},
+ mdatoms->sqrt_c6B ? gmx::arrayRefFromArray(mdatoms->sqrt_c6B, mdatoms->nr)
+ : gmx::ArrayRef<real>{},
+ mdatoms->sigmaA ? gmx::arrayRefFromArray(mdatoms->sigmaA, mdatoms->nr)
+ : gmx::ArrayRef<real>{},
+ mdatoms->sigmaB ? gmx::arrayRefFromArray(mdatoms->sigmaB, mdatoms->nr)
+ : gmx::ArrayRef<real>{},
+ dd_pme_maxshift_x(*dd),
+ dd_pme_maxshift_y(*dd));
}
if (dd->atomSets != nullptr)
}
// The pull group construction can need the atom sets updated above
- if (ir->bPull)
+ if (inputrec.bPull)
{
/* Update the local pull groups */
dd_make_local_pull_groups(cr, pull_work);
}
- /* Update the local atoms to be communicated via the IMD protocol if bIMD is TRUE. */
+ /* Update the local atoms to be communicated via the IMD protocol if bIMD is true. */
imdSession->dd_make_local_IMD_atoms(dd);
add_dd_statistics(dd);
*/
dd_move_x_vsites(*dd, state_local->box, state_local->x.rvec_array());
- wallcycle_sub_stop(wcycle, ewcsDD_TOPOTHER);
+ wallcycle_sub_stop(wcycle, WallCycleSubCounter::DDTopOther);
if (comm->ddSettings.nstDDDump > 0 && step % comm->ddSettings.nstDDDump == 0)
{
- dd_move_x(dd, state_local->box, state_local->x, nullWallcycle);
- write_dd_pdb("dd_dump", step, "dump", &top_global, cr, -1, state_local->x.rvec_array(),
+ dd_move_x(dd, state_local->box, state_local->x, nullptr);
+ write_dd_pdb("dd_dump",
+ step,
+ "dump",
+ top_global,
+ cr,
+ -1,
+ state_local->x.rvec_array(),
state_local->box);
}
check_index_consistency(dd, top_global.natoms, "after partitioning");
}
- wallcycle_stop(wcycle, ewcDOMDEC);
+ wallcycle_stop(wcycle, WallCycleCounter::Domdec);
}
-/*! \brief Check whether bonded interactions are missing, if appropriate */
-void checkNumberOfBondedInteractions(const gmx::MDLogger& mdlog,
- t_commrec* cr,
- int totalNumberOfBondedInteractions,
- const gmx_mtop_t* top_global,
- const gmx_localtop_t* top_local,
- gmx::ArrayRef<const gmx::RVec> x,
- const matrix box,
- bool* shouldCheckNumberOfBondedInteractions)
-{
- if (*shouldCheckNumberOfBondedInteractions)
- {
- if (totalNumberOfBondedInteractions != cr->dd->nbonded_global)
- {
- dd_print_missing_interactions(mdlog, cr, totalNumberOfBondedInteractions, top_global,
- top_local, x, box); // Does not return
- }
- *shouldCheckNumberOfBondedInteractions = false;
- }
-}
+} // namespace gmx