{
CartesianRankSetup cartSetup;
+ // By default, both the group and sim communicators are equal to the default communicator
+ cr->mpi_comm_mygroup = cr->mpiDefaultCommunicator;
+ cr->mpi_comm_mysim = cr->mpiDefaultCommunicator;
+ cr->nnodes = cr->sizeOfDefaultCommunicator;
+ cr->nodeid = cr->rankInDefaultCommunicator;
+ cr->sim_nodeid = cr->rankInDefaultCommunicator;
+
if (ddRankSetup.usePmeOnlyRanks)
{
/* Split the communicator into a PP and PME part */
{
/* All nodes do PP and PME */
/* We do not require separate communicators */
- cr->mpi_comm_mygroup = cr->mpi_comm_mysim;
-
cartSetup.bCartesianPP = false;
cartSetup.bCartesianPP_PME = false;
}
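When separate PME ranks are used, the split mentioned in the comment above follows the standard MPI pattern of assigning each rank a color and calling MPI_Comm_split on the parent communicator. A minimal sketch of that general pattern, not the GROMACS implementation (the isPmeOnlyRank predicate is hypothetical):

#include <mpi.h>

// Sketch only: split a parent communicator into a PP group and a PME group.
// Ranks sharing a color land in the same sub-communicator; passing the parent
// rank as the key keeps the relative rank ordering within each group.
static MPI_Comm splitIntoPpAndPme(MPI_Comm parent, bool isPmeOnlyRank)
{
    int parentRank = 0;
    MPI_Comm_rank(parent, &parentRank);
    const int color = isPmeOnlyRank ? 1 : 0;
    MPI_Comm  group = MPI_COMM_NULL;
    MPI_Comm_split(parent, color, parentRank, &group);
    return group;
}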
/*! \brief Generate the simulation system information */
static DDSystemInfo getSystemInfo(const gmx::MDLogger& mdlog,
- const t_commrec* cr,
+ DDRole ddRole,
+ MPI_Comm communicator,
const DomdecOptions& options,
const gmx_mtop_t& mtop,
const t_inputrec& ir,
{
real r_2b, r_mb;
- if (MASTER(cr))
+ if (ddRole == DDRole::Master)
{
dd_bonded_cg_distance(mdlog, &mtop, &ir, xGlobal, box,
options.checkBondedInteractions, &r_2b, &r_mb);
}
- gmx_bcast(sizeof(r_2b), &r_2b, cr->mpi_comm_mygroup);
- gmx_bcast(sizeof(r_mb), &r_mb, cr->mpi_comm_mygroup);
+ gmx_bcast(sizeof(r_2b), &r_2b, communicator);
+ gmx_bcast(sizeof(r_mb), &r_mb, communicator);
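getSystemInfo computes the bonded distances on the master rank only and then broadcasts them, so every rank in the communicator passed in ends up with identical values. A minimal sketch of that compute-on-root-then-broadcast pattern in plain MPI (gmx_bcast is the GROMACS wrapper; the value computed here is a stand-in):

#include <mpi.h>

// Sketch only: rank 0 computes a quantity, every rank receives the result.
static double computeOnRootAndBroadcast(MPI_Comm communicator)
{
    int rank = 0;
    MPI_Comm_rank(communicator, &rank);
    double value = 0.0;
    if (rank == 0)
    {
        value = 1.25; // stand-in for the master-only computation
    }
    // Broadcast is collective: all ranks, including rank 0, must call it.
    MPI_Bcast(&value, 1, MPI_DOUBLE, 0, communicator);
    return value;
}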
/* We use an initial margin of 10% for the minimum cell size,
* except when we are just below the non-bonded cut-off.
/*! \brief Exit with a fatal error if the DDGridSetup cannot be
* implemented. */
static void checkDDGridSetup(const DDGridSetup& ddGridSetup,
- const t_commrec* cr,
+ DDRole ddRole,
+ MPI_Comm communicator,
+ int numNodes,
const DomdecOptions& options,
const DDSettings& ddSettings,
const DDSystemInfo& systemInfo,
ddSettings.initialDlbState != DlbState::offUser ? " or -dds" : "",
bC ? " or your LINCS settings" : "");
- gmx_fatal_collective(FARGS, cr->mpi_comm_mysim, MASTER(cr),
+ gmx_fatal_collective(FARGS, communicator, ddRole == DDRole::Master,
"There is no domain decomposition for %d ranks that is compatible "
"with the given box and a minimum cell size of %g nm\n"
"%s\n"
"Look in the log file for details on the domain decomposition",
- cr->nnodes - ddGridSetup.numPmeOnlyRanks, cellsizeLimit, buf);
+ numNodes - ddGridSetup.numPmeOnlyRanks, cellsizeLimit, buf);
}
const real acs = average_cellsize_min(ddbox, ddGridSetup.numDomains);
else
{
gmx_fatal_collective(
- FARGS, cr->mpi_comm_mysim, MASTER(cr),
+ FARGS, communicator, ddRole == DDRole::Master,
"The initial cell size (%f) is smaller than the cell size limit (%f), change "
"options -dd, -rdd or -rcon, see the log file for details",
acs, cellsizeLimit);
const int numPPRanks =
ddGridSetup.numDomains[XX] * ddGridSetup.numDomains[YY] * ddGridSetup.numDomains[ZZ];
- if (cr->nnodes - numPPRanks != ddGridSetup.numPmeOnlyRanks)
+ if (numNodes - numPPRanks != ddGridSetup.numPmeOnlyRanks)
{
- gmx_fatal_collective(FARGS, cr->mpi_comm_mysim, MASTER(cr),
+ gmx_fatal_collective(FARGS, communicator, ddRole == DDRole::Master,
"The size of the domain decomposition grid (%d) does not match the "
"number of PP ranks (%d). The total number of ranks is %d",
- numPPRanks, cr->nnodes - ddGridSetup.numPmeOnlyRanks, cr->nnodes);
+ numPPRanks, numNodes - ddGridSetup.numPmeOnlyRanks, numNodes);
}
if (ddGridSetup.numPmeOnlyRanks > numPPRanks)
{
- gmx_fatal_collective(FARGS, cr->mpi_comm_mysim, MASTER(cr),
+ gmx_fatal_collective(FARGS, communicator, ddRole == DDRole::Master,
"The number of separate PME ranks (%d) is larger than the number of "
"PP ranks (%d), this is not supported.",
ddGridSetup.numPmeOnlyRanks, numPPRanks);
/*! \brief Set the cell size and interaction limits, as well as the DD grid */
static DDRankSetup getDDRankSetup(const gmx::MDLogger& mdlog,
- t_commrec* cr,
+ int numNodes,
const DDGridSetup& ddGridSetup,
const t_inputrec& ir)
{
DDRankSetup ddRankSetup;
- ddRankSetup.numPPRanks = cr->nnodes - ddGridSetup.numPmeOnlyRanks;
+ ddRankSetup.numPPRanks = numNodes - ddGridSetup.numPmeOnlyRanks;
copy_ivec(ddGridSetup.numDomains, ddRankSetup.numPPCells);
ddRankSetup.usePmeOnlyRanks = (ddGridSetup.numPmeOnlyRanks > 0);
/*! \brief Set the cell size and interaction limits */
static void set_dd_limits(const gmx::MDLogger& mdlog,
- t_commrec* cr,
+ DDRole ddRole,
gmx_domdec_t* dd,
const DomdecOptions& options,
const DDSettings& ddSettings,
gmx::boolToString(systemInfo.filterBondedCommunication), comm->cellsize_limit);
}
- if (MASTER(cr))
+ if (ddRole == DDRole::Master)
{
check_dd_restrictions(dd, ir, mdlog);
}
* in \c options.numPmeRanks.
*/
static bool canMake1DDomainDecomposition(const DDSettings& ddSettingsOriginal,
- const t_commrec* cr,
+ DDRole ddRole,
+ MPI_Comm communicator,
const int numRanksRequested,
const DomdecOptions& options,
const gmx_mtop_t& mtop,
// Ensure we don't write any output from this checking routine
gmx::MDLogger dummyLogger;
- DDSystemInfo systemInfo = getSystemInfo(dummyLogger, cr, options, mtop, ir, box, xGlobal);
+ DDSystemInfo systemInfo =
+ getSystemInfo(dummyLogger, ddRole, communicator, options, mtop, ir, box, xGlobal);
DDSettings ddSettings = ddSettingsOriginal;
ddSettings.request1D = true;
options.dlbScaling, ir, systemInfo.cellsizeLimit);
gmx_ddbox_t ddbox = { 0 };
DDGridSetup ddGridSetup =
- getDDGridSetup(dummyLogger, cr, numRanksRequested, options, ddSettings, systemInfo,
- gridSetupCellsizeLimit, mtop, ir, box, xGlobal, &ddbox);
+ getDDGridSetup(dummyLogger, ddRole, communicator, numRanksRequested, options, ddSettings,
+ systemInfo, gridSetupCellsizeLimit, mtop, ir, box, xGlobal, &ddbox);
const bool canMake1DDD = (ddGridSetup.numDomains[XX] != 0);
mtop_(mtop),
ir_(ir)
{
- GMX_LOG(mdlog_.info).appendTextFormatted("\nInitializing Domain Decomposition on %d ranks", cr_->nnodes);
+ GMX_LOG(mdlog_.info).appendTextFormatted("\nInitializing Domain Decomposition on %d ranks", cr_->sizeOfDefaultCommunicator);
ddSettings_ = getDDSettings(mdlog_, options_, mdrunOptions, ir_);
if (prefer1D
- && canMake1DDomainDecomposition(ddSettings_, cr_, cr_->nnodes, options_, mtop_, ir_, box, xGlobal))
+ && canMake1DDomainDecomposition(ddSettings_, MASTER(cr_) ? DDRole::Master : DDRole::Agent,
+ cr_->mpiDefaultCommunicator, cr_->sizeOfDefaultCommunicator,
+ options_, mtop_, ir_, box, xGlobal))
{
ddSettings_.request1D = true;
}
if (ddSettings_.eFlop > 1)
{
/* Ensure that we have different random flop counts on different ranks */
- srand(1 + cr_->nodeid);
+ srand(1 + cr_->rankInDefaultCommunicator);
}
- systemInfo_ = getSystemInfo(mdlog_, cr_, options_, mtop_, ir_, box, xGlobal);
+ systemInfo_ = getSystemInfo(mdlog_, MASTER(cr_) ? DDRole::Master : DDRole::Agent,
+ cr_->mpiDefaultCommunicator, options_, mtop_, ir_, box, xGlobal);
- const int numRanksRequested = cr_->nnodes;
+ const int numRanksRequested = cr_->sizeOfDefaultCommunicator;
const bool checkForLargePrimeFactors = (options_.numCells[0] <= 0);
checkForValidRankCountRequests(numRanksRequested, EEL_PME(ir_.coulombtype),
options_.numPmeRanks, checkForLargePrimeFactors);
const real gridSetupCellsizeLimit =
getDDGridSetupCellSizeLimit(mdlog_, !isDlbDisabled(ddSettings_.initialDlbState),
options_.dlbScaling, ir_, systemInfo_.cellsizeLimit);
- ddGridSetup_ = getDDGridSetup(mdlog_, cr_, numRanksRequested, options_, ddSettings_, systemInfo_,
- gridSetupCellsizeLimit, mtop_, ir_, box, xGlobal, &ddbox_);
- checkDDGridSetup(ddGridSetup_, cr_, options_, ddSettings_, systemInfo_, gridSetupCellsizeLimit, ddbox_);
+ ddGridSetup_ =
+ getDDGridSetup(mdlog_, MASTER(cr_) ? DDRole::Master : DDRole::Agent,
+ cr_->mpiDefaultCommunicator, numRanksRequested, options_, ddSettings_,
+ systemInfo_, gridSetupCellsizeLimit, mtop_, ir_, box, xGlobal, &ddbox_);
+ checkDDGridSetup(ddGridSetup_, MASTER(cr_) ? DDRole::Master : DDRole::Agent,
+ cr_->mpiDefaultCommunicator, cr_->sizeOfDefaultCommunicator, options_,
+ ddSettings_, systemInfo_, gridSetupCellsizeLimit, ddbox_);
cr_->npmenodes = ddGridSetup_.numPmeOnlyRanks;
- ddRankSetup_ = getDDRankSetup(mdlog_, cr_, ddGridSetup_, ir_);
+ ddRankSetup_ = getDDRankSetup(mdlog_, cr_->sizeOfDefaultCommunicator, ddGridSetup_, ir_);
/* Generate the group communicator, also decides the duty of each rank */
cartSetup_ = makeGroupCommunicators(mdlog_, ddSettings_, options_.rankOrder, ddRankSetup_, cr_,
dd->comm->ddRankSetup = ddRankSetup_;
dd->comm->cartesianRankSetup = cartSetup_;
- set_dd_limits(mdlog_, cr_, dd, options_, ddSettings_, systemInfo_, ddGridSetup_,
- ddRankSetup_.numPPRanks, &mtop_, &ir_, ddbox_);
+ set_dd_limits(mdlog_, MASTER(cr_) ? DDRole::Master : DDRole::Agent, dd, options_, ddSettings_,
+ systemInfo_, ddGridSetup_, ddRankSetup_.numPPRanks, &mtop_, &ir_, ddbox_);
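The MASTER(cr_) ? DDRole::Master : DDRole::Agent expression recurs at every call site above. One possible follow-up, not part of this change, is a small helper that derives the role from the commrec once:

// Sketch only: a hypothetical convenience wrapper around the existing MASTER() macro.
static DDRole ddRoleFromCommrec(const t_commrec* cr)
{
    return MASTER(cr) ? DDRole::Master : DDRole::Agent;
}

The calls above would then read, for example, getSystemInfo(mdlog_, ddRoleFromCommrec(cr_), cr_->mpiDefaultCommunicator, options_, mtop_, ir_, box, xGlobal).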
setupGroupCommunication(mdlog_, ddSettings_, pmeRanks_, cr_, mtop_.natoms, dd);
} // namespace gmx
-static gmx_bool test_dd_cutoff(t_commrec* cr, const matrix box, gmx::ArrayRef<const gmx::RVec> x, real cutoffRequested)
+static gmx_bool test_dd_cutoff(const t_commrec* cr, const matrix box, gmx::ArrayRef<const gmx::RVec> x, real cutoffRequested)
{
- gmx_domdec_t* dd;
- gmx_ddbox_t ddbox;
- int d, dim, np;
- real inv_cell_size;
- int LocallyLimited;
+ gmx_ddbox_t ddbox;
+ int d, dim, np;
+ real inv_cell_size;
+ int LocallyLimited;
- dd = cr->dd;
+ const auto* dd = cr->dd;
set_ddbox(*dd, false, box, true, x, &ddbox);
{
inputrec = &inputrecInstance;
}
- init_parallel(cr->mpi_comm_mygroup, MASTER(cr), inputrec, &mtop, partialDeserializedTpr.get());
+ init_parallel(cr->mpiDefaultCommunicator, MASTER(cr), inputrec, &mtop,
+ partialDeserializedTpr.get());
}
GMX_RELEASE_ASSERT(inputrec != nullptr, "All ranks should have a valid inputrec now");
partialDeserializedTpr.reset(nullptr);
gpuAccelerationOfNonbondedIsUseful(mdlog, *inputrec, !GMX_THREAD_MPI), gpusWereDetected);
useGpuForPme = decideWhetherToUseGpusForPme(
useGpuForNonbonded, pmeTarget, userGpuTaskAssignment, *hwinfo, *inputrec, mtop,
- cr->nnodes, domdecOptions.numPmeRanks, gpusWereDetected);
+ cr->sizeOfDefaultCommunicator, domdecOptions.numPmeRanks, gpusWereDetected);
auto canUseGpuForBonded = buildSupportsGpuBondeds(nullptr)
&& inputSupportsGpuBondeds(*inputrec, mtop, nullptr);
useGpuForBonded = decideWhetherToUseGpusForBonded(
{
globalState = std::make_unique<t_state>();
}
- broadcastStateWithoutDynamics(cr->mpi_comm_mygroup, DOMAINDECOMP(cr), PAR(cr), globalState.get());
+ broadcastStateWithoutDynamics(cr->mpiDefaultCommunicator, DOMAINDECOMP(cr), PAR(cr),
+ globalState.get());
}
/* A parallel command line option consistency check that we can
{
if (domdecOptions.numPmeRanks > 0)
{
- gmx_fatal_collective(FARGS, cr->mpi_comm_mysim, MASTER(cr),
+ gmx_fatal_collective(FARGS, cr->mpiDefaultCommunicator, MASTER(cr),
"PME-only ranks are requested, but the system does not use PME "
"for electrostatics or LJ");
}
if (PAR(cr))
{
- gmx_bcast(sizeof(box), box, cr->mpi_comm_mygroup);
+ gmx_bcast(sizeof(box), box, cr->mpiDefaultCommunicator);
}
if (inputrec->cutoff_scheme != ecutsVERLET)
"Verlet scheme, or use an earlier version of GROMACS if necessary.");
}
/* Update rlist and nstlist. */
+ /* Note: prepare_verlet_scheme() calls increaseNstlist(...), which, while attempting to
+ * increase rlist, tries to check whether the newly chosen value is compatible with the DD
+ * scheme. Since this runs before any DD scheme has been set up, that check is never
+ * executed. See #3334 for more details.
+ */
prepare_verlet_scheme(fplog, cr, inputrec, nstlist_cmdline, &mtop, box,
useGpuForNonbonded || (emulateGpuNonbonded == EmulateGpuNonbonded::Yes),
*hwinfo->cpuInfo);
else
{
/* PME, if used, is done on all nodes with 1D decomposition */
- cr->npmenodes = 0;
- cr->duty = (DUTY_PP | DUTY_PME);
+ cr->nnodes = cr->sizeOfDefaultCommunicator;
+ cr->sim_nodeid = cr->rankInDefaultCommunicator;
+ cr->nodeid = cr->rankInDefaultCommunicator;
+ cr->npmenodes = 0;
+ cr->duty = (DUTY_PP | DUTY_PME);
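DUTY_PP and DUTY_PME are combined with a bitwise OR, so this branch marks the rank as doing both kinds of work; later code can test an individual responsibility by masking. A minimal sketch of such a check (assuming the flags remain plain bitmask constants, as the OR above implies):

// Sketch only: individual duties are tested by masking the combined bit field.
const bool doesPPWork  = (cr->duty & DUTY_PP) != 0;
const bool doesPmeWork = (cr->duty & DUTY_PME) != 0;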
if (inputrec->pbcType == PbcType::Screw)
{