gmx_domdec_t *dd;
MPI_Comm mpi_comm_pp_physicalnode;
- if (!(cr->duty & DUTY_PP) || gpu_id < 0)
+ if (!thisRankHasDuty(cr, DUTY_PP) || gpu_id < 0)
{
/* Only ranks with short-ranged tasks (currently) use GPUs.
* If we don't have GPUs assigned, there are no resources to share.
*/
snew(comm->ddindex2simnodeid, dd->nnodes);
snew(buf, dd->nnodes);
- if (cr->duty & DUTY_PP)
+ if (thisRankHasDuty(cr, DUTY_PP))
{
buf[dd_index(dd->nc, dd->ci)] = cr->sim_nodeid;
}
int *buf;
snew(comm->ddindex2simnodeid, dd->nnodes);
snew(buf, dd->nnodes);
- if (cr->duty & DUTY_PP)
+ if (thisRankHasDuty(cr, DUTY_PP))
{
buf[dd_index(dd->nc, dd->ci)] = cr->sim_nodeid;
}
/* Split the sim communicator into PP and PME only nodes */
MPI_Comm_split(cr->mpi_comm_mysim,
- cr->duty,
+ getThisRankDuties(cr),
dd_index(comm->ntot, dd->ci),
&cr->mpi_comm_mygroup);
}
/* Split the sim communicator into PP and PME only nodes */
MPI_Comm_split(cr->mpi_comm_mysim,
- cr->duty,
+ getThisRankDuties(cr),
cr->nodeid,
&cr->mpi_comm_mygroup);
MPI_Comm_rank(cr->mpi_comm_mygroup, &cr->nodeid);
if (fplog)
{
fprintf(fplog, "This rank does only %s work.\n\n",
- (cr->duty & DUTY_PP) ? "particle-particle" : "PME-mesh");
+ thisRankHasDuty(cr, DUTY_PP) ? "particle-particle" : "PME-mesh");
}
}
#endif
}
- if (cr->duty & DUTY_PP)
+ if (thisRankHasDuty(cr, DUTY_PP))
{
/* Copy or make a new PP communicator */
make_pp_communicator(fplog, dd, cr, CartReorder);
receive_ddindex2simnodeid(dd, cr);
}
- if (!(cr->duty & DUTY_PME))
+ if (!thisRankHasDuty(cr, DUTY_PME))
{
/* Set up the communication to our PME node */
dd->pme_nodeid = dd_simnode2pmenode(dd, cr, cr->sim_nodeid);
make_dd_communicators(fplog, cr, dd, options.rankOrder);
- if (cr->duty & DUTY_PP)
+ if (thisRankHasDuty(cr, DUTY_PP))
{
set_ddgrid_parameters(fplog, dd, options.dlbScaling, mtop, ir, ddbox);
make_local_gb(cr, fr->born, ir->gb_algorithm);
}
- if (!(cr->duty & DUTY_PME))
+ if (!thisRankHasDuty(cr, DUTY_PME))
{
/* Send the charges and/or c6/sigmas to our PME only node */
gmx_pme_send_parameters(cr,
snew(pme_lb, 1);
- pme_lb->bSepPMERanks = !(cr->duty & DUTY_PME);
+ pme_lb->bSepPMERanks = !thisRankHasDuty(cr, DUTY_PME);
/* Initially we turn on balancing directly on based on PP/PME imbalance */
pme_lb->bTriggerOnDLB = FALSE;
MPI_Comm_size(pme->mpi_comm_d[1], &pme->nnodes_minor);
#endif
}
- pme->bPPnode = (cr->duty & DUTY_PP);
+ pme->bPPnode = thisRankHasDuty(cr, DUTY_PP);
}
pme->nthread = nthread;
snew(hash_pp_s, nrank_world);
hash_s[rank_world] = myhash;
- hash_pp_s[rank_world] = (cr->duty & DUTY_PP) ? myhash : -1;
+ hash_pp_s[rank_world] = thisRankHasDuty(cr, DUTY_PP) ? myhash : -1;
MPI_Allreduce(hash_s, hash, nrank_world, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(hash_pp_s, hash_pp, nrank_world, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
if (hash_pp[i] == myhash)
{
nrank_pp_intranode++;
- if ((cr->duty & DUTY_PP) && i < rank_world)
+ if (thisRankHasDuty(cr, DUTY_PP) && i < rank_world)
{
rank_pp_intranode++;
}
if (debug)
{
char sbuf[STRLEN];
- if ((cr->duty & DUTY_PP) && (cr->duty & DUTY_PME))
+ if (thisRankHasDuty(cr, DUTY_PP) && thisRankHasDuty(cr, DUTY_PME))
{
sprintf(sbuf, "PP+PME");
}
else
{
- sprintf(sbuf, "%s", (cr->duty & DUTY_PP) ? "PP" : "PME");
+ sprintf(sbuf, "%s", thisRankHasDuty(cr, DUTY_PP) ? "PP" : "PME");
}
fprintf(debug, "On %3s rank %d: nrank_intranode=%d, rank_intranode=%d, "
"nrank_pp_intranode=%d, rank_pp_intranode=%d\n",
/* With CUDA detect only on one rank per host, with OpenCL need do
* the detection on all PP ranks */
- bool isOpenclPpRank = ((GMX_GPU == GMX_GPU_OPENCL) && (cr->duty & DUTY_PP));
+ bool isOpenclPpRank = ((GMX_GPU == GMX_GPU_OPENCL) && thisRankHasDuty(cr, DUTY_PP));
if (rank_local == 0 || isOpenclPpRank)
{
ewaldOutput.vir_q);
}
- if ((EEL_PME(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype)) && (cr->duty & DUTY_PME) && (pme_run_mode(fr->pmedata) == PmeRunMode::CPU))
+ if ((EEL_PME(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype)) &&
+ thisRankHasDuty(cr, DUTY_PME) && (pme_run_mode(fr->pmedata) == PmeRunMode::CPU))
{
/* Do reciprocal PME for Coulomb and/or LJ. */
assert(fr->n_tpi >= 0);
snew(fr->ns, 1);
init_ns(fp, cr, fr->ns, fr, mtop);
- if (cr->duty & DUTY_PP)
+ if (thisRankHasDuty(cr, DUTY_PP))
{
gmx_nonbonded_setup(fr, bGenericKernelOnly);
}
gmx_bool bIsPPrankUsingGPU;
char gpu_err_str[STRLEN];
- bIsPPrankUsingGPU = (cr->duty & DUTY_PP) && fr && fr->nbv && fr->nbv->bUseGPU;
+ bIsPPrankUsingGPU = thisRankHasDuty(cr, DUTY_PP) && fr && fr->nbv && fr->nbv->bUseGPU;
if (bIsPPrankUsingGPU)
{
/* number of MPI processes/threads per physical node */
nppn = cr->nrank_intranode;
- bSepPME = ( (cr->duty & DUTY_PP) && !(cr->duty & DUTY_PME)) ||
- (!(cr->duty & DUTY_PP) && (cr->duty & DUTY_PME));
+ bSepPME = (thisRankHasDuty(cr, DUTY_PP) != thisRankHasDuty(cr, DUTY_PME));
manage_number_of_openmp_threads(mdlog, cr, bOMP,
nthreads_hw_avail,
gmx_walltime_accounting_t walltime_accounting,
gmx_wallcycle_t wcycle)
{
- if (!(cr->duty & DUTY_PME))
+ if (!thisRankHasDuty(cr, DUTY_PME))
{
/* Tell the PME only node to finish */
gmx_pme_send_finish(cr);
const auto pmeRunMode = fr->pmedata ? pme_run_mode(fr->pmedata) : PmeRunMode::CPU;
// TODO slim this conditional down - inputrec and duty checks should mean the same in proper code!
- const bool useGpuPme = EEL_PME(fr->ic->eeltype) && (cr->duty & DUTY_PME) &&
+ const bool useGpuPme = EEL_PME(fr->ic->eeltype) && thisRankHasDuty(cr, DUTY_PME) &&
((pmeRunMode == PmeRunMode::GPU) || (pmeRunMode == PmeRunMode::Hybrid));
// a comment for uncrustify
const ArrayRef<RVec> pmeGpuForces = *fr->forceBufferIntermediate;
fr->shift_vec, nbv->nbat);
#if GMX_MPI
- if (!(cr->duty & DUTY_PME))
+ if (!thisRankHasDuty(cr, DUTY_PME))
{
/* Send particle coordinates to the pme nodes.
* Since this is only implemented for domain decomposition
reset_enerdata(enerd);
clear_rvecs(SHIFTS, fr->fshift);
- if (DOMAINDECOMP(cr) && !(cr->duty & DUTY_PME))
+ if (DOMAINDECOMP(cr) && !thisRankHasDuty(cr, DUTY_PME))
{
wallcycle_start(wcycle, ewcPPDURINGPME);
dd_force_flop_start(cr->dd, nrnb);
}
}
- if (PAR(cr) && !(cr->duty & DUTY_PME))
+ if (PAR(cr) && !thisRankHasDuty(cr, DUTY_PME))
{
/* In case of node-splitting, the PP nodes receive the long-range
* forces, virial and energy from the PME nodes here.
}
#if GMX_MPI
- if (!(cr->duty & DUTY_PME))
+ if (!thisRankHasDuty(cr, DUTY_PME))
{
/* Send particle coordinates to the pme nodes.
* Since this is only implemented for domain decomposition
x, box, fr, &top->idef, graph, fr->born);
}
- if (DOMAINDECOMP(cr) && !(cr->duty & DUTY_PME))
+ if (DOMAINDECOMP(cr) && !thisRankHasDuty(cr, DUTY_PME))
{
wallcycle_start(wcycle, ewcPPDURINGPME);
dd_force_flop_start(cr->dd, nrnb);
}
}
- if (PAR(cr) && !(cr->duty & DUTY_PME))
+ if (PAR(cr) && !thisRankHasDuty(cr, DUTY_PME))
{
/* In case of node-splitting, the PP nodes receive the long-range
* forces, virial and energy from the PME nodes here.
sfree(nrnb_tot);
}
- if ((cr->duty & DUTY_PP) && DOMAINDECOMP(cr))
+ if (thisRankHasDuty(cr, DUTY_PP) && DOMAINDECOMP(cr))
{
print_dd_statistics(cr, inputrec, fplog);
}
* to task parallelism. */
int nthreads_pp = gmx_omp_nthreads_get(emntNonbonded);
int nthreads_pme = gmx_omp_nthreads_get(emntPME);
- wallcycle_scale_by_num_threads(wcycle, cr->duty == DUTY_PME, nthreads_pp, nthreads_pme);
+ wallcycle_scale_by_num_threads(wcycle, thisRankHasDuty(cr, DUTY_PME) && !thisRankHasDuty(cr, DUTY_PP), nthreads_pp, nthreads_pme);
auto cycle_sum(wallcycle_sum(cr, wcycle));
if (printReport)
#include "gromacs/math/vectypes.h"
#include "gromacs/utility/basedefinitions.h"
+#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/real.h"
/* For domain decomposition */
gmx_domdec_t *dd;
- /* The duties of this node, see the defines above */
+ /* The duties of this node, see the DUTY_ defines above.
+ * This should be read through thisRankHasDuty() or getThisRankDuties().
+ */
int duty;
gmx_multisim_t *ms;
mpi_in_place_buf_t *mpb;
};
+/*! \brief
+ * Returns the rank's duty bitmask (DUTY_ flags), and asserts that it has
+ * been initialized (i.e. is non-zero).
+ *
+ * \param[in] cr  Communication record pointer; must be non-null.
+ *
+ * \returns The combination of DUTY_ flags assigned to this rank.
+ */
+inline int getThisRankDuties(const t_commrec *cr)
+{
+    GMX_ASSERT(cr, "Invalid commrec pointer");
+    GMX_ASSERT(cr->duty != 0, "Commrec duty was not initialized!");
+    return cr->duty;
+}
+
+/*! \brief
+ * A convenience getter for the commrec duty assignment;
+ * asserts that duty is actually valid (has been initialized).
+ *
+ * \param[in] cr    Communication record pointer
+ * \param[in] duty  A single duty's corresponding DUTY_ flag: exactly one of
+ *                  DUTY_PP or DUTY_PME. Combinations are not supported
+ *                  (enforced by assertion).
+ *
+ * \returns Whether this duty is assigned to this rank.
+ */
+inline bool thisRankHasDuty(const t_commrec *cr, int duty)
+{
+    GMX_ASSERT((duty == DUTY_PME) || (duty == DUTY_PP), "Invalid duty type");
+    return (getThisRankDuties(cr) & duty);
+}
+
//! True if this is a simulation with more than 1 node
#define PAR(cr) ((cr)->nnodes > 1)
#define MASTER(cr) (((cr)->nodeid == 0) || !PAR(cr))
//! True if this is the particle-particle master
-#define SIMMASTER(cr) ((MASTER(cr) && ((cr)->duty & DUTY_PP)) || !PAR(cr))
+#define SIMMASTER(cr) ((MASTER(cr) && thisRankHasDuty((cr), DUTY_PP)) || !PAR(cr))
//! The node id for this rank
#define RANK(cr, nodeid) (nodeid)
reset_all_counters(fplog, mdlog, cr, step, &step_rel, ir, wcycle, nrnb, walltime_accounting,
use_GPU(fr->nbv) ? fr->nbv : nullptr, fr->pmedata);
wcycle_set_reset_counters(wcycle, -1);
- if (!(cr->duty & DUTY_PME))
+ if (!thisRankHasDuty(cr, DUTY_PME))
{
/* Tell our PME node to reset its counters */
gmx_pme_send_resetcounters(cr, step);
close_trx(status);
}
- if (!(cr->duty & DUTY_PME))
+ if (!thisRankHasDuty(cr, DUTY_PME))
{
/* Tell the PME only node to finish */
gmx_pme_send_finish(cr);
hwinfo->nthreads_hw_avail,
hw_opt.nthreads_omp,
hw_opt.nthreads_omp_pme,
- (cr->duty & DUTY_PP) == 0,
+ !thisRankHasDuty(cr, DUTY_PP),
inputrec->cutoff_scheme == ecutsVERLET);
#ifndef NDEBUG
* across PP ranks on a node, with possible multiple devices
* or sharing devices on a node, either from the user
* selection, or automatically. */
- bool rankCanUseGpu = cr->duty & DUTY_PP;
+ bool rankCanUseGpu = thisRankHasDuty(cr, DUTY_PP);
gpuTaskAssignment = mapPpRanksToGpus(rankCanUseGpu, cr, hwinfo->gpu_info, hw_opt);
}
gmx_device_info_t *shortRangedDeviceInfo = nullptr;
int shortRangedDeviceId = -1;
- if (cr->duty & DUTY_PP)
+ if (thisRankHasDuty(cr, DUTY_PP))
{
if (!gpuTaskAssignment.empty())
{
}
snew(nrnb, 1);
- if (cr->duty & DUTY_PP)
+ if (thisRankHasDuty(cr, DUTY_PP))
{
/* Initiate forcerecord */
fr = mk_forcerec();
int nthread_local;
/* threads on this MPI process or TMPI thread */
- if (cr->duty & DUTY_PP)
+ if (thisRankHasDuty(cr, DUTY_PP))
{
nthread_local = gmx_omp_nthreads_get(emntNonbonded);
}
gmx_bcast_sim(sizeof(nTypePerturbed), &nTypePerturbed, cr);
}
- if (cr->duty & DUTY_PME)
+ if (thisRankHasDuty(cr, DUTY_PME))
{
try
{
signal_handler_install();
}
- if (cr->duty & DUTY_PP)
+ if (thisRankHasDuty(cr, DUTY_PP))
{
/* Assumes uniform use of the number of OpenMP threads */
walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(emntDefault));