/* The source code in this file should be thread-safe.
Please keep it that way. */
-CommrecHandle init_commrec(MPI_Comm communicator,
- const gmx_multisim_t *ms)
+CommrecHandle init_commrec(MPI_Comm communicator, const gmx_multisim_t* ms)
{
CommrecHandle handle;
- t_commrec *cr;
+ t_commrec* cr;
snew(cr, 1);
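    /* Hand ownership of the newly allocated commrec to the handle right away,
     * so its lifetime is managed through the returned handle. */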
handle.reset(cr);
int rankInCommunicator, sizeOfCommunicator;
#if GMX_MPI
-# if GMX_LIB_MPI
+# if GMX_LIB_MPI
GMX_RELEASE_ASSERT(gmx_mpi_initialized(), "Must have initialized MPI before building commrec");
-# endif
+# endif
MPI_Comm_rank(communicator, &rankInCommunicator);
MPI_Comm_size(communicator, &sizeOfCommunicator);
#else
return handle;
}
-void done_commrec(t_commrec *cr)
+void done_commrec(t_commrec* cr)
{
if (MASTER(cr))
{
    // TODO We need to be able to free communicators, but the
    // structure of the commrec and domdec initialization code makes
    // it hard to avoid both leaks and double frees.
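    // mpi_comm_mysim and mpi_comm_mygroup can refer to the same communicator,
    // so remember that here to avoid freeing it twice if freeing is restored.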
bool mySimIsMyGroup = (cr->mpi_comm_mysim == cr->mpi_comm_mygroup);
- if (cr->mpi_comm_mysim != MPI_COMM_NULL &&
- cr->mpi_comm_mysim != MPI_COMM_WORLD)
+ if (cr->mpi_comm_mysim != MPI_COMM_NULL && cr->mpi_comm_mysim != MPI_COMM_WORLD)
{
// TODO see above
// MPI_Comm_free(&cr->mpi_comm_mysim);
}
- if (!mySimIsMyGroup &&
- cr->mpi_comm_mygroup != MPI_COMM_NULL &&
- cr->mpi_comm_mygroup != MPI_COMM_WORLD)
+ if (!mySimIsMyGroup && cr->mpi_comm_mygroup != MPI_COMM_NULL && cr->mpi_comm_mygroup != MPI_COMM_WORLD)
{
// TODO see above
// MPI_Comm_free(&cr->mpi_comm_mygroup);
sfree(cr);
}
-void gmx_setup_nodecomm(FILE gmx_unused *fplog, t_commrec *cr)
+void gmx_setup_nodecomm(FILE gmx_unused* fplog, t_commrec* cr)
{
- gmx_nodecomm_t *nc;
+ gmx_nodecomm_t* nc;
/* Many MPI implementations do not optimize MPI_Allreduce
* (and probably also other global communication calls)
nc->bUse = FALSE;
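    /* Two-step (intra-node plus inter-node) summing is only enabled further
     * down, when the ranks split into more than one non-trivial group and
     * GMX_NO_NODECOMM is not set. */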
#if !GMX_THREAD_MPI
-#if GMX_MPI
+# if GMX_MPI
int n, rank;
// TODO PhysicalNodeCommunicator could be extended/used to handle
MPI_Comm_rank(nc->comm_intra, &nc->rank_intra);
if (debug)
{
- fprintf(debug, "In gmx_setup_nodecomm: node ID %d rank within node %d\n",
- rank, nc->rank_intra);
+ fprintf(debug, "In gmx_setup_nodecomm: node ID %d rank within node %d\n", rank, nc->rank_intra);
}
/* The inter-node communicator, split on rank_intra.
* We actually only need the one for rank=0,
MPI_Comm_size(nc->comm_intra, &ni);
if (debug)
{
- fprintf(debug, "In gmx_setup_nodecomm: groups %d, my group size %d\n",
- ng, ni);
+ fprintf(debug, "In gmx_setup_nodecomm: groups %d, my group size %d\n", ng, ni);
}
- if (getenv("GMX_NO_NODECOMM") == nullptr &&
- ((ng > 1 && ng < n) || (ni > 1 && ni < n)))
+ if (getenv("GMX_NO_NODECOMM") == nullptr && ((ng > 1 && ng < n) || (ni > 1 && ni < n)))
{
nc->bUse = TRUE;
if (fplog)
{
- fprintf(fplog, "Using two step summing over %d groups of on average %.1f ranks\n\n",
- ng, (real)n/(real)ng);
+ fprintf(fplog, "Using two step summing over %d groups of on average %.1f ranks\n\n", ng,
+ (real)n / (real)ng);
}
if (nc->rank_intra > 0)
{
MPI_Comm_free(&nc->comm_intra);
if (debug)
{
- fprintf(debug, "In gmx_setup_nodecomm: not unsing separate inter- and intra-node communicators.\n");
+            fprintf(debug,
+                    "In gmx_setup_nodecomm: not using separate inter- and intra-node "
+                    "communicators.\n");
}
}
-#endif
+# endif
#else
/* tMPI runs only on a single node so just use the nodeid */
nc->rank_intra = cr->nodeid;
#endif
}
-void gmx_barrier(const t_commrec gmx_unused *cr)
+void gmx_barrier(const t_commrec gmx_unused* cr)
{
#if !GMX_MPI
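    /* Without MPI there is nothing to synchronize; gmx_call() raises a fatal
     * error because this routine should not be reached in a non-MPI build.
     * The communication routines below use the same pattern. */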
gmx_call("gmx_barrier");
#endif
}
-void gmx_bcast(int gmx_unused nbytes, void gmx_unused *b, const t_commrec gmx_unused *cr)
+void gmx_bcast(int gmx_unused nbytes, void gmx_unused* b, const t_commrec gmx_unused* cr)
{
#if !GMX_MPI
gmx_call("gmx_bast");
#endif
}
-void gmx_bcast_sim(int gmx_unused nbytes, void gmx_unused *b, const t_commrec gmx_unused *cr)
+void gmx_bcast_sim(int gmx_unused nbytes, void gmx_unused* b, const t_commrec gmx_unused* cr)
{
#if !GMX_MPI
gmx_call("gmx_bast");
#endif
}
-void gmx_sumd(int gmx_unused nr, double gmx_unused r[], const t_commrec gmx_unused *cr)
+void gmx_sumd(int gmx_unused nr, double gmx_unused r[], const t_commrec gmx_unused* cr)
{
#if !GMX_MPI
gmx_call("gmx_sumd");
#else
-#if MPI_IN_PLACE_EXISTS
+# if MPI_IN_PLACE_EXISTS
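    /* Two-step summing: each node first reduces r onto its intra-node rank 0
     * (comm_intra), the node roots then sum across nodes (comm_inter), and the
     * result is broadcast back within each node. */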
if (cr->nc.bUse)
{
if (cr->nc.rank_intra == 0)
{
/* Use two step summing. */
- MPI_Reduce(MPI_IN_PLACE, r, nr, MPI_DOUBLE, MPI_SUM, 0,
- cr->nc.comm_intra);
+ MPI_Reduce(MPI_IN_PLACE, r, nr, MPI_DOUBLE, MPI_SUM, 0, cr->nc.comm_intra);
/* Sum the roots of the internal (intra) buffers. */
- MPI_Allreduce(MPI_IN_PLACE, r, nr, MPI_DOUBLE, MPI_SUM,
- cr->nc.comm_inter);
+ MPI_Allreduce(MPI_IN_PLACE, r, nr, MPI_DOUBLE, MPI_SUM, cr->nc.comm_inter);
}
else
{
}
else
{
- MPI_Allreduce(MPI_IN_PLACE, r, nr, MPI_DOUBLE, MPI_SUM,
- cr->mpi_comm_mygroup);
+ MPI_Allreduce(MPI_IN_PLACE, r, nr, MPI_DOUBLE, MPI_SUM, cr->mpi_comm_mygroup);
}
-#else
+# else
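    /* Without MPI_IN_PLACE the reduction goes through the pre-allocated
     * cr->mpb->dbuf scratch buffer and the summed values are copied back
     * into r afterwards. */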
int i;
if (nr > cr->mpb->dbuf_alloc)
if (cr->nc.rank_intra == 0)
{
/* Sum with the buffers reversed */
- MPI_Allreduce(cr->mpb->dbuf, r, nr, MPI_DOUBLE, MPI_SUM,
- cr->nc.comm_inter);
+ MPI_Allreduce(cr->mpb->dbuf, r, nr, MPI_DOUBLE, MPI_SUM, cr->nc.comm_inter);
}
MPI_Bcast(r, nr, MPI_DOUBLE, 0, cr->nc.comm_intra);
}
else
{
- MPI_Allreduce(r, cr->mpb->dbuf, nr, MPI_DOUBLE, MPI_SUM,
- cr->mpi_comm_mygroup);
+ MPI_Allreduce(r, cr->mpb->dbuf, nr, MPI_DOUBLE, MPI_SUM, cr->mpi_comm_mygroup);
for (i = 0; i < nr; i++)
{
r[i] = cr->mpb->dbuf[i];
}
}
-#endif
+# endif
#endif
}
-void gmx_sumf(int gmx_unused nr, float gmx_unused r[], const t_commrec gmx_unused *cr)
+void gmx_sumf(int gmx_unused nr, float gmx_unused r[], const t_commrec gmx_unused* cr)
{
#if !GMX_MPI
gmx_call("gmx_sumf");
#else
-#if MPI_IN_PLACE_EXISTS
+# if MPI_IN_PLACE_EXISTS
if (cr->nc.bUse)
{
/* Use two step summing. */
if (cr->nc.rank_intra == 0)
{
- MPI_Reduce(MPI_IN_PLACE, r, nr, MPI_FLOAT, MPI_SUM, 0,
- cr->nc.comm_intra);
+ MPI_Reduce(MPI_IN_PLACE, r, nr, MPI_FLOAT, MPI_SUM, 0, cr->nc.comm_intra);
/* Sum the roots of the internal (intra) buffers */
- MPI_Allreduce(MPI_IN_PLACE, r, nr, MPI_FLOAT, MPI_SUM,
- cr->nc.comm_inter);
+ MPI_Allreduce(MPI_IN_PLACE, r, nr, MPI_FLOAT, MPI_SUM, cr->nc.comm_inter);
}
else
{
{
MPI_Allreduce(MPI_IN_PLACE, r, nr, MPI_FLOAT, MPI_SUM, cr->mpi_comm_mygroup);
}
-#else
+# else
int i;
if (nr > cr->mpb->fbuf_alloc)
if (cr->nc.rank_intra == 0)
{
/* Sum with the buffers reversed */
- MPI_Allreduce(cr->mpb->fbuf, r, nr, MPI_FLOAT, MPI_SUM,
- cr->nc.comm_inter);
+ MPI_Allreduce(cr->mpb->fbuf, r, nr, MPI_FLOAT, MPI_SUM, cr->nc.comm_inter);
}
MPI_Bcast(r, nr, MPI_FLOAT, 0, cr->nc.comm_intra);
}
else
{
- MPI_Allreduce(r, cr->mpb->fbuf, nr, MPI_FLOAT, MPI_SUM,
- cr->mpi_comm_mygroup);
+ MPI_Allreduce(r, cr->mpb->fbuf, nr, MPI_FLOAT, MPI_SUM, cr->mpi_comm_mygroup);
for (i = 0; i < nr; i++)
{
r[i] = cr->mpb->fbuf[i];
}
}
-#endif
+# endif
#endif
}
-void gmx_sumi(int gmx_unused nr, int gmx_unused r[], const t_commrec gmx_unused *cr)
+void gmx_sumi(int gmx_unused nr, int gmx_unused r[], const t_commrec gmx_unused* cr)
{
#if !GMX_MPI
gmx_call("gmx_sumi");
#else
-#if MPI_IN_PLACE_EXISTS
+# if MPI_IN_PLACE_EXISTS
if (cr->nc.bUse)
{
/* Use two step summing */
{
MPI_Allreduce(MPI_IN_PLACE, r, nr, MPI_INT, MPI_SUM, cr->mpi_comm_mygroup);
}
-#else
+# else
int i;
if (nr > cr->mpb->ibuf_alloc)
r[i] = cr->mpb->ibuf[i];
}
}
-#endif
+# endif
#endif
}
-void gmx_sumli(int gmx_unused nr, int64_t gmx_unused r[], const t_commrec gmx_unused *cr)
+void gmx_sumli(int gmx_unused nr, int64_t gmx_unused r[], const t_commrec gmx_unused* cr)
{
#if !GMX_MPI
gmx_call("gmx_sumli");
#else
-#if MPI_IN_PLACE_EXISTS
+# if MPI_IN_PLACE_EXISTS
if (cr->nc.bUse)
{
/* Use two step summing */
if (cr->nc.rank_intra == 0)
{
- MPI_Reduce(MPI_IN_PLACE, r, nr, MPI_INT64_T, MPI_SUM, 0,
- cr->nc.comm_intra);
+ MPI_Reduce(MPI_IN_PLACE, r, nr, MPI_INT64_T, MPI_SUM, 0, cr->nc.comm_intra);
/* Sum with the buffers reversed */
- MPI_Allreduce(MPI_IN_PLACE, r, nr, MPI_INT64_T, MPI_SUM,
- cr->nc.comm_inter);
+ MPI_Allreduce(MPI_IN_PLACE, r, nr, MPI_INT64_T, MPI_SUM, cr->nc.comm_inter);
}
else
{
{
MPI_Allreduce(MPI_IN_PLACE, r, nr, MPI_INT64_T, MPI_SUM, cr->mpi_comm_mygroup);
}
-#else
+# else
int i;
if (nr > cr->mpb->libuf_alloc)
if (cr->nc.bUse)
{
/* Use two step summing */
- MPI_Allreduce(r, cr->mpb->libuf, nr, MPI_INT64_T, MPI_SUM,
- cr->nc.comm_intra);
+ MPI_Allreduce(r, cr->mpb->libuf, nr, MPI_INT64_T, MPI_SUM, cr->nc.comm_intra);
if (cr->nc.rank_intra == 0)
{
/* Sum with the buffers reversed */
- MPI_Allreduce(cr->mpb->libuf, r, nr, MPI_INT64_T, MPI_SUM,
- cr->nc.comm_inter);
+ MPI_Allreduce(cr->mpb->libuf, r, nr, MPI_INT64_T, MPI_SUM, cr->nc.comm_inter);
}
MPI_Bcast(r, nr, MPI_INT64_T, 0, cr->nc.comm_intra);
}
else
{
- MPI_Allreduce(r, cr->mpb->libuf, nr, MPI_INT64_T, MPI_SUM,
- cr->mpi_comm_mygroup);
+ MPI_Allreduce(r, cr->mpb->libuf, nr, MPI_INT64_T, MPI_SUM, cr->mpi_comm_mygroup);
for (i = 0; i < nr; i++)
{
r[i] = cr->mpb->libuf[i];
}
}
-#endif
+# endif
#endif
}
-const char *opt2fn_master(const char *opt, int nfile, const t_filenm fnm[],
- t_commrec *cr)
+const char* opt2fn_master(const char* opt, int nfile, const t_filenm fnm[], t_commrec* cr)
{
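    /* Only the simulation master resolves the option to a file name; all
     * other ranks get nullptr. */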
return SIMMASTER(cr) ? opt2fn(opt, nfile, fnm) : nullptr;
}
-void gmx_fatal_collective(int f_errno, const char *file, int line,
- MPI_Comm comm, gmx_bool bMaster,
- gmx_fmtstr const char *fmt, ...)
+void gmx_fatal_collective(int f_errno,
+ const char* file,
+ int line,
+ MPI_Comm comm,
+ gmx_bool bMaster,
+ gmx_fmtstr const char* fmt,
+ ...)
{
va_list ap;
gmx_bool bFinalize;
#if GMX_MPI
- int result;
+ int result;
/* Check if we are calling on all processes in MPI_COMM_WORLD */
MPI_Comm_compare(comm, MPI_COMM_WORLD, &result);
/* Any result except MPI_UNEQUAL allows us to call MPI_Finalize */
va_end(ap);
}
-void simulationBarrier(const t_commrec *cr)
+void simulationBarrier(const t_commrec* cr)
{
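    /* Only parallel runs have other ranks to synchronize with; serial runs
     * fall straight through. */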
if (PAR(cr))
{