* To help us fund GROMACS development, we humbly ask that you cite
* the research papers on the package. Check out http://www.gromacs.org.
*/
-#ifdef HAVE_CONFIG_H
-#include <config.h>
-#endif
+#include "gmxpre.h"
+
+#include "gromacs/legacyheaders/network.h"
+
+#include "config.h"
-#include <string.h>
-#include "gmx_fatal.h"
-#include "main.h"
-#include "gromacs/utility/smalloc.h"
-#include "types/commrec.h"
-#include "network.h"
-#include "copyrite.h"
#include <ctype.h>
-#include "macros.h"
-#include "string2.h"
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+#include "gromacs/legacyheaders/copyrite.h"
+#include "gromacs/legacyheaders/macros.h"
+#include "gromacs/legacyheaders/types/commrec.h"
+#include "gromacs/utility/basenetwork.h"
+#include "gromacs/utility/cstringutil.h"
+#include "gromacs/utility/fatalerror.h"
+#include "gromacs/utility/futil.h"
#include "gromacs/utility/gmxmpi.h"
-
+#include "gromacs/utility/smalloc.h"
/* The source code in this file should be thread-safe.
Please keep it that way. */
-gmx_bool gmx_mpi_initialized(void)
-{
- int n;
-#ifndef GMX_MPI
- return 0;
-#else
- MPI_Initialized(&n);
-
- return n;
-#endif
-}
-
void gmx_fill_commrec_from_mpi(t_commrec gmx_unused *cr)
{
#ifndef GMX_MPI
#endif
}
-int gmx_node_num(void)
-{
-#ifndef GMX_MPI
- return 1;
-#else
- int i;
- (void) MPI_Comm_size(MPI_COMM_WORLD, &i);
- return i;
-#endif
-}
-
-int gmx_node_rank(void)
-{
-#ifndef GMX_MPI
- return 0;
-#else
- int i;
- (void) MPI_Comm_rank(MPI_COMM_WORLD, &i);
- return i;
-#endif
-}
-
-#if defined GMX_LIB_MPI && defined GMX_TARGET_BGQ
-#include <spi/include/kernel/location.h>
-#endif
-
-int gmx_physicalnode_id_hash(void)
-{
- int hash_int;
-
-#ifndef GMX_LIB_MPI
- /* We have a single physical node */
- hash_int = 0;
-#else
- int resultlen;
- char mpi_hostname[MPI_MAX_PROCESSOR_NAME];
-
- /* This procedure can only differentiate nodes with different names.
- * Architectures where different physical nodes have identical names,
- * such as IBM Blue Gene, should use an architecture specific solution.
- */
- MPI_Get_processor_name(mpi_hostname, &resultlen);
-
- /* The string hash function returns an unsigned int. We cast to an int.
- * Negative numbers are converted to positive by setting the sign bit to 0.
- * This makes the hash one bit smaller.
- * A 63-bit hash (with 64-bit int) should be enough for unique node hashes,
- * even on a million node machine. 31 bits might not be enough though!
- */
- hash_int =
- (int)gmx_string_fullhash_func(mpi_hostname, gmx_string_hash_init);
- if (hash_int < 0)
- {
- hash_int -= INT_MIN;
- }
-#endif
-
- return hash_int;
-}
-
-/* TODO: this function should be fully replaced by gmx_physicalnode_id_hash */
-int gmx_hostname_num()
-{
-#ifndef GMX_MPI
- return 0;
-#else
-#ifdef GMX_THREAD_MPI
- /* thread-MPI currently puts the thread number in the process name,
- * we might want to change this, as this is inconsistent with what
- * most MPI implementations would do when running on a single node.
- */
- return 0;
-#else
- int resultlen, hostnum, i, j;
- char mpi_hostname[MPI_MAX_PROCESSOR_NAME], hostnum_str[MPI_MAX_PROCESSOR_NAME];
-
- MPI_Get_processor_name(mpi_hostname, &resultlen);
-#ifdef GMX_TARGET_BGQ
- Personality_t personality;
- Kernel_GetPersonality(&personality, sizeof(personality));
- /* Each MPI rank has a unique coordinate in a 6-dimensional space
- (A,B,C,D,E,T), with dimensions A-E corresponding to different
- physical nodes, and T within each node. Each node has sixteen
- physical cores, each of which can have up to four hardware
- threads, so 0 <= T <= 63 (but the maximum value of T depends on
- the configuration of ranks and OpenMP threads per
- node). However, T is irrelevant for computing a suitable return
- value for gmx_hostname_num().
- */
- hostnum = personality.Network_Config.Acoord;
- hostnum *= personality.Network_Config.Bnodes;
- hostnum += personality.Network_Config.Bcoord;
- hostnum *= personality.Network_Config.Cnodes;
- hostnum += personality.Network_Config.Ccoord;
- hostnum *= personality.Network_Config.Dnodes;
- hostnum += personality.Network_Config.Dcoord;
- hostnum *= personality.Network_Config.Enodes;
- hostnum += personality.Network_Config.Ecoord;
-#else
- /* This procedure can only differentiate nodes with host names
- * that end on unique numbers.
- */
- i = 0;
- j = 0;
- /* Only parse the host name up to the first dot */
- while (i < resultlen && mpi_hostname[i] != '.')
- {
- if (isdigit(mpi_hostname[i]))
- {
- hostnum_str[j++] = mpi_hostname[i];
- }
- i++;
- }
- hostnum_str[j] = '\0';
- if (j == 0)
- {
- hostnum = 0;
- }
- else
- {
- /* Use only the last 9 decimals, so we don't overflow an int */
- hostnum = strtol(hostnum_str + max(0, j-9), NULL, 10);
- }
-#endif
-
- if (debug)
- {
- fprintf(debug, "In gmx_hostname_num: hostname '%s', hostnum %d\n",
- mpi_hostname, hostnum);
-#ifdef GMX_TARGET_BGQ
- fprintf(debug,
- "Torus ID A: %d / %d B: %d / %d C: %d / %d D: %d / %d E: %d / %d\nNode ID T: %d / %d core: %d / %d hardware thread: %d / %d\n",
- personality.Network_Config.Acoord,
- personality.Network_Config.Anodes,
- personality.Network_Config.Bcoord,
- personality.Network_Config.Bnodes,
- personality.Network_Config.Ccoord,
- personality.Network_Config.Cnodes,
- personality.Network_Config.Dcoord,
- personality.Network_Config.Dnodes,
- personality.Network_Config.Ecoord,
- personality.Network_Config.Enodes,
- Kernel_ProcessorCoreID(),
- 16,
- Kernel_ProcessorID(),
- 64,
- Kernel_ProcessorThreadID(),
- 4);
-#endif
- }
- return hostnum;
-#endif
-#endif
-}
-
void gmx_setup_nodecomm(FILE gmx_unused *fplog, t_commrec *cr)
{
gmx_nodecomm_t *nc;
- int n, rank, hostnum, ng, ni;
+ int n, rank, nodehash, ng, ni;
/* Many MPI implementations do not optimize MPI_Allreduce
* (and probably also other global communication calls)
MPI_Comm_size(cr->mpi_comm_mygroup, &n);
MPI_Comm_rank(cr->mpi_comm_mygroup, &rank);
- hostnum = gmx_hostname_num();
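+ /* Identify the physical node by hashing the MPI processor name;
+ * unlike parsing trailing digits from the host name, this also
+ * works on machines where node names carry no numbers.
+ */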
+ nodehash = gmx_physicalnode_id_hash();
if (debug)
{
- /* The intra-node communicator, split on node number */
- MPI_Comm_split(cr->mpi_comm_mygroup, hostnum, rank, &nc->comm_intra);
+ /* The intra-node communicator, split on the physical node hash */
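+ /* Ranks that pass the same nodehash as color end up in the same
+ * communicator; using rank as the key preserves the original rank
+ * ordering within each node.
+ */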
+ MPI_Comm_split(cr->mpi_comm_mygroup, nodehash, rank, &nc->comm_intra);
MPI_Comm_rank(nc->comm_intra, &nc->rank_intra);
if (debug)
{
- fprintf(debug, "In gmx_setup_nodecomm: node rank %d rank_intra %d\n",
+ fprintf(debug, "In gmx_setup_nodecomm: node ID %d rank within node %d\n",
rank, nc->rank_intra);
}
/* The inter-node communicator, split on rank_intra.
nc->bUse = TRUE;
if (fplog)
{
- fprintf(fplog, "Using two step summing over %d groups of on average %.1f processes\n\n",
+ fprintf(fplog, "Using two-step summing over %d groups of %.1f ranks on average\n\n",
ng, (real)n/(real)ng);
}
if (nc->rank_intra > 0)
/* thread-MPI is not initialized when not running in parallel */
#if defined GMX_MPI && !defined GMX_THREAD_MPI
int nrank_world, rank_world;
- int i, mynum, *num, *num_s, *num_pp, *num_pp_s;
+ int i, myhash, *hash, *hash_s, *hash_pp, *hash_pp_s;
MPI_Comm_size(MPI_COMM_WORLD, &nrank_world);
MPI_Comm_rank(MPI_COMM_WORLD, &rank_world);
- /* Get the node number from the hostname to identify the nodes */
- mynum = gmx_hostname_num();
+ /* Get a (hopefully unique) hash that identifies our physical node */
+ myhash = gmx_physicalnode_id_hash();
/* We can't rely on MPI_IN_PLACE, so we need send and receive buffers */
- snew(num, nrank_world);
- snew(num_s, nrank_world);
- snew(num_pp, nrank_world);
- snew(num_pp_s, nrank_world);
+ snew(hash, nrank_world);
+ snew(hash_s, nrank_world);
+ snew(hash_pp, nrank_world);
+ snew(hash_pp_s, nrank_world);
- num_s[rank_world] = mynum;
- num_pp_s[rank_world] = (cr->duty & DUTY_PP) ? mynum : -1;
+ hash_s[rank_world] = myhash;
+ hash_pp_s[rank_world] = (cr->duty & DUTY_PP) ? myhash : -1;
- MPI_Allreduce(num_s, num, nrank_world, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
- MPI_Allreduce(num_pp_s, num_pp, nrank_world, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
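+ /* Each rank fills only its own slot; snew() zero-initialized the
+ * send buffers, so the MPI_SUM reductions below effectively
+ * implement an allgather of the per-rank hashes.
+ */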
+ MPI_Allreduce(hash_s, hash, nrank_world, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
+ MPI_Allreduce(hash_pp_s, hash_pp, nrank_world, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
nrank_intranode = 0;
rank_intranode = 0;
rank_pp_intranode = 0;
for (i = 0; i < nrank_world; i++)
{
- if (num[i] == mynum)
+ if (hash[i] == myhash)
{
nrank_intranode++;
if (i < rank_world)
rank_intranode++;
}
}
- if ((cr->duty & DUTY_PP) && num_pp[i] == mynum)
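+ /* Count every PP rank residing on our physical node, also when we
+ * are not a PP rank ourselves; rank_pp_intranode only advances on
+ * ranks that do PP work.
+ */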
+ if (hash_pp[i] == myhash)
{
nrank_pp_intranode++;
- if (i < rank_world)
+ if ((cr->duty & DUTY_PP) && i < rank_world)
{
rank_pp_intranode++;
}
}
}
- sfree(num);
- sfree(num_s);
- sfree(num_pp);
- sfree(num_pp_s);
+ sfree(hash);
+ sfree(hash_s);
+ sfree(hash_pp);
+ sfree(hash_pp_s);
#else
/* Serial or thread-MPI code: we run within a single physical node */
nrank_intranode = cr->nnodes;
{
sprintf(sbuf, "%s", cr->duty & DUTY_PP ? "PP" : "PME");
}
- fprintf(debug, "On %3s node %d: nrank_intranode=%d, rank_intranode=%d, "
+ fprintf(debug, "On %3s rank %d: nrank_intranode=%d, rank_intranode=%d, "
"nrank_pp_intranode=%d, rank_pp_intranode=%d\n",
sbuf, cr->sim_nodeid,
nrank_intranode, rank_intranode,
#endif
}
-void gmx_abort(int gmx_unused noderank, int gmx_unused nnodes, int gmx_unused errorno)
-{
-#ifndef GMX_MPI
- gmx_call("gmx_abort");
-#else
-#ifdef GMX_THREAD_MPI
- fprintf(stderr, "Halting program %s\n", ShortProgram());
- gmx_thanx(stderr);
- exit(1);
-#else
- if (nnodes > 1)
- {
- fprintf(stderr, "Halting parallel program %s on CPU %d out of %d\n",
- ShortProgram(), noderank, nnodes);
- }
- else
- {
- fprintf(stderr, "Halting program %s\n", ShortProgram());
- }
-
- gmx_thanx(stderr);
- MPI_Abort(MPI_COMM_WORLD, errorno);
- exit(1);
-#endif
-#endif
-}
-
void gmx_bcast(int gmx_unused nbytes, void gmx_unused *b, const t_commrec gmx_unused *cr)
{
#ifndef GMX_MPI
#endif
#endif
}
+
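+/* Checks on the master rank whether fname exists and broadcasts the
+ * result, so that all ranks of the simulation return the same answer.
+ */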
+gmx_bool gmx_fexist_master(const char *fname, t_commrec *cr)
+{
+ gmx_bool bExist;
+
+ if (SIMMASTER(cr))
+ {
+ bExist = gmx_fexist(fname);
+ }
+ if (PAR(cr))
+ {
+ gmx_bcast(sizeof(bExist), &bExist, cr);
+ }
+ return bExist;
+}
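+
+/* Typical use (the file name here is illustrative): make all ranks
+ * agree on whether a checkpoint exists before any rank opens it:
+ *   if (gmx_fexist_master(checkpoint_fn, cr)) { ... }
+ */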
+
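+/* Raises a fatal error from code that runs on all ranks; either cr
+ * or, when cr is NULL, dd identifies the communicator involved.
+ */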
+void gmx_fatal_collective(int f_errno, const char *file, int line,
+ const t_commrec *cr, gmx_domdec_t *dd,
+ const char *fmt, ...)
+{
+ va_list ap;
+ gmx_bool bMaster, bFinalize;
+#ifdef GMX_MPI
+ int result;
+ /* Check if we are calling on all processes in MPI_COMM_WORLD */
+ if (cr != NULL)
+ {
+ MPI_Comm_compare(cr->mpi_comm_mysim, MPI_COMM_WORLD, &result);
+ }
+ else
+ {
+ MPI_Comm_compare(dd->mpi_comm_all, MPI_COMM_WORLD, &result);
+ }
+ /* Any result except MPI_UNEQUAL means our communicator spans the
+ * same set of ranks as MPI_COMM_WORLD, so every rank reaches this
+ * call and the collective MPI_Finalize can safely be used.
+ */
+ bFinalize = (result != MPI_UNEQUAL);
+#else
+ bFinalize = TRUE;
+#endif
+ bMaster = (cr != NULL && MASTER(cr)) || (dd != NULL && DDMASTER(dd));
+
+ va_start(ap, fmt);
+ gmx_fatal_mpi_va(f_errno, file, line, bMaster, bFinalize, fmt, ap);
+ va_end(ap);
+}
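+
+/* Illustrative call; FARGS supplies the errno/file/line arguments as
+ * for gmx_fatal(), and the message text here is made up:
+ *   gmx_fatal_collective(FARGS, cr, NULL,
+ *                        "Inconsistent input on rank %d", rank);
+ */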