/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
39 #include "basenetwork.h"
55 #include "gromacs/utility/cstringutil.h"
56 #include "gromacs/utility/fatalerror.h"
57 #include "gromacs/utility/gmxmpi.h"
58 #include "gromacs/utility/programcontext.h"
int gmx_gethostname(char *name, size_t len)
{
    if (len < 8)
    {
        gmx_incons("gmx_gethostname called with len<8");
    }
#if defined(HAVE_UNISTD_H) && !defined(__native_client__) && !defined(__MINGW32__)
    if (gethostname(name, len-1) != 0)
    {
        std::strncpy(name, "unknown", 8);
        return -1;
    }
    return 0;
#else
    std::strncpy(name, "unknown", 8);
    return -1;
#endif
}
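/* Illustrative usage sketch (caller code assumed for this example, not
 * part of this file): the caller supplies its own buffer, which must be
 * at least 8 bytes; the size 256 below is an arbitrary choice.
 *
 *     char host[256];
 *     if (gmx_gethostname(host, sizeof(host)) != 0)
 *     {
 *         // host now holds "unknown"; proceed without a real hostname.
 *     }
 */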
gmx_bool gmx_mpi_initialized(void)
{
#ifndef GMX_MPI
    return 0;
#else
    int n;
    MPI_Initialized(&n);

    return n;
#endif
}
int gmx_node_num(void)
{
#ifndef GMX_MPI
    return 1;
#else
#ifdef GMX_THREAD_MPI
    if (!gmx_mpi_initialized())
    {
        return 1;
    }
#endif
    int i;
    (void) MPI_Comm_size(MPI_COMM_WORLD, &i);
    return i;
#endif
}
int gmx_node_rank(void)
{
#ifndef GMX_MPI
    return 0;
#else
#ifdef GMX_THREAD_MPI
    if (!gmx_mpi_initialized())
    {
        return 0;
    }
#endif
    int i;
    (void) MPI_Comm_rank(MPI_COMM_WORLD, &i);
    return i;
#endif
}
static int mpi_hostname_hash(void)
{
    int hash_int;

#ifndef GMX_LIB_MPI
    /* We have a single physical node */
    hash_int = 0;
#else
    int  resultlen;
    char mpi_hostname[MPI_MAX_PROCESSOR_NAME];

    /* This procedure can only differentiate nodes with different names.
     * Architectures where different physical nodes have identical names,
     * such as IBM Blue Gene, should use an architecture-specific solution.
     */
    MPI_Get_processor_name(mpi_hostname, &resultlen);

    /* The string hash function returns an unsigned int. We cast to an int.
     * Negative numbers are converted to positive by setting the sign bit to 0.
     * This makes the hash one bit smaller.
     * A 63-bit hash (with 64-bit int) should be enough for unique node hashes,
     * even on a million-node machine. 31 bits might not be enough though!
     */
    hash_int =
        (int)gmx_string_fullhash_func(mpi_hostname, gmx_string_hash_init);
    if (hash_int < 0)
    {
        hash_int -= INT_MIN;
    }
#endif

    return hash_int;
}
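/* Worked example of the sign-bit conversion above (illustrative only,
 * assuming a 32-bit two's-complement int): if the unsigned hash is
 * 0xFFFFFFFB, the cast yields hash_int == -5; subtracting INT_MIN
 * (0x80000000) clears the sign bit and gives 0x7FFFFFFB, a positive
 * 31-bit value. Distinct hostnames keep distinct hashes, since only
 * the sign bit is discarded.
 */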
#if defined GMX_LIB_MPI && defined GMX_TARGET_BGQ
#ifdef __clang__
/* IBM's declaration of this function in
 * /bgsys/drivers/V1R2M2/ppc64/spi/include/kernel/process.h
 * erroneously fails to specify __INLINE__, despite
 * /bgsys/drivers/V1R2M2/ppc64/spi/include/kernel/cnk/process_impl.h
 * specifying __INLINE__, so bgclang thinks they are different enough
 * to complain about. */
static uint64_t Kernel_GetJobID();
#endif
#include <spi/include/kernel/location.h>
static int bgq_nodenum(void)
{
    int           hostnum = 0;
    Personality_t personality;
    Kernel_GetPersonality(&personality, sizeof(personality));
    /* Each MPI rank has a unique coordinate in a 6-dimensional space
       (A,B,C,D,E,T), with dimensions A-E corresponding to different
       physical nodes, and T within each node. Each node has sixteen
       physical cores, each of which can have up to four hardware
       threads, so 0 <= T <= 63 (but the maximum value of T depends on
       the configuration of ranks and OpenMP threads per
       node). However, T is irrelevant for computing a suitable return
       value for gmx_physicalnode_id_hash().
     */
    hostnum  = personality.Network_Config.Acoord;
    hostnum *= personality.Network_Config.Bnodes;
    hostnum += personality.Network_Config.Bcoord;
    hostnum *= personality.Network_Config.Cnodes;
    hostnum += personality.Network_Config.Ccoord;
    hostnum *= personality.Network_Config.Dnodes;
    hostnum += personality.Network_Config.Dcoord;
    hostnum *= personality.Network_Config.Enodes;
    hostnum += personality.Network_Config.Ecoord;
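    /* The accumulation above is a mixed-radix (Horner-style) flattening
     * of the 5-D torus coordinate into one integer, equivalent to
     *     ((((A*Bn + B)*Cn + C)*Dn + D)*En + E
     * where Xn is the extent of dimension X. Illustrative example only:
     * on a hypothetical 2x2x2x2x2 torus, coordinate (1,0,1,0,1) maps to
     * ((((1*2+0)*2+1)*2+0)*2+1 = 21, and distinct coordinates always
     * map to distinct values.
     */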
198 "Torus ID A: %d / %d B: %d / %d C: %d / %d D: %d / %d E: %d / %d\n"
199 "Node ID T: %d / %d core: %d / %d hardware thread: %d / %d\n",
200 personality.Network_Config.Acoord,
201 personality.Network_Config.Anodes,
202 personality.Network_Config.Bcoord,
203 personality.Network_Config.Bnodes,
204 personality.Network_Config.Ccoord,
205 personality.Network_Config.Cnodes,
206 personality.Network_Config.Dcoord,
207 personality.Network_Config.Dnodes,
208 personality.Network_Config.Ecoord,
209 personality.Network_Config.Enodes,
210 Kernel_ProcessorCoreID(),
212 Kernel_ProcessorID(),
214 Kernel_ProcessorThreadID(),
int gmx_physicalnode_id_hash(void)
{
    int hash;

#ifndef GMX_MPI
    hash = 0;
#else
#ifdef GMX_THREAD_MPI
    /* thread-MPI currently puts the thread number in the process name,
     * we might want to change this, as this is inconsistent with what
     * most MPI implementations would do when running on a single node.
     */
    hash = 0;
#else
#ifdef GMX_TARGET_BGQ
    hash = bgq_nodenum();
#else
    hash = mpi_hostname_hash();
#endif
#endif
#endif

    if (debug)
    {
        fprintf(debug, "In gmx_physicalnode_id_hash: hash %d\n", hash);
    }

    return hash;
}
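/* Illustrative usage sketch (assumed caller code, not part of this
 * file): the hash is suitable as a "color" for splitting a communicator
 * into per-physical-node groups with standard MPI.
 *
 *     int      rank;
 *     MPI_Comm nodeComm;
 *     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
 *     MPI_Comm_split(MPI_COMM_WORLD, gmx_physicalnode_id_hash(),
 *                    rank, &nodeComm);
 *     // Ranks on the same physical node now share nodeComm.
 */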
void gmx_abort(int errorno)
{
    const char *programName = "GROMACS";
    try
    {
        programName = gmx::getProgramContext().displayName();
    }
    catch (const std::exception &)
    {
        /* Keep the default program name; we are aborting anyway. */
    }

    const int nnodes   = gmx_node_num();
    const int noderank = gmx_node_rank();

    if (nnodes > 1)
    {
        std::fprintf(stderr, "Halting parallel program %s on rank %d out of %d\n",
                     programName, noderank, nnodes);
    }
    else
    {
        std::fprintf(stderr, "Halting program %s\n", programName);
    }

#ifdef GMX_MPI
    MPI_Abort(MPI_COMM_WORLD, errorno);
#endif
    std::exit(errorno);
}