3 * This source code is part of
7 * GROningen MAchine for Chemical Simulations
10 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
11 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
12 * Copyright (c) 2001-2004, The GROMACS development team,
13 * check out http://www.gromacs.org for more information.
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version 2
18 * of the License, or (at your option) any later version.
20 * If you want to redistribute modifications, please consider that
21 * scientific software is very special. Version control is crucial -
22 * bugs must be traceable. We will be happy to consider code for
23 * inclusion in the official distribution, but derived work must not
24 * be called official GROMACS. Details are found in the README & COPYING
25 * files - if they are missing, get the official version at www.gromacs.org.
27 * To help us fund GROMACS development, we humbly ask that you cite
28 * the papers on the package - you can find them in the top README file.
30 * For more info, check our website at http://www.gromacs.org
33 * Gromacs Runs On Most of All Computer Systems
41 * This module defines the interface of the actual communication routines.
46 #include "types/simple.h"
47 #include "types/commrec.h"
50 #include "gmx_fatal.h"
t_commrec *init_commrec(void);
/* Allocate, initialize and return the commrec (caller becomes owner). */

t_commrec *reinitialize_commrec_for_this_thread(const t_commrec *cro);
/* Initialize communication records for thread-parallel simulations.
   Must be called on all threads before any communication takes place by
   the individual threads. Copies the original commrec cro to a new,
   thread-local version and returns it (a small memory leak results
   because we don't deallocate the old shared version). */

void gmx_fill_commrec_from_mpi(t_commrec *cr);
/* Continues t_commrec construction started by init_commrec(),
   filling in the MPI-derived fields of cr. */
int gmx_node_num(void);
/* Return the number of nodes in the ring */

int gmx_node_rank(void);
/* Return the rank of this node in the ring */
int gmx_physicalnode_id_hash(void);
/* Return a non-negative hash that is, hopefully, unique for each physical node.
 * This hash is useful for determining hardware locality.
 */

int gmx_hostname_num(void);
/* Ostensibly, returns an integer characteristic of and unique to each
   physical node in the MPI system. If the first part of the MPI
   hostname (up to the first dot) ends with a number, returns this
   number. If the first part of the MPI hostname does not end in a
   number (0-9 characters), returns 0. */
void gmx_setup_nodecomm(FILE *fplog, t_commrec *cr);
/* Sets up fast global communication for clusters with multi-core nodes.
   Diagnostics are written to fplog (may describe the chosen layout). */

void gmx_init_intranode_counters(t_commrec *cr);
/* Initializes intra-physical-node MPI process/thread counts and ID. */

gmx_bool gmx_mpi_initialized(void);
/* Return TRUE when MPI_Init has been called.
 * Return FALSE when MPI_Init has not been called OR
 * when GROMACS was compiled without MPI support.
 */
void gmx_barrier(const t_commrec *cr);
/* Wait till all processes in cr->mpi_comm_mygroup have reached the barrier */

void gmx_bcast(int nbytes, void *b, const t_commrec *cr);
/* Broadcast nbytes bytes from the master to cr->mpi_comm_mygroup */

void gmx_bcast_sim(int nbytes, void *b, const t_commrec *cr);
/* Broadcast nbytes bytes from the sim master to cr->mpi_comm_mysim */

void gmx_sumi(int nr, int r[], const t_commrec *cr);
/* Calculate the global sum of an array of nr ints */

void gmx_sumli(int nr, gmx_large_int_t r[], const t_commrec *cr);
/* Calculate the global sum of an array of nr large ints */

void gmx_sumf(int nr, float r[], const t_commrec *cr);
/* Calculate the global sum of an array of nr floats */

void gmx_sumd(int nr, double r[], const t_commrec *cr);
/* Calculate the global sum of an array of nr doubles */

void gmx_sumf_comm(int nr, float r[], MPI_Comm mpi_comm);
/* Calculate the global sum of an array of nr floats over mpi_comm */

void gmx_sumd_comm(int nr, double r[], MPI_Comm mpi_comm);
/* Calculate the global sum of an array of nr doubles over mpi_comm */

void gmx_sumi_sim(int nr, int r[], const gmx_multisim_t *ms);
/* Calculate the sum over the simulations of an array of nr ints */

void gmx_sumli_sim(int nr, gmx_large_int_t r[], const gmx_multisim_t *ms);
/* Calculate the sum over the simulations of an array of nr large ints */

void gmx_sumf_sim(int nr, float r[], const gmx_multisim_t *ms);
/* Calculate the sum over the simulations of an array of nr floats */

void gmx_sumd_sim(int nr, double r[], const gmx_multisim_t *ms);
/* Calculate the sum over the simulations of an array of nr doubles */

void gmx_abort(int nodeid, int nnodes, int errorno);
/* Abort the parallel run with error code errorno */
/* Select the precision of the generic global-sum names at compile time:
 * in a double-precision build (GMX_DOUBLE defined) gmx_sum* map to the
 * double routines, otherwise to the float routines. Without this guard
 * each macro would be defined twice with conflicting replacement lists,
 * which violates C11 6.10.3p2 (macro redefinition). */
#ifdef GMX_DOUBLE
#define gmx_sum_comm gmx_sumd_comm
#define gmx_sum gmx_sumd
#define gmx_sum_sim gmx_sumd_sim
#else
#define gmx_sum_comm gmx_sumf_comm
#define gmx_sum gmx_sumf
#define gmx_sum_sim gmx_sumf_sim
#endif
/* When debugging is enabled (bDebugMode()), print the MPI rank (-1 if MPI
 * is not initialized), the current source file and line to the debug log,
 * or to stderr when no debug file is open; the stream is flushed
 * unconditionally. Wrapped in do { } while (0) so the multi-statement
 * body behaves as a single statement after an if/else. */
#define debug_gmx() do { FILE *fp = debug ? debug : stderr; \
        if (bDebugMode()) { fprintf(fp, "NODEID=%d, %s %d\n", gmx_mpi_initialized() ? gmx_node_rank() : -1, __FILE__, __LINE__); } fflush(fp); } while (0)
164 #endif /* _network_h */