3 * This source code is part of
7 * GROningen MAchine for Chemical Simulations
10 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
11 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
12 * Copyright (c) 2001-2004, The GROMACS development team,
13 * check out http://www.gromacs.org for more information.
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version 2
18 * of the License, or (at your option) any later version.
20 * If you want to redistribute modifications, please consider that
21 * scientific software is very special. Version control is crucial -
22 * bugs must be traceable. We will be happy to consider code for
23 * inclusion in the official distribution, but derived work must not
24 * be called official GROMACS. Details are found in the README & COPYING
25 * files - if they are missing, get the official version at www.gromacs.org.
27 * To help us fund GROMACS development, we humbly ask that you cite
28 * the papers on the package - you can find them in the top README file.
30 * For more info, check our website at http://www.gromacs.org
33 * Gromacs Runs On Most of All Computer Systems
41 * This module defines the interface of the actual communication routines.
46 #include "types/simple.h"
47 #include "types/commrec.h"
50 #include "gmx_fatal.h"
t_commrec *init_commrec(void);
/* Allocate, initialize and return the commrec (communication record)
 * for this process. */
t_commrec *reinitialize_commrec_for_this_thread(const t_commrec *cro);
/* Initialize communication records for thread-parallel simulations.
   Must be called on all threads before any communication takes place by
   the individual threads. Copies the original commrec cro to a new
   thread-local version and returns it (a small memory leak results
   because we don't deallocate the old shared version). */
void gmx_fill_commrec_from_mpi(t_commrec *cr);
/* Continues t_commrec construction started by init_commrec().
 * NOTE(review): presumably fills in the MPI-dependent fields of cr —
 * confirm against the implementation. */
int gmx_node_num(void);
/* Return the number of nodes in the ring. */
int gmx_node_rank(void);
/* Return the rank of this node in the ring. */
int gmx_hostname_num(void);
/* Ostensibly, returns an integer characteristic of and unique to each
   physical node in the MPI system. If the first part of the MPI
   hostname (up to the first dot) ends with a number, returns this
   number. If the first part of the MPI hostname does not end in a
   number (0-9 characters), returns 0. */
void gmx_setup_nodecomm(FILE *fplog, t_commrec *cr);
/* Sets up fast global communication for clusters with multi-core nodes;
 * progress/diagnostics may be written to fplog. */
void gmx_init_intranode_counters(t_commrec *cr);
/* Initializes intra-physical-node MPI process/thread counts and ID. */
gmx_bool gmx_mpi_initialized(void);
/* Return TRUE when MPI_Init has been called.
 * Return FALSE when MPI_Init has not been called OR
 * when GROMACS was compiled without MPI support. */
void gmx_barrier(const t_commrec *cr);
/* Wait till all processes in cr->mpi_comm_mygroup have reached the barrier. */
void gmx_bcast(int nbytes, void *b, const t_commrec *cr);
/* Broadcast nbytes bytes from buffer b on the master to all processes
 * in cr->mpi_comm_mygroup. */
void gmx_bcast_sim(int nbytes, void *b, const t_commrec *cr);
/* Broadcast nbytes bytes from buffer b on the sim master to all
 * processes in cr->mpi_comm_mysim. */
/* Global sums over all processes in cr->mpi_comm_mygroup.
 * NOTE(review): r[] appears to be summed in place (result overwrites the
 * input on every rank) — confirm against the implementation. */

void gmx_sumi(int nr, int r[], const t_commrec *cr);
/* Calculate the global sum of an array of nr ints. */

void gmx_sumli(int nr, gmx_large_int_t r[], const t_commrec *cr);
/* Calculate the global sum of an array of nr large ints. */

void gmx_sumf(int nr, float r[], const t_commrec *cr);
/* Calculate the global sum of an array of nr floats. */

void gmx_sumd(int nr, double r[], const t_commrec *cr);
/* Calculate the global sum of an array of nr doubles. */
void gmx_sumf_comm(int nr, float r[], MPI_Comm mpi_comm);
/* Calculate the global sum of an array of nr floats over the
 * explicitly supplied communicator mpi_comm. */

void gmx_sumd_comm(int nr, double r[], MPI_Comm mpi_comm);
/* Calculate the global sum of an array of nr doubles over the
 * explicitly supplied communicator mpi_comm. */
void gmx_sumi_sim(int nr, int r[], const gmx_multisim_t *ms);
/* Calculate the sum over the simulations of an array of nr ints. */

void gmx_sumli_sim(int nr, gmx_large_int_t r[], const gmx_multisim_t *ms);
/* Calculate the sum over the simulations of an array of nr large ints. */

void gmx_sumf_sim(int nr, float r[], const gmx_multisim_t *ms);
/* Calculate the sum over the simulations of an array of nr floats. */

void gmx_sumd_sim(int nr, double r[], const gmx_multisim_t *ms);
/* Calculate the sum over the simulations of an array of nr doubles. */
void gmx_abort(int nodeid, int nnodes, int errorno);
/* Abort the parallel run; nodeid and nnodes identify the caller.
 * NOTE(review): errorno is presumably propagated as the exit/error
 * code — confirm against the implementation. */
/* Precision-dependent aliases: the generic gmx_sum* names map to the
 * double-precision routines in a double-precision build (GMX_DOUBLE
 * defined) and to the single-precision routines otherwise.
 *
 * As extracted, all six #defines appeared unconditionally, redefining
 * each macro with a conflicting expansion (invalid per C11 6.10.3p2);
 * the GMX_DOUBLE conditional restores the intended structure. */
#ifdef GMX_DOUBLE
#define gmx_sum_comm  gmx_sumd_comm
#define gmx_sum       gmx_sumd
#define gmx_sum_sim   gmx_sumd_sim
#else
#define gmx_sum_comm  gmx_sumf_comm
#define gmx_sum       gmx_sumf
#define gmx_sum_sim   gmx_sumf_sim
#endif
/* Print "NODEID=<rank>, <file> <line>" to the debug stream whenever debug
 * mode is enabled, then flush the stream. The target is the global 'debug'
 * FILE* (declared elsewhere) when it is open, stderr otherwise. The rank is
 * -1 when MPI has not been initialized. Note the fflush(fp) is outside the
 * bDebugMode() guard, so the stream is flushed on every invocation; the
 * do { } while (0) wrapper makes the macro safe as a single statement. */
#define debug_gmx() do { FILE *fp = debug ? debug : stderr; \
    if (bDebugMode()) { fprintf(fp, "NODEID=%d, %s %d\n", gmx_mpi_initialized() ? gmx_node_rank() : -1, __FILE__, __LINE__); } fflush(fp); } while (0)
159 #endif /* _network_h */