3 * This source code is part of
7 * GROningen MAchine for Chemical Simulations
10 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
11 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
12 * Copyright (c) 2001-2004, The GROMACS development team,
13 * check out http://www.gromacs.org for more information.
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version 2
18 * of the License, or (at your option) any later version.
20 * If you want to redistribute modifications, please consider that
21 * scientific software is very special. Version control is crucial -
22 * bugs must be traceable. We will be happy to consider code for
23 * inclusion in the official distribution, but derived work must not
24 * be called official GROMACS. Details are found in the README & COPYING
25 * files - if they are missing, get the official version at www.gromacs.org.
27 * To help us fund GROMACS development, we humbly ask that you cite
28 * the papers on the package - you can find them in the top README file.
30 * For more info, check our website at http://www.gromacs.org
33 * GRoups of Organic Molecules in ACtion for Science
42 #include "../thread_mpi/tmpi.h"
43 #include "../thread_mpi/mpi_bindings.h"
/* Stub handle types standing in for the MPI communicator, request and
 * group types.
 * NOTE(review): presumably these apply only when compiling without a
 * real MPI library (the thread_mpi headers above provide real bindings
 * otherwise) -- confirm against the surrounding #ifdef logic. */
typedef void* MPI_Comm;
typedef void* MPI_Request;
typedef void* MPI_Group;
61 typedef struct gmx_domdec_master *gmx_domdec_master_p_t;
    /* Neighbor-search ranges of one i-zone: which j-zones and which
     * charge-group index ranges to pair-search against. */
    int j0; /* j-zone start */
    int j1; /* j-zone end */
    int cg1; /* i-charge-group end */
    int jcg0; /* j-charge-group start */
    int jcg1; /* j-charge-group end */
    ivec shift0; /* Minimum shifts to consider */
    ivec shift1; /* Maximum shifts to consider */
} gmx_domdec_ns_ranges_t;
    /* Geometric extent of one DD zone, in both the triclinic cell frame
     * and as a Cartesian bounding box. */
    rvec x0; /* Zone lower corner in triclinic coordinates */
    rvec x1; /* Zone upper corner in triclinic coordinates */
    rvec bb_x0; /* Zone bounding box lower corner in Cartesian coords */
    rvec bb_x1; /* Zone bounding box upper corner in Cartesian coords */
} gmx_domdec_zone_size_t;
    /* NOTE(review): the integer count fields that some comments below
     * describe are declared on lines not visible in this chunk. */
    /* The number of zones including the home zone */
    /* The shift of the zones with respect to the home zone */
    ivec shift[DD_MAXZONE];
    /* The charge group boundaries for the zones */
    int cg_range[DD_MAXZONE+1];
    /* The number of neighbor search zones with i-particles */
    /* The neighbor search charge group ranges for each i-zone */
    gmx_domdec_ns_ranges_t izone[DD_MAXIZONE];
    /* Boundaries of the zones */
    gmx_domdec_zone_size_t size[DD_MAXZONE];
    /* The cg density of the home zone */
/* Opaque handle types: the struct definitions are private to their
 * implementation files, so other code only passes these pointers around. */
typedef struct gmx_ga2la *gmx_ga2la_t; /* presumably global-atom -> local-atom lookup; see usage below */
typedef struct gmx_hash *gmx_hash_t; /* generic hash table handle */
typedef struct gmx_reverse_top *gmx_reverse_top_p_t; /* global atom number -> interaction list */
typedef struct gmx_domdec_constraints *gmx_domdec_constraints_p_t; /* DD constraint bookkeeping */
typedef struct gmx_domdec_specat_comm *gmx_domdec_specat_comm_p_t; /* special-atom (vsite/constraint) communication */
typedef struct gmx_domdec_comm *gmx_domdec_comm_p_t; /* DD communication setup */
typedef struct gmx_pme_comm_n_box_p_t *gmx_pme_comm_n_box_p_t;
    /* Tells if the box is skewed for each of the three Cartesian directions */
    /* Orthogonal vectors for triclinic cells, Cartesian index */
    /* Normal vectors for the cells walls */
    /* Scratch receive buffers, one per element type, used as destination
     * buffers when MPI_IN_PLACE is not available. */
    /* these buffers are used as destination buffers if MPI_IN_PLACE isn't
    int *ibuf; /* for ints */
    gmx_large_int_t *libuf; /* for gmx_large_int_t values */
    float *fbuf; /* for floats */
    double *dbuf; /* for doubles */
} mpi_in_place_buf_t;
    /* The DD particle-particle nodes only */
    /* The communication setup within the communicator all
     * defined in dd->comm in domdec.c
    MPI_Comm mpi_comm_all; /* communicator over all DD ranks */
    /* Use MPI_Sendrecv communication instead of non-blocking calls */
    /* The local DD cell index and rank */
    /* Communication with the PME only nodes */
    gmx_bool pme_receive_vir_ener; /* whether this rank receives virial/energy from PME */
    gmx_pme_comm_n_box_p_t cnb;
    MPI_Request req_pme[4]; /* outstanding requests for the PME exchange */
    /* The communication setup, identical for each cell, Cartesian index */
    ivec dim; /* indexed by 0 to ndim */
    /* PBC from dim 0 to npbcdim */
    /* Forward and backward neighboring cells, indexed by 0 to ndim */
    int neighbor[DIM][2];
    /* Only available on the master node */
    gmx_domdec_master_p_t ma;
    /* Are there inter charge group constraints */
    gmx_bool bInterCGcons;
    gmx_bool bInterCGsettles;
    /* Global atom number to interaction list */
    gmx_reverse_top_p_t reverse_top;
    /* The number of inter charge-group exclusions */
    gmx_hash_t ga2la_vsite; /* global -> local lookup for virtual-site atoms */
    gmx_domdec_specat_comm_p_t vsite_comm;
    /* Constraint stuff */
    gmx_domdec_constraints_p_t constraints;
    gmx_domdec_specat_comm_p_t constraint_comm;
    /* The local to global charge group index and local cg to local atom index */
    /* Local atom to local cg index, only for special cases */
    /* The number of home atoms */
    /* The total number of atoms: home and received zones */
    /* Index from the local atoms to the global atoms */
    /* Global atom number to local atom number list */
    /* Communication stuff */
    gmx_domdec_comm_p_t comm;
    /* The partitioning count, to keep track of the state */
    gmx_large_int_t ddp_count;
    /* gmx_pme_recv_f buffer */
    int pme_recv_f_alloc; /* allocated size of pme_recv_f_buf */
    rvec *pme_recv_f_buf; /* buffer for forces received from the PME ranks */
237 typedef struct gmx_partdec *gmx_partdec_p_t;
    MPI_Group mpi_group_masters; /* presumably the group of master ranks of all simulations -- confirm */
    MPI_Comm mpi_comm_masters; /* communicator over those master ranks */
    /* these buffers are used as destination buffers if MPI_IN_PLACE isn't
    mpi_in_place_buf_t *mpb; /* scratch buffers for in-place-less MPI reductions */
/* Duty flags for a rank, stored in cr->duty ("The duties of this node"):
 * a rank may do particle-particle work, PME work, or both. */
#define DUTY_PP (1<<0)
#define DUTY_PME (1<<1)
262 } gmx_commrec_thread_t;
    /* The nodeids in one sim are numbered sequentially from 0.
     * All communication within some simulation should happen
     * in mpi_comm_mysim, or its subset mpi_comm_mygroup.
    int sim_nodeid,nnodes,npmenodes; /* this rank's id in the sim, total ranks, PME-only ranks */
    /* thread numbers: */
    /* Not used yet: int threadid, nthreads; */
    /* The nodeid in the PP/PME, PP or PME group */
    MPI_Comm mpi_comm_mysim; /* all ranks of this simulation */
    MPI_Comm mpi_comm_mygroup; /* subset: this rank's PP or PME group */
    /* intra-node stuff */
    int nodeid_intra; /* ID over all intra nodes */
    int nodeid_group_intra; /* ID within my group (separate 0-n IDs for PP/PME-only nodes) */
    int nnodes_intra; /* total number of intra nodes */
    int nnodes_pp_intra; /* total number of PP intra nodes */
#ifdef GMX_THREAD_SHM_FDECOMP
    gmx_commrec_thread_t thread;
    /* For domain decomposition */
    /* For particle decomposition */
    /* The duties of this node, see the defines above */
    /* these buffers are used as destination buffers if MPI_IN_PLACE isn't
    mpi_in_place_buf_t *mpb; /* scratch buffers for in-place-less MPI reductions */
/* True on the rank with nodeid 0, or always in a non-parallel run */
#define MASTERNODE(cr) (((cr)->nodeid == 0) || !PAR(cr))
/* #define MASTERTHREAD(cr) ((cr)->threadid == 0) */
/* #define MASTER(cr) (MASTERNODE(cr) && MASTERTHREAD(cr)) */
/* With thread parallelism not (yet) in use, MASTER reduces to MASTERNODE */
#define MASTER(cr) MASTERNODE(cr)
/* Master of one simulation: the master node that does PP work,
 * or any node in a non-parallel run */
#define SIMMASTER(cr) ((MASTER(cr) && ((cr)->duty & DUTY_PP)) || !PAR(cr))
/* More than one node in this run */
#define NODEPAR(cr) ((cr)->nnodes > 1)
/* #define THREADPAR(cr) ((cr)->nthreads > 1) */
/* #define PAR(cr) (NODEPAR(cr) || THREADPAR(cr)) */
/* Parallel run: currently node parallelism only */
#define PAR(cr) NODEPAR(cr)
/* Node id and MPI rank are currently identical, so these are identity maps */
#define RANK(cr,nodeid) (nodeid)
#define MASTERRANK(cr) (0)
/* Domain decomposition is active: dd has been set up and we run in parallel */
#define DOMAINDECOMP(cr) (((cr)->dd != NULL) && PAR(cr))
/* This DD rank is the DD master rank */
#define DDMASTER(dd) ((dd)->rank == (dd)->masterrank)
/* Particle decomposition is active */
#define PARTDECOMP(cr) ((cr)->pd != NULL)
/* Multi-simulation run: non-NULL ms pointer, usable as a boolean */
#define MULTISIM(cr) ((cr)->ms)
/* Simulation index and MPI rank within the multi-sim are identical */
#define MSRANK(ms,nodeid) (nodeid)
/* Master simulation of a multi-simulation run */
#define MASTERSIM(ms) ((ms)->sim == 0)
/* The master of all (the node that prints the remaining run time etc.) */
#define MULTIMASTER(cr) (SIMMASTER(cr) && (!MULTISIM(cr) || MASTERSIM((cr)->ms)))