/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2008,2009,2010,2012,2013, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

#include <string.h>   /* memcpy() in dd_bcastc() */

#include "domdec_network.h"

#include "gromacs/utility/gmxmpi.h"

#define DDMASTERRANK(dd)   (dd->masterrank)
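
/* The dd_sendrecv_{int,real,rvec} functions below implement the basic
 * point-to-point halo exchange of domain decomposition: data is sent to
 * the neighbor in the requested direction along DD dimension ddimind,
 * while the matching data is received from the neighbor on the opposite
 * side. When only one of the two counts is non-zero, a plain MPI_Send or
 * MPI_Recv is used instead of MPI_Sendrecv. All payloads are transferred
 * as raw bytes (MPI_BYTE), so the element counts are multiplied by the
 * element size. Without MPI these calls are no-ops.
 */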

void dd_sendrecv_int(const gmx_domdec_t gmx_unused *dd,
                     int gmx_unused ddimind, int gmx_unused direction,
                     int gmx_unused *buf_s, int gmx_unused n_s,
                     int gmx_unused *buf_r, int gmx_unused n_r)
{
#ifdef GMX_MPI
    int        rank_s, rank_r;
    MPI_Status stat;

    rank_s = dd->neighbor[ddimind][direction == dddirForward ? 0 : 1];
    rank_r = dd->neighbor[ddimind][direction == dddirForward ? 1 : 0];

    if (n_s && n_r) {
        MPI_Sendrecv(buf_s, n_s*sizeof(int), MPI_BYTE, rank_s, 0,
                     buf_r, n_r*sizeof(int), MPI_BYTE, rank_r, 0,
                     dd->mpi_comm_all, &stat);
    } else if (n_s) {
        MPI_Send(buf_s, n_s*sizeof(int), MPI_BYTE, rank_s, 0,
                 dd->mpi_comm_all);
    } else if (n_r) {
        MPI_Recv(buf_r, n_r*sizeof(int), MPI_BYTE, rank_r, 0,
                 dd->mpi_comm_all, &stat);
    }
#endif
}
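
/* The real and rvec variants below are identical to dd_sendrecv_int()
 * except for the element type used to compute the byte counts; the rvec
 * versions pass buf[0] so that the contiguous array of 3*n reals is
 * transferred as a single message.
 */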

void dd_sendrecv_real(const gmx_domdec_t gmx_unused *dd,
                      int gmx_unused ddimind, int gmx_unused direction,
                      real gmx_unused *buf_s, int gmx_unused n_s,
                      real gmx_unused *buf_r, int gmx_unused n_r)
{
#ifdef GMX_MPI
    int        rank_s, rank_r;
    MPI_Status stat;

    rank_s = dd->neighbor[ddimind][direction == dddirForward ? 0 : 1];
    rank_r = dd->neighbor[ddimind][direction == dddirForward ? 1 : 0];

    if (n_s && n_r) {
        MPI_Sendrecv(buf_s, n_s*sizeof(real), MPI_BYTE, rank_s, 0,
                     buf_r, n_r*sizeof(real), MPI_BYTE, rank_r, 0,
                     dd->mpi_comm_all, &stat);
    } else if (n_s) {
        MPI_Send(buf_s, n_s*sizeof(real), MPI_BYTE, rank_s, 0,
                 dd->mpi_comm_all);
    } else if (n_r) {
        MPI_Recv(buf_r, n_r*sizeof(real), MPI_BYTE, rank_r, 0,
                 dd->mpi_comm_all, &stat);
    }
#endif
}

void dd_sendrecv_rvec(const gmx_domdec_t gmx_unused *dd,
                      int gmx_unused ddimind, int gmx_unused direction,
                      rvec gmx_unused *buf_s, int gmx_unused n_s,
                      rvec gmx_unused *buf_r, int gmx_unused n_r)
{
#ifdef GMX_MPI
    int        rank_s, rank_r;
    MPI_Status stat;

    rank_s = dd->neighbor[ddimind][direction == dddirForward ? 0 : 1];
    rank_r = dd->neighbor[ddimind][direction == dddirForward ? 1 : 0];

    if (n_s && n_r) {
        MPI_Sendrecv(buf_s[0], n_s*sizeof(rvec), MPI_BYTE, rank_s, 0,
                     buf_r[0], n_r*sizeof(rvec), MPI_BYTE, rank_r, 0,
                     dd->mpi_comm_all, &stat);
    } else if (n_s) {
        MPI_Send(buf_s[0], n_s*sizeof(rvec), MPI_BYTE, rank_s, 0,
                 dd->mpi_comm_all);
    } else if (n_r) {
        MPI_Recv(buf_r[0], n_r*sizeof(rvec), MPI_BYTE, rank_r, 0,
                 dd->mpi_comm_all, &stat);
    }
#endif
}
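
/* dd_sendrecv2_rvec() performs the forward and backward halo exchange of
 * one DD dimension in a single call. In the non-blocking branch, MPI tag 0
 * is used for the forward-going messages and tag 1 for the backward-going
 * ones, so the outstanding requests cannot be matched to the wrong
 * receive buffer.
 */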

void dd_sendrecv2_rvec(const gmx_domdec_t gmx_unused *dd,
                       int gmx_unused ddimind,
                       rvec gmx_unused *buf_s_fw, int gmx_unused n_s_fw,
                       rvec gmx_unused *buf_r_fw, int gmx_unused n_r_fw,
                       rvec gmx_unused *buf_s_bw, int gmx_unused n_s_bw,
                       rvec gmx_unused *buf_r_bw, int gmx_unused n_r_bw)
{
#ifdef GMX_MPI
    int         rank_fw, rank_bw, nreq;
    MPI_Request req[4];
    MPI_Status  stat[4];

    rank_fw = dd->neighbor[ddimind][0];
    rank_bw = dd->neighbor[ddimind][1];

    if (!dd->bSendRecv2) {
        /* Try to send and receive in two directions simultaneously.
         * Should be faster, especially on machines
         * with full 3D communication networks.
         * However, it could be that communication libraries are
         * optimized for MPI_Sendrecv and that non-blocking MPI calls
         * are slower.
         * SendRecv2 can be turned on with the env.var. GMX_DD_SENDRECV2
         */
        nreq = 0;
        if (n_r_fw) {
            MPI_Irecv(buf_r_fw[0], n_r_fw*sizeof(rvec), MPI_BYTE,
                      rank_bw, 0, dd->mpi_comm_all, &req[nreq++]);
        }
        if (n_r_bw) {
            MPI_Irecv(buf_r_bw[0], n_r_bw*sizeof(rvec), MPI_BYTE,
                      rank_fw, 1, dd->mpi_comm_all, &req[nreq++]);
        }
        if (n_s_fw) {
            MPI_Isend(buf_s_fw[0], n_s_fw*sizeof(rvec), MPI_BYTE,
                      rank_fw, 0, dd->mpi_comm_all, &req[nreq++]);
        }
        if (n_s_bw) {
            MPI_Isend(buf_s_bw[0], n_s_bw*sizeof(rvec), MPI_BYTE,
                      rank_bw, 1, dd->mpi_comm_all, &req[nreq++]);
        }
        if (nreq) {
            MPI_Waitall(nreq, req, stat);
        }
    } else {
        /* Communicate in two ordered phases.
         * This is slower, even on a dual-core Opteron cluster
         * with a single full-duplex network connection per machine.
         */
        /* Forward */
        MPI_Sendrecv(buf_s_fw[0], n_s_fw*sizeof(rvec), MPI_BYTE, rank_fw, 0,
                     buf_r_fw[0], n_r_fw*sizeof(rvec), MPI_BYTE, rank_bw, 0,
                     dd->mpi_comm_all, &stat[0]);
        /* Backward */
        MPI_Sendrecv(buf_s_bw[0], n_s_bw*sizeof(rvec), MPI_BYTE, rank_bw, 0,
                     buf_r_bw[0], n_r_bw*sizeof(rvec), MPI_BYTE, rank_fw, 0,
                     dd->mpi_comm_all, &stat[0]);
    }
#endif
}

/* IBM's BlueGene(/L) MPI_Bcast dereferences the data pointer
 * even when 0 == nbytes, so we protect calls to it against a
 * zero-byte broadcast. Fortunately dd_bcast() and dd_bcastc()
 * are only called during DD setup and partitioning.
 */

void dd_bcast(gmx_domdec_t gmx_unused *dd, int gmx_unused nbytes, void gmx_unused *data)
{
#ifdef GMX_MPI
    if (nbytes > 0) {   /* see the BlueGene note above */
        MPI_Bcast(data, nbytes, MPI_BYTE,
                  DDMASTERRANK(dd), dd->mpi_comm_all);
    }
#endif
}

void dd_bcastc(gmx_domdec_t *dd, int nbytes, void *src, void *dest)
{
    /* Only the master rank has the source data: copy it locally,
     * then broadcast the result to all ranks.
     */
    if (dd->rank == DDMASTERRANK(dd)) {
        memcpy(dest, src, nbytes);
    }
#ifdef GMX_MPI
    if (nbytes > 0) {
        MPI_Bcast(dest, nbytes, MPI_BYTE,
                  DDMASTERRANK(dd), dd->mpi_comm_all);
    }
#endif
}
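
/* dd_scatter() and dd_gather() move one fixed-size block of nbytes per rank
 * between the DD master and all ranks in dd->mpi_comm_all. As with the
 * underlying MPI_Scatter/MPI_Gather, the root-side buffer (src for scatter,
 * dest for gather) is only significant on the master rank.
 */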

void dd_scatter(gmx_domdec_t gmx_unused *dd, int gmx_unused nbytes, void gmx_unused *src, void gmx_unused *dest)
{
#ifdef GMX_MPI
    MPI_Scatter(src, nbytes, MPI_BYTE,
                dest, nbytes, MPI_BYTE,
                DDMASTERRANK(dd), dd->mpi_comm_all);
#endif
}

void dd_gather(gmx_domdec_t gmx_unused *dd, int gmx_unused nbytes, void gmx_unused *src, void gmx_unused *dest)
{
#ifdef GMX_MPI
    MPI_Gather(src, nbytes, MPI_BYTE,
               dest, nbytes, MPI_BYTE,
               DDMASTERRANK(dd), dd->mpi_comm_all);
#endif
}
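
/* dd_scatterv() and dd_gatherv() are the variable-count versions, with
 * per-rank counts and displacements supplied on the master. A rank whose
 * own count is zero still has to pass a valid (non-NULL) buffer pointer,
 * which is why a dummy variable is substituted in that case.
 */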

void dd_scatterv(gmx_domdec_t gmx_unused *dd,
                 int gmx_unused *scounts, int gmx_unused *disps, void gmx_unused *sbuf,
                 int gmx_unused rcount, void gmx_unused *rbuf)
{
#ifdef GMX_MPI
    int dum;

    if (rcount == 0) {
        /* MPI does not allow NULL pointers */
        rbuf = &dum;
    }
    MPI_Scatterv(sbuf, scounts, disps, MPI_BYTE,
                 rbuf, rcount, MPI_BYTE,
                 DDMASTERRANK(dd), dd->mpi_comm_all);
#endif
}

void dd_gatherv(gmx_domdec_t gmx_unused *dd,
                int gmx_unused scount, void gmx_unused *sbuf,
                int gmx_unused *rcounts, int gmx_unused *disps, void gmx_unused *rbuf)
{
#ifdef GMX_MPI
    int dum;

    if (scount == 0) {
        /* MPI does not allow NULL pointers */
        sbuf = &dum;
    }
    MPI_Gatherv(sbuf, scount, MPI_BYTE,
                rbuf, rcounts, disps, MPI_BYTE,
                DDMASTERRANK(dd), dd->mpi_comm_all);
#endif
}
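
/* Example usage (illustrative only; buffer names and counts are
 * placeholders, sized elsewhere by the DD communication setup):
 * a forward coordinate halo push along DD dimension d could be issued as
 *
 *     dd_sendrecv_rvec(dd, d, dddirForward,
 *                      x_send, n_send, x_recv, n_recv);
 */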