/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2008,2009,2010,2012,2013,2014,2015,2016,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief This file defines functions for (mostly) the domdec module
 * to use MPI functionality
 *
 * \author Berk Hess <hess@kth.se>
 *
 * \ingroup module_domdec
 */
#include "gmxpre.h"

#include "domdec_network.h"

#include "config.h" /* provides GMX_MPI */

#include <cstring> /* for memcpy() */

#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/utility/gmxmpi.h"

#include "domdec_internal.h"

/*! \brief Returns the MPI rank of the domain decomposition master rank */
#define DDMASTERRANK(dd)   ((dd)->masterrank)
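/* The related macro DDMASTER(dd), used further down, is assumed to come from
 * domdec_struct.h; it tests whether the calling rank is the master rank whose
 * index DDMASTERRANK returns. */
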
/*! \brief Move data of type \p T in the communication region one cell along
 * the domain decomposition
 *
 * Moves in the dimension indexed by ddDimensionIndex, either forward
 * (direction=dddirForward) or backward (direction=dddirBackward).
 */
template <typename T>
void
ddSendrecv(const struct gmx_domdec_t *dd,
           int                        ddDimensionIndex,
           int                        direction,
           T                         *sendBuffer,
           int                        numElementsToSend,
           T                         *receiveBuffer,
           int                        numElementsToReceive)
{
#if GMX_MPI
    int           sendRank    = dd->neighbor[ddDimensionIndex][direction == dddirForward ? 0 : 1];
    int           receiveRank = dd->neighbor[ddDimensionIndex][direction == dddirForward ? 1 : 0];

    constexpr int mpiTag      = 0;
    MPI_Status    mpiStatus;
    if (numElementsToSend > 0 && numElementsToReceive > 0)
    {
        MPI_Sendrecv(sendBuffer, numElementsToSend*sizeof(T), MPI_BYTE, sendRank, mpiTag,
                     receiveBuffer, numElementsToReceive*sizeof(T), MPI_BYTE, receiveRank, mpiTag,
                     dd->mpi_comm_all, &mpiStatus);
    }
    else if (numElementsToSend > 0)
    {
        MPI_Send(sendBuffer, numElementsToSend*sizeof(T), MPI_BYTE, sendRank, mpiTag,
                 dd->mpi_comm_all);
    }
    else if (numElementsToReceive > 0)
    {
        MPI_Recv(receiveBuffer, numElementsToReceive*sizeof(T), MPI_BYTE, receiveRank, mpiTag,
                 dd->mpi_comm_all, &mpiStatus);
    }
#else  /* GMX_MPI */
    GMX_UNUSED_VALUE(dd);
    GMX_UNUSED_VALUE(ddDimensionIndex);
    GMX_UNUSED_VALUE(direction);
    GMX_UNUSED_VALUE(sendBuffer);
    GMX_UNUSED_VALUE(numElementsToSend);
    GMX_UNUSED_VALUE(receiveBuffer);
    GMX_UNUSED_VALUE(numElementsToReceive);
#endif /* GMX_MPI */
}

//! Explicit instantiation of extern template for int
template void ddSendrecv(const gmx_domdec_t *, int, int,
                         int *, int, int *, int);
//! Explicit instantiation of extern template for real
template void ddSendrecv(const gmx_domdec_t *, int, int,
                         real *, int, real *, int);
//! Explicit instantiation of extern template for rvec
template void ddSendrecv(const gmx_domdec_t *, int, int,
                         rvec *, int, rvec *, int);

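/* Usage sketch (illustrative only; d, numSend, numRecv, sendBuf and recvBuf
 * are hypothetical caller-side names): once the element counts have been
 * exchanged, packed data is moved one cell forward along DD dimension d with
 *
 *     ddSendrecv(dd, d, dddirForward, sendBuf, numSend, recvBuf, numRecv);
 */
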
template <typename T>
void
ddSendrecv(const gmx_domdec_t *dd,
           int                 ddDimensionIndex,
           int                 direction,
           gmx::ArrayRef<T>    sendBuffer,
           gmx::ArrayRef<T>    receiveBuffer)
{
    ddSendrecv(dd, ddDimensionIndex, direction,
               sendBuffer.data(), sendBuffer.size(),
               receiveBuffer.data(), receiveBuffer.size());
}

//! Explicit instantiation of extern template for int
template void ddSendrecv(const gmx_domdec_t *, int, int,
                         gmx::ArrayRef<int>, gmx::ArrayRef<int>);
//! Explicit instantiation of extern template for real
template void ddSendrecv(const gmx_domdec_t *, int, int,
                         gmx::ArrayRef<real>, gmx::ArrayRef<real>);
//! Explicit instantiation of extern template for gmx::RVec
template void ddSendrecv(const gmx_domdec_t *, int, int,
                         gmx::ArrayRef<gmx::RVec>, gmx::ArrayRef<gmx::RVec>);

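/* Usage sketch for the ArrayRef overload (illustrative only; d, numToSend,
 * numToReceive, sendIndices and recvIndices are hypothetical caller-side
 * names): the element counts are taken from the views, so the caller only has
 * to size the buffers beforehand:
 *
 *     std::vector<int> sendIndices(numToSend), recvIndices(numToReceive);
 *     ddSendrecv<int>(dd, d, dddirBackward, sendIndices, recvIndices);
 */
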
void dd_sendrecv2_rvec(const struct gmx_domdec_t gmx_unused *dd,
                       int gmx_unused ddimind,
                       rvec gmx_unused *buf_s_fw, int gmx_unused n_s_fw,
                       rvec gmx_unused *buf_r_fw, int gmx_unused n_r_fw,
                       rvec gmx_unused *buf_s_bw, int gmx_unused n_s_bw,
                       rvec gmx_unused *buf_r_bw, int gmx_unused n_r_bw)
{
#if GMX_MPI
    int         rank_fw, rank_bw, nreq;
    MPI_Request req[4];
    MPI_Status  stat[4];

    rank_fw = dd->neighbor[ddimind][0];
    rank_bw = dd->neighbor[ddimind][1];

    if (!dd->comm->ddSettings.useSendRecv2)
    {
        /* Try to send and receive in two directions simultaneously.
         * This should be faster, especially on machines
         * with full 3D communication networks.
         * However, it could be that communication libraries are
         * optimized for MPI_Sendrecv and that non-blocking MPI calls
         * are slower.
         * SendRecv2 can be turned on with the env. var. GMX_DD_SENDRECV2.
         */
        nreq = 0;
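        /* The receives are posted before the sends; forward traffic uses MPI
         * tag 0 and backward traffic tag 1, so the two directions cannot be
         * mixed up while all four messages are in flight. */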
        if (n_r_fw)
        {
            MPI_Irecv(buf_r_fw[0], n_r_fw*sizeof(rvec), MPI_BYTE,
                      rank_bw, 0, dd->mpi_comm_all, &req[nreq++]);
        }
        if (n_r_bw)
        {
            MPI_Irecv(buf_r_bw[0], n_r_bw*sizeof(rvec), MPI_BYTE,
                      rank_fw, 1, dd->mpi_comm_all, &req[nreq++]);
        }
        if (n_s_fw)
        {
            MPI_Isend(buf_s_fw[0], n_s_fw*sizeof(rvec), MPI_BYTE,
                      rank_fw, 0, dd->mpi_comm_all, &req[nreq++]);
        }
        if (n_s_bw)
        {
            MPI_Isend(buf_s_bw[0], n_s_bw*sizeof(rvec), MPI_BYTE,
                      rank_bw, 1, dd->mpi_comm_all, &req[nreq++]);
        }
        if (nreq)
        {
            MPI_Waitall(nreq, req, stat); //NOLINT(clang-analyzer-optin.mpi.MPI-Checker)
        }
    }
    else
    {
        /* Communicate in two ordered phases.
         * This is slower, even on a dual-core Opteron cluster
         * with a single full-duplex network connection per machine.
         */
        /* Forward */
        MPI_Sendrecv(buf_s_fw[0], n_s_fw*sizeof(rvec), MPI_BYTE, rank_fw, 0,
                     buf_r_fw[0], n_r_fw*sizeof(rvec), MPI_BYTE, rank_bw, 0,
                     dd->mpi_comm_all, &stat[0]);
        /* Backward */
        MPI_Sendrecv(buf_s_bw[0], n_s_bw*sizeof(rvec), MPI_BYTE, rank_bw, 0,
                     buf_r_bw[0], n_r_bw*sizeof(rvec), MPI_BYTE, rank_fw, 0,
                     dd->mpi_comm_all, &stat[0]);
    }
#endif
}

void dd_bcast(const gmx_domdec_t gmx_unused *dd, int gmx_unused nbytes, void gmx_unused *data)
{
#if GMX_MPI
    if (dd->nnodes > 1)
    {
        MPI_Bcast(data, nbytes, MPI_BYTE,
                  DDMASTERRANK(dd), dd->mpi_comm_all);
    }
#endif
}

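/* dd_bcastc: the master rank first copies src into dest locally, then dest is
 * broadcast from the master rank, so afterwards every rank holds the master's
 * src data in its own dest buffer. */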
void dd_bcastc(const gmx_domdec_t *dd, int nbytes, void *src, void *dest)
{
    if (DDMASTER(dd) || dd->nnodes == 1)
    {
        memcpy(dest, src, nbytes);
    }
#if GMX_MPI
    if (dd->nnodes > 1)
    {
        MPI_Bcast(dest, nbytes, MPI_BYTE,
                  DDMASTERRANK(dd), dd->mpi_comm_all);
    }
#endif
}

void dd_scatter(const gmx_domdec_t gmx_unused *dd, int gmx_unused nbytes, const void gmx_unused *src, void *dest)
{
#if GMX_MPI
    if (dd->nnodes > 1)
    {
        /* Some MPI implementations don't specify const */
        MPI_Scatter(const_cast<void *>(src), nbytes, MPI_BYTE,
                    dest, nbytes, MPI_BYTE,
                    DDMASTERRANK(dd), dd->mpi_comm_all);
    }
    else
#endif
    {
        /* 1 rank, either we copy everything, or dest=src: nothing to do */
        if (dest != src)
        {
            memcpy(dest, src, nbytes);
        }
    }
}

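/* Note for dd_gather below: every rank contributes exactly nbytes, so on the
 * master rank dest must provide room for dd->nnodes*nbytes bytes; on all other
 * ranks dest is not touched by MPI_Gather. */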
void dd_gather(const gmx_domdec_t gmx_unused *dd, int gmx_unused nbytes, const void gmx_unused *src, void gmx_unused *dest)
{
#if GMX_MPI
    /* Some MPI implementations don't specify const */
    MPI_Gather(const_cast<void *>(src), nbytes, MPI_BYTE,
               dest, nbytes, MPI_BYTE,
               DDMASTERRANK(dd), dd->mpi_comm_all);
#endif
}

void dd_scatterv(const gmx_domdec_t gmx_unused *dd,
                 int gmx_unused *scounts, int gmx_unused *disps, const void *sbuf,
                 int rcount, void *rbuf)
{
#if GMX_MPI
    int dum;

    if (dd->nnodes > 1)
    {
        if (rcount == 0)
        {
            /* MPI does not allow NULL pointers */
            rbuf = &dum;
        }
        /* Some MPI implementations don't specify const */
        MPI_Scatterv(const_cast<void *>(sbuf), scounts, disps, MPI_BYTE,
                     rbuf, rcount, MPI_BYTE,
                     DDMASTERRANK(dd), dd->mpi_comm_all);
    }
    else
#endif
    {
        /* 1 rank, either we copy everything, or rbuf=sbuf: nothing to do */
        if (rbuf != sbuf)
        {
            memcpy(rbuf, sbuf, rcount);
        }
    }
}

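/* For dd_scatterv above and dd_gatherv below, the per-rank counts and disps
 * are byte counts and byte offsets (the datatype is MPI_BYTE) and are only
 * read on the master rank. A caller typically builds the displacements as a
 * prefix sum over the counts (sketch; counts and disps are hypothetical
 * caller-side arrays):
 *
 *     disps[0] = 0;
 *     for (int rank = 1; rank < dd->nnodes; rank++)
 *     {
 *         disps[rank] = disps[rank - 1] + counts[rank - 1];
 *     }
 */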
void dd_gatherv(const gmx_domdec_t gmx_unused *dd,
                int gmx_unused scount, const void gmx_unused *sbuf,
                int gmx_unused *rcounts, int gmx_unused *disps, void gmx_unused *rbuf)
{
#if GMX_MPI
    int dum;

    if (scount == 0)
    {
        /* MPI does not allow NULL pointers */
        sbuf = &dum;
    }
    /* Some MPI implementations don't specify const */
    MPI_Gatherv(const_cast<void *>(sbuf), scount, MPI_BYTE,
                rbuf, rcounts, disps, MPI_BYTE,
                DDMASTERRANK(dd), dd->mpi_comm_all);
#endif
}