/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief This file contains function definitions necessary for
 * managing the offload of long-ranged PME work to a separate MPI rank,
 * for computing energies and forces (Coulomb and LJ).
 *
 * \author Berk Hess <hess@kth.se>
 * \ingroup module_ewald
 */
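
/* Overview: the functions in this file implement the PP-rank side of the
 * PP-PME communication. A PP rank sends per-atom coefficients (charges,
 * and sqrt(C6)/sigma for LJ-PME) after each repartitioning, sends the box
 * and coordinates every step, can send control messages (finish, grid
 * switch, counter reset), and receives back the PME forces plus, on one
 * designated PP rank per PME rank, the virial and energy.
 */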

#include "gmxpre.h"

#include "config.h"

#include <cstdio>
#include <cstring>

#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/interaction_const.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/smalloc.h"

#include "pme_internal.h"
#include "pme_pp_communication.h"

/*! \brief Block to wait for communication to PME ranks to complete
 *
 * This should be faster with a real non-blocking MPI implementation.
 */
static constexpr bool c_useDelayedWait = false;
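
/* With c_useDelayedWait set to true, the sends posted in
 * gmx_pme_send_coeffs_coords() would not be waited on immediately after
 * posting; instead the wait would happen at the start of the next send and
 * in gmx_pme_receive_f(), allowing communication to overlap with other work.
 */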

/*! \brief Wait for the pending data send requests to PME ranks to complete */
static void gmx_pme_send_coeffs_coords_wait(gmx_domdec_t *dd)
{
#if GMX_MPI
    if (dd->nreq_pme)
    {
        MPI_Waitall(dd->nreq_pme, dd->req_pme, MPI_STATUSES_IGNORE);
        dd->nreq_pme = 0;
    }
#else
    GMX_UNUSED_VALUE(dd);
#endif
}

/*! \brief Send data to PME ranks */
static void gmx_pme_send_coeffs_coords(const t_commrec *cr, unsigned int flags,
                                       real gmx_unused *chargeA, real gmx_unused *chargeB,
                                       real gmx_unused *c6A, real gmx_unused *c6B,
                                       real gmx_unused *sigmaA, real gmx_unused *sigmaB,
                                       matrix box, rvec gmx_unused *x,
                                       real lambda_q, real lambda_lj,
                                       int maxshift_x, int maxshift_y,
                                       int64_t step)
{
    gmx_domdec_t *dd = cr->dd;

    int n = dd_numHomeAtoms(*dd);

    if (debug)
    {
        fprintf(debug, "PP rank %d sending to PME rank %d: %d%s%s%s%s\n",
                cr->sim_nodeid, dd->pme_nodeid, n,
                (flags & PP_PME_CHARGE) ? " charges" : "",
                (flags & PP_PME_SQRTC6) ? " sqrtC6" : "",
                (flags & PP_PME_SIGMA) ? " sigma" : "",
                (flags & PP_PME_COORD) ? " coordinates" : "");
    }

#if GMX_MPI
    if (c_useDelayedWait)
    {
        /* We cannot reuse dd->cnb until pending communication has finished */
        gmx_pme_send_coeffs_coords_wait(dd);
    }

    if (dd->pme_receive_vir_ener)
    {
        /* Peer PP node: communicate all data */
        if (dd->cnb == nullptr)
        {
            snew(dd->cnb, 1);
        }
        gmx_pme_comm_n_box_t *cnb = dd->cnb;

        cnb->flags      = flags;
        cnb->natoms     = n;
        cnb->maxshift_x = maxshift_x;
        cnb->maxshift_y = maxshift_y;
        cnb->lambda_q   = lambda_q;
        cnb->lambda_lj  = lambda_lj;
        cnb->step       = step;
        if (flags & PP_PME_COORD)
        {
            copy_mat(box, cnb->box);
        }
        MPI_Isend(cnb, sizeof(*cnb), MPI_BYTE,
                  dd->pme_nodeid, eCommType_CNB, cr->mpi_comm_mysim,
                  &dd->req_pme[dd->nreq_pme++]);
    }
    else if (flags & (PP_PME_CHARGE | PP_PME_SQRTC6 | PP_PME_SIGMA))
    {
        /* Communicate only the number of atoms */
        MPI_Isend(&n, sizeof(n), MPI_BYTE,
                  dd->pme_nodeid, eCommType_CNB, cr->mpi_comm_mysim,
                  &dd->req_pme[dd->nreq_pme++]);
    }
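
    /* Each per-atom payload below goes in its own message with a distinct
     * eCommType tag, so the PME rank can post matching receives directly
     * into the right buffers.
     */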
    if (flags & PP_PME_CHARGE)
    {
        MPI_Isend(chargeA, n*sizeof(real), MPI_BYTE,
                  dd->pme_nodeid, eCommType_ChargeA, cr->mpi_comm_mysim,
                  &dd->req_pme[dd->nreq_pme++]);
    }
    if (flags & PP_PME_CHARGEB)
    {
        MPI_Isend(chargeB, n*sizeof(real), MPI_BYTE,
                  dd->pme_nodeid, eCommType_ChargeB, cr->mpi_comm_mysim,
                  &dd->req_pme[dd->nreq_pme++]);
    }
    if (flags & PP_PME_SQRTC6)
    {
        MPI_Isend(c6A, n*sizeof(real), MPI_BYTE,
                  dd->pme_nodeid, eCommType_SQRTC6A, cr->mpi_comm_mysim,
                  &dd->req_pme[dd->nreq_pme++]);
    }
    if (flags & PP_PME_SQRTC6B)
    {
        MPI_Isend(c6B, n*sizeof(real), MPI_BYTE,
                  dd->pme_nodeid, eCommType_SQRTC6B, cr->mpi_comm_mysim,
                  &dd->req_pme[dd->nreq_pme++]);
    }
    if (flags & PP_PME_SIGMA)
    {
        MPI_Isend(sigmaA, n*sizeof(real), MPI_BYTE,
                  dd->pme_nodeid, eCommType_SigmaA, cr->mpi_comm_mysim,
                  &dd->req_pme[dd->nreq_pme++]);
    }
    if (flags & PP_PME_SIGMAB)
    {
        MPI_Isend(sigmaB, n*sizeof(real), MPI_BYTE,
                  dd->pme_nodeid, eCommType_SigmaB, cr->mpi_comm_mysim,
                  &dd->req_pme[dd->nreq_pme++]);
    }
    if (flags & PP_PME_COORD)
    {
        MPI_Isend(x[0], n*sizeof(rvec), MPI_BYTE,
                  dd->pme_nodeid, eCommType_COORD, cr->mpi_comm_mysim,
                  &dd->req_pme[dd->nreq_pme++]);
    }

    if (!c_useDelayedWait)
    {
        /* Wait for the data to arrive */
        /* We can skip this wait as we are sure x and q will not be modified
         * before the next call to gmx_pme_send_coordinates or gmx_pme_receive_f.
         */
        gmx_pme_send_coeffs_coords_wait(dd);
    }
#else
    GMX_UNUSED_VALUE(box);
    GMX_UNUSED_VALUE(lambda_q);
    GMX_UNUSED_VALUE(lambda_lj);
    GMX_UNUSED_VALUE(maxshift_x);
    GMX_UNUSED_VALUE(maxshift_y);
    GMX_UNUSED_VALUE(step);
#endif
}
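
/* The messages posted above are matched, in the same eCommType order, by
 * receives on the PME-only rank side (see gmx_pme_recv_coeffs_coords() in
 * pme_only.cpp).
 */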

void gmx_pme_send_parameters(const t_commrec *cr,
                             const interaction_const_t *ic,
                             gmx_bool bFreeEnergy_q, gmx_bool bFreeEnergy_lj,
                             real *chargeA, real *chargeB,
                             real *sqrt_c6A, real *sqrt_c6B,
                             real *sigmaA, real *sigmaB,
                             int maxshift_x, int maxshift_y)
{
    unsigned int flags = 0;

    if (EEL_PME(ic->eeltype))
    {
        flags |= PP_PME_CHARGE;
    }
    if (EVDW_PME(ic->vdwtype))
    {
        flags |= (PP_PME_SQRTC6 | PP_PME_SIGMA);
    }
    if (bFreeEnergy_q || bFreeEnergy_lj)
    {
        /* Assumes that the B state flags are in the bits just above
         * the ones for the A state.
         */
        flags |= (flags << 1);
    }
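
    /* With the flag layout in pme_pp_communication.h, the shift above turns
     * PP_PME_CHARGE into PP_PME_CHARGEB, PP_PME_SQRTC6 into PP_PME_SQRTC6B
     * and PP_PME_SIGMA into PP_PME_SIGMAB.
     */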

    gmx_pme_send_coeffs_coords(cr, flags,
                               chargeA, chargeB,
                               sqrt_c6A, sqrt_c6B, sigmaA, sigmaB,
                               nullptr, nullptr, 0, 0, maxshift_x, maxshift_y, -1);
}

void gmx_pme_send_coordinates(const t_commrec *cr, matrix box, rvec *x,
                              real lambda_q, real lambda_lj,
                              gmx_bool bEnerVir,
                              int64_t step, gmx_wallcycle *wcycle)
{
    wallcycle_start(wcycle, ewcPP_PMESENDX);

    unsigned int flags = PP_PME_COORD;
    if (bEnerVir)
    {
        flags |= PP_PME_ENER_VIR;
    }
    gmx_pme_send_coeffs_coords(cr, flags, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr,
                               box, x, lambda_q, lambda_lj, 0, 0, step);

    wallcycle_stop(wcycle, ewcPP_PMESENDX);
}

void gmx_pme_send_finish(const t_commrec *cr)
{
    unsigned int flags = PP_PME_FINISH;

    gmx_pme_send_coeffs_coords(cr, flags, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr,
                               nullptr, nullptr, 0, 0, 0, 0, -1);
}
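
/* The grid switch below is part of the PME load balancing ("PME tuning"):
 * when the PP ranks change the PME grid size and Ewald coefficients, the
 * PME-only rank has to follow, which is what the PP_PME_SWITCHGRID message
 * requests.
 */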

void gmx_pme_send_switchgrid(const t_commrec *cr,
                             ivec grid_size,
                             real ewaldcoeff_q,
                             real ewaldcoeff_lj)
{
#if GMX_MPI
    gmx_pme_comm_n_box_t cnb;

    /* Only let one PP node signal each PME node */
    if (cr->dd->pme_receive_vir_ener)
    {
        cnb.flags = PP_PME_SWITCHGRID;
        copy_ivec(grid_size, cnb.grid_size);
        cnb.ewaldcoeff_q  = ewaldcoeff_q;
        cnb.ewaldcoeff_lj = ewaldcoeff_lj;

        /* We send this uncommon message blocking, to simplify the code */
        MPI_Send(&cnb, sizeof(cnb), MPI_BYTE,
                 cr->dd->pme_nodeid, eCommType_CNB, cr->mpi_comm_mysim);
    }
#else
    GMX_UNUSED_VALUE(cr);
    GMX_UNUSED_VALUE(grid_size);
    GMX_UNUSED_VALUE(ewaldcoeff_q);
    GMX_UNUSED_VALUE(ewaldcoeff_lj);
#endif
}

void gmx_pme_send_resetcounters(const t_commrec gmx_unused *cr, int64_t gmx_unused step)
{
#if GMX_MPI
    gmx_pme_comm_n_box_t cnb;

    /* Only let one PP node signal each PME node */
    if (cr->dd->pme_receive_vir_ener)
    {
        cnb.flags = PP_PME_RESETCOUNTERS;
        cnb.step  = step;

        /* We send this uncommon message blocking, to simplify the code */
        MPI_Send(&cnb, sizeof(cnb), MPI_BYTE,
                 cr->dd->pme_nodeid, eCommType_CNB, cr->mpi_comm_mysim);
    }
#endif
}
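
/* The PP_PME_RESETCOUNTERS message above tells the PME rank to reset its
 * cycle counters at the given step, so that PP and PME performance
 * statistics stay in sync when mdrun resets counters mid-run.
 */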

/*! \brief Receive virial and energy from PME rank */
static void receive_virial_energy(const t_commrec *cr,
                                  gmx::ForceWithVirial *forceWithVirial,
                                  real *energy_q, real *energy_lj,
                                  real *dvdlambda_q, real *dvdlambda_lj,
                                  float *pme_cycles)
{
    gmx_pme_comm_vir_ene_t cve;

    if (cr->dd->pme_receive_vir_ener)
    {
        if (debug)
        {
            fprintf(debug,
                    "PP rank %d receiving from PME rank %d: virial and energy\n",
                    cr->sim_nodeid, cr->dd->pme_nodeid);
        }
#if GMX_MPI
        MPI_Recv(&cve, sizeof(cve), MPI_BYTE, cr->dd->pme_nodeid, 1, cr->mpi_comm_mysim,
                 MPI_STATUS_IGNORE);
#else
        memset(&cve, 0, sizeof(cve));
#endif

        forceWithVirial->addVirialContribution(cve.vir_q);
        forceWithVirial->addVirialContribution(cve.vir_lj);
        *energy_q      = cve.energy_q;
        *energy_lj     = cve.energy_lj;
        *dvdlambda_q  += cve.dvdlambda_q;
        *dvdlambda_lj += cve.dvdlambda_lj;
        *pme_cycles    = cve.cycles;

        if (cve.stop_cond != gmx_stop_cond_none)
        {
            gmx_set_stop_condition(cve.stop_cond);
        }
    }
    else
    {
        *energy_q   = 0;
        *energy_lj  = 0;
        *pme_cycles = 0;
    }
}

void gmx_pme_receive_f(const t_commrec *cr,
                       gmx::ForceWithVirial *forceWithVirial,
                       real *energy_q, real *energy_lj,
                       real *dvdlambda_q, real *dvdlambda_lj,
                       float *pme_cycles)
{
    if (c_useDelayedWait)
    {
        /* Wait for the x request to finish */
        gmx_pme_send_coeffs_coords_wait(cr->dd);
    }

    int natoms = dd_numHomeAtoms(*cr->dd);

    if (natoms > cr->dd->pme_recv_f_alloc)
    {
        cr->dd->pme_recv_f_alloc = over_alloc_dd(natoms);
        srenew(cr->dd->pme_recv_f_buf, cr->dd->pme_recv_f_alloc);
    }

#if GMX_MPI
    MPI_Recv(cr->dd->pme_recv_f_buf[0],
             natoms*sizeof(rvec), MPI_BYTE,
             cr->dd->pme_nodeid, 0, cr->mpi_comm_mysim,
             MPI_STATUS_IGNORE);
#endif
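
    /* Note the tag pairing: the force buffer arrives with tag 0 here, while
     * the virial/energy record received in receive_virial_energy() uses
     * tag 1.
     */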

    int nt = gmx_omp_nthreads_get_simple_rvec_task(emntDefault, natoms);

    gmx::ArrayRef<gmx::RVec> f = forceWithVirial->force_;

    /* Note that we would like to avoid this conditional by putting it
     * into the omp pragma instead, but then we still take the full
     * omp parallel for overhead (at least with gcc5).
     */
    if (nt == 1)
    {
        for (int i = 0; i < natoms; i++)
        {
            rvec_inc(f[i], cr->dd->pme_recv_f_buf[i]);
        }
    }
    else
    {
#pragma omp parallel for num_threads(nt) schedule(static)
        for (int i = 0; i < natoms; i++)
        {
            rvec_inc(f[i], cr->dd->pme_recv_f_buf[i]);
        }
    }

    receive_virial_energy(cr, forceWithVirial, energy_q, energy_lj, dvdlambda_q, dvdlambda_lj, pme_cycles);
}
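
/* Typical per-step usage on a PP rank (a sketch; the variable names are
 * illustrative, not taken from the calling code):
 *
 *   gmx_pme_send_coordinates(cr, box, x, lambda_q, lambda_lj,
 *                            bEnerVir, step, wcycle);
 *   // ... compute short-ranged forces while the PME rank works ...
 *   gmx_pme_receive_f(cr, &forceWithVirial, &energy_q, &energy_lj,
 *                     &dvdlambda_q, &dvdlambda_lj, &pmeCycles);
 */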