/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <string.h>

#include "gromacs/legacyheaders/typedefs.h"
#include "gromacs/legacyheaders/types/commrec.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/math/vec.h"
#include "gromacs/legacyheaders/pme.h"
#include "gromacs/legacyheaders/network.h"
#include "gromacs/legacyheaders/domdec.h"
#include "gromacs/legacyheaders/sighandler.h"

#include "gromacs/utility/gmxmpi.h"
/* Message types exchanged between a PP rank and its PME-only peer.
 * The first six are also used as MPI tags for the coefficient arrays.
 */
enum {
    eCommType_ChargeA, eCommType_ChargeB, eCommType_SQRTC6A, eCommType_SQRTC6B,
    eCommType_SigmaA, eCommType_SigmaB, eCommType_NR, eCommType_COORD,
    eCommType_CNB
};

/* Some parts of the code (gmx_pme_send_parameters, gmx_pme_recv_coeffs_coords)
 * assume that the first six flags are exactly in this order.
 * If more PP_PME_... flags are to be introduced, be aware of some of
 * the PME-specific flags in pme.h. Currently, they are also passed
 * through this interface.
 */
#define PP_PME_CHARGE         (1<<0)
#define PP_PME_CHARGEB        (1<<1)
#define PP_PME_SQRTC6         (1<<2)
#define PP_PME_SQRTC6B        (1<<3)
#define PP_PME_SIGMA          (1<<4)
#define PP_PME_SIGMAB         (1<<5)
#define PP_PME_COORD          (1<<6)
#define PP_PME_FEP_Q          (1<<7)
#define PP_PME_FEP_LJ         (1<<8)
#define PP_PME_ENER_VIR       (1<<9)
#define PP_PME_FINISH         (1<<10)
#define PP_PME_SWITCHGRID     (1<<11)
#define PP_PME_RESETCOUNTERS  (1<<12)
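
/* Worked example of the ordering assumption above (not used by the build):
 * the receive loop in gmx_pme_recv_coeffs_coords tests (PP_PME_CHARGE << q)
 * for q = eCommType_ChargeA, ..., eCommType_SigmaB, relying on, e.g.,
 *     PP_PME_CHARGE << eCommType_ChargeA == PP_PME_CHARGE   (bit 0)
 *     PP_PME_CHARGE << eCommType_SQRTC6A == PP_PME_SQRTC6   (bit 2)
 *     PP_PME_CHARGE << eCommType_SigmaB  == PP_PME_SIGMAB   (bit 5)
 * Reordering either the enum or these defines silently breaks that loop
 * and the (flags << 1) B-state trick in gmx_pme_send_parameters.
 */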
#define PME_PP_SIGSTOP     (1<<0)
#define PME_PP_SIGSTOPNSS  (1<<1)
typedef struct gmx_pme_pp {
#ifdef GMX_MPI
    MPI_Comm     mpi_comm_mysim;
#endif
    int          nnode;        /* The number of PP nodes to communicate with */
    int         *node;         /* The PP node ranks                          */
    int          node_peer;    /* The peer PP node rank                      */
    int         *nat;          /* The number of atoms for each PP node       */
    int          flags_charge; /* The flags sent along with the last charges */
    /* Receive buffers for the coefficients and coordinates, plus the
     * force send buffer; reallocated as nalloc grows.
     */
    real        *chargeA, *chargeB, *sqrt_c6A, *sqrt_c6B, *sigmaA, *sigmaB;
    rvec        *x, *f;
    int          nalloc;
#ifdef GMX_MPI
    MPI_Request *req;
    MPI_Status  *stat;
#endif
} t_gmx_pme_pp;
typedef struct gmx_pme_comm_n_box {
    int         natoms;        /* Number of atoms the PP rank will send   */
    int         flags;         /* PP_PME_... flags describing the message */
    matrix      box;           /* Sent along with the coordinates         */
    int         maxshift_x, maxshift_y;
    real        lambda_q, lambda_lj;
    gmx_int64_t step;
    ivec        grid_size;                   /* For PME grid tuning */
    real        ewaldcoeff_q, ewaldcoeff_lj; /* For PME grid tuning */
} gmx_pme_comm_n_box_t;
typedef struct gmx_pme_comm_vir_ene {
    matrix          vir_q, vir_lj;
    real            energy_q, energy_lj;
    real            dvdlambda_q, dvdlambda_lj;
    float           cycles;
    gmx_stop_cond_t stop_cond;
} gmx_pme_comm_vir_ene_t;
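
/* Both structs above cross the wire verbatim as MPI_BYTE (see the
 * MPI_Isend/MPI_Recv calls below). This relies on every rank in
 * mpi_comm_mysim running the same binary, so that struct layout,
 * padding and endianness agree on both ends.
 */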
gmx_pme_pp_t gmx_pme_pp_init(t_commrec gmx_unused *cr)
{
    struct gmx_pme_pp *pme_pp;
    int                rank;

    snew(pme_pp, 1);

#ifdef GMX_MPI
    pme_pp->mpi_comm_mysim = cr->mpi_comm_mysim;
    MPI_Comm_rank(cr->mpi_comm_mygroup, &rank);
    get_pme_ddnodes(cr, rank, &pme_pp->nnode, &pme_pp->node, &pme_pp->node_peer);
    snew(pme_pp->nat, pme_pp->nnode);
    snew(pme_pp->req, eCommType_NR*pme_pp->nnode);
    snew(pme_pp->stat, eCommType_NR*pme_pp->nnode);
    pme_pp->nalloc       = 0;
    pme_pp->flags_charge = 0;
#endif

    return pme_pp;
}
/* This should be faster with a real non-blocking MPI implementation */
/* #define GMX_PME_DELAYED_WAIT */

static void gmx_pme_send_coeffs_coords_wait(gmx_domdec_t gmx_unused *dd)
{
#ifdef GMX_MPI
    if (dd->nreq_pme)
    {
        MPI_Waitall(dd->nreq_pme, dd->req_pme, MPI_STATUSES_IGNORE);
        dd->nreq_pme = 0;
    }
#endif
}
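
/* Summary of the two completion strategies selected by the #ifdefs below:
 * by default, every gmx_pme_send_coeffs_coords() call waits for its own
 * MPI_Isend requests before returning, so its buffers may be reused at
 * once. With GMX_PME_DELAYED_WAIT defined, that wait is deferred to the
 * next point where the buffers could be overwritten (the next send, or
 * gmx_pme_receive_f()), letting the sends progress in the background on
 * MPI implementations with truly asynchronous non-blocking sends.
 */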
static void gmx_pme_send_coeffs_coords(t_commrec *cr, int flags,
                                       real gmx_unused *chargeA, real gmx_unused *chargeB,
                                       real gmx_unused *c6A, real gmx_unused *c6B,
                                       real gmx_unused *sigmaA, real gmx_unused *sigmaB,
                                       matrix box, rvec gmx_unused *x,
                                       real lambda_q, real lambda_lj,
                                       int maxshift_x, int maxshift_y,
                                       gmx_int64_t gmx_unused step)
{
    gmx_domdec_t         *dd;
    gmx_pme_comm_n_box_t *cnb;
    int                   n;

    dd = cr->dd;
    n  = dd->nat_home;

    if (debug)
    {
        fprintf(debug, "PP rank %d sending to PME rank %d: %d%s%s%s%s\n",
                cr->sim_nodeid, dd->pme_nodeid, n,
                (flags & PP_PME_CHARGE) ? " charges" : "",
                (flags & PP_PME_SQRTC6) ? " sqrtC6" : "",
                (flags & PP_PME_SIGMA)  ? " sigma" : "",
                (flags & PP_PME_COORD)  ? " coordinates" : "");
    }
#ifdef GMX_PME_DELAYED_WAIT
    /* We cannot reuse cnb until the pending communication has finished */
    gmx_pme_send_coeffs_coords_wait(dd);
#endif

#ifdef GMX_MPI
    if (dd->pme_receive_vir_ener)
    {
        /* Peer PP node: communicate all data */
        if (dd->cnb == NULL)
        {
            snew(dd->cnb, 1);
        }
        cnb = dd->cnb;

        cnb->flags      = flags;
        cnb->natoms     = n;
        cnb->maxshift_x = maxshift_x;
        cnb->maxshift_y = maxshift_y;
        cnb->lambda_q   = lambda_q;
        cnb->lambda_lj  = lambda_lj;
        cnb->step       = step;
        if (flags & PP_PME_COORD)
        {
            copy_mat(box, cnb->box);
        }
        MPI_Isend(cnb, sizeof(*cnb), MPI_BYTE,
                  dd->pme_nodeid, eCommType_CNB, cr->mpi_comm_mysim,
                  &dd->req_pme[dd->nreq_pme++]);
    }
    else if (flags & (PP_PME_CHARGE | PP_PME_SQRTC6 | PP_PME_SIGMA))
    {
        /* Communicate only the number of atoms */
        MPI_Isend(&n, sizeof(n), MPI_BYTE,
                  dd->pme_nodeid, eCommType_CNB, cr->mpi_comm_mysim,
                  &dd->req_pme[dd->nreq_pme++]);
    }
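
    /* Note the matching logic on the PME side (gmx_pme_recv_coeffs_coords):
     * the peer PP rank's atom count travels inside the cnb struct above,
     * while every other PP rank sends just this bare count under the same
     * eCommType_CNB tag.
     */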
    if (n > 0)
    {
        if (flags & PP_PME_CHARGE)
        {
            MPI_Isend(chargeA, n*sizeof(real), MPI_BYTE,
                      dd->pme_nodeid, eCommType_ChargeA, cr->mpi_comm_mysim,
                      &dd->req_pme[dd->nreq_pme++]);
        }
        if (flags & PP_PME_CHARGEB)
        {
            MPI_Isend(chargeB, n*sizeof(real), MPI_BYTE,
                      dd->pme_nodeid, eCommType_ChargeB, cr->mpi_comm_mysim,
                      &dd->req_pme[dd->nreq_pme++]);
        }
        if (flags & PP_PME_SQRTC6)
        {
            MPI_Isend(c6A, n*sizeof(real), MPI_BYTE,
                      dd->pme_nodeid, eCommType_SQRTC6A, cr->mpi_comm_mysim,
                      &dd->req_pme[dd->nreq_pme++]);
        }
        if (flags & PP_PME_SQRTC6B)
        {
            MPI_Isend(c6B, n*sizeof(real), MPI_BYTE,
                      dd->pme_nodeid, eCommType_SQRTC6B, cr->mpi_comm_mysim,
                      &dd->req_pme[dd->nreq_pme++]);
        }
        if (flags & PP_PME_SIGMA)
        {
            MPI_Isend(sigmaA, n*sizeof(real), MPI_BYTE,
                      dd->pme_nodeid, eCommType_SigmaA, cr->mpi_comm_mysim,
                      &dd->req_pme[dd->nreq_pme++]);
        }
        if (flags & PP_PME_SIGMAB)
        {
            MPI_Isend(sigmaB, n*sizeof(real), MPI_BYTE,
                      dd->pme_nodeid, eCommType_SigmaB, cr->mpi_comm_mysim,
                      &dd->req_pme[dd->nreq_pme++]);
        }
        if (flags & PP_PME_COORD)
        {
            MPI_Isend(x[0], n*sizeof(rvec), MPI_BYTE,
                      dd->pme_nodeid, eCommType_COORD, cr->mpi_comm_mysim,
                      &dd->req_pme[dd->nreq_pme++]);
        }
    }
#ifndef GMX_PME_DELAYED_WAIT
    /* Wait for the sends to complete */
    /* With GMX_PME_DELAYED_WAIT defined this wait is skipped; that is
     * safe because x and the coefficients are not modified before the
     * next call to gmx_pme_send_coordinates() or gmx_pme_receive_f().
     */
    gmx_pme_send_coeffs_coords_wait(dd);
#endif
#endif /* GMX_MPI */
}
void gmx_pme_send_parameters(t_commrec *cr,
                             const interaction_const_t *ic,
                             gmx_bool bFreeEnergy_q, gmx_bool bFreeEnergy_lj,
                             real *chargeA, real *chargeB,
                             real *sqrt_c6A, real *sqrt_c6B,
                             real *sigmaA, real *sigmaB,
                             int maxshift_x, int maxshift_y)
{
    int flags;

    flags = 0;
    if (EEL_PME(ic->eeltype))
    {
        flags |= PP_PME_CHARGE;
    }
    if (EVDW_PME(ic->vdwtype))
    {
        flags |= (PP_PME_SQRTC6 | PP_PME_SIGMA);
    }
    if (bFreeEnergy_q || bFreeEnergy_lj)
    {
        /* Assumes that the B-state flags are in the bits just above
         * the ones for the A state.
         */
        flags |= (flags << 1);
    }

    gmx_pme_send_coeffs_coords(cr, flags,
                               chargeA, chargeB,
                               sqrt_c6A, sqrt_c6B, sigmaA, sigmaB,
                               NULL, NULL, 0, 0, maxshift_x, maxshift_y, -1);
}
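
/* Worked illustration of the (flags << 1) trick above (not part of the
 * build): with only A-state bits set,
 *     flags           = PP_PME_CHARGE  | PP_PME_SQRTC6  | PP_PME_SIGMA
 *     flags << 1      = PP_PME_CHARGEB | PP_PME_SQRTC6B | PP_PME_SIGMAB
 *     flags |= flags << 1   ->  all six A- and B-state bits set.
 * This holds only while each B flag sits exactly one bit above its
 * A-state partner, as in the defines at the top of this file.
 */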
void gmx_pme_send_coordinates(t_commrec *cr, matrix box, rvec *x,
                              gmx_bool bFreeEnergy_q, gmx_bool bFreeEnergy_lj,
                              real lambda_q, real lambda_lj,
                              gmx_bool bEnerVir, int pme_flags,
                              gmx_int64_t step)
{
    int flags;

    flags = pme_flags | PP_PME_COORD;
    if (bFreeEnergy_q)
    {
        flags |= PP_PME_FEP_Q;
    }
    if (bFreeEnergy_lj)
    {
        flags |= PP_PME_FEP_LJ;
    }
    if (bEnerVir)
    {
        flags |= PP_PME_ENER_VIR;
    }
    gmx_pme_send_coeffs_coords(cr, flags, NULL, NULL, NULL, NULL, NULL, NULL,
                               box, x, lambda_q, lambda_lj, 0, 0, step);
}
void gmx_pme_send_finish(t_commrec *cr)
{
    int flags;

    flags = PP_PME_FINISH;

    gmx_pme_send_coeffs_coords(cr, flags, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 0, 0, 0, -1);
}
void gmx_pme_send_switchgrid(t_commrec gmx_unused *cr,
                             ivec gmx_unused grid_size,
                             real gmx_unused ewaldcoeff_q,
                             real gmx_unused ewaldcoeff_lj)
{
#ifdef GMX_MPI
    gmx_pme_comm_n_box_t cnb;

    /* Only let one PP node signal each PME node */
    if (cr->dd->pme_receive_vir_ener)
    {
        cnb.flags = PP_PME_SWITCHGRID;
        copy_ivec(grid_size, cnb.grid_size);
        cnb.ewaldcoeff_q  = ewaldcoeff_q;
        cnb.ewaldcoeff_lj = ewaldcoeff_lj;

        /* This message is infrequent, so we send it blocking to keep the code simple */
        MPI_Send(&cnb, sizeof(cnb), MPI_BYTE,
                 cr->dd->pme_nodeid, eCommType_CNB, cr->mpi_comm_mysim);
    }
#endif
}
void gmx_pme_send_resetcounters(t_commrec gmx_unused *cr, gmx_int64_t gmx_unused step)
{
#ifdef GMX_MPI
    gmx_pme_comm_n_box_t cnb;

    /* Only let one PP node signal each PME node */
    if (cr->dd->pme_receive_vir_ener)
    {
        cnb.flags = PP_PME_RESETCOUNTERS;
        cnb.step  = step;

        /* This message is infrequent, so we send it blocking to keep the code simple */
        MPI_Send(&cnb, sizeof(cnb), MPI_BYTE,
                 cr->dd->pme_nodeid, eCommType_CNB, cr->mpi_comm_mysim);
    }
#endif
}
int gmx_pme_recv_coeffs_coords(struct gmx_pme_pp      *pme_pp,
                               int                    *natoms,
                               real                  **chargeA,
                               real                  **chargeB,
                               real                  **sqrt_c6A,
                               real                  **sqrt_c6B,
                               real                  **sigmaA,
                               real                  **sigmaB,
                               matrix gmx_unused       box,
                               rvec                  **x,
                               rvec                  **f,
                               int gmx_unused         *maxshift_x,
                               int gmx_unused         *maxshift_y,
                               gmx_bool gmx_unused    *bFreeEnergy_q,
                               gmx_bool gmx_unused    *bFreeEnergy_lj,
                               real gmx_unused        *lambda_q,
                               real gmx_unused        *lambda_lj,
                               gmx_bool gmx_unused    *bEnerVir,
                               int gmx_unused         *pme_flags,
                               gmx_int64_t gmx_unused *step,
                               ivec gmx_unused         grid_size,
                               real gmx_unused        *ewaldcoeff_q,
                               real gmx_unused        *ewaldcoeff_lj)
{
    gmx_pme_comm_n_box_t cnb;
    int                  nat = 0, q, messages, sender;
    real                *charge_pp;

    messages = 0;
    /* Avoid a compiler warning about an unused variable without MPI support */
    cnb.flags = 0;

#ifdef GMX_MPI
    do
    {
        /* Receive the send count, box and time step from the peer PP node */
        MPI_Recv(&cnb, sizeof(cnb), MPI_BYTE,
                 pme_pp->node_peer, eCommType_CNB,
                 pme_pp->mpi_comm_mysim, MPI_STATUS_IGNORE);
        if (debug)
        {
            fprintf(debug, "PME only rank receiving:%s%s%s%s%s\n",
                    (cnb.flags & PP_PME_CHARGE)        ? " charges" : "",
                    (cnb.flags & PP_PME_COORD)         ? " coordinates" : "",
                    (cnb.flags & PP_PME_FINISH)        ? " finish" : "",
                    (cnb.flags & PP_PME_SWITCHGRID)    ? " switch grid" : "",
                    (cnb.flags & PP_PME_RESETCOUNTERS) ? " reset counters" : "");
        }
        if (cnb.flags & PP_PME_SWITCHGRID)
        {
            /* Special case, receive the new parameters and return */
            copy_ivec(cnb.grid_size, grid_size);
            *ewaldcoeff_q  = cnb.ewaldcoeff_q;
            *ewaldcoeff_lj = cnb.ewaldcoeff_lj;

            return pmerecvqxSWITCHGRID;
        }

        if (cnb.flags & PP_PME_RESETCOUNTERS)
        {
            /* Special case, receive the step and return */
            *step = cnb.step;

            return pmerecvqxRESETCOUNTERS;
        }
        if (cnb.flags & (PP_PME_CHARGE | PP_PME_SQRTC6 | PP_PME_SIGMA))
        {
            /* Receive the send counts from the other PP nodes */
            for (sender = 0; sender < pme_pp->nnode; sender++)
            {
                if (pme_pp->node[sender] == pme_pp->node_peer)
                {
                    pme_pp->nat[sender] = cnb.natoms;
                }
                else
                {
                    MPI_Irecv(&(pme_pp->nat[sender]), sizeof(pme_pp->nat[0]),
                              MPI_BYTE,
                              pme_pp->node[sender], eCommType_CNB,
                              pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);
                }
            }
            MPI_Waitall(messages, pme_pp->req, pme_pp->stat);
            messages = 0;
            nat = 0;
            for (sender = 0; sender < pme_pp->nnode; sender++)
            {
                nat += pme_pp->nat[sender];
            }

            if (nat > pme_pp->nalloc)
            {
                pme_pp->nalloc = over_alloc_dd(nat);
                if (cnb.flags & PP_PME_CHARGE)
                {
                    srenew(pme_pp->chargeA, pme_pp->nalloc);
                }
                if (cnb.flags & PP_PME_CHARGEB)
                {
                    srenew(pme_pp->chargeB, pme_pp->nalloc);
                }
                if (cnb.flags & PP_PME_SQRTC6)
                {
                    srenew(pme_pp->sqrt_c6A, pme_pp->nalloc);
                }
                if (cnb.flags & PP_PME_SQRTC6B)
                {
                    srenew(pme_pp->sqrt_c6B, pme_pp->nalloc);
                }
                if (cnb.flags & PP_PME_SIGMA)
                {
                    srenew(pme_pp->sigmaA, pme_pp->nalloc);
                }
                if (cnb.flags & PP_PME_SIGMAB)
                {
                    srenew(pme_pp->sigmaB, pme_pp->nalloc);
                }
                srenew(pme_pp->x, pme_pp->nalloc);
                srenew(pme_pp->f, pme_pp->nalloc);
            }
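
            /* Assumption, for the reader: over_alloc_dd() returns the
             * requested size plus some headroom, so gradual growth of the
             * atom count does not trigger a reallocation after every
             * repartitioning.
             */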
            /* maxshift is sent when the charges are sent */
            *maxshift_x = cnb.maxshift_x;
            *maxshift_y = cnb.maxshift_y;

            /* Receive the charges in place */
            for (q = 0; q < eCommType_NR; q++)
            {
                if (!(cnb.flags & (PP_PME_CHARGE<<q)))
                {
                    continue;
                }
                switch (q)
                {
                    case eCommType_ChargeA: charge_pp = pme_pp->chargeA;  break;
                    case eCommType_ChargeB: charge_pp = pme_pp->chargeB;  break;
                    case eCommType_SQRTC6A: charge_pp = pme_pp->sqrt_c6A; break;
                    case eCommType_SQRTC6B: charge_pp = pme_pp->sqrt_c6B; break;
                    case eCommType_SigmaA:  charge_pp = pme_pp->sigmaA;   break;
                    case eCommType_SigmaB:  charge_pp = pme_pp->sigmaB;   break;
                    default: gmx_incons("Wrong eCommType");
                }
                nat = 0;
                for (sender = 0; sender < pme_pp->nnode; sender++)
                {
                    if (pme_pp->nat[sender] > 0)
                    {
                        MPI_Irecv(charge_pp+nat,
                                  pme_pp->nat[sender]*sizeof(real),
                                  MPI_BYTE,
                                  pme_pp->node[sender], q,
                                  pme_pp->mpi_comm_mysim,
                                  &pme_pp->req[messages++]);
                        nat += pme_pp->nat[sender];
                        if (debug)
                        {
                            fprintf(debug, "Received from PP rank %d: %d %s\n",
                                    pme_pp->node[sender], pme_pp->nat[sender],
                                    (q == eCommType_ChargeA ||
                                     q == eCommType_ChargeB) ? "charges" : "params");
                        }
                    }
                }
            }

            pme_pp->flags_charge = cnb.flags;
        }
        if (cnb.flags & PP_PME_COORD)
        {
            if (!(pme_pp->flags_charge & (PP_PME_CHARGE | PP_PME_SQRTC6)))
            {
                gmx_incons("PME-only rank received coordinates before charges and/or C6-values");
            }

            /* The box, FE flag and lambda are sent along with the coordinates */
            copy_mat(cnb.box, box);
            *bFreeEnergy_q  = ((cnb.flags & GMX_PME_DO_COULOMB) &&
                               (cnb.flags & PP_PME_FEP_Q));
            *bFreeEnergy_lj = ((cnb.flags & GMX_PME_DO_LJ) &&
                               (cnb.flags & PP_PME_FEP_LJ));
            *lambda_q       = cnb.lambda_q;
            *lambda_lj      = cnb.lambda_lj;
            *bEnerVir       = (cnb.flags & PP_PME_ENER_VIR);
            *pme_flags      = cnb.flags;

            if (*bFreeEnergy_q && !(pme_pp->flags_charge & PP_PME_CHARGEB))
            {
                gmx_incons("PME-only rank received free energy request, but "
                           "did not receive B-state charges");
            }

            if (*bFreeEnergy_lj && !(pme_pp->flags_charge & PP_PME_SQRTC6B))
            {
                gmx_incons("PME-only rank received free energy request, but "
                           "did not receive B-state C6-values");
            }

            /* Receive the coordinates in place */
            nat = 0;
            for (sender = 0; sender < pme_pp->nnode; sender++)
            {
                if (pme_pp->nat[sender] > 0)
                {
                    MPI_Irecv(pme_pp->x[nat], pme_pp->nat[sender]*sizeof(rvec),
                              MPI_BYTE,
                              pme_pp->node[sender], eCommType_COORD,
                              pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);
                    nat += pme_pp->nat[sender];
                    if (debug)
                    {
                        fprintf(debug, "Received from PP rank %d: %d "
                                "coordinates\n",
                                pme_pp->node[sender], pme_pp->nat[sender]);
                    }
                }
            }
        }

        /* Wait for the coordinates and/or charges to arrive */
        MPI_Waitall(messages, pme_pp->req, pme_pp->stat);
        messages = 0;
    }
    while (!(cnb.flags & (PP_PME_COORD | PP_PME_FINISH)));
#endif
    *natoms   = nat;
    *chargeA  = pme_pp->chargeA;
    *chargeB  = pme_pp->chargeB;
    *sqrt_c6A = pme_pp->sqrt_c6A;
    *sqrt_c6B = pme_pp->sqrt_c6B;
    *sigmaA   = pme_pp->sigmaA;
    *sigmaB   = pme_pp->sigmaB;
    *x        = pme_pp->x;
    *f        = pme_pp->f;

    return ((cnb.flags & PP_PME_FINISH) ? pmerecvqxFINISH : pmerecvqxX);
}
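
/* A rough sketch (simplified, not the actual caller) of how a PME-only
 * rank drives this receive/compute/send cycle:
 *
 *     for (;;)
 *     {
 *         ret = gmx_pme_recv_coeffs_coords(pme_pp, &natoms, &chargeA, ...,
 *                                          &step, grid_size,
 *                                          &ewaldcoeff_q, &ewaldcoeff_lj);
 *         if (ret == pmerecvqxFINISH)
 *         {
 *             break;    (PP_PME_FINISH was received)
 *         }
 *         ... for pmerecvqxX: spread, solve and gather, then return the
 *         results with gmx_pme_send_force_vir_ener() below ...
 *     }
 */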
static void receive_virial_energy(t_commrec *cr,
                                  matrix vir_q, real *energy_q,
                                  matrix vir_lj, real *energy_lj,
                                  real *dvdlambda_q, real *dvdlambda_lj,
                                  float *pme_cycles)
{
    gmx_pme_comm_vir_ene_t cve;

    if (cr->dd->pme_receive_vir_ener)
    {
        if (debug)
        {
            fprintf(debug,
                    "PP rank %d receiving from PME rank %d: virial and energy\n",
                    cr->sim_nodeid, cr->dd->pme_nodeid);
        }
#ifdef GMX_MPI
        MPI_Recv(&cve, sizeof(cve), MPI_BYTE, cr->dd->pme_nodeid, 1, cr->mpi_comm_mysim,
                 MPI_STATUS_IGNORE);
#else
        memset(&cve, 0, sizeof(cve));
#endif

        m_add(vir_q, cve.vir_q, vir_q);
        m_add(vir_lj, cve.vir_lj, vir_lj);
        *energy_q      = cve.energy_q;
        *energy_lj     = cve.energy_lj;
        *dvdlambda_q  += cve.dvdlambda_q;
        *dvdlambda_lj += cve.dvdlambda_lj;
        *pme_cycles    = cve.cycles;

        if (cve.stop_cond != gmx_stop_cond_none)
        {
            gmx_set_stop_condition(cve.stop_cond);
        }
    }
    else
    {
        *energy_q   = 0;
        *energy_lj  = 0;
        *pme_cycles = 0;
    }
}
void gmx_pme_receive_f(t_commrec *cr,
                       rvec f[], matrix vir_q, real *energy_q,
                       matrix vir_lj, real *energy_lj,
                       real *dvdlambda_q, real *dvdlambda_lj,
                       float *pme_cycles)
{
    int natoms, i;

#ifdef GMX_PME_DELAYED_WAIT
    /* Wait for the x request to finish */
    gmx_pme_send_coeffs_coords_wait(cr->dd);
#endif

    natoms = cr->dd->nat_home;

    if (natoms > cr->dd->pme_recv_f_alloc)
    {
        cr->dd->pme_recv_f_alloc = over_alloc_dd(natoms);
        srenew(cr->dd->pme_recv_f_buf, cr->dd->pme_recv_f_alloc);
    }

#ifdef GMX_MPI
    MPI_Recv(cr->dd->pme_recv_f_buf[0],
             natoms*sizeof(rvec), MPI_BYTE,
             cr->dd->pme_nodeid, 0, cr->mpi_comm_mysim,
             MPI_STATUS_IGNORE);
#endif

    for (i = 0; i < natoms; i++)
    {
        rvec_inc(f[i], cr->dd->pme_recv_f_buf[i]);
    }

    receive_virial_energy(cr, vir_q, energy_q, vir_lj, energy_lj, dvdlambda_q, dvdlambda_lj, pme_cycles);
}
void gmx_pme_send_force_vir_ener(struct gmx_pme_pp *pme_pp,
                                 rvec gmx_unused *f,
                                 matrix vir_q, real energy_q,
                                 matrix vir_lj, real energy_lj,
                                 real dvdlambda_q, real dvdlambda_lj,
                                 float cycles)
{
#ifdef GMX_MPI
    gmx_pme_comm_vir_ene_t cve;
    int                    messages, ind_start, ind_end, receiver;

    /* Now the evaluated forces have to be transferred to the PP nodes */
    messages = 0;
    ind_end  = 0;
    for (receiver = 0; receiver < pme_pp->nnode; receiver++)
    {
        ind_start = ind_end;
        ind_end   = ind_start + pme_pp->nat[receiver];
        if (MPI_Isend(f[ind_start], (ind_end-ind_start)*sizeof(rvec), MPI_BYTE,
                      pme_pp->node[receiver], 0,
                      pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]) != 0)
        {
            gmx_comm("MPI_Isend failed in do_pmeonly");
        }
    }

    /* Send the virial and energy to our peer (the last) PP node */
    copy_mat(vir_q, cve.vir_q);
    copy_mat(vir_lj, cve.vir_lj);
    cve.energy_q     = energy_q;
    cve.energy_lj    = energy_lj;
    cve.dvdlambda_q  = dvdlambda_q;
    cve.dvdlambda_lj = dvdlambda_lj;
    cve.cycles       = cycles;
    /* Check for the signals to send back to a PP node */
    cve.stop_cond = gmx_get_stop_condition();

    if (debug)
    {
        fprintf(debug, "PME rank sending to PP rank %d: virial and energy\n",
                pme_pp->node_peer);
    }

    MPI_Isend(&cve, sizeof(cve), MPI_BYTE,
              pme_pp->node_peer, 1,
              pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);

    /* Wait for the force and energy sends to complete */
    MPI_Waitall(messages, pme_pp->req, pme_pp->stat);
#else
    gmx_call("MPI not enabled");
#endif
}