/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "types/commrec.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/math/vec.h"

#include "sighandler.h"

#include "gromacs/utility/gmxmpi.h"
    eCommType_ChargeA, eCommType_ChargeB, eCommType_SQRTC6A, eCommType_SQRTC6B,
    eCommType_SigmaA, eCommType_SigmaB, eCommType_NR, eCommType_COORD,
/* Some parts of the code (gmx_pme_send_q, gmx_pme_recv_q_x) assume
 * that the first six flags are exactly in this order.
 * If more PP_PME_...-flags are to be introduced, be aware of some of
 * the PME-specific flags in pme.h. Currently, they are also passed
 * through here.
 */
#define PP_PME_CHARGE         (1<<0)
#define PP_PME_CHARGEB        (1<<1)
#define PP_PME_SQRTC6         (1<<2)
#define PP_PME_SQRTC6B        (1<<3)
#define PP_PME_SIGMA          (1<<4)
#define PP_PME_SIGMAB         (1<<5)
#define PP_PME_COORD          (1<<6)
#define PP_PME_FEP_Q          (1<<7)
#define PP_PME_FEP_LJ         (1<<8)
#define PP_PME_ENER_VIR       (1<<9)
#define PP_PME_FINISH         (1<<10)
#define PP_PME_SWITCHGRID     (1<<11)
#define PP_PME_RESETCOUNTERS  (1<<12)
#define PME_PP_SIGSTOP     (1<<0)
#define PME_PP_SIGSTOPNSS  (1<<1)
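/* Data for PP-PME communication, used on the PME-only ranks to keep
 * track of the PP ranks they receive from and send forces back to. */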
typedef struct gmx_pme_pp {
    MPI_Comm     mpi_comm_mysim;
    int          nnode;        /* The number of PP nodes to communicate with */
    int         *node;         /* The PP node ranks                          */
    int          node_peer;    /* The peer PP node rank                      */
    int         *nat;          /* The number of atoms for each PP node       */
    int          flags_charge; /* The flags sent along with the last charges */
typedef struct gmx_pme_comm_n_box {
    ivec            grid_size;    /* For PME grid tuning */
    real            ewaldcoeff_q; /* For PME grid tuning */
} gmx_pme_comm_n_box_t;

    gmx_stop_cond_t stop_cond;
} gmx_pme_comm_vir_ene_t;
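/* Initializes the PP-PME communication data on a PME-only rank:
 * stores the simulation communicator, looks up the PP ranks to
 * receive from, and allocates the per-sender atom counts and the
 * MPI request/status arrays. */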
gmx_pme_pp_t gmx_pme_pp_init(t_commrec gmx_unused *cr)
    struct gmx_pme_pp *pme_pp;

    pme_pp->mpi_comm_mysim = cr->mpi_comm_mysim;
    MPI_Comm_rank(cr->mpi_comm_mygroup, &rank);
    get_pme_ddnodes(cr, rank, &pme_pp->nnode, &pme_pp->node, &pme_pp->node_peer);
    snew(pme_pp->nat, pme_pp->nnode);
    snew(pme_pp->req, eCommType_NR*pme_pp->nnode);
    snew(pme_pp->stat, eCommType_NR*pme_pp->nnode);

    pme_pp->flags_charge = 0;
/* This should be faster with a real non-blocking MPI implementation */
/* #define GMX_PME_DELAYED_WAIT */
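/* Waits for all outstanding non-blocking parameter/coordinate sends to
 * the PME rank, posted by gmx_pme_send_coeffs_coords(), to complete. */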
static void gmx_pme_send_coeffs_coords_wait(gmx_domdec_t gmx_unused *dd)
    MPI_Waitall(dd->nreq_pme, dd->req_pme, MPI_STATUSES_IGNORE);
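/* Low-level send routine used by the gmx_pme_send_* wrappers below.
 * Depending on the bits set in flags, it posts non-blocking sends to
 * this PP rank's PME rank of a gmx_pme_comm_n_box_t header (or just
 * the atom count), the charge/sqrt(C6)/sigma parameter arrays, and/or
 * the box and coordinates. */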
static void gmx_pme_send_coeffs_coords(t_commrec *cr, int flags,
                                       real gmx_unused *chargeA, real gmx_unused *chargeB,
                                       real gmx_unused *c6A, real gmx_unused *c6B,
                                       real gmx_unused *sigmaA, real gmx_unused *sigmaB,
                                       matrix box, rvec gmx_unused *x,
                                       real lambda_q, real lambda_lj,
                                       int maxshift_x, int maxshift_y,
    gmx_pme_comm_n_box_t *cnb;

        fprintf(debug, "PP rank %d sending to PME rank %d: %d%s%s\n",
                cr->sim_nodeid, dd->pme_nodeid, n,
                flags & PP_PME_CHARGE ? " charges" : "",
                flags & PP_PME_COORD  ? " coordinates" : "");
#ifdef GMX_PME_DELAYED_WAIT
    /* We cannot use cnb until the pending communication has finished */
    gmx_pme_send_coeffs_coords_wait(dd);
    if (dd->pme_receive_vir_ener)
        /* Peer PP node: communicate all data */
        cnb->maxshift_x = maxshift_x;
        cnb->maxshift_y = maxshift_y;
        cnb->lambda_q   = lambda_q;
        cnb->lambda_lj  = lambda_lj;

        if (flags & PP_PME_COORD)
            copy_mat(box, cnb->box);

        MPI_Isend(cnb, sizeof(*cnb), MPI_BYTE,
                  dd->pme_nodeid, eCommType_CNB, cr->mpi_comm_mysim,
                  &dd->req_pme[dd->nreq_pme++]);
    else if (flags & (PP_PME_CHARGE | PP_PME_SQRTC6 | PP_PME_SIGMA))
        /* Communicate only the number of atoms */
        MPI_Isend(&n, sizeof(n), MPI_BYTE,
                  dd->pme_nodeid, eCommType_CNB, cr->mpi_comm_mysim,
                  &dd->req_pme[dd->nreq_pme++]);
    if (flags & PP_PME_CHARGE)
        MPI_Isend(chargeA, n*sizeof(real), MPI_BYTE,
                  dd->pme_nodeid, eCommType_ChargeA, cr->mpi_comm_mysim,
                  &dd->req_pme[dd->nreq_pme++]);
    if (flags & PP_PME_CHARGEB)
        MPI_Isend(chargeB, n*sizeof(real), MPI_BYTE,
                  dd->pme_nodeid, eCommType_ChargeB, cr->mpi_comm_mysim,
                  &dd->req_pme[dd->nreq_pme++]);
    if (flags & PP_PME_SQRTC6)
        MPI_Isend(c6A, n*sizeof(real), MPI_BYTE,
                  dd->pme_nodeid, eCommType_SQRTC6A, cr->mpi_comm_mysim,
                  &dd->req_pme[dd->nreq_pme++]);
    if (flags & PP_PME_SQRTC6B)
        MPI_Isend(c6B, n*sizeof(real), MPI_BYTE,
                  dd->pme_nodeid, eCommType_SQRTC6B, cr->mpi_comm_mysim,
                  &dd->req_pme[dd->nreq_pme++]);
    if (flags & PP_PME_SIGMA)
        MPI_Isend(sigmaA, n*sizeof(real), MPI_BYTE,
                  dd->pme_nodeid, eCommType_SigmaA, cr->mpi_comm_mysim,
                  &dd->req_pme[dd->nreq_pme++]);
    if (flags & PP_PME_SIGMAB)
        MPI_Isend(sigmaB, n*sizeof(real), MPI_BYTE,
                  dd->pme_nodeid, eCommType_SigmaB, cr->mpi_comm_mysim,
                  &dd->req_pme[dd->nreq_pme++]);
    if (flags & PP_PME_COORD)
        MPI_Isend(x[0], n*sizeof(rvec), MPI_BYTE,
                  dd->pme_nodeid, eCommType_COORD, cr->mpi_comm_mysim,
                  &dd->req_pme[dd->nreq_pme++]);

#ifndef GMX_PME_DELAYED_WAIT
    /* Wait for the data to arrive */
    /* We can skip this wait as we are sure x and q will not be modified
     * before the next call to gmx_pme_send_x_q or gmx_pme_receive_f.
     */
    gmx_pme_send_coeffs_coords_wait(dd);
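/* Sends the (possibly perturbed) charge, sqrt(C6) and sigma parameters
 * from a PP rank to its PME rank, typically after domain decomposition
 * (re)partitioning and before coordinates are sent. */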
void gmx_pme_send_parameters(t_commrec *cr,
                             gmx_bool bFreeEnergy_q, gmx_bool bFreeEnergy_lj,
                             real *chargeA, real *chargeB,
                             real *sqrt_c6A, real *sqrt_c6B,
                             real *sigmaA, real *sigmaB,
                             int maxshift_x, int maxshift_y)
    /* We always send the charges, even with only LJ- and no Coulomb-PME */
    flags = PP_PME_CHARGE;
    if (sqrt_c6A != NULL)
        flags |= PP_PME_SQRTC6;
        flags |= PP_PME_SIGMA;
    if (bFreeEnergy_q || bFreeEnergy_lj)
        /* Assumes that the B state flags are in the bits just above
         * the ones for the A state. */
        flags |= (flags << 1);

    gmx_pme_send_coeffs_coords(cr, flags,
                               sqrt_c6A, sqrt_c6B, sigmaA, sigmaB,
                               NULL, NULL, 0, 0, maxshift_x, maxshift_y, -1);
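/* Sends the box and current coordinates to the PME rank, together with
 * the free-energy lambdas and, when bEnerVir is set, a request for the
 * PME energy and virial to be returned this step. */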
void gmx_pme_send_coordinates(t_commrec *cr, matrix box, rvec *x,
                              gmx_bool bFreeEnergy_q, gmx_bool bFreeEnergy_lj,
                              real lambda_q, real lambda_lj,
                              gmx_bool bEnerVir, int pme_flags,
    flags = pme_flags | PP_PME_COORD;
        flags |= PP_PME_FEP_Q;
        flags |= PP_PME_FEP_LJ;
        flags |= PP_PME_ENER_VIR;

    gmx_pme_send_coeffs_coords(cr, flags, NULL, NULL, NULL, NULL, NULL, NULL,
                               box, x, lambda_q, lambda_lj, 0, 0, step);
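/* Tells the PME-only ranks that the run is over and they should exit
 * their receive loop. */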
void gmx_pme_send_finish(t_commrec *cr)
    flags = PP_PME_FINISH;

    gmx_pme_send_coeffs_coords(cr, flags, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 0, 0, 0, -1);
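/* Asks the PME-only rank to switch to a new grid size and new Ewald
 * coefficients; used by the PME grid tuning (load balancing). */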
void gmx_pme_send_switchgrid(t_commrec gmx_unused *cr,
                             ivec gmx_unused      grid_size,
                             real gmx_unused      ewaldcoeff_q,
                             real gmx_unused      ewaldcoeff_lj)
    gmx_pme_comm_n_box_t cnb;

    /* Only let one PP node signal each PME node */
    if (cr->dd->pme_receive_vir_ener)
        cnb.flags = PP_PME_SWITCHGRID;
        copy_ivec(grid_size, cnb.grid_size);
        cnb.ewaldcoeff_q  = ewaldcoeff_q;
        cnb.ewaldcoeff_lj = ewaldcoeff_lj;

        /* We send this uncommon message blocking to simplify the code */
        MPI_Send(&cnb, sizeof(cnb), MPI_BYTE,
                 cr->dd->pme_nodeid, eCommType_CNB, cr->mpi_comm_mysim);
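/* Tells the PME-only rank to reset its cycle and flop counters at the
 * given step, so that its timings stay in sync with the PP ranks. */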
void gmx_pme_send_resetcounters(t_commrec gmx_unused *cr, gmx_int64_t gmx_unused step)
    gmx_pme_comm_n_box_t cnb;

    /* Only let one PP node signal each PME node */
    if (cr->dd->pme_receive_vir_ener)
        cnb.flags = PP_PME_RESETCOUNTERS;

        /* We send this uncommon message blocking to simplify the code */
        MPI_Send(&cnb, sizeof(cnb), MPI_BYTE,
                 cr->dd->pme_nodeid, eCommType_CNB, cr->mpi_comm_mysim);
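/* Receive loop helper on the PME-only rank: blocks until the peer PP
 * rank sends a gmx_pme_comm_n_box_t header, then receives new
 * parameters and/or coordinates from all PP ranks, or handles a grid
 * switch, counter reset or finish request, and returns a pmerecvqx*
 * code telling the caller what arrived. */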
int gmx_pme_recv_coeffs_coords(struct gmx_pme_pp *pme_pp,
                               matrix gmx_unused       box,
                               int gmx_unused         *maxshift_x,
                               int gmx_unused         *maxshift_y,
                               gmx_bool gmx_unused    *bFreeEnergy_q,
                               gmx_bool gmx_unused    *bFreeEnergy_lj,
                               real gmx_unused        *lambda_q,
                               real gmx_unused        *lambda_lj,
                               gmx_bool gmx_unused    *bEnerVir,
                               gmx_int64_t gmx_unused *step,
                               ivec gmx_unused         grid_size,
                               real gmx_unused        *ewaldcoeff_q,
                               real gmx_unused        *ewaldcoeff_lj)
    gmx_pme_comm_n_box_t cnb;
    int                  nat = 0, q, messages, sender;

    /* avoid compiler warning about unused variable without MPI support */

        /* Receive the send count, box and time step from the peer PP node */
        MPI_Recv(&cnb, sizeof(cnb), MPI_BYTE,
                 pme_pp->node_peer, eCommType_CNB,
                 pme_pp->mpi_comm_mysim, MPI_STATUS_IGNORE);

            fprintf(debug, "PME only rank receiving:%s%s%s%s%s\n",
                    (cnb.flags & PP_PME_CHARGE)        ? " charges" : "",
                    (cnb.flags & PP_PME_COORD)         ? " coordinates" : "",
                    (cnb.flags & PP_PME_FINISH)        ? " finish" : "",
                    (cnb.flags & PP_PME_SWITCHGRID)    ? " switch grid" : "",
                    (cnb.flags & PP_PME_RESETCOUNTERS) ? " reset counters" : "");
        if (cnb.flags & PP_PME_SWITCHGRID)
            /* Special case, receive the new parameters and return */
            copy_ivec(cnb.grid_size, grid_size);
            *ewaldcoeff_q  = cnb.ewaldcoeff_q;
            *ewaldcoeff_lj = cnb.ewaldcoeff_lj;

            return pmerecvqxSWITCHGRID;

        if (cnb.flags & PP_PME_RESETCOUNTERS)
            /* Special case, receive the step and return */

            return pmerecvqxRESETCOUNTERS;
        if (cnb.flags & (PP_PME_CHARGE | PP_PME_SQRTC6 | PP_PME_SIGMA))
            /* Receive the send counts from the other PP nodes */
            for (sender = 0; sender < pme_pp->nnode; sender++)
                if (pme_pp->node[sender] == pme_pp->node_peer)
                    pme_pp->nat[sender] = cnb.natoms;
                    MPI_Irecv(&(pme_pp->nat[sender]), sizeof(pme_pp->nat[0]),
                              pme_pp->node[sender], eCommType_CNB,
                              pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);
            MPI_Waitall(messages, pme_pp->req, pme_pp->stat);

            for (sender = 0; sender < pme_pp->nnode; sender++)
                nat += pme_pp->nat[sender];

            if (nat > pme_pp->nalloc)
                pme_pp->nalloc = over_alloc_dd(nat);
                if (cnb.flags & PP_PME_CHARGE)
                    srenew(pme_pp->chargeA, pme_pp->nalloc);
                if (cnb.flags & PP_PME_CHARGEB)
                    srenew(pme_pp->chargeB, pme_pp->nalloc);
                if (cnb.flags & PP_PME_SQRTC6)
                    srenew(pme_pp->sqrt_c6A, pme_pp->nalloc);
                if (cnb.flags & PP_PME_SQRTC6B)
                    srenew(pme_pp->sqrt_c6B, pme_pp->nalloc);
                if (cnb.flags & PP_PME_SIGMA)
                    srenew(pme_pp->sigmaA, pme_pp->nalloc);
                if (cnb.flags & PP_PME_SIGMAB)
                    srenew(pme_pp->sigmaB, pme_pp->nalloc);
                srenew(pme_pp->x, pme_pp->nalloc);
                srenew(pme_pp->f, pme_pp->nalloc);

            /* maxshift is sent when the charges are sent */
            *maxshift_x = cnb.maxshift_x;
            *maxshift_y = cnb.maxshift_y;
            /* Receive the charges in place */
            for (q = 0; q < eCommType_NR; q++)
                if (!(cnb.flags & (PP_PME_CHARGE<<q)))
                    case eCommType_ChargeA: charge_pp = pme_pp->chargeA;  break;
                    case eCommType_ChargeB: charge_pp = pme_pp->chargeB;  break;
                    case eCommType_SQRTC6A: charge_pp = pme_pp->sqrt_c6A; break;
                    case eCommType_SQRTC6B: charge_pp = pme_pp->sqrt_c6B; break;
                    case eCommType_SigmaA:  charge_pp = pme_pp->sigmaA;   break;
                    case eCommType_SigmaB:  charge_pp = pme_pp->sigmaB;   break;
                    default: gmx_incons("Wrong eCommType");
                for (sender = 0; sender < pme_pp->nnode; sender++)
                    if (pme_pp->nat[sender] > 0)
                        MPI_Irecv(charge_pp+nat,
                                  pme_pp->nat[sender]*sizeof(real),
                                  pme_pp->node[sender], q,
                                  pme_pp->mpi_comm_mysim,
                                  &pme_pp->req[messages++]);
                        nat += pme_pp->nat[sender];

                            fprintf(debug, "Received from PP rank %d: %d "
                                    "charges\n",
                                    pme_pp->node[sender], pme_pp->nat[sender]);

            pme_pp->flags_charge = cnb.flags;
        if (cnb.flags & PP_PME_COORD)
            if (!(pme_pp->flags_charge & (PP_PME_CHARGE | PP_PME_SQRTC6)))
                gmx_incons("PME-only rank received coordinates before charges and/or C6-values");

            /* The box, FE flag and lambda are sent along with the coordinates */
            copy_mat(cnb.box, box);
            *bFreeEnergy_q  = ((cnb.flags & GMX_PME_DO_COULOMB) &&
                               (cnb.flags & PP_PME_FEP_Q));
            *bFreeEnergy_lj = ((cnb.flags & GMX_PME_DO_LJ) &&
                               (cnb.flags & PP_PME_FEP_LJ));
            *lambda_q       = cnb.lambda_q;
            *lambda_lj      = cnb.lambda_lj;
            *bEnerVir       = (cnb.flags & PP_PME_ENER_VIR);
            *pme_flags      = cnb.flags;

            if (*bFreeEnergy_q && !(pme_pp->flags_charge & PP_PME_CHARGEB))
                gmx_incons("PME-only rank received free energy request, but "
                           "did not receive B-state charges");

            if (*bFreeEnergy_lj && !(pme_pp->flags_charge & PP_PME_SQRTC6B))
                gmx_incons("PME-only rank received free energy request, but "
                           "did not receive B-state C6-values");
            /* Receive the coordinates in place */
            for (sender = 0; sender < pme_pp->nnode; sender++)
                if (pme_pp->nat[sender] > 0)
                    MPI_Irecv(pme_pp->x[nat], pme_pp->nat[sender]*sizeof(rvec),
                              pme_pp->node[sender], eCommType_COORD,
                              pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);
                    nat += pme_pp->nat[sender];

                        fprintf(debug, "Received from PP rank %d: %d "
                                "coordinates\n",
                                pme_pp->node[sender], pme_pp->nat[sender]);

        /* Wait for the coordinates and/or charges to arrive */
        MPI_Waitall(messages, pme_pp->req, pme_pp->stat);
    while (!(cnb.flags & (PP_PME_COORD | PP_PME_FINISH)));

    *chargeA  = pme_pp->chargeA;
    *chargeB  = pme_pp->chargeB;
    *sqrt_c6A = pme_pp->sqrt_c6A;
    *sqrt_c6B = pme_pp->sqrt_c6B;
    *sigmaA   = pme_pp->sigmaA;
    *sigmaB   = pme_pp->sigmaB;

    return ((cnb.flags & PP_PME_FINISH) ? pmerecvqxFINISH : pmerecvqxX);
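/* On the PP rank that is the peer of a PME rank, receives the PME
 * virial, energies, dV/dlambda contributions, cycle count and any stop
 * condition signalled by the PME rank. */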
static void receive_virial_energy(t_commrec *cr,
                                  matrix vir_q, real *energy_q,
                                  matrix vir_lj, real *energy_lj,
                                  real *dvdlambda_q, real *dvdlambda_lj,
    gmx_pme_comm_vir_ene_t cve;

    if (cr->dd->pme_receive_vir_ener)
            fprintf(debug,
                    "PP rank %d receiving from PME rank %d: virial and energy\n",
                    cr->sim_nodeid, cr->dd->pme_nodeid);
        MPI_Recv(&cve, sizeof(cve), MPI_BYTE, cr->dd->pme_nodeid, 1, cr->mpi_comm_mysim,
        memset(&cve, 0, sizeof(cve));

        m_add(vir_q, cve.vir_q, vir_q);
        m_add(vir_lj, cve.vir_lj, vir_lj);
        *energy_q      = cve.energy_q;
        *energy_lj     = cve.energy_lj;
        *dvdlambda_q  += cve.dvdlambda_q;
        *dvdlambda_lj += cve.dvdlambda_lj;
        *pme_cycles    = cve.cycles;

        if (cve.stop_cond != gmx_stop_cond_none)
            gmx_set_stop_condition(cve.stop_cond);
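/* Called on the PP ranks after the PME mesh step: receives the PME
 * forces for the home atoms and adds them to f, then collects the PME
 * virial and energy terms via receive_virial_energy(). */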
void gmx_pme_receive_f(t_commrec *cr,
                       rvec f[], matrix vir_q, real *energy_q,
                       matrix vir_lj, real *energy_lj,
                       real *dvdlambda_q, real *dvdlambda_lj,
#ifdef GMX_PME_DELAYED_WAIT
    /* Wait for the x request to finish */
    gmx_pme_send_coeffs_coords_wait(cr->dd);

    natoms = cr->dd->nat_home;

    if (natoms > cr->dd->pme_recv_f_alloc)
        cr->dd->pme_recv_f_alloc = over_alloc_dd(natoms);
        srenew(cr->dd->pme_recv_f_buf, cr->dd->pme_recv_f_alloc);

    MPI_Recv(cr->dd->pme_recv_f_buf[0],
             natoms*sizeof(rvec), MPI_BYTE,
             cr->dd->pme_nodeid, 0, cr->mpi_comm_mysim,

    for (i = 0; i < natoms; i++)
        rvec_inc(f[i], cr->dd->pme_recv_f_buf[i]);

    receive_virial_energy(cr, vir_q, energy_q, vir_lj, energy_lj, dvdlambda_q, dvdlambda_lj, pme_cycles);
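/* PME-side counterpart of gmx_pme_receive_f(): sends the computed
 * forces back to each PP rank and the virial, energies, dV/dlambda
 * terms, cycle count and current stop condition to the peer PP rank. */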
void gmx_pme_send_force_vir_ener(struct gmx_pme_pp *pme_pp,
                                 matrix vir_q, real energy_q,
                                 matrix vir_lj, real energy_lj,
                                 real dvdlambda_q, real dvdlambda_lj,
    gmx_pme_comm_vir_ene_t cve;
    int                    messages, ind_start, ind_end, receiver;

    /* Now the evaluated forces have to be transferred to the PP nodes */
    for (receiver = 0; receiver < pme_pp->nnode; receiver++)
        ind_end = ind_start + pme_pp->nat[receiver];
        if (MPI_Isend(f[ind_start], (ind_end-ind_start)*sizeof(rvec), MPI_BYTE,
                      pme_pp->node[receiver], 0,
                      pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]) != 0)
            gmx_comm("MPI_Isend failed in do_pmeonly");

    /* send virial and energy to our last PP node */
    copy_mat(vir_q, cve.vir_q);
    copy_mat(vir_lj, cve.vir_lj);
    cve.energy_q     = energy_q;
    cve.energy_lj    = energy_lj;
    cve.dvdlambda_q  = dvdlambda_q;
    cve.dvdlambda_lj = dvdlambda_lj;
    /* check for the signals to send back to a PP node */
    cve.stop_cond = gmx_get_stop_condition();

        fprintf(debug, "PME rank sending to PP rank %d: virial and energy\n",
                pme_pp->node_peer);

    MPI_Isend(&cve, sizeof(cve), MPI_BYTE,
              pme_pp->node_peer, 1,
              pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);

    /* Wait for the forces to arrive */
    MPI_Waitall(messages, pme_pp->req, pme_pp->stat);