3 * This source code is part of
7 * GROningen MAchine for Chemical Simulations
10 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
11 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
12 * Copyright (c) 2001-2004, The GROMACS development team,
13 * check out http://www.gromacs.org for more information.
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version 2
18 * of the License, or (at your option) any later version.
20 * If you want to redistribute modifications, please consider that
21 * scientific software is very special. Version control is crucial -
22 * bugs must be traceable. We will be happy to consider code for
23 * inclusion in the official distribution, but derived work must not
24 * be called official GROMACS. Details are found in the README & COPYING
25 * files - if they are missing, get the official version at www.gromacs.org.
27 * To help us fund GROMACS development, we humbly ask that you cite
28 * the papers on the package - you can find them in the top README file.
30 * For more info, check our website at http://www.gromacs.org
33 * GROwing Monsters And Cloning Shrimps
46 #include "gmx_fatal.h"
51 #include "sighandler.h"
/* Bit flags carried on the PP -> PME metadata message, describing what
 * data accompanies it (A-state charges, B-state charges for free energy,
 * coordinates, a free-energy request, a request for energy/virial, or a
 * request to terminate the PME-only node).
 */
60 #define PP_PME_CHARGE (1<<0)
61 #define PP_PME_CHARGEB (1<<1)
62 #define PP_PME_COORD (1<<2)
63 #define PP_PME_FEP (1<<3)
64 #define PP_PME_ENER_VIR (1<<4)
65 #define PP_PME_FINISH (1<<5)
/* Signal bits sent back from the PME node to the PP nodes.
 * NOTE(review): presumably encode stop conditions (stop now / stop at the
 * next neighbor-search step) relayed via gmx_pme_comm_vir_ene_t.stop_cond;
 * the code that sets these bits is not visible in this view -- confirm.
 */
67 #define PME_PP_SIGSTOP (1<<0)
68 #define PME_PP_SIGSTOPNSS (1<<1)
/* State held by a PME-only node about the PP nodes it serves: the shared
 * communicator, the PP ranks, per-PP-node atom counts, and the flags that
 * came with the most recently received charges.
 * NOTE(review): this view of the file is incomplete -- further fields used
 * below (req, stat, nalloc, chargeA, chargeB, x, f) and the closing brace
 * of the struct are not visible here.
 */
70 typedef struct gmx_pme_pp {
72 MPI_Comm mpi_comm_mysim; /* Communicator shared with the PP nodes */
74 int nnode; /* The number of PP node to communicate with */
75 int *node; /* The PP node ranks */
76 int node_peer; /* The peer PP node rank */
77 int *nat; /* The number of atom for each PP node */
78 int flags_charge; /* The flags sent along with the last charges */
/* Metadata message sent from the peer PP node to the PME node; from its
 * uses below it carries at least: natoms, box, maxshift_x/maxshift_y,
 * lambda, flags and (presumably) the MD step.  The field list itself is
 * not visible in this view.
 */
90 typedef struct gmx_pme_comm_n_box {
98 } gmx_pme_comm_n_box_t;

/* Reply message from the PME node to the peer PP node; from its uses below
 * it carries at least vir, energy, dvdlambda, cycles and the stop condition.
 * Only the tail of the declaration is visible here. */
105 gmx_stop_cond_t stop_cond;
106 } gmx_pme_comm_vir_ene_t;
/* Allocate and initialize the PME<->PP communication record on a PME-only
 * node: store the simulation communicator, determine which PP ranks this
 * PME rank communicates with, and allocate the per-PP-node bookkeeping
 * arrays (atom counts plus MPI request/status arrays).
 * NOTE(review): lines are missing from this view (e.g. the snew of pme_pp
 * itself and the return statement) -- confirm against the full file.
 */
111 gmx_pme_pp_t gmx_pme_pp_init(t_commrec *cr)
113 struct gmx_pme_pp *pme_pp;
119 pme_pp->mpi_comm_mysim = cr->mpi_comm_mysim;
/* Rank within our own (PME) group determines which PP ranks map to us */
120 MPI_Comm_rank(cr->mpi_comm_mygroup,&rank);
121 get_pme_ddnodes(cr,rank,&pme_pp->nnode,&pme_pp->node,&pme_pp->node_peer);
122 snew(pme_pp->nat,pme_pp->nnode);
/* Two requests/statuses per PP node -- presumably enough for the charge and
 * coordinate receives posted concurrently in gmx_pme_recv_q_x */
123 snew(pme_pp->req,2*pme_pp->nnode);
124 snew(pme_pp->stat,2*pme_pp->nnode);
/* Nothing received yet */
126 pme_pp->flags_charge = 0;
132 /* This should be faster with a real non-blocking MPI implementation */
133 /* #define GMX_PME_DELAYED_WAIT */

/* Block until all outstanding non-blocking sends from this PP node to its
 * PME node (tracked in dd->req_pme / dd->nreq_pme) have completed.
 * NOTE(review): the reset of dd->nreq_pme is not visible in this view. */
135 static void gmx_pme_send_q_x_wait(gmx_domdec_t *dd)
139 MPI_Waitall(dd->nreq_pme,dd->req_pme,MPI_STATUSES_IGNORE);
/* Low-level send from a PP node to its PME node.  Depending on flags this
 * posts non-blocking sends for: the metadata (atom count, box, shifts,
 * lambda) on tag 0, the A-state charges on tag 1, the B-state charges on
 * tag 2, and the coordinates on tag 3 -- matching the receives in
 * gmx_pme_recv_q_x.  Unless GMX_PME_DELAYED_WAIT is defined, it waits for
 * the sends to complete before returning.
 * NOTE(review): this view of the file has gaps; some declarations, braces
 * and conditional guards are not visible.
 */
145 static void gmx_pme_send_q_x(t_commrec *cr, int flags,
146 real *chargeA, real *chargeB,
149 int maxshift_x, int maxshift_y,
150 gmx_large_int_t step)
153 gmx_pme_comm_n_box_t *cnb;
160 fprintf(debug,"PP node %d sending to PME node %d: %d%s%s\n",
161 cr->sim_nodeid,dd->pme_nodeid,n,
162 flags & PP_PME_CHARGE ? " charges" : "",
163 flags & PP_PME_COORD ? " coordinates" : "");
165 #ifdef GMX_PME_DELAYED_WAIT
166 /* We can not use cnb until the pending communication has finished */
167 gmx_pme_send_x_q_wait(dd);
170 if (dd->pme_receive_vir_ener) {
171 /* Peer PP node: communicate all data */
178 cnb->maxshift_x = maxshift_x;
179 cnb->maxshift_y = maxshift_y;
180 cnb->lambda = lambda;
/* The box only travels together with coordinates */
182 if (flags & PP_PME_COORD)
183 copy_mat(box,cnb->box);
/* Tag 0: metadata message */
185 MPI_Isend(cnb,sizeof(*cnb),MPI_BYTE,
186 dd->pme_nodeid,0,cr->mpi_comm_mysim,
187 &dd->req_pme[dd->nreq_pme++]);
189 } else if (flags & PP_PME_CHARGE) {
191 /* Communicate only the number of atoms */
192 MPI_Isend(&n,sizeof(n),MPI_BYTE,
193 dd->pme_nodeid,0,cr->mpi_comm_mysim,
194 &dd->req_pme[dd->nreq_pme++]);
/* Payload sends; tags 1/2/3 must match the MPI_Irecv calls on the PME side */
200 if (flags & PP_PME_CHARGE) {
201 MPI_Isend(chargeA,n*sizeof(real),MPI_BYTE,
202 dd->pme_nodeid,1,cr->mpi_comm_mysim,
203 &dd->req_pme[dd->nreq_pme++]);
205 if (flags & PP_PME_CHARGEB) {
206 MPI_Isend(chargeB,n*sizeof(real),MPI_BYTE,
207 dd->pme_nodeid,2,cr->mpi_comm_mysim,
208 &dd->req_pme[dd->nreq_pme++]);
210 if (flags & PP_PME_COORD) {
211 MPI_Isend(x[0],n*sizeof(rvec),MPI_BYTE,
212 dd->pme_nodeid,3,cr->mpi_comm_mysim,
213 &dd->req_pme[dd->nreq_pme++]);
217 #ifndef GMX_PME_DELAYED_WAIT
218 /* Wait for the data to arrive */
219 /* We can skip this wait as we are sure x and q will not be modified
220 * before the next call to gmx_pme_send_x_q or gmx_pme_receive_f.
/* NOTE(review): despite the comment above, the wait is executed here; a
 * conditional guard may be hidden in the lines missing from this view. */
222 gmx_pme_send_q_x_wait(dd);
/* Send the A-state charges (plus B-state charges when doing free energy)
 * and the PME grid shift limits from this PP node to its PME node.
 * No coordinates or box are sent; step is passed as -1 since this message
 * is not tied to a particular MD step.
 */
227 void gmx_pme_send_q(t_commrec *cr,
228 gmx_bool bFreeEnergy, real *chargeA, real *chargeB,
229 int maxshift_x, int maxshift_y)
233 flags = PP_PME_CHARGE;
235 flags |= PP_PME_CHARGEB;
237 gmx_pme_send_q_x(cr,flags,
238 chargeA,chargeB,NULL,NULL,0,maxshift_x,maxshift_y,-1);
/* Send the box and coordinates for this MD step from this PP node to its
 * PME node; the flags optionally request free-energy (lambda) handling
 * and the return of energy and virial.  No charges are sent here.
 */
241 void gmx_pme_send_x(t_commrec *cr, matrix box, rvec *x,
242 gmx_bool bFreeEnergy, real lambda,
244 gmx_large_int_t step)
248 flags = PP_PME_COORD;
252 flags |= PP_PME_ENER_VIR;
254 gmx_pme_send_q_x(cr,flags,NULL,NULL,box,x,lambda,0,0,step);
/* Tell the PME-only node to shut down: send a message carrying only the
 * PP_PME_FINISH flag, which makes gmx_pme_recv_q_x return -1. */
257 void gmx_pme_finish(t_commrec *cr)
261 flags = PP_PME_FINISH;
263 gmx_pme_send_q_x(cr,flags,NULL,NULL,NULL,NULL,0,0,0,-1);
/* Receive loop on the PME-only node: block on the metadata message from
 * the peer PP node, then gather atom counts, charges and/or coordinates
 * from all PP nodes into the internal buffers, looping until a message
 * carrying coordinates or the finish flag arrives.
 * Returns the total atom count, or -1 when PP_PME_FINISH was received.
 * The output pointers (chargeA/chargeB/x/f) are set to internal buffers
 * owned by pme_pp -- callers must not free them.
 * NOTE(review): this view of the file has gaps (the do-loop header, some
 * braces, declarations and the remaining output assignments are missing).
 */
266 int gmx_pme_recv_q_x(struct gmx_pme_pp *pme_pp,
267 real **chargeA, real **chargeB,
268 matrix box, rvec **x,rvec **f,
269 int *maxshift_x, int *maxshift_y,
270 gmx_bool *bFreeEnergy,real *lambda,
272 gmx_large_int_t *step)
274 gmx_pme_comm_n_box_t cnb;
275 int nat=0,q,messages,sender;
280 /* avoid compiler warning about unused variable without MPI support */
284 /* Receive the send count, box and time step from the peer PP node */
285 MPI_Recv(&cnb,sizeof(cnb),MPI_BYTE,
287 pme_pp->mpi_comm_mysim,MPI_STATUS_IGNORE);
290 fprintf(debug,"PME only node receiving:%s%s%s\n",
291 (cnb.flags & PP_PME_CHARGE) ? " charges" : "",
292 (cnb.flags & PP_PME_COORD ) ? " coordinates" : "",
293 (cnb.flags & PP_PME_FINISH) ? " finish" : "");
295 if (cnb.flags & PP_PME_CHARGE) {
296 /* Receive the send counts from the other PP nodes */
297 for(sender=0; sender<pme_pp->nnode; sender++) {
298 if (pme_pp->node[sender] == pme_pp->node_peer) {
/* The peer's count already arrived in the metadata message */
299 pme_pp->nat[sender] = cnb.natoms;
301 MPI_Irecv(&(pme_pp->nat[sender]),sizeof(pme_pp->nat[0]),
303 pme_pp->node[sender],0,
304 pme_pp->mpi_comm_mysim,&pme_pp->req[messages++]);
307 MPI_Waitall(messages, pme_pp->req, pme_pp->stat);
/* Sum to the total atom count over all PP nodes */
311 for(sender=0; sender<pme_pp->nnode; sender++)
312 nat += pme_pp->nat[sender];
/* Grow the internal buffers when needed, with DD over-allocation */
314 if (nat > pme_pp->nalloc) {
315 pme_pp->nalloc = over_alloc_dd(nat);
316 srenew(pme_pp->chargeA,pme_pp->nalloc);
317 if (cnb.flags & PP_PME_CHARGEB)
318 srenew(pme_pp->chargeB,pme_pp->nalloc);
319 srenew(pme_pp->x,pme_pp->nalloc);
320 srenew(pme_pp->f,pme_pp->nalloc);
323 /* maxshift is sent when the charges are sent */
324 *maxshift_x = cnb.maxshift_x;
325 *maxshift_y = cnb.maxshift_y;
327 /* Receive the charges in place */
328 for(q=0; q<((cnb.flags & PP_PME_CHARGEB) ? 2 : 1); q++) {
330 charge_pp = pme_pp->chargeA;
332 charge_pp = pme_pp->chargeB;
334 for(sender=0; sender<pme_pp->nnode; sender++) {
335 if (pme_pp->nat[sender] > 0) {
/* Tags 1 (A state) and 2 (B state) match the sends on the PP side */
336 MPI_Irecv(charge_pp+nat,
337 pme_pp->nat[sender]*sizeof(real),
339 pme_pp->node[sender],1+q,
340 pme_pp->mpi_comm_mysim,
341 &pme_pp->req[messages++]);
342 nat += pme_pp->nat[sender];
344 fprintf(debug,"Received from PP node %d: %d "
346 pme_pp->node[sender],pme_pp->nat[sender]);
/* Record which charge data we now hold, for the consistency checks below */
351 pme_pp->flags_charge = cnb.flags;
354 if (cnb.flags & PP_PME_COORD) {
355 if (!(pme_pp->flags_charge & PP_PME_CHARGE))
356 gmx_incons("PME-only node received coordinates before charges"
359 /* The box, FE flag and lambda are sent along with the coordinates
361 copy_mat(cnb.box,box);
362 *bFreeEnergy = (cnb.flags & PP_PME_FEP);
363 *lambda = cnb.lambda;
364 *bEnerVir = (cnb.flags & PP_PME_ENER_VIR);
366 if (*bFreeEnergy && !(pme_pp->flags_charge & PP_PME_CHARGEB))
367 gmx_incons("PME-only node received free energy request, but "
368 "did not receive B-state charges");
370 /* Receive the coordinates in place (tag 3, matching the PP-side send) */
372 for(sender=0; sender<pme_pp->nnode; sender++) {
373 if (pme_pp->nat[sender] > 0) {
374 MPI_Irecv(pme_pp->x[nat],pme_pp->nat[sender]*sizeof(rvec),
376 pme_pp->node[sender],3,
377 pme_pp->mpi_comm_mysim,&pme_pp->req[messages++]);
378 nat += pme_pp->nat[sender];
380 fprintf(debug,"Received from PP node %d: %d "
382 pme_pp->node[sender],pme_pp->nat[sender]);
387 /* Wait for the coordinates and/or charges to arrive */
388 MPI_Waitall(messages, pme_pp->req, pme_pp->stat);
/* Keep looping until we have coordinates to act on, or are told to quit */
390 } while (!(cnb.flags & (PP_PME_COORD | PP_PME_FINISH)));
/* Hand back pointers to the internal buffers */
395 *chargeA = pme_pp->chargeA;
396 *chargeB = pme_pp->chargeB;
401 return ((cnb.flags & PP_PME_FINISH) ? -1 : nat);
/* Receive virial, energy, dvdlambda and PME cycle count from the PME node
 * and accumulate them into the caller's buffers.  Only the peer PP node
 * actually receives a message; other nodes use a zeroed reply.  Any stop
 * condition signalled by the PME node is forwarded to the local signal
 * handler.
 */
404 static void receive_virial_energy(t_commrec *cr,
405 matrix vir,real *energy,real *dvdlambda,
408 gmx_pme_comm_vir_ene_t cve;
410 if (cr->dd->pme_receive_vir_ener) {
413 "PP node %d receiving from PME node %d: virial and energy\n",
414 cr->sim_nodeid,cr->dd->pme_nodeid);
/* Blocking receive of the reply struct (tag 1) */
416 MPI_Recv(&cve,sizeof(cve),MPI_BYTE,cr->dd->pme_nodeid,1,cr->mpi_comm_mysim,
/* Presumably the non-peer branch: nothing is sent to us, so use zeros --
 * the else keyword is in the lines missing from this view */
419 memset(&cve,0,sizeof(cve));
/* Accumulate the PME contributions into the caller's totals */
422 m_add(vir,cve.vir,vir);
423 *energy = cve.energy;
424 *dvdlambda += cve.dvdlambda;
425 *pme_cycles = cve.cycles;
/* Propagate a stop request raised on the PME node */
427 if ( cve.stop_cond != gmx_stop_cond_none )
429 gmx_set_stop_condition(cve.stop_cond);
/* Receive the PME forces for the local atoms and add them to f; then
 * receive and accumulate the virial/energy/dvdlambda/cycles reply via
 * receive_virial_energy (peer node only).
 */
437 void gmx_pme_receive_f(t_commrec *cr,
438 rvec f[], matrix vir,
439 real *energy, real *dvdlambda,
444 #ifdef GMX_PME_DELAYED_WAIT
445 /* Wait for the x request to finish */
446 gmx_pme_send_q_x_wait(cr->dd);
449 natoms = cr->dd->nat_home;
/* Grow the receive buffer when the local atom count has increased */
451 if (natoms > cr->dd->pme_recv_f_alloc)
453 cr->dd->pme_recv_f_alloc = over_alloc_dd(natoms);
454 srenew(cr->dd->pme_recv_f_buf, cr->dd->pme_recv_f_alloc);
/* Blocking receive of the forces (tag 0) from our PME node */
458 MPI_Recv(cr->dd->pme_recv_f_buf[0],
459 natoms*sizeof(rvec),MPI_BYTE,
460 cr->dd->pme_nodeid,0,cr->mpi_comm_mysim,
/* The PME forces are added to, not overwrite, the PP forces */
464 for(i=0; i<natoms; i++)
465 rvec_inc(f[i],cr->dd->pme_recv_f_buf[i]);
468 receive_virial_energy(cr,vir,energy,dvdlambda,pme_cycles);
/* On the PME-only node: send each PP node its contiguous slice of the
 * computed forces with non-blocking sends (tag 0), then send the
 * virial/energy/dvdlambda/cycles reply plus the current stop condition
 * (tag on the missing line, matched by receive_virial_energy), and wait
 * for all sends to complete.
 * NOTE(review): lines are missing from this view (messages/ind_start
 * initialization, ind_start advance, cve.energy assignment).
 */
471 void gmx_pme_send_force_vir_ener(struct gmx_pme_pp *pme_pp,
473 real energy, real dvdlambda,
476 gmx_pme_comm_vir_ene_t cve;
477 int messages,ind_start,ind_end,receiver;
481 /* Now the evaluated forces have to be transferred to the PP nodes */
484 for (receiver=0; receiver<pme_pp->nnode; receiver++) {
/* Each PP node gets the slice matching the atoms it sent us */
486 ind_end = ind_start + pme_pp->nat[receiver];
488 if (MPI_Isend(f[ind_start],(ind_end-ind_start)*sizeof(rvec),MPI_BYTE,
489 pme_pp->node[receiver],0,
490 pme_pp->mpi_comm_mysim,&pme_pp->req[messages++]) != 0)
491 gmx_comm("MPI_Isend failed in do_pmeonly");
495 /* send virial and energy to our last PP node */
496 copy_mat(vir,cve.vir);
498 cve.dvdlambda = dvdlambda;
499 /* check for the signals to send back to a PP node */
500 cve.stop_cond = gmx_get_stop_condition();
505 fprintf(debug,"PME node sending to PP node %d: virial and energy\n",
508 MPI_Isend(&cve,sizeof(cve),MPI_BYTE,
510 pme_pp->mpi_comm_mysim,&pme_pp->req[messages++]);
512 /* Wait for our sends to complete, i.e. the forces to arrive at the PP nodes */
513 MPI_Waitall(messages, pme_pp->req, pme_pp->stat);