Code beautification with uncrustify
[alexxy/gromacs.git] / src / gromacs / mdlib / pme_pp.c
1 /* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
2  *
3  *
4  *                This source code is part of
5  *
6  *                 G   R   O   M   A   C   S
7  *
8  *          GROningen MAchine for Chemical Simulations
9  *
10  *                        VERSION 3.2.0
11  * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
12  * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
13  * Copyright (c) 2001-2004, The GROMACS development team,
14  * check out http://www.gromacs.org for more information.
15  *
16  * This program is free software; you can redistribute it and/or
17  * modify it under the terms of the GNU General Public License
18  * as published by the Free Software Foundation; either version 2
19  * of the License, or (at your option) any later version.
20  *
21  * If you want to redistribute modifications, please consider that
22  * scientific software is very special. Version control is crucial -
23  * bugs must be traceable. We will be happy to consider code for
24  * inclusion in the official distribution, but derived work must not
25  * be called official GROMACS. Details are found in the README & COPYING
26  * files - if they are missing, get the official version at www.gromacs.org.
27  *
28  * To help us fund GROMACS development, we humbly ask that you cite
29  * the papers on the package - you can find them in the top README file.
30  *
31  * For more info, check our website at http://www.gromacs.org
32  *
33  * And Hey:
34  * GROwing Monsters And Cloning Shrimps
35  */
36
37 #ifdef HAVE_CONFIG_H
38 #include <config.h>
39 #endif
40
41
42 #include <stdio.h>
43 #include <string.h>
44 #include <math.h>
45 #include "typedefs.h"
46 #include "smalloc.h"
47 #include "gmx_fatal.h"
48 #include "vec.h"
49 #include "pme.h"
50 #include "network.h"
51 #include "domdec.h"
52 #include "sighandler.h"
53
54 #ifdef GMX_LIB_MPI
55 #include <mpi.h>
56 #endif
57 #ifdef GMX_THREAD_MPI
58 #include "tmpi.h"
59 #endif
60
61 #define PP_PME_CHARGE   (1<<0)
62 #define PP_PME_CHARGEB  (1<<1)
63 #define PP_PME_COORD    (1<<2)
64 #define PP_PME_FEP      (1<<3)
65 #define PP_PME_ENER_VIR (1<<4)
66 #define PP_PME_FINISH   (1<<5)
67 #define PP_PME_SWITCH   (1<<6)
68
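/* The PP_PME_* bits are combined into the flags field of the
 * gmx_pme_comm_n_box_t header to tell the PME-only nodes what follows.
 * An illustrative summary of the combinations produced in this file
 * (derived from gmx_pme_send_q, gmx_pme_send_x, gmx_pme_send_finish and
 * gmx_pme_send_switch below):
 *
 *   PP_PME_CHARGE [| PP_PME_CHARGEB]                  new charges (B-state charges with free energy)
 *   PP_PME_COORD  [| PP_PME_FEP] [| PP_PME_ENER_VIR]  coordinates for one MD step
 *   PP_PME_SWITCH                                     new grid size and Ewald coefficient for PME tuning
 *   PP_PME_FINISH                                     tell the PME-only nodes to stop
 */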
69 #define PME_PP_SIGSTOP     (1<<0)
70 #define PME_PP_SIGSTOPNSS     (1<<1)
71
72 typedef struct gmx_pme_pp {
73 #ifdef GMX_MPI
74     MPI_Comm     mpi_comm_mysim;
75 #endif
76     int          nnode;        /* The number of PP nodes to communicate with */
77     int         *node;         /* The PP node ranks                          */
78     int          node_peer;    /* The peer PP node rank                      */
79     int         *nat;          /* The number of atoms for each PP node       */
80     int          flags_charge; /* The flags sent along with the last charges */
81     real        *chargeA;
82     real        *chargeB;
83     rvec        *x;
84     rvec        *f;
85     int          nalloc;
86 #ifdef GMX_MPI
87     MPI_Request *req;
88     MPI_Status  *stat;
89 #endif
90 } t_gmx_pme_pp;
91
92 typedef struct gmx_pme_comm_n_box {
93     int             natoms;
94     matrix          box;
95     int             maxshift_x;
96     int             maxshift_y;
97     real            lambda;
98     int             flags;
99     gmx_large_int_t step;
100     ivec            grid_size;  /* For PME grid tuning */
101     real            ewaldcoeff; /* For PME grid tuning */
102 } gmx_pme_comm_n_box_t;
103
104 typedef struct {
105     matrix          vir;
106     real            energy;
107     real            dvdlambda;
108     float           cycles;
109     gmx_stop_cond_t stop_cond;
110 } gmx_pme_comm_vir_ene_t;
111
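/* Note that both communication structs above are sent as raw MPI_BYTE
 * buffers of sizeof(struct) (see the MPI calls below). This assumes that
 * the PP and PME ranks use identical struct layouts, i.e. the same
 * precision for real and the same compiler padding.
 */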
112
113
114
115 gmx_pme_pp_t gmx_pme_pp_init(t_commrec *cr)
116 {
117     struct gmx_pme_pp *pme_pp;
118     int                rank;
119
120     snew(pme_pp, 1);
121
122 #ifdef GMX_MPI
123     pme_pp->mpi_comm_mysim = cr->mpi_comm_mysim;
124     MPI_Comm_rank(cr->mpi_comm_mygroup, &rank);
125     get_pme_ddnodes(cr, rank, &pme_pp->nnode, &pme_pp->node, &pme_pp->node_peer);
126     snew(pme_pp->nat, pme_pp->nnode);
127     snew(pme_pp->req, 2*pme_pp->nnode);
128     snew(pme_pp->stat, 2*pme_pp->nnode);
129     pme_pp->nalloc       = 0;
130     pme_pp->flags_charge = 0;
131 #endif
132
133     return pme_pp;
134 }
135
136 /* This should be faster with a real non-blocking MPI implementation */
137 /* #define GMX_PME_DELAYED_WAIT */
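/* A sketch of what defining GMX_PME_DELAYED_WAIT would change: the
 * MPI_Waitall on the non-blocking sends is then done at the start of the
 * next gmx_pme_send_q_x call (and in gmx_pme_receive_f) instead of at the
 * end of gmx_pme_send_q_x, so the communication can overlap with PP work.
 * By default the macro stays undefined and the wait happens before
 * gmx_pme_send_q_x returns.
 */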
138
139 static void gmx_pme_send_q_x_wait(gmx_domdec_t *dd)
140 {
141 #ifdef GMX_MPI
142     if (dd->nreq_pme)
143     {
144         MPI_Waitall(dd->nreq_pme, dd->req_pme, MPI_STATUSES_IGNORE);
145         dd->nreq_pme = 0;
146     }
147 #endif
148 }
149
150 static void gmx_pme_send_q_x(t_commrec *cr, int flags,
151                              real *chargeA, real *chargeB,
152                              matrix box, rvec *x,
153                              real lambda,
154                              int maxshift_x, int maxshift_y,
155                              gmx_large_int_t step)
156 {
157     gmx_domdec_t         *dd;
158     gmx_pme_comm_n_box_t *cnb;
159     int                   n;
160
161     dd = cr->dd;
162     n  = dd->nat_home;
163
164     if (debug)
165     {
166         fprintf(debug, "PP node %d sending to PME node %d: %d%s%s\n",
167                 cr->sim_nodeid, dd->pme_nodeid, n,
168                 flags & PP_PME_CHARGE ? " charges" : "",
169                 flags & PP_PME_COORD  ? " coordinates" : "");
170     }
171
172 #ifdef GMX_PME_DELAYED_WAIT
173     /* We cannot use cnb until pending communication has finished */
174     gmx_pme_send_q_x_wait(dd);
175 #endif
176
177     if (dd->pme_receive_vir_ener)
178     {
179         /* Peer PP node: communicate all data */
180         if (dd->cnb == NULL)
181         {
182             snew(dd->cnb, 1);
183         }
184         cnb = dd->cnb;
185
186         cnb->flags      = flags;
187         cnb->natoms     = n;
188         cnb->maxshift_x = maxshift_x;
189         cnb->maxshift_y = maxshift_y;
190         cnb->lambda     = lambda;
191         cnb->step       = step;
192         if (flags & PP_PME_COORD)
193         {
194             copy_mat(box, cnb->box);
195         }
196 #ifdef GMX_MPI
197         MPI_Isend(cnb, sizeof(*cnb), MPI_BYTE,
198                   dd->pme_nodeid, 0, cr->mpi_comm_mysim,
199                   &dd->req_pme[dd->nreq_pme++]);
200 #endif
201     }
202     else if (flags & PP_PME_CHARGE)
203     {
204 #ifdef GMX_MPI
205         /* Communicate only the number of atoms */
206         MPI_Isend(&n, sizeof(n), MPI_BYTE,
207                   dd->pme_nodeid, 0, cr->mpi_comm_mysim,
208                   &dd->req_pme[dd->nreq_pme++]);
209 #endif
210     }
211
212 #ifdef GMX_MPI
213     if (n > 0)
214     {
215         if (flags & PP_PME_CHARGE)
216         {
217             MPI_Isend(chargeA, n*sizeof(real), MPI_BYTE,
218                       dd->pme_nodeid, 1, cr->mpi_comm_mysim,
219                       &dd->req_pme[dd->nreq_pme++]);
220         }
221         if (flags & PP_PME_CHARGEB)
222         {
223             MPI_Isend(chargeB, n*sizeof(real), MPI_BYTE,
224                       dd->pme_nodeid, 2, cr->mpi_comm_mysim,
225                       &dd->req_pme[dd->nreq_pme++]);
226         }
227         if (flags & PP_PME_COORD)
228         {
229             MPI_Isend(x[0], n*sizeof(rvec), MPI_BYTE,
230                       dd->pme_nodeid, 3, cr->mpi_comm_mysim,
231                       &dd->req_pme[dd->nreq_pme++]);
232         }
233     }
234
235 #ifndef GMX_PME_DELAYED_WAIT
236     /* Wait for the data to arrive */
237     /* This wait could be skipped (see GMX_PME_DELAYED_WAIT) when it is certain
238      * that x and q are not modified before the next call to gmx_pme_send_q_x or gmx_pme_receive_f.
239      */
240     gmx_pme_send_q_x_wait(dd);
241 #endif
242 #endif
243 }
244
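/* Wire format used by gmx_pme_send_q_x above and gmx_pme_recv_q_x below,
 * summarized from the matching MPI_Isend/MPI_Irecv tags:
 *
 *   tag 0: gmx_pme_comm_n_box_t header from the peer PP node
 *          (or only the atom count from the other PP nodes)
 *   tag 1: chargeA[n]  as real,  when PP_PME_CHARGE  is set
 *   tag 2: chargeB[n]  as real,  when PP_PME_CHARGEB is set
 *   tag 3: x[n]        as rvec,  when PP_PME_COORD   is set
 */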
245 void gmx_pme_send_q(t_commrec *cr,
246                     gmx_bool bFreeEnergy, real *chargeA, real *chargeB,
247                     int maxshift_x, int maxshift_y)
248 {
249     int flags;
250
251     flags = PP_PME_CHARGE;
252     if (bFreeEnergy)
253     {
254         flags |= PP_PME_CHARGEB;
255     }
256
257     gmx_pme_send_q_x(cr, flags,
258                      chargeA, chargeB, NULL, NULL, 0, maxshift_x, maxshift_y, -1);
259 }
260
261 void gmx_pme_send_x(t_commrec *cr, matrix box, rvec *x,
262                     gmx_bool bFreeEnergy, real lambda,
263                     gmx_bool bEnerVir,
264                     gmx_large_int_t step)
265 {
266     int flags;
267
268     flags = PP_PME_COORD;
269     if (bFreeEnergy)
270     {
271         flags |= PP_PME_FEP;
272     }
273     if (bEnerVir)
274     {
275         flags |= PP_PME_ENER_VIR;
276     }
277
278     gmx_pme_send_q_x(cr, flags, NULL, NULL, box, x, lambda, 0, 0, step);
279 }
280
281 void gmx_pme_send_finish(t_commrec *cr)
282 {
283     int flags;
284
285     flags = PP_PME_FINISH;
286
287     gmx_pme_send_q_x(cr, flags, NULL, NULL, NULL, NULL, 0, 0, 0, -1);
288 }
289
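/* A sketch of the expected PP-side call sequence for the senders above
 * (the actual driver is the MD loop elsewhere; this is illustrative):
 *
 *   gmx_pme_send_q(...)       whenever the local charges change,
 *                             e.g. after domain repartitioning
 *   gmx_pme_send_x(...)       every MD step, before the PP force work
 *   gmx_pme_receive_f(...)    afterwards, to add the PME mesh forces
 *                             and optionally energy/virial
 *   gmx_pme_send_finish(...)  once, at the end of the run
 */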
290 void gmx_pme_send_switch(t_commrec *cr, ivec grid_size, real ewaldcoeff)
291 {
292 #ifdef GMX_MPI
293     gmx_pme_comm_n_box_t cnb;
294
295     if (cr->dd->pme_receive_vir_ener)
296     {
297         cnb.flags = PP_PME_SWITCH;
298         copy_ivec(grid_size, cnb.grid_size);
299         cnb.ewaldcoeff = ewaldcoeff;
300
301         /* We send this uncommon message with a blocking send to simplify the code */
302         MPI_Send(&cnb, sizeof(cnb), MPI_BYTE,
303                  cr->dd->pme_nodeid, 0, cr->mpi_comm_mysim);
304     }
305 #endif
306 }
307
308 int gmx_pme_recv_q_x(struct gmx_pme_pp *pme_pp,
309                      real **chargeA, real **chargeB,
310                      matrix box, rvec **x, rvec **f,
311                      int *maxshift_x, int *maxshift_y,
312                      gmx_bool *bFreeEnergy, real *lambda,
313                      gmx_bool *bEnerVir,
314                      gmx_large_int_t *step,
315                      ivec grid_size, real *ewaldcoeff)
316 {
317     gmx_pme_comm_n_box_t cnb;
318     int                  nat = 0, q, messages, sender;
319     real                *charge_pp;
320
321     messages = 0;
322
323     /* avoid compiler warning about unused variable without MPI support */
324     cnb.flags = 0;
325 #ifdef GMX_MPI
326     do
327     {
328         /* Receive the send count, box and time step from the peer PP node */
329         MPI_Recv(&cnb, sizeof(cnb), MPI_BYTE,
330                  pme_pp->node_peer, 0,
331                  pme_pp->mpi_comm_mysim, MPI_STATUS_IGNORE);
332
333         if (debug)
334         {
335             fprintf(debug, "PME-only node receiving:%s%s%s%s\n",
336                     (cnb.flags & PP_PME_CHARGE) ? " charges" : "",
337                     (cnb.flags & PP_PME_COORD ) ? " coordinates" : "",
338                     (cnb.flags & PP_PME_FINISH) ? " finish" : "",
339                     (cnb.flags & PP_PME_SWITCH) ? " switch" : "");
340         }
341
342         if (cnb.flags & PP_PME_SWITCH)
343         {
344             /* Special case, receive the new parameters and return */
345             copy_ivec(cnb.grid_size, grid_size);
346             *ewaldcoeff = cnb.ewaldcoeff;
347
348             return -2;
349         }
350
351         if (cnb.flags & PP_PME_CHARGE)
352         {
353             /* Receive the send counts from the other PP nodes */
354             for (sender = 0; sender < pme_pp->nnode; sender++)
355             {
356                 if (pme_pp->node[sender] == pme_pp->node_peer)
357                 {
358                     pme_pp->nat[sender] = cnb.natoms;
359                 }
360                 else
361                 {
362                     MPI_Irecv(&(pme_pp->nat[sender]), sizeof(pme_pp->nat[0]),
363                               MPI_BYTE,
364                               pme_pp->node[sender], 0,
365                               pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);
366                 }
367             }
368             MPI_Waitall(messages, pme_pp->req, pme_pp->stat);
369             messages = 0;
370
371             nat = 0;
372             for (sender = 0; sender < pme_pp->nnode; sender++)
373             {
374                 nat += pme_pp->nat[sender];
375             }
376
377             if (nat > pme_pp->nalloc)
378             {
379                 pme_pp->nalloc = over_alloc_dd(nat);
380                 srenew(pme_pp->chargeA, pme_pp->nalloc);
381                 if (cnb.flags & PP_PME_CHARGEB)
382                 {
383                     srenew(pme_pp->chargeB, pme_pp->nalloc);
384                 }
385                 srenew(pme_pp->x, pme_pp->nalloc);
386                 srenew(pme_pp->f, pme_pp->nalloc);
387             }
388
389             /* maxshift is sent when the charges are sent */
390             *maxshift_x = cnb.maxshift_x;
391             *maxshift_y = cnb.maxshift_y;
392
393             /* Receive the charges in place */
394             for (q = 0; q < ((cnb.flags & PP_PME_CHARGEB) ? 2 : 1); q++)
395             {
396                 if (q == 0)
397                 {
398                     charge_pp = pme_pp->chargeA;
399                 }
400                 else
401                 {
402                     charge_pp = pme_pp->chargeB;
403                 }
404                 nat = 0;
405                 for (sender = 0; sender < pme_pp->nnode; sender++)
406                 {
407                     if (pme_pp->nat[sender] > 0)
408                     {
409                         MPI_Irecv(charge_pp+nat,
410                                   pme_pp->nat[sender]*sizeof(real),
411                                   MPI_BYTE,
412                                   pme_pp->node[sender], 1+q,
413                                   pme_pp->mpi_comm_mysim,
414                                   &pme_pp->req[messages++]);
415                         nat += pme_pp->nat[sender];
416                         if (debug)
417                         {
418                             fprintf(debug, "Received from PP node %d: %d "
419                                     "charges\n",
420                                     pme_pp->node[sender], pme_pp->nat[sender]);
421                         }
422                     }
423                 }
424             }
425
426             pme_pp->flags_charge = cnb.flags;
427         }
428
429         if (cnb.flags & PP_PME_COORD)
430         {
431             if (!(pme_pp->flags_charge & PP_PME_CHARGE))
432             {
433                 gmx_incons("PME-only node received coordinates before charges");
435             }
436
437             /* The box, FE flag and lambda are sent along with the coordinates */
439             copy_mat(cnb.box, box);
440             *bFreeEnergy = (cnb.flags & PP_PME_FEP);
441             *lambda      = cnb.lambda;
442             *bEnerVir    = (cnb.flags & PP_PME_ENER_VIR);
443
444             if (*bFreeEnergy && !(pme_pp->flags_charge & PP_PME_CHARGEB))
445             {
446                 gmx_incons("PME-only node received free energy request, but "
447                            "did not receive B-state charges");
448             }
449
450             /* Receive the coordinates in place */
451             nat = 0;
452             for (sender = 0; sender < pme_pp->nnode; sender++)
453             {
454                 if (pme_pp->nat[sender] > 0)
455                 {
456                     MPI_Irecv(pme_pp->x[nat], pme_pp->nat[sender]*sizeof(rvec),
457                               MPI_BYTE,
458                               pme_pp->node[sender], 3,
459                               pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);
460                     nat += pme_pp->nat[sender];
461                     if (debug)
462                     {
463                         fprintf(debug, "Received from PP node %d: %d "
464                                 "coordinates\n",
465                                 pme_pp->node[sender], pme_pp->nat[sender]);
466                     }
467                 }
468             }
469         }
470
471         /* Wait for the coordinates and/or charges to arrive */
472         MPI_Waitall(messages, pme_pp->req, pme_pp->stat);
473         messages = 0;
474     }
475     while (!(cnb.flags & (PP_PME_COORD | PP_PME_FINISH)));
476
477     *step = cnb.step;
478 #endif
479
480     *chargeA = pme_pp->chargeA;
481     *chargeB = pme_pp->chargeB;
482     *x       = pme_pp->x;
483     *f       = pme_pp->f;
484
485
486     return ((cnb.flags & PP_PME_FINISH) ? -1 : nat);
487 }
488
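/* On the PME-only side, the return value of gmx_pme_recv_q_x is expected
 * to be handled roughly as follows (an illustrative sketch of the
 * do_pmeonly loop, which lives outside this file):
 *
 *   natoms = gmx_pme_recv_q_x(...);
 *   if (natoms == -2)  switch to the received grid_size/ewaldcoeff and loop;
 *   if (natoms == -1)  clean up and return;
 *   otherwise          run the PME mesh part for natoms atoms and call
 *                      gmx_pme_send_force_vir_ener(...)
 */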
489 static void receive_virial_energy(t_commrec *cr,
490                                   matrix vir, real *energy, real *dvdlambda,
491                                   float *pme_cycles)
492 {
493     gmx_pme_comm_vir_ene_t cve;
494
495     if (cr->dd->pme_receive_vir_ener)
496     {
497         if (debug)
498         {
499             fprintf(debug,
500                     "PP node %d receiving from PME node %d: virial and energy\n",
501                     cr->sim_nodeid, cr->dd->pme_nodeid);
502         }
503 #ifdef GMX_MPI
504         MPI_Recv(&cve, sizeof(cve), MPI_BYTE, cr->dd->pme_nodeid, 1, cr->mpi_comm_mysim,
505                  MPI_STATUS_IGNORE);
506 #else
507         memset(&cve, 0, sizeof(cve));
508 #endif
509
510         m_add(vir, cve.vir, vir);
511         *energy     = cve.energy;
512         *dvdlambda += cve.dvdlambda;
513         *pme_cycles = cve.cycles;
514
515         if (cve.stop_cond != gmx_stop_cond_none)
516         {
517             gmx_set_stop_condition(cve.stop_cond);
518         }
519     }
520     else
521     {
522         *energy     = 0;
523         *pme_cycles = 0;
524     }
525 }
526
527 void gmx_pme_receive_f(t_commrec *cr,
528                        rvec f[], matrix vir,
529                        real *energy, real *dvdlambda,
530                        float *pme_cycles)
531 {
532     int natoms, i;
533
534 #ifdef GMX_PME_DELAYED_WAIT
535     /* Wait for the x request to finish */
536     gmx_pme_send_q_x_wait(cr->dd);
537 #endif
538
539     natoms = cr->dd->nat_home;
540
541     if (natoms > cr->dd->pme_recv_f_alloc)
542     {
543         cr->dd->pme_recv_f_alloc = over_alloc_dd(natoms);
544         srenew(cr->dd->pme_recv_f_buf, cr->dd->pme_recv_f_alloc);
545     }
546
547 #ifdef GMX_MPI
548     MPI_Recv(cr->dd->pme_recv_f_buf[0],
549              natoms*sizeof(rvec), MPI_BYTE,
550              cr->dd->pme_nodeid, 0, cr->mpi_comm_mysim,
551              MPI_STATUS_IGNORE);
552 #endif
553
554     for (i = 0; i < natoms; i++)
555     {
556         rvec_inc(f[i], cr->dd->pme_recv_f_buf[i]);
557     }
558
559
560     receive_virial_energy(cr, vir, energy, dvdlambda, pme_cycles);
561 }
562
563 void gmx_pme_send_force_vir_ener(struct gmx_pme_pp *pme_pp,
564                                  rvec *f, matrix vir,
565                                  real energy, real dvdlambda,
566                                  float cycles)
567 {
568     gmx_pme_comm_vir_ene_t cve;
569     int                    messages, ind_start, ind_end, receiver;
570
571     cve.cycles = cycles;
572
573     /* Now the evaluated forces have to be transferred to the PP nodes */
574     messages = 0;
575     ind_end  = 0;
576     for (receiver = 0; receiver < pme_pp->nnode; receiver++)
577     {
578         ind_start = ind_end;
579         ind_end   = ind_start + pme_pp->nat[receiver];
580 #ifdef GMX_MPI
581         if (MPI_Isend(f[ind_start], (ind_end-ind_start)*sizeof(rvec), MPI_BYTE,
582                       pme_pp->node[receiver], 0,
583                       pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]) != 0)
584         {
585             gmx_comm("MPI_Isend failed in do_pmeonly");
586         }
587 #endif
588     }
589
590     /* send virial and energy to our last PP node */
591     copy_mat(vir, cve.vir);
592     cve.energy    = energy;
593     cve.dvdlambda = dvdlambda;
594     /* check for the signals to send back to a PP node */
595     cve.stop_cond = gmx_get_stop_condition();
596
597     cve.cycles = cycles;
598
599     if (debug)
600     {
601         fprintf(debug, "PME node sending to PP node %d: virial and energy\n",
602                 pme_pp->node_peer);
603     }
604 #ifdef GMX_MPI
605     MPI_Isend(&cve, sizeof(cve), MPI_BYTE,
606               pme_pp->node_peer, 1,
607               pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);
608
609     /* Wait for the forces to arrive */
610     MPI_Waitall(messages, pme_pp->req, pme_pp->stat);
611 #endif
612 }