[alexxy/gromacs.git] / src / mdlib / partdec.c
1 /*
2  * 
3  *                This source code is part of
4  * 
5  *                 G   R   O   M   A   C   S
6  * 
7  *          GROningen MAchine for Chemical Simulations
8  * 
9  *                        VERSION 3.2.0
10  * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
11  * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
12  * Copyright (c) 2001-2004, The GROMACS development team,
13  * check out http://www.gromacs.org for more information.
14  *
15  * This program is free software; you can redistribute it and/or
16  * modify it under the terms of the GNU General Public License
17  * as published by the Free Software Foundation; either version 2
18  * of the License, or (at your option) any later version.
19  * 
20  * If you want to redistribute modifications, please consider that
21  * scientific software is very special. Version control is crucial -
22  * bugs must be traceable. We will be happy to consider code for
23  * inclusion in the official distribution, but derived work must not
24  * be called official GROMACS. Details are found in the README & COPYING
25  * files - if they are missing, get the official version at www.gromacs.org.
26  * 
27  * To help us fund GROMACS development, we humbly ask that you cite
28  * the papers on the package - you can find them in the top README file.
29  * 
30  * For more info, check our website at http://www.gromacs.org
31  * 
32  * And Hey:
33  * GROwing Monsters And Cloning Shrimps
34  */
35 /* This file is completely threadsafe - keep it that way! */
36 #ifdef HAVE_CONFIG_H
37 #include <config.h>
38 #endif
39
40 #include <math.h>
41
42 #include "sysstuff.h"
43 #include "typedefs.h"
44 #include "smalloc.h"
45 #include "invblock.h"
46 #include "macros.h"
47 #include "main.h"
48 #include "ns.h"
49 #include "partdec.h"
50 #include "splitter.h"
51 #include "gmx_random.h"
52 #include "mtop_util.h"
53 #include "mvdata.h"
54 #include "vec.h"
55
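/* Bookkeeping for constraint (LINCS) communication under particle
 * decomposition: the atom ranges to receive from / send to the left and right
 * neighbors, the number of home atoms per constraint, and scratch buffers for
 * packing coordinates.
 */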
56 typedef struct gmx_partdec_constraint
57 {
58         int                  left_range_receive;
59         int                  right_range_receive;
60         int                  left_range_send;
61         int                  right_range_send;
62         int                  nconstraints;
63         int *                nlocalatoms;
64         rvec *               sendbuf;
65         rvec *               recvbuf;
66
67 } gmx_partdec_constraint_t;
68
69
70 typedef struct gmx_partdec {
71   int  neighbor[2];             /* The node ids of the left and right neighbors */
72   int  *cgindex;                /* The charge group boundaries,         */
73                                 /* size nnodes+1,                       */
74                                 /* only allocated with particle decomp. */
75   int  *index;                  /* The home particle boundaries,        */
76                                 /* size nnodes+1,                       */
77                                 /* only allocated with particle decomp. */
78   int  shift,bshift;            /* Coordinates are shifted left for     */
79                                 /* 'shift' systolic pulses, and right   */
80                                 /* for 'bshift' pulses. Forces are      */
81                                 /* shifted right for 'shift' pulses     */
82                                 /* and left for 'bshift' pulses         */
83                                 /* This way it is not necessary to shift*/
84                                 /* the coordinates over the entire ring */
85   rvec *vbuf;                   /* Buffer for summing the forces        */
86 #ifdef GMX_MPI
87   MPI_Request mpi_req_rx;       /* MPI reqs for async transfers */
88   MPI_Request mpi_req_tx;
89 #endif
90  gmx_partdec_constraint_t *     constraints;    
91 } gmx_partdec_t;
92
93
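/* Post a non-blocking send of bufsize bytes to the neighbor in direction dir
 * (GMX_LEFT or GMX_RIGHT); completion must later be waited for with gmx_tx_wait().
 */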
94 void gmx_tx(const t_commrec *cr,int dir,void *buf,int bufsize)
95 {
96 #ifndef GMX_MPI
97   gmx_call("gmx_tx"); 
98 #else
99   int        nodeid;
100   int        tag,flag;
101   MPI_Status status;
102
103   nodeid = cr->pd->neighbor[dir];
104   
105 #ifdef DEBUG
106   fprintf(stderr,"gmx_tx: nodeid=%d, buf=%p, bufsize=%d\n",
107           nodeid,buf,bufsize);
108 #endif
109 #ifdef MPI_TEST
110   /* workaround for crashes encountered with MPI on IRIX 6.5 */
111   if (cr->pd->mpi_req_tx != MPI_REQUEST_NULL) {
112     MPI_Test(&cr->pd->mpi_req_tx,&flag,&status);
113     if (flag==FALSE) {
114       fprintf(stderr,"gmx_tx called before previous send was complete: nodeid=%d, buf=%p, bufsize=%d\n",
115               nodeid,buf,bufsize);
116       gmx_tx_wait(cr,dir);
117     }
118   }
119 #endif
120   tag = 0;
121   if (MPI_Isend(buf,bufsize,MPI_BYTE,RANK(cr,nodeid),tag,cr->mpi_comm_mygroup,&cr->pd->mpi_req_tx) != 0)
122     gmx_comm("MPI_Isend Failed");
123 #endif
124 }
125
126 void gmx_tx_wait(const t_commrec *cr, int dir)
127 {
128 #ifndef GMX_MPI
129   gmx_call("gmx_tx_wait");
130 #else
131   MPI_Status  status;
132   int mpi_result;
133
134   if ((mpi_result=MPI_Wait(&cr->pd->mpi_req_tx,&status)) != 0)
135     gmx_fatal(FARGS,"MPI_Wait: result=%d",mpi_result);
136 #endif
137 }
138
139 void gmx_rx(const t_commrec *cr,int dir,void *buf,int bufsize)
140 {
141 #ifndef GMX_MPI
142   gmx_call("gmx_rx");
143 #else
144   int nodeid;
145   int tag;
146
147   nodeid = cr->pd->neighbor[dir];
148 #ifdef DEBUG
149   fprintf(stderr,"gmx_rx: nodeid=%d, buf=%p, bufsize=%d\n",
150           nodeid,buf,bufsize);
151 #endif
152   tag = 0;
153   if (MPI_Irecv( buf, bufsize, MPI_BYTE, RANK(cr,nodeid), tag, cr->mpi_comm_mygroup, &cr->pd->mpi_req_rx) != 0 )
154     gmx_comm("MPI_Irecv Failed");
155 #endif
156 }
157
158 void gmx_rx_wait(const t_commrec *cr, int dir)
159 {
160 #ifndef GMX_MPI
161   gmx_call("gmx_rx_wait");
162 #else
163   MPI_Status  status;
164   int mpi_result;
165   
166   if ((mpi_result=MPI_Wait(&cr->pd->mpi_req_rx,&status)) != 0)
167     gmx_fatal(FARGS,"MPI_Wait: result=%d",mpi_result);
168 #endif
169 }
170
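/* Simultaneously send a real array to the neighbor in send_dir and receive one
 * from the neighbor in recv_dir; when one of the two buffer sizes is zero a
 * plain MPI_Send or MPI_Recv is used instead of MPI_Sendrecv.
 */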
171 void gmx_tx_rx_real(const t_commrec *cr,
172                     int send_dir,real *send_buf,int send_bufsize,
173                     int recv_dir,real *recv_buf,int recv_bufsize)
174 {
175 #ifndef GMX_MPI
176   gmx_call("gmx_tx_rx_real");
177 #else
178   int send_nodeid,recv_nodeid;
179   int tx_tag = 0,rx_tag = 0;
180   MPI_Status stat;
181
182   send_nodeid = cr->pd->neighbor[send_dir];
183   recv_nodeid = cr->pd->neighbor[recv_dir];
184                    
185 #ifdef GMX_DOUBLE
186 #define mpi_type MPI_DOUBLE
187 #else
188 #define mpi_type MPI_FLOAT
189 #endif
190
191   if (send_bufsize > 0 && recv_bufsize > 0) {
192     MPI_Sendrecv(send_buf,send_bufsize,mpi_type,RANK(cr,send_nodeid),tx_tag,
193                  recv_buf,recv_bufsize,mpi_type,RANK(cr,recv_nodeid),rx_tag,
194                  cr->mpi_comm_mygroup,&stat);
195   } else if (send_bufsize > 0) {
196     MPI_Send(send_buf,send_bufsize,mpi_type,RANK(cr,send_nodeid),tx_tag,
197              cr->mpi_comm_mygroup);
198   } else if (recv_bufsize > 0) {
199     MPI_Recv(recv_buf,recv_bufsize,mpi_type,RANK(cr,recv_nodeid),rx_tag,
200              cr->mpi_comm_mygroup,&stat);
201   }
202 #undef mpi_type
203 #endif
204 }
205
206
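/* As gmx_tx_rx_real(), but exchanging raw byte buffers in a single MPI_Sendrecv. */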
207 void gmx_tx_rx_void(const t_commrec *cr,
208                     int send_dir,void *send_buf,int send_bufsize,
209                     int recv_dir,void *recv_buf,int recv_bufsize)
210 {
211 #ifndef GMX_MPI
212         gmx_call("gmx_tx_rx_void");
213 #else
214         int send_nodeid,recv_nodeid;
215         int tx_tag = 0,rx_tag = 0;
216         MPI_Status stat;
217         
218         send_nodeid = cr->pd->neighbor[send_dir];
219         recv_nodeid = cr->pd->neighbor[recv_dir];
220         
221         
222         MPI_Sendrecv(send_buf,send_bufsize,MPI_BYTE,RANK(cr,send_nodeid),tx_tag,
223                                  recv_buf,recv_bufsize,MPI_BYTE,RANK(cr,recv_nodeid),rx_tag,
224                                  cr->mpi_comm_mygroup,&stat);
225
226 #endif
227 }
228
229
230 /*void gmx_wait(int dir_send,int dir_recv)*/
231                 
232 void gmx_wait(const t_commrec *cr, int dir_send,int dir_recv)
233 {
234 #ifndef GMX_MPI
235   gmx_call("gmx_wait");
236 #else
237   gmx_tx_wait(cr, dir_send);
238   gmx_rx_wait(cr, dir_recv);
239 #endif
240 }
241
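/* Fill in the node ids of the left and right neighbors on the (periodic) node ring. */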
242 static void set_left_right(t_commrec *cr)
243 {
244   cr->pd->neighbor[GMX_LEFT]  = (cr->nnodes + cr->nodeid - 1) % cr->nnodes;
245   cr->pd->neighbor[GMX_RIGHT] = (cr->nodeid + 1) % cr->nnodes;
246 }
247
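/* Communicate and sum the forces around the ring; thin wrapper around move_f()
 * using the particle-decomposition force buffer.
 */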
248 void pd_move_f(const t_commrec *cr,rvec f[],t_nrnb *nrnb)
249 {
250   move_f(NULL,cr,GMX_LEFT,GMX_RIGHT,f,cr->pd->vbuf,nrnb);
251 }
252
253 int *pd_cgindex(const t_commrec *cr)
254 {
255   return cr->pd->cgindex;
256 }
257
258 int *pd_index(const t_commrec *cr)
259 {
260   return cr->pd->index;
261 }
262
263 int pd_shift(const t_commrec *cr)
264 {
265   return cr->pd->shift;
266 }
267
268 int pd_bshift(const t_commrec *cr)
269 {
270   return cr->pd->bshift;
271 }
272
273 void pd_cg_range(const t_commrec *cr,int *cg0,int *cg1)
274 {
275   *cg0 = cr->pd->cgindex[cr->nodeid];
276   *cg1 = cr->pd->cgindex[cr->nodeid+1];
277 }
278
279 void pd_at_range(const t_commrec *cr,int *at0,int *at1)
280 {
281   *at0 = cr->pd->index[cr->nodeid];
282   *at1 = cr->pd->index[cr->nodeid+1];
283 }
284
285 void
286 pd_get_constraint_range(gmx_partdec_p_t pd,int *start,int *natoms)
287 {
288         *start  = pd->constraints->left_range_receive;
289         *natoms = pd->constraints->right_range_receive-pd->constraints->left_range_receive;
290 }
291
292 int *
293 pd_constraints_nlocalatoms(gmx_partdec_p_t pd)
294 {
295         int *rc;
296         
297         if(NULL != pd && NULL != pd->constraints)
298         {
299                 rc = pd->constraints->nlocalatoms;
300         }
301         else
302         {
303                 rc = NULL;
304         }
305         return rc;
306 }
307
308
309
310
311 /* This routine is used to communicate the non-home atoms needed for constraints.
312  * We have already calculated this range of atoms during setup and stored it in the
313  * partdec constraints structure.
314  *
315  * When called, we send/receive left_range_send/receive atoms to our left (lower)
316  * node neighbor, and similarly to the right (higher) node.
317  *
318  * This has not been tested for periodic molecules...
319  */
320 void
321 pd_move_x_constraints(t_commrec *  cr,
322                                           rvec *       x0,
323                                           rvec *       x1)
324 {
325 #ifdef GMX_MPI
326         gmx_partdec_t *pd;
327         gmx_partdec_constraint_t *pdc;
328         
329         rvec *     sendptr;
330         rvec *     recvptr;
331         int        thisnode;
332         int        i;
333         int        cnt;
334         int        sendcnt,recvcnt;
335         
336         pd  = cr->pd;
337         pdc = pd->constraints;
338         
339        if (pdc == NULL)
340        {
341                 return;         
342        }
343
344         thisnode  = cr->nodeid;
345         
346         /* First pulse to right */
347         
348         recvcnt = 3*(pd->index[thisnode]-pdc->left_range_receive);
349         sendcnt = 3*(cr->pd->index[thisnode+1]-cr->pd->constraints->right_range_send);
350
351         if(x1!=NULL)
352         {
353                 /* Assemble temporary array with both x0 & x1 */
354                 recvptr = pdc->recvbuf;
355                 sendptr = pdc->sendbuf;
356                 
357                 cnt = 0;
358                 for(i=pdc->right_range_send;i<pd->index[thisnode+1];i++)
359                 {
360                         copy_rvec(x0[i],sendptr[cnt++]);
361                 }
362                 for(i=pdc->right_range_send;i<pd->index[thisnode+1];i++)
363                 {
364                         copy_rvec(x1[i],sendptr[cnt++]);
365                 }
366                 recvcnt *= 2;
367                 sendcnt *= 2;           
368         }
369         else
370         {
371                 recvptr = x0 + pdc->left_range_receive;
372                 sendptr = x0 + pdc->right_range_send;
373         }
374                 
375         gmx_tx_rx_real(cr,
376                                    GMX_RIGHT,(real *)sendptr,sendcnt,
377                                    GMX_LEFT, (real *)recvptr,recvcnt);
378                                 
379         if(x1 != NULL)
380         {
381                 /* copy back to x0/x1 */
382                 cnt = 0;
383                 for(i=pdc->left_range_receive;i<pd->index[thisnode];i++)
384                 {
385                         copy_rvec(recvptr[cnt++],x0[i]);
386                 }
387                 for(i=pdc->left_range_receive;i<pd->index[thisnode];i++)
388                 {
389                         copy_rvec(recvptr[cnt++],x1[i]);
390                 }
391         }
392         
393         /* And pulse to left */
394         sendcnt = 3*(pdc->left_range_send-pd->index[thisnode]); 
395         recvcnt = 3*(pdc->right_range_receive-pd->index[thisnode+1]);
396
397         if(x1 != NULL)
398         {       
399                 cnt = 0;
400                 for(i=cr->pd->index[thisnode];i<pdc->left_range_send;i++)
401                 {
402                         copy_rvec(x0[i],sendptr[cnt++]);
403                 }
404                 for(i=cr->pd->index[thisnode];i<pdc->left_range_send;i++)
405                 {
406                         copy_rvec(x1[i],sendptr[cnt++]);
407                 }
408                 recvcnt *= 2;
409                 sendcnt *= 2;           
410         }
411         else
412         {
413                 sendptr = x0 + pd->index[thisnode];
414                 recvptr = x0 + pd->index[thisnode+1];
415         }
416         
417         gmx_tx_rx_real(cr,
418                                    GMX_LEFT ,(real *)sendptr,sendcnt,
419                                    GMX_RIGHT,(real *)recvptr,recvcnt);
420
421         /* Final copy back from buffers */
422         if(x1 != NULL)
423         {
424                 /* First copy received data back into x0 & x1 */
425                 cnt = 0;
426                 for(i=pd->index[thisnode+1];i<pdc->right_range_receive;i++)
427                 {
428                         copy_rvec(recvptr[cnt++],x0[i]);
429                 }
430                 for(i=pd->index[thisnode+1];i<pdc->right_range_receive;i++)
431                 {
432                         copy_rvec(recvptr[cnt++],x1[i]);
433                 }
434         }               
435 #endif
436 }
437
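/* Return the node on which atom atomid is a home atom, i.e. the first node
 * whose upper atom boundary is larger than atomid.
 */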
438 static int home_cpu(int nnodes,gmx_partdec_t *pd,int atomid)
439 {
440   int k;
441  
442   for(k=0; (k<nnodes); k++) {
443     if (atomid<pd->index[k+1])
444       return k;
445   }
446   gmx_fatal(FARGS,"Atom id %d is larger than the number of atoms (%d)",
447             atomid+1,pd->index[nnodes]);
448             
449   return -1;
450 }
451
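/* Determine how many systolic pulses are needed: 'bshift' (backward) from the
 * charge-group boundaries, and 'shift' (forward) from the neighbor-search
 * ranges and from how far bonded interactions are spread over the nodes.
 */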
452 static void calc_nsbshift(FILE *fp,int nnodes,gmx_partdec_t *pd,t_idef *idef)
453 {
454   int i,j,k;
455   int lastcg,targetcg,nshift,naaj;
456   int homeid[32];
457       
458   pd->bshift=0;
459   for(i=1; (i<nnodes); i++) {
460     targetcg = pd->cgindex[i];
461     for(nshift=i; (nshift > 0) && (pd->cgindex[nshift] > targetcg); nshift--)
462       ;
463     pd->bshift=max(pd->bshift,i-nshift);
464   }
465
466   pd->shift = (nnodes + 1)/2;
467   for(i=0; (i<nnodes); i++) {
468     lastcg = pd->cgindex[i+1]-1;
469     naaj = calc_naaj(lastcg,pd->cgindex[nnodes]);
470     targetcg = (lastcg+naaj) % pd->cgindex[nnodes];
471     
472     /* Search until we find the target charge group */
473     for(nshift=0;
474         (nshift < nnodes) && (targetcg > pd->cgindex[nshift+1]);
475         nshift++)
476       ;
477     /* Now compute the shift, that is the difference in node index */
478     nshift=((nshift - i + nnodes) % nnodes);
479     
480     if (fp)
481       fprintf(fp,"CPU=%3d, lastcg=%5d, targetcg=%5d, myshift=%5d\n",
482               i,lastcg,targetcg,nshift);
483             
484     /* It's the largest shift that matters */
485     pd->shift=max(nshift,pd->shift);
486   }
487   /* Now for the bonded forces */
488   for(i=0; (i<F_NRE); i++) {
489     if (interaction_function[i].flags & IF_BOND) {
490       int nratoms = interaction_function[i].nratoms;
491       for(j=0; (j<idef->il[i].nr); j+=nratoms+1) {
492         for(k=1; (k<=nratoms); k++)
493           homeid[k-1] = home_cpu(nnodes,pd,idef->il[i].iatoms[j+k]);
494         for(k=1; (k<nratoms); k++)
495           pd->shift = max(pd->shift,abs(homeid[k]-homeid[0]));
496       }
497     }
498   }
499   if (fp)
500     fprintf(fp,"pd->shift = %3d, pd->bshift=%3d\n",
501             pd->shift,pd->bshift);
502 }
503
504
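/* Set up the constraint-communication bookkeeping: the atom ranges to receive
 * from and send to each neighbor (from the left_range/right_range arrays
 * produced by split_top), the number of home atoms of every constraint, and
 * the pack/unpack buffers.
 */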
505 static void
506 init_partdec_constraint(t_commrec *cr,
507                         t_idef *   idef,
508                                                 int *left_range, 
509                                                 int *right_range)
510 {
511         gmx_partdec_t *            pd = cr->pd;
512         gmx_partdec_constraint_t *pdc;
513         int i,cnt,k;
514         int ai,aj,nodei,nodej;
515         int nratoms;
516         int nodeid;
517         
518         snew(pdc,1);
519         cr->pd->constraints = pdc;
520
521         
522         /* Who am I? */
523     nodeid = cr->nodeid;
524         
525     /* Setup LINCS communication ranges */
526     pdc->left_range_receive   = left_range[nodeid];
527     pdc->right_range_receive  = right_range[nodeid]+1;
528     pdc->left_range_send      = (nodeid > 0) ? right_range[nodeid-1]+1 : 0;
529     pdc->right_range_send     = (nodeid < cr->nnodes-1) ? left_range[nodeid+1] : pd->index[cr->nnodes];
530         
531         snew(pdc->nlocalatoms,idef->il[F_CONSTR].nr);
532         nratoms = interaction_function[F_CONSTR].nratoms;
533
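        /* For each constraint, count how many of its two atoms (ai,aj) are home atoms on this node */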
534         for(i=0,cnt=0;i<idef->il[F_CONSTR].nr;i+=nratoms+1,cnt++)
535         {
536                 ai = idef->il[F_CONSTR].iatoms[i+1];
537                 aj = idef->il[F_CONSTR].iatoms[i+2];
538                 nodei = 0;
539                 while(ai>=pd->index[nodei+1]) 
540                 {
541                         nodei++;
542                 }
543                 nodej = 0;
544                 while(aj>=pd->index[nodej+1]) 
545                 {
546                         nodej++;
547                 }
548                 pdc->nlocalatoms[cnt] = 0;
549                 if(nodei==nodeid)
550                 {
551                         pdc->nlocalatoms[cnt]++;
552                 }
553                 if(nodej==nodeid)
554                 {
555                         pdc->nlocalatoms[cnt]++;
556                 }
557         }
558         pdc->nconstraints = cnt;
559         
560         snew(pdc->sendbuf,max(6*(pd->index[cr->nodeid+1]-pd->constraints->right_range_send),6*(pdc->left_range_send-pd->index[cr->nodeid])));
561         snew(pdc->recvbuf,max(6*(pd->index[cr->nodeid]-pdc->left_range_receive),6*(pdc->right_range_receive-pd->index[cr->nodeid+1])));
562         
563 }
564
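/* Allocate and fill the partdec structure: neighbor node ids, the per-node
 * charge-group and atom boundaries taken from multinr, the systolic pulse
 * counts from calc_nsbshift(), and the force-summation buffer.
 */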
565 static void init_partdec(FILE *fp,t_commrec *cr,t_block *cgs,int *multinr,
566                                                  t_idef *idef)
567 {
568   int  i,nodeid;
569   gmx_partdec_t *pd;
570         
571   snew(pd,1);
572   cr->pd = pd;
573
574   set_left_right(cr);
575   
576   if (cr->nnodes > 1) {
577     if (multinr == NULL)
578       gmx_fatal(FARGS,"Internal error in init_partdec: multinr = NULL");
579     snew(pd->index,cr->nnodes+1);
580     snew(pd->cgindex,cr->nnodes+1);
581     pd->cgindex[0] = 0;
582     pd->index[0]   = 0;
583     for(i=0; (i < cr->nnodes); i++) {
584       pd->cgindex[i+1] = multinr[i];
585       pd->index[i+1]   = cgs->index[multinr[i]];
586     }
587     calc_nsbshift(fp,cr->nnodes,pd,idef);
588     /* This is a hack related to bugzilla 148 */
589     /*pd->shift = cr->nnodes-1;
590       pd->bshift = 0;*/
591
592     /* Allocate a buffer of size natoms of the whole system
593      * for summing the forces over the nodes.
594      */
595     snew(pd->vbuf,cgs->index[cgs->nr]);
596     pd->constraints = NULL;
597   }
598 #ifdef GMX_MPI
599   pd->mpi_req_tx=MPI_REQUEST_NULL;
600   pd->mpi_req_rx=MPI_REQUEST_NULL;
601 #endif
602 }
603
604 static void print_partdec(FILE *fp,const char *title,
605                           int nnodes,gmx_partdec_t *pd)
606 {
607   int i;
608
609   fprintf(fp,"%s\n",title);
610   fprintf(fp,"nnodes:       %5d\n",nnodes);
611   fprintf(fp,"pd->shift:    %5d\n",pd->shift);
612   fprintf(fp,"pd->bshift:   %5d\n",pd->bshift);
613   
614   fprintf(fp,"Nodeid   atom0   #atom     cg0       #cg\n");
615   for(i=0; (i<nnodes); i++)
616     fprintf(fp,"%6d%8d%8d%8d%10d\n",
617             i,
618             pd->index[i],pd->index[i+1]-pd->index[i],
619             pd->cgindex[i],pd->cgindex[i+1]-pd->cgindex[i]);
620   fprintf(fp,"\n");
621 }
622
623 static void pr_idef_division(FILE *fp,t_idef *idef,int nnodes,int **multinr)
624 {
625   int i,ftype,nr,nra,m0,m1;
626
627   fprintf(fp,"Division of bonded forces over processors\n");
628   fprintf(fp,"%-12s","CPU");
629   for(i=0; (i<nnodes); i++) 
630     fprintf(fp," %5d",i);
631   fprintf(fp,"\n");
632   
633   for(ftype=0; (ftype<F_NRE); ftype++) {
634     if (idef->il[ftype].nr > 0) {
635       nr  = idef->il[ftype].nr;
636       nra = 1+interaction_function[ftype].nratoms;
637       fprintf(fp,"%-12s", interaction_function[ftype].name);
638       /* Loop over processors */
639       for(i=0; (i<nnodes); i++) {
640         m0 = (i == 0) ? 0 : multinr[ftype][i-1]/nra;
641         m1 = multinr[ftype][i]/nra;
642         fprintf(fp," %5d",m1-m0);
643       }
644       fprintf(fp,"\n");
645     }
646   }
647 }
648
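/* Keep only this node's slice of one interaction list, i.e. the entries
 * [multinr[nodeid-1], multinr[nodeid]) of il->iatoms.
 */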
649 static void select_my_ilist(FILE *log,t_ilist *il,int *multinr,t_commrec *cr)
650 {
651   t_iatom *ia;
652   int     i,start,end,nr;
653   
654   if (cr->nodeid == 0)
655     start=0;
656   else
657     start=multinr[cr->nodeid-1];
658   end=multinr[cr->nodeid];
659   
660   nr=end-start;
661   if (nr < 0)
662     gmx_fatal(FARGS,"Negative number of atoms (%d) on node %d\n"
663                 "You have probably not used the same value for -np with grompp"
664                 " and mdrun",
665                 nr,cr->nodeid);
666   snew(ia,nr);
667
668   for(i=0; (i<nr); i++)
669     ia[i]=il->iatoms[start+i];
670
671   sfree(il->iatoms);
672   il->iatoms=ia;
673   
674   il->nr=nr;
675 }
676
677 static void select_my_idef(FILE *log,t_idef *idef,int **multinr,t_commrec *cr)
678 {
679   int i;
680   
681   for(i=0; (i<F_NRE); i++)
682     select_my_ilist(log,&idef->il[i],multinr[i],cr);
683 }
684
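/* Main entry point for particle decomposition: divide the charge groups over
 * the processors (optionally weighted via the GMX_CAPACITY environment
 * variable), generate the fully listed local topology, initialize the partdec
 * structure and constraint bookkeeping, and return the local topology.
 */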
685 gmx_localtop_t *split_system(FILE *log,
686                              gmx_mtop_t *mtop,t_inputrec *inputrec,
687                              t_commrec *cr)
688 {
689   int    i,npp,n,nn;
690   real   *capacity;
691   double tcap = 0,cap;
692   int    *multinr_cgs,**multinr_nre;
693   char   *cap_env;
694   gmx_localtop_t *top;
695   int    *left_range;
696   int    *right_range;
697         
698   /* Time to setup the division of charge groups over processors */
699   npp = cr->nnodes-cr->npmenodes;
700   snew(capacity,npp);
701   cap_env = getenv("GMX_CAPACITY");
702   if (cap_env == NULL) {
703     for(i=0; (i<npp-1); i++) {
704       capacity[i] = 1.0/(double)npp;
705       tcap += capacity[i];
706     }
707     /* Take care that the sum of capacities is 1.0 */
708     capacity[npp-1] = 1.0 - tcap;
709   } else {
710     tcap = 0;
711     nn = 0;
712     for(i=0; i<npp; i++) {
713       cap = 0;
714       sscanf(cap_env+nn,"%lf%n",&cap,&n);
715       if (cap == 0)
716         gmx_fatal(FARGS,"Incorrect entry or number of entries in GMX_CAPACITY='%s'",cap_env);
717       capacity[i] = cap;
718       tcap += cap;
719       nn += n;
720     }
721     for(i=0; i<npp; i++)
722       capacity[i] /= tcap;
723   }
724
725   /* Convert the molecular topology to a fully listed topology */
726   top = gmx_mtop_generate_local_top(mtop,inputrec);
727
728   snew(multinr_cgs,npp);
729   snew(multinr_nre,F_NRE);
730   for(i=0; i<F_NRE; i++)
731     snew(multinr_nre[i],npp);
732   
733
734   snew(left_range,cr->nnodes);
735   snew(right_range,cr->nnodes);
736         
737   /* This computes which entities can be placed on processors */
738   split_top(log,npp,top,inputrec,&mtop->mols,capacity,multinr_cgs,multinr_nre,left_range,right_range);
739         
740   sfree(capacity);
741   init_partdec(log,cr,&(top->cgs),multinr_cgs,&(top->idef));
742         
743   /* This should be fine */
744   /*split_idef(&(top->idef),cr->nnodes-cr->npmenodes);*/
745   
746   select_my_idef(log,&(top->idef),multinr_nre,cr);
747
748   if(top->idef.il[F_CONSTR].nr>0)
749   {
750           init_partdec_constraint(cr,&(top->idef),left_range,right_range);
751   }
752
753   if (log)
754     pr_idef_division(log,&(top->idef),npp,multinr_nre);
755
756   for(i=0; i<F_NRE; i++)
757     sfree(multinr_nre[i]);
758   sfree(multinr_nre);
759   sfree(multinr_cgs);
760
761   sfree(left_range);
762   sfree(right_range);
763         
764   if (log)
765     print_partdec(log,"Workload division",cr->nnodes,cr->pd);
766
767   return top;
768 }
769
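/* Append newitem to the list if it is not already present; the allocation is
 * grown in chunks of 100 entries.
 */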
770 static void
771 add_to_vsitelist(int **list, int *nitem, int *nalloc,int newitem)
772 {
773         int  i,idx;
774         gmx_bool found;
775         
776         found = FALSE;
777         idx = *nitem;
778         for(i=0; i<idx && !found;i++)
779         {
780                 found = (newitem ==(*list)[i]);
781         }
782         if(!found)
783         {
784                 *nalloc+=100;
785                 srenew(*list,*nalloc);
786                 (*list)[idx++] = newitem;
787                 *nitem = idx;
788         }
789 }
790
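/* For every virtual site interaction, collect the constructing atoms that fall
 * outside this node's home range, exchange the resulting import/export index
 * lists with the left and right neighbors, and allocate the communication
 * buffers. Returns whether any virtual-site communication is needed at all.
 */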
791 gmx_bool setup_parallel_vsites(t_idef *idef,t_commrec *cr,
792                                                    t_comm_vsites *vsitecomm)
793 {
794         int i,j,ftype;
795         int nra;
796         gmx_bool do_comm;
797         t_iatom   *ia;
798         gmx_partdec_t *pd;
799         int  iconstruct;
800         int  i0,i1;
801         int  nalloc_left_construct,nalloc_right_construct;
802         int  sendbuf[2],recvbuf[2];
803         int  bufsize,leftbuf,rightbuf;
804         
805         pd = cr->pd;
806         
807         i0 = pd->index[cr->nodeid];
808         i1 = pd->index[cr->nodeid+1];
809         
810         vsitecomm->left_import_construct  = NULL;       
811         vsitecomm->left_import_nconstruct = 0;
812         nalloc_left_construct      = 0;
813
814         vsitecomm->right_import_construct  = NULL;      
815         vsitecomm->right_import_nconstruct = 0;
816         nalloc_right_construct      = 0;
817         
818         for(ftype=0; (ftype<F_NRE); ftype++) 
819         {
820                 if ( !(interaction_function[ftype].flags & IF_VSITE) )
821                 {
822                         continue;
823                 }
824                 
825                 nra    = interaction_function[ftype].nratoms;
826                 ia     = idef->il[ftype].iatoms;
827                                 
828                 for(i=0; i<idef->il[ftype].nr;i+=nra+1) 
829                 {                               
830                         for(j=2;j<1+nra;j++)
831                         {
832                                 iconstruct = ia[i+j];
833                                 if(iconstruct<i0)
834                                 {
835                                         add_to_vsitelist(&vsitecomm->left_import_construct,
836                                                                          &vsitecomm->left_import_nconstruct,
837                                                                          &nalloc_left_construct,iconstruct);
838                                 }
839                                 else if(iconstruct>=i1)
840                                 {
841                                         add_to_vsitelist(&vsitecomm->right_import_construct,
842                                                                          &vsitecomm->right_import_nconstruct,
843                                                                          &nalloc_right_construct,iconstruct);
844                                 }                               
845                         }
846                 }
847     }
848         
849         /* Pre-communicate the array lengths */
850         gmx_tx_rx_void(cr,
851                                    GMX_RIGHT,(void *)&vsitecomm->right_import_nconstruct,sizeof(int),
852                                    GMX_LEFT, (void *)&vsitecomm->left_export_nconstruct, sizeof(int));
853         gmx_tx_rx_void(cr,
854                                    GMX_LEFT, (void *)&vsitecomm->left_import_nconstruct, sizeof(int),
855                                    GMX_RIGHT,(void *)&vsitecomm->right_export_nconstruct,sizeof(int));
856         
857         snew(vsitecomm->left_export_construct, vsitecomm->left_export_nconstruct);
858         snew(vsitecomm->right_export_construct,vsitecomm->right_export_nconstruct);
859         
860         /* Send our right-import construct-atom index list to the right neighbor; receive the left-export list from the left */
861         gmx_tx_rx_void(cr,
862                                    GMX_RIGHT,(void *)vsitecomm->right_import_construct,vsitecomm->right_import_nconstruct*sizeof(int),
863                                    GMX_LEFT, (void *)vsitecomm->left_export_construct, vsitecomm->left_export_nconstruct*sizeof(int));
864         
865         /* Send our left-import construct-atom index list to the left neighbor; receive the right-export list from the right */
866         gmx_tx_rx_void(cr,
867                                    GMX_LEFT ,(void *)vsitecomm->left_import_construct, vsitecomm->left_import_nconstruct*sizeof(int),
868                                    GMX_RIGHT,(void *)vsitecomm->right_export_construct,vsitecomm->right_export_nconstruct*sizeof(int));
869
870         leftbuf  = max(vsitecomm->left_export_nconstruct,vsitecomm->left_import_nconstruct);
871         rightbuf = max(vsitecomm->right_export_nconstruct,vsitecomm->right_import_nconstruct);
872         
873         bufsize  = max(leftbuf,rightbuf);
874         
875         do_comm = (bufsize>0);
876                                 
877         snew(vsitecomm->send_buf,2*bufsize);
878         snew(vsitecomm->recv_buf,2*bufsize);
879
880         return do_comm;
881 }
882
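/* Create the local state for this node: copy the global state, make local
 * storage for the lambda values, and scatter the random-number-generator
 * state when stochastic dynamics needs per-node random streams.
 */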
883 t_state *partdec_init_local_state(t_commrec *cr,t_state *state_global)
884 {
885   int i;
886   t_state *state_local;
887
888   snew(state_local,1);
889
890   /* Copy all the contents */
891   *state_local = *state_global;
892   snew(state_local->lambda,efptNR);
893   /* local storage for lambda */
894   for (i=0;i<efptNR;i++)
895     {
896       state_local->lambda[i] = state_global->lambda[i];
897     }
898   if (state_global->nrngi > 1) {
899     /* With stochastic dynamics we need local storage for the random state */
900     if (state_local->flags & (1<<estLD_RNG)) {
901       state_local->nrng = gmx_rng_n();
902       snew(state_local->ld_rng,state_local->nrng);
903 #ifdef GMX_MPI
904       if (PAR(cr)) {
905         MPI_Scatter(state_global->ld_rng,
906                     state_local->nrng*sizeof(state_local->ld_rng[0]),MPI_BYTE,
907                     state_local->ld_rng ,
908                     state_local->nrng*sizeof(state_local->ld_rng[0]),MPI_BYTE,
909                     MASTERRANK(cr),cr->mpi_comm_mygroup);
910       }
911 #endif
912     }
913     if (state_local->flags & (1<<estLD_RNGI)) {
914       snew(state_local->ld_rngi,1);
915 #ifdef GMX_MPI
916       if (PAR(cr)) {
917         MPI_Scatter(state_global->ld_rngi,
918                     sizeof(state_local->ld_rngi[0]),MPI_BYTE,
919                     state_local->ld_rngi,
920                     sizeof(state_local->ld_rngi[0]),MPI_BYTE,
921                     MASTERRANK(cr),cr->mpi_comm_mygroup);
922       }
923 #endif
924     }
925   }
926
927   return state_local;
928 }