/* src/mdlib/partdec.c (from alexxy/gromacs.git) */
/*
 *
 *                This source code is part of
 *
 *                 G   R   O   M   A   C   S
 *
 *          GROningen MAchine for Chemical Simulations
 *
 *                        VERSION 3.2.0
 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * If you want to redistribute modifications, please consider that
 * scientific software is very special. Version control is crucial -
 * bugs must be traceable. We will be happy to consider code for
 * inclusion in the official distribution, but derived work must not
 * be called official GROMACS. Details are found in the README & COPYING
 * files - if they are missing, get the official version at www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the papers on the package - you can find them in the top README file.
 *
 * For more info, check our website at http://www.gromacs.org
 *
 * And Hey:
 * GROwing Monsters And Cloning Shrimps
 */
/* This file is completely threadsafe - keep it that way! */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <math.h>

#include "sysstuff.h"
#include "typedefs.h"
#include "smalloc.h"
#include "invblock.h"
#include "macros.h"
#include "main.h"
#include "ns.h"
#include "partdec.h"
#include "splitter.h"
#include "gmx_random.h"
#include "mtop_util.h"
#include "mvdata.h"
#include "vec.h"

typedef struct gmx_partdec_constraint
{
    int                  left_range_receive;
    int                  right_range_receive;
    int                  left_range_send;
    int                  right_range_send;
    int                  nconstraints;
    int *                nlocalatoms;
    rvec *               sendbuf;
    rvec *               recvbuf;
} gmx_partdec_constraint_t;

typedef struct gmx_partdec {
  int  neighbor[2];             /* The nodeids of the left and right neighbors */
  int  *cgindex;                /* The charge group boundaries,         */
                                /* size nnodes+1,                       */
                                /* only allocated with particle decomp. */
  int  *index;                  /* The home particle boundaries,        */
                                /* size nnodes+1,                       */
                                /* only allocated with particle decomp. */
  int  shift,bshift;            /* Coordinates are shifted left for     */
                                /* 'shift' systolic pulses, and right   */
                                /* for 'bshift' pulses. Forces are      */
                                /* shifted right for 'shift' pulses     */
                                /* and left for 'bshift' pulses.        */
                                /* This way it is not necessary to shift */
                                /* the coordinates over the entire ring. */
  rvec *vbuf;                   /* Buffer for summing the forces        */
#ifdef GMX_MPI
  MPI_Request mpi_req_rx;       /* MPI requests for async transfers */
  MPI_Request mpi_req_tx;
#endif
  gmx_partdec_constraint_t *constraints;
} gmx_partdec_t;

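/* Illustration of shift/bshift (editorial example, not from the original
 * source): with nnodes = 4 and shift = 2, each node's coordinates are
 * pulsed to the left neighbor twice per step, so every node sees the
 * home atoms of itself and the two nodes to its right; the forces travel
 * back over the same two pulses in the opposite direction. bshift plays
 * the same role for the few pulses needed in the opposite direction.
 */
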
void gmx_tx(const t_commrec *cr,int dir,void *buf,int bufsize)
{
#ifndef GMX_MPI
  gmx_call("gmx_tx");
#else
  int        nodeid;
  int        tag,flag;
  MPI_Status status;

  nodeid = cr->pd->neighbor[dir];

#ifdef DEBUG
  fprintf(stderr,"gmx_tx: nodeid=%d, buf=%p, bufsize=%d\n",
          nodeid,buf,bufsize);
#endif
#ifdef MPI_TEST
  /* workaround for crashes encountered with MPI on IRIX 6.5 */
  if (cr->pd->mpi_req_tx != MPI_REQUEST_NULL) {
    MPI_Test(&cr->pd->mpi_req_tx,&flag,&status);
    if (flag==FALSE) {
      fprintf(stdlog,"gmx_tx called before previous send was complete: nodeid=%d, buf=%p, bufsize=%d\n",
              nodeid,buf,bufsize);
      gmx_tx_wait(cr,dir);
    }
  }
#endif
  tag = 0;
  if (MPI_Isend(buf,bufsize,MPI_BYTE,RANK(cr,nodeid),tag,cr->mpi_comm_mygroup,&cr->pd->mpi_req_tx) != 0)
    gmx_comm("MPI_Isend Failed");
#endif
}

void gmx_tx_wait(const t_commrec *cr, int dir)
{
#ifndef GMX_MPI
  gmx_call("gmx_tx_wait");
#else
  MPI_Status  status;
  int mpi_result;

  if ((mpi_result=MPI_Wait(&cr->pd->mpi_req_tx,&status)) != 0)
    gmx_fatal(FARGS,"MPI_Wait: result=%d",mpi_result);
#endif
}

void gmx_rx(const t_commrec *cr,int dir,void *buf,int bufsize)
{
#ifndef GMX_MPI
  gmx_call("gmx_rx");
#else
  int nodeid;
  int tag;

  nodeid = cr->pd->neighbor[dir];
#ifdef DEBUG
  fprintf(stderr,"gmx_rx: nodeid=%d, buf=%p, bufsize=%d\n",
          nodeid,buf,bufsize);
#endif
  tag = 0;
  if (MPI_Irecv( buf, bufsize, MPI_BYTE, RANK(cr,nodeid), tag, cr->mpi_comm_mygroup, &cr->pd->mpi_req_rx) != 0 )
    gmx_comm("MPI_Irecv Failed");
#endif
}

void gmx_rx_wait(const t_commrec *cr, int dir)
{
#ifndef GMX_MPI
  gmx_call("gmx_rx_wait");
#else
  MPI_Status  status;
  int mpi_result;

  if ((mpi_result=MPI_Wait(&cr->pd->mpi_req_rx,&status)) != 0)
    gmx_fatal(FARGS,"MPI_Wait: result=%d",mpi_result);
#endif
}

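/* Typical usage of the asynchronous pair above (a sketch, not from the
 * original source; buffer names and sizes are illustrative): post both
 * transfers, overlap local work, then block on completion.
 *
 *   gmx_tx(cr, GMX_LEFT,  sendbuf, nbytes);
 *   gmx_rx(cr, GMX_RIGHT, recvbuf, nbytes);
 *   ... do local work while the transfers are in flight ...
 *   gmx_wait(cr, GMX_LEFT, GMX_RIGHT);
 */
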
void gmx_tx_rx_real(const t_commrec *cr,
                    int send_dir,real *send_buf,int send_bufsize,
                    int recv_dir,real *recv_buf,int recv_bufsize)
{
#ifndef GMX_MPI
  gmx_call("gmx_tx_rx_real");
#else
  int send_nodeid,recv_nodeid;
  int tx_tag = 0,rx_tag = 0;
  MPI_Status stat;

  send_nodeid = cr->pd->neighbor[send_dir];
  recv_nodeid = cr->pd->neighbor[recv_dir];

#ifdef GMX_DOUBLE
#define mpi_type MPI_DOUBLE
#else
#define mpi_type MPI_FLOAT
#endif

  if (send_bufsize > 0 && recv_bufsize > 0) {
    MPI_Sendrecv(send_buf,send_bufsize,mpi_type,RANK(cr,send_nodeid),tx_tag,
                 recv_buf,recv_bufsize,mpi_type,RANK(cr,recv_nodeid),rx_tag,
                 cr->mpi_comm_mygroup,&stat);
  } else if (send_bufsize > 0) {
    MPI_Send(send_buf,send_bufsize,mpi_type,RANK(cr,send_nodeid),tx_tag,
             cr->mpi_comm_mygroup);
  } else if (recv_bufsize > 0) {
    MPI_Recv(recv_buf,recv_bufsize,mpi_type,RANK(cr,recv_nodeid),rx_tag,
             cr->mpi_comm_mygroup,&stat);
  }
#undef mpi_type
#endif
}

void gmx_tx_rx_void(const t_commrec *cr,
                    int send_dir,void *send_buf,int send_bufsize,
                    int recv_dir,void *recv_buf,int recv_bufsize)
{
#ifndef GMX_MPI
  gmx_call("gmx_tx_rx_void");
#else
  int send_nodeid,recv_nodeid;
  int tx_tag = 0,rx_tag = 0;
  MPI_Status stat;

  send_nodeid = cr->pd->neighbor[send_dir];
  recv_nodeid = cr->pd->neighbor[recv_dir];

  MPI_Sendrecv(send_buf,send_bufsize,MPI_BYTE,RANK(cr,send_nodeid),tx_tag,
               recv_buf,recv_bufsize,MPI_BYTE,RANK(cr,recv_nodeid),rx_tag,
               cr->mpi_comm_mygroup,&stat);
#endif
}

void gmx_wait(const t_commrec *cr, int dir_send,int dir_recv)
{
#ifndef GMX_MPI
  gmx_call("gmx_wait");
#else
  gmx_tx_wait(cr, dir_send);
  gmx_rx_wait(cr, dir_recv);
#endif
}

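/* Example of the ring topology set up below (editorial illustration,
 * not from the original source): with nnodes = 4, node 2 gets
 * neighbor[GMX_LEFT] = 1 and neighbor[GMX_RIGHT] = 3, while node 0
 * wraps around to neighbor[GMX_LEFT] = 3, closing the systolic ring.
 */
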
static void set_left_right(t_commrec *cr)
{
  cr->pd->neighbor[GMX_LEFT]  = (cr->nnodes + cr->nodeid - 1) % cr->nnodes;
  cr->pd->neighbor[GMX_RIGHT] = (cr->nodeid + 1) % cr->nnodes;
}

void pd_move_f(const t_commrec *cr,rvec f[],t_nrnb *nrnb)
{
  move_f(NULL,cr,GMX_LEFT,GMX_RIGHT,f,cr->pd->vbuf,nrnb);
}

int *pd_cgindex(const t_commrec *cr)
{
  return cr->pd->cgindex;
}

int *pd_index(const t_commrec *cr)
{
  return cr->pd->index;
}

int pd_shift(const t_commrec *cr)
{
  return cr->pd->shift;
}

int pd_bshift(const t_commrec *cr)
{
  return cr->pd->bshift;
}

void pd_cg_range(const t_commrec *cr,int *cg0,int *cg1)
{
  *cg0 = cr->pd->cgindex[cr->nodeid];
  *cg1 = cr->pd->cgindex[cr->nodeid+1];
}

void pd_at_range(const t_commrec *cr,int *at0,int *at1)
{
  *at0 = cr->pd->index[cr->nodeid];
  *at1 = cr->pd->index[cr->nodeid+1];
}

void
pd_get_constraint_range(gmx_partdec_p_t pd,int *start,int *natoms)
{
  *start  = pd->constraints->left_range_receive;
  *natoms = pd->constraints->right_range_receive-pd->constraints->left_range_receive;
}

int *
pd_constraints_nlocalatoms(gmx_partdec_p_t pd)
{
  int *rc;

  if(NULL != pd && NULL != pd->constraints)
  {
    rc = pd->constraints->nlocalatoms;
  }
  else
  {
    rc = NULL;
  }
  return rc;
}


/* This routine is used to communicate the non-home atoms needed for constraints.
 * We have already calculated this range of atoms during setup, and stored it in
 * the partdec constraints structure.
 *
 * When called, we send/receive left_range_send/receive atoms to/from our left
 * (lower) node neighbor, and similarly to/from the right (higher) node.
 *
 * This has not been tested for periodic molecules...
 */
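/* Illustration of the ranges (hypothetical numbers, not from the source):
 * if node 1 owns atoms [100,200) and pdc->right_range_send = 180, the
 * right pulse sends atoms [180,200) to node 2 and receives atoms
 * [left_range_receive,100) from node 0; the left pulse then sends
 * [100,left_range_send) to node 0 and receives [200,right_range_receive)
 * from node 2.
 */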
void
pd_move_x_constraints(t_commrec *  cr,
                      rvec *       x0,
                      rvec *       x1)
{
#ifdef GMX_MPI
    gmx_partdec_t *pd;
    gmx_partdec_constraint_t *pdc;

    rvec *     sendptr;
    rvec *     recvptr;
    int        thisnode;
    int        i;
    int        cnt;
    int        sendcnt,recvcnt;

    pd  = cr->pd;
    pdc = pd->constraints;

    thisnode  = cr->nodeid;

    /* First pulse to right */

    recvcnt = 3*(pd->index[thisnode]-pdc->left_range_receive);
    sendcnt = 3*(pd->index[thisnode+1]-pdc->right_range_send);

    if(x1!=NULL)
    {
        /* Assemble temporary array with both x0 & x1 */
        recvptr = pdc->recvbuf;
        sendptr = pdc->sendbuf;

        cnt = 0;
        for(i=pdc->right_range_send;i<pd->index[thisnode+1];i++)
        {
            copy_rvec(x0[i],sendptr[cnt++]);
        }
        for(i=pdc->right_range_send;i<pd->index[thisnode+1];i++)
        {
            copy_rvec(x1[i],sendptr[cnt++]);
        }
        recvcnt *= 2;
        sendcnt *= 2;
    }
    else
    {
        recvptr = x0 + pdc->left_range_receive;
        sendptr = x0 + pdc->right_range_send;
    }

    gmx_tx_rx_real(cr,
                   GMX_RIGHT,(real *)sendptr,sendcnt,
                   GMX_LEFT, (real *)recvptr,recvcnt);

    if(x1 != NULL)
    {
        /* Copy the received data back into x0 and x1 */
        cnt = 0;
        for(i=pdc->left_range_receive;i<pd->index[thisnode];i++)
        {
            copy_rvec(recvptr[cnt++],x0[i]);
        }
        for(i=pdc->left_range_receive;i<pd->index[thisnode];i++)
        {
            copy_rvec(recvptr[cnt++],x1[i]);
        }
    }

    /* And pulse to left */
    sendcnt = 3*(pdc->left_range_send-pd->index[thisnode]);
    recvcnt = 3*(pdc->right_range_receive-pd->index[thisnode+1]);

    if(x1 != NULL)
    {
        /* sendptr/recvptr still point at the temporary buffers */
        cnt = 0;
        for(i=pd->index[thisnode];i<pdc->left_range_send;i++)
        {
            copy_rvec(x0[i],sendptr[cnt++]);
        }
        for(i=pd->index[thisnode];i<pdc->left_range_send;i++)
        {
            copy_rvec(x1[i],sendptr[cnt++]);
        }
        recvcnt *= 2;
        sendcnt *= 2;
    }
    else
    {
        sendptr = x0 + pd->index[thisnode];
        recvptr = x0 + pd->index[thisnode+1];
    }

    gmx_tx_rx_real(cr,
                   GMX_LEFT ,(real *)sendptr,sendcnt,
                   GMX_RIGHT,(real *)recvptr,recvcnt);

    /* Final copy back from buffers */
    if(x1 != NULL)
    {
        /* First copy received data back into x0 & x1 */
        cnt = 0;
        for(i=pd->index[thisnode+1];i<pdc->right_range_receive;i++)
        {
            copy_rvec(recvptr[cnt++],x0[i]);
        }
        for(i=pd->index[thisnode+1];i<pdc->right_range_receive;i++)
        {
            copy_rvec(recvptr[cnt++],x1[i]);
        }
    }
#endif
}

static int home_cpu(int nnodes,gmx_partdec_t *pd,int atomid)
{
  int k;

  for(k=0; (k<nnodes); k++) {
    if (atomid<pd->index[k+1])
      return k;
  }
  gmx_fatal(FARGS,"Atomid %d is larger than number of atoms (%d)",
            atomid+1,pd->index[nnodes]+1);

  return -1;
}

static void calc_nsbshift(FILE *fp,int nnodes,gmx_partdec_t *pd,t_idef *idef)
{
  int i,j,k;
  int lastcg,targetcg,nshift,naaj;
  int homeid[32];

  pd->bshift=0;
  for(i=1; (i<nnodes); i++) {
    targetcg = pd->cgindex[i];
    for(nshift=i; (nshift > 0) && (pd->cgindex[nshift] > targetcg); nshift--)
      ;
    pd->bshift=max(pd->bshift,i-nshift);
  }

  pd->shift = (nnodes + 1)/2;
  for(i=0; (i<nnodes); i++) {
    lastcg = pd->cgindex[i+1]-1;
    naaj = calc_naaj(lastcg,pd->cgindex[nnodes]);
    targetcg = (lastcg+naaj) % pd->cgindex[nnodes];

    /* Search until we find the target charge group */
    for(nshift=0;
        (nshift < nnodes) && (targetcg > pd->cgindex[nshift+1]);
        nshift++)
      ;
    /* Now compute the shift, that is the difference in node index */
    nshift=((nshift - i + nnodes) % nnodes);

    if (fp)
      fprintf(fp,"CPU=%3d, lastcg=%5d, targetcg=%5d, myshift=%5d\n",
              i,lastcg,targetcg,nshift);

    /* It's the largest shift that matters */
    pd->shift=max(nshift,pd->shift);
  }
  /* Now for the bonded forces */
  for(i=0; (i<F_NRE); i++) {
    if (interaction_function[i].flags & IF_BOND) {
      int nratoms = interaction_function[i].nratoms;
      for(j=0; (j<idef->il[i].nr); j+=nratoms+1) {
        for(k=1; (k<=nratoms); k++)
          homeid[k-1] = home_cpu(nnodes,pd,idef->il[i].iatoms[j+k]);
        for(k=1; (k<nratoms); k++)
          pd->shift = max(pd->shift,abs(homeid[k]-homeid[0]));
      }
    }
  }
  if (fp)
    fprintf(fp,"pd->shift = %3d, pd->bshift=%3d\n",
            pd->shift,pd->bshift);
}


static void
init_partdec_constraint(t_commrec *cr,
                        t_idef *   idef,
                        int *left_range,
                        int *right_range)
{
    gmx_partdec_t *            pd = cr->pd;
    gmx_partdec_constraint_t *pdc;
    int i,cnt;
    int ai,aj,nodei,nodej;
    int nratoms;
    int nodeid;

    snew(pdc,1);
    cr->pd->constraints = pdc;

    /* Who am I? */
    nodeid = cr->nodeid;

    /* Setup LINCS communication ranges */
    pdc->left_range_receive   = left_range[nodeid];
    pdc->right_range_receive  = right_range[nodeid]+1;
    pdc->left_range_send      = (nodeid > 0) ? right_range[nodeid-1]+1 : 0;
    pdc->right_range_send     = (nodeid < cr->nnodes-1) ? left_range[nodeid+1] : pd->index[cr->nnodes];

    snew(pdc->nlocalatoms,idef->il[F_CONSTR].nr);
    nratoms = interaction_function[F_CONSTR].nratoms;

    for(i=0,cnt=0;i<idef->il[F_CONSTR].nr;i+=nratoms+1,cnt++)
    {
        ai = idef->il[F_CONSTR].iatoms[i+1];
        aj = idef->il[F_CONSTR].iatoms[i+2];
        nodei = 0;
        while(ai>=pd->index[nodei+1])
        {
            nodei++;
        }
        nodej = 0;
        while(aj>=pd->index[nodej+1])
        {
            nodej++;
        }
        pdc->nlocalatoms[cnt] = 0;
        if(nodei==nodeid)
        {
            pdc->nlocalatoms[cnt]++;
        }
        if(nodej==nodeid)
        {
            pdc->nlocalatoms[cnt]++;
        }
    }
    pdc->nconstraints = cnt;

    snew(pdc->sendbuf,max(6*(pd->index[nodeid+1]-pdc->right_range_send),6*(pdc->left_range_send-pd->index[nodeid])));
    snew(pdc->recvbuf,max(6*(pd->index[nodeid]-pdc->left_range_receive),6*(pdc->right_range_receive-pd->index[nodeid+1])));
}

static void init_partdec(FILE *fp,t_commrec *cr,t_block *cgs,int *multinr,
                         t_idef *idef)
{
  int  i;
  gmx_partdec_t *pd;

  snew(pd,1);
  cr->pd = pd;

  set_left_right(cr);

  if (cr->nnodes > 1) {
    if (multinr == NULL)
      gmx_fatal(FARGS,"Internal error in init_partdec: multinr = NULL");
    snew(pd->index,cr->nnodes+1);
    snew(pd->cgindex,cr->nnodes+1);
    pd->cgindex[0] = 0;
    pd->index[0]   = 0;
    for(i=0; (i < cr->nnodes); i++) {
      pd->cgindex[i+1] = multinr[i];
      pd->index[i+1]   = cgs->index[multinr[i]];
    }
    calc_nsbshift(fp,cr->nnodes,pd,idef);
    /* This is a hack to do with bugzilla 148 */
    /*pd->shift = cr->nnodes-1;
      pd->bshift = 0;*/

    /* Allocate a buffer of size natoms of the whole system
     * for summing the forces over the nodes.
     */
    snew(pd->vbuf,cgs->index[cgs->nr]);
    pd->constraints = NULL;
  }
#ifdef GMX_MPI
  pd->mpi_req_tx=MPI_REQUEST_NULL;
  pd->mpi_req_rx=MPI_REQUEST_NULL;
#endif
}

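/* Example of the index setup above (hypothetical numbers, not from the
 * source): with two nodes and multinr = {100, 250}, node 0 owns charge
 * groups [0,100) and node 1 owns [100,250); pd->index translates these
 * boundaries into atom numbers via cgs->index, so pd->index[1] is the
 * first atom of charge group 100.
 */
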
static void print_partdec(FILE *fp,const char *title,
                          int nnodes,gmx_partdec_t *pd)
{
  int i;

  fprintf(fp,"%s\n",title);
  fprintf(fp,"nnodes:       %5d\n",nnodes);
  fprintf(fp,"pd->shift:    %5d\n",pd->shift);
  fprintf(fp,"pd->bshift:   %5d\n",pd->bshift);

  fprintf(fp,"Nodeid   atom0   #atom     cg0       #cg\n");
  for(i=0; (i<nnodes); i++)
    fprintf(fp,"%6d%8d%8d%8d%10d\n",
            i,
            pd->index[i],pd->index[i+1]-pd->index[i],
            pd->cgindex[i],pd->cgindex[i+1]-pd->cgindex[i]);
  fprintf(fp,"\n");
}

static void pr_idef_division(FILE *fp,t_idef *idef,int nnodes,int **multinr)
{
  int i,ftype,nr,nra,m0,m1;

  fprintf(fp,"Division of bonded forces over processors\n");
  fprintf(fp,"%-12s","CPU");
  for(i=0; (i<nnodes); i++)
    fprintf(fp," %5d",i);
  fprintf(fp,"\n");

  for(ftype=0; (ftype<F_NRE); ftype++) {
    if (idef->il[ftype].nr > 0) {
      nr  = idef->il[ftype].nr;
      nra = 1+interaction_function[ftype].nratoms;
      fprintf(fp,"%-12s", interaction_function[ftype].name);
      /* Loop over processors */
      for(i=0; (i<nnodes); i++) {
        m0 = (i == 0) ? 0 : multinr[ftype][i-1]/nra;
        m1 = multinr[ftype][i]/nra;
        fprintf(fp," %5d",m1-m0);
      }
      fprintf(fp,"\n");
    }
  }
}

static void select_my_ilist(FILE *log,t_ilist *il,int *multinr,t_commrec *cr)
{
  t_iatom *ia;
  int     i,start,end,nr;

  if (cr->nodeid == 0)
    start=0;
  else
    start=multinr[cr->nodeid-1];
  end=multinr[cr->nodeid];

  nr=end-start;
  if (nr < 0)
    gmx_fatal(FARGS,"Negative number of atoms (%d) on node %d\n"
              "You have probably not used the same value for -np with grompp"
              " and mdrun",
              nr,cr->nodeid);
  snew(ia,nr);

  for(i=0; (i<nr); i++)
    ia[i]=il->iatoms[start+i];

  sfree(il->iatoms);
  il->iatoms=ia;

  il->nr=nr;
}

static void select_my_idef(FILE *log,t_idef *idef,int **multinr,t_commrec *cr)
{
  int i;

  for(i=0; (i<F_NRE); i++)
    select_my_ilist(log,&idef->il[i],multinr[i],cr);
}

gmx_localtop_t *split_system(FILE *log,
                             gmx_mtop_t *mtop,t_inputrec *inputrec,
                             t_commrec *cr)
{
  int    i,npp,n,nn;
  real   *capacity;
  double tcap = 0,cap;
  int    *multinr_cgs,**multinr_nre;
  char   *cap_env;
  gmx_localtop_t *top;
  int    *left_range;
  int    *right_range;

  /* Time to setup the division of charge groups over processors */
  npp = cr->nnodes-cr->npmenodes;
  snew(capacity,npp);
  cap_env = getenv("GMX_CAPACITY");
  if (cap_env == NULL) {
    for(i=0; (i<npp-1); i++) {
      capacity[i] = 1.0/(double)npp;
      tcap += capacity[i];
    }
    /* Take care that the sum of capacities is 1.0 */
    capacity[npp-1] = 1.0 - tcap;
  } else {
    tcap = 0;
    nn = 0;
    for(i=0; i<npp; i++) {
      cap = 0;
      sscanf(cap_env+nn,"%lf%n",&cap,&n);
      if (cap == 0)
        gmx_fatal(FARGS,"Incorrect entry or number of entries in GMX_CAPACITY='%s'",cap_env);
      capacity[i] = cap;
      tcap += cap;
      nn += n;
    }
    for(i=0; i<npp; i++)
      capacity[i] /= tcap;
  }
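  /* Example (hypothetical value, not from the source): running with
   * GMX_CAPACITY="1 2 1" on three PP nodes gives normalized capacities
   * 0.25/0.50/0.25, so the middle node is assigned roughly half of the
   * charge groups. The entries are whitespace-separated numbers, one
   * per particle-particle node. */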

  /* Convert the molecular topology to a fully listed topology */
  top = gmx_mtop_generate_local_top(mtop,inputrec);

  snew(multinr_cgs,npp);
  snew(multinr_nre,F_NRE);
  for(i=0; i<F_NRE; i++)
    snew(multinr_nre[i],npp);

  snew(left_range,cr->nnodes);
  snew(right_range,cr->nnodes);

  /* This computes which entities can be placed on processors */
  split_top(log,npp,top,inputrec,&mtop->mols,capacity,multinr_cgs,multinr_nre,left_range,right_range);

  sfree(capacity);
  init_partdec(log,cr,&(top->cgs),multinr_cgs,&(top->idef));

  select_my_idef(log,&(top->idef),multinr_nre,cr);

  if(top->idef.il[F_CONSTR].nr>0)
  {
    init_partdec_constraint(cr,&(top->idef),left_range,right_range);
  }

  if (log)
    pr_idef_division(log,&(top->idef),npp,multinr_nre);

  for(i=0; i<F_NRE; i++)
    sfree(multinr_nre[i]);
  sfree(multinr_nre);
  sfree(multinr_cgs);

  sfree(left_range);
  sfree(right_range);

  if (log)
    print_partdec(log,"Workload division",cr->nnodes,cr->pd);

  return top;
}

static void
add_to_vsitelist(int **list, int *nitem, int *nalloc,int newitem)
{
    int  i,idx;
    gmx_bool found;

    found = FALSE;
    idx = *nitem;
    for(i=0; i<idx && !found;i++)
    {
        found = (newitem ==(*list)[i]);
    }
    if(!found)
    {
        /* Grow the list in chunks of 100, but only when it is full */
        if(idx >= *nalloc)
        {
            *nalloc+=100;
            srenew(*list,*nalloc);
        }
        (*list)[idx++] = newitem;
        *nitem = idx;
    }
}

gmx_bool setup_parallel_vsites(t_idef *idef,t_commrec *cr,
                               t_comm_vsites *vsitecomm)
{
    int i,j,ftype;
    int nra;
    gmx_bool do_comm;
    t_iatom   *ia;
    gmx_partdec_t *pd;
    int  iconstruct;
    int  i0,i1;
    int  nalloc_left_construct,nalloc_right_construct;
    int  bufsize,leftbuf,rightbuf;

    pd = cr->pd;

    i0 = pd->index[cr->nodeid];
    i1 = pd->index[cr->nodeid+1];

    vsitecomm->left_import_construct  = NULL;
    vsitecomm->left_import_nconstruct = 0;
    nalloc_left_construct      = 0;

    vsitecomm->right_import_construct  = NULL;
    vsitecomm->right_import_nconstruct = 0;
    nalloc_right_construct      = 0;

    for(ftype=0; (ftype<F_NRE); ftype++)
    {
        if ( !(interaction_function[ftype].flags & IF_VSITE) )
        {
            continue;
        }

        nra    = interaction_function[ftype].nratoms;
        ia     = idef->il[ftype].iatoms;

        for(i=0; i<idef->il[ftype].nr;i+=nra+1)
        {
            for(j=2;j<1+nra;j++)
            {
                iconstruct = ia[i+j];
                if(iconstruct<i0)
                {
                    add_to_vsitelist(&vsitecomm->left_import_construct,
                                     &vsitecomm->left_import_nconstruct,
                                     &nalloc_left_construct,iconstruct);
                }
                else if(iconstruct>=i1)
                {
                    add_to_vsitelist(&vsitecomm->right_import_construct,
                                     &vsitecomm->right_import_nconstruct,
                                     &nalloc_right_construct,iconstruct);
                }
            }
        }
    }

    /* Pre-communicate the array lengths */
    gmx_tx_rx_void(cr,
                   GMX_RIGHT,(void *)&vsitecomm->right_import_nconstruct,sizeof(int),
                   GMX_LEFT, (void *)&vsitecomm->left_export_nconstruct, sizeof(int));
    gmx_tx_rx_void(cr,
                   GMX_LEFT, (void *)&vsitecomm->left_import_nconstruct, sizeof(int),
                   GMX_RIGHT,(void *)&vsitecomm->right_export_nconstruct,sizeof(int));

    snew(vsitecomm->left_export_construct, vsitecomm->left_export_nconstruct);
    snew(vsitecomm->right_export_construct,vsitecomm->right_export_nconstruct);

    /* Communicate the constructing atom index arrays to the right... */
    gmx_tx_rx_void(cr,
                   GMX_RIGHT,(void *)vsitecomm->right_import_construct,vsitecomm->right_import_nconstruct*sizeof(int),
                   GMX_LEFT, (void *)vsitecomm->left_export_construct, vsitecomm->left_export_nconstruct*sizeof(int));

    /* ...and to the left */
    gmx_tx_rx_void(cr,
                   GMX_LEFT ,(void *)vsitecomm->left_import_construct, vsitecomm->left_import_nconstruct*sizeof(int),
                   GMX_RIGHT,(void *)vsitecomm->right_export_construct,vsitecomm->right_export_nconstruct*sizeof(int));

    leftbuf  = max(vsitecomm->left_export_nconstruct,vsitecomm->left_import_nconstruct);
    rightbuf = max(vsitecomm->right_export_nconstruct,vsitecomm->right_import_nconstruct);

    bufsize  = max(leftbuf,rightbuf);

    do_comm = (bufsize>0);

    snew(vsitecomm->send_buf,2*bufsize);
    snew(vsitecomm->recv_buf,2*bufsize);

    return do_comm;
}

t_state *partdec_init_local_state(t_commrec *cr,t_state *state_global)
{
  int i;
  t_state *state_local;

  snew(state_local,1);

  /* Copy all the contents */
  *state_local = *state_global;
  snew(state_local->lambda,efptNR);
  /* local storage for lambda */
  for (i=0;i<efptNR;i++)
    {
      state_local->lambda[i] = state_global->lambda[i];
    }
  if (state_global->nrngi > 1) {
    /* With stochastic dynamics we need local storage for the random state */
    if (state_local->flags & (1<<estLD_RNG)) {
      state_local->nrng = gmx_rng_n();
      snew(state_local->ld_rng,state_local->nrng);
#ifdef GMX_MPI
      if (PAR(cr)) {
        MPI_Scatter(state_global->ld_rng,
                    state_local->nrng*sizeof(state_local->ld_rng[0]),MPI_BYTE,
                    state_local->ld_rng,
                    state_local->nrng*sizeof(state_local->ld_rng[0]),MPI_BYTE,
                    MASTERRANK(cr),cr->mpi_comm_mygroup);
      }
#endif
    }
    if (state_local->flags & (1<<estLD_RNGI)) {
      snew(state_local->ld_rngi,1);
#ifdef GMX_MPI
      if (PAR(cr)) {
        MPI_Scatter(state_global->ld_rngi,
                    sizeof(state_local->ld_rngi[0]),MPI_BYTE,
                    state_local->ld_rngi,
                    sizeof(state_local->ld_rngi[0]),MPI_BYTE,
                    MASTERRANK(cr),cr->mpi_comm_mygroup);
      }
#endif
    }
  }

  return state_local;
}