/*
 * This source code is part of
 * GROningen MAchine for Chemical Simulations
 *
 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * If you want to redistribute modifications, please consider that
 * scientific software is very special. Version control is crucial -
 * bugs must be traceable. We will be happy to consider code for
 * inclusion in the official distribution, but derived work must not
 * be called official GROMACS. Details are found in the README & COPYING
 * files - if they are missing, get the official version at www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the papers on the package - you can find them in the top README file.
 *
 * For more info, check our website at http://www.gromacs.org
 *
 * And Hey:
 * GROwing Monsters And Cloning Shrimps
 */
/* This file is completely threadsafe - keep it that way! */
#include "gmx_random.h"
#include "mtop_util.h"
typedef struct gmx_partdec_constraint
{
    int   left_range_receive;
    int   right_range_receive;
    int   left_range_send;
    int   right_range_send;
    int   nconstraints;
    int  *nlocalatoms;
    rvec *sendbuf;
    rvec *recvbuf;
}
gmx_partdec_constraint_t;
typedef struct gmx_partdec {
    int  neighbor[2];           /* The nodeids of left and right neighb */
    int  *cgindex;              /* The charge group boundaries,         */
                                /* only allocated with particle decomp. */
    int  *index;                /* The home particle boundaries,        */
                                /* only allocated with particle decomp. */
    int  shift,bshift;          /* Coordinates are shifted left for     */
                                /* 'shift' systolic pulses, and right   */
                                /* for 'bshift' pulses. Forces are      */
                                /* shifted right for 'shift' pulses     */
                                /* and left for 'bshift' pulses.        */
                                /* This way it is not necessary to      */
                                /* shift the coordinates over the       */
                                /* entire ring.                         */
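                                /* Illustration (invented numbers):     */
                                /* with nnodes=4 and shift=2, node 1    */
                                /* receives the coordinates of nodes 2  */
                                /* and 3 in two pulses, and the         */
                                /* matching forces travel back over the */
                                /* same two hops, so no pulse ever has  */
                                /* to cross the rest of the ring.       */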
    rvec *vbuf;                 /* Buffer for summing the forces        */
#ifdef GMX_MPI
    MPI_Request mpi_req_rx;     /* MPI reqs for async transfers */
    MPI_Request mpi_req_tx;
#endif
    gmx_partdec_constraint_t *constraints;
} gmx_partdec_t;
void gmx_tx(const t_commrec *cr,int dir,void *buf,int bufsize)
{
#ifndef GMX_MPI
    gmx_call("gmx_tx");
#else
    int        nodeid,tag,flag;
    MPI_Status status;

    nodeid = cr->pd->neighbor[dir];
#ifdef DEBUG
    fprintf(stderr,"gmx_tx: nodeid=%d, buf=%p, bufsize=%d\n",
            nodeid,buf,bufsize);
#endif
    /* workaround for crashes encountered with MPI on IRIX 6.5 */
    if (cr->pd->mpi_req_tx != MPI_REQUEST_NULL) {
        MPI_Test(&cr->pd->mpi_req_tx,&flag,&status);
        if (!flag)
            fprintf(stdlog,"gmx_tx called before previous send was complete: nodeid=%d, buf=%p, bufsize=%d\n",
                    nodeid,buf,bufsize);
    }
    tag = 0;
    if (MPI_Isend(buf,bufsize,MPI_BYTE,RANK(cr,nodeid),tag,cr->mpi_comm_mygroup,&cr->pd->mpi_req_tx) != 0)
        gmx_comm("MPI_Isend Failed");
#endif
}
void gmx_tx_wait(const t_commrec *cr, int dir)
{
#ifndef GMX_MPI
    gmx_call("gmx_tx_wait");
#else
    MPI_Status status;
    int        mpi_result;

    if ((mpi_result = MPI_Wait(&cr->pd->mpi_req_tx,&status)) != 0)
        gmx_fatal(FARGS,"MPI_Wait: result=%d",mpi_result);
#endif
}
void gmx_rx(const t_commrec *cr,int dir,void *buf,int bufsize)
{
#ifndef GMX_MPI
    gmx_call("gmx_rx");
#else
    int nodeid,tag;

    nodeid = cr->pd->neighbor[dir];
#ifdef DEBUG
    fprintf(stderr,"gmx_rx: nodeid=%d, buf=%p, bufsize=%d\n",
            nodeid,buf,bufsize);
#endif
    tag = 0;
    if (MPI_Irecv(buf,bufsize,MPI_BYTE,RANK(cr,nodeid),tag,cr->mpi_comm_mygroup,&cr->pd->mpi_req_rx) != 0)
        gmx_comm("MPI_Irecv Failed");
#endif
}
void gmx_rx_wait(const t_commrec *cr, int nodeid)
{
#ifndef GMX_MPI
    gmx_call("gmx_rx_wait");
#else
    MPI_Status status;
    int        mpi_result;

    if ((mpi_result = MPI_Wait(&cr->pd->mpi_req_rx,&status)) != 0)
        gmx_fatal(FARGS,"MPI_Wait: result=%d",mpi_result);
#endif
}
void gmx_tx_rx_real(const t_commrec *cr,
                    int send_dir,real *send_buf,int send_bufsize,
                    int recv_dir,real *recv_buf,int recv_bufsize)
{
#ifndef GMX_MPI
    gmx_call("gmx_tx_rx_real");
#else
    int        send_nodeid,recv_nodeid;
    int        tx_tag = 0,rx_tag = 0;
    MPI_Status stat;

    send_nodeid = cr->pd->neighbor[send_dir];
    recv_nodeid = cr->pd->neighbor[recv_dir];

#ifdef GMX_DOUBLE
#define mpi_type MPI_DOUBLE
#else
#define mpi_type MPI_FLOAT
#endif

    if (send_bufsize > 0 && recv_bufsize > 0) {
        MPI_Sendrecv(send_buf,send_bufsize,mpi_type,RANK(cr,send_nodeid),tx_tag,
                     recv_buf,recv_bufsize,mpi_type,RANK(cr,recv_nodeid),rx_tag,
                     cr->mpi_comm_mygroup,&stat);
    } else if (send_bufsize > 0) {
        MPI_Send(send_buf,send_bufsize,mpi_type,RANK(cr,send_nodeid),tx_tag,
                 cr->mpi_comm_mygroup);
    } else if (recv_bufsize > 0) {
        MPI_Recv(recv_buf,recv_bufsize,mpi_type,RANK(cr,recv_nodeid),rx_tag,
                 cr->mpi_comm_mygroup,&stat);
    }
#undef mpi_type
#endif
}
void gmx_tx_rx_void(const t_commrec *cr,
                    int send_dir,void *send_buf,int send_bufsize,
                    int recv_dir,void *recv_buf,int recv_bufsize)
{
#ifndef GMX_MPI
    gmx_call("gmx_tx_rx_void");
#else
    int        send_nodeid,recv_nodeid;
    int        tx_tag = 0,rx_tag = 0;
    MPI_Status stat;

    send_nodeid = cr->pd->neighbor[send_dir];
    recv_nodeid = cr->pd->neighbor[recv_dir];

    MPI_Sendrecv(send_buf,send_bufsize,MPI_BYTE,RANK(cr,send_nodeid),tx_tag,
                 recv_buf,recv_bufsize,MPI_BYTE,RANK(cr,recv_nodeid),rx_tag,
                 cr->mpi_comm_mygroup,&stat);
#endif
}
void gmx_wait(const t_commrec *cr, int dir_send, int dir_recv)
{
#ifndef GMX_MPI
    gmx_call("gmx_wait");
#else
    gmx_tx_wait(cr, dir_send);
    gmx_rx_wait(cr, dir_recv);
#endif
}
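/* A typical systolic pulse built from these primitives, sketched under the
 * assumption that both transfers are posted before either wait (the names
 * sendbuf/recvbuf/nsend/nrecv are hypothetical):
 *
 *     gmx_tx(cr, GMX_LEFT,  (void *)sendbuf, nsend*sizeof(rvec));
 *     gmx_rx(cr, GMX_RIGHT, (void *)recvbuf, nrecv*sizeof(rvec));
 *     gmx_wait(cr, GMX_LEFT, GMX_RIGHT);
 *
 * Posting the nonblocking receive alongside the send is what keeps two
 * neighbors from deadlocking on simultaneous blocking sends.
 */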
static void set_left_right(t_commrec *cr)
{
    cr->pd->neighbor[GMX_LEFT]  = (cr->nnodes + cr->nodeid - 1) % cr->nnodes;
    cr->pd->neighbor[GMX_RIGHT] = (cr->nodeid + 1) % cr->nnodes;
}
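/* Worked example: with nnodes=4, node 0 gets left neighbor (4+0-1)%4 = 3 and
 * right neighbor (0+1)%4 = 1, so the nodes form the closed ring
 * 0 -> 1 -> 2 -> 3 -> 0 around which the systolic pulses travel.
 */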
void pd_move_f(const t_commrec *cr,rvec f[],t_nrnb *nrnb)
{
    move_f(NULL,cr,GMX_LEFT,GMX_RIGHT,f,cr->pd->vbuf,nrnb);
}

int *pd_cgindex(const t_commrec *cr)
{
    return cr->pd->cgindex;
}

int *pd_index(const t_commrec *cr)
{
    return cr->pd->index;
}

int pd_shift(const t_commrec *cr)
{
    return cr->pd->shift;
}

int pd_bshift(const t_commrec *cr)
{
    return cr->pd->bshift;
}

void pd_cg_range(const t_commrec *cr,int *cg0,int *cg1)
{
    *cg0 = cr->pd->cgindex[cr->nodeid];
    *cg1 = cr->pd->cgindex[cr->nodeid+1];
}

void pd_at_range(const t_commrec *cr,int *at0,int *at1)
{
    *at0 = cr->pd->index[cr->nodeid];
    *at1 = cr->pd->index[cr->nodeid+1];
}
void
pd_get_constraint_range(gmx_partdec_p_t pd,int *start,int *natoms)
{
    *start  = pd->constraints->left_range_receive;
    *natoms = pd->constraints->right_range_receive-pd->constraints->left_range_receive;
}

int *
pd_constraints_nlocalatoms(gmx_partdec_p_t pd)
{
    int *rc;

    if (NULL != pd && NULL != pd->constraints)
        rc = pd->constraints->nlocalatoms;
    else
        rc = NULL;

    return rc;
}
/* This routine is used to communicate the non-home atoms needed for constraints.
 * We have already calculated this range of atoms during setup, and stored it in
 * the partdec constraints structure.
 *
 * When called, we send/receive left_range_send/receive atoms to our left (lower)
 * node neighbor, and similarly to the right (higher) node.
 *
 * This has not been tested for periodic molecules...
 */
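/* Sketch of the ranges on the global atom axis for one node (layout only,
 * the proportions are arbitrary):
 *
 *  left_range_receive  index[thisnode]      index[thisnode+1]  right_range_receive
 *          |-- recv from left --|---- home atoms ----|-- recv from right --|
 *
 * i.e. atoms [left_range_receive, index[thisnode]) are imported from the left
 * neighbor and [index[thisnode+1], right_range_receive) from the right one.
 */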
void
pd_move_x_constraints(t_commrec *cr,
                      rvec      *x0,
                      rvec      *x1)
{
#ifdef GMX_MPI
    gmx_partdec_t            *pd;
    gmx_partdec_constraint_t *pdc;
    rvec *sendptr,*recvptr;
    int   thisnode,i,cnt;
    int   sendcnt,recvcnt;

    pd  = cr->pd;
    pdc = pd->constraints;
    if (pdc == NULL)
        return;

    thisnode = cr->nodeid;

    /* First pulse to right */
    recvcnt = 3*(pd->index[thisnode]-pdc->left_range_receive);
    sendcnt = 3*(pd->index[thisnode+1]-pdc->right_range_send);

    if (x1 != NULL) {
        /* Assemble temporary array with both x0 & x1 */
        recvptr = pdc->recvbuf;
        sendptr = pdc->sendbuf;

        cnt = 0;
        for(i=pdc->right_range_send; i<pd->index[thisnode+1]; i++)
            copy_rvec(x0[i],sendptr[cnt++]);
        for(i=pdc->right_range_send; i<pd->index[thisnode+1]; i++)
            copy_rvec(x1[i],sendptr[cnt++]);
        sendcnt *= 2;
        recvcnt *= 2;
    } else {
        recvptr = x0 + pdc->left_range_receive;
        sendptr = x0 + pdc->right_range_send;
    }

    gmx_tx_rx_real(cr,
                   GMX_RIGHT,(real *)sendptr,sendcnt,
                   GMX_LEFT, (real *)recvptr,recvcnt);

    if (x1 != NULL) {
        /* copy back to x0/x1 */
        cnt = 0;
        for(i=pdc->left_range_receive; i<pd->index[thisnode]; i++)
            copy_rvec(recvptr[cnt++],x0[i]);
        for(i=pdc->left_range_receive; i<pd->index[thisnode]; i++)
            copy_rvec(recvptr[cnt++],x1[i]);
    }

    /* And pulse to left */
    sendcnt = 3*(pdc->left_range_send-pd->index[thisnode]);
    recvcnt = 3*(pdc->right_range_receive-pd->index[thisnode+1]);

    if (x1 != NULL) {
        cnt = 0;
        for(i=pd->index[thisnode]; i<pdc->left_range_send; i++)
            copy_rvec(x0[i],sendptr[cnt++]);
        for(i=pd->index[thisnode]; i<pdc->left_range_send; i++)
            copy_rvec(x1[i],sendptr[cnt++]);
        sendcnt *= 2;
        recvcnt *= 2;
    } else {
        sendptr = x0 + pd->index[thisnode];
        recvptr = x0 + pd->index[thisnode+1];
    }

    gmx_tx_rx_real(cr,
                   GMX_LEFT, (real *)sendptr,sendcnt,
                   GMX_RIGHT,(real *)recvptr,recvcnt);

    /* Final copy back from buffers */
    if (x1 != NULL) {
        /* First copy received data back into x0, then into x1 */
        cnt = 0;
        for(i=pd->index[thisnode+1]; i<pdc->right_range_receive; i++)
            copy_rvec(recvptr[cnt++],x0[i]);
        for(i=pd->index[thisnode+1]; i<pdc->right_range_receive; i++)
            copy_rvec(recvptr[cnt++],x1[i]);
    }
#endif
}
static int home_cpu(int nnodes,gmx_partdec_t *pd,int atomid)
{
    int k;

    for(k=0; (k<nnodes); k++) {
        if (atomid < pd->index[k+1])
            return k;
    }
    gmx_fatal(FARGS,"Atomid %d is larger than number of atoms (%d)",
              atomid+1,pd->index[nnodes]+1);

    return -1;
}
static void calc_nsbshift(FILE *fp,int nnodes,gmx_partdec_t *pd,t_idef *idef)
{
    int i,j,k;
    int lastcg,targetcg,nshift,naaj;
    int homeid[32];

    pd->bshift = 0;
    for(i=1; (i<nnodes); i++) {
        targetcg = pd->cgindex[i];
        for(nshift=i; (nshift > 0) && (pd->cgindex[nshift] > targetcg); nshift--)
            ;
        pd->bshift = max(pd->bshift,i-nshift);
    }

    pd->shift = (nnodes + 1)/2;
    for(i=0; (i<nnodes); i++) {
        lastcg   = pd->cgindex[i+1]-1;
        naaj     = calc_naaj(lastcg,pd->cgindex[nnodes]);
        targetcg = (lastcg+naaj) % pd->cgindex[nnodes];

        /* Search until we find the target charge group */
        for(nshift=i;
            (nshift < nnodes) && (targetcg > pd->cgindex[nshift+1]);
            nshift++)
            ;
        /* Now compute the shift, that is the difference in node index */
        nshift = ((nshift - i + nnodes) % nnodes);

        fprintf(fp,"CPU=%3d, lastcg=%5d, targetcg=%5d, myshift=%5d\n",
                i,lastcg,targetcg,nshift);

        /* It's the largest shift that matters */
        pd->shift = max(nshift,pd->shift);
    }

    /* Now for the bonded forces */
    for(i=0; (i<F_NRE); i++) {
        if (interaction_function[i].flags & IF_BOND) {
            int nratoms = interaction_function[i].nratoms;
            for(j=0; (j<idef->il[i].nr); j+=nratoms+1) {
                for(k=1; (k<=nratoms); k++)
                    homeid[k-1] = home_cpu(nnodes,pd,idef->il[i].iatoms[j+k]);
                for(k=1; (k<nratoms); k++)
                    pd->shift = max(pd->shift,abs(homeid[k]-homeid[0]));
            }
        }
    }
    fprintf(fp,"pd->shift = %3d, pd->bshift=%3d\n",
            pd->shift,pd->bshift);
}
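/* Worked example (numbers invented): with nnodes=4 and charge-group
 * boundaries cgindex = {0,10,20,30,40}, node 0 has lastcg=9; if calc_naaj()
 * returned naaj=25, then targetcg = (9+25)%40 = 34, which lives on node 3,
 * so nshift = (3-0+4)%4 = 3 and pd->shift must be at least 3 pulses.
 */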
static void
init_partdec_constraint(t_commrec *cr,
                        t_idef    *idef,
                        int       *left_range,
                        int       *right_range)
{
    gmx_partdec_t            *pd = cr->pd;
    gmx_partdec_constraint_t *pdc;
    int i,cnt,nodeid,nratoms;
    int ai,aj,nodei,nodej;

    snew(pdc,1);
    cr->pd->constraints = pdc;

    nodeid = cr->nodeid;

    /* Setup LINCS communication ranges */
    pdc->left_range_receive  = left_range[nodeid];
    pdc->right_range_receive = right_range[nodeid]+1;
    pdc->left_range_send     = (nodeid > 0) ? right_range[nodeid-1]+1 : 0;
    pdc->right_range_send    = (nodeid < cr->nnodes-1) ? left_range[nodeid+1] : pd->index[cr->nnodes];
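    /* Worked example (invented numbers): nnodes=2, home ranges
     * pd->index = {0,100,200}, left_range = {0,90}, right_range = {110,199}.
     * Node 0 then gets left_range_receive=0, right_range_receive=111,
     * left_range_send=0 and right_range_send=90: it imports atoms [100,111)
     * from node 1 and exports its own atoms [90,100) to node 1.
     */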
    snew(pdc->nlocalatoms,idef->il[F_CONSTR].nr);
    nratoms = interaction_function[F_CONSTR].nratoms;

    for(i=0,cnt=0; i<idef->il[F_CONSTR].nr; i+=nratoms+1,cnt++) {
        ai = idef->il[F_CONSTR].iatoms[i+1];
        aj = idef->il[F_CONSTR].iatoms[i+2];
        /* Find the home node of each of the two constrained atoms */
        nodei = 0;
        while (ai >= pd->index[nodei+1])
            nodei++;
        nodej = 0;
        while (aj >= pd->index[nodej+1])
            nodej++;
        /* Count how many of the two atoms are local to this node */
        pdc->nlocalatoms[cnt] = 0;
        if (nodei == nodeid)
            pdc->nlocalatoms[cnt]++;
        if (nodej == nodeid)
            pdc->nlocalatoms[cnt]++;
    }
    pdc->nconstraints = cnt;

    /* Buffers sized for the bigger of the two pulses */
    snew(pdc->sendbuf,max(6*(pd->index[nodeid+1]-pdc->right_range_send),
                          6*(pdc->left_range_send-pd->index[nodeid])));
    snew(pdc->recvbuf,max(6*(pd->index[nodeid]-pdc->left_range_receive),
                          6*(pdc->right_range_receive-pd->index[nodeid+1])));
}
static void init_partdec(FILE *fp,t_commrec *cr,t_block *cgs,int *multinr,
                         t_idef *idef)
{
    int i;
    gmx_partdec_t *pd;

    snew(pd,1);
    cr->pd = pd;

    set_left_right(cr);

    if (cr->nnodes > 1) {
        if (multinr == NULL)
            gmx_fatal(FARGS,"Internal error in init_partdec: multinr = NULL");
        snew(pd->index,cr->nnodes+1);
        snew(pd->cgindex,cr->nnodes+1);
        pd->cgindex[0] = 0;
        pd->index[0]   = 0;
        for(i=0; (i < cr->nnodes); i++) {
            pd->cgindex[i+1] = multinr[i];
            pd->index[i+1]   = cgs->index[multinr[i]];
        }
        calc_nsbshift(fp,cr->nnodes,pd,idef);
        /* This is a hack related to bugzilla 148 */
        /*pd->shift = cr->nnodes-1;
          pd->bshift = 0;*/

        /* Allocate a buffer of size natoms of the whole system
         * for summing the forces over the nodes.
         */
        snew(pd->vbuf,cgs->index[cgs->nr]);
        pd->constraints = NULL;
    }
#ifdef GMX_MPI
    pd->mpi_req_tx = MPI_REQUEST_NULL;
    pd->mpi_req_rx = MPI_REQUEST_NULL;
#endif
}
static void print_partdec(FILE *fp,const char *title,
                          int nnodes,gmx_partdec_t *pd)
{
    int i;

    fprintf(fp,"%s\n",title);
    fprintf(fp,"nnodes:     %5d\n",nnodes);
    fprintf(fp,"pd->shift:  %5d\n",pd->shift);
    fprintf(fp,"pd->bshift: %5d\n",pd->bshift);

    fprintf(fp,"Nodeid   atom0   #atom     cg0       #cg\n");
    for(i=0; (i<nnodes); i++)
        fprintf(fp,"%6d%8d%8d%8d%10d\n",i,
                pd->index[i],pd->index[i+1]-pd->index[i],
                pd->cgindex[i],pd->cgindex[i+1]-pd->cgindex[i]);
}
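/* Illustrative output for a hypothetical two-node run (numbers invented):
 *
 *   Workload division
 *   nnodes:         2
 *   pd->shift:      1
 *   pd->bshift:     0
 *   Nodeid   atom0   #atom     cg0       #cg
 *        0       0     100       0        40
 *        1     100     100      40        40
 */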
static void pr_idef_division(FILE *fp,t_idef *idef,int nnodes,int **multinr)
{
    int i,ftype,nr,nra,m0,m1;

    fprintf(fp,"Division of bonded forces over processors\n");
    fprintf(fp,"%-12s","CPU");
    for(i=0; (i<nnodes); i++)
        fprintf(fp," %5d",i);
    fprintf(fp,"\n");

    for(ftype=0; (ftype<F_NRE); ftype++) {
        if (idef->il[ftype].nr > 0) {
            nr  = idef->il[ftype].nr;
            nra = 1+interaction_function[ftype].nratoms;
            fprintf(fp,"%-12s",interaction_function[ftype].name);
            /* Loop over processors */
            for(i=0; (i<nnodes); i++) {
                m0 = (i == 0) ? 0 : multinr[ftype][i-1]/nra;
                m1 = multinr[ftype][i]/nra;
                fprintf(fp," %5d",m1-m0);
            }
            fprintf(fp,"\n");
        }
    }
}
static void select_my_ilist(FILE *log,t_ilist *il,int *multinr,t_commrec *cr)
{
    t_iatom *ia;
    int i,start,end,nr;

    start = (cr->nodeid == 0) ? 0 : multinr[cr->nodeid-1];
    end   = multinr[cr->nodeid];

    nr = end-start;
    if (nr < 0)
        gmx_fatal(FARGS,"Negative number of atoms (%d) on node %d\n"
                  "You have probably not used the same value for -np with grompp"
                  " and mdrun",nr,cr->nodeid);
    snew(ia,nr);
    for(i=0; (i<nr); i++)
        ia[i] = il->iatoms[start+i];

    sfree(il->iatoms);
    il->iatoms = ia;
    il->nr     = nr;
}

static void select_my_idef(FILE *log,t_idef *idef,int **multinr,t_commrec *cr)
{
    int i;

    for(i=0; (i<F_NRE); i++)
        select_my_ilist(log,&idef->il[i],multinr[i],cr);
}
gmx_localtop_t *split_system(FILE *log,
                             gmx_mtop_t *mtop,t_inputrec *inputrec,
                             t_commrec *cr)
{
    int    i,npp,n,nn;
    real   *capacity;
    double cap,tcap;
    gmx_localtop_t *top;
    int    *multinr_cgs,**multinr_nre;
    char   *cap_env;
    int    *left_range,*right_range;

    /* Time to setup the division of charge groups over processors */
    npp = cr->nnodes-cr->npmenodes;

    snew(capacity,npp);
    cap_env = getenv("GMX_CAPACITY");
    if (cap_env == NULL) {
        tcap = 0;
        for(i=0; (i<npp-1); i++) {
            capacity[i] = 1.0/(double)npp;
            tcap       += capacity[i];
        }
        /* Take care that the sum of capacities is 1.0 */
        capacity[npp-1] = 1.0 - tcap;
    } else {
        /* Read relative node capacities from the environment */
        tcap = 0;
        nn   = 0;
        for(i=0; i<npp; i++) {
            cap = 0;
            sscanf(cap_env+nn,"%lf%n",&cap,&n);
            if (cap == 0)
                gmx_fatal(FARGS,"Incorrect entry or number of entries in GMX_CAPACITY='%s'",cap_env);
            capacity[i] = cap;
            tcap       += cap;
            nn         += n;
        }
        /* Normalize so the capacities again sum to 1.0 */
        for(i=0; i<npp; i++)
            capacity[i] /= tcap;
    }
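    /* Example (an assumption about the intended format, not verified):
     * GMX_CAPACITY="1 1 2" on npp=3 particle-particle nodes would parse to
     * {1,1,2} and normalize to {0.25,0.25,0.5}, assigning the last node
     * roughly half of all charge groups.
     */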
    /* Convert the molecular topology to a fully listed topology */
    top = gmx_mtop_generate_local_top(mtop,inputrec);

    snew(multinr_cgs,npp);
    snew(multinr_nre,F_NRE);
    for(i=0; i<F_NRE; i++)
        snew(multinr_nre[i],npp);

    snew(left_range,cr->nnodes);
    snew(right_range,cr->nnodes);

    /* This computes which entities can be placed on processors */
    split_top(log,npp,top,inputrec,&mtop->mols,capacity,multinr_cgs,multinr_nre,left_range,right_range);

    init_partdec(log,cr,&(top->cgs),multinr_cgs,&(top->idef));

    /* This should be fine */
    /*split_idef(&(top->idef),cr->nnodes-cr->npmenodes);*/

    select_my_idef(log,&(top->idef),multinr_nre,cr);

    if (top->idef.il[F_CONSTR].nr > 0)
        init_partdec_constraint(cr,&(top->idef),left_range,right_range);

    pr_idef_division(log,&(top->idef),npp,multinr_nre);

    for(i=0; i<F_NRE; i++)
        sfree(multinr_nre[i]);

    print_partdec(log,"Workload division",cr->nnodes,cr->pd);

    return top;
}
static void
add_to_vsitelist(int **list, int *nitem, int *nalloc, int newitem)
{
    int i,idx;
    gmx_bool found;

    /* Append newitem only if it is not in the list yet */
    found = FALSE;
    idx   = *nitem;
    for(i=0; i<idx && !found; i++)
        found = (newitem == (*list)[i]);

    if (!found) {
        if (idx >= *nalloc) {
            /* grow in chunks to limit reallocations */
            *nalloc += 100;
            srenew(*list,*nalloc);
        }
        (*list)[idx++] = newitem;
        *nitem = idx;
    }
}
gmx_bool setup_parallel_vsites(t_idef *idef,t_commrec *cr,
                               t_comm_vsites *vsitecomm)
{
    int i,j,ftype;
    int nra;
    gmx_bool do_comm;
    t_iatom *ia;
    gmx_partdec_t *pd;
    int iconstruct;
    int i0,i1;
    int nalloc_left_construct,nalloc_right_construct;
    int sendbuf[2],recvbuf[2];
    int bufsize,leftbuf,rightbuf;

    pd = cr->pd;

    i0 = pd->index[cr->nodeid];
    i1 = pd->index[cr->nodeid+1];

    vsitecomm->left_import_construct  = NULL;
    vsitecomm->left_import_nconstruct = 0;
    nalloc_left_construct             = 0;

    vsitecomm->right_import_construct  = NULL;
    vsitecomm->right_import_nconstruct = 0;
    nalloc_right_construct             = 0;

    for(ftype=0; (ftype<F_NRE); ftype++)
    {
        if ( !(interaction_function[ftype].flags & IF_VSITE) )
        {
            continue;
        }

        nra = interaction_function[ftype].nratoms;
        ia  = idef->il[ftype].iatoms;

        for(i=0; i<idef->il[ftype].nr; i+=nra+1)
        {
            /* Check all constructing atoms of this vsite */
            for(j=2; j<nra+1; j++)
            {
                iconstruct = ia[i+j];
                if (iconstruct < i0)
                {
                    add_to_vsitelist(&vsitecomm->left_import_construct,
                                     &vsitecomm->left_import_nconstruct,
                                     &nalloc_left_construct,iconstruct);
                }
                else if (iconstruct >= i1)
                {
                    add_to_vsitelist(&vsitecomm->right_import_construct,
                                     &vsitecomm->right_import_nconstruct,
                                     &nalloc_right_construct,iconstruct);
                }
            }
        }
    }

    /* Pre-communicate the array lengths */
    gmx_tx_rx_void(cr,
                   GMX_RIGHT,(void *)&vsitecomm->right_import_nconstruct,sizeof(int),
                   GMX_LEFT, (void *)&vsitecomm->left_export_nconstruct, sizeof(int));
    gmx_tx_rx_void(cr,
                   GMX_LEFT, (void *)&vsitecomm->left_import_nconstruct, sizeof(int),
                   GMX_RIGHT,(void *)&vsitecomm->right_export_nconstruct,sizeof(int));
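    /* The two exchanges above size the export lists before any index data
     * flows: each node first learns how many constructing-atom indices its
     * neighbors need, so the arrays below can be allocated to exactly the
     * right length before the second round of communication.
     */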
    snew(vsitecomm->left_export_construct, vsitecomm->left_export_nconstruct);
    snew(vsitecomm->right_export_construct,vsitecomm->right_export_nconstruct);

    /* Send the right neighbor the list of atoms we want from it, and
     * receive from the left neighbor the list it wants from us. */
    gmx_tx_rx_void(cr,
                   GMX_RIGHT,(void *)vsitecomm->right_import_construct,vsitecomm->right_import_nconstruct*sizeof(int),
                   GMX_LEFT, (void *)vsitecomm->left_export_construct, vsitecomm->left_export_nconstruct*sizeof(int));

    /* And the same exchange of constructing atom index arrays leftward */
    gmx_tx_rx_void(cr,
                   GMX_LEFT ,(void *)vsitecomm->left_import_construct, vsitecomm->left_import_nconstruct*sizeof(int),
                   GMX_RIGHT,(void *)vsitecomm->right_export_construct,vsitecomm->right_export_nconstruct*sizeof(int));

    leftbuf  = max(vsitecomm->left_export_nconstruct, vsitecomm->left_import_nconstruct);
    rightbuf = max(vsitecomm->right_export_nconstruct,vsitecomm->right_import_nconstruct);

    bufsize = max(leftbuf,rightbuf);

    do_comm = (bufsize > 0);

    snew(vsitecomm->send_buf,2*bufsize);
    snew(vsitecomm->recv_buf,2*bufsize);

    return do_comm;
}
t_state *partdec_init_local_state(t_commrec *cr,t_state *state_global)
{
    int i;
    t_state *state_local;

    snew(state_local,1);

    /* Copy all the contents */
    *state_local = *state_global;
    snew(state_local->lambda,efptNR);
    /* local storage for lambda */
    for(i=0; i<efptNR; i++)
        state_local->lambda[i] = state_global->lambda[i];

    if (state_global->nrngi > 1) {
        /* With stochastic dynamics we need local storage for the random state */
        if (state_local->flags & (1<<estLD_RNG)) {
            state_local->nrng = gmx_rng_n();
            snew(state_local->ld_rng,state_local->nrng);
#ifdef GMX_MPI
            if (PAR(cr)) {
                MPI_Scatter(state_global->ld_rng,
                            state_local->nrng*sizeof(state_local->ld_rng[0]),MPI_BYTE,
                            state_local->ld_rng,
                            state_local->nrng*sizeof(state_local->ld_rng[0]),MPI_BYTE,
                            MASTERRANK(cr),cr->mpi_comm_mygroup);
            }
#endif
        }
        if (state_local->flags & (1<<estLD_RNGI)) {
            snew(state_local->ld_rngi,1);
#ifdef GMX_MPI
            if (PAR(cr)) {
                MPI_Scatter(state_global->ld_rngi,
                            sizeof(state_local->ld_rngi[0]),MPI_BYTE,
                            state_local->ld_rngi,
                            sizeof(state_local->ld_rngi[0]),MPI_BYTE,
                            MASTERRANK(cr),cr->mpi_comm_mygroup);
            }
#endif
        }
    }

    return state_local;
}