3 * This source code is part of
7 * GROningen MAchine for Chemical Simulations
10 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
11 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
12 * Copyright (c) 2001-2004, The GROMACS development team,
13 * check out http://www.gromacs.org for more information.
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version 2
18 * of the License, or (at your option) any later version.
20 * If you want to redistribute modifications, please consider that
21 * scientific software is very special. Version control is crucial -
22 * bugs must be traceable. We will be happy to consider code for
23 * inclusion in the official distribution, but derived work must not
24 * be called official GROMACS. Details are found in the README & COPYING
25 * files - if they are missing, get the official version at www.gromacs.org.
27 * To help us fund GROMACS development, we humbly ask that you cite
28 * the papers on the package - you can find them in the top README file.
30 * For more info, check our website at http://www.gromacs.org
33 * GROwing Monsters And Cloning Shrimps
35 /* This file is completely threadsafe - keep it that way! */
51 #include "gmx_random.h"
52 #include "mtop_util.h"
/* Bookkeeping for communicating non-home atoms needed by constraints (LINCS)
 * under particle decomposition. The *_receive fields delimit the atom range
 * this node imports from its neighbors.
 * NOTE(review): this listing is elided — the fields between original lines 59
 * and 67 (send ranges, counts, send/recv buffers referenced elsewhere in this
 * file as left_range_send, right_range_send, nlocalatoms, nconstraints,
 * sendbuf, recvbuf) are not visible here.
 */
56 typedef struct gmx_partdec_constraint
58     int left_range_receive;   /* first atom index received from the left neighbor */
59     int right_range_receive;  /* one past the last atom index received from the right */
67 gmx_partdec_constraint_t;
/* Per-node state for particle decomposition: neighbor topology, the global
 * charge-group/atom partitioning tables, systolic shift counts, a force
 * summation buffer, outstanding MPI requests, and (optionally) the
 * constraint-communication setup.
 */
70 typedef struct gmx_partdec {
71     int neighbor[2]; /* The nodeids of left and right neighb */
72     int *cgindex; /* The charge group boundaries, */
74     /* only allocated with particle decomp. */
75     int *index; /* The home particle boundaries, */
77     /* only allocated with particle decomp. */
78     int shift, bshift; /* Coordinates are shifted left for */
79     /* 'shift' systolic pulses, and right */
80     /* for 'bshift' pulses. Forces are */
81     /* shifted right for 'shift' pulses */
82     /* and left for 'bshift' pulses */
83     /* This way it is not necessary to shift */
84     /* the coordinates over the entire ring */
85     rvec *vbuf; /* Buffer for summing the forces */
87     MPI_Request mpi_req_rx; /* MPI reqs for async transfers */
88     MPI_Request mpi_req_tx;
90     gmx_partdec_constraint_t * constraints; /* NULL unless constraints need comm. */
/* Start a non-blocking raw-byte send of 'buf' (bufsize bytes) to the neighbor
 * in direction 'dir' (GMX_LEFT/GMX_RIGHT). The request handle is stored in
 * cr->pd->mpi_req_tx; the matching completion is gmx_tx_wait().
 * NOTE(review): this listing is elided — the MPI guards and local variable
 * declarations between the sampled lines are not visible here.
 */
94 void gmx_tx(const t_commrec *cr, int dir, void *buf, int bufsize)
103     nodeid = cr->pd->neighbor[dir];
106     fprintf(stderr, "gmx_tx: nodeid=%d, buf=%x, bufsize=%d\n",
107             nodeid, buf, bufsize);
110     /* workaround for crashes encountered with MPI on IRIX 6.5 */
111     if (cr->pd->mpi_req_tx != MPI_REQUEST_NULL)
113         MPI_Test(&cr->pd->mpi_req_tx, &flag, &status);
116         fprintf(stdlog, "gmx_tx called before previous send was complete: nodeid=%d, buf=%x, bufsize=%d\n",
117                 nodeid, buf, bufsize);
123     if (MPI_Isend(buf, bufsize, MPI_BYTE, RANK(cr, nodeid), tag, cr->mpi_comm_mygroup, &cr->pd->mpi_req_tx) != 0)
125         gmx_comm("MPI_Isend Failed");
/* Block until the outstanding non-blocking send started by gmx_tx() has
 * completed; fatal error on MPI failure.
 * NOTE(review): 'dir' is unused in the visible lines — presumably there is a
 * single pending tx request regardless of direction; confirm in full source.
 */
130 void gmx_tx_wait(const t_commrec *cr, int dir)
133     gmx_call("gmx_tx_wait");
138     if ((mpi_result = MPI_Wait(&cr->pd->mpi_req_tx, &status)) != 0)
140         gmx_fatal(FARGS, "MPI_Wait: result=%d", mpi_result);
/* Start a non-blocking raw-byte receive of 'bufsize' bytes into 'buf' from
 * the neighbor in direction 'dir'. The request is stored in
 * cr->pd->mpi_req_rx; completed later by gmx_rx_wait().
 */
145 void gmx_rx(const t_commrec *cr, int dir, void *buf, int bufsize)
153     nodeid = cr->pd->neighbor[dir];
155     fprintf(stderr, "gmx_rx: nodeid=%d, buf=%x, bufsize=%d\n",
156             nodeid, buf, bufsize);
159     if (MPI_Irecv( buf, bufsize, MPI_BYTE, RANK(cr, nodeid), tag, cr->mpi_comm_mygroup, &cr->pd->mpi_req_rx) != 0)
161         gmx_comm("MPI_Recv Failed");
/* Block until the outstanding non-blocking receive started by gmx_rx() has
 * completed; fatal error on MPI failure.
 * NOTE(review): 'nodeid' is unused in the visible lines (same single-request
 * pattern as gmx_tx_wait) — confirm in full source.
 */
166 void gmx_rx_wait(const t_commrec *cr, int nodeid)
169     gmx_call("gmx_rx_wait");
174     if ((mpi_result = MPI_Wait(&cr->pd->mpi_req_rx, &status)) != 0)
176         gmx_fatal(FARGS, "MPI_Wait: result=%d", mpi_result);
/* Simultaneously send 'send_bufsize' reals to the neighbor in 'send_dir' and
 * receive 'recv_bufsize' reals from the neighbor in 'recv_dir', degrading to
 * a plain Send or Recv when one side has nothing to transfer (avoids calling
 * MPI_Sendrecv with zero-length halves). Buffer sizes are element counts.
 * NOTE(review): the #if/#else that selects between the two mpi_type defines
 * (double vs. single precision build) is elided in this listing.
 */
181 void gmx_tx_rx_real(const t_commrec *cr,
182                     int send_dir, real *send_buf, int send_bufsize,
183                     int recv_dir, real *recv_buf, int recv_bufsize)
186     gmx_call("gmx_tx_rx_real");
188     int send_nodeid, recv_nodeid;
189     int tx_tag = 0, rx_tag = 0;
192     send_nodeid = cr->pd->neighbor[send_dir];
193     recv_nodeid = cr->pd->neighbor[recv_dir];
196 #define mpi_type MPI_DOUBLE
198 #define mpi_type MPI_FLOAT
201     if (send_bufsize > 0 && recv_bufsize > 0)
203         MPI_Sendrecv(send_buf, send_bufsize, mpi_type, RANK(cr, send_nodeid), tx_tag,
204                      recv_buf, recv_bufsize, mpi_type, RANK(cr, recv_nodeid), rx_tag,
205                      cr->mpi_comm_mygroup, &stat);
207     else if (send_bufsize > 0)
209         MPI_Send(send_buf, send_bufsize, mpi_type, RANK(cr, send_nodeid), tx_tag,
210                  cr->mpi_comm_mygroup);
212     else if (recv_bufsize > 0)
214         MPI_Recv(recv_buf, recv_bufsize, mpi_type, RANK(cr, recv_nodeid), rx_tag,
215                  cr->mpi_comm_mygroup, &stat);
/* Byte-typed variant of gmx_tx_rx_real: exchange raw buffers with the two
 * neighbors in one MPI_Sendrecv. Sizes are in bytes; unlike the real variant,
 * the visible code does not special-case zero-length transfers.
 */
222 void gmx_tx_rx_void(const t_commrec *cr,
223                     int send_dir, void *send_buf, int send_bufsize,
224                     int recv_dir, void *recv_buf, int recv_bufsize)
227     gmx_call("gmx_tx_rx_void");
229     int send_nodeid, recv_nodeid;
230     int tx_tag = 0, rx_tag = 0;
233     send_nodeid = cr->pd->neighbor[send_dir];
234     recv_nodeid = cr->pd->neighbor[recv_dir];
237     MPI_Sendrecv(send_buf, send_bufsize, MPI_BYTE, RANK(cr, send_nodeid), tx_tag,
238                  recv_buf, recv_bufsize, MPI_BYTE, RANK(cr, recv_nodeid), rx_tag,
239                  cr->mpi_comm_mygroup, &stat);
/* Complete both halves of an async exchange: wait for the pending send in
 * 'dir_send' and the pending receive in 'dir_recv'. */
245 /*void gmx_wait(int dir_send,int dir_recv)*/
247 void gmx_wait(const t_commrec *cr, int dir_send, int dir_recv)
250     gmx_call("gmx_wait");
252     gmx_tx_wait(cr, dir_send);
253     gmx_rx_wait(cr, dir_recv);
/* Fill in the ring-topology neighbor ids for this node: left is
 * (nodeid-1) mod nnodes (written to avoid a negative operand), right is
 * (nodeid+1) mod nnodes. */
257 static void set_left_right(t_commrec *cr)
259     cr->pd->neighbor[GMX_LEFT] = (cr->nnodes + cr->nodeid - 1) % cr->nnodes;
260     cr->pd->neighbor[GMX_RIGHT] = (cr->nodeid + 1) % cr->nnodes;
/* Sum/communicate forces over the node ring, using the partdec force buffer
 * (pd->vbuf) as scratch. Thin wrapper around move_f(). */
263 void pd_move_f(const t_commrec *cr, rvec f[], t_nrnb *nrnb)
265     move_f(NULL, cr, GMX_LEFT, GMX_RIGHT, f, cr->pd->vbuf, nrnb);
/* Accessor: the per-node charge-group boundary table (nnodes+1 entries). */
268 int *pd_cgindex(const t_commrec *cr)
270     return cr->pd->cgindex;
/* Accessor: the per-node home-atom boundary table (nnodes+1 entries). */
273 int *pd_index(const t_commrec *cr)
275     return cr->pd->index;
/* Accessor: number of systolic pulses coordinates are shifted left. */
278 int pd_shift(const t_commrec *cr)
280     return cr->pd->shift;
/* Accessor: number of systolic pulses coordinates are shifted right. */
283 int pd_bshift(const t_commrec *cr)
285     return cr->pd->bshift;
/* Return this node's home charge-group range [*cg0, *cg1). */
288 void pd_cg_range(const t_commrec *cr, int *cg0, int *cg1)
290     *cg0 = cr->pd->cgindex[cr->nodeid];
291     *cg1 = cr->pd->cgindex[cr->nodeid+1];
/* Return this node's home atom range [*at0, *at1). */
294 void pd_at_range(const t_commrec *cr, int *at0, int *at1)
296     *at0 = cr->pd->index[cr->nodeid];
297     *at1 = cr->pd->index[cr->nodeid+1];
/* Return the full atom range needed for constraints on this node:
 * first imported atom from the left through last imported atom from the
 * right, as a start index plus count.
 * NOTE(review): return type and any NULL-checks are elided in this listing. */
301 pd_get_constraint_range(gmx_partdec_p_t pd, int *start, int *natoms)
303     *start = pd->constraints->left_range_receive;
304     *natoms = pd->constraints->right_range_receive-pd->constraints->left_range_receive;
/* Return the per-constraint count of locally-homed atoms (built in
 * init_partdec_constraint), or leave the result unset/NULL-ish when no
 * particle decomposition or no constraints are present.
 * NOTE(review): the declaration of 'rc' and the return statement are elided
 * in this listing. */
308 pd_constraints_nlocalatoms(gmx_partdec_p_t pd)
312     if (NULL != pd && NULL != pd->constraints)
314         rc = pd->constraints->nlocalatoms;
326 /* This routine is used to communicate the non-home-atoms needed for constraints.
327  * We have already calculated this range of atoms during setup, and stored in the
328  * partdec constraints structure.
330  * When called, we send/receive left_range_send/receive atoms to our left (lower)
331  * node neighbor, and similar to the right (higher) node.
333  * This has not been tested for periodic molecules...
336 pd_move_x_constraints(t_commrec * cr,
342     gmx_partdec_constraint_t *pdc;
349     int sendcnt, recvcnt;
352     pdc = pd->constraints;
359     thisnode = cr->nodeid;
361     /* First pulse to right */
    /* Counts are in reals: 3 per rvec. Receive side covers atoms imported
     * from the left; send side covers our top atoms the right node needs. */
363     recvcnt = 3*(pd->index[thisnode]-pdc->left_range_receive);
364     sendcnt = 3*(cr->pd->index[thisnode+1]-cr->pd->constraints->right_range_send);
368     /* Assemble temporary array with both x0 & x1 */
369     recvptr = pdc->recvbuf;
370     sendptr = pdc->sendbuf;
    /* Pack x0 then x1 contiguously into the send buffer (hence the doubled
     * counts when both arrays are transferred — see the elided branch). */
373     for (i = pdc->right_range_send; i < pd->index[thisnode+1]; i++)
375         copy_rvec(x0[i], sendptr[cnt++]);
377     for (i = pdc->right_range_send; i < pd->index[thisnode+1]; i++)
379         copy_rvec(x1[i], sendptr[cnt++]);
    /* x1 == NULL case (elided): communicate directly in-place in x0. */
386         recvptr = x0 + pdc->left_range_receive;
387         sendptr = x0 + pdc->right_range_send;
391                    GMX_RIGHT, (real *)sendptr, sendcnt,
392                    GMX_LEFT, (real *)recvptr, recvcnt);
396     /* copy back to x0/x1 */
398     for (i = pdc->left_range_receive; i < pd->index[thisnode]; i++)
400         copy_rvec(recvptr[cnt++], x0[i]);
402     for (i = pdc->left_range_receive; i < pd->index[thisnode]; i++)
404         copy_rvec(recvptr[cnt++], x1[i]);
408     /* And pulse to left */
409     sendcnt = 3*(pdc->left_range_send-pd->index[thisnode]);
410     recvcnt = 3*(pdc->right_range_receive-pd->index[thisnode+1]);
    /* Pack our bottom atoms (home start .. left_range_send) for the left node. */
415     for (i = cr->pd->index[thisnode]; i < pdc->left_range_send; i++)
417         copy_rvec(x0[i], sendptr[cnt++]);
419     for (i = cr->pd->index[thisnode]; i < pdc->left_range_send; i++)
421         copy_rvec(x1[i], sendptr[cnt++]);
    /* x1 == NULL case (elided): send/receive directly from/into x0. */
428         sendptr = x0 + pd->index[thisnode];
429         recvptr = x0 + pd->index[thisnode+1];
433                    GMX_LEFT, (real *)sendptr, sendcnt,
434                    GMX_RIGHT, (real *)recvptr, recvcnt);
436     /* Final copy back from buffers */
439     /* First copy received data back into x0 & x1 */
441     for (i = pd->index[thisnode+1]; i < pdc->right_range_receive; i++)
443         copy_rvec(recvptr[cnt++], x0[i]);
445     for (i = pd->index[thisnode+1]; i < pdc->right_range_receive; i++)
447         copy_rvec(recvptr[cnt++], x1[i]);
/* Return the node index that owns global atom 'atomid' by scanning the
 * home-atom boundary table; fatal error if the atom is beyond the last
 * boundary. NOTE(review): the 'return k' inside the loop is elided in this
 * listing. */
453 static int home_cpu(int nnodes, gmx_partdec_t *pd, int atomid)
457     for (k = 0; (k < nnodes); k++)
459         if (atomid < pd->index[k+1])
464     gmx_fatal(FARGS, "Atomid %d is larger than number of atoms (%d)",
465               atomid+1, pd->index[nnodes]+1);
/* Compute the systolic pulse counts pd->shift (left) and pd->bshift (right):
 * how far coordinates must travel around the ring so that every node sees all
 * charge groups / bonded-interaction partners it needs. Considers both the
 * neighbor-search target charge groups and the spread of bonded interactions
 * over nodes; keeps the maximum shift found. */
470 static void calc_nsbshift(FILE *fp, int nnodes, gmx_partdec_t *pd, t_idef *idef)
473     int lastcg, targetcg, nshift, naaj;
    /* Backward shift: how many nodes back a node's cg range can reach. */
477     for (i = 1; (i < nnodes); i++)
479         targetcg = pd->cgindex[i];
480         for (nshift = i; (nshift > 0) && (pd->cgindex[nshift] > targetcg); nshift--)
484         pd->bshift = max(pd->bshift, i-nshift);
    /* Forward shift: start from half the ring, then grow as needed. */
487     pd->shift = (nnodes + 1)/2;
488     for (i = 0; (i < nnodes); i++)
490         lastcg = pd->cgindex[i+1]-1;
491         naaj = calc_naaj(lastcg, pd->cgindex[nnodes]);
492         targetcg = (lastcg+naaj) % pd->cgindex[nnodes];
494         /* Search until we find the target charge group */
496              (nshift < nnodes) && (targetcg > pd->cgindex[nshift+1]);
501         /* Now compute the shift, that is the difference in node index */
502         nshift = ((nshift - i + nnodes) % nnodes);
506             fprintf(fp, "CPU=%3d, lastcg=%5d, targetcg=%5d, myshift=%5d\n",
507                     i, lastcg, targetcg, nshift);
510         /* It's the largest shift that matters */
511         pd->shift = max(nshift, pd->shift);
513     /* Now for the bonded forces */
    /* Every bonded interaction must fit within pd->shift pulses: compare the
     * home node of each participating atom against the first atom's node. */
514     for (i = 0; (i < F_NRE); i++)
516         if (interaction_function[i].flags & IF_BOND)
518             int nratoms = interaction_function[i].nratoms;
519             for (j = 0; (j < idef->il[i].nr); j += nratoms+1)
521                 for (k = 1; (k <= nratoms); k++)
523                     homeid[k-1] = home_cpu(nnodes, pd, idef->il[i].iatoms[j+k]);
525                 for (k = 1; (k < nratoms); k++)
527                     pd->shift = max(pd->shift, abs(homeid[k]-homeid[0]));
534         fprintf(fp, "pd->shift = %3d, pd->bshift=%3d\n",
535                 pd->shift, pd->bshift);
/* Allocate and populate cr->pd->constraints: the send/receive atom ranges for
 * LINCS halo exchange (derived from the per-node left_range/right_range
 * arrays computed by split_top), the per-constraint count of locally-homed
 * atoms, and the pack/unpack buffers sized for the larger of the two pulses
 * (factor 6 = two rvec arrays x 3 reals). */
541 init_partdec_constraint(t_commrec *cr,
546     gmx_partdec_t * pd = cr->pd;
547     gmx_partdec_constraint_t *pdc;
549     int ai, aj, nodei, nodej;
554     cr->pd->constraints = pdc;
560     /* Setup LINCS communication ranges */
561     pdc->left_range_receive = left_range[nodeid];
562     pdc->right_range_receive = right_range[nodeid]+1;
563     pdc->left_range_send = (nodeid > 0) ? right_range[nodeid-1]+1 : 0;
564     pdc->right_range_send = (nodeid < cr->nnodes-1) ? left_range[nodeid+1] : pd->index[cr->nnodes];
566     snew(pdc->nlocalatoms, idef->il[F_CONSTR].nr);
567     nratoms = interaction_function[F_CONSTR].nratoms;
    /* For each constraint, find the home node of both atoms and count how
     * many of the two live on this node (0, 1 or 2). */
569     for (i = 0, cnt = 0; i < idef->il[F_CONSTR].nr; i += nratoms+1, cnt++)
571         ai = idef->il[F_CONSTR].iatoms[i+1];
572         aj = idef->il[F_CONSTR].iatoms[i+2];
574         while (ai >= pd->index[nodei+1])
579         while (aj >= pd->index[nodej+1])
583         pdc->nlocalatoms[cnt] = 0;
586             pdc->nlocalatoms[cnt]++;
590             pdc->nlocalatoms[cnt]++;
593     pdc->nconstraints = cnt;
595     snew(pdc->sendbuf, max(6*(pd->index[cr->nodeid+1]-pd->constraints->right_range_send), 6*(pdc->left_range_send-pd->index[cr->nodeid])));
596     snew(pdc->recvbuf, max(6*(pd->index[cr->nodeid]-pdc->left_range_receive), 6*(pdc->right_range_receive-pd->index[cr->nodeid+1])));
/* Build the gmx_partdec structure for this run: translate the per-node
 * charge-group division 'multinr' into cgindex/index boundary tables, compute
 * the systolic shifts, and allocate the whole-system force buffer. Fatal
 * error when multinr is NULL on a multi-node run. */
600 static void init_partdec(FILE *fp, t_commrec *cr, t_block *cgs, int *multinr,
615         gmx_fatal(FARGS, "Internal error in init_partdec: multinr = NULL");
617     snew(pd->index, cr->nnodes+1);
618     snew(pd->cgindex, cr->nnodes+1);
    /* Boundary i+1 is node i's last cg (multinr) and its first atom index. */
621     for (i = 0; (i < cr->nnodes); i++)
623         pd->cgindex[i+1] = multinr[i];
624         pd->index[i+1] = cgs->index[multinr[i]];
626     calc_nsbshift(fp, cr->nnodes, pd, idef);
627     /* This is a hack to do with bugzilla 148 */
628     /*pd->shift = cr->nnodes-1;
631     /* Allocate a buffer of size natoms of the whole system
632      * for summing the forces over the nodes.
634     snew(pd->vbuf, cgs->index[cgs->nr]);
635     pd->constraints = NULL;
    /* No transfers in flight yet. */
638     pd->mpi_req_tx = MPI_REQUEST_NULL;
639     pd->mpi_req_rx = MPI_REQUEST_NULL;
/* Log a human-readable summary of the decomposition: the shifts and, per
 * node, its first atom / atom count / first cg / cg count. */
643 static void print_partdec(FILE *fp, const char *title,
644                           int nnodes, gmx_partdec_t *pd)
648     fprintf(fp, "%s\n", title);
649     fprintf(fp, "nnodes: %5d\n", nnodes);
650     fprintf(fp, "pd->shift: %5d\n", pd->shift);
651     fprintf(fp, "pd->bshift: %5d\n", pd->bshift);
653     fprintf(fp, "Nodeid   atom0   #atom     cg0       #cg\n");
654     for (i = 0; (i < nnodes); i++)
656         fprintf(fp, "%6d%8d%8d%8d%10d\n",
658                 pd->index[i], pd->index[i+1]-pd->index[i],
659                 pd->cgindex[i], pd->cgindex[i+1]-pd->cgindex[i]);
/* Log how each bonded interaction type is divided over the processors:
 * one row per nonempty interaction type, one column per node, showing the
 * number of interactions assigned to that node (differences of the
 * cumulative multinr table, divided by the per-interaction atom stride). */
664 static void pr_idef_division(FILE *fp, t_idef *idef, int nnodes, int **multinr)
666     int i, ftype, nr, nra, m0, m1;
668     fprintf(fp, "Division of bonded forces over processors\n");
669     fprintf(fp, "%-12s", "CPU");
670     for (i = 0; (i < nnodes); i++)
672         fprintf(fp, " %5d", i);
676     for (ftype = 0; (ftype < F_NRE); ftype++)
678         if (idef->il[ftype].nr > 0)
680             nr = idef->il[ftype].nr;
681             nra = 1+interaction_function[ftype].nratoms;
682             fprintf(fp, "%-12s", interaction_function[ftype].name);
683             /* Loop over processors */
684             for (i = 0; (i < nnodes); i++)
686                 m0 = (i == 0) ? 0 : multinr[ftype][i-1]/nra;
687                 m1 = multinr[ftype][i]/nra;
688                 fprintf(fp, " %5d", m1-m0);
/* Prune an interaction list down to the entries assigned to this node:
 * the [start, end) slice of il->iatoms given by the cumulative division
 * table 'multinr'. Fatal error when the slice is negative (grompp -np
 * mismatch). NOTE(review): the allocation of 'ia' and the final swap back
 * into 'il' are elided in this listing. */
695 static void select_my_ilist(FILE *log, t_ilist *il, int *multinr, t_commrec *cr)
698     int i, start, end, nr;
706         start = multinr[cr->nodeid-1];
708     end = multinr[cr->nodeid];
713         gmx_fatal(FARGS, "Negative number of atoms (%d) on node %d\n"
714                   "You have probably not used the same value for -np with grompp"
720     for (i = 0; (i < nr); i++)
722         ia[i] = il->iatoms[start+i];
/* Apply select_my_ilist to every interaction type in the topology. */
731 static void select_my_idef(FILE *log, t_idef *idef, int **multinr, t_commrec *cr)
735     for (i = 0; (i < F_NRE); i++)
737         select_my_ilist(log, &idef->il[i], multinr[i], cr);
/* Top-level entry for particle decomposition setup: compute per-node
 * capacities (uniform, or overridden via the GMX_CAPACITY environment
 * variable), expand the molecular topology to a full local topology, divide
 * charge groups and bonded interactions over the particle-particle nodes,
 * initialize the partdec structure (and constraint communication if the
 * system has constraints), and return the local topology.
 * NOTE(review): several declarations, normalization of the parsed
 * capacities, and the final return are elided in this listing. */
741 gmx_localtop_t *split_system(FILE *log,
742                              gmx_mtop_t *mtop, t_inputrec *inputrec,
747     double tcap = 0, cap;
748     int *multinr_cgs, **multinr_nre;
754     /* Time to setup the division of charge groups over processors */
755     npp = cr->nnodes-cr->npmenodes;
757     cap_env = getenv("GMX_CAPACITY");
    /* Default: equal share per particle-particle node. */
760         for (i = 0; (i < npp-1); i++)
762             capacity[i] = 1.0/(double)npp;
765         /* Take care that the sum of capacities is 1.0 */
766         capacity[npp-1] = 1.0 - tcap;
    /* GMX_CAPACITY set: parse one fraction per pp node from the string. */
772         for (i = 0; i < npp; i++)
775             sscanf(cap_env+nn, "%lf%n", &cap, &n);
778                 gmx_fatal(FARGS, "Incorrect entry or number of entries in GMX_CAPACITY='%s'", cap_env);
784         for (i = 0; i < npp; i++)
790     /* Convert the molecular topology to a fully listed topology */
791     top = gmx_mtop_generate_local_top(mtop, inputrec);
793     snew(multinr_cgs, npp);
794     snew(multinr_nre, F_NRE);
795     for (i = 0; i < F_NRE; i++)
797         snew(multinr_nre[i], npp);
801     snew(left_range, cr->nnodes);
802     snew(right_range, cr->nnodes);
804     /* This computes which entities can be placed on processors */
805     split_top(log, npp, top, inputrec, &mtop->mols, capacity, multinr_cgs, multinr_nre, left_range, right_range);
808     init_partdec(log, cr, &(top->cgs), multinr_cgs, &(top->idef));
810     /* This should be fine */
811     /*split_idef(&(top->idef),cr->nnodes-cr->npmenodes);*/
813     select_my_idef(log, &(top->idef), multinr_nre, cr);
815     if (top->idef.il[F_CONSTR].nr > 0)
817         init_partdec_constraint(cr, &(top->idef), left_range, right_range);
822         pr_idef_division(log, &(top->idef), npp, multinr_nre);
825     for (i = 0; i < F_NRE; i++)
827         sfree(multinr_nre[i]);
837         print_partdec(log, "Workload division", cr->nnodes, cr->pd);
/* Append 'newitem' to a dynamically grown index list unless it is already
 * present (linear duplicate scan over the current entries).
 * NOTE(review): return type, the 'idx'/'found' declarations, the nalloc
 * growth policy, and the *nitem update are elided in this listing. */
844 add_to_vsitelist(int **list, int *nitem, int *nalloc, int newitem)
851     for (i = 0; i < idx && !found; i++)
853         found = (newitem == (*list)[i]);
858         srenew(*list, *nalloc);
859         (*list)[idx++] = newitem;
/* Build the communication setup for virtual sites whose constructing atoms
 * live on a neighboring node: scan all vsite interactions for constructing
 * atoms outside this node's home range [i0, i1), collect the import lists
 * for left and right, exchange list lengths and then the index lists with
 * both neighbors, and allocate shared send/receive buffers. Returns whether
 * any communication is needed (visible as 'do_comm'). */
864 gmx_bool setup_parallel_vsites(t_idef *idef, t_commrec *cr,
865                                t_comm_vsites *vsitecomm)
874     int nalloc_left_construct, nalloc_right_construct;
875     int sendbuf[2], recvbuf[2];
876     int bufsize, leftbuf, rightbuf;
880     i0 = pd->index[cr->nodeid];
881     i1 = pd->index[cr->nodeid+1];
883     vsitecomm->left_import_construct = NULL;
884     vsitecomm->left_import_nconstruct = 0;
885     nalloc_left_construct = 0;
887     vsitecomm->right_import_construct = NULL;
888     vsitecomm->right_import_nconstruct = 0;
889     nalloc_right_construct = 0;
891     for (ftype = 0; (ftype < F_NRE); ftype++)
893         if (!(interaction_function[ftype].flags & IF_VSITE) )
898         nra = interaction_function[ftype].nratoms;
899         ia = idef->il[ftype].iatoms;
901         for (i = 0; i < idef->il[ftype].nr; i += nra+1)
            /* Entry layout: [type, vsite, construct1..constructN]; the
             * constructing atoms start at offset 2. */
903             for (j = 2; j < 1+nra; j++)
905                 iconstruct = ia[i+j];
                /* Below our home range: must be imported from the left. */
908                     add_to_vsitelist(&vsitecomm->left_import_construct,
909                                      &vsitecomm->left_import_nconstruct,
910                                      &nalloc_left_construct, iconstruct);
912                 else if (iconstruct >= i1)
914                     add_to_vsitelist(&vsitecomm->right_import_construct,
915                                      &vsitecomm->right_import_nconstruct,
916                                      &nalloc_right_construct, iconstruct);
922     /* Pre-communicate the array lengths */
924                    GMX_RIGHT, (void *)&vsitecomm->right_import_nconstruct, sizeof(int),
925                    GMX_LEFT, (void *)&vsitecomm->left_export_nconstruct, sizeof(int));
927                    GMX_LEFT, (void *)&vsitecomm->left_import_nconstruct, sizeof(int),
928                    GMX_RIGHT, (void *)&vsitecomm->right_export_nconstruct, sizeof(int));
930     snew(vsitecomm->left_export_construct, vsitecomm->left_export_nconstruct);
931     snew(vsitecomm->right_export_construct, vsitecomm->right_export_nconstruct);
933     /* Communicate the constructing atom index arrays */
935                    GMX_RIGHT, (void *)vsitecomm->right_import_construct, vsitecomm->right_import_nconstruct*sizeof(int),
936                    GMX_LEFT, (void *)vsitecomm->left_export_construct, vsitecomm->left_export_nconstruct*sizeof(int));
938     /* Communicate the constructing atom index arrays */
940                    GMX_LEFT, (void *)vsitecomm->left_import_construct, vsitecomm->left_import_nconstruct*sizeof(int),
941                    GMX_RIGHT, (void *)vsitecomm->right_export_construct, vsitecomm->right_export_nconstruct*sizeof(int));
    /* Buffer must hold the largest transfer in either direction. */
943     leftbuf = max(vsitecomm->left_export_nconstruct, vsitecomm->left_import_nconstruct);
944     rightbuf = max(vsitecomm->right_export_nconstruct, vsitecomm->right_import_nconstruct);
946     bufsize = max(leftbuf, rightbuf);
948     do_comm = (bufsize > 0);
950     snew(vsitecomm->send_buf, 2*bufsize);
951     snew(vsitecomm->recv_buf, 2*bufsize);
956 t_state *partdec_init_local_state(t_commrec *cr, t_state *state_global)
959 t_state *state_local;
961 snew(state_local, 1);
963 /* Copy all the contents */
964 *state_local = *state_global;
965 snew(state_local->lambda, efptNR);
966 /* local storage for lambda */
967 for (i = 0; i < efptNR; i++)
969 state_local->lambda[i] = state_global->lambda[i];
971 if (state_global->nrngi > 1)
973 /* With stochastic dynamics we need local storage for the random state */
974 if (state_local->flags & (1<<estLD_RNG))
976 state_local->nrng = gmx_rng_n();
977 snew(state_local->ld_rng, state_local->nrng);
981 MPI_Scatter(state_global->ld_rng,
982 state_local->nrng*sizeof(state_local->ld_rng[0]), MPI_BYTE,
984 state_local->nrng*sizeof(state_local->ld_rng[0]), MPI_BYTE,
985 MASTERRANK(cr), cr->mpi_comm_mygroup);
989 if (state_local->flags & (1<<estLD_RNGI))
991 snew(state_local->ld_rngi, 1);
995 MPI_Scatter(state_global->ld_rngi,
996 sizeof(state_local->ld_rngi[0]), MPI_BYTE,
997 state_local->ld_rngi,
998 sizeof(state_local->ld_rngi[0]), MPI_BYTE,
999 MASTERRANK(cr), cr->mpi_comm_mygroup);