/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2008
 * Copyright (c) 2012, by the GROMACS development team, led by
 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
 * others, as listed in the AUTHORS file in the top-level source
 * directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/* Margin for setting up the DD grid */
#define DD_GRID_MARGIN_PRES_SCALE 1.05
static int factorize(int n,int **fac,int **mfac)

    gmx_fatal(FARGS, "Can only factorize positive integers.");

    /* Decompose n into factors */

    if (ndiv == 0 || (*fac)[ndiv-1] != d)
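/* A minimal usage sketch (illustrative, not in the original file):
 *
 *   int *fac,*mfac;
 *   int ndiv = factorize(90,&fac,&mfac);
 *
 * 90 = 2 * 3^2 * 5, so ndiv == 3 with the factors fac = {2,3,5}
 * and the multiplicities mfac = {1,2,1}.
 */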
static int largest_divisor(int n)

    int ndiv,*div,*mdiv,ldiv;

    ndiv = factorize(n,&div,&mdiv);
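/* Worked example (illustrative): largest_divisor(20) factorizes
 * 20 = 2^2 * 5 and returns the largest prime factor, div[ndiv-1] == 5.
 */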
static int lcd(int n1,int n2)

    for(i=2; (i<=n1 && i<=n2); i++)

        if (n1 % i == 0 && n2 % i == 0)
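/* Worked example (illustrative): the loop keeps the largest i that
 * divides both inputs, i.e. the greatest common divisor, so
 * lcd(12,18) == 6 and lcd(7,9) == 1 for coprime inputs.
 */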
static gmx_bool fits_pme_ratio(int nnodes,int npme,float ratio)
{
    return ((double)npme/(double)nnodes > 0.95*ratio);
}
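/* Worked example (illustrative): with nnodes = 8 and an estimated
 * relative PME load of ratio = 0.25, npme must satisfy
 * npme/8 > 0.95*0.25 = 0.2375, so npme = 2 (0.25) fits
 * while npme = 1 (0.125) does not.
 */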
static gmx_bool fits_pp_pme_perf(FILE *fplog,
                                 t_inputrec *ir,matrix box,gmx_mtop_t *mtop,
                                 int nnodes,int npme,float ratio)
{
    int ndiv,*div,*mdiv,ldiv;
    int npp_root3,npme_root2;

    ndiv = factorize(nnodes-npme,&div,&mdiv);

    npp_root3  = (int)(pow(nnodes-npme,1.0/3.0) + 0.5);
    npme_root2 = (int)(sqrt(npme) + 0.5);

    /* The check below gives a reasonable division:
     * factor 5 allowed at 5 or more PP nodes,
     * factor 7 allowed at 49 or more PP nodes.
     */
    if (ldiv > 3 + npp_root3)

    /* Check if the numbers of PP and PME nodes have a reasonably sized
     * common divisor, such that we can use 2D PME decomposition
     * when required (which requires nx_pp == nx_pme).
     * The factor of 2 allows for a maximum ratio of 2^2=4
     * between nx_pme and ny_pme.
     */
    if (lcd(nnodes-npme,npme)*2 < npme_root2)

    /* Does this division give a reasonable PME load? */
    return fits_pme_ratio(nnodes,npme,ratio);
}
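/* Worked example (illustrative): nnodes = 64, npme = 16 gives
 * npp = 48 = 2^4 * 3, so ldiv = 3 and npp_root3 = round(48^(1/3)) = 4;
 * since 3 <= 3 + 4 the prime-factor check passes. For the 2D PME check,
 * lcd(48,16) = 16 and npme_root2 = 4, so 16*2 >= 4 passes as well and
 * the verdict is left to fits_pme_ratio().
 */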
static int guess_npme(FILE *fplog,gmx_mtop_t *mtop,t_inputrec *ir,matrix box,
                      int nnodes)

    ratio = pme_load_estimate(mtop,ir,box);

    fprintf(fplog,"Guess for relative PME load: %.2f\n",ratio);
    /* We assume the optimal node ratio is close to the load ratio.
     * The communication load is neglected,
     * but (hopefully) this will balance out between PP and PME.
     */

    if (!fits_pme_ratio(nnodes,nnodes/2,ratio))

        /* We would need more than nnodes/2 PME-only nodes,
         * which is not possible. Since the PME load is very high,
         * we will not lose much performance when all nodes do PME.
         */
    /* First try to find npme as a factor of nnodes up to nnodes/3.
     * We start with a minimum PME node fraction of 1/16
     * and avoid ratios which lead to large prime factors in nnodes-npme.
     */
    npme = (nnodes + 15)/16;
    while (npme <= nnodes/3) {
        if (nnodes % npme == 0)

            /* Note that fits_perf might change the PME grid;
             * in the current implementation it does not.
             */
            if (fits_pp_pme_perf(fplog,ir,box,mtop,nnodes,npme,ratio))
    /* Try any possible number for npme */

    while (npme <= nnodes/2)

        /* Note that fits_perf may change the PME grid */
        if (fits_pp_pme_perf(fplog,ir,box,mtop,nnodes,npme,ratio))
222 gmx_fatal(FARGS,"Could not find an appropriate number of separate PME nodes. i.e. >= %5f*#nodes (%d) and <= #nodes/2 (%d) and reasonable performance wise (grid_x=%d, grid_y=%d).\n"
223 "Use the -npme option of mdrun or change the number of processors or the PME grid dimensions, see the manual for details.",
224 ratio,(int)(0.95*ratio*nnodes+0.5),nnodes/2,ir->nkx,ir->nky);
225 /* Keep the compiler happy */
233 "Will use %d particle-particle and %d PME only nodes\n"
234 "This is a guess, check the performance at the end of the log file\n",
238 "Will use %d particle-particle and %d PME only nodes\n"
239 "This is a guess, check the performance at the end of the log file\n",
static int div_up(int n,int f)
{
    return (n + f - 1)/f;
}
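/* Illustrative: div_up() is integer division rounding up,
 * e.g. div_up(10,4) == (10 + 4 - 1)/4 == 3.
 */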
real comm_box_frac(ivec dd_nc,real cutoff,gmx_ddbox_t *ddbox)

        bt[i] = ddbox->box_size[i]*ddbox->skew_fac[i];
        nw[i] = dd_nc[i]*cutoff/bt[i];

            for(j=i+1; j<DIM; j++)

                    comm_vol += nw[i]*nw[j]*M_PI/4;
                    for(k=j+1; k<DIM; k++)

                            comm_vol += nw[i]*nw[j]*nw[k]*M_PI/6;
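/* Worked example (illustrative): nw[i] is the cut-off measured in cell
 * widths along dimension i. For a cubic 10 nm box with cutoff = 1 nm
 * and dd_nc = {4,4,4}, nw[i] = 4*1/10 = 0.4 in every dimension.
 * Geometrically, the M_PI/4 terms are quarter-cylinder edge volumes and
 * the M_PI/6 term is the eighth-sphere corner volume ((4*M_PI/3)/8) of
 * the communicated zone.
 */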
static gmx_bool inhomogeneous_z(const t_inputrec *ir)
{
    return ((EEL_PME(ir->coulombtype) || ir->coulombtype==eelEWALD) &&
            ir->ePBC==epbcXYZ && ir->ewald_geometry==eewg3DC);
}
/* Avoid integer overflows */
static float comm_pme_cost_vol(int npme, int a, int b, int c)

    comm_vol *= div_up(a, npme);
    comm_vol *= div_up(b, npme);
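/* Illustrative: with a 64x64x64 PME grid on npme = 5 ranks,
 * div_up(64,5) == 13, so the cost is computed from the padded per-rank
 * slab sizes, which also penalizes grids that do not divide evenly.
 */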
static float comm_cost_est(gmx_domdec_t *dd,real limit,real cutoff,
                           matrix box,gmx_ddbox_t *ddbox,
                           int natoms,t_inputrec *ir,
                           float pbcdxr,
                           int npme_tot,ivec nc)

    int i,j,k,nk,overlap;

    float comm_vol,comm_vol_xf,comm_pme,cost_pbcdx;
    /* This is the cost of a pbc_dx call relative to the cost
     * of communicating the coordinate and force of an atom.
     * This will be machine dependent.
     * These factors are for x86 with SMP or Infiniband.
     */
    float pbcdx_rect_fac = 0.1;
    float pbcdx_tric_fac = 0.2;
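    /* Illustrative (assumed interpretation): with these factors one
     * triclinic pbc_dx call is charged as 0.2 of the cost of
     * communicating an atom's coordinates and force, so cost_pbcdx
     * below is directly comparable to the comm_vol volume fraction.
     */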
    /* Check the DD algorithm restrictions */
    if ((ir->ePBC == epbcXY && ir->nwall < 2 && nc[ZZ] > 1) ||
        (ir->ePBC == epbcSCREW && (nc[XX] == 1 || nc[YY] > 1 || nc[ZZ] > 1)))

    if (inhomogeneous_z(ir) && nc[ZZ] > 1)
    assert(ddbox->npbcdim <= DIM);

    /* Check if the triclinic requirements are met */

        for(j=i+1; j<ddbox->npbcdim; j++)

            if (box[j][i] != 0 || ir->deform[j][i] != 0 ||
                (ir->epc != epcNO && ir->compress[j][i] != 0))

                if (nc[j] > 1 && nc[i] == 1)

        bt[i] = ddbox->box_size[i]*ddbox->skew_fac[i];

        /* Without PBC there are no cell size limits with 2 cells */
        if (!(i >= ddbox->npbcdim && nc[i] <= 2) && bt[i] < nc[i]*limit)
    /* The following choices should match those
     * in init_domain_decomposition in domdec.c.
     */
    if (nc[XX] == 1 && nc[YY] > 1)

    else if (nc[YY] == 1)

    /* Will we use 1D or 2D PME decomposition? */
    npme[XX] = (npme_tot % nc[XX] == 0) ? nc[XX] : npme_tot;
    npme[YY] = npme_tot/npme[XX];
    /* When two dimensions are (nearly) equal, use more cells
     * for the smallest index, so the decomposition does not
     * depend sensitively on the rounding of the box elements.
     */

        for(j=i+1; j<DIM; j++)

            /* Check if the box sizes are nearly identical;
             * in that case we prefer nx > ny and ny > nz.
             */
            if (fabs(bt[j] - bt[i]) < 0.01*bt[i] && nc[j] > nc[i])

                /* The XX/YY check is a bit compact. If nc[YY]==npme[YY]
                 * this means the swapped nc has nc[XX]==npme[XX],
                 * and we can also swap X and Y for PME.
                 */
                /* Check if dimensions i and j are equivalent for PME.
                 * For x/y: if nc[YY]!=npme[YY], we cannot swap x/y.
                 * For y/z: we cannot have PME decomposition in z.
                 */

                !((i == XX && j == YY && nc[YY] != npme[YY]) ||
                  (i == YY && j == ZZ && npme[YY] > 1)))
    /* This function determines only half of the communication cost.
     * All PP, PME and PP-PME communication is symmetric
     * and the "back"-communication cost is identical to the forward cost.
     */

    comm_vol = comm_box_frac(nc,cutoff,ddbox);

        /* Determine the largest volume for PME x/f redistribution */
        if (nc[i] % npme[i] != 0)

            comm_vol_xf = (npme[i]==2 ? 1.0/3.0 : 0.5);

            comm_vol_xf = 1.0 - lcd(nc[i],npme[i])/(double)npme[i];

        comm_pme += 3*natoms*comm_vol_xf;
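    /* Worked example (illustrative): with nc[i] = 12 and npme[i] = 4,
     * 12 % 4 == 0 and lcd(12,4) = 4, so comm_vol_xf = 1 - 4/4 = 0: the
     * PP and PME slab boundaries line up and no x/f redistribution is
     * needed. With nc[i] = 10 and npme[i] = 4 the boundaries do not
     * match and half of the coordinates (comm_vol_xf = 0.5) move.
     */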
    /* Grid overlap communication */

        nk      = (i==0 ? ir->nkx : ir->nky);
        overlap = (nk % npme[i] == 0 ? ir->pme_order-1 : ir->pme_order);

        /* Old line: comm_pme += npme[i]*overlap*ir->nkx*ir->nky*ir->nkz/nk; */
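    /* Worked example (illustrative): with nk = 64, npme[i] = 4 and
     * pme_order = 4, the grid divides evenly and each PME rank overlaps
     * pme_order-1 = 3 grid lines with its neighbor; an uneven division
     * is charged the full pme_order lines.
     */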
    /* PME FFT communication volume.
     * This takes only the communication into account, not the imbalance
     * in the calculation. But the imbalance in communication and
     * calculation is similar, so these formulas also prefer load balance
     * in the FFT and pme_solve calculation.
     */
    comm_pme += comm_pme_cost_vol(npme[YY], ir->nky, ir->nkz, ir->nkx);
    comm_pme += comm_pme_cost_vol(npme[XX], ir->nkx, ir->nky, ir->nkz);
    /* Add cost of pbc_dx for bondeds */

    if ((nc[XX] == 1 || nc[YY] == 1) || (nc[ZZ] == 1 && ir->ePBC != epbcXY))

        if ((ddbox->tric_dir[XX] && nc[XX] == 1) ||
            (ddbox->tric_dir[YY] && nc[YY] == 1))

            cost_pbcdx = pbcdxr*pbcdx_tric_fac;

            cost_pbcdx = pbcdxr*pbcdx_rect_fac;

            "nc %2d %2d %2d %2d %2d vol pp %6.4f pbcdx %6.4f pme %9.3e tot %9.3e\n",
            nc[XX],nc[YY],nc[ZZ],npme[XX],npme[YY],
            comm_vol,cost_pbcdx,comm_pme,
            3*natoms*(comm_vol + cost_pbcdx) + comm_pme);

    return 3*natoms*(comm_vol + cost_pbcdx) + comm_pme;
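/* Note (assumed interpretation): the 3*natoms factor presumably counts
 * the three reals of each atom's coordinates sent one way (the forces
 * coming back cost the same, and only half of the symmetric
 * communication is estimated here), making the PP term commensurate
 * with the grid-based comm_pme term.
 */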
static void assign_factors(gmx_domdec_t *dd,
                           real limit,real cutoff,
                           matrix box,gmx_ddbox_t *ddbox,
                           int natoms,t_inputrec *ir,
                           float pbcdxr,int npme,
                           int ndiv,int *div,int *mdiv,ivec ir_try,ivec opt)

        ce = comm_cost_est(dd,limit,cutoff,box,ddbox,
                           natoms,ir,pbcdxr,npme,ir_try);
        if (ce >= 0 && (opt[XX] == 0 ||
                        ce < comm_cost_est(dd,limit,cutoff,box,ddbox,

            copy_ivec(ir_try,opt);
    for(x=mdiv[0]; x>=0; x--)

            ir_try[XX] *= div[0];

        for(y=mdiv[0]-x; y>=0; y--)

                ir_try[YY] *= div[0];

            for(i=0; i<mdiv[0]-x-y; i++)

                ir_try[ZZ] *= div[0];

            assign_factors(dd,limit,cutoff,box,ddbox,natoms,ir,pbcdxr,npme,
                           ndiv-1,div+1,mdiv+1,ir_try,opt);

            for(i=0; i<mdiv[0]-x-y; i++)

                ir_try[ZZ] /= div[0];

                ir_try[YY] /= div[0];

            ir_try[XX] /= div[0];
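/* Worked example (illustrative): for npp = 12, factorize() returns
 * div = {2,3} and mdiv = {2,1}. The recursion above distributes the two
 * factors of 2 and the single factor of 3 over x, y and z, enumerating
 * every grid with nc[XX]*nc[YY]*nc[ZZ] == 12 (such as 12x1x1, 4x3x1 and
 * 2x2x3) and keeping the one with the lowest comm_cost_est() in opt.
 */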
static real optimize_ncells(FILE *fplog,
                            int nnodes_tot,int npme_only,
                            gmx_bool bDynLoadBal,real dlb_scale,
                            gmx_mtop_t *mtop,matrix box,gmx_ddbox_t *ddbox,

                            real cellsize_limit,real cutoff,
                            gmx_bool bInterCGBondeds,gmx_bool bInterCGMultiBody,

    int npp,npme,ndiv,*div,*mdiv,d,nmax;
    gmx_bool bExcl_pbcdx;

    limit = cellsize_limit;

    npp = nnodes_tot - npme_only;
    if (EEL_PME(ir->coulombtype))

        npme = (npme_only > 0 ? npme_only : npp);

    /* For Ewald exclusions pbc_dx is not called */

        (IR_EXCL_FORCES(*ir) && !EEL_FULL(ir->coulombtype));
    pbcdxr = (double)n_bonded_dx(mtop,bExcl_pbcdx)/(double)mtop->natoms;
    /* Every molecule is a single charge group: no pbc required */

    /* Add a margin for DLB and/or pressure scaling */

        if (dlb_scale >= 1.0)

            gmx_fatal(FARGS,"The value for option -dds should be smaller than 1");

            fprintf(fplog,"Scaling the initial minimum size with 1/%g (option -dds) = %g\n",dlb_scale,1/dlb_scale);

    else if (ir->epc != epcNO)

            fprintf(fplog,"To account for pressure scaling, scaling the initial minimum size with %g\n",DD_GRID_MARGIN_PRES_SCALE);
        limit *= DD_GRID_MARGIN_PRES_SCALE;

    fprintf(fplog,"Optimizing the DD grid for %d cells with a minimum initial size of %.3f nm\n",npp,limit);
    if (inhomogeneous_z(ir))

        fprintf(fplog,"Ewald_geometry=%s: assuming inhomogeneous particle distribution in z, will not decompose in z.\n",eewg_names[ir->ewald_geometry]);

    fprintf(fplog,"The maximum allowed number of cells is:");

        nmax = (int)(ddbox->box_size[d]*ddbox->skew_fac[d]/limit);
        if (d >= ddbox->npbcdim && nmax < 2)

        if (d == ZZ && inhomogeneous_z(ir))

        fprintf(fplog," %c %d",'X' + d,nmax);

    fprintf(debug,"Average nr of pbc_dx calls per atom %.2f\n",pbcdxr);
    /* Decompose npp into factors */
    ndiv = factorize(npp,&div,&mdiv);

    assign_factors(dd,limit,cutoff,box,ddbox,mtop->natoms,ir,pbcdxr,
                   npme,ndiv,div,mdiv,itry,nc);
real dd_choose_grid(FILE *fplog,
                    t_commrec *cr,gmx_domdec_t *dd,t_inputrec *ir,
                    gmx_mtop_t *mtop,matrix box,gmx_ddbox_t *ddbox,
                    gmx_bool bDynLoadBal,real dlb_scale,
                    real cellsize_limit,real cutoff_dd,
                    gmx_bool bInterCGBondeds,gmx_bool bInterCGMultiBody)
    gmx_large_int_t nnodes_div,ldiv;

    nnodes_div = cr->nnodes;
    if (EEL_PME(ir->coulombtype))

        if (cr->npmenodes > 0)

            "Cannot have separate PME nodes with 2 or fewer total nodes");

        if (cr->npmenodes >= cr->nnodes)

            "Cannot have %d separate PME nodes with just %d total nodes",
            cr->npmenodes, cr->nnodes);

        /* If the user purposely selected the number of PME nodes,
         * only check for large primes in the PP node count.
         */
        nnodes_div -= cr->npmenodes;
    ldiv = largest_divisor(nnodes_div);
    /* Check if the largest divisor is more than nnodes^(2/3) */
    if (ldiv*ldiv*ldiv > nnodes_div*nnodes_div)

        gmx_fatal(FARGS,"The number of nodes you selected (%d) contains a large prime factor %d. In most cases this will lead to bad performance. Choose a number with smaller prime factors or set the decomposition (option -dd) manually.",
    if (EEL_PME(ir->coulombtype))

        if (cr->npmenodes < 0)

            /* Use separate PME nodes only when the total node count is more than 18 */
            if (cr->nnodes <= 18)

                fprintf(fplog,"Using %d separate PME nodes, as there are too few total nodes for efficient splitting\n",cr->npmenodes);

            cr->npmenodes = guess_npme(fplog,mtop,ir,box,cr->nnodes);

            fprintf(fplog,"Using %d separate PME nodes, as guessed by mdrun\n",cr->npmenodes);

            fprintf(fplog,"Using %d separate PME nodes, per user request\n",cr->npmenodes);
    limit = optimize_ncells(fplog,cr->nnodes,cr->npmenodes,
                            bDynLoadBal,dlb_scale,
                            mtop,box,ddbox,ir,dd,
                            cellsize_limit,cutoff_dd,
                            bInterCGBondeds,bInterCGMultiBody,

    /* Communicate the information set by the master to all nodes */
    gmx_bcast(sizeof(dd->nc),dd->nc,cr);
    if (EEL_PME(ir->coulombtype))

        gmx_bcast(sizeof(ir->nkx),&ir->nkx,cr);
        gmx_bcast(sizeof(ir->nky),&ir->nky,cr);
        gmx_bcast(sizeof(cr->npmenodes),&cr->npmenodes,cr);