/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2011, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 * Copyright (c) 2012,2013, by the GROMACS development team, led by
 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
 * others, as listed in the AUTHORS file in the top-level source
 * directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "nbnxn_cuda_data_mgmt.h"
#include "md_logging.h"
#include "pme_loadbal.h"
/* Parameters and settings for one PP-PME setup */
typedef struct {
    real      rcut_coulomb;    /* Coulomb cut-off */
    real      rlist;           /* pair-list cut-off */
    real      rlistlong;       /* LR pair-list cut-off */
    int       nstcalclr;       /* frequency of evaluating long-range forces for group scheme */
    real      spacing;         /* (largest) PME grid spacing */
    ivec      grid;            /* the PME grid dimensions */
    real      grid_efficiency; /* inefficiency factor for non-uniform grids (<= 1) */
    real      ewaldcoeff;      /* the Ewald coefficient */
    gmx_pme_t pmedata;         /* the data structure used in the PME code */

    int       count;           /* number of times this setup has been timed */
    double    cycles;          /* the fastest time for this setup in cycles */
} pme_setup_t;
/* In the initial scan, step by grids that are at least a factor 0.8 coarser */
#define PME_LB_GRID_SCALE_FAC 0.8
/* In the initial scan, try to skip grids with uneven x/y/z spacing,
 * checking if the "efficiency" is more than 5% worse than the previous grid.
 */
#define PME_LB_GRID_EFFICIENCY_REL_FAC 1.05
/* In later stages, rerun only setups that are at most 12% slower than the fastest setup so far */
#define PME_LB_SLOW_FAC 1.12
/* If setups get more than 2% faster, do another round to avoid
 * choosing a slower setup due to acceleration or fluctuations.
 */
#define PME_LB_ACCEL_TOL 1.02
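
/* Illustrative example of how these tolerances interact (added for clarity,
 * not part of the original code): if the fastest setup timed so far took
 * 10.0 M-cycles, later stages only re-time setups whose previous best was at
 * most 10.0 * PME_LB_SLOW_FAC = 11.2 M-cycles. If a re-timed setup then comes
 * in more than 2% faster than its previous best (PME_LB_ACCEL_TOL), the
 * earlier timings are considered unreliable (e.g. DD load balancing was still
 * settling) and an extra stage is added to re-scan the remaining setups.
 */
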
enum {
    epmelblimNO, epmelblimBOX, epmelblimDD, epmelblimNR
};

const char *pmelblim_str[epmelblimNR] =
{ "no", "box size", "domain decomposition" };
struct pme_load_balancing {
    int          nstage;             /* the current maximum number of stages */

    real         cut_spacing;        /* the minimum cutoff / PME grid spacing ratio */
    real         rcut_vdw;           /* Vdw cutoff (does not change) */
    real         rcut_coulomb_start; /* Initial electrostatics cutoff */
    int          nstcalclr_start;    /* Initial frequency of evaluating long-range forces */
    real         rbuf_coulomb;       /* the Coulomb pair-list buffer size */
    real         rbuf_vdw;           /* the Vdw pair-list buffer size */
    matrix       box_start;          /* the initial simulation box */
    int          n;                  /* the number of setups, also the allocation size */
    pme_setup_t *setup;              /* the PME+cutoff setups */
    int          cur;                /* the current setup */
    int          fastest;            /* the fastest setup so far */
    int          start;              /* start of setup range to consider in stage>0 */
    int          end;                /* end of setup range to consider in stage>0 */
    int          elimited;           /* was the balancing limited, uses enum above */
    int          cutoff_scheme;      /* Verlet or group cut-offs */

    int          stage;              /* the current stage */
};
void pme_loadbal_init(pme_load_balancing_t *pme_lb_p,
                      const t_inputrec *ir, matrix box,
                      const interaction_const_t *ic,
                      gmx_pme_t pmedata)
{
    pme_load_balancing_t pme_lb;
    /* Any number of stages >= 2 is supported */

    pme_lb->cutoff_scheme = ir->cutoff_scheme;

    if (pme_lb->cutoff_scheme == ecutsVERLET)
        pme_lb->rbuf_coulomb = ic->rlist - ic->rcoulomb;
        pme_lb->rbuf_vdw     = pme_lb->rbuf_coulomb;

        if (ic->rcoulomb > ic->rlist)
            pme_lb->rbuf_coulomb = ic->rlistlong - ic->rcoulomb;

            pme_lb->rbuf_coulomb = ic->rlist - ic->rcoulomb;

        if (ic->rvdw > ic->rlist)
            pme_lb->rbuf_vdw = ic->rlistlong - ic->rvdw;

            pme_lb->rbuf_vdw = ic->rlist - ic->rvdw;

    copy_mat(box, pme_lb->box_start);
    if (ir->ePBC == epbcXY && ir->nwall == 2)
        svmul(ir->wall_ewald_zfac, pme_lb->box_start[ZZ], pme_lb->box_start[ZZ]);

    snew(pme_lb->setup, pme_lb->n);

    pme_lb->rcut_vdw           = ic->rvdw;
    pme_lb->rcut_coulomb_start = ir->rcoulomb;
    pme_lb->nstcalclr_start    = ir->nstcalclr;

    pme_lb->setup[0].rcut_coulomb = ic->rcoulomb;
    pme_lb->setup[0].rlist        = ic->rlist;
    pme_lb->setup[0].rlistlong    = ic->rlistlong;
    pme_lb->setup[0].nstcalclr    = ir->nstcalclr;
    pme_lb->setup[0].grid[XX]     = ir->nkx;
    pme_lb->setup[0].grid[YY]     = ir->nky;
    pme_lb->setup[0].grid[ZZ]     = ir->nkz;
    pme_lb->setup[0].ewaldcoeff   = ic->ewaldcoeff;

    pme_lb->setup[0].pmedata = pmedata;

    for (d = 0; d < DIM; d++)
        sp = norm(pme_lb->box_start[d])/pme_lb->setup[0].grid[d];

    pme_lb->setup[0].spacing = spm;

    if (ir->fourier_spacing > 0)
        pme_lb->cut_spacing = ir->rcoulomb/ir->fourier_spacing;

        pme_lb->cut_spacing = ir->rcoulomb/pme_lb->setup[0].spacing;

    pme_lb->elimited = epmelblimNO;
static gmx_bool pme_loadbal_increase_cutoff(pme_load_balancing_t pme_lb,

    real tmpr_coulomb, tmpr_vdw;

    /* Try to add a new setup with next larger cut-off to the list */
    srenew(pme_lb->setup, pme_lb->n);
    set = &pme_lb->setup[pme_lb->n-1];

        clear_ivec(set->grid);
        sp = calc_grid(NULL, pme_lb->box_start,
                       fac*pme_lb->setup[pme_lb->cur].spacing,

        /* In parallel we can't have grids smaller than 2*pme_order,
         * and we would anyhow not gain much speed at these grid sizes.
         */
        for (d = 0; d < DIM; d++)
            if (set->grid[d] <= 2*pme_order)

    while (sp <= 1.001*pme_lb->setup[pme_lb->cur].spacing);
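
    /* Added note (not in the original source): the next statement keeps the
     * ratio rcut_coulomb/spacing fixed at pme_lb->cut_spacing, so a coarser
     * PME grid is compensated by a longer real-space cut-off. Illustrative
     * numbers: with cut_spacing = 0.9/0.12 = 7.5, a new spacing of 0.14 nm
     * gives rcut_coulomb = 7.5 * 0.14 = 1.05 nm, shifting work from PME (FFT)
     * to the particle-particle kernels while keeping the Ewald accuracy
     * roughly constant.
     */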
    set->rcut_coulomb = pme_lb->cut_spacing*sp;

    if (pme_lb->cutoff_scheme == ecutsVERLET)
        set->rlist     = set->rcut_coulomb + pme_lb->rbuf_coulomb;
        /* We don't use long-range lists with Verlet, but this avoids if-statements in further checks */
        set->rlistlong = set->rlist;

        tmpr_coulomb   = set->rcut_coulomb + pme_lb->rbuf_coulomb;
        tmpr_vdw       = pme_lb->rcut_vdw + pme_lb->rbuf_vdw;
        set->rlist     = min(tmpr_coulomb, tmpr_vdw);
        set->rlistlong = max(tmpr_coulomb, tmpr_vdw);
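
        /* Added example (not in the original source): with the group scheme,
         * rcut_coulomb + rbuf_coulomb = 1.0 nm and rcut_vdw + rbuf_vdw = 1.2 nm
         * would give rlist = 1.0 nm and rlistlong = 1.2 nm, i.e. a twin-range
         * setup in which the interactions between rlist and rlistlong are only
         * evaluated every nstcalclr steps (set just below).
         */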
        /* Set the long-range update frequency */
        if (set->rlist == set->rlistlong)
            /* No long-range interactions if the short-/long-range cut-offs are identical */

        else if (pme_lb->nstcalclr_start == 0 || pme_lb->nstcalclr_start == 1)
            /* We were not doing long-range before, but now we are since rlist != rlistlong */

            /* We were already doing long-range interactions from the start */
            if (pme_lb->rcut_vdw > pme_lb->rcut_coulomb_start)
                /* We were originally doing long-range VdW-only interactions.
                 * If rvdw is still longer than rcoulomb we keep the original nstcalclr,
                 * but if the coulomb cutoff has become longer we should update the long-range
                 * part every step.
                 */
                set->nstcalclr = (tmpr_vdw > tmpr_coulomb) ? pme_lb->nstcalclr_start : 1;

                /* We were not doing any long-range interaction from the start,
                 * since it is not possible to do twin-range coulomb for the PME interaction.
                 */

    /* The grid efficiency is the size wrt a grid with uniform x/y/z spacing */
    set->grid_efficiency = 1;
    for (d = 0; d < DIM; d++)
        set->grid_efficiency *= (set->grid[d]*sp)/norm(pme_lb->box_start[d]);

    /* The Ewald coefficient is inversely proportional to the cut-off */
    set->ewaldcoeff =
        pme_lb->setup[0].ewaldcoeff*pme_lb->setup[0].rcut_coulomb/set->rcut_coulomb;
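
    /* Added note (not in the original source): this keeps the product
     * ewaldcoeff * rcut_coulomb constant, so the relative real-space Ewald
     * error, which behaves roughly like erfc(ewaldcoeff * rcut), is the same
     * for every setup; only the split between real-space and reciprocal-space
     * (PME) work changes. For example, ewaldcoeff = 3.12 nm^-1 at rcut = 1.0 nm
     * becomes ewaldcoeff = 3.12 * 1.0 / 1.2 = 2.6 nm^-1 at rcut = 1.2 nm.
     */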
        fprintf(debug, "PME loadbal: grid %d %d %d, coulomb cutoff %f\n",
                set->grid[XX], set->grid[YY], set->grid[ZZ], set->rcut_coulomb);
static void print_grid(FILE *fp_err, FILE *fp_log,
                       const pme_setup_t *set,

    char buf[STRLEN], buft[STRLEN];

        sprintf(buft, ": %.1f M-cycles", cycles*1e-6);

    sprintf(buf, "%-11s%10s pme grid %d %d %d, coulomb cutoff %.3f%s",
            desc, set->grid[XX], set->grid[YY], set->grid[ZZ], set->rcut_coulomb,

        fprintf(fp_err, "\r%s\n", buf);

        fprintf(fp_log, "%s\n", buf);
static int pme_loadbal_end(pme_load_balancing_t pme_lb)
{
    /* In the initial stage only n is set; end is not set yet */
static void print_loadbal_limited(FILE *fp_err, FILE *fp_log,
                                  gmx_large_int_t step,
                                  pme_load_balancing_t pme_lb)
{
    char buf[STRLEN], sbuf[22];

    sprintf(buf, "step %4s: the %s limited the PME load balancing to a coulomb cut-off of %.3f",
            gmx_step_str(step, sbuf),
            pmelblim_str[pme_lb->elimited],
            pme_lb->setup[pme_loadbal_end(pme_lb)-1].rcut_coulomb);

        fprintf(fp_err, "\r%s\n", buf);

        fprintf(fp_log, "%s\n", buf);
static void switch_to_stage1(pme_load_balancing_t pme_lb)
{
    while (pme_lb->start+1 < pme_lb->n &&
           (pme_lb->setup[pme_lb->start].count == 0 ||
            pme_lb->setup[pme_lb->start].cycles >
            pme_lb->setup[pme_lb->fastest].cycles*PME_LB_SLOW_FAC))

    while (pme_lb->start > 0 && pme_lb->setup[pme_lb->start-1].cycles == 0)

    pme_lb->end = pme_lb->n;
    if (pme_lb->setup[pme_lb->end-1].count > 0 &&
        pme_lb->setup[pme_lb->end-1].cycles >
        pme_lb->setup[pme_lb->fastest].cycles*PME_LB_SLOW_FAC)

    /* Next we want to choose setup pme_lb->start, but as we will increase
     * pme_lb->cur by one right after returning, we subtract 1 here.
     */
    pme_lb->cur = pme_lb->start - 1;
gmx_bool pme_load_balance(pme_load_balancing_t pme_lb,
                          interaction_const_t *ic,
                          nonbonded_verlet_t *nbv,
                          gmx_large_int_t step)
{
    char     buf[STRLEN], sbuf[22];
    gmx_bool bUsesSimpleTables = TRUE;

    if (pme_lb->stage == pme_lb->nstage)

    gmx_sumd(1, &cycles, cr);
    cycles /= cr->nnodes;

    set = &pme_lb->setup[pme_lb->cur];

    rtab = ir->rlistlong + ir->tabext;

    if (set->count % 2 == 1)
        /* Skip the first cycle, because the first step after a switch
         * is much slower due to allocation and/or caching effects.
         */

        sprintf(buf, "step %4s: ", gmx_step_str(step, sbuf));
        print_grid(fp_err, fp_log, buf, "timed with", set, cycles);

    set->cycles = cycles;

    if (cycles*PME_LB_ACCEL_TOL < set->cycles &&
        pme_lb->stage == pme_lb->nstage - 1)
        /* The performance went up a lot (due to e.g. DD load balancing).
         * Add a stage, keep the minima, but rescan all setups.
         */

        fprintf(debug, "The performance for grid %d %d %d went from %.3f to %.1f M-cycles, this is more than %f\n"
                "Increased the number of stages to %d"
                " and ignoring the previous performance\n",
                set->grid[XX], set->grid[YY], set->grid[ZZ],
                cycles*1e-6, set->cycles*1e-6, PME_LB_ACCEL_TOL,

    set->cycles = min(set->cycles, cycles);
    if (set->cycles < pme_lb->setup[pme_lb->fastest].cycles)
        pme_lb->fastest = pme_lb->cur;

        if (DOMAINDECOMP(cr))
            /* We found a new fastest setting, ensure that with subsequent
             * shorter cut-off's the dynamic load balancing does not make
             * the use of the current cut-off impossible. This solution is
             * a trade-off, as the PME load balancing and DD domain size
             * load balancing can interact in complex ways.
             * With the Verlet kernels, DD load imbalance will usually be
             * mainly due to bonded interaction imbalance, which will often
             * quickly push the domain boundaries beyond the limit for the
             * optimal, PME load balanced, cut-off. But it could be that
             * better overall performance can be obtained with a slightly
             * shorter cut-off and better DD load balancing.
             */
            change_dd_dlb_cutoff_limit(cr);

    cycles_fast = pme_lb->setup[pme_lb->fastest].cycles;

    /* Check in stage 0 if we should stop scanning grids.
     * Stop when the time is more than a factor PME_LB_SLOW_FAC longer than for the fastest.
     */
    if (pme_lb->stage == 0 && pme_lb->cur > 0 &&
        cycles > pme_lb->setup[pme_lb->fastest].cycles*PME_LB_SLOW_FAC)
        pme_lb->n = pme_lb->cur + 1;
        /* Done with scanning, go to stage 1 */
        switch_to_stage1(pme_lb);
    if (pme_lb->stage == 0)
        gridsize_start = set->grid[XX]*set->grid[YY]*set->grid[ZZ];

            if (pme_lb->cur+1 < pme_lb->n)
                /* We had already generated the next setup */

                /* Find the next setup */
                OK = pme_loadbal_increase_cutoff(pme_lb, ir->pme_order);

            if (OK && ir->ePBC != epbcNONE)
                OK = (sqr(pme_lb->setup[pme_lb->cur+1].rlistlong)
                      <= max_cutoff2(ir->ePBC, state->box));

                    pme_lb->elimited = epmelblimBOX;

                if (DOMAINDECOMP(cr))
                    OK = change_dd_cutoff(cr, state, ir,
                                          pme_lb->setup[pme_lb->cur].rlistlong);

                        /* Failed: do not use this setup */
                        pme_lb->elimited = epmelblimDD;

                /* We hit the upper limit for the cut-off,
                 * the setup should not go further than cur.
                 */
                pme_lb->n = pme_lb->cur + 1;
                print_loadbal_limited(fp_err, fp_log, step, pme_lb);
                /* Switch to the next stage */
                switch_to_stage1(pme_lb);

        while (OK &&
               !(pme_lb->setup[pme_lb->cur].grid[XX]*
                 pme_lb->setup[pme_lb->cur].grid[YY]*
                 pme_lb->setup[pme_lb->cur].grid[ZZ] <
                 gridsize_start*PME_LB_GRID_SCALE_FAC
                 &&
                 pme_lb->setup[pme_lb->cur].grid_efficiency <
                 pme_lb->setup[pme_lb->cur-1].grid_efficiency*PME_LB_GRID_EFFICIENCY_REL_FAC));
    if (pme_lb->stage > 0 && pme_lb->end == 1)
        pme_lb->stage = pme_lb->nstage;

    else if (pme_lb->stage > 0 && pme_lb->end > 1)
        /* If stage = nstage-1:
         *   scan over all setups, rerunning only those setups
         *   which are not much slower than the fastest
         */

            if (pme_lb->cur == pme_lb->end)
                pme_lb->cur = pme_lb->start;

        while (pme_lb->stage == pme_lb->nstage - 1 &&
               pme_lb->setup[pme_lb->cur].count > 0 &&
               pme_lb->setup[pme_lb->cur].cycles > cycles_fast*PME_LB_SLOW_FAC);

    if (pme_lb->stage == pme_lb->nstage)
        /* We are done optimizing, use the fastest setup we found */
        pme_lb->cur = pme_lb->fastest;

    if (DOMAINDECOMP(cr) && pme_lb->stage > 0)
        OK = change_dd_cutoff(cr, state, ir, pme_lb->setup[pme_lb->cur].rlistlong);

            /* Failsafe solution */
            if (pme_lb->cur > 1 && pme_lb->stage == pme_lb->nstage)

            pme_lb->end      = pme_lb->cur;
            pme_lb->cur      = pme_lb->start;
            pme_lb->elimited = epmelblimDD;
            print_loadbal_limited(fp_err, fp_log, step, pme_lb);
    /* Change the Coulomb cut-off and the PME grid */

    set = &pme_lb->setup[pme_lb->cur];

    ic->rcoulomb   = set->rcut_coulomb;
    ic->rlist      = set->rlist;
    ic->rlistlong  = set->rlistlong;
    ir->nstcalclr  = set->nstcalclr;
    ic->ewaldcoeff = set->ewaldcoeff;

    bUsesSimpleTables = uses_simple_tables(ir->cutoff_scheme, nbv, 0);
    if (pme_lb->cutoff_scheme == ecutsVERLET &&
        nbv->grp[0].kernel_type == nbnxnk8x8x8_CUDA)
        nbnxn_cuda_pme_loadbal_update_param(nbv->cu_nbv, ic);

        init_interaction_const_tables(NULL, ic, bUsesSimpleTables,

    if (pme_lb->cutoff_scheme == ecutsVERLET && nbv->ngrp > 1)
        init_interaction_const_tables(NULL, ic, bUsesSimpleTables,

    if (cr->duty & DUTY_PME)
        if (pme_lb->setup[pme_lb->cur].pmedata == NULL)
            /* Generate a new PME data structure,
             * copying part of the old pointers.
             */
            gmx_pme_reinit(&set->pmedata,
                           cr, pme_lb->setup[0].pmedata, ir,

            *pmedata = set->pmedata;

        /* Tell our PME-only node to switch grid */
        gmx_pme_send_switchgrid(cr, set->grid, set->ewaldcoeff);

        print_grid(NULL, debug, "", "switched to", set, -1);

    if (pme_lb->stage == pme_lb->nstage)
        print_grid(fp_err, fp_log, "", "optimal", set, -1);
void restart_pme_loadbal(pme_load_balancing_t pme_lb, int n)

static int pme_grid_points(const pme_setup_t *setup)
{
    return setup->grid[XX]*setup->grid[YY]*setup->grid[ZZ];
}
static real pme_loadbal_rlist(const pme_setup_t *setup)
{
    /* With the group cut-off scheme we can have twin-range either
     * for Coulomb or for VdW, so we need a check here.
     * With the Verlet cut-off scheme rlist=rlistlong.
     */
    if (setup->rcut_coulomb > setup->rlist)
        return setup->rlistlong;
static void print_pme_loadbal_setting(FILE *fplog,
                                      const pme_setup_t *setup)

            " %-7s %6.3f nm %6.3f nm %3d %3d %3d %5.3f nm %5.3f nm\n",
            setup->rcut_coulomb, pme_loadbal_rlist(setup),
            setup->grid[XX], setup->grid[YY], setup->grid[ZZ],
            setup->spacing, 1/setup->ewaldcoeff);
static void print_pme_loadbal_settings(pme_load_balancing_t pme_lb,
                                       gmx_bool bNonBondedOnGPU)

    double pp_ratio, grid_ratio;

    pp_ratio   = pow(pme_loadbal_rlist(&pme_lb->setup[pme_lb->cur])/pme_loadbal_rlist(&pme_lb->setup[0]), 3.0);
    grid_ratio = pme_grid_points(&pme_lb->setup[pme_lb->cur])/
        (double)pme_grid_points(&pme_lb->setup[0]);
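
    /* Added note (not in the original source): pp_ratio is the cube of the
     * pair-list cut-off ratio, i.e. roughly how much the particle-particle
     * (pair) workload grew, while grid_ratio is the change in the number of
     * PME grid points, i.e. roughly how much the FFT/spread/gather workload
     * shrank. For example, going from rlist 1.0 nm to 1.1 nm gives
     * pp_ratio 1.33, and a grid change from 64x64x64 to 56x56x56 gives
     * grid_ratio 0.67.
     */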
    fprintf(fplog, "\n");
    fprintf(fplog, " P P - P M E   L O A D   B A L A N C I N G\n");
    fprintf(fplog, "\n");
    /* Here we only warn when the optimal setting is the last one */
    if (pme_lb->elimited != epmelblimNO &&
        pme_lb->cur == pme_loadbal_end(pme_lb)-1)
        fprintf(fplog, " NOTE: The PP/PME load balancing was limited by the %s,\n",
                pmelblim_str[pme_lb->elimited]);
        fprintf(fplog, " you might not have reached a good load balance.\n");
        if (pme_lb->elimited == epmelblimDD)
            fprintf(fplog, " Try different mdrun -dd settings or lower the -dds value.\n");

        fprintf(fplog, "\n");

    fprintf(fplog, " PP/PME load balancing changed the cut-off and PME settings:\n");
    fprintf(fplog, "           particle-particle                    PME\n");
    fprintf(fplog, "            rcoulomb  rlist       grid      spacing   1/beta\n");
    print_pme_loadbal_setting(fplog, "initial", &pme_lb->setup[0]);
    print_pme_loadbal_setting(fplog, "final", &pme_lb->setup[pme_lb->cur]);
    fprintf(fplog, " cost-ratio           %4.2f             %4.2f\n",
            pp_ratio, grid_ratio);
    fprintf(fplog, " (note that these numbers concern only part of the total PP and PME load)\n");

    if (pp_ratio > 1.5 && !bNonBondedOnGPU)
        md_print_warn(cr, fplog,
                      "NOTE: PME load balancing increased the non-bonded workload by more than 50%%.\n"
                      "      For better performance use (more) PME nodes (mdrun -npme),\n"
                      "      or in case you are beyond the scaling limit, use fewer nodes in total.\n");

    fprintf(fplog, "\n");
void pme_loadbal_done(pme_load_balancing_t pme_lb,
                      t_commrec *cr, FILE *fplog,
                      gmx_bool bNonBondedOnGPU)
{
    if (fplog != NULL && (pme_lb->cur > 0 || pme_lb->elimited != epmelblimNO))
        print_pme_loadbal_settings(pme_lb, cr, fplog, bNonBondedOnGPU);

    /* TODO: Here we should free all pointers in pme_lb,
     * but as it contains pme data structures,
     * we need to first make pme.c free all data.
     */
}