/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gromacs/legacyheaders/types/commrec.h"
#include "gromacs/legacyheaders/network.h"
#include "gromacs/legacyheaders/calcgrid.h"
#include "gromacs/legacyheaders/pme.h"
#include "gromacs/legacyheaders/domdec.h"
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_data_mgmt.h"
#include "gromacs/legacyheaders/force.h"
#include "gromacs/legacyheaders/macros.h"
#include "gromacs/legacyheaders/md_logging.h"
#include "pme_loadbal.h"

#include "gromacs/math/vec.h"
#include "gromacs/legacyheaders/sim_util.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/smalloc.h"
/* Parameters and settings for one PP-PME setup */
typedef struct {
    real      rcut_coulomb;    /* Coulomb cut-off                                 */
    real      rlist;           /* pair-list cut-off                               */
    real      rlistlong;       /* LR pair-list cut-off                            */
    int       nstcalclr;       /* frequency of evaluating long-range forces for group scheme */
    real      spacing;         /* (largest) PME grid spacing                      */
    ivec      grid;            /* the PME grid dimensions                         */
    real      grid_efficiency; /* inefficiency factor for non-uniform grids <= 1  */
    real      ewaldcoeff_q;    /* Electrostatic Ewald coefficient                 */
    real      ewaldcoeff_lj;   /* LJ Ewald coefficient, only for the call to send_switchgrid */
    gmx_pme_t pmedata;         /* the data structure used in the PME code         */
    int       count;           /* number of times this setup has been timed       */
    double    cycles;          /* the fastest time for this setup in cycles       */
} pme_setup_t;
/* In the initial scan, step by grids that are at least a factor 0.8 coarser */
#define PME_LB_GRID_SCALE_FAC          0.8
/* In the initial scan, try to skip grids with uneven x/y/z spacing,
 * checking if the "efficiency" is more than 5% worse than the previous grid.
 */
#define PME_LB_GRID_EFFICIENCY_REL_FAC 1.05
/* In later stages, re-run setups that are at most 12% slower than the fastest so far */
#define PME_LB_SLOW_FAC                1.12
/* If setups get more than 2% faster, do another round to avoid
 * choosing a slower setup due to acceleration or fluctuations.
 */
#define PME_LB_ACCEL_TOL               1.02
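
/* Worked example of how the constants above interact (illustrative
 * numbers, not from any run): if the fastest setup so far took 10.0
 * M-cycles, stage 0 stops scanning as soon as a setup exceeds
 * 10.0*PME_LB_SLOW_FAC = 11.2 M-cycles, and later stages only re-time
 * setups within that 11.2 M-cycle bound. If a re-timed setup then drops
 * to 9.7 M-cycles, 9.7*PME_LB_ACCEL_TOL = 9.894 < 10.0, so an extra
 * stage is added and the surviving setups are scanned again.
 */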
enum {
    epmelblimNO, epmelblimBOX, epmelblimDD, epmelblimPMEGRID, epmelblimNR
};

const char *pmelblim_str[epmelblimNR] =
{ "no", "box size", "domain decomposition", "PME grid restriction" };
struct pme_load_balancing {
    int          nstage;             /* the current maximum number of stages */

    real         cut_spacing;        /* the minimum cutoff / PME grid spacing ratio */
    real         rcut_vdw;           /* Vdw cutoff (does not change) */
    real         rcut_coulomb_start; /* Initial electrostatics cutoff */
    int          nstcalclr_start;    /* Initial long-range force evaluation frequency */
    real         rbuf_coulomb;       /* the Coulomb pairlist buffer size */
    real         rbuf_vdw;           /* the VdW pairlist buffer size */
    matrix       box_start;          /* the initial simulation box */
    int          n;                  /* the count of setups as well as the allocation size */
    pme_setup_t *setup;              /* the PME+cutoff setups */
    int          cur;                /* the current setup */
    int          fastest;            /* fastest setup up till now */
    int          start;              /* start of setup range to consider in stage>0 */
    int          end;                /* end of setup range to consider in stage>0 */
    int          elimited;           /* was the balancing limited, uses enum above */
    int          cutoff_scheme;      /* Verlet or group cut-offs */

    int          stage;              /* the current stage */
};
void pme_loadbal_init(pme_load_balancing_t *pme_lb_p,
                      const t_inputrec *ir, matrix box,
                      const interaction_const_t *ic,
                      gmx_pme_t pmedata)
{
    pme_load_balancing_t pme_lb;
    real                 spm, sp;
    int                  d;

    snew(pme_lb, 1);

    /* Any number of stages >= 2 is supported */
    pme_lb->nstage = 2;

    pme_lb->cutoff_scheme = ir->cutoff_scheme;

    if (pme_lb->cutoff_scheme == ecutsVERLET)
    {
        pme_lb->rbuf_coulomb = ic->rlist - ic->rcoulomb;
        pme_lb->rbuf_vdw     = pme_lb->rbuf_coulomb;
    }
    else
    {
        if (ic->rcoulomb > ic->rlist)
        {
            pme_lb->rbuf_coulomb = ic->rlistlong - ic->rcoulomb;
        }
        else
        {
            pme_lb->rbuf_coulomb = ic->rlist - ic->rcoulomb;
        }
        if (ic->rvdw > ic->rlist)
        {
            pme_lb->rbuf_vdw = ic->rlistlong - ic->rvdw;
        }
        else
        {
            pme_lb->rbuf_vdw = ic->rlist - ic->rvdw;
        }
    }

    copy_mat(box, pme_lb->box_start);
    if (ir->ePBC == epbcXY && ir->nwall == 2)
    {
        svmul(ir->wall_ewald_zfac, pme_lb->box_start[ZZ], pme_lb->box_start[ZZ]);
    }

    pme_lb->n = 1;
    snew(pme_lb->setup, pme_lb->n);

    pme_lb->rcut_vdw           = ic->rvdw;
    pme_lb->rcut_coulomb_start = ir->rcoulomb;
    pme_lb->nstcalclr_start    = ir->nstcalclr;

    pme_lb->cur = 0;
    pme_lb->setup[0].rcut_coulomb  = ic->rcoulomb;
    pme_lb->setup[0].rlist         = ic->rlist;
    pme_lb->setup[0].rlistlong     = ic->rlistlong;
    pme_lb->setup[0].nstcalclr     = ir->nstcalclr;
    pme_lb->setup[0].grid[XX]      = ir->nkx;
    pme_lb->setup[0].grid[YY]      = ir->nky;
    pme_lb->setup[0].grid[ZZ]      = ir->nkz;
    pme_lb->setup[0].ewaldcoeff_q  = ic->ewaldcoeff_q;
    pme_lb->setup[0].ewaldcoeff_lj = ic->ewaldcoeff_lj;

    pme_lb->setup[0].pmedata = pmedata;

    spm = 0;
    for (d = 0; d < DIM; d++)
    {
        sp = norm(pme_lb->box_start[d])/pme_lb->setup[0].grid[d];
        if (sp > spm)
        {
            spm = sp;
        }
    }
    pme_lb->setup[0].spacing = spm;

    if (ir->fourier_spacing > 0)
    {
        pme_lb->cut_spacing = ir->rcoulomb/ir->fourier_spacing;
    }
    else
    {
        pme_lb->cut_spacing = ir->rcoulomb/pme_lb->setup[0].spacing;
    }

    pme_lb->stage = 0;

    pme_lb->fastest  = 0;
    pme_lb->start    = 0;
    pme_lb->end      = 0;
    pme_lb->elimited = epmelblimNO;

    *pme_lb_p = pme_lb;
}
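
/* A hedged worked example of the cut_spacing ratio set above (values
 * assumed for illustration): with rcoulomb = 1.0 nm and
 * fourier_spacing = 0.12 nm, cut_spacing = 1.0/0.12 ~= 8.33. Every setup
 * generated below keeps rcut_coulomb = cut_spacing*spacing, so the
 * cut-off and the grid spacing are always scaled together and the
 * real-space/reciprocal-space error balance of the initial setup is
 * preserved.
 */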
static gmx_bool pme_loadbal_increase_cutoff(pme_load_balancing_t pme_lb,
                                            int                  pme_order,
                                            const gmx_domdec_t  *dd)
{
    pme_setup_t *set;
    int          npmenodes_x, npmenodes_y;
    real         fac, sp;
    real         tmpr_coulomb, tmpr_vdw;
    int          d;
    gmx_bool     grid_ok;

    /* Try to add a new setup with next larger cut-off to the list */
    pme_lb->n++;
    srenew(pme_lb->setup, pme_lb->n);
    set          = &pme_lb->setup[pme_lb->n-1];
    set->pmedata = NULL;

    get_pme_nnodes(dd, &npmenodes_x, &npmenodes_y);

    fac = 1;
    do
    {
        /* Avoid infinite while loop, which can occur at the minimum grid size.
         * Note that in practice load balancing will stop before this point.
         * The factor 2.1 allows for the extreme case in which only grids
         * of powers of 2 are allowed (the current code supports more grids).
         */
        if (fac > 2.1)
        {
            /* There is no larger grid that satisfies the restrictions */
            pme_lb->n--;

            return FALSE;
        }

        fac *= 1.01;
        clear_ivec(set->grid);
        sp = calc_grid(NULL, pme_lb->box_start,
                       fac*pme_lb->setup[pme_lb->cur].spacing,
                       &set->grid[XX],
                       &set->grid[YY],
                       &set->grid[ZZ]);

        /* As here we can't easily check if one of the PME nodes
         * uses threading, we do a conservative grid check.
         * This means we can't use pme_order or less grid lines
         * per PME node along x, which is not a strong restriction.
         */
        gmx_pme_check_restrictions(pme_order,
                                   set->grid[XX], set->grid[YY], set->grid[ZZ],
                                   npmenodes_x, npmenodes_y,
                                   TRUE,
                                   FALSE,
                                   &grid_ok);
    }
    while (sp <= 1.001*pme_lb->setup[pme_lb->cur].spacing || !grid_ok);
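
    /* Illustration of the loop above (numbers assumed): starting from a
     * spacing of 0.117 nm, fac grows in 1% steps until calc_grid can
     * realize fac*spacing with a valid FFT grid, e.g. 60x60x60 instead
     * of 64x64x64. At that point sp exceeds the current spacing by more
     * than 0.1% and, if gmx_pme_check_restrictions accepts the grid, the
     * loop exits with this coarser setup.
     */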
    set->rcut_coulomb = pme_lb->cut_spacing*sp;

    if (pme_lb->cutoff_scheme == ecutsVERLET)
    {
        set->rlist     = set->rcut_coulomb + pme_lb->rbuf_coulomb;
        /* We don't use LR lists with Verlet, but this avoids if-statements in further checks */
        set->rlistlong = set->rlist;
    }
    else
    {
        tmpr_coulomb   = set->rcut_coulomb + pme_lb->rbuf_coulomb;
        tmpr_vdw       = pme_lb->rcut_vdw + pme_lb->rbuf_vdw;
        set->rlist     = min(tmpr_coulomb, tmpr_vdw);
        set->rlistlong = max(tmpr_coulomb, tmpr_vdw);

        /* Set the long-range update frequency */
        if (set->rlist == set->rlistlong)
        {
            /* No long-range interactions if the short-/long-range cutoffs are identical */
            set->nstcalclr = 0;
        }
        else if (pme_lb->nstcalclr_start == 0 || pme_lb->nstcalclr_start == 1)
        {
            /* We were not doing long-range before, but now we are since rlist!=rlistlong */
            set->nstcalclr = 1;
        }
        else
        {
            /* We were already doing long-range interactions from the start */
            if (pme_lb->rcut_vdw > pme_lb->rcut_coulomb_start)
            {
                /* We were originally doing long-range VdW-only interactions.
                 * If rvdw is still longer than rcoulomb we keep the original nstcalclr,
                 * but if the coulomb cutoff has become longer we should update the
                 * long-range part every step.
                 */
                set->nstcalclr = (tmpr_vdw > tmpr_coulomb) ? pme_lb->nstcalclr_start : 1;
            }
            else
            {
                /* We were not doing any long-range interaction from the start,
                 * since it is not possible to do twin-range coulomb for the PME interaction.
                 */
                set->nstcalclr = 1;
            }
        }
    }

    set->spacing         = sp;
    set->count           = 0;
    set->cycles          = 0;
    /* The grid efficiency is the size wrt a grid with uniform x/y/z spacing */
    set->grid_efficiency = 1;
    for (d = 0; d < DIM; d++)
    {
        set->grid_efficiency *= (set->grid[d]*sp)/norm(pme_lb->box_start[d]);
    }
    /* The Ewald coefficient is inversely proportional to the cut-off */
    set->ewaldcoeff_q =
        pme_lb->setup[0].ewaldcoeff_q*pme_lb->setup[0].rcut_coulomb/set->rcut_coulomb;
    /* We set ewaldcoeff_lj in set, even when LJ-PME is not used */
    set->ewaldcoeff_lj =
        pme_lb->setup[0].ewaldcoeff_lj*pme_lb->setup[0].rcut_coulomb/set->rcut_coulomb;
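
    /* Note on the scaling above: it keeps ewaldcoeff_q*rcut_coulomb
     * constant. The real-space Coulomb term decays as erfc(beta*r)/r, so
     * a constant beta*rc keeps the relative truncation error at the
     * cut-off at the value implied by the original ewald_rtol. For
     * example (illustrative numbers), going from rc = 0.9 nm to 1.2 nm
     * scales beta by 0.75, shifting load from the PME mesh to the
     * real-space kernels, which is exactly the trade-off being balanced.
     */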

    if (debug)
    {
        fprintf(debug, "PME loadbal: grid %d %d %d, coulomb cutoff %f\n",
                set->grid[XX], set->grid[YY], set->grid[ZZ], set->rcut_coulomb);
    }

    return TRUE;
}
static void print_grid(FILE *fp_err, FILE *fp_log,
                       const char *pre,
                       const char *desc,
                       const pme_setup_t *set,
                       double cycles)
{
    char buf[STRLEN], buft[STRLEN];

    if (cycles >= 0)
    {
        sprintf(buft, ": %.1f M-cycles", cycles*1e-6);
    }
    else
    {
        buft[0] = '\0';
    }
    sprintf(buf, "%-11s%10s pme grid %d %d %d, coulomb cutoff %.3f%s",
            pre,
            desc, set->grid[XX], set->grid[YY], set->grid[ZZ], set->rcut_coulomb,
            buft);
    if (fp_err != NULL)
    {
        fprintf(fp_err, "\r%s\n", buf);
    }
    if (fp_log != NULL)
    {
        fprintf(fp_log, "%s\n", buf);
    }
}
static int pme_loadbal_end(pme_load_balancing_t pme_lb)
{
    /* In the initial stage only n is set; end is not set yet */
    return (pme_lb->end > 0) ? pme_lb->end : pme_lb->n;
}
static void print_loadbal_limited(FILE *fp_err, FILE *fp_log,
                                  gmx_int64_t step,
                                  pme_load_balancing_t pme_lb)
{
    char buf[STRLEN], sbuf[22];

    sprintf(buf, "step %4s: the %s limits the PME load balancing to a coulomb cut-off of %.3f",
            gmx_step_str(step, sbuf),
            pmelblim_str[pme_lb->elimited],
            pme_lb->setup[pme_loadbal_end(pme_lb)-1].rcut_coulomb);
    if (fp_err != NULL)
    {
        fprintf(fp_err, "\r%s\n", buf);
    }
    if (fp_log != NULL)
    {
        fprintf(fp_log, "%s\n", buf);
    }
}
static void switch_to_stage1(pme_load_balancing_t pme_lb)
{
    /* Increase start until we find a setup that is not slower than
     * maximally PME_LB_SLOW_FAC times the fastest setup.
     */
    pme_lb->start = 0;
    while (pme_lb->start+1 < pme_lb->n &&
           (pme_lb->setup[pme_lb->start].count == 0 ||
            pme_lb->setup[pme_lb->start].cycles >
            pme_lb->setup[pme_lb->fastest].cycles*PME_LB_SLOW_FAC))
    {
        pme_lb->start++;
    }
    while (pme_lb->start > 0 && pme_lb->setup[pme_lb->start-1].cycles == 0)
    {
        pme_lb->start--;
    }

    pme_lb->end = pme_lb->n;
    if (pme_lb->setup[pme_lb->end-1].count > 0 &&
        pme_lb->setup[pme_lb->end-1].cycles >
        pme_lb->setup[pme_lb->fastest].cycles*PME_LB_SLOW_FAC)
    {
        pme_lb->end--;
    }

    pme_lb->stage = 1;

    /* Next we want to choose setup pme_lb->start, but as we will increase
     * pme_lb->cur by one right after returning, we subtract 1 here.
     */
    pme_lb->cur = pme_lb->start - 1;
}
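
/* Overview of the stage machine implemented by switch_to_stage1() and
 * pme_load_balance() below (a summary of this file's logic, not new
 * behavior):
 *   stage 0:           generate setups with increasing cut-off and time
 *                      each one, until a setup is PME_LB_SLOW_FAC slower
 *                      than the fastest or a box/DD/PME-grid limit is hit;
 *   stage 1..nstage-1: cycle over the surviving [start,end) range,
 *                      re-timing setups and keeping per-setup minima;
 *   stage == nstage:   done balancing; the fastest setup is selected and
 *                      used for the rest of the run.
 */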
gmx_bool pme_load_balance(pme_load_balancing_t pme_lb,
                          t_commrec           *cr,
                          FILE                *fp_err,
                          FILE                *fp_log,
                          t_inputrec          *ir,
                          t_state             *state,
                          double               cycles,
                          interaction_const_t *ic,
                          struct nonbonded_verlet_t *nbv,
                          gmx_pme_t           *pmedata,
                          gmx_int64_t          step)
{
    gmx_bool     OK;
    pme_setup_t *set;
    double       cycles_fast;
    char         buf[STRLEN], sbuf[22];
    real         rtab;
    gmx_bool     bUsesSimpleTables = TRUE;

    if (pme_lb->stage == pme_lb->nstage)
    {
        return FALSE;
    }

    if (PAR(cr))
    {
        gmx_sumd(1, &cycles, cr);
        cycles /= cr->nnodes;
    }

    set = &pme_lb->setup[pme_lb->cur];
    set->count++;

    rtab = ir->rlistlong + ir->tabext;

    if (set->count % 2 == 1)
    {
        /* Skip the first cycle, because the first step after a switch
         * is much slower due to allocation and/or caching effects.
         */
        return TRUE;
    }

    sprintf(buf, "step %4s: ", gmx_step_str(step, sbuf));
    print_grid(fp_err, fp_log, buf, "timed with", set, cycles);

    if (set->count <= 2)
    {
        set->cycles = cycles;
    }
    else
    {
        if (cycles*PME_LB_ACCEL_TOL < set->cycles &&
            pme_lb->stage == pme_lb->nstage - 1)
        {
            /* The performance went up a lot (due to e.g. DD load balancing).
             * Add a stage, keep the minima, but rescan all setups.
             */
            pme_lb->nstage++;

            if (debug)
            {
                fprintf(debug, "The performance for grid %d %d %d went from %.3f to %.1f M-cycles, this is more than %f\n"
                        "Increased the number of stages to %d"
                        " and ignoring the previous performance\n",
                        set->grid[XX], set->grid[YY], set->grid[ZZ],
                        cycles*1e-6, set->cycles*1e-6, PME_LB_ACCEL_TOL,
                        pme_lb->nstage);
            }
        }
        set->cycles = min(set->cycles, cycles);
    }

    if (set->cycles < pme_lb->setup[pme_lb->fastest].cycles)
    {
        pme_lb->fastest = pme_lb->cur;

        if (DOMAINDECOMP(cr))
        {
            /* We found a new fastest setting, ensure that with subsequent
             * shorter cut-off's the dynamic load balancing does not make
             * the use of the current cut-off impossible. This solution is
             * a trade-off, as the PME load balancing and DD domain size
             * load balancing can interact in complex ways.
             * With the Verlet kernels, DD load imbalance will usually be
             * mainly due to bonded interaction imbalance, which will often
             * quickly push the domain boundaries beyond the limit for the
             * optimal, PME load balanced, cut-off. But it could be that
             * better overall performance can be obtained with a slightly
             * shorter cut-off and better DD load balancing.
             */
            change_dd_dlb_cutoff_limit(cr);
        }
    }
    cycles_fast = pme_lb->setup[pme_lb->fastest].cycles;

    /* Check in stage 0 if we should stop scanning grids.
     * Stop when the time is more than SLOW_FAC longer than the fastest.
     */
    if (pme_lb->stage == 0 && pme_lb->cur > 0 &&
        cycles > pme_lb->setup[pme_lb->fastest].cycles*PME_LB_SLOW_FAC)
    {
        pme_lb->n = pme_lb->cur + 1;
        /* Done with scanning, go to stage 1 */
        switch_to_stage1(pme_lb);
    }

    if (pme_lb->stage == 0)
    {
        int gridsize_start;

        gridsize_start = set->grid[XX]*set->grid[YY]*set->grid[ZZ];

        do
        {
            if (pme_lb->cur+1 < pme_lb->n)
            {
                /* We had already generated the next setup */
                OK = TRUE;
            }
            else
            {
                /* Find the next setup */
                OK = pme_loadbal_increase_cutoff(pme_lb, ir->pme_order, cr->dd);

                if (!OK)
                {
                    pme_lb->elimited = epmelblimPMEGRID;
                }
            }

            if (OK && ir->ePBC != epbcNONE)
            {
                OK = (sqr(pme_lb->setup[pme_lb->cur+1].rlistlong)
                      <= max_cutoff2(ir->ePBC, state->box));
                if (!OK)
                {
                    pme_lb->elimited = epmelblimBOX;
                }
            }

            if (OK)
            {
                pme_lb->cur++;

                if (DOMAINDECOMP(cr))
                {
                    OK = change_dd_cutoff(cr, state, ir,
                                          pme_lb->setup[pme_lb->cur].rlistlong);
                    if (!OK)
                    {
                        /* Failed: do not use this setup */
                        pme_lb->cur--;
                        pme_lb->elimited = epmelblimDD;
                    }
                }
            }
            if (!OK)
            {
                /* We hit the upper limit for the cut-off,
                 * the setup should not go further than cur.
                 */
                pme_lb->n = pme_lb->cur + 1;
                print_loadbal_limited(fp_err, fp_log, step, pme_lb);
                /* Switch to the next stage */
                switch_to_stage1(pme_lb);
            }
        }
        while (OK &&
               !(pme_lb->setup[pme_lb->cur].grid[XX]*
                 pme_lb->setup[pme_lb->cur].grid[YY]*
                 pme_lb->setup[pme_lb->cur].grid[ZZ] <
                 gridsize_start*PME_LB_GRID_SCALE_FAC
                 &&
                 pme_lb->setup[pme_lb->cur].grid_efficiency <
                 pme_lb->setup[pme_lb->cur-1].grid_efficiency*PME_LB_GRID_EFFICIENCY_REL_FAC));
    }

    if (pme_lb->stage > 0 && pme_lb->end == 1)
    {
        pme_lb->cur   = 0;
        pme_lb->stage = pme_lb->nstage;
    }
    else if (pme_lb->stage > 0 && pme_lb->end > 1)
    {
        /* If stage = nstage-1:
         *   scan over all setups, rerunning only those setups
         *   which are not much slower than the fastest
         * else:
         *   use the next setup
         */
        do
        {
            pme_lb->cur++;
            if (pme_lb->cur == pme_lb->end)
            {
                pme_lb->stage++;
                pme_lb->cur = pme_lb->start;
            }
        }
        while (pme_lb->stage == pme_lb->nstage - 1 &&
               pme_lb->setup[pme_lb->cur].count > 0 &&
               pme_lb->setup[pme_lb->cur].cycles > cycles_fast*PME_LB_SLOW_FAC);
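
        /* Illustrative trace of the loop above (values assumed): with
         * start = 2, end = 6, nstage = 2 and stage = 1 on entry, cur
         * advances 3, 4, 5, skipping in this final stage any setup that
         * was already timed and is more than PME_LB_SLOW_FAC slower than
         * cycles_fast; when cur reaches end it wraps back to start and
         * stage becomes nstage, after which the fastest setup is chosen
         * below.
         */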
        if (pme_lb->stage == pme_lb->nstage)
        {
            /* We are done optimizing, use the fastest setup we found */
            pme_lb->cur = pme_lb->fastest;
        }
    }

    if (DOMAINDECOMP(cr) && pme_lb->stage > 0)
    {
        OK = change_dd_cutoff(cr, state, ir, pme_lb->setup[pme_lb->cur].rlistlong);
        if (!OK)
        {
            /* Failsafe solution */
            if (pme_lb->cur > 1 && pme_lb->stage == pme_lb->nstage)
            {
                pme_lb->stage--;
            }
            pme_lb->fastest  = 0;
            pme_lb->start    = 0;
            pme_lb->end      = pme_lb->cur;
            pme_lb->cur      = pme_lb->start;
            pme_lb->elimited = epmelblimDD;
            print_loadbal_limited(fp_err, fp_log, step, pme_lb);
        }
    }

    /* Change the Coulomb cut-off and the PME grid */

    set = &pme_lb->setup[pme_lb->cur];

    ic->rcoulomb     = set->rcut_coulomb;
    ic->rlist        = set->rlist;
    ic->rlistlong    = set->rlistlong;
    ir->nstcalclr    = set->nstcalclr;
    ic->ewaldcoeff_q = set->ewaldcoeff_q;
    /* TODO: centralize the code that sets the potentials shifts */
    if (ic->coulomb_modifier == eintmodPOTSHIFT)
    {
        ic->sh_ewald = gmx_erfc(ic->ewaldcoeff_q*ic->rcoulomb);
    }
    if (EVDW_PME(ic->vdwtype))
    {
        /* We have PME for both Coulomb and VdW, set rvdw equal to rcoulomb */
        ic->rvdw          = set->rcut_coulomb;
        ic->ewaldcoeff_lj = set->ewaldcoeff_lj;
        if (ic->vdw_modifier == eintmodPOTSHIFT)
        {
            real crc2;

            ic->dispersion_shift.cpot = -pow(ic->rvdw, -6.0);
            ic->repulsion_shift.cpot  = -pow(ic->rvdw, -12.0);
            ic->sh_invrc6             = -ic->dispersion_shift.cpot;
            crc2                      = sqr(ic->ewaldcoeff_lj*ic->rvdw);
            ic->sh_lj_ewald           = (exp(-crc2)*(1 + crc2 + 0.5*crc2*crc2) - 1)*pow(ic->rvdw, -6.0);
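
            /* Hedged derivation of sh_lj_ewald above: with
             * c = crc2 = (beta_lj*rvdw)^2, the real-space LJ-PME
             * dispersion potential at the cut-off is
             *   V(rvdw) = -(1/rvdw^6)*(1 - exp(-c)*(1 + c + c^2/2)),
             * so (exp(-c)*(1 + c + c^2/2) - 1)*rvdw^-6 equals V(rvdw),
             * and this shift brings the dispersion term to zero at the
             * cut-off, consistent with the potential-shift modifier.
             */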
        }
    }

    bUsesSimpleTables = uses_simple_tables(ir->cutoff_scheme, nbv, 0);
    nbnxn_cuda_pme_loadbal_update_param(nbv, ic);

    /* With tMPI + GPUs some ranks may be sharing GPU(s) and therefore
     * also sharing texture references. To keep the code simple, we don't
     * treat texture references as shared resources, but this means that
     * the coulomb_tab texture ref will get updated by multiple threads.
     * Hence, to ensure that the non-bonded kernels don't start before all
     * texture binding operations are finished, we need to wait for all ranks
     * to arrive here before continuing.
     *
     * Note that we could omit this barrier if GPUs are not shared (or
     * texture objects are used), but as this is initialization code, there
     * is no point in complicating things.
     */
#ifdef GMX_THREAD_MPI
    if (PAR(cr) && use_GPU(nbv))
    {
        gmx_barrier(cr);
    }
#endif  /* GMX_THREAD_MPI */

    /* Usually we won't need the simple tables with GPUs.
     * But we do with hybrid acceleration and with free energy.
     * To avoid bugs, we always re-initialize the simple tables here.
     */
    init_interaction_const_tables(NULL, ic, bUsesSimpleTables, rtab);

    if (cr->duty & DUTY_PME)
    {
        if (pme_lb->setup[pme_lb->cur].pmedata == NULL)
        {
            /* Generate a new PME data structure,
             * copying part of the old pointers.
             */
            gmx_pme_reinit(&set->pmedata,
                           cr, pme_lb->setup[0].pmedata, ir,
                           set->grid);
        }
        *pmedata = set->pmedata;
    }
    else
    {
        /* Tell our PME-only node to switch grid */
        gmx_pme_send_switchgrid(cr, set->grid, set->ewaldcoeff_q, set->ewaldcoeff_lj);
    }

    if (debug)
    {
        print_grid(NULL, debug, "", "switched to", set, -1);
    }

    if (pme_lb->stage == pme_lb->nstage)
    {
        print_grid(fp_err, fp_log, "", "optimal", set, -1);
    }

    return TRUE;
}
void restart_pme_loadbal(pme_load_balancing_t pme_lb, int n)
{
    pme_lb->nstage += n;
}
static int pme_grid_points(const pme_setup_t *setup)
{
    return setup->grid[XX]*setup->grid[YY]*setup->grid[ZZ];
}
static real pme_loadbal_rlist(const pme_setup_t *setup)
{
    /* With the group cut-off scheme we can have twin-range either
     * for Coulomb or for VdW, so we need a check here.
     * With the Verlet cut-off scheme rlist=rlistlong.
     */
    if (setup->rcut_coulomb > setup->rlist)
    {
        return setup->rlistlong;
    }
    else
    {
        return setup->rlist;
    }
}
static void print_pme_loadbal_setting(FILE              *fplog,
                                      const char        *name,
                                      const pme_setup_t *setup)
{
    fprintf(fplog,
            "   %-7s %6.3f nm %6.3f nm   %3d %3d %3d   %5.3f nm  %5.3f nm\n",
            name,
            setup->rcut_coulomb, pme_loadbal_rlist(setup),
            setup->grid[XX], setup->grid[YY], setup->grid[ZZ],
            setup->spacing, 1/setup->ewaldcoeff_q);
}
static void print_pme_loadbal_settings(pme_load_balancing_t pme_lb,
                                       t_commrec           *cr,
                                       FILE                *fplog,
                                       gmx_bool             bNonBondedOnGPU)
{
    double pp_ratio, grid_ratio;

    pp_ratio   = pow(pme_loadbal_rlist(&pme_lb->setup[pme_lb->cur])/pme_loadbal_rlist(&pme_lb->setup[0]), 3.0);
    grid_ratio = pme_grid_points(&pme_lb->setup[pme_lb->cur])/
        (double)pme_grid_points(&pme_lb->setup[0]);
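
    /* Reasoning behind these ratios (no new behavior): pp_ratio is cubed
     * because the non-bonded pair count scales with the pair-search
     * volume ~ rlist^3, e.g. going from rlist 1.0 nm to 1.1 nm gives
     * pp_ratio = 1.1^3 ~= 1.33, while grid_ratio compares raw PME
     * grid-point counts, to which the FFT and spread/gather cost is
     * roughly proportional.
     */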

    fprintf(fplog, "\n");
    fprintf(fplog, "       P P   -   P M E   L O A D   B A L A N C I N G\n");
    fprintf(fplog, "\n");
    /* Here we only warn when the optimal setting is the last one */
    if (pme_lb->elimited != epmelblimNO &&
        pme_lb->cur == pme_loadbal_end(pme_lb)-1)
    {
        fprintf(fplog, " NOTE: The PP/PME load balancing was limited by the %s,\n",
                pmelblim_str[pme_lb->elimited]);
        fprintf(fplog, "       you might not have reached a good load balance.\n");
        if (pme_lb->elimited == epmelblimDD)
        {
            fprintf(fplog, "       Try different mdrun -dd settings or lower the -dds value.\n");
        }
        fprintf(fplog, "\n");
    }
    fprintf(fplog, " PP/PME load balancing changed the cut-off and PME settings:\n");
    fprintf(fplog, "           particle-particle                    PME\n");
    fprintf(fplog, "            rcoulomb  rlist            grid      spacing   1/beta\n");
    print_pme_loadbal_setting(fplog, "initial", &pme_lb->setup[0]);
    print_pme_loadbal_setting(fplog, "final", &pme_lb->setup[pme_lb->cur]);
    fprintf(fplog, " cost-ratio           %4.2f             %4.2f\n",
            pp_ratio, grid_ratio);
    fprintf(fplog, " (note that these numbers concern only part of the total PP and PME load)\n");

    if (pp_ratio > 1.5 && !bNonBondedOnGPU)
    {
        md_print_warn(cr, fplog,
                      "NOTE: PME load balancing increased the non-bonded workload by more than 50%%.\n"
                      "      For better performance, use (more) PME ranks (mdrun -npme),\n"
                      "      or if you are beyond the scaling limit, use fewer total ranks (or nodes).\n");
    }
    else
    {
        fprintf(fplog, "\n");
    }
}
void pme_loadbal_done(pme_load_balancing_t pme_lb,
                      t_commrec *cr, FILE *fplog,
                      gmx_bool bNonBondedOnGPU)
{
    if (fplog != NULL && (pme_lb->cur > 0 || pme_lb->elimited != epmelblimNO))
    {
        print_pme_loadbal_settings(pme_lb, cr, fplog, bNonBondedOnGPU);
    }

    /* TODO: Here we should free all pointers in pme_lb,
     * but as it contains pme data structures,
     * we need to first make pme.c free all data.
     */
}