src/programs/mdrun/pme_loadbal.c
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "config.h"

#include "types/commrec.h"
#include "network.h"
#include "calcgrid.h"
#include "pme.h"
#include "domdec.h"
#include "nbnxn_cuda_data_mgmt.h"
#include "force.h"
#include "macros.h"
#include "md_logging.h"
#include "pme_loadbal.h"

#include "gromacs/math/vec.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/smalloc.h"

/* Parameters and settings for one PP-PME setup */
typedef struct {
    real      rcut_coulomb;    /* Coulomb cut-off                              */
    real      rlist;           /* pair-list cut-off                            */
    real      rlistlong;       /* LR pair-list cut-off                         */
    int       nstcalclr;       /* frequency of evaluating long-range forces for group scheme */
    real      spacing;         /* (largest) PME grid spacing                   */
    ivec      grid;            /* the PME grid dimensions                      */
    real      grid_efficiency; /* inefficiency factor for non-uniform grids <= 1 */
    real      ewaldcoeff_q;    /* Electrostatic Ewald coefficient              */
    real      ewaldcoeff_lj;   /* LJ Ewald coefficient, only for the call to send_switchgrid */
    gmx_pme_t pmedata;         /* the data structure used in the PME code      */
    int       count;           /* number of times this setup has been timed    */
    double    cycles;          /* the fastest time for this setup in cycles    */
} pme_setup_t;
68
69 /* In the initial scan, step by grids that are at least a factor 0.8 coarser */
70 #define PME_LB_GRID_SCALE_FAC  0.8
71 /* In the initial scan, try to skip grids with uneven x/y/z spacing,
72  * checking if the "efficiency" is more than 5% worse than the previous grid.
73  */
74 #define PME_LB_GRID_EFFICIENCY_REL_FAC  1.05
75 /* Rerun up till 12% slower setups than the fastest up till now */
76 #define PME_LB_SLOW_FAC  1.12
77 /* If setups get more than 2% faster, do another round to avoid
78  * choosing a slower setup due to acceleration or fluctuations.
79  */
80 #define PME_LB_ACCEL_TOL 1.02
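
/* Worked example of these thresholds (illustrative numbers, not from any
 * particular run): with the fastest setup at 100 M-cycles, a setup timed at
 * 113 M-cycles exceeds 100*PME_LB_SLOW_FAC = 112 M-cycles and is excluded
 * from rescanning in later stages. Conversely, if a setup that previously
 * took 100 M-cycles is re-timed at 97 M-cycles, then
 * 97*PME_LB_ACCEL_TOL = 98.9 < 100, so an extra stage is added and all
 * remaining setups are timed again.
 */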

enum {
    epmelblimNO, epmelblimBOX, epmelblimDD, epmelblimPMEGRID, epmelblimNR
};

const char *pmelblim_str[epmelblimNR] =
{ "no", "box size", "domain decomposition", "PME grid restriction" };

struct pme_load_balancing {
    int          nstage;             /* the current maximum number of stages */

    real         cut_spacing;        /* the minimum cutoff / PME grid spacing ratio */
    real         rcut_vdw;           /* Vdw cutoff (does not change) */
    real         rcut_coulomb_start; /* Initial electrostatics cutoff */
    int          nstcalclr_start;    /* Initial frequency of evaluating long-range forces */
    real         rbuf_coulomb;       /* the Coulomb pairlist buffer size */
    real         rbuf_vdw;           /* the Vdw pairlist buffer size */
    matrix       box_start;          /* the initial simulation box */
    int          n;                  /* the number of setups, also the allocation size */
    pme_setup_t *setup;              /* the PME+cutoff setups */
    int          cur;                /* the current setup */
    int          fastest;            /* fastest setup up till now */
    int          start;              /* start of setup range to consider in stage>0 */
    int          end;                /* end   of setup range to consider in stage>0 */
    int          elimited;           /* was the balancing limited, uses enum above */
    int          cutoff_scheme;      /* Verlet or group cut-offs */

    int          stage;              /* the current stage */
};
void pme_loadbal_init(pme_load_balancing_t *pme_lb_p,
                      const t_inputrec *ir, matrix box,
                      const interaction_const_t *ic,
                      gmx_pme_t pmedata)
{
    pme_load_balancing_t pme_lb;
    real                 spm, sp;
    int                  d;

    snew(pme_lb, 1);

    /* Any number of stages >= 2 is supported */
    pme_lb->nstage   = 2;

    pme_lb->cutoff_scheme = ir->cutoff_scheme;

    if (pme_lb->cutoff_scheme == ecutsVERLET)
    {
        pme_lb->rbuf_coulomb = ic->rlist - ic->rcoulomb;
        pme_lb->rbuf_vdw     = pme_lb->rbuf_coulomb;
    }
    else
    {
        if (ic->rcoulomb > ic->rlist)
        {
            pme_lb->rbuf_coulomb = ic->rlistlong - ic->rcoulomb;
        }
        else
        {
            pme_lb->rbuf_coulomb = ic->rlist - ic->rcoulomb;
        }
        if (ic->rvdw > ic->rlist)
        {
            pme_lb->rbuf_vdw = ic->rlistlong - ic->rvdw;
        }
        else
        {
            pme_lb->rbuf_vdw = ic->rlist - ic->rvdw;
        }
    }

    copy_mat(box, pme_lb->box_start);
    if (ir->ePBC == epbcXY && ir->nwall == 2)
    {
        svmul(ir->wall_ewald_zfac, pme_lb->box_start[ZZ], pme_lb->box_start[ZZ]);
    }

    pme_lb->n = 1;
    snew(pme_lb->setup, pme_lb->n);

    pme_lb->rcut_vdw                 = ic->rvdw;
    pme_lb->rcut_coulomb_start       = ir->rcoulomb;
    pme_lb->nstcalclr_start          = ir->nstcalclr;

    pme_lb->cur                      = 0;
    pme_lb->setup[0].rcut_coulomb    = ic->rcoulomb;
    pme_lb->setup[0].rlist           = ic->rlist;
    pme_lb->setup[0].rlistlong       = ic->rlistlong;
    pme_lb->setup[0].nstcalclr       = ir->nstcalclr;
    pme_lb->setup[0].grid[XX]        = ir->nkx;
    pme_lb->setup[0].grid[YY]        = ir->nky;
    pme_lb->setup[0].grid[ZZ]        = ir->nkz;
    pme_lb->setup[0].ewaldcoeff_q    = ic->ewaldcoeff_q;
    pme_lb->setup[0].ewaldcoeff_lj   = ic->ewaldcoeff_lj;

    pme_lb->setup[0].pmedata  = pmedata;

    spm = 0;
    for (d = 0; d < DIM; d++)
    {
        sp = norm(pme_lb->box_start[d])/pme_lb->setup[0].grid[d];
        if (sp > spm)
        {
            spm = sp;
        }
    }
    pme_lb->setup[0].spacing = spm;

    if (ir->fourier_spacing > 0)
    {
        pme_lb->cut_spacing = ir->rcoulomb/ir->fourier_spacing;
    }
    else
    {
        pme_lb->cut_spacing = ir->rcoulomb/pme_lb->setup[0].spacing;
    }

    pme_lb->stage = 0;

    pme_lb->fastest  = 0;
    pme_lb->start    = 0;
    pme_lb->end      = 0;
    pme_lb->elimited = epmelblimNO;

    *pme_lb_p = pme_lb;
}
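
/* Minimal usage sketch for the initialization above (hedged: the variable
 * and member names such as fr->ic and fr->pmedata are illustrative of the
 * caller's state, not prescribed by this file; the real call site is the
 * mdrun setup code):
 *
 *   pme_load_balancing_t pme_loadbal;
 *
 *   if (EEL_PME(fr->eeltype))
 *   {
 *       pme_loadbal_init(&pme_loadbal, ir, state->box, fr->ic, fr->pmedata);
 *   }
 */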

static gmx_bool pme_loadbal_increase_cutoff(pme_load_balancing_t  pme_lb,
                                            int                   pme_order,
                                            const gmx_domdec_t   *dd)
{
    pme_setup_t *set;
    int          npmenodes_x, npmenodes_y;
    real         fac, sp;
    real         tmpr_coulomb, tmpr_vdw;
    int          d;
    gmx_bool     grid_ok;

    /* Try to add a new setup with next larger cut-off to the list */
    pme_lb->n++;
    srenew(pme_lb->setup, pme_lb->n);
    set          = &pme_lb->setup[pme_lb->n-1];
    set->pmedata = NULL;

    get_pme_nnodes(dd, &npmenodes_x, &npmenodes_y);

    fac = 1;
    do
    {
        /* Avoid an infinite while loop, which can occur at the minimum grid size.
         * Note that in practice load balancing will stop before this point.
         * The factor 2.1 allows for the extreme case in which only grids
         * of powers of 2 are allowed (the current code supports more grids).
         */
        if (fac > 2.1)
        {
            pme_lb->n--;

            return FALSE;
        }

        fac *= 1.01;
        clear_ivec(set->grid);
        sp = calc_grid(NULL, pme_lb->box_start,
                       fac*pme_lb->setup[pme_lb->cur].spacing,
                       &set->grid[XX],
                       &set->grid[YY],
                       &set->grid[ZZ]);

        /* As here we can't easily check if one of the PME nodes
         * uses threading, we do a conservative grid check.
         * This means we can't use pme_order or less grid lines
         * per PME node along x, which is not a strong restriction.
         */
        gmx_pme_check_restrictions(pme_order,
                                   set->grid[XX], set->grid[YY], set->grid[ZZ],
                                   npmenodes_x, npmenodes_y,
                                   TRUE,
                                   FALSE,
                                   &grid_ok);
    }
    while (sp <= 1.001*pme_lb->setup[pme_lb->cur].spacing || !grid_ok);

    set->rcut_coulomb = pme_lb->cut_spacing*sp;

    if (pme_lb->cutoff_scheme == ecutsVERLET)
    {
        set->rlist        = set->rcut_coulomb + pme_lb->rbuf_coulomb;
        /* We don't use LR lists with Verlet, but this avoids if-statements in further checks */
        set->rlistlong    = set->rlist;
    }
    else
    {
        tmpr_coulomb          = set->rcut_coulomb + pme_lb->rbuf_coulomb;
        tmpr_vdw              = pme_lb->rcut_vdw + pme_lb->rbuf_vdw;
        set->rlist            = min(tmpr_coulomb, tmpr_vdw);
        set->rlistlong        = max(tmpr_coulomb, tmpr_vdw);

        /* Set the long-range update frequency */
        if (set->rlist == set->rlistlong)
        {
            /* No long-range interactions if the short-/long-range cutoffs are identical */
            set->nstcalclr = 0;
        }
        else if (pme_lb->nstcalclr_start == 0 || pme_lb->nstcalclr_start == 1)
        {
            /* We were not doing long-range before, but now we are since rlist!=rlistlong */
            set->nstcalclr = 1;
        }
        else
        {
            /* We were already doing long-range interactions from the start */
            if (pme_lb->rcut_vdw > pme_lb->rcut_coulomb_start)
            {
                /* We were originally doing long-range VdW-only interactions.
                 * If rvdw is still longer than rcoulomb we keep the original nstcalclr,
                 * but if the coulomb cutoff has become longer we should update the long-range
                 * part every step.
                 */
                set->nstcalclr = (tmpr_vdw > tmpr_coulomb) ? pme_lb->nstcalclr_start : 1;
            }
            else
            {
                /* We were not doing any long-range interaction from the start,
                 * since it is not possible to do twin-range coulomb for the PME interaction.
                 */
                set->nstcalclr = 1;
            }
        }
    }

    set->spacing      = sp;
    /* The grid efficiency is the size wrt a grid with uniform x/y/z spacing */
    set->grid_efficiency = 1;
    for (d = 0; d < DIM; d++)
    {
        set->grid_efficiency *= (set->grid[d]*sp)/norm(pme_lb->box_start[d]);
    }
    /* The Ewald coefficient is inversely proportional to the cut-off */
    set->ewaldcoeff_q =
        pme_lb->setup[0].ewaldcoeff_q*pme_lb->setup[0].rcut_coulomb/set->rcut_coulomb;
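    /* Worked example of this scaling (illustrative numbers): increasing
     * rcut_coulomb from 0.9 nm to 1.2 nm scales ewaldcoeff_q by
     * 0.9/1.2 = 0.75, which keeps the product ewaldcoeff_q*rcut_coulomb,
     * and hence the real-space error erfc(ewaldcoeff_q*rcut_coulomb),
     * unchanged.
     */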
    /* We set ewaldcoeff_lj in set, even when LJ-PME is not used */
    set->ewaldcoeff_lj =
        pme_lb->setup[0].ewaldcoeff_lj*pme_lb->setup[0].rcut_coulomb/set->rcut_coulomb;

    set->count   = 0;
    set->cycles  = 0;

    if (debug)
    {
        fprintf(debug, "PME loadbal: grid %d %d %d, coulomb cutoff %f\n",
                set->grid[XX], set->grid[YY], set->grid[ZZ], set->rcut_coulomb);
    }
    return TRUE;
}
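
/* Note on the search loop in pme_loadbal_increase_cutoff (illustrative
 * arithmetic): fac grows as 1.01^n, so the 2.1 cap is reached after roughly
 * ln(2.1)/ln(1.01), i.e. about 75 iterations, which bounds the work spent
 * before giving up when no coarser acceptable grid exists.
 */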

static void print_grid(FILE *fp_err, FILE *fp_log,
                       const char *pre,
                       const char *desc,
                       const pme_setup_t *set,
                       double cycles)
{
    char buf[STRLEN], buft[STRLEN];

    if (cycles >= 0)
    {
        sprintf(buft, ": %.1f M-cycles", cycles*1e-6);
    }
    else
    {
        buft[0] = '\0';
    }
    sprintf(buf, "%-11s%10s pme grid %d %d %d, coulomb cutoff %.3f%s",
            pre,
            desc, set->grid[XX], set->grid[YY], set->grid[ZZ], set->rcut_coulomb,
            buft);
    if (fp_err != NULL)
    {
        fprintf(fp_err, "\r%s\n", buf);
    }
    if (fp_log != NULL)
    {
        fprintf(fp_log, "%s\n", buf);
    }
}

static int pme_loadbal_end(pme_load_balancing_t pme_lb)
{
    /* In the initial stage only n is set; end is not set yet */
    if (pme_lb->end > 0)
    {
        return pme_lb->end;
    }
    else
    {
        return pme_lb->n;
    }
}

static void print_loadbal_limited(FILE *fp_err, FILE *fp_log,
                                  gmx_int64_t step,
                                  pme_load_balancing_t pme_lb)
{
    char buf[STRLEN], sbuf[22];

    sprintf(buf, "step %4s: the %s limits the PME load balancing to a coulomb cut-off of %.3f",
            gmx_step_str(step, sbuf),
            pmelblim_str[pme_lb->elimited],
            pme_lb->setup[pme_loadbal_end(pme_lb)-1].rcut_coulomb);
    if (fp_err != NULL)
    {
        fprintf(fp_err, "\r%s\n", buf);
    }
    if (fp_log != NULL)
    {
        fprintf(fp_log, "%s\n", buf);
    }
}

static void switch_to_stage1(pme_load_balancing_t pme_lb)
{
    pme_lb->start = 0;
    while (pme_lb->start+1 < pme_lb->n &&
           (pme_lb->setup[pme_lb->start].count == 0 ||
            pme_lb->setup[pme_lb->start].cycles >
            pme_lb->setup[pme_lb->fastest].cycles*PME_LB_SLOW_FAC))
    {
        pme_lb->start++;
    }
    while (pme_lb->start > 0 && pme_lb->setup[pme_lb->start-1].cycles == 0)
    {
        pme_lb->start--;
    }

    pme_lb->end = pme_lb->n;
    if (pme_lb->setup[pme_lb->end-1].count > 0 &&
        pme_lb->setup[pme_lb->end-1].cycles >
        pme_lb->setup[pme_lb->fastest].cycles*PME_LB_SLOW_FAC)
    {
        pme_lb->end--;
    }

    pme_lb->stage = 1;

    /* Next we want to choose setup pme_lb->start, but as we will increase
     * pme_lb->cur by one right after returning, we subtract 1 here.
     */
    pme_lb->cur = pme_lb->start - 1;
}
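
/* Illustrative example of the stage-1 range selection above: with four
 * setups timed at 120, 100, 105 and 130 M-cycles (fastest = 100), the
 * threshold 100*PME_LB_SLOW_FAC = 112 excludes the first and last setup,
 * giving start = 1 and end = 3.
 */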

gmx_bool pme_load_balance(pme_load_balancing_t pme_lb,
                          t_commrec           *cr,
                          FILE                *fp_err,
                          FILE                *fp_log,
                          t_inputrec          *ir,
                          t_state             *state,
                          double               cycles,
                          interaction_const_t *ic,
                          nonbonded_verlet_t  *nbv,
                          gmx_pme_t           *pmedata,
                          gmx_int64_t          step)
{
    gmx_bool     OK;
    pme_setup_t *set;
    double       cycles_fast;
    char         buf[STRLEN], sbuf[22];
    real         rtab;
    gmx_bool     bUsesSimpleTables = TRUE;

    if (pme_lb->stage == pme_lb->nstage)
    {
        return FALSE;
    }

    if (PAR(cr))
    {
        gmx_sumd(1, &cycles, cr);
        cycles /= cr->nnodes;
    }

    set = &pme_lb->setup[pme_lb->cur];
    set->count++;

    rtab = ir->rlistlong + ir->tabext;

    if (set->count % 2 == 1)
    {
        /* Skip the first cycle, because the first step after a switch
         * is much slower due to allocation and/or caching effects.
         */
        return TRUE;
    }

    sprintf(buf, "step %4s: ", gmx_step_str(step, sbuf));
    print_grid(fp_err, fp_log, buf, "timed with", set, cycles);

    if (set->count <= 2)
    {
        set->cycles = cycles;
    }
    else
    {
        if (cycles*PME_LB_ACCEL_TOL < set->cycles &&
            pme_lb->stage == pme_lb->nstage - 1)
        {
            /* The performance went up a lot (due to e.g. DD load balancing).
             * Add a stage, keep the minima, but rescan all setups.
             */
            pme_lb->nstage++;

            if (debug)
            {
                fprintf(debug, "The performance for grid %d %d %d went from %.3f to %.1f M-cycles, this is more than %f\n"
                        "Increased the number of stages to %d"
                        " and ignoring the previous performance\n",
                        set->grid[XX], set->grid[YY], set->grid[ZZ],
                        cycles*1e-6, set->cycles*1e-6, PME_LB_ACCEL_TOL,
                        pme_lb->nstage);
            }
        }
        set->cycles = min(set->cycles, cycles);
    }

    if (set->cycles < pme_lb->setup[pme_lb->fastest].cycles)
    {
        pme_lb->fastest = pme_lb->cur;

        if (DOMAINDECOMP(cr))
        {
            /* We found a new fastest setting, ensure that with subsequent
             * shorter cut-offs the dynamic load balancing does not make
             * the use of the current cut-off impossible. This solution is
             * a trade-off, as the PME load balancing and DD domain size
             * load balancing can interact in complex ways.
             * With the Verlet kernels, DD load imbalance will usually be
             * mainly due to bonded interaction imbalance, which will often
             * quickly push the domain boundaries beyond the limit for the
             * optimal, PME load balanced, cut-off. But it could be that
             * better overall performance can be obtained with a slightly
             * shorter cut-off and better DD load balancing.
             */
            change_dd_dlb_cutoff_limit(cr);
        }
    }
    cycles_fast = pme_lb->setup[pme_lb->fastest].cycles;

    /* Check in stage 0 if we should stop scanning grids.
     * Stop when the time is more than a factor PME_LB_SLOW_FAC longer than the fastest.
     */
    if (pme_lb->stage == 0 && pme_lb->cur > 0 &&
        cycles > pme_lb->setup[pme_lb->fastest].cycles*PME_LB_SLOW_FAC)
    {
        pme_lb->n = pme_lb->cur + 1;
        /* Done with scanning, go to stage 1 */
        switch_to_stage1(pme_lb);
    }

    if (pme_lb->stage == 0)
    {
        int gridsize_start;

        gridsize_start = set->grid[XX]*set->grid[YY]*set->grid[ZZ];

        do
        {
            if (pme_lb->cur+1 < pme_lb->n)
            {
                /* We had already generated the next setup */
                OK = TRUE;
            }
            else
            {
                /* Find the next setup */
                OK = pme_loadbal_increase_cutoff(pme_lb, ir->pme_order, cr->dd);

                if (!OK)
                {
                    pme_lb->elimited = epmelblimPMEGRID;
                }
            }

            if (OK && ir->ePBC != epbcNONE)
            {
                OK = (sqr(pme_lb->setup[pme_lb->cur+1].rlistlong)
                      <= max_cutoff2(ir->ePBC, state->box));
                if (!OK)
                {
                    pme_lb->elimited = epmelblimBOX;
                }
            }

            if (OK)
            {
                pme_lb->cur++;

                if (DOMAINDECOMP(cr))
                {
                    OK = change_dd_cutoff(cr, state, ir,
                                          pme_lb->setup[pme_lb->cur].rlistlong);
                    if (!OK)
                    {
                        /* Failed: do not use this setup */
                        pme_lb->cur--;
                        pme_lb->elimited = epmelblimDD;
                    }
                }
            }
            if (!OK)
            {
                /* We hit the upper limit for the cut-off,
                 * the setup should not go further than cur.
                 */
                pme_lb->n = pme_lb->cur + 1;
                print_loadbal_limited(fp_err, fp_log, step, pme_lb);
                /* Switch to the next stage */
                switch_to_stage1(pme_lb);
            }
        }
        while (OK &&
               !(pme_lb->setup[pme_lb->cur].grid[XX]*
                 pme_lb->setup[pme_lb->cur].grid[YY]*
                 pme_lb->setup[pme_lb->cur].grid[ZZ] <
                 gridsize_start*PME_LB_GRID_SCALE_FAC
                 &&
                 pme_lb->setup[pme_lb->cur].grid_efficiency <
                 pme_lb->setup[pme_lb->cur-1].grid_efficiency*PME_LB_GRID_EFFICIENCY_REL_FAC));
    }

    if (pme_lb->stage > 0 && pme_lb->end == 1)
    {
        pme_lb->cur   = 0;
        pme_lb->stage = pme_lb->nstage;
    }
    else if (pme_lb->stage > 0 && pme_lb->end > 1)
    {
        /* If stage = nstage-1:
         *   scan over all setups, rerunning only those setups
         *   which are not much slower than the fastest
         * else:
         *   use the next setup
         */
        do
        {
            pme_lb->cur++;
            if (pme_lb->cur == pme_lb->end)
            {
                pme_lb->stage++;
                pme_lb->cur = pme_lb->start;
            }
        }
        while (pme_lb->stage == pme_lb->nstage - 1 &&
               pme_lb->setup[pme_lb->cur].count > 0 &&
               pme_lb->setup[pme_lb->cur].cycles > cycles_fast*PME_LB_SLOW_FAC);

        if (pme_lb->stage == pme_lb->nstage)
        {
            /* We are done optimizing, use the fastest setup we found */
            pme_lb->cur = pme_lb->fastest;
        }
    }

    if (DOMAINDECOMP(cr) && pme_lb->stage > 0)
    {
        OK = change_dd_cutoff(cr, state, ir, pme_lb->setup[pme_lb->cur].rlistlong);
        if (!OK)
        {
            /* Failsafe solution */
            if (pme_lb->cur > 1 && pme_lb->stage == pme_lb->nstage)
            {
                pme_lb->stage--;
            }
            pme_lb->fastest  = 0;
            pme_lb->start    = 0;
            pme_lb->end      = pme_lb->cur;
            pme_lb->cur      = pme_lb->start;
            pme_lb->elimited = epmelblimDD;
            print_loadbal_limited(fp_err, fp_log, step, pme_lb);
        }
    }

    /* Change the Coulomb cut-off and the PME grid */

    set = &pme_lb->setup[pme_lb->cur];

    ic->rcoulomb     = set->rcut_coulomb;
    ic->rlist        = set->rlist;
    ic->rlistlong    = set->rlistlong;
    ir->nstcalclr    = set->nstcalclr;
    ic->ewaldcoeff_q = set->ewaldcoeff_q;
    /* TODO: centralize the code that sets the potential shifts */
    if (ic->coulomb_modifier == eintmodPOTSHIFT)
    {
        ic->sh_ewald = gmx_erfc(ic->ewaldcoeff_q*ic->rcoulomb);
    }
    if (EVDW_PME(ic->vdwtype))
    {
        /* We have PME for both Coulomb and VdW, set rvdw equal to rcoulomb */
        ic->rvdw            = set->rcut_coulomb;
        ic->ewaldcoeff_lj   = set->ewaldcoeff_lj;
        if (ic->vdw_modifier == eintmodPOTSHIFT)
        {
            real crc2;

            ic->dispersion_shift.cpot = -pow(ic->rvdw, -6.0);
            ic->repulsion_shift.cpot  = -pow(ic->rvdw, -12.0);
            ic->sh_invrc6             = -ic->dispersion_shift.cpot;
            crc2                      = sqr(ic->ewaldcoeff_lj*ic->rvdw);
            ic->sh_lj_ewald           = (exp(-crc2)*(1 + crc2 + 0.5*crc2*crc2) - 1)*pow(ic->rvdw, -6.0);
        }
    }

    bUsesSimpleTables = uses_simple_tables(ir->cutoff_scheme, nbv, 0);
    if (pme_lb->cutoff_scheme == ecutsVERLET &&
        nbv->grp[0].kernel_type == nbnxnk8x8x8_CUDA)
    {
        nbnxn_cuda_pme_loadbal_update_param(nbv->cu_nbv, ic);

        /* With tMPI + GPUs some ranks may be sharing GPU(s) and therefore
         * also sharing texture references. To keep the code simple, we don't
         * treat texture references as shared resources, but this means that
         * the coulomb_tab texture ref will get updated by multiple threads.
         * Hence, to ensure that the non-bonded kernels don't start before all
         * texture binding operations are finished, we need to wait for all ranks
         * to arrive here before continuing.
         *
         * Note that we could omit this barrier if GPUs are not shared (or
         * texture objects are used), but as this is initialization code, there
         * is no point in complicating things.
         */
#ifdef GMX_THREAD_MPI
        if (PAR(cr))
        {
            gmx_barrier(cr);
        }
#endif  /* GMX_THREAD_MPI */
    }

    /* Usually we won't need the simple tables with GPUs.
     * But we do with hybrid acceleration and with free energy.
     * To avoid bugs, we always re-initialize the simple tables here.
     */
    init_interaction_const_tables(NULL, ic, bUsesSimpleTables, rtab);

    if (cr->duty & DUTY_PME)
    {
        if (pme_lb->setup[pme_lb->cur].pmedata == NULL)
        {
            /* Generate a new PME data structure,
             * copying part of the old pointers.
             */
            gmx_pme_reinit(&set->pmedata,
                           cr, pme_lb->setup[0].pmedata, ir,
                           set->grid);
        }
        *pmedata = set->pmedata;
    }
    else
    {
        /* Tell our PME-only node to switch grid */
        gmx_pme_send_switchgrid(cr, set->grid, set->ewaldcoeff_q, set->ewaldcoeff_lj);
    }

    if (debug)
    {
        print_grid(NULL, debug, "", "switched to", set, -1);
    }

    if (pme_lb->stage == pme_lb->nstage)
    {
        print_grid(fp_err, fp_log, "", "optimal", set, -1);
    }

    return TRUE;
}
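
/* Minimal per-step usage sketch (hedged: bPMETuneRunning, cycles_pme and
 * the fr-> members are illustrative caller-side names, not defined in this
 * file; the real call site is the MD loop):
 *
 *   if (bPMETuneRunning)
 *   {
 *       bPMETuneRunning = pme_load_balance(pme_loadbal, cr, fp_err, fplog,
 *                                          ir, state, cycles_pme,
 *                                          fr->ic, fr->nbv, &fr->pmedata,
 *                                          step);
 *   }
 */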

void restart_pme_loadbal(pme_load_balancing_t pme_lb, int n)
{
    pme_lb->nstage += n;
}

static int pme_grid_points(const pme_setup_t *setup)
{
    return setup->grid[XX]*setup->grid[YY]*setup->grid[ZZ];
}

static real pme_loadbal_rlist(const pme_setup_t *setup)
{
    /* With the group cut-off scheme we can have twin-range either
     * for Coulomb or for VdW, so we need a check here.
     * With the Verlet cut-off scheme rlist=rlistlong.
     */
    if (setup->rcut_coulomb > setup->rlist)
    {
        return setup->rlistlong;
    }
    else
    {
        return setup->rlist;
    }
}

static void print_pme_loadbal_setting(FILE              *fplog,
                                      char              *name,
                                      const pme_setup_t *setup)
{
    fprintf(fplog,
            "   %-7s %6.3f nm %6.3f nm     %3d %3d %3d   %5.3f nm  %5.3f nm\n",
            name,
            setup->rcut_coulomb, pme_loadbal_rlist(setup),
            setup->grid[XX], setup->grid[YY], setup->grid[ZZ],
            setup->spacing, 1/setup->ewaldcoeff_q);
}

static void print_pme_loadbal_settings(pme_load_balancing_t pme_lb,
                                       t_commrec           *cr,
                                       FILE                *fplog,
                                       gmx_bool             bNonBondedOnGPU)
{
    double pp_ratio, grid_ratio;

    pp_ratio   = pow(pme_loadbal_rlist(&pme_lb->setup[pme_lb->cur])/pme_loadbal_rlist(&pme_lb->setup[0]), 3.0);
    grid_ratio = pme_grid_points(&pme_lb->setup[pme_lb->cur])/
        (double)pme_grid_points(&pme_lb->setup[0]);
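
    /* Illustrative example of these ratios: rlist growing from 1.0 nm to
     * 1.26 nm gives pp_ratio = 1.26^3 = 2.0, i.e. roughly twice the
     * pair-interaction volume, while the correspondingly coarser PME grid
     * typically gives grid_ratio < 1.
     */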

    fprintf(fplog, "\n");
    fprintf(fplog, "       P P   -   P M E   L O A D   B A L A N C I N G\n");
    fprintf(fplog, "\n");
    /* Here we only warn when the optimal setting is the last one */
    if (pme_lb->elimited != epmelblimNO &&
        pme_lb->cur == pme_loadbal_end(pme_lb)-1)
    {
        fprintf(fplog, " NOTE: The PP/PME load balancing was limited by the %s,\n",
                pmelblim_str[pme_lb->elimited]);
        fprintf(fplog, "       you might not have reached a good load balance.\n");
        if (pme_lb->elimited == epmelblimDD)
        {
            fprintf(fplog, "       Try different mdrun -dd settings or lower the -dds value.\n");
        }
        fprintf(fplog, "\n");
    }
    fprintf(fplog, " PP/PME load balancing changed the cut-off and PME settings:\n");
    fprintf(fplog, "           particle-particle                    PME\n");
    fprintf(fplog, "            rcoulomb  rlist            grid      spacing   1/beta\n");
    print_pme_loadbal_setting(fplog, "initial", &pme_lb->setup[0]);
    print_pme_loadbal_setting(fplog, "final", &pme_lb->setup[pme_lb->cur]);
    fprintf(fplog, " cost-ratio           %4.2f             %4.2f\n",
            pp_ratio, grid_ratio);
    fprintf(fplog, " (note that these numbers concern only part of the total PP and PME load)\n");

    if (pp_ratio > 1.5 && !bNonBondedOnGPU)
    {
        md_print_warn(cr, fplog,
                      "NOTE: PME load balancing increased the non-bonded workload by more than 50%%.\n"
                      "      For better performance, use (more) PME ranks (mdrun -npme),\n"
                      "      or if you are beyond the scaling limit, use fewer total ranks (or nodes).\n");
    }
    else
    {
        fprintf(fplog, "\n");
    }
}

void pme_loadbal_done(pme_load_balancing_t pme_lb,
                      t_commrec *cr, FILE *fplog,
                      gmx_bool bNonBondedOnGPU)
{
    if (fplog != NULL && (pme_lb->cur > 0 || pme_lb->elimited != epmelblimNO))
    {
        print_pme_loadbal_settings(pme_lb, cr, fplog, bNonBondedOnGPU);
    }

    /* TODO: Here we should free all pointers in pme_lb,
     * but as it contains pme data structures,
     * we need to first make pme.c free all data.
     */
}