/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gmxpre.h"

#include "pme_loadbal.h"

#include "config.h"

#include "gromacs/legacyheaders/calcgrid.h"
#include "gromacs/legacyheaders/domdec.h"
#include "gromacs/legacyheaders/force.h"
#include "gromacs/legacyheaders/macros.h"
#include "gromacs/legacyheaders/md_logging.h"
#include "gromacs/legacyheaders/network.h"
#include "gromacs/legacyheaders/pme.h"
#include "gromacs/legacyheaders/sim_util.h"
#include "gromacs/legacyheaders/types/commrec.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_data_mgmt.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/smalloc.h"

/* Parameters and settings for one PP-PME setup */
typedef struct {
    real      rcut_coulomb;    /* Coulomb cut-off                              */
    real      rlist;           /* pair-list cut-off                            */
    real      rlistlong;       /* LR pair-list cut-off                         */
    int       nstcalclr;       /* frequency of evaluating long-range forces for group scheme */
    real      spacing;         /* (largest) PME grid spacing                   */
    ivec      grid;            /* the PME grid dimensions                      */
    real      grid_efficiency; /* inefficiency factor for non-uniform grids <= 1 */
    real      ewaldcoeff_q;    /* Electrostatic Ewald coefficient              */
    real      ewaldcoeff_lj;   /* LJ Ewald coefficient, only for the call to send_switchgrid */
    gmx_pme_t pmedata;         /* the data structure used in the PME code      */
    int       count;           /* number of times this setup has been timed    */
    double    cycles;          /* the fastest time for this setup in cycles    */
} pme_setup_t;

/* In the initial scan, step by grids that are at least a factor 0.8 coarser */
#define PME_LB_GRID_SCALE_FAC  0.8
/* In the initial scan, try to skip grids with uneven x/y/z spacing,
 * checking if the "efficiency" is more than 5% worse than the previous grid.
 */
#define PME_LB_GRID_EFFICIENCY_REL_FAC  1.05
/* In later stages, only rerun setups that are at most 12% slower than the fastest setup so far */
#define PME_LB_SLOW_FAC  1.12
/* If setups get more than 2% faster, do another round to avoid
 * choosing a slower setup due to acceleration or fluctuations.
 */
#define PME_LB_ACCEL_TOL 1.02

enum {
    epmelblimNO, epmelblimBOX, epmelblimDD, epmelblimPMEGRID, epmelblimNR
};

const char *pmelblim_str[epmelblimNR] =
{ "no", "box size", "domain decomposition", "PME grid restriction" };

struct pme_load_balancing {
    int          nstage;             /* the current maximum number of stages */

    real         cut_spacing;        /* the minimum cutoff / PME grid spacing ratio */
    real         rcut_vdw;           /* Vdw cutoff (does not change) */
    real         rcut_coulomb_start; /* Initial electrostatics cutoff */
    int          nstcalclr_start;    /* Initial long-range force evaluation interval (nstcalclr) */
    real         rbuf_coulomb;       /* the Coulomb pair-list buffer size */
    real         rbuf_vdw;           /* the VdW pair-list buffer size */
    matrix       box_start;          /* the initial simulation box */
    int          n;                  /* the number of setups as well as the allocation size */
    pme_setup_t *setup;              /* the PME+cutoff setups */
    int          cur;                /* the current setup */
    int          fastest;            /* fastest setup so far */
    int          start;              /* start of setup range to consider in stage>0 */
    int          end;                /* end   of setup range to consider in stage>0 */
    int          elimited;           /* was the balancing limited, uses enum above */
    int          cutoff_scheme;      /* Verlet or group cut-offs */

    int          stage;              /* the current stage */
};

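/* Set up the PME load balancing data, taking the starting cut-offs and
 * PME grid from ir and ic; these become setup 0, the reference setup.
 */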
void pme_loadbal_init(pme_load_balancing_t *pme_lb_p,
                      const t_inputrec *ir, matrix box,
                      const interaction_const_t *ic,
                      gmx_pme_t pmedata)
{
    pme_load_balancing_t pme_lb;
    real                 spm, sp;
    int                  d;

    snew(pme_lb, 1);

    /* Any number of stages >= 2 is supported */
    pme_lb->nstage   = 2;

    pme_lb->cutoff_scheme = ir->cutoff_scheme;

    if (pme_lb->cutoff_scheme == ecutsVERLET)
    {
        pme_lb->rbuf_coulomb = ic->rlist - ic->rcoulomb;
        pme_lb->rbuf_vdw     = pme_lb->rbuf_coulomb;
    }
    else
    {
        if (ic->rcoulomb > ic->rlist)
        {
            pme_lb->rbuf_coulomb = ic->rlistlong - ic->rcoulomb;
        }
        else
        {
            pme_lb->rbuf_coulomb = ic->rlist - ic->rcoulomb;
        }
        if (ic->rvdw > ic->rlist)
        {
            pme_lb->rbuf_vdw = ic->rlistlong - ic->rvdw;
        }
        else
        {
            pme_lb->rbuf_vdw = ic->rlist - ic->rvdw;
        }
    }

    copy_mat(box, pme_lb->box_start);
    if (ir->ePBC == epbcXY && ir->nwall == 2)
    {
        svmul(ir->wall_ewald_zfac, pme_lb->box_start[ZZ], pme_lb->box_start[ZZ]);
    }

    pme_lb->n = 1;
    snew(pme_lb->setup, pme_lb->n);

    pme_lb->rcut_vdw                 = ic->rvdw;
    pme_lb->rcut_coulomb_start       = ir->rcoulomb;
    pme_lb->nstcalclr_start          = ir->nstcalclr;

    pme_lb->cur                      = 0;
    pme_lb->setup[0].rcut_coulomb    = ic->rcoulomb;
    pme_lb->setup[0].rlist           = ic->rlist;
    pme_lb->setup[0].rlistlong       = ic->rlistlong;
    pme_lb->setup[0].nstcalclr       = ir->nstcalclr;
    pme_lb->setup[0].grid[XX]        = ir->nkx;
    pme_lb->setup[0].grid[YY]        = ir->nky;
    pme_lb->setup[0].grid[ZZ]        = ir->nkz;
    pme_lb->setup[0].ewaldcoeff_q    = ic->ewaldcoeff_q;
    pme_lb->setup[0].ewaldcoeff_lj   = ic->ewaldcoeff_lj;

    pme_lb->setup[0].pmedata  = pmedata;

    spm = 0;
    for (d = 0; d < DIM; d++)
    {
        sp = norm(pme_lb->box_start[d])/pme_lb->setup[0].grid[d];
        if (sp > spm)
        {
            spm = sp;
        }
    }
    pme_lb->setup[0].spacing = spm;

    if (ir->fourier_spacing > 0)
    {
        pme_lb->cut_spacing = ir->rcoulomb/ir->fourier_spacing;
    }
    else
    {
        pme_lb->cut_spacing = ir->rcoulomb/pme_lb->setup[0].spacing;
    }

    pme_lb->stage = 0;

    pme_lb->fastest  = 0;
    pme_lb->start    = 0;
    pme_lb->end      = 0;
    pme_lb->elimited = epmelblimNO;

    *pme_lb_p = pme_lb;
}

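/* Try to add a new setup with the next larger Coulomb cut-off and a
 * correspondingly coarser PME grid to the list.
 * Returns FALSE when no coarser grid satisfying the PME restrictions
 * could be found.
 */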
static gmx_bool pme_loadbal_increase_cutoff(pme_load_balancing_t  pme_lb,
                                            int                   pme_order,
                                            const gmx_domdec_t   *dd)
{
    pme_setup_t *set;
    int          npmenodes_x, npmenodes_y;
    real         fac, sp;
    real         tmpr_coulomb, tmpr_vdw;
    int          d;
    gmx_bool     grid_ok;

    /* Try to add a new setup with next larger cut-off to the list */
    pme_lb->n++;
    srenew(pme_lb->setup, pme_lb->n);
    set          = &pme_lb->setup[pme_lb->n-1];
    set->pmedata = NULL;

    get_pme_nnodes(dd, &npmenodes_x, &npmenodes_y);

    fac = 1;
    do
    {
        /* Avoid an infinite while loop, which can occur at the minimum grid size.
         * Note that in practice load balancing will stop before this point.
         * The factor 2.1 allows for the extreme case in which only grids
         * of powers of 2 are allowed (the current code supports more grids).
         */
        if (fac > 2.1)
        {
            pme_lb->n--;

            return FALSE;
        }

        fac *= 1.01;
        clear_ivec(set->grid);
        sp = calc_grid(NULL, pme_lb->box_start,
                       fac*pme_lb->setup[pme_lb->cur].spacing,
                       &set->grid[XX],
                       &set->grid[YY],
                       &set->grid[ZZ]);

        /* As we can't easily check here whether one of the PME nodes
         * uses threading, we do a conservative grid check.
         * This means we can't use pme_order or fewer grid lines
         * per PME node along x, which is not a strong restriction.
         */
        gmx_pme_check_restrictions(pme_order,
                                   set->grid[XX], set->grid[YY], set->grid[ZZ],
                                   npmenodes_x, npmenodes_y,
                                   TRUE,
                                   FALSE,
                                   &grid_ok);
    }
    while (sp <= 1.001*pme_lb->setup[pme_lb->cur].spacing || !grid_ok);

    set->rcut_coulomb = pme_lb->cut_spacing*sp;
    if (set->rcut_coulomb < pme_lb->rcut_coulomb_start)
    {
        /* This is unlikely, but can happen when e.g. continuing from
         * a checkpoint after equilibration where the box shrank a lot.
         * We want to avoid rcoulomb getting smaller than rvdw
         * and there might be more issues with decreasing rcoulomb.
         */
        set->rcut_coulomb = pme_lb->rcut_coulomb_start;
    }

    if (pme_lb->cutoff_scheme == ecutsVERLET)
    {
        set->rlist        = set->rcut_coulomb + pme_lb->rbuf_coulomb;
        /* We don't use LR lists with Verlet, but this avoids if-statements in further checks */
        set->rlistlong    = set->rlist;
    }
    else
    {
        tmpr_coulomb          = set->rcut_coulomb + pme_lb->rbuf_coulomb;
        tmpr_vdw              = pme_lb->rcut_vdw + pme_lb->rbuf_vdw;
        set->rlist            = min(tmpr_coulomb, tmpr_vdw);
        set->rlistlong        = max(tmpr_coulomb, tmpr_vdw);

        /* Set the long-range update frequency */
        if (set->rlist == set->rlistlong)
        {
            /* No long-range interactions if the short-/long-range cut-offs are identical */
            set->nstcalclr = 0;
        }
        else if (pme_lb->nstcalclr_start == 0 || pme_lb->nstcalclr_start == 1)
        {
            /* We were not doing long-range before, but now we are since rlist!=rlistlong */
            set->nstcalclr = 1;
        }
        else
        {
            /* We were already doing long-range interactions from the start */
            if (pme_lb->rcut_vdw > pme_lb->rcut_coulomb_start)
            {
                /* We were originally doing long-range VdW-only interactions.
                 * If rvdw is still longer than rcoulomb we keep the original nstcalclr,
                 * but if the coulomb cut-off has become longer we should update the
                 * long-range part every step.
                 */
                set->nstcalclr = (tmpr_vdw > tmpr_coulomb) ? pme_lb->nstcalclr_start : 1;
            }
            else
            {
                /* We were not doing any long-range interaction from the start,
                 * since it is not possible to do twin-range coulomb for the PME interaction.
                 */
                set->nstcalclr = 1;
            }
        }
    }

    set->spacing      = sp;
    /* The grid efficiency is the size relative to a grid with uniform x/y/z spacing */
    set->grid_efficiency = 1;
    for (d = 0; d < DIM; d++)
    {
        set->grid_efficiency *= (set->grid[d]*sp)/norm(pme_lb->box_start[d]);
    }
    /* The Ewald coefficient is inversely proportional to the cut-off */
    set->ewaldcoeff_q =
        pme_lb->setup[0].ewaldcoeff_q*pme_lb->setup[0].rcut_coulomb/set->rcut_coulomb;
    /* We set ewaldcoeff_lj in set, even when LJ-PME is not used */
    set->ewaldcoeff_lj =
        pme_lb->setup[0].ewaldcoeff_lj*pme_lb->setup[0].rcut_coulomb/set->rcut_coulomb;

    set->count   = 0;
    set->cycles  = 0;

    if (debug)
    {
        fprintf(debug, "PME loadbal: grid %d %d %d, coulomb cutoff %f\n",
                set->grid[XX], set->grid[YY], set->grid[ZZ], set->rcut_coulomb);
    }
    return TRUE;
}

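/* Print the PME grid dimensions and Coulomb cut-off of a setup,
 * optionally followed by its timing in M-cycles.
 */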
static void print_grid(FILE *fp_err, FILE *fp_log,
                       const char *pre,
                       const char *desc,
                       const pme_setup_t *set,
                       double cycles)
{
    char buf[STRLEN], buft[STRLEN];

    if (cycles >= 0)
    {
        sprintf(buft, ": %.1f M-cycles", cycles*1e-6);
    }
    else
    {
        buft[0] = '\0';
    }
    sprintf(buf, "%-11s%10s pme grid %d %d %d, coulomb cutoff %.3f%s",
            pre,
            desc, set->grid[XX], set->grid[YY], set->grid[ZZ], set->rcut_coulomb,
            buft);
    if (fp_err != NULL)
    {
        fprintf(fp_err, "\r%s\n", buf);
    }
    if (fp_log != NULL)
    {
        fprintf(fp_log, "%s\n", buf);
    }
}

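/* Return the end of the setup range to consider; in the initial stage,
 * where end is not yet set, this is the number of setups generated so far.
 */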
static int pme_loadbal_end(pme_load_balancing_t pme_lb)
{
    /* In the initial stage only n is set; end is not set yet */
    if (pme_lb->end > 0)
    {
        return pme_lb->end;
    }
    else
    {
        return pme_lb->n;
    }
}

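/* Report which limitation (box size, DD or PME grid) capped the
 * Coulomb cut-off during the load balancing.
 */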
static void print_loadbal_limited(FILE *fp_err, FILE *fp_log,
                                  gmx_int64_t step,
                                  pme_load_balancing_t pme_lb)
{
    char buf[STRLEN], sbuf[22];

    sprintf(buf, "step %4s: the %s limits the PME load balancing to a coulomb cut-off of %.3f",
            gmx_step_str(step, sbuf),
            pmelblim_str[pme_lb->elimited],
            pme_lb->setup[pme_loadbal_end(pme_lb)-1].rcut_coulomb);
    if (fp_err != NULL)
    {
        fprintf(fp_err, "\r%s\n", buf);
    }
    if (fp_log != NULL)
    {
        fprintf(fp_log, "%s\n", buf);
    }
}

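/* Switch from the initial cut-off scan (stage 0) to stage 1:
 * restrict the setup range to those setups that were not much slower
 * than the fastest one, so that only those are re-timed.
 */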
static void switch_to_stage1(pme_load_balancing_t pme_lb)
{
    pme_lb->start = 0;
    while (pme_lb->start+1 < pme_lb->n &&
           (pme_lb->setup[pme_lb->start].count == 0 ||
            pme_lb->setup[pme_lb->start].cycles >
            pme_lb->setup[pme_lb->fastest].cycles*PME_LB_SLOW_FAC))
    {
        pme_lb->start++;
    }
    while (pme_lb->start > 0 && pme_lb->setup[pme_lb->start-1].cycles == 0)
    {
        pme_lb->start--;
    }

    pme_lb->end = pme_lb->n;
    if (pme_lb->setup[pme_lb->end-1].count > 0 &&
        pme_lb->setup[pme_lb->end-1].cycles >
        pme_lb->setup[pme_lb->fastest].cycles*PME_LB_SLOW_FAC)
    {
        pme_lb->end--;
    }

    pme_lb->stage = 1;

    /* Next we want to choose setup pme_lb->start, but as we will increase
     * pme_lb->cur by one right after returning, we subtract 1 here.
     */
    pme_lb->cur = pme_lb->start - 1;
}

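/* Process the cycle count of the current setup and, unless the balancing
 * has finished, switch to the next setup to time: in stage 0 the cut-off
 * is increased and the PME grid coarsened until setups become too slow,
 * in later stages the remaining setups are re-timed and finally the
 * fastest one is selected. After each call the chosen setup is applied
 * to ic, ir and the PME data.
 * Returns FALSE when the balancing has already completed.
 */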
gmx_bool pme_load_balance(pme_load_balancing_t        pme_lb,
                          t_commrec                  *cr,
                          FILE                       *fp_err,
                          FILE                       *fp_log,
                          t_inputrec                 *ir,
                          t_state                    *state,
                          double                      cycles,
                          interaction_const_t        *ic,
                          struct nonbonded_verlet_t  *nbv,
                          gmx_pme_t                  *pmedata,
                          gmx_int64_t                 step)
{
    gmx_bool     OK;
    pme_setup_t *set;
    double       cycles_fast;
    char         buf[STRLEN], sbuf[22];
    real         rtab;
    gmx_bool     bUsesSimpleTables = TRUE;

    if (pme_lb->stage == pme_lb->nstage)
    {
        return FALSE;
    }

    if (PAR(cr))
    {
        gmx_sumd(1, &cycles, cr);
        cycles /= cr->nnodes;
    }

    set = &pme_lb->setup[pme_lb->cur];
    set->count++;

    rtab = ir->rlistlong + ir->tabext;

    if (set->count % 2 == 1)
    {
        /* Skip the first cycle, because the first step after a switch
         * is much slower due to allocation and/or caching effects.
         */
        return TRUE;
    }

    sprintf(buf, "step %4s: ", gmx_step_str(step, sbuf));
    print_grid(fp_err, fp_log, buf, "timed with", set, cycles);

    if (set->count <= 2)
    {
        set->cycles = cycles;
    }
    else
    {
        if (cycles*PME_LB_ACCEL_TOL < set->cycles &&
            pme_lb->stage == pme_lb->nstage - 1)
        {
            /* The performance went up a lot (due to e.g. DD load balancing).
             * Add a stage, keep the minima, but rescan all setups.
             */
            pme_lb->nstage++;

            if (debug)
            {
                fprintf(debug, "The performance for grid %d %d %d went from %.3f to %.1f M-cycles, this is more than %f\n"
                        "Increased the number of stages to %d"
                        " and ignoring the previous performance\n",
                        set->grid[XX], set->grid[YY], set->grid[ZZ],
                        cycles*1e-6, set->cycles*1e-6, PME_LB_ACCEL_TOL,
                        pme_lb->nstage);
            }
        }
        set->cycles = min(set->cycles, cycles);
    }

    if (set->cycles < pme_lb->setup[pme_lb->fastest].cycles)
    {
        pme_lb->fastest = pme_lb->cur;

        if (DOMAINDECOMP(cr))
        {
            /* We found a new fastest setting, ensure that with subsequent
             * shorter cut-offs the dynamic load balancing does not make
             * the use of the current cut-off impossible. This solution is
             * a trade-off, as the PME load balancing and DD domain size
             * load balancing can interact in complex ways.
             * With the Verlet kernels, DD load imbalance will usually be
             * mainly due to bonded interaction imbalance, which will often
             * quickly push the domain boundaries beyond the limit for the
             * optimal, PME load balanced, cut-off. But it could be that
             * better overall performance can be obtained with a slightly
             * shorter cut-off and better DD load balancing.
             */
            change_dd_dlb_cutoff_limit(cr);
        }
    }
    cycles_fast = pme_lb->setup[pme_lb->fastest].cycles;

    /* Check in stage 0 if we should stop scanning grids.
     * Stop when the time is more than a factor PME_LB_SLOW_FAC slower than the fastest.
     */
    if (pme_lb->stage == 0 && pme_lb->cur > 0 &&
        cycles > pme_lb->setup[pme_lb->fastest].cycles*PME_LB_SLOW_FAC)
    {
        pme_lb->n = pme_lb->cur + 1;
        /* Done with scanning, go to stage 1 */
        switch_to_stage1(pme_lb);
    }

    if (pme_lb->stage == 0)
    {
        int gridsize_start;

        gridsize_start = set->grid[XX]*set->grid[YY]*set->grid[ZZ];

        do
        {
            if (pme_lb->cur+1 < pme_lb->n)
            {
                /* We had already generated the next setup */
                OK = TRUE;
            }
            else
            {
                /* Find the next setup */
                OK = pme_loadbal_increase_cutoff(pme_lb, ir->pme_order, cr->dd);

                if (!OK)
                {
                    pme_lb->elimited = epmelblimPMEGRID;
                }
            }

            if (OK && ir->ePBC != epbcNONE)
            {
                OK = (sqr(pme_lb->setup[pme_lb->cur+1].rlistlong)
                      <= max_cutoff2(ir->ePBC, state->box));
                if (!OK)
                {
                    pme_lb->elimited = epmelblimBOX;
                }
            }

            if (OK)
            {
                pme_lb->cur++;

                if (DOMAINDECOMP(cr))
                {
                    OK = change_dd_cutoff(cr, state, ir,
                                          pme_lb->setup[pme_lb->cur].rlistlong);
                    if (!OK)
                    {
                        /* Failed: do not use this setup */
                        pme_lb->cur--;
                        pme_lb->elimited = epmelblimDD;
                    }
                }
            }
            if (!OK)
            {
                /* We hit the upper limit for the cut-off,
                 * the setup range should not go further than cur.
                 */
                pme_lb->n = pme_lb->cur + 1;
                print_loadbal_limited(fp_err, fp_log, step, pme_lb);
                /* Switch to the next stage */
                switch_to_stage1(pme_lb);
            }
        }
        while (OK &&
               !(pme_lb->setup[pme_lb->cur].grid[XX]*
                 pme_lb->setup[pme_lb->cur].grid[YY]*
                 pme_lb->setup[pme_lb->cur].grid[ZZ] <
                 gridsize_start*PME_LB_GRID_SCALE_FAC
                 &&
                 pme_lb->setup[pme_lb->cur].grid_efficiency <
                 pme_lb->setup[pme_lb->cur-1].grid_efficiency*PME_LB_GRID_EFFICIENCY_REL_FAC));
    }

    if (pme_lb->stage > 0 && pme_lb->end == 1)
    {
        pme_lb->cur   = 0;
        pme_lb->stage = pme_lb->nstage;
    }
    else if (pme_lb->stage > 0 && pme_lb->end > 1)
    {
        /* If stage = nstage-1:
         *   scan over all setups, rerunning only those setups
         *   which are not much slower than the fastest
         * else:
         *   use the next setup
         */
        do
        {
            pme_lb->cur++;
            if (pme_lb->cur == pme_lb->end)
            {
                pme_lb->stage++;
                pme_lb->cur = pme_lb->start;
            }
        }
        while (pme_lb->stage == pme_lb->nstage - 1 &&
               pme_lb->setup[pme_lb->cur].count > 0 &&
               pme_lb->setup[pme_lb->cur].cycles > cycles_fast*PME_LB_SLOW_FAC);

        if (pme_lb->stage == pme_lb->nstage)
        {
            /* We are done optimizing, use the fastest setup we found */
            pme_lb->cur = pme_lb->fastest;
        }
    }

    if (DOMAINDECOMP(cr) && pme_lb->stage > 0)
    {
        OK = change_dd_cutoff(cr, state, ir, pme_lb->setup[pme_lb->cur].rlistlong);
        if (!OK)
        {
            /* Failsafe solution */
            if (pme_lb->cur > 1 && pme_lb->stage == pme_lb->nstage)
            {
                pme_lb->stage--;
            }
            pme_lb->fastest  = 0;
            pme_lb->start    = 0;
            pme_lb->end      = pme_lb->cur;
            pme_lb->cur      = pme_lb->start;
            pme_lb->elimited = epmelblimDD;
            print_loadbal_limited(fp_err, fp_log, step, pme_lb);
        }
    }

    /* Change the Coulomb cut-off and the PME grid */

    set = &pme_lb->setup[pme_lb->cur];

    ic->rcoulomb     = set->rcut_coulomb;
    ic->rlist        = set->rlist;
    ic->rlistlong    = set->rlistlong;
    ir->nstcalclr    = set->nstcalclr;
    ic->ewaldcoeff_q = set->ewaldcoeff_q;
    /* TODO: centralize the code that sets the potential shifts */
    if (ic->coulomb_modifier == eintmodPOTSHIFT)
    {
        ic->sh_ewald = gmx_erfc(ic->ewaldcoeff_q*ic->rcoulomb);
    }
    if (EVDW_PME(ic->vdwtype))
    {
        /* We have PME for both Coulomb and VdW, set rvdw equal to rcoulomb */
        ic->rvdw            = set->rcut_coulomb;
        ic->ewaldcoeff_lj   = set->ewaldcoeff_lj;
        if (ic->vdw_modifier == eintmodPOTSHIFT)
        {
            real crc2;

            ic->dispersion_shift.cpot = -pow(ic->rvdw, -6.0);
            ic->repulsion_shift.cpot  = -pow(ic->rvdw, -12.0);
            ic->sh_invrc6             = -ic->dispersion_shift.cpot;
            crc2                      = sqr(ic->ewaldcoeff_lj*ic->rvdw);
            ic->sh_lj_ewald           = (exp(-crc2)*(1 + crc2 + 0.5*crc2*crc2) - 1)*pow(ic->rvdw, -6.0);
        }
    }

    bUsesSimpleTables = uses_simple_tables(ir->cutoff_scheme, nbv, 0);
    nbnxn_cuda_pme_loadbal_update_param(nbv, ic);

    /* With tMPI + GPUs some ranks may be sharing GPU(s) and therefore
     * also sharing texture references. To keep the code simple, we don't
     * treat texture references as shared resources, but this means that
     * the coulomb_tab texture ref will get updated by multiple threads.
     * Hence, to ensure that the non-bonded kernels don't start before all
     * texture binding operations are finished, we need to wait for all ranks
     * to arrive here before continuing.
     *
     * Note that we could omit this barrier if GPUs are not shared (or
     * texture objects are used), but as this is initialization code, there
     * is no point in complicating things.
     */
#ifdef GMX_THREAD_MPI
    if (PAR(cr) && use_GPU(nbv))
    {
        gmx_barrier(cr);
    }
#endif  /* GMX_THREAD_MPI */

    /* Usually we won't need the simple tables with GPUs.
     * But we do with hybrid acceleration and with free energy.
     * To avoid bugs, we always re-initialize the simple tables here.
     */
    init_interaction_const_tables(NULL, ic, bUsesSimpleTables, rtab);

    if (cr->duty & DUTY_PME)
    {
        if (pme_lb->setup[pme_lb->cur].pmedata == NULL)
        {
            /* Generate a new PME data structure,
             * copying part of the old pointers.
             */
            gmx_pme_reinit(&set->pmedata,
                           cr, pme_lb->setup[0].pmedata, ir,
                           set->grid);
        }
        *pmedata = set->pmedata;
    }
    else
    {
        /* Tell our PME-only node to switch grid */
        gmx_pme_send_switchgrid(cr, set->grid, set->ewaldcoeff_q, set->ewaldcoeff_lj);
    }

    if (debug)
    {
        print_grid(NULL, debug, "", "switched to", set, -1);
    }

    if (pme_lb->stage == pme_lb->nstage)
    {
        print_grid(fp_err, fp_log, "", "optimal", set, -1);
    }

    return TRUE;
}

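/* Add n more stages to the load balancing, so that the tuning continues or restarts */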
void restart_pme_loadbal(pme_load_balancing_t pme_lb, int n)
{
    pme_lb->nstage += n;
}

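/* Return the total number of PME grid points for a setup */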
static int pme_grid_points(const pme_setup_t *setup)
{
    return setup->grid[XX]*setup->grid[YY]*setup->grid[ZZ];
}

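/* Return the pair-list cut-off used for the Coulomb interactions of a setup */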
static real pme_loadbal_rlist(const pme_setup_t *setup)
{
    /* With the group cut-off scheme we can have twin-range either
     * for Coulomb or for VdW, so we need a check here.
     * With the Verlet cut-off scheme rlist=rlistlong.
     */
    if (setup->rcut_coulomb > setup->rlist)
    {
        return setup->rlistlong;
    }
    else
    {
        return setup->rlist;
    }
}

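/* Print one line of the cut-off/PME settings table written to the log file */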
static void print_pme_loadbal_setting(FILE              *fplog,
                                      char              *name,
                                      const pme_setup_t *setup)
{
    fprintf(fplog,
            "   %-7s %6.3f nm %6.3f nm     %3d %3d %3d   %5.3f nm  %5.3f nm\n",
            name,
            setup->rcut_coulomb, pme_loadbal_rlist(setup),
            setup->grid[XX], setup->grid[YY], setup->grid[ZZ],
            setup->spacing, 1/setup->ewaldcoeff_q);
}

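/* Print a summary of the PP/PME load balancing: the initial and final settings,
 * their cost ratios and, if applicable, a note on what limited the balancing.
 */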
static void print_pme_loadbal_settings(pme_load_balancing_t pme_lb,
                                       t_commrec           *cr,
                                       FILE                *fplog,
                                       gmx_bool             bNonBondedOnGPU)
{
    double pp_ratio, grid_ratio;

    pp_ratio   = pow(pme_loadbal_rlist(&pme_lb->setup[pme_lb->cur])/pme_loadbal_rlist(&pme_lb->setup[0]), 3.0);
    grid_ratio = pme_grid_points(&pme_lb->setup[pme_lb->cur])/
        (double)pme_grid_points(&pme_lb->setup[0]);

    fprintf(fplog, "\n");
    fprintf(fplog, "       P P   -   P M E   L O A D   B A L A N C I N G\n");
    fprintf(fplog, "\n");
    /* Here we only warn when the optimal setting is the last one */
    if (pme_lb->elimited != epmelblimNO &&
        pme_lb->cur == pme_loadbal_end(pme_lb)-1)
    {
        fprintf(fplog, " NOTE: The PP/PME load balancing was limited by the %s,\n",
                pmelblim_str[pme_lb->elimited]);
        fprintf(fplog, "       you might not have reached a good load balance.\n");
        if (pme_lb->elimited == epmelblimDD)
        {
            fprintf(fplog, "       Try different mdrun -dd settings or lower the -dds value.\n");
        }
        fprintf(fplog, "\n");
    }
    fprintf(fplog, " PP/PME load balancing changed the cut-off and PME settings:\n");
    fprintf(fplog, "           particle-particle                    PME\n");
    fprintf(fplog, "            rcoulomb  rlist            grid      spacing   1/beta\n");
    print_pme_loadbal_setting(fplog, "initial", &pme_lb->setup[0]);
    print_pme_loadbal_setting(fplog, "final", &pme_lb->setup[pme_lb->cur]);
    fprintf(fplog, " cost-ratio           %4.2f             %4.2f\n",
            pp_ratio, grid_ratio);
    fprintf(fplog, " (note that these numbers concern only part of the total PP and PME load)\n");

    if (pp_ratio > 1.5 && !bNonBondedOnGPU)
    {
        md_print_warn(cr, fplog,
                      "NOTE: PME load balancing increased the non-bonded workload by more than 50%%.\n"
                      "      For better performance, use (more) PME ranks (mdrun -npme),\n"
                      "      or if you are beyond the scaling limit, use fewer total ranks (or nodes).\n");
    }
    else
    {
        fprintf(fplog, "\n");
    }
}

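/* Print the load balancing summary to the log file when anything changed;
 * freeing the data is still a TODO, see the note below.
 */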
void pme_loadbal_done(pme_load_balancing_t pme_lb,
                      t_commrec *cr, FILE *fplog,
                      gmx_bool bNonBondedOnGPU)
{
    if (fplog != NULL && (pme_lb->cur > 0 || pme_lb->elimited != epmelblimNO))
    {
        print_pme_loadbal_settings(pme_lb, cr, fplog, bNonBondedOnGPU);
    }

    /* TODO: Here we should free all pointers in pme_lb,
     * but as it contains pme data structures,
     * we need to first make pme.c free all data.
     */
}