Adjust more copyright headers
[alexxy/gromacs.git] / src / programs / mdrun / pme_loadbal.c
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include "smalloc.h"
#include "network.h"
#include "calcgrid.h"
#include "pme.h"
#include "vec.h"
#include "domdec.h"
#include "nbnxn_cuda_data_mgmt.h"
#include "force.h"
#include "macros.h"
#include "md_logging.h"
#include "pme_loadbal.h"

/* Parameters and settings for one PP-PME setup */
typedef struct {
    real      rcut_coulomb;    /* Coulomb cut-off                              */
    real      rlist;           /* pair-list cut-off                            */
    real      rlistlong;       /* LR pair-list cut-off                         */
    int       nstcalclr;       /* frequency of evaluating long-range forces for group scheme */
    real      spacing;         /* (largest) PME grid spacing                   */
    ivec      grid;            /* the PME grid dimensions                      */
    real      grid_efficiency; /* inefficiency factor for non-uniform grids <= 1 */
    real      ewaldcoeff;      /* the Ewald coefficient                        */
    gmx_pme_t pmedata;         /* the data structure used in the PME code      */

    int       count;           /* number of times this setup has been timed    */
    double    cycles;          /* the fastest time for this setup in cycles    */
} pme_setup_t;

/* In the initial scan, step by grids that are at least a factor 0.8 coarser */
#define PME_LB_GRID_SCALE_FAC  0.8
/* In the initial scan, try to skip grids with uneven x/y/z spacing,
 * checking if the "efficiency" is more than 5% worse than the previous grid.
 */
#define PME_LB_GRID_EFFICIENCY_REL_FAC  1.05
/* In later stages, only rerun setups that are at most 12% slower than the fastest so far */
#define PME_LB_SLOW_FAC  1.12
/* If setups get more than 2% faster, do another round to avoid
 * choosing a slower setup due to acceleration or fluctuations.
 */
#define PME_LB_ACCEL_TOL 1.02
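/* Worked example with illustrative (not measured) numbers: if the fastest
 * setup so far takes 8.0 M-cycles, later stages rerun only setups whose
 * best time is at most 8.0*PME_LB_SLOW_FAC = 8.96 M-cycles, and a setup's
 * new timing has to beat its previously stored best by more than a factor
 * PME_LB_ACCEL_TOL (e.g. drop below 8.0/1.02 = 7.84 M-cycles) before an
 * extra scanning stage is added.
 */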

enum {
    epmelblimNO, epmelblimBOX, epmelblimDD, epmelblimPMEGRID, epmelblimNR
};

const char *pmelblim_str[epmelblimNR] =
{ "no", "box size", "domain decomposition", "PME grid restriction" };

struct pme_load_balancing {
    int          nstage;             /* the current maximum number of stages */

    real         cut_spacing;        /* the minimum cutoff / PME grid spacing ratio */
    real         rcut_vdw;           /* Vdw cutoff (does not change) */
    real         rcut_coulomb_start; /* Initial electrostatics cutoff */
    int          nstcalclr_start;    /* Initial frequency for evaluating long-range forces */
    real         rbuf_coulomb;       /* the pairlist buffer size */
    real         rbuf_vdw;           /* the pairlist buffer size */
    matrix       box_start;          /* the initial simulation box */
    int          n;                  /* the number of setups, which also equals the allocation size */
    pme_setup_t *setup;              /* the PME+cutoff setups */
    int          cur;                /* the current setup */
    int          fastest;            /* the fastest setup so far */
    int          start;              /* start of setup range to consider in stage>0 */
    int          end;                /* end   of setup range to consider in stage>0 */
    int          elimited;           /* was the balancing limited, uses enum above */
    int          cutoff_scheme;      /* Verlet or group cut-offs */

    int          stage;              /* the current stage */
};
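
/* Summary of how the fields above drive the balancing (see the functions
 * below): stage 0 scans upwards from the initial cut-off/grid, generating
 * a new setup[] entry per step, until a setup is clearly too slow or a
 * box/DD/PME-grid limit is hit; switch_to_stage1() then narrows
 * [start,end) to the competitive setups, which later stages re-time, and
 * the overall fastest setup is selected once stage reaches nstage.
 */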

void pme_loadbal_init(pme_load_balancing_t *pme_lb_p,
                      const t_inputrec *ir, matrix box,
                      const interaction_const_t *ic,
                      gmx_pme_t pmedata)
{
    pme_load_balancing_t pme_lb;
    real                 spm, sp;
    int                  d;

    snew(pme_lb, 1);

    /* Any number of stages >= 2 is supported */
    pme_lb->nstage   = 2;

    pme_lb->cutoff_scheme = ir->cutoff_scheme;

    if (pme_lb->cutoff_scheme == ecutsVERLET)
    {
        pme_lb->rbuf_coulomb = ic->rlist - ic->rcoulomb;
        pme_lb->rbuf_vdw     = pme_lb->rbuf_coulomb;
    }
    else
    {
        if (ic->rcoulomb > ic->rlist)
        {
            pme_lb->rbuf_coulomb = ic->rlistlong - ic->rcoulomb;
        }
        else
        {
            pme_lb->rbuf_coulomb = ic->rlist - ic->rcoulomb;
        }
        if (ic->rvdw > ic->rlist)
        {
            pme_lb->rbuf_vdw = ic->rlistlong - ic->rvdw;
        }
        else
        {
            pme_lb->rbuf_vdw = ic->rlist - ic->rvdw;
        }
    }

    copy_mat(box, pme_lb->box_start);
    if (ir->ePBC == epbcXY && ir->nwall == 2)
    {
        svmul(ir->wall_ewald_zfac, pme_lb->box_start[ZZ], pme_lb->box_start[ZZ]);
    }

    pme_lb->n = 1;
    snew(pme_lb->setup, pme_lb->n);

    pme_lb->rcut_vdw              = ic->rvdw;
    pme_lb->rcut_coulomb_start    = ir->rcoulomb;
    pme_lb->nstcalclr_start       = ir->nstcalclr;

    pme_lb->cur                   = 0;
    pme_lb->setup[0].rcut_coulomb = ic->rcoulomb;
    pme_lb->setup[0].rlist        = ic->rlist;
    pme_lb->setup[0].rlistlong    = ic->rlistlong;
    pme_lb->setup[0].nstcalclr    = ir->nstcalclr;
    pme_lb->setup[0].grid[XX]     = ir->nkx;
    pme_lb->setup[0].grid[YY]     = ir->nky;
    pme_lb->setup[0].grid[ZZ]     = ir->nkz;
    pme_lb->setup[0].ewaldcoeff   = ic->ewaldcoeff;

    pme_lb->setup[0].pmedata  = pmedata;

    spm = 0;
    for (d = 0; d < DIM; d++)
    {
        sp = norm(pme_lb->box_start[d])/pme_lb->setup[0].grid[d];
        if (sp > spm)
        {
            spm = sp;
        }
    }
    pme_lb->setup[0].spacing = spm;

    if (ir->fourier_spacing > 0)
    {
        pme_lb->cut_spacing = ir->rcoulomb/ir->fourier_spacing;
    }
    else
    {
        pme_lb->cut_spacing = ir->rcoulomb/pme_lb->setup[0].spacing;
    }

    pme_lb->stage = 0;

    pme_lb->fastest  = 0;
    pme_lb->start    = 0;
    pme_lb->end      = 0;
    pme_lb->elimited = epmelblimNO;

    *pme_lb_p = pme_lb;
}
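
/* A minimal usage sketch, assuming an md.c-style caller; the variable
 * names (fr, bTuning, bMoreSteps, ...) are illustrative, not part of
 * this API:
 *
 *   pme_load_balancing_t pme_lb;
 *
 *   pme_loadbal_init(&pme_lb, ir, state->box, fr->ic, fr->pmedata);
 *   for (step = 0; bMoreSteps; step++)
 *   {
 *       // ... run one MD step and measure its cost in 'cycles' ...
 *       bTuning = pme_load_balance(pme_lb, cr, fp_err, fplog, ir, state,
 *                                  cycles, fr->ic, fr->nbv, &fr->pmedata,
 *                                  step);
 *   }
 *   pme_loadbal_done(pme_lb, cr, fplog, bNonBondedOnGPU);
 */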

static gmx_bool pme_loadbal_increase_cutoff(pme_load_balancing_t pme_lb,
                                            int                  pme_order,
                                            const gmx_domdec_t   *dd)
{
    pme_setup_t *set;
    int          npmenodes_x, npmenodes_y;
    real         fac, sp;
    real         tmpr_coulomb, tmpr_vdw;
    int          d;
    gmx_bool     grid_ok;

    /* Try to add a new setup with next larger cut-off to the list */
    pme_lb->n++;
    srenew(pme_lb->setup, pme_lb->n);
    set          = &pme_lb->setup[pme_lb->n-1];
    set->pmedata = NULL;

    get_pme_nnodes(dd, &npmenodes_x, &npmenodes_y);

    fac = 1;
    do
    {
        /* Avoid an infinite while loop, which can occur at the minimum grid size.
         * Note that in practice load balancing will stop before this point.
         * The factor 2.1 allows for the extreme case in which only grids
         * of powers of 2 are allowed (the current code supports more grids).
         */
        if (fac > 2.1)
        {
            pme_lb->n--;

            return FALSE;
        }

        fac *= 1.01;
        clear_ivec(set->grid);
        sp = calc_grid(NULL, pme_lb->box_start,
                       fac*pme_lb->setup[pme_lb->cur].spacing,
                       &set->grid[XX],
                       &set->grid[YY],
                       &set->grid[ZZ]);

        /* As here we can't easily check if one of the PME nodes
         * uses threading, we do a conservative grid check.
         * This means we can't use pme_order or fewer grid lines
         * per PME node along x, which is not a strong restriction.
         */
        gmx_pme_check_restrictions(pme_order,
                                   set->grid[XX], set->grid[YY], set->grid[ZZ],
                                   npmenodes_x, npmenodes_y,
                                   TRUE,
                                   FALSE,
                                   &grid_ok);
    }
    while (sp <= 1.001*pme_lb->setup[pme_lb->cur].spacing || !grid_ok);

    set->rcut_coulomb = pme_lb->cut_spacing*sp;

    if (pme_lb->cutoff_scheme == ecutsVERLET)
    {
        set->rlist        = set->rcut_coulomb + pme_lb->rbuf_coulomb;
        /* We don't use LR lists with Verlet, but this avoids if-statements in further checks */
        set->rlistlong    = set->rlist;
    }
    else
    {
        tmpr_coulomb          = set->rcut_coulomb + pme_lb->rbuf_coulomb;
        tmpr_vdw              = pme_lb->rcut_vdw + pme_lb->rbuf_vdw;
        set->rlist            = min(tmpr_coulomb, tmpr_vdw);
        set->rlistlong        = max(tmpr_coulomb, tmpr_vdw);

        /* Set the long-range update frequency */
        if (set->rlist == set->rlistlong)
        {
            /* No long-range interactions if the short-/long-range cutoffs are identical */
            set->nstcalclr = 0;
        }
        else if (pme_lb->nstcalclr_start == 0 || pme_lb->nstcalclr_start == 1)
        {
            /* We were not doing long-range before, but now we are since rlist!=rlistlong */
            set->nstcalclr = 1;
        }
        else
        {
            /* We were already doing long-range interactions from the start */
            if (pme_lb->rcut_vdw > pme_lb->rcut_coulomb_start)
            {
                /* We were originally doing long-range VdW-only interactions.
                 * If rvdw is still longer than rcoulomb we keep the original nstcalclr,
                 * but if the coulomb cutoff has become longer we should update the long-range
                 * part every step.
                 */
                set->nstcalclr = (tmpr_vdw > tmpr_coulomb) ? pme_lb->nstcalclr_start : 1;
            }
            else
            {
                /* We were not doing any long-range interactions from the start,
                 * since twin-range coulomb is not possible for the PME interaction.
                 */
                set->nstcalclr = 1;
            }
        }
    }

    set->spacing      = sp;
    /* The grid efficiency is the size wrt a grid with uniform x/y/z spacing */
    set->grid_efficiency = 1;
    for (d = 0; d < DIM; d++)
    {
        set->grid_efficiency *= (set->grid[d]*sp)/norm(pme_lb->box_start[d]);
    }
    /* The Ewald coefficient is inversely proportional to the cut-off */
    set->ewaldcoeff =
        pme_lb->setup[0].ewaldcoeff*pme_lb->setup[0].rcut_coulomb/set->rcut_coulomb;
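    /* Rationale: the real-space Ewald error is governed by erfc(beta*rc),
     * so scaling beta (the Ewald coefficient) by rc_old/rc_new keeps
     * beta*rc, and thereby the real-space accuracy, unchanged as the
     * cut-off grows.
     */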

    set->count   = 0;
    set->cycles  = 0;

    if (debug)
    {
        fprintf(debug, "PME loadbal: grid %d %d %d, coulomb cutoff %f\n",
                set->grid[XX], set->grid[YY], set->grid[ZZ], set->rcut_coulomb);
    }
    return TRUE;
}
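
/* Worked example with illustrative numbers: with rcoulomb = 0.9 nm and an
 * initial grid spacing of 0.12 nm, cut_spacing = 0.9/0.12 = 7.5; if the
 * scan above settles on a coarser spacing sp = 0.135 nm, the new Coulomb
 * cut-off becomes 7.5*0.135 = 1.0125 nm, i.e. the cut-off/spacing ratio
 * is preserved while work shifts from the PME grid to the PP pairs.
 */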

static void print_grid(FILE *fp_err, FILE *fp_log,
                       const char *pre,
                       const char *desc,
                       const pme_setup_t *set,
                       double cycles)
{
    char buf[STRLEN], buft[STRLEN];

    if (cycles >= 0)
    {
        sprintf(buft, ": %.1f M-cycles", cycles*1e-6);
    }
    else
    {
        buft[0] = '\0';
    }
    sprintf(buf, "%-11s%10s pme grid %d %d %d, coulomb cutoff %.3f%s",
            pre,
            desc, set->grid[XX], set->grid[YY], set->grid[ZZ], set->rcut_coulomb,
            buft);
    if (fp_err != NULL)
    {
        fprintf(fp_err, "\r%s\n", buf);
    }
    if (fp_log != NULL)
    {
        fprintf(fp_log, "%s\n", buf);
    }
}

static int pme_loadbal_end(pme_load_balancing_t pme_lb)
{
    /* In the initial stage only n is set; end is not set yet */
    if (pme_lb->end > 0)
    {
        return pme_lb->end;
    }
    else
    {
        return pme_lb->n;
    }
}

static void print_loadbal_limited(FILE *fp_err, FILE *fp_log,
                                  gmx_large_int_t step,
                                  pme_load_balancing_t pme_lb)
{
    char buf[STRLEN], sbuf[22];

    sprintf(buf, "step %4s: the %s limits the PME load balancing to a coulomb cut-off of %.3f",
            gmx_step_str(step, sbuf),
            pmelblim_str[pme_lb->elimited],
            pme_lb->setup[pme_loadbal_end(pme_lb)-1].rcut_coulomb);
    if (fp_err != NULL)
    {
        fprintf(fp_err, "\r%s\n", buf);
    }
    if (fp_log != NULL)
    {
        fprintf(fp_log, "%s\n", buf);
    }
}

static void switch_to_stage1(pme_load_balancing_t pme_lb)
{
    pme_lb->start = 0;
    while (pme_lb->start+1 < pme_lb->n &&
           (pme_lb->setup[pme_lb->start].count == 0 ||
            pme_lb->setup[pme_lb->start].cycles >
            pme_lb->setup[pme_lb->fastest].cycles*PME_LB_SLOW_FAC))
    {
        pme_lb->start++;
    }
    while (pme_lb->start > 0 && pme_lb->setup[pme_lb->start-1].cycles == 0)
    {
        pme_lb->start--;
    }

    pme_lb->end = pme_lb->n;
    if (pme_lb->setup[pme_lb->end-1].count > 0 &&
        pme_lb->setup[pme_lb->end-1].cycles >
        pme_lb->setup[pme_lb->fastest].cycles*PME_LB_SLOW_FAC)
    {
        pme_lb->end--;
    }

    pme_lb->stage = 1;

    /* Next we want to choose setup pme_lb->start, but as we will increase
     * pme_lb->cur by one right after returning, we subtract 1 here.
     */
    pme_lb->cur = pme_lb->start - 1;
}
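
/* Example of the trimming above, with illustrative timings: for four
 * setups timed at {10.0, 9.0, 8.0, 8.5} M-cycles the fastest is 8.0, so
 * the threshold is 8.0*PME_LB_SLOW_FAC = 8.96 M-cycles; setups 0 and 1
 * are excluded, giving start = 2 and end = 4 for stage 1, and cur is set
 * to start-1 = 1 because the caller increments it before use.
 */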

gmx_bool pme_load_balance(pme_load_balancing_t pme_lb,
                          t_commrec           *cr,
                          FILE                *fp_err,
                          FILE                *fp_log,
                          t_inputrec          *ir,
                          t_state             *state,
                          double               cycles,
                          interaction_const_t *ic,
                          nonbonded_verlet_t  *nbv,
                          gmx_pme_t           *pmedata,
                          gmx_large_int_t      step)
{
    gmx_bool     OK;
    pme_setup_t *set;
    double       cycles_fast;
    char         buf[STRLEN], sbuf[22];
    real         rtab;
    gmx_bool     bUsesSimpleTables = TRUE;

    if (pme_lb->stage == pme_lb->nstage)
    {
        return FALSE;
    }

    if (PAR(cr))
    {
        gmx_sumd(1, &cycles, cr);
        cycles /= cr->nnodes;
    }

    set = &pme_lb->setup[pme_lb->cur];
    set->count++;

    rtab = ir->rlistlong + ir->tabext;

    if (set->count % 2 == 1)
    {
        /* Skip the first cycle, because the first step after a switch
         * is much slower due to allocation and/or caching effects.
         */
        return TRUE;
    }

    sprintf(buf, "step %4s: ", gmx_step_str(step, sbuf));
    print_grid(fp_err, fp_log, buf, "timed with", set, cycles);

    if (set->count <= 2)
    {
        set->cycles = cycles;
    }
    else
    {
        if (cycles*PME_LB_ACCEL_TOL < set->cycles &&
            pme_lb->stage == pme_lb->nstage - 1)
        {
            /* The performance went up a lot (due to e.g. DD load balancing).
             * Add a stage, keep the minima, but rescan all setups.
             */
            pme_lb->nstage++;

            if (debug)
            {
                fprintf(debug, "The performance for grid %d %d %d went from %.3f to %.1f M-cycles, this is more than %f\n"
                        "Increased the number of stages to %d"
                        " and ignoring the previous performance\n",
                        set->grid[XX], set->grid[YY], set->grid[ZZ],
                        cycles*1e-6, set->cycles*1e-6, PME_LB_ACCEL_TOL,
                        pme_lb->nstage);
            }
        }
        set->cycles = min(set->cycles, cycles);
    }

    if (set->cycles < pme_lb->setup[pme_lb->fastest].cycles)
    {
        pme_lb->fastest = pme_lb->cur;

        if (DOMAINDECOMP(cr))
        {
            /* We found a new fastest setting, ensure that with subsequent
             * shorter cut-offs the dynamic load balancing does not make
             * the use of the current cut-off impossible. This solution is
             * a trade-off, as the PME load balancing and DD domain size
             * load balancing can interact in complex ways.
             * With the Verlet kernels, DD load imbalance will usually be
             * mainly due to bonded interaction imbalance, which will often
             * quickly push the domain boundaries beyond the limit for the
             * optimal, PME load balanced, cut-off. But it could be that
             * better overall performance can be obtained with a slightly
             * shorter cut-off and better DD load balancing.
             */
            change_dd_dlb_cutoff_limit(cr);
        }
    }
    cycles_fast = pme_lb->setup[pme_lb->fastest].cycles;

    /* Check in stage 0 if we should stop scanning grids.
     * Stop when the time is more than PME_LB_SLOW_FAC times that of the fastest.
     */
    if (pme_lb->stage == 0 && pme_lb->cur > 0 &&
        cycles > pme_lb->setup[pme_lb->fastest].cycles*PME_LB_SLOW_FAC)
    {
        pme_lb->n = pme_lb->cur + 1;
        /* Done with scanning, go to stage 1 */
        switch_to_stage1(pme_lb);
    }

    if (pme_lb->stage == 0)
    {
        int gridsize_start;

        gridsize_start = set->grid[XX]*set->grid[YY]*set->grid[ZZ];

        do
        {
            if (pme_lb->cur+1 < pme_lb->n)
            {
                /* We had already generated the next setup */
                OK = TRUE;
            }
            else
            {
                /* Find the next setup */
                OK = pme_loadbal_increase_cutoff(pme_lb, ir->pme_order, cr->dd);

                if (!OK)
                {
                    pme_lb->elimited = epmelblimPMEGRID;
                }
            }

            if (OK && ir->ePBC != epbcNONE)
            {
                OK = (sqr(pme_lb->setup[pme_lb->cur+1].rlistlong)
                      <= max_cutoff2(ir->ePBC, state->box));
                if (!OK)
                {
                    pme_lb->elimited = epmelblimBOX;
                }
            }

            if (OK)
            {
                pme_lb->cur++;

                if (DOMAINDECOMP(cr))
                {
                    OK = change_dd_cutoff(cr, state, ir,
                                          pme_lb->setup[pme_lb->cur].rlistlong);
                    if (!OK)
                    {
                        /* Failed: do not use this setup */
                        pme_lb->cur--;
                        pme_lb->elimited = epmelblimDD;
                    }
                }
            }
            if (!OK)
            {
                /* We hit the upper limit for the cut-off,
                 * the setup should not go further than cur.
                 */
                pme_lb->n = pme_lb->cur + 1;
                print_loadbal_limited(fp_err, fp_log, step, pme_lb);
                /* Switch to the next stage */
                switch_to_stage1(pme_lb);
            }
        }
        while (OK &&
               !(pme_lb->setup[pme_lb->cur].grid[XX]*
                 pme_lb->setup[pme_lb->cur].grid[YY]*
                 pme_lb->setup[pme_lb->cur].grid[ZZ] <
                 gridsize_start*PME_LB_GRID_SCALE_FAC
                 &&
                 pme_lb->setup[pme_lb->cur].grid_efficiency <
                 pme_lb->setup[pme_lb->cur-1].grid_efficiency*PME_LB_GRID_EFFICIENCY_REL_FAC));
    }
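    /* In words: the do-while above keeps pre-generating setups until the
     * new grid has shrunk below PME_LB_GRID_SCALE_FAC times the starting
     * grid volume and its (in)efficiency factor is no more than
     * PME_LB_GRID_EFFICIENCY_REL_FAC times that of the previous grid,
     * so that stage 0 steps in large, reasonably uniform grid increments.
     */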

    if (pme_lb->stage > 0 && pme_lb->end == 1)
    {
        pme_lb->cur   = 0;
        pme_lb->stage = pme_lb->nstage;
    }
    else if (pme_lb->stage > 0 && pme_lb->end > 1)
    {
        /* If stage = nstage-1:
         *   scan over all setups, rerunning only those setups
         *   which are not much slower than the fastest
         * else:
         *   use the next setup
         */
        do
        {
            pme_lb->cur++;
            if (pme_lb->cur == pme_lb->end)
            {
                pme_lb->stage++;
                pme_lb->cur = pme_lb->start;
            }
        }
        while (pme_lb->stage == pme_lb->nstage - 1 &&
               pme_lb->setup[pme_lb->cur].count > 0 &&
               pme_lb->setup[pme_lb->cur].cycles > cycles_fast*PME_LB_SLOW_FAC);

        if (pme_lb->stage == pme_lb->nstage)
        {
            /* We are done optimizing, use the fastest setup we found */
            pme_lb->cur = pme_lb->fastest;
        }
    }

    if (DOMAINDECOMP(cr) && pme_lb->stage > 0)
    {
        OK = change_dd_cutoff(cr, state, ir, pme_lb->setup[pme_lb->cur].rlistlong);
        if (!OK)
        {
            /* Failsafe solution */
            if (pme_lb->cur > 1 && pme_lb->stage == pme_lb->nstage)
            {
                pme_lb->stage--;
            }
            pme_lb->fastest  = 0;
            pme_lb->start    = 0;
            pme_lb->end      = pme_lb->cur;
            pme_lb->cur      = pme_lb->start;
            pme_lb->elimited = epmelblimDD;
            print_loadbal_limited(fp_err, fp_log, step, pme_lb);
        }
    }

    /* Change the Coulomb cut-off and the PME grid */

    set = &pme_lb->setup[pme_lb->cur];

    ic->rcoulomb   = set->rcut_coulomb;
    ic->rlist      = set->rlist;
    ic->rlistlong  = set->rlistlong;
    ir->nstcalclr  = set->nstcalclr;
    ic->ewaldcoeff = set->ewaldcoeff;

    bUsesSimpleTables = uses_simple_tables(ir->cutoff_scheme, nbv, 0);
    if (pme_lb->cutoff_scheme == ecutsVERLET &&
        nbv->grp[0].kernel_type == nbnxnk8x8x8_CUDA)
    {
        nbnxn_cuda_pme_loadbal_update_param(nbv->cu_nbv, ic);

        /* With tMPI + GPUs some ranks may be sharing GPU(s) and therefore
         * also sharing texture references. To keep the code simple, we don't
         * treat texture references as shared resources, but this means that
         * the coulomb_tab texture ref will get updated by multiple threads.
         * Hence, to ensure that the non-bonded kernels don't start before all
         * texture binding operations are finished, we need to wait for all ranks
         * to arrive here before continuing.
         *
         * Note that we could omit this barrier if GPUs are not shared (or
         * texture objects are used), but as this is initialization code, there
         * is no point in complicating things.
         */
#ifdef GMX_THREAD_MPI
        if (PAR(cr))
        {
            gmx_barrier(cr);
        }
#endif /* GMX_THREAD_MPI */
    }
    else
    {
        init_interaction_const_tables(NULL, ic, bUsesSimpleTables,
                                      rtab);
    }

    if (pme_lb->cutoff_scheme == ecutsVERLET && nbv->ngrp > 1)
    {
        init_interaction_const_tables(NULL, ic, bUsesSimpleTables,
                                      rtab);
    }

    if (cr->duty & DUTY_PME)
    {
        if (pme_lb->setup[pme_lb->cur].pmedata == NULL)
        {
            /* Generate a new PME data structure,
             * copying part of the old pointers.
             */
            gmx_pme_reinit(&set->pmedata,
                           cr, pme_lb->setup[0].pmedata, ir,
                           set->grid);
        }
        *pmedata = set->pmedata;
    }
    else
    {
        /* Tell our PME-only node to switch grid */
        gmx_pme_send_switchgrid(cr, set->grid, set->ewaldcoeff);
    }

    if (debug)
    {
        print_grid(NULL, debug, "", "switched to", set, -1);
    }

    if (pme_lb->stage == pme_lb->nstage)
    {
        print_grid(fp_err, fp_log, "", "optimal", set, -1);
    }

    return TRUE;
}

void restart_pme_loadbal(pme_load_balancing_t pme_lb, int n)
{
    pme_lb->nstage += n;
}
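
/* Raising nstage makes pme_load_balance() resume timing on its next call;
 * callers can use this to redo the balancing after conditions that affect
 * the timings have changed (the exact trigger is up to the caller).
 */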

static int pme_grid_points(const pme_setup_t *setup)
{
    return setup->grid[XX]*setup->grid[YY]*setup->grid[ZZ];
}

static real pme_loadbal_rlist(const pme_setup_t *setup)
{
    /* With the group cut-off scheme we can have twin-range either
     * for Coulomb or for VdW, so we need a check here.
     * With the Verlet cut-off scheme rlist=rlistlong.
     */
    if (setup->rcut_coulomb > setup->rlist)
    {
        return setup->rlistlong;
    }
    else
    {
        return setup->rlist;
    }
}

static void print_pme_loadbal_setting(FILE              *fplog,
                                      char              *name,
                                      const pme_setup_t *setup)
{
    fprintf(fplog,
            "   %-7s %6.3f nm %6.3f nm     %3d %3d %3d   %5.3f nm  %5.3f nm\n",
            name,
            setup->rcut_coulomb, pme_loadbal_rlist(setup),
            setup->grid[XX], setup->grid[YY], setup->grid[ZZ],
            setup->spacing, 1/setup->ewaldcoeff);
}

static void print_pme_loadbal_settings(pme_load_balancing_t pme_lb,
                                       t_commrec           *cr,
                                       FILE                *fplog,
                                       gmx_bool             bNonBondedOnGPU)
{
    double pp_ratio, grid_ratio;

    pp_ratio   = pow(pme_loadbal_rlist(&pme_lb->setup[pme_lb->cur])/pme_loadbal_rlist(&pme_lb->setup[0]), 3.0);
    grid_ratio = pme_grid_points(&pme_lb->setup[pme_lb->cur])/
        (double)pme_grid_points(&pme_lb->setup[0]);

    fprintf(fplog, "\n");
    fprintf(fplog, "       P P   -   P M E   L O A D   B A L A N C I N G\n");
    fprintf(fplog, "\n");
    /* Here we only warn when the optimal setting is the last one */
    if (pme_lb->elimited != epmelblimNO &&
        pme_lb->cur == pme_loadbal_end(pme_lb)-1)
    {
        fprintf(fplog, " NOTE: The PP/PME load balancing was limited by the %s,\n",
                pmelblim_str[pme_lb->elimited]);
        fprintf(fplog, "       you might not have reached a good load balance.\n");
        if (pme_lb->elimited == epmelblimDD)
        {
            fprintf(fplog, "       Try different mdrun -dd settings or lower the -dds value.\n");
        }
        fprintf(fplog, "\n");
    }
    fprintf(fplog, " PP/PME load balancing changed the cut-off and PME settings:\n");
    fprintf(fplog, "           particle-particle                    PME\n");
    fprintf(fplog, "            rcoulomb  rlist            grid      spacing   1/beta\n");
    print_pme_loadbal_setting(fplog, "initial", &pme_lb->setup[0]);
    print_pme_loadbal_setting(fplog, "final", &pme_lb->setup[pme_lb->cur]);
    fprintf(fplog, " cost-ratio           %4.2f             %4.2f\n",
            pp_ratio, grid_ratio);
    fprintf(fplog, " (note that these numbers concern only part of the total PP and PME load)\n");

    if (pp_ratio > 1.5 && !bNonBondedOnGPU)
    {
        md_print_warn(cr, fplog,
                      "NOTE: PME load balancing increased the non-bonded workload by more than 50%%.\n"
                      "      For better performance use (more) PME nodes (mdrun -npme),\n"
                      "      or in case you are beyond the scaling limit, use fewer nodes in total.\n");
    }
    else
    {
        fprintf(fplog, "\n");
    }
}

void pme_loadbal_done(pme_load_balancing_t pme_lb,
                      t_commrec *cr, FILE *fplog,
                      gmx_bool bNonBondedOnGPU)
{
    if (fplog != NULL && (pme_lb->cur > 0 || pme_lb->elimited != epmelblimNO))
    {
        print_pme_loadbal_settings(pme_lb, cr, fplog, bNonBondedOnGPU);
    }

    /* TODO: Here we should free all pointers in pme_lb,
     * but as it contains pme data structures,
     * we need to first make pme.c free all data.
     */
}