Update copyright statements and change license to LGPL
[alexxy/gromacs.git] / src / kernel / pme_loadbal.c
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2011, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 * Copyright (c) 2012, by the GROMACS development team, led by
 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
 * others, as listed in the AUTHORS file in the top-level source
 * directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include "smalloc.h"
#include "network.h"
#include "calcgrid.h"
#include "pme.h"
#include "vec.h"
#include "domdec.h"
#include "nbnxn_cuda_data_mgmt.h"
#include "force.h"
#include "pme_loadbal.h"

/* Parameters and settings for one PP-PME setup */
typedef struct {
    real rcut_coulomb;    /* Coulomb cut-off                              */
    real rlist;           /* pair-list cut-off                            */
    real rlistlong;       /* LR pair-list cut-off                         */
    int  nstcalclr;       /* frequency of evaluating long-range forces for group scheme */
    real spacing;         /* (largest) PME grid spacing                   */
    ivec grid;            /* the PME grid dimensions                      */
    real grid_efficiency; /* inefficiency factor for non-uniform grids <= 1 */
    real ewaldcoeff;      /* the Ewald coefficient                        */
    gmx_pme_t pmedata;    /* the data structure used in the PME code      */

    int  count;           /* number of times this setup has been timed    */
    double cycles;        /* the fastest time for this setup in cycles    */
} pme_setup_t;

/* In the initial scan, step by grids that are at least a factor 0.8 coarser */
#define PME_LB_GRID_SCALE_FAC  0.8
/* In the initial scan, try to skip grids with uneven x/y/z spacing,
 * checking if the "efficiency" is more than 5% worse than the previous grid.
 */
#define PME_LB_GRID_EFFICIENCY_REL_FAC  1.05
/* In later stages, only rerun setups that are at most 12% slower than the fastest setup found so far */
#define PME_LB_SLOW_FAC  1.12
/* If setups get more than 2% faster, do another round to avoid
 * choosing a slower setup due to acceleration or fluctuations.
 */
#define PME_LB_ACCEL_TOL 1.02

enum { epmelblimNO, epmelblimBOX, epmelblimDD, epmelblimNR };

const char *pmelblim_str[epmelblimNR] =
{ "no", "box size", "domain decomposition" };

struct pme_load_balancing {
    int  nstage;        /* the current maximum number of stages */

    real cut_spacing;   /* the minimum cutoff / PME grid spacing ratio */
    real rcut_vdw;      /* Vdw cutoff (does not change) */
    real rcut_coulomb_start; /* Initial electrostatics cutoff */
    int  nstcalclr_start; /* Initial frequency of evaluating long-range forces (group scheme) */
    real rbuf_coulomb;  /* the pairlist buffer size */
    real rbuf_vdw;      /* the pairlist buffer size */
    matrix box_start;   /* the initial simulation box */
    int n;              /* the number of setups, which also equals the allocation size */
    pme_setup_t *setup; /* the PME+cutoff setups */
    int cur;            /* the current setup */
    int fastest;        /* fastest setup up till now */
    int start;          /* start of setup range to consider in stage>0 */
    int end;            /* end   of setup range to consider in stage>0 */
    int elimited;       /* was the balancing limited, uses enum above */
    int cutoff_scheme;  /* Verlet or group cut-offs */

    int stage;          /* the current stage */
};

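/* A typical call sequence, as a sketch only: the real call sites live in the
 * mdrun sources, and the names fr->ic, fr->nbv and fr->pmedata below are
 * illustrative, not part of this module's interface.
 *
 *   pme_load_balancing_t pme_lb;
 *
 *   pme_loadbal_init(&pme_lb, ir, state->box, fr->ic, fr->pmedata);
 *
 *   // each MD step, once the cycle count since the previous call is known:
 *   bBalancing = pme_load_balance(pme_lb, cr, fp_err, fplog, ir, state,
 *                                 cycles, fr->ic, fr->nbv, &fr->pmedata, step);
 *
 *   // at the end of the run:
 *   pme_loadbal_done(pme_lb, fplog);
 */
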
void pme_loadbal_init(pme_load_balancing_t *pme_lb_p,
                      const t_inputrec *ir,matrix box,
                      const interaction_const_t *ic,
                      gmx_pme_t pmedata)
{
    pme_load_balancing_t pme_lb;
    real spm,sp;
    int  d;

    snew(pme_lb,1);

    /* Any number of stages >= 2 is supported */
    pme_lb->nstage   = 2;

    pme_lb->cutoff_scheme = ir->cutoff_scheme;

    if(pme_lb->cutoff_scheme == ecutsVERLET)
    {
        pme_lb->rbuf_coulomb = ic->rlist - ic->rcoulomb;
        pme_lb->rbuf_vdw     = pme_lb->rbuf_coulomb;
    }
    else
    {
        if(ic->rcoulomb > ic->rlist)
        {
            pme_lb->rbuf_coulomb = ic->rlistlong - ic->rcoulomb;
        }
        else
        {
            pme_lb->rbuf_coulomb = ic->rlist - ic->rcoulomb;
        }
        if(ic->rvdw > ic->rlist)
        {
            pme_lb->rbuf_vdw = ic->rlistlong - ic->rvdw;
        }
        else
        {
            pme_lb->rbuf_vdw = ic->rlist - ic->rvdw;
        }
    }

    copy_mat(box,pme_lb->box_start);
    if (ir->ePBC==epbcXY && ir->nwall==2)
    {
        svmul(ir->wall_ewald_zfac,pme_lb->box_start[ZZ],pme_lb->box_start[ZZ]);
    }

    pme_lb->n = 1;
    snew(pme_lb->setup,pme_lb->n);

    pme_lb->rcut_vdw              = ic->rvdw;
    pme_lb->rcut_coulomb_start    = ir->rcoulomb;
    pme_lb->nstcalclr_start       = ir->nstcalclr;

    pme_lb->cur = 0;
    pme_lb->setup[0].rcut_coulomb = ic->rcoulomb;
    pme_lb->setup[0].rlist        = ic->rlist;
    pme_lb->setup[0].rlistlong    = ic->rlistlong;
    pme_lb->setup[0].nstcalclr    = ir->nstcalclr;
    pme_lb->setup[0].grid[XX]     = ir->nkx;
    pme_lb->setup[0].grid[YY]     = ir->nky;
    pme_lb->setup[0].grid[ZZ]     = ir->nkz;
    pme_lb->setup[0].ewaldcoeff   = ic->ewaldcoeff;

    pme_lb->setup[0].pmedata  = pmedata;

    spm = 0;
    for(d=0; d<DIM; d++)
    {
        sp = norm(pme_lb->box_start[d])/pme_lb->setup[0].grid[d];
        if (sp > spm)
        {
            spm = sp;
        }
    }
    pme_lb->setup[0].spacing = spm;

    if (ir->fourier_spacing > 0)
    {
        pme_lb->cut_spacing = ir->rcoulomb/ir->fourier_spacing;
    }
    else
    {
        pme_lb->cut_spacing = ir->rcoulomb/pme_lb->setup[0].spacing;
    }

    pme_lb->stage = 0;

    pme_lb->fastest  = 0;
    pme_lb->start    = 0;
    pme_lb->end      = 0;
    pme_lb->elimited = epmelblimNO;

    *pme_lb_p = pme_lb;
}

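/* Try to add one new setup with a larger Coulomb cut-off, and hence a coarser
 * PME grid, to pme_lb->setup. Returns FALSE without adding a setup when the
 * next coarser grid would have 2*pme_order or fewer points in some dimension,
 * which parallel PME does not support and which would gain little anyway.
 */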
static gmx_bool pme_loadbal_increase_cutoff(pme_load_balancing_t pme_lb,
                                            int pme_order)
{
    pme_setup_t *set;
    real fac,sp;
    real tmpr_coulomb,tmpr_vdw;
    int d;

    /* Try to add a new setup with the next larger cut-off to the list */
    pme_lb->n++;
    srenew(pme_lb->setup,pme_lb->n);
    set = &pme_lb->setup[pme_lb->n-1];
    set->pmedata = NULL;

    fac = 1;
    do
    {
        fac *= 1.01;
        clear_ivec(set->grid);
        sp = calc_grid(NULL,pme_lb->box_start,
                       fac*pme_lb->setup[pme_lb->cur].spacing,
                       &set->grid[XX],
                       &set->grid[YY],
                       &set->grid[ZZ]);

        /* In parallel we can't have grids smaller than 2*pme_order,
         * and we would anyhow not gain much speed at these grid sizes.
         */
        for(d=0; d<DIM; d++)
        {
            if (set->grid[d] <= 2*pme_order)
            {
                pme_lb->n--;

                return FALSE;
            }
        }
    }
    while (sp <= 1.001*pme_lb->setup[pme_lb->cur].spacing);

    set->rcut_coulomb = pme_lb->cut_spacing*sp;

    if(pme_lb->cutoff_scheme == ecutsVERLET)
    {
        set->rlist        = set->rcut_coulomb + pme_lb->rbuf_coulomb;
        /* We don't use LR lists with Verlet, but this avoids if-statements in further checks */
        set->rlistlong    = set->rlist;
    }
    else
    {
        tmpr_coulomb          = set->rcut_coulomb + pme_lb->rbuf_coulomb;
        tmpr_vdw              = pme_lb->rcut_vdw + pme_lb->rbuf_vdw;
        set->rlist            = min(tmpr_coulomb,tmpr_vdw);
        set->rlistlong        = max(tmpr_coulomb,tmpr_vdw);

        /* Set the long-range update frequency */
        if(set->rlist == set->rlistlong)
        {
            /* No long-range interactions if the short-/long-range cutoffs are identical */
            set->nstcalclr = 0;
        }
        else if(pme_lb->nstcalclr_start==0 || pme_lb->nstcalclr_start==1)
        {
            /* We were not doing long-range before, but now we are since rlist!=rlistlong */
            set->nstcalclr = 1;
        }
        else
        {
            /* We were already doing long-range interactions from the start */
            if(pme_lb->rcut_vdw > pme_lb->rcut_coulomb_start)
            {
                /* We were originally doing long-range VdW-only interactions.
                 * If rvdw is still longer than rcoulomb we keep the original nstcalclr,
                 * but if the coulomb cutoff has become longer we should update the long-range
                 * part every step.
                 */
                set->nstcalclr = (tmpr_vdw > tmpr_coulomb) ? pme_lb->nstcalclr_start : 1;
            }
            else
            {
                /* We were not doing any long-range interaction from the start,
                 * since it is not possible to do twin-range coulomb for the PME interaction.
                 */
                set->nstcalclr = 1;
            }
        }
    }

    set->spacing      = sp;
    /* The grid efficiency is the size wrt a grid with uniform x/y/z spacing */
    set->grid_efficiency = 1;
    for(d=0; d<DIM; d++)
    {
        set->grid_efficiency *= (set->grid[d]*sp)/norm(pme_lb->box_start[d]);
    }
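    /* Scaling ewaldcoeff with 1/rcut_coulomb keeps ewaldcoeff*rcut_coulomb,
     * and therefore the relative real-space truncation error at the cut-off,
     * the same for every setup; the reciprocal-space accuracy then follows
     * from the grid chosen above.
     */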
    /* The Ewald coefficient is inversely proportional to the cut-off */
    set->ewaldcoeff =
        pme_lb->setup[0].ewaldcoeff*pme_lb->setup[0].rcut_coulomb/set->rcut_coulomb;

    set->count   = 0;
    set->cycles  = 0;

    if (debug)
    {
        fprintf(debug,"PME loadbal: grid %d %d %d, coulomb cutoff %f\n",
                set->grid[XX],set->grid[YY],set->grid[ZZ],set->rcut_coulomb);
    }
    return TRUE;
}

static void print_grid(FILE *fp_err,FILE *fp_log,
                       const char *pre,
                       const char *desc,
                       const pme_setup_t *set,
                       double cycles)
{
    char buf[STRLEN],buft[STRLEN];

    if (cycles >= 0)
    {
        sprintf(buft,": %.1f M-cycles",cycles*1e-6);
    }
    else
    {
        buft[0] = '\0';
    }
    sprintf(buf,"%-11s%10s pme grid %d %d %d, coulomb cutoff %.3f%s",
            pre,
            desc,set->grid[XX],set->grid[YY],set->grid[ZZ],set->rcut_coulomb,
            buft);
    if (fp_err != NULL)
    {
        fprintf(fp_err,"\r%s\n",buf);
    }
    if (fp_log != NULL)
    {
        fprintf(fp_log,"%s\n",buf);
    }
}

static int pme_loadbal_end(pme_load_balancing_t pme_lb)
{
    /* In the initial stage only n is set; end is not set yet */
    if (pme_lb->end > 0)
    {
        return pme_lb->end;
    }
    else
    {
        return pme_lb->n;
    }
}

static void print_loadbal_limited(FILE *fp_err,FILE *fp_log,
                                  gmx_large_int_t step,
                                  pme_load_balancing_t pme_lb)
{
    char buf[STRLEN],sbuf[22];

    sprintf(buf,"step %4s: the %s limited the PME load balancing to a coulomb cut-off of %.3f",
            gmx_step_str(step,sbuf),
            pmelblim_str[pme_lb->elimited],
            pme_lb->setup[pme_loadbal_end(pme_lb)-1].rcut_coulomb);
    if (fp_err != NULL)
    {
        fprintf(fp_err,"\r%s\n",buf);
    }
    if (fp_log != NULL)
    {
        fprintf(fp_log,"%s\n",buf);
    }
}

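/* Stage 0 scans setups with increasing cut-off. This routine ends that scan:
 * it restricts the range [start,end) to the setups that were at most a factor
 * PME_LB_SLOW_FAC slower than the fastest one (plus adjacent setups that have
 * not been timed yet) and enters stage 1, in which that range is re-timed.
 */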
static void switch_to_stage1(pme_load_balancing_t pme_lb)
{
    pme_lb->start = 0;
    while (pme_lb->start+1 < pme_lb->n &&
           (pme_lb->setup[pme_lb->start].count == 0 ||
            pme_lb->setup[pme_lb->start].cycles >
            pme_lb->setup[pme_lb->fastest].cycles*PME_LB_SLOW_FAC))
    {
        pme_lb->start++;
    }
    while (pme_lb->start > 0 && pme_lb->setup[pme_lb->start-1].cycles == 0)
    {
        pme_lb->start--;
    }

    pme_lb->end = pme_lb->n;
    if (pme_lb->setup[pme_lb->end-1].count > 0 &&
        pme_lb->setup[pme_lb->end-1].cycles >
        pme_lb->setup[pme_lb->fastest].cycles*PME_LB_SLOW_FAC)
    {
        pme_lb->end--;
    }

    pme_lb->stage = 1;

    /* Next we want to choose setup pme_lb->start, but as we will increase
     * pme_lb->cur by one right after returning, we subtract 1 here.
     */
    pme_lb->cur = pme_lb->start - 1;
}

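/* Check the cycle count of the current setup and, while balancing is still
 * in progress, switch the run over to the next PME grid / cut-off setup.
 * The first cycle count after each switch is ignored, because the first step
 * with a new setup is distorted by (re)allocation and caching effects.
 * Returns FALSE when the balancing had already completed
 * (pme_lb->stage == pme_lb->nstage) and nothing was changed, TRUE otherwise.
 */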
gmx_bool pme_load_balance(pme_load_balancing_t pme_lb,
                          t_commrec *cr,
                          FILE *fp_err,
                          FILE *fp_log,
                          t_inputrec *ir,
                          t_state *state,
                          double cycles,
                          interaction_const_t *ic,
                          nonbonded_verlet_t *nbv,
                          gmx_pme_t *pmedata,
                          gmx_large_int_t step)
{
    gmx_bool OK;
    pme_setup_t *set;
    double cycles_fast;
    char buf[STRLEN],sbuf[22];
    real rtab;
    gmx_bool bUsesSimpleTables = TRUE;

    if (pme_lb->stage == pme_lb->nstage)
    {
        return FALSE;
    }

    if (PAR(cr))
    {
        gmx_sumd(1,&cycles,cr);
        cycles /= cr->nnodes;
    }

    set = &pme_lb->setup[pme_lb->cur];
    set->count++;

    rtab = ir->rlistlong + ir->tabext;

    if (set->count % 2 == 1)
    {
        /* Skip the first cycle, because the first step after a switch
         * is much slower due to allocation and/or caching effects.
         */
        return TRUE;
    }

    sprintf(buf, "step %4s: ", gmx_step_str(step,sbuf));
    print_grid(fp_err,fp_log,buf,"timed with",set,cycles);

    if (set->count <= 2)
    {
        set->cycles = cycles;
    }
    else
    {
        if (cycles*PME_LB_ACCEL_TOL < set->cycles &&
            pme_lb->stage == pme_lb->nstage - 1)
        {
            /* The performance went up a lot (due to e.g. DD load balancing).
             * Add a stage, keep the minima, but rescan all setups.
             */
            pme_lb->nstage++;

            if (debug)
            {
                fprintf(debug,"The performance for grid %d %d %d went from %.3f to %.1f M-cycles, this is more than %f\n"
                        "Increased the number of stages to %d"
                        " and ignoring the previous performance\n",
                        set->grid[XX],set->grid[YY],set->grid[ZZ],
                        cycles*1e-6,set->cycles*1e-6,PME_LB_ACCEL_TOL,
                        pme_lb->nstage);
            }
        }
        set->cycles = min(set->cycles,cycles);
    }

    if (set->cycles < pme_lb->setup[pme_lb->fastest].cycles)
    {
        pme_lb->fastest = pme_lb->cur;
    }
    cycles_fast = pme_lb->setup[pme_lb->fastest].cycles;

    /* Check in stage 0 if we should stop scanning grids.
     * Stop when the timing is more than a factor PME_LB_SLOW_FAC above the fastest.
     */
    if (pme_lb->stage == 0 && pme_lb->cur > 0 &&
        cycles > pme_lb->setup[pme_lb->fastest].cycles*PME_LB_SLOW_FAC)
    {
        pme_lb->n = pme_lb->cur + 1;
        /* Done with scanning, go to stage 1 */
        switch_to_stage1(pme_lb);
    }

    if (pme_lb->stage == 0)
    {
        int gridsize_start;

        gridsize_start = set->grid[XX]*set->grid[YY]*set->grid[ZZ];

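        /* Within this call, keep generating and switching to setups with a
         * larger cut-off until the grid-point count has dropped by at least
         * a factor PME_LB_GRID_SCALE_FAC and the grid efficiency has not
         * degraded too much (PME_LB_GRID_EFFICIENCY_REL_FAC), so the initial
         * scan steps through the available grids coarsely.
         */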
        do
        {
            if (pme_lb->cur+1 < pme_lb->n)
            {
                /* We had already generated the next setup */
                OK = TRUE;
            }
            else
            {
                /* Find the next setup */
                OK = pme_loadbal_increase_cutoff(pme_lb,ir->pme_order);
            }

            if (OK && ir->ePBC != epbcNONE)
            {
                OK = (sqr(pme_lb->setup[pme_lb->cur+1].rlistlong)
                      <= max_cutoff2(ir->ePBC,state->box));
                if (!OK)
                {
                    pme_lb->elimited = epmelblimBOX;
                }
            }

            if (OK)
            {
                pme_lb->cur++;

                if (DOMAINDECOMP(cr))
                {
                    OK = change_dd_cutoff(cr,state,ir,
                                          pme_lb->setup[pme_lb->cur].rlistlong);
                    if (!OK)
                    {
                        /* Failed: do not use this setup */
                        pme_lb->cur--;
                        pme_lb->elimited = epmelblimDD;
                    }
                }
            }
            if (!OK)
            {
                /* We hit the upper limit for the cut-off,
                 * the setup should not go further than cur.
                 */
                pme_lb->n = pme_lb->cur + 1;
                print_loadbal_limited(fp_err,fp_log,step,pme_lb);
                /* Switch to the next stage */
                switch_to_stage1(pme_lb);
            }
        }
        while (OK &&
               !(pme_lb->setup[pme_lb->cur].grid[XX]*
                 pme_lb->setup[pme_lb->cur].grid[YY]*
                 pme_lb->setup[pme_lb->cur].grid[ZZ] <
                 gridsize_start*PME_LB_GRID_SCALE_FAC
                 &&
                 pme_lb->setup[pme_lb->cur].grid_efficiency <
                 pme_lb->setup[pme_lb->cur-1].grid_efficiency*PME_LB_GRID_EFFICIENCY_REL_FAC));
    }

    if (pme_lb->stage > 0 && pme_lb->end == 1)
    {
        pme_lb->cur = 0;
        pme_lb->stage = pme_lb->nstage;
    }
    else if (pme_lb->stage > 0 && pme_lb->end > 1)
    {
        /* If stage = nstage-1:
         *   scan over all setups, rerunning only those setups
         *   which are not much slower than the fastest
         * else:
         *   use the next setup
         */
        do
        {
            pme_lb->cur++;
            if (pme_lb->cur == pme_lb->end)
            {
                pme_lb->stage++;
                pme_lb->cur = pme_lb->start;
            }
        }
        while (pme_lb->stage == pme_lb->nstage - 1 &&
               pme_lb->setup[pme_lb->cur].count > 0 &&
               pme_lb->setup[pme_lb->cur].cycles > cycles_fast*PME_LB_SLOW_FAC);

        if (pme_lb->stage == pme_lb->nstage)
        {
            /* We are done optimizing, use the fastest setup we found */
            pme_lb->cur = pme_lb->fastest;
        }
    }

    if (DOMAINDECOMP(cr) && pme_lb->stage > 0)
    {
        OK = change_dd_cutoff(cr,state,ir,pme_lb->setup[pme_lb->cur].rlistlong);
        if (!OK)
        {
            /* Failsafe solution */
            if (pme_lb->cur > 1 && pme_lb->stage == pme_lb->nstage)
            {
                pme_lb->stage--;
            }
            pme_lb->fastest  = 0;
            pme_lb->start    = 0;
            pme_lb->end      = pme_lb->cur;
            pme_lb->cur      = pme_lb->start;
            pme_lb->elimited = epmelblimDD;
            print_loadbal_limited(fp_err,fp_log,step,pme_lb);
        }
    }

    /* Change the Coulomb cut-off and the PME grid */

    set = &pme_lb->setup[pme_lb->cur];

    ic->rcoulomb   = set->rcut_coulomb;
    ic->rlist      = set->rlist;
    ic->rlistlong  = set->rlistlong;
    ir->nstcalclr  = set->nstcalclr;
    ic->ewaldcoeff = set->ewaldcoeff;

    bUsesSimpleTables = uses_simple_tables(ir->cutoff_scheme, nbv, 0);
    if (pme_lb->cutoff_scheme == ecutsVERLET && nbv->grp[0].kernel_type == nbk8x8x8_CUDA)
    {
        nbnxn_cuda_pme_loadbal_update_param(nbv->cu_nbv,ic);
    }
    else
    {
        init_interaction_const_tables(NULL,ic,bUsesSimpleTables,
                                      rtab);
    }

    if (pme_lb->cutoff_scheme == ecutsVERLET && nbv->ngrp > 1)
    {
        init_interaction_const_tables(NULL,ic,bUsesSimpleTables,
                                      rtab);
    }

    if (cr->duty & DUTY_PME)
    {
        if (pme_lb->setup[pme_lb->cur].pmedata == NULL)
        {
            /* Generate a new PME data structure,
             * copying part of the old pointers.
             */
            gmx_pme_reinit(&set->pmedata,
                           cr,pme_lb->setup[0].pmedata,ir,
                           set->grid);
        }
        *pmedata = set->pmedata;
    }
    else
    {
        /* Tell our PME-only node to switch grid */
        gmx_pme_send_switch(cr, set->grid, set->ewaldcoeff);
    }

    if (debug)
    {
        print_grid(NULL,debug,"","switched to",set,-1);
    }

    if (pme_lb->stage == pme_lb->nstage)
    {
        print_grid(fp_err,fp_log,"","optimal",set,-1);
    }

    return TRUE;
}

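/* Let the load balancing run for n more stages. Presumably meant for callers
 * that want to redo (part of) the balancing after run conditions change; the
 * actual call sites are outside this file.
 */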
void restart_pme_loadbal(pme_load_balancing_t pme_lb, int n)
{
    pme_lb->nstage += n;
}

static int pme_grid_points(const pme_setup_t *setup)
{
    return setup->grid[XX]*setup->grid[YY]*setup->grid[ZZ];
}

static void print_pme_loadbal_setting(FILE *fplog,
                                      char *name,
                                      const pme_setup_t *setup)
{
    fprintf(fplog,
            "   %-7s %6.3f nm %6.3f nm     %3d %3d %3d   %5.3f nm  %5.3f nm\n",
            name,
            setup->rcut_coulomb,setup->rlist,
            setup->grid[XX],setup->grid[YY],setup->grid[ZZ],
            setup->spacing,1/setup->ewaldcoeff);
}

static void print_pme_loadbal_settings(pme_load_balancing_t pme_lb,
                                       FILE *fplog)
{
    double pp_ratio,grid_ratio;

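    /* The particle-particle cost scales roughly with the volume of the
     * pair-list sphere, i.e. with the cube of the list cut-off, hence the
     * third power in the ratio reported below.
     */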
    pp_ratio   = pow(pme_lb->setup[pme_lb->cur].rlist/pme_lb->setup[0].rlistlong,3.0);
    grid_ratio = pme_grid_points(&pme_lb->setup[pme_lb->cur])/
        (double)pme_grid_points(&pme_lb->setup[0]);

    fprintf(fplog,"\n");
    fprintf(fplog,"       P P   -   P M E   L O A D   B A L A N C I N G\n");
    fprintf(fplog,"\n");
    /* Here we only warn when the optimal setting is the last one */
    if (pme_lb->elimited != epmelblimNO &&
        pme_lb->cur == pme_loadbal_end(pme_lb)-1)
    {
        fprintf(fplog," NOTE: The PP/PME load balancing was limited by the %s,\n",
                pmelblim_str[pme_lb->elimited]);
        fprintf(fplog,"       you might not have reached a good load balance.\n");
        if (pme_lb->elimited == epmelblimDD)
        {
            fprintf(fplog,"       Try different mdrun -dd settings or lower the -dds value.\n");
        }
        fprintf(fplog,"\n");
    }
    fprintf(fplog," PP/PME load balancing changed the cut-off and PME settings:\n");
    fprintf(fplog,"           particle-particle                    PME\n");
    fprintf(fplog,"            rcoulomb  rlist            grid      spacing   1/beta\n");
    print_pme_loadbal_setting(fplog,"initial",&pme_lb->setup[0]);
    print_pme_loadbal_setting(fplog,"final"  ,&pme_lb->setup[pme_lb->cur]);
    fprintf(fplog," cost-ratio           %4.2f             %4.2f\n",
            pp_ratio,grid_ratio);
    fprintf(fplog," (note that these numbers concern only part of the total PP and PME load)\n");
    fprintf(fplog,"\n");
}

void pme_loadbal_done(pme_load_balancing_t pme_lb, FILE *fplog)
{
    if (fplog != NULL && (pme_lb->cur > 0 || pme_lb->elimited != epmelblimNO))
    {
        print_pme_loadbal_settings(pme_lb,fplog);
    }

    /* TODO: Here we should free all pointers in pme_lb,
     * but as it contains pme data structures,
     * we need to first make pme.c free all data.
     */
}