/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 *
 *
 *                This source code is part of
 *
 *                 G   R   O   M   A   C   S
 *
 *          GROningen MAchine for Chemical Simulations
 *
 *                        VERSION 4.6.0
 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2011, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * If you want to redistribute modifications, please consider that
 * scientific software is very special. Version control is crucial -
 * bugs must be traceable. We will be happy to consider code for
 * inclusion in the official distribution, but derived work must not
 * be called official GROMACS. Details are found in the README & COPYING
 * files - if they are missing, get the official version at www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the papers on the package - you can find them in the top README file.
 *
 * For more info, check our website at http://www.gromacs.org
 *
 * And Hey:
 * Gallium Rubidium Oxygen Manganese Argon Carbon Silicon
 */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include "smalloc.h"
#include "network.h"
#include "calcgrid.h"
#include "pme.h"
#include "vec.h"
#include "domdec.h"
#include "nbnxn_cuda_data_mgmt.h"
#include "force.h"
#include "pme_loadbal.h"

/* Parameters and settings for one PP-PME setup */
typedef struct {
    real rcut_coulomb;    /* Coulomb cut-off                              */
    real rlist;           /* pair-list cut-off                            */
    real rlistlong;       /* LR pair-list cut-off                         */
    int  nstcalclr;       /* frequency of evaluating long-range forces for group scheme */
    real spacing;         /* (largest) PME grid spacing                   */
    ivec grid;            /* the PME grid dimensions                      */
    real grid_efficiency; /* inefficiency factor for non-uniform grids (>= 1, 1 for uniform spacing) */
    real ewaldcoeff;      /* the Ewald coefficient                        */
    gmx_pme_t pmedata;    /* the data structure used in the PME code      */

    int  count;           /* number of times this setup has been timed    */
    double cycles;        /* the fastest time for this setup in cycles    */
} pme_setup_t;

/* In the initial scan, step by grids that are at least a factor 0.8 coarser */
#define PME_LB_GRID_SCALE_FAC  0.8
/* In the initial scan, try to skip grids with uneven x/y/z spacing,
 * checking if the "efficiency" is more than 5% worse than the previous grid.
 */
#define PME_LB_GRID_EFFICIENCY_REL_FAC  1.05
/* Rerun setups that are at most 12% slower than the fastest setup so far */
#define PME_LB_SLOW_FAC  1.12
/* If setups get more than 2% faster, do another round to avoid
 * choosing a slower setup due to acceleration or fluctuations.
 */
#define PME_LB_ACCEL_TOL 1.02
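
/* Worked example of these tolerances (illustrative numbers, not from a
 * real run): with the fastest setup timed at 100 M-cycles, stage 0 stops
 * scanning at the first setup above 112 M-cycles (PME_LB_SLOW_FAC), later
 * stages re-time only setups below that bound, and when a setup that
 * previously took 100 M-cycles is re-timed in the last stage at under
 * ~98 M-cycles (PME_LB_ACCEL_TOL) an extra stage is added to re-scan.
 */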

enum { epmelblimNO, epmelblimBOX, epmelblimDD, epmelblimNR };

const char *pmelblim_str[epmelblimNR] =
{ "no", "box size", "domain decomposition" };

struct pme_load_balancing {
    int  nstage;        /* the current maximum number of stages */

    real cut_spacing;   /* the minimum cutoff / PME grid spacing ratio */
    real rcut_vdw;      /* Vdw cutoff (does not change) */
    real rcut_coulomb_start; /* Initial electrostatics cutoff */
    int  nstcalclr_start; /* Initial long-range force evaluation interval */
    real rbuf_coulomb;  /* the pairlist buffer size */
    real rbuf_vdw;      /* the pairlist buffer size */
    matrix box_start;   /* the initial simulation box */
    int n;              /* the count of setups as well as the allocation size */
    pme_setup_t *setup; /* the PME+cutoff setups */
    int cur;            /* the current setup */
    int fastest;        /* fastest setup up till now */
    int start;          /* start of setup range to consider in stage>0 */
    int end;            /* end   of setup range to consider in stage>0 */
    int elimited;       /* was the balancing limited, uses enum above */
    int cutoff_scheme;  /* Verlet or group cut-offs */

    int stage;          /* the current stage */
};

void pme_loadbal_init(pme_load_balancing_t *pme_lb_p,
                      const t_inputrec *ir,matrix box,
                      const interaction_const_t *ic,
                      gmx_pme_t pmedata)
{
    pme_load_balancing_t pme_lb;
    real spm,sp;
    int  d;

    snew(pme_lb,1);

    /* Any number of stages >= 2 is supported */
    pme_lb->nstage   = 2;

    pme_lb->cutoff_scheme = ir->cutoff_scheme;

    if(pme_lb->cutoff_scheme == ecutsVERLET)
    {
        pme_lb->rbuf_coulomb = ic->rlist - ic->rcoulomb;
        pme_lb->rbuf_vdw     = pme_lb->rbuf_coulomb;
    }
    else
    {
        if(ic->rcoulomb > ic->rlist)
        {
            pme_lb->rbuf_coulomb = ic->rlistlong - ic->rcoulomb;
        }
        else
        {
            pme_lb->rbuf_coulomb = ic->rlist - ic->rcoulomb;
        }
        if(ic->rvdw > ic->rlist)
        {
            pme_lb->rbuf_vdw = ic->rlistlong - ic->rvdw;
        }
        else
        {
            pme_lb->rbuf_vdw = ic->rlist - ic->rvdw;
        }
    }

    copy_mat(box,pme_lb->box_start);
    if (ir->ePBC==epbcXY && ir->nwall==2)
    {
        svmul(ir->wall_ewald_zfac,pme_lb->box_start[ZZ],pme_lb->box_start[ZZ]);
    }

    pme_lb->n = 1;
    snew(pme_lb->setup,pme_lb->n);

    pme_lb->rcut_vdw              = ic->rvdw;
    pme_lb->rcut_coulomb_start    = ir->rcoulomb;
    pme_lb->nstcalclr_start       = ir->nstcalclr;

    pme_lb->cur = 0;
    pme_lb->setup[0].rcut_coulomb = ic->rcoulomb;
    pme_lb->setup[0].rlist        = ic->rlist;
    pme_lb->setup[0].rlistlong    = ic->rlistlong;
    pme_lb->setup[0].nstcalclr    = ir->nstcalclr;
    pme_lb->setup[0].grid[XX]     = ir->nkx;
    pme_lb->setup[0].grid[YY]     = ir->nky;
    pme_lb->setup[0].grid[ZZ]     = ir->nkz;
    pme_lb->setup[0].ewaldcoeff   = ic->ewaldcoeff;

    pme_lb->setup[0].pmedata  = pmedata;

    spm = 0;
    for(d=0; d<DIM; d++)
    {
        sp = norm(pme_lb->box_start[d])/pme_lb->setup[0].grid[d];
        if (sp > spm)
        {
            spm = sp;
        }
    }
    pme_lb->setup[0].spacing = spm;

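    /* The cut-off to grid-spacing ratio is kept fixed during balancing;
     * scaling both together should keep the balance between real- and
     * reciprocal-space Ewald errors roughly constant while shifting load
     * from PME to PP.
     */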
    if (ir->fourier_spacing > 0)
    {
        pme_lb->cut_spacing = ir->rcoulomb/ir->fourier_spacing;
    }
    else
    {
        pme_lb->cut_spacing = ir->rcoulomb/pme_lb->setup[0].spacing;
    }

    pme_lb->stage = 0;

    pme_lb->fastest  = 0;
    pme_lb->start    = 0;
    pme_lb->end      = 0;
    pme_lb->elimited = epmelblimNO;

    *pme_lb_p = pme_lb;
}
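
/* Illustrative call sequence (a sketch, not part of this file; the caller
 * names fr->ic, fr->nbv and fr->pmedata are assumptions about the
 * surrounding code, not defined here):
 *
 *   pme_load_balancing_t pme_loadbal;
 *
 *   pme_loadbal_init(&pme_loadbal, ir, state->box, fr->ic, fr->pmedata);
 *
 *   ... then, at pair-list update steps inside the MD loop:
 *   pme_load_balance(pme_loadbal, cr, fp_err, fp_log, ir, state,
 *                    cycles, fr->ic, fr->nbv, &fr->pmedata, step);
 *
 *   ... and at the end of the run:
 *   pme_loadbal_done(pme_loadbal, fp_log);
 */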

static gmx_bool pme_loadbal_increase_cutoff(pme_load_balancing_t pme_lb,
                                            int pme_order)
{
    pme_setup_t *set;
    real fac,sp;
    real tmpr_coulomb,tmpr_vdw;
    int d;

    /* Try to add a new setup with next larger cut-off to the list */
    pme_lb->n++;
    srenew(pme_lb->setup,pme_lb->n);
    set = &pme_lb->setup[pme_lb->n-1];
    set->pmedata = NULL;

    fac = 1;
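    /* Increase the target spacing in 1% steps until calc_grid returns a
     * grid that is actually coarser than the current setup; calc_grid
     * rounds to the supported FFT sizes, so a small increase can give
     * back the same grid.
     */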
    do
    {
        fac *= 1.01;
        clear_ivec(set->grid);
        sp = calc_grid(NULL,pme_lb->box_start,
                       fac*pme_lb->setup[pme_lb->cur].spacing,
                       &set->grid[XX],
                       &set->grid[YY],
                       &set->grid[ZZ]);

        /* In parallel we can't have grids smaller than 2*pme_order,
         * and we would anyhow not gain much speed at these grid sizes.
         */
        for(d=0; d<DIM; d++)
        {
            if (set->grid[d] <= 2*pme_order)
            {
                pme_lb->n--;

                return FALSE;
            }
        }
    }
    while (sp <= 1.001*pme_lb->setup[pme_lb->cur].spacing);

    set->rcut_coulomb = pme_lb->cut_spacing*sp;

    if(pme_lb->cutoff_scheme == ecutsVERLET)
    {
        set->rlist        = set->rcut_coulomb + pme_lb->rbuf_coulomb;
        /* We don't use LR lists with Verlet, but this avoids if-statements in further checks */
        set->rlistlong    = set->rlist;
    }
    else
    {
        tmpr_coulomb          = set->rcut_coulomb + pme_lb->rbuf_coulomb;
        tmpr_vdw              = pme_lb->rcut_vdw + pme_lb->rbuf_vdw;
        set->rlist            = min(tmpr_coulomb,tmpr_vdw);
        set->rlistlong        = max(tmpr_coulomb,tmpr_vdw);

        /* Set the long-range update frequency */
        if(set->rlist == set->rlistlong)
        {
            /* No long-range interactions if the short-/long-range cutoffs are identical */
            set->nstcalclr = 0;
        }
        else if(pme_lb->nstcalclr_start==0 || pme_lb->nstcalclr_start==1)
        {
            /* We were not doing long-range before, but now we are since rlist!=rlistlong */
            set->nstcalclr = 1;
        }
        else
        {
            /* We were already doing long-range interactions from the start */
            if(pme_lb->rcut_vdw > pme_lb->rcut_coulomb_start)
            {
                /* We were originally doing long-range VdW-only interactions.
                 * If rvdw is still longer than rcoulomb we keep the original nstcalclr,
                 * but if the coulomb cutoff has become longer we should update the long-range
                 * part every step.
                 */
                set->nstcalclr = (tmpr_vdw > tmpr_coulomb) ? pme_lb->nstcalclr_start : 1;
            }
            else
            {
                /* We were not doing any long-range interaction from the start,
                 * since it is not possible to do twin-range coulomb for the PME interaction.
                 */
                set->nstcalclr = 1;
            }
        }
    }

    set->spacing      = sp;
    /* The grid efficiency is the size wrt a grid with uniform x/y/z spacing */
    set->grid_efficiency = 1;
    for(d=0; d<DIM; d++)
    {
        set->grid_efficiency *= (set->grid[d]*sp)/norm(pme_lb->box_start[d]);
    }
    /* The Ewald coefficient is inversely proportional to the cut-off */
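    /* Note that scaling the coefficient by rcut0/rcut keeps the product
     * beta*rcut, and with it erfc(beta*rcut), i.e. the relative real-space
     * truncation error, unchanged.
     */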
    set->ewaldcoeff =
        pme_lb->setup[0].ewaldcoeff*pme_lb->setup[0].rcut_coulomb/set->rcut_coulomb;

    set->count   = 0;
    set->cycles  = 0;

    if (debug)
    {
        fprintf(debug,"PME loadbal: grid %d %d %d, coulomb cutoff %f\n",
                set->grid[XX],set->grid[YY],set->grid[ZZ],set->rcut_coulomb);
    }
    return TRUE;
}

static void print_grid(FILE *fp_err,FILE *fp_log,
                       const char *pre,
                       const char *desc,
                       const pme_setup_t *set,
                       double cycles)
{
    char buf[STRLEN],buft[STRLEN];

    if (cycles >= 0)
    {
        sprintf(buft,": %.1f M-cycles",cycles*1e-6);
    }
    else
    {
        buft[0] = '\0';
    }
    sprintf(buf,"%-11s%10s pme grid %d %d %d, coulomb cutoff %.3f%s",
            pre,
            desc,set->grid[XX],set->grid[YY],set->grid[ZZ],set->rcut_coulomb,
            buft);
    if (fp_err != NULL)
    {
        fprintf(fp_err,"\r%s\n",buf);
    }
    if (fp_log != NULL)
    {
        fprintf(fp_log,"%s\n",buf);
    }
}

static int pme_loadbal_end(pme_load_balancing_t pme_lb)
{
    /* In the initial stage only n is set; end is not set yet */
    if (pme_lb->end > 0)
    {
        return pme_lb->end;
    }
    else
    {
        return pme_lb->n;
    }
}

static void print_loadbal_limited(FILE *fp_err,FILE *fp_log,
                                  gmx_large_int_t step,
                                  pme_load_balancing_t pme_lb)
{
    char buf[STRLEN],sbuf[22];

    sprintf(buf,"step %4s: the %s limited the PME load balancing to a coulomb cut-off of %.3f",
            gmx_step_str(step,sbuf),
            pmelblim_str[pme_lb->elimited],
            pme_lb->setup[pme_loadbal_end(pme_lb)-1].rcut_coulomb);
    if (fp_err != NULL)
    {
        fprintf(fp_err,"\r%s\n",buf);
    }
    if (fp_log != NULL)
    {
        fprintf(fp_log,"%s\n",buf);
    }
}

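/* Narrow the setup range [start,end) to the setups worth re-timing,
 * i.e. those not much slower than the fastest one found in stage 0,
 * and enter stage 1.
 */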
static void switch_to_stage1(pme_load_balancing_t pme_lb)
{
    pme_lb->start = 0;
    while (pme_lb->start+1 < pme_lb->n &&
           (pme_lb->setup[pme_lb->start].count == 0 ||
            pme_lb->setup[pme_lb->start].cycles >
            pme_lb->setup[pme_lb->fastest].cycles*PME_LB_SLOW_FAC))
    {
        pme_lb->start++;
    }
    while (pme_lb->start > 0 && pme_lb->setup[pme_lb->start-1].cycles == 0)
    {
        pme_lb->start--;
    }

    pme_lb->end = pme_lb->n;
    if (pme_lb->setup[pme_lb->end-1].count > 0 &&
        pme_lb->setup[pme_lb->end-1].cycles >
        pme_lb->setup[pme_lb->fastest].cycles*PME_LB_SLOW_FAC)
    {
        pme_lb->end--;
    }

    pme_lb->stage = 1;

    /* Next we want to choose setup pme_lb->start, but as we will increase
     * pme_lb->cur by one right after returning, we subtract 1 here.
     */
    pme_lb->cur = pme_lb->start - 1;
}

gmx_bool pme_load_balance(pme_load_balancing_t pme_lb,
                          t_commrec *cr,
                          FILE *fp_err,
                          FILE *fp_log,
                          t_inputrec *ir,
                          t_state *state,
                          double cycles,
                          interaction_const_t *ic,
                          nonbonded_verlet_t *nbv,
                          gmx_pme_t *pmedata,
                          gmx_large_int_t step)
{
    gmx_bool OK;
    pme_setup_t *set;
    double cycles_fast;
    char buf[STRLEN],sbuf[22];
    real rtab;
    gmx_bool bUsesSimpleTables = TRUE;

    if (pme_lb->stage == pme_lb->nstage)
    {
        return FALSE;
    }

    if (PAR(cr))
    {
        gmx_sumd(1,&cycles,cr);
        cycles /= cr->nnodes;
    }

    set = &pme_lb->setup[pme_lb->cur];
    set->count++;

    rtab = ir->rlistlong + ir->tabext;

    if (set->count % 2 == 1)
    {
        /* Skip the first cycle, because the first step after a switch
         * is much slower due to allocation and/or caching effects.
         */
        return TRUE;
    }

    sprintf(buf, "step %4s: ", gmx_step_str(step,sbuf));
    print_grid(fp_err,fp_log,buf,"timed with",set,cycles);

    if (set->count <= 2)
    {
        set->cycles = cycles;
    }
    else
    {
        if (cycles*PME_LB_ACCEL_TOL < set->cycles &&
            pme_lb->stage == pme_lb->nstage - 1)
        {
            /* The performance went up a lot (due to e.g. DD load balancing).
             * Add a stage, keep the minima, but rescan all setups.
             */
            pme_lb->nstage++;

            if (debug)
            {
                fprintf(debug,"The performance for grid %d %d %d went from %.3f to %.1f M-cycles, this is more than %f\n"
                        "Increased the number of stages to %d"
                        " and ignoring the previous performance\n",
                        set->grid[XX],set->grid[YY],set->grid[ZZ],
                        cycles*1e-6,set->cycles*1e-6,PME_LB_ACCEL_TOL,
                        pme_lb->nstage);
            }
        }
        set->cycles = min(set->cycles,cycles);
    }

    if (set->cycles < pme_lb->setup[pme_lb->fastest].cycles)
    {
        pme_lb->fastest = pme_lb->cur;
    }
    cycles_fast = pme_lb->setup[pme_lb->fastest].cycles;

    /* Check in stage 0 if we should stop scanning grids.
     * Stop when the time is more than SLOW_FAC longer than the fastest.
     */
    if (pme_lb->stage == 0 && pme_lb->cur > 0 &&
        cycles > pme_lb->setup[pme_lb->fastest].cycles*PME_LB_SLOW_FAC)
    {
        pme_lb->n = pme_lb->cur + 1;
        /* Done with scanning, go to stage 1 */
        switch_to_stage1(pme_lb);
    }

    if (pme_lb->stage == 0)
    {
        int gridsize_start;

        gridsize_start = set->grid[XX]*set->grid[YY]*set->grid[ZZ];

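        /* Advance to the next setup, generating it first if needed, and
         * keep advancing until the grid has fewer than
         * PME_LB_GRID_SCALE_FAC times the starting number of points and
         * its spacing is not much less uniform than the previous grid's
         * (see the while condition below).
         */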
        do
        {
            if (pme_lb->cur+1 < pme_lb->n)
            {
                /* We had already generated the next setup */
                OK = TRUE;
            }
            else
            {
                /* Find the next setup */
                OK = pme_loadbal_increase_cutoff(pme_lb,ir->pme_order);
            }

            if (OK && ir->ePBC != epbcNONE)
            {
                OK = (sqr(pme_lb->setup[pme_lb->cur+1].rlistlong)
                      <= max_cutoff2(ir->ePBC,state->box));
                if (!OK)
                {
                    pme_lb->elimited = epmelblimBOX;
                }
            }

            if (OK)
            {
                pme_lb->cur++;

                if (DOMAINDECOMP(cr))
                {
                    OK = change_dd_cutoff(cr,state,ir,
                                          pme_lb->setup[pme_lb->cur].rlistlong);
                    if (!OK)
                    {
                        /* Failed: do not use this setup */
                        pme_lb->cur--;
                        pme_lb->elimited = epmelblimDD;
                    }
                }
            }
            if (!OK)
            {
                /* We hit the upper limit for the cut-off,
                 * so the scan should not go beyond cur.
                 */
                pme_lb->n = pme_lb->cur + 1;
                print_loadbal_limited(fp_err,fp_log,step,pme_lb);
                /* Switch to the next stage */
                switch_to_stage1(pme_lb);
            }
        }
        while (OK &&
               !(pme_lb->setup[pme_lb->cur].grid[XX]*
                 pme_lb->setup[pme_lb->cur].grid[YY]*
                 pme_lb->setup[pme_lb->cur].grid[ZZ] <
                 gridsize_start*PME_LB_GRID_SCALE_FAC
                 &&
                 pme_lb->setup[pme_lb->cur].grid_efficiency <
                 pme_lb->setup[pme_lb->cur-1].grid_efficiency*PME_LB_GRID_EFFICIENCY_REL_FAC));
    }

    if (pme_lb->stage > 0 && pme_lb->end == 1)
    {
        pme_lb->cur = 0;
        pme_lb->stage = pme_lb->nstage;
    }
    else if (pme_lb->stage > 0 && pme_lb->end > 1)
    {
        /* If stage = nstage-1:
         *   scan over all setups, rerunning only those setups
         *   which are not much slower than the fastest
         * else:
         *   use the next setup
         */
        do
        {
            pme_lb->cur++;
            if (pme_lb->cur == pme_lb->end)
            {
                pme_lb->stage++;
                pme_lb->cur = pme_lb->start;
            }
        }
        while (pme_lb->stage == pme_lb->nstage - 1 &&
               pme_lb->setup[pme_lb->cur].count > 0 &&
               pme_lb->setup[pme_lb->cur].cycles > cycles_fast*PME_LB_SLOW_FAC);

        if (pme_lb->stage == pme_lb->nstage)
        {
            /* We are done optimizing, use the fastest setup we found */
            pme_lb->cur = pme_lb->fastest;
        }
    }

    if (DOMAINDECOMP(cr) && pme_lb->stage > 0)
    {
        OK = change_dd_cutoff(cr,state,ir,pme_lb->setup[pme_lb->cur].rlistlong);
        if (!OK)
        {
            /* Failsafe solution */
            if (pme_lb->cur > 1 && pme_lb->stage == pme_lb->nstage)
            {
                pme_lb->stage--;
            }
            pme_lb->fastest  = 0;
            pme_lb->start    = 0;
            pme_lb->end      = pme_lb->cur;
            pme_lb->cur      = pme_lb->start;
            pme_lb->elimited = epmelblimDD;
            print_loadbal_limited(fp_err,fp_log,step,pme_lb);
        }
    }

    /* Change the Coulomb cut-off and the PME grid */

    set = &pme_lb->setup[pme_lb->cur];

    ic->rcoulomb   = set->rcut_coulomb;
    ic->rlist      = set->rlist;
    ic->rlistlong  = set->rlistlong;
    ir->nstcalclr  = set->nstcalclr;
    ic->ewaldcoeff = set->ewaldcoeff;

    bUsesSimpleTables = uses_simple_tables(ir->cutoff_scheme, nbv, 0);
    if (pme_lb->cutoff_scheme == ecutsVERLET && nbv->grp[0].kernel_type == nbk8x8x8_CUDA)
    {
        nbnxn_cuda_pme_loadbal_update_param(nbv->cu_nbv,ic);
    }
    else
    {
        init_interaction_const_tables(NULL,ic,bUsesSimpleTables,
                                      rtab);
    }

    if (pme_lb->cutoff_scheme == ecutsVERLET && nbv->ngrp > 1)
    {
        init_interaction_const_tables(NULL,ic,bUsesSimpleTables,
                                      rtab);
    }

    if (cr->duty & DUTY_PME)
    {
        if (pme_lb->setup[pme_lb->cur].pmedata == NULL)
        {
            /* Generate a new PME data structure,
             * copying part of the old pointers.
             */
            gmx_pme_reinit(&set->pmedata,
                           cr,pme_lb->setup[0].pmedata,ir,
                           set->grid);
        }
        *pmedata = set->pmedata;
    }
    else
    {
        /* Tell our PME-only node to switch grid */
        gmx_pme_send_switch(cr, set->grid, set->ewaldcoeff);
    }

    if (debug)
    {
        print_grid(NULL,debug,"","switched to",set,-1);
    }

    if (pme_lb->stage == pme_lb->nstage)
    {
        print_grid(fp_err,fp_log,"","optimal",set,-1);
    }

    return TRUE;
}

void restart_pme_loadbal(pme_load_balancing_t pme_lb, int n)
{
    pme_lb->nstage += n;
}

static int pme_grid_points(const pme_setup_t *setup)
{
    return setup->grid[XX]*setup->grid[YY]*setup->grid[ZZ];
}

static void print_pme_loadbal_setting(FILE *fplog,
                                      char *name,
                                      const pme_setup_t *setup)
{
    fprintf(fplog,
            "   %-7s %6.3f nm %6.3f nm     %3d %3d %3d   %5.3f nm  %5.3f nm\n",
            name,
            setup->rcut_coulomb,setup->rlist,
            setup->grid[XX],setup->grid[YY],setup->grid[ZZ],
            setup->spacing,1/setup->ewaldcoeff);
}

static void print_pme_loadbal_settings(pme_load_balancing_t pme_lb,
                                       FILE *fplog)
{
    double pp_ratio,grid_ratio;

    pp_ratio   = pow(pme_lb->setup[pme_lb->cur].rlist/pme_lb->setup[0].rlistlong,3.0);
    grid_ratio = pme_grid_points(&pme_lb->setup[pme_lb->cur])/
        (double)pme_grid_points(&pme_lb->setup[0]);
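    /* pp_ratio estimates the change in particle-particle cost, which
     * scales with the pair-list volume (rlist^3); grid_ratio estimates
     * the change in PME grid cost, which scales with the number of
     * grid points.
     */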

    fprintf(fplog,"\n");
    fprintf(fplog,"       P P   -   P M E   L O A D   B A L A N C I N G\n");
    fprintf(fplog,"\n");
    /* Here we only warn when the optimal setting is the last one */
    if (pme_lb->elimited != epmelblimNO &&
        pme_lb->cur == pme_loadbal_end(pme_lb)-1)
    {
        fprintf(fplog," NOTE: The PP/PME load balancing was limited by the %s,\n",
                pmelblim_str[pme_lb->elimited]);
        fprintf(fplog,"       you might not have reached a good load balance.\n");
        if (pme_lb->elimited == epmelblimDD)
        {
            fprintf(fplog,"       Try different mdrun -dd settings or lower the -dds value.\n");
        }
        fprintf(fplog,"\n");
    }
    fprintf(fplog," PP/PME load balancing changed the cut-off and PME settings:\n");
    fprintf(fplog,"           particle-particle                    PME\n");
    fprintf(fplog,"            rcoulomb  rlist            grid      spacing   1/beta\n");
    print_pme_loadbal_setting(fplog,"initial",&pme_lb->setup[0]);
    print_pme_loadbal_setting(fplog,"final"  ,&pme_lb->setup[pme_lb->cur]);
    fprintf(fplog," cost-ratio           %4.2f             %4.2f\n",
            pp_ratio,grid_ratio);
    fprintf(fplog," (note that these numbers concern only part of the total PP and PME load)\n");
    fprintf(fplog,"\n");
}

void pme_loadbal_done(pme_load_balancing_t pme_lb, FILE *fplog)
{
    if (fplog != NULL && (pme_lb->cur > 0 || pme_lb->elimited != epmelblimNO))
    {
        print_pme_loadbal_settings(pme_lb,fplog);
    }

    /* TODO: Here we should free all pointers in pme_lb,
     * but as it contains pme data structures,
     * we need to first make pme.c free all data.
     */
}