1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5  * Copyright (c) 2001-2004, The GROMACS development team.
6  * Copyright (c) 2013,2014, by the GROMACS development team, led by
7  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8  * and including many others, as listed in the AUTHORS file in the
9  * top-level source directory and at http://www.gromacs.org.
10  *
11  * GROMACS is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public License
13  * as published by the Free Software Foundation; either version 2.1
14  * of the License, or (at your option) any later version.
15  *
16  * GROMACS is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with GROMACS; if not, see
23  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
25  *
26  * If you want to redistribute modifications to GROMACS, please
27  * consider that scientific software is very special. Version
28  * control is crucial - bugs must be traceable. We will be happy to
29  * consider code for inclusion in the official distribution, but
30  * derived work must not be called official GROMACS. Details are found
31  * in the README & COPYING files - if they are missing, get the
32  * official version at http://www.gromacs.org.
33  *
34  * To help us fund GROMACS development, we humbly ask that you cite
35  * the research papers on the package. Check out http://www.gromacs.org.
36  */
37 /* IMPORTANT FOR DEVELOPERS:
38  *
39  * Triclinic pme stuff isn't entirely trivial, and we've experienced
40  * some bugs during development (many of them due to me). To avoid
41  * this in the future, please check the following things if you make
42  * changes in this file:
43  *
44  * 1. You should obtain identical (at least to the PME precision)
45  *    energies, forces, and virial for
46  *    a rectangular box and a triclinic one where the z (or y) axis is
47  *    tilted a whole box side. For instance you could use these boxes:
48  *
49  *    rectangular       triclinic
50  *     2  0  0           2  0  0
51  *     0  2  0           0  2  0
52  *     0  0  6           2  2  6
53  *
54  * 2. You should check the energy conservation in a triclinic box.
55  *
56  * It might seem like overkill, but better safe than sorry.
57  * /Erik 001109
58  */
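/* Worked example added for illustration (not part of the original note):
 * as used in calc_interpolation_idx() below, recipbox acts as the inverse of
 * the box matrix (box vectors as rows), so the fractional coordinate along
 * box vector d is s_d = sum_e r_e * recipbox[e][d].  For the triclinic test
 * box suggested above:
 *
 *          ( 2  0  0 )                  (  1/2    0     0  )
 *    box = ( 0  2  0 )   =>  recipbox = (   0    1/2    0  )
 *          ( 2  2  6 )                  ( -1/6  -1/6   1/6 )
 *
 * so s_x = x/2 - z/6 and s_y = y/2 - z/6: the tilted c vector makes z
 * contribute to the x and y fractions, which is exactly what the test in
 * point 1 exercises.
 */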
59
60 #ifdef HAVE_CONFIG_H
61 #include <config.h>
62 #endif
63
64 #include <stdio.h>
65 #include <string.h>
66 #include <math.h>
67 #include <assert.h>
68 #include "typedefs.h"
69 #include "txtdump.h"
70 #include "vec.h"
71 #include "gromacs/utility/smalloc.h"
72 #include "coulomb.h"
73 #include "gmx_fatal.h"
74 #include "pme.h"
75 #include "network.h"
76 #include "physics.h"
77 #include "nrnb.h"
78 #include "macros.h"
79
80 #include "gromacs/fft/parallel_3dfft.h"
81 #include "gromacs/fileio/futil.h"
82 #include "gromacs/fileio/pdbio.h"
83 #include "gromacs/math/gmxcomplex.h"
84 #include "gromacs/timing/cyclecounter.h"
85 #include "gromacs/timing/wallcycle.h"
86 #include "gromacs/utility/gmxmpi.h"
87 #include "gromacs/utility/gmxomp.h"
88
89 /* Include the SIMD macro file and then check for support */
90 #include "gromacs/simd/simd.h"
91 #include "gromacs/simd/simd_math.h"
92 #ifdef GMX_SIMD_HAVE_REAL
93 /* Turn on arbitrary width SIMD intrinsics for PME solve */
94 #    define PME_SIMD_SOLVE
95 #endif
96
97 #define PME_GRID_QA    0 /* Grid index for A-state for Q */
98 #define PME_GRID_C6A   2 /* Grid index for A-state for LJ */
99 #define DO_Q           2 /* Electrostatic grids have index q<2 */
100 #define DO_Q_AND_LJ    4 /* non-LB LJ grids have index 2 <= q < 4 */
101 #define DO_Q_AND_LJ_LB 9 /* With LB rules we need a total of 2+7 grids */
102
103 /* Pascal triangle coefficients scaled with (1/2)^6 for LJ-PME with LB-rules */
104 const real lb_scale_factor[] = {
105     1.0/64, 6.0/64, 15.0/64, 20.0/64,
106     15.0/64, 6.0/64, 1.0/64
107 };
108
109 /* Pascal triangle coefficients used in solve_pme_lj_yzx; only 4 values are needed due to symmetry */
110 const real lb_scale_factor_symm[] = { 2.0/64, 12.0/64, 30.0/64, 20.0/64 };
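/* Illustrative check of the two tables above (a minimal sketch added here,
 * not part of the original code): the entries are the binomial coefficients
 * C(6,k) scaled by (1/2)^6, so they sum to 1, and lb_scale_factor_symm folds
 * the symmetric pairs (k, 6-k) into one entry each.
 */
#if 0
static void check_lb_scale_factors(void)
{
    real sum = 0;
    int  k;

    for (k = 0; k < 7; k++)
    {
        sum += lb_scale_factor[k];               /* C(6,k)/64 */
    }
    assert(fabs(sum - 1.0) < 1e-12);             /* 1+6+15+20+15+6+1 = 64 */

    for (k = 0; k < 3; k++)
    {
        /* pairs k and 6-k are combined into a single entry */
        assert(lb_scale_factor_symm[k] == 2*lb_scale_factor[k]);
    }
    assert(lb_scale_factor_symm[3] == lb_scale_factor[3]);
}
#endif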
111
112 /* Check if we have 4-wide SIMD macro support */
113 #if (defined GMX_SIMD4_HAVE_REAL)
114 /* Do PME spread and gather with 4-wide SIMD.
115  * NOTE: SIMD is only used with PME order 4 and 5 (which are the most common).
116  */
117 #    define PME_SIMD4_SPREAD_GATHER
118
119 #    if (defined GMX_SIMD_HAVE_LOADU) && (defined GMX_SIMD_HAVE_STOREU)
120 /* With PME-order=4 on x86, unaligned load+store is slightly faster
121  * than doubling all SIMD operations when using aligned load+store.
122  */
123 #        define PME_SIMD4_UNALIGNED
124 #    endif
125 #endif
126
127 #define DFT_TOL 1e-7
128 /* #define PRT_FORCE */
129 /* conditions for on-the-fly time measurement */
130 /* #define TAKETIME (step > 1 && timesteps < 10) */
131 #define TAKETIME FALSE
132
133 /* #define PME_TIME_THREADS */
134
135 #ifdef GMX_DOUBLE
136 #define mpi_type MPI_DOUBLE
137 #else
138 #define mpi_type MPI_FLOAT
139 #endif
140
141 #ifdef PME_SIMD4_SPREAD_GATHER
142 #    define SIMD4_ALIGNMENT  (GMX_SIMD4_WIDTH*sizeof(real))
143 #else
144 /* We can use any alignment, apart from 0, so we use 4 reals */
145 #    define SIMD4_ALIGNMENT  (4*sizeof(real))
146 #endif
147
148 /* GMX_CACHE_SEP should be a multiple of the SIMD and SIMD4 register size
149  * to preserve alignment.
150  */
151 #define GMX_CACHE_SEP 64
152
153 /* We only define a maximum to be able to use local arrays without allocation.
154  * An order larger than 12 should never be needed, even for test cases.
155  * If needed it can be changed here.
156  */
157 #define PME_ORDER_MAX 12
158
159 /* Internal datastructures */
160 typedef struct {
161     int send_index0;
162     int send_nindex;
163     int recv_index0;
164     int recv_nindex;
165     int recv_size;   /* Receive buffer width, used with OpenMP */
166 } pme_grid_comm_t;
167
168 typedef struct {
169 #ifdef GMX_MPI
170     MPI_Comm         mpi_comm;
171 #endif
172     int              nnodes, nodeid;
173     int             *s2g0;
174     int             *s2g1;
175     int              noverlap_nodes;
176     int             *send_id, *recv_id;
177     int              send_size; /* Send buffer width, used with OpenMP */
178     pme_grid_comm_t *comm_data;
179     real            *sendbuf;
180     real            *recvbuf;
181 } pme_overlap_t;
182
183 typedef struct {
184     int *n;      /* Cumulative counts of the number of particles per thread */
185     int  nalloc; /* Allocation size of i */
186     int *i;      /* Particle indices ordered on thread index (n) */
187 } thread_plist_t;
188
189 typedef struct {
190     int      *thread_one;
191     int       n;
192     int      *ind;
193     splinevec theta;
194     real     *ptr_theta_z;
195     splinevec dtheta;
196     real     *ptr_dtheta_z;
197 } splinedata_t;
198
199 typedef struct {
200     int      dimind;        /* The index of the dimension, 0=x, 1=y */
201     int      nslab;
202     int      nodeid;
203 #ifdef GMX_MPI
204     MPI_Comm mpi_comm;
205 #endif
206
207     int     *node_dest;     /* The nodes to send x and q to with DD */
208     int     *node_src;      /* The nodes to receive x and q from with DD */
209     int     *buf_index;     /* Index for commnode into the buffers */
210
211     int      maxshift;
212
213     int      npd;
214     int      pd_nalloc;
215     int     *pd;
216     int     *count;         /* The number of atoms to send to each node */
217     int    **count_thread;
218     int     *rcount;        /* The number of atoms to receive */
219
220     int      n;
221     int      nalloc;
222     rvec    *x;
223     real    *coefficient;
224     rvec    *f;
225     gmx_bool bSpread;       /* These coordinates are used for spreading */
226     int      pme_order;
227     ivec    *idx;
228     rvec    *fractx;            /* Fractional coordinate relative to
229                                  * the lower cell boundary
230                                  */
231     int             nthread;
232     int            *thread_idx; /* Which thread should spread which coefficient */
233     thread_plist_t *thread_plist;
234     splinedata_t   *spline;
235 } pme_atomcomm_t;
236
237 #define FLBS  3
238 #define FLBSZ 4
239
240 typedef struct {
241     ivec  ci;     /* The spatial location of this grid         */
242     ivec  n;      /* The used size of *grid, including order-1 */
243     ivec  offset; /* The grid offset from the full node grid   */
244     int   order;  /* PME spreading order                       */
245     ivec  s;      /* The allocated size of *grid, s >= n       */
246     real *grid;   /* The local grid for this thread, size n    */
247 } pmegrid_t;
248
249 typedef struct {
250     pmegrid_t  grid;         /* The full node grid (non thread-local)            */
251     int        nthread;      /* The number of threads operating on this grid     */
252     ivec       nc;           /* The local spatial decomposition over the threads */
253     pmegrid_t *grid_th;      /* Array of grids for each thread                   */
254     real      *grid_all;     /* Allocated array for the grids in *grid_th        */
255     int      **g2t;          /* The grid to thread index                         */
256     ivec       nthread_comm; /* The number of threads to communicate with        */
257 } pmegrids_t;
258
259 typedef struct {
260 #ifdef PME_SIMD4_SPREAD_GATHER
261     /* Masks for 4-wide SIMD aligned spreading and gathering */
262     gmx_simd4_bool_t mask_S0[6], mask_S1[6];
263 #else
264     int              dummy; /* C89 requires that struct has at least one member */
265 #endif
266 } pme_spline_work_t;
267
268 typedef struct {
269     /* work data for solve_pme */
270     int      nalloc;
271     real *   mhx;
272     real *   mhy;
273     real *   mhz;
274     real *   m2;
275     real *   denom;
276     real *   tmp1_alloc;
277     real *   tmp1;
278     real *   tmp2;
279     real *   eterm;
280     real *   m2inv;
281
282     real     energy_q;
283     matrix   vir_q;
284     real     energy_lj;
285     matrix   vir_lj;
286 } pme_work_t;
287
288 typedef struct gmx_pme {
289     int           ndecompdim; /* The number of decomposition dimensions */
290     int           nodeid;     /* Our nodeid in mpi->mpi_comm */
291     int           nodeid_major;
292     int           nodeid_minor;
293     int           nnodes;    /* The number of nodes doing PME */
294     int           nnodes_major;
295     int           nnodes_minor;
296
297     MPI_Comm      mpi_comm;
298     MPI_Comm      mpi_comm_d[2]; /* Indexed on dimension, 0=x, 1=y */
299 #ifdef GMX_MPI
300     MPI_Datatype  rvec_mpi;      /* the pme vector's MPI type */
301 #endif
302
303     gmx_bool   bUseThreads;   /* Does any of the PME ranks have nthread>1?   */
304     int        nthread;       /* The number of threads doing PME on our rank */
305
306     gmx_bool   bPPnode;       /* Node also does particle-particle forces */
307     gmx_bool   bFEP;          /* Compute Free energy contribution */
308     gmx_bool   bFEP_q;
309     gmx_bool   bFEP_lj;
310     int        nkx, nky, nkz; /* Grid dimensions */
311     gmx_bool   bP3M;          /* Do P3M: optimize the influence function */
312     int        pme_order;
313     real       epsilon_r;
314
315     int        ljpme_combination_rule;  /* Type of combination rule in LJ-PME */
316
317     int        ngrids;                  /* number of grids we maintain for pmegrid, (c)fftgrid and pfft_setups */
318
319     pmegrids_t pmegrid[DO_Q_AND_LJ_LB]; /* Grids on which we do spreading/interpolation,
320                                          * includes overlap. Grid indices are ordered as
321                                          * follows:
322                                          * 0: Coulomb PME, state A
323                                          * 1: Coulomb PME, state B
324                                          * 2-8: LJ-PME
325                                          * This can probably be done in a better way
326                                          * but this simple hack works for now
327                                          */
328     /* The PME coefficient spreading grid sizes/strides, includes pme_order-1 */
329     int        pmegrid_nx, pmegrid_ny, pmegrid_nz;
330     /* pmegrid_nz might be larger than strictly necessary to ensure
331      * memory alignment, pmegrid_nz_base gives the real base size.
332      */
333     int     pmegrid_nz_base;
334     /* The local PME grid starting indices */
335     int     pmegrid_start_ix, pmegrid_start_iy, pmegrid_start_iz;
336
337     /* Work data for spreading and gathering */
338     pme_spline_work_t     *spline_work;
339
340     real                 **fftgrid; /* Grids for FFT. With 1D FFT decomposition this can be a pointer */
341     /* inside the interpolation grid, but separate for 2D PME decomp. */
342     int                    fftgrid_nx, fftgrid_ny, fftgrid_nz;
343
344     t_complex            **cfftgrid;  /* Grids for complex FFT data */
345
346     int                    cfftgrid_nx, cfftgrid_ny, cfftgrid_nz;
347
348     gmx_parallel_3dfft_t  *pfft_setup;
349
350     int                   *nnx, *nny, *nnz;
351     real                  *fshx, *fshy, *fshz;
352
353     pme_atomcomm_t         atc[2]; /* Indexed on decomposition index */
354     matrix                 recipbox;
355     splinevec              bsp_mod;
356     /* Buffers to store data for local atoms for L-B combination rule
357      * calculations in LJ-PME. lb_buf1 stores either the coefficients
358      * for spreading/gathering (in serial), or the C6 coefficient for
359      * local atoms (in parallel).  lb_buf2 is only used in parallel,
360      * and stores the sigma values for local atoms. */
361     real                 *lb_buf1, *lb_buf2;
362     int                   lb_buf_nalloc; /* Allocation size for the above buffers. */
363
364     pme_overlap_t         overlap[2];    /* Indexed on dimension, 0=x, 1=y */
365
366     pme_atomcomm_t        atc_energy;    /* Only for gmx_pme_calc_energy */
367
368     rvec                 *bufv;          /* Communication buffer */
369     real                 *bufr;          /* Communication buffer */
370     int                   buf_nalloc;    /* The communication buffer size */
371
372     /* thread local work data for solve_pme */
373     pme_work_t *work;
374
375     /* Work data for sum_qgrid */
376     real *   sum_qgrid_tmp;
377     real *   sum_qgrid_dd_tmp;
378 } t_gmx_pme;
379
380 static void calc_interpolation_idx(gmx_pme_t pme, pme_atomcomm_t *atc,
381                                    int start, int grid_index, int end, int thread)
382 {
383     int             i;
384     int            *idxptr, tix, tiy, tiz;
385     real           *xptr, *fptr, tx, ty, tz;
386     real            rxx, ryx, ryy, rzx, rzy, rzz;
387     int             nx, ny, nz;
388     int             start_ix, start_iy, start_iz;
389     int            *g2tx, *g2ty, *g2tz;
390     gmx_bool        bThreads;
391     int            *thread_idx = NULL;
392     thread_plist_t *tpl        = NULL;
393     int            *tpl_n      = NULL;
394     int             thread_i;
395
396     nx  = pme->nkx;
397     ny  = pme->nky;
398     nz  = pme->nkz;
399
400     start_ix = pme->pmegrid_start_ix;
401     start_iy = pme->pmegrid_start_iy;
402     start_iz = pme->pmegrid_start_iz;
403
404     rxx = pme->recipbox[XX][XX];
405     ryx = pme->recipbox[YY][XX];
406     ryy = pme->recipbox[YY][YY];
407     rzx = pme->recipbox[ZZ][XX];
408     rzy = pme->recipbox[ZZ][YY];
409     rzz = pme->recipbox[ZZ][ZZ];
410
411     g2tx = pme->pmegrid[grid_index].g2t[XX];
412     g2ty = pme->pmegrid[grid_index].g2t[YY];
413     g2tz = pme->pmegrid[grid_index].g2t[ZZ];
414
415     bThreads = (atc->nthread > 1);
416     if (bThreads)
417     {
418         thread_idx = atc->thread_idx;
419
420         tpl   = &atc->thread_plist[thread];
421         tpl_n = tpl->n;
422         for (i = 0; i < atc->nthread; i++)
423         {
424             tpl_n[i] = 0;
425         }
426     }
427
428     for (i = start; i < end; i++)
429     {
430         xptr   = atc->x[i];
431         idxptr = atc->idx[i];
432         fptr   = atc->fractx[i];
433
434         /* Fractional coordinates along box vectors, add 2.0 to make 100% sure we are positive for triclinic boxes */
435         tx = nx * ( xptr[XX] * rxx + xptr[YY] * ryx + xptr[ZZ] * rzx + 2.0 );
436         ty = ny * (                  xptr[YY] * ryy + xptr[ZZ] * rzy + 2.0 );
437         tz = nz * (                                   xptr[ZZ] * rzz + 2.0 );
438
439         tix = (int)(tx);
440         tiy = (int)(ty);
441         tiz = (int)(tz);
442
443         /* Because decomposition only occurs in x and y,
444          * we never have a fraction correction in z.
445          */
446         fptr[XX] = tx - tix + pme->fshx[tix];
447         fptr[YY] = ty - tiy + pme->fshy[tiy];
448         fptr[ZZ] = tz - tiz;
449
450         idxptr[XX] = pme->nnx[tix];
451         idxptr[YY] = pme->nny[tiy];
452         idxptr[ZZ] = pme->nnz[tiz];
453
454 #ifdef DEBUG
455         range_check(idxptr[XX], 0, pme->pmegrid_nx);
456         range_check(idxptr[YY], 0, pme->pmegrid_ny);
457         range_check(idxptr[ZZ], 0, pme->pmegrid_nz);
458 #endif
459
460         if (bThreads)
461         {
462             thread_i      = g2tx[idxptr[XX]] + g2ty[idxptr[YY]] + g2tz[idxptr[ZZ]];
463             thread_idx[i] = thread_i;
464             tpl_n[thread_i]++;
465         }
466     }
467
468     if (bThreads)
469     {
470         /* Make a list of particle indices sorted on thread */
471
472         /* Get the cumulative count */
473         for (i = 1; i < atc->nthread; i++)
474         {
475             tpl_n[i] += tpl_n[i-1];
476         }
477         /* The current implementation distributes particles equally
478          * over the threads, so we could actually allocate for that
479          * in pme_realloc_atomcomm_things.
480          */
481         if (tpl_n[atc->nthread-1] > tpl->nalloc)
482         {
483             tpl->nalloc = over_alloc_large(tpl_n[atc->nthread-1]);
484             srenew(tpl->i, tpl->nalloc);
485         }
486         /* Set tpl_n to the cumulative start */
487         for (i = atc->nthread-1; i >= 1; i--)
488         {
489             tpl_n[i] = tpl_n[i-1];
490         }
491         tpl_n[0] = 0;
492
493         /* Fill our thread local array with indices sorted on thread */
494         for (i = start; i < end; i++)
495         {
496             tpl->i[tpl_n[atc->thread_idx[i]]++] = i;
497         }
498         /* Now tpl_n contains the cumulative count again */
499     }
500 }
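/* The thread bucketing in calc_interpolation_idx() above is a counting sort:
 * count the particles per destination thread, turn the counts into cumulative
 * start offsets, then scatter the particle indices.  A minimal standalone
 * sketch of that pattern (hypothetical names, not part of the original code):
 */
#if 0
static void counting_sort_by_thread(int n, const int *thread_of, int nthread,
                                    int *sorted_ind, int *offset)
{
    int i, t;

    for (t = 0; t < nthread; t++)
    {
        offset[t] = 0;
    }
    for (i = 0; i < n; i++)
    {
        offset[thread_of[i]]++;                  /* count per thread */
    }
    for (t = 1; t < nthread; t++)
    {
        offset[t] += offset[t-1];                /* cumulative counts */
    }
    for (t = nthread - 1; t >= 1; t--)
    {
        offset[t] = offset[t-1];                 /* shift to get start offsets */
    }
    offset[0] = 0;
    for (i = 0; i < n; i++)
    {
        sorted_ind[offset[thread_of[i]]++] = i;  /* scatter; restores the counts */
    }
}
#endif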
501
502 static void make_thread_local_ind(pme_atomcomm_t *atc,
503                                   int thread, splinedata_t *spline)
504 {
505     int             n, t, i, start, end;
506     thread_plist_t *tpl;
507
508     /* Combine the indices made by each thread into one index */
509
510     n     = 0;
511     start = 0;
512     for (t = 0; t < atc->nthread; t++)
513     {
514         tpl = &atc->thread_plist[t];
515         /* Copy our part (start - end) from the list of thread t */
516         if (thread > 0)
517         {
518             start = tpl->n[thread-1];
519         }
520         end = tpl->n[thread];
521         for (i = start; i < end; i++)
522         {
523             spline->ind[n++] = tpl->i[i];
524         }
525     }
526
527     spline->n = n;
528 }
529
530
531 static void pme_calc_pidx(int start, int end,
532                           matrix recipbox, rvec x[],
533                           pme_atomcomm_t *atc, int *count)
534 {
535     int   nslab, i;
536     int   si;
537     real *xptr, s;
538     real  rxx, ryx, rzx, ryy, rzy;
539     int  *pd;
540
541     /* Calculate the PME task index (pidx) for each particle.
542      * Here we always assign equally sized slabs to each node
543      * for load balancing reasons (the PME grid spacing is not used).
544      */
545
546     nslab = atc->nslab;
547     pd    = atc->pd;
548
549     /* Reset the count */
550     for (i = 0; i < nslab; i++)
551     {
552         count[i] = 0;
553     }
554
555     if (atc->dimind == 0)
556     {
557         rxx = recipbox[XX][XX];
558         ryx = recipbox[YY][XX];
559         rzx = recipbox[ZZ][XX];
560         /* Calculate the node index in x-dimension */
561         for (i = start; i < end; i++)
562         {
563             xptr   = x[i];
564             /* Fractional coordinates along box vectors */
565             s     = nslab*(xptr[XX]*rxx + xptr[YY]*ryx + xptr[ZZ]*rzx);
566             si    = (int)(s + 2*nslab) % nslab;
567             pd[i] = si;
568             count[si]++;
569         }
570     }
571     else
572     {
573         ryy = recipbox[YY][YY];
574         rzy = recipbox[ZZ][YY];
575         /* Calculate the node index in y-dimension */
576         for (i = start; i < end; i++)
577         {
578             xptr   = x[i];
579             /* Fractional coordinates along box vectors */
580             s     = nslab*(xptr[YY]*ryy + xptr[ZZ]*rzy);
581             si    = (int)(s + 2*nslab) % nslab;
582             pd[i] = si;
583             count[si]++;
584         }
585     }
586 }
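/* Example of the wrapping above (illustrative numbers, not from the original
 * code): with nslab = 4 and a fractional coordinate of -0.05, s = 4*(-0.05)
 * = -0.2; adding 2*nslab = 8 before truncation gives (int)7.8 = 7 and
 * 7 % 4 = 3, so the atom lands in the last slab instead of producing a
 * negative index.
 */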
587
588 static void pme_calc_pidx_wrapper(int natoms, matrix recipbox, rvec x[],
589                                   pme_atomcomm_t *atc)
590 {
591     int nthread, thread, slab;
592
593     nthread = atc->nthread;
594
595 #pragma omp parallel for num_threads(nthread) schedule(static)
596     for (thread = 0; thread < nthread; thread++)
597     {
598         pme_calc_pidx(natoms* thread   /nthread,
599                       natoms*(thread+1)/nthread,
600                       recipbox, x, atc, atc->count_thread[thread]);
601     }
602     /* Non-parallel reduction, since nslab is small */
603
604     for (thread = 1; thread < nthread; thread++)
605     {
606         for (slab = 0; slab < atc->nslab; slab++)
607         {
608             atc->count_thread[0][slab] += atc->count_thread[thread][slab];
609         }
610     }
611 }
612
613 static void realloc_splinevec(splinevec th, real **ptr_z, int nalloc)
614 {
615     const int padding = 4;
616     int       i;
617
618     srenew(th[XX], nalloc);
619     srenew(th[YY], nalloc);
620     /* In z we add padding; this is only required for the aligned SIMD code */
621     sfree_aligned(*ptr_z);
622     snew_aligned(*ptr_z, nalloc+2*padding, SIMD4_ALIGNMENT);
623     th[ZZ] = *ptr_z + padding;
624
625     for (i = 0; i < padding; i++)
626     {
627         (*ptr_z)[               i] = 0;
628         (*ptr_z)[padding+nalloc+i] = 0;
629     }
630 }
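/* Memory layout produced above for the z component (descriptive note added
 * for illustration):
 *
 *   *ptr_z:  [ 0 0 0 0 | used theta_z values (nalloc) | 0 0 0 0 ]
 *              padding   th[ZZ] points here             padding
 *
 * so 4-wide SIMD loads that start up to 'padding' elements before or after
 * the used range stay inside the allocation and read zeros.
 */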
631
632 static void pme_realloc_splinedata(splinedata_t *spline, pme_atomcomm_t *atc)
633 {
634     int i, d;
635
636     srenew(spline->ind, atc->nalloc);
637     /* Initialize the index to identity so it works without threads */
638     for (i = 0; i < atc->nalloc; i++)
639     {
640         spline->ind[i] = i;
641     }
642
643     realloc_splinevec(spline->theta, &spline->ptr_theta_z,
644                       atc->pme_order*atc->nalloc);
645     realloc_splinevec(spline->dtheta, &spline->ptr_dtheta_z,
646                       atc->pme_order*atc->nalloc);
647 }
648
649 static void pme_realloc_atomcomm_things(pme_atomcomm_t *atc)
650 {
651     int nalloc_old, i, j, nalloc_tpl;
652
653     /* We have to avoid a NULL pointer for atc->x to avoid
654      * possible fatal errors in MPI routines.
655      */
656     if (atc->n > atc->nalloc || atc->nalloc == 0)
657     {
658         nalloc_old  = atc->nalloc;
659         atc->nalloc = over_alloc_dd(max(atc->n, 1));
660
661         if (atc->nslab > 1)
662         {
663             srenew(atc->x, atc->nalloc);
664             srenew(atc->coefficient, atc->nalloc);
665             srenew(atc->f, atc->nalloc);
666             for (i = nalloc_old; i < atc->nalloc; i++)
667             {
668                 clear_rvec(atc->f[i]);
669             }
670         }
671         if (atc->bSpread)
672         {
673             srenew(atc->fractx, atc->nalloc);
674             srenew(atc->idx, atc->nalloc);
675
676             if (atc->nthread > 1)
677             {
678                 srenew(atc->thread_idx, atc->nalloc);
679             }
680
681             for (i = 0; i < atc->nthread; i++)
682             {
683                 pme_realloc_splinedata(&atc->spline[i], atc);
684             }
685         }
686     }
687 }
688
689 static void pme_dd_sendrecv(pme_atomcomm_t gmx_unused *atc,
690                             gmx_bool gmx_unused bBackward, int gmx_unused shift,
691                             void gmx_unused *buf_s, int gmx_unused nbyte_s,
692                             void gmx_unused *buf_r, int gmx_unused nbyte_r)
693 {
694 #ifdef GMX_MPI
695     int        dest, src;
696     MPI_Status stat;
697
698     if (bBackward == FALSE)
699     {
700         dest = atc->node_dest[shift];
701         src  = atc->node_src[shift];
702     }
703     else
704     {
705         dest = atc->node_src[shift];
706         src  = atc->node_dest[shift];
707     }
708
709     if (nbyte_s > 0 && nbyte_r > 0)
710     {
711         MPI_Sendrecv(buf_s, nbyte_s, MPI_BYTE,
712                      dest, shift,
713                      buf_r, nbyte_r, MPI_BYTE,
714                      src, shift,
715                      atc->mpi_comm, &stat);
716     }
717     else if (nbyte_s > 0)
718     {
719         MPI_Send(buf_s, nbyte_s, MPI_BYTE,
720                  dest, shift,
721                  atc->mpi_comm);
722     }
723     else if (nbyte_r > 0)
724     {
725         MPI_Recv(buf_r, nbyte_r, MPI_BYTE,
726                  src, shift,
727                  atc->mpi_comm, &stat);
728     }
729 #endif
730 }
731
732 static void dd_pmeredist_pos_coeffs(gmx_pme_t pme,
733                                     int n, gmx_bool bX, rvec *x, real *data,
734                                     pme_atomcomm_t *atc)
735 {
736     int *commnode, *buf_index;
737     int  nnodes_comm, i, nsend, local_pos, buf_pos, node, scount, rcount;
738
739     commnode  = atc->node_dest;
740     buf_index = atc->buf_index;
741
742     nnodes_comm = min(2*atc->maxshift, atc->nslab-1);
743
744     nsend = 0;
745     for (i = 0; i < nnodes_comm; i++)
746     {
747         buf_index[commnode[i]] = nsend;
748         nsend                 += atc->count[commnode[i]];
749     }
750     if (bX)
751     {
752         if (atc->count[atc->nodeid] + nsend != n)
753         {
754             gmx_fatal(FARGS, "%d particles communicated to PME rank %d are more than 2/3 times the cut-off out of the domain decomposition cell of their charge group in dimension %c.\n"
755                       "This usually means that your system is not well equilibrated.",
756                       n - (atc->count[atc->nodeid] + nsend),
757                       pme->nodeid, 'x'+atc->dimind);
758         }
759
760         if (nsend > pme->buf_nalloc)
761         {
762             pme->buf_nalloc = over_alloc_dd(nsend);
763             srenew(pme->bufv, pme->buf_nalloc);
764             srenew(pme->bufr, pme->buf_nalloc);
765         }
766
767         atc->n = atc->count[atc->nodeid];
768         for (i = 0; i < nnodes_comm; i++)
769         {
770             scount = atc->count[commnode[i]];
771             /* Communicate the count */
772             if (debug)
773             {
774                 fprintf(debug, "dimind %d PME rank %d send to rank %d: %d\n",
775                         atc->dimind, atc->nodeid, commnode[i], scount);
776             }
777             pme_dd_sendrecv(atc, FALSE, i,
778                             &scount, sizeof(int),
779                             &atc->rcount[i], sizeof(int));
780             atc->n += atc->rcount[i];
781         }
782
783         pme_realloc_atomcomm_things(atc);
784     }
785
786     local_pos = 0;
787     for (i = 0; i < n; i++)
788     {
789         node = atc->pd[i];
790         if (node == atc->nodeid)
791         {
792             /* Copy direct to the receive buffer */
793             if (bX)
794             {
795                 copy_rvec(x[i], atc->x[local_pos]);
796             }
797             atc->coefficient[local_pos] = data[i];
798             local_pos++;
799         }
800         else
801         {
802             /* Copy to the send buffer */
803             if (bX)
804             {
805                 copy_rvec(x[i], pme->bufv[buf_index[node]]);
806             }
807             pme->bufr[buf_index[node]] = data[i];
808             buf_index[node]++;
809         }
810     }
811
812     buf_pos = 0;
813     for (i = 0; i < nnodes_comm; i++)
814     {
815         scount = atc->count[commnode[i]];
816         rcount = atc->rcount[i];
817         if (scount > 0 || rcount > 0)
818         {
819             if (bX)
820             {
821                 /* Communicate the coordinates */
822                 pme_dd_sendrecv(atc, FALSE, i,
823                                 pme->bufv[buf_pos], scount*sizeof(rvec),
824                                 atc->x[local_pos], rcount*sizeof(rvec));
825             }
826             /* Communicate the coefficients */
827             pme_dd_sendrecv(atc, FALSE, i,
828                             pme->bufr+buf_pos, scount*sizeof(real),
829                             atc->coefficient+local_pos, rcount*sizeof(real));
830             buf_pos   += scount;
831             local_pos += atc->rcount[i];
832         }
833     }
834 }
835
836 static void dd_pmeredist_f(gmx_pme_t pme, pme_atomcomm_t *atc,
837                            int n, rvec *f,
838                            gmx_bool bAddF)
839 {
840     int *commnode, *buf_index;
841     int  nnodes_comm, local_pos, buf_pos, i, scount, rcount, node;
842
843     commnode  = atc->node_dest;
844     buf_index = atc->buf_index;
845
846     nnodes_comm = min(2*atc->maxshift, atc->nslab-1);
847
848     local_pos = atc->count[atc->nodeid];
849     buf_pos   = 0;
850     for (i = 0; i < nnodes_comm; i++)
851     {
852         scount = atc->rcount[i];
853         rcount = atc->count[commnode[i]];
854         if (scount > 0 || rcount > 0)
855         {
856             /* Communicate the forces */
857             pme_dd_sendrecv(atc, TRUE, i,
858                             atc->f[local_pos], scount*sizeof(rvec),
859                             pme->bufv[buf_pos], rcount*sizeof(rvec));
860             local_pos += scount;
861         }
862         buf_index[commnode[i]] = buf_pos;
863         buf_pos               += rcount;
864     }
865
866     local_pos = 0;
867     if (bAddF)
868     {
869         for (i = 0; i < n; i++)
870         {
871             node = atc->pd[i];
872             if (node == atc->nodeid)
873             {
874                 /* Add from the local force array */
875                 rvec_inc(f[i], atc->f[local_pos]);
876                 local_pos++;
877             }
878             else
879             {
880                 /* Add from the receive buffer */
881                 rvec_inc(f[i], pme->bufv[buf_index[node]]);
882                 buf_index[node]++;
883             }
884         }
885     }
886     else
887     {
888         for (i = 0; i < n; i++)
889         {
890             node = atc->pd[i];
891             if (node == atc->nodeid)
892             {
893                 /* Copy from the local force array */
894                 copy_rvec(atc->f[local_pos], f[i]);
895                 local_pos++;
896             }
897             else
898             {
899                 /* Copy from the receive buffer */
900                 copy_rvec(pme->bufv[buf_index[node]], f[i]);
901                 buf_index[node]++;
902             }
903         }
904     }
905 }
906
907 #ifdef GMX_MPI
908 static void gmx_sum_qgrid_dd(gmx_pme_t pme, real *grid, int direction)
909 {
910     pme_overlap_t *overlap;
911     int            send_index0, send_nindex;
912     int            recv_index0, recv_nindex;
913     MPI_Status     stat;
914     int            i, j, k, ix, iy, iz, icnt;
915     int            ipulse, send_id, recv_id, datasize;
916     real          *p;
917     real          *sendptr, *recvptr;
918
919     /* Start with minor-rank communication. This is a bit of a pain since it is not contiguous */
920     overlap = &pme->overlap[1];
921
922     for (ipulse = 0; ipulse < overlap->noverlap_nodes; ipulse++)
923     {
924         /* Since we have already (un)wrapped the overlap in the z-dimension,
925          * we only have to communicate 0 to nkz (not pmegrid_nz).
926          */
927         if (direction == GMX_SUM_GRID_FORWARD)
928         {
929             send_id       = overlap->send_id[ipulse];
930             recv_id       = overlap->recv_id[ipulse];
931             send_index0   = overlap->comm_data[ipulse].send_index0;
932             send_nindex   = overlap->comm_data[ipulse].send_nindex;
933             recv_index0   = overlap->comm_data[ipulse].recv_index0;
934             recv_nindex   = overlap->comm_data[ipulse].recv_nindex;
935         }
936         else
937         {
938             send_id       = overlap->recv_id[ipulse];
939             recv_id       = overlap->send_id[ipulse];
940             send_index0   = overlap->comm_data[ipulse].recv_index0;
941             send_nindex   = overlap->comm_data[ipulse].recv_nindex;
942             recv_index0   = overlap->comm_data[ipulse].send_index0;
943             recv_nindex   = overlap->comm_data[ipulse].send_nindex;
944         }
945
946         /* Copy data to contiguous send buffer */
947         if (debug)
948         {
949             fprintf(debug, "PME send rank %d %d -> %d grid start %d Communicating %d to %d\n",
950                     pme->nodeid, overlap->nodeid, send_id,
951                     pme->pmegrid_start_iy,
952                     send_index0-pme->pmegrid_start_iy,
953                     send_index0-pme->pmegrid_start_iy+send_nindex);
954         }
955         icnt = 0;
956         for (i = 0; i < pme->pmegrid_nx; i++)
957         {
958             ix = i;
959             for (j = 0; j < send_nindex; j++)
960             {
961                 iy = j + send_index0 - pme->pmegrid_start_iy;
962                 for (k = 0; k < pme->nkz; k++)
963                 {
964                     iz = k;
965                     overlap->sendbuf[icnt++] = grid[ix*(pme->pmegrid_ny*pme->pmegrid_nz)+iy*(pme->pmegrid_nz)+iz];
966                 }
967             }
968         }
969
970         datasize      = pme->pmegrid_nx * pme->nkz;
971
972         MPI_Sendrecv(overlap->sendbuf, send_nindex*datasize, GMX_MPI_REAL,
973                      send_id, ipulse,
974                      overlap->recvbuf, recv_nindex*datasize, GMX_MPI_REAL,
975                      recv_id, ipulse,
976                      overlap->mpi_comm, &stat);
977
978         /* Get data from contiguous recv buffer */
979         if (debug)
980         {
981             fprintf(debug, "PME recv rank %d %d <- %d grid start %d Communicating %d to %d\n",
982                     pme->nodeid, overlap->nodeid, recv_id,
983                     pme->pmegrid_start_iy,
984                     recv_index0-pme->pmegrid_start_iy,
985                     recv_index0-pme->pmegrid_start_iy+recv_nindex);
986         }
987         icnt = 0;
988         for (i = 0; i < pme->pmegrid_nx; i++)
989         {
990             ix = i;
991             for (j = 0; j < recv_nindex; j++)
992             {
993                 iy = j + recv_index0 - pme->pmegrid_start_iy;
994                 for (k = 0; k < pme->nkz; k++)
995                 {
996                     iz = k;
997                     if (direction == GMX_SUM_GRID_FORWARD)
998                     {
999                         grid[ix*(pme->pmegrid_ny*pme->pmegrid_nz)+iy*(pme->pmegrid_nz)+iz] += overlap->recvbuf[icnt++];
1000                     }
1001                     else
1002                     {
1003                         grid[ix*(pme->pmegrid_ny*pme->pmegrid_nz)+iy*(pme->pmegrid_nz)+iz]  = overlap->recvbuf[icnt++];
1004                     }
1005                 }
1006             }
1007         }
1008     }
1009
1010     /* Major dimension is easier, no copying required,
1011      * but we might have to sum to a separate array.
1012      * Since we don't copy, we have to communicate up to pmegrid_nz,
1013      * not nkz as for the minor direction.
1014      */
1015     overlap = &pme->overlap[0];
1016
1017     for (ipulse = 0; ipulse < overlap->noverlap_nodes; ipulse++)
1018     {
1019         if (direction == GMX_SUM_GRID_FORWARD)
1020         {
1021             send_id       = overlap->send_id[ipulse];
1022             recv_id       = overlap->recv_id[ipulse];
1023             send_index0   = overlap->comm_data[ipulse].send_index0;
1024             send_nindex   = overlap->comm_data[ipulse].send_nindex;
1025             recv_index0   = overlap->comm_data[ipulse].recv_index0;
1026             recv_nindex   = overlap->comm_data[ipulse].recv_nindex;
1027             recvptr       = overlap->recvbuf;
1028         }
1029         else
1030         {
1031             send_id       = overlap->recv_id[ipulse];
1032             recv_id       = overlap->send_id[ipulse];
1033             send_index0   = overlap->comm_data[ipulse].recv_index0;
1034             send_nindex   = overlap->comm_data[ipulse].recv_nindex;
1035             recv_index0   = overlap->comm_data[ipulse].send_index0;
1036             recv_nindex   = overlap->comm_data[ipulse].send_nindex;
1037             recvptr       = grid + (recv_index0-pme->pmegrid_start_ix)*(pme->pmegrid_ny*pme->pmegrid_nz);
1038         }
1039
1040         sendptr       = grid + (send_index0-pme->pmegrid_start_ix)*(pme->pmegrid_ny*pme->pmegrid_nz);
1041         datasize      = pme->pmegrid_ny * pme->pmegrid_nz;
1042
1043         if (debug)
1044         {
1045             fprintf(debug, "PME send rank %d %d -> %d grid start %d Communicating %d to %d\n",
1046                     pme->nodeid, overlap->nodeid, send_id,
1047                     pme->pmegrid_start_ix,
1048                     send_index0-pme->pmegrid_start_ix,
1049                     send_index0-pme->pmegrid_start_ix+send_nindex);
1050             fprintf(debug, "PME recv rank %d %d <- %d grid start %d Communicating %d to %d\n",
1051                     pme->nodeid, overlap->nodeid, recv_id,
1052                     pme->pmegrid_start_ix,
1053                     recv_index0-pme->pmegrid_start_ix,
1054                     recv_index0-pme->pmegrid_start_ix+recv_nindex);
1055         }
1056
1057         MPI_Sendrecv(sendptr, send_nindex*datasize, GMX_MPI_REAL,
1058                      send_id, ipulse,
1059                      recvptr, recv_nindex*datasize, GMX_MPI_REAL,
1060                      recv_id, ipulse,
1061                      overlap->mpi_comm, &stat);
1062
1063         /* ADD data from contiguous recv buffer */
1064         if (direction == GMX_SUM_GRID_FORWARD)
1065         {
1066             p = grid + (recv_index0-pme->pmegrid_start_ix)*(pme->pmegrid_ny*pme->pmegrid_nz);
1067             for (i = 0; i < recv_nindex*datasize; i++)
1068             {
1069                 p[i] += overlap->recvbuf[i];
1070             }
1071         }
1072     }
1073 }
1074 #endif
1075
1076
1077 static int copy_pmegrid_to_fftgrid(gmx_pme_t pme, real *pmegrid, real *fftgrid, int grid_index)
1078 {
1079     ivec    local_fft_ndata, local_fft_offset, local_fft_size;
1080     ivec    local_pme_size;
1081     int     i, ix, iy, iz;
1082     int     pmeidx, fftidx;
1083
1084     /* Dimensions should be identical for A/B grid, so we just use A here */
1085     gmx_parallel_3dfft_real_limits(pme->pfft_setup[grid_index],
1086                                    local_fft_ndata,
1087                                    local_fft_offset,
1088                                    local_fft_size);
1089
1090     local_pme_size[0] = pme->pmegrid_nx;
1091     local_pme_size[1] = pme->pmegrid_ny;
1092     local_pme_size[2] = pme->pmegrid_nz;
1093
1094     /* The fftgrid is always 'justified' to the lower-left corner of the PME grid,
1095        the offset is identical, and the PME grid always has more data (due to overlap)
1096      */
1097     {
1098 #ifdef DEBUG_PME
1099         FILE *fp, *fp2;
1100         char  fn[STRLEN];
1101         real  val;
1102         sprintf(fn, "pmegrid%d.pdb", pme->nodeid);
1103         fp = gmx_ffopen(fn, "w");
1104         sprintf(fn, "pmegrid%d.txt", pme->nodeid);
1105         fp2 = gmx_ffopen(fn, "w");
1106 #endif
1107
1108         for (ix = 0; ix < local_fft_ndata[XX]; ix++)
1109         {
1110             for (iy = 0; iy < local_fft_ndata[YY]; iy++)
1111             {
1112                 for (iz = 0; iz < local_fft_ndata[ZZ]; iz++)
1113                 {
1114                     pmeidx          = ix*(local_pme_size[YY]*local_pme_size[ZZ])+iy*(local_pme_size[ZZ])+iz;
1115                     fftidx          = ix*(local_fft_size[YY]*local_fft_size[ZZ])+iy*(local_fft_size[ZZ])+iz;
1116                     fftgrid[fftidx] = pmegrid[pmeidx];
1117 #ifdef DEBUG_PME
1118                     val = 100*pmegrid[pmeidx];
1119                     if (pmegrid[pmeidx] != 0)
1120                     {
1121                         gmx_fprintf_pdb_atomline(fp, epdbATOM, pmeidx, "CA", ' ', "GLY", ' ', pmeidx, ' ',
1122                                                  5.0*ix, 5.0*iy, 5.0*iz, 1.0, val, "");
1123                     }
1124                     if (pmegrid[pmeidx] != 0)
1125                     {
1126                         fprintf(fp2, "%-12s  %5d  %5d  %5d  %12.5e\n",
1127                                 "qgrid",
1128                                 pme->pmegrid_start_ix + ix,
1129                                 pme->pmegrid_start_iy + iy,
1130                                 pme->pmegrid_start_iz + iz,
1131                                 pmegrid[pmeidx]);
1132                     }
1133 #endif
1134                 }
1135             }
1136         }
1137 #ifdef DEBUG_PME
1138         gmx_ffclose(fp);
1139         gmx_ffclose(fp2);
1140 #endif
1141     }
1142     return 0;
1143 }
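/* Index mapping used above (illustrative numbers, not from the original
 * code): with padded local sizes of 20 x 20 x 24 for the PME grid and
 * 18 x 18 x 20 for the FFT grid, the element at (ix,iy,iz) = (3,5,7) sits at
 * pmeidx = (3*20 + 5)*24 + 7 = 1567 and fftidx = (3*18 + 5)*20 + 7 = 1187;
 * only the strides differ, the (ix,iy,iz) coordinates are the same in both
 * grids.
 */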
1144
1145
1146 static gmx_cycles_t omp_cyc_start()
1147 {
1148     return gmx_cycles_read();
1149 }
1150
1151 static gmx_cycles_t omp_cyc_end(gmx_cycles_t c)
1152 {
1153     return gmx_cycles_read() - c;
1154 }
1155
1156
1157 static int copy_fftgrid_to_pmegrid(gmx_pme_t pme, const real *fftgrid, real *pmegrid, int grid_index,
1158                                    int nthread, int thread)
1159 {
1160     ivec          local_fft_ndata, local_fft_offset, local_fft_size;
1161     ivec          local_pme_size;
1162     int           ixy0, ixy1, ixy, ix, iy, iz;
1163     int           pmeidx, fftidx;
1164 #ifdef PME_TIME_THREADS
1165     gmx_cycles_t  c1;
1166     static double cs1 = 0;
1167     static int    cnt = 0;
1168 #endif
1169
1170 #ifdef PME_TIME_THREADS
1171     c1 = omp_cyc_start();
1172 #endif
1173     /* Dimensions should be identical for A/B grid, so we just use A here */
1174     gmx_parallel_3dfft_real_limits(pme->pfft_setup[grid_index],
1175                                    local_fft_ndata,
1176                                    local_fft_offset,
1177                                    local_fft_size);
1178
1179     local_pme_size[0] = pme->pmegrid_nx;
1180     local_pme_size[1] = pme->pmegrid_ny;
1181     local_pme_size[2] = pme->pmegrid_nz;
1182
1183     /* The fftgrid is always 'justified' to the lower-left corner of the PME grid,
1184        the offset is identical, and the PME grid always has more data (due to overlap)
1185      */
1186     ixy0 = ((thread  )*local_fft_ndata[XX]*local_fft_ndata[YY])/nthread;
1187     ixy1 = ((thread+1)*local_fft_ndata[XX]*local_fft_ndata[YY])/nthread;
1188
1189     for (ixy = ixy0; ixy < ixy1; ixy++)
1190     {
1191         ix = ixy/local_fft_ndata[YY];
1192         iy = ixy - ix*local_fft_ndata[YY];
1193
1194         pmeidx = (ix*local_pme_size[YY] + iy)*local_pme_size[ZZ];
1195         fftidx = (ix*local_fft_size[YY] + iy)*local_fft_size[ZZ];
1196         for (iz = 0; iz < local_fft_ndata[ZZ]; iz++)
1197         {
1198             pmegrid[pmeidx+iz] = fftgrid[fftidx+iz];
1199         }
1200     }
1201
1202 #ifdef PME_TIME_THREADS
1203     c1   = omp_cyc_end(c1);
1204     cs1 += (double)c1;
1205     cnt++;
1206     if (cnt % 20 == 0)
1207     {
1208         printf("copy %.2f\n", cs1*1e-9);
1209     }
1210 #endif
1211
1212     return 0;
1213 }
1214
1215
1216 static void wrap_periodic_pmegrid(gmx_pme_t pme, real *pmegrid)
1217 {
1218     int     nx, ny, nz, pnx, pny, pnz, ny_x, overlap, ix, iy, iz;
1219
1220     nx = pme->nkx;
1221     ny = pme->nky;
1222     nz = pme->nkz;
1223
1224     pnx = pme->pmegrid_nx;
1225     pny = pme->pmegrid_ny;
1226     pnz = pme->pmegrid_nz;
1227
1228     overlap = pme->pme_order - 1;
1229
1230     /* Add periodic overlap in z */
1231     for (ix = 0; ix < pme->pmegrid_nx; ix++)
1232     {
1233         for (iy = 0; iy < pme->pmegrid_ny; iy++)
1234         {
1235             for (iz = 0; iz < overlap; iz++)
1236             {
1237                 pmegrid[(ix*pny+iy)*pnz+iz] +=
1238                     pmegrid[(ix*pny+iy)*pnz+nz+iz];
1239             }
1240         }
1241     }
1242
1243     if (pme->nnodes_minor == 1)
1244     {
1245         for (ix = 0; ix < pme->pmegrid_nx; ix++)
1246         {
1247             for (iy = 0; iy < overlap; iy++)
1248             {
1249                 for (iz = 0; iz < nz; iz++)
1250                 {
1251                     pmegrid[(ix*pny+iy)*pnz+iz] +=
1252                         pmegrid[(ix*pny+ny+iy)*pnz+iz];
1253                 }
1254             }
1255         }
1256     }
1257
1258     if (pme->nnodes_major == 1)
1259     {
1260         ny_x = (pme->nnodes_minor == 1 ? ny : pme->pmegrid_ny);
1261
1262         for (ix = 0; ix < overlap; ix++)
1263         {
1264             for (iy = 0; iy < ny_x; iy++)
1265             {
1266                 for (iz = 0; iz < nz; iz++)
1267                 {
1268                     pmegrid[(ix*pny+iy)*pnz+iz] +=
1269                         pmegrid[((nx+ix)*pny+iy)*pnz+iz];
1270                 }
1271             }
1272         }
1273     }
1274 }
1275
1276
1277 static void unwrap_periodic_pmegrid(gmx_pme_t pme, real *pmegrid)
1278 {
1279     int     nx, ny, nz, pnx, pny, pnz, ny_x, overlap, ix;
1280
1281     nx = pme->nkx;
1282     ny = pme->nky;
1283     nz = pme->nkz;
1284
1285     pnx = pme->pmegrid_nx;
1286     pny = pme->pmegrid_ny;
1287     pnz = pme->pmegrid_nz;
1288
1289     overlap = pme->pme_order - 1;
1290
1291     if (pme->nnodes_major == 1)
1292     {
1293         ny_x = (pme->nnodes_minor == 1 ? ny : pme->pmegrid_ny);
1294
1295         for (ix = 0; ix < overlap; ix++)
1296         {
1297             int iy, iz;
1298
1299             for (iy = 0; iy < ny_x; iy++)
1300             {
1301                 for (iz = 0; iz < nz; iz++)
1302                 {
1303                     pmegrid[((nx+ix)*pny+iy)*pnz+iz] =
1304                         pmegrid[(ix*pny+iy)*pnz+iz];
1305                 }
1306             }
1307         }
1308     }
1309
1310     if (pme->nnodes_minor == 1)
1311     {
1312 #pragma omp parallel for num_threads(pme->nthread) schedule(static)
1313         for (ix = 0; ix < pme->pmegrid_nx; ix++)
1314         {
1315             int iy, iz;
1316
1317             for (iy = 0; iy < overlap; iy++)
1318             {
1319                 for (iz = 0; iz < nz; iz++)
1320                 {
1321                     pmegrid[(ix*pny+ny+iy)*pnz+iz] =
1322                         pmegrid[(ix*pny+iy)*pnz+iz];
1323                 }
1324             }
1325         }
1326     }
1327
1328     /* Copy periodic overlap in z */
1329 #pragma omp parallel for num_threads(pme->nthread) schedule(static)
1330     for (ix = 0; ix < pme->pmegrid_nx; ix++)
1331     {
1332         int iy, iz;
1333
1334         for (iy = 0; iy < pme->pmegrid_ny; iy++)
1335         {
1336             for (iz = 0; iz < overlap; iz++)
1337             {
1338                 pmegrid[(ix*pny+iy)*pnz+nz+iz] =
1339                     pmegrid[(ix*pny+iy)*pnz+iz];
1340             }
1341         }
1342     }
1343 }
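/* One-dimensional sketch of the wrap/unwrap pair above (illustrative only,
 * not part of the original code): spreading writes pme_order-1 elements past
 * the end of the periodic range, wrap_periodic_pmegrid folds them back onto
 * the start, and unwrap_periodic_pmegrid does the reverse copy so gathering
 * can read past the end without a modulo.  Minimal sketch for nz = 8 and
 * pme_order = 4:
 */
#if 0
static void wrap_unwrap_1d_example(void)
{
    real grid[11] = {0};     /* nz + (pme_order-1) = 8 + 3 elements */
    int  nz = 8, overlap = 3, iz;

    /* ... spreading would have filled all 11 elements here ... */

    /* wrap: fold the overlap region back into the periodic range */
    for (iz = 0; iz < overlap; iz++)
    {
        grid[iz] += grid[nz + iz];
    }

    /* ... FFT and solve work on grid[0..nz-1] ... */

    /* unwrap: replicate the start past the end for gathering */
    for (iz = 0; iz < overlap; iz++)
    {
        grid[nz + iz] = grid[iz];
    }
}
#endif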
1344
1345
1346 /* This has to be a macro to enable full compiler optimization with xlC (and probably others too) */
1347 #define DO_BSPLINE(order)                            \
1348     for (ithx = 0; (ithx < order); ithx++)                    \
1349     {                                                    \
1350         index_x = (i0+ithx)*pny*pnz;                     \
1351         valx    = coefficient*thx[ithx];                          \
1352                                                      \
1353         for (ithy = 0; (ithy < order); ithy++)                \
1354         {                                                \
1355             valxy    = valx*thy[ithy];                   \
1356             index_xy = index_x+(j0+ithy)*pnz;            \
1357                                                      \
1358             for (ithz = 0; (ithz < order); ithz++)            \
1359             {                                            \
1360                 index_xyz        = index_xy+(k0+ithz);   \
1361                 grid[index_xyz] += valxy*thz[ithz];      \
1362             }                                            \
1363         }                                                \
1364     }
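/* For one atom with spreading coefficient 'coefficient' and lower grid corner
 * (i0,j0,k0), DO_BSPLINE(order) adds the tensor-product contribution
 *   grid[i0+ix][j0+iy][k0+iz] += coefficient * thx[ix] * thy[iy] * thz[iz]
 * for all 0 <= ix,iy,iz < order, using the flattened index
 *   ((i0+ix)*pny + (j0+iy))*pnz + (k0+iz).
 */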
1365
1366
1367 static void spread_coefficients_bsplines_thread(pmegrid_t                    *pmegrid,
1368                                                 pme_atomcomm_t               *atc,
1369                                                 splinedata_t                 *spline,
1370                                                 pme_spline_work_t gmx_unused *work)
1371 {
1372
1373     /* spread coefficients from home atoms to local grid */
1374     real          *grid;
1375     pme_overlap_t *ol;
1376     int            b, i, nn, n, ithx, ithy, ithz, i0, j0, k0;
1377     int       *    idxptr;
1378     int            order, norder, index_x, index_xy, index_xyz;
1379     real           valx, valxy, coefficient;
1380     real          *thx, *thy, *thz;
1381     int            localsize, bndsize;
1382     int            pnx, pny, pnz, ndatatot;
1383     int            offx, offy, offz;
1384
1385 #if defined PME_SIMD4_SPREAD_GATHER && !defined PME_SIMD4_UNALIGNED
1386     real           thz_buffer[GMX_SIMD4_WIDTH*3], *thz_aligned;
1387
1388     thz_aligned = gmx_simd4_align_r(thz_buffer);
1389 #endif
1390
1391     pnx = pmegrid->s[XX];
1392     pny = pmegrid->s[YY];
1393     pnz = pmegrid->s[ZZ];
1394
1395     offx = pmegrid->offset[XX];
1396     offy = pmegrid->offset[YY];
1397     offz = pmegrid->offset[ZZ];
1398
1399     ndatatot = pnx*pny*pnz;
1400     grid     = pmegrid->grid;
1401     for (i = 0; i < ndatatot; i++)
1402     {
1403         grid[i] = 0;
1404     }
1405
1406     order = pmegrid->order;
1407
1408     for (nn = 0; nn < spline->n; nn++)
1409     {
1410         n           = spline->ind[nn];
1411         coefficient = atc->coefficient[n];
1412
1413         if (coefficient != 0)
1414         {
1415             idxptr = atc->idx[n];
1416             norder = nn*order;
1417
1418             i0   = idxptr[XX] - offx;
1419             j0   = idxptr[YY] - offy;
1420             k0   = idxptr[ZZ] - offz;
1421
1422             thx = spline->theta[XX] + norder;
1423             thy = spline->theta[YY] + norder;
1424             thz = spline->theta[ZZ] + norder;
1425
1426             switch (order)
1427             {
1428                 case 4:
1429 #ifdef PME_SIMD4_SPREAD_GATHER
1430 #ifdef PME_SIMD4_UNALIGNED
1431 #define PME_SPREAD_SIMD4_ORDER4
1432 #else
1433 #define PME_SPREAD_SIMD4_ALIGNED
1434 #define PME_ORDER 4
1435 #endif
1436 #include "pme_simd4.h"
1437 #else
1438                     DO_BSPLINE(4);
1439 #endif
1440                     break;
1441                 case 5:
1442 #ifdef PME_SIMD4_SPREAD_GATHER
1443 #define PME_SPREAD_SIMD4_ALIGNED
1444 #define PME_ORDER 5
1445 #include "pme_simd4.h"
1446 #else
1447                     DO_BSPLINE(5);
1448 #endif
1449                     break;
1450                 default:
1451                     DO_BSPLINE(order);
1452                     break;
1453             }
1454         }
1455     }
1456 }
1457
1458 static void set_grid_alignment(int gmx_unused *pmegrid_nz, int gmx_unused pme_order)
1459 {
1460 #ifdef PME_SIMD4_SPREAD_GATHER
1461     if (pme_order == 5
1462 #ifndef PME_SIMD4_UNALIGNED
1463         || pme_order == 4
1464 #endif
1465         )
1466     {
1467         /* Round nz up to a multiple of 4 to ensure alignment */
1468         *pmegrid_nz = ((*pmegrid_nz + 3) & ~3);
1469     }
1470 #endif
1471 }
1472
1473 static void set_gridsize_alignment(int gmx_unused *gridsize, int gmx_unused pme_order)
1474 {
1475 #ifdef PME_SIMD4_SPREAD_GATHER
1476 #ifndef PME_SIMD4_UNALIGNED
1477     if (pme_order == 4)
1478     {
1479         /* Add extra elements to ensure that aligned operations do not go
1480          * beyond the allocated grid size.
1481          * Note that for pme_order=5, the pme grid z-size alignment
1482          * ensures that we will not go beyond the grid size.
1483          */
1484         *gridsize += 4;
1485     }
1486 #endif
1487 #endif
1488 }
1489
1490 static void pmegrid_init(pmegrid_t *grid,
1491                          int cx, int cy, int cz,
1492                          int x0, int y0, int z0,
1493                          int x1, int y1, int z1,
1494                          gmx_bool set_alignment,
1495                          int pme_order,
1496                          real *ptr)
1497 {
1498     int nz, gridsize;
1499
1500     grid->ci[XX]     = cx;
1501     grid->ci[YY]     = cy;
1502     grid->ci[ZZ]     = cz;
1503     grid->offset[XX] = x0;
1504     grid->offset[YY] = y0;
1505     grid->offset[ZZ] = z0;
1506     grid->n[XX]      = x1 - x0 + pme_order - 1;
1507     grid->n[YY]      = y1 - y0 + pme_order - 1;
1508     grid->n[ZZ]      = z1 - z0 + pme_order - 1;
1509     copy_ivec(grid->n, grid->s);
1510
1511     nz = grid->s[ZZ];
1512     set_grid_alignment(&nz, pme_order);
1513     if (set_alignment)
1514     {
1515         grid->s[ZZ] = nz;
1516     }
1517     else if (nz != grid->s[ZZ])
1518     {
1519         gmx_incons("pmegrid_init call with an unaligned z size");
1520     }
1521
1522     grid->order = pme_order;
1523     if (ptr == NULL)
1524     {
1525         gridsize = grid->s[XX]*grid->s[YY]*grid->s[ZZ];
1526         set_gridsize_alignment(&gridsize, pme_order);
1527         snew_aligned(grid->grid, gridsize, SIMD4_ALIGNMENT);
1528     }
1529     else
1530     {
1531         grid->grid = ptr;
1532     }
1533 }
1534
1535 static int div_round_up(int enumerator, int denominator)
1536 {
1537     return (enumerator + denominator - 1)/denominator;
1538 }
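/* For example, div_round_up(10, 4) == 3 and div_round_up(8, 4) == 2. */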
1539
1540 static void make_subgrid_division(const ivec n, int ovl, int nthread,
1541                                   ivec nsub)
1542 {
1543     int gsize_opt, gsize;
1544     int nsx, nsy, nsz;
1545     char *env;
1546
1547     gsize_opt = -1;
1548     for (nsx = 1; nsx <= nthread; nsx++)
1549     {
1550         if (nthread % nsx == 0)
1551         {
1552             for (nsy = 1; nsy <= nthread; nsy++)
1553             {
1554                 if (nsx*nsy <= nthread && nthread % (nsx*nsy) == 0)
1555                 {
1556                     nsz = nthread/(nsx*nsy);
1557
1558                     /* Determine the number of grid points per thread */
1559                     gsize =
1560                         (div_round_up(n[XX], nsx) + ovl)*
1561                         (div_round_up(n[YY], nsy) + ovl)*
1562                         (div_round_up(n[ZZ], nsz) + ovl);
1563
1564                     /* Minimize the number of grid points per thread
1565                      * and, secondarily, the number of cuts in minor dimensions.
1566                      */
1567                     if (gsize_opt == -1 ||
1568                         gsize < gsize_opt ||
1569                         (gsize == gsize_opt &&
1570                          (nsz < nsub[ZZ] || (nsz == nsub[ZZ] && nsy < nsub[YY]))))
1571                     {
1572                         nsub[XX]  = nsx;
1573                         nsub[YY]  = nsy;
1574                         nsub[ZZ]  = nsz;
1575                         gsize_opt = gsize;
1576                     }
1577                 }
1578             }
1579         }
1580     }
1581
1582     env = getenv("GMX_PME_THREAD_DIVISION");
1583     if (env != NULL)
1584     {
1585         sscanf(env, "%d %d %d", &nsub[XX], &nsub[YY], &nsub[ZZ]);
1586     }
1587
1588     if (nsub[XX]*nsub[YY]*nsub[ZZ] != nthread)
1589     {
1590         gmx_fatal(FARGS, "PME grid thread division (%d x %d x %d) does not match the total number of threads (%d)", nsub[XX], nsub[YY], nsub[ZZ], nthread);
1591     }
1592 }
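/* Worked example for the search above (illustrative numbers only): with
 * n = {20, 20, 20}, ovl = 3 and nthread = 4, the factorizations of 4 over
 * (x,y,z) give per-thread grid volumes ranging from 13*13*23 = 3887
 * (for 2x2x1, 2x1x2 and 1x2x2) up to 23*23*8 = 4232 (for 1x1x4, 1x4x1 and
 * 4x1x1).  The tie-break towards fewer cuts in the minor dimensions then
 * selects nsub = {2, 2, 1}.  Setting e.g. GMX_PME_THREAD_DIVISION="2 1 2"
 * overrides the automatic choice, provided the product still equals nthread.
 */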
1593
1594 static void pmegrids_init(pmegrids_t *grids,
1595                           int nx, int ny, int nz, int nz_base,
1596                           int pme_order,
1597                           gmx_bool bUseThreads,
1598                           int nthread,
1599                           int overlap_x,
1600                           int overlap_y)
1601 {
1602     ivec n, n_base, g0, g1;
1603     int t, x, y, z, d, i, tfac;
1604     int max_comm_lines = -1;
1605
1606     n[XX] = nx - (pme_order - 1);
1607     n[YY] = ny - (pme_order - 1);
1608     n[ZZ] = nz - (pme_order - 1);
1609
1610     copy_ivec(n, n_base);
1611     n_base[ZZ] = nz_base;
1612
1613     pmegrid_init(&grids->grid, 0, 0, 0, 0, 0, 0, n[XX], n[YY], n[ZZ], FALSE, pme_order,
1614                  NULL);
1615
1616     grids->nthread = nthread;
1617
1618     make_subgrid_division(n_base, pme_order-1, grids->nthread, grids->nc);
1619
1620     if (bUseThreads)
1621     {
1622         ivec nst;
1623         int gridsize;
1624
1625         for (d = 0; d < DIM; d++)
1626         {
1627             nst[d] = div_round_up(n[d], grids->nc[d]) + pme_order - 1;
1628         }
1629         set_grid_alignment(&nst[ZZ], pme_order);
1630
1631         if (debug)
1632         {
1633             fprintf(debug, "pmegrid thread local division: %d x %d x %d\n",
1634                     grids->nc[XX], grids->nc[YY], grids->nc[ZZ]);
1635             fprintf(debug, "pmegrid %d %d %d max thread pmegrid %d %d %d\n",
1636                     nx, ny, nz,
1637                     nst[XX], nst[YY], nst[ZZ]);
1638         }
1639
1640         snew(grids->grid_th, grids->nthread);
1641         t        = 0;
1642         gridsize = nst[XX]*nst[YY]*nst[ZZ];
1643         set_gridsize_alignment(&gridsize, pme_order);
1644         snew_aligned(grids->grid_all,
1645                      grids->nthread*gridsize+(grids->nthread+1)*GMX_CACHE_SEP,
1646                      SIMD4_ALIGNMENT);
1647
1648         for (x = 0; x < grids->nc[XX]; x++)
1649         {
1650             for (y = 0; y < grids->nc[YY]; y++)
1651             {
1652                 for (z = 0; z < grids->nc[ZZ]; z++)
1653                 {
1654                     pmegrid_init(&grids->grid_th[t],
1655                                  x, y, z,
1656                                  (n[XX]*(x  ))/grids->nc[XX],
1657                                  (n[YY]*(y  ))/grids->nc[YY],
1658                                  (n[ZZ]*(z  ))/grids->nc[ZZ],
1659                                  (n[XX]*(x+1))/grids->nc[XX],
1660                                  (n[YY]*(y+1))/grids->nc[YY],
1661                                  (n[ZZ]*(z+1))/grids->nc[ZZ],
1662                                  TRUE,
1663                                  pme_order,
1664                                  grids->grid_all+GMX_CACHE_SEP+t*(gridsize+GMX_CACHE_SEP));
1665                     t++;
1666                 }
1667             }
1668         }
1669     }
1670     else
1671     {
1672         grids->grid_th = NULL;
1673     }
1674
1675     snew(grids->g2t, DIM);
1676     tfac = 1;
1677     for (d = DIM-1; d >= 0; d--)
1678     {
1679         snew(grids->g2t[d], n[d]);
1680         t = 0;
1681         for (i = 0; i < n[d]; i++)
1682         {
1683             /* The second check should match the parameters
1684              * of the pmegrid_init call above.
1685              */
1686             while (t + 1 < grids->nc[d] && i >= (n[d]*(t+1))/grids->nc[d])
1687             {
1688                 t++;
1689             }
1690             grids->g2t[d][i] = t*tfac;
1691         }
1692
1693         tfac *= grids->nc[d];
1694
1695         switch (d)
1696         {
1697             case XX: max_comm_lines = overlap_x;     break;
1698             case YY: max_comm_lines = overlap_y;     break;
1699             case ZZ: max_comm_lines = pme_order - 1; break;
1700         }
1701         grids->nthread_comm[d] = 0;
1702         while ((n[d]*grids->nthread_comm[d])/grids->nc[d] < max_comm_lines &&
1703                grids->nthread_comm[d] < grids->nc[d])
1704         {
1705             grids->nthread_comm[d]++;
1706         }
1707         if (debug != NULL)
1708         {
1709             fprintf(debug, "pmegrid thread grid communication range in %c: %d\n",
1710                     'x'+d, grids->nthread_comm[d]);
1711         }
1712         /* It should be possible to make grids->nthread_comm[d]==grids->nc[d]
1713          * work, but this is not a problematic restriction.
1714          */
1715         if (grids->nc[d] > 1 && grids->nthread_comm[d] > grids->nc[d])
1716         {
1717             gmx_fatal(FARGS, "Too many threads for PME (%d) compared to the number of grid lines, reduce the number of threads doing PME", grids->nthread);
1718         }
1719     }
1720 }
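/* Layout of the shared allocation above when bUseThreads is set: grid_all
 * holds the nthread thread-local grids of "gridsize" reals back to back,
 * separated (and surrounded) by GMX_CACHE_SEP padding elements:
 *
 *   [ SEP | grid 0 | SEP | grid 1 | SEP | ... | grid nthread-1 | SEP ]
 *
 * Thread t therefore spreads into
 * grid_all + GMX_CACHE_SEP + t*(gridsize + GMX_CACHE_SEP); the separators
 * keep the regions written by different threads on distinct cache lines.
 */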
1721
1722
1723 static void pmegrids_destroy(pmegrids_t *grids)
1724 {
1725     int t;
1726
1727     if (grids->grid.grid != NULL)
1728     {
1729         sfree(grids->grid.grid);
1730
1731         if (grids->nthread > 0)
1732         {
1733             for (t = 0; t < grids->nthread; t++)
1734             {
1735                 sfree(grids->grid_th[t].grid);
1736             }
1737             sfree(grids->grid_th);
1738         }
1739     }
1740 }
1741
1742
1743 static void realloc_work(pme_work_t *work, int nkx)
1744 {
1745     int simd_width;
1746
1747     if (nkx > work->nalloc)
1748     {
1749         work->nalloc = nkx;
1750         srenew(work->mhx, work->nalloc);
1751         srenew(work->mhy, work->nalloc);
1752         srenew(work->mhz, work->nalloc);
1753         srenew(work->m2, work->nalloc);
1754         /* Allocate an aligned pointer for SIMD operations, including extra
1755          * elements at the end for padding.
1756          */
1757 #ifdef PME_SIMD_SOLVE
1758         simd_width = GMX_SIMD_REAL_WIDTH;
1759 #else
1760         /* We can use any alignment, apart from 0, so we use 4 */
1761         simd_width = 4;
1762 #endif
1763         sfree_aligned(work->denom);
1764         sfree_aligned(work->tmp1);
1765         sfree_aligned(work->tmp2);
1766         sfree_aligned(work->eterm);
1767         snew_aligned(work->denom, work->nalloc+simd_width, simd_width*sizeof(real));
1768         snew_aligned(work->tmp1,  work->nalloc+simd_width, simd_width*sizeof(real));
1769         snew_aligned(work->tmp2,  work->nalloc+simd_width, simd_width*sizeof(real));
1770         snew_aligned(work->eterm, work->nalloc+simd_width, simd_width*sizeof(real));
1771         srenew(work->m2inv, work->nalloc);
1772     }
1773 }
1774
1775
1776 static void free_work(pme_work_t *work)
1777 {
1778     sfree(work->mhx);
1779     sfree(work->mhy);
1780     sfree(work->mhz);
1781     sfree(work->m2);
1782     sfree_aligned(work->denom);
1783     sfree_aligned(work->tmp1);
1784     sfree_aligned(work->tmp2);
1785     sfree_aligned(work->eterm);
1786     sfree(work->m2inv);
1787 }
1788
1789
1790 #if defined PME_SIMD_SOLVE
1791 /* Calculate exponentials through SIMD */
1792 gmx_inline static void calc_exponentials_q(int gmx_unused start, int end, real f, real *d_aligned, real *r_aligned, real *e_aligned)
1793 {
1794     {
1795         const gmx_simd_real_t two = gmx_simd_set1_r(2.0);
1796         gmx_simd_real_t f_simd;
1797         gmx_simd_real_t lu;
1798         gmx_simd_real_t tmp_d1, d_inv, tmp_r, tmp_e;
1799         int kx;
1800         f_simd = gmx_simd_set1_r(f);
1801         /* We only need to calculate from start. But since start is 0 or 1
1802          * and we want to use aligned loads/stores, we always start from 0.
1803          */
1804         for (kx = 0; kx < end; kx += GMX_SIMD_REAL_WIDTH)
1805         {
1806             tmp_d1   = gmx_simd_load_r(d_aligned+kx);
1807             d_inv    = gmx_simd_inv_r(tmp_d1);
1808             tmp_r    = gmx_simd_load_r(r_aligned+kx);
1809             tmp_r    = gmx_simd_exp_r(tmp_r);
1810             tmp_e    = gmx_simd_mul_r(f_simd, d_inv);
1811             tmp_e    = gmx_simd_mul_r(tmp_e, tmp_r);
1812             gmx_simd_store_r(e_aligned+kx, tmp_e);
1813         }
1814     }
1815 }
1816 #else
1817 gmx_inline static void calc_exponentials_q(int start, int end, real f, real *d, real *r, real *e)
1818 {
1819     int kx;
1820     for (kx = start; kx < end; kx++)
1821     {
1822         d[kx] = 1.0/d[kx];
1823     }
1824     for (kx = start; kx < end; kx++)
1825     {
1826         r[kx] = exp(r[kx]);
1827     }
1828     for (kx = start; kx < end; kx++)
1829     {
1830         e[kx] = f*r[kx]*d[kx];
1831     }
1832 }
1833 #endif
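/* Both variants above compute, for kx in [start, end):
 *   e[kx] = f * exp(r[kx]) / d[kx]
 * (the scalar path additionally overwrites d and r in place).  In the
 * solver below, r holds -pi^2 |m|^2 / ewaldcoeff^2 and d the
 * B-spline-modulus weighted denominator, so e becomes the reciprocal-space
 * convolution kernel eterm.
 */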
1834
1835 #if defined PME_SIMD_SOLVE
1836 /* Calculate exponentials through SIMD */
1837 gmx_inline static void calc_exponentials_lj(int gmx_unused start, int end, real *r_aligned, real *factor_aligned, real *d_aligned)
1838 {
1839     gmx_simd_real_t tmp_r, tmp_d, tmp_fac, d_inv, tmp_mk;
1840     const gmx_simd_real_t sqr_PI = gmx_simd_sqrt_r(gmx_simd_set1_r(M_PI));
1841     int kx;
1842     for (kx = 0; kx < end; kx += GMX_SIMD_REAL_WIDTH)
1843     {
1844         /* We only need to calculate from start. But since start is 0 or 1
1845          * and we want to use aligned loads/stores, we always start from 0.
1846          */
1847         tmp_d = gmx_simd_load_r(d_aligned+kx);
1848         d_inv = gmx_simd_inv_r(tmp_d);
1849         gmx_simd_store_r(d_aligned+kx, d_inv);
1850         tmp_r = gmx_simd_load_r(r_aligned+kx);
1851         tmp_r = gmx_simd_exp_r(tmp_r);
1852         gmx_simd_store_r(r_aligned+kx, tmp_r);
1853         tmp_mk  = gmx_simd_load_r(factor_aligned+kx);
1854         tmp_fac = gmx_simd_mul_r(sqr_PI, gmx_simd_mul_r(tmp_mk, gmx_simd_erfc_r(tmp_mk)));
1855         gmx_simd_store_r(factor_aligned+kx, tmp_fac);
1856     }
1857 }
1858 #else
1859 gmx_inline static void calc_exponentials_lj(int start, int end, real *r, real *tmp2, real *d)
1860 {
1861     int kx;
1862     real mk;
1863     for (kx = start; kx < end; kx++)
1864     {
1865         d[kx] = 1.0/d[kx];
1866     }
1867
1868     for (kx = start; kx < end; kx++)
1869     {
1870         r[kx] = exp(r[kx]);
1871     }
1872
1873     for (kx = start; kx < end; kx++)
1874     {
1875         mk       = tmp2[kx];
1876         tmp2[kx] = sqrt(M_PI)*mk*gmx_erfc(mk);
1877     }
1878 }
1879 #endif
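/* Likewise for LJ-PME: both variants replace d by 1/d, r by exp(r), and the
 * third array (holding mk = sqrt(factor*m2)) by sqrt(pi)*mk*erfc(mk).  These
 * are the pieces from which solve_pme_lj_yzx() below assembles the
 * dispersion kernel eterm and the corresponding virial term vterm.
 */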
1880
1881 static int solve_pme_yzx(gmx_pme_t pme, t_complex *grid,
1882                          real ewaldcoeff, real vol,
1883                          gmx_bool bEnerVir,
1884                          int nthread, int thread)
1885 {
1886     /* do recip sum over local cells in grid */
1887     /* y major, z middle, x minor or continuous */
1888     t_complex *p0;
1889     int     kx, ky, kz, maxkx, maxky, maxkz;
1890     int     nx, ny, nz, iyz0, iyz1, iyz, iy, iz, kxstart, kxend;
1891     real    mx, my, mz;
1892     real    factor = M_PI*M_PI/(ewaldcoeff*ewaldcoeff);
1893     real    ets2, struct2, vfactor, ets2vf;
1894     real    d1, d2, energy = 0;
1895     real    by, bz;
1896     real    virxx = 0, virxy = 0, virxz = 0, viryy = 0, viryz = 0, virzz = 0;
1897     real    rxx, ryx, ryy, rzx, rzy, rzz;
1898     pme_work_t *work;
1899     real    *mhx, *mhy, *mhz, *m2, *denom, *tmp1, *eterm, *m2inv;
1900     real    mhxk, mhyk, mhzk, m2k;
1901     real    corner_fac;
1902     ivec    complex_order;
1903     ivec    local_ndata, local_offset, local_size;
1904     real    elfac;
1905
1906     elfac = ONE_4PI_EPS0/pme->epsilon_r;
1907
1908     nx = pme->nkx;
1909     ny = pme->nky;
1910     nz = pme->nkz;
1911
1912     /* Dimensions should be identical for A/B grid, so we just use A here */
1913     gmx_parallel_3dfft_complex_limits(pme->pfft_setup[PME_GRID_QA],
1914                                       complex_order,
1915                                       local_ndata,
1916                                       local_offset,
1917                                       local_size);
1918
1919     rxx = pme->recipbox[XX][XX];
1920     ryx = pme->recipbox[YY][XX];
1921     ryy = pme->recipbox[YY][YY];
1922     rzx = pme->recipbox[ZZ][XX];
1923     rzy = pme->recipbox[ZZ][YY];
1924     rzz = pme->recipbox[ZZ][ZZ];
1925
1926     maxkx = (nx+1)/2;
1927     maxky = (ny+1)/2;
1928     maxkz = nz/2+1;
1929
1930     work  = &pme->work[thread];
1931     mhx   = work->mhx;
1932     mhy   = work->mhy;
1933     mhz   = work->mhz;
1934     m2    = work->m2;
1935     denom = work->denom;
1936     tmp1  = work->tmp1;
1937     eterm = work->eterm;
1938     m2inv = work->m2inv;
1939
1940     iyz0 = local_ndata[YY]*local_ndata[ZZ]* thread   /nthread;
1941     iyz1 = local_ndata[YY]*local_ndata[ZZ]*(thread+1)/nthread;
1942
1943     for (iyz = iyz0; iyz < iyz1; iyz++)
1944     {
1945         iy = iyz/local_ndata[ZZ];
1946         iz = iyz - iy*local_ndata[ZZ];
1947
1948         ky = iy + local_offset[YY];
1949
1950         if (ky < maxky)
1951         {
1952             my = ky;
1953         }
1954         else
1955         {
1956             my = (ky - ny);
1957         }
1958
1959         by = M_PI*vol*pme->bsp_mod[YY][ky];
1960
1961         kz = iz + local_offset[ZZ];
1962
1963         mz = kz;
1964
1965         bz = pme->bsp_mod[ZZ][kz];
1966
1967         /* 0.5 correction for corner points */
1968         corner_fac = 1;
1969         if (kz == 0 || kz == (nz+1)/2)
1970         {
1971             corner_fac = 0.5;
1972         }
1973
1974         p0 = grid + iy*local_size[ZZ]*local_size[XX] + iz*local_size[XX];
1975
1976         /* We should skip the k-space point (0,0,0) */
1977         /* Note that since here x is the minor index, local_offset[XX]=0 */
1978         if (local_offset[XX] > 0 || ky > 0 || kz > 0)
1979         {
1980             kxstart = local_offset[XX];
1981         }
1982         else
1983         {
1984             kxstart = local_offset[XX] + 1;
1985             p0++;
1986         }
1987         kxend = local_offset[XX] + local_ndata[XX];
1988
1989         if (bEnerVir)
1990         {
1991             /* More expensive inner loop, especially because of the storage
1992              * of the mh elements in arrays.
1993              * Because x is the minor grid index, all mh elements
1994              * depend on kx for triclinic unit cells.
1995              */
1996
1997             /* Two explicit loops to avoid a conditional inside the loop */
1998             for (kx = kxstart; kx < maxkx; kx++)
1999             {
2000                 mx = kx;
2001
2002                 mhxk      = mx * rxx;
2003                 mhyk      = mx * ryx + my * ryy;
2004                 mhzk      = mx * rzx + my * rzy + mz * rzz;
2005                 m2k       = mhxk*mhxk + mhyk*mhyk + mhzk*mhzk;
2006                 mhx[kx]   = mhxk;
2007                 mhy[kx]   = mhyk;
2008                 mhz[kx]   = mhzk;
2009                 m2[kx]    = m2k;
2010                 denom[kx] = m2k*bz*by*pme->bsp_mod[XX][kx];
2011                 tmp1[kx]  = -factor*m2k;
2012             }
2013
2014             for (kx = maxkx; kx < kxend; kx++)
2015             {
2016                 mx = (kx - nx);
2017
2018                 mhxk      = mx * rxx;
2019                 mhyk      = mx * ryx + my * ryy;
2020                 mhzk      = mx * rzx + my * rzy + mz * rzz;
2021                 m2k       = mhxk*mhxk + mhyk*mhyk + mhzk*mhzk;
2022                 mhx[kx]   = mhxk;
2023                 mhy[kx]   = mhyk;
2024                 mhz[kx]   = mhzk;
2025                 m2[kx]    = m2k;
2026                 denom[kx] = m2k*bz*by*pme->bsp_mod[XX][kx];
2027                 tmp1[kx]  = -factor*m2k;
2028             }
2029
2030             for (kx = kxstart; kx < kxend; kx++)
2031             {
2032                 m2inv[kx] = 1.0/m2[kx];
2033             }
2034
2035             calc_exponentials_q(kxstart, kxend, elfac, denom, tmp1, eterm);
2036
2037             for (kx = kxstart; kx < kxend; kx++, p0++)
2038             {
2039                 d1      = p0->re;
2040                 d2      = p0->im;
2041
2042                 p0->re  = d1*eterm[kx];
2043                 p0->im  = d2*eterm[kx];
2044
2045                 struct2 = 2.0*(d1*d1+d2*d2);
2046
2047                 tmp1[kx] = eterm[kx]*struct2;
2048             }
2049
2050             for (kx = kxstart; kx < kxend; kx++)
2051             {
2052                 ets2     = corner_fac*tmp1[kx];
2053                 vfactor  = (factor*m2[kx] + 1.0)*2.0*m2inv[kx];
2054                 energy  += ets2;
2055
2056                 ets2vf   = ets2*vfactor;
2057                 virxx   += ets2vf*mhx[kx]*mhx[kx] - ets2;
2058                 virxy   += ets2vf*mhx[kx]*mhy[kx];
2059                 virxz   += ets2vf*mhx[kx]*mhz[kx];
2060                 viryy   += ets2vf*mhy[kx]*mhy[kx] - ets2;
2061                 viryz   += ets2vf*mhy[kx]*mhz[kx];
2062                 virzz   += ets2vf*mhz[kx]*mhz[kx] - ets2;
2063             }
2064         }
2065         else
2066         {
2067             /* We don't need to calculate the energy and the virial.
2068              * In this case the triclinic overhead is small.
2069              */
2070
2071             /* Two explicit loops to avoid a conditional inside the loop */
2072
2073             for (kx = kxstart; kx < maxkx; kx++)
2074             {
2075                 mx = kx;
2076
2077                 mhxk      = mx * rxx;
2078                 mhyk      = mx * ryx + my * ryy;
2079                 mhzk      = mx * rzx + my * rzy + mz * rzz;
2080                 m2k       = mhxk*mhxk + mhyk*mhyk + mhzk*mhzk;
2081                 denom[kx] = m2k*bz*by*pme->bsp_mod[XX][kx];
2082                 tmp1[kx]  = -factor*m2k;
2083             }
2084
2085             for (kx = maxkx; kx < kxend; kx++)
2086             {
2087                 mx = (kx - nx);
2088
2089                 mhxk      = mx * rxx;
2090                 mhyk      = mx * ryx + my * ryy;
2091                 mhzk      = mx * rzx + my * rzy + mz * rzz;
2092                 m2k       = mhxk*mhxk + mhyk*mhyk + mhzk*mhzk;
2093                 denom[kx] = m2k*bz*by*pme->bsp_mod[XX][kx];
2094                 tmp1[kx]  = -factor*m2k;
2095             }
2096
2097             calc_exponentials_q(kxstart, kxend, elfac, denom, tmp1, eterm);
2098
2099             for (kx = kxstart; kx < kxend; kx++, p0++)
2100             {
2101                 d1      = p0->re;
2102                 d2      = p0->im;
2103
2104                 p0->re  = d1*eterm[kx];
2105                 p0->im  = d2*eterm[kx];
2106             }
2107         }
2108     }
2109
2110     if (bEnerVir)
2111     {
2112         /* Update virial with local values.
2113          * The virial is symmetric by definition.
2114          * This virial seems ok for isotropic scaling, but I'm
2115          * experiencing problems on semiisotropic membranes.
2116          * IS THAT COMMENT STILL VALID??? (DvdS, 2001/02/07).
2117          */
2118         work->vir_q[XX][XX] = 0.25*virxx;
2119         work->vir_q[YY][YY] = 0.25*viryy;
2120         work->vir_q[ZZ][ZZ] = 0.25*virzz;
2121         work->vir_q[XX][YY] = work->vir_q[YY][XX] = 0.25*virxy;
2122         work->vir_q[XX][ZZ] = work->vir_q[ZZ][XX] = 0.25*virxz;
2123         work->vir_q[YY][ZZ] = work->vir_q[ZZ][YY] = 0.25*viryz;
2124
2125         /* This energy should be corrected for a charged system */
2126         work->energy_q = 0.5*energy;
2127     }
2128
2129     /* Return the loop count */
2130     return local_ndata[YY]*local_ndata[XX];
2131 }
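/* In compact form, the loop above scales every Fourier coefficient by
 *
 *   eterm(m) = elfac * exp(-pi^2 |m|^2 / ewaldcoeff^2)
 *                    / (pi V |m|^2 * bsp_mod_x * bsp_mod_y * bsp_mod_z),
 *
 * the (S)PME form of the Ewald reciprocal-space kernel.  With bEnerVir the
 * accumulated energy is sum_m corner_fac * eterm(m) * 2*|grid(m)|^2; the
 * factor 2, the final 0.5 and the corner_fac weights together account for
 * the half-complex storage along z, where every kz plane except kz = 0
 * (and kz = nz/2 for even nz) represents two wave vectors.  The virial
 * weights (factor*m2 + 1)*2*m2inv follow from differentiating the Ewald
 * sum with respect to the box vectors.
 */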
2132
2133 static int solve_pme_lj_yzx(gmx_pme_t pme, t_complex **grid, gmx_bool bLB,
2134                             real ewaldcoeff, real vol,
2135                             gmx_bool bEnerVir, int nthread, int thread)
2136 {
2137     /* do recip sum over local cells in grid */
2138     /* y major, z middle, x minor or continuous */
2139     int     ig, gcount;
2140     int     kx, ky, kz, maxkx, maxky, maxkz;
2141     int     nx, ny, nz, iy, iyz0, iyz1, iyz, iz, kxstart, kxend;
2142     real    mx, my, mz;
2143     real    factor = M_PI*M_PI/(ewaldcoeff*ewaldcoeff);
2144     real    ets2, ets2vf;
2145     real    eterm, vterm, d1, d2, energy = 0;
2146     real    by, bz;
2147     real    virxx = 0, virxy = 0, virxz = 0, viryy = 0, viryz = 0, virzz = 0;
2148     real    rxx, ryx, ryy, rzx, rzy, rzz;
2149     real    *mhx, *mhy, *mhz, *m2, *denom, *tmp1, *tmp2;
2150     real    mhxk, mhyk, mhzk, m2k;
2151     real    mk;
2152     pme_work_t *work;
2153     real    corner_fac;
2154     ivec    complex_order;
2155     ivec    local_ndata, local_offset, local_size;
2156     nx = pme->nkx;
2157     ny = pme->nky;
2158     nz = pme->nkz;
2159
2160     /* Dimensions should be identical for A/B grid, so we just use A here */
2161     gmx_parallel_3dfft_complex_limits(pme->pfft_setup[PME_GRID_C6A],
2162                                       complex_order,
2163                                       local_ndata,
2164                                       local_offset,
2165                                       local_size);
2166     rxx = pme->recipbox[XX][XX];
2167     ryx = pme->recipbox[YY][XX];
2168     ryy = pme->recipbox[YY][YY];
2169     rzx = pme->recipbox[ZZ][XX];
2170     rzy = pme->recipbox[ZZ][YY];
2171     rzz = pme->recipbox[ZZ][ZZ];
2172
2173     maxkx = (nx+1)/2;
2174     maxky = (ny+1)/2;
2175     maxkz = nz/2+1;
2176
2177     work  = &pme->work[thread];
2178     mhx   = work->mhx;
2179     mhy   = work->mhy;
2180     mhz   = work->mhz;
2181     m2    = work->m2;
2182     denom = work->denom;
2183     tmp1  = work->tmp1;
2184     tmp2  = work->tmp2;
2185
2186     iyz0 = local_ndata[YY]*local_ndata[ZZ]* thread   /nthread;
2187     iyz1 = local_ndata[YY]*local_ndata[ZZ]*(thread+1)/nthread;
2188
2189     for (iyz = iyz0; iyz < iyz1; iyz++)
2190     {
2191         iy = iyz/local_ndata[ZZ];
2192         iz = iyz - iy*local_ndata[ZZ];
2193
2194         ky = iy + local_offset[YY];
2195
2196         if (ky < maxky)
2197         {
2198             my = ky;
2199         }
2200         else
2201         {
2202             my = (ky - ny);
2203         }
2204
2205         by = 3.0*vol*pme->bsp_mod[YY][ky]
2206             / (M_PI*sqrt(M_PI)*ewaldcoeff*ewaldcoeff*ewaldcoeff);
2207
2208         kz = iz + local_offset[ZZ];
2209
2210         mz = kz;
2211
2212         bz = pme->bsp_mod[ZZ][kz];
2213
2214         /* 0.5 correction for corner points */
2215         corner_fac = 1;
2216         if (kz == 0 || kz == (nz+1)/2)
2217         {
2218             corner_fac = 0.5;
2219         }
2220
2221         kxstart = local_offset[XX];
2222         kxend   = local_offset[XX] + local_ndata[XX];
2223         if (bEnerVir)
2224         {
2225             /* More expensive inner loop, especially because of the
2226              * storage of the mh elements in arrays.  Because x is the
2227              * minor grid index, all mh elements depend on kx for
2228              * triclinic unit cells.
2229              */
2230
2231             /* Two explicit loops to avoid a conditional inside the loop */
2232             for (kx = kxstart; kx < maxkx; kx++)
2233             {
2234                 mx = kx;
2235
2236                 mhxk      = mx * rxx;
2237                 mhyk      = mx * ryx + my * ryy;
2238                 mhzk      = mx * rzx + my * rzy + mz * rzz;
2239                 m2k       = mhxk*mhxk + mhyk*mhyk + mhzk*mhzk;
2240                 mhx[kx]   = mhxk;
2241                 mhy[kx]   = mhyk;
2242                 mhz[kx]   = mhzk;
2243                 m2[kx]    = m2k;
2244                 denom[kx] = bz*by*pme->bsp_mod[XX][kx];
2245                 tmp1[kx]  = -factor*m2k;
2246                 tmp2[kx]  = sqrt(factor*m2k);
2247             }
2248
2249             for (kx = maxkx; kx < kxend; kx++)
2250             {
2251                 mx = (kx - nx);
2252
2253                 mhxk      = mx * rxx;
2254                 mhyk      = mx * ryx + my * ryy;
2255                 mhzk      = mx * rzx + my * rzy + mz * rzz;
2256                 m2k       = mhxk*mhxk + mhyk*mhyk + mhzk*mhzk;
2257                 mhx[kx]   = mhxk;
2258                 mhy[kx]   = mhyk;
2259                 mhz[kx]   = mhzk;
2260                 m2[kx]    = m2k;
2261                 denom[kx] = bz*by*pme->bsp_mod[XX][kx];
2262                 tmp1[kx]  = -factor*m2k;
2263                 tmp2[kx]  = sqrt(factor*m2k);
2264             }
2265
2266             calc_exponentials_lj(kxstart, kxend, tmp1, tmp2, denom);
2267
2268             for (kx = kxstart; kx < kxend; kx++)
2269             {
2270                 m2k   = factor*m2[kx];
2271                 eterm = -((1.0 - 2.0*m2k)*tmp1[kx]
2272                           + 2.0*m2k*tmp2[kx]);
2273                 vterm    = 3.0*(-tmp1[kx] + tmp2[kx]);
2274                 tmp1[kx] = eterm*denom[kx];
2275                 tmp2[kx] = vterm*denom[kx];
2276             }
2277
2278             if (!bLB)
2279             {
2280                 t_complex *p0;
2281                 real       struct2;
2282
2283                 p0 = grid[0] + iy*local_size[ZZ]*local_size[XX] + iz*local_size[XX];
2284                 for (kx = kxstart; kx < kxend; kx++, p0++)
2285                 {
2286                     d1      = p0->re;
2287                     d2      = p0->im;
2288
2289                     eterm   = tmp1[kx];
2290                     vterm   = tmp2[kx];
2291                     p0->re  = d1*eterm;
2292                     p0->im  = d2*eterm;
2293
2294                     struct2 = 2.0*(d1*d1+d2*d2);
2295
2296                     tmp1[kx] = eterm*struct2;
2297                     tmp2[kx] = vterm*struct2;
2298                 }
2299             }
2300             else
2301             {
2302                 real *struct2 = denom;
2303                 real  str2;
2304
2305                 for (kx = kxstart; kx < kxend; kx++)
2306                 {
2307                     struct2[kx] = 0.0;
2308                 }
2309                 /* Due to symmetry we only need to calculate 4 of the 7 terms */
2310                 for (ig = 0; ig <= 3; ++ig)
2311                 {
2312                     t_complex *p0, *p1;
2313                     real       scale;
2314
2315                     p0    = grid[ig] + iy*local_size[ZZ]*local_size[XX] + iz*local_size[XX];
2316                     p1    = grid[6-ig] + iy*local_size[ZZ]*local_size[XX] + iz*local_size[XX];
2317                     scale = 2.0*lb_scale_factor_symm[ig];
2318                     for (kx = kxstart; kx < kxend; ++kx, ++p0, ++p1)
2319                     {
2320                         struct2[kx] += scale*(p0->re*p1->re + p0->im*p1->im);
2321                     }
2322
2323                 }
2324                 for (ig = 0; ig <= 6; ++ig)
2325                 {
2326                     t_complex *p0;
2327
2328                     p0 = grid[ig] + iy*local_size[ZZ]*local_size[XX] + iz*local_size[XX];
2329                     for (kx = kxstart; kx < kxend; kx++, p0++)
2330                     {
2331                         d1     = p0->re;
2332                         d2     = p0->im;
2333
2334                         eterm  = tmp1[kx];
2335                         p0->re = d1*eterm;
2336                         p0->im = d2*eterm;
2337                     }
2338                 }
2339                 for (kx = kxstart; kx < kxend; kx++)
2340                 {
2341                     eterm    = tmp1[kx];
2342                     vterm    = tmp2[kx];
2343                     str2     = struct2[kx];
2344                     tmp1[kx] = eterm*str2;
2345                     tmp2[kx] = vterm*str2;
2346                 }
2347             }
2348
2349             for (kx = kxstart; kx < kxend; kx++)
2350             {
2351                 ets2     = corner_fac*tmp1[kx];
2352                 vterm    = 2.0*factor*tmp2[kx];
2353                 energy  += ets2;
2354                 ets2vf   = corner_fac*vterm;
2355                 virxx   += ets2vf*mhx[kx]*mhx[kx] - ets2;
2356                 virxy   += ets2vf*mhx[kx]*mhy[kx];
2357                 virxz   += ets2vf*mhx[kx]*mhz[kx];
2358                 viryy   += ets2vf*mhy[kx]*mhy[kx] - ets2;
2359                 viryz   += ets2vf*mhy[kx]*mhz[kx];
2360                 virzz   += ets2vf*mhz[kx]*mhz[kx] - ets2;
2361             }
2362         }
2363         else
2364         {
2365             /* We don't need to calculate the energy and the virial.
2366              * In this case the triclinic overhead is small.
2367              */
2368
2369             /* Two explicit loops to avoid a conditional inside the loop */
2370
2371             for (kx = kxstart; kx < maxkx; kx++)
2372             {
2373                 mx = kx;
2374
2375                 mhxk      = mx * rxx;
2376                 mhyk      = mx * ryx + my * ryy;
2377                 mhzk      = mx * rzx + my * rzy + mz * rzz;
2378                 m2k       = mhxk*mhxk + mhyk*mhyk + mhzk*mhzk;
2379                 m2[kx]    = m2k;
2380                 denom[kx] = bz*by*pme->bsp_mod[XX][kx];
2381                 tmp1[kx]  = -factor*m2k;
2382                 tmp2[kx]  = sqrt(factor*m2k);
2383             }
2384
2385             for (kx = maxkx; kx < kxend; kx++)
2386             {
2387                 mx = (kx - nx);
2388
2389                 mhxk      = mx * rxx;
2390                 mhyk      = mx * ryx + my * ryy;
2391                 mhzk      = mx * rzx + my * rzy + mz * rzz;
2392                 m2k       = mhxk*mhxk + mhyk*mhyk + mhzk*mhzk;
2393                 m2[kx]    = m2k;
2394                 denom[kx] = bz*by*pme->bsp_mod[XX][kx];
2395                 tmp1[kx]  = -factor*m2k;
2396                 tmp2[kx]  = sqrt(factor*m2k);
2397             }
2398
2399             calc_exponentials_lj(kxstart, kxend, tmp1, tmp2, denom);
2400
2401             for (kx = kxstart; kx < kxend; kx++)
2402             {
2403                 m2k    = factor*m2[kx];
2404                 eterm  = -((1.0 - 2.0*m2k)*tmp1[kx]
2405                            + 2.0*m2k*tmp2[kx]);
2406                 tmp1[kx] = eterm*denom[kx];
2407             }
2408             gcount = (bLB ? 7 : 1);
2409             for (ig = 0; ig < gcount; ++ig)
2410             {
2411                 t_complex *p0;
2412
2413                 p0 = grid[ig] + iy*local_size[ZZ]*local_size[XX] + iz*local_size[XX];
2414                 for (kx = kxstart; kx < kxend; kx++, p0++)
2415                 {
2416                     d1      = p0->re;
2417                     d2      = p0->im;
2418
2419                     eterm   = tmp1[kx];
2420
2421                     p0->re  = d1*eterm;
2422                     p0->im  = d2*eterm;
2423                 }
2424             }
2425         }
2426     }
2427     if (bEnerVir)
2428     {
2429         work->vir_lj[XX][XX] = 0.25*virxx;
2430         work->vir_lj[YY][YY] = 0.25*viryy;
2431         work->vir_lj[ZZ][ZZ] = 0.25*virzz;
2432         work->vir_lj[XX][YY] = work->vir_lj[YY][XX] = 0.25*virxy;
2433         work->vir_lj[XX][ZZ] = work->vir_lj[ZZ][XX] = 0.25*virxz;
2434         work->vir_lj[YY][ZZ] = work->vir_lj[ZZ][YY] = 0.25*viryz;
2435
2436         /* This energy should be corrected for a charged system */
2437         work->energy_lj = 0.5*energy;
2438     }
2439     /* Return the loop count */
2440     return local_ndata[YY]*local_ndata[XX];
2441 }
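/* For LJ-PME the kernel applied above is, per wave vector,
 *
 *   -[(1 - 2*m2k)*exp(-m2k) + 2*m2k*sqrt(pi)*mk*erfc(mk)] / denom
 *
 * with m2k = pi^2 |m|^2 / ewaldcoeff^2, mk = sqrt(m2k), and denom the
 * product of volume, ewaldcoeff and B-spline-modulus prefactors assembled
 * in the loops above: the dispersion (r^-6) analogue of the Coulomb kernel
 * in solve_pme_yzx().  With Lorentz-Berthelot combination rules (bLB) the
 * structure factor is accumulated from the seven C6-component grids as
 *
 *   struct2 = sum_{ig=0..3} 2*lb_scale_factor_symm[ig]
 *             * (Re S_ig * Re S_{6-ig} + Im S_ig * Im S_{6-ig}),
 *
 * exploiting the symmetry noted in the inner loop.
 */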
2442
2443 static void get_pme_ener_vir_q(const gmx_pme_t pme, int nthread,
2444                                real *mesh_energy, matrix vir)
2445 {
2446     /* This function sums output over threads and should therefore
2447      * only be called after thread synchronization.
2448      */
2449     int thread;
2450
2451     *mesh_energy = pme->work[0].energy_q;
2452     copy_mat(pme->work[0].vir_q, vir);
2453
2454     for (thread = 1; thread < nthread; thread++)
2455     {
2456         *mesh_energy += pme->work[thread].energy_q;
2457         m_add(vir, pme->work[thread].vir_q, vir);
2458     }
2459 }
2460
2461 static void get_pme_ener_vir_lj(const gmx_pme_t pme, int nthread,
2462                                 real *mesh_energy, matrix vir)
2463 {
2464     /* This function sums output over threads and should therefore
2465      * only be called after thread synchronization.
2466      */
2467     int thread;
2468
2469     *mesh_energy = pme->work[0].energy_lj;
2470     copy_mat(pme->work[0].vir_lj, vir);
2471
2472     for (thread = 1; thread < nthread; thread++)
2473     {
2474         *mesh_energy += pme->work[thread].energy_lj;
2475         m_add(vir, pme->work[thread].vir_lj, vir);
2476     }
2477 }
2478
2479
2480 #define DO_FSPLINE(order)                      \
2481     for (ithx = 0; (ithx < order); ithx++)              \
2482     {                                              \
2483         index_x = (i0+ithx)*pny*pnz;               \
2484         tx      = thx[ithx];                       \
2485         dx      = dthx[ithx];                      \
2486                                                \
2487         for (ithy = 0; (ithy < order); ithy++)          \
2488         {                                          \
2489             index_xy = index_x+(j0+ithy)*pnz;      \
2490             ty       = thy[ithy];                  \
2491             dy       = dthy[ithy];                 \
2492             fxy1     = fz1 = 0;                    \
2493                                                \
2494             for (ithz = 0; (ithz < order); ithz++)      \
2495             {                                      \
2496                 gval  = grid[index_xy+(k0+ithz)];  \
2497                 fxy1 += thz[ithz]*gval;            \
2498                 fz1  += dthz[ithz]*gval;           \
2499             }                                      \
2500             fx += dx*ty*fxy1;                      \
2501             fy += tx*dy*fxy1;                      \
2502             fz += tx*ty*fz1;                       \
2503         }                                          \
2504     }
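/* DO_FSPLINE accumulates, for one atom, the derivatives of the interpolated
 * potential with respect to the fractional grid coordinates over the
 * order^3 stencil:
 *   fx = sum dthx*thy*thz*grid,  fy = sum thx*dthy*thz*grid,
 *   fz = sum thx*thy*dthz*grid.
 * gather_f_bsplines() below converts these to Cartesian forces using the
 * triangular reciprocal box, e.g.
 *   f[ZZ] += -coefficient*(fx*nx*rzx + fy*ny*rzy + fz*nz*rzz).
 */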
2505
2506
2507 static void gather_f_bsplines(gmx_pme_t pme, real *grid,
2508                               gmx_bool bClearF, pme_atomcomm_t *atc,
2509                               splinedata_t *spline,
2510                               real scale)
2511 {
2512     /* sum forces for local particles */
2513     int     nn, n, ithx, ithy, ithz, i0, j0, k0;
2514     int     index_x, index_xy;
2515     int     nx, ny, nz, pnx, pny, pnz;
2516     int *   idxptr;
2517     real    tx, ty, dx, dy, coefficient;
2518     real    fx, fy, fz, gval;
2519     real    fxy1, fz1;
2520     real    *thx, *thy, *thz, *dthx, *dthy, *dthz;
2521     int     norder;
2522     real    rxx, ryx, ryy, rzx, rzy, rzz;
2523     int     order;
2524
2525     pme_spline_work_t *work;
2526
2527 #if defined PME_SIMD4_SPREAD_GATHER && !defined PME_SIMD4_UNALIGNED
2528     real           thz_buffer[GMX_SIMD4_WIDTH*3],  *thz_aligned;
2529     real           dthz_buffer[GMX_SIMD4_WIDTH*3], *dthz_aligned;
2530
2531     thz_aligned  = gmx_simd4_align_r(thz_buffer);
2532     dthz_aligned = gmx_simd4_align_r(dthz_buffer);
2533 #endif
2534
2535     work = pme->spline_work;
2536
2537     order = pme->pme_order;
2538     thx   = spline->theta[XX];
2539     thy   = spline->theta[YY];
2540     thz   = spline->theta[ZZ];
2541     dthx  = spline->dtheta[XX];
2542     dthy  = spline->dtheta[YY];
2543     dthz  = spline->dtheta[ZZ];
2544     nx    = pme->nkx;
2545     ny    = pme->nky;
2546     nz    = pme->nkz;
2547     pnx   = pme->pmegrid_nx;
2548     pny   = pme->pmegrid_ny;
2549     pnz   = pme->pmegrid_nz;
2550
2551     rxx   = pme->recipbox[XX][XX];
2552     ryx   = pme->recipbox[YY][XX];
2553     ryy   = pme->recipbox[YY][YY];
2554     rzx   = pme->recipbox[ZZ][XX];
2555     rzy   = pme->recipbox[ZZ][YY];
2556     rzz   = pme->recipbox[ZZ][ZZ];
2557
2558     for (nn = 0; nn < spline->n; nn++)
2559     {
2560         n           = spline->ind[nn];
2561         coefficient = scale*atc->coefficient[n];
2562
2563         if (bClearF)
2564         {
2565             atc->f[n][XX] = 0;
2566             atc->f[n][YY] = 0;
2567             atc->f[n][ZZ] = 0;
2568         }
2569         if (coefficient != 0)
2570         {
2571             fx     = 0;
2572             fy     = 0;
2573             fz     = 0;
2574             idxptr = atc->idx[n];
2575             norder = nn*order;
2576
2577             i0   = idxptr[XX];
2578             j0   = idxptr[YY];
2579             k0   = idxptr[ZZ];
2580
2581             /* Pointer arithmetic alert, next six statements */
2582             thx  = spline->theta[XX] + norder;
2583             thy  = spline->theta[YY] + norder;
2584             thz  = spline->theta[ZZ] + norder;
2585             dthx = spline->dtheta[XX] + norder;
2586             dthy = spline->dtheta[YY] + norder;
2587             dthz = spline->dtheta[ZZ] + norder;
2588
2589             switch (order)
2590             {
2591                 case 4:
2592 #ifdef PME_SIMD4_SPREAD_GATHER
2593 #ifdef PME_SIMD4_UNALIGNED
2594 #define PME_GATHER_F_SIMD4_ORDER4
2595 #else
2596 #define PME_GATHER_F_SIMD4_ALIGNED
2597 #define PME_ORDER 4
2598 #endif
2599 #include "pme_simd4.h"
2600 #else
2601                     DO_FSPLINE(4);
2602 #endif
2603                     break;
2604                 case 5:
2605 #ifdef PME_SIMD4_SPREAD_GATHER
2606 #define PME_GATHER_F_SIMD4_ALIGNED
2607 #define PME_ORDER 5
2608 #include "pme_simd4.h"
2609 #else
2610                     DO_FSPLINE(5);
2611 #endif
2612                     break;
2613                 default:
2614                     DO_FSPLINE(order);
2615                     break;
2616             }
2617
2618             atc->f[n][XX] += -coefficient*( fx*nx*rxx );
2619             atc->f[n][YY] += -coefficient*( fx*nx*ryx + fy*ny*ryy );
2620             atc->f[n][ZZ] += -coefficient*( fx*nx*rzx + fy*ny*rzy + fz*nz*rzz );
2621         }
2622     }
2623     /* Since the energy and not the forces are interpolated,
2624      * the net force might not be exactly zero.
2625      * This can be solved by also interpolating F, but
2626      * that comes at a cost.
2627      * A better hack is to remove the net force every
2628      * step, but that must be done at a higher level
2629      * since this routine doesn't see all atoms if running
2630      * in parallel. It is unclear how important this is.  EL 990726
2631      */
2632 }
2633
2634
2635 static real gather_energy_bsplines(gmx_pme_t pme, real *grid,
2636                                    pme_atomcomm_t *atc)
2637 {
2638     splinedata_t *spline;
2639     int     n, ithx, ithy, ithz, i0, j0, k0;
2640     int     index_x, index_xy;
2641     int *   idxptr;
2642     real    energy, pot, tx, ty, coefficient, gval;
2643     real    *thx, *thy, *thz;
2644     int     norder;
2645     int     order;
2646
2647     spline = &atc->spline[0];
2648
2649     order = pme->pme_order;
2650
2651     energy = 0;
2652     for (n = 0; (n < atc->n); n++)
2653     {
2654         coefficient      = atc->coefficient[n];
2655
2656         if (coefficient != 0)
2657         {
2658             idxptr = atc->idx[n];
2659             norder = n*order;
2660
2661             i0   = idxptr[XX];
2662             j0   = idxptr[YY];
2663             k0   = idxptr[ZZ];
2664
2665             /* Pointer arithmetic alert, next three statements */
2666             thx  = spline->theta[XX] + norder;
2667             thy  = spline->theta[YY] + norder;
2668             thz  = spline->theta[ZZ] + norder;
2669
2670             pot = 0;
2671             for (ithx = 0; (ithx < order); ithx++)
2672             {
2673                 index_x = (i0+ithx)*pme->pmegrid_ny*pme->pmegrid_nz;
2674                 tx      = thx[ithx];
2675
2676                 for (ithy = 0; (ithy < order); ithy++)
2677                 {
2678                     index_xy = index_x+(j0+ithy)*pme->pmegrid_nz;
2679                     ty       = thy[ithy];
2680
2681                     for (ithz = 0; (ithz < order); ithz++)
2682                     {
2683                         gval  = grid[index_xy+(k0+ithz)];
2684                         pot  += tx*ty*thz[ithz]*gval;
2685                     }
2686
2687                 }
2688             }
2689
2690             energy += pot*coefficient;
2691         }
2692     }
2693
2694     return energy;
2695 }
2696
2697 /* Macro to force loop unrolling by fixing order.
2698  * This gives a significant performance gain.
2699  */
2700 #define CALC_SPLINE(order)                     \
2701     {                                              \
2702         int j, k, l;                                 \
2703         real dr, div;                               \
2704         real data[PME_ORDER_MAX];                  \
2705         real ddata[PME_ORDER_MAX];                 \
2706                                                \
2707         for (j = 0; (j < DIM); j++)                     \
2708         {                                          \
2709             dr  = xptr[j];                         \
2710                                                \
2711             /* dr is relative offset from lower cell limit */ \
2712             data[order-1] = 0;                     \
2713             data[1]       = dr;                          \
2714             data[0]       = 1 - dr;                      \
2715                                                \
2716             for (k = 3; (k < order); k++)               \
2717             {                                      \
2718                 div       = 1.0/(k - 1.0);               \
2719                 data[k-1] = div*dr*data[k-2];      \
2720                 for (l = 1; (l < (k-1)); l++)           \
2721                 {                                  \
2722                     data[k-l-1] = div*((dr+l)*data[k-l-2]+(k-l-dr)* \
2723                                        data[k-l-1]);                \
2724                 }                                  \
2725                 data[0] = div*(1-dr)*data[0];      \
2726             }                                      \
2727             /* differentiate */                    \
2728             ddata[0] = -data[0];                   \
2729             for (k = 1; (k < order); k++)               \
2730             {                                      \
2731                 ddata[k] = data[k-1] - data[k];    \
2732             }                                      \
2733                                                \
2734             div           = 1.0/(order - 1);                 \
2735             data[order-1] = div*dr*data[order-2];  \
2736             for (l = 1; (l < (order-1)); l++)           \
2737             {                                      \
2738                 data[order-l-1] = div*((dr+l)*data[order-l-2]+    \
2739                                        (order-l-dr)*data[order-l-1]); \
2740             }                                      \
2741             data[0] = div*(1 - dr)*data[0];        \
2742                                                \
2743             for (k = 0; k < order; k++)                 \
2744             {                                      \
2745                 theta[j][i*order+k]  = data[k];    \
2746                 dtheta[j][i*order+k] = ddata[k];   \
2747             }                                      \
2748         }                                          \
2749     }
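/* CALC_SPLINE evaluates, per dimension, the cardinal B-spline weights of
 * the requested order at the fractional offset dr using the standard
 * recursion
 *   M_k(u) = [ u*M_{k-1}(u) + (k - u)*M_{k-1}(u - 1) ] / (k - 1),
 * starting from the order-2 values {1 - dr, dr}, and the derivative weights
 * from
 *   M_k'(u) = M_{k-1}(u) - M_{k-1}(u - 1),
 * which is what the data[]/ddata[] updates above implement.
 */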
2750
2751 void make_bsplines(splinevec theta, splinevec dtheta, int order,
2752                    rvec fractx[], int nr, int ind[], real coefficient[],
2753                    gmx_bool bDoSplines)
2754 {
2755     /* construct splines for local atoms */
2756     int  i, ii;
2757     real *xptr;
2758
2759     for (i = 0; i < nr; i++)
2760     {
2761         /* With free energy we do not use the coefficient check.
2762          * In most cases this will be more efficient than calling make_bsplines
2763          * twice, since usually more than half the particles have non-zero coefficients.
2764          */
2765         ii = ind[i];
2766         if (bDoSplines || coefficient[ii] != 0.0)
2767         {
2768             xptr = fractx[ii];
2769             switch (order)
2770             {
2771                 case 4:  CALC_SPLINE(4);     break;
2772                 case 5:  CALC_SPLINE(5);     break;
2773                 default: CALC_SPLINE(order); break;
2774             }
2775         }
2776     }
2777 }
2778
2779
2780 void make_dft_mod(real *mod, real *data, int ndata)
2781 {
2782     int i, j;
2783     real sc, ss, arg;
2784
2785     for (i = 0; i < ndata; i++)
2786     {
2787         sc = ss = 0;
2788         for (j = 0; j < ndata; j++)
2789         {
2790             arg = (2.0*M_PI*i*j)/ndata;
2791             sc += data[j]*cos(arg);
2792             ss += data[j]*sin(arg);
2793         }
2794         mod[i] = sc*sc+ss*ss;
2795     }
2796     for (i = 0; i < ndata; i++)
2797     {
2798         if (mod[i] < 1e-7)
2799         {
2800             mod[i] = (mod[i-1]+mod[i+1])*0.5;
2801         }
2802     }
2803 }
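/* make_dft_mod() computes
 *   mod[i] = | sum_j data[j] * exp(2*pi*I*i*j/ndata) |^2,
 * the squared modulus of the DFT of the sampled B-spline; these become the
 * bsp_mod[] factors that the solvers divide by.  The final pass patches
 * near-zero entries with the average of their neighbours so that this
 * division stays well defined.
 */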
2804
2805
2806 static void make_bspline_moduli(splinevec bsp_mod,
2807                                 int nx, int ny, int nz, int order)
2808 {
2809     int nmax = max(nx, max(ny, nz));
2810     real *data, *ddata, *bsp_data;
2811     int i, k, l;
2812     real div;
2813
2814     snew(data, order);
2815     snew(ddata, order);
2816     snew(bsp_data, nmax);
2817
2818     data[order-1] = 0;
2819     data[1]       = 0;
2820     data[0]       = 1;
2821
2822     for (k = 3; k < order; k++)
2823     {
2824         div       = 1.0/(k-1.0);
2825         data[k-1] = 0;
2826         for (l = 1; l < (k-1); l++)
2827         {
2828             data[k-l-1] = div*(l*data[k-l-2]+(k-l)*data[k-l-1]);
2829         }
2830         data[0] = div*data[0];
2831     }
2832     /* differentiate */
2833     ddata[0] = -data[0];
2834     for (k = 1; k < order; k++)
2835     {
2836         ddata[k] = data[k-1]-data[k];
2837     }
2838     div           = 1.0/(order-1);
2839     data[order-1] = 0;
2840     for (l = 1; l < (order-1); l++)
2841     {
2842         data[order-l-1] = div*(l*data[order-l-2]+(order-l)*data[order-l-1]);
2843     }
2844     data[0] = div*data[0];
2845
2846     for (i = 0; i < nmax; i++)
2847     {
2848         bsp_data[i] = 0;
2849     }
2850     for (i = 1; i <= order; i++)
2851     {
2852         bsp_data[i] = data[i-1];
2853     }
2854
2855     make_dft_mod(bsp_mod[XX], bsp_data, nx);
2856     make_dft_mod(bsp_mod[YY], bsp_data, ny);
2857     make_dft_mod(bsp_mod[ZZ], bsp_data, nz);
2858
2859     sfree(data);
2860     sfree(ddata);
2861     sfree(bsp_data);
2862 }
2863
2864
2865 /* Return the P3M optimal influence function */
2866 static double do_p3m_influence(double z, int order)
2867 {
2868     double z2, z4;
2869
2870     z2 = z*z;
2871     z4 = z2*z2;
2872
2873     /* The formula and most constants can be found in:
2874      * Ballenegger et al., JCTC 8, 936 (2012)
2875      */
2876     switch (order)
2877     {
2878         case 2:
2879             return 1.0 - 2.0*z2/3.0;
2880             break;
2881         case 3:
2882             return 1.0 - z2 + 2.0*z4/15.0;
2883             break;
2884         case 4:
2885             return 1.0 - 4.0*z2/3.0 + 2.0*z4/5.0 + 4.0*z2*z4/315.0;
2886             break;
2887         case 5:
2888             return 1.0 - 5.0*z2/3.0 + 7.0*z4/9.0 - 17.0*z2*z4/189.0 + 2.0*z4*z4/2835.0;
2889             break;
2890         case 6:
2891             return 1.0 - 2.0*z2 + 19.0*z4/15.0 - 256.0*z2*z4/945.0 + 62.0*z4*z4/4725.0 + 4.0*z2*z4*z4/155925.0;
2892             break;
2893         case 7:
2894             return 1.0 - 7.0*z2/3.0 + 28.0*z4/15.0 - 16.0*z2*z4/27.0 + 26.0*z4*z4/405.0 - 2.0*z2*z4*z4/1485.0 + 4.0*z4*z4*z4/6081075.0;
2895         case 8:
2896             return 1.0 - 8.0*z2/3.0 + 116.0*z4/45.0 - 344.0*z2*z4/315.0 + 914.0*z4*z4/4725.0 - 248.0*z4*z4*z2/22275.0 + 21844.0*z4*z4*z4/212837625.0 - 8.0*z4*z4*z4*z2/638512875.0;
2897             break;
2898     }
2899
2900     return 0.0;
2901 }
2902
2903 /* Calculate the P3M B-spline moduli for one dimension */
2904 static void make_p3m_bspline_moduli_dim(real *bsp_mod, int n, int order)
2905 {
2906     double zarg, zai, sinzai, infl;
2907     int    maxk, i;
2908
2909     if (order > 8)
2910     {
2911         gmx_fatal(FARGS, "The current P3M code only supports orders up to 8");
2912     }
2913
2914     zarg = M_PI/n;
2915
2916     maxk = (n + 1)/2;
2917
2918     for (i = -maxk; i < 0; i++)
2919     {
2920         zai          = zarg*i;
2921         sinzai       = sin(zai);
2922         infl         = do_p3m_influence(sinzai, order);
2923         bsp_mod[n+i] = infl*infl*pow(sinzai/zai, -2.0*order);
2924     }
2925     bsp_mod[0] = 1.0;
2926     for (i = 1; i < maxk; i++)
2927     {
2928         zai        = zarg*i;
2929         sinzai     = sin(zai);
2930         infl       = do_p3m_influence(sinzai, order);
2931         bsp_mod[i] = infl*infl*pow(sinzai/zai, -2.0*order);
2932     }
2933 }
2934
2935 /* Calculate the P3M B-spline moduli */
2936 static void make_p3m_bspline_moduli(splinevec bsp_mod,
2937                                     int nx, int ny, int nz, int order)
2938 {
2939     make_p3m_bspline_moduli_dim(bsp_mod[XX], nx, order);
2940     make_p3m_bspline_moduli_dim(bsp_mod[YY], ny, order);
2941     make_p3m_bspline_moduli_dim(bsp_mod[ZZ], nz, order);
2942 }
2943
2944
2945 static void setup_coordinate_communication(pme_atomcomm_t *atc)
2946 {
2947     int nslab, n, i;
2948     int fw, bw;
2949
2950     nslab = atc->nslab;
2951
2952     n = 0;
2953     for (i = 1; i <= nslab/2; i++)
2954     {
2955         fw = (atc->nodeid + i) % nslab;
2956         bw = (atc->nodeid - i + nslab) % nslab;
2957         if (n < nslab - 1)
2958         {
2959             atc->node_dest[n] = fw;
2960             atc->node_src[n]  = bw;
2961             n++;
2962         }
2963         if (n < nslab - 1)
2964         {
2965             atc->node_dest[n] = bw;
2966             atc->node_src[n]  = fw;
2967             n++;
2968         }
2969     }
2970 }
2971
2972 int gmx_pme_destroy(FILE *log, gmx_pme_t *pmedata)
2973 {
2974     int thread, i;
2975
2976     if (NULL != log)
2977     {
2978         fprintf(log, "Destroying PME data structures.\n");
2979     }
2980
2981     sfree((*pmedata)->nnx);
2982     sfree((*pmedata)->nny);
2983     sfree((*pmedata)->nnz);
2984
2985     for (i = 0; i < (*pmedata)->ngrids; ++i)
2986     {
2987         pmegrids_destroy(&(*pmedata)->pmegrid[i]);
2988         sfree((*pmedata)->fftgrid[i]);
2989         sfree((*pmedata)->cfftgrid[i]);
2990         gmx_parallel_3dfft_destroy((*pmedata)->pfft_setup[i]);
2991     }
2992
2993     sfree((*pmedata)->lb_buf1);
2994     sfree((*pmedata)->lb_buf2);
2995
2996     for (thread = 0; thread < (*pmedata)->nthread; thread++)
2997     {
2998         free_work(&(*pmedata)->work[thread]);
2999     }
3000     sfree((*pmedata)->work);
3001
3002     sfree(*pmedata);
3003     *pmedata = NULL;
3004
3005     return 0;
3006 }
3007
3008 static int mult_up(int n, int f)
3009 {
3010     return ((n + f - 1)/f)*f;
3011 }
3012
3013
3014 static double pme_load_imbalance(gmx_pme_t pme)
3015 {
3016     int    nma, nmi;
3017     double n1, n2, n3;
3018
3019     nma = pme->nnodes_major;
3020     nmi = pme->nnodes_minor;
3021
3022     n1 = mult_up(pme->nkx, nma)*mult_up(pme->nky, nmi)*pme->nkz;
3023     n2 = mult_up(pme->nkx, nma)*mult_up(pme->nkz, nmi)*pme->nky;
3024     n3 = mult_up(pme->nky, nma)*mult_up(pme->nkz, nmi)*pme->nkx;
3025
3026     /* pme_solve is roughly double the cost of an fft */
3027
3028     return (n1 + n2 + 3*n3)/(double)(6*pme->nkx*pme->nky*pme->nkz);
3029 }
3030
3031 static void init_atomcomm(gmx_pme_t pme, pme_atomcomm_t *atc,
3032                           int dimind, gmx_bool bSpread)
3033 {
3034     int nk, k, s, thread;
3035
3036     atc->dimind    = dimind;
3037     atc->nslab     = 1;
3038     atc->nodeid    = 0;
3039     atc->pd_nalloc = 0;
3040 #ifdef GMX_MPI
3041     if (pme->nnodes > 1)
3042     {
3043         atc->mpi_comm = pme->mpi_comm_d[dimind];
3044         MPI_Comm_size(atc->mpi_comm, &atc->nslab);
3045         MPI_Comm_rank(atc->mpi_comm, &atc->nodeid);
3046     }
3047     if (debug)
3048     {
3049         fprintf(debug, "For PME atom communication in dimind %d: nslab %d rank %d\n", atc->dimind, atc->nslab, atc->nodeid);
3050     }
3051 #endif
3052
3053     atc->bSpread   = bSpread;
3054     atc->pme_order = pme->pme_order;
3055
3056     if (atc->nslab > 1)
3057     {
3058         snew(atc->node_dest, atc->nslab);
3059         snew(atc->node_src, atc->nslab);
3060         setup_coordinate_communication(atc);
3061
3062         snew(atc->count_thread, pme->nthread);
3063         for (thread = 0; thread < pme->nthread; thread++)
3064         {
3065             snew(atc->count_thread[thread], atc->nslab);
3066         }
3067         atc->count = atc->count_thread[0];
3068         snew(atc->rcount, atc->nslab);
3069         snew(atc->buf_index, atc->nslab);
3070     }
3071
3072     atc->nthread = pme->nthread;
3073     if (atc->nthread > 1)
3074     {
3075         snew(atc->thread_plist, atc->nthread);
3076     }
3077     snew(atc->spline, atc->nthread);
3078     for (thread = 0; thread < atc->nthread; thread++)
3079     {
3080         if (atc->nthread > 1)
3081         {
3082             snew(atc->thread_plist[thread].n, atc->nthread+2*GMX_CACHE_SEP);
3083             atc->thread_plist[thread].n += GMX_CACHE_SEP;
3084         }
3085         snew(atc->spline[thread].thread_one, pme->nthread);
3086         atc->spline[thread].thread_one[thread] = 1;
3087     }
3088 }
3089
3090 static void
3091 init_overlap_comm(pme_overlap_t *  ol,
3092                   int              norder,
3093 #ifdef GMX_MPI
3094                   MPI_Comm         comm,
3095 #endif
3096                   int              nnodes,
3097                   int              nodeid,
3098                   int              ndata,
3099                   int              commplainsize)
3100 {
3101     int lbnd, rbnd, maxlr, b, i;
3102     int exten;
3103     int nn, nk;
3104     pme_grid_comm_t *pgc;
3105     gmx_bool bCont;
3106     int fft_start, fft_end, send_index1, recv_index1;
3107 #ifdef GMX_MPI
3108     MPI_Status stat;
3109
3110     ol->mpi_comm = comm;
3111 #endif
3112
3113     ol->nnodes = nnodes;
3114     ol->nodeid = nodeid;
3115
3116     /* Linear translation of the PME grid won't affect reciprocal space
3117      * calculations, so to optimize we only interpolate "upwards",
3118      * which also means we only have to consider overlap in one direction.
3119      * I.e., particles on this node might also be spread to grid indices
3120      * that belong to higher nodes (modulo nnodes)
3121      */
3122
3123     snew(ol->s2g0, ol->nnodes+1);
3124     snew(ol->s2g1, ol->nnodes);
3125     if (debug)
3126     {
3127         fprintf(debug, "PME slab boundaries:");
3128     }
3129     for (i = 0; i < nnodes; i++)
3130     {
3131         /* s2g0 the local interpolation grid start.
3132          * s2g1 the local interpolation grid end.
3133          * Since in calc_pidx we divide particles, and not grid lines,
3134          * spatially uniformly along dimension x or y, we need to round
3135          * s2g0 down and s2g1 up.
3136          */
3137         ol->s2g0[i] = ( i   *ndata + 0       )/nnodes;
3138         ol->s2g1[i] = ((i+1)*ndata + nnodes-1)/nnodes + norder - 1;
3139
3140         if (debug)
3141         {
3142             fprintf(debug, "  %3d %3d", ol->s2g0[i], ol->s2g1[i]);
3143         }
3144     }
3145     ol->s2g0[nnodes] = ndata;
3146     if (debug)
3147     {
3148         fprintf(debug, "\n");
3149     }
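    /* For illustration, with hypothetical values ndata = 27, nnodes = 4 and
     * norder = 4 the loop above yields
     *   s2g0 = {0, 6, 13, 20, 27},  s2g1 = {10, 17, 24, 30},
     * so each slab's interpolation region extends at least norder-1 = 3
     * grid lines beyond the start of the next slab; this upward overlap is
     * what the communication setup below handles.
     */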
3150
3151     /* Determine with how many nodes we need to communicate the grid overlap */
3152     b = 0;
3153     do
3154     {
3155         b++;
3156         bCont = FALSE;
3157         for (i = 0; i < nnodes; i++)
3158         {
3159             if ((i+b <  nnodes && ol->s2g1[i] > ol->s2g0[i+b]) ||
3160                 (i+b >= nnodes && ol->s2g1[i] > ol->s2g0[i+b-nnodes] + ndata))
3161             {
3162                 bCont = TRUE;
3163             }
3164         }
3165     }
3166     while (bCont && b < nnodes);
3167     ol->noverlap_nodes = b - 1;
3168
3169     snew(ol->send_id, ol->noverlap_nodes);
3170     snew(ol->recv_id, ol->noverlap_nodes);
3171     for (b = 0; b < ol->noverlap_nodes; b++)
3172     {
3173         ol->send_id[b] = (ol->nodeid + (b + 1)) % ol->nnodes;
3174         ol->recv_id[b] = (ol->nodeid - (b + 1) + ol->nnodes) % ol->nnodes;
3175     }
3176     snew(ol->comm_data, ol->noverlap_nodes);
3177
3178     ol->send_size = 0;
3179     for (b = 0; b < ol->noverlap_nodes; b++)
3180     {
3181         pgc = &ol->comm_data[b];
3182         /* Send */
3183         fft_start        = ol->s2g0[ol->send_id[b]];
3184         fft_end          = ol->s2g0[ol->send_id[b]+1];
3185         if (ol->send_id[b] < nodeid)
3186         {
3187             fft_start += ndata;
3188             fft_end   += ndata;
3189         }
3190         send_index1       = ol->s2g1[nodeid];
3191         send_index1       = min(send_index1, fft_end);
3192         pgc->send_index0  = fft_start;
3193         pgc->send_nindex  = max(0, send_index1 - pgc->send_index0);
3194         ol->send_size    += pgc->send_nindex;
3195
3196         /* We always start receiving at the first index of our slab */
3197         fft_start        = ol->s2g0[ol->nodeid];
3198         fft_end          = ol->s2g0[ol->nodeid+1];
3199         recv_index1      = ol->s2g1[ol->recv_id[b]];
3200         if (ol->recv_id[b] > nodeid)
3201         {
3202             recv_index1 -= ndata;
3203         }
3204         recv_index1      = min(recv_index1, fft_end);
3205         pgc->recv_index0 = fft_start;
3206         pgc->recv_nindex = max(0, recv_index1 - pgc->recv_index0);
3207     }
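
         /* In the same sketch, node 0 sends to node 1: fft_start = s2g0[1] = 5,
          * fft_end = s2g0[2] = 10 and send_index1 = min(s2g1[0], 10) = 8, so
          * send_nindex = 3 grid lines; it receives the matching 3 lines from
          * node 3 (recv_index1 = s2g1[3] - ndata = 3) at the start of its slab.
          */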
3208
3209 #ifdef GMX_MPI
3210     /* Communicate the buffer sizes to receive */
3211     for (b = 0; b < ol->noverlap_nodes; b++)
3212     {
3213         MPI_Sendrecv(&ol->send_size, 1, MPI_INT, ol->send_id[b], b,
3214                      &ol->comm_data[b].recv_size, 1, MPI_INT, ol->recv_id[b], b,
3215                      ol->mpi_comm, &stat);
3216     }
3217 #endif
3218
3219     /* For a non-divisible grid we need pme_order instead of pme_order-1 */
3220     snew(ol->sendbuf, norder*commplainsize);
3221     snew(ol->recvbuf, norder*commplainsize);
3222 }
3223
3224 static void
3225 make_gridindex5_to_localindex(int n, int local_start, int local_range,
3226                               int **global_to_local,
3227                               real **fraction_shift)
3228 {
3229     int i;
3230     int * gtl;
3231     real * fsh;
3232
3233     snew(gtl, 5*n);
3234     snew(fsh, 5*n);
3235     for (i = 0; (i < 5*n); i++)
3236     {
3237         /* Determine the global to local grid index */
3238         gtl[i] = (i - local_start + n) % n;
3239         /* For coordinates that fall within the local grid the fraction
3240          * is correct, we don't need to shift it.
3241          */
3242         fsh[i] = 0;
3243         if (local_range < n)
3244         {
3245             /* Due to rounding issues i could be 1 beyond the lower or
3246              * upper boundary of the local grid. Correct the index for this.
3247              * If we shift the index, we need to shift the fraction by
3248              * the same amount in the other direction to not affect
3249              * the weights.
3250              * Note that due to this shifting the weights at the end of
3251              * the spline might change, but that will only involve values
3252              * between zero and values close to the precision of a real,
3253              * which is anyhow the accuracy of the whole mesh calculation.
3254              */
3255             /* With local_range=0 we should not change i=local_start */
3256             if (i % n != local_start)
3257             {
3258                 if (gtl[i] == n-1)
3259                 {
3260                     gtl[i] = 0;
3261                     fsh[i] = -1;
3262                 }
3263                 else if (gtl[i] == local_range)
3264                 {
3265                     gtl[i] = local_range - 1;
3266                     fsh[i] = 1;
3267                 }
3268             }
3269         }
3270     }
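
         /* A brief sketch, assuming for example n = 10, local_start = 5 and
          * local_range = 5: grid line i = 4 (just below our slab) maps to
          * gtl = 9 = n-1 and is folded to local index 0 with fsh = -1, while
          * i = 10 (just past our slab end) maps to gtl = local_range = 5 and
          * is pulled back to local index 4 with fsh = +1.
          */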
3271
3272     *global_to_local = gtl;
3273     *fraction_shift  = fsh;
3274 }
3275
3276 static pme_spline_work_t *make_pme_spline_work(int gmx_unused order)
3277 {
3278     pme_spline_work_t *work;
3279
3280 #ifdef PME_SIMD4_SPREAD_GATHER
3281     real             tmp[GMX_SIMD4_WIDTH*3], *tmp_aligned;
3282     gmx_simd4_real_t zero_S;
3283     gmx_simd4_real_t real_mask_S0, real_mask_S1;
3284     int              of, i;
3285
3286     snew_aligned(work, 1, SIMD4_ALIGNMENT);
3287
3288     tmp_aligned = gmx_simd4_align_r(tmp);
3289
3290     zero_S = gmx_simd4_setzero_r();
3291
3292     /* Generate bit masks to mask out the unused grid entries,
3293      * as we only operate on order of the 8 grid entries that are
3294      * loaded into 2 SIMD registers.
3295      */
3296     for (of = 0; of < 2*GMX_SIMD4_WIDTH-(order-1); of++)
3297     {
3298         for (i = 0; i < 2*GMX_SIMD4_WIDTH; i++)
3299         {
3300             tmp_aligned[i] = (i >= of && i < of+order ? -1.0 : 1.0);
3301         }
3302         real_mask_S0      = gmx_simd4_load_r(tmp_aligned);
3303         real_mask_S1      = gmx_simd4_load_r(tmp_aligned+GMX_SIMD4_WIDTH);
3304         work->mask_S0[of] = gmx_simd4_cmplt_r(real_mask_S0, zero_S);
3305         work->mask_S1[of] = gmx_simd4_cmplt_r(real_mask_S1, zero_S);
3306     }
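
         /* For instance, with order = 4 and GMX_SIMD4_WIDTH = 4 there are
          * 5 possible offsets (of = 0..4); for of = 1 the masks select
          * lanes 1-3 of the first SIMD register and lane 0 of the second.
          */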
3307 #else
3308     work = NULL;
3309 #endif
3310
3311     return work;
3312 }
3313
3314 void gmx_pme_check_restrictions(int pme_order,
3315                                 int nkx, int nky, int nkz,
3316                                 int nnodes_major,
3317                                 int nnodes_minor,
3318                                 gmx_bool bUseThreads,
3319                                 gmx_bool bFatal,
3320                                 gmx_bool *bValidSettings)
3321 {
3322     if (pme_order > PME_ORDER_MAX)
3323     {
3324         if (!bFatal)
3325         {
3326             *bValidSettings = FALSE;
3327             return;
3328         }
3329         gmx_fatal(FARGS, "pme_order (%d) is larger than the maximum allowed value (%d). Modify and recompile the code if you really need such a high order.",
3330                   pme_order, PME_ORDER_MAX);
3331     }
3332
3333     if (nkx <= pme_order*(nnodes_major > 1 ? 2 : 1) ||
3334         nky <= pme_order*(nnodes_minor > 1 ? 2 : 1) ||
3335         nkz <= pme_order)
3336     {
3337         if (!bFatal)
3338         {
3339             *bValidSettings = FALSE;
3340             return;
3341         }
3342         gmx_fatal(FARGS, "The PME grid sizes need to be larger than pme_order (%d) and for dimensions with domain decomposition larger than 2*pme_order",
3343                   pme_order);
3344     }
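
         /* For example, with pme_order = 4 a dimension that is decomposed over
          * ranks needs more than 2*4 = 8 grid lines, i.e. at least 9, while a
          * non-decomposed dimension needs at least 5.
          */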
3345
3346     /* Check for a limitation of the (current) sum_fftgrid_dd code.
3347      * We only allow multiple communication pulses in dim 1, not in dim 0.
3348      */
3349     if (bUseThreads && (nkx < nnodes_major*pme_order &&
3350                         nkx != nnodes_major*(pme_order - 1)))
3351     {
3352         if (!bFatal)
3353         {
3354             *bValidSettings = FALSE;
3355             return;
3356         }
3357         gmx_fatal(FARGS, "The number of PME grid lines per rank along x is %g. But when using OpenMP threads, the number of grid lines per rank along x should be >= pme_order (%d) or = pme_order-1. To resolve this issue, use fewer ranks along x (and possibly more along y and/or z) by specifying -dd manually.",
3358                   nkx/(double)nnodes_major, pme_order);
3359     }
3360
3361     if (bValidSettings != NULL)
3362     {
3363         *bValidSettings = TRUE;
3364     }
3365
3366     return;
3367 }
3368
3369 int gmx_pme_init(gmx_pme_t *         pmedata,
3370                  t_commrec *         cr,
3371                  int                 nnodes_major,
3372                  int                 nnodes_minor,
3373                  t_inputrec *        ir,
3374                  int                 homenr,
3375                  gmx_bool            bFreeEnergy_q,
3376                  gmx_bool            bFreeEnergy_lj,
3377                  gmx_bool            bReproducible,
3378                  int                 nthread)
3379 {
3380     gmx_pme_t pme = NULL;
3381
3382     int  use_threads, sum_use_threads, i;
3383     ivec ndata;
3384
3385     if (debug)
3386     {
3387         fprintf(debug, "Creating PME data structures.\n");
3388     }
3389     snew(pme, 1);
3390
3391     pme->sum_qgrid_tmp       = NULL;
3392     pme->sum_qgrid_dd_tmp    = NULL;
3393     pme->buf_nalloc          = 0;
3394
3395     pme->nnodes              = 1;
3396     pme->bPPnode             = TRUE;
3397
3398     pme->nnodes_major        = nnodes_major;
3399     pme->nnodes_minor        = nnodes_minor;
3400
3401 #ifdef GMX_MPI
3402     if (nnodes_major*nnodes_minor > 1)
3403     {
3404         pme->mpi_comm = cr->mpi_comm_mygroup;
3405
3406         MPI_Comm_rank(pme->mpi_comm, &pme->nodeid);
3407         MPI_Comm_size(pme->mpi_comm, &pme->nnodes);
3408         if (pme->nnodes != nnodes_major*nnodes_minor)
3409         {
3410             gmx_incons("PME rank count mismatch");
3411         }
3412     }
3413     else
3414     {
3415         pme->mpi_comm = MPI_COMM_NULL;
3416     }
3417 #endif
3418
3419     if (pme->nnodes == 1)
3420     {
3421 #ifdef GMX_MPI
3422         pme->mpi_comm_d[0] = MPI_COMM_NULL;
3423         pme->mpi_comm_d[1] = MPI_COMM_NULL;
3424 #endif
3425         pme->ndecompdim   = 0;
3426         pme->nodeid_major = 0;
3427         pme->nodeid_minor = 0;
3431     }
3432     else
3433     {
3434         if (nnodes_minor == 1)
3435         {
3436 #ifdef GMX_MPI
3437             pme->mpi_comm_d[0] = pme->mpi_comm;
3438             pme->mpi_comm_d[1] = MPI_COMM_NULL;
3439 #endif
3440             pme->ndecompdim   = 1;
3441             pme->nodeid_major = pme->nodeid;
3442             pme->nodeid_minor = 0;
3443
3444         }
3445         else if (nnodes_major == 1)
3446         {
3447 #ifdef GMX_MPI
3448             pme->mpi_comm_d[0] = MPI_COMM_NULL;
3449             pme->mpi_comm_d[1] = pme->mpi_comm;
3450 #endif
3451             pme->ndecompdim   = 1;
3452             pme->nodeid_major = 0;
3453             pme->nodeid_minor = pme->nodeid;
3454         }
3455         else
3456         {
3457             if (pme->nnodes % nnodes_major != 0)
3458             {
3459                 gmx_incons("For 2D PME decomposition, #PME ranks must be divisible by the number of ranks in the major dimension");
3460             }
3461             pme->ndecompdim = 2;
3462
3463 #ifdef GMX_MPI
3464             MPI_Comm_split(pme->mpi_comm, pme->nodeid % nnodes_minor,
3465                            pme->nodeid, &pme->mpi_comm_d[0]);  /* My communicator along major dimension */
3466             MPI_Comm_split(pme->mpi_comm, pme->nodeid/nnodes_minor,
3467                            pme->nodeid, &pme->mpi_comm_d[1]);  /* My communicator along minor dimension */
3468
3469             MPI_Comm_rank(pme->mpi_comm_d[0], &pme->nodeid_major);
3470             MPI_Comm_size(pme->mpi_comm_d[0], &pme->nnodes_major);
3471             MPI_Comm_rank(pme->mpi_comm_d[1], &pme->nodeid_minor);
3472             MPI_Comm_size(pme->mpi_comm_d[1], &pme->nnodes_minor);
3473 #endif
3474         }
3475         pme->bPPnode = (cr->duty & DUTY_PP);
3476     }
3477
3478     pme->nthread = nthread;
3479
3480     /* Check if any of the PME MPI ranks uses threads */
3481     use_threads = (pme->nthread > 1 ? 1 : 0);
3482 #ifdef GMX_MPI
3483     if (pme->nnodes > 1)
3484     {
3485         MPI_Allreduce(&use_threads, &sum_use_threads, 1, MPI_INT,
3486                       MPI_SUM, pme->mpi_comm);
3487     }
3488     else
3489 #endif
3490     {
3491         sum_use_threads = use_threads;
3492     }
3493     pme->bUseThreads = (sum_use_threads > 0);
3494
3495     if (ir->ePBC == epbcSCREW)
3496     {
3497         gmx_fatal(FARGS, "pme does not (yet) work with pbc = screw");
3498     }
3499
3500     pme->bFEP_q      = ((ir->efep != efepNO) && bFreeEnergy_q);
3501     pme->bFEP_lj     = ((ir->efep != efepNO) && bFreeEnergy_lj);
3502     pme->bFEP        = (pme->bFEP_q || pme->bFEP_lj);
3503     pme->nkx         = ir->nkx;
3504     pme->nky         = ir->nky;
3505     pme->nkz         = ir->nkz;
3506     pme->bP3M        = (ir->coulombtype == eelP3M_AD || getenv("GMX_PME_P3M") != NULL);
3507     pme->pme_order   = ir->pme_order;
3508
3509     /* Always constant electrostatics coefficients */
3510     pme->epsilon_r   = ir->epsilon_r;
3511
3512     /* Always constant LJ coefficients */
3513     pme->ljpme_combination_rule = ir->ljpme_combination_rule;
3514
3515     /* If we violate restrictions, generate a fatal error here */
3516     gmx_pme_check_restrictions(pme->pme_order,
3517                                pme->nkx, pme->nky, pme->nkz,
3518                                pme->nnodes_major,
3519                                pme->nnodes_minor,
3520                                pme->bUseThreads,
3521                                TRUE,
3522                                NULL);
3523
3524     if (pme->nnodes > 1)
3525     {
3526         double imbal;
3527
3528 #ifdef GMX_MPI
3529         MPI_Type_contiguous(DIM, mpi_type, &(pme->rvec_mpi));
3530         MPI_Type_commit(&(pme->rvec_mpi));
3531 #endif
3532
3533         /* Note that the coefficient spreading and force gathering, which usually
3534          * take about the same amount of time as FFT+solve_pme,
3535          * are always fully load balanced
3536          * (unless the coefficient distribution is inhomogeneous).
3537          */
3538
3539         imbal = pme_load_imbalance(pme);
3540         if (imbal >= 1.2 && pme->nodeid_major == 0 && pme->nodeid_minor == 0)
3541         {
3542             fprintf(stderr,
3543                     "\n"
3544                     "NOTE: The load imbalance in PME FFT and solve is %d%%.\n"
3545                     "      For optimal PME load balancing\n"
3546                     "      PME grid_x (%d) and grid_y (%d) should be divisible by #PME_ranks_x (%d)\n"
3547                     "      and PME grid_y (%d) and grid_z (%d) should be divisible by #PME_ranks_y (%d)\n"
3548                     "\n",
3549                     (int)((imbal-1)*100 + 0.5),
3550                     pme->nkx, pme->nky, pme->nnodes_major,
3551                     pme->nky, pme->nkz, pme->nnodes_minor);
3552         }
3553     }
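
         /* For example, grid_x = 64 on 4 PME ranks along x gives 16 lines on
          * every rank, whereas grid_x = 50 cannot be split evenly; the note
          * above is only printed when pme_load_imbalance() returns at least
          * 1.2, i.e. an estimated imbalance of 20% or more.
          */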
3554
3555     /* For a non-divisible grid we need pme_order instead of pme_order-1 */
3556     /* In sum_qgrid_dd x overlap is copied in place: take padding into account.
3557      * y is always copied through a buffer: we don't need padding in z,
3558      * but we do need the overlap in x because of the communication order.
3559      */
3560     init_overlap_comm(&pme->overlap[0], pme->pme_order,
3561 #ifdef GMX_MPI
3562                       pme->mpi_comm_d[0],
3563 #endif
3564                       pme->nnodes_major, pme->nodeid_major,
3565                       pme->nkx,
3566                       (div_round_up(pme->nky, pme->nnodes_minor)+pme->pme_order)*(pme->nkz+pme->pme_order-1));
3567
3568     /* Along overlap dim 1 we can send in multiple pulses in sum_fftgrid_dd.
3569      * We do this with an offset buffer of equal size, so we need to allocate
3570      * extra for the offset. That's what the (+1)*pme->nkz is for.
3571      */
3572     init_overlap_comm(&pme->overlap[1], pme->pme_order,
3573 #ifdef GMX_MPI
3574                       pme->mpi_comm_d[1],
3575 #endif
3576                       pme->nnodes_minor, pme->nodeid_minor,
3577                       pme->nky,
3578                       (div_round_up(pme->nkx, pme->nnodes_major)+pme->pme_order+1)*pme->nkz);
3579
3580     /* Double-check for a limitation of the (current) sum_fftgrid_dd code.
3581      * Note that gmx_pme_check_restrictions checked for this already.
3582      */
3583     if (pme->bUseThreads && pme->overlap[0].noverlap_nodes > 1)
3584     {
3585         gmx_incons("More than one communication pulse required for grid overlap communication along the major dimension while using threads");
3586     }
3587
3588     snew(pme->bsp_mod[XX], pme->nkx);
3589     snew(pme->bsp_mod[YY], pme->nky);
3590     snew(pme->bsp_mod[ZZ], pme->nkz);
3591
3592     /* The required size of the interpolation grid, including overlap.
3593      * The allocated size (pmegrid_n?) might be slightly larger.
3594      */
3595     pme->pmegrid_nx = pme->overlap[0].s2g1[pme->nodeid_major] -
3596         pme->overlap[0].s2g0[pme->nodeid_major];
3597     pme->pmegrid_ny = pme->overlap[1].s2g1[pme->nodeid_minor] -
3598         pme->overlap[1].s2g0[pme->nodeid_minor];
3599     pme->pmegrid_nz_base = pme->nkz;
3600     pme->pmegrid_nz      = pme->pmegrid_nz_base + pme->pme_order - 1;
3601     set_grid_alignment(&pme->pmegrid_nz, pme->pme_order);
3602
3603     pme->pmegrid_start_ix = pme->overlap[0].s2g0[pme->nodeid_major];
3604     pme->pmegrid_start_iy = pme->overlap[1].s2g0[pme->nodeid_minor];
3605     pme->pmegrid_start_iz = 0;
3606
3607     make_gridindex5_to_localindex(pme->nkx,
3608                                   pme->pmegrid_start_ix,
3609                                   pme->pmegrid_nx - (pme->pme_order-1),
3610                                   &pme->nnx, &pme->fshx);
3611     make_gridindex5_to_localindex(pme->nky,
3612                                   pme->pmegrid_start_iy,
3613                                   pme->pmegrid_ny - (pme->pme_order-1),
3614                                   &pme->nny, &pme->fshy);
3615     make_gridindex5_to_localindex(pme->nkz,
3616                                   pme->pmegrid_start_iz,
3617                                   pme->pmegrid_nz_base,
3618                                   &pme->nnz, &pme->fshz);
3619
3620     pme->spline_work = make_pme_spline_work(pme->pme_order);
3621
3622     ndata[0]    = pme->nkx;
3623     ndata[1]    = pme->nky;
3624     ndata[2]    = pme->nkz;
3625     /* It doesn't matter if we allocate too many grid pointers here,
3626      * we only initialize and use the ones we need.
3627      */
3628     if (EVDW_PME(ir->vdwtype))
3629     {
3630         pme->ngrids = ((ir->ljpme_combination_rule == eljpmeLB) ? DO_Q_AND_LJ_LB : DO_Q_AND_LJ);
3631     }
3632     else
3633     {
3634         pme->ngrids = DO_Q;
3635     }
3636     snew(pme->fftgrid, pme->ngrids);
3637     snew(pme->cfftgrid, pme->ngrids);
3638     snew(pme->pfft_setup, pme->ngrids);
3639
3640     for (i = 0; i < pme->ngrids; ++i)
3641     {
3642         if ((i <  DO_Q && EEL_PME(ir->coulombtype) && (i == 0 ||
3643                                                        bFreeEnergy_q)) ||
3644             (i >= DO_Q && EVDW_PME(ir->vdwtype) && (i == 2 ||
3645                                                     bFreeEnergy_lj ||
3646                                                     ir->ljpme_combination_rule == eljpmeLB)))
3647         {
3648             pmegrids_init(&pme->pmegrid[i],
3649                           pme->pmegrid_nx, pme->pmegrid_ny, pme->pmegrid_nz,
3650                           pme->pmegrid_nz_base,
3651                           pme->pme_order,
3652                           pme->bUseThreads,
3653                           pme->nthread,
3654                           pme->overlap[0].s2g1[pme->nodeid_major]-pme->overlap[0].s2g0[pme->nodeid_major+1],
3655                           pme->overlap[1].s2g1[pme->nodeid_minor]-pme->overlap[1].s2g0[pme->nodeid_minor+1]);
3656             /* This routine will allocate the grid data to fit the FFTs */
3657             gmx_parallel_3dfft_init(&pme->pfft_setup[i], ndata,
3658                                     &pme->fftgrid[i], &pme->cfftgrid[i],
3659                                     pme->mpi_comm_d,
3660                                     bReproducible, pme->nthread);
3661
3662         }
3663     }
3664
3665     if (!pme->bP3M)
3666     {
3667         /* Use plain SPME B-spline interpolation */
3668         make_bspline_moduli(pme->bsp_mod, pme->nkx, pme->nky, pme->nkz, pme->pme_order);
3669     }
3670     else
3671     {
3672         /* Use the P3M grid-optimized influence function */
3673         make_p3m_bspline_moduli(pme->bsp_mod, pme->nkx, pme->nky, pme->nkz, pme->pme_order);
3674     }
3675
3676     /* Use atc[0] for spreading */
3677     init_atomcomm(pme, &pme->atc[0], nnodes_major > 1 ? 0 : 1, TRUE);
3678     if (pme->ndecompdim >= 2)
3679     {
3680         init_atomcomm(pme, &pme->atc[1], 1, FALSE);
3681     }
3682
3683     if (pme->nnodes == 1)
3684     {
3685         pme->atc[0].n = homenr;
3686         pme_realloc_atomcomm_things(&pme->atc[0]);
3687     }
3688
3689     pme->lb_buf1       = NULL;
3690     pme->lb_buf2       = NULL;
3691     pme->lb_buf_nalloc = 0;
3692
3693     {
3694         int thread;
3695
3696         /* Use fft5d, order after FFT is y major, z, x minor */
3697
3698         snew(pme->work, pme->nthread);
3699         for (thread = 0; thread < pme->nthread; thread++)
3700         {
3701             realloc_work(&pme->work[thread], pme->nkx);
3702         }
3703     }
3704
3705     *pmedata = pme;
3706
3707     return 0;
3708 }
3709
3710 static void reuse_pmegrids(const pmegrids_t *old, pmegrids_t *new)
3711 {
3712     int d, t;
3713
3714     for (d = 0; d < DIM; d++)
3715     {
3716         if (new->grid.n[d] > old->grid.n[d])
3717         {
3718             return;
3719         }
3720     }
3721
3722     sfree_aligned(new->grid.grid);
3723     new->grid.grid = old->grid.grid;
3724
3725     if (new->grid_th != NULL && new->nthread == old->nthread)
3726     {
3727         sfree_aligned(new->grid_all);
3728         for (t = 0; t < new->nthread; t++)
3729         {
3730             new->grid_th[t].grid = old->grid_th[t].grid;
3731         }
3732     }
3733 }
3734
3735 int gmx_pme_reinit(gmx_pme_t *         pmedata,
3736                    t_commrec *         cr,
3737                    gmx_pme_t           pme_src,
3738                    const t_inputrec *  ir,
3739                    ivec                grid_size)
3740 {
3741     t_inputrec irc;
3742     int homenr;
3743     int ret;
3744
3745     irc     = *ir;
3746     irc.nkx = grid_size[XX];
3747     irc.nky = grid_size[YY];
3748     irc.nkz = grid_size[ZZ];
3749
3750     if (pme_src->nnodes == 1)
3751     {
3752         homenr = pme_src->atc[0].n;
3753     }
3754     else
3755     {
3756         homenr = -1;
3757     }
3758
3759     ret = gmx_pme_init(pmedata, cr, pme_src->nnodes_major, pme_src->nnodes_minor,
3760                        &irc, homenr, pme_src->bFEP_q, pme_src->bFEP_lj, FALSE, pme_src->nthread);
3761
3762     if (ret == 0)
3763     {
3764         /* We can easily reuse the allocated pme grids in pme_src */
3765         reuse_pmegrids(&pme_src->pmegrid[PME_GRID_QA], &(*pmedata)->pmegrid[PME_GRID_QA]);
3766         /* We would like to reuse the fft grids, but that's harder */
3767     }
3768
3769     return ret;
3770 }
3771
3772
3773 static void copy_local_grid(gmx_pme_t pme, pmegrids_t *pmegrids,
3774                             int grid_index, int thread, real *fftgrid)
3775 {
3776     ivec local_fft_ndata, local_fft_offset, local_fft_size;
3777     int  fft_my, fft_mz;
3778     int  nsx, nsy, nsz;
3779     ivec nf;
3780     int  offx, offy, offz, x, y, z, i0, i0t;
3781     int  d;
3782     pmegrid_t *pmegrid;
3783     real *grid_th;
3784
3785     gmx_parallel_3dfft_real_limits(pme->pfft_setup[grid_index],
3786                                    local_fft_ndata,
3787                                    local_fft_offset,
3788                                    local_fft_size);
3789     fft_my = local_fft_size[YY];
3790     fft_mz = local_fft_size[ZZ];
3791
3792     pmegrid = &pmegrids->grid_th[thread];
3793
3794     nsx = pmegrid->s[XX];
3795     nsy = pmegrid->s[YY];
3796     nsz = pmegrid->s[ZZ];
3797
3798     for (d = 0; d < DIM; d++)
3799     {
3800         nf[d] = min(pmegrid->n[d] - (pmegrid->order - 1),
3801                     local_fft_ndata[d] - pmegrid->offset[d]);
3802     }
3803
3804     offx = pmegrid->offset[XX];
3805     offy = pmegrid->offset[YY];
3806     offz = pmegrid->offset[ZZ];
3807
3808     /* Directly copy the non-overlapping parts of the local grids.
3809      * This also initializes the full grid.
3810      */
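         /* Index note: i0 below is the row-major (z fastest) index into the
          * padded node FFT grid (strides fft_my, fft_mz), while i0t is the
          * matching index into the compact thread-local grid (strides nsy, nsz).
          */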
3811     grid_th = pmegrid->grid;
3812     for (x = 0; x < nf[XX]; x++)
3813     {
3814         for (y = 0; y < nf[YY]; y++)
3815         {
3816             i0  = ((offx + x)*fft_my + (offy + y))*fft_mz + offz;
3817             i0t = (x*nsy + y)*nsz;
3818             for (z = 0; z < nf[ZZ]; z++)
3819             {
3820                 fftgrid[i0+z] = grid_th[i0t+z];
3821             }
3822         }
3823     }
3824 }
3825
3826 static void
3827 reduce_threadgrid_overlap(gmx_pme_t pme,
3828                           const pmegrids_t *pmegrids, int thread,
3829                           real *fftgrid, real *commbuf_x, real *commbuf_y,
3830                           int grid_index)
3831 {
3832     ivec local_fft_ndata, local_fft_offset, local_fft_size;
3833     int  fft_nx, fft_ny, fft_nz;
3834     int  fft_my, fft_mz;
3835     int  buf_my = -1;
3836     int  nsx, nsy, nsz;
3837     ivec localcopy_end;
3838     int  offx, offy, offz, x, y, z, i0, i0t;
3839     int  sx, sy, sz, fx, fy, fz, tx1, ty1, tz1, ox, oy, oz;
3840     gmx_bool bClearBufX, bClearBufY, bClearBufXY, bClearBuf;
3841     gmx_bool bCommX, bCommY;
3842     int  d;
3843     int  thread_f;
3844     const pmegrid_t *pmegrid, *pmegrid_g, *pmegrid_f;
3845     const real *grid_th;
3846     real *commbuf = NULL;
3847
3848     gmx_parallel_3dfft_real_limits(pme->pfft_setup[grid_index],
3849                                    local_fft_ndata,
3850                                    local_fft_offset,
3851                                    local_fft_size);
3852     fft_nx = local_fft_ndata[XX];
3853     fft_ny = local_fft_ndata[YY];
3854     fft_nz = local_fft_ndata[ZZ];
3855
3856     fft_my = local_fft_size[YY];
3857     fft_mz = local_fft_size[ZZ];
3858
3859     /* This routine is called when all threads have finished spreading.
3860      * Here each thread sums grid contributions calculated by other threads
3861      * into the thread-local grid volume.
3862      * To minimize the number of grid copying operations,
3863      * this routine sums directly from the pmegrid into the fftgrid.
3864      */
3865
3866     /* Determine which part of the full node grid we should operate on,
3867      * this is our thread local part of the full grid.
3868      */
3869     pmegrid = &pmegrids->grid_th[thread];
3870
3871     for (d = 0; d < DIM; d++)
3872     {
3873         /* Determine up to where our thread needs to copy from the
3874          * thread-local charge spreading grid to the rank-local FFT grid.
3875          * This is up to our spreading grid end minus order-1 and
3876          * not beyond the local FFT grid.
3877          */
3878         localcopy_end[d] =
3879             min(pmegrid->offset[d]+pmegrid->n[d]-(pmegrid->order-1),
3880                 local_fft_ndata[d]);
3881     }
3882
3883     offx = pmegrid->offset[XX];
3884     offy = pmegrid->offset[YY];
3885     offz = pmegrid->offset[ZZ];
3886
3887
3888     bClearBufX  = TRUE;
3889     bClearBufY  = TRUE;
3890     bClearBufXY = TRUE;
3891
3892     /* Now loop over all the thread data blocks that contribute
3893      * to the grid region we (our thread) are operating on.
3894      */
3895     /* Note that fft_nx/y is equal to the number of grid points
3896      * between the first point of our node grid and the one of the next node.
3897      */
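         /* The sx/sy/sz loops below only step towards lower thread-block
          * indices: with the "interpolate upwards" convention used in
          * init_overlap_comm, our grid region only receives spreading
          * contributions from thread blocks at equal or lower indices
          * (with periodic wrapping).
          */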
3898     for (sx = 0; sx >= -pmegrids->nthread_comm[XX]; sx--)
3899     {
3900         fx     = pmegrid->ci[XX] + sx;
3901         ox     = 0;
3902         bCommX = FALSE;
3903         if (fx < 0)
3904         {
3905             fx    += pmegrids->nc[XX];
3906             ox    -= fft_nx;
3907             bCommX = (pme->nnodes_major > 1);
3908         }
3909         pmegrid_g = &pmegrids->grid_th[fx*pmegrids->nc[YY]*pmegrids->nc[ZZ]];
3910         ox       += pmegrid_g->offset[XX];
3911         /* Determine the end of our part of the source grid */
3912         if (!bCommX)
3913         {
3914             /* Use our thread local source grid and target grid part */
3915             tx1 = min(ox + pmegrid_g->n[XX], localcopy_end[XX]);
3916         }
3917         else
3918         {
3919             /* Use our thread local source grid and the spreading range */
3920             tx1 = min(ox + pmegrid_g->n[XX], pme->pme_order);
3921         }
3922
3923         for (sy = 0; sy >= -pmegrids->nthread_comm[YY]; sy--)
3924         {
3925             fy     = pmegrid->ci[YY] + sy;
3926             oy     = 0;
3927             bCommY = FALSE;
3928             if (fy < 0)
3929             {
3930                 fy    += pmegrids->nc[YY];
3931                 oy    -= fft_ny;
3932                 bCommY = (pme->nnodes_minor > 1);
3933             }
3934             pmegrid_g = &pmegrids->grid_th[fy*pmegrids->nc[ZZ]];
3935             oy       += pmegrid_g->offset[YY];
3936             /* Determine the end of our part of the source grid */
3937             if (!bCommY)
3938             {
3939                 /* Use our thread local source grid and target grid part */
3940                 ty1 = min(oy + pmegrid_g->n[YY], localcopy_end[YY]);
3941             }
3942             else
3943             {
3944                 /* Use our thread local source grid and the spreading range */
3945                 ty1 = min(oy + pmegrid_g->n[YY], pme->pme_order);
3946             }
3947
3948             for (sz = 0; sz >= -pmegrids->nthread_comm[ZZ]; sz--)
3949             {
3950                 fz = pmegrid->ci[ZZ] + sz;
3951                 oz = 0;
3952                 if (fz < 0)
3953                 {
3954                     fz += pmegrids->nc[ZZ];
3955                     oz -= fft_nz;
3956                 }
3957                 pmegrid_g = &pmegrids->grid_th[fz];
3958                 oz       += pmegrid_g->offset[ZZ];
3959                 tz1       = min(oz + pmegrid_g->n[ZZ], localcopy_end[ZZ]);
3960
3961                 if (sx == 0 && sy == 0 && sz == 0)
3962                 {
3963                     /* We have already added our local contribution
3964                      * before calling this routine, so skip it here.
3965                      */
3966                     continue;
3967                 }
3968
3969                 thread_f = (fx*pmegrids->nc[YY] + fy)*pmegrids->nc[ZZ] + fz;
3970
3971                 pmegrid_f = &pmegrids->grid_th[thread_f];
3972
3973                 grid_th = pmegrid_f->grid;
3974
3975                 nsx = pmegrid_f->s[XX];
3976                 nsy = pmegrid_f->s[YY];
3977                 nsz = pmegrid_f->s[ZZ];
3978
3979 #ifdef DEBUG_PME_REDUCE
3980                 printf("n%d t%d add %d  %2d %2d %2d  %2d %2d %2d  %2d-%2d %2d-%2d, %2d-%2d %2d-%2d, %2d-%2d %2d-%2d\n",
3981                        pme->nodeid, thread, thread_f,
3982                        pme->pmegrid_start_ix,
3983                        pme->pmegrid_start_iy,
3984                        pme->pmegrid_start_iz,
3985                        sx, sy, sz,
3986                        offx-ox, tx1-ox, offx, tx1,
3987                        offy-oy, ty1-oy, offy, ty1,
3988                        offz-oz, tz1-oz, offz, tz1);
3989 #endif
3990
3991                 if (!(bCommX || bCommY))
3992                 {
3993                     /* Copy from the thread local grid to the node grid */
3994                     for (x = offx; x < tx1; x++)
3995                     {
3996                         for (y = offy; y < ty1; y++)
3997                         {
3998                             i0  = (x*fft_my + y)*fft_mz;
3999                             i0t = ((x - ox)*nsy + (y - oy))*nsz - oz;
4000                             for (z = offz; z < tz1; z++)
4001                             {
4002                                 fftgrid[i0+z] += grid_th[i0t+z];
4003                             }
4004                         }
4005                     }
4006                 }
4007                 else
4008                 {
4009                     /* The order of this conditional decides
4010                      * where the corner volume gets stored with x+y decomp.
4011                      */
4012                     if (bCommY)
4013                     {
4014                         commbuf = commbuf_y;
4015                         /* The y-size of the communication buffer is set by
4016                          * the overlap of the grid part of our local slab
4017                          * with the part starting at the next slab.
4018                          */
4019                         buf_my  =
4020                             pme->overlap[1].s2g1[pme->nodeid_minor] -
4021                             pme->overlap[1].s2g0[pme->nodeid_minor+1];
4022                         if (bCommX)
4023                         {
4024                             /* We index commbuf modulo the local grid size */
4025                             commbuf += buf_my*fft_nx*fft_nz;
4026
4027                             bClearBuf   = bClearBufXY;
4028                             bClearBufXY = FALSE;
4029                         }
4030                         else
4031                         {
4032                             bClearBuf  = bClearBufY;
4033                             bClearBufY = FALSE;
4034                         }
4035                     }
4036                     else
4037                     {
4038                         commbuf    = commbuf_x;
4039                         buf_my     = fft_ny;
4040                         bClearBuf  = bClearBufX;
4041                         bClearBufX = FALSE;
4042                     }
4043
4044                     /* Copy to the communication buffer */
4045                     for (x = offx; x < tx1; x++)
4046                     {
4047                         for (y = offy; y < ty1; y++)
4048                         {
4049                             i0  = (x*buf_my + y)*fft_nz;
4050                             i0t = ((x - ox)*nsy + (y - oy))*nsz - oz;
4051
4052                             if (bClearBuf)
4053                             {
4054                                 /* First access of commbuf, initialize it */
4055                                 for (z = offz; z < tz1; z++)
4056                                 {
4057                                     commbuf[i0+z]  = grid_th[i0t+z];
4058                                 }
4059                             }
4060                             else
4061                             {
4062                                 for (z = offz; z < tz1; z++)
4063                                 {
4064                                     commbuf[i0+z] += grid_th[i0t+z];
4065                                 }
4066                             }
4067                         }
4068                     }
4069                 }
4070             }
4071         }
4072     }
4073 }
4074
4075
4076 static void sum_fftgrid_dd(gmx_pme_t pme, real *fftgrid, int grid_index)
4077 {
4078     ivec local_fft_ndata, local_fft_offset, local_fft_size;
4079     pme_overlap_t *overlap;
4080     int  send_index0, send_nindex;
4081     int  recv_nindex;
4082 #ifdef GMX_MPI
4083     MPI_Status stat;
4084 #endif
4085     int  send_size_y, recv_size_y;
4086     int  ipulse, send_id, recv_id, datasize, gridsize, size_yx;
4087     real *sendptr, *recvptr;
4088     int  x, y, z, indg, indb;
4089
4090     /* Note that this routine is only used for forward communication.
4091      * Since the force gathering, unlike the coefficient spreading,
4092      * can be trivially parallelized over the particles,
4093      * the backwards process is much simpler and can use the "old"
4094      * communication setup.
4095      */
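         /* Sketch of the two stages below: with decomposition along y, the
          * y-overlap rows are exchanged and summed into the local FFT grid,
          * and the part that also belongs to the x overlap is accumulated into
          * pme->overlap[0].sendbuf; with decomposition along x, a single pulse
          * then exchanges and sums the x overlap.
          */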
4096
4097     gmx_parallel_3dfft_real_limits(pme->pfft_setup[grid_index],
4098                                    local_fft_ndata,
4099                                    local_fft_offset,
4100                                    local_fft_size);
4101
4102     if (pme->nnodes_minor > 1)
4103     {
4104         /* Minor dimension */
4105         overlap = &pme->overlap[1];
4106
4107         if (pme->nnodes_major > 1)
4108         {
4109             size_yx = pme->overlap[0].comm_data[0].send_nindex;
4110         }
4111         else
4112         {
4113             size_yx = 0;
4114         }
4115         datasize = (local_fft_ndata[XX] + size_yx)*local_fft_ndata[ZZ];
4116
4117         send_size_y = overlap->send_size;
4118
4119         for (ipulse = 0; ipulse < overlap->noverlap_nodes; ipulse++)
4120         {
4121             send_id       = overlap->send_id[ipulse];
4122             recv_id       = overlap->recv_id[ipulse];
4123             send_index0   =
4124                 overlap->comm_data[ipulse].send_index0 -
4125                 overlap->comm_data[0].send_index0;
4126             send_nindex   = overlap->comm_data[ipulse].send_nindex;
4127             /* We don't use recv_index0, as we always receive starting at 0 */
4128             recv_nindex   = overlap->comm_data[ipulse].recv_nindex;
4129             recv_size_y   = overlap->comm_data[ipulse].recv_size;
4130
4131             sendptr = overlap->sendbuf + send_index0*local_fft_ndata[ZZ];
4132             recvptr = overlap->recvbuf;
4133
4134             if (debug != NULL)
4135             {
4136                 fprintf(debug, "PME fftgrid comm y %2d x %2d x %2d\n",
4137                         local_fft_ndata[XX], send_nindex, local_fft_ndata[ZZ]);
4138             }
4139
4140 #ifdef GMX_MPI
4141             MPI_Sendrecv(sendptr, send_size_y*datasize, GMX_MPI_REAL,
4142                          send_id, ipulse,
4143                          recvptr, recv_size_y*datasize, GMX_MPI_REAL,
4144                          recv_id, ipulse,
4145                          overlap->mpi_comm, &stat);
4146 #endif
4147
4148             for (x = 0; x < local_fft_ndata[XX]; x++)
4149             {
4150                 for (y = 0; y < recv_nindex; y++)
4151                 {
4152                     indg = (x*local_fft_size[YY] + y)*local_fft_size[ZZ];
4153                     indb = (x*recv_size_y        + y)*local_fft_ndata[ZZ];
4154                     for (z = 0; z < local_fft_ndata[ZZ]; z++)
4155                     {
4156                         fftgrid[indg+z] += recvptr[indb+z];
4157                     }
4158                 }
4159             }
4160
4161             if (pme->nnodes_major > 1)
4162             {
4163                 /* Copy from the received buffer to the send buffer for dim 0 */
4164                 sendptr = pme->overlap[0].sendbuf;
4165                 for (x = 0; x < size_yx; x++)
4166                 {
4167                     for (y = 0; y < recv_nindex; y++)
4168                     {
4169                         indg = (x*local_fft_ndata[YY] + y)*local_fft_ndata[ZZ];
4170                         indb = ((local_fft_ndata[XX] + x)*recv_size_y + y)*local_fft_ndata[ZZ];
4171                         for (z = 0; z < local_fft_ndata[ZZ]; z++)
4172                         {
4173                             sendptr[indg+z] += recvptr[indb+z];
4174                         }
4175                     }
4176                 }
4177             }
4178         }
4179     }
4180
4181     /* We only support a single pulse here.
4182      * This is not a severe limitation, as this code is only used
4183      * with OpenMP and with OpenMP the (PME) domains can be larger.
4184      */
4185     if (pme->nnodes_major > 1)
4186     {
4187         /* Major dimension */
4188         overlap = &pme->overlap[0];
4189
4190         datasize = local_fft_ndata[YY]*local_fft_ndata[ZZ];
4191         gridsize = local_fft_size[YY] *local_fft_size[ZZ];
4192
4193         ipulse = 0;
4194
4195         send_id       = overlap->send_id[ipulse];
4196         recv_id       = overlap->recv_id[ipulse];
4197         send_nindex   = overlap->comm_data[ipulse].send_nindex;
4198         /* We don't use recv_index0, as we always receive starting at 0 */
4199         recv_nindex   = overlap->comm_data[ipulse].recv_nindex;
4200
4201         sendptr = overlap->sendbuf;
4202         recvptr = overlap->recvbuf;
4203
4204         if (debug != NULL)
4205         {
4206             fprintf(debug, "PME fftgrid comm x %2d x %2d x %2d\n",
4207                     send_nindex, local_fft_ndata[YY], local_fft_ndata[ZZ]);
4208         }
4209
4210 #ifdef GMX_MPI
4211         MPI_Sendrecv(sendptr, send_nindex*datasize, GMX_MPI_REAL,
4212                      send_id, ipulse,
4213                      recvptr, recv_nindex*datasize, GMX_MPI_REAL,
4214                      recv_id, ipulse,
4215                      overlap->mpi_comm, &stat);
4216 #endif
4217
4218         for (x = 0; x < recv_nindex; x++)
4219         {
4220             for (y = 0; y < local_fft_ndata[YY]; y++)
4221             {
4222                 indg = (x*local_fft_size[YY]  + y)*local_fft_size[ZZ];
4223                 indb = (x*local_fft_ndata[YY] + y)*local_fft_ndata[ZZ];
4224                 for (z = 0; z < local_fft_ndata[ZZ]; z++)
4225                 {
4226                     fftgrid[indg+z] += recvptr[indb+z];
4227                 }
4228             }
4229         }
4230     }
4231 }
4232
4233
4234 static void spread_on_grid(gmx_pme_t pme,
4235                            pme_atomcomm_t *atc, pmegrids_t *grids,
4236                            gmx_bool bCalcSplines, gmx_bool bSpread,
4237                            real *fftgrid, gmx_bool bDoSplines, int grid_index)
4238 {
4239     int nthread, thread;
4240 #ifdef PME_TIME_THREADS
4241     gmx_cycles_t c1, c2, c3, ct1a, ct1b, ct1c;
4242     static double cs1     = 0, cs2 = 0, cs3 = 0;
4243     static double cs1a[6] = {0, 0, 0, 0, 0, 0};
4244     static int cnt        = 0;
4245 #endif
4246
4247     nthread = pme->nthread;
4248     assert(nthread > 0);
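
         /* Outline, for the case where splines are computed and spreading is
          * requested: (1) each thread computes fractional grid indices for its
          * atoms, (2) computes B-splines and spreads the coefficients onto its
          * per-thread grid, (3) the thread grids are reduced into the node FFT
          * grid, and (4) the inter-rank grid overlap is summed in sum_fftgrid_dd.
          */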
4249
4250 #ifdef PME_TIME_THREADS
4251     c1 = omp_cyc_start();
4252 #endif
4253     if (bCalcSplines)
4254     {
4255 #pragma omp parallel for num_threads(nthread) schedule(static)
4256         for (thread = 0; thread < nthread; thread++)
4257         {
4258             int start, end;
4259
4260             start = atc->n* thread   /nthread;
4261             end   = atc->n*(thread+1)/nthread;
4262
4263             /* Compute fftgrid index for all atoms,
4264              * with help of some extra variables.
4265              */
4266             calc_interpolation_idx(pme, atc, start, grid_index, end, thread);
4267         }
4268     }
4269 #ifdef PME_TIME_THREADS
4270     c1   = omp_cyc_end(c1);
4271     cs1 += (double)c1;
4272 #endif
4273
4274 #ifdef PME_TIME_THREADS
4275     c2 = omp_cyc_start();
4276 #endif
4277 #pragma omp parallel for num_threads(nthread) schedule(static)
4278     for (thread = 0; thread < nthread; thread++)
4279     {
4280         splinedata_t *spline;
4281         pmegrid_t *grid = NULL;
4282
4283         /* make local bsplines  */
4284         if (grids == NULL || !pme->bUseThreads)
4285         {
4286             spline = &atc->spline[0];
4287
4288             spline->n = atc->n;
4289
4290             if (bSpread)
4291             {
4292                 grid = &grids->grid;
4293             }
4294         }
4295         else
4296         {
4297             spline = &atc->spline[thread];
4298
4299             if (grids->nthread == 1)
4300             {
4301                 /* One thread, we operate on all coefficients */
4302                 spline->n = atc->n;
4303             }
4304             else
4305             {
4306                 /* Get the indices our thread should operate on */
4307                 make_thread_local_ind(atc, thread, spline);
4308             }
4309
4310             grid = &grids->grid_th[thread];
4311         }
4312
4313         if (bCalcSplines)
4314         {
4315             make_bsplines(spline->theta, spline->dtheta, pme->pme_order,
4316                           atc->fractx, spline->n, spline->ind, atc->coefficient, bDoSplines);
4317         }
4318
4319         if (bSpread)
4320         {
4321             /* put local atoms on grid. */
4322 #ifdef PME_TIME_SPREAD
4323             ct1a = omp_cyc_start();
4324 #endif
4325             spread_coefficients_bsplines_thread(grid, atc, spline, pme->spline_work);
4326
4327             if (pme->bUseThreads)
4328             {
4329                 copy_local_grid(pme, grids, grid_index, thread, fftgrid);
4330             }
4331 #ifdef PME_TIME_SPREAD
4332             ct1a          = omp_cyc_end(ct1a);
4333             cs1a[thread] += (double)ct1a;
4334 #endif
4335         }
4336     }
4337 #ifdef PME_TIME_THREADS
4338     c2   = omp_cyc_end(c2);
4339     cs2 += (double)c2;
4340 #endif
4341
4342     if (bSpread && pme->bUseThreads)
4343     {
4344 #ifdef PME_TIME_THREADS
4345         c3 = omp_cyc_start();
4346 #endif
4347 #pragma omp parallel for num_threads(grids->nthread) schedule(static)
4348         for (thread = 0; thread < grids->nthread; thread++)
4349         {
4350             reduce_threadgrid_overlap(pme, grids, thread,
4351                                       fftgrid,
4352                                       pme->overlap[0].sendbuf,
4353                                       pme->overlap[1].sendbuf,
4354                                       grid_index);
4355         }
4356 #ifdef PME_TIME_THREADS
4357         c3   = omp_cyc_end(c3);
4358         cs3 += (double)c3;
4359 #endif
4360
4361         if (pme->nnodes > 1)
4362         {
4363             /* Communicate the overlapping part of the fftgrid.
4364              * For this communication call we need to check pme->bUseThreads
4365              * to have all ranks communicate here, regardless of pme->nthread.
4366              */
4367             sum_fftgrid_dd(pme, fftgrid, grid_index);
4368         }
4369     }
4370
4371 #ifdef PME_TIME_THREADS
4372     cnt++;
4373     if (cnt % 20 == 0)
4374     {
4375         printf("idx %.2f spread %.2f red %.2f",
4376                cs1*1e-9, cs2*1e-9, cs3*1e-9);
4377 #ifdef PME_TIME_SPREAD
4378         for (thread = 0; thread < nthread; thread++)
4379         {
4380             printf(" %.2f", cs1a[thread]*1e-9);
4381         }
4382 #endif
4383         printf("\n");
4384     }
4385 #endif
4386 }
4387
4388
4389 static void dump_grid(FILE *fp,
4390                       int sx, int sy, int sz, int nx, int ny, int nz,
4391                       int my, int mz, const real *g)
4392 {
4393     int x, y, z;
4394
4395     for (x = 0; x < nx; x++)
4396     {
4397         for (y = 0; y < ny; y++)
4398         {
4399             for (z = 0; z < nz; z++)
4400             {
4401                 fprintf(fp, "%2d %2d %2d %6.3f\n",
4402                         sx+x, sy+y, sz+z, g[(x*my + y)*mz + z]);
4403             }
4404         }
4405     }
4406 }
4407
4408 static void dump_local_fftgrid(gmx_pme_t pme, const real *fftgrid)
4409 {
4410     ivec local_fft_ndata, local_fft_offset, local_fft_size;
4411
4412     gmx_parallel_3dfft_real_limits(pme->pfft_setup[PME_GRID_QA],
4413                                    local_fft_ndata,
4414                                    local_fft_offset,
4415                                    local_fft_size);
4416
4417     dump_grid(stderr,
4418               pme->pmegrid_start_ix,
4419               pme->pmegrid_start_iy,
4420               pme->pmegrid_start_iz,
4421               pme->pmegrid_nx-pme->pme_order+1,
4422               pme->pmegrid_ny-pme->pme_order+1,
4423               pme->pmegrid_nz-pme->pme_order+1,
4424               local_fft_size[YY],
4425               local_fft_size[ZZ],
4426               fftgrid);
4427 }
4428
4429
4430 void gmx_pme_calc_energy(gmx_pme_t pme, int n, rvec *x, real *q, real *V)
4431 {
4432     pme_atomcomm_t *atc;
4433     pmegrids_t *grid;
4434
4435     if (pme->nnodes > 1)
4436     {
4437         gmx_incons("gmx_pme_calc_energy called in parallel");
4438     }
4439     if (pme->bFEP_q)
4440     {
4441         gmx_incons("gmx_pme_calc_energy with free energy");
4442     }
4443
4444     atc            = &pme->atc_energy;
4445     atc->nthread   = 1;
4446     if (atc->spline == NULL)
4447     {
4448         snew(atc->spline, atc->nthread);
4449     }
4450     atc->nslab     = 1;
4451     atc->bSpread   = TRUE;
4452     atc->pme_order = pme->pme_order;
4453     atc->n         = n;
4454     pme_realloc_atomcomm_things(atc);
4455     atc->x           = x;
4456     atc->coefficient = q;
4457
4458     /* We only use the A-charges grid */
4459     grid = &pme->pmegrid[PME_GRID_QA];
4460
4461     /* Only calculate the spline coefficients, don't actually spread */
4462     spread_on_grid(pme, atc, NULL, TRUE, FALSE, pme->fftgrid[PME_GRID_QA], FALSE, PME_GRID_QA);
4463
4464     *V = gather_energy_bsplines(pme, grid->grid.grid, atc);
4465 }
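
     /* A minimal usage sketch for the routine above (hypothetical caller,
      * single-rank runs only, e.g. test-particle insertion); n_test, x_test
      * and q_test are assumed, illustrative names:
      *
      *     real V;
      *     gmx_pme_calc_energy(pme, n_test, x_test, q_test, &V);
      *
      * Only the interpolation indices and spline coefficients of the given
      * charges are computed; the energy is gathered from the existing
      * A-charge grid without spreading onto it.
      */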
4466
4467
4468 static void reset_pmeonly_counters(gmx_wallcycle_t wcycle,
4469                                    gmx_walltime_accounting_t walltime_accounting,
4470                                    t_nrnb *nrnb, t_inputrec *ir,
4471                                    gmx_int64_t step)
4472 {
4473     /* Reset all the counters related to performance over the run */
4474     wallcycle_stop(wcycle, ewcRUN);
4475     wallcycle_reset_all(wcycle);
4476     init_nrnb(nrnb);
4477     if (ir->nsteps >= 0)
4478     {
4479         /* ir->nsteps is not used here, but we update it for consistency */
4480         ir->nsteps -= step - ir->init_step;
4481     }
4482     ir->init_step = step;
4483     wallcycle_start(wcycle, ewcRUN);
4484     walltime_accounting_start(walltime_accounting);
4485 }
4486
4487
4488 static void gmx_pmeonly_switch(int *npmedata, gmx_pme_t **pmedata,
4489                                ivec grid_size,
4490                                t_commrec *cr, t_inputrec *ir,
4491                                gmx_pme_t *pme_ret)
4492 {
4493     int ind;
4494     gmx_pme_t pme = NULL;
4495
4496     ind = 0;
4497     while (ind < *npmedata)
4498     {
4499         pme = (*pmedata)[ind];
4500         if (pme->nkx == grid_size[XX] &&
4501             pme->nky == grid_size[YY] &&
4502             pme->nkz == grid_size[ZZ])
4503         {
4504             *pme_ret = pme;
4505
4506             return;
4507         }
4508
4509         ind++;
4510     }
4511
4512     (*npmedata)++;
4513     srenew(*pmedata, *npmedata);
4514
4515     /* Generate a new PME data structure, copying part of the old pointers */
4516     gmx_pme_reinit(&((*pmedata)[ind]), cr, pme, ir, grid_size);
4517
4518     *pme_ret = (*pmedata)[ind];
4519 }
4520
4521 int gmx_pmeonly(gmx_pme_t pme,
4522                 t_commrec *cr,    t_nrnb *mynrnb,
4523                 gmx_wallcycle_t wcycle,
4524                 gmx_walltime_accounting_t walltime_accounting,
4525                 real ewaldcoeff_q, real ewaldcoeff_lj,
4526                 t_inputrec *ir)
4527 {
4528     int npmedata;
4529     gmx_pme_t *pmedata;
4530     gmx_pme_pp_t pme_pp;
4531     int  ret;
4532     int  natoms;
4533     matrix box;
4534     rvec *x_pp      = NULL, *f_pp = NULL;
4535     real *chargeA   = NULL, *chargeB = NULL;
4536     real *c6A       = NULL, *c6B = NULL;
4537     real *sigmaA    = NULL, *sigmaB = NULL;
4538     real lambda_q   = 0;
4539     real lambda_lj  = 0;
4540     int  maxshift_x = 0, maxshift_y = 0;
4541     real energy_q, energy_lj, dvdlambda_q, dvdlambda_lj;
4542     matrix vir_q, vir_lj;
4543     float cycles;
4544     int  count;
4545     gmx_bool bEnerVir;
4546     int pme_flags;
4547     gmx_int64_t step, step_rel;
4548     ivec grid_switch;
4549
4550     /* This data is only used with PME tuning, i.e. switching PME grids */
4551     npmedata = 1;
4552     snew(pmedata, npmedata);
4553     pmedata[0] = pme;
4554
4555     pme_pp = gmx_pme_pp_init(cr);
4556
4557     init_nrnb(mynrnb);
4558
4559     count = 0;
4560     do /****** this is a quasi-loop over time steps! */
4561     {
4562         /* The reason for having a loop here is PME grid tuning/switching */
4563         do
4564         {
4565             /* Domain decomposition */
4566             ret = gmx_pme_recv_coeffs_coords(pme_pp,
4567                                              &natoms,
4568                                              &chargeA, &chargeB,
4569                                              &c6A, &c6B,
4570                                              &sigmaA, &sigmaB,
4571                                              box, &x_pp, &f_pp,
4572                                              &maxshift_x, &maxshift_y,
4573                                              &pme->bFEP_q, &pme->bFEP_lj,
4574                                              &lambda_q, &lambda_lj,
4575                                              &bEnerVir,
4576                                              &pme_flags,
4577                                              &step,
4578                                              grid_switch, &ewaldcoeff_q, &ewaldcoeff_lj);
4579
4580             if (ret == pmerecvqxSWITCHGRID)
4581             {
4582                 /* Switch the PME grid to grid_switch */
4583                 gmx_pmeonly_switch(&npmedata, &pmedata, grid_switch, cr, ir, &pme);
4584             }
4585
4586             if (ret == pmerecvqxRESETCOUNTERS)
4587             {
4588                 /* Reset the cycle and flop counters */
4589                 reset_pmeonly_counters(wcycle, walltime_accounting, mynrnb, ir, step);
4590             }
4591         }
4592         while (ret == pmerecvqxSWITCHGRID || ret == pmerecvqxRESETCOUNTERS);
4593
4594         if (ret == pmerecvqxFINISH)
4595         {
4596             /* We should stop: break out of the loop */
4597             break;
4598         }
4599
4600         step_rel = step - ir->init_step;
4601
4602         if (count == 0)
4603         {
4604             wallcycle_start(wcycle, ewcRUN);
4605             walltime_accounting_start(walltime_accounting);
4606         }
4607
4608         wallcycle_start(wcycle, ewcPMEMESH);
4609
4610         dvdlambda_q  = 0;
4611         dvdlambda_lj = 0;
4612         clear_mat(vir_q);
4613         clear_mat(vir_lj);
4614
4615         gmx_pme_do(pme, 0, natoms, x_pp, f_pp,
4616                    chargeA, chargeB, c6A, c6B, sigmaA, sigmaB, box,
4617                    cr, maxshift_x, maxshift_y, mynrnb, wcycle,
4618                    vir_q, ewaldcoeff_q, vir_lj, ewaldcoeff_lj,
4619                    &energy_q, &energy_lj, lambda_q, lambda_lj, &dvdlambda_q, &dvdlambda_lj,
4620                    pme_flags | GMX_PME_DO_ALL_F | (bEnerVir ? GMX_PME_CALC_ENER_VIR : 0));
4621
4622         cycles = wallcycle_stop(wcycle, ewcPMEMESH);
4623
4624         gmx_pme_send_force_vir_ener(pme_pp,
4625                                     f_pp, vir_q, energy_q, vir_lj, energy_lj,
4626                                     dvdlambda_q, dvdlambda_lj, cycles);
4627
4628         count++;
4629     } /***** end of quasi-loop, we stop with the break above */
4630     while (TRUE);
4631
4632     walltime_accounting_end(walltime_accounting);
4633
4634     return 0;
4635 }
4636
4637 static void
4638 calc_initial_lb_coeffs(gmx_pme_t pme, real *local_c6, real *local_sigma)
4639 {
4640     int  i;
4641
4642     for (i = 0; i < pme->atc[0].n; ++i)
4643     {
4644         real sigma4;
4645
4646         sigma4                     = local_sigma[i];
4647         sigma4                     = sigma4*sigma4;
4648         sigma4                     = sigma4*sigma4;
4649         pme->atc[0].coefficient[i] = local_c6[i] / sigma4;
4650     }
4651 }
4652
4653 static void
4654 calc_next_lb_coeffs(gmx_pme_t pme, real *local_sigma)
4655 {
4656     int  i;
4657
4658     for (i = 0; i < pme->atc[0].n; ++i)
4659     {
4660         pme->atc[0].coefficient[i] *= local_sigma[i];
4661     }
4662 }
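
     /* Note on the two helpers above: calc_initial_lb_coeffs sets each grid
      * coefficient to c6_i/sigma_i^4, and each call to calc_next_lb_coeffs
      * multiplies in one further factor of sigma_i, so the successive LJ-PME
      * grids used with the Lorentz-Berthelot combination rule see the
      * sequence c6_i/sigma_i^4, c6_i/sigma_i^3, c6_i/sigma_i^2, ...
      */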
4663
4664 static void
4665 do_redist_pos_coeffs(gmx_pme_t pme, t_commrec *cr, int start, int homenr,
4666                      gmx_bool bFirst, rvec x[], real *data)
4667 {
4668     int      d;
4669     pme_atomcomm_t *atc;
4670     atc = &pme->atc[0];
4671
4672     for (d = pme->ndecompdim - 1; d >= 0; d--)
4673     {
4674         int             n_d;
4675         rvec           *x_d;
4676         real           *param_d;
4677
4678         if (d == pme->ndecompdim - 1)
4679         {
4680             n_d     = homenr;
4681             x_d     = x + start;
4682             param_d = data;
4683         }
4684         else
4685         {
4686             n_d     = pme->atc[d + 1].n;
4687             x_d     = atc->x;
4688             param_d = atc->coefficient;
4689         }
4690         atc      = &pme->atc[d];
4691         atc->npd = n_d;
4692         if (atc->npd > atc->pd_nalloc)
4693         {
4694             atc->pd_nalloc = over_alloc_dd(atc->npd);
4695             srenew(atc->pd, atc->pd_nalloc);
4696         }
4697         pme_calc_pidx_wrapper(n_d, pme->recipbox, x_d, atc);
4698         where();
4699         /* Redistribute x (only once) and qA/c6A or qB/c6B */
4700         if (DOMAINDECOMP(cr))
4701         {
4702             dd_pmeredist_pos_coeffs(pme, n_d, bFirst, x_d, param_d, atc);
4703         }
4704     }
4705 }
4706
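     /* Computes the reciprocal-space (mesh) part of the Ewald sums for
      * Coulomb and, when requested, Lennard-Jones interactions: spread the
      * coefficients on the grid(s), 3D-FFT, solve in k-space, transform back
      * and gather the forces, for up to two free-energy states per
      * interaction type. Which stages run is controlled by flags
      * (GMX_PME_SPREAD, GMX_PME_SOLVE, GMX_PME_CALC_F, GMX_PME_CALC_ENER_VIR,
      * GMX_PME_DO_COULOMB, GMX_PME_DO_LJ).
      */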
4707 int gmx_pme_do(gmx_pme_t pme,
4708                int start,       int homenr,
4709                rvec x[],        rvec f[],
4710                real *chargeA,   real *chargeB,
4711                real *c6A,       real *c6B,
4712                real *sigmaA,    real *sigmaB,
4713                matrix box, t_commrec *cr,
4714                int  maxshift_x, int maxshift_y,
4715                t_nrnb *nrnb,    gmx_wallcycle_t wcycle,
4716                matrix vir_q,      real ewaldcoeff_q,
4717                matrix vir_lj,   real ewaldcoeff_lj,
4718                real *energy_q,  real *energy_lj,
4719                real lambda_q, real lambda_lj,
4720                real *dvdlambda_q, real *dvdlambda_lj,
4721                int flags)
4722 {
4723     int     d, i, j, k, ntot, npme, grid_index, max_grid_index;
4724     int     nx, ny, nz;
4725     int     n_d, local_ny;
4726     pme_atomcomm_t *atc = NULL;
4727     pmegrids_t *pmegrid = NULL;
4728     real    *grid       = NULL;
4729     real    *ptr;
4730     rvec    *x_d, *f_d;
4731     real    *coefficient = NULL;
4732     real    energy_AB[4];
4733     matrix  vir_AB[4];
4734     real    scale, lambda;
4735     gmx_bool bClearF;
4736     gmx_parallel_3dfft_t pfft_setup;
4737     real *  fftgrid;
4738     t_complex * cfftgrid;
4739     int     thread;
4740     gmx_bool bFirst, bDoSplines;
4741     int fep_state;
4742     int fep_states_lj           = pme->bFEP_lj ? 2 : 1;
4743     const gmx_bool bCalcEnerVir = flags & GMX_PME_CALC_ENER_VIR;
4744     const gmx_bool bCalcF       = flags & GMX_PME_CALC_F;
4745
4746     assert(pme->nnodes > 0);
4747     assert(pme->nnodes == 1 || pme->ndecompdim > 0);
4748
4749     if (pme->nnodes > 1)
4750     {
4751         atc      = &pme->atc[0];
4752         atc->npd = homenr;
4753         if (atc->npd > atc->pd_nalloc)
4754         {
4755             atc->pd_nalloc = over_alloc_dd(atc->npd);
4756             srenew(atc->pd, atc->pd_nalloc);
4757         }
4758         for (d = pme->ndecompdim-1; d >= 0; d--)
4759         {
4760             atc           = &pme->atc[d];
4761             atc->maxshift = (atc->dimind == 0 ? maxshift_x : maxshift_y);
4762         }
4763     }
4764     else
4765     {
4766         atc = &pme->atc[0];
4767         /* This could be necessary for TPI */
4768         pme->atc[0].n = homenr;
4769         if (DOMAINDECOMP(cr))
4770         {
4771             pme_realloc_atomcomm_things(atc);
4772         }
4773         atc->x = x;
4774         atc->f = f;
4775     }
4776
4777     m_inv_ur0(box, pme->recipbox);
4778     bFirst = TRUE;
4779
4780     /* For simplicity, we construct the splines for all particles if
4781      * more than one PME calculation is needed. Some optimization
4782      * could be done by keeping track of which atoms have splines
4783      * constructed, and construct new splines on each pass for atoms
4784      * that don't yet have them.
4785      */
4786
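         /* With FEP, or with both Coulomb and LJ-PME, more than one grid is
          * spread, so splines are built for all atoms up front (including
          * atoms whose coefficient is zero in a given pass), as described
          * in the comment above.
          */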
4787     bDoSplines = pme->bFEP || ((flags & GMX_PME_DO_COULOMB) && (flags & GMX_PME_DO_LJ));
4788
4789     /* We need a maximum of four separate PME calculations:
4790      * grid_index=0: Coulomb PME with charges from state A
4791      * grid_index=1: Coulomb PME with charges from state B
4792      * grid_index=2: LJ PME with C6 from state A
4793      * grid_index=3: LJ PME with C6 from state B
4794      * For Lorentz-Berthelot combination rules, a separate loop is used to
4795      * calculate all the terms.
4796      */
4797
4798     /* If we are doing LJ-PME with LB, we only do Q here */
4799     max_grid_index = (pme->ljpme_combination_rule == eljpmeLB) ? DO_Q : DO_Q_AND_LJ;
4800
4801     for (grid_index = 0; grid_index < max_grid_index; ++grid_index)
4802     {
4803         /* Check if we should do calculations at this grid_index
4804          * If grid_index is odd we should be doing FEP
4805          * If grid_index < 2 we should be doing electrostatic PME
4806          * If grid_index >= 2 we should be doing LJ-PME
4807          */
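             /* For example, a non-FEP run with both Coulomb and LJ-PME
              * (geometric combination rules) executes only grid_index 0
              * and 2; grids 1 and 3 are skipped because bFEP_q and bFEP_lj
              * are FALSE.
              */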
4808         if ((grid_index <  DO_Q && (!(flags & GMX_PME_DO_COULOMB) ||
4809                                     (grid_index == 1 && !pme->bFEP_q))) ||
4810             (grid_index >= DO_Q && (!(flags & GMX_PME_DO_LJ) ||
4811                                     (grid_index == 3 && !pme->bFEP_lj))))
4812         {
4813             continue;
4814         }
4815         /* Unpack structure */
4816         pmegrid    = &pme->pmegrid[grid_index];
4817         fftgrid    = pme->fftgrid[grid_index];
4818         cfftgrid   = pme->cfftgrid[grid_index];
4819         pfft_setup = pme->pfft_setup[grid_index];
4820         switch (grid_index)
4821         {
4822             case 0: coefficient = chargeA + start; break;
4823             case 1: coefficient = chargeB + start; break;
4824             case 2: coefficient = c6A + start; break;
4825             case 3: coefficient = c6B + start; break;
4826         }
4827
4828         grid = pmegrid->grid.grid;
4829
4830         if (debug)
4831         {
4832             fprintf(debug, "PME: number of ranks = %d, rank = %d\n",
4833                     cr->nnodes, cr->nodeid);
4834             fprintf(debug, "Grid = %p\n", (void*)grid);
4835             if (grid == NULL)
4836             {
4837                 gmx_fatal(FARGS, "No grid!");
4838             }
4839         }
4840         where();
4841
4842         if (pme->nnodes == 1)
4843         {
4844             atc->coefficient = coefficient;
4845         }
4846         else
4847         {
4848             wallcycle_start(wcycle, ewcPME_REDISTXF);
4849             do_redist_pos_coeffs(pme, cr, start, homenr, bFirst, x, coefficient);
4850             where();
4851
4852             wallcycle_stop(wcycle, ewcPME_REDISTXF);
4853         }
4854
4855         if (debug)
4856         {
4857             fprintf(debug, "Rank= %6d, pme local particles=%6d\n",
4858                     cr->nodeid, atc->n);
4859         }
4860
4861         if (flags & GMX_PME_SPREAD)
4862         {
4863             wallcycle_start(wcycle, ewcPME_SPREADGATHER);
4864
4865             /* Spread the coefficients on a grid */
4866             spread_on_grid(pme, &pme->atc[0], pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);
4867
4868             if (bFirst)
4869             {
4870                 inc_nrnb(nrnb, eNR_WEIGHTS, DIM*atc->n);
4871             }
4872             inc_nrnb(nrnb, eNR_SPREADBSP,
4873                      pme->pme_order*pme->pme_order*pme->pme_order*atc->n);
4874
4875             if (!pme->bUseThreads)
4876             {
4877                 wrap_periodic_pmegrid(pme, grid);
4878
4879                 /* sum contributions to local grid from other nodes */
4880 #ifdef GMX_MPI
4881                 if (pme->nnodes > 1)
4882                 {
4883                     gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
4884                     where();
4885                 }
4886 #endif
4887
4888                 copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
4889             }
4890
4891             wallcycle_stop(wcycle, ewcPME_SPREADGATHER);
4892
4893             /*
4894                dump_local_fftgrid(pme,fftgrid);
4895                exit(0);
4896              */
4897         }
4898
4899         /* Here we start a large thread parallel region */
4900 #pragma omp parallel num_threads(pme->nthread) private(thread)
4901         {
4902             thread = gmx_omp_get_thread_num();
4903             if (flags & GMX_PME_SOLVE)
4904             {
4905                 int loop_count;
4906
4907                 /* do 3d-fft */
4908                 if (thread == 0)
4909                 {
4910                     wallcycle_start(wcycle, ewcPME_FFT);
4911                 }
4912                 gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX,
4913                                            thread, wcycle);
4914                 if (thread == 0)
4915                 {
4916                     wallcycle_stop(wcycle, ewcPME_FFT);
4917                 }
4918                 where();
4919
4920                 /* solve in k-space for our local cells */
4921                 if (thread == 0)
4922                 {
4923                     wallcycle_start(wcycle, (grid_index < DO_Q ? ewcPME_SOLVE : ewcLJPME));
4924                 }
4925                 if (grid_index < DO_Q)
4926                 {
4927                     loop_count =
4928                         solve_pme_yzx(pme, cfftgrid, ewaldcoeff_q,
4929                                       box[XX][XX]*box[YY][YY]*box[ZZ][ZZ],
4930                                       bCalcEnerVir,
4931                                       pme->nthread, thread);
4932                 }
4933                 else
4934                 {
4935                     loop_count =
4936                         solve_pme_lj_yzx(pme, &cfftgrid, FALSE, ewaldcoeff_lj,
4937                                          box[XX][XX]*box[YY][YY]*box[ZZ][ZZ],
4938                                          bCalcEnerVir,
4939                                          pme->nthread, thread);
4940                 }
4941
4942                 if (thread == 0)
4943                 {
4944                     wallcycle_stop(wcycle, (grid_index < DO_Q ? ewcPME_SOLVE : ewcLJPME));
4945                     where();
4946                     inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
4947                 }
4948             }
4949
4950             if (bCalcF)
4951             {
4952                 /* do 3d-invfft */
4953                 if (thread == 0)
4954                 {
4955                     where();
4956                     wallcycle_start(wcycle, ewcPME_FFT);
4957                 }
4958                 gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL,
4959                                            thread, wcycle);
4960                 if (thread == 0)
4961                 {
4962                     wallcycle_stop(wcycle, ewcPME_FFT);
4963
4964                     where();
4965
4966                     if (pme->nodeid == 0)
4967                     {
4968                         ntot  = pme->nkx*pme->nky*pme->nkz;
4969                         npme  = ntot*log((real)ntot)/log(2.0);
4970                         inc_nrnb(nrnb, eNR_FFT, 2*npme);
4971                     }
4972
4973                     /* Note: this wallcycle region is closed below
4974                        outside an OpenMP region, so take care if
4975                        refactoring code here. */
4976                     wallcycle_start(wcycle, ewcPME_SPREADGATHER);
4977                 }
4978
4979                 copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
4980             }
4981         }
4982         /* End of thread parallel section.
4983          * With MPI we have to synchronize here before gmx_sum_qgrid_dd.
4984          */
4985
4986         if (bCalcF)
4987         {
4988             /* distribute local grid to all nodes */
4989 #ifdef GMX_MPI
4990             if (pme->nnodes > 1)
4991             {
4992                 gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
4993             }
4994 #endif
4995             where();
4996
4997             unwrap_periodic_pmegrid(pme, grid);
4998
4999             /* interpolate forces for our local atoms */
5000
5001             where();
5002
5003             /* If we are running without parallelization,
5004              * atc->f is the actual force array, not a buffer,
5005              * therefore we should not clear it.
5006              */
5007             lambda  = grid_index < DO_Q ? lambda_q : lambda_lj;
5008             bClearF = (bFirst && PAR(cr));
5009 #pragma omp parallel for num_threads(pme->nthread) schedule(static)
5010             for (thread = 0; thread < pme->nthread; thread++)
5011             {
5012                 gather_f_bsplines(pme, grid, bClearF, atc,
5013                                   &atc->spline[thread],
5014                                   pme->bFEP ? (grid_index % 2 == 0 ? 1.0-lambda : lambda) : 1.0);
5015             }
5016
5017             where();
5018
5019             inc_nrnb(nrnb, eNR_GATHERFBSP,
5020                      pme->pme_order*pme->pme_order*pme->pme_order*pme->atc[0].n);
5021             /* Note: this wallcycle region is opened above inside an OpenMP
5022                region, so take care if refactoring code here. */
5023             wallcycle_stop(wcycle, ewcPME_SPREADGATHER);
5024         }
5025
5026         if (bCalcEnerVir)
5027         {
5028             /* This should only be called on the master thread
5029              * and after the threads have synchronized.
5030              */
5031             if (grid_index < 2)
5032             {
5033                 get_pme_ener_vir_q(pme, pme->nthread, &energy_AB[grid_index], vir_AB[grid_index]);
5034             }
5035             else
5036             {
5037                 get_pme_ener_vir_lj(pme, pme->nthread, &energy_AB[grid_index], vir_AB[grid_index]);
5038             }
5039         }
5040         bFirst = FALSE;
5041     } /* of grid_index-loop */
5042
5043     /* For Lorentz-Berthelot combination rules in LJ-PME, we need to calculate
5044      * seven terms. */
5045
5046     if ((flags & GMX_PME_DO_LJ) && pme->ljpme_combination_rule == eljpmeLB)
5047     {
5048         /* Loop over A- and B-state if we are doing FEP */
5049         for (fep_state = 0; fep_state < fep_states_lj; ++fep_state)
5050         {
5051             real *local_c6 = NULL, *local_sigma = NULL, *RedistC6 = NULL, *RedistSigma = NULL;
5052             if (pme->nnodes == 1)
5053             {
5054                 if (pme->lb_buf1 == NULL)
5055                 {
5056                     pme->lb_buf_nalloc = pme->atc[0].n;
5057                     snew(pme->lb_buf1, pme->lb_buf_nalloc);
5058                 }
5059                 pme->atc[0].coefficient = pme->lb_buf1;
5060                 switch (fep_state)
5061                 {
5062                     case 0:
5063                         local_c6      = c6A;
5064                         local_sigma   = sigmaA;
5065                         break;
5066                     case 1:
5067                         local_c6      = c6B;
5068                         local_sigma   = sigmaB;
5069                         break;
5070                     default:
5071                         gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
5072                 }
5073             }
5074             else
5075             {
5076                 atc = &pme->atc[0];
5077                 switch (fep_state)
5078                 {
5079                     case 0:
5080                         RedistC6      = c6A;
5081                         RedistSigma   = sigmaA;
5082                         break;
5083                     case 1:
5084                         RedistC6      = c6B;
5085                         RedistSigma   = sigmaB;
5086                         break;
5087                     default:
5088                         gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
5089                 }
5090                 wallcycle_start(wcycle, ewcPME_REDISTXF);
5091
5092                 do_redist_pos_coeffs(pme, cr, start, homenr, bFirst, x, RedistC6);
5093                 if (pme->lb_buf_nalloc < atc->n)
5094                 {
5095                     pme->lb_buf_nalloc = atc->nalloc;
5096                     srenew(pme->lb_buf1, pme->lb_buf_nalloc);
5097                     srenew(pme->lb_buf2, pme->lb_buf_nalloc);
5098                 }
5099                 local_c6 = pme->lb_buf1;
5100                 for (i = 0; i < atc->n; ++i)
5101                 {
5102                     local_c6[i] = atc->coefficient[i];
5103                 }
5104                 where();
5105
5106                 do_redist_pos_coeffs(pme, cr, start, homenr, FALSE, x, RedistSigma);
5107                 local_sigma = pme->lb_buf2;
5108                 for (i = 0; i < atc->n; ++i)
5109                 {
5110                     local_sigma[i] = atc->coefficient[i];
5111                 }
5112                 where();
5113
5114                 wallcycle_stop(wcycle, ewcPME_REDISTXF);
5115             }
5116             calc_initial_lb_coeffs(pme, local_c6, local_sigma);
5117
5118             /* Seven terms in LJ-PME with LB, grid_index < 2 reserved for electrostatics */
5119             for (grid_index = 2; grid_index < 9; ++grid_index)
5120             {
5121                 /* Unpack structure */
5122                 pmegrid    = &pme->pmegrid[grid_index];
5123                 fftgrid    = pme->fftgrid[grid_index];
5124                 cfftgrid   = pme->cfftgrid[grid_index];
5125                 pfft_setup = pme->pfft_setup[grid_index];
5126                 calc_next_lb_coeffs(pme, local_sigma);
5127                 grid = pmegrid->grid.grid;
5128                 where();
5129
5130                 if (flags & GMX_PME_SPREAD)
5131                 {
5132                     wallcycle_start(wcycle, ewcPME_SPREADGATHER);
5133                     /* Spread the c6 on a grid */
5134                     spread_on_grid(pme, &pme->atc[0], pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);
5135
5136                     if (bFirst)
5137                     {
5138                         inc_nrnb(nrnb, eNR_WEIGHTS, DIM*atc->n);
5139                     }
5140
5141                     inc_nrnb(nrnb, eNR_SPREADBSP,
5142                              pme->pme_order*pme->pme_order*pme->pme_order*atc->n);
5143                     if (pme->nthread == 1)
5144                     {
5145                         wrap_periodic_pmegrid(pme, grid);
5146                         /* sum contributions to local grid from other nodes */
5147 #ifdef GMX_MPI
5148                         if (pme->nnodes > 1)
5149                         {
5150                             gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
5151                             where();
5152                         }
5153 #endif
5154                         copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
5155                     }
5156                     wallcycle_stop(wcycle, ewcPME_SPREADGATHER);
5157                 }
5158                 /* Here we start a large thread parallel region */
5159 #pragma omp parallel num_threads(pme->nthread) private(thread)
5160                 {
5161                     thread = gmx_omp_get_thread_num();
5162                     if (flags & GMX_PME_SOLVE)
5163                     {
5164                         /* do 3d-fft */
5165                         if (thread == 0)
5166                         {
5167                             wallcycle_start(wcycle, ewcPME_FFT);
5168                         }
5169
5170                         gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX,
5171                                                    thread, wcycle);
5172                         if (thread == 0)
5173                         {
5174                             wallcycle_stop(wcycle, ewcPME_FFT);
5175                         }
5176                         where();
5177                     }
5178                 }
5179                 bFirst = FALSE;
5180             }
5181             if (flags & GMX_PME_SOLVE)
5182             {
5183                 /* solve in k-space for our local cells */
5184 #pragma omp parallel num_threads(pme->nthread) private(thread)
5185                 {
5186                     int loop_count;
5187                     thread = gmx_omp_get_thread_num();
5188                     if (thread == 0)
5189                     {
5190                         wallcycle_start(wcycle, ewcLJPME);
5191                     }
5192
5193                     loop_count =
5194                         solve_pme_lj_yzx(pme, &pme->cfftgrid[2], TRUE, ewaldcoeff_lj,
5195                                          box[XX][XX]*box[YY][YY]*box[ZZ][ZZ],
5196                                          bCalcEnerVir,
5197                                          pme->nthread, thread);
5198                     if (thread == 0)
5199                     {
5200                         wallcycle_stop(wcycle, ewcLJPME);
5201                         where();
5202                         inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
5203                     }
5204                 }
5205             }
5206
5207             if (bCalcEnerVir)
5208             {
5209                 /* This should only be called on the master thread and
5210                  * after the threads have synchronized.
5211                  */
5212                 get_pme_ener_vir_lj(pme, pme->nthread, &energy_AB[2+fep_state], vir_AB[2+fep_state]);
5213             }
5214
5215             if (bCalcF)
5216             {
5217                 bFirst = !(flags & GMX_PME_DO_COULOMB);
5218                 calc_initial_lb_coeffs(pme, local_c6, local_sigma);
5219                 for (grid_index = 8; grid_index >= 2; --grid_index)
5220                 {
5221                     /* Unpack structure */
5222                     pmegrid    = &pme->pmegrid[grid_index];
5223                     fftgrid    = pme->fftgrid[grid_index];
5224                     cfftgrid   = pme->cfftgrid[grid_index];
5225                     pfft_setup = pme->pfft_setup[grid_index];
5226                     grid       = pmegrid->grid.grid;
5227                     calc_next_lb_coeffs(pme, local_sigma);
5228                     where();
5229 #pragma omp parallel num_threads(pme->nthread) private(thread)
5230                     {
5231                         thread = gmx_omp_get_thread_num();
5232                         /* do 3d-invfft */
5233                         if (thread == 0)
5234                         {
5235                             where();
5236                             wallcycle_start(wcycle, ewcPME_FFT);
5237                         }
5238
5239                         gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL,
5240                                                    thread, wcycle);
5241                         if (thread == 0)
5242                         {
5243                             wallcycle_stop(wcycle, ewcPME_FFT);
5244
5245                             where();
5246
5247                             if (pme->nodeid == 0)
5248                             {
5249                                 ntot  = pme->nkx*pme->nky*pme->nkz;
5250                                 npme  = ntot*log((real)ntot)/log(2.0);
5251                                 inc_nrnb(nrnb, eNR_FFT, 2*npme);
5252                             }
5253                             wallcycle_start(wcycle, ewcPME_SPREADGATHER);
5254                         }
5255
5256                         copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
5257
5258                     } /*#pragma omp parallel*/
5259
5260                     /* distribute local grid to all nodes */
5261 #ifdef GMX_MPI
5262                     if (pme->nnodes > 1)
5263                     {
5264                         gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
5265                     }
5266 #endif
5267                     where();
5268
5269                     unwrap_periodic_pmegrid(pme, grid);
5270
5271                     /* interpolate forces for our local atoms */
5272                     where();
5273                     bClearF = (bFirst && PAR(cr));
5274                     scale   = pme->bFEP ? (fep_state < 1 ? 1.0-lambda_lj : lambda_lj) : 1.0;
5275                     scale  *= lb_scale_factor[grid_index-2];
5276 #pragma omp parallel for num_threads(pme->nthread) schedule(static)
5277                     for (thread = 0; thread < pme->nthread; thread++)
5278                     {
5279                         gather_f_bsplines(pme, grid, bClearF, &pme->atc[0],
5280                                           &pme->atc[0].spline[thread],
5281                                           scale);
5282                     }
5283                     where();
5284
5285                     inc_nrnb(nrnb, eNR_GATHERFBSP,
5286                              pme->pme_order*pme->pme_order*pme->pme_order*pme->atc[0].n);
5287                     wallcycle_stop(wcycle, ewcPME_SPREADGATHER);
5288
5289                     bFirst = FALSE;
5290                 } /* for (grid_index = 8; grid_index >= 2; --grid_index) */
5291             }     /* if (bCalcF) */
5292         }         /* for (fep_state = 0; fep_state < fep_states_lj; ++fep_state) */
5293     }             /* if ((flags & GMX_PME_DO_LJ) && pme->ljpme_combination_rule == eljpmeLB) */
5294
5295     if (bCalcF && pme->nnodes > 1)
5296     {
5297         wallcycle_start(wcycle, ewcPME_REDISTXF);
5298         for (d = 0; d < pme->ndecompdim; d++)
5299         {
5300             atc = &pme->atc[d];
5301             if (d == pme->ndecompdim - 1)
5302             {
5303                 n_d = homenr;
5304                 f_d = f + start;
5305             }
5306             else
5307             {
5308                 n_d = pme->atc[d+1].n;
5309                 f_d = pme->atc[d+1].f;
5310             }
5311             if (DOMAINDECOMP(cr))
5312             {
5313                 dd_pmeredist_f(pme, atc, n_d, f_d,
5314                                d == pme->ndecompdim-1 && pme->bPPnode);
5315             }
5316         }
5317
5318         wallcycle_stop(wcycle, ewcPME_REDISTXF);
5319     }
5320     where();
5321
5322     if (bCalcEnerVir)
5323     {
5324         if (flags & GMX_PME_DO_COULOMB)
5325         {
5326             if (!pme->bFEP_q)
5327             {
5328                 *energy_q = energy_AB[0];
5329                 m_add(vir_q, vir_AB[0], vir_q);
5330             }
5331             else
5332             {
5333                 *energy_q       = (1.0-lambda_q)*energy_AB[0] + lambda_q*energy_AB[1];
5334                 *dvdlambda_q   += energy_AB[1] - energy_AB[0];
5335                 for (i = 0; i < DIM; i++)
5336                 {
5337                     for (j = 0; j < DIM; j++)
5338                     {
5339                         vir_q[i][j] += (1.0-lambda_q)*vir_AB[0][i][j] +
5340                             lambda_q*vir_AB[1][i][j];
5341                     }
5342                 }
5343             }
5344             if (debug)
5345             {
5346                 fprintf(debug, "Electrostatic PME mesh energy: %g\n", *energy_q);
5347             }
5348         }
5349         else
5350         {
5351             *energy_q = 0;
5352         }
5353
5354         if (flags & GMX_PME_DO_LJ)
5355         {
5356             if (!pme->bFEP_lj)
5357             {
5358                 *energy_lj = energy_AB[2];
5359                 m_add(vir_lj, vir_AB[2], vir_lj);
5360             }
5361             else
5362             {
5363                 *energy_lj     = (1.0-lambda_lj)*energy_AB[2] + lambda_lj*energy_AB[3];
5364                 *dvdlambda_lj += energy_AB[3] - energy_AB[2];
5365                 for (i = 0; i < DIM; i++)
5366                 {
5367                     for (j = 0; j < DIM; j++)
5368                     {
5369                         vir_lj[i][j] += (1.0-lambda_lj)*vir_AB[2][i][j] + lambda_lj*vir_AB[3][i][j];
5370                     }
5371                 }
5372             }
5373             if (debug)
5374             {
5375                 fprintf(debug, "Lennard-Jones PME mesh energy: %g\n", *energy_lj);
5376             }
5377         }
5378         else
5379         {
5380             *energy_lj = 0;
5381         }
5382     }
5383     return 0;
5384 }