1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5  * Copyright (c) 2001-2004, The GROMACS development team.
6  * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
7  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8  * and including many others, as listed in the AUTHORS file in the
9  * top-level source directory and at http://www.gromacs.org.
10  *
11  * GROMACS is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public License
13  * as published by the Free Software Foundation; either version 2.1
14  * of the License, or (at your option) any later version.
15  *
16  * GROMACS is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with GROMACS; if not, see
23  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
25  *
26  * If you want to redistribute modifications to GROMACS, please
27  * consider that scientific software is very special. Version
28  * control is crucial - bugs must be traceable. We will be happy to
29  * consider code for inclusion in the official distribution, but
30  * derived work must not be called official GROMACS. Details are found
31  * in the README & COPYING files - if they are missing, get the
32  * official version at http://www.gromacs.org.
33  *
34  * To help us fund GROMACS development, we humbly ask that you cite
35  * the research papers on the package. Check out http://www.gromacs.org.
36  */
37 /*! \internal \file
38  *
39  * \brief This file contains function definitions necessary for
40  * computing energies and forces for the PME long-ranged part (Coulomb
41  * and LJ).
42  *
43  * \author Erik Lindahl <erik@kth.se>
44  * \author Berk Hess <hess@kth.se>
45  * \ingroup module_ewald
46  */
47 /* IMPORTANT FOR DEVELOPERS:
48  *
49  * Triclinic pme stuff isn't entirely trivial, and we've experienced
50  * some bugs during development (many of them due to me). To avoid
51  * this in the future, please check the following things if you make
52  * changes in this file:
53  *
54  * 1. You should obtain identical (at least to the PME precision)
55  *    energies, forces, and virial for
56  *    a rectangular box and a triclinic one where the z (or y) axis is
57  *    tilted a whole box side. For instance you could use these boxes:
58  *
59  *    rectangular       triclinic
60  *     2  0  0           2  0  0
61  *     0  2  0           0  2  0
62  *     0  0  6           2  2  6
63  *
64  * 2. You should check the energy conservation in a triclinic box.
65  *
66  * It might seem like overkill, but better safe than sorry.
67  * /Erik 001109
68  */
69
70 #include "gmxpre.h"
71
72 #include "pme.h"
73
74 #include "config.h"
75
76 #include <assert.h>
77 #include <stdio.h>
78 #include <stdlib.h>
79 #include <string.h>
80
81 #include <cmath>
82
83 #include <algorithm>
84
85 #include "gromacs/ewald/ewald-utils.h"
86 #include "gromacs/fft/parallel_3dfft.h"
87 #include "gromacs/fileio/pdbio.h"
88 #include "gromacs/gmxlib/network.h"
89 #include "gromacs/gmxlib/nrnb.h"
90 #include "gromacs/math/gmxcomplex.h"
91 #include "gromacs/math/invertmatrix.h"
92 #include "gromacs/math/units.h"
93 #include "gromacs/math/vec.h"
94 #include "gromacs/math/vectypes.h"
95 #include "gromacs/mdtypes/commrec.h"
96 #include "gromacs/mdtypes/forcerec.h"
97 #include "gromacs/mdtypes/inputrec.h"
98 #include "gromacs/mdtypes/md_enums.h"
99 #include "gromacs/pbcutil/pbc.h"
100 #include "gromacs/timing/cyclecounter.h"
101 #include "gromacs/timing/wallcycle.h"
102 #include "gromacs/timing/walltime_accounting.h"
103 #include "gromacs/utility/basedefinitions.h"
104 #include "gromacs/utility/exceptions.h"
105 #include "gromacs/utility/fatalerror.h"
106 #include "gromacs/utility/gmxmpi.h"
107 #include "gromacs/utility/gmxomp.h"
108 #include "gromacs/utility/logger.h"
109 #include "gromacs/utility/real.h"
110 #include "gromacs/utility/smalloc.h"
111 #include "gromacs/utility/stringutil.h"
112 #include "gromacs/utility/unique_cptr.h"
113
114 #include "calculate-spline-moduli.h"
115 #include "pme-gather.h"
116 #include "pme-gpu-internal.h"
117 #include "pme-grid.h"
118 #include "pme-internal.h"
119 #include "pme-redistribute.h"
120 #include "pme-solve.h"
121 #include "pme-spline-work.h"
122 #include "pme-spread.h"
123
124 /*! \brief Number of bytes in a cache line.
125  *
126  * Must also be a multiple of the SIMD and SIMD4 register size, to
127  * preserve alignment.
128  */
129 const int gmxCacheLineSize = 64;
130
131 //! Set up coordinate communication
132 static void setup_coordinate_communication(pme_atomcomm_t *atc)
133 {
134     int nslab, n, i;
135     int fw, bw;
136
137     nslab = atc->nslab;
138
139     n = 0;
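    /* Fill node_dest/node_src with the nslab-1 communication partners in the
     * order they will be used: alternating between the rank i slabs ahead (fw)
     * and the rank i slabs behind (bw), for increasing i. */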
140     for (i = 1; i <= nslab/2; i++)
141     {
142         fw = (atc->nodeid + i) % nslab;
143         bw = (atc->nodeid - i + nslab) % nslab;
144         if (n < nslab - 1)
145         {
146             atc->node_dest[n] = fw;
147             atc->node_src[n]  = bw;
148             n++;
149         }
150         if (n < nslab - 1)
151         {
152             atc->node_dest[n] = bw;
153             atc->node_src[n]  = fw;
154             n++;
155         }
156     }
157 }
158
159 /*! \brief Round \p n up to the next multiple of \p f */
160 static int mult_up(int n, int f)
161 {
162     return ((n + f - 1)/f)*f;
163 }
164
165 /*! \brief Return estimate of the load imbalance from the PME grid not being a good match for the number of PME ranks */
166 static double estimate_pme_load_imbalance(struct gmx_pme_t *pme)
167 {
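    /* n1, n2 and n3 are the grid volumes with each decomposed dimension padded
     * up to a multiple of the corresponding rank count, one for each grid
     * layout used during the parallel FFT and solve; this padding relative to
     * nkx*nky*nkz is the source of the imbalance estimated here. */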
168     int    nma, nmi;
169     double n1, n2, n3;
170
171     nma = pme->nnodes_major;
172     nmi = pme->nnodes_minor;
173
174     n1 = mult_up(pme->nkx, nma)*mult_up(pme->nky, nmi)*pme->nkz;
175     n2 = mult_up(pme->nkx, nma)*mult_up(pme->nkz, nmi)*pme->nky;
176     n3 = mult_up(pme->nky, nma)*mult_up(pme->nkz, nmi)*pme->nkx;
177
178     /* pme_solve is roughly double the cost of an fft */
179
180     return (n1 + n2 + 3*n3)/(double)(6*pme->nkx*pme->nky*pme->nkz);
181 }
182
183 /*! \brief Initialize atom communication data structure */
184 static void init_atomcomm(struct gmx_pme_t *pme, pme_atomcomm_t *atc,
185                           int dimind, gmx_bool bSpread)
186 {
187     int thread;
188
189     atc->dimind    = dimind;
190     atc->nslab     = 1;
191     atc->nodeid    = 0;
192     atc->pd_nalloc = 0;
193 #if GMX_MPI
194     if (pme->nnodes > 1)
195     {
196         atc->mpi_comm = pme->mpi_comm_d[dimind];
197         MPI_Comm_size(atc->mpi_comm, &atc->nslab);
198         MPI_Comm_rank(atc->mpi_comm, &atc->nodeid);
199     }
200     if (debug)
201     {
202         fprintf(debug, "For PME atom communication in dimind %d: nslab %d rank %d\n", atc->dimind, atc->nslab, atc->nodeid);
203     }
204 #endif
205
206     atc->bSpread   = bSpread;
207     atc->pme_order = pme->pme_order;
208
209     if (atc->nslab > 1)
210     {
211         snew(atc->node_dest, atc->nslab);
212         snew(atc->node_src, atc->nslab);
213         setup_coordinate_communication(atc);
214
215         snew(atc->count_thread, pme->nthread);
216         for (thread = 0; thread < pme->nthread; thread++)
217         {
218             snew(atc->count_thread[thread], atc->nslab);
219         }
220         atc->count = atc->count_thread[0];
221         snew(atc->rcount, atc->nslab);
222         snew(atc->buf_index, atc->nslab);
223     }
224
225     atc->nthread = pme->nthread;
226     if (atc->nthread > 1)
227     {
228         snew(atc->thread_plist, atc->nthread);
229     }
230     snew(atc->spline, atc->nthread);
231     for (thread = 0; thread < atc->nthread; thread++)
232     {
233         if (atc->nthread > 1)
234         {
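            /* Allocate padding before and after the counts and offset the
             * pointer, so that different threads' data do not share cache lines. */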
235             snew(atc->thread_plist[thread].n, atc->nthread+2*gmxCacheLineSize);
236             atc->thread_plist[thread].n += gmxCacheLineSize;
237         }
238     }
239 }
240
241 /*! \brief Destroy an atom communication data structure and its child structs */
242 static void destroy_atomcomm(pme_atomcomm_t *atc)
243 {
244     sfree(atc->pd);
245     if (atc->nslab > 1)
246     {
247         sfree(atc->node_dest);
248         sfree(atc->node_src);
249         for (int i = 0; i < atc->nthread; i++)
250         {
251             sfree(atc->count_thread[i]);
252         }
253         sfree(atc->count_thread);
254         sfree(atc->rcount);
255         sfree(atc->buf_index);
256
257         sfree(atc->x);
258         sfree(atc->coefficient);
259         sfree(atc->f);
260     }
261     sfree(atc->idx);
262     sfree(atc->fractx);
263
264     sfree(atc->thread_idx);
265     for (int i = 0; i < atc->nthread; i++)
266     {
267         if (atc->nthread > 1)
268         {
269             int *n_ptr = atc->thread_plist[i].n - gmxCacheLineSize;
270             sfree(n_ptr);
271             sfree(atc->thread_plist[i].i);
272         }
273         sfree(atc->spline[i].ind);
274         for (int d = 0; d < ZZ; d++)
275         {
276             sfree(atc->spline[i].theta[d]);
277             sfree(atc->spline[i].dtheta[d]);
278         }
279         sfree_aligned(atc->spline[i].ptr_dtheta_z);
280         sfree_aligned(atc->spline[i].ptr_theta_z);
281     }
282     if (atc->nthread > 1)
283     {
284         sfree(atc->thread_plist);
285     }
286     sfree(atc->spline);
287 }
288
289 /*! \brief Initialize data structure for communication */
290 static void
291 init_overlap_comm(pme_overlap_t *  ol,
292                   int              norder,
293 #if GMX_MPI
294                   MPI_Comm         comm,
295 #endif
296                   int              nnodes,
297                   int              nodeid,
298                   int              ndata,
299                   int              commplainsize)
300 {
301     int              b, i;
302     pme_grid_comm_t *pgc;
303     gmx_bool         bCont;
304     int              fft_start, fft_end, send_index1, recv_index1;
305 #if GMX_MPI
306     MPI_Status       stat;
307
308     ol->mpi_comm = comm;
309 #endif
310
311     ol->nnodes = nnodes;
312     ol->nodeid = nodeid;
313
314     /* Linear translation of the PME grid won't affect reciprocal space
315      * calculations, so to optimize we only interpolate "upwards",
316      * which also means we only have to consider overlap in one direction.
317      * I.e., particles on this node might also be spread to grid indices
318      * that belong to higher nodes (modulo nnodes)
319      */
320
321     snew(ol->s2g0, ol->nnodes+1);
322     snew(ol->s2g1, ol->nnodes);
323     if (debug)
324     {
325         fprintf(debug, "PME slab boundaries:");
326     }
327     for (i = 0; i < nnodes; i++)
328     {
329         /* s2g0 the local interpolation grid start.
330          * s2g1 the local interpolation grid end.
331          * Since in calc_pidx we divide particles, and not grid lines,
332          * spatially uniform along dimension x or y, we need to round
333          * s2g0 down and s2g1 up.
334          */
335         ol->s2g0[i] = ( i   *ndata + 0       )/nnodes;
336         ol->s2g1[i] = ((i+1)*ndata + nnodes-1)/nnodes + norder - 1;
337
338         if (debug)
339         {
340             fprintf(debug, "  %3d %3d", ol->s2g0[i], ol->s2g1[i]);
341         }
342     }
343     ol->s2g0[nnodes] = ndata;
344     if (debug)
345     {
346         fprintf(debug, "\n");
347     }
348
349     /* Determine how many nodes we need to communicate the grid overlap with */
350     b = 0;
351     do
352     {
353         b++;
354         bCont = FALSE;
355         for (i = 0; i < nnodes; i++)
356         {
357             if ((i+b <  nnodes && ol->s2g1[i] > ol->s2g0[i+b]) ||
358                 (i+b >= nnodes && ol->s2g1[i] > ol->s2g0[i+b-nnodes] + ndata))
359             {
360                 bCont = TRUE;
361             }
362         }
363     }
364     while (bCont && b < nnodes);
365     ol->noverlap_nodes = b - 1;
366
367     snew(ol->send_id, ol->noverlap_nodes);
368     snew(ol->recv_id, ol->noverlap_nodes);
369     for (b = 0; b < ol->noverlap_nodes; b++)
370     {
371         ol->send_id[b] = (ol->nodeid + (b + 1)) % ol->nnodes;
372         ol->recv_id[b] = (ol->nodeid - (b + 1) + ol->nnodes) % ol->nnodes;
373     }
374     snew(ol->comm_data, ol->noverlap_nodes);
375
376     ol->send_size = 0;
377     for (b = 0; b < ol->noverlap_nodes; b++)
378     {
379         pgc = &ol->comm_data[b];
380         /* Send */
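        /* The send range is the part of our extended grid (ending at s2g1)
         * that falls inside the slab owned by send_id; when that slab wraps
         * around periodically, shift its boundaries by ndata. */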
381         fft_start        = ol->s2g0[ol->send_id[b]];
382         fft_end          = ol->s2g0[ol->send_id[b]+1];
383         if (ol->send_id[b] < nodeid)
384         {
385             fft_start += ndata;
386             fft_end   += ndata;
387         }
388         send_index1       = ol->s2g1[nodeid];
389         send_index1       = std::min(send_index1, fft_end);
390         pgc->send_index0  = fft_start;
391         pgc->send_nindex  = std::max(0, send_index1 - pgc->send_index0);
392         ol->send_size    += pgc->send_nindex;
393
394         /* We always start receiving at the first index of our slab */
395         fft_start        = ol->s2g0[ol->nodeid];
396         fft_end          = ol->s2g0[ol->nodeid+1];
397         recv_index1      = ol->s2g1[ol->recv_id[b]];
398         if (ol->recv_id[b] > nodeid)
399         {
400             recv_index1 -= ndata;
401         }
402         recv_index1      = std::min(recv_index1, fft_end);
403         pgc->recv_index0 = fft_start;
404         pgc->recv_nindex = std::max(0, recv_index1 - pgc->recv_index0);
405     }
406
407 #if GMX_MPI
408     /* Communicate the buffer sizes to receive */
409     for (b = 0; b < ol->noverlap_nodes; b++)
410     {
411         MPI_Sendrecv(&ol->send_size, 1, MPI_INT, ol->send_id[b], b,
412                      &ol->comm_data[b].recv_size, 1, MPI_INT, ol->recv_id[b], b,
413                      ol->mpi_comm, &stat);
414     }
415 #endif
416
417     /* For a non-divisible grid we need pme_order instead of pme_order-1 */
418     snew(ol->sendbuf, norder*commplainsize);
419     snew(ol->recvbuf, norder*commplainsize);
420 }
421
422 /*! \brief Destroy data structure for communication */
423 static void
424 destroy_overlap_comm(const pme_overlap_t *ol)
425 {
426     sfree(ol->s2g0);
427     sfree(ol->s2g1);
428     sfree(ol->send_id);
429     sfree(ol->recv_id);
430     sfree(ol->comm_data);
431     sfree(ol->sendbuf);
432     sfree(ol->recvbuf);
433 }
434
435 int minimalPmeGridSize(int pmeOrder)
436 {
437     /* The actual grid size limitations are:
438      *   serial:        >= pme_order
439      *   DD, no OpenMP: >= 2*(pme_order - 1)
440      *   DD, OpenMP:    >= pme_order + 1
441      * But we use the maximum for simplicity since in practice there is not
442      * much performance difference between pme_order and 2*(pme_order -1).
443      */
444     int minimalSize = 2*(pmeOrder - 1);
445
446     GMX_RELEASE_ASSERT(pmeOrder >= 3, "pmeOrder has to be >= 3");
447     GMX_RELEASE_ASSERT(minimalSize >= pmeOrder + 1, "The grid size should be >= pmeOrder + 1");
448
449     return minimalSize;
450 }
451
452 bool gmx_pme_check_restrictions(int pme_order,
453                                 int nkx, int nky, int nkz,
454                                 int nnodes_major,
455                                 bool useThreads,
456                                 bool errorsAreFatal)
457 {
458     if (pme_order > PME_ORDER_MAX)
459     {
460         if (!errorsAreFatal)
461         {
462             return false;
463         }
464
465         std::string message = gmx::formatString(
466                     "pme_order (%d) is larger than the maximum allowed value (%d). Modify and recompile the code if you really need such a high order.",
467                     pme_order, PME_ORDER_MAX);
468         GMX_THROW(InconsistentInputError(message));
469     }
470
471     const int minGridSize = minimalPmeGridSize(pme_order);
472     if (nkx < minGridSize ||
473         nky < minGridSize ||
474         nkz < minGridSize)
475     {
476         if (!errorsAreFatal)
477         {
478             return false;
479         }
480         std::string message = gmx::formatString(
481                     "The PME grid sizes need to be >= 2*(pme_order-1) (%d)",
482                     minGridSize);
483         GMX_THROW(InconsistentInputError(message));
484     }
485
486     /* Check for a limitation of the (current) sum_fftgrid_dd code.
487      * We only allow multiple communication pulses in dim 1, not in dim 0.
488      */
489     if (useThreads && (nkx < nnodes_major*pme_order &&
490                        nkx != nnodes_major*(pme_order - 1)))
491     {
492         if (!errorsAreFatal)
493         {
494             return false;
495         }
496         gmx_fatal(FARGS, "The number of PME grid lines per rank along x is %g. But when using OpenMP threads, the number of grid lines per rank along x should be >= pme_order (%d) or = pme_order-1. To resolve this issue, use fewer ranks along x (and possibly more along y and/or z) by specifying -dd manually.",
497                   nkx/(double)nnodes_major, pme_order);
498     }
499
500     return true;
501 }
502
503 /*! \brief Return \p enumerator divided by \p denominator, rounded up */
504 static int div_round_up(int enumerator, int denominator)
505 {
506     return (enumerator + denominator - 1)/denominator;
507 }
508
509 gmx_pme_t *gmx_pme_init(const t_commrec     *cr,
510                         int                  nnodes_major,
511                         int                  nnodes_minor,
512                         const t_inputrec    *ir,
513                         int                  homenr,
514                         gmx_bool             bFreeEnergy_q,
515                         gmx_bool             bFreeEnergy_lj,
516                         gmx_bool             bReproducible,
517                         real                 ewaldcoeff_q,
518                         real                 ewaldcoeff_lj,
519                         int                  nthread,
520                         PmeRunMode           runMode,
521                         PmeGpu              *pmeGpu,
522                         gmx_device_info_t   *gpuInfo,
523                         const gmx::MDLogger  & /*mdlog*/)
524 {
525     int               use_threads, sum_use_threads, i;
526     ivec              ndata;
527
528     if (debug)
529     {
530         fprintf(debug, "Creating PME data structures.\n");
531     }
532
533     unique_cptr<gmx_pme_t, gmx_pme_destroy> pme(new gmx_pme_t());
534
535     pme->sum_qgrid_tmp       = nullptr;
536     pme->sum_qgrid_dd_tmp    = nullptr;
537
538     pme->buf_nalloc          = 0;
539
540     pme->nnodes              = 1;
541     pme->bPPnode             = TRUE;
542
543     pme->nnodes_major        = nnodes_major;
544     pme->nnodes_minor        = nnodes_minor;
545
546 #if GMX_MPI
547     if (nnodes_major*nnodes_minor > 1)
548     {
549         pme->mpi_comm = cr->mpi_comm_mygroup;
550
551         MPI_Comm_rank(pme->mpi_comm, &pme->nodeid);
552         MPI_Comm_size(pme->mpi_comm, &pme->nnodes);
553         if (pme->nnodes != nnodes_major*nnodes_minor)
554         {
555             gmx_incons("PME rank count mismatch");
556         }
557     }
558     else
559     {
560         pme->mpi_comm = MPI_COMM_NULL;
561     }
562 #endif
563
564     if (pme->nnodes == 1)
565     {
566 #if GMX_MPI
567         pme->mpi_comm_d[0] = MPI_COMM_NULL;
568         pme->mpi_comm_d[1] = MPI_COMM_NULL;
569 #endif
570         pme->ndecompdim   = 0;
571         pme->nodeid_major = 0;
572         pme->nodeid_minor = 0;
573 #if GMX_MPI
574         pme->mpi_comm_d[0] = pme->mpi_comm_d[1] = MPI_COMM_NULL;
575 #endif
576     }
577     else
578     {
579         if (nnodes_minor == 1)
580         {
581 #if GMX_MPI
582             pme->mpi_comm_d[0] = pme->mpi_comm;
583             pme->mpi_comm_d[1] = MPI_COMM_NULL;
584 #endif
585             pme->ndecompdim   = 1;
586             pme->nodeid_major = pme->nodeid;
587             pme->nodeid_minor = 0;
588
589         }
590         else if (nnodes_major == 1)
591         {
592 #if GMX_MPI
593             pme->mpi_comm_d[0] = MPI_COMM_NULL;
594             pme->mpi_comm_d[1] = pme->mpi_comm;
595 #endif
596             pme->ndecompdim   = 1;
597             pme->nodeid_major = 0;
598             pme->nodeid_minor = pme->nodeid;
599         }
600         else
601         {
602             if (pme->nnodes % nnodes_major != 0)
603             {
604                 gmx_incons("For 2D PME decomposition, #PME ranks must be divisible by the number of ranks in the major dimension");
605             }
606             pme->ndecompdim = 2;
607
608 #if GMX_MPI
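            /* PME ranks are laid out with the minor dimension varying fastest:
             * ranks sharing nodeid % nnodes_minor span the major dimension and
             * ranks sharing nodeid / nnodes_minor span the minor dimension. */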
609             MPI_Comm_split(pme->mpi_comm, pme->nodeid % nnodes_minor,
610                            pme->nodeid, &pme->mpi_comm_d[0]);  /* My communicator along major dimension */
611             MPI_Comm_split(pme->mpi_comm, pme->nodeid/nnodes_minor,
612                            pme->nodeid, &pme->mpi_comm_d[1]);  /* My communicator along minor dimension */
613
614             MPI_Comm_rank(pme->mpi_comm_d[0], &pme->nodeid_major);
615             MPI_Comm_size(pme->mpi_comm_d[0], &pme->nnodes_major);
616             MPI_Comm_rank(pme->mpi_comm_d[1], &pme->nodeid_minor);
617             MPI_Comm_size(pme->mpi_comm_d[1], &pme->nnodes_minor);
618 #endif
619         }
620         pme->bPPnode = thisRankHasDuty(cr, DUTY_PP);
621     }
622
623     pme->nthread = nthread;
624
625     /* Check if any of the PME MPI ranks uses threads */
626     use_threads = (pme->nthread > 1 ? 1 : 0);
627 #if GMX_MPI
628     if (pme->nnodes > 1)
629     {
630         MPI_Allreduce(&use_threads, &sum_use_threads, 1, MPI_INT,
631                       MPI_SUM, pme->mpi_comm);
632     }
633     else
634 #endif
635     {
636         sum_use_threads = use_threads;
637     }
638     pme->bUseThreads = (sum_use_threads > 0);
639
640     if (ir->ePBC == epbcSCREW)
641     {
642         gmx_fatal(FARGS, "pme does not (yet) work with pbc = screw");
643     }
644
645     /* NOTE:
646      * It is likely that the current gmx_pme_do() routine supports calculating
647      * only Coulomb or LJ while gmx_pme_init() configures for both,
648      * but that has never been tested.
649  * It is also likely that the current gmx_pme_do() routine supports not
650  * calculating the free-energy contribution for Coulomb and/or LJ while
651  * gmx_pme_init() configures with free-energy, but that has never been tested.
652      */
653     pme->doCoulomb     = EEL_PME(ir->coulombtype);
654     pme->doLJ          = EVDW_PME(ir->vdwtype);
655     pme->bFEP_q        = ((ir->efep != efepNO) && bFreeEnergy_q);
656     pme->bFEP_lj       = ((ir->efep != efepNO) && bFreeEnergy_lj);
657     pme->bFEP          = (pme->bFEP_q || pme->bFEP_lj);
658     pme->nkx           = ir->nkx;
659     pme->nky           = ir->nky;
660     pme->nkz           = ir->nkz;
661     pme->bP3M          = (ir->coulombtype == eelP3M_AD || getenv("GMX_PME_P3M") != nullptr);
662     pme->pme_order     = ir->pme_order;
663     pme->ewaldcoeff_q  = ewaldcoeff_q;
664     pme->ewaldcoeff_lj = ewaldcoeff_lj;
665
666     /* Always constant electrostatics coefficients */
667     pme->epsilon_r     = ir->epsilon_r;
668
669     /* Always constant LJ coefficients */
670     pme->ljpme_combination_rule = ir->ljpme_combination_rule;
671
672     // The box requires scaling with nwalls = 2; we store that condition as well
673     // as the scaling factor
674     delete pme->boxScaler;
675     pme->boxScaler = new EwaldBoxZScaler(*ir);
676
677     /* If we violate restrictions, generate a fatal error here */
678     gmx_pme_check_restrictions(pme->pme_order,
679                                pme->nkx, pme->nky, pme->nkz,
680                                pme->nnodes_major,
681                                pme->bUseThreads,
682                                true);
683
684     if (pme->nnodes > 1)
685     {
686         double imbal;
687
688 #if GMX_MPI
689         MPI_Type_contiguous(DIM, GMX_MPI_REAL, &(pme->rvec_mpi));
690         MPI_Type_commit(&(pme->rvec_mpi));
691 #endif
692
693         /* Note that the coefficient spreading and force gathering, which usually
694          * takes about the same amount of time as FFT+solve_pme,
695          * is always fully load balanced
696          * (unless the coefficient distribution is inhomogeneous).
697          */
698
699         imbal = estimate_pme_load_imbalance(pme.get());
700         if (imbal >= 1.2 && pme->nodeid_major == 0 && pme->nodeid_minor == 0)
701         {
702             fprintf(stderr,
703                     "\n"
704                     "NOTE: The load imbalance in PME FFT and solve is %d%%.\n"
705                     "      For optimal PME load balancing\n"
706                     "      PME grid_x (%d) and grid_y (%d) should be divisible by #PME_ranks_x (%d)\n"
707                     "      and PME grid_y (%d) and grid_z (%d) should be divisible by #PME_ranks_y (%d)\n"
708                     "\n",
709                     (int)((imbal-1)*100 + 0.5),
710                     pme->nkx, pme->nky, pme->nnodes_major,
711                     pme->nky, pme->nkz, pme->nnodes_minor);
712         }
713     }
714
715     /* For a non-divisible grid we need pme_order instead of pme_order-1 */
716     /* In sum_qgrid_dd x overlap is copied in place: take padding into account.
717      * y is always copied through a buffer: we don't need padding in z,
718      * but we do need the overlap in x because of the communication order.
719      */
720     init_overlap_comm(&pme->overlap[0], pme->pme_order,
721 #if GMX_MPI
722                       pme->mpi_comm_d[0],
723 #endif
724                       pme->nnodes_major, pme->nodeid_major,
725                       pme->nkx,
726                       (div_round_up(pme->nky, pme->nnodes_minor)+pme->pme_order)*(pme->nkz+pme->pme_order-1));
727
728     /* Along overlap dim 1 we can send in multiple pulses in sum_fftgrid_dd.
729      * We do this with an offset buffer of equal size, so we need to allocate
730      * extra for the offset. That's what the (+1)*pme->nkz is for.
731      */
732     init_overlap_comm(&pme->overlap[1], pme->pme_order,
733 #if GMX_MPI
734                       pme->mpi_comm_d[1],
735 #endif
736                       pme->nnodes_minor, pme->nodeid_minor,
737                       pme->nky,
738                       (div_round_up(pme->nkx, pme->nnodes_major)+pme->pme_order+1)*pme->nkz);
739
740     /* Double-check for a limitation of the (current) sum_fftgrid_dd code.
741      * Note that gmx_pme_check_restrictions checked for this already.
742      */
743     if (pme->bUseThreads && pme->overlap[0].noverlap_nodes > 1)
744     {
745         gmx_incons("More than one communication pulse required for grid overlap communication along the major dimension while using threads");
746     }
747
748     snew(pme->bsp_mod[XX], pme->nkx);
749     snew(pme->bsp_mod[YY], pme->nky);
750     snew(pme->bsp_mod[ZZ], pme->nkz);
751
752     pme->gpu     = pmeGpu; /* Carrying over the single GPU structure */
753     pme->runMode = runMode;
754
755     /* The required size of the interpolation grid, including overlap.
756      * The allocated size (pmegrid_n?) might be slightly larger.
757      */
758     pme->pmegrid_nx = pme->overlap[0].s2g1[pme->nodeid_major] -
759         pme->overlap[0].s2g0[pme->nodeid_major];
760     pme->pmegrid_ny = pme->overlap[1].s2g1[pme->nodeid_minor] -
761         pme->overlap[1].s2g0[pme->nodeid_minor];
762     pme->pmegrid_nz_base = pme->nkz;
763     pme->pmegrid_nz      = pme->pmegrid_nz_base + pme->pme_order - 1;
764     set_grid_alignment(&pme->pmegrid_nz, pme->pme_order);
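    /* z is not decomposed over ranks, so along z we only add the spread
     * overlap of pme_order-1 points plus alignment padding. */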
765     pme->pmegrid_start_ix = pme->overlap[0].s2g0[pme->nodeid_major];
766     pme->pmegrid_start_iy = pme->overlap[1].s2g0[pme->nodeid_minor];
767     pme->pmegrid_start_iz = 0;
768
769     make_gridindex_to_localindex(pme->nkx,
770                                  pme->pmegrid_start_ix,
771                                  pme->pmegrid_nx - (pme->pme_order-1),
772                                  &pme->nnx, &pme->fshx);
773     make_gridindex_to_localindex(pme->nky,
774                                  pme->pmegrid_start_iy,
775                                  pme->pmegrid_ny - (pme->pme_order-1),
776                                  &pme->nny, &pme->fshy);
777     make_gridindex_to_localindex(pme->nkz,
778                                  pme->pmegrid_start_iz,
779                                  pme->pmegrid_nz_base,
780                                  &pme->nnz, &pme->fshz);
781
782     pme->spline_work = make_pme_spline_work(pme->pme_order);
783
784     ndata[0]    = pme->nkx;
785     ndata[1]    = pme->nky;
786     ndata[2]    = pme->nkz;
787     /* It doesn't matter if we allocate too many grids here,
788      * we only allocate and use the ones we need.
789      */
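    /* Grid indices 0/1 hold the Coulomb A/B-state grids and 2/3 the LJ
     * A/B-state grids; with Lorentz-Berthelot LJ-PME, indices 2 to 8 instead
     * hold the seven combination-rule terms. */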
790     if (pme->doLJ)
791     {
792         pme->ngrids = ((ir->ljpme_combination_rule == eljpmeLB) ? DO_Q_AND_LJ_LB : DO_Q_AND_LJ);
793     }
794     else
795     {
796         pme->ngrids = DO_Q;
797     }
798     snew(pme->fftgrid, pme->ngrids);
799     snew(pme->cfftgrid, pme->ngrids);
800     snew(pme->pfft_setup, pme->ngrids);
801
802     for (i = 0; i < pme->ngrids; ++i)
803     {
804         if ((i <  DO_Q && pme->doCoulomb && (i == 0 ||
805                                              bFreeEnergy_q)) ||
806             (i >= DO_Q && pme->doLJ && (i == 2 ||
807                                         bFreeEnergy_lj ||
808                                         ir->ljpme_combination_rule == eljpmeLB)))
809         {
810             pmegrids_init(&pme->pmegrid[i],
811                           pme->pmegrid_nx, pme->pmegrid_ny, pme->pmegrid_nz,
812                           pme->pmegrid_nz_base,
813                           pme->pme_order,
814                           pme->bUseThreads,
815                           pme->nthread,
816                           pme->overlap[0].s2g1[pme->nodeid_major]-pme->overlap[0].s2g0[pme->nodeid_major+1],
817                           pme->overlap[1].s2g1[pme->nodeid_minor]-pme->overlap[1].s2g0[pme->nodeid_minor+1]);
818             /* This routine will allocate the grid data to fit the FFTs */
819             const auto allocateRealGridForGpu = (pme->runMode == PmeRunMode::Mixed) ? gmx::PinningPolicy::CanBePinned : gmx::PinningPolicy::CannotBePinned;
820             gmx_parallel_3dfft_init(&pme->pfft_setup[i], ndata,
821                                     &pme->fftgrid[i], &pme->cfftgrid[i],
822                                     pme->mpi_comm_d,
823                                     bReproducible, pme->nthread, allocateRealGridForGpu);
824
825         }
826     }
827
828     if (!pme->bP3M)
829     {
830         /* Use plain SPME B-spline interpolation */
831         make_bspline_moduli(pme->bsp_mod, pme->nkx, pme->nky, pme->nkz, pme->pme_order);
832     }
833     else
834     {
835         /* Use the P3M grid-optimized influence function */
836         make_p3m_bspline_moduli(pme->bsp_mod, pme->nkx, pme->nky, pme->nkz, pme->pme_order);
837     }
838
839     /* Use atc[0] for spreading */
840     init_atomcomm(pme.get(), &pme->atc[0], nnodes_major > 1 ? 0 : 1, TRUE);
841     if (pme->ndecompdim >= 2)
842     {
843         init_atomcomm(pme.get(), &pme->atc[1], 1, FALSE);
844     }
845
846     if (pme->nnodes == 1)
847     {
848         pme->atc[0].n = homenr;
849         pme_realloc_atomcomm_things(&pme->atc[0]);
850     }
851
852     pme->lb_buf1       = nullptr;
853     pme->lb_buf2       = nullptr;
854     pme->lb_buf_nalloc = 0;
855
856     pme_gpu_reinit(pme.get(), gpuInfo);
857
858     pme_init_all_work(&pme->solve_work, pme->nthread, pme->nkx);
859
860     // no exception was thrown during the init, so we hand over the PME structure handle
861     return pme.release();
862 }
863
864 void gmx_pme_reinit(struct gmx_pme_t **pmedata,
865                     t_commrec *        cr,
866                     struct gmx_pme_t * pme_src,
867                     const t_inputrec * ir,
868                     const ivec         grid_size,
869                     real               ewaldcoeff_q,
870                     real               ewaldcoeff_lj)
871 {
872     int        homenr;
873
874     // Create a copy of t_inputrec fields that are used in gmx_pme_init().
875     // TODO: This would be better as just copying a sub-structure that contains
876     // all the PME parameters and nothing else.
877     t_inputrec irc;
878     irc.ePBC                   = ir->ePBC;
879     irc.coulombtype            = ir->coulombtype;
880     irc.vdwtype                = ir->vdwtype;
881     irc.efep                   = ir->efep;
882     irc.pme_order              = ir->pme_order;
883     irc.epsilon_r              = ir->epsilon_r;
884     irc.ljpme_combination_rule = ir->ljpme_combination_rule;
885     irc.nkx                    = grid_size[XX];
886     irc.nky                    = grid_size[YY];
887     irc.nkz                    = grid_size[ZZ];
888
889     if (pme_src->nnodes == 1)
890     {
891         homenr = pme_src->atc[0].n;
892     }
893     else
894     {
895         homenr = -1;
896     }
897
898     try
899     {
900         const gmx::MDLogger dummyLogger;
901         // This is reinit which is currently only changing grid size/coefficients,
902         // so we don't expect the actual logging.
903         // TODO: when PME is an object, it should take reference to mdlog on construction and save it.
904         GMX_ASSERT(pmedata, "Invalid PME pointer");
905         *pmedata = gmx_pme_init(cr, pme_src->nnodes_major, pme_src->nnodes_minor,
906                                 &irc, homenr, pme_src->bFEP_q, pme_src->bFEP_lj, FALSE, ewaldcoeff_q, ewaldcoeff_lj,
907                                 pme_src->nthread, pme_src->runMode, pme_src->gpu, nullptr, dummyLogger);
908         //TODO this is mostly passing around current values
909     }
910     GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
911
912     /* We can easily reuse the allocated pme grids in pme_src */
913     reuse_pmegrids(&pme_src->pmegrid[PME_GRID_QA], &(*pmedata)->pmegrid[PME_GRID_QA]);
914     /* We would like to reuse the fft grids, but that's harder */
915 }
916
917 void gmx_pme_calc_energy(struct gmx_pme_t *pme, int n, rvec *x, real *q, real *V)
918 {
919     pme_atomcomm_t *atc;
920     pmegrids_t     *grid;
921
922     if (pme->nnodes > 1)
923     {
924         gmx_incons("gmx_pme_calc_energy called in parallel");
925     }
926     if (pme->bFEP_q)
927     {
928         gmx_incons("gmx_pme_calc_energy with free energy");
929     }
930
931     atc            = &pme->atc_energy;
932     atc->nthread   = 1;
933     if (atc->spline == nullptr)
934     {
935         snew(atc->spline, atc->nthread);
936     }
937     atc->nslab     = 1;
938     atc->bSpread   = TRUE;
939     atc->pme_order = pme->pme_order;
940     atc->n         = n;
941     pme_realloc_atomcomm_things(atc);
942     atc->x           = x;
943     atc->coefficient = q;
944
945     /* We only use the A-charges grid */
946     grid = &pme->pmegrid[PME_GRID_QA];
947
948     /* Only calculate the spline coefficients, don't actually spread */
949     spread_on_grid(pme, atc, nullptr, TRUE, FALSE, pme->fftgrid[PME_GRID_QA], FALSE, PME_GRID_QA);
950
951     *V = gather_energy_bsplines(pme, grid->grid.grid, atc);
952 }
953
954 /*! \brief Calculate initial Lorentz-Berthelot coefficients for LJ-PME */
955 static void
956 calc_initial_lb_coeffs(struct gmx_pme_t *pme, real *local_c6, real *local_sigma)
957 {
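    /* Start each atom's coefficient at C6/sigma^4; calc_next_lb_coeffs() then
     * multiplies by sigma once before each of the seven LB grids, so successive
     * grids spread increasing powers of sigma. */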
958     int  i;
959     for (i = 0; i < pme->atc[0].n; ++i)
960     {
961         real sigma4;
962         sigma4                     = local_sigma[i];
963         sigma4                     = sigma4*sigma4;
964         sigma4                     = sigma4*sigma4;
965         pme->atc[0].coefficient[i] = local_c6[i] / sigma4;
966     }
967 }
968
969 /*! \brief Calculate next Lorentz-Berthelot coefficients for LJ-PME */
970 static void
971 calc_next_lb_coeffs(struct gmx_pme_t *pme, real *local_sigma)
972 {
973     int  i;
974
975     for (i = 0; i < pme->atc[0].n; ++i)
976     {
977         pme->atc[0].coefficient[i] *= local_sigma[i];
978     }
979 }
980
981 int gmx_pme_do(struct gmx_pme_t *pme,
982                int start,       int homenr,
983                rvec x[],        rvec f[],
984                real chargeA[],  real chargeB[],
985                real c6A[],      real c6B[],
986                real sigmaA[],   real sigmaB[],
987                matrix box,      t_commrec *cr,
988                int  maxshift_x, int maxshift_y,
989                t_nrnb *nrnb,    gmx_wallcycle_t wcycle,
990                matrix vir_q,    matrix vir_lj,
991                real *energy_q,  real *energy_lj,
992                real lambda_q,   real lambda_lj,
993                real *dvdlambda_q, real *dvdlambda_lj,
994                int flags)
995 {
996     GMX_ASSERT(pme->runMode == PmeRunMode::CPU, "gmx_pme_do should not be called on the GPU PME run.");
997
998     int                  d, i, j, npme, grid_index, max_grid_index;
999     int                  n_d;
1000     pme_atomcomm_t      *atc        = nullptr;
1001     pmegrids_t          *pmegrid    = nullptr;
1002     real                *grid       = nullptr;
1003     rvec                *f_d;
1004     real                *coefficient = nullptr;
1005     real                 energy_AB[4];
1006     matrix               vir_AB[4];
1007     real                 scale, lambda;
1008     gmx_bool             bClearF;
1009     gmx_parallel_3dfft_t pfft_setup;
1010     real              *  fftgrid;
1011     t_complex          * cfftgrid;
1012     int                  thread;
1013     gmx_bool             bFirst, bDoSplines;
1014     int                  fep_state;
1015     int                  fep_states_lj           = pme->bFEP_lj ? 2 : 1;
1016     const gmx_bool       bCalcEnerVir            = flags & GMX_PME_CALC_ENER_VIR;
1017     const gmx_bool       bBackFFT                = flags & (GMX_PME_CALC_F | GMX_PME_CALC_POT);
1018     const gmx_bool       bCalcF                  = flags & GMX_PME_CALC_F;
1019
1020     assert(pme->nnodes > 0);
1021     assert(pme->nnodes == 1 || pme->ndecompdim > 0);
1022
1023     if (pme->nnodes > 1)
1024     {
1025         atc      = &pme->atc[0];
1026         atc->npd = homenr;
1027         if (atc->npd > atc->pd_nalloc)
1028         {
1029             atc->pd_nalloc = over_alloc_dd(atc->npd);
1030             srenew(atc->pd, atc->pd_nalloc);
1031         }
1032         for (d = pme->ndecompdim-1; d >= 0; d--)
1033         {
1034             atc           = &pme->atc[d];
1035             atc->maxshift = (atc->dimind == 0 ? maxshift_x : maxshift_y);
1036         }
1037     }
1038     else
1039     {
1040         atc = &pme->atc[0];
1041         /* This could be necessary for TPI */
1042         pme->atc[0].n = homenr;
1043         if (DOMAINDECOMP(cr))
1044         {
1045             pme_realloc_atomcomm_things(atc);
1046         }
1047         atc->x = x;
1048         atc->f = f;
1049     }
1050
1051     matrix scaledBox;
1052     pme->boxScaler->scaleBox(box, scaledBox);
1053
1054     gmx::invertBoxMatrix(scaledBox, pme->recipbox);
1055     bFirst = TRUE;
1056
1057     /* For simplicity, we construct the splines for all particles if
1058      * more than one PME calculation is needed. Some optimization
1059      * could be done by keeping track of which atoms have splines
1060      * constructed, and constructing new splines on each pass for atoms
1061      * that don't yet have them.
1062      */
1063
1064     bDoSplines = pme->bFEP || (pme->doCoulomb && pme->doLJ);
1065
1066     /* We need a maximum of four separate PME calculations:
1067      * grid_index=0: Coulomb PME with charges from state A
1068      * grid_index=1: Coulomb PME with charges from state B
1069      * grid_index=2: LJ PME with C6 from state A
1070      * grid_index=3: LJ PME with C6 from state B
1071      * For Lorentz-Berthelot combination rules, a separate loop is used to
1072      * calculate all the terms
1073      */
1074
1075     /* If we are doing LJ-PME with LB, we only do Q here */
1076     max_grid_index = (pme->ljpme_combination_rule == eljpmeLB) ? DO_Q : DO_Q_AND_LJ;
1077
1078     for (grid_index = 0; grid_index < max_grid_index; ++grid_index)
1079     {
1080         /* Check if we should do calculations at this grid_index
1081          * If grid_index is odd we should be doing FEP
1082          * If grid_index < 2 we should be doing electrostatic PME
1083          * If grid_index >= 2 we should be doing LJ-PME
1084          */
1085         if ((grid_index <  DO_Q && (!pme->doCoulomb ||
1086                                     (grid_index == 1 && !pme->bFEP_q))) ||
1087             (grid_index >= DO_Q && (!pme->doLJ ||
1088                                     (grid_index == 3 && !pme->bFEP_lj))))
1089         {
1090             continue;
1091         }
1092         /* Unpack structure */
1093         pmegrid    = &pme->pmegrid[grid_index];
1094         fftgrid    = pme->fftgrid[grid_index];
1095         cfftgrid   = pme->cfftgrid[grid_index];
1096         pfft_setup = pme->pfft_setup[grid_index];
1097         switch (grid_index)
1098         {
1099             case 0: coefficient = chargeA + start; break;
1100             case 1: coefficient = chargeB + start; break;
1101             case 2: coefficient = c6A + start; break;
1102             case 3: coefficient = c6B + start; break;
1103         }
1104
1105         grid = pmegrid->grid.grid;
1106
1107         if (debug)
1108         {
1109             fprintf(debug, "PME: number of ranks = %d, rank = %d\n",
1110                     cr->nnodes, cr->nodeid);
1111             fprintf(debug, "Grid = %p\n", (void*)grid);
1112             if (grid == nullptr)
1113             {
1114                 gmx_fatal(FARGS, "No grid!");
1115             }
1116         }
1117         where();
1118
1119         if (pme->nnodes == 1)
1120         {
1121             atc->coefficient = coefficient;
1122         }
1123         else
1124         {
1125             wallcycle_start(wcycle, ewcPME_REDISTXF);
1126             do_redist_pos_coeffs(pme, cr, start, homenr, bFirst, x, coefficient);
1127             where();
1128
1129             wallcycle_stop(wcycle, ewcPME_REDISTXF);
1130         }
1131
1132         if (debug)
1133         {
1134             fprintf(debug, "Rank= %6d, pme local particles=%6d\n",
1135                     cr->nodeid, atc->n);
1136         }
1137
1138         if (flags & GMX_PME_SPREAD)
1139         {
1140             wallcycle_start(wcycle, ewcPME_SPREAD);
1141
1142             /* Spread the coefficients on a grid */
1143             spread_on_grid(pme, &pme->atc[0], pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);
1144
1145             if (bFirst)
1146             {
1147                 inc_nrnb(nrnb, eNR_WEIGHTS, DIM*atc->n);
1148             }
1149             inc_nrnb(nrnb, eNR_SPREADBSP,
1150                      pme->pme_order*pme->pme_order*pme->pme_order*atc->n);
1151
1152             if (!pme->bUseThreads)
1153             {
1154                 wrap_periodic_pmegrid(pme, grid);
1155
1156                 /* sum contributions to local grid from other nodes */
1157 #if GMX_MPI
1158                 if (pme->nnodes > 1)
1159                 {
1160                     gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
1161                     where();
1162                 }
1163 #endif
1164
1165                 copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
1166             }
1167
1168             wallcycle_stop(wcycle, ewcPME_SPREAD);
1169
1170             /* TODO If the OpenMP and single-threaded implementations
1171                converge, then spread_on_grid() and
1172                copy_pmegrid_to_fftgrid() will perhaps live in the same
1173                source file.
1174              */
1175         }
1176
1177         /* Here we start a large thread parallel region */
1178 #pragma omp parallel num_threads(pme->nthread) private(thread)
1179         {
1180             try
1181             {
1182                 thread = gmx_omp_get_thread_num();
1183                 if (flags & GMX_PME_SOLVE)
1184                 {
1185                     int loop_count;
1186
1187                     /* do 3d-fft */
1188                     if (thread == 0)
1189                     {
1190                         wallcycle_start(wcycle, ewcPME_FFT);
1191                     }
1192                     gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX,
1193                                                thread, wcycle);
1194                     if (thread == 0)
1195                     {
1196                         wallcycle_stop(wcycle, ewcPME_FFT);
1197                     }
1198                     where();
1199
1200                     /* solve in k-space for our local cells */
1201                     if (thread == 0)
1202                     {
1203                         wallcycle_start(wcycle, (grid_index < DO_Q ? ewcPME_SOLVE : ewcLJPME));
1204                     }
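                    /* The volume factor passed to the solver is the product of
                     * the diagonal elements of the scaled box; the GROMACS box
                     * matrix is triangular, so this equals its determinant. */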
1205                     if (grid_index < DO_Q)
1206                     {
1207                         loop_count =
1208                             solve_pme_yzx(pme, cfftgrid,
1209                                           scaledBox[XX][XX]*scaledBox[YY][YY]*scaledBox[ZZ][ZZ],
1210                                           bCalcEnerVir,
1211                                           pme->nthread, thread);
1212                     }
1213                     else
1214                     {
1215                         loop_count =
1216                             solve_pme_lj_yzx(pme, &cfftgrid, FALSE,
1217                                              scaledBox[XX][XX]*scaledBox[YY][YY]*scaledBox[ZZ][ZZ],
1218                                              bCalcEnerVir,
1219                                              pme->nthread, thread);
1220                     }
1221
1222                     if (thread == 0)
1223                     {
1224                         wallcycle_stop(wcycle, (grid_index < DO_Q ? ewcPME_SOLVE : ewcLJPME));
1225                         where();
1226                         inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
1227                     }
1228                 }
1229
1230                 if (bBackFFT)
1231                 {
1232                     /* do 3d-invfft */
1233                     if (thread == 0)
1234                     {
1235                         where();
1236                         wallcycle_start(wcycle, ewcPME_FFT);
1237                     }
1238                     gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL,
1239                                                thread, wcycle);
1240                     if (thread == 0)
1241                     {
1242                         wallcycle_stop(wcycle, ewcPME_FFT);
1243
1244                         where();
1245
1246                         if (pme->nodeid == 0)
1247                         {
1248                             real ntot = pme->nkx*pme->nky*pme->nkz;
1249                             npme  = static_cast<int>(ntot*std::log(ntot)/std::log(2.0));
1250                             inc_nrnb(nrnb, eNR_FFT, 2*npme);
1251                         }
1252
1253                         /* Note: this wallcycle region is closed below
1254                            outside an OpenMP region, so take care if
1255                            refactoring code here. */
1256                         wallcycle_start(wcycle, ewcPME_GATHER);
1257                     }
1258
1259                     copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
1260                 }
1261             } GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1262         }
1263         /* End of thread parallel section.
1264          * With MPI we have to synchronize here before gmx_sum_qgrid_dd.
1265          */
1266
1267         if (bBackFFT)
1268         {
1269             /* distribute local grid to all nodes */
1270 #if GMX_MPI
1271             if (pme->nnodes > 1)
1272             {
1273                 gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
1274             }
1275 #endif
1276             where();
1277
1278             unwrap_periodic_pmegrid(pme, grid);
1279         }
1280
1281         if (bCalcF)
1282         {
1283             /* interpolate forces for our local atoms */
1284
1285             where();
1286
1287             /* If we are running without parallelization,
1288              * atc->f is the actual force array, not a buffer,
1289              * therefore we should not clear it.
1290              */
1291             lambda  = grid_index < DO_Q ? lambda_q : lambda_lj;
1292             bClearF = (bFirst && PAR(cr));
1293 #pragma omp parallel for num_threads(pme->nthread) schedule(static)
1294             for (thread = 0; thread < pme->nthread; thread++)
1295             {
1296                 try
1297                 {
1298                     gather_f_bsplines(pme, grid, bClearF, atc,
1299                                       &atc->spline[thread],
1300                                       pme->bFEP ? (grid_index % 2 == 0 ? 1.0-lambda : lambda) : 1.0);
1301                 }
1302                 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1303             }
1304
1305             where();
1306
1307             inc_nrnb(nrnb, eNR_GATHERFBSP,
1308                      pme->pme_order*pme->pme_order*pme->pme_order*pme->atc[0].n);
1309             /* Note: this wallcycle region is opened above inside an OpenMP
1310                region, so take care if refactoring code here. */
1311             wallcycle_stop(wcycle, ewcPME_GATHER);
1312         }
1313
1314         if (bCalcEnerVir)
1315         {
1316             /* This should only be called on the master thread
1317              * and after the threads have synchronized.
1318              */
1319             if (grid_index < 2)
1320             {
1321                 get_pme_ener_vir_q(pme->solve_work, pme->nthread, &energy_AB[grid_index], vir_AB[grid_index]);
1322             }
1323             else
1324             {
1325                 get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &energy_AB[grid_index], vir_AB[grid_index]);
1326             }
1327         }
1328         bFirst = FALSE;
1329     } /* of grid_index-loop */
1330
1331     /* For Lorentz-Berthelot combination rules in LJ-PME, we need to calculate
1332      * seven terms. */
1333
1334     if (pme->doLJ && pme->ljpme_combination_rule == eljpmeLB)
1335     {
1336         /* Loop over A- and B-state if we are doing FEP */
1337         for (fep_state = 0; fep_state < fep_states_lj; ++fep_state)
1338         {
1339             real *local_c6 = nullptr, *local_sigma = nullptr, *RedistC6 = nullptr, *RedistSigma = nullptr;
1340             if (pme->nnodes == 1)
1341             {
1342                 if (pme->lb_buf1 == nullptr)
1343                 {
1344                     pme->lb_buf_nalloc = pme->atc[0].n;
1345                     snew(pme->lb_buf1, pme->lb_buf_nalloc);
1346                 }
1347                 pme->atc[0].coefficient = pme->lb_buf1;
1348                 switch (fep_state)
1349                 {
1350                     case 0:
1351                         local_c6      = c6A;
1352                         local_sigma   = sigmaA;
1353                         break;
1354                     case 1:
1355                         local_c6      = c6B;
1356                         local_sigma   = sigmaB;
1357                         break;
1358                     default:
1359                         gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
1360                 }
1361             }
1362             else
1363             {
1364                 atc = &pme->atc[0];
1365                 switch (fep_state)
1366                 {
1367                     case 0:
1368                         RedistC6      = c6A;
1369                         RedistSigma   = sigmaA;
1370                         break;
1371                     case 1:
1372                         RedistC6      = c6B;
1373                         RedistSigma   = sigmaB;
1374                         break;
1375                     default:
1376                         gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
1377                 }
1378                 wallcycle_start(wcycle, ewcPME_REDISTXF);
1379
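                /* do_redist_pos_coeffs() leaves the redistributed values in
                 * atc->coefficient, so copy them into the C6 buffer before the
                 * sigma redistribution below overwrites them. */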
1380                 do_redist_pos_coeffs(pme, cr, start, homenr, bFirst, x, RedistC6);
1381                 if (pme->lb_buf_nalloc < atc->n)
1382                 {
1383                     pme->lb_buf_nalloc = atc->nalloc;
1384                     srenew(pme->lb_buf1, pme->lb_buf_nalloc);
1385                     srenew(pme->lb_buf2, pme->lb_buf_nalloc);
1386                 }
1387                 local_c6 = pme->lb_buf1;
1388                 for (i = 0; i < atc->n; ++i)
1389                 {
1390                     local_c6[i] = atc->coefficient[i];
1391                 }
1392                 where();
1393
1394                 do_redist_pos_coeffs(pme, cr, start, homenr, FALSE, x, RedistSigma);
1395                 local_sigma = pme->lb_buf2;
1396                 for (i = 0; i < atc->n; ++i)
1397                 {
1398                     local_sigma[i] = atc->coefficient[i];
1399                 }
1400                 where();
1401
1402                 wallcycle_stop(wcycle, ewcPME_REDISTXF);
1403             }
1404             calc_initial_lb_coeffs(pme, local_c6, local_sigma);
1405
1406             /* Seven terms in LJ-PME with LB; grid_index < 2 is reserved for electrostatics */
1407             for (grid_index = 2; grid_index < 9; ++grid_index)
1408             {
1409                 /* Unpack structure */
1410                 pmegrid    = &pme->pmegrid[grid_index];
1411                 fftgrid    = pme->fftgrid[grid_index];
1412                 pfft_setup = pme->pfft_setup[grid_index];
1413                 calc_next_lb_coeffs(pme, local_sigma);
1414                 grid = pmegrid->grid.grid;
1415                 where();
1416
1417                 if (flags & GMX_PME_SPREAD)
1418                 {
1419                     wallcycle_start(wcycle, ewcPME_SPREAD);
1420                     /* Spread the c6 on a grid */
1421                     spread_on_grid(pme, &pme->atc[0], pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);
1422
1423                     if (bFirst)
1424                     {
1425                         inc_nrnb(nrnb, eNR_WEIGHTS, DIM*atc->n);
1426                     }
1427
1428                     inc_nrnb(nrnb, eNR_SPREADBSP,
1429                              pme->pme_order*pme->pme_order*pme->pme_order*atc->n);
1430                     if (pme->nthread == 1)
1431                     {
1432                         wrap_periodic_pmegrid(pme, grid);
1433                         /* sum contributions to local grid from other nodes */
1434 #if GMX_MPI
1435                         if (pme->nnodes > 1)
1436                         {
1437                             gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
1438                             where();
1439                         }
1440 #endif
1441                         copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
1442                     }
1443                     wallcycle_stop(wcycle, ewcPME_SPREAD);
1444                 }
1445                 /* Here we start a large thread parallel region */
1446 #pragma omp parallel num_threads(pme->nthread) private(thread)
1447                 {
1448                     try
1449                     {
1450                         thread = gmx_omp_get_thread_num();
1451                         if (flags & GMX_PME_SOLVE)
1452                         {
1453                             /* do 3d-fft */
1454                             if (thread == 0)
1455                             {
1456                                 wallcycle_start(wcycle, ewcPME_FFT);
1457                             }
1458
1459                             gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX,
1460                                                        thread, wcycle);
1461                             if (thread == 0)
1462                             {
1463                                 wallcycle_stop(wcycle, ewcPME_FFT);
1464                             }
1465                             where();
1466                         }
1467                     }
1468                     GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1469                 }
1470                 bFirst = FALSE;
1471             }
1472             if (flags & GMX_PME_SOLVE)
1473             {
1474                 /* solve in k-space for our local cells */
1475 #pragma omp parallel num_threads(pme->nthread) private(thread)
1476                 {
1477                     try
1478                     {
1479                         int loop_count;
1480                         thread = gmx_omp_get_thread_num();
1481                         if (thread == 0)
1482                         {
1483                             wallcycle_start(wcycle, ewcLJPME);
1484                         }
1485
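                             /* Solve the seven LJ grids in reciprocal space. The volume
                              * argument is the product of the diagonal of the scaled box,
                              * which equals the box volume because GROMACS boxes are
                              * triangular matrices; the TRUE presumably selects the
                              * LB (seven-grid) code path in the solver.
                              */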
1486                         loop_count =
1487                             solve_pme_lj_yzx(pme, &pme->cfftgrid[2], TRUE,
1488                                              scaledBox[XX][XX]*scaledBox[YY][YY]*scaledBox[ZZ][ZZ],
1489                                              bCalcEnerVir,
1490                                              pme->nthread, thread);
1491                         if (thread == 0)
1492                         {
1493                             wallcycle_stop(wcycle, ewcLJPME);
1494                             where();
1495                             inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
1496                         }
1497                     }
1498                     GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1499                 }
1500             }
1501
1502             if (bCalcEnerVir)
1503             {
1504                 /* This should only be called on the master thread and
1505                  * after the threads have synchronized.
1506                  */
1507                 get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &energy_AB[2+fep_state], vir_AB[2+fep_state]);
1508             }
1509
1510             if (bBackFFT)
1511             {
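                     /* If Coulomb PME ran first, its force gather already initialized
                      * the force buffers, so the LJ grids only accumulate; otherwise
                      * the first LJ grid may need to clear them (see bClearF below).
                      */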
1512                 bFirst = !pme->doCoulomb;
1513                 calc_initial_lb_coeffs(pme, local_c6, local_sigma);
1514                 for (grid_index = 8; grid_index >= 2; --grid_index)
1515                 {
1516                     /* Unpack structure */
1517                     pmegrid    = &pme->pmegrid[grid_index];
1518                     fftgrid    = pme->fftgrid[grid_index];
1519                     pfft_setup = pme->pfft_setup[grid_index];
1520                     grid       = pmegrid->grid.grid;
1521                     calc_next_lb_coeffs(pme, local_sigma);
1522                     where();
1523 #pragma omp parallel num_threads(pme->nthread) private(thread)
1524                     {
1525                         try
1526                         {
1527                             thread = gmx_omp_get_thread_num();
1528                             /* do 3d-invfft */
1529                             if (thread == 0)
1530                             {
1531                                 where();
1532                                 wallcycle_start(wcycle, ewcPME_FFT);
1533                             }
1534
1535                             gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL,
1536                                                        thread, wcycle);
1537                             if (thread == 0)
1538                             {
1539                                 wallcycle_stop(wcycle, ewcPME_FFT);
1540
1541                                 where();
1542
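                                     /* Book-keep an N*log2(N) operation-count estimate
                                      * for the 3D FFT; the factor of 2 presumably
                                      * accounts for the forward and backward transforms
                                      * together.
                                      */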
1543                                 if (pme->nodeid == 0)
1544                                 {
1545                                     real ntot = pme->nkx*pme->nky*pme->nkz;
1546                                     npme  = static_cast<int>(ntot*std::log(ntot)/std::log(2.0));
1547                                     inc_nrnb(nrnb, eNR_FFT, 2*npme);
1548                                 }
1549                                 wallcycle_start(wcycle, ewcPME_GATHER);
1550                             }
1551
1552                             copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
1553                         }
1554                         GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1555                     } /*#pragma omp parallel*/
1556
1557                     /* distribute local grid to all nodes */
1558 #if GMX_MPI
1559                     if (pme->nnodes > 1)
1560                     {
1561                         gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
1562                     }
1563 #endif
1564                     where();
1565
1566                     unwrap_periodic_pmegrid(pme, grid);
1567
1568                     if (bCalcF)
1569                     {
1570                         /* interpolate forces for our local atoms */
1571                         where();
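                             /* The gather scale combines the FEP weight of the current
                              * lambda state with the per-grid LB weight from
                              * lb_scale_factor (presumably the binomial coefficients of
                              * the sigma expansion); bClearF makes the first gathered
                              * grid overwrite rather than accumulate the force buffer
                              * on parallel runs.
                              */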
1572                         bClearF = (bFirst && PAR(cr));
1573                         scale   = pme->bFEP ? (fep_state < 1 ? 1.0-lambda_lj : lambda_lj) : 1.0;
1574                         scale  *= lb_scale_factor[grid_index-2];
1575
1576 #pragma omp parallel for num_threads(pme->nthread) schedule(static)
1577                         for (thread = 0; thread < pme->nthread; thread++)
1578                         {
1579                             try
1580                             {
1581                                 gather_f_bsplines(pme, grid, bClearF, &pme->atc[0],
1582                                                   &pme->atc[0].spline[thread],
1583                                                   scale);
1584                             }
1585                             GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1586                         }
1587
1588                         where();
1589
1590                         inc_nrnb(nrnb, eNR_GATHERFBSP,
1591                                  pme->pme_order*pme->pme_order*pme->pme_order*pme->atc[0].n);
1592                     }
1593                     wallcycle_stop(wcycle, ewcPME_GATHER);
1594
1595                     bFirst = FALSE;
1596                 } /* for (grid_index = 8; grid_index >= 2; --grid_index) */
1597             }     /* if (bBackFFT) */
1598         }         /* for (fep_state = 0; fep_state < fep_states_lj; ++fep_state) */
1599     }             /* if ((flags & GMX_PME_DO_LJ) && pme->ljpme_combination_rule == eljpmeLB) */
1600
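         /* With more than one PME rank, route the gathered forces back through the
          * decomposition dimensions so they end up on the home ranks of the atoms.
          */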
1601     if (bCalcF && pme->nnodes > 1)
1602     {
1603         wallcycle_start(wcycle, ewcPME_REDISTXF);
1604         for (d = 0; d < pme->ndecompdim; d++)
1605         {
1606             atc = &pme->atc[d];
1607             if (d == pme->ndecompdim - 1)
1608             {
1609                 n_d = homenr;
1610                 f_d = f + start;
1611             }
1612             else
1613             {
1614                 n_d = pme->atc[d+1].n;
1615                 f_d = pme->atc[d+1].f;
1616             }
1617             if (DOMAINDECOMP(cr))
1618             {
1619                 dd_pmeredist_f(pme, atc, n_d, f_d,
1620                                d == pme->ndecompdim-1 && pme->bPPnode);
1621             }
1622         }
1623
1624         wallcycle_stop(wcycle, ewcPME_REDISTXF);
1625     }
1626     where();
1627
1628     if (bCalcEnerVir)
1629     {
1630         if (pme->doCoulomb)
1631         {
1632             if (!pme->bFEP_q)
1633             {
1634                 *energy_q = energy_AB[0];
1635                 m_add(vir_q, vir_AB[0], vir_q);
1636             }
1637             else
1638             {
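                     /* Linear interpolation in lambda:
                      *   E(lambda) = (1 - lambda)*E_A + lambda*E_B,
                      * hence dE/dlambda = E_B - E_A; the virial is mixed with the
                      * same weights.
                      */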
1639                 *energy_q       = (1.0-lambda_q)*energy_AB[0] + lambda_q*energy_AB[1];
1640                 *dvdlambda_q   += energy_AB[1] - energy_AB[0];
1641                 for (i = 0; i < DIM; i++)
1642                 {
1643                     for (j = 0; j < DIM; j++)
1644                     {
1645                         vir_q[i][j] += (1.0-lambda_q)*vir_AB[0][i][j] +
1646                             lambda_q*vir_AB[1][i][j];
1647                     }
1648                 }
1649             }
1650             if (debug)
1651             {
1652                 fprintf(debug, "Electrostatic PME mesh energy: %g\n", *energy_q);
1653             }
1654         }
1655         else
1656         {
1657             *energy_q = 0;
1658         }
1659
1660         if (pme->doLJ)
1661         {
1662             if (!pme->bFEP_lj)
1663             {
1664                 *energy_lj = energy_AB[2];
1665                 m_add(vir_lj, vir_AB[2], vir_lj);
1666             }
1667             else
1668             {
1669                 *energy_lj     = (1.0-lambda_lj)*energy_AB[2] + lambda_lj*energy_AB[3];
1670                 *dvdlambda_lj += energy_AB[3] - energy_AB[2];
1671                 for (i = 0; i < DIM; i++)
1672                 {
1673                     for (j = 0; j < DIM; j++)
1674                     {
1675                         vir_lj[i][j] += (1.0-lambda_lj)*vir_AB[2][i][j] + lambda_lj*vir_AB[3][i][j];
1676                     }
1677                 }
1678             }
1679             if (debug)
1680             {
1681                 fprintf(debug, "Lennard-Jones PME mesh energy: %g\n", *energy_lj);
1682             }
1683         }
1684         else
1685         {
1686             *energy_lj = 0;
1687         }
1688     }
1689     return 0;
1690 }
1691
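     /* Release all data owned by a PME structure; a null pointer is accepted and
      * ignored. GPU-side state is destroyed only when a PME GPU task is active.
      */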
1692 void gmx_pme_destroy(gmx_pme_t *pme)
1693 {
1694     if (!pme)
1695     {
1696         return;
1697     }
1698
1699     delete pme->boxScaler;
1700
1701     sfree(pme->nnx);
1702     sfree(pme->nny);
1703     sfree(pme->nnz);
1704     sfree(pme->fshx);
1705     sfree(pme->fshy);
1706     sfree(pme->fshz);
1707
1708     for (int i = 0; i < pme->ngrids; ++i)
1709     {
1710         pmegrids_destroy(&pme->pmegrid[i]);
1711     }
1712     if (pme->pfft_setup)
1713     {
1714         for (int i = 0; i < pme->ngrids; ++i)
1715         {
1716             gmx_parallel_3dfft_destroy(pme->pfft_setup[i]);
1717         }
1718     }
1719     sfree(pme->fftgrid);
1720     sfree(pme->cfftgrid);
1721     sfree(pme->pfft_setup);
1722
1723     for (int i = 0; i < std::max(1, pme->ndecompdim); i++) // pme->atc[0] is always allocated
1724     {
1725         destroy_atomcomm(&pme->atc[i]);
1726     }
1727
1728     for (int i = 0; i < DIM; i++)
1729     {
1730         sfree(pme->bsp_mod[i]);
1731     }
1732
1733     destroy_overlap_comm(&pme->overlap[0]);
1734     destroy_overlap_comm(&pme->overlap[1]);
1735
1736     sfree(pme->lb_buf1);
1737     sfree(pme->lb_buf2);
1738
1739     sfree(pme->bufv);
1740     sfree(pme->bufr);
1741
1742     if (pme->solve_work)
1743     {
1744         pme_free_all_work(&pme->solve_work, pme->nthread);
1745     }
1746
1747     sfree(pme->sum_qgrid_tmp);
1748     sfree(pme->sum_qgrid_dd_tmp);
1749
1750     if (pme_gpu_active(pme) && pme->gpu)
1751     {
1752         pme_gpu_destroy(pme->gpu);
1753     }
1754
1755     delete pme;
1756 }
1757
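     /* Refresh the per-atom data (atom count and charges) of an existing PME
      * setup; currently only the GPU path is handled here (see the TODO below).
      */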
1758 void gmx_pme_reinit_atoms(const gmx_pme_t *pme, const int nAtoms, const real *charges)
1759 {
1760     if (pme_gpu_active(pme))
1761     {
1762         pme_gpu_reinit_atoms(pme->gpu, nAtoms, charges);
1763     }
1764     // TODO: handle the CPU case here; handle the whole t_mdatoms
1765 }