Support persistent device context-derived data in PME tests
src/gromacs/ewald/pme.cpp (alexxy/gromacs.git)
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5  * Copyright (c) 2001-2004, The GROMACS development team.
6  * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
7  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8  * and including many others, as listed in the AUTHORS file in the
9  * top-level source directory and at http://www.gromacs.org.
10  *
11  * GROMACS is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public License
13  * as published by the Free Software Foundation; either version 2.1
14  * of the License, or (at your option) any later version.
15  *
16  * GROMACS is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with GROMACS; if not, see
23  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
25  *
26  * If you want to redistribute modifications to GROMACS, please
27  * consider that scientific software is very special. Version
28  * control is crucial - bugs must be traceable. We will be happy to
29  * consider code for inclusion in the official distribution, but
30  * derived work must not be called official GROMACS. Details are found
31  * in the README & COPYING files - if they are missing, get the
32  * official version at http://www.gromacs.org.
33  *
34  * To help us fund GROMACS development, we humbly ask that you cite
35  * the research papers on the package. Check out http://www.gromacs.org.
36  */
37 /*! \internal \file
38  *
39  * \brief This file contains function definitions necessary for
40  * computing energies and forces for the PME long-ranged part (Coulomb
41  * and LJ).
42  *
43  * \author Erik Lindahl <erik@kth.se>
44  * \author Berk Hess <hess@kth.se>
45  * \ingroup module_ewald
46  */
47 /* IMPORTANT FOR DEVELOPERS:
48  *
49  * Triclinic pme stuff isn't entirely trivial, and we've experienced
50  * some bugs during development (many of them due to me). To avoid
51  * this in the future, please check the following things if you make
52  * changes in this file:
53  *
54  * 1. You should obtain identical (at least to the PME precision)
55  *    energies, forces, and virial for
56  *    a rectangular box and a triclinic one where the z (or y) axis is
57  *    tilted by a whole box side. For instance you could use these boxes:
58  *
59  *    rectangular       triclinic
60  *     2  0  0           2  0  0
61  *     0  2  0           0  2  0
62  *     0  0  6           2  2  6
63  *
64  * 2. You should check the energy conservation in a triclinic box.
65  *
66  * It might seem like overkill, but better safe than sorry.
67  * /Erik 001109
68  */
69
70 #include "gmxpre.h"
71
72 #include "pme.h"
73
74 #include "config.h"
75
76 #include <assert.h>
77 #include <stdio.h>
78 #include <stdlib.h>
79 #include <string.h>
80
81 #include <cmath>
82
83 #include <algorithm>
84 #include <list>
85
86 #include "gromacs/domdec/domdec.h"
87 #include "gromacs/ewald/ewald-utils.h"
88 #include "gromacs/fft/parallel_3dfft.h"
89 #include "gromacs/fileio/pdbio.h"
90 #include "gromacs/gmxlib/network.h"
91 #include "gromacs/gmxlib/nrnb.h"
92 #include "gromacs/math/gmxcomplex.h"
93 #include "gromacs/math/invertmatrix.h"
94 #include "gromacs/math/units.h"
95 #include "gromacs/math/vec.h"
96 #include "gromacs/math/vectypes.h"
97 #include "gromacs/mdtypes/commrec.h"
98 #include "gromacs/mdtypes/forcerec.h"
99 #include "gromacs/mdtypes/inputrec.h"
100 #include "gromacs/mdtypes/md_enums.h"
101 #include "gromacs/pbcutil/pbc.h"
102 #include "gromacs/timing/cyclecounter.h"
103 #include "gromacs/timing/wallcycle.h"
104 #include "gromacs/timing/walltime_accounting.h"
105 #include "gromacs/utility/basedefinitions.h"
106 #include "gromacs/utility/exceptions.h"
107 #include "gromacs/utility/fatalerror.h"
108 #include "gromacs/utility/gmxmpi.h"
109 #include "gromacs/utility/gmxomp.h"
110 #include "gromacs/utility/logger.h"
111 #include "gromacs/utility/real.h"
112 #include "gromacs/utility/smalloc.h"
113 #include "gromacs/utility/stringutil.h"
114 #include "gromacs/utility/unique_cptr.h"
115
116 #include "calculate-spline-moduli.h"
117 #include "pme-gather.h"
118 #include "pme-gpu-internal.h"
119 #include "pme-grid.h"
120 #include "pme-internal.h"
121 #include "pme-redistribute.h"
122 #include "pme-solve.h"
123 #include "pme-spline-work.h"
124 #include "pme-spread.h"
125
126 /*! \brief Help build a descriptive message in \c error if there are
127  * \c errorReasons why PME on GPU is not supported.
128  *
129  * \returns Whether the absence of error reasons indicates that PME on GPU is supported. */
130 static bool
131 addMessageIfNotSupported(const std::list<std::string> &errorReasons,
132                          std::string                  *error)
133 {
134     bool isSupported = errorReasons.empty();
135     if (!isSupported && error)
136     {
137         std::string regressionTestMarker = "PME GPU does not support";
138         // this prefix is tested for in the regression tests script gmxtest.pl
139         *error = regressionTestMarker + ": " + gmx::joinStrings(errorReasons, "; ") + ".";
140     }
141     return isSupported;
142 }
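
/* Illustrative example (not part of the original file): with both build-time
 * reasons present, the message assembled above reads
 *   "PME GPU does not support: double precision; non-CUDA build of GROMACS."
 * and that fixed prefix is what the regression-test script gmxtest.pl checks for.
 */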
143
144 bool pme_gpu_supports_build(std::string *error)
145 {
146     std::list<std::string> errorReasons;
147     if (GMX_DOUBLE)
148     {
149         errorReasons.push_back("double precision");
150     }
151     if (GMX_GPU != GMX_GPU_CUDA)
152     {
153         errorReasons.push_back("non-CUDA build of GROMACS");
154     }
155     return addMessageIfNotSupported(errorReasons, error);
156 }
157
158 bool pme_gpu_supports_input(const t_inputrec *ir, std::string *error)
159 {
160     std::list<std::string> errorReasons;
161     if (!EEL_PME(ir->coulombtype))
162     {
163         errorReasons.push_back("systems that do not use PME for electrostatics");
164     }
165     if (ir->pme_order != 4)
166     {
167         errorReasons.push_back("interpolation orders other than 4");
168     }
169     if (ir->efep != efepNO)
170     {
171         errorReasons.push_back("free energy calculations (multiple grids)");
172     }
173     if (EVDW_PME(ir->vdwtype))
174     {
175         errorReasons.push_back("Lennard-Jones PME");
176     }
177     if (ir->cutoff_scheme == ecutsGROUP)
178     {
179         errorReasons.push_back("group cutoff scheme");
180     }
181     if (EI_TPI(ir->eI))
182     {
183         errorReasons.push_back("test particle insertion");
184     }
185     return addMessageIfNotSupported(errorReasons, error);
186 }
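
/* Usage sketch (illustrative; the helper name below is hypothetical): a caller
 * deciding whether PME can be offered to a GPU would typically combine the
 * build-time and input checks defined above, e.g.
 *
 *   static bool canPmeRunOnGpu(const t_inputrec *ir, std::string *error)
 *   {
 *       return pme_gpu_supports_build(error) && pme_gpu_supports_input(ir, error);
 *   }
 *
 * pme_gpu_check_restrictions() below repeats these checks on the fully
 * constructed gmx_pme_t as a final safeguard.
 */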
187
188 /*! \brief \libinternal
189  * Checks whether PME with the given inputs can run on a GPU.
190  * This function is an internal final check, validating the whole PME structure on creation,
191  * but it deliberately duplicates the preliminary checks of the (externally exposed) pme_gpu_supports_input(), just in case.
192  *
193  * \param[in]  pme          The PME structure.
194  * \param[out] error        The error message if the input is not supported on GPU.
195  * \returns                 True if this PME input can run on a GPU, false otherwise.
196  */
197 static bool pme_gpu_check_restrictions(const gmx_pme_t *pme, std::string *error)
198 {
199     std::list<std::string> errorReasons;
200     if (pme->nnodes != 1)
201     {
202         errorReasons.push_back("PME decomposition");
203     }
204     if (pme->pme_order != 4)
205     {
206         errorReasons.push_back("interpolation orders other than 4");
207     }
208     if (pme->bFEP)
209     {
210         errorReasons.push_back("free energy calculations (multiple grids)");
211     }
212     if (pme->doLJ)
213     {
214         errorReasons.push_back("Lennard-Jones PME");
215     }
216     if (GMX_DOUBLE)
217     {
218         errorReasons.push_back("double precision");
219     }
220     if (GMX_GPU != GMX_GPU_CUDA)
221     {
222         errorReasons.push_back("non-CUDA build of GROMACS");
223     }
224
225     return addMessageIfNotSupported(errorReasons, error);
226 }
227
228 PmeRunMode pme_run_mode(const gmx_pme_t *pme)
229 {
230     GMX_ASSERT(pme != nullptr, "Expecting valid PME data pointer");
231     return pme->runMode;
232 }
233
234 /*! \brief Number of bytes in a cache line.
235  *
236  * Must also be a multiple of the SIMD and SIMD4 register size, to
237  * preserve alignment.
238  */
239 const int gmxCacheLineSize = 64;
240
241 //! Set up coordinate communication
242 static void setup_coordinate_communication(pme_atomcomm_t *atc)
243 {
244     int nslab, n, i;
245     int fw, bw;
246
247     nslab = atc->nslab;
248
249     n = 0;
250     for (i = 1; i <= nslab/2; i++)
251     {
252         fw = (atc->nodeid + i) % nslab;
253         bw = (atc->nodeid - i + nslab) % nslab;
254         if (n < nslab - 1)
255         {
256             atc->node_dest[n] = fw;
257             atc->node_src[n]  = bw;
258             n++;
259         }
260         if (n < nslab - 1)
261         {
262             atc->node_dest[n] = bw;
263             atc->node_src[n]  = fw;
264             n++;
265         }
266     }
267 }
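
/* Worked example: for rank 0 of nslab = 4 the loop above produces
 * node_dest = {1, 3, 2} and node_src = {3, 1, 2}, i.e. the communication
 * pattern alternates between the forward and backward neighbours and ends
 * with the slab opposite to this rank.
 */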
268
269 /*! \brief Round \p n up to the next multiple of \p f */
270 static int mult_up(int n, int f)
271 {
272     return ((n + f - 1)/f)*f;
273 }
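
/* Worked example: mult_up(25, 8) == 32, so a grid dimension of 25 lines
 * distributed over 8 ranks counts as 32 lines in the load-imbalance
 * estimate below.
 */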
274
275 /*! \brief Return estimate of the load imbalance from the PME grid not being a good match for the number of PME ranks */
276 static double estimate_pme_load_imbalance(struct gmx_pme_t *pme)
277 {
278     int    nma, nmi;
279     double n1, n2, n3;
280
281     nma = pme->nnodes_major;
282     nmi = pme->nnodes_minor;
283
284     n1 = mult_up(pme->nkx, nma)*mult_up(pme->nky, nmi)*pme->nkz;
285     n2 = mult_up(pme->nkx, nma)*mult_up(pme->nkz, nmi)*pme->nky;
286     n3 = mult_up(pme->nky, nma)*mult_up(pme->nkz, nmi)*pme->nkx;
287
288     /* pme_solve is roughly double the cost of an fft */
289
290     return (n1 + n2 + 3*n3)/(double)(6*pme->nkx*pme->nky*pme->nkz);
291 }
292
293 /*! \brief Initialize atom communication data structure */
294 static void init_atomcomm(struct gmx_pme_t *pme, pme_atomcomm_t *atc,
295                           int dimind, gmx_bool bSpread)
296 {
297     int thread;
298
299     atc->dimind    = dimind;
300     atc->nslab     = 1;
301     atc->nodeid    = 0;
302     atc->pd_nalloc = 0;
303 #if GMX_MPI
304     if (pme->nnodes > 1)
305     {
306         atc->mpi_comm = pme->mpi_comm_d[dimind];
307         MPI_Comm_size(atc->mpi_comm, &atc->nslab);
308         MPI_Comm_rank(atc->mpi_comm, &atc->nodeid);
309     }
310     if (debug)
311     {
312         fprintf(debug, "For PME atom communication in dimind %d: nslab %d rank %d\n", atc->dimind, atc->nslab, atc->nodeid);
313     }
314 #endif
315
316     atc->bSpread   = bSpread;
317     atc->pme_order = pme->pme_order;
318
319     if (atc->nslab > 1)
320     {
321         snew(atc->node_dest, atc->nslab);
322         snew(atc->node_src, atc->nslab);
323         setup_coordinate_communication(atc);
324
325         snew(atc->count_thread, pme->nthread);
326         for (thread = 0; thread < pme->nthread; thread++)
327         {
328             snew(atc->count_thread[thread], atc->nslab);
329         }
330         atc->count = atc->count_thread[0];
331         snew(atc->rcount, atc->nslab);
332         snew(atc->buf_index, atc->nslab);
333     }
334
335     atc->nthread = pme->nthread;
336     if (atc->nthread > 1)
337     {
338         snew(atc->thread_plist, atc->nthread);
339     }
340     snew(atc->spline, atc->nthread);
341     for (thread = 0; thread < atc->nthread; thread++)
342     {
343         if (atc->nthread > 1)
344         {
345             snew(atc->thread_plist[thread].n, atc->nthread+2*gmxCacheLineSize);
346             atc->thread_plist[thread].n += gmxCacheLineSize;
347         }
348     }
349 }
350
351 /*! \brief Destroy an atom communication data structure and its child structs */
352 static void destroy_atomcomm(pme_atomcomm_t *atc)
353 {
354     sfree(atc->pd);
355     if (atc->nslab > 1)
356     {
357         sfree(atc->node_dest);
358         sfree(atc->node_src);
359         for (int i = 0; i < atc->nthread; i++)
360         {
361             sfree(atc->count_thread[i]);
362         }
363         sfree(atc->count_thread);
364         sfree(atc->rcount);
365         sfree(atc->buf_index);
366
367         sfree(atc->x);
368         sfree(atc->coefficient);
369         sfree(atc->f);
370     }
371     sfree(atc->idx);
372     sfree(atc->fractx);
373
374     sfree(atc->thread_idx);
375     for (int i = 0; i < atc->nthread; i++)
376     {
377         if (atc->nthread > 1)
378         {
379             int *n_ptr = atc->thread_plist[i].n - gmxCacheLineSize;
380             sfree(n_ptr);
381             sfree(atc->thread_plist[i].i);
382         }
383         sfree(atc->spline[i].ind);
384         for (int d = 0; d < ZZ; d++)
385         {
386             sfree(atc->spline[i].theta[d]);
387             sfree(atc->spline[i].dtheta[d]);
388         }
389         sfree_aligned(atc->spline[i].ptr_dtheta_z);
390         sfree_aligned(atc->spline[i].ptr_theta_z);
391     }
392     if (atc->nthread > 1)
393     {
394         sfree(atc->thread_plist);
395     }
396     sfree(atc->spline);
397 }
398
399 /*! \brief Initialize data structure for communication */
400 static void
401 init_overlap_comm(pme_overlap_t *  ol,
402                   int              norder,
403 #if GMX_MPI
404                   MPI_Comm         comm,
405 #endif
406                   int              nnodes,
407                   int              nodeid,
408                   int              ndata,
409                   int              commplainsize)
410 {
411     gmx_bool         bCont;
412 #if GMX_MPI
413     MPI_Status       stat;
414
415     ol->mpi_comm = comm;
416 #endif
417
418     ol->nnodes = nnodes;
419     ol->nodeid = nodeid;
420
421     /* Linear translation of the PME grid won't affect reciprocal space
422      * calculations, so to optimize we only interpolate "upwards",
423      * which also means we only have to consider overlap in one direction.
424      * I.e., particles on this node might also be spread to grid indices
425      * that belong to higher nodes (modulo nnodes)
426      */
427
428     ol->s2g0.resize(ol->nnodes + 1);
429     ol->s2g1.resize(ol->nnodes);
430     if (debug)
431     {
432         fprintf(debug, "PME slab boundaries:");
433     }
434     for (int i = 0; i < nnodes; i++)
435     {
436         /* s2g0 is the start of the local interpolation grid.
437          * s2g1 is the end of the local interpolation grid.
438          * Since in calc_pidx we divide the particles, not the grid lines,
439          * uniformly in space along dimension x or y, we need to round
440          * s2g0 down and s2g1 up.
441          */
442         ol->s2g0[i] = (i * ndata + 0) / nnodes;
443         ol->s2g1[i] = ((i + 1) * ndata + nnodes - 1) / nnodes + norder - 1;
444
445         if (debug)
446         {
447             fprintf(debug, "  %3d %3d", ol->s2g0[i], ol->s2g1[i]);
448         }
449     }
450     ol->s2g0[nnodes] = ndata;
451     if (debug)
452     {
453         fprintf(debug, "\n");
454     }
455
456     /* Determine how many nodes we need to communicate the grid overlap with */
457     int testRankCount = 0;
458     do
459     {
460         testRankCount++;
461         bCont = FALSE;
462         for (int i = 0; i < nnodes; i++)
463         {
464             if ((i + testRankCount <  nnodes && ol->s2g1[i] > ol->s2g0[i + testRankCount]) ||
465                 (i + testRankCount >= nnodes && ol->s2g1[i] > ol->s2g0[i + testRankCount - nnodes] + ndata))
466             {
467                 bCont = TRUE;
468             }
469         }
470     }
471     while (bCont && testRankCount < nnodes);
472
473     ol->comm_data.resize(testRankCount - 1);
474     ol->send_size = 0;
475
476     for (size_t b = 0; b < ol->comm_data.size(); b++)
477     {
478         pme_grid_comm_t *pgc = &ol->comm_data[b];
479
480         /* Send */
481         pgc->send_id = (ol->nodeid + (b + 1)) % ol->nnodes;
482         int fft_start = ol->s2g0[pgc->send_id];
483         int fft_end   = ol->s2g0[pgc->send_id + 1];
484         if (pgc->send_id < nodeid)
485         {
486             fft_start += ndata;
487             fft_end   += ndata;
488         }
489         int send_index1  = ol->s2g1[nodeid];
490         send_index1      = std::min(send_index1, fft_end);
491         pgc->send_index0 = fft_start;
492         pgc->send_nindex = std::max(0, send_index1 - pgc->send_index0);
493         ol->send_size   += pgc->send_nindex;
494
495         /* We always start receiving at the first index of our slab */
496         pgc->recv_id     = (ol->nodeid - (b + 1) + ol->nnodes) % ol->nnodes;
497         fft_start        = ol->s2g0[ol->nodeid];
498         fft_end          = ol->s2g0[ol->nodeid + 1];
499         int recv_index1  = ol->s2g1[pgc->recv_id];
500         if (pgc->recv_id > nodeid)
501         {
502             recv_index1 -= ndata;
503         }
504         recv_index1      = std::min(recv_index1, fft_end);
505         pgc->recv_index0 = fft_start;
506         pgc->recv_nindex = std::max(0, recv_index1 - pgc->recv_index0);
507     }
508
509 #if GMX_MPI
510     /* Communicate the buffer sizes to receive */
511     for (size_t b = 0; b < ol->comm_data.size(); b++)
512     {
513         MPI_Sendrecv(&ol->send_size, 1, MPI_INT, ol->comm_data[b].send_id, b,
514                      &ol->comm_data[b].recv_size, 1, MPI_INT, ol->comm_data[b].recv_id, b,
515                      ol->mpi_comm, &stat);
516     }
517 #endif
518
519     /* For a non-divisible grid we need pme_order instead of pme_order-1 */
520     ol->sendbuf.resize(norder * commplainsize);
521     ol->recvbuf.resize(norder * commplainsize);
522 }
523
524 int minimalPmeGridSize(int pmeOrder)
525 {
526     /* The actual grid size limitations are:
527      *   serial:        >= pme_order
528      *   DD, no OpenMP: >= 2*(pme_order - 1)
529      *   DD, OpenMP:    >= pme_order + 1
530      * But we use the maximum for simplicity since in practice there is not
531      * much performance difference between pme_order and 2*(pme_order - 1).
532      */
533     int minimalSize = 2*(pmeOrder - 1);
534
535     GMX_RELEASE_ASSERT(pmeOrder >= 3, "pmeOrder has to be >= 3");
536     GMX_RELEASE_ASSERT(minimalSize >= pmeOrder + 1, "The grid size should be >= pmeOrder + 1");
537
538     return minimalSize;
539 }
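
/* Worked example: for the default pme_order = 4 this gives
 * minimalPmeGridSize(4) == 2*(4 - 1) == 6, which also satisfies the
 * pme_order + 1 == 5 requirement asserted above.
 */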
540
541 bool gmx_pme_check_restrictions(int pme_order,
542                                 int nkx, int nky, int nkz,
543                                 int numPmeDomainsAlongX,
544                                 bool useThreads,
545                                 bool errorsAreFatal)
546 {
547     if (pme_order > PME_ORDER_MAX)
548     {
549         if (!errorsAreFatal)
550         {
551             return false;
552         }
553
554         std::string message = gmx::formatString(
555                     "pme_order (%d) is larger than the maximum allowed value (%d). Modify and recompile the code if you really need such a high order.",
556                     pme_order, PME_ORDER_MAX);
557         GMX_THROW(InconsistentInputError(message));
558     }
559
560     const int minGridSize = minimalPmeGridSize(pme_order);
561     if (nkx < minGridSize ||
562         nky < minGridSize ||
563         nkz < minGridSize)
564     {
565         if (!errorsAreFatal)
566         {
567             return false;
568         }
569         std::string message = gmx::formatString(
570                     "The PME grid sizes need to be >= 2*(pme_order-1) (%d)",
571                     minGridSize);
572         GMX_THROW(InconsistentInputError(message));
573     }
574
575     /* Check for a limitation of the (current) sum_fftgrid_dd code.
576      * We only allow multiple communication pulses in dim 1, not in dim 0.
577      */
578     if (useThreads && (nkx < numPmeDomainsAlongX*pme_order &&
579                        nkx != numPmeDomainsAlongX*(pme_order - 1)))
580     {
581         if (!errorsAreFatal)
582         {
583             return false;
584         }
585         gmx_fatal(FARGS, "The number of PME grid lines per rank along x is %g. But when using OpenMP threads, the number of grid lines per rank along x should be >= pme_order (%d) or = pme_order-1. To resolve this issue, use fewer ranks along x (and possibly more along y and/or z) by specifying -dd manually.",
586                   nkx/static_cast<double>(numPmeDomainsAlongX), pme_order);
587     }
588
589     return true;
590 }
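
/* Usage sketch (illustrative): code that merely probes whether a candidate
 * grid is acceptable, for example during PME tuning, can pass
 * errorsAreFatal = false and test the return value:
 *
 *   bool gridIsUsable = gmx_pme_check_restrictions(4, 48, 48, 48, 1, false, false);
 *
 * gmx_pme_init() below instead calls this function with errorsAreFatal = true,
 * so that an invalid setup aborts with a descriptive error.
 */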
591
592 /*! \brief Return \p numerator divided by \p denominator, rounded up to the nearest integer */
593 static int div_round_up(int numerator, int denominator)
594 {
595     return (numerator + denominator - 1)/denominator;
596 }
597
598 gmx_pme_t *gmx_pme_init(const t_commrec     *cr,
599                         const NumPmeDomains &numPmeDomains,
600                         const t_inputrec    *ir,
601                         int                  homenr,
602                         gmx_bool             bFreeEnergy_q,
603                         gmx_bool             bFreeEnergy_lj,
604                         gmx_bool             bReproducible,
605                         real                 ewaldcoeff_q,
606                         real                 ewaldcoeff_lj,
607                         int                  nthread,
608                         PmeRunMode           runMode,
609                         PmeGpu              *pmeGpu,
610                         gmx_device_info_t   *gpuInfo,
611                         PmeGpuProgramHandle  pmeGpuProgram,
612                         const gmx::MDLogger  & /*mdlog*/)
613 {
614     int               use_threads, sum_use_threads, i;
615     ivec              ndata;
616
617     if (debug)
618     {
619         fprintf(debug, "Creating PME data structures.\n");
620     }
621
622     unique_cptr<gmx_pme_t, gmx_pme_destroy> pme(new gmx_pme_t());
623
624     pme->sum_qgrid_tmp       = nullptr;
625     pme->sum_qgrid_dd_tmp    = nullptr;
626
627     pme->buf_nalloc          = 0;
628
629     pme->nnodes              = 1;
630     pme->bPPnode             = TRUE;
631
632     pme->nnodes_major        = numPmeDomains.x;
633     pme->nnodes_minor        = numPmeDomains.y;
634
635 #if GMX_MPI
636     if (numPmeDomains.x*numPmeDomains.y > 1)
637     {
638         pme->mpi_comm = cr->mpi_comm_mygroup;
639
640         MPI_Comm_rank(pme->mpi_comm, &pme->nodeid);
641         MPI_Comm_size(pme->mpi_comm, &pme->nnodes);
642         if (pme->nnodes != numPmeDomains.x*numPmeDomains.y)
643         {
644             gmx_incons("PME rank count mismatch");
645         }
646     }
647     else
648     {
649         pme->mpi_comm = MPI_COMM_NULL;
650     }
651 #endif
652
653     if (pme->nnodes == 1)
654     {
655 #if GMX_MPI
656         pme->mpi_comm_d[0] = MPI_COMM_NULL;
657         pme->mpi_comm_d[1] = MPI_COMM_NULL;
658 #endif
659         pme->ndecompdim   = 0;
660         pme->nodeid_major = 0;
661         pme->nodeid_minor = 0;
665     }
666     else
667     {
668         if (numPmeDomains.y == 1)
669         {
670 #if GMX_MPI
671             pme->mpi_comm_d[0] = pme->mpi_comm;
672             pme->mpi_comm_d[1] = MPI_COMM_NULL;
673 #endif
674             pme->ndecompdim   = 1;
675             pme->nodeid_major = pme->nodeid;
676             pme->nodeid_minor = 0;
677
678         }
679         else if (numPmeDomains.x == 1)
680         {
681 #if GMX_MPI
682             pme->mpi_comm_d[0] = MPI_COMM_NULL;
683             pme->mpi_comm_d[1] = pme->mpi_comm;
684 #endif
685             pme->ndecompdim   = 1;
686             pme->nodeid_major = 0;
687             pme->nodeid_minor = pme->nodeid;
688         }
689         else
690         {
691             if (pme->nnodes % numPmeDomains.x != 0)
692             {
693                 gmx_incons("For 2D PME decomposition, #PME ranks must be divisible by the number of domains along x");
694             }
695             pme->ndecompdim = 2;
696
697 #if GMX_MPI
698             MPI_Comm_split(pme->mpi_comm, pme->nodeid % numPmeDomains.y,
699                            pme->nodeid, &pme->mpi_comm_d[0]);  /* My communicator along major dimension */
700             MPI_Comm_split(pme->mpi_comm, pme->nodeid/numPmeDomains.y,
701                            pme->nodeid, &pme->mpi_comm_d[1]);  /* My communicator along minor dimension */
702
703             MPI_Comm_rank(pme->mpi_comm_d[0], &pme->nodeid_major);
704             MPI_Comm_size(pme->mpi_comm_d[0], &pme->nnodes_major);
705             MPI_Comm_rank(pme->mpi_comm_d[1], &pme->nodeid_minor);
706             MPI_Comm_size(pme->mpi_comm_d[1], &pme->nnodes_minor);
707 #endif
708         }
709         pme->bPPnode = thisRankHasDuty(cr, DUTY_PP);
710     }
711
712     pme->nthread = nthread;
713
714     /* Check if any of the PME MPI ranks uses threads */
715     use_threads = (pme->nthread > 1 ? 1 : 0);
716 #if GMX_MPI
717     if (pme->nnodes > 1)
718     {
719         MPI_Allreduce(&use_threads, &sum_use_threads, 1, MPI_INT,
720                       MPI_SUM, pme->mpi_comm);
721     }
722     else
723 #endif
724     {
725         sum_use_threads = use_threads;
726     }
727     pme->bUseThreads = (sum_use_threads > 0);
728
729     if (ir->ePBC == epbcSCREW)
730     {
731         gmx_fatal(FARGS, "pme does not (yet) work with pbc = screw");
732     }
733
734     /* NOTE:
735      * It is likely that the current gmx_pme_do() routine supports calculating
736      * only Coulomb or LJ while gmx_pme_init() configures for both,
737      * but that has never been tested.
738      * It is also likely that gmx_pme_do() supports skipping the free-energy
739      * calculation for Coulomb and/or LJ even when gmx_pme_init() configures
740      * with free-energy, but that has never been tested either.
741      */
742     pme->doCoulomb     = EEL_PME(ir->coulombtype);
743     pme->doLJ          = EVDW_PME(ir->vdwtype);
744     pme->bFEP_q        = ((ir->efep != efepNO) && bFreeEnergy_q);
745     pme->bFEP_lj       = ((ir->efep != efepNO) && bFreeEnergy_lj);
746     pme->bFEP          = (pme->bFEP_q || pme->bFEP_lj);
747     pme->nkx           = ir->nkx;
748     pme->nky           = ir->nky;
749     pme->nkz           = ir->nkz;
750     pme->bP3M          = (ir->coulombtype == eelP3M_AD || getenv("GMX_PME_P3M") != nullptr);
751     pme->pme_order     = ir->pme_order;
752     pme->ewaldcoeff_q  = ewaldcoeff_q;
753     pme->ewaldcoeff_lj = ewaldcoeff_lj;
754
755     /* Always constant electrostatics coefficients */
756     pme->epsilon_r     = ir->epsilon_r;
757
758     /* Always constant LJ coefficients */
759     pme->ljpme_combination_rule = ir->ljpme_combination_rule;
760
761     // The box requires scaling when nwalls = 2; we store that condition as well
762     // as the scaling factor
763     delete pme->boxScaler;
764     pme->boxScaler = new EwaldBoxZScaler(*ir);
765
766     /* If we violate restrictions, generate a fatal error here */
767     gmx_pme_check_restrictions(pme->pme_order,
768                                pme->nkx, pme->nky, pme->nkz,
769                                pme->nnodes_major,
770                                pme->bUseThreads,
771                                true);
772
773     if (pme->nnodes > 1)
774     {
775         double imbal;
776
777 #if GMX_MPI
778         MPI_Type_contiguous(DIM, GMX_MPI_REAL, &(pme->rvec_mpi));
779         MPI_Type_commit(&(pme->rvec_mpi));
780 #endif
781
782         /* Note that the coefficient spreading and force gathering, which usually
783          * takes about the same amount of time as FFT+solve_pme,
784          * is always fully load balanced
785          * (unless the coefficient distribution is inhomogeneous).
786          */
787
788         imbal = estimate_pme_load_imbalance(pme.get());
789         if (imbal >= 1.2 && pme->nodeid_major == 0 && pme->nodeid_minor == 0)
790         {
791             fprintf(stderr,
792                     "\n"
793                     "NOTE: The load imbalance in PME FFT and solve is %d%%.\n"
794                     "      For optimal PME load balancing\n"
795                     "      PME grid_x (%d) and grid_y (%d) should be divisible by #PME_ranks_x (%d)\n"
796                     "      and PME grid_y (%d) and grid_z (%d) should be divisible by #PME_ranks_y (%d)\n"
797                     "\n",
798                     (int)((imbal-1)*100 + 0.5),
799                     pme->nkx, pme->nky, pme->nnodes_major,
800                     pme->nky, pme->nkz, pme->nnodes_minor);
801         }
802     }
803
804     /* For a non-divisible grid we need pme_order instead of pme_order-1 */
805     /* In sum_qgrid_dd x overlap is copied in place: take padding into account.
806      * y is always copied through a buffer: we don't need padding in z,
807      * but we do need the overlap in x because of the communication order.
808      */
809     init_overlap_comm(&pme->overlap[0], pme->pme_order,
810 #if GMX_MPI
811                       pme->mpi_comm_d[0],
812 #endif
813                       pme->nnodes_major, pme->nodeid_major,
814                       pme->nkx,
815                       (div_round_up(pme->nky, pme->nnodes_minor)+pme->pme_order)*(pme->nkz+pme->pme_order-1));
816
817     /* Along overlap dim 1 we can send in multiple pulses in sum_fftgrid_dd.
818      * We do this with an offset buffer of equal size, so we need to allocate
819      * extra for the offset. That's what the (+1)*pme->nkz is for.
820      */
821     init_overlap_comm(&pme->overlap[1], pme->pme_order,
822 #if GMX_MPI
823                       pme->mpi_comm_d[1],
824 #endif
825                       pme->nnodes_minor, pme->nodeid_minor,
826                       pme->nky,
827                       (div_round_up(pme->nkx, pme->nnodes_major)+pme->pme_order+1)*pme->nkz);
828
829     /* Double-check for a limitation of the (current) sum_fftgrid_dd code.
830      * Note that gmx_pme_check_restrictions checked for this already.
831      */
832     if (pme->bUseThreads && (pme->overlap[0].comm_data.size() > 1))
833     {
834         gmx_incons("More than one communication pulse required for grid overlap communication along the major dimension while using threads");
835     }
836
837     snew(pme->bsp_mod[XX], pme->nkx);
838     snew(pme->bsp_mod[YY], pme->nky);
839     snew(pme->bsp_mod[ZZ], pme->nkz);
840
841     pme->gpu     = pmeGpu; /* Carrying over the single GPU structure */
842     pme->runMode = runMode;
843
844     /* The required size of the interpolation grid, including overlap.
845      * The allocated size (pmegrid_n?) might be slightly larger.
846      */
847     pme->pmegrid_nx = pme->overlap[0].s2g1[pme->nodeid_major] -
848         pme->overlap[0].s2g0[pme->nodeid_major];
849     pme->pmegrid_ny = pme->overlap[1].s2g1[pme->nodeid_minor] -
850         pme->overlap[1].s2g0[pme->nodeid_minor];
851     pme->pmegrid_nz_base = pme->nkz;
852     pme->pmegrid_nz      = pme->pmegrid_nz_base + pme->pme_order - 1;
853     set_grid_alignment(&pme->pmegrid_nz, pme->pme_order);
854     pme->pmegrid_start_ix = pme->overlap[0].s2g0[pme->nodeid_major];
855     pme->pmegrid_start_iy = pme->overlap[1].s2g0[pme->nodeid_minor];
856     pme->pmegrid_start_iz = 0;
857
858     make_gridindex_to_localindex(pme->nkx,
859                                  pme->pmegrid_start_ix,
860                                  pme->pmegrid_nx - (pme->pme_order-1),
861                                  &pme->nnx, &pme->fshx);
862     make_gridindex_to_localindex(pme->nky,
863                                  pme->pmegrid_start_iy,
864                                  pme->pmegrid_ny - (pme->pme_order-1),
865                                  &pme->nny, &pme->fshy);
866     make_gridindex_to_localindex(pme->nkz,
867                                  pme->pmegrid_start_iz,
868                                  pme->pmegrid_nz_base,
869                                  &pme->nnz, &pme->fshz);
870
871     pme->spline_work = make_pme_spline_work(pme->pme_order);
872
873     ndata[0]    = pme->nkx;
874     ndata[1]    = pme->nky;
875     ndata[2]    = pme->nkz;
876     /* It doesn't matter if we allocate too many grids here,
877      * since we only allocate and use the ones we need.
878      */
879     if (pme->doLJ)
880     {
881         pme->ngrids = ((ir->ljpme_combination_rule == eljpmeLB) ? DO_Q_AND_LJ_LB : DO_Q_AND_LJ);
882     }
883     else
884     {
885         pme->ngrids = DO_Q;
886     }
887     snew(pme->fftgrid, pme->ngrids);
888     snew(pme->cfftgrid, pme->ngrids);
889     snew(pme->pfft_setup, pme->ngrids);
890
891     for (i = 0; i < pme->ngrids; ++i)
892     {
893         if ((i <  DO_Q && pme->doCoulomb && (i == 0 ||
894                                              bFreeEnergy_q)) ||
895             (i >= DO_Q && pme->doLJ && (i == 2 ||
896                                         bFreeEnergy_lj ||
897                                         ir->ljpme_combination_rule == eljpmeLB)))
898         {
899             pmegrids_init(&pme->pmegrid[i],
900                           pme->pmegrid_nx, pme->pmegrid_ny, pme->pmegrid_nz,
901                           pme->pmegrid_nz_base,
902                           pme->pme_order,
903                           pme->bUseThreads,
904                           pme->nthread,
905                           pme->overlap[0].s2g1[pme->nodeid_major]-pme->overlap[0].s2g0[pme->nodeid_major+1],
906                           pme->overlap[1].s2g1[pme->nodeid_minor]-pme->overlap[1].s2g0[pme->nodeid_minor+1]);
907             /* This routine will allocate the grid data to fit the FFTs */
908             const auto allocateRealGridForGpu = (pme->runMode == PmeRunMode::Mixed) ? gmx::PinningPolicy::CanBePinned : gmx::PinningPolicy::CannotBePinned;
909             gmx_parallel_3dfft_init(&pme->pfft_setup[i], ndata,
910                                     &pme->fftgrid[i], &pme->cfftgrid[i],
911                                     pme->mpi_comm_d,
912                                     bReproducible, pme->nthread, allocateRealGridForGpu);
913
914         }
915     }
916
917     if (!pme->bP3M)
918     {
919         /* Use plain SPME B-spline interpolation */
920         make_bspline_moduli(pme->bsp_mod, pme->nkx, pme->nky, pme->nkz, pme->pme_order);
921     }
922     else
923     {
924         /* Use the P3M grid-optimized influence function */
925         make_p3m_bspline_moduli(pme->bsp_mod, pme->nkx, pme->nky, pme->nkz, pme->pme_order);
926     }
927
928     /* Use atc[0] for spreading */
929     init_atomcomm(pme.get(), &pme->atc[0], numPmeDomains.x > 1 ? 0 : 1, TRUE);
930     if (pme->ndecompdim >= 2)
931     {
932         init_atomcomm(pme.get(), &pme->atc[1], 1, FALSE);
933     }
934
935     if (pme->nnodes == 1)
936     {
937         pme->atc[0].n = homenr;
938         pme_realloc_atomcomm_things(&pme->atc[0]);
939     }
940
941     pme->lb_buf1       = nullptr;
942     pme->lb_buf2       = nullptr;
943     pme->lb_buf_nalloc = 0;
944
945     if (pme_gpu_active(pme.get()))
946     {
947         if (!pme->gpu)
948         {
949             // Initial check of validity of the data
950             std::string errorString;
951             bool        canRunOnGpu = pme_gpu_check_restrictions(pme.get(), &errorString);
952             if (!canRunOnGpu)
953             {
954                 GMX_THROW(gmx::NotImplementedError(errorString));
955             }
956         }
957
958         pme_gpu_reinit(pme.get(), gpuInfo, pmeGpuProgram);
959     }
960
961     pme_init_all_work(&pme->solve_work, pme->nthread, pme->nkx);
962
963     // no exception was thrown during the init, so we hand over the PME structure handle
964     return pme.release();
965 }
966
967 void gmx_pme_reinit(struct gmx_pme_t **pmedata,
968                     const t_commrec   *cr,
969                     struct gmx_pme_t * pme_src,
970                     const t_inputrec * ir,
971                     const ivec         grid_size,
972                     real               ewaldcoeff_q,
973                     real               ewaldcoeff_lj)
974 {
975     int        homenr;
976
977     // Create a copy of t_inputrec fields that are used in gmx_pme_init().
978     // TODO: This would be better as just copying a sub-structure that contains
979     // all the PME parameters and nothing else.
980     t_inputrec irc;
981     irc.ePBC                   = ir->ePBC;
982     irc.coulombtype            = ir->coulombtype;
983     irc.vdwtype                = ir->vdwtype;
984     irc.efep                   = ir->efep;
985     irc.pme_order              = ir->pme_order;
986     irc.epsilon_r              = ir->epsilon_r;
987     irc.ljpme_combination_rule = ir->ljpme_combination_rule;
988     irc.nkx                    = grid_size[XX];
989     irc.nky                    = grid_size[YY];
990     irc.nkz                    = grid_size[ZZ];
991
992     if (pme_src->nnodes == 1)
993     {
994         homenr = pme_src->atc[0].n;
995     }
996     else
997     {
998         homenr = -1;
999     }
1000
1001     try
1002     {
1003         const gmx::MDLogger dummyLogger;
1004         // This is a reinit, which currently only changes the grid size and coefficients,
1005         // so we do not expect any actual logging.
1006         // TODO: when PME is an object, it should take reference to mdlog on construction and save it.
1007         GMX_ASSERT(pmedata, "Invalid PME pointer");
1008         NumPmeDomains numPmeDomains = { pme_src->nnodes_major, pme_src->nnodes_minor };
1009         *pmedata = gmx_pme_init(cr, numPmeDomains,
1010                                 &irc, homenr, pme_src->bFEP_q, pme_src->bFEP_lj, FALSE, ewaldcoeff_q, ewaldcoeff_lj,
1011                                 pme_src->nthread, pme_src->runMode, pme_src->gpu, nullptr, nullptr, dummyLogger);
1012         //TODO this is mostly passing around current values
1013     }
1014     GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1015
1016     /* We can easily reuse the allocated pme grids in pme_src */
1017     reuse_pmegrids(&pme_src->pmegrid[PME_GRID_QA], &(*pmedata)->pmegrid[PME_GRID_QA]);
1018     /* We would like to reuse the fft grids, but that's harder */
1019 }
1020
1021 void gmx_pme_calc_energy(struct gmx_pme_t *pme, int n, rvec *x, real *q, real *V)
1022 {
1023     pme_atomcomm_t *atc;
1024     pmegrids_t     *grid;
1025
1026     if (pme->nnodes > 1)
1027     {
1028         gmx_incons("gmx_pme_calc_energy called in parallel");
1029     }
1030     if (pme->bFEP_q)
1031     {
1032         gmx_incons("gmx_pme_calc_energy with free energy");
1033     }
1034
1035     atc            = &pme->atc_energy;
1036     atc->nthread   = 1;
1037     if (atc->spline == nullptr)
1038     {
1039         snew(atc->spline, atc->nthread);
1040     }
1041     atc->nslab     = 1;
1042     atc->bSpread   = TRUE;
1043     atc->pme_order = pme->pme_order;
1044     atc->n         = n;
1045     pme_realloc_atomcomm_things(atc);
1046     atc->x           = x;
1047     atc->coefficient = q;
1048
1049     /* We only use the A-charges grid */
1050     grid = &pme->pmegrid[PME_GRID_QA];
1051
1052     /* Only calculate the spline coefficients, don't actually spread */
1053     spread_on_grid(pme, atc, nullptr, TRUE, FALSE, pme->fftgrid[PME_GRID_QA], FALSE, PME_GRID_QA);
1054
1055     *V = gather_energy_bsplines(pme, grid->grid.grid, atc);
1056 }
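
/* Usage sketch (illustrative): a serial caller that only needs the
 * reciprocal-space potential of a set of charges, as in test-particle
 * insertion, could use (with its own pme handle, atom count n and arrays x, q)
 *
 *   real V;
 *   gmx_pme_calc_energy(pme, n, x, q, &V);
 *
 * The checks at the top of this function enforce the serial, non-free-energy
 * case that this code path supports.
 */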
1057
1058 /*! \brief Calculate initial Lorentz-Berthelot coefficients for LJ-PME */
1059 static void
1060 calc_initial_lb_coeffs(struct gmx_pme_t *pme, real *local_c6, real *local_sigma)
1061 {
1062     int  i;
1063     for (i = 0; i < pme->atc[0].n; ++i)
1064     {
1065         real sigma4;
1066         sigma4                     = local_sigma[i];
1067         sigma4                     = sigma4*sigma4;
1068         sigma4                     = sigma4*sigma4;
1069         pme->atc[0].coefficient[i] = local_c6[i] / sigma4;
1070     }
1071 }
1072
1073 /*! \brief Calculate next Lorentz-Berthelot coefficients for LJ-PME */
1074 static void
1075 calc_next_lb_coeffs(struct gmx_pme_t *pme, real *local_sigma)
1076 {
1077     int  i;
1078
1079     for (i = 0; i < pme->atc[0].n; ++i)
1080     {
1081         pme->atc[0].coefficient[i] *= local_sigma[i];
1082     }
1083 }
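
/* Resulting coefficient sequence (derived from the two helpers above):
 * calc_initial_lb_coeffs() sets coefficient[i] = C6_i/sigma_i^4 and every
 * subsequent calc_next_lb_coeffs() call multiplies by sigma_i, so the seven
 * Lorentz-Berthelot grids used in gmx_pme_do() spread C6_i*sigma_i^k for
 * k = -3, ..., +3.
 */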
1084
1085 int gmx_pme_do(struct gmx_pme_t *pme,
1086                int start,       int homenr,
1087                rvec x[],        rvec f[],
1088                real chargeA[],  real chargeB[],
1089                real c6A[],      real c6B[],
1090                real sigmaA[],   real sigmaB[],
1091                matrix box,      const t_commrec *cr,
1092                int  maxshift_x, int maxshift_y,
1093                t_nrnb *nrnb,    gmx_wallcycle *wcycle,
1094                matrix vir_q,    matrix vir_lj,
1095                real *energy_q,  real *energy_lj,
1096                real lambda_q,   real lambda_lj,
1097                real *dvdlambda_q, real *dvdlambda_lj,
1098                int flags)
1099 {
1100     GMX_ASSERT(pme->runMode == PmeRunMode::CPU, "gmx_pme_do should not be called on the GPU PME run.");
1101
1102     int                  d, i, j, npme, grid_index, max_grid_index;
1103     int                  n_d;
1104     pme_atomcomm_t      *atc        = nullptr;
1105     pmegrids_t          *pmegrid    = nullptr;
1106     real                *grid       = nullptr;
1107     rvec                *f_d;
1108     real                *coefficient = nullptr;
1109     real                 energy_AB[4];
1110     matrix               vir_AB[4];
1111     real                 scale, lambda;
1112     gmx_bool             bClearF;
1113     gmx_parallel_3dfft_t pfft_setup;
1114     real              *  fftgrid;
1115     t_complex          * cfftgrid;
1116     int                  thread;
1117     gmx_bool             bFirst, bDoSplines;
1118     int                  fep_state;
1119     int                  fep_states_lj           = pme->bFEP_lj ? 2 : 1;
1120     const gmx_bool       bCalcEnerVir            = flags & GMX_PME_CALC_ENER_VIR;
1121     const gmx_bool       bBackFFT                = flags & (GMX_PME_CALC_F | GMX_PME_CALC_POT);
1122     const gmx_bool       bCalcF                  = flags & GMX_PME_CALC_F;
1123
1124     assert(pme->nnodes > 0);
1125     assert(pme->nnodes == 1 || pme->ndecompdim > 0);
1126
1127     if (pme->nnodes > 1)
1128     {
1129         atc      = &pme->atc[0];
1130         atc->npd = homenr;
1131         if (atc->npd > atc->pd_nalloc)
1132         {
1133             atc->pd_nalloc = over_alloc_dd(atc->npd);
1134             srenew(atc->pd, atc->pd_nalloc);
1135         }
1136         for (d = pme->ndecompdim-1; d >= 0; d--)
1137         {
1138             atc           = &pme->atc[d];
1139             atc->maxshift = (atc->dimind == 0 ? maxshift_x : maxshift_y);
1140         }
1141     }
1142     else
1143     {
1144         atc = &pme->atc[0];
1145         /* This could be necessary for TPI */
1146         pme->atc[0].n = homenr;
1147         if (DOMAINDECOMP(cr))
1148         {
1149             pme_realloc_atomcomm_things(atc);
1150         }
1151         atc->x = x;
1152         atc->f = f;
1153     }
1154
1155     matrix scaledBox;
1156     pme->boxScaler->scaleBox(box, scaledBox);
1157
1158     gmx::invertBoxMatrix(scaledBox, pme->recipbox);
1159     bFirst = TRUE;
1160
1161     /* For simplicity, we construct the splines for all particles if
1162      * more than one PME calculation is needed. Some optimization
1163      * could be done by keeping track of which atoms have splines
1164      * constructed, and construct new splines on each pass for atoms
1165      * that don't yet have them.
1166      */
1167
1168     bDoSplines = pme->bFEP || (pme->doCoulomb && pme->doLJ);
1169
1170     /* We need a maximum of four separate PME calculations:
1171      * grid_index=0: Coulomb PME with charges from state A
1172      * grid_index=1: Coulomb PME with charges from state B
1173      * grid_index=2: LJ PME with C6 from state A
1174      * grid_index=3: LJ PME with C6 from state B
1175      * For Lorentz-Berthelot combination rules, a separate loop is used to
1176      * calculate all the terms
1177      */
1178
1179     /* If we are doing LJ-PME with LB, we only do Q here */
1180     max_grid_index = (pme->ljpme_combination_rule == eljpmeLB) ? DO_Q : DO_Q_AND_LJ;
1181
1182     for (grid_index = 0; grid_index < max_grid_index; ++grid_index)
1183     {
1184         /* Check if we should do calculations at this grid_index
1185          * If grid_index is odd we should be doing FEP
1186          * If grid_index < 2 we should be doing electrostatic PME
1187          * If grid_index >= 2 we should be doing LJ-PME
1188          */
1189         if ((grid_index <  DO_Q && (!pme->doCoulomb ||
1190                                     (grid_index == 1 && !pme->bFEP_q))) ||
1191             (grid_index >= DO_Q && (!pme->doLJ ||
1192                                     (grid_index == 3 && !pme->bFEP_lj))))
1193         {
1194             continue;
1195         }
1196         /* Unpack structure */
1197         pmegrid    = &pme->pmegrid[grid_index];
1198         fftgrid    = pme->fftgrid[grid_index];
1199         cfftgrid   = pme->cfftgrid[grid_index];
1200         pfft_setup = pme->pfft_setup[grid_index];
1201         switch (grid_index)
1202         {
1203             case 0: coefficient = chargeA + start; break;
1204             case 1: coefficient = chargeB + start; break;
1205             case 2: coefficient = c6A + start; break;
1206             case 3: coefficient = c6B + start; break;
1207         }
1208
1209         grid = pmegrid->grid.grid;
1210
1211         if (debug)
1212         {
1213             fprintf(debug, "PME: number of ranks = %d, rank = %d\n",
1214                     cr->nnodes, cr->nodeid);
1215             fprintf(debug, "Grid = %p\n", (void*)grid);
1216             if (grid == nullptr)
1217             {
1218                 gmx_fatal(FARGS, "No grid!");
1219             }
1220         }
1221
1222         if (pme->nnodes == 1)
1223         {
1224             atc->coefficient = coefficient;
1225         }
1226         else
1227         {
1228             wallcycle_start(wcycle, ewcPME_REDISTXF);
1229             do_redist_pos_coeffs(pme, cr, start, homenr, bFirst, x, coefficient);
1230
1231             wallcycle_stop(wcycle, ewcPME_REDISTXF);
1232         }
1233
1234         if (debug)
1235         {
1236             fprintf(debug, "Rank= %6d, pme local particles=%6d\n",
1237                     cr->nodeid, atc->n);
1238         }
1239
1240         if (flags & GMX_PME_SPREAD)
1241         {
1242             wallcycle_start(wcycle, ewcPME_SPREAD);
1243
1244             /* Spread the coefficients on a grid */
1245             spread_on_grid(pme, &pme->atc[0], pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);
1246
1247             if (bFirst)
1248             {
1249                 inc_nrnb(nrnb, eNR_WEIGHTS, DIM*atc->n);
1250             }
1251             inc_nrnb(nrnb, eNR_SPREADBSP,
1252                      pme->pme_order*pme->pme_order*pme->pme_order*atc->n);
1253
1254             if (!pme->bUseThreads)
1255             {
1256                 wrap_periodic_pmegrid(pme, grid);
1257
1258                 /* sum contributions to local grid from other nodes */
1259 #if GMX_MPI
1260                 if (pme->nnodes > 1)
1261                 {
1262                     gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
1263                 }
1264 #endif
1265
1266                 copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
1267             }
1268
1269             wallcycle_stop(wcycle, ewcPME_SPREAD);
1270
1271             /* TODO If the OpenMP and single-threaded implementations
1272                converge, then spread_on_grid() and
1273                copy_pmegrid_to_fftgrid() will perhaps live in the same
1274                source file.
1275              */
1276         }
1277
1278         /* Here we start a large thread parallel region */
1279 #pragma omp parallel num_threads(pme->nthread) private(thread)
1280         {
1281             try
1282             {
1283                 thread = gmx_omp_get_thread_num();
1284                 if (flags & GMX_PME_SOLVE)
1285                 {
1286                     int loop_count;
1287
1288                     /* do 3d-fft */
1289                     if (thread == 0)
1290                     {
1291                         wallcycle_start(wcycle, ewcPME_FFT);
1292                     }
1293                     gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX,
1294                                                thread, wcycle);
1295                     if (thread == 0)
1296                     {
1297                         wallcycle_stop(wcycle, ewcPME_FFT);
1298                     }
1299
1300                     /* solve in k-space for our local cells */
1301                     if (thread == 0)
1302                     {
1303                         wallcycle_start(wcycle, (grid_index < DO_Q ? ewcPME_SOLVE : ewcLJPME));
1304                     }
1305                     if (grid_index < DO_Q)
1306                     {
1307                         loop_count =
1308                             solve_pme_yzx(pme, cfftgrid,
1309                                           scaledBox[XX][XX]*scaledBox[YY][YY]*scaledBox[ZZ][ZZ],
1310                                           bCalcEnerVir,
1311                                           pme->nthread, thread);
1312                     }
1313                     else
1314                     {
1315                         loop_count =
1316                             solve_pme_lj_yzx(pme, &cfftgrid, FALSE,
1317                                              scaledBox[XX][XX]*scaledBox[YY][YY]*scaledBox[ZZ][ZZ],
1318                                              bCalcEnerVir,
1319                                              pme->nthread, thread);
1320                     }
1321
1322                     if (thread == 0)
1323                     {
1324                         wallcycle_stop(wcycle, (grid_index < DO_Q ? ewcPME_SOLVE : ewcLJPME));
1325                         inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
1326                     }
1327                 }
1328
1329                 if (bBackFFT)
1330                 {
1331                     /* do 3d-invfft */
1332                     if (thread == 0)
1333                     {
1334                         wallcycle_start(wcycle, ewcPME_FFT);
1335                     }
1336                     gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL,
1337                                                thread, wcycle);
1338                     if (thread == 0)
1339                     {
1340                         wallcycle_stop(wcycle, ewcPME_FFT);
1341
1342
1343                         if (pme->nodeid == 0)
1344                         {
1345                             real ntot = pme->nkx*pme->nky*pme->nkz;
1346                             npme  = static_cast<int>(ntot*std::log(ntot)/std::log(2.0));
1347                             inc_nrnb(nrnb, eNR_FFT, 2*npme);
1348                         }
1349
1350                         /* Note: this wallcycle region is closed below
1351                            outside an OpenMP region, so take care if
1352                            refactoring code here. */
1353                         wallcycle_start(wcycle, ewcPME_GATHER);
1354                     }
1355
1356                     copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
1357                 }
1358             } GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1359         }
1360         /* End of thread parallel section.
1361          * With MPI we have to synchronize here before gmx_sum_qgrid_dd.
1362          */
1363
1364         if (bBackFFT)
1365         {
1366             /* distribute local grid to all nodes */
1367 #if GMX_MPI
1368             if (pme->nnodes > 1)
1369             {
1370                 gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
1371             }
1372 #endif
1373
1374             unwrap_periodic_pmegrid(pme, grid);
1375         }
1376
1377         if (bCalcF)
1378         {
1379             /* interpolate forces for our local atoms */
1380
1381
1382             /* If we are running without parallelization,
1383              * atc->f is the actual force array, not a buffer,
1384              * therefore we should not clear it.
1385              */
1386             lambda  = grid_index < DO_Q ? lambda_q : lambda_lj;
1387             bClearF = (bFirst && PAR(cr));
1388 #pragma omp parallel for num_threads(pme->nthread) schedule(static)
1389             for (thread = 0; thread < pme->nthread; thread++)
1390             {
1391                 try
1392                 {
1393                     gather_f_bsplines(pme, grid, bClearF, atc,
1394                                       &atc->spline[thread],
1395                                       pme->bFEP ? (grid_index % 2 == 0 ? 1.0-lambda : lambda) : 1.0);
1396                 }
1397                 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1398             }
1399
1400
1401             inc_nrnb(nrnb, eNR_GATHERFBSP,
1402                      pme->pme_order*pme->pme_order*pme->pme_order*pme->atc[0].n);
1403             /* Note: this wallcycle region is opened above inside an OpenMP
1404                region, so take care if refactoring code here. */
1405             wallcycle_stop(wcycle, ewcPME_GATHER);
1406         }
1407
1408         if (bCalcEnerVir)
1409         {
1410             /* This should only be called on the master thread
1411              * and after the threads have synchronized.
1412              */
1413             if (grid_index < 2)
1414             {
1415                 get_pme_ener_vir_q(pme->solve_work, pme->nthread, &energy_AB[grid_index], vir_AB[grid_index]);
1416             }
1417             else
1418             {
1419                 get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &energy_AB[grid_index], vir_AB[grid_index]);
1420             }
1421         }
1422         bFirst = FALSE;
1423     } /* of grid_index-loop */
1424
1425     /* For Lorentz-Berthelot combination rules in LJ-PME, we need to calculate
1426      * seven terms. */
1427
1428     if (pme->doLJ && pme->ljpme_combination_rule == eljpmeLB)
1429     {
1430         /* Loop over A- and B-state if we are doing FEP */
1431         for (fep_state = 0; fep_state < fep_states_lj; ++fep_state)
1432         {
1433             real *local_c6 = nullptr, *local_sigma = nullptr, *RedistC6 = nullptr, *RedistSigma = nullptr;
1434             if (pme->nnodes == 1)
1435             {
1436                 if (pme->lb_buf1 == nullptr)
1437                 {
1438                     pme->lb_buf_nalloc = pme->atc[0].n;
1439                     snew(pme->lb_buf1, pme->lb_buf_nalloc);
1440                 }
1441                 pme->atc[0].coefficient = pme->lb_buf1;
1442                 switch (fep_state)
1443                 {
1444                     case 0:
1445                         local_c6      = c6A;
1446                         local_sigma   = sigmaA;
1447                         break;
1448                     case 1:
1449                         local_c6      = c6B;
1450                         local_sigma   = sigmaB;
1451                         break;
1452                     default:
1453                         gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
1454                 }
1455             }
1456             else
1457             {
1458                 atc = &pme->atc[0];
1459                 switch (fep_state)
1460                 {
1461                     case 0:
1462                         RedistC6      = c6A;
1463                         RedistSigma   = sigmaA;
1464                         break;
1465                     case 1:
1466                         RedistC6      = c6B;
1467                         RedistSigma   = sigmaB;
1468                         break;
1469                     default:
1470                         gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
1471                 }
1472                 wallcycle_start(wcycle, ewcPME_REDISTXF);
1473
1474                 do_redist_pos_coeffs(pme, cr, start, homenr, bFirst, x, RedistC6);
1475                 if (pme->lb_buf_nalloc < atc->n)
1476                 {
1477                     pme->lb_buf_nalloc = atc->nalloc;
1478                     srenew(pme->lb_buf1, pme->lb_buf_nalloc);
1479                     srenew(pme->lb_buf2, pme->lb_buf_nalloc);
1480                 }
1481                 local_c6 = pme->lb_buf1;
1482                 for (i = 0; i < atc->n; ++i)
1483                 {
1484                     local_c6[i] = atc->coefficient[i];
1485                 }
1486
1487                 do_redist_pos_coeffs(pme, cr, start, homenr, FALSE, x, RedistSigma);
1488                 local_sigma = pme->lb_buf2;
1489                 for (i = 0; i < atc->n; ++i)
1490                 {
1491                     local_sigma[i] = atc->coefficient[i];
1492                 }
1493
1494                 wallcycle_stop(wcycle, ewcPME_REDISTXF);
1495             }
1496             calc_initial_lb_coeffs(pme, local_c6, local_sigma);
1497
1498             /* Seven terms in LJ-PME with LB; grid_index < 2 is reserved for electrostatics */
1499             for (grid_index = 2; grid_index < 9; ++grid_index)
1500             {
1501                 /* Unpack structure */
1502                 pmegrid    = &pme->pmegrid[grid_index];
1503                 fftgrid    = pme->fftgrid[grid_index];
1504                 pfft_setup = pme->pfft_setup[grid_index];
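                     /* Advance the per-atom coefficients to the next term of the
                      * sigma expansion before spreading this grid. */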
1505                 calc_next_lb_coeffs(pme, local_sigma);
1506                 grid = pmegrid->grid.grid;
1507
1508                 if (flags & GMX_PME_SPREAD)
1509                 {
1510                     wallcycle_start(wcycle, ewcPME_SPREAD);
1511                     /* Spread the c6 on a grid */
1512                     spread_on_grid(pme, &pme->atc[0], pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);
1513
1514                     if (bFirst)
1515                     {
1516                         inc_nrnb(nrnb, eNR_WEIGHTS, DIM*atc->n);
1517                     }
1518
1519                     inc_nrnb(nrnb, eNR_SPREADBSP,
1520                              pme->pme_order*pme->pme_order*pme->pme_order*atc->n);
1521                     if (pme->nthread == 1)
1522                     {
1523                         wrap_periodic_pmegrid(pme, grid);
1524                         /* sum contributions to local grid from other nodes */
1525 #if GMX_MPI
1526                         if (pme->nnodes > 1)
1527                         {
1528                             gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
1529                         }
1530 #endif
1531                         copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
1532                     }
1533                     wallcycle_stop(wcycle, ewcPME_SPREAD);
1534                 }
1535                 /* Here we start a large thread-parallel region */
1536 #pragma omp parallel num_threads(pme->nthread) private(thread)
1537                 {
1538                     try
1539                     {
1540                         thread = gmx_omp_get_thread_num();
1541                         if (flags & GMX_PME_SOLVE)
1542                         {
1543                             /* do 3d-fft */
1544                             if (thread == 0)
1545                             {
1546                                 wallcycle_start(wcycle, ewcPME_FFT);
1547                             }
1548
1549                             gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX,
1550                                                        thread, wcycle);
1551                             if (thread == 0)
1552                             {
1553                                 wallcycle_stop(wcycle, ewcPME_FFT);
1554                             }
1555                         }
1556                     }
1557                     GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1558                 }
1559                 bFirst = FALSE;
1560             }
1561             if (flags & GMX_PME_SOLVE)
1562             {
1563                 /* solve in k-space for our local cells */
1564 #pragma omp parallel num_threads(pme->nthread) private(thread)
1565                 {
1566                     try
1567                     {
1568                         int loop_count;
1569                         thread = gmx_omp_get_thread_num();
1570                         if (thread == 0)
1571                         {
1572                             wallcycle_start(wcycle, ewcLJPME);
1573                         }
1574
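                             /* The diagonal product of the (triangular) scaled box
                              * matrix equals its determinant, i.e. the box volume. */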
1575                         loop_count =
1576                             solve_pme_lj_yzx(pme, &pme->cfftgrid[2], TRUE,
1577                                              scaledBox[XX][XX]*scaledBox[YY][YY]*scaledBox[ZZ][ZZ],
1578                                              bCalcEnerVir,
1579                                              pme->nthread, thread);
1580                         if (thread == 0)
1581                         {
1582                             wallcycle_stop(wcycle, ewcLJPME);
1583                             inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
1584                         }
1585                     }
1586                     GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1587                 }
1588             }
1589
1590             if (bCalcEnerVir)
1591             {
1592                 /* This should only be called on the master thread and
1593                  * after the threads have synchronized.
1594                  */
1595                 get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &energy_AB[2+fep_state], vir_AB[2+fep_state]);
1596             }
1597
1598             if (bBackFFT)
1599             {
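                 /* Forces only need clearing on the first gather below if the
                  * Coulomb gather above has not already written to them. */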
1600                 bFirst = !pme->doCoulomb;
1601                 calc_initial_lb_coeffs(pme, local_c6, local_sigma);
1602                 for (grid_index = 8; grid_index >= 2; --grid_index)
1603                 {
1604                     /* Unpack structure */
1605                     pmegrid    = &pme->pmegrid[grid_index];
1606                     fftgrid    = pme->fftgrid[grid_index];
1607                     pfft_setup = pme->pfft_setup[grid_index];
1608                     grid       = pmegrid->grid.grid;
1609                     calc_next_lb_coeffs(pme, local_sigma);
1610 #pragma omp parallel num_threads(pme->nthread) private(thread)
1611                     {
1612                         try
1613                         {
1614                             thread = gmx_omp_get_thread_num();
1615                             /* do 3d-invfft */
1616                             if (thread == 0)
1617                             {
1618                                 wallcycle_start(wcycle, ewcPME_FFT);
1619                             }
1620
1621                             gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL,
1622                                                        thread, wcycle);
1623                             if (thread == 0)
1624                             {
1625                                 wallcycle_stop(wcycle, ewcPME_FFT);
1626
1627
1628                                 if (pme->nodeid == 0)
1629                                 {
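                                         /* Rough ntot*log2(ntot) operation-count estimate
                                          * for the 3D FFTs, used only for flop accounting. */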
1630                                     real ntot = pme->nkx*pme->nky*pme->nkz;
1631                                     npme  = static_cast<int>(ntot*std::log(ntot)/std::log(2.0));
1632                                     inc_nrnb(nrnb, eNR_FFT, 2*npme);
1633                                 }
1634                                 wallcycle_start(wcycle, ewcPME_GATHER);
1635                             }
1636
1637                             copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
1638                         }
1639                         GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1640                     } /*#pragma omp parallel*/
1641
1642                     /* distribute local grid to all nodes */
1643 #if GMX_MPI
1644                     if (pme->nnodes > 1)
1645                     {
1646                         gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
1647                     }
1648 #endif
1649
1650                     unwrap_periodic_pmegrid(pme, grid);
1651
1652                     if (bCalcF)
1653                     {
1654                         /* interpolate forces for our local atoms */
1655                         bClearF = (bFirst && PAR(cr));
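                             /* lb_scale_factor holds the binomial (Pascal-triangle) weights
                              * of the seven expansion terms, scaled by (1/2)^6. */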
1656                         scale   = pme->bFEP ? (fep_state < 1 ? 1.0-lambda_lj : lambda_lj) : 1.0;
1657                         scale  *= lb_scale_factor[grid_index-2];
1658
1659 #pragma omp parallel for num_threads(pme->nthread) schedule(static)
1660                         for (thread = 0; thread < pme->nthread; thread++)
1661                         {
1662                             try
1663                             {
1664                                 gather_f_bsplines(pme, grid, bClearF, &pme->atc[0],
1665                                                   &pme->atc[0].spline[thread],
1666                                                   scale);
1667                             }
1668                             GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1669                         }
1670
1671
1672                         inc_nrnb(nrnb, eNR_GATHERFBSP,
1673                                  pme->pme_order*pme->pme_order*pme->pme_order*pme->atc[0].n);
1674                     }
1675                     wallcycle_stop(wcycle, ewcPME_GATHER);
1676
1677                     bFirst = FALSE;
1678                 } /* for (grid_index = 8; grid_index >= 2; --grid_index) */
1679             }     /* if (bBackFFT) */
1680         }         /* for (fep_state = 0; fep_state < fep_states_lj; ++fep_state) */
1681     }             /* if (pme->doLJ && pme->ljpme_combination_rule == eljpmeLB) */
1682
1683     if (bCalcF && pme->nnodes > 1)
1684     {
1685         wallcycle_start(wcycle, ewcPME_REDISTXF);
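             /* Pass the PME forces back through the decomposition dimensions so that
              * each rank ends up with the forces for its home atoms. */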
1686         for (d = 0; d < pme->ndecompdim; d++)
1687         {
1688             atc = &pme->atc[d];
1689             if (d == pme->ndecompdim - 1)
1690             {
1691                 n_d = homenr;
1692                 f_d = f + start;
1693             }
1694             else
1695             {
1696                 n_d = pme->atc[d+1].n;
1697                 f_d = pme->atc[d+1].f;
1698             }
1699             if (DOMAINDECOMP(cr))
1700             {
1701                 dd_pmeredist_f(pme, atc, n_d, f_d,
1702                                d == pme->ndecompdim-1 && pme->bPPnode);
1703             }
1704         }
1705
1706         wallcycle_stop(wcycle, ewcPME_REDISTXF);
1707     }
1708
1709     if (bCalcEnerVir)
1710     {
1711         if (pme->doCoulomb)
1712         {
1713             if (!pme->bFEP_q)
1714             {
1715                 *energy_q = energy_AB[0];
1716                 m_add(vir_q, vir_AB[0], vir_q);
1717             }
1718             else
1719             {
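                 /* Linear interpolation in lambda between the A and B states;
                  * with linear coupling dV/dlambda reduces to E_B - E_A. */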
1720                 *energy_q       = (1.0-lambda_q)*energy_AB[0] + lambda_q*energy_AB[1];
1721                 *dvdlambda_q   += energy_AB[1] - energy_AB[0];
1722                 for (i = 0; i < DIM; i++)
1723                 {
1724                     for (j = 0; j < DIM; j++)
1725                     {
1726                         vir_q[i][j] += (1.0-lambda_q)*vir_AB[0][i][j] +
1727                             lambda_q*vir_AB[1][i][j];
1728                     }
1729                 }
1730             }
1731             if (debug)
1732             {
1733                 fprintf(debug, "Electrostatic PME mesh energy: %g\n", *energy_q);
1734             }
1735         }
1736         else
1737         {
1738             *energy_q = 0;
1739         }
1740
1741         if (pme->doLJ)
1742         {
1743             if (!pme->bFEP_lj)
1744             {
1745                 *energy_lj = energy_AB[2];
1746                 m_add(vir_lj, vir_AB[2], vir_lj);
1747             }
1748             else
1749             {
1750                 *energy_lj     = (1.0-lambda_lj)*energy_AB[2] + lambda_lj*energy_AB[3];
1751                 *dvdlambda_lj += energy_AB[3] - energy_AB[2];
1752                 for (i = 0; i < DIM; i++)
1753                 {
1754                     for (j = 0; j < DIM; j++)
1755                     {
1756                         vir_lj[i][j] += (1.0-lambda_lj)*vir_AB[2][i][j] + lambda_lj*vir_AB[3][i][j];
1757                     }
1758                 }
1759             }
1760             if (debug)
1761             {
1762                 fprintf(debug, "Lennard-Jones PME mesh energy: %g\n", *energy_lj);
1763             }
1764         }
1765         else
1766         {
1767             *energy_lj = 0;
1768         }
1769     }
1770     return 0;
1771 }
1772
1773 void gmx_pme_destroy(gmx_pme_t *pme)
1774 {
1775     if (!pme)
1776     {
1777         return;
1778     }
1779
1780     delete pme->boxScaler;
1781
1782     sfree(pme->nnx);
1783     sfree(pme->nny);
1784     sfree(pme->nnz);
1785     sfree(pme->fshx);
1786     sfree(pme->fshy);
1787     sfree(pme->fshz);
1788
1789     for (int i = 0; i < pme->ngrids; ++i)
1790     {
1791         pmegrids_destroy(&pme->pmegrid[i]);
1792     }
1793     if (pme->pfft_setup)
1794     {
1795         for (int i = 0; i < pme->ngrids; ++i)
1796         {
1797             gmx_parallel_3dfft_destroy(pme->pfft_setup[i]);
1798         }
1799     }
1800     sfree(pme->fftgrid);
1801     sfree(pme->cfftgrid);
1802     sfree(pme->pfft_setup);
1803
1804     for (int i = 0; i < std::max(1, pme->ndecompdim); i++) // pme->atc[0] is always allocated
1805     {
1806         destroy_atomcomm(&pme->atc[i]);
1807     }
1808
1809     for (int i = 0; i < DIM; i++)
1810     {
1811         sfree(pme->bsp_mod[i]);
1812     }
1813
1814     sfree(pme->lb_buf1);
1815     sfree(pme->lb_buf2);
1816
1817     sfree(pme->bufv);
1818     sfree(pme->bufr);
1819
1820     if (pme->solve_work)
1821     {
1822         pme_free_all_work(&pme->solve_work, pme->nthread);
1823     }
1824
1825     sfree(pme->sum_qgrid_tmp);
1826     sfree(pme->sum_qgrid_dd_tmp);
1827
1828     destroy_pme_spline_work(pme->spline_work);
1829
1830     if (pme_gpu_active(pme) && pme->gpu)
1831     {
1832         pme_gpu_destroy(pme->gpu);
1833     }
1834
1835     delete pme;
1836 }
1837
1838 void gmx_pme_reinit_atoms(const gmx_pme_t *pme, const int nAtoms, const real *charges)
1839 {
1840     if (pme_gpu_active(pme))
1841     {
1842         pme_gpu_reinit_atoms(pme->gpu, nAtoms, charges);
1843     }
1844     // TODO: handle the CPU case here; handle the whole t_mdatoms
1845 }