clang-tidy modernize
src/gromacs/ewald/pme.cpp
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5  * Copyright (c) 2001-2004, The GROMACS development team.
6  * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
7  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8  * and including many others, as listed in the AUTHORS file in the
9  * top-level source directory and at http://www.gromacs.org.
10  *
11  * GROMACS is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public License
13  * as published by the Free Software Foundation; either version 2.1
14  * of the License, or (at your option) any later version.
15  *
16  * GROMACS is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with GROMACS; if not, see
23  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
25  *
26  * If you want to redistribute modifications to GROMACS, please
27  * consider that scientific software is very special. Version
28  * control is crucial - bugs must be traceable. We will be happy to
29  * consider code for inclusion in the official distribution, but
30  * derived work must not be called official GROMACS. Details are found
31  * in the README & COPYING files - if they are missing, get the
32  * official version at http://www.gromacs.org.
33  *
34  * To help us fund GROMACS development, we humbly ask that you cite
35  * the research papers on the package. Check out http://www.gromacs.org.
36  */
37 /*! \internal \file
38  *
39  * \brief This file contains function definitions necessary for
40  * computing energies and forces for the PME long-ranged part (Coulomb
41  * and LJ).
42  *
43  * \author Erik Lindahl <erik@kth.se>
44  * \author Berk Hess <hess@kth.se>
45  * \ingroup module_ewald
46  */
47 /* IMPORTANT FOR DEVELOPERS:
48  *
49  * Triclinic pme stuff isn't entirely trivial, and we've experienced
50  * some bugs during development (many of them due to me). To avoid
51  * this in the future, please check the following things if you make
52  * changes in this file:
53  *
54  * 1. You should obtain identical (at least to the PME precision)
55  *    energies, forces, and virial for
56  *    a rectangular box and a triclinic one where the z (or y) axis is
57  *    tilted a whole box side. For instance you could use these boxes:
58  *
59  *    rectangular       triclinic
60  *     2  0  0           2  0  0
61  *     0  2  0           0  2  0
62  *     0  0  6           2  2  6
63  *
64  * 2. You should check the energy conservation in a triclinic box.
65  *
66  * It might seem like overkill, but better safe than sorry.
67  * /Erik 001109
68  */
69
70 #include "gmxpre.h"
71
72 #include "pme.h"
73
74 #include "config.h"
75
76 #include <cassert>
77 #include <cmath>
78 #include <cstdio>
79 #include <cstdlib>
80 #include <cstring>
81
82 #include <algorithm>
83 #include <list>
84
85 #include "gromacs/domdec/domdec.h"
86 #include "gromacs/ewald/ewald-utils.h"
87 #include "gromacs/fft/parallel_3dfft.h"
88 #include "gromacs/fileio/pdbio.h"
89 #include "gromacs/gmxlib/network.h"
90 #include "gromacs/gmxlib/nrnb.h"
91 #include "gromacs/math/gmxcomplex.h"
92 #include "gromacs/math/invertmatrix.h"
93 #include "gromacs/math/units.h"
94 #include "gromacs/math/vec.h"
95 #include "gromacs/math/vectypes.h"
96 #include "gromacs/mdtypes/commrec.h"
97 #include "gromacs/mdtypes/forcerec.h"
98 #include "gromacs/mdtypes/inputrec.h"
99 #include "gromacs/mdtypes/md_enums.h"
100 #include "gromacs/pbcutil/pbc.h"
101 #include "gromacs/timing/cyclecounter.h"
102 #include "gromacs/timing/wallcycle.h"
103 #include "gromacs/timing/walltime_accounting.h"
104 #include "gromacs/utility/basedefinitions.h"
105 #include "gromacs/utility/exceptions.h"
106 #include "gromacs/utility/fatalerror.h"
107 #include "gromacs/utility/gmxmpi.h"
108 #include "gromacs/utility/gmxomp.h"
109 #include "gromacs/utility/logger.h"
110 #include "gromacs/utility/real.h"
111 #include "gromacs/utility/smalloc.h"
112 #include "gromacs/utility/stringutil.h"
113 #include "gromacs/utility/unique_cptr.h"
114
115 #include "calculate-spline-moduli.h"
116 #include "pme-gather.h"
117 #include "pme-gpu-internal.h"
118 #include "pme-grid.h"
119 #include "pme-internal.h"
120 #include "pme-redistribute.h"
121 #include "pme-solve.h"
122 #include "pme-spline-work.h"
123 #include "pme-spread.h"
124
125 /*! \brief Help build a descriptive message in \c error if there are
126  * \c errorReasons why PME on GPU is not supported.
127  *
128  * \returns Whether the lack of \c errorReasons indicates there is support. */
129 static bool
130 addMessageIfNotSupported(const std::list<std::string> &errorReasons,
131                          std::string                  *error)
132 {
133     bool isSupported = errorReasons.empty();
134     if (!isSupported && error)
135     {
136         std::string regressionTestMarker = "PME GPU does not support";
137         // this prefix is tested for in the regression tests script gmxtest.pl
138         *error = regressionTestMarker + ": " + gmx::joinStrings(errorReasons, "; ") + ".";
139     }
140     return isSupported;
141 }
142
143 bool pme_gpu_supports_build(std::string *error)
144 {
145     std::list<std::string> errorReasons;
146     if (GMX_DOUBLE)
147     {
148         errorReasons.emplace_back("double precision");
149     }
150     if (GMX_GPU != GMX_GPU_CUDA)
151     {
152         errorReasons.emplace_back("non-CUDA build of GROMACS");
153     }
154     return addMessageIfNotSupported(errorReasons, error);
155 }
156
157 bool pme_gpu_supports_input(const t_inputrec *ir, std::string *error)
158 {
159     std::list<std::string> errorReasons;
160     if (!EEL_PME(ir->coulombtype))
161     {
162         errorReasons.emplace_back("systems that do not use PME for electrostatics");
163     }
164     if (ir->pme_order != 4)
165     {
166         errorReasons.emplace_back("interpolation orders other than 4");
167     }
168     if (ir->efep != efepNO)
169     {
170         errorReasons.emplace_back("free energy calculations (multiple grids)");
171     }
172     if (EVDW_PME(ir->vdwtype))
173     {
174         errorReasons.emplace_back("Lennard-Jones PME");
175     }
176     if (ir->cutoff_scheme == ecutsGROUP)
177     {
178         errorReasons.emplace_back("group cutoff scheme");
179     }
180     if (EI_TPI(ir->eI))
181     {
182         errorReasons.emplace_back("test particle insertion");
183     }
184     return addMessageIfNotSupported(errorReasons, error);
185 }
186
187 /*! \brief \libinternal
188  * Determines whether PME with the given inputs can run on a GPU.
189  * This function is the internal final check, validating the whole PME structure on creation;
190  * it deliberately duplicates the preliminary checks of the (externally exposed) pme_gpu_supports_input() above, just in case.
191  *
192  * \param[in]  pme          The PME structure.
193  * \param[out] error        The error message if the input is not supported on GPU.
194  * \returns                 True if this PME input is possible to run on GPU, false otherwise.
195  */
196 static bool pme_gpu_check_restrictions(const gmx_pme_t *pme, std::string *error)
197 {
198     std::list<std::string> errorReasons;
199     if (pme->nnodes != 1)
200     {
201         errorReasons.emplace_back("PME decomposition");
202     }
203     if (pme->pme_order != 4)
204     {
205         errorReasons.emplace_back("interpolation orders other than 4");
206     }
207     if (pme->bFEP)
208     {
209         errorReasons.emplace_back("free energy calculations (multiple grids)");
210     }
211     if (pme->doLJ)
212     {
213         errorReasons.emplace_back("Lennard-Jones PME");
214     }
215     if (GMX_DOUBLE)
216     {
217         errorReasons.emplace_back("double precision");
218     }
219     if (GMX_GPU != GMX_GPU_CUDA)
220     {
221         errorReasons.emplace_back("non-CUDA build of GROMACS");
222     }
223
224     return addMessageIfNotSupported(errorReasons, error);
225 }
226
227 PmeRunMode pme_run_mode(const gmx_pme_t *pme)
228 {
229     GMX_ASSERT(pme != nullptr, "Expecting valid PME data pointer");
230     return pme->runMode;
231 }
232
233 gmx::PinningPolicy pme_get_pinning_policy()
234 {
235     // When the OpenCL implementation of HostAllocationPolicy
236     // implements a form of host-side pinning, amend this logic.
237     if (GMX_GPU == GMX_GPU_CUDA)
238     {
239         return gmx::PinningPolicy::CanBePinned;
240     }
241     else
242     {
243         return gmx::PinningPolicy::CannotBePinned;
244     }
245 }
246
247 /*! \brief Number of bytes in a cache line.
248  *
249  * Must also be a multiple of the SIMD and SIMD4 register size, to
250  * preserve alignment.
251  */
252 const int gmxCacheLineSize = 64;
253
254 //! Set up coordinate communication
255 static void setup_coordinate_communication(pme_atomcomm_t *atc)
256 {
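    /* Partners are ordered by slab distance: for i = 1..nslab/2 the slab at
     * +i (forward) and then the one at -i (backward) is appended; the guards
     * ensure exactly nslab-1 entries (for even nslab the most distant slab is
     * both the forward and the backward partner and is added only once). */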
257     int nslab, n, i;
258     int fw, bw;
259
260     nslab = atc->nslab;
261
262     n = 0;
263     for (i = 1; i <= nslab/2; i++)
264     {
265         fw = (atc->nodeid + i) % nslab;
266         bw = (atc->nodeid - i + nslab) % nslab;
267         if (n < nslab - 1)
268         {
269             atc->node_dest[n] = fw;
270             atc->node_src[n]  = bw;
271             n++;
272         }
273         if (n < nslab - 1)
274         {
275             atc->node_dest[n] = bw;
276             atc->node_src[n]  = fw;
277             n++;
278         }
279     }
280 }
281
282 /*! \brief Round \p n up to the next multiple of \p f */
283 static int mult_up(int n, int f)
284 {
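    /* For example, mult_up(10, 4) == 12 and mult_up(12, 4) == 12. */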
285     return ((n + f - 1)/f)*f;
286 }
287
288 /*! \brief Return estimate of the load imbalance from the PME grid not being a good match for the number of PME ranks */
289 static double estimate_pme_load_imbalance(struct gmx_pme_t *pme)
290 {
291     int    nma, nmi;
292     double n1, n2, n3;
293
294     nma = pme->nnodes_major;
295     nmi = pme->nnodes_minor;
296
297     n1 = mult_up(pme->nkx, nma)*mult_up(pme->nky, nmi)*pme->nkz;
298     n2 = mult_up(pme->nkx, nma)*mult_up(pme->nkz, nmi)*pme->nky;
299     n3 = mult_up(pme->nky, nma)*mult_up(pme->nkz, nmi)*pme->nkx;
300
301     /* pme_solve is roughly double the cost of an fft */
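    /* n1..n3 are the grid volumes padded by rounding the two decomposed
     * dimensions up to multiples of the rank counts, one term per FFT/solve
     * layout; the ratio of this solve-weighted padded work to the
     * corresponding unpadded work below estimates the imbalance. */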
302
303     return (n1 + n2 + 3*n3)/static_cast<double>(6*pme->nkx*pme->nky*pme->nkz);
304 }
305
306 /*! \brief Initialize atom communication data structure */
307 static void init_atomcomm(struct gmx_pme_t *pme, pme_atomcomm_t *atc,
308                           int dimind, gmx_bool bSpread)
309 {
310     int thread;
311
312     atc->dimind    = dimind;
313     atc->nslab     = 1;
314     atc->nodeid    = 0;
315     atc->pd_nalloc = 0;
316 #if GMX_MPI
317     if (pme->nnodes > 1)
318     {
319         atc->mpi_comm = pme->mpi_comm_d[dimind];
320         MPI_Comm_size(atc->mpi_comm, &atc->nslab);
321         MPI_Comm_rank(atc->mpi_comm, &atc->nodeid);
322     }
323     if (debug)
324     {
325         fprintf(debug, "For PME atom communication in dimind %d: nslab %d rank %d\n", atc->dimind, atc->nslab, atc->nodeid);
326     }
327 #endif
328
329     atc->bSpread   = bSpread;
330     atc->pme_order = pme->pme_order;
331
332     if (atc->nslab > 1)
333     {
334         snew(atc->node_dest, atc->nslab);
335         snew(atc->node_src, atc->nslab);
336         setup_coordinate_communication(atc);
337
338         snew(atc->count_thread, pme->nthread);
339         for (thread = 0; thread < pme->nthread; thread++)
340         {
341             snew(atc->count_thread[thread], atc->nslab);
342         }
343         atc->count = atc->count_thread[0];
344         snew(atc->rcount, atc->nslab);
345         snew(atc->buf_index, atc->nslab);
346     }
347
348     atc->nthread = pme->nthread;
349     if (atc->nthread > 1)
350     {
351         snew(atc->thread_plist, atc->nthread);
352     }
353     snew(atc->spline, atc->nthread);
354     for (thread = 0; thread < atc->nthread; thread++)
355     {
356         if (atc->nthread > 1)
357         {
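            /* Allocate gmxCacheLineSize extra entries on each side of the
             * per-slab counts and point into the middle of the allocation,
             * so counts written by different threads never share a cache
             * line (avoiding false sharing). */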
358             snew(atc->thread_plist[thread].n, atc->nthread+2*gmxCacheLineSize);
359             atc->thread_plist[thread].n += gmxCacheLineSize;
360         }
361     }
362 }
363
364 /*! \brief Destroy an atom communication data structure and its child structs */
365 static void destroy_atomcomm(pme_atomcomm_t *atc)
366 {
367     sfree(atc->pd);
368     if (atc->nslab > 1)
369     {
370         sfree(atc->node_dest);
371         sfree(atc->node_src);
372         for (int i = 0; i < atc->nthread; i++)
373         {
374             sfree(atc->count_thread[i]);
375         }
376         sfree(atc->count_thread);
377         sfree(atc->rcount);
378         sfree(atc->buf_index);
379
380         sfree(atc->x);
381         sfree(atc->coefficient);
382         sfree(atc->f);
383     }
384     sfree(atc->idx);
385     sfree(atc->fractx);
386
387     sfree(atc->thread_idx);
388     for (int i = 0; i < atc->nthread; i++)
389     {
390         if (atc->nthread > 1)
391         {
392             int *n_ptr = atc->thread_plist[i].n - gmxCacheLineSize;
393             sfree(n_ptr);
394             sfree(atc->thread_plist[i].i);
395         }
396         sfree(atc->spline[i].ind);
397         for (int d = 0; d < ZZ; d++)
398         {
399             sfree(atc->spline[i].theta[d]);
400             sfree(atc->spline[i].dtheta[d]);
401         }
402         sfree_aligned(atc->spline[i].ptr_dtheta_z);
403         sfree_aligned(atc->spline[i].ptr_theta_z);
404     }
405     if (atc->nthread > 1)
406     {
407         sfree(atc->thread_plist);
408     }
409     sfree(atc->spline);
410 }
411
412 /*! \brief Initialize data structure for communication */
413 static void
414 init_overlap_comm(pme_overlap_t *  ol,
415                   int              norder,
416 #if GMX_MPI
417                   MPI_Comm         comm,
418 #endif
419                   int              nnodes,
420                   int              nodeid,
421                   int              ndata,
422                   int              commplainsize)
423 {
424     gmx_bool         bCont;
425 #if GMX_MPI
426     MPI_Status       stat;
427
428     ol->mpi_comm = comm;
429 #endif
430
431     ol->nnodes = nnodes;
432     ol->nodeid = nodeid;
433
434     /* Linear translation of the PME grid won't affect reciprocal space
435      * calculations, so to optimize we only interpolate "upwards",
436      * which also means we only have to consider overlap in one direction.
437      * I.e., particles on this node might also be spread to grid indices
438      * that belong to higher nodes (modulo nnodes)
439      */
440
441     ol->s2g0.resize(ol->nnodes + 1);
442     ol->s2g1.resize(ol->nnodes);
443     if (debug)
444     {
445         fprintf(debug, "PME slab boundaries:");
446     }
447     for (int i = 0; i < nnodes; i++)
448     {
449         /* s2g0 is the local interpolation grid start;
450          * s2g1 is the local interpolation grid end.
451          * Since in calc_pidx we divide particles, not grid lines,
452          * uniformly in space along dimension x or y, we need to round
453          * s2g0 down and s2g1 up.
454          */
455         ol->s2g0[i] = (i * ndata + 0) / nnodes;
456         ol->s2g1[i] = ((i + 1) * ndata + nnodes - 1) / nnodes + norder - 1;
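        /* Worked example (hypothetical numbers): ndata = 90, nnodes = 4 and
         * norder = 4 give s2g0 = {0, 22, 45, 67, 90} and
         * s2g1 = {26, 48, 71, 93}, i.e. each slab's grid region extends
         * norder-1 or norder lines past the start of the next slab. */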
457
458         if (debug)
459         {
460             fprintf(debug, "  %3d %3d", ol->s2g0[i], ol->s2g1[i]);
461         }
462     }
463     ol->s2g0[nnodes] = ndata;
464     if (debug)
465     {
466         fprintf(debug, "\n");
467     }
468
469     /* Determine with how many nodes we need to communicate the grid overlap */
470     int testRankCount = 0;
471     do
472     {
473         testRankCount++;
474         bCont = FALSE;
475         for (int i = 0; i < nnodes; i++)
476         {
477             if ((i + testRankCount <  nnodes && ol->s2g1[i] > ol->s2g0[i + testRankCount]) ||
478                 (i + testRankCount >= nnodes && ol->s2g1[i] > ol->s2g0[i + testRankCount - nnodes] + ndata))
479             {
480                 bCont = TRUE;
481             }
482         }
483     }
484     while (bCont && testRankCount < nnodes);
485
486     ol->comm_data.resize(testRankCount - 1);
487     ol->send_size = 0;
488
489     for (size_t b = 0; b < ol->comm_data.size(); b++)
490     {
491         pme_grid_comm_t *pgc = &ol->comm_data[b];
492
493         /* Send */
494         pgc->send_id = (ol->nodeid + (b + 1)) % ol->nnodes;
495         int fft_start = ol->s2g0[pgc->send_id];
496         int fft_end   = ol->s2g0[pgc->send_id + 1];
497         if (pgc->send_id < nodeid)
498         {
499             fft_start += ndata;
500             fft_end   += ndata;
501         }
502         int send_index1  = ol->s2g1[nodeid];
503         send_index1      = std::min(send_index1, fft_end);
504         pgc->send_index0 = fft_start;
505         pgc->send_nindex = std::max(0, send_index1 - pgc->send_index0);
506         ol->send_size   += pgc->send_nindex;
507
508         /* We always start receiving at the first index of our slab */
509         pgc->recv_id     = (ol->nodeid - (b + 1) + ol->nnodes) % ol->nnodes;
510         fft_start        = ol->s2g0[ol->nodeid];
511         fft_end          = ol->s2g0[ol->nodeid + 1];
512         int recv_index1  = ol->s2g1[pgc->recv_id];
513         if (pgc->recv_id > nodeid)
514         {
515             recv_index1 -= ndata;
516         }
517         recv_index1      = std::min(recv_index1, fft_end);
518         pgc->recv_index0 = fft_start;
519         pgc->recv_nindex = std::max(0, recv_index1 - pgc->recv_index0);
520     }
521
522 #if GMX_MPI
523     /* Communicate the buffer sizes to receive */
524     for (size_t b = 0; b < ol->comm_data.size(); b++)
525     {
526         MPI_Sendrecv(&ol->send_size, 1, MPI_INT, ol->comm_data[b].send_id, b,
527                      &ol->comm_data[b].recv_size, 1, MPI_INT, ol->comm_data[b].recv_id, b,
528                      ol->mpi_comm, &stat);
529     }
530 #endif
531
532     /* For a non-divisible grid we need pme_order instead of pme_order-1 */
533     ol->sendbuf.resize(norder * commplainsize);
534     ol->recvbuf.resize(norder * commplainsize);
535 }
536
537 int minimalPmeGridSize(int pmeOrder)
538 {
539     /* The actual grid size limitations are:
540      *   serial:        >= pme_order
541      *   DD, no OpenMP: >= 2*(pme_order - 1)
542      *   DD, OpenMP:    >= pme_order + 1
543      * But we use the maximum for simplicity since in practice there is not
544      * much performance difference between pme_order and 2*(pme_order -1).
545      */
546     int minimalSize = 2*(pmeOrder - 1);
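    /* With the default pmeOrder = 4 this gives a minimal grid size of 6,
     * which also satisfies the DD + OpenMP limit of pmeOrder + 1 = 5. */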
547
548     GMX_RELEASE_ASSERT(pmeOrder >= 3, "pmeOrder has to be >= 3");
549     GMX_RELEASE_ASSERT(minimalSize >= pmeOrder + 1, "The grid size should be >= pmeOrder + 1");
550
551     return minimalSize;
552 }
553
554 bool gmx_pme_check_restrictions(int pme_order,
555                                 int nkx, int nky, int nkz,
556                                 int numPmeDomainsAlongX,
557                                 bool useThreads,
558                                 bool errorsAreFatal)
559 {
560     if (pme_order > PME_ORDER_MAX)
561     {
562         if (!errorsAreFatal)
563         {
564             return false;
565         }
566
567         std::string message = gmx::formatString(
568                     "pme_order (%d) is larger than the maximum allowed value (%d). Modify and recompile the code if you really need such a high order.",
569                     pme_order, PME_ORDER_MAX);
570         GMX_THROW(gmx::InconsistentInputError(message));
571     }
572
573     const int minGridSize = minimalPmeGridSize(pme_order);
574     if (nkx < minGridSize ||
575         nky < minGridSize ||
576         nkz < minGridSize)
577     {
578         if (!errorsAreFatal)
579         {
580             return false;
581         }
582         std::string message = gmx::formatString(
583                     "The PME grid sizes need to be >= 2*(pme_order-1) (%d)",
584                     minGridSize);
585         GMX_THROW(gmx::InconsistentInputError(message));
586     }
587
588     /* Check for a limitation of the (current) sum_fftgrid_dd code.
589      * We only allow multiple communication pulses in dim 1, not in dim 0.
590      */
591     if (useThreads && (nkx < numPmeDomainsAlongX*pme_order &&
592                        nkx != numPmeDomainsAlongX*(pme_order - 1)))
593     {
594         if (!errorsAreFatal)
595         {
596             return false;
597         }
598         gmx_fatal(FARGS, "The number of PME grid lines per rank along x is %g. But when using OpenMP threads, the number of grid lines per rank along x should be >= pme_order (%d) or = pme_order-1. To resolve this issue, use fewer ranks along x (and possibly more along y and/or z) by specifying -dd manually.",
599                   nkx/static_cast<double>(numPmeDomainsAlongX), pme_order);
600     }
601
602     return true;
603 }
604
605 /*! \brief Return \p numerator divided by \p denominator, rounded up */
606 static int div_round_up(int numerator, int denominator)
607 {
608     return (numerator + denominator - 1)/denominator;
609 }
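/* For example, div_round_up(10, 4) == 3, whereas plain integer division yields 2. */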
610
611 gmx_pme_t *gmx_pme_init(const t_commrec     *cr,
612                         const NumPmeDomains &numPmeDomains,
613                         const t_inputrec    *ir,
614                         int                  homenr,
615                         gmx_bool             bFreeEnergy_q,
616                         gmx_bool             bFreeEnergy_lj,
617                         gmx_bool             bReproducible,
618                         real                 ewaldcoeff_q,
619                         real                 ewaldcoeff_lj,
620                         int                  nthread,
621                         PmeRunMode           runMode,
622                         PmeGpu              *pmeGpu,
623                         gmx_device_info_t   *gpuInfo,
624                         PmeGpuProgramHandle  pmeGpuProgram,
625                         const gmx::MDLogger  & /*mdlog*/)
626 {
627     int               use_threads, sum_use_threads, i;
628     ivec              ndata;
629
630     if (debug)
631     {
632         fprintf(debug, "Creating PME data structures.\n");
633     }
634
635     gmx::unique_cptr<gmx_pme_t, gmx_pme_destroy> pme(new gmx_pme_t());
636
637     pme->sum_qgrid_tmp       = nullptr;
638     pme->sum_qgrid_dd_tmp    = nullptr;
639
640     pme->buf_nalloc          = 0;
641
642     pme->nnodes              = 1;
643     pme->bPPnode             = TRUE;
644
645     pme->nnodes_major        = numPmeDomains.x;
646     pme->nnodes_minor        = numPmeDomains.y;
647
648 #if GMX_MPI
649     if (numPmeDomains.x*numPmeDomains.y > 1)
650     {
651         pme->mpi_comm = cr->mpi_comm_mygroup;
652
653         MPI_Comm_rank(pme->mpi_comm, &pme->nodeid);
654         MPI_Comm_size(pme->mpi_comm, &pme->nnodes);
655         if (pme->nnodes != numPmeDomains.x*numPmeDomains.y)
656         {
657             gmx_incons("PME rank count mismatch");
658         }
659     }
660     else
661     {
662         pme->mpi_comm = MPI_COMM_NULL;
663     }
664 #endif
665
666     if (pme->nnodes == 1)
667     {
668 #if GMX_MPI
669         pme->mpi_comm_d[0] = MPI_COMM_NULL;
670         pme->mpi_comm_d[1] = MPI_COMM_NULL;
671 #endif
672         pme->ndecompdim   = 0;
673         pme->nodeid_major = 0;
674         pme->nodeid_minor = 0;
678     }
679     else
680     {
681         if (numPmeDomains.y == 1)
682         {
683 #if GMX_MPI
684             pme->mpi_comm_d[0] = pme->mpi_comm;
685             pme->mpi_comm_d[1] = MPI_COMM_NULL;
686 #endif
687             pme->ndecompdim   = 1;
688             pme->nodeid_major = pme->nodeid;
689             pme->nodeid_minor = 0;
690
691         }
692         else if (numPmeDomains.x == 1)
693         {
694 #if GMX_MPI
695             pme->mpi_comm_d[0] = MPI_COMM_NULL;
696             pme->mpi_comm_d[1] = pme->mpi_comm;
697 #endif
698             pme->ndecompdim   = 1;
699             pme->nodeid_major = 0;
700             pme->nodeid_minor = pme->nodeid;
701         }
702         else
703         {
704             if (pme->nnodes % numPmeDomains.x != 0)
705             {
706                 gmx_incons("For 2D PME decomposition, #PME ranks must be divisible by the number of domains along x");
707             }
708             pme->ndecompdim = 2;
709
710 #if GMX_MPI
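            /* Ranks are laid out with the major index varying slowest: for
             * example (hypothetical numbers), with numPmeDomains = {3, 2} rank
             * r has major index r/2 and minor index r%2. The first split below
             * groups ranks with equal minor index (color nodeid % y) into the
             * major communicator; the second does the converse. */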
711             MPI_Comm_split(pme->mpi_comm, pme->nodeid % numPmeDomains.y,
712                            pme->nodeid, &pme->mpi_comm_d[0]);  /* My communicator along major dimension */
713             MPI_Comm_split(pme->mpi_comm, pme->nodeid/numPmeDomains.y,
714                            pme->nodeid, &pme->mpi_comm_d[1]);  /* My communicator along minor dimension */
715
716             MPI_Comm_rank(pme->mpi_comm_d[0], &pme->nodeid_major);
717             MPI_Comm_size(pme->mpi_comm_d[0], &pme->nnodes_major);
718             MPI_Comm_rank(pme->mpi_comm_d[1], &pme->nodeid_minor);
719             MPI_Comm_size(pme->mpi_comm_d[1], &pme->nnodes_minor);
720 #endif
721         }
722         pme->bPPnode = thisRankHasDuty(cr, DUTY_PP);
723     }
724
725     pme->nthread = nthread;
726
727     /* Check if any of the PME MPI ranks uses threads */
728     use_threads = (pme->nthread > 1 ? 1 : 0);
729 #if GMX_MPI
730     if (pme->nnodes > 1)
731     {
732         MPI_Allreduce(&use_threads, &sum_use_threads, 1, MPI_INT,
733                       MPI_SUM, pme->mpi_comm);
734     }
735     else
736 #endif
737     {
738         sum_use_threads = use_threads;
739     }
740     pme->bUseThreads = (sum_use_threads > 0);
741
742     if (ir->ePBC == epbcSCREW)
743     {
744         gmx_fatal(FARGS, "pme does not (yet) work with pbc = screw");
745     }
746
747     /* NOTE:
748      * It is likely that the current gmx_pme_do() routine supports calculating
749      * only Coulomb or LJ while gmx_pme_init() configures for both,
750      * but that has never been tested.
751      * It is likely that the current gmx_pme_do() routine supports calculating
752      * without free energy for Coulomb and/or LJ even when gmx_pme_init()
753      * configures with free energy, but that has never been tested.
754      */
755     pme->doCoulomb     = EEL_PME(ir->coulombtype);
756     pme->doLJ          = EVDW_PME(ir->vdwtype);
757     pme->bFEP_q        = ((ir->efep != efepNO) && bFreeEnergy_q);
758     pme->bFEP_lj       = ((ir->efep != efepNO) && bFreeEnergy_lj);
759     pme->bFEP          = (pme->bFEP_q || pme->bFEP_lj);
760     pme->nkx           = ir->nkx;
761     pme->nky           = ir->nky;
762     pme->nkz           = ir->nkz;
763     pme->bP3M          = (ir->coulombtype == eelP3M_AD || getenv("GMX_PME_P3M") != nullptr);
764     pme->pme_order     = ir->pme_order;
765     pme->ewaldcoeff_q  = ewaldcoeff_q;
766     pme->ewaldcoeff_lj = ewaldcoeff_lj;
767
768     /* Always constant electrostatics coefficients */
769     pme->epsilon_r     = ir->epsilon_r;
770
771     /* Always constant LJ coefficients */
772     pme->ljpme_combination_rule = ir->ljpme_combination_rule;
773
774     // The box requires scaling with nwalls = 2; we store that condition as well
775     // as the scaling factor
776     delete pme->boxScaler;
777     pme->boxScaler = new EwaldBoxZScaler(*ir);
778
779     /* If we violate restrictions, generate a fatal error here */
780     gmx_pme_check_restrictions(pme->pme_order,
781                                pme->nkx, pme->nky, pme->nkz,
782                                pme->nnodes_major,
783                                pme->bUseThreads,
784                                true);
785
786     if (pme->nnodes > 1)
787     {
788         double imbal;
789
790 #if GMX_MPI
791         MPI_Type_contiguous(DIM, GMX_MPI_REAL, &(pme->rvec_mpi));
792         MPI_Type_commit(&(pme->rvec_mpi));
793 #endif
794
795         /* Note that the coefficient spreading and force gathering, which usually
796          * takes about the same amount of time as FFT+solve_pme,
797          * is always fully load balanced
798          * (unless the coefficient distribution is inhomogeneous).
799          */
800
801         imbal = estimate_pme_load_imbalance(pme.get());
802         if (imbal >= 1.2 && pme->nodeid_major == 0 && pme->nodeid_minor == 0)
803         {
804             fprintf(stderr,
805                     "\n"
806                     "NOTE: The load imbalance in PME FFT and solve is %d%%.\n"
807                     "      For optimal PME load balancing\n"
808                     "      PME grid_x (%d) and grid_y (%d) should be divisible by #PME_ranks_x (%d)\n"
809                     "      and PME grid_y (%d) and grid_z (%d) should be divisible by #PME_ranks_y (%d)\n"
810                     "\n",
811                     static_cast<int>((imbal-1)*100 + 0.5),
812                     pme->nkx, pme->nky, pme->nnodes_major,
813                     pme->nky, pme->nkz, pme->nnodes_minor);
814         }
815     }
816
817     /* For a non-divisible grid we need pme_order instead of pme_order-1 */
818     /* In sum_qgrid_dd x overlap is copied in place: take padding into account.
819      * y is always copied through a buffer: we don't need padding in z,
820      * but we do need the overlap in x because of the communication order.
821      */
822     init_overlap_comm(&pme->overlap[0], pme->pme_order,
823 #if GMX_MPI
824                       pme->mpi_comm_d[0],
825 #endif
826                       pme->nnodes_major, pme->nodeid_major,
827                       pme->nkx,
828                       (div_round_up(pme->nky, pme->nnodes_minor)+pme->pme_order)*(pme->nkz+pme->pme_order-1));
829
830     /* Along overlap dim 1 we can send in multiple pulses in sum_fftgrid_dd.
831      * We do this with an offset buffer of equal size, so we need to allocate
832      * extra for the offset. That's what the (+1)*pme->nkz is for.
833      */
834     init_overlap_comm(&pme->overlap[1], pme->pme_order,
835 #if GMX_MPI
836                       pme->mpi_comm_d[1],
837 #endif
838                       pme->nnodes_minor, pme->nodeid_minor,
839                       pme->nky,
840                       (div_round_up(pme->nkx, pme->nnodes_major)+pme->pme_order+1)*pme->nkz);
841
842     /* Double-check for a limitation of the (current) sum_fftgrid_dd code.
843      * Note that gmx_pme_check_restrictions checked for this already.
844      */
845     if (pme->bUseThreads && (pme->overlap[0].comm_data.size() > 1))
846     {
847         gmx_incons("More than one communication pulse required for grid overlap communication along the major dimension while using threads");
848     }
849
850     snew(pme->bsp_mod[XX], pme->nkx);
851     snew(pme->bsp_mod[YY], pme->nky);
852     snew(pme->bsp_mod[ZZ], pme->nkz);
853
854     pme->gpu     = pmeGpu; /* Carrying over the single GPU structure */
855     pme->runMode = runMode;
856
857     /* The required size of the interpolation grid, including overlap.
858      * The allocated size (pmegrid_n?) might be slightly larger.
859      */
860     pme->pmegrid_nx = pme->overlap[0].s2g1[pme->nodeid_major] -
861         pme->overlap[0].s2g0[pme->nodeid_major];
862     pme->pmegrid_ny = pme->overlap[1].s2g1[pme->nodeid_minor] -
863         pme->overlap[1].s2g0[pme->nodeid_minor];
864     pme->pmegrid_nz_base = pme->nkz;
865     pme->pmegrid_nz      = pme->pmegrid_nz_base + pme->pme_order - 1;
866     set_grid_alignment(&pme->pmegrid_nz, pme->pme_order);
867     pme->pmegrid_start_ix = pme->overlap[0].s2g0[pme->nodeid_major];
868     pme->pmegrid_start_iy = pme->overlap[1].s2g0[pme->nodeid_minor];
869     pme->pmegrid_start_iz = 0;
870
871     make_gridindex_to_localindex(pme->nkx,
872                                  pme->pmegrid_start_ix,
873                                  pme->pmegrid_nx - (pme->pme_order-1),
874                                  &pme->nnx, &pme->fshx);
875     make_gridindex_to_localindex(pme->nky,
876                                  pme->pmegrid_start_iy,
877                                  pme->pmegrid_ny - (pme->pme_order-1),
878                                  &pme->nny, &pme->fshy);
879     make_gridindex_to_localindex(pme->nkz,
880                                  pme->pmegrid_start_iz,
881                                  pme->pmegrid_nz_base,
882                                  &pme->nnz, &pme->fshz);
883
884     pme->spline_work = make_pme_spline_work(pme->pme_order);
885
886     ndata[0]    = pme->nkx;
887     ndata[1]    = pme->nky;
888     ndata[2]    = pme->nkz;
889     /* It doesn't matter if we allocate too many grids here,
890      * we only allocate and use the ones we need.
891      */
892     if (pme->doLJ)
893     {
894         pme->ngrids = ((ir->ljpme_combination_rule == eljpmeLB) ? DO_Q_AND_LJ_LB : DO_Q_AND_LJ);
895     }
896     else
897     {
898         pme->ngrids = DO_Q;
899     }
900     snew(pme->fftgrid, pme->ngrids);
901     snew(pme->cfftgrid, pme->ngrids);
902     snew(pme->pfft_setup, pme->ngrids);
903
904     for (i = 0; i < pme->ngrids; ++i)
905     {
906         if ((i <  DO_Q && pme->doCoulomb && (i == 0 ||
907                                              bFreeEnergy_q)) ||
908             (i >= DO_Q && pme->doLJ && (i == 2 ||
909                                         bFreeEnergy_lj ||
910                                         ir->ljpme_combination_rule == eljpmeLB)))
911         {
912             pmegrids_init(&pme->pmegrid[i],
913                           pme->pmegrid_nx, pme->pmegrid_ny, pme->pmegrid_nz,
914                           pme->pmegrid_nz_base,
915                           pme->pme_order,
916                           pme->bUseThreads,
917                           pme->nthread,
918                           pme->overlap[0].s2g1[pme->nodeid_major]-pme->overlap[0].s2g0[pme->nodeid_major+1],
919                           pme->overlap[1].s2g1[pme->nodeid_minor]-pme->overlap[1].s2g0[pme->nodeid_minor+1]);
920             /* This routine will allocate the grid data to fit the FFTs */
921             const auto allocateRealGridForGpu = (pme->runMode == PmeRunMode::Mixed) ? gmx::PinningPolicy::CanBePinned : gmx::PinningPolicy::CannotBePinned;
922             gmx_parallel_3dfft_init(&pme->pfft_setup[i], ndata,
923                                     &pme->fftgrid[i], &pme->cfftgrid[i],
924                                     pme->mpi_comm_d,
925                                     bReproducible, pme->nthread, allocateRealGridForGpu);
926
927         }
928     }
929
930     if (!pme->bP3M)
931     {
932         /* Use plain SPME B-spline interpolation */
933         make_bspline_moduli(pme->bsp_mod, pme->nkx, pme->nky, pme->nkz, pme->pme_order);
934     }
935     else
936     {
937         /* Use the P3M grid-optimized influence function */
938         make_p3m_bspline_moduli(pme->bsp_mod, pme->nkx, pme->nky, pme->nkz, pme->pme_order);
939     }
940
941     /* Use atc[0] for spreading */
942     init_atomcomm(pme.get(), &pme->atc[0], numPmeDomains.x > 1 ? 0 : 1, TRUE);
943     if (pme->ndecompdim >= 2)
944     {
945         init_atomcomm(pme.get(), &pme->atc[1], 1, FALSE);
946     }
947
948     if (pme->nnodes == 1)
949     {
950         pme->atc[0].n = homenr;
951         pme_realloc_atomcomm_things(&pme->atc[0]);
952     }
953
954     pme->lb_buf1       = nullptr;
955     pme->lb_buf2       = nullptr;
956     pme->lb_buf_nalloc = 0;
957
958     if (pme_gpu_active(pme.get()))
959     {
960         if (!pme->gpu)
961         {
962             // Initial check of validity of the data
963             std::string errorString;
964             bool        canRunOnGpu = pme_gpu_check_restrictions(pme.get(), &errorString);
965             if (!canRunOnGpu)
966             {
967                 GMX_THROW(gmx::NotImplementedError(errorString));
968             }
969         }
970
971         pme_gpu_reinit(pme.get(), gpuInfo, pmeGpuProgram);
972     }
973
974     pme_init_all_work(&pme->solve_work, pme->nthread, pme->nkx);
975
976     // no exception was thrown during the init, so we hand over the PME structure handle
977     return pme.release();
978 }
979
980 void gmx_pme_reinit(struct gmx_pme_t **pmedata,
981                     const t_commrec   *cr,
982                     struct gmx_pme_t * pme_src,
983                     const t_inputrec * ir,
984                     const ivec         grid_size,
985                     real               ewaldcoeff_q,
986                     real               ewaldcoeff_lj)
987 {
988     int        homenr;
989
990     // Create a copy of t_inputrec fields that are used in gmx_pme_init().
991     // TODO: This would be better as just copying a sub-structure that contains
992     // all the PME parameters and nothing else.
993     t_inputrec irc;
994     irc.ePBC                   = ir->ePBC;
995     irc.coulombtype            = ir->coulombtype;
996     irc.vdwtype                = ir->vdwtype;
997     irc.efep                   = ir->efep;
998     irc.pme_order              = ir->pme_order;
999     irc.epsilon_r              = ir->epsilon_r;
1000     irc.ljpme_combination_rule = ir->ljpme_combination_rule;
1001     irc.nkx                    = grid_size[XX];
1002     irc.nky                    = grid_size[YY];
1003     irc.nkz                    = grid_size[ZZ];
1004
1005     if (pme_src->nnodes == 1)
1006     {
1007         homenr = pme_src->atc[0].n;
1008     }
1009     else
1010     {
1011         homenr = -1;
1012     }
1013
1014     try
1015     {
1016         const gmx::MDLogger dummyLogger;
1017         // This is reinit which is currently only changing grid size/coefficients,
1018         // so we don't expect the actual logging.
1019         // TODO: when PME is an object, it should take reference to mdlog on construction and save it.
1020         GMX_ASSERT(pmedata, "Invalid PME pointer");
1021         NumPmeDomains numPmeDomains = { pme_src->nnodes_major, pme_src->nnodes_minor };
1022         *pmedata = gmx_pme_init(cr, numPmeDomains,
1023                                 &irc, homenr, pme_src->bFEP_q, pme_src->bFEP_lj, FALSE, ewaldcoeff_q, ewaldcoeff_lj,
1024                                 pme_src->nthread, pme_src->runMode, pme_src->gpu, nullptr, nullptr, dummyLogger);
1025         // TODO: this is mostly passing around current values
1026     }
1027     GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1028
1029     /* We can easily reuse the allocated pme grids in pme_src */
1030     reuse_pmegrids(&pme_src->pmegrid[PME_GRID_QA], &(*pmedata)->pmegrid[PME_GRID_QA]);
1031     /* We would like to reuse the fft grids, but that's harder */
1032 }
1033
1034 void gmx_pme_calc_energy(struct gmx_pme_t *pme, int n, rvec *x, real *q, real *V)
1035 {
1036     pme_atomcomm_t *atc;
1037     pmegrids_t     *grid;
1038
1039     if (pme->nnodes > 1)
1040     {
1041         gmx_incons("gmx_pme_calc_energy called in parallel");
1042     }
1043     if (pme->bFEP_q)
1044     {
1045         gmx_incons("gmx_pme_calc_energy with free energy");
1046     }
1047
1048     atc            = &pme->atc_energy;
1049     atc->nthread   = 1;
1050     if (atc->spline == nullptr)
1051     {
1052         snew(atc->spline, atc->nthread);
1053     }
1054     atc->nslab     = 1;
1055     atc->bSpread   = TRUE;
1056     atc->pme_order = pme->pme_order;
1057     atc->n         = n;
1058     pme_realloc_atomcomm_things(atc);
1059     atc->x           = x;
1060     atc->coefficient = q;
1061
1062     /* We only use the A-charges grid */
1063     grid = &pme->pmegrid[PME_GRID_QA];
1064
1065     /* Only calculate the spline coefficients, don't actually spread */
1066     spread_on_grid(pme, atc, nullptr, TRUE, FALSE, pme->fftgrid[PME_GRID_QA], FALSE, PME_GRID_QA);
1067
1068     *V = gather_energy_bsplines(pme, grid->grid.grid, atc);
1069 }
1070
1071 /*! \brief Calculate initial Lorentz-Berthelot coefficients for LJ-PME */
1072 static void
1073 calc_initial_lb_coeffs(struct gmx_pme_t *pme, const real *local_c6, const real *local_sigma)
1074 {
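    /* Sketch of the LB scheme: the per-atom coefficient starts at
     * local_c6[i] / sigma_i^4 and is multiplied by sigma_i once
     * (calc_next_lb_coeffs) before each of the seven LB grids in
     * gmx_pme_do(), so grid t (t = 0..6) spreads local_c6[i] * sigma_i^(t-3).
     * Assuming local_c6 holds sqrt(C6_i), these are the per-atom factors of
     * the binomial expansion of (sigma_i + sigma_j)^6 / (sigma_i^3 sigma_j^3). */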
1075     int  i;
1076     for (i = 0; i < pme->atc[0].n; ++i)
1077     {
1078         real sigma4;
1079         sigma4                     = local_sigma[i];
1080         sigma4                     = sigma4*sigma4;
1081         sigma4                     = sigma4*sigma4;
1082         pme->atc[0].coefficient[i] = local_c6[i] / sigma4;
1083     }
1084 }
1085
1086 /*! \brief Calculate next Lorentz-Berthelot coefficients for LJ-PME */
1087 static void
1088 calc_next_lb_coeffs(struct gmx_pme_t *pme, const real *local_sigma)
1089 {
1090     int  i;
1091
1092     for (i = 0; i < pme->atc[0].n; ++i)
1093     {
1094         pme->atc[0].coefficient[i] *= local_sigma[i];
1095     }
1096 }
1097
1098 int gmx_pme_do(struct gmx_pme_t *pme,
1099                int start,       int homenr,
1100                rvec x[],        rvec f[],
1101                real chargeA[],  real chargeB[],
1102                real c6A[],      real c6B[],
1103                real sigmaA[],   real sigmaB[],
1104                matrix box,      const t_commrec *cr,
1105                int  maxshift_x, int maxshift_y,
1106                t_nrnb *nrnb,    gmx_wallcycle *wcycle,
1107                matrix vir_q,    matrix vir_lj,
1108                real *energy_q,  real *energy_lj,
1109                real lambda_q,   real lambda_lj,
1110                real *dvdlambda_q, real *dvdlambda_lj,
1111                int flags)
1112 {
1113     GMX_ASSERT(pme->runMode == PmeRunMode::CPU, "gmx_pme_do should not be called on the GPU PME run.");
1114
1115     int                  d, i, j, npme, grid_index, max_grid_index;
1116     int                  n_d;
1117     pme_atomcomm_t      *atc        = nullptr;
1118     pmegrids_t          *pmegrid    = nullptr;
1119     real                *grid       = nullptr;
1120     rvec                *f_d;
1121     real                *coefficient = nullptr;
1122     real                 energy_AB[4];
1123     matrix               vir_AB[4];
1124     real                 scale, lambda;
1125     gmx_bool             bClearF;
1126     gmx_parallel_3dfft_t pfft_setup;
1127     real              *  fftgrid;
1128     t_complex          * cfftgrid;
1129     int                  thread;
1130     gmx_bool             bFirst, bDoSplines;
1131     int                  fep_state;
1132     int                  fep_states_lj           = pme->bFEP_lj ? 2 : 1;
1133     const gmx_bool       bCalcEnerVir            = flags & GMX_PME_CALC_ENER_VIR;
1134     const gmx_bool       bBackFFT                = flags & (GMX_PME_CALC_F | GMX_PME_CALC_POT);
1135     const gmx_bool       bCalcF                  = flags & GMX_PME_CALC_F;
1136
1137     assert(pme->nnodes > 0);
1138     assert(pme->nnodes == 1 || pme->ndecompdim > 0);
1139
1140     if (pme->nnodes > 1)
1141     {
1142         atc      = &pme->atc[0];
1143         atc->npd = homenr;
1144         if (atc->npd > atc->pd_nalloc)
1145         {
1146             atc->pd_nalloc = over_alloc_dd(atc->npd);
1147             srenew(atc->pd, atc->pd_nalloc);
1148         }
1149         for (d = pme->ndecompdim-1; d >= 0; d--)
1150         {
1151             atc           = &pme->atc[d];
1152             atc->maxshift = (atc->dimind == 0 ? maxshift_x : maxshift_y);
1153         }
1154     }
1155     else
1156     {
1157         atc = &pme->atc[0];
1158         /* This could be necessary for TPI */
1159         pme->atc[0].n = homenr;
1160         if (DOMAINDECOMP(cr))
1161         {
1162             pme_realloc_atomcomm_things(atc);
1163         }
1164         atc->x = x;
1165         atc->f = f;
1166     }
1167
1168     matrix scaledBox;
1169     pme->boxScaler->scaleBox(box, scaledBox);
1170
1171     gmx::invertBoxMatrix(scaledBox, pme->recipbox);
1172     bFirst = TRUE;
1173
1174     /* For simplicity, we construct the splines for all particles if
1175      * more than one PME calculation is needed. Some optimization
1176      * could be done by keeping track of which atoms have splines
1177      * constructed, and construct new splines on each pass for atoms
1178      * that don't yet have them.
1179      */
1180
1181     bDoSplines = pme->bFEP || (pme->doCoulomb && pme->doLJ);
1182
1183     /* We need a maximum of four separate PME calculations:
1184      * grid_index=0: Coulomb PME with charges from state A
1185      * grid_index=1: Coulomb PME with charges from state B
1186      * grid_index=2: LJ PME with C6 from state A
1187      * grid_index=3: LJ PME with C6 from state B
1188      * For Lorentz-Berthelot combination rules, a separate loop is used to
1189      * calculate all the terms
1190      */
1191
1192     /* If we are doing LJ-PME with LB, we only do Q here */
1193     max_grid_index = (pme->ljpme_combination_rule == eljpmeLB) ? DO_Q : DO_Q_AND_LJ;
1194
1195     for (grid_index = 0; grid_index < max_grid_index; ++grid_index)
1196     {
1197         /* Check if we should do calculations at this grid_index
1198          * If grid_index is odd we should be doing FEP
1199          * If grid_index < 2 we should be doing electrostatic PME
1200          * If grid_index >= 2 we should be doing LJ-PME
1201          */
1202         if ((grid_index <  DO_Q && (!pme->doCoulomb ||
1203                                     (grid_index == 1 && !pme->bFEP_q))) ||
1204             (grid_index >= DO_Q && (!pme->doLJ ||
1205                                     (grid_index == 3 && !pme->bFEP_lj))))
1206         {
1207             continue;
1208         }
1209         /* Unpack structure */
1210         pmegrid    = &pme->pmegrid[grid_index];
1211         fftgrid    = pme->fftgrid[grid_index];
1212         cfftgrid   = pme->cfftgrid[grid_index];
1213         pfft_setup = pme->pfft_setup[grid_index];
1214         switch (grid_index)
1215         {
1216             case 0: coefficient = chargeA + start; break;
1217             case 1: coefficient = chargeB + start; break;
1218             case 2: coefficient = c6A + start; break;
1219             case 3: coefficient = c6B + start; break;
1220         }
1221
1222         grid = pmegrid->grid.grid;
1223
1224         if (debug)
1225         {
1226             fprintf(debug, "PME: number of ranks = %d, rank = %d\n",
1227                     cr->nnodes, cr->nodeid);
1228             fprintf(debug, "Grid = %p\n", static_cast<void*>(grid));
1229             if (grid == nullptr)
1230             {
1231                 gmx_fatal(FARGS, "No grid!");
1232             }
1233         }
1234
1235         if (pme->nnodes == 1)
1236         {
1237             atc->coefficient = coefficient;
1238         }
1239         else
1240         {
1241             wallcycle_start(wcycle, ewcPME_REDISTXF);
1242             do_redist_pos_coeffs(pme, cr, start, homenr, bFirst, x, coefficient);
1243
1244             wallcycle_stop(wcycle, ewcPME_REDISTXF);
1245         }
1246
1247         if (debug)
1248         {
1249             fprintf(debug, "Rank= %6d, pme local particles=%6d\n",
1250                     cr->nodeid, atc->n);
1251         }
1252
1253         if (flags & GMX_PME_SPREAD)
1254         {
1255             wallcycle_start(wcycle, ewcPME_SPREAD);
1256
1257             /* Spread the coefficients on a grid */
1258             spread_on_grid(pme, &pme->atc[0], pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);
1259
1260             if (bFirst)
1261             {
1262                 inc_nrnb(nrnb, eNR_WEIGHTS, DIM*atc->n);
1263             }
1264             inc_nrnb(nrnb, eNR_SPREADBSP,
1265                      pme->pme_order*pme->pme_order*pme->pme_order*atc->n);
1266
1267             if (!pme->bUseThreads)
1268             {
1269                 wrap_periodic_pmegrid(pme, grid);
1270
1271                 /* sum contributions to local grid from other nodes */
1272 #if GMX_MPI
1273                 if (pme->nnodes > 1)
1274                 {
1275                     gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
1276                 }
1277 #endif
1278
1279                 copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
1280             }
1281
1282             wallcycle_stop(wcycle, ewcPME_SPREAD);
1283
1284             /* TODO If the OpenMP and single-threaded implementations
1285                converge, then spread_on_grid() and
1286                copy_pmegrid_to_fftgrid() will perhaps live in the same
1287                source file.
1288              */
1289         }
1290
1291         /* Here we start a large thread parallel region */
1292 #pragma omp parallel num_threads(pme->nthread) private(thread)
1293         {
1294             try
1295             {
1296                 thread = gmx_omp_get_thread_num();
1297                 if (flags & GMX_PME_SOLVE)
1298                 {
1299                     int loop_count;
1300
1301                     /* do 3d-fft */
1302                     if (thread == 0)
1303                     {
1304                         wallcycle_start(wcycle, ewcPME_FFT);
1305                     }
1306                     gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX,
1307                                                thread, wcycle);
1308                     if (thread == 0)
1309                     {
1310                         wallcycle_stop(wcycle, ewcPME_FFT);
1311                     }
1312
1313                     /* solve in k-space for our local cells */
1314                     if (thread == 0)
1315                     {
1316                         wallcycle_start(wcycle, (grid_index < DO_Q ? ewcPME_SOLVE : ewcLJPME));
1317                     }
1318                     if (grid_index < DO_Q)
1319                     {
1320                         loop_count =
1321                             solve_pme_yzx(pme, cfftgrid,
1322                                           scaledBox[XX][XX]*scaledBox[YY][YY]*scaledBox[ZZ][ZZ],
1323                                           bCalcEnerVir,
1324                                           pme->nthread, thread);
1325                     }
1326                     else
1327                     {
1328                         loop_count =
1329                             solve_pme_lj_yzx(pme, &cfftgrid, FALSE,
1330                                              scaledBox[XX][XX]*scaledBox[YY][YY]*scaledBox[ZZ][ZZ],
1331                                              bCalcEnerVir,
1332                                              pme->nthread, thread);
1333                     }
1334
1335                     if (thread == 0)
1336                     {
1337                         wallcycle_stop(wcycle, (grid_index < DO_Q ? ewcPME_SOLVE : ewcLJPME));
1338                         inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
1339                     }
1340                 }
1341
1342                 if (bBackFFT)
1343                 {
1344                     /* do 3d-invfft */
1345                     if (thread == 0)
1346                     {
1347                         wallcycle_start(wcycle, ewcPME_FFT);
1348                     }
1349                     gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL,
1350                                                thread, wcycle);
1351                     if (thread == 0)
1352                     {
1353                         wallcycle_stop(wcycle, ewcPME_FFT);
1354
1355
1356                         if (pme->nodeid == 0)
1357                         {
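                            /* Estimate the FFT cost as ntot*log2(ntot);
                             * the factor 2 below covers the forward and
                             * backward transforms of this grid. */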
1358                             real ntot = pme->nkx*pme->nky*pme->nkz;
1359                             npme  = static_cast<int>(ntot*std::log(ntot)/std::log(2.0));
1360                             inc_nrnb(nrnb, eNR_FFT, 2*npme);
1361                         }
1362
1363                         /* Note: this wallcycle region is closed below
1364                            outside an OpenMP region, so take care if
1365                            refactoring code here. */
1366                         wallcycle_start(wcycle, ewcPME_GATHER);
1367                     }
1368
1369                     copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
1370                 }
1371             } GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1372         }
1373         /* End of thread parallel section.
1374          * With MPI we have to synchronize here before gmx_sum_qgrid_dd.
1375          */
1376
1377         if (bBackFFT)
1378         {
1379             /* distribute local grid to all nodes */
1380 #if GMX_MPI
1381             if (pme->nnodes > 1)
1382             {
1383                 gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
1384             }
1385 #endif
1386
1387             unwrap_periodic_pmegrid(pme, grid);
1388         }
1389
1390         if (bCalcF)
1391         {
1392             /* interpolate forces for our local atoms */
1393
1394
1395             /* If we are running without parallelization,
1396              * atc->f is the actual force array, not a buffer,
1397              * therefore we should not clear it.
1398              */
1399             lambda  = grid_index < DO_Q ? lambda_q : lambda_lj;
1400             bClearF = (bFirst && PAR(cr));
1401 #pragma omp parallel for num_threads(pme->nthread) schedule(static)
1402             for (thread = 0; thread < pme->nthread; thread++)
1403             {
1404                 try
1405                 {
1406                     gather_f_bsplines(pme, grid, bClearF, atc,
1407                                       &atc->spline[thread],
1408                                       pme->bFEP ? (grid_index % 2 == 0 ? 1.0-lambda : lambda) : 1.0);
1409                 }
1410                 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1411             }
1412
1413
1414             inc_nrnb(nrnb, eNR_GATHERFBSP,
1415                      pme->pme_order*pme->pme_order*pme->pme_order*pme->atc[0].n);
1416             /* Note: this wallcycle region is opened above inside an OpenMP
1417                region, so take care if refactoring code here. */
1418             wallcycle_stop(wcycle, ewcPME_GATHER);
1419         }
1420
1421         if (bCalcEnerVir)
1422         {
1423             /* This should only be called on the master thread
1424              * and after the threads have synchronized.
1425              */
1426             if (grid_index < 2)
1427             {
1428                 get_pme_ener_vir_q(pme->solve_work, pme->nthread, &energy_AB[grid_index], vir_AB[grid_index]);
1429             }
1430             else
1431             {
1432                 get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &energy_AB[grid_index], vir_AB[grid_index]);
1433             }
1434         }
1435         bFirst = FALSE;
1436     } /* of grid_index-loop */
1437
1438     /* For Lorentz-Berthelot combination rules in LJ-PME, we need to calculate
1439      * seven terms. */
1440
1441     if (pme->doLJ && pme->ljpme_combination_rule == eljpmeLB)
1442     {
1443         /* Loop over A- and B-state if we are doing FEP */
1444         for (fep_state = 0; fep_state < fep_states_lj; ++fep_state)
1445         {
1446             real *local_c6 = nullptr, *local_sigma = nullptr, *RedistC6 = nullptr, *RedistSigma = nullptr;
            if (pme->nnodes == 1)
            {
                if (pme->lb_buf1 == nullptr)
                {
                    pme->lb_buf_nalloc = pme->atc[0].n;
                    snew(pme->lb_buf1, pme->lb_buf_nalloc);
                }
                pme->atc[0].coefficient = pme->lb_buf1;
                switch (fep_state)
                {
                    case 0:
                        local_c6      = c6A;
                        local_sigma   = sigmaA;
                        break;
                    case 1:
                        local_c6      = c6B;
                        local_sigma   = sigmaB;
                        break;
                    default:
                        gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
                }
            }
            else
            {
                atc = &pme->atc[0];
                switch (fep_state)
                {
                    case 0:
                        RedistC6      = c6A;
                        RedistSigma   = sigmaA;
                        break;
                    case 1:
                        RedistC6      = c6B;
                        RedistSigma   = sigmaB;
                        break;
                    default:
                        gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
                }
                wallcycle_start(wcycle, ewcPME_REDISTXF);

                do_redist_pos_coeffs(pme, cr, start, homenr, bFirst, x, RedistC6);
                if (pme->lb_buf_nalloc < atc->n)
                {
                    pme->lb_buf_nalloc = atc->nalloc;
                    srenew(pme->lb_buf1, pme->lb_buf_nalloc);
                    srenew(pme->lb_buf2, pme->lb_buf_nalloc);
                }
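                /* do_redist_pos_coeffs() left the redistributed C6 values in
                 * atc->coefficient; copy them out before the sigma
                 * redistribution below overwrites that array. */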
                local_c6 = pme->lb_buf1;
                for (i = 0; i < atc->n; ++i)
                {
                    local_c6[i] = atc->coefficient[i];
                }

                do_redist_pos_coeffs(pme, cr, start, homenr, FALSE, x, RedistSigma);
                local_sigma = pme->lb_buf2;
                for (i = 0; i < atc->n; ++i)
                {
                    local_sigma[i] = atc->coefficient[i];
                }

                wallcycle_stop(wcycle, ewcPME_REDISTXF);
            }
            calc_initial_lb_coeffs(pme, local_c6, local_sigma);

            /* Seven terms in LJ-PME with LB, grid_index < 2 reserved for electrostatics */
            for (grid_index = 2; grid_index < 9; ++grid_index)
            {
                /* Unpack structure */
                pmegrid    = &pme->pmegrid[grid_index];
                fftgrid    = pme->fftgrid[grid_index];
                pfft_setup = pme->pfft_setup[grid_index];
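                /* Advance the per-atom coefficients to the next term of the
                 * LB expansion before spreading this grid. */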
                calc_next_lb_coeffs(pme, local_sigma);
                grid = pmegrid->grid.grid;

                if (flags & GMX_PME_SPREAD)
                {
                    wallcycle_start(wcycle, ewcPME_SPREAD);
                    /* Spread the c6 on a grid */
                    spread_on_grid(pme, &pme->atc[0], pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);

                    if (bFirst)
                    {
                        inc_nrnb(nrnb, eNR_WEIGHTS, DIM*atc->n);
                    }

                    inc_nrnb(nrnb, eNR_SPREADBSP,
                             pme->pme_order*pme->pme_order*pme->pme_order*atc->n);
                    if (pme->nthread == 1)
                    {
                        wrap_periodic_pmegrid(pme, grid);
                        /* sum contributions to local grid from other nodes */
#if GMX_MPI
                        if (pme->nnodes > 1)
                        {
                            gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
                        }
#endif
                        copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
                    }
                    wallcycle_stop(wcycle, ewcPME_SPREAD);
                }
                /* Here we start a large thread parallel region */
#pragma omp parallel num_threads(pme->nthread) private(thread)
                {
                    try
                    {
                        thread = gmx_omp_get_thread_num();
                        if (flags & GMX_PME_SOLVE)
                        {
                            /* do 3d-fft */
                            if (thread == 0)
                            {
                                wallcycle_start(wcycle, ewcPME_FFT);
                            }

                            gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX,
                                                       thread, wcycle);
                            if (thread == 0)
                            {
                                wallcycle_stop(wcycle, ewcPME_FFT);
                            }
                        }
                    }
                    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
                }
                bFirst = FALSE;
            }
            if (flags & GMX_PME_SOLVE)
            {
                /* solve in k-space for our local cells */
#pragma omp parallel num_threads(pme->nthread) private(thread)
                {
                    try
                    {
                        int loop_count;
                        thread = gmx_omp_get_thread_num();
                        if (thread == 0)
                        {
                            wallcycle_start(wcycle, ewcLJPME);
                        }

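                        /* GROMACS boxes are lower triangular, so the box
                         * volume is the product of the diagonal elements. */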
                        loop_count =
                            solve_pme_lj_yzx(pme, &pme->cfftgrid[2], TRUE,
                                             scaledBox[XX][XX]*scaledBox[YY][YY]*scaledBox[ZZ][ZZ],
                                             bCalcEnerVir,
                                             pme->nthread, thread);
                        if (thread == 0)
                        {
                            wallcycle_stop(wcycle, ewcLJPME);
                            inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
                        }
                    }
                    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
                }
            }

            if (bCalcEnerVir)
            {
                /* This should only be called on the master thread and
                 * after the threads have synchronized.
                 */
                get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &energy_AB[2+fep_state], vir_AB[2+fep_state]);
            }

            if (bBackFFT)
            {
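                /* If the Coulomb grids were gathered above, the force buffer
                 * already holds those forces and must not be cleared again. */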
                bFirst = !pme->doCoulomb;
                calc_initial_lb_coeffs(pme, local_c6, local_sigma);
                for (grid_index = 8; grid_index >= 2; --grid_index)
                {
                    /* Unpack structure */
                    pmegrid    = &pme->pmegrid[grid_index];
                    fftgrid    = pme->fftgrid[grid_index];
                    pfft_setup = pme->pfft_setup[grid_index];
                    grid       = pmegrid->grid.grid;
                    calc_next_lb_coeffs(pme, local_sigma);
#pragma omp parallel num_threads(pme->nthread) private(thread)
                    {
                        try
                        {
                            thread = gmx_omp_get_thread_num();
                            /* do 3d-invfft */
                            if (thread == 0)
                            {
                                wallcycle_start(wcycle, ewcPME_FFT);
                            }

                            gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL,
                                                       thread, wcycle);
                            if (thread == 0)
                            {
                                wallcycle_stop(wcycle, ewcPME_FFT);

                                if (pme->nodeid == 0)
                                {
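                                    /* Estimate the FFT cost as ntot*log2(ntot);
                                     * the factor 2 presumably counts the
                                     * forward and backward transforms. */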
                                    real ntot = pme->nkx*pme->nky*pme->nkz;
                                    npme  = static_cast<int>(ntot*std::log(ntot)/std::log(2.0));
                                    inc_nrnb(nrnb, eNR_FFT, 2*npme);
                                }
                                wallcycle_start(wcycle, ewcPME_GATHER);
                            }

                            copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
                        }
                        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
                    } /* #pragma omp parallel */

                    /* distribute local grid to all nodes */
#if GMX_MPI
                    if (pme->nnodes > 1)
                    {
                        gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
                    }
#endif

                    unwrap_periodic_pmegrid(pme, grid);

                    if (bCalcF)
                    {
                        /* interpolate forces for our local atoms */
                        bClearF = (bFirst && PAR(cr));
                        scale   = pme->bFEP ? (fep_state < 1 ? 1.0-lambda_lj : lambda_lj) : 1.0;
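                        /* lb_scale_factor[] carries the binomial weight of
                         * this term of the LB expansion. */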
                        scale  *= lb_scale_factor[grid_index-2];

#pragma omp parallel for num_threads(pme->nthread) schedule(static)
                        for (thread = 0; thread < pme->nthread; thread++)
                        {
                            try
                            {
                                gather_f_bsplines(pme, grid, bClearF, &pme->atc[0],
                                                  &pme->atc[0].spline[thread],
                                                  scale);
                            }
                            GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
                        }

                        inc_nrnb(nrnb, eNR_GATHERFBSP,
                                 pme->pme_order*pme->pme_order*pme->pme_order*pme->atc[0].n);
                    }
                    wallcycle_stop(wcycle, ewcPME_GATHER);

                    bFirst = FALSE;
                } /* for (grid_index = 8; grid_index >= 2; --grid_index) */
            }     /* if (bBackFFT) */
        }         /* for (fep_state = 0; fep_state < fep_states_lj; ++fep_state) */
    }             /* if (pme->doLJ && pme->ljpme_combination_rule == eljpmeLB) */

    if (bCalcF && pme->nnodes > 1)
    {
        wallcycle_start(wcycle, ewcPME_REDISTXF);
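        /* Pass the forces back through the decomposition dimensions; the
         * final step delivers them into the caller's force array f. */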
        for (d = 0; d < pme->ndecompdim; d++)
        {
            atc = &pme->atc[d];
            if (d == pme->ndecompdim - 1)
            {
                n_d = homenr;
                f_d = f + start;
            }
            else
            {
                n_d = pme->atc[d+1].n;
                f_d = pme->atc[d+1].f;
            }
            if (DOMAINDECOMP(cr))
            {
                dd_pmeredist_f(pme, atc, n_d, f_d,
                               d == pme->ndecompdim-1 && pme->bPPnode);
            }
        }

        wallcycle_stop(wcycle, ewcPME_REDISTXF);
    }

    if (bCalcEnerVir)
    {
        if (pme->doCoulomb)
        {
            if (!pme->bFEP_q)
            {
                *energy_q = energy_AB[0];
                m_add(vir_q, vir_AB[0], vir_q);
            }
            else
            {
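                /* Linear FEP interpolation: E(lambda) = (1-lambda)*E_A + lambda*E_B,
                 * so dE/dlambda = E_B - E_A; the virial is mixed the same way. */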
                *energy_q       = (1.0-lambda_q)*energy_AB[0] + lambda_q*energy_AB[1];
                *dvdlambda_q   += energy_AB[1] - energy_AB[0];
                for (i = 0; i < DIM; i++)
                {
                    for (j = 0; j < DIM; j++)
                    {
                        vir_q[i][j] += (1.0-lambda_q)*vir_AB[0][i][j] +
                            lambda_q*vir_AB[1][i][j];
                    }
                }
            }
            if (debug)
            {
                fprintf(debug, "Electrostatic PME mesh energy: %g\n", *energy_q);
            }
        }
        else
        {
            *energy_q = 0;
        }

        if (pme->doLJ)
        {
            if (!pme->bFEP_lj)
            {
                *energy_lj = energy_AB[2];
                m_add(vir_lj, vir_AB[2], vir_lj);
            }
            else
            {
                *energy_lj     = (1.0-lambda_lj)*energy_AB[2] + lambda_lj*energy_AB[3];
                *dvdlambda_lj += energy_AB[3] - energy_AB[2];
                for (i = 0; i < DIM; i++)
                {
                    for (j = 0; j < DIM; j++)
                    {
                        vir_lj[i][j] += (1.0-lambda_lj)*vir_AB[2][i][j] + lambda_lj*vir_AB[3][i][j];
                    }
                }
            }
            if (debug)
            {
                fprintf(debug, "Lennard-Jones PME mesh energy: %g\n", *energy_lj);
            }
        }
        else
        {
            *energy_lj = 0;
        }
    }
    return 0;
}

void gmx_pme_destroy(gmx_pme_t *pme)
{
    if (!pme)
    {
        return;
    }

    delete pme->boxScaler;

    sfree(pme->nnx);
    sfree(pme->nny);
    sfree(pme->nnz);
    sfree(pme->fshx);
    sfree(pme->fshy);
    sfree(pme->fshz);

    for (int i = 0; i < pme->ngrids; ++i)
    {
        pmegrids_destroy(&pme->pmegrid[i]);
    }
    if (pme->pfft_setup)
    {
        for (int i = 0; i < pme->ngrids; ++i)
        {
            gmx_parallel_3dfft_destroy(pme->pfft_setup[i]);
        }
    }
    sfree(pme->fftgrid);
    sfree(pme->cfftgrid);
    sfree(pme->pfft_setup);

    for (int i = 0; i < std::max(1, pme->ndecompdim); i++) // pme->atc[0] is always allocated
    {
        destroy_atomcomm(&pme->atc[i]);
    }

    for (int i = 0; i < DIM; i++)
    {
        sfree(pme->bsp_mod[i]);
    }

    sfree(pme->lb_buf1);
    sfree(pme->lb_buf2);

    sfree(pme->bufv);
    sfree(pme->bufr);

    if (pme->solve_work)
    {
        pme_free_all_work(&pme->solve_work, pme->nthread);
    }

    sfree(pme->sum_qgrid_tmp);
    sfree(pme->sum_qgrid_dd_tmp);

    destroy_pme_spline_work(pme->spline_work);

    if (pme_gpu_active(pme) && pme->gpu)
    {
        pme_gpu_destroy(pme->gpu);
    }

    delete pme;
}

void gmx_pme_reinit_atoms(const gmx_pme_t *pme, const int nAtoms, const real *charges)
{
    if (pme_gpu_active(pme))
    {
        pme_gpu_reinit_atoms(pme->gpu, nAtoms, charges);
    }
    // TODO: handle the CPU case here; handle the whole t_mdatoms
}