/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief This file contains function definitions necessary for
 * computing energies and forces for the PME long-ranged part (Coulomb
 * and LJ).
 *
 * \author Erik Lindahl <erik@kth.se>
 * \author Berk Hess <hess@kth.se>
 * \ingroup module_ewald
 */
/* IMPORTANT FOR DEVELOPERS:
 *
 * Triclinic pme stuff isn't entirely trivial, and we've experienced
 * some bugs during development (many of them due to me). To avoid
 * this in the future, please check the following things if you make
 * changes in this file:
 *
 * 1. You should obtain identical (at least to the PME precision)
 *    energies, forces, and virial for
 *    a rectangular box and a triclinic one where the z (or y) axis is
 *    tilted a whole box side. For instance you could use these boxes:
 *
 *    rectangular       triclinic
 *     2  0  0           2  0  0
 *     0  2  0           0  2  0
 *     0  0  6           2  2  6
 *
 * 2. You should check the energy conservation in a triclinic box.
 *
 * It might seem like overkill, but better safe than sorry.
 * /Erik 001109
 */

#include "gmxpre.h"

#include "pme.h"

#include "config.h"

#include <cassert>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>

#include <algorithm>
#include <list>

#include "gromacs/domdec/domdec.h"
#include "gromacs/ewald/ewald_utils.h"
#include "gromacs/fft/parallel_3dfft.h"
#include "gromacs/fileio/pdbio.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/hardware/hw_info.h"
#include "gromacs/math/gmxcomplex.h"
#include "gromacs/math/invertmatrix.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/forcerec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/real.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/stringutil.h"
#include "gromacs/utility/unique_cptr.h"

#include "calculate_spline_moduli.h"
#include "pme_gather.h"
#include "pme_gpu_internal.h"
#include "pme_grid.h"
#include "pme_internal.h"
#include "pme_redistribute.h"
#include "pme_solve.h"
#include "pme_spline_work.h"
#include "pme_spread.h"

/*! \brief Help build a descriptive message in \c error if there are
 * \c errorReasons why PME on GPU is not supported.
 *
 * \returns Whether the lack of errorReasons indicates there is support. */
static bool addMessageIfNotSupported(const std::list<std::string>& errorReasons, std::string* error)
{
    bool isSupported = errorReasons.empty();
    if (!isSupported && error)
    {
        std::string regressionTestMarker = "PME GPU does not support";
        // this prefix is tested for in the regression test script gmxtest.pl
        *error = regressionTestMarker;
        if (errorReasons.size() == 1)
        {
            *error += " " + errorReasons.back();
        }
        else
        {
            *error += ": " + gmx::joinStrings(errorReasons, "; ");
        }
        *error += ".";
    }
    return isSupported;
}
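/* Example of the message assembled above (illustration only, not from the
 * original source): with errorReasons = { "a double-precision build",
 * "a non-GPU build" } the caller receives
 *   "PME GPU does not support: a double-precision build; a non-GPU build."
 * while a single reason yields
 *   "PME GPU does not support a double-precision build."
 */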

bool pme_gpu_supports_build(std::string* error)
{
    std::list<std::string> errorReasons;
    if (GMX_DOUBLE)
    {
        errorReasons.emplace_back("a double-precision build");
    }
    if (GMX_GPU == GMX_GPU_NONE)
    {
        errorReasons.emplace_back("a non-GPU build");
    }
    return addMessageIfNotSupported(errorReasons, error);
}

bool pme_gpu_supports_hardware(const gmx_hw_info_t gmx_unused& hwinfo, std::string* error)
{
    std::list<std::string> errorReasons;

    if (GMX_GPU == GMX_GPU_OPENCL)
    {
#ifdef __APPLE__
        errorReasons.emplace_back("Apple OS X operating system");
#endif
    }
    return addMessageIfNotSupported(errorReasons, error);
}

bool pme_gpu_supports_input(const t_inputrec& ir, const gmx_mtop_t& mtop, std::string* error)
{
    std::list<std::string> errorReasons;
    if (!EEL_PME(ir.coulombtype))
    {
        errorReasons.emplace_back("systems that do not use PME for electrostatics");
    }
    if (ir.pme_order != 4)
    {
        errorReasons.emplace_back("interpolation orders other than 4");
    }
    if (ir.efep != efepNO)
    {
        if (gmx_mtop_has_perturbed_charges(mtop))
        {
            errorReasons.emplace_back(
                    "free energy calculations with perturbed charges (multiple grids)");
        }
    }
    if (EVDW_PME(ir.vdwtype))
    {
        errorReasons.emplace_back("Lennard-Jones PME");
    }
    if (!EI_DYNAMICS(ir.eI))
    {
        errorReasons.emplace_back("not a dynamical integrator");
    }
    return addMessageIfNotSupported(errorReasons, error);
}

/*! \brief \libinternal
 * Finds out whether PME with the given inputs can run on a GPU.
 * This function is an internal final check, validating the whole PME structure on creation,
 * but it still duplicates the preliminary checks from the above (externally exposed) pme_gpu_supports_input() - just in case.
 *
 * \param[in]  pme          The PME structure.
 * \param[out] error        The error message if the input is not supported on GPU.
 * \returns                 True if this PME input can run on a GPU, false otherwise.
 */
static bool pme_gpu_check_restrictions(const gmx_pme_t* pme, std::string* error)
{
    std::list<std::string> errorReasons;
    if (pme->nnodes != 1)
    {
        errorReasons.emplace_back("PME decomposition");
    }
    if (pme->pme_order != 4)
    {
        errorReasons.emplace_back("interpolation orders other than 4");
    }
    if (pme->bFEP)
    {
        errorReasons.emplace_back("free energy calculations (multiple grids)");
    }
    if (pme->doLJ)
    {
        errorReasons.emplace_back("Lennard-Jones PME");
    }
    if (GMX_DOUBLE)
    {
        errorReasons.emplace_back("double precision");
    }
    if (GMX_GPU == GMX_GPU_NONE)
    {
        errorReasons.emplace_back("non-GPU build of GROMACS");
    }

    return addMessageIfNotSupported(errorReasons, error);
}

PmeRunMode pme_run_mode(const gmx_pme_t* pme)
{
    GMX_ASSERT(pme != nullptr, "Expecting valid PME data pointer");
    return pme->runMode;
}

gmx::PinningPolicy pme_get_pinning_policy()
{
    return gmx::PinningPolicy::PinnedIfSupported;
}

/*! \brief Number of bytes in a cache line.
 *
 * Must also be a multiple of the SIMD and SIMD4 register size, to
 * preserve alignment.
 */
const int gmxCacheLineSize = 64;

//! Set up coordinate communication
static void setup_coordinate_communication(PmeAtomComm* atc)
{
    int nslab, n, i;
    int fw, bw;

    nslab = atc->nslab;

    n = 0;
    for (i = 1; i <= nslab / 2; i++)
    {
        fw = (atc->nodeid + i) % nslab;
        bw = (atc->nodeid - i + nslab) % nslab;
        if (n < nslab - 1)
        {
            atc->slabCommSetup[n].node_dest = fw;
            atc->slabCommSetup[n].node_src  = bw;
            n++;
        }
        if (n < nslab - 1)
        {
            atc->slabCommSetup[n].node_dest = bw;
            atc->slabCommSetup[n].node_src  = fw;
            n++;
        }
    }
}
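/* Worked example (illustration only, not from the original source): for
 * nslab = 4 and nodeid = 0 the loop above visits i = 1, 2 and fills the
 * nslab - 1 = 3 slots as
 *   slabCommSetup[0]: node_dest = 1, node_src = 3   (i = 1, forward)
 *   slabCommSetup[1]: node_dest = 3, node_src = 1   (i = 1, backward)
 *   slabCommSetup[2]: node_dest = 2, node_src = 2   (i = 2, forward)
 * so nearby slabs are paired first and the opposite slab is used only once.
 */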

/*! \brief Round \p n up to the next multiple of \p f */
static int mult_up(int n, int f)
{
    return ((n + f - 1) / f) * f;
}
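/* Example: mult_up(10, 4) = ((10 + 3) / 4) * 4 = 12, while mult_up(12, 4) = 12. */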

/*! \brief Return estimate of the load imbalance from the PME grid not being a good match for the number of PME ranks */
static double estimate_pme_load_imbalance(struct gmx_pme_t* pme)
{
    int    nma, nmi;
    double n1, n2, n3;

    nma = pme->nnodes_major;
    nmi = pme->nnodes_minor;

    n1 = mult_up(pme->nkx, nma) * mult_up(pme->nky, nmi) * pme->nkz;
    n2 = mult_up(pme->nkx, nma) * mult_up(pme->nkz, nmi) * pme->nky;
    n3 = mult_up(pme->nky, nma) * mult_up(pme->nkz, nmi) * pme->nkx;

    /* pme_solve is roughly double the cost of an fft */

    return (n1 + n2 + 3 * n3) / static_cast<double>(6 * pme->nkx * pme->nky * pme->nkz);
}
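/* Worked example (illustration only, not from the original source): with
 * nkx = nky = nkz = 25 on a 4 x 1 rank grid, mult_up(25, 4) = 28, so
 * n1 = n2 = n3 = 28 * 25 * 25 and the estimate is
 * (5 * 28 * 25 * 25) / (6 * 25 * 25 * 25) = 140 / 150 ~= 0.93, compared
 * with the ideal 5 / 6 ~= 0.83 for a grid that divides evenly over ranks.
 */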

#ifndef DOXYGEN

PmeAtomComm::PmeAtomComm(MPI_Comm   PmeMpiCommunicator,
                         const int  numThreads,
                         const int  pmeOrder,
                         const int  dimIndex,
                         const bool doSpread) :
    dimind(dimIndex),
    bSpread(doSpread),
    pme_order(pmeOrder),
    nthread(numThreads),
    spline(nthread)
{
    if (PmeMpiCommunicator != MPI_COMM_NULL)
    {
        mpi_comm = PmeMpiCommunicator;
#    if GMX_MPI
        MPI_Comm_size(mpi_comm, &nslab);
        MPI_Comm_rank(mpi_comm, &nodeid);
#    endif
    }
    if (debug)
    {
        fprintf(debug, "For PME atom communication in dimind %d: nslab %d rank %d\n", dimind, nslab, nodeid);
    }

    if (nslab > 1)
    {
        slabCommSetup.resize(nslab);
        setup_coordinate_communication(this);

        count_thread.resize(nthread);
        for (auto& countThread : count_thread)
        {
            countThread.resize(nslab);
        }
    }

    if (nthread > 1)
    {
        threadMap.resize(nthread);

#    pragma omp parallel for num_threads(nthread) schedule(static)
        for (int thread = 0; thread < nthread; thread++)
        {
            try
            {
                /* Pad the per-thread count buffer with one cache line on
                 * each side to avoid cache pollution (false sharing) */
                threadMap[thread].nBuffer.resize(nthread + 2 * gmxCacheLineSize);
                threadMap[thread].n = threadMap[thread].nBuffer.data() + gmxCacheLineSize;
            }
            GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
        }
    }
}

#endif // !DOXYGEN

/*! \brief Initialize data structure for communication */
static void init_overlap_comm(pme_overlap_t* ol, int norder, MPI_Comm comm, int nnodes, int nodeid, int ndata, int commplainsize)
{
    gmx_bool bCont;

    ol->mpi_comm = comm;
    ol->nnodes   = nnodes;
    ol->nodeid   = nodeid;

    /* Linear translation of the PME grid won't affect reciprocal space
     * calculations, so to optimize we only interpolate "upwards",
     * which also means we only have to consider overlap in one direction.
     * I.e., particles on this node might also be spread to grid indices
     * that belong to higher nodes (modulo nnodes)
     */

    ol->s2g0.resize(ol->nnodes + 1);
    ol->s2g1.resize(ol->nnodes);
    if (debug)
    {
        fprintf(debug, "PME slab boundaries:");
    }
    for (int i = 0; i < nnodes; i++)
    {
        /* s2g0 is the local interpolation grid start;
         * s2g1 is the local interpolation grid end.
         * Since in calc_pidx we divide particles (not grid lines)
         * spatially uniformly along dimension x or y, we need to round
         * s2g0 down and s2g1 up.
         */
        ol->s2g0[i] = (i * ndata + 0) / nnodes;
        ol->s2g1[i] = ((i + 1) * ndata + nnodes - 1) / nnodes + norder - 1;

        if (debug)
        {
            fprintf(debug, "  %3d %3d", ol->s2g0[i], ol->s2g1[i]);
        }
    }
    ol->s2g0[nnodes] = ndata;
    if (debug)
    {
        fprintf(debug, "\n");
    }

    /* Determine with how many nodes we need to communicate the grid overlap */
    int testRankCount = 0;
    do
    {
        testRankCount++;
        bCont = FALSE;
        for (int i = 0; i < nnodes; i++)
        {
            if ((i + testRankCount < nnodes && ol->s2g1[i] > ol->s2g0[i + testRankCount])
                || (i + testRankCount >= nnodes && ol->s2g1[i] > ol->s2g0[i + testRankCount - nnodes] + ndata))
            {
                bCont = TRUE;
            }
        }
    } while (bCont && testRankCount < nnodes);

    ol->comm_data.resize(testRankCount - 1);
    ol->send_size = 0;

    for (size_t b = 0; b < ol->comm_data.size(); b++)
    {
        pme_grid_comm_t* pgc = &ol->comm_data[b];

        /* Send */
        pgc->send_id  = (ol->nodeid + (b + 1)) % ol->nnodes;
        int fft_start = ol->s2g0[pgc->send_id];
        int fft_end   = ol->s2g0[pgc->send_id + 1];
        if (pgc->send_id < nodeid)
        {
            fft_start += ndata;
            fft_end += ndata;
        }
        int send_index1  = ol->s2g1[nodeid];
        send_index1      = std::min(send_index1, fft_end);
        pgc->send_index0 = fft_start;
        pgc->send_nindex = std::max(0, send_index1 - pgc->send_index0);
        ol->send_size += pgc->send_nindex;

        /* We always start receiving to the first index of our slab */
        pgc->recv_id    = (ol->nodeid - (b + 1) + ol->nnodes) % ol->nnodes;
        fft_start       = ol->s2g0[ol->nodeid];
        fft_end         = ol->s2g0[ol->nodeid + 1];
        int recv_index1 = ol->s2g1[pgc->recv_id];
        if (pgc->recv_id > nodeid)
        {
            recv_index1 -= ndata;
        }
        recv_index1      = std::min(recv_index1, fft_end);
        pgc->recv_index0 = fft_start;
        pgc->recv_nindex = std::max(0, recv_index1 - pgc->recv_index0);
    }

#if GMX_MPI
    /* Communicate the buffer sizes to receive */
    MPI_Status stat;
    for (size_t b = 0; b < ol->comm_data.size(); b++)
    {
        MPI_Sendrecv(&ol->send_size, 1, MPI_INT, ol->comm_data[b].send_id, b, &ol->comm_data[b].recv_size,
                     1, MPI_INT, ol->comm_data[b].recv_id, b, ol->mpi_comm, &stat);
    }
#endif

    /* For a non-divisible grid we need pme_order instead of pme_order-1 */
    ol->sendbuf.resize(norder * commplainsize);
    ol->recvbuf.resize(norder * commplainsize);
}
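/* Worked example (illustration only, not from the original source): for
 * ndata = 30 grid lines, nnodes = 4 and norder = 4, the loop above gives
 *   s2g0 = { 0, 7, 15, 22, 30 }   (slab starts, rounded down)
 *   s2g1 = { 11, 18, 26, 33 }     (spread ends, rounded up plus norder - 1)
 * Slab 0 owns grid lines [0, 7) but spreads into [0, 11), which only reaches
 * its direct neighbor, so testRankCount ends up as 2 and a single
 * communication pulse (comm_data.size() == 1) is used.
 */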

int minimalPmeGridSize(int pmeOrder)
{
    /* The actual grid size limitations are:
     *   serial:        >= pme_order
     *   DD, no OpenMP: >= 2*(pme_order - 1)
     *   DD, OpenMP:    >= pme_order + 1
     * But we use the maximum for simplicity since in practice there is not
     * much performance difference between pme_order and 2*(pme_order -1).
     */
    int minimalSize = 2 * (pmeOrder - 1);

    GMX_RELEASE_ASSERT(pmeOrder >= 3, "pmeOrder has to be >= 3");
    GMX_RELEASE_ASSERT(minimalSize >= pmeOrder + 1, "The grid size should be >= pmeOrder + 1");

    return minimalSize;
}
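/* Example: for the default pmeOrder = 4 this returns 2 * (4 - 1) = 6, which
 * also satisfies the DD + OpenMP requirement of >= pmeOrder + 1 = 5. */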

bool gmx_pme_check_restrictions(int pme_order, int nkx, int nky, int nkz, int numPmeDomainsAlongX, bool useThreads, bool errorsAreFatal)
{
    if (pme_order > PME_ORDER_MAX)
    {
        if (!errorsAreFatal)
        {
            return false;
        }

        std::string message = gmx::formatString(
                "pme_order (%d) is larger than the maximum allowed value (%d). Modify and "
                "recompile the code if you really need such a high order.",
                pme_order, PME_ORDER_MAX);
        GMX_THROW(gmx::InconsistentInputError(message));
    }

    const int minGridSize = minimalPmeGridSize(pme_order);
    if (nkx < minGridSize || nky < minGridSize || nkz < minGridSize)
    {
        if (!errorsAreFatal)
        {
            return false;
        }
        std::string message =
                gmx::formatString("The PME grid sizes need to be >= 2*(pme_order-1) (%d)", minGridSize);
        GMX_THROW(gmx::InconsistentInputError(message));
    }

    /* Check for a limitation of the (current) sum_fftgrid_dd code.
     * We only allow multiple communication pulses in dim 1, not in dim 0.
     */
    if (useThreads
        && (nkx < numPmeDomainsAlongX * pme_order && nkx != numPmeDomainsAlongX * (pme_order - 1)))
    {
        if (!errorsAreFatal)
        {
            return false;
        }
        gmx_fatal(FARGS,
                  "The number of PME grid lines per rank along x is %g. But when using OpenMP "
                  "threads, the number of grid lines per rank along x should be >= pme_order (%d) "
                  "or = pme_order-1. To resolve this issue, use fewer ranks along x (and possibly "
                  "more along y and/or z) by specifying -dd manually.",
                  nkx / static_cast<double>(numPmeDomainsAlongX), pme_order);
    }

    return true;
}
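/* Illustrative use (an assumption, not part of the original source): callers
 * that merely want to probe whether a candidate grid is valid can pass
 * errorsAreFatal = false and test the return value, e.g.
 *   bool gridOk = gmx_pme_check_restrictions(4, 48, 48, 48, 1, false, false);
 * gmx_pme_init() below instead calls it with errorsAreFatal = true so that an
 * invalid setup aborts with a descriptive message.
 */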

/*! \brief Divide \p numerator by \p denominator, rounding up */
static int div_round_up(int numerator, int denominator)
{
    return (numerator + denominator - 1) / denominator;
}
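/* Example: div_round_up(30, 4) = 8, whereas plain integer division gives 7. */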

gmx_pme_t* gmx_pme_init(const t_commrec*         cr,
                        const NumPmeDomains&     numPmeDomains,
                        const t_inputrec*        ir,
                        gmx_bool                 bFreeEnergy_q,
                        gmx_bool                 bFreeEnergy_lj,
                        gmx_bool                 bReproducible,
                        real                     ewaldcoeff_q,
                        real                     ewaldcoeff_lj,
                        int                      nthread,
                        PmeRunMode               runMode,
                        PmeGpu*                  pmeGpu,
                        const gmx_device_info_t* gpuInfo,
                        PmeGpuProgramHandle      pmeGpuProgram,
                        const gmx::MDLogger& /*mdlog*/)
{
    int  use_threads, sum_use_threads, i;
    ivec ndata;

    if (debug)
    {
        fprintf(debug, "Creating PME data structures.\n");
    }

    gmx::unique_cptr<gmx_pme_t, gmx_pme_destroy> pme(new gmx_pme_t());

    pme->sum_qgrid_tmp    = nullptr;
    pme->sum_qgrid_dd_tmp = nullptr;

    pme->buf_nalloc = 0;

    pme->nnodes  = 1;
    pme->bPPnode = TRUE;

    pme->nnodes_major = numPmeDomains.x;
    pme->nnodes_minor = numPmeDomains.y;

    if (numPmeDomains.x * numPmeDomains.y > 1)
    {
        pme->mpi_comm = cr->mpi_comm_mygroup;

#if GMX_MPI
        MPI_Comm_rank(pme->mpi_comm, &pme->nodeid);
        MPI_Comm_size(pme->mpi_comm, &pme->nnodes);
#endif
        if (pme->nnodes != numPmeDomains.x * numPmeDomains.y)
        {
            gmx_incons("PME rank count mismatch");
        }
    }
    else
    {
        pme->mpi_comm = MPI_COMM_NULL;
    }

    if (pme->nnodes == 1)
    {
        pme->mpi_comm_d[0] = MPI_COMM_NULL;
        pme->mpi_comm_d[1] = MPI_COMM_NULL;
        pme->ndecompdim    = 0;
        pme->nodeid_major  = 0;
        pme->nodeid_minor  = 0;
    }
    else
    {
        if (numPmeDomains.y == 1)
        {
            pme->mpi_comm_d[0] = pme->mpi_comm;
            pme->mpi_comm_d[1] = MPI_COMM_NULL;
            pme->ndecompdim    = 1;
            pme->nodeid_major  = pme->nodeid;
            pme->nodeid_minor  = 0;
        }
        else if (numPmeDomains.x == 1)
        {
            pme->mpi_comm_d[0] = MPI_COMM_NULL;
            pme->mpi_comm_d[1] = pme->mpi_comm;
            pme->ndecompdim    = 1;
            pme->nodeid_major  = 0;
            pme->nodeid_minor  = pme->nodeid;
        }
        else
        {
            if (pme->nnodes % numPmeDomains.x != 0)
            {
                gmx_incons(
                        "For 2D PME decomposition, #PME ranks must be divisible by the number of "
                        "domains along x");
            }
            pme->ndecompdim = 2;

#if GMX_MPI
            MPI_Comm_split(pme->mpi_comm, pme->nodeid % numPmeDomains.y, pme->nodeid,
                           &pme->mpi_comm_d[0]); /* My communicator along major dimension */
            MPI_Comm_split(pme->mpi_comm, pme->nodeid / numPmeDomains.y, pme->nodeid,
                           &pme->mpi_comm_d[1]); /* My communicator along minor dimension */

            MPI_Comm_rank(pme->mpi_comm_d[0], &pme->nodeid_major);
            MPI_Comm_size(pme->mpi_comm_d[0], &pme->nnodes_major);
            MPI_Comm_rank(pme->mpi_comm_d[1], &pme->nodeid_minor);
            MPI_Comm_size(pme->mpi_comm_d[1], &pme->nnodes_minor);
#endif
        }
    }
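    /* Worked example (illustration only, not from the original source): with
     * numPmeDomains = { x = 2, y = 3 } the 6 ranks are split with color
     * nodeid % 3 for the major communicator and nodeid / 3 for the minor one,
     * giving
     *   nodeid:        0  1  2  3  4  5
     *   nodeid_major:  0  0  0  1  1  1
     *   nodeid_minor:  0  1  2  0  1  2
     * so nnodes_major = 2 and nnodes_minor = 3 after the size queries above.
     */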
    // cr is always initialized if there is a PP rank, so we can safely assume
    // that when it is not, as in the ewald tests, we are not on a PP rank.
    pme->bPPnode = ((cr != nullptr && cr->duty != 0) && thisRankHasDuty(cr, DUTY_PP));

    pme->nthread = nthread;

    /* Check if any of the PME MPI ranks uses threads */
    use_threads = (pme->nthread > 1 ? 1 : 0);
#if GMX_MPI
    if (pme->nnodes > 1)
    {
        MPI_Allreduce(&use_threads, &sum_use_threads, 1, MPI_INT, MPI_SUM, pme->mpi_comm);
    }
    else
#endif
    {
        sum_use_threads = use_threads;
    }
    pme->bUseThreads = (sum_use_threads > 0);

    if (ir->ePBC == epbcSCREW)
    {
        gmx_fatal(FARGS, "pme does not (yet) work with pbc = screw");
    }

    /* NOTE:
     * It is likely that the current gmx_pme_do() routine supports calculating
     * only Coulomb or LJ while gmx_pme_init() configures for both,
     * but that has never been tested.
     * It is also likely that the current gmx_pme_do() routine supports skipping
     * the free-energy calculation for Coulomb and/or LJ while gmx_pme_init()
     * configures with free energy, but that has never been tested.
     */
    pme->doCoulomb     = EEL_PME(ir->coulombtype);
    pme->doLJ          = EVDW_PME(ir->vdwtype);
    pme->bFEP_q        = ((ir->efep != efepNO) && bFreeEnergy_q);
    pme->bFEP_lj       = ((ir->efep != efepNO) && bFreeEnergy_lj);
    pme->bFEP          = (pme->bFEP_q || pme->bFEP_lj);
    pme->nkx           = ir->nkx;
    pme->nky           = ir->nky;
    pme->nkz           = ir->nkz;
    pme->bP3M          = (ir->coulombtype == eelP3M_AD || getenv("GMX_PME_P3M") != nullptr);
    pme->pme_order     = ir->pme_order;
    pme->ewaldcoeff_q  = ewaldcoeff_q;
    pme->ewaldcoeff_lj = ewaldcoeff_lj;

    /* Always constant electrostatics coefficients */
    pme->epsilon_r = ir->epsilon_r;

    /* Always constant LJ coefficients */
    pme->ljpme_combination_rule = ir->ljpme_combination_rule;

    // The box requires scaling with nwalls = 2; we store that condition
    // as well as the scaling factor
    delete pme->boxScaler;
    pme->boxScaler = new EwaldBoxZScaler(*ir);

    /* If we violate restrictions, generate a fatal error here */
    gmx_pme_check_restrictions(pme->pme_order, pme->nkx, pme->nky, pme->nkz, pme->nnodes_major,
                               pme->bUseThreads, true);

    if (pme->nnodes > 1)
    {
        double imbal;

#if GMX_MPI
        MPI_Type_contiguous(DIM, GMX_MPI_REAL, &(pme->rvec_mpi));
        MPI_Type_commit(&(pme->rvec_mpi));
#endif

        /* Note that the coefficient spreading and force gathering, which usually
         * takes about the same amount of time as FFT+solve_pme,
         * is always fully load balanced
         * (unless the coefficient distribution is inhomogeneous).
         */

        imbal = estimate_pme_load_imbalance(pme.get());
        if (imbal >= 1.2 && pme->nodeid_major == 0 && pme->nodeid_minor == 0)
        {
            fprintf(stderr,
                    "\n"
                    "NOTE: The load imbalance in PME FFT and solve is %d%%.\n"
                    "      For optimal PME load balancing\n"
                    "      PME grid_x (%d) and grid_y (%d) should be divisible by #PME_ranks_x "
                    "(%d)\n"
                    "      and PME grid_y (%d) and grid_z (%d) should be divisible by #PME_ranks_y "
                    "(%d)\n"
                    "\n",
                    gmx::roundToInt((imbal - 1) * 100), pme->nkx, pme->nky, pme->nnodes_major,
                    pme->nky, pme->nkz, pme->nnodes_minor);
        }
    }

    /* For a non-divisible grid we need pme_order instead of pme_order-1 */
    /* In sum_qgrid_dd x overlap is copied in place: take padding into account.
     * y is always copied through a buffer: we don't need padding in z,
     * but we do need the overlap in x because of the communication order.
     */
    init_overlap_comm(&pme->overlap[0], pme->pme_order, pme->mpi_comm_d[0], pme->nnodes_major,
                      pme->nodeid_major, pme->nkx,
                      (div_round_up(pme->nky, pme->nnodes_minor) + pme->pme_order)
                              * (pme->nkz + pme->pme_order - 1));

    /* Along overlap dim 1 we can send in multiple pulses in sum_fftgrid_dd.
     * We do this with an offset buffer of equal size, so we need to allocate
     * extra for the offset. That's what the (+1)*pme->nkz is for.
     */
    init_overlap_comm(&pme->overlap[1], pme->pme_order, pme->mpi_comm_d[1], pme->nnodes_minor,
                      pme->nodeid_minor, pme->nky,
                      (div_round_up(pme->nkx, pme->nnodes_major) + pme->pme_order + 1) * pme->nkz);

    /* Double-check for a limitation of the (current) sum_fftgrid_dd code.
     * Note that gmx_pme_check_restrictions checked for this already.
     */
    if (pme->bUseThreads && (pme->overlap[0].comm_data.size() > 1))
    {
        gmx_incons(
                "More than one communication pulse required for grid overlap communication along "
                "the major dimension while using threads");
    }

    snew(pme->bsp_mod[XX], pme->nkx);
    snew(pme->bsp_mod[YY], pme->nky);
    snew(pme->bsp_mod[ZZ], pme->nkz);

    pme->gpu     = pmeGpu; /* Carrying over the single GPU structure */
    pme->runMode = runMode;

    /* The required size of the interpolation grid, including overlap.
     * The allocated size (pmegrid_n?) might be slightly larger.
     */
    pme->pmegrid_nx = pme->overlap[0].s2g1[pme->nodeid_major] - pme->overlap[0].s2g0[pme->nodeid_major];
    pme->pmegrid_ny = pme->overlap[1].s2g1[pme->nodeid_minor] - pme->overlap[1].s2g0[pme->nodeid_minor];
    pme->pmegrid_nz_base = pme->nkz;
    pme->pmegrid_nz      = pme->pmegrid_nz_base + pme->pme_order - 1;
    set_grid_alignment(&pme->pmegrid_nz, pme->pme_order);
    pme->pmegrid_start_ix = pme->overlap[0].s2g0[pme->nodeid_major];
    pme->pmegrid_start_iy = pme->overlap[1].s2g0[pme->nodeid_minor];
    pme->pmegrid_start_iz = 0;

    make_gridindex_to_localindex(pme->nkx, pme->pmegrid_start_ix,
                                 pme->pmegrid_nx - (pme->pme_order - 1), &pme->nnx, &pme->fshx);
    make_gridindex_to_localindex(pme->nky, pme->pmegrid_start_iy,
                                 pme->pmegrid_ny - (pme->pme_order - 1), &pme->nny, &pme->fshy);
    make_gridindex_to_localindex(pme->nkz, pme->pmegrid_start_iz, pme->pmegrid_nz_base, &pme->nnz,
                                 &pme->fshz);

    pme->spline_work = make_pme_spline_work(pme->pme_order);

    ndata[0] = pme->nkx;
    ndata[1] = pme->nky;
    ndata[2] = pme->nkz;
    /* It doesn't matter if we allocate too many grids here,
     * we only allocate and use the ones we need.
     */
    if (pme->doLJ)
    {
        pme->ngrids = ((ir->ljpme_combination_rule == eljpmeLB) ? DO_Q_AND_LJ_LB : DO_Q_AND_LJ);
    }
    else
    {
        pme->ngrids = DO_Q;
    }
    snew(pme->fftgrid, pme->ngrids);
    snew(pme->cfftgrid, pme->ngrids);
    snew(pme->pfft_setup, pme->ngrids);

    for (i = 0; i < pme->ngrids; ++i)
    {
        if ((i < DO_Q && pme->doCoulomb && (i == 0 || bFreeEnergy_q))
            || (i >= DO_Q && pme->doLJ
                && (i == 2 || bFreeEnergy_lj || ir->ljpme_combination_rule == eljpmeLB)))
        {
            pmegrids_init(&pme->pmegrid[i], pme->pmegrid_nx, pme->pmegrid_ny, pme->pmegrid_nz,
                          pme->pmegrid_nz_base, pme->pme_order, pme->bUseThreads, pme->nthread,
                          pme->overlap[0].s2g1[pme->nodeid_major]
                                  - pme->overlap[0].s2g0[pme->nodeid_major + 1],
                          pme->overlap[1].s2g1[pme->nodeid_minor]
                                  - pme->overlap[1].s2g0[pme->nodeid_minor + 1]);
            /* This routine will allocate the grid data to fit the FFTs */
            const auto allocateRealGridForGpu = (pme->runMode == PmeRunMode::Mixed)
                                                        ? gmx::PinningPolicy::PinnedIfSupported
                                                        : gmx::PinningPolicy::CannotBePinned;
            gmx_parallel_3dfft_init(&pme->pfft_setup[i], ndata, &pme->fftgrid[i], &pme->cfftgrid[i],
                                    pme->mpi_comm_d, bReproducible, pme->nthread, allocateRealGridForGpu);
        }
    }

    if (!pme->bP3M)
    {
        /* Use plain SPME B-spline interpolation */
        make_bspline_moduli(pme->bsp_mod, pme->nkx, pme->nky, pme->nkz, pme->pme_order);
    }
    else
    {
        /* Use the P3M grid-optimized influence function */
        make_p3m_bspline_moduli(pme->bsp_mod, pme->nkx, pme->nky, pme->nkz, pme->pme_order);
    }

    /* Use atc[0] for spreading */
    const int firstDimIndex   = (numPmeDomains.x > 1 ? 0 : 1);
    MPI_Comm  mpiCommFirstDim = (pme->nnodes > 1 ? pme->mpi_comm_d[firstDimIndex] : MPI_COMM_NULL);
    bool      doSpread        = true;
    pme->atc.emplace_back(mpiCommFirstDim, pme->nthread, pme->pme_order, firstDimIndex, doSpread);
    if (pme->ndecompdim >= 2)
    {
        const int secondDimIndex = 1;
        doSpread                 = false;
        pme->atc.emplace_back(pme->mpi_comm_d[1], pme->nthread, pme->pme_order, secondDimIndex, doSpread);
    }

    if (pme_gpu_active(pme.get()))
    {
        if (!pme->gpu)
        {
            // Initial check of validity of the data
            std::string errorString;
            bool        canRunOnGpu = pme_gpu_check_restrictions(pme.get(), &errorString);
            if (!canRunOnGpu)
            {
                GMX_THROW(gmx::NotImplementedError(errorString));
            }
        }

        pme_gpu_reinit(pme.get(), gpuInfo, pmeGpuProgram);
    }

    pme_init_all_work(&pme->solve_work, pme->nthread, pme->nkx);

    // no exception was thrown during the init, so we hand over the PME structure handle
    return pme.release();
}

void gmx_pme_reinit(struct gmx_pme_t** pmedata,
                    const t_commrec*   cr,
                    struct gmx_pme_t*  pme_src,
                    const t_inputrec*  ir,
                    const ivec         grid_size,
                    real               ewaldcoeff_q,
                    real               ewaldcoeff_lj)
{
    // Create a copy of t_inputrec fields that are used in gmx_pme_init().
    // TODO: This would be better as just copying a sub-structure that contains
    // all the PME parameters and nothing else.
    t_inputrec irc;
    irc.ePBC                   = ir->ePBC;
    irc.coulombtype            = ir->coulombtype;
    irc.vdwtype                = ir->vdwtype;
    irc.efep                   = ir->efep;
    irc.pme_order              = ir->pme_order;
    irc.epsilon_r              = ir->epsilon_r;
    irc.ljpme_combination_rule = ir->ljpme_combination_rule;
    irc.nkx                    = grid_size[XX];
    irc.nky                    = grid_size[YY];
    irc.nkz                    = grid_size[ZZ];

    try
    {
        const gmx::MDLogger dummyLogger;
        // This is reinit which is currently only changing grid size/coefficients,
        // so we don't expect the actual logging.
        // TODO: when PME is an object, it should take reference to mdlog on construction and save it.
        GMX_ASSERT(pmedata, "Invalid PME pointer");
        NumPmeDomains numPmeDomains = { pme_src->nnodes_major, pme_src->nnodes_minor };
        *pmedata = gmx_pme_init(cr, numPmeDomains, &irc, pme_src->bFEP_q, pme_src->bFEP_lj, FALSE,
                                ewaldcoeff_q, ewaldcoeff_lj, pme_src->nthread, pme_src->runMode,
                                pme_src->gpu, nullptr, nullptr, dummyLogger);
        /* When running PME on the CPU not using domain decomposition,
         * the atom data is allocated once only in gmx_pme_(re)init().
         */
        if (!pme_src->gpu && pme_src->nnodes == 1)
        {
            gmx_pme_reinit_atoms(*pmedata, pme_src->atc[0].numAtoms(), nullptr);
        }
        // TODO this is mostly passing around current values
    }
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR

    /* We can easily reuse the allocated pme grids in pme_src */
    reuse_pmegrids(&pme_src->pmegrid[PME_GRID_QA], &(*pmedata)->pmegrid[PME_GRID_QA]);
    /* We would like to reuse the fft grids, but that's harder */
}

void gmx_pme_calc_energy(gmx_pme_t* pme, gmx::ArrayRef<const gmx::RVec> x, gmx::ArrayRef<const real> q, real* V)
{
    pmegrids_t* grid;

    if (pme->nnodes > 1)
    {
        gmx_incons("gmx_pme_calc_energy called in parallel");
    }
    if (pme->bFEP_q)
    {
        gmx_incons("gmx_pme_calc_energy with free energy");
    }

    if (!pme->atc_energy)
    {
        pme->atc_energy = std::make_unique<PmeAtomComm>(MPI_COMM_NULL, 1, pme->pme_order, 0, true);
    }
    PmeAtomComm* atc = pme->atc_energy.get();
    atc->setNumAtoms(x.ssize());
    atc->x           = x;
    atc->coefficient = q;

    /* We only use the A-charges grid */
    grid = &pme->pmegrid[PME_GRID_QA];

    /* Only calculate the spline coefficients, don't actually spread */
    spread_on_grid(pme, atc, nullptr, TRUE, FALSE, pme->fftgrid[PME_GRID_QA], FALSE, PME_GRID_QA);

    *V = gather_energy_bsplines(pme, grid->grid.grid, atc);
}

/*! \brief Calculate initial Lorentz-Berthelot coefficients for LJ-PME */
static void calc_initial_lb_coeffs(gmx::ArrayRef<real> coefficient, const real* local_c6, const real* local_sigma)
{
    for (gmx::index i = 0; i < coefficient.ssize(); ++i)
    {
        real sigma4    = local_sigma[i];
        sigma4         = sigma4 * sigma4;
        sigma4         = sigma4 * sigma4;
        coefficient[i] = local_c6[i] / sigma4;
    }
}

/*! \brief Calculate next Lorentz-Berthelot coefficients for LJ-PME */
static void calc_next_lb_coeffs(gmx::ArrayRef<real> coefficient, const real* local_sigma)
{
    for (gmx::index i = 0; i < coefficient.ssize(); ++i)
    {
        coefficient[i] *= local_sigma[i];
    }
}
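/* How the two helpers above combine (sketch based on the LB loop in
 * gmx_pme_do() below): with Lorentz-Berthelot rules the pair C6 coefficient
 * involves (sigma_i + sigma_j)^6, whose binomial expansion has seven terms
 * sigma_i^k * sigma_j^(6-k), k = 0..6. calc_initial_lb_coeffs() seeds
 * coefficient[i] = local_c6[i] / sigma_i^4, and calc_next_lb_coeffs() is
 * called once before each of the seven grid passes, so pass k spreads
 * coefficient[i] * sigma_i^k, i.e. local_c6[i] * sigma_i^(k-4) for k = 1..7.
 * The cross terms are then assembled in reciprocal space by
 * solve_pme_lj_yzx(), called below with its LB flag argument set to TRUE.
 */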

int gmx_pme_do(struct gmx_pme_t*              pme,
               gmx::ArrayRef<const gmx::RVec> coordinates,
               gmx::ArrayRef<gmx::RVec>       forces,
               real                           chargeA[],
               real                           chargeB[],
               real                           c6A[],
               real                           c6B[],
               real                           sigmaA[],
               real                           sigmaB[],
               const matrix                   box,
               const t_commrec*               cr,
               int                            maxshift_x,
               int                            maxshift_y,
               t_nrnb*                        nrnb,
               gmx_wallcycle*                 wcycle,
               matrix                         vir_q,
               matrix                         vir_lj,
               real*                          energy_q,
               real*                          energy_lj,
               real                           lambda_q,
               real                           lambda_lj,
               real*                          dvdlambda_q,
               real*                          dvdlambda_lj,
               int                            flags)
{
    GMX_ASSERT(pme->runMode == PmeRunMode::CPU,
               "gmx_pme_do should not be called on the GPU PME run.");

    int                  d, npme, grid_index, max_grid_index;
    PmeAtomComm&         atc         = pme->atc[0];
    pmegrids_t*          pmegrid     = nullptr;
    real*                grid        = nullptr;
    real*                coefficient = nullptr;
    PmeOutput            output[2]; // The second is used for the B state with FEP
    real                 scale, lambda;
    gmx_bool             bClearF;
    gmx_parallel_3dfft_t pfft_setup;
    real*                fftgrid;
    t_complex*           cfftgrid;
    int                  thread;
    gmx_bool             bFirst, bDoSplines;
    int                  fep_state;
    int                  fep_states_lj = pme->bFEP_lj ? 2 : 1;
    const gmx_bool       bCalcEnerVir  = (flags & GMX_PME_CALC_ENER_VIR) != 0;
    const gmx_bool       bBackFFT      = (flags & (GMX_PME_CALC_F | GMX_PME_CALC_POT)) != 0;
    const gmx_bool       bCalcF        = (flags & GMX_PME_CALC_F) != 0;
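    /* Illustration of typical usage (an assumption, not from the original
     * source): an MD step that needs spreading, solving, forces, energy and
     * virial would pass
     *   flags = GMX_PME_SPREAD | GMX_PME_SOLVE | GMX_PME_CALC_F | GMX_PME_CALC_ENER_VIR;
     * which makes bCalcEnerVir, bBackFFT and bCalcF above all TRUE, while an
     * energy-only call omits GMX_PME_CALC_F and thus skips force gathering.
     */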

    /* We could be passing lambda!=1 while no q or LJ is actually perturbed */
    if (!pme->bFEP_q)
    {
        lambda_q = 1;
    }
    if (!pme->bFEP_lj)
    {
        lambda_lj = 1;
    }

    assert(pme->nnodes > 0);
    assert(pme->nnodes == 1 || pme->ndecompdim > 0);

    if (pme->nnodes > 1)
    {
        atc.pd.resize(coordinates.ssize());
        for (int d = pme->ndecompdim - 1; d >= 0; d--)
        {
            PmeAtomComm& atc = pme->atc[d];
            atc.maxshift     = (atc.dimind == 0 ? maxshift_x : maxshift_y);
        }
    }
    else
    {
        GMX_ASSERT(coordinates.ssize() == atc.numAtoms(), "We expect atc.numAtoms() coordinates");
        GMX_ASSERT(forces.ssize() >= atc.numAtoms(),
                   "We need a force buffer with at least atc.numAtoms() elements");

        atc.x = coordinates;
        atc.f = forces;
    }

    matrix scaledBox;
    pme->boxScaler->scaleBox(box, scaledBox);

    gmx::invertBoxMatrix(scaledBox, pme->recipbox);
    bFirst = TRUE;

    /* For simplicity, we construct the splines for all particles if
     * more than one PME calculation is needed. Some optimization
     * could be done by keeping track of which atoms have splines
     * constructed, and construct new splines on each pass for atoms
     * that don't yet have them.
     */

    bDoSplines = pme->bFEP || (pme->doCoulomb && pme->doLJ);

    /* We need a maximum of four separate PME calculations:
     * grid_index=0: Coulomb PME with charges from state A
     * grid_index=1: Coulomb PME with charges from state B
     * grid_index=2: LJ PME with C6 from state A
     * grid_index=3: LJ PME with C6 from state B
     * For Lorentz-Berthelot combination rules, a separate loop is used to
     * calculate all the terms
     */

    /* If we are doing LJ-PME with LB, we only do Q here */
    max_grid_index = (pme->ljpme_combination_rule == eljpmeLB) ? DO_Q : DO_Q_AND_LJ;

    for (grid_index = 0; grid_index < max_grid_index; ++grid_index)
    {
        /* Check if we should do calculations at this grid_index
         * If grid_index is odd we should be doing FEP
         * If grid_index < 2 we should be doing electrostatic PME
         * If grid_index >= 2 we should be doing LJ-PME
         */
        if ((grid_index < DO_Q && (!pme->doCoulomb || (grid_index == 1 && !pme->bFEP_q)))
            || (grid_index >= DO_Q && (!pme->doLJ || (grid_index == 3 && !pme->bFEP_lj))))
        {
            continue;
        }
        /* Unpack structure */
        pmegrid    = &pme->pmegrid[grid_index];
        fftgrid    = pme->fftgrid[grid_index];
        cfftgrid   = pme->cfftgrid[grid_index];
        pfft_setup = pme->pfft_setup[grid_index];
        switch (grid_index)
        {
            case 0: coefficient = chargeA; break;
            case 1: coefficient = chargeB; break;
            case 2: coefficient = c6A; break;
            case 3: coefficient = c6B; break;
        }

        grid = pmegrid->grid.grid;

        if (debug)
        {
            fprintf(debug, "PME: number of ranks = %d, rank = %d\n", cr->nnodes, cr->nodeid);
            fprintf(debug, "Grid = %p\n", static_cast<void*>(grid));
            if (grid == nullptr)
            {
                gmx_fatal(FARGS, "No grid!");
            }
        }

        if (pme->nnodes == 1)
        {
            atc.coefficient = gmx::arrayRefFromArray(coefficient, coordinates.size());
        }
        else
        {
            wallcycle_start(wcycle, ewcPME_REDISTXF);
            do_redist_pos_coeffs(pme, cr, bFirst, coordinates, coefficient);

            wallcycle_stop(wcycle, ewcPME_REDISTXF);
        }

        if (debug)
        {
            fprintf(debug, "Rank= %6d, pme local particles=%6d\n", cr->nodeid, atc.numAtoms());
        }

        if (flags & GMX_PME_SPREAD)
        {
            wallcycle_start(wcycle, ewcPME_SPREAD);

            /* Spread the coefficients on a grid */
            spread_on_grid(pme, &atc, pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);

            if (bFirst)
            {
                inc_nrnb(nrnb, eNR_WEIGHTS, DIM * atc.numAtoms());
            }
            inc_nrnb(nrnb, eNR_SPREADBSP,
                     pme->pme_order * pme->pme_order * pme->pme_order * atc.numAtoms());

            if (!pme->bUseThreads)
            {
                wrap_periodic_pmegrid(pme, grid);

                /* sum contributions to local grid from other nodes */
                if (pme->nnodes > 1)
                {
                    gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
                }

                copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
            }

            wallcycle_stop(wcycle, ewcPME_SPREAD);

            /* TODO If the OpenMP and single-threaded implementations
               converge, then spread_on_grid() and
               copy_pmegrid_to_fftgrid() will perhaps live in the same
               source file.
             */
        }

        /* Here we start a large thread parallel region */
#pragma omp parallel num_threads(pme->nthread) private(thread)
        {
            try
            {
                thread = gmx_omp_get_thread_num();
                if (flags & GMX_PME_SOLVE)
                {
                    int loop_count;

                    /* do 3d-fft */
                    if (thread == 0)
                    {
                        wallcycle_start(wcycle, ewcPME_FFT);
                    }
                    gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX, thread, wcycle);
                    if (thread == 0)
                    {
                        wallcycle_stop(wcycle, ewcPME_FFT);
                    }

                    /* solve in k-space for our local cells */
                    if (thread == 0)
                    {
                        wallcycle_start(wcycle, (grid_index < DO_Q ? ewcPME_SOLVE : ewcLJPME));
                    }
                    if (grid_index < DO_Q)
                    {
                        loop_count = solve_pme_yzx(
                                pme, cfftgrid, scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
                                bCalcEnerVir, pme->nthread, thread);
                    }
                    else
                    {
                        loop_count = solve_pme_lj_yzx(
                                pme, &cfftgrid, FALSE,
                                scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
                                bCalcEnerVir, pme->nthread, thread);
                    }

                    if (thread == 0)
                    {
                        wallcycle_stop(wcycle, (grid_index < DO_Q ? ewcPME_SOLVE : ewcLJPME));
                        inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
                    }
                }

                if (bBackFFT)
                {
                    /* do 3d-invfft */
                    if (thread == 0)
                    {
                        wallcycle_start(wcycle, ewcPME_FFT);
                    }
                    gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL, thread, wcycle);
                    if (thread == 0)
                    {
                        wallcycle_stop(wcycle, ewcPME_FFT);


                        if (pme->nodeid == 0)
                        {
                            real ntot = pme->nkx * pme->nky * pme->nkz;
                            npme      = static_cast<int>(ntot * std::log(ntot) / std::log(2.0));
                            inc_nrnb(nrnb, eNR_FFT, 2 * npme);
                        }

                        /* Note: this wallcycle region is closed below
                           outside an OpenMP region, so take care if
                           refactoring code here. */
                        wallcycle_start(wcycle, ewcPME_GATHER);
                    }

                    copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
                }
            }
            GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
        }
        /* End of thread parallel section.
         * With MPI we have to synchronize here before gmx_sum_qgrid_dd.
         */

        if (bBackFFT)
        {
            /* distribute local grid to all nodes */
            if (pme->nnodes > 1)
            {
                gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
            }

            unwrap_periodic_pmegrid(pme, grid);
        }

        if (bCalcF)
        {
            /* interpolate forces for our local atoms */


            /* If we are running without parallelization,
             * atc->f is the actual force array, not a buffer,
             * therefore we should not clear it.
             */
            lambda  = grid_index < DO_Q ? lambda_q : lambda_lj;
            bClearF = (bFirst && PAR(cr));
#pragma omp parallel for num_threads(pme->nthread) schedule(static)
            for (thread = 0; thread < pme->nthread; thread++)
            {
                try
                {
                    gather_f_bsplines(pme, grid, bClearF, &atc, &atc.spline[thread],
                                      pme->bFEP ? (grid_index % 2 == 0 ? 1.0 - lambda : lambda) : 1.0);
                }
                GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
            }


            inc_nrnb(nrnb, eNR_GATHERFBSP,
                     pme->pme_order * pme->pme_order * pme->pme_order * atc.numAtoms());
            /* Note: this wallcycle region is opened above inside an OpenMP
               region, so take care if refactoring code here. */
            wallcycle_stop(wcycle, ewcPME_GATHER);
        }

        if (bCalcEnerVir)
        {
            /* This should only be called on the master thread
             * and after the threads have synchronized.
             */
            if (grid_index < 2)
            {
                get_pme_ener_vir_q(pme->solve_work, pme->nthread, &output[grid_index % 2]);
            }
            else
            {
                get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &output[grid_index % 2]);
            }
        }
        bFirst = FALSE;
    } /* of grid_index-loop */
1334
1335     /* For Lorentz-Berthelot combination rules in LJ-PME, we need to calculate
1336      * seven terms. */
1337
1338     if (pme->doLJ && pme->ljpme_combination_rule == eljpmeLB)
1339     {
1340         /* Loop over A- and B-state if we are doing FEP */
1341         for (fep_state = 0; fep_state < fep_states_lj; ++fep_state)
1342         {
1343             real *local_c6 = nullptr, *local_sigma = nullptr, *RedistC6 = nullptr, *RedistSigma = nullptr;
1344             gmx::ArrayRef<real> coefficientBuffer;
1345             if (pme->nnodes == 1)
1346             {
1347                 pme->lb_buf1.resize(atc.numAtoms());
1348                 coefficientBuffer = pme->lb_buf1;
1349                 switch (fep_state)
1350                 {
1351                     case 0:
1352                         local_c6    = c6A;
1353                         local_sigma = sigmaA;
1354                         break;
1355                     case 1:
1356                         local_c6    = c6B;
1357                         local_sigma = sigmaB;
1358                         break;
1359                     default: gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
1360                 }
1361             }
1362             else
1363             {
1364                 coefficientBuffer = atc.coefficientBuffer;
1365                 switch (fep_state)
1366                 {
1367                     case 0:
1368                         RedistC6    = c6A;
1369                         RedistSigma = sigmaA;
1370                         break;
1371                     case 1:
1372                         RedistC6    = c6B;
1373                         RedistSigma = sigmaB;
1374                         break;
1375                     default: gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
1376                 }
1377                 wallcycle_start(wcycle, ewcPME_REDISTXF);
1378
1379                 do_redist_pos_coeffs(pme, cr, bFirst, coordinates, RedistC6);
1380                 pme->lb_buf1.resize(atc.numAtoms());
1381                 pme->lb_buf2.resize(atc.numAtoms());
1382                 local_c6 = pme->lb_buf1.data();
1383                 for (int i = 0; i < atc.numAtoms(); ++i)
1384                 {
1385                     local_c6[i] = atc.coefficient[i];
1386                 }
1387
1388                 do_redist_pos_coeffs(pme, cr, FALSE, coordinates, RedistSigma);
1389                 local_sigma = pme->lb_buf2.data();
1390                 for (int i = 0; i < atc.numAtoms(); ++i)
1391                 {
1392                     local_sigma[i] = atc.coefficient[i];
1393                 }
1394
1395                 wallcycle_stop(wcycle, ewcPME_REDISTXF);
1396             }
1397             atc.coefficient = coefficientBuffer;
1398             calc_initial_lb_coeffs(coefficientBuffer, local_c6, local_sigma);
1399
1400             /*Seven terms in LJ-PME with LB, grid_index < 2 reserved for electrostatics*/
1401             for (grid_index = 2; grid_index < 9; ++grid_index)
1402             {
1403                 /* Unpack structure */
1404                 pmegrid    = &pme->pmegrid[grid_index];
1405                 fftgrid    = pme->fftgrid[grid_index];
1406                 pfft_setup = pme->pfft_setup[grid_index];
1407                 calc_next_lb_coeffs(coefficientBuffer, local_sigma);
1408                 grid = pmegrid->grid.grid;
1409
1410                 if (flags & GMX_PME_SPREAD)
1411                 {
1412                     wallcycle_start(wcycle, ewcPME_SPREAD);
1413                     /* Spread the c6 coefficients on a grid */
1414                     spread_on_grid(pme, &atc, pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);
1415
1416                     if (bFirst)
1417                     {
1418                         inc_nrnb(nrnb, eNR_WEIGHTS, DIM * atc.numAtoms());
1419                     }
1420
1421                     inc_nrnb(nrnb, eNR_SPREADBSP,
1422                              pme->pme_order * pme->pme_order * pme->pme_order * atc.numAtoms());
1423                     if (pme->nthread == 1)
1424                     {
1425                         wrap_periodic_pmegrid(pme, grid);
1426                         /* sum contributions to local grid from other nodes */
1427                         if (pme->nnodes > 1)
1428                         {
1429                             gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
1430                         }
1431                         copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
1432                     }
1433                     wallcycle_stop(wcycle, ewcPME_SPREAD);
1434                 }
1435                 /* Here we start a large thread-parallel region */
1436 #pragma omp parallel num_threads(pme->nthread) private(thread)
1437                 {
1438                     try
1439                     {
1440                         thread = gmx_omp_get_thread_num();
1441                         if (flags & GMX_PME_SOLVE)
1442                         {
1443                             /* do 3d-fft */
1444                             if (thread == 0)
1445                             {
1446                                 wallcycle_start(wcycle, ewcPME_FFT);
1447                             }
1448
1449                             gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX, thread, wcycle);
1450                             if (thread == 0)
1451                             {
1452                                 wallcycle_stop(wcycle, ewcPME_FFT);
1453                             }
1454                         }
1455                     }
1456                     GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
1457                 }
1458                 bFirst = FALSE;
1459             }
1460             if (flags & GMX_PME_SOLVE)
1461             {
1462                 /* solve in k-space for our local cells */
1463 #pragma omp parallel num_threads(pme->nthread) private(thread)
1464                 {
1465                     try
1466                     {
1467                         int loop_count;
1468                         thread = gmx_omp_get_thread_num();
1469                         if (thread == 0)
1470                         {
1471                             wallcycle_start(wcycle, ewcLJPME);
1472                         }
1473
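                        /* Editorial note: the volume factor passed below is the
                         * product of the diagonal of the scaled box; for the
                         * triangular box matrices GROMACS uses, that product
                         * equals the box volume (the determinant). */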
1474                         loop_count = solve_pme_lj_yzx(
1475                                 pme, &pme->cfftgrid[2], TRUE,
1476                                 scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
1477                                 bCalcEnerVir, pme->nthread, thread);
1478                         if (thread == 0)
1479                         {
1480                             wallcycle_stop(wcycle, ewcLJPME);
1481                             inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
1482                         }
1483                     }
1484                     GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
1485                 }
1486             }
1487
1488             if (bCalcEnerVir)
1489             {
1490                 /* This should only be called on the master thread and
1491                  * after the threads have synchronized.
1492                  */
1493                 get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &output[fep_state]);
1494             }
1495
1496             if (bBackFFT)
1497             {
1498                 bFirst = !pme->doCoulomb;
1499                 calc_initial_lb_coeffs(coefficientBuffer, local_c6, local_sigma);
1500                 for (grid_index = 8; grid_index >= 2; --grid_index)
1501                 {
1502                     /* Unpack structure */
1503                     pmegrid    = &pme->pmegrid[grid_index];
1504                     fftgrid    = pme->fftgrid[grid_index];
1505                     pfft_setup = pme->pfft_setup[grid_index];
1506                     grid       = pmegrid->grid.grid;
1507                     calc_next_lb_coeffs(coefficientBuffer, local_sigma);
1508 #pragma omp parallel num_threads(pme->nthread) private(thread)
1509                     {
1510                         try
1511                         {
1512                             thread = gmx_omp_get_thread_num();
1513                             /* do 3d-invfft */
1514                             if (thread == 0)
1515                             {
1516                                 wallcycle_start(wcycle, ewcPME_FFT);
1517                             }
1518
1519                             gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL, thread, wcycle);
1520                             if (thread == 0)
1521                             {
1522                                 wallcycle_stop(wcycle, ewcPME_FFT);
1523
1524
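                                /* Editorial note: the flop bookkeeping below
                                 * assumes the N*log2(N) cost model for the FFT;
                                 * the factor of 2 in 2*npme is read here as
                                 * counting the forward plus inverse transform. */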
1525                                 if (pme->nodeid == 0)
1526                                 {
1527                                     real ntot = static_cast<real>(pme->nkx) * pme->nky * pme->nkz;
1528                                     npme = static_cast<int>(ntot * std::log(ntot) / std::log(2.0));
1529                                     inc_nrnb(nrnb, eNR_FFT, 2 * npme);
1530                                 }
1531                                 wallcycle_start(wcycle, ewcPME_GATHER);
1532                             }
1533
1534                             copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
1535                         }
1536                         GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
1537                     } /* #pragma omp parallel */
1538
1539                     /* distribute local grid to all nodes */
1540                     if (pme->nnodes > 1)
1541                     {
1542                         gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
1543                     }
1544
1545                     unwrap_periodic_pmegrid(pme, grid);
1546
1547                     if (bCalcF)
1548                     {
1549                         /* interpolate forces for our local atoms */
1550                         bClearF = (bFirst && PAR(cr));
1551                         scale   = pme->bFEP ? (fep_state < 1 ? 1.0 - lambda_lj : lambda_lj) : 1.0;
1552                         scale *= lb_scale_factor[grid_index - 2];
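                        /* Editorial reading: lb_scale_factor (defined elsewhere)
                         * is expected to hold the binomial weights C(6,k)/2^6 of
                         * the Lorentz-Berthelot expansion, with k = grid_index - 2. */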
1553
1554 #pragma omp parallel for num_threads(pme->nthread) schedule(static)
1555                         for (thread = 0; thread < pme->nthread; thread++)
1556                         {
1557                             try
1558                             {
1559                                 gather_f_bsplines(pme, grid, bClearF, &pme->atc[0],
1560                                                   &pme->atc[0].spline[thread], scale);
1561                             }
1562                             GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
1563                         }
1564
1565
1566                         inc_nrnb(nrnb, eNR_GATHERFBSP,
1567                                  pme->pme_order * pme->pme_order * pme->pme_order * pme->atc[0].numAtoms());
1568                     }
1569                     wallcycle_stop(wcycle, ewcPME_GATHER);
1570
1571                     bFirst = FALSE;
1572                 } /* for (grid_index = 8; grid_index >= 2; --grid_index) */
1573             }     /* if (bBackFFT) */
1574         }         /* for (fep_state = 0; fep_state < fep_states_lj; ++fep_state) */
1575     }             /* if (pme->doLJ && pme->ljpme_combination_rule == eljpmeLB) */
1576
1577     if (bCalcF && pme->nnodes > 1)
1578     {
1579         wallcycle_start(wcycle, ewcPME_REDISTXF);
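        /* Walk the decomposition dimensions, communicating the PME forces back
         * towards their home ranks; the last dimension writes straight into the
         * caller's force buffer, earlier ones into the next dimension's atom data. */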
1580         for (d = 0; d < pme->ndecompdim; d++)
1581         {
1582             gmx::ArrayRef<gmx::RVec> forcesRef;
1583             if (d == pme->ndecompdim - 1)
1584             {
1585                 const size_t numAtoms = coordinates.size();
1586                 GMX_ASSERT(forces.size() >= numAtoms, "Need at least numAtoms forces");
1587                 forcesRef = forces.subArray(0, numAtoms);
1588             }
1589             else
1590             {
1591                 forcesRef = pme->atc[d + 1].f;
1592             }
1593             if (DOMAINDECOMP(cr))
1594             {
1595                 dd_pmeredist_f(pme, &pme->atc[d], forcesRef, d == pme->ndecompdim - 1 && pme->bPPnode);
1596             }
1597         }
1598
1599         wallcycle_stop(wcycle, ewcPME_REDISTXF);
1600     }
1601
1602     if (bCalcEnerVir)
1603     {
1604         if (pme->doCoulomb)
1605         {
1606             if (!pme->bFEP_q)
1607             {
1608                 *energy_q = output[0].coulombEnergy_;
1609                 m_add(vir_q, output[0].coulombVirial_, vir_q);
1610             }
1611             else
1612             {
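                /* Free-energy perturbation: linear interpolation between the
                 * A- and B-state results, E(lambda) = (1 - lambda)*E_A + lambda*E_B,
                 * so dE/dlambda = E_B - E_A; the virial is mixed the same way. */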
1613                 *energy_q = (1.0 - lambda_q) * output[0].coulombEnergy_ + lambda_q * output[1].coulombEnergy_;
1614                 *dvdlambda_q += output[1].coulombEnergy_ - output[0].coulombEnergy_;
1615                 for (int i = 0; i < DIM; i++)
1616                 {
1617                     for (int j = 0; j < DIM; j++)
1618                     {
1619                         vir_q[i][j] += (1.0 - lambda_q) * output[0].coulombVirial_[i][j]
1620                                        + lambda_q * output[1].coulombVirial_[i][j];
1621                     }
1622                 }
1623             }
1624             if (debug)
1625             {
1626                 fprintf(debug, "Electrostatic PME mesh energy: %g\n", *energy_q);
1627             }
1628         }
1629         else
1630         {
1631             *energy_q = 0;
1632         }
1633
1634         if (pme->doLJ)
1635         {
1636             if (!pme->bFEP_lj)
1637             {
1638                 *energy_lj = output[0].lennardJonesEnergy_;
1639                 m_add(vir_lj, output[0].lennardJonesVirial_, vir_lj);
1640             }
1641             else
1642             {
1643                 *energy_lj = (1.0 - lambda_lj) * output[0].lennardJonesEnergy_
1644                              + lambda_lj * output[1].lennardJonesEnergy_;
1645                 *dvdlambda_lj += output[1].lennardJonesEnergy_ - output[0].lennardJonesEnergy_;
1646                 for (int i = 0; i < DIM; i++)
1647                 {
1648                     for (int j = 0; j < DIM; j++)
1649                     {
1650                         vir_lj[i][j] += (1.0 - lambda_lj) * output[0].lennardJonesVirial_[i][j]
1651                                         + lambda_lj * output[1].lennardJonesVirial_[i][j];
1652                     }
1653                 }
1654             }
1655             if (debug)
1656             {
1657                 fprintf(debug, "Lennard-Jones PME mesh energy: %g\n", *energy_lj);
1658             }
1659         }
1660         else
1661         {
1662             *energy_lj = 0;
1663         }
1664     }
1665     return 0;
1666 }
1667
1668 void gmx_pme_destroy(gmx_pme_t* pme)
1669 {
1670     if (!pme)
1671     {
1672         return;
1673     }
1674
1675     delete pme->boxScaler;
1676
1677     sfree(pme->nnx);
1678     sfree(pme->nny);
1679     sfree(pme->nnz);
1680     sfree(pme->fshx);
1681     sfree(pme->fshy);
1682     sfree(pme->fshz);
1683
1684     for (int i = 0; i < pme->ngrids; ++i)
1685     {
1686         pmegrids_destroy(&pme->pmegrid[i]);
1687     }
1688     if (pme->pfft_setup)
1689     {
1690         for (int i = 0; i < pme->ngrids; ++i)
1691         {
1692             gmx_parallel_3dfft_destroy(pme->pfft_setup[i]);
1693         }
1694     }
1695     sfree(pme->fftgrid);
1696     sfree(pme->cfftgrid);
1697     sfree(pme->pfft_setup);
1698
1699     for (int i = 0; i < DIM; i++)
1700     {
1701         sfree(pme->bsp_mod[i]);
1702     }
1703
1704     sfree(pme->bufv);
1705     sfree(pme->bufr);
1706
1707     if (pme->solve_work)
1708     {
1709         pme_free_all_work(&pme->solve_work, pme->nthread);
1710     }
1711
1712     sfree(pme->sum_qgrid_tmp);
1713     sfree(pme->sum_qgrid_dd_tmp);
1714
1715     destroy_pme_spline_work(pme->spline_work);
1716
1717     if (pme_gpu_active(pme) && pme->gpu)
1718     {
1719         pme_gpu_destroy(pme->gpu);
1720     }
1721
1722     delete pme;
1723 }
1724
1725 void gmx_pme_reinit_atoms(gmx_pme_t* pme, const int numAtoms, const real* charges)
1726 {
1727     if (pme_gpu_active(pme))
1728     {
1729         pme_gpu_reinit_atoms(pme->gpu, numAtoms, charges);
1730     }
1731     else
1732     {
1733         pme->atc[0].setNumAtoms(numAtoms);
1734         // TODO: set the charges here as well
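        // Editorial note: on this CPU path the per-atom coefficients are
        // reloaded on every PME call, so only the atom count needs updating here.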
1735     }
1736 }