/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017 The GROMACS development team.
 * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief This file contains function definitions necessary for
 * computing energies and forces for the PME long-ranged part (Coulomb
 * and LJ).
 *
 * \author Erik Lindahl <erik@kth.se>
 * \author Berk Hess <hess@kth.se>
 * \ingroup module_ewald
 */
/* IMPORTANT FOR DEVELOPERS:
 *
 * Triclinic PME stuff isn't entirely trivial, and we've experienced
 * some bugs during development (many of them due to me). To avoid
 * this in the future, please check the following things if you make
 * changes in this file:
 *
 * 1. You should obtain identical (at least to the PME precision)
 *    energies, forces, and virial for
 *    a rectangular box and a triclinic one where the z (or y) axis is
 *    tilted a whole box side. For instance you could use these boxes:
 *
 *    rectangular       triclinic
 *     2  0  0           2  0  0
 *     0  2  0           0  2  0
 *     0  0  6           2  2  6
 *
 * 2. You should check the energy conservation in a triclinic box.
 *
 * It might seem like overkill, but better safe than sorry.
 * /Erik 001109
 */

#include "gmxpre.h"

#include "pme.h"

#include "config.h"

#include <cassert>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>

#include <algorithm>
#include <list>

#include "gromacs/domdec/domdec.h"
#include "gromacs/ewald/ewald_utils.h"
#include "gromacs/fft/parallel_3dfft.h"
#include "gromacs/fileio/pdbio.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/hardware/hw_info.h"
#include "gromacs/math/gmxcomplex.h"
#include "gromacs/math/invertmatrix.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/forcerec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/real.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/stringutil.h"
#include "gromacs/utility/unique_cptr.h"

#include "calculate_spline_moduli.h"
#include "pme_gather.h"
#include "pme_gpu_internal.h"
#include "pme_grid.h"
#include "pme_internal.h"
#include "pme_redistribute.h"
#include "pme_solve.h"
#include "pme_spline_work.h"
#include "pme_spread.h"

/*! \brief Help build a descriptive message in \c error if there are
 * \c errorReasons why PME on GPU is not supported.
 *
 * \returns Whether the lack of errorReasons indicates there is support. */
static bool addMessageIfNotSupported(const std::list<std::string>& errorReasons, std::string* error)
{
    bool isSupported = errorReasons.empty();
    if (!isSupported && error)
    {
        std::string regressionTestMarker = "PME GPU does not support";
        // this prefix is tested for in the regression test script gmxtest.pl
        *error = regressionTestMarker;
        if (errorReasons.size() == 1)
        {
            *error += " " + errorReasons.back();
        }
        else
        {
            *error += ": " + gmx::joinStrings(errorReasons, "; ");
        }
        *error += ".";
    }
    return isSupported;
}
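
/* Illustrative note (added, not in the original source): with a single reason
 * the assembled message reads
 *   "PME GPU does not support a double-precision build."
 * while several reasons produce
 *   "PME GPU does not support: a double-precision build; a non-GPU build."
 * The fixed prefix must stay intact because gmxtest.pl matches on it.
 */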

bool pme_gpu_supports_build(std::string* error)
{
    std::list<std::string> errorReasons;
    if (GMX_DOUBLE)
    {
        errorReasons.emplace_back("a double-precision build");
    }
    if (GMX_GPU == GMX_GPU_NONE)
    {
        errorReasons.emplace_back("a non-GPU build");
    }
    return addMessageIfNotSupported(errorReasons, error);
}

bool pme_gpu_supports_hardware(const gmx_hw_info_t gmx_unused& hwinfo, std::string* error)
{
    std::list<std::string> errorReasons;

    if (GMX_GPU == GMX_GPU_OPENCL)
    {
#ifdef __APPLE__
        errorReasons.emplace_back("Apple OS X operating system");
#endif
    }
    return addMessageIfNotSupported(errorReasons, error);
}

bool pme_gpu_supports_input(const t_inputrec& ir, const gmx_mtop_t& mtop, std::string* error)
{
    std::list<std::string> errorReasons;
    if (!EEL_PME(ir.coulombtype))
    {
        errorReasons.emplace_back("systems that do not use PME for electrostatics");
    }
    if (ir.pme_order != 4)
    {
        errorReasons.emplace_back("interpolation orders other than 4");
    }
    if (ir.efep != efepNO)
    {
        if (gmx_mtop_has_perturbed_charges(mtop))
        {
            errorReasons.emplace_back(
                    "free energy calculations with perturbed charges (multiple grids)");
        }
    }
    if (EVDW_PME(ir.vdwtype))
    {
        errorReasons.emplace_back("Lennard-Jones PME");
    }
    if (!EI_DYNAMICS(ir.eI))
    {
        errorReasons.emplace_back("not a dynamical integrator");
    }
    return addMessageIfNotSupported(errorReasons, error);
}

/*! \brief \libinternal
 * Finds out whether PME with the given inputs can run on a GPU.
 * This function is an internal final check, validating the whole PME structure on creation,
 * but it still duplicates the preliminary checks from the above (externally exposed) pme_gpu_supports_input() - just in case.
 *
 * \param[in]  pme          The PME structure.
 * \param[out] error        The error message if the input is not supported on GPU.
 * \returns                 True if this PME input is possible to run on GPU, false otherwise.
 */
static bool pme_gpu_check_restrictions(const gmx_pme_t* pme, std::string* error)
{
    std::list<std::string> errorReasons;
    if (pme->nnodes != 1)
    {
        errorReasons.emplace_back("PME decomposition");
    }
    if (pme->pme_order != 4)
    {
        errorReasons.emplace_back("interpolation orders other than 4");
    }
    if (pme->bFEP)
    {
        errorReasons.emplace_back("free energy calculations (multiple grids)");
    }
    if (pme->doLJ)
    {
        errorReasons.emplace_back("Lennard-Jones PME");
    }
    if (GMX_DOUBLE)
    {
        errorReasons.emplace_back("double precision");
    }
    if (GMX_GPU == GMX_GPU_NONE)
    {
        errorReasons.emplace_back("non-GPU build of GROMACS");
    }

    return addMessageIfNotSupported(errorReasons, error);
}

PmeRunMode pme_run_mode(const gmx_pme_t* pme)
{
    GMX_ASSERT(pme != nullptr, "Expecting valid PME data pointer");
    return pme->runMode;
}

gmx::PinningPolicy pme_get_pinning_policy()
{
    return gmx::PinningPolicy::PinnedIfSupported;
}

/*! \brief Number of bytes in a cache line.
 *
 * Must also be a multiple of the SIMD and SIMD4 register size, to
 * preserve alignment.
 */
const int gmxCacheLineSize = 64;

//! Set up coordinate communication
static void setup_coordinate_communication(PmeAtomComm* atc)
{
    int nslab, n, i;
    int fw, bw;

    nslab = atc->nslab;

    n = 0;
    for (i = 1; i <= nslab / 2; i++)
    {
        fw = (atc->nodeid + i) % nslab;
        bw = (atc->nodeid - i + nslab) % nslab;
        if (n < nslab - 1)
        {
            atc->slabCommSetup[n].node_dest = fw;
            atc->slabCommSetup[n].node_src  = bw;
            n++;
        }
        if (n < nslab - 1)
        {
            atc->slabCommSetup[n].node_dest = bw;
            atc->slabCommSetup[n].node_src  = fw;
            n++;
        }
    }
}
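
/* Worked example (added for illustration): with nslab = 4 and nodeid = 0 the
 * loop visits i = 1, 2 and fills the slab setups in the order
 *   n = 0: dest = 1, src = 3   (forward pulse)
 *   n = 1: dest = 3, src = 1   (backward pulse)
 *   n = 2: dest = 2, src = 2   (forward only; the n < nslab - 1 guard skips
 *                               the redundant backward entry)
 * so each rank exchanges with all nslab - 1 peers, alternating directions.
 */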

/*! \brief Round \p n up to the next multiple of \p f */
static int mult_up(int n, int f)
{
    return ((n + f - 1) / f) * f;
}
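
/* For example, mult_up(25, 8) == 32, while mult_up(24, 8) == 24 since 24 is
 * already a multiple of 8. */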

/*! \brief Return estimate of the load imbalance from the PME grid not being a good match for the number of PME ranks */
static double estimate_pme_load_imbalance(struct gmx_pme_t* pme)
{
    int    nma, nmi;
    double n1, n2, n3;

    nma = pme->nnodes_major;
    nmi = pme->nnodes_minor;

    n1 = mult_up(pme->nkx, nma) * mult_up(pme->nky, nmi) * pme->nkz;
    n2 = mult_up(pme->nkx, nma) * mult_up(pme->nkz, nmi) * pme->nky;
    n3 = mult_up(pme->nky, nma) * mult_up(pme->nkz, nmi) * pme->nkx;

    /* pme_solve is roughly double the cost of an fft */

    return (n1 + n2 + 3 * n3) / static_cast<double>(6 * pme->nkx * pme->nky * pme->nkz);
}
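
/* Illustrative example (added): with nkx = nky = nkz = 25 and 4 major x 1
 * minor ranks, mult_up(25, 4) = 28, so the x-decomposed stages are charged
 * for 28/25 = 12% more grid lines than the ideal, and the returned ratio
 * grows accordingly. When the grid dimensions divide evenly by the rank
 * counts, mult_up changes nothing and the estimate is at its minimum.
 */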

#ifndef DOXYGEN

PmeAtomComm::PmeAtomComm(MPI_Comm   PmeMpiCommunicator,
                         const int  numThreads,
                         const int  pmeOrder,
                         const int  dimIndex,
                         const bool doSpread) :
    dimind(dimIndex),
    bSpread(doSpread),
    pme_order(pmeOrder),
    nthread(numThreads),
    spline(nthread)
{
    if (PmeMpiCommunicator != MPI_COMM_NULL)
    {
        mpi_comm = PmeMpiCommunicator;
#    if GMX_MPI
        MPI_Comm_size(mpi_comm, &nslab);
        MPI_Comm_rank(mpi_comm, &nodeid);
#    endif
    }
    if (debug)
    {
        fprintf(debug, "For PME atom communication in dimind %d: nslab %d rank %d\n", dimind, nslab, nodeid);
    }

    if (nslab > 1)
    {
        slabCommSetup.resize(nslab);
        setup_coordinate_communication(this);

        count_thread.resize(nthread);
        for (auto& countThread : count_thread)
        {
            countThread.resize(nslab);
        }
    }

    if (nthread > 1)
    {
        threadMap.resize(nthread);

#    pragma omp parallel for num_threads(nthread) schedule(static)
        for (int thread = 0; thread < nthread; thread++)
        {
            try
            {
                /* Allocate buffer with padding to avoid cache pollution */
                threadMap[thread].nBuffer.resize(nthread + 2 * gmxCacheLineSize);
                threadMap[thread].n = threadMap[thread].nBuffer.data() + gmxCacheLineSize;
            }
            GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
        }
    }
}

#endif // !DOXYGEN

/*! \brief Initialize data structure for communication */
static void init_overlap_comm(pme_overlap_t* ol, int norder, MPI_Comm comm, int nnodes, int nodeid, int ndata, int commplainsize)
{
    gmx_bool bCont;

    ol->mpi_comm = comm;
    ol->nnodes   = nnodes;
    ol->nodeid   = nodeid;

    /* Linear translation of the PME grid won't affect reciprocal space
     * calculations, so to optimize we only interpolate "upwards",
     * which also means we only have to consider overlap in one direction.
     * I.e., particles on this node might also be spread to grid indices
     * that belong to higher nodes (modulo nnodes)
     */

    ol->s2g0.resize(ol->nnodes + 1);
    ol->s2g1.resize(ol->nnodes);
    if (debug)
    {
        fprintf(debug, "PME slab boundaries:");
    }
    for (int i = 0; i < nnodes; i++)
    {
        /* s2g0 the local interpolation grid start.
         * s2g1 the local interpolation grid end.
         * Since in calc_pidx we divide particles, and not grid lines,
         * spatially uniform along dimension x or y, we need to round
         * s2g0 down and s2g1 up.
         */
        ol->s2g0[i] = (i * ndata + 0) / nnodes;
        ol->s2g1[i] = ((i + 1) * ndata + nnodes - 1) / nnodes + norder - 1;

        if (debug)
        {
            fprintf(debug, "  %3d %3d", ol->s2g0[i], ol->s2g1[i]);
        }
    }
    ol->s2g0[nnodes] = ndata;
    if (debug)
    {
        fprintf(debug, "\n");
    }
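
    /* Worked example (added for illustration): nnodes = 3, ndata = 10 and
     * norder = 4 give s2g0 = {0, 3, 6, 10} and s2g1 = {7, 10, 13}: each slab
     * owns [s2g0[i], s2g0[i+1]) and spreads up to norder - 1 = 3 grid lines
     * beyond its own end, which is the overlap communicated below.
     */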

    /* Determine with how many nodes we need to communicate the grid overlap */
    int testRankCount = 0;
    do
    {
        testRankCount++;
        bCont = FALSE;
        for (int i = 0; i < nnodes; i++)
        {
            if ((i + testRankCount < nnodes && ol->s2g1[i] > ol->s2g0[i + testRankCount])
                || (i + testRankCount >= nnodes && ol->s2g1[i] > ol->s2g0[i + testRankCount - nnodes] + ndata))
            {
                bCont = TRUE;
            }
        }
    } while (bCont && testRankCount < nnodes);

    ol->comm_data.resize(testRankCount - 1);
    ol->send_size = 0;

    for (size_t b = 0; b < ol->comm_data.size(); b++)
    {
        pme_grid_comm_t* pgc = &ol->comm_data[b];

        /* Send */
        pgc->send_id  = (ol->nodeid + (b + 1)) % ol->nnodes;
        int fft_start = ol->s2g0[pgc->send_id];
        int fft_end   = ol->s2g0[pgc->send_id + 1];
        if (pgc->send_id < nodeid)
        {
            fft_start += ndata;
            fft_end += ndata;
        }
        int send_index1  = ol->s2g1[nodeid];
        send_index1      = std::min(send_index1, fft_end);
        pgc->send_index0 = fft_start;
        pgc->send_nindex = std::max(0, send_index1 - pgc->send_index0);
        ol->send_size += pgc->send_nindex;

        /* We always start receiving to the first index of our slab */
        pgc->recv_id    = (ol->nodeid - (b + 1) + ol->nnodes) % ol->nnodes;
        fft_start       = ol->s2g0[ol->nodeid];
        fft_end         = ol->s2g0[ol->nodeid + 1];
        int recv_index1 = ol->s2g1[pgc->recv_id];
        if (pgc->recv_id > nodeid)
        {
            recv_index1 -= ndata;
        }
        recv_index1      = std::min(recv_index1, fft_end);
        pgc->recv_index0 = fft_start;
        pgc->recv_nindex = std::max(0, recv_index1 - pgc->recv_index0);
    }

#if GMX_MPI
    /* Communicate the buffer sizes to receive */
    MPI_Status stat;
    for (size_t b = 0; b < ol->comm_data.size(); b++)
    {
        MPI_Sendrecv(&ol->send_size, 1, MPI_INT, ol->comm_data[b].send_id, b, &ol->comm_data[b].recv_size,
                     1, MPI_INT, ol->comm_data[b].recv_id, b, ol->mpi_comm, &stat);
    }
#endif

    /* For a non-divisible grid we need pme_order instead of pme_order-1 */
    ol->sendbuf.resize(norder * commplainsize);
    ol->recvbuf.resize(norder * commplainsize);
}

int minimalPmeGridSize(int pmeOrder)
{
    /* The actual grid size limitations are:
     *   serial:        >= pme_order
     *   DD, no OpenMP: >= 2*(pme_order - 1)
     *   DD, OpenMP:    >= pme_order + 1
     * But we use the maximum for simplicity since in practice there is not
     * much performance difference between pme_order and 2*(pme_order -1).
     */
    int minimalSize = 2 * (pmeOrder - 1);

    GMX_RELEASE_ASSERT(pmeOrder >= 3, "pmeOrder has to be >= 3");
    GMX_RELEASE_ASSERT(minimalSize >= pmeOrder + 1, "The grid size should be >= pmeOrder + 1");

    return minimalSize;
}
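
/* For example, the common pmeOrder = 4 gives a minimal grid size of 6, and
 * pmeOrder = 5 gives 8; both satisfy the release asserts above. */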

bool gmx_pme_check_restrictions(int pme_order, int nkx, int nky, int nkz, int numPmeDomainsAlongX, bool useThreads, bool errorsAreFatal)
{
    if (pme_order > PME_ORDER_MAX)
    {
        if (!errorsAreFatal)
        {
            return false;
        }

        std::string message = gmx::formatString(
                "pme_order (%d) is larger than the maximum allowed value (%d). Modify and "
                "recompile the code if you really need such a high order.",
                pme_order, PME_ORDER_MAX);
        GMX_THROW(gmx::InconsistentInputError(message));
    }

    const int minGridSize = minimalPmeGridSize(pme_order);
    if (nkx < minGridSize || nky < minGridSize || nkz < minGridSize)
    {
        if (!errorsAreFatal)
        {
            return false;
        }
        std::string message =
                gmx::formatString("The PME grid sizes need to be >= 2*(pme_order-1) (%d)", minGridSize);
        GMX_THROW(gmx::InconsistentInputError(message));
    }

    /* Check for a limitation of the (current) sum_fftgrid_dd code.
     * We only allow multiple communication pulses in dim 1, not in dim 0.
     */
    if (useThreads
        && (nkx < numPmeDomainsAlongX * pme_order && nkx != numPmeDomainsAlongX * (pme_order - 1)))
    {
        if (!errorsAreFatal)
        {
            return false;
        }
        gmx_fatal(FARGS,
                  "The number of PME grid lines per rank along x is %g. But when using OpenMP "
                  "threads, the number of grid lines per rank along x should be >= pme_order (%d) "
                  "or = pme_order-1. To resolve this issue, use fewer ranks along x (and possibly "
                  "more along y and/or z) by specifying -dd manually.",
                  nkx / static_cast<double>(numPmeDomainsAlongX), pme_order);
    }

    return true;
}
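
/* Minimal usage sketch (added; the values are only an example): a caller
 * probing whether a grid is usable, without risking a fatal error, can do
 *
 *   const bool gridOk = gmx_pme_check_restrictions(4, 32, 32, 32, 1, false, false);
 *
 * and pass errorsAreFatal = true only once the setup is final.
 */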

/*! \brief Return \p numerator divided by \p denominator, rounded up */
static int div_round_up(int numerator, int denominator)
{
    return (numerator + denominator - 1) / denominator;
}
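
/* For example, div_round_up(10, 3) == 4, whereas plain integer division gives
 * 10 / 3 == 3. */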

gmx_pme_t* gmx_pme_init(const t_commrec*         cr,
                        const NumPmeDomains&     numPmeDomains,
                        const t_inputrec*        ir,
                        gmx_bool                 bFreeEnergy_q,
                        gmx_bool                 bFreeEnergy_lj,
                        gmx_bool                 bReproducible,
                        real                     ewaldcoeff_q,
                        real                     ewaldcoeff_lj,
                        int                      nthread,
                        PmeRunMode               runMode,
                        PmeGpu*                  pmeGpu,
                        const DeviceInformation* deviceInfo,
                        const PmeGpuProgram*     pmeGpuProgram,
                        const gmx::MDLogger& /*mdlog*/)
{
    int  use_threads, sum_use_threads, i;
    ivec ndata;

    if (debug)
    {
        fprintf(debug, "Creating PME data structures.\n");
    }

    gmx::unique_cptr<gmx_pme_t, gmx_pme_destroy> pme(new gmx_pme_t());

    pme->sum_qgrid_tmp    = nullptr;
    pme->sum_qgrid_dd_tmp = nullptr;

    pme->buf_nalloc = 0;

    pme->nnodes  = 1;
    pme->bPPnode = TRUE;

    pme->nnodes_major = numPmeDomains.x;
    pme->nnodes_minor = numPmeDomains.y;

    if (numPmeDomains.x * numPmeDomains.y > 1)
    {
        pme->mpi_comm = cr->mpi_comm_mygroup;

#if GMX_MPI
        MPI_Comm_rank(pme->mpi_comm, &pme->nodeid);
        MPI_Comm_size(pme->mpi_comm, &pme->nnodes);
#endif
        if (pme->nnodes != numPmeDomains.x * numPmeDomains.y)
        {
            gmx_incons("PME rank count mismatch");
        }
    }
    else
    {
        pme->mpi_comm = MPI_COMM_NULL;
    }

    if (pme->nnodes == 1)
    {
        pme->mpi_comm_d[0] = MPI_COMM_NULL;
        pme->mpi_comm_d[1] = MPI_COMM_NULL;
        pme->ndecompdim    = 0;
        pme->nodeid_major  = 0;
        pme->nodeid_minor  = 0;
    }
    else
    {
        if (numPmeDomains.y == 1)
        {
            pme->mpi_comm_d[0] = pme->mpi_comm;
            pme->mpi_comm_d[1] = MPI_COMM_NULL;
            pme->ndecompdim    = 1;
            pme->nodeid_major  = pme->nodeid;
            pme->nodeid_minor  = 0;
        }
        else if (numPmeDomains.x == 1)
        {
            pme->mpi_comm_d[0] = MPI_COMM_NULL;
            pme->mpi_comm_d[1] = pme->mpi_comm;
            pme->ndecompdim    = 1;
            pme->nodeid_major  = 0;
            pme->nodeid_minor  = pme->nodeid;
        }
        else
        {
            if (pme->nnodes % numPmeDomains.x != 0)
            {
                gmx_incons(
                        "For 2D PME decomposition, #PME ranks must be divisible by the number of "
                        "domains along x");
            }
            pme->ndecompdim = 2;

#if GMX_MPI
            MPI_Comm_split(pme->mpi_comm, pme->nodeid % numPmeDomains.y, pme->nodeid,
                           &pme->mpi_comm_d[0]); /* My communicator along major dimension */
            MPI_Comm_split(pme->mpi_comm, pme->nodeid / numPmeDomains.y, pme->nodeid,
                           &pme->mpi_comm_d[1]); /* My communicator along minor dimension */

            MPI_Comm_rank(pme->mpi_comm_d[0], &pme->nodeid_major);
            MPI_Comm_size(pme->mpi_comm_d[0], &pme->nnodes_major);
            MPI_Comm_rank(pme->mpi_comm_d[1], &pme->nodeid_minor);
            MPI_Comm_size(pme->mpi_comm_d[1], &pme->nnodes_minor);
#endif
        }
    }
    // cr is always initialized if there is a PP rank, so we can safely assume
    // that when it is not, as in the Ewald tests, we are not on a PP rank.
    pme->bPPnode = ((cr != nullptr && cr->duty != 0) && thisRankHasDuty(cr, DUTY_PP));

    pme->nthread = nthread;

    /* Check if any of the PME MPI ranks uses threads */
    use_threads = (pme->nthread > 1 ? 1 : 0);
#if GMX_MPI
    if (pme->nnodes > 1)
    {
        MPI_Allreduce(&use_threads, &sum_use_threads, 1, MPI_INT, MPI_SUM, pme->mpi_comm);
    }
    else
#endif
    {
        sum_use_threads = use_threads;
    }
    pme->bUseThreads = (sum_use_threads > 0);

    if (ir->pbcType == PbcType::Screw)
    {
        gmx_fatal(FARGS, "pme does not (yet) work with pbc = screw");
    }

    /* NOTE:
     * It is likely that the current gmx_pme_do() routine supports calculating
     * only Coulomb or LJ while gmx_pme_init() configures for both,
     * but that has never been tested.
     * It is likely that the current gmx_pme_do() routine supports not
     * calculating free energy for Coulomb and/or LJ while gmx_pme_init()
     * configures with free energy, but that has never been tested.
     */
    pme->doCoulomb     = EEL_PME(ir->coulombtype);
    pme->doLJ          = EVDW_PME(ir->vdwtype);
    pme->bFEP_q        = ((ir->efep != efepNO) && bFreeEnergy_q);
    pme->bFEP_lj       = ((ir->efep != efepNO) && bFreeEnergy_lj);
    pme->bFEP          = (pme->bFEP_q || pme->bFEP_lj);
    pme->nkx           = ir->nkx;
    pme->nky           = ir->nky;
    pme->nkz           = ir->nkz;
    pme->bP3M          = (ir->coulombtype == eelP3M_AD || getenv("GMX_PME_P3M") != nullptr);
    pme->pme_order     = ir->pme_order;
    pme->ewaldcoeff_q  = ewaldcoeff_q;
    pme->ewaldcoeff_lj = ewaldcoeff_lj;

    /* Always constant electrostatics coefficients */
    pme->epsilon_r = ir->epsilon_r;

    /* Always constant LJ coefficients */
    pme->ljpme_combination_rule = ir->ljpme_combination_rule;

    // The box requires scaling with nwalls = 2, we store that condition as well
    // as the scaling factor
    delete pme->boxScaler;
    pme->boxScaler = new EwaldBoxZScaler(*ir);

    /* If we violate restrictions, generate a fatal error here */
    gmx_pme_check_restrictions(pme->pme_order, pme->nkx, pme->nky, pme->nkz, pme->nnodes_major,
                               pme->bUseThreads, true);

    if (pme->nnodes > 1)
    {
        double imbal;

#if GMX_MPI
        MPI_Type_contiguous(DIM, GMX_MPI_REAL, &(pme->rvec_mpi));
        MPI_Type_commit(&(pme->rvec_mpi));
#endif

        /* Note that the coefficient spreading and force gathering, which usually
         * takes about the same amount of time as FFT+solve_pme,
         * is always fully load balanced
         * (unless the coefficient distribution is inhomogeneous).
         */

        imbal = estimate_pme_load_imbalance(pme.get());
        if (imbal >= 1.2 && pme->nodeid_major == 0 && pme->nodeid_minor == 0)
        {
            fprintf(stderr,
                    "\n"
                    "NOTE: The load imbalance in PME FFT and solve is %d%%.\n"
                    "      For optimal PME load balancing\n"
                    "      PME grid_x (%d) and grid_y (%d) should be divisible by #PME_ranks_x "
                    "(%d)\n"
                    "      and PME grid_y (%d) and grid_z (%d) should be divisible by #PME_ranks_y "
                    "(%d)\n"
                    "\n",
                    gmx::roundToInt((imbal - 1) * 100), pme->nkx, pme->nky, pme->nnodes_major,
                    pme->nky, pme->nkz, pme->nnodes_minor);
        }
    }

    /* For a non-divisible grid we need pme_order instead of pme_order-1 */
    /* In sum_qgrid_dd x overlap is copied in place: take padding into account.
     * y is always copied through a buffer: we don't need padding in z,
     * but we do need the overlap in x because of the communication order.
     */
    init_overlap_comm(&pme->overlap[0], pme->pme_order, pme->mpi_comm_d[0], pme->nnodes_major,
                      pme->nodeid_major, pme->nkx,
                      (div_round_up(pme->nky, pme->nnodes_minor) + pme->pme_order)
                              * (pme->nkz + pme->pme_order - 1));

    /* Along overlap dim 1 we can send in multiple pulses in sum_fftgrid_dd.
     * We do this with an offset buffer of equal size, so we need to allocate
     * extra for the offset. That's what the (+1)*pme->nkz is for.
     */
    init_overlap_comm(&pme->overlap[1], pme->pme_order, pme->mpi_comm_d[1], pme->nnodes_minor,
                      pme->nodeid_minor, pme->nky,
                      (div_round_up(pme->nkx, pme->nnodes_major) + pme->pme_order + 1) * pme->nkz);

    /* Double-check for a limitation of the (current) sum_fftgrid_dd code.
     * Note that gmx_pme_check_restrictions checked for this already.
     */
    if (pme->bUseThreads && (pme->overlap[0].comm_data.size() > 1))
    {
        gmx_incons(
                "More than one communication pulse required for grid overlap communication along "
                "the major dimension while using threads");
    }

    snew(pme->bsp_mod[XX], pme->nkx);
    snew(pme->bsp_mod[YY], pme->nky);
    snew(pme->bsp_mod[ZZ], pme->nkz);

    pme->gpu     = pmeGpu; /* Carrying over the single GPU structure */
    pme->runMode = runMode;

    /* The required size of the interpolation grid, including overlap.
     * The allocated size (pmegrid_n?) might be slightly larger.
     */
    pme->pmegrid_nx = pme->overlap[0].s2g1[pme->nodeid_major] - pme->overlap[0].s2g0[pme->nodeid_major];
    pme->pmegrid_ny = pme->overlap[1].s2g1[pme->nodeid_minor] - pme->overlap[1].s2g0[pme->nodeid_minor];
    pme->pmegrid_nz_base = pme->nkz;
    pme->pmegrid_nz      = pme->pmegrid_nz_base + pme->pme_order - 1;
    set_grid_alignment(&pme->pmegrid_nz, pme->pme_order);
    pme->pmegrid_start_ix = pme->overlap[0].s2g0[pme->nodeid_major];
    pme->pmegrid_start_iy = pme->overlap[1].s2g0[pme->nodeid_minor];
    pme->pmegrid_start_iz = 0;

    make_gridindex_to_localindex(pme->nkx, pme->pmegrid_start_ix,
                                 pme->pmegrid_nx - (pme->pme_order - 1), &pme->nnx, &pme->fshx);
    make_gridindex_to_localindex(pme->nky, pme->pmegrid_start_iy,
                                 pme->pmegrid_ny - (pme->pme_order - 1), &pme->nny, &pme->fshy);
    make_gridindex_to_localindex(pme->nkz, pme->pmegrid_start_iz, pme->pmegrid_nz_base, &pme->nnz,
                                 &pme->fshz);

    pme->spline_work = make_pme_spline_work(pme->pme_order);

    ndata[0] = pme->nkx;
    ndata[1] = pme->nky;
    ndata[2] = pme->nkz;
    /* It doesn't matter if we allocate too many grids here,
     * we only allocate and use the ones we need.
     */
    if (pme->doLJ)
    {
        pme->ngrids = ((ir->ljpme_combination_rule == eljpmeLB) ? DO_Q_AND_LJ_LB : DO_Q_AND_LJ);
    }
    else
    {
        pme->ngrids = DO_Q;
    }
    snew(pme->fftgrid, pme->ngrids);
    snew(pme->cfftgrid, pme->ngrids);
    snew(pme->pfft_setup, pme->ngrids);

    for (i = 0; i < pme->ngrids; ++i)
    {
        if ((i < DO_Q && pme->doCoulomb && (i == 0 || bFreeEnergy_q))
            || (i >= DO_Q && pme->doLJ
                && (i == 2 || bFreeEnergy_lj || ir->ljpme_combination_rule == eljpmeLB)))
        {
            pmegrids_init(&pme->pmegrid[i], pme->pmegrid_nx, pme->pmegrid_ny, pme->pmegrid_nz,
                          pme->pmegrid_nz_base, pme->pme_order, pme->bUseThreads, pme->nthread,
                          pme->overlap[0].s2g1[pme->nodeid_major]
                                  - pme->overlap[0].s2g0[pme->nodeid_major + 1],
                          pme->overlap[1].s2g1[pme->nodeid_minor]
                                  - pme->overlap[1].s2g0[pme->nodeid_minor + 1]);
            /* This routine will allocate the grid data to fit the FFTs */
            const auto allocateRealGridForGpu = (pme->runMode == PmeRunMode::Mixed)
                                                        ? gmx::PinningPolicy::PinnedIfSupported
                                                        : gmx::PinningPolicy::CannotBePinned;
            gmx_parallel_3dfft_init(&pme->pfft_setup[i], ndata, &pme->fftgrid[i], &pme->cfftgrid[i],
                                    pme->mpi_comm_d, bReproducible, pme->nthread, allocateRealGridForGpu);
        }
    }

    if (!pme->bP3M)
    {
        /* Use plain SPME B-spline interpolation */
        make_bspline_moduli(pme->bsp_mod, pme->nkx, pme->nky, pme->nkz, pme->pme_order);
    }
    else
    {
        /* Use the P3M grid-optimized influence function */
        make_p3m_bspline_moduli(pme->bsp_mod, pme->nkx, pme->nky, pme->nkz, pme->pme_order);
    }

    /* Use atc[0] for spreading */
    const int firstDimIndex   = (numPmeDomains.x > 1 ? 0 : 1);
    MPI_Comm  mpiCommFirstDim = (pme->nnodes > 1 ? pme->mpi_comm_d[firstDimIndex] : MPI_COMM_NULL);
    bool      doSpread        = true;
    pme->atc.emplace_back(mpiCommFirstDim, pme->nthread, pme->pme_order, firstDimIndex, doSpread);
    if (pme->ndecompdim >= 2)
    {
        const int secondDimIndex = 1;
        doSpread                 = false;
        pme->atc.emplace_back(pme->mpi_comm_d[1], pme->nthread, pme->pme_order, secondDimIndex, doSpread);
    }

    // Initial check of validity of the input for running on the GPU
    if (pme->runMode != PmeRunMode::CPU)
    {
        std::string errorString;
        bool        canRunOnGpu = pme_gpu_check_restrictions(pme.get(), &errorString);
        if (!canRunOnGpu)
        {
            GMX_THROW(gmx::NotImplementedError(errorString));
        }
    }
    pme_gpu_reinit(pme.get(), deviceInfo, pmeGpuProgram);

    pme_init_all_work(&pme->solve_work, pme->nthread, pme->nkx);

    // no exception was thrown during the init, so we hand over the PME structure handle
    return pme.release();
}

void gmx_pme_reinit(struct gmx_pme_t** pmedata,
                    const t_commrec*   cr,
                    struct gmx_pme_t*  pme_src,
                    const t_inputrec*  ir,
                    const ivec         grid_size,
                    real               ewaldcoeff_q,
                    real               ewaldcoeff_lj)
{
    // Create a copy of t_inputrec fields that are used in gmx_pme_init().
    // TODO: This would be better as just copying a sub-structure that contains
    // all the PME parameters and nothing else.
    t_inputrec irc;
    irc.pbcType                = ir->pbcType;
    irc.coulombtype            = ir->coulombtype;
    irc.vdwtype                = ir->vdwtype;
    irc.efep                   = ir->efep;
    irc.pme_order              = ir->pme_order;
    irc.epsilon_r              = ir->epsilon_r;
    irc.ljpme_combination_rule = ir->ljpme_combination_rule;
    irc.nkx                    = grid_size[XX];
    irc.nky                    = grid_size[YY];
    irc.nkz                    = grid_size[ZZ];

    try
    {
        const gmx::MDLogger dummyLogger;
        // This is reinit, which currently only changes the grid size/coefficients,
        // so we don't expect any actual logging.
        // TODO: when PME is an object, it should take a reference to mdlog on construction and save it.
        GMX_ASSERT(pmedata, "Invalid PME pointer");
        NumPmeDomains numPmeDomains = { pme_src->nnodes_major, pme_src->nnodes_minor };
        *pmedata = gmx_pme_init(cr, numPmeDomains, &irc, pme_src->bFEP_q, pme_src->bFEP_lj, FALSE,
                                ewaldcoeff_q, ewaldcoeff_lj, pme_src->nthread, pme_src->runMode,
                                pme_src->gpu, nullptr, nullptr, dummyLogger);
        /* When running PME on the CPU not using domain decomposition,
         * the atom data is allocated once only in gmx_pme_(re)init().
         */
        if (!pme_src->gpu && pme_src->nnodes == 1)
        {
            gmx_pme_reinit_atoms(*pmedata, pme_src->atc[0].numAtoms(), nullptr);
        }
        // TODO this is mostly passing around current values
    }
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR

    /* We can easily reuse the allocated pme grids in pme_src */
    reuse_pmegrids(&pme_src->pmegrid[PME_GRID_QA], &(*pmedata)->pmegrid[PME_GRID_QA]);
    /* We would like to reuse the fft grids, but that's harder */
}

void gmx_pme_calc_energy(gmx_pme_t* pme, gmx::ArrayRef<const gmx::RVec> x, gmx::ArrayRef<const real> q, real* V)
{
    pmegrids_t* grid;

    if (pme->nnodes > 1)
    {
        gmx_incons("gmx_pme_calc_energy called in parallel");
    }
    if (pme->bFEP_q)
    {
        gmx_incons("gmx_pme_calc_energy with free energy");
    }

    if (!pme->atc_energy)
    {
        pme->atc_energy = std::make_unique<PmeAtomComm>(MPI_COMM_NULL, 1, pme->pme_order, 0, true);
    }
    PmeAtomComm* atc = pme->atc_energy.get();
    atc->setNumAtoms(x.ssize());
    atc->x           = x;
    atc->coefficient = q;

    /* We only use the A-charges grid */
    grid = &pme->pmegrid[PME_GRID_QA];

    /* Only calculate the spline coefficients, don't actually spread */
    spread_on_grid(pme, atc, nullptr, TRUE, FALSE, pme->fftgrid[PME_GRID_QA], FALSE, PME_GRID_QA);

    *V = gather_energy_bsplines(pme, grid->grid.grid, atc);
}
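
/* Usage sketch (added; hypothetical caller): on a single rank without
 * perturbed charges, the PME energy for charges q at positions x can be
 * obtained as
 *
 *   real V = 0;
 *   gmx_pme_calc_energy(pme, x, q, &V);
 *
 * where x and q are ArrayRefs over the atom coordinates and charges.
 */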

/*! \brief Calculate initial Lorentz-Berthelot coefficients for LJ-PME */
static void calc_initial_lb_coeffs(gmx::ArrayRef<real> coefficient, const real* local_c6, const real* local_sigma)
{
    for (gmx::index i = 0; i < coefficient.ssize(); ++i)
    {
        real sigma4    = local_sigma[i];
        sigma4         = sigma4 * sigma4;
        sigma4         = sigma4 * sigma4;
        coefficient[i] = local_c6[i] / sigma4;
    }
}

/*! \brief Calculate next Lorentz-Berthelot coefficients for LJ-PME */
static void calc_next_lb_coeffs(gmx::ArrayRef<real> coefficient, const real* local_sigma)
{
    for (gmx::index i = 0; i < coefficient.ssize(); ++i)
    {
        coefficient[i] *= local_sigma[i];
    }
}
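
/* Note on the coefficient progression (added for clarity): calc_initial_lb_coeffs
 * sets coefficient[i] = C6[i] / sigma[i]^4, and each of the seven LB grid passes
 * in gmx_pme_do() below first applies calc_next_lb_coeffs, so pass k (k = 0..6)
 * spreads C6[i] * sigma[i]^(k-3), running from C6/sigma^3 up to C6*sigma^3.
 * These correspond, up to constant weights, to the seven terms in the binomial
 * expansion of the Lorentz-Berthelot sigma_ij^6 = ((sigma_i + sigma_j)/2)^6.
 */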

int gmx_pme_do(struct gmx_pme_t*              pme,
               gmx::ArrayRef<const gmx::RVec> coordinates,
               gmx::ArrayRef<gmx::RVec>       forces,
               real                           chargeA[],
               real                           chargeB[],
               real                           c6A[],
               real                           c6B[],
               real                           sigmaA[],
               real                           sigmaB[],
               const matrix                   box,
               const t_commrec*               cr,
               int                            maxshift_x,
               int                            maxshift_y,
               t_nrnb*                        nrnb,
               gmx_wallcycle*                 wcycle,
               matrix                         vir_q,
               matrix                         vir_lj,
               real*                          energy_q,
               real*                          energy_lj,
               real                           lambda_q,
               real                           lambda_lj,
               real*                          dvdlambda_q,
               real*                          dvdlambda_lj,
               int                            flags)
{
    GMX_ASSERT(pme->runMode == PmeRunMode::CPU,
               "gmx_pme_do should not be called on the GPU PME run.");

    int                  d, npme, grid_index, max_grid_index;
    PmeAtomComm&         atc         = pme->atc[0];
    pmegrids_t*          pmegrid     = nullptr;
    real*                grid        = nullptr;
    real*                coefficient = nullptr;
    PmeOutput            output[2]; // The second is used for the B state with FEP
    real                 scale, lambda;
    gmx_bool             bClearF;
    gmx_parallel_3dfft_t pfft_setup;
    real*                fftgrid;
    t_complex*           cfftgrid;
    int                  thread;
    gmx_bool             bFirst, bDoSplines;
    int                  fep_state;
    int                  fep_states_lj = pme->bFEP_lj ? 2 : 1;
    const gmx_bool       bCalcEnerVir  = (flags & GMX_PME_CALC_ENER_VIR) != 0;
    const gmx_bool       bBackFFT      = (flags & (GMX_PME_CALC_F | GMX_PME_CALC_POT)) != 0;
    const gmx_bool       bCalcF        = (flags & GMX_PME_CALC_F) != 0;

    /* We could be passing lambda!=0 while no q or LJ is actually perturbed */
    if (!pme->bFEP_q)
    {
        lambda_q = 0;
    }
    if (!pme->bFEP_lj)
    {
        lambda_lj = 0;
    }

    assert(pme->nnodes > 0);
    assert(pme->nnodes == 1 || pme->ndecompdim > 0);

    if (pme->nnodes > 1)
    {
        atc.pd.resize(coordinates.ssize());
        for (int d = pme->ndecompdim - 1; d >= 0; d--)
        {
            PmeAtomComm& atc = pme->atc[d];
            atc.maxshift     = (atc.dimind == 0 ? maxshift_x : maxshift_y);
        }
    }
    else
    {
        GMX_ASSERT(coordinates.ssize() == atc.numAtoms(), "We expect atc.numAtoms() coordinates");
        GMX_ASSERT(forces.ssize() >= atc.numAtoms(),
                   "We need a force buffer with at least atc.numAtoms() elements");

        atc.x = coordinates;
        atc.f = forces;
    }

    matrix scaledBox;
    pme->boxScaler->scaleBox(box, scaledBox);

    gmx::invertBoxMatrix(scaledBox, pme->recipbox);
    bFirst = TRUE;

    /* For simplicity, we construct the splines for all particles if
     * more than one PME calculation is needed. Some optimization
     * could be done by keeping track of which atoms have splines
     * constructed, and construct new splines on each pass for atoms
     * that don't yet have them.
     */

    bDoSplines = pme->bFEP || (pme->doCoulomb && pme->doLJ);

    /* We need a maximum of four separate PME calculations:
     * grid_index=0: Coulomb PME with charges from state A
     * grid_index=1: Coulomb PME with charges from state B
     * grid_index=2: LJ PME with C6 from state A
     * grid_index=3: LJ PME with C6 from state B
     * For Lorentz-Berthelot combination rules, a separate loop is used to
     * calculate all the terms
     */

    /* If we are doing LJ-PME with LB, we only do Q here */
    max_grid_index = (pme->ljpme_combination_rule == eljpmeLB) ? DO_Q : DO_Q_AND_LJ;

    for (grid_index = 0; grid_index < max_grid_index; ++grid_index)
    {
        /* Check if we should do calculations at this grid_index
         * If grid_index is odd we should be doing FEP
         * If grid_index < 2 we should be doing electrostatic PME
         * If grid_index >= 2 we should be doing LJ-PME
         */
        if ((grid_index < DO_Q && (!pme->doCoulomb || (grid_index == 1 && !pme->bFEP_q)))
            || (grid_index >= DO_Q && (!pme->doLJ || (grid_index == 3 && !pme->bFEP_lj))))
        {
            continue;
        }
        /* Unpack structure */
        pmegrid    = &pme->pmegrid[grid_index];
        fftgrid    = pme->fftgrid[grid_index];
        cfftgrid   = pme->cfftgrid[grid_index];
        pfft_setup = pme->pfft_setup[grid_index];
        switch (grid_index)
        {
            case 0: coefficient = chargeA; break;
            case 1: coefficient = chargeB; break;
            case 2: coefficient = c6A; break;
            case 3: coefficient = c6B; break;
        }

        grid = pmegrid->grid.grid;

        if (debug)
        {
            fprintf(debug, "PME: number of ranks = %d, rank = %d\n", cr->nnodes, cr->nodeid);
            fprintf(debug, "Grid = %p\n", static_cast<void*>(grid));
            if (grid == nullptr)
            {
                gmx_fatal(FARGS, "No grid!");
            }
        }

        if (pme->nnodes == 1)
        {
            atc.coefficient = gmx::arrayRefFromArray(coefficient, coordinates.size());
        }
        else
        {
            wallcycle_start(wcycle, ewcPME_REDISTXF);
            do_redist_pos_coeffs(pme, cr, bFirst, coordinates, coefficient);

            wallcycle_stop(wcycle, ewcPME_REDISTXF);
        }

        if (debug)
        {
            fprintf(debug, "Rank= %6d, pme local particles=%6d\n", cr->nodeid, atc.numAtoms());
        }

        if (flags & GMX_PME_SPREAD)
        {
            wallcycle_start(wcycle, ewcPME_SPREAD);

            /* Spread the coefficients on a grid */
            spread_on_grid(pme, &atc, pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);

            if (bFirst)
            {
                inc_nrnb(nrnb, eNR_WEIGHTS, DIM * atc.numAtoms());
            }
            inc_nrnb(nrnb, eNR_SPREADBSP,
                     pme->pme_order * pme->pme_order * pme->pme_order * atc.numAtoms());

            if (!pme->bUseThreads)
            {
                wrap_periodic_pmegrid(pme, grid);

                /* sum contributions to local grid from other nodes */
                if (pme->nnodes > 1)
                {
                    gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
                }

                copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
            }

            wallcycle_stop(wcycle, ewcPME_SPREAD);

            /* TODO If the OpenMP and single-threaded implementations
               converge, then spread_on_grid() and
               copy_pmegrid_to_fftgrid() will perhaps live in the same
               source file.
             */
        }

        /* Here we start a large thread parallel region */
#pragma omp parallel num_threads(pme->nthread) private(thread)
        {
            try
            {
                thread = gmx_omp_get_thread_num();
                if (flags & GMX_PME_SOLVE)
                {
                    int loop_count;

                    /* do 3d-fft */
                    if (thread == 0)
                    {
                        wallcycle_start(wcycle, ewcPME_FFT);
                    }
                    gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX, thread, wcycle);
                    if (thread == 0)
                    {
                        wallcycle_stop(wcycle, ewcPME_FFT);
                    }

                    /* solve in k-space for our local cells */
                    if (thread == 0)
                    {
                        wallcycle_start(wcycle, (grid_index < DO_Q ? ewcPME_SOLVE : ewcLJPME));
                    }
                    if (grid_index < DO_Q)
                    {
                        loop_count = solve_pme_yzx(
                                pme, cfftgrid, scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
                                bCalcEnerVir, pme->nthread, thread);
                    }
                    else
                    {
                        loop_count = solve_pme_lj_yzx(
                                pme, &cfftgrid, FALSE,
                                scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
                                bCalcEnerVir, pme->nthread, thread);
                    }

                    if (thread == 0)
                    {
                        wallcycle_stop(wcycle, (grid_index < DO_Q ? ewcPME_SOLVE : ewcLJPME));
                        inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
                    }
                }

                if (bBackFFT)
                {
                    /* do 3d-invfft */
                    if (thread == 0)
                    {
                        wallcycle_start(wcycle, ewcPME_FFT);
                    }
                    gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL, thread, wcycle);
                    if (thread == 0)
                    {
                        wallcycle_stop(wcycle, ewcPME_FFT);


                        if (pme->nodeid == 0)
                        {
                            real ntot = pme->nkx * pme->nky * pme->nkz;
                            npme      = static_cast<int>(ntot * std::log(ntot) / std::log(2.0));
                            inc_nrnb(nrnb, eNR_FFT, 2 * npme);
                        }

                        /* Note: this wallcycle region is closed below
                           outside an OpenMP region, so take care if
                           refactoring code here. */
                        wallcycle_start(wcycle, ewcPME_GATHER);
                    }

                    copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
                }
            }
            GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
        }
        /* End of thread parallel section.
         * With MPI we have to synchronize here before gmx_sum_qgrid_dd.
         */

        if (bBackFFT)
        {
            /* distribute local grid to all nodes */
            if (pme->nnodes > 1)
            {
                gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
            }

            unwrap_periodic_pmegrid(pme, grid);
        }

        if (bCalcF)
        {
            /* interpolate forces for our local atoms */


            /* If we are running without parallelization,
             * atc->f is the actual force array, not a buffer,
             * therefore we should not clear it.
             */
            lambda  = grid_index < DO_Q ? lambda_q : lambda_lj;
            bClearF = (bFirst && PAR(cr));
#pragma omp parallel for num_threads(pme->nthread) schedule(static)
            for (thread = 0; thread < pme->nthread; thread++)
            {
                try
                {
                    gather_f_bsplines(pme, grid, bClearF, &atc, &atc.spline[thread],
                                      pme->bFEP ? (grid_index % 2 == 0 ? 1.0 - lambda : lambda) : 1.0);
                }
                GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
            }


            inc_nrnb(nrnb, eNR_GATHERFBSP,
                     pme->pme_order * pme->pme_order * pme->pme_order * atc.numAtoms());
            /* Note: this wallcycle region is opened above inside an OpenMP
               region, so take care if refactoring code here. */
            wallcycle_stop(wcycle, ewcPME_GATHER);
        }

        if (bCalcEnerVir)
        {
            /* This should only be called on the master thread
             * and after the threads have synchronized.
             */
            if (grid_index < 2)
            {
                get_pme_ener_vir_q(pme->solve_work, pme->nthread, &output[grid_index % 2]);
            }
            else
            {
                get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &output[grid_index % 2]);
            }
        }
        bFirst = FALSE;
    } /* of grid_index-loop */
1331
1332     /* For Lorentz-Berthelot combination rules in LJ-PME, we need to calculate
1333      * seven terms. */
1334
1335     if (pme->doLJ && pme->ljpme_combination_rule == eljpmeLB)
1336     {
1337         /* Loop over A- and B-state if we are doing FEP */
1338         for (fep_state = 0; fep_state < fep_states_lj; ++fep_state)
1339         {
1340             real *local_c6 = nullptr, *local_sigma = nullptr, *RedistC6 = nullptr, *RedistSigma = nullptr;
1341             gmx::ArrayRef<real> coefficientBuffer;
1342             if (pme->nnodes == 1)
1343             {
1344                 pme->lb_buf1.resize(atc.numAtoms());
1345                 coefficientBuffer = pme->lb_buf1;
1346                 switch (fep_state)
1347                 {
1348                     case 0:
1349                         local_c6    = c6A;
1350                         local_sigma = sigmaA;
1351                         break;
1352                     case 1:
1353                         local_c6    = c6B;
1354                         local_sigma = sigmaB;
1355                         break;
1356                     default: gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
1357                 }
1358             }
1359             else
1360             {
1361                 coefficientBuffer = atc.coefficientBuffer;
1362                 switch (fep_state)
1363                 {
1364                     case 0:
1365                         RedistC6    = c6A;
1366                         RedistSigma = sigmaA;
1367                         break;
1368                     case 1:
1369                         RedistC6    = c6B;
1370                         RedistSigma = sigmaB;
1371                         break;
1372                     default: gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
1373                 }
1374                 wallcycle_start(wcycle, ewcPME_REDISTXF);
1375
1376                 do_redist_pos_coeffs(pme, cr, bFirst, coordinates, RedistC6);
1377                 pme->lb_buf1.resize(atc.numAtoms());
1378                 pme->lb_buf2.resize(atc.numAtoms());
1379                 local_c6 = pme->lb_buf1.data();
1380                 for (int i = 0; i < atc.numAtoms(); ++i)
1381                 {
1382                     local_c6[i] = atc.coefficient[i];
1383                 }
1384
1385                 do_redist_pos_coeffs(pme, cr, FALSE, coordinates, RedistSigma);
1386                 local_sigma = pme->lb_buf2.data();
1387                 for (int i = 0; i < atc.numAtoms(); ++i)
1388                 {
1389                     local_sigma[i] = atc.coefficient[i];
1390                 }
1391
1392                 wallcycle_stop(wcycle, ewcPME_REDISTXF);
1393             }
1394             atc.coefficient = coefficientBuffer;
1395             calc_initial_lb_coeffs(coefficientBuffer, local_c6, local_sigma);
1396
            /* Seven terms in LJ-PME with LB; grid_index < 2 is reserved for electrostatics */
            for (grid_index = 2; grid_index < 9; ++grid_index)
            {
                /* Unpack structure */
                pmegrid    = &pme->pmegrid[grid_index];
                fftgrid    = pme->fftgrid[grid_index];
                pfft_setup = pme->pfft_setup[grid_index];
                calc_next_lb_coeffs(coefficientBuffer, local_sigma);
                grid = pmegrid->grid.grid;

                if (flags & GMX_PME_SPREAD)
                {
                    wallcycle_start(wcycle, ewcPME_SPREAD);
                    /* Spread the c6 on a grid */
                    spread_on_grid(pme, &atc, pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);

                    if (bFirst)
                    {
                        inc_nrnb(nrnb, eNR_WEIGHTS, DIM * atc.numAtoms());
                    }

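                    /* Flop bookkeeping: spreading touches pme_order^3 grid
                     * points per atom. */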
                    inc_nrnb(nrnb, eNR_SPREADBSP,
                             pme->pme_order * pme->pme_order * pme->pme_order * atc.numAtoms());
                    if (pme->nthread == 1)
                    {
                        wrap_periodic_pmegrid(pme, grid);
                        /* sum contributions to local grid from other nodes */
                        if (pme->nnodes > 1)
                        {
                            gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
                        }
                        copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
                    }
                    wallcycle_stop(wcycle, ewcPME_SPREAD);
                }
                /* Here we start a large thread-parallel region */
#pragma omp parallel num_threads(pme->nthread) private(thread)
                {
                    try
                    {
                        thread = gmx_omp_get_thread_num();
                        if (flags & GMX_PME_SOLVE)
                        {
                            /* do 3d-fft */
                            if (thread == 0)
                            {
                                wallcycle_start(wcycle, ewcPME_FFT);
                            }

                            gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX, thread, wcycle);
                            if (thread == 0)
                            {
                                wallcycle_stop(wcycle, ewcPME_FFT);
                            }
                        }
                    }
                    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
                }
                bFirst = FALSE;
            }
            if (flags & GMX_PME_SOLVE)
            {
                /* solve in k-space for our local cells */
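                /* All seven complex LJ grids (cfftgrid[2..8]) are handed to the
                 * solver together; the TRUE argument selects the Lorentz-Berthelot
                 * branch, which couples the grids pairwise in reciprocal space
                 * (term n of the expansion with term 6 - n). */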
#pragma omp parallel num_threads(pme->nthread) private(thread)
                {
                    try
                    {
                        int loop_count;
                        thread = gmx_omp_get_thread_num();
                        if (thread == 0)
                        {
                            wallcycle_start(wcycle, ewcLJPME);
                        }

                        loop_count = solve_pme_lj_yzx(
                                pme, &pme->cfftgrid[2], TRUE,
                                scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
                                bCalcEnerVir, pme->nthread, thread);
                        if (thread == 0)
                        {
                            wallcycle_stop(wcycle, ewcLJPME);
                            inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
                        }
                    }
                    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
                }
            }

            if (bCalcEnerVir)
            {
                /* This should only be called on the master thread and
                 * after the threads have synchronized.
                 */
                get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &output[fep_state]);
            }

            if (bBackFFT)
            {
                bFirst = !pme->doCoulomb;
                calc_initial_lb_coeffs(coefficientBuffer, local_c6, local_sigma);
                for (grid_index = 8; grid_index >= 2; --grid_index)
                {
                    /* Unpack structure */
                    pmegrid    = &pme->pmegrid[grid_index];
                    fftgrid    = pme->fftgrid[grid_index];
                    pfft_setup = pme->pfft_setup[grid_index];
                    grid       = pmegrid->grid.grid;
                    calc_next_lb_coeffs(coefficientBuffer, local_sigma);
#pragma omp parallel num_threads(pme->nthread) private(thread)
                    {
                        try
                        {
                            thread = gmx_omp_get_thread_num();
                            /* do 3d-invfft */
                            if (thread == 0)
                            {
                                wallcycle_start(wcycle, ewcPME_FFT);
                            }

                            gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL, thread, wcycle);
                            if (thread == 0)
                            {
                                wallcycle_stop(wcycle, ewcPME_FFT);

                                if (pme->nodeid == 0)
                                {
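                                    /* Standard N*log2(N) operation-count
                                     * estimate for the 3D FFT; this only feeds
                                     * the flop accounting and does not affect
                                     * the results. */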
                                    real ntot = pme->nkx * pme->nky * pme->nkz;
                                    npme = static_cast<int>(ntot * std::log(ntot) / std::log(2.0));
                                    inc_nrnb(nrnb, eNR_FFT, 2 * npme);
                                }
                                wallcycle_start(wcycle, ewcPME_GATHER);
                            }

                            copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
                        }
                        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
                    } /* #pragma omp parallel */

                    /* distribute local grid to all nodes */
                    if (pme->nnodes > 1)
                    {
                        gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
                    }

                    unwrap_periodic_pmegrid(pme, grid);

                    if (bCalcF)
                    {
                        /* interpolate forces for our local atoms */
                        bClearF = (bFirst && PAR(cr));
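                        /* Weight for this term: the FEP lambda factor for the
                         * current state, times lb_scale_factor[n], presumably
                         * the binomial weight C(6,n)/2^6 from the expansion
                         * above. */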
                        scale   = pme->bFEP ? (fep_state < 1 ? 1.0 - lambda_lj : lambda_lj) : 1.0;
                        scale *= lb_scale_factor[grid_index - 2];

#pragma omp parallel for num_threads(pme->nthread) schedule(static)
                        for (thread = 0; thread < pme->nthread; thread++)
                        {
                            try
                            {
                                gather_f_bsplines(pme, grid, bClearF, &pme->atc[0],
                                                  &pme->atc[0].spline[thread], scale);
                            }
                            GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
                        }

                        inc_nrnb(nrnb, eNR_GATHERFBSP,
                                 pme->pme_order * pme->pme_order * pme->pme_order * pme->atc[0].numAtoms());
                    }
                    wallcycle_stop(wcycle, ewcPME_GATHER);

                    bFirst = FALSE;
                } /* for (grid_index = 8; grid_index >= 2; --grid_index) */
            }     /* if (bBackFFT) */
        }         /* for (fep_state = 0; fep_state < fep_states_lj; ++fep_state) */
    }             /* if (pme->doLJ && pme->ljpme_combination_rule == eljpmeLB) */

    if (bCalcF && pme->nnodes > 1)
    {
        wallcycle_start(wcycle, ewcPME_REDISTXF);
        for (d = 0; d < pme->ndecompdim; d++)
        {
            gmx::ArrayRef<gmx::RVec> forcesRef;
            if (d == pme->ndecompdim - 1)
            {
                const size_t numAtoms = coordinates.size();
                GMX_ASSERT(forces.size() >= numAtoms, "Need at least numAtoms forces");
                forcesRef = forces.subArray(0, numAtoms);
            }
            else
            {
                forcesRef = pme->atc[d + 1].f;
            }
            if (DOMAINDECOMP(cr))
            {
                dd_pmeredist_f(pme, &pme->atc[d], forcesRef, d == pme->ndecompdim - 1 && pme->bPPnode);
            }
        }

        wallcycle_stop(wcycle, ewcPME_REDISTXF);
    }

    if (bCalcEnerVir)
    {
        if (pme->doCoulomb)
        {
            if (!pme->bFEP_q)
            {
                *energy_q = output[0].coulombEnergy_;
                m_add(vir_q, output[0].coulombVirial_, vir_q);
            }
            else
            {
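                /* Linear interpolation between the A and B states:
                 * E(lambda) = (1 - lambda)*E_A + lambda*E_B, so
                 * dE/dlambda = E_B - E_A. The virial is interpolated with the
                 * same weights. */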
                *energy_q = (1.0 - lambda_q) * output[0].coulombEnergy_ + lambda_q * output[1].coulombEnergy_;
                *dvdlambda_q += output[1].coulombEnergy_ - output[0].coulombEnergy_;
                for (int i = 0; i < DIM; i++)
                {
                    for (int j = 0; j < DIM; j++)
                    {
                        vir_q[i][j] += (1.0 - lambda_q) * output[0].coulombVirial_[i][j]
                                       + lambda_q * output[1].coulombVirial_[i][j];
                    }
                }
            }
            if (debug)
            {
                fprintf(debug, "Electrostatic PME mesh energy: %g\n", *energy_q);
            }
        }
        else
        {
            *energy_q = 0;
        }

        if (pme->doLJ)
        {
            if (!pme->bFEP_lj)
            {
                *energy_lj = output[0].lennardJonesEnergy_;
                m_add(vir_lj, output[0].lennardJonesVirial_, vir_lj);
            }
            else
            {
                *energy_lj = (1.0 - lambda_lj) * output[0].lennardJonesEnergy_
                             + lambda_lj * output[1].lennardJonesEnergy_;
                *dvdlambda_lj += output[1].lennardJonesEnergy_ - output[0].lennardJonesEnergy_;
                for (int i = 0; i < DIM; i++)
                {
                    for (int j = 0; j < DIM; j++)
                    {
                        vir_lj[i][j] += (1.0 - lambda_lj) * output[0].lennardJonesVirial_[i][j]
                                        + lambda_lj * output[1].lennardJonesVirial_[i][j];
                    }
                }
            }
            if (debug)
            {
                fprintf(debug, "Lennard-Jones PME mesh energy: %g\n", *energy_lj);
            }
        }
        else
        {
            *energy_lj = 0;
        }
    }
    return 0;
}

void gmx_pme_destroy(gmx_pme_t* pme)
{
    if (!pme)
    {
        return;
    }

    delete pme->boxScaler;

    sfree(pme->nnx);
    sfree(pme->nny);
    sfree(pme->nnz);
    sfree(pme->fshx);
    sfree(pme->fshy);
    sfree(pme->fshz);

    for (int i = 0; i < pme->ngrids; ++i)
    {
        pmegrids_destroy(&pme->pmegrid[i]);
    }
    if (pme->pfft_setup)
    {
        for (int i = 0; i < pme->ngrids; ++i)
        {
            gmx_parallel_3dfft_destroy(pme->pfft_setup[i]);
        }
    }
    sfree(pme->fftgrid);
    sfree(pme->cfftgrid);
    sfree(pme->pfft_setup);

    for (int i = 0; i < DIM; i++)
    {
        sfree(pme->bsp_mod[i]);
    }

    sfree(pme->bufv);
    sfree(pme->bufr);

    if (pme->solve_work)
    {
        pme_free_all_work(&pme->solve_work, pme->nthread);
    }

    sfree(pme->sum_qgrid_tmp);
    sfree(pme->sum_qgrid_dd_tmp);

    destroy_pme_spline_work(pme->spline_work);

    if (pme->gpu != nullptr)
    {
        pme_gpu_destroy(pme->gpu);
    }

    delete pme;
}

void gmx_pme_reinit_atoms(gmx_pme_t* pme, const int numAtoms, const real* charges)
{
    if (pme->gpu != nullptr)
    {
        pme_gpu_reinit_atoms(pme->gpu, numAtoms, charges);
    }
    else
    {
        pme->atc[0].setNumAtoms(numAtoms);
        // TODO: set the charges here as well
    }
}

bool gmx_pme_grid_matches(const gmx_pme_t& pme, const ivec grid_size)
{
    return (pme.nkx == grid_size[XX] && pme.nky == grid_size[YY] && pme.nkz == grid_size[ZZ]);
}