1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5  * Copyright (c) 2001-2004, The GROMACS development team.
6  * Copyright (c) 2013,2014,2015,2016,2017 The GROMACS development team.
7  * Copyright (c) 2018,2019,2020,2021, by the GROMACS development team, led by
8  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
9  * and including many others, as listed in the AUTHORS file in the
10  * top-level source directory and at http://www.gromacs.org.
11  *
12  * GROMACS is free software; you can redistribute it and/or
13  * modify it under the terms of the GNU Lesser General Public License
14  * as published by the Free Software Foundation; either version 2.1
15  * of the License, or (at your option) any later version.
16  *
17  * GROMACS is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
20  * Lesser General Public License for more details.
21  *
22  * You should have received a copy of the GNU Lesser General Public
23  * License along with GROMACS; if not, see
24  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
25  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
26  *
27  * If you want to redistribute modifications to GROMACS, please
28  * consider that scientific software is very special. Version
29  * control is crucial - bugs must be traceable. We will be happy to
30  * consider code for inclusion in the official distribution, but
31  * derived work must not be called official GROMACS. Details are found
32  * in the README & COPYING files - if they are missing, get the
33  * official version at http://www.gromacs.org.
34  *
35  * To help us fund GROMACS development, we humbly ask that you cite
36  * the research papers on the package. Check out http://www.gromacs.org.
37  */
38 /*! \internal \file
39  *
40  * \brief This file contains function definitions necessary for
41  * computing energies and forces for the PME long-ranged part (Coulomb
42  * and LJ).
43  *
44  * \author Erik Lindahl <erik@kth.se>
45  * \author Berk Hess <hess@kth.se>
46  * \ingroup module_ewald
47  */
48 /* IMPORTANT FOR DEVELOPERS:
49  *
50  * Triclinic pme stuff isn't entirely trivial, and we've experienced
51  * some bugs during development (many of them due to me). To avoid
52  * this in the future, please check the following things if you make
53  * changes in this file:
54  *
55  * 1. You should obtain identical (at least to the PME precision)
56  *    energies, forces, and virial for
57  *    a rectangular box and a triclinic one where the z (or y) axis is
58  *    tilted a whole box side. For instance you could use these boxes:
59  *
60  *    rectangular       triclinic
61  *     2  0  0           2  0  0
62  *     0  2  0           0  2  0
63  *     0  0  6           2  2  6
64  *
65  * 2. You should check the energy conservation in a triclinic box.
66  *
 * It might seem like overkill, but better safe than sorry.
68  * /Erik 001109
69  */
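/* A minimal sketch of check 1 above (illustrative only, not part of the build;
 * the tolerance and the way the two energies are obtained are assumptions):
 *
 *   #include <cassert>
 *   #include <cmath>
 *
 *   // Compare the PME energies from the rectangular and the tilted triclinic
 *   // box; they should agree to roughly the PME mesh precision.
 *   void checkTriclinicEquivalence(double eRectangular, double eTriclinic)
 *   {
 *       const double relativeTolerance = 1e-5; // assumed PME precision
 *       assert(std::fabs(eRectangular - eTriclinic)
 *              <= relativeTolerance * std::fabs(eRectangular));
 *   }
 */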
70
71 #include "gmxpre.h"
72
73 #include "pme.h"
74
75 #include "config.h"
76
77 #include <cassert>
78 #include <cmath>
79 #include <cstdio>
80 #include <cstdlib>
81 #include <cstring>
82
83 #include <algorithm>
84 #include <list>
85
86 #include "gromacs/domdec/domdec.h"
87 #include "gromacs/ewald/ewald_utils.h"
88 #include "gromacs/fft/parallel_3dfft.h"
89 #include "gromacs/fileio/pdbio.h"
90 #include "gromacs/gmxlib/network.h"
91 #include "gromacs/gmxlib/nrnb.h"
92 #include "gromacs/hardware/hw_info.h"
93 #include "gromacs/math/gmxcomplex.h"
94 #include "gromacs/math/invertmatrix.h"
95 #include "gromacs/math/units.h"
96 #include "gromacs/math/vec.h"
97 #include "gromacs/math/vectypes.h"
98 #include "gromacs/mdtypes/commrec.h"
99 #include "gromacs/mdtypes/forcerec.h"
100 #include "gromacs/mdtypes/inputrec.h"
101 #include "gromacs/mdtypes/md_enums.h"
102 #include "gromacs/mdtypes/simulation_workload.h"
103 #include "gromacs/pbcutil/pbc.h"
104 #include "gromacs/timing/cyclecounter.h"
105 #include "gromacs/timing/wallcycle.h"
106 #include "gromacs/timing/walltime_accounting.h"
107 #include "gromacs/topology/topology.h"
108 #include "gromacs/utility/basedefinitions.h"
109 #include "gromacs/utility/cstringutil.h"
110 #include "gromacs/utility/exceptions.h"
111 #include "gromacs/utility/fatalerror.h"
112 #include "gromacs/utility/gmxmpi.h"
113 #include "gromacs/utility/gmxomp.h"
114 #include "gromacs/utility/logger.h"
#include "gromacs/utility/message_string_collector.h"
#include "gromacs/utility/real.h"
#include "gromacs/utility/smalloc.h"
118 #include "gromacs/utility/unique_cptr.h"
119
120 #include "calculate_spline_moduli.h"
121 #include "pme_gather.h"
122 #include "pme_gpu_internal.h"
123 #include "pme_grid.h"
124 #include "pme_internal.h"
125 #include "pme_redistribute.h"
126 #include "pme_solve.h"
127 #include "pme_spline_work.h"
128 #include "pme_spread.h"
129
130 //NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
131 bool g_allowPmeWithSyclForTesting = false;
132
133 bool pme_gpu_supports_build(std::string* error)
134 {
135     gmx::MessageStringCollector errorReasons;
136     // Before changing the prefix string, make sure that it is not searched for in regression tests.
137     errorReasons.startContext("PME GPU does not support:");
138     errorReasons.appendIf(GMX_DOUBLE, "Double-precision build of GROMACS.");
139     errorReasons.appendIf(!GMX_GPU, "Non-GPU build of GROMACS.");
140     errorReasons.appendIf(GMX_GPU_SYCL && !g_allowPmeWithSyclForTesting, "SYCL build."); // SYCL-TODO
141     errorReasons.finishContext();
142     if (error != nullptr)
143     {
144         *error = errorReasons.toString();
145     }
146     return errorReasons.isEmpty();
147 }
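/* Minimal caller sketch for the check above (illustrative only):
 *
 *   std::string error;
 *   if (!pme_gpu_supports_build(&error))
 *   {
 *       fprintf(stderr, "%s\n", error.c_str()); // e.g. "PME GPU does not support: ..."
 *   }
 */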
148
149 bool pme_gpu_supports_hardware(const gmx_hw_info_t gmx_unused& hwinfo, std::string* error)
150 {
151     gmx::MessageStringCollector errorReasons;
152     // Before changing the prefix string, make sure that it is not searched for in regression tests.
153     errorReasons.startContext("PME GPU does not support:");
154 #ifdef __APPLE__
155     errorReasons.appendIf(GMX_GPU_OPENCL, "Apple OS X operating system");
156 #endif
157     errorReasons.finishContext();
158     if (error != nullptr)
159     {
160         *error = errorReasons.toString();
161     }
162     return errorReasons.isEmpty();
163 }
164
165 bool pme_gpu_supports_input(const t_inputrec& ir, std::string* error)
166 {
167     gmx::MessageStringCollector errorReasons;
168     // Before changing the prefix string, make sure that it is not searched for in regression tests.
169     errorReasons.startContext("PME GPU does not support:");
170     errorReasons.appendIf(!EEL_PME(ir.coulombtype),
171                           "Systems that do not use PME for electrostatics.");
172     errorReasons.appendIf((ir.pme_order != 4), "Interpolation orders other than 4.");
173     errorReasons.appendIf(EVDW_PME(ir.vdwtype), "Lennard-Jones PME.");
174     errorReasons.appendIf(!EI_DYNAMICS(ir.eI), "Non-dynamical integrator (use md, sd, etc).");
175     errorReasons.finishContext();
176     if (error != nullptr)
177     {
178         *error = errorReasons.toString();
179     }
180     return errorReasons.isEmpty();
181 }
182
183 bool pme_gpu_mixed_mode_supports_input(const t_inputrec& ir, std::string* error)
184 {
185     gmx::MessageStringCollector errorReasons;
186     // Before changing the prefix string, make sure that it is not searched for in regression tests.
187     errorReasons.startContext("PME GPU in Mixed mode does not support:");
188     errorReasons.appendIf(ir.efep != FreeEnergyPerturbationType::No, "Free Energy Perturbation.");
189     errorReasons.finishContext();
190     if (error != nullptr)
191     {
192         *error = errorReasons.toString();
193     }
194     return errorReasons.isEmpty();
195 }
196
/*! \brief \libinternal
 * Determines whether PME with the given inputs can run on a GPU.
 * This function is the final internal check, validating the whole PME structure on creation;
 * it deliberately duplicates the preliminary checks of the (externally exposed)
 * pme_gpu_supports_input(), just in case.
 *
 * \param[in]  pme          The PME structure.
 * \param[out] error        The error message if the input is not supported on the GPU.
 * \returns                 True if this PME input can run on a GPU, false otherwise.
 */
206 static bool pme_gpu_check_restrictions(const gmx_pme_t* pme, std::string* error)
207 {
208     gmx::MessageStringCollector errorReasons;
209     // Before changing the prefix string, make sure that it is not searched for in regression tests.
210     errorReasons.startContext("PME GPU does not support:");
211     errorReasons.appendIf((pme->nnodes != 1), "PME decomposition.");
    errorReasons.appendIf((pme->pme_order != 4), "Interpolation orders other than 4.");
    errorReasons.appendIf(pme->doLJ, "Lennard-Jones PME.");
    errorReasons.appendIf(GMX_DOUBLE, "Double-precision build of GROMACS.");
215     errorReasons.appendIf(!GMX_GPU, "Non-GPU build of GROMACS.");
216     errorReasons.appendIf(GMX_GPU_SYCL && !g_allowPmeWithSyclForTesting, "SYCL build of GROMACS."); // SYCL-TODO
217     errorReasons.finishContext();
218     if (error != nullptr)
219     {
220         *error = errorReasons.toString();
221     }
222     return errorReasons.isEmpty();
223 }
224
225 PmeRunMode pme_run_mode(const gmx_pme_t* pme)
226 {
227     GMX_ASSERT(pme != nullptr, "Expecting valid PME data pointer");
228     return pme->runMode;
229 }
230
231 gmx::PinningPolicy pme_get_pinning_policy()
232 {
233     return gmx::PinningPolicy::PinnedIfSupported;
234 }
235
236 /*! \brief Number of bytes in a cache line.
237  *
238  * Must also be a multiple of the SIMD and SIMD4 register size, to
239  * preserve alignment.
240  */
241 const int gmxCacheLineSize = 64;
242
//! Set up the coordinate communication order, alternating forward and backward slabs
static void setup_coordinate_communication(PmeAtomComm* atc)
{
    const int nslab = atc->nslab;

    int n = 0;
    for (int i = 1; i <= nslab / 2; i++)
    {
        const int fw = (atc->nodeid + i) % nslab;
        const int bw = (atc->nodeid - i + nslab) % nslab;
        if (n < nslab - 1)
        {
            atc->slabCommSetup[n].node_dest = fw;
            atc->slabCommSetup[n].node_src  = bw;
            n++;
        }
        if (n < nslab - 1)
        {
            atc->slabCommSetup[n].node_dest = bw;
            atc->slabCommSetup[n].node_src  = fw;
            n++;
        }
    }
}
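/* Worked example of the ordering above (the values follow directly from the code):
 * for nslab = 4 and nodeid = 0 the loop yields
 *   i = 1: fw = 1, bw = 3  ->  entries { dest 1, src 3 }, { dest 3, src 1 }
 *   i = 2: fw = 2, bw = 2  ->  entry   { dest 2, src 2 } (n reaches nslab - 1)
 * so each of the nslab - 1 = 3 remote slabs is communicated with exactly once.
 */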
270
271 /*! \brief Round \p n up to the next multiple of \p f */
272 static int mult_up(int n, int f)
273 {
274     return ((n + f - 1) / f) * f;
275 }
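/* Example (follows directly from the formula):
 *   mult_up(10, 4) == ((10 + 3) / 4) * 4 == 12  // rounded up
 *   mult_up(12, 4) == 12                        // already a multiple
 */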
276
277 /*! \brief Return estimate of the load imbalance from the PME grid not being a good match for the number of PME ranks */
static double estimate_pme_load_imbalance(struct gmx_pme_t* pme)
{
    const int nma = pme->nnodes_major;
    const int nmi = pme->nnodes_minor;

    const double n1 = mult_up(pme->nkx, nma) * mult_up(pme->nky, nmi) * pme->nkz;
    const double n2 = mult_up(pme->nkx, nma) * mult_up(pme->nkz, nmi) * pme->nky;
    const double n3 = mult_up(pme->nky, nma) * mult_up(pme->nkz, nmi) * pme->nkx;

    /* pme_solve is roughly double the cost of an fft */

    return (n1 + n2 + 3 * n3) / static_cast<double>(6 * pme->nkx * pme->nky * pme->nkz);
}
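/* Worked example (the values follow from the formula above): for a 20x20x20
 * grid on 16x1 PME ranks, mult_up(20, 16) = 32, so
 *   n1 = n2 = 32 * 20 * 20 and n3 = 32 * 20 * 20,
 * giving (n1 + n2 + 3 * n3) / (6 * 20^3) = 5 * 32 / (6 * 20) ~= 1.33,
 * which is above the 1.2 threshold used for the load-imbalance note below.
 */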
294
295 #ifndef DOXYGEN
296
297 PmeAtomComm::PmeAtomComm(MPI_Comm   PmeMpiCommunicator,
298                          const int  numThreads,
299                          const int  pmeOrder,
300                          const int  dimIndex,
301                          const bool doSpread) :
302     dimind(dimIndex), bSpread(doSpread), pme_order(pmeOrder), nthread(numThreads), spline(nthread)
303 {
304     if (PmeMpiCommunicator != MPI_COMM_NULL)
305     {
306         mpi_comm = PmeMpiCommunicator;
307 #    if GMX_MPI
308         MPI_Comm_size(mpi_comm, &nslab);
309         MPI_Comm_rank(mpi_comm, &nodeid);
310 #    endif
311     }
312     if (debug)
313     {
314         fprintf(debug, "For PME atom communication in dimind %d: nslab %d rank %d\n", dimind, nslab, nodeid);
315     }
316
317     if (nslab > 1)
318     {
319         slabCommSetup.resize(nslab);
320         setup_coordinate_communication(this);
321
322         count_thread.resize(nthread);
323         for (auto& countThread : count_thread)
324         {
325             countThread.resize(nslab);
326         }
327     }
328
329     if (nthread > 1)
330     {
331         threadMap.resize(nthread);
332
333 #    pragma omp parallel for num_threads(nthread) schedule(static)
334         for (int thread = 0; thread < nthread; thread++)
335         {
336             try
337             {
                /* Allocate the buffer with padding to avoid cache pollution */
339                 threadMap[thread].nBuffer.resize(nthread + 2 * gmxCacheLineSize);
340                 threadMap[thread].n = threadMap[thread].nBuffer.data() + gmxCacheLineSize;
341             }
342             GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
343         }
344     }
345 }
346
347 #endif // !DOXYGEN
348
349 /*! \brief Initialize data structure for communication */
350 static void init_overlap_comm(pme_overlap_t* ol, int norder, MPI_Comm comm, int nnodes, int nodeid, int ndata, int commplainsize)
351 {
352     gmx_bool bCont;
353
354     ol->mpi_comm = comm;
355     ol->nnodes   = nnodes;
356     ol->nodeid   = nodeid;
357
358     /* Linear translation of the PME grid won't affect reciprocal space
359      * calculations, so to optimize we only interpolate "upwards",
360      * which also means we only have to consider overlap in one direction.
361      * I.e., particles on this node might also be spread to grid indices
362      * that belong to higher nodes (modulo nnodes)
363      */
364
365     ol->s2g0.resize(ol->nnodes + 1);
366     ol->s2g1.resize(ol->nnodes);
367     if (debug)
368     {
369         fprintf(debug, "PME slab boundaries:");
370     }
371     for (int i = 0; i < nnodes; i++)
372     {
        /* s2g0 is the local interpolation grid start.
         * s2g1 is the local interpolation grid end.
         * Since in calc_pidx we divide particles, not grid lines,
         * uniformly in space along dimension x or y, we need to round
         * s2g0 down and s2g1 up.
         */
379         ol->s2g0[i] = (i * ndata + 0) / nnodes;
380         ol->s2g1[i] = ((i + 1) * ndata + nnodes - 1) / nnodes + norder - 1;
381
382         if (debug)
383         {
384             fprintf(debug, "  %3d %3d", ol->s2g0[i], ol->s2g1[i]);
385         }
386     }
387     ol->s2g0[nnodes] = ndata;
388     if (debug)
389     {
390         fprintf(debug, "\n");
391     }
392
393     /* Determine with how many nodes we need to communicate the grid overlap */
394     int testRankCount = 0;
395     do
396     {
397         testRankCount++;
398         bCont = FALSE;
399         for (int i = 0; i < nnodes; i++)
400         {
401             if ((i + testRankCount < nnodes && ol->s2g1[i] > ol->s2g0[i + testRankCount])
402                 || (i + testRankCount >= nnodes && ol->s2g1[i] > ol->s2g0[i + testRankCount - nnodes] + ndata))
403             {
404                 bCont = TRUE;
405             }
406         }
407     } while (bCont && testRankCount < nnodes);
408
409     ol->comm_data.resize(testRankCount - 1);
410     ol->send_size = 0;
411
412     for (size_t b = 0; b < ol->comm_data.size(); b++)
413     {
414         pme_grid_comm_t* pgc = &ol->comm_data[b];
415
416         /* Send */
417         pgc->send_id  = (ol->nodeid + (b + 1)) % ol->nnodes;
418         int fft_start = ol->s2g0[pgc->send_id];
419         int fft_end   = ol->s2g0[pgc->send_id + 1];
420         if (pgc->send_id < nodeid)
421         {
422             fft_start += ndata;
423             fft_end += ndata;
424         }
425         int send_index1  = ol->s2g1[nodeid];
426         send_index1      = std::min(send_index1, fft_end);
427         pgc->send_index0 = fft_start;
428         pgc->send_nindex = std::max(0, send_index1 - pgc->send_index0);
429         ol->send_size += pgc->send_nindex;
430
        /* We always start receiving at the first index of our slab */
432         pgc->recv_id    = (ol->nodeid - (b + 1) + ol->nnodes) % ol->nnodes;
433         fft_start       = ol->s2g0[ol->nodeid];
434         fft_end         = ol->s2g0[ol->nodeid + 1];
435         int recv_index1 = ol->s2g1[pgc->recv_id];
436         if (pgc->recv_id > nodeid)
437         {
438             recv_index1 -= ndata;
439         }
440         recv_index1      = std::min(recv_index1, fft_end);
441         pgc->recv_index0 = fft_start;
442         pgc->recv_nindex = std::max(0, recv_index1 - pgc->recv_index0);
443     }
444
445 #if GMX_MPI
446     /* Communicate the buffer sizes to receive */
447     MPI_Status stat;
448     for (size_t b = 0; b < ol->comm_data.size(); b++)
449     {
450         MPI_Sendrecv(&ol->send_size,
451                      1,
452                      MPI_INT,
453                      ol->comm_data[b].send_id,
454                      b,
455                      &ol->comm_data[b].recv_size,
456                      1,
457                      MPI_INT,
458                      ol->comm_data[b].recv_id,
459                      b,
460                      ol->mpi_comm,
461                      &stat);
462     }
463 #endif
464
    /* For a non-divisible grid we need pme_order instead of pme_order-1 */
466     ol->sendbuf.resize(norder * commplainsize);
467     ol->recvbuf.resize(norder * commplainsize);
468 }
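/* Worked example of the slab boundaries above (the values follow from the code):
 * for ndata = 30 grid lines, nnodes = 4 and norder = 4,
 *   s2g0 = { 0, 7, 15, 22, 30 }   // rounded down; s2g0[nnodes] = ndata
 *   s2g1 = { 11, 18, 26, 33 }     // rounded up, plus norder - 1 overlap
 * Slab 0 owns lines [0, 7) but spreads up to line 11, overlapping only into
 * slab 1 (11 <= s2g0[2] = 15), so a single communication pulse suffices.
 */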
469
470 int minimalPmeGridSize(int pmeOrder)
471 {
472     /* The actual grid size limitations are:
473      *   serial:        >= pme_order
474      *   DD, no OpenMP: >= 2*(pme_order - 1)
475      *   DD, OpenMP:    >= pme_order + 1
476      * But we use the maximum for simplicity since in practice there is not
477      * much performance difference between pme_order and 2*(pme_order -1).
478      */
479     int minimalSize = 2 * (pmeOrder - 1);
480
481     GMX_RELEASE_ASSERT(pmeOrder >= 3, "pmeOrder has to be >= 3");
482     GMX_RELEASE_ASSERT(minimalSize >= pmeOrder + 1, "The grid size should be >= pmeOrder + 1");
483
484     return minimalSize;
485 }
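/* Example: for the default pmeOrder = 4 this gives 2 * (4 - 1) = 6, which also
 * satisfies the DD + OpenMP requirement of pmeOrder + 1 = 5 grid lines.
 */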
486
487 bool gmx_pme_check_restrictions(int pme_order, int nkx, int nky, int nkz, int numPmeDomainsAlongX, bool useThreads, bool errorsAreFatal)
488 {
489     if (pme_order > PME_ORDER_MAX)
490     {
491         if (!errorsAreFatal)
492         {
493             return false;
494         }
495
496         std::string message = gmx::formatString(
497                 "pme_order (%d) is larger than the maximum allowed value (%d). Modify and "
498                 "recompile the code if you really need such a high order.",
499                 pme_order,
500                 PME_ORDER_MAX);
501         GMX_THROW(gmx::InconsistentInputError(message));
502     }
503
504     const int minGridSize = minimalPmeGridSize(pme_order);
505     if (nkx < minGridSize || nky < minGridSize || nkz < minGridSize)
506     {
507         if (!errorsAreFatal)
508         {
509             return false;
510         }
511         std::string message =
512                 gmx::formatString("The PME grid sizes need to be >= 2*(pme_order-1) (%d)", minGridSize);
513         GMX_THROW(gmx::InconsistentInputError(message));
514     }
515
516     /* Check for a limitation of the (current) sum_fftgrid_dd code.
517      * We only allow multiple communication pulses in dim 1, not in dim 0.
518      */
519     if (useThreads
520         && (nkx < numPmeDomainsAlongX * pme_order && nkx != numPmeDomainsAlongX * (pme_order - 1)))
521     {
522         if (!errorsAreFatal)
523         {
524             return false;
525         }
        gmx_fatal(FARGS,
                  "The number of PME grid lines per rank along x is %g. But when using OpenMP "
                  "threads, the number of grid lines per rank along x should be >= pme_order (%d) "
                  "or = pme_order-1. To resolve this issue, use fewer ranks along x (and possibly "
                  "more along y and/or z) by specifying -dd manually.",
                  nkx / static_cast<double>(numPmeDomainsAlongX),
                  pme_order);
533     }
534
535     return true;
536 }
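/* Caller sketch (illustrative): probing validity without aborting on failure:
 *
 *   const bool gridOk = gmx_pme_check_restrictions(
 *           4, 48, 48, 48, 4, true, false); // errorsAreFatal = false: just returns false
 */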
537
/*! \brief Divide \p numerator by \p denominator, rounding up */
static int div_round_up(int numerator, int denominator)
{
    return (numerator + denominator - 1) / denominator;
}
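/* Example: div_round_up(30, 4) == (30 + 3) / 4 == 8, while 30 / 4 == 7. */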
543
544 gmx_pme_t* gmx_pme_init(const t_commrec*     cr,
545                         const NumPmeDomains& numPmeDomains,
546                         const t_inputrec*    ir,
547                         gmx_bool             bFreeEnergy_q,
548                         gmx_bool             bFreeEnergy_lj,
549                         gmx_bool             bReproducible,
550                         real                 ewaldcoeff_q,
551                         real                 ewaldcoeff_lj,
552                         int                  nthread,
553                         PmeRunMode           runMode,
554                         PmeGpu*              pmeGpu,
555                         const DeviceContext* deviceContext,
556                         const DeviceStream*  deviceStream,
557                         const PmeGpuProgram* pmeGpuProgram,
558                         const gmx::MDLogger& mdlog)
559 {
560     int  use_threads, sum_use_threads, i;
561     ivec ndata;
562
563     if (debug)
564     {
565         fprintf(debug, "Creating PME data structures.\n");
566     }
567
568     gmx::unique_cptr<gmx_pme_t, gmx_pme_destroy> pme(new gmx_pme_t());
569
570     pme->buf_nalloc = 0;
571
572     pme->nnodes  = 1;
573     pme->bPPnode = TRUE;
574
575     pme->nnodes_major = numPmeDomains.x;
576     pme->nnodes_minor = numPmeDomains.y;
577
578     if (numPmeDomains.x * numPmeDomains.y > 1)
579     {
580         pme->mpi_comm = cr->mpi_comm_mygroup;
581
582 #if GMX_MPI
583         MPI_Comm_rank(pme->mpi_comm, &pme->nodeid);
584         MPI_Comm_size(pme->mpi_comm, &pme->nnodes);
585 #endif
586         if (pme->nnodes != numPmeDomains.x * numPmeDomains.y)
587         {
588             gmx_incons("PME rank count mismatch");
589         }
590     }
591     else
592     {
593         pme->mpi_comm = MPI_COMM_NULL;
594     }
595
596     if (pme->nnodes == 1)
597     {
598         pme->mpi_comm_d[0] = MPI_COMM_NULL;
599         pme->mpi_comm_d[1] = MPI_COMM_NULL;
600         pme->ndecompdim    = 0;
601         pme->nodeid_major  = 0;
602         pme->nodeid_minor  = 0;
603     }
604     else
605     {
606         if (numPmeDomains.y == 1)
607         {
608             pme->mpi_comm_d[0] = pme->mpi_comm;
609             pme->mpi_comm_d[1] = MPI_COMM_NULL;
610             pme->ndecompdim    = 1;
611             pme->nodeid_major  = pme->nodeid;
612             pme->nodeid_minor  = 0;
613         }
614         else if (numPmeDomains.x == 1)
615         {
616             pme->mpi_comm_d[0] = MPI_COMM_NULL;
617             pme->mpi_comm_d[1] = pme->mpi_comm;
618             pme->ndecompdim    = 1;
619             pme->nodeid_major  = 0;
620             pme->nodeid_minor  = pme->nodeid;
621         }
622         else
623         {
624             if (pme->nnodes % numPmeDomains.x != 0)
625             {
626                 gmx_incons(
627                         "For 2D PME decomposition, #PME ranks must be divisible by the number of "
628                         "domains along x");
629             }
630             pme->ndecompdim = 2;
631
632 #if GMX_MPI
633             MPI_Comm_split(pme->mpi_comm,
634                            pme->nodeid % numPmeDomains.y,
635                            pme->nodeid,
636                            &pme->mpi_comm_d[0]); /* My communicator along major dimension */
637             MPI_Comm_split(pme->mpi_comm,
638                            pme->nodeid / numPmeDomains.y,
639                            pme->nodeid,
640                            &pme->mpi_comm_d[1]); /* My communicator along minor dimension */
641
642             MPI_Comm_rank(pme->mpi_comm_d[0], &pme->nodeid_major);
643             MPI_Comm_size(pme->mpi_comm_d[0], &pme->nnodes_major);
644             MPI_Comm_rank(pme->mpi_comm_d[1], &pme->nodeid_minor);
645             MPI_Comm_size(pme->mpi_comm_d[1], &pme->nnodes_minor);
646 #endif
647         }
648     }
    // cr is always initialized if there is a PP rank, so we can safely assume
    // that when it is not, like in the ewald tests, we are not on a PP rank.
651     pme->bPPnode = ((cr != nullptr && cr->duty != 0) && thisRankHasDuty(cr, DUTY_PP));
652
653     pme->nthread = nthread;
654
655     /* Check if any of the PME MPI ranks uses threads */
656     use_threads = (pme->nthread > 1 ? 1 : 0);
657 #if GMX_MPI
658     if (pme->nnodes > 1)
659     {
660         MPI_Allreduce(&use_threads, &sum_use_threads, 1, MPI_INT, MPI_SUM, pme->mpi_comm);
661     }
662     else
663 #endif
664     {
665         sum_use_threads = use_threads;
666     }
667     pme->bUseThreads = (sum_use_threads > 0);
668
669     if (ir->pbcType == PbcType::Screw)
670     {
671         gmx_fatal(FARGS, "pme does not (yet) work with pbc = screw");
672     }
673
    /* NOTE:
     * It is likely that the current gmx_pme_do() routine supports calculating
     * only Coulomb or only LJ while gmx_pme_init() configures for both,
     * but that has never been tested.
     * It is also likely that the current gmx_pme_do() routine supports not
     * calculating free energy for Coulomb and/or LJ while gmx_pme_init()
     * configures with free energy, but that has never been tested.
     */
682     pme->doCoulomb = EEL_PME(ir->coulombtype);
683     pme->doLJ      = EVDW_PME(ir->vdwtype);
684     pme->bFEP_q    = ((ir->efep != FreeEnergyPerturbationType::No) && bFreeEnergy_q);
685     pme->bFEP_lj   = ((ir->efep != FreeEnergyPerturbationType::No) && bFreeEnergy_lj);
686     pme->bFEP      = (pme->bFEP_q || pme->bFEP_lj);
687     pme->nkx       = ir->nkx;
688     pme->nky       = ir->nky;
689     pme->nkz       = ir->nkz;
690     pme->bP3M = (ir->coulombtype == CoulombInteractionType::P3mAD || getenv("GMX_PME_P3M") != nullptr);
691     pme->pme_order     = ir->pme_order;
692     pme->ewaldcoeff_q  = ewaldcoeff_q;
693     pme->ewaldcoeff_lj = ewaldcoeff_lj;
694
695     /* Always constant electrostatics coefficients */
696     pme->epsilon_r = ir->epsilon_r;
697
698     /* Always constant LJ coefficients */
699     pme->ljpme_combination_rule = ir->ljpme_combination_rule;
700
    // The box requires scaling when nwalls = 2; we store that condition as well
    // as the scaling factor
703     pme->boxScaler = std::make_unique<EwaldBoxZScaler>(
704             EwaldBoxZScaler(inputrecPbcXY2Walls(ir), ir->wall_ewald_zfac));
705
706     /* If we violate restrictions, generate a fatal error here */
707     gmx_pme_check_restrictions(
708             pme->pme_order, pme->nkx, pme->nky, pme->nkz, pme->nnodes_major, pme->bUseThreads, true);
709
710     if (pme->nnodes > 1)
711     {
712         double imbal;
713
714 #if GMX_MPI
715         MPI_Type_contiguous(DIM, GMX_MPI_REAL, &(pme->rvec_mpi));
716         MPI_Type_commit(&(pme->rvec_mpi));
717 #endif
718
719         /* Note that the coefficient spreading and force gathering, which usually
720          * takes about the same amount of time as FFT+solve_pme,
721          * is always fully load balanced
722          * (unless the coefficient distribution is inhomogeneous).
723          */
724
725         imbal = estimate_pme_load_imbalance(pme.get());
726         if (imbal >= 1.2 && pme->nodeid_major == 0 && pme->nodeid_minor == 0)
727         {
728             GMX_LOG(mdlog.warning)
729                     .asParagraph()
730                     .appendTextFormatted(
731                             "NOTE: The load imbalance in PME FFT and solve is %d%%.\n"
732                             "      For optimal PME load balancing\n"
733                             "      PME grid_x (%d) and grid_y (%d) should be divisible by "
734                             "#PME_ranks_x "
735                             "(%d)\n"
736                             "      and PME grid_y (%d) and grid_z (%d) should be divisible by "
737                             "#PME_ranks_y "
738                             "(%d)",
739                             gmx::roundToInt((imbal - 1) * 100),
740                             pme->nkx,
741                             pme->nky,
742                             pme->nnodes_major,
743                             pme->nky,
744                             pme->nkz,
745                             pme->nnodes_minor);
746         }
747     }
748
    /* For a non-divisible grid we need pme_order instead of pme_order-1 */
750     /* In sum_qgrid_dd x overlap is copied in place: take padding into account.
751      * y is always copied through a buffer: we don't need padding in z,
752      * but we do need the overlap in x because of the communication order.
753      */
754     init_overlap_comm(&pme->overlap[0],
755                       pme->pme_order,
756                       pme->mpi_comm_d[0],
757                       pme->nnodes_major,
758                       pme->nodeid_major,
759                       pme->nkx,
760                       (div_round_up(pme->nky, pme->nnodes_minor) + pme->pme_order)
761                               * (pme->nkz + pme->pme_order - 1));
762
763     /* Along overlap dim 1 we can send in multiple pulses in sum_fftgrid_dd.
764      * We do this with an offset buffer of equal size, so we need to allocate
765      * extra for the offset. That's what the (+1)*pme->nkz is for.
766      */
767     init_overlap_comm(&pme->overlap[1],
768                       pme->pme_order,
769                       pme->mpi_comm_d[1],
770                       pme->nnodes_minor,
771                       pme->nodeid_minor,
772                       pme->nky,
773                       (div_round_up(pme->nkx, pme->nnodes_major) + pme->pme_order + 1) * pme->nkz);
774
775     /* Double-check for a limitation of the (current) sum_fftgrid_dd code.
776      * Note that gmx_pme_check_restrictions checked for this already.
777      */
778     if (pme->bUseThreads && (pme->overlap[0].comm_data.size() > 1))
779     {
780         gmx_incons(
781                 "More than one communication pulse required for grid overlap communication along "
782                 "the major dimension while using threads");
783     }
784
785     snew(pme->bsp_mod[XX], pme->nkx);
786     snew(pme->bsp_mod[YY], pme->nky);
787     snew(pme->bsp_mod[ZZ], pme->nkz);
788
789     pme->gpu     = pmeGpu; /* Carrying over the single GPU structure */
790     pme->runMode = runMode;
791
792     /* The required size of the interpolation grid, including overlap.
793      * The allocated size (pmegrid_n?) might be slightly larger.
794      */
795     pme->pmegrid_nx = pme->overlap[0].s2g1[pme->nodeid_major] - pme->overlap[0].s2g0[pme->nodeid_major];
796     pme->pmegrid_ny = pme->overlap[1].s2g1[pme->nodeid_minor] - pme->overlap[1].s2g0[pme->nodeid_minor];
797     pme->pmegrid_nz_base = pme->nkz;
798     pme->pmegrid_nz      = pme->pmegrid_nz_base + pme->pme_order - 1;
799     set_grid_alignment(&pme->pmegrid_nz, pme->pme_order);
800     pme->pmegrid_start_ix = pme->overlap[0].s2g0[pme->nodeid_major];
801     pme->pmegrid_start_iy = pme->overlap[1].s2g0[pme->nodeid_minor];
802     pme->pmegrid_start_iz = 0;
803
804     make_gridindex_to_localindex(
805             pme->nkx, pme->pmegrid_start_ix, pme->pmegrid_nx - (pme->pme_order - 1), &pme->nnx, &pme->fshx);
806     make_gridindex_to_localindex(
807             pme->nky, pme->pmegrid_start_iy, pme->pmegrid_ny - (pme->pme_order - 1), &pme->nny, &pme->fshy);
808     make_gridindex_to_localindex(
809             pme->nkz, pme->pmegrid_start_iz, pme->pmegrid_nz_base, &pme->nnz, &pme->fshz);
810
811     pme->spline_work = make_pme_spline_work(pme->pme_order);
812
813     ndata[0] = pme->nkx;
814     ndata[1] = pme->nky;
815     ndata[2] = pme->nkz;
    /* It doesn't matter if ngrids is larger than strictly needed here,
     * since we only initialize and use the grids we need.
     */
819     if (pme->doLJ)
820     {
821         pme->ngrids = ((ir->ljpme_combination_rule == LongRangeVdW::LB) ? DO_Q_AND_LJ_LB : DO_Q_AND_LJ);
822     }
823     else
824     {
825         pme->ngrids = DO_Q;
826     }
827     snew(pme->fftgrid, pme->ngrids);
828     snew(pme->cfftgrid, pme->ngrids);
829     snew(pme->pfft_setup, pme->ngrids);
830
831     for (i = 0; i < pme->ngrids; ++i)
832     {
833         if ((i < DO_Q && pme->doCoulomb && (i == 0 || bFreeEnergy_q))
834             || (i >= DO_Q && pme->doLJ
835                 && (i == 2 || bFreeEnergy_lj || ir->ljpme_combination_rule == LongRangeVdW::LB)))
836         {
837             pmegrids_init(&pme->pmegrid[i],
838                           pme->pmegrid_nx,
839                           pme->pmegrid_ny,
840                           pme->pmegrid_nz,
841                           pme->pmegrid_nz_base,
842                           pme->pme_order,
843                           pme->bUseThreads,
844                           pme->nthread,
845                           pme->overlap[0].s2g1[pme->nodeid_major]
846                                   - pme->overlap[0].s2g0[pme->nodeid_major + 1],
847                           pme->overlap[1].s2g1[pme->nodeid_minor]
848                                   - pme->overlap[1].s2g0[pme->nodeid_minor + 1]);
849             /* This routine will allocate the grid data to fit the FFTs */
850             const auto allocateRealGridForGpu = (pme->runMode == PmeRunMode::Mixed)
851                                                         ? gmx::PinningPolicy::PinnedIfSupported
852                                                         : gmx::PinningPolicy::CannotBePinned;
853             gmx_parallel_3dfft_init(&pme->pfft_setup[i],
854                                     ndata,
855                                     &pme->fftgrid[i],
856                                     &pme->cfftgrid[i],
857                                     pme->mpi_comm_d,
858                                     bReproducible,
859                                     pme->nthread,
860                                     allocateRealGridForGpu);
861         }
862     }
863
864     if (!pme->bP3M)
865     {
866         /* Use plain SPME B-spline interpolation */
867         make_bspline_moduli(pme->bsp_mod, pme->nkx, pme->nky, pme->nkz, pme->pme_order);
868     }
869     else
870     {
871         /* Use the P3M grid-optimized influence function */
872         make_p3m_bspline_moduli(pme->bsp_mod, pme->nkx, pme->nky, pme->nkz, pme->pme_order);
873     }
874
875     /* Use atc[0] for spreading */
876     const int firstDimIndex   = (numPmeDomains.x > 1 ? 0 : 1);
877     MPI_Comm  mpiCommFirstDim = (pme->nnodes > 1 ? pme->mpi_comm_d[firstDimIndex] : MPI_COMM_NULL);
878     bool      doSpread        = true;
879     pme->atc.emplace_back(mpiCommFirstDim, pme->nthread, pme->pme_order, firstDimIndex, doSpread);
880     if (pme->ndecompdim >= 2)
881     {
882         const int secondDimIndex = 1;
883         doSpread                 = false;
884         pme->atc.emplace_back(pme->mpi_comm_d[1], pme->nthread, pme->pme_order, secondDimIndex, doSpread);
885     }
886
887     // Initial check of validity of the input for running on the GPU
888     if (pme->runMode != PmeRunMode::CPU)
889     {
890         std::string errorString;
891         bool        canRunOnGpu = pme_gpu_check_restrictions(pme.get(), &errorString);
892         if (!canRunOnGpu)
893         {
894             GMX_THROW(gmx::NotImplementedError(errorString));
895         }
896         pme_gpu_reinit(pme.get(), deviceContext, deviceStream, pmeGpuProgram);
897     }
898     else
899     {
900         GMX_ASSERT(pme->gpu == nullptr, "Should not have PME GPU object when PME is on a CPU.");
901     }
902
903
904     pme_init_all_work(&pme->solve_work, pme->nthread, pme->nkx);
905
906     // no exception was thrown during the init, so we hand over the PME structure handle
907     return pme.release();
908 }
909
910 void gmx_pme_reinit(struct gmx_pme_t** pmedata,
911                     const t_commrec*   cr,
912                     struct gmx_pme_t*  pme_src,
913                     const t_inputrec*  ir,
914                     const ivec         grid_size,
915                     real               ewaldcoeff_q,
916                     real               ewaldcoeff_lj)
917 {
918     // Create a copy of t_inputrec fields that are used in gmx_pme_init().
919     // TODO: This would be better as just copying a sub-structure that contains
920     // all the PME parameters and nothing else.
921     t_inputrec irc;
922     irc.pbcType                = ir->pbcType;
923     irc.coulombtype            = ir->coulombtype;
924     irc.vdwtype                = ir->vdwtype;
925     irc.efep                   = ir->efep;
926     irc.pme_order              = ir->pme_order;
927     irc.epsilon_r              = ir->epsilon_r;
928     irc.ljpme_combination_rule = ir->ljpme_combination_rule;
929     irc.nkx                    = grid_size[XX];
930     irc.nky                    = grid_size[YY];
931     irc.nkz                    = grid_size[ZZ];
932
933     try
934     {
935         // This is reinit. Any logging should have been done at first init.
936         // Here we should avoid writing notes for settings the user did not
937         // set directly.
938         const gmx::MDLogger dummyLogger;
939         GMX_ASSERT(pmedata, "Invalid PME pointer");
940         NumPmeDomains numPmeDomains = { pme_src->nnodes_major, pme_src->nnodes_minor };
941         *pmedata                    = gmx_pme_init(cr,
942                                 numPmeDomains,
943                                 &irc,
944                                 pme_src->bFEP_q,
945                                 pme_src->bFEP_lj,
946                                 FALSE,
947                                 ewaldcoeff_q,
948                                 ewaldcoeff_lj,
949                                 pme_src->nthread,
950                                 pme_src->runMode,
951                                 pme_src->gpu,
952                                 nullptr,
953                                 nullptr,
954                                 nullptr,
955                                 dummyLogger);
956         /* When running PME on the CPU not using domain decomposition,
957          * the atom data is allocated once only in gmx_pme_(re)init().
958          */
959         if (!pme_src->gpu && pme_src->nnodes == 1)
960         {
961             gmx_pme_reinit_atoms(*pmedata, pme_src->atc[0].numAtoms(), {}, {});
962         }
963         // TODO this is mostly passing around current values
964     }
965     GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
966
967     /* We can easily reuse the allocated pme grids in pme_src */
968     reuse_pmegrids(&pme_src->pmegrid[PME_GRID_QA], &(*pmedata)->pmegrid[PME_GRID_QA]);
969     /* We would like to reuse the fft grids, but that's harder */
970 }
971
972 real gmx_pme_calc_energy(gmx_pme_t* pme, gmx::ArrayRef<const gmx::RVec> x, gmx::ArrayRef<const real> q)
973 {
974     pmegrids_t* grid;
975
976     if (pme->nnodes > 1)
977     {
978         gmx_incons("gmx_pme_calc_energy called in parallel");
979     }
980     if (pme->bFEP_q)
981     {
982         gmx_incons("gmx_pme_calc_energy with free energy");
983     }
984
985     if (!pme->atc_energy)
986     {
987         pme->atc_energy = std::make_unique<PmeAtomComm>(MPI_COMM_NULL, 1, pme->pme_order, 0, true);
988     }
989     PmeAtomComm* atc = pme->atc_energy.get();
990     atc->setNumAtoms(x.ssize());
991     atc->x           = x;
992     atc->coefficient = q;
993
994     /* We only use the A-charges grid */
995     grid = &pme->pmegrid[PME_GRID_QA];
996
997     /* Only calculate the spline coefficients, don't actually spread */
998     spread_on_grid(pme, atc, nullptr, TRUE, FALSE, pme->fftgrid[PME_GRID_QA], FALSE, PME_GRID_QA);
999
1000     return gather_energy_bsplines(pme, grid->grid.grid, atc);
1001 }
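/* Minimal caller sketch (illustrative; assumes a serial, non-FEP pme handle
 * and hypothetical coordinates/charges):
 *
 *   std::vector<gmx::RVec> x = { { 0.0, 0.0, 0.0 }, { 0.3, 0.0, 0.0 } };
 *   std::vector<real>      q = { 1.0, -1.0 };
 *   const real eRecip = gmx_pme_calc_energy(pme, x, q);
 */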
1002
1003 /*! \brief Calculate initial Lorentz-Berthelot coefficients for LJ-PME */
1004 static void calc_initial_lb_coeffs(gmx::ArrayRef<real>       coefficient,
1005                                    gmx::ArrayRef<const real> local_c6,
1006                                    gmx::ArrayRef<const real> local_sigma)
1007 {
1008     for (gmx::index i = 0; i < coefficient.ssize(); ++i)
1009     {
1010         real sigma4    = local_sigma[i];
1011         sigma4         = sigma4 * sigma4;
1012         sigma4         = sigma4 * sigma4;
1013         coefficient[i] = local_c6[i] / sigma4;
1014     }
1015 }
1016
1017 /*! \brief Calculate next Lorentz-Berthelot coefficients for LJ-PME */
1018 static void calc_next_lb_coeffs(gmx::ArrayRef<real> coefficient, gmx::ArrayRef<const real> local_sigma)
1019 {
1020     for (gmx::index i = 0; i < coefficient.ssize(); ++i)
1021     {
1022         coefficient[i] *= local_sigma[i];
1023     }
1024 }
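/* Together these two helpers generate the seven LB terms used below: the
 * coefficient starts at c6 / sigma^4 and is multiplied by sigma before each
 * of the seven spreads, so term t (t = 1..7) spreads c6 * sigma^(t - 4),
 * i.e. exponents -3 through +3, matching the binomial expansion of the
 * Lorentz-Berthelot (sigma_i + sigma_j)^6 combination.
 */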
1025
1026 int gmx_pme_do(struct gmx_pme_t*              pme,
1027                gmx::ArrayRef<const gmx::RVec> coordinates,
1028                gmx::ArrayRef<gmx::RVec>       forces,
1029                gmx::ArrayRef<const real>      chargeA,
1030                gmx::ArrayRef<const real>      chargeB,
1031                gmx::ArrayRef<const real>      c6A,
1032                gmx::ArrayRef<const real>      c6B,
1033                gmx::ArrayRef<const real>      sigmaA,
1034                gmx::ArrayRef<const real>      sigmaB,
1035                const matrix                   box,
1036                const t_commrec*               cr,
1037                int                            maxshift_x,
1038                int                            maxshift_y,
1039                t_nrnb*                        nrnb,
1040                gmx_wallcycle*                 wcycle,
1041                matrix                         vir_q,
1042                matrix                         vir_lj,
1043                real*                          energy_q,
1044                real*                          energy_lj,
1045                real                           lambda_q,
1046                real                           lambda_lj,
1047                real*                          dvdlambda_q,
1048                real*                          dvdlambda_lj,
1049                const gmx::StepWorkload&       stepWork)
1050 {
1051     GMX_ASSERT(pme->runMode == PmeRunMode::CPU,
1052                "gmx_pme_do should not be called on the GPU PME run.");
1053
1054     PmeAtomComm&              atc     = pme->atc[0];
1055     pmegrids_t*               pmegrid = nullptr;
1056     real*                     grid    = nullptr;
1057     gmx::ArrayRef<const real> coefficient;
1058     std::array<PmeOutput, 2>  output; // The second is used for the B state with FEP
1059     gmx_parallel_3dfft_t      pfft_setup;
1060     real*                     fftgrid;
1061     t_complex*                cfftgrid;
1062     int                       thread;
1063     const int                 fep_states_lj = pme->bFEP_lj ? 2 : 1;
1064     // There's no support for computing energy without virial, or vice versa
1065     const bool computeEnergyAndVirial = (stepWork.computeEnergy || stepWork.computeVirial);
1066
1067     /* We could be passing lambda!=0 while no q or LJ is actually perturbed */
1068     if (!pme->bFEP_q)
1069     {
1070         lambda_q = 0;
1071     }
1072     if (!pme->bFEP_lj)
1073     {
1074         lambda_lj = 0;
1075     }
1076
1077     assert(pme->nnodes > 0);
1078     assert(pme->nnodes == 1 || pme->ndecompdim > 0);
1079
1080     if (pme->nnodes > 1)
1081     {
1082         atc.pd.resize(coordinates.ssize());
1083         for (int d = pme->ndecompdim - 1; d >= 0; d--)
1084         {
1085             PmeAtomComm& atc = pme->atc[d];
1086             atc.maxshift     = (atc.dimind == 0 ? maxshift_x : maxshift_y);
1087         }
1088     }
1089     else
1090     {
1091         GMX_ASSERT(coordinates.ssize() == atc.numAtoms(), "We expect atc.numAtoms() coordinates");
1092         GMX_ASSERT(forces.ssize() >= atc.numAtoms(),
1093                    "We need a force buffer with at least atc.numAtoms() elements");
1094
1095         atc.x = coordinates;
1096         atc.f = forces;
1097     }
1098
1099     matrix scaledBox;
1100     pme->boxScaler->scaleBox(box, scaledBox);
1101
1102     gmx::invertBoxMatrix(scaledBox, pme->recipbox);
1103     bool bFirst = true;
1104
    /* For simplicity, we construct the splines for all particles if
     * more than one PME calculation is needed. Some optimization
     * could be done by keeping track of which atoms have splines
     * constructed, and constructing new splines on each pass for atoms
     * that don't yet have them.
     */
1111
1112     bool bDoSplines = pme->bFEP || (pme->doCoulomb && pme->doLJ);
1113
1114     /* We need a maximum of four separate PME calculations:
1115      * grid_index=0: Coulomb PME with charges from state A
1116      * grid_index=1: Coulomb PME with charges from state B
1117      * grid_index=2: LJ PME with C6 from state A
1118      * grid_index=3: LJ PME with C6 from state B
1119      * For Lorentz-Berthelot combination rules, a separate loop is used to
1120      * calculate all the terms
1121      */
1122
1123     /* If we are doing LJ-PME with LB, we only do Q here */
1124     const int max_grid_index = (pme->ljpme_combination_rule == LongRangeVdW::LB) ? DO_Q : DO_Q_AND_LJ;
1125     bool      bClearF;
1126     for (int grid_index = 0; grid_index < max_grid_index; ++grid_index)
1127     {
1128         /* Check if we should do calculations at this grid_index
1129          * If grid_index is odd we should be doing FEP
1130          * If grid_index < 2 we should be doing electrostatic PME
1131          * If grid_index >= 2 we should be doing LJ-PME
1132          */
1133         if ((grid_index < DO_Q && (!pme->doCoulomb || (grid_index == 1 && !pme->bFEP_q)))
1134             || (grid_index >= DO_Q && (!pme->doLJ || (grid_index == 3 && !pme->bFEP_lj))))
1135         {
1136             continue;
1137         }
1138         /* Unpack structure */
1139         pmegrid    = &pme->pmegrid[grid_index];
1140         fftgrid    = pme->fftgrid[grid_index];
1141         cfftgrid   = pme->cfftgrid[grid_index];
1142         pfft_setup = pme->pfft_setup[grid_index];
1143         switch (grid_index)
1144         {
1145             case 0: coefficient = chargeA; break;
1146             case 1: coefficient = chargeB; break;
1147             case 2: coefficient = c6A; break;
1148             case 3: coefficient = c6B; break;
1149         }
1150
1151         grid = pmegrid->grid.grid;
1152
1153         if (pme->nnodes == 1)
1154         {
1155             atc.coefficient = coefficient;
1156         }
1157         else
1158         {
1159             wallcycle_start(wcycle, WallCycleCounter::PmeRedistXF);
1160             do_redist_pos_coeffs(pme, cr, bFirst, coordinates, coefficient);
1161
1162             wallcycle_stop(wcycle, WallCycleCounter::PmeRedistXF);
1163         }
1164
1165         wallcycle_start(wcycle, WallCycleCounter::PmeSpread);
1166
1167         /* Spread the coefficients on a grid */
1168         spread_on_grid(pme, &atc, pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);
1169
1170         if (bFirst)
1171         {
1172             inc_nrnb(nrnb, eNR_WEIGHTS, DIM * atc.numAtoms());
1173         }
1174         inc_nrnb(nrnb, eNR_SPREADBSP, pme->pme_order * pme->pme_order * pme->pme_order * atc.numAtoms());
1175
1176         if (!pme->bUseThreads)
1177         {
1178             wrap_periodic_pmegrid(pme, grid);
1179
1180             /* sum contributions to local grid from other nodes */
1181             if (pme->nnodes > 1)
1182             {
1183                 gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
1184             }
1185
1186             copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
1187         }
1188
1189         wallcycle_stop(wcycle, WallCycleCounter::PmeSpread);
1190
1191         /* TODO If the OpenMP and single-threaded implementations
1192            converge, then spread_on_grid() and
1193            copy_pmegrid_to_fftgrid() will perhaps live in the same
1194            source file.
1195         */
1196
1197         /* Here we start a large thread parallel region */
1198 #pragma omp parallel num_threads(pme->nthread) private(thread)
1199         {
1200             try
1201             {
1202                 thread = gmx_omp_get_thread_num();
1203                 int loop_count;
1204
1205                 /* do 3d-fft */
1206                 if (thread == 0)
1207                 {
1208                     wallcycle_start(wcycle, WallCycleCounter::PmeFft);
1209                 }
1210                 gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX, thread, wcycle);
1211                 if (thread == 0)
1212                 {
1213                     wallcycle_stop(wcycle, WallCycleCounter::PmeFft);
1214                 }
1215
1216                 /* solve in k-space for our local cells */
1217                 if (thread == 0)
1218                 {
1219                     wallcycle_start(
1220                             wcycle,
1221                             (grid_index < DO_Q ? WallCycleCounter::PmeSolve : WallCycleCounter::LJPme));
1222                 }
1223                 if (grid_index < DO_Q)
1224                 {
1225                     loop_count = solve_pme_yzx(pme,
1226                                                cfftgrid,
1227                                                scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
1228                                                computeEnergyAndVirial,
1229                                                pme->nthread,
1230                                                thread);
1231                 }
1232                 else
1233                 {
1234                     loop_count =
1235                             solve_pme_lj_yzx(pme,
1236                                              &cfftgrid,
1237                                              FALSE,
1238                                              scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
1239                                              computeEnergyAndVirial,
1240                                              pme->nthread,
1241                                              thread);
1242                 }
1243
1244                 if (thread == 0)
1245                 {
1246                     wallcycle_stop(
1247                             wcycle,
1248                             (grid_index < DO_Q ? WallCycleCounter::PmeSolve : WallCycleCounter::LJPme));
1249                     inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
1250                 }
1251
1252                 /* do 3d-invfft */
1253                 if (thread == 0)
1254                 {
1255                     wallcycle_start(wcycle, WallCycleCounter::PmeFft);
1256                 }
1257                 gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL, thread, wcycle);
1258                 if (thread == 0)
1259                 {
1260                     wallcycle_stop(wcycle, WallCycleCounter::PmeFft);
1261
1262
1263                     if (pme->nodeid == 0)
1264                     {
1265                         real      ntot = pme->nkx * pme->nky * pme->nkz;
1266                         const int npme = static_cast<int>(ntot * std::log(ntot) / std::log(2.0));
1267                         inc_nrnb(nrnb, eNR_FFT, 2 * npme);
1268                     }
1269
1270                     /* Note: this wallcycle region is closed below
1271                        outside an OpenMP region, so take care if
1272                        refactoring code here. */
1273                     wallcycle_start(wcycle, WallCycleCounter::PmeGather);
1274                 }
1275
1276                 copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
1277             }
1278             GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
1279         }
1280         /* End of thread parallel section.
1281          * With MPI we have to synchronize here before gmx_sum_qgrid_dd.
1282          */
1283
1284         /* distribute local grid to all nodes */
1285         if (pme->nnodes > 1)
1286         {
1287             gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
1288         }
1289
1290         unwrap_periodic_pmegrid(pme, grid);
1291
1292         if (stepWork.computeForces)
1293         {
1294             /* interpolate forces for our local atoms */
1295
1296
            /* If we are running without parallelization,
             * atc->f is the actual force array, not a buffer;
             * therefore we should not clear it.
             */
1301             real lambda = grid_index < DO_Q ? lambda_q : lambda_lj;
1302             bClearF     = (bFirst && PAR(cr));
1303 #pragma omp parallel for num_threads(pme->nthread) schedule(static)
1304             for (thread = 0; thread < pme->nthread; thread++)
1305             {
1306                 try
1307                 {
1308                     gather_f_bsplines(pme,
1309                                       grid,
1310                                       bClearF,
1311                                       &atc,
1312                                       &atc.spline[thread],
1313                                       pme->bFEP ? (grid_index % 2 == 0 ? 1.0 - lambda : lambda) : 1.0);
1314                 }
1315                 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
1316             }
1317
1318
1319             inc_nrnb(nrnb, eNR_GATHERFBSP, pme->pme_order * pme->pme_order * pme->pme_order * atc.numAtoms());
1320             /* Note: this wallcycle region is opened above inside an OpenMP
1321                region, so take care if refactoring code here. */
1322             wallcycle_stop(wcycle, WallCycleCounter::PmeGather);
1323         }
1324
1325         if (computeEnergyAndVirial)
1326         {
1327             /* This should only be called on the master thread
1328              * and after the threads have synchronized.
1329              */
1330             if (grid_index < 2)
1331             {
1332                 get_pme_ener_vir_q(pme->solve_work, pme->nthread, &output[grid_index % 2]);
1333             }
1334             else
1335             {
1336                 get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &output[grid_index % 2]);
1337             }
1338         }
1339         bFirst = FALSE;
1340     } /* of grid_index-loop */
1341
1342     /* For Lorentz-Berthelot combination rules in LJ-PME, we need to calculate
1343      * seven terms. */
1344
1345     if (pme->doLJ && pme->ljpme_combination_rule == LongRangeVdW::LB)
1346     {
1347         /* Loop over A- and B-state if we are doing FEP */
1348         for (int fep_state = 0; fep_state < fep_states_lj; ++fep_state)
1349         {
1350             std::vector<real>         local_c6;
1351             std::vector<real>         local_sigma;
1352             gmx::ArrayRef<const real> RedistC6;
1353             gmx::ArrayRef<const real> RedistSigma;
1354             gmx::ArrayRef<real>       coefficientBuffer;
1355             if (pme->nnodes == 1)
1356             {
1357                 pme->lb_buf1.resize(atc.numAtoms());
1358                 coefficientBuffer = pme->lb_buf1;
1359                 switch (fep_state)
1360                 {
1361                     case 0:
1362                         local_c6.assign(c6A.begin(), c6A.end());
1363                         local_sigma.assign(sigmaA.begin(), sigmaA.end());
1364                         break;
1365                     case 1:
1366                         local_c6.assign(c6B.begin(), c6B.end());
1367                         local_sigma.assign(sigmaB.begin(), sigmaB.end());
1368                         break;
1369                     default: gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
1370                 }
1371             }
1372             else
1373             {
1374                 coefficientBuffer = atc.coefficientBuffer;
1375                 switch (fep_state)
1376                 {
1377                     case 0:
1378                         RedistC6    = c6A;
1379                         RedistSigma = sigmaA;
1380                         break;
1381                     case 1:
1382                         RedistC6    = c6B;
1383                         RedistSigma = sigmaB;
1384                         break;
1385                     default: gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
1386                 }
1387                 wallcycle_start(wcycle, WallCycleCounter::PmeRedistXF);
1388
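                 /* Each do_redist_pos_coeffs() call scatters one coefficient
                  * array onto the ranks that own the atoms; the result lands
                  * in atc.coefficient, so it is copied into a local buffer
                  * before the next call overwrites it. The assign() from
                  * lb_buf1/lb_buf2 merely sizes those local vectors.
                  */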
1389                 do_redist_pos_coeffs(pme, cr, bFirst, coordinates, RedistC6);
1390                 pme->lb_buf1.resize(atc.numAtoms());
1391                 pme->lb_buf2.resize(atc.numAtoms());
1392                 local_c6.assign(pme->lb_buf1.begin(), pme->lb_buf1.end());
1393                 for (int i = 0; i < atc.numAtoms(); ++i)
1394                 {
1395                     local_c6[i] = atc.coefficient[i];
1396                 }
1397
1398                 do_redist_pos_coeffs(pme, cr, FALSE, coordinates, RedistSigma);
1399                 local_sigma.assign(pme->lb_buf2.begin(), pme->lb_buf2.end());
1400                 for (int i = 0; i < atc.numAtoms(); ++i)
1401                 {
1402                     local_sigma[i] = atc.coefficient[i];
1403                 }
1404
1405                 wallcycle_stop(wcycle, WallCycleCounter::PmeRedistXF);
1406             }
1407             atc.coefficient = coefficientBuffer;
1408             calc_initial_lb_coeffs(coefficientBuffer, local_c6, local_sigma);
1409
1410             /* Seven terms in LJ-PME with LB; grid_index < 2 is reserved for electrostatics */
1411             for (int grid_index = 2; grid_index < 9; ++grid_index)
1412             {
1413                 /* Unpack structure */
1414                 pmegrid    = &pme->pmegrid[grid_index];
1415                 fftgrid    = pme->fftgrid[grid_index];
1416                 pfft_setup = pme->pfft_setup[grid_index];
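                 /* Each calc_next_lb_coeffs() call multiplies the per-atom
                  * coefficient by its sigma, stepping to the next term of the
                  * binomial expansion; the LJ solve below then combines the
                  * seven grids in reciprocal space.
                  */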
1417                 calc_next_lb_coeffs(coefficientBuffer, local_sigma);
1418                 grid = pmegrid->grid.grid;
1419
1420                 wallcycle_start(wcycle, WallCycleCounter::PmeSpread);
1421                 /* Spread the c6 on a grid */
1422                 spread_on_grid(pme, &atc, pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);
1423
1424                 if (bFirst)
1425                 {
1426                     inc_nrnb(nrnb, eNR_WEIGHTS, DIM * atc.numAtoms());
1427                 }
1428
1429                 inc_nrnb(nrnb,
1430                          eNR_SPREADBSP,
1431                          pme->pme_order * pme->pme_order * pme->pme_order * atc.numAtoms());
1432                 if (pme->nthread == 1)
1433                 {
1434                     wrap_periodic_pmegrid(pme, grid);
1435                     /* sum contributions to local grid from other nodes */
1436                     if (pme->nnodes > 1)
1437                     {
1438                         gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
1439                     }
1440                     copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
1441                 }
1442                 wallcycle_stop(wcycle, WallCycleCounter::PmeSpread);
1443
1444                 /* Here we start a large thread-parallel region */
1445 #pragma omp parallel num_threads(pme->nthread) private(thread)
1446                 {
1447                     try
1448                     {
1449                         thread = gmx_omp_get_thread_num();
1450                         /* do 3d-fft */
1451                         if (thread == 0)
1452                         {
1453                             wallcycle_start(wcycle, WallCycleCounter::PmeFft);
1454                         }
1455
1456                         gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX, thread, wcycle);
1457                         if (thread == 0)
1458                         {
1459                             wallcycle_stop(wcycle, WallCycleCounter::PmeFft);
1460                         }
1461                     }
1462                     GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
1463                 }
1464                 bFirst = FALSE;
1465             }
1466             /* solve in k-space for our local cells */
1467 #pragma omp parallel num_threads(pme->nthread) private(thread)
1468             {
1469                 try
1470                 {
1471                     int loop_count;
1472                     thread = gmx_omp_get_thread_num();
1473                     if (thread == 0)
1474                     {
1475                         wallcycle_start(wcycle, WallCycleCounter::LJPme);
1476                     }
1477
1478                     loop_count =
1479                             solve_pme_lj_yzx(pme,
1480                                              &pme->cfftgrid[2],
1481                                              TRUE,
1482                                              scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
1483                                              computeEnergyAndVirial,
1484                                              pme->nthread,
1485                                              thread);
1486                     if (thread == 0)
1487                     {
1488                         wallcycle_stop(wcycle, WallCycleCounter::LJPme);
1489                         inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
1490                     }
1491                 }
1492                 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
1493             }
1494
1495             if (computeEnergyAndVirial)
1496             {
1497                 /* This should only be called on the master thread and
1498                  * after the threads have synchronized.
1499                  */
1500                 get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &output[fep_state]);
1501             }
1502
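             /* The gather below only needs to clear the force buffers if the
              * Coulomb grids have not already accumulated into them.
              */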
1503             bFirst = !pme->doCoulomb;
1504             calc_initial_lb_coeffs(coefficientBuffer, local_c6, local_sigma);
1505             for (int grid_index = 8; grid_index >= 2; --grid_index)
1506             {
1507                 /* Unpack structure */
1508                 pmegrid    = &pme->pmegrid[grid_index];
1509                 fftgrid    = pme->fftgrid[grid_index];
1510                 pfft_setup = pme->pfft_setup[grid_index];
1511                 grid       = pmegrid->grid.grid;
1512                 calc_next_lb_coeffs(coefficientBuffer, local_sigma);
1513 #pragma omp parallel num_threads(pme->nthread) private(thread)
1514                 {
1515                     try
1516                     {
1517                         thread = gmx_omp_get_thread_num();
1518                         /* do 3d-invfft */
1519                         if (thread == 0)
1520                         {
1521                             wallcycle_start(wcycle, WallCycleCounter::PmeFft);
1522                         }
1523
1524                         gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL, thread, wcycle);
1525                         if (thread == 0)
1526                         {
1527                             wallcycle_stop(wcycle, WallCycleCounter::PmeFft);
1528
1529
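                             /* Rough operation-count accounting: a 3D FFT over
                              * N = nkx*nky*nkz points costs on the order of
                              * N*log2(N) operations; the factor of 2 below
                              * presumably covers the forward and the inverse
                              * transform of this grid.
                              */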
1530                             if (pme->nodeid == 0)
1531                             {
1532                                 real      ntot = pme->nkx * pme->nky * pme->nkz;
1533                                 const int npme = static_cast<int>(ntot * std::log(ntot) / std::log(2.0));
1534                                 inc_nrnb(nrnb, eNR_FFT, 2 * npme);
1535                             }
1536                             wallcycle_start(wcycle, WallCycleCounter::PmeGather);
1537                         }
1538
1539                         copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
1540                     }
1541                     GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
1542                 } /* #pragma omp parallel */
1543
1544                 /* distribute local grid to all nodes */
1545                 if (pme->nnodes > 1)
1546                 {
1547                     gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
1548                 }
1549
1550                 unwrap_periodic_pmegrid(pme, grid);
1551
1552                 if (stepWork.computeForces)
1553                 {
1554                     /* interpolate forces for our local atoms */
1555                     bClearF    = (bFirst && PAR(cr));
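                     /* lb_scale_factor[] holds the binomial weights
                      * binom(6,k)/2^6 = {1, 6, 15, 20, 15, 6, 1}/64 from the
                      * expansion of ((sigma_i + sigma_j)/2)^6, one entry per
                      * LJ grid.
                      */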
1556                     real scale = pme->bFEP ? (fep_state < 1 ? 1.0 - lambda_lj : lambda_lj) : 1.0;
1557                     scale *= lb_scale_factor[grid_index - 2];
1558
1559 #pragma omp parallel for num_threads(pme->nthread) schedule(static)
1560                     for (thread = 0; thread < pme->nthread; thread++)
1561                     {
1562                         try
1563                         {
1564                             gather_f_bsplines(
1565                                     pme, grid, bClearF, &pme->atc[0], &pme->atc[0].spline[thread], scale);
1566                         }
1567                         GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
1568                     }
1569
1570
1571                     inc_nrnb(nrnb,
1572                              eNR_GATHERFBSP,
1573                              pme->pme_order * pme->pme_order * pme->pme_order * pme->atc[0].numAtoms());
1574                 }
1575                 wallcycle_stop(wcycle, WallCycleCounter::PmeGather);
1576
1577                 bFirst = FALSE;
1578             } /* for (grid_index = 8; grid_index >= 2; --grid_index) */
1579         }     /* for (fep_state = 0; fep_state < fep_states_lj; ++fep_state) */
1580     }         /* if (pme->doLJ && pme->ljpme_combination_rule == LongRangeVdW::LB) */
1581
1582     if (stepWork.computeForces && pme->nnodes > 1)
1583     {
1584         wallcycle_start(wcycle, WallCycleCounter::PmeRedistXF);
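         /* Each pass reduces the forces over one decomposition dimension.
          * Intermediate passes stage into the next atom-communication
          * struct's buffer; only the last pass (d == ndecompdim - 1) writes
          * into the caller's force array.
          */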
1585         for (int d = 0; d < pme->ndecompdim; d++)
1586         {
1587             gmx::ArrayRef<gmx::RVec> forcesRef;
1588             if (d == pme->ndecompdim - 1)
1589             {
1590                 const size_t numAtoms = coordinates.size();
1591                 GMX_ASSERT(forces.size() >= numAtoms, "Need at least numAtoms forces");
1592                 forcesRef = forces.subArray(0, numAtoms);
1593             }
1594             else
1595             {
1596                 forcesRef = pme->atc[d + 1].f;
1597             }
1598             if (haveDDAtomOrdering(*cr))
1599             {
1600                 dd_pmeredist_f(pme, &pme->atc[d], forcesRef, d == pme->ndecompdim - 1 && pme->bPPnode);
1601             }
1602         }
1603
1604         wallcycle_stop(wcycle, WallCycleCounter::PmeRedistXF);
1605     }
1606
1607     if (computeEnergyAndVirial)
1608     {
1609         if (pme->doCoulomb)
1610         {
1611             if (!pme->bFEP_q)
1612             {
1613                 *energy_q = output[0].coulombEnergy_;
1614                 m_add(vir_q, output[0].coulombVirial_, vir_q);
1615             }
1616             else
1617             {
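                 /* Linear coupling of the A and B states:
                  *   E(lambda) = (1 - lambda)*E_A + lambda*E_B,
                  * so dE/dlambda = E_B - E_A is accumulated into dvdlambda_q.
                  */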
1618                 *energy_q = (1.0 - lambda_q) * output[0].coulombEnergy_ + lambda_q * output[1].coulombEnergy_;
1619                 *dvdlambda_q += output[1].coulombEnergy_ - output[0].coulombEnergy_;
1620                 for (int i = 0; i < DIM; i++)
1621                 {
1622                     for (int j = 0; j < DIM; j++)
1623                     {
1624                         vir_q[i][j] += (1.0 - lambda_q) * output[0].coulombVirial_[i][j]
1625                                        + lambda_q * output[1].coulombVirial_[i][j];
1626                     }
1627                 }
1628             }
1629         }
1630         else
1631         {
1632             *energy_q = 0;
1633         }
1634
1635         if (pme->doLJ)
1636         {
1637             if (!pme->bFEP_lj)
1638             {
1639                 *energy_lj = output[0].lennardJonesEnergy_;
1640                 m_add(vir_lj, output[0].lennardJonesVirial_, vir_lj);
1641             }
1642             else
1643             {
1644                 *energy_lj = (1.0 - lambda_lj) * output[0].lennardJonesEnergy_
1645                              + lambda_lj * output[1].lennardJonesEnergy_;
1646                 *dvdlambda_lj += output[1].lennardJonesEnergy_ - output[0].lennardJonesEnergy_;
1647                 for (int i = 0; i < DIM; i++)
1648                 {
1649                     for (int j = 0; j < DIM; j++)
1650                     {
1651                         vir_lj[i][j] += (1.0 - lambda_lj) * output[0].lennardJonesVirial_[i][j]
1652                                         + lambda_lj * output[1].lennardJonesVirial_[i][j];
1653                     }
1654                 }
1655             }
1656         }
1657         else
1658         {
1659             *energy_lj = 0;
1660         }
1661     }
1662     return 0;
1663 }
1664
1665 void gmx_pme_destroy(gmx_pme_t* pme)
1666 {
1667     if (!pme)
1668     {
1669         return;
1670     }
1671
1672     sfree(pme->nnx);
1673     sfree(pme->nny);
1674     sfree(pme->nnz);
1675     sfree(pme->fshx);
1676     sfree(pme->fshy);
1677     sfree(pme->fshz);
1678
1679     for (int i = 0; i < pme->ngrids; ++i)
1680     {
1681         pmegrids_destroy(&pme->pmegrid[i]);
1682     }
1683     if (pme->pfft_setup)
1684     {
1685         for (int i = 0; i < pme->ngrids; ++i)
1686         {
1687             gmx_parallel_3dfft_destroy(pme->pfft_setup[i]);
1688         }
1689     }
1690     sfree(pme->fftgrid);
1691     sfree(pme->cfftgrid);
1692     sfree(pme->pfft_setup);
1693
1694     for (int i = 0; i < DIM; i++)
1695     {
1696         sfree(pme->bsp_mod[i]);
1697     }
1698
1699     sfree(pme->bufv);
1700     sfree(pme->bufr);
1701
1702     if (pme->solve_work)
1703     {
1704         pme_free_all_work(&pme->solve_work, pme->nthread);
1705     }
1706
1707     destroy_pme_spline_work(pme->spline_work);
1708
1709     if (pme->gpu != nullptr)
1710     {
1711         pme_gpu_destroy(pme->gpu);
1712     }
1713
1714     delete pme;
1715 }
1716
1717 void gmx_pme_reinit_atoms(gmx_pme_t*                pme,
1718                           const int                 numAtoms,
1719                           gmx::ArrayRef<const real> chargesA,
1720                           gmx::ArrayRef<const real> chargesB)
1721 {
1722     if (pme->gpu != nullptr)
1723     {
1724         GMX_ASSERT(!(pme->bFEP_q && chargesB.empty()),
1725                    "B state charges must be specified if running Coulomb FEP on the GPU");
1726         pme_gpu_reinit_atoms(pme->gpu, numAtoms, chargesA.data(), pme->bFEP_q ? chargesB.data() : nullptr);
1727     }
1728     else
1729     {
1730         pme->atc[0].setNumAtoms(numAtoms);
1731         // TODO: set the charges here as well
1732     }
1733 }
1734
1735 bool gmx_pme_grid_matches(const gmx_pme_t& pme, const ivec grid_size)
1736 {
1737     return (pme.nkx == grid_size[XX] && pme.nky == grid_size[YY] && pme.nkz == grid_size[ZZ]);
1738 }
1739
1740 void gmx::SeparatePmeRanksPermitted::disablePmeRanks(const std::string& reason)
1741 {
1742     permitSeparatePmeRanks_ = false;
1743
1744     if (!reason.empty())
1745     {
1746         reasons_.push_back(reason);
1747     }
1748 }
1749
1750 bool gmx::SeparatePmeRanksPermitted::permitSeparatePmeRanks() const
1751 {
1752     return permitSeparatePmeRanks_;
1753 }
1754
1755 std::string gmx::SeparatePmeRanksPermitted::reasonsWhyDisabled() const
1756 {
1757     return joinStrings(reasons_, "; ");
1758 }
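
/* Hypothetical usage sketch for SeparatePmeRanksPermitted (not part of the
 * build; the reason strings are made up): callers collect every applicable
 * restriction and query the result afterwards, e.g.
 *
 *   gmx::SeparatePmeRanksPermitted separatePmeRanksPermitted;
 *   separatePmeRanksPermitted.disablePmeRanks("PME can only run on the CPU");
 *   separatePmeRanksPermitted.disablePmeRanks("there are too few total ranks");
 *   if (!separatePmeRanksPermitted.permitSeparatePmeRanks())
 *   {
 *       // reasonsWhyDisabled() joins the accumulated reasons with "; "
 *       fprintf(stderr, "%s\n", separatePmeRanksPermitted.reasonsWhyDisabled().c_str());
 *   }
 */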