Remove forcerec and inputrec from ewald_LRcorrection
src/gromacs/ewald/pme.cpp
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017 The GROMACS development team.
 * Copyright (c) 2018,2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief This file contains function definitions necessary for
 * computing energies and forces for the PME long-ranged part (Coulomb
 * and LJ).
 *
 * \author Erik Lindahl <erik@kth.se>
 * \author Berk Hess <hess@kth.se>
 * \ingroup module_ewald
 */
/* IMPORTANT FOR DEVELOPERS:
 *
 * Triclinic pme stuff isn't entirely trivial, and we've experienced
 * some bugs during development (many of them due to me). To avoid
 * this in the future, please check the following things if you make
 * changes in this file:
 *
 * 1. You should obtain identical (at least to the PME precision)
 *    energies, forces, and virial for
 *    a rectangular box and a triclinic one where the z (or y) axis is
 *    tilted a whole box side. For instance you could use these boxes:
 *
 *    rectangular       triclinic
 *     2  0  0           2  0  0
 *     0  2  0           0  2  0
 *     0  0  6           2  2  6
 *
 * 2. You should check the energy conservation in a triclinic box.
 *
 * It might seem like overkill, but better safe than sorry.
 * /Erik 001109
 */

#include "gmxpre.h"

#include "pme.h"

#include "config.h"

#include <cassert>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>

#include <algorithm>
#include <list>

#include "gromacs/domdec/domdec.h"
#include "gromacs/ewald/ewald_utils.h"
#include "gromacs/fft/parallel_3dfft.h"
#include "gromacs/fileio/pdbio.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/hardware/hw_info.h"
#include "gromacs/math/gmxcomplex.h"
#include "gromacs/math/invertmatrix.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/forcerec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/real.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/stringutil.h"
#include "gromacs/utility/unique_cptr.h"

#include "calculate_spline_moduli.h"
#include "pme_gather.h"
#include "pme_gpu_internal.h"
#include "pme_grid.h"
#include "pme_internal.h"
#include "pme_redistribute.h"
#include "pme_solve.h"
#include "pme_spread.h"

/*! \brief Help build a descriptive message in \c error if there are
 * \c errorReasons why PME on GPU is not supported.
 *
 * \returns Whether the lack of errorReasons indicates there is support. */
static bool addMessageIfNotSupported(const std::list<std::string>& errorReasons, std::string* error)
{
    bool isSupported = errorReasons.empty();
    if (!isSupported && error)
    {
        std::string regressionTestMarker = "PME GPU does not support";
        // this prefix is tested for in the regression tests script gmxtest.pl
        *error = regressionTestMarker;
        if (errorReasons.size() == 1)
        {
            *error += " " + errorReasons.back();
        }
        else
        {
            *error += ": " + gmx::joinStrings(errorReasons, "; ");
        }
        *error += ".";
    }
    return isSupported;
}
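
/* Worked example (editorial illustration, not part of the build): with a
 * single reason the message built above reads, e.g.,
 *     "PME GPU does not support a double-precision build."
 * while several reasons are joined into
 *     "PME GPU does not support: a double-precision build; a non-GPU build."
 */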

bool pme_gpu_supports_build(std::string* error)
{
    std::list<std::string> errorReasons;
    if (GMX_DOUBLE)
    {
        errorReasons.emplace_back("a double-precision build");
    }
    if (!GMX_GPU)
    {
        errorReasons.emplace_back("a non-GPU build");
    }
    if (GMX_GPU_SYCL)
    {
        errorReasons.emplace_back("a SYCL build"); // SYCL-TODO
    }
    return addMessageIfNotSupported(errorReasons, error);
}

bool pme_gpu_supports_hardware(const gmx_hw_info_t gmx_unused& hwinfo, std::string* error)
{
    std::list<std::string> errorReasons;

    if (GMX_GPU_OPENCL)
    {
#ifdef __APPLE__
        errorReasons.emplace_back("Apple OS X operating system");
#endif
    }
    return addMessageIfNotSupported(errorReasons, error);
}

bool pme_gpu_supports_input(const t_inputrec& ir, std::string* error)
{
    std::list<std::string> errorReasons;
    if (!EEL_PME(ir.coulombtype))
    {
        errorReasons.emplace_back("systems that do not use PME for electrostatics");
    }
    if (ir.pme_order != 4)
    {
        errorReasons.emplace_back("interpolation orders other than 4");
    }
    if (EVDW_PME(ir.vdwtype))
    {
        errorReasons.emplace_back("Lennard-Jones PME");
    }
    if (!EI_DYNAMICS(ir.eI))
    {
        errorReasons.emplace_back("a non-dynamical integrator (PME GPU requires md, sd, etc.)");
    }
    return addMessageIfNotSupported(errorReasons, error);
}

/*! \brief \libinternal
 * Determines whether PME with the given inputs can run on a GPU.
 * This function is an internal final check, validating the whole PME structure on creation,
 * but it still duplicates the preliminary checks of the (externally exposed) pme_gpu_supports_input() - just in case.
 *
 * \param[in]  pme          The PME structure.
 * \param[out] error        The error message if the input is not supported on GPU.
 * \returns                 True if this PME input is possible to run on GPU, false otherwise.
 */
static bool pme_gpu_check_restrictions(const gmx_pme_t* pme, std::string* error)
{
    std::list<std::string> errorReasons;
    if (pme->nnodes != 1)
    {
        errorReasons.emplace_back("PME decomposition");
    }
    if (pme->pme_order != 4)
    {
        errorReasons.emplace_back("interpolation orders other than 4");
    }
    if (pme->doLJ)
    {
        errorReasons.emplace_back("Lennard-Jones PME");
    }
    if (GMX_DOUBLE)
    {
        errorReasons.emplace_back("double precision");
    }
    if (!GMX_GPU)
    {
        errorReasons.emplace_back("non-GPU build of GROMACS");
    }
    if (GMX_GPU_SYCL)
    {
        errorReasons.emplace_back("SYCL build of GROMACS"); // SYCL-TODO
    }
    return addMessageIfNotSupported(errorReasons, error);
}

PmeRunMode pme_run_mode(const gmx_pme_t* pme)
{
    GMX_ASSERT(pme != nullptr, "Expecting valid PME data pointer");
    return pme->runMode;
}

gmx::PinningPolicy pme_get_pinning_policy()
{
    return gmx::PinningPolicy::PinnedIfSupported;
}

/*! \brief Number of bytes in a cache line.
 *
 * Must also be a multiple of the SIMD and SIMD4 register size, to
 * preserve alignment.
 */
const int gmxCacheLineSize = 64;

//! Set up coordinate communication
static void setup_coordinate_communication(PmeAtomComm* atc)
{
    const int nslab = atc->nslab;

    int n = 0;
    for (int i = 1; i <= nslab / 2; i++)
    {
        const int fw = (atc->nodeid + i) % nslab;
        const int bw = (atc->nodeid - i + nslab) % nslab;
        if (n < nslab - 1)
        {
            atc->slabCommSetup[n].node_dest = fw;
            atc->slabCommSetup[n].node_src  = bw;
            n++;
        }
        if (n < nslab - 1)
        {
            atc->slabCommSetup[n].node_dest = bw;
            atc->slabCommSetup[n].node_src  = fw;
            n++;
        }
    }
}
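
/* Worked example (illustrative values only): with nslab = 4 and nodeid = 0 the
 * loop above produces slabCommSetup = { {dest 1, src 3}, {dest 3, src 1},
 * {dest 2, src 2} }, i.e. pulses alternate between the forward and backward
 * direction with growing slab offset until all nslab - 1 peers are covered.
 */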

/*! \brief Round \p n up to the next multiple of \p f */
static int mult_up(int n, int f)
{
    return ((n + f - 1) / f) * f;
}
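
/* Example (illustrative only): mult_up(25, 8) == 32, while mult_up(24, 8) == 24. */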

/*! \brief Return estimate of the load imbalance from the PME grid not being a good match for the number of PME ranks */
static double estimate_pme_load_imbalance(struct gmx_pme_t* pme)
{
    const int nma = pme->nnodes_major;
    const int nmi = pme->nnodes_minor;

    const double n1 = mult_up(pme->nkx, nma) * mult_up(pme->nky, nmi) * pme->nkz;
    const double n2 = mult_up(pme->nkx, nma) * mult_up(pme->nkz, nmi) * pme->nky;
    const double n3 = mult_up(pme->nky, nma) * mult_up(pme->nkz, nmi) * pme->nkx;

    /* pme_solve is roughly double the cost of an fft */

    return (n1 + n2 + 3 * n3) / static_cast<double>(6 * pme->nkx * pme->nky * pme->nkz);
}

#ifndef DOXYGEN

PmeAtomComm::PmeAtomComm(MPI_Comm   PmeMpiCommunicator,
                         const int  numThreads,
                         const int  pmeOrder,
                         const int  dimIndex,
                         const bool doSpread) :
    dimind(dimIndex), bSpread(doSpread), pme_order(pmeOrder), nthread(numThreads), spline(nthread)
{
    if (PmeMpiCommunicator != MPI_COMM_NULL)
    {
        mpi_comm = PmeMpiCommunicator;
#    if GMX_MPI
        MPI_Comm_size(mpi_comm, &nslab);
        MPI_Comm_rank(mpi_comm, &nodeid);
#    endif
    }
    if (debug)
    {
        fprintf(debug, "For PME atom communication in dimind %d: nslab %d rank %d\n", dimind, nslab, nodeid);
    }

    if (nslab > 1)
    {
        slabCommSetup.resize(nslab);
        setup_coordinate_communication(this);

        count_thread.resize(nthread);
        for (auto& countThread : count_thread)
        {
            countThread.resize(nslab);
        }
    }

    if (nthread > 1)
    {
        threadMap.resize(nthread);

#    pragma omp parallel for num_threads(nthread) schedule(static)
        for (int thread = 0; thread < nthread; thread++)
        {
            try
            {
                /* Allocate buffer with padding to avoid cache pollution */
                threadMap[thread].nBuffer.resize(nthread + 2 * gmxCacheLineSize);
                threadMap[thread].n = threadMap[thread].nBuffer.data() + gmxCacheLineSize;
            }
            GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
        }
    }
}

#endif // !DOXYGEN

/*! \brief Initialize data structure for communication */
static void init_overlap_comm(pme_overlap_t* ol, int norder, MPI_Comm comm, int nnodes, int nodeid, int ndata, int commplainsize)
{
    gmx_bool bCont;

    ol->mpi_comm = comm;
    ol->nnodes   = nnodes;
    ol->nodeid   = nodeid;

    /* Linear translation of the PME grid won't affect reciprocal space
     * calculations, so to optimize we only interpolate "upwards",
     * which also means we only have to consider overlap in one direction.
     * I.e., particles on this node might also be spread to grid indices
     * that belong to higher nodes (modulo nnodes)
     */

    ol->s2g0.resize(ol->nnodes + 1);
    ol->s2g1.resize(ol->nnodes);
    if (debug)
    {
        fprintf(debug, "PME slab boundaries:");
    }
    for (int i = 0; i < nnodes; i++)
    {
        /* s2g0 the local interpolation grid start.
         * s2g1 the local interpolation grid end.
         * Since in calc_pidx we divide particles, and not grid lines,
         * spatially uniform along dimension x or y, we need to round
         * s2g0 down and s2g1 up.
         */
        ol->s2g0[i] = (i * ndata + 0) / nnodes;
        ol->s2g1[i] = ((i + 1) * ndata + nnodes - 1) / nnodes + norder - 1;

        if (debug)
        {
            fprintf(debug, "  %3d %3d", ol->s2g0[i], ol->s2g1[i]);
        }
    }
    ol->s2g0[nnodes] = ndata;
    if (debug)
    {
        fprintf(debug, "\n");
    }

    /* Determine how many nodes we need to communicate the grid overlap with */
    int testRankCount = 0;
    do
    {
        testRankCount++;
        bCont = FALSE;
        for (int i = 0; i < nnodes; i++)
        {
            if ((i + testRankCount < nnodes && ol->s2g1[i] > ol->s2g0[i + testRankCount])
                || (i + testRankCount >= nnodes && ol->s2g1[i] > ol->s2g0[i + testRankCount - nnodes] + ndata))
            {
                bCont = TRUE;
            }
        }
    } while (bCont && testRankCount < nnodes);

    ol->comm_data.resize(testRankCount - 1);
    ol->send_size = 0;

    for (size_t b = 0; b < ol->comm_data.size(); b++)
    {
        pme_grid_comm_t* pgc = &ol->comm_data[b];

        /* Send */
        pgc->send_id  = (ol->nodeid + (b + 1)) % ol->nnodes;
        int fft_start = ol->s2g0[pgc->send_id];
        int fft_end   = ol->s2g0[pgc->send_id + 1];
        if (pgc->send_id < nodeid)
        {
            fft_start += ndata;
            fft_end += ndata;
        }
        int send_index1  = ol->s2g1[nodeid];
        send_index1      = std::min(send_index1, fft_end);
        pgc->send_index0 = fft_start;
        pgc->send_nindex = std::max(0, send_index1 - pgc->send_index0);
        ol->send_size += pgc->send_nindex;

        /* We always start receiving to the first index of our slab */
        pgc->recv_id    = (ol->nodeid - (b + 1) + ol->nnodes) % ol->nnodes;
        fft_start       = ol->s2g0[ol->nodeid];
        fft_end         = ol->s2g0[ol->nodeid + 1];
        int recv_index1 = ol->s2g1[pgc->recv_id];
        if (pgc->recv_id > nodeid)
        {
            recv_index1 -= ndata;
        }
        recv_index1      = std::min(recv_index1, fft_end);
        pgc->recv_index0 = fft_start;
        pgc->recv_nindex = std::max(0, recv_index1 - pgc->recv_index0);
    }

#if GMX_MPI
    /* Communicate the buffer sizes to receive */
    MPI_Status stat;
    for (size_t b = 0; b < ol->comm_data.size(); b++)
    {
        MPI_Sendrecv(&ol->send_size,
                     1,
                     MPI_INT,
                     ol->comm_data[b].send_id,
                     b,
                     &ol->comm_data[b].recv_size,
                     1,
                     MPI_INT,
                     ol->comm_data[b].recv_id,
                     b,
                     ol->mpi_comm,
                     &stat);
    }
#endif

    /* For a non-divisible grid we need pme_order instead of pme_order-1 */
    ol->sendbuf.resize(norder * commplainsize);
    ol->recvbuf.resize(norder * commplainsize);
}
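
/* Worked example of the slab-boundary arithmetic above (illustrative values
 * only): with nnodes = 3, ndata = 10 and norder = 4,
 *     s2g0 = { 0, 3, 6, 10 }   (grid starts, rounded down)
 *     s2g1 = { 7, 10, 13 }     (grid ends including spline overlap, rounded up)
 * so each slab reaches up to norder - 1 grid lines into the next slab's range.
 */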

int minimalPmeGridSize(int pmeOrder)
{
    /* The actual grid size limitations are:
     *   serial:        >= pme_order
     *   DD, no OpenMP: >= 2*(pme_order - 1)
     *   DD, OpenMP:    >= pme_order + 1
     * But we use the maximum for simplicity since in practice there is not
     * much performance difference between pme_order and 2*(pme_order -1).
     */
    int minimalSize = 2 * (pmeOrder - 1);

    GMX_RELEASE_ASSERT(pmeOrder >= 3, "pmeOrder has to be >= 3");
    GMX_RELEASE_ASSERT(minimalSize >= pmeOrder + 1, "The grid size should be >= pmeOrder + 1");

    return minimalSize;
}
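
/* Example (illustrative only): for the default pmeOrder of 4 this returns
 * 2*(4 - 1) == 6, so every PME grid dimension must have at least 6 lines. */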

bool gmx_pme_check_restrictions(int pme_order, int nkx, int nky, int nkz, int numPmeDomainsAlongX, bool useThreads, bool errorsAreFatal)
{
    if (pme_order > PME_ORDER_MAX)
    {
        if (!errorsAreFatal)
        {
            return false;
        }

        std::string message = gmx::formatString(
                "pme_order (%d) is larger than the maximum allowed value (%d). Modify and "
                "recompile the code if you really need such a high order.",
                pme_order,
                PME_ORDER_MAX);
        GMX_THROW(gmx::InconsistentInputError(message));
    }

    const int minGridSize = minimalPmeGridSize(pme_order);
    if (nkx < minGridSize || nky < minGridSize || nkz < minGridSize)
    {
        if (!errorsAreFatal)
        {
            return false;
        }
        std::string message =
                gmx::formatString("The PME grid sizes need to be >= 2*(pme_order-1) (%d)", minGridSize);
        GMX_THROW(gmx::InconsistentInputError(message));
    }

    /* Check for a limitation of the (current) sum_fftgrid_dd code.
     * We only allow multiple communication pulses in dim 1, not in dim 0.
     */
    if (useThreads
        && (nkx < numPmeDomainsAlongX * pme_order && nkx != numPmeDomainsAlongX * (pme_order - 1)))
    {
        if (!errorsAreFatal)
        {
            return false;
        }
        gmx_fatal(FARGS,
                  "The number of PME grid lines per rank along x is %g. But when using OpenMP "
                  "threads, the number of grid lines per rank along x should be >= pme_order (%d) "
                  "or = pme_order-1. To resolve this issue, use fewer ranks along x (and possibly "
                  "more along y and/or z) by specifying -dd manually.",
                  nkx / static_cast<double>(numPmeDomainsAlongX),
                  pme_order);
    }

    return true;
}

/*! \brief Return \p enumerator divided by \p denominator, rounded up */
static int div_round_up(int enumerator, int denominator)
{
    return (enumerator + denominator - 1) / denominator;
}
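
/* Example (illustrative only): div_round_up(10, 3) == 4, while div_round_up(9, 3) == 3. */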

gmx_pme_t* gmx_pme_init(const t_commrec*     cr,
                        const NumPmeDomains& numPmeDomains,
                        const t_inputrec*    ir,
                        gmx_bool             bFreeEnergy_q,
                        gmx_bool             bFreeEnergy_lj,
                        gmx_bool             bReproducible,
                        real                 ewaldcoeff_q,
                        real                 ewaldcoeff_lj,
                        int                  nthread,
                        PmeRunMode           runMode,
                        PmeGpu*              pmeGpu,
                        const DeviceContext* deviceContext,
                        const DeviceStream*  deviceStream,
                        const PmeGpuProgram* pmeGpuProgram,
                        const gmx::MDLogger& mdlog)
{
    int  use_threads, sum_use_threads, i;
    ivec ndata;

    if (debug)
    {
        fprintf(debug, "Creating PME data structures.\n");
    }

    gmx::unique_cptr<gmx_pme_t, gmx_pme_destroy> pme(new gmx_pme_t());

    pme->sum_qgrid_tmp    = nullptr;
    pme->sum_qgrid_dd_tmp = nullptr;

    pme->buf_nalloc = 0;

    pme->nnodes  = 1;
    pme->bPPnode = TRUE;

    pme->nnodes_major = numPmeDomains.x;
    pme->nnodes_minor = numPmeDomains.y;

    if (numPmeDomains.x * numPmeDomains.y > 1)
    {
        pme->mpi_comm = cr->mpi_comm_mygroup;

#if GMX_MPI
        MPI_Comm_rank(pme->mpi_comm, &pme->nodeid);
        MPI_Comm_size(pme->mpi_comm, &pme->nnodes);
#endif
        if (pme->nnodes != numPmeDomains.x * numPmeDomains.y)
        {
            gmx_incons("PME rank count mismatch");
        }
    }
    else
    {
        pme->mpi_comm = MPI_COMM_NULL;
    }

    if (pme->nnodes == 1)
    {
        pme->mpi_comm_d[0] = MPI_COMM_NULL;
        pme->mpi_comm_d[1] = MPI_COMM_NULL;
        pme->ndecompdim    = 0;
        pme->nodeid_major  = 0;
        pme->nodeid_minor  = 0;
    }
    else
    {
        if (numPmeDomains.y == 1)
        {
            pme->mpi_comm_d[0] = pme->mpi_comm;
            pme->mpi_comm_d[1] = MPI_COMM_NULL;
            pme->ndecompdim    = 1;
            pme->nodeid_major  = pme->nodeid;
            pme->nodeid_minor  = 0;
        }
        else if (numPmeDomains.x == 1)
        {
            pme->mpi_comm_d[0] = MPI_COMM_NULL;
            pme->mpi_comm_d[1] = pme->mpi_comm;
            pme->ndecompdim    = 1;
            pme->nodeid_major  = 0;
            pme->nodeid_minor  = pme->nodeid;
        }
        else
        {
            if (pme->nnodes % numPmeDomains.x != 0)
            {
                gmx_incons(
                        "For 2D PME decomposition, #PME ranks must be divisible by the number of "
                        "domains along x");
            }
            pme->ndecompdim = 2;

#if GMX_MPI
            MPI_Comm_split(pme->mpi_comm,
                           pme->nodeid % numPmeDomains.y,
                           pme->nodeid,
                           &pme->mpi_comm_d[0]); /* My communicator along major dimension */
            MPI_Comm_split(pme->mpi_comm,
                           pme->nodeid / numPmeDomains.y,
                           pme->nodeid,
                           &pme->mpi_comm_d[1]); /* My communicator along minor dimension */

            MPI_Comm_rank(pme->mpi_comm_d[0], &pme->nodeid_major);
            MPI_Comm_size(pme->mpi_comm_d[0], &pme->nnodes_major);
            MPI_Comm_rank(pme->mpi_comm_d[1], &pme->nodeid_minor);
            MPI_Comm_size(pme->mpi_comm_d[1], &pme->nnodes_minor);
#endif
        }
    }
    // cr is always initialized if there is a PP rank, so we can safely assume
    // that when it is not, as in the Ewald tests, we are not on a PP rank.
    pme->bPPnode = ((cr != nullptr && cr->duty != 0) && thisRankHasDuty(cr, DUTY_PP));

    pme->nthread = nthread;

    /* Check if any of the PME MPI ranks uses threads */
    use_threads = (pme->nthread > 1 ? 1 : 0);
#if GMX_MPI
    if (pme->nnodes > 1)
    {
        MPI_Allreduce(&use_threads, &sum_use_threads, 1, MPI_INT, MPI_SUM, pme->mpi_comm);
    }
    else
#endif
    {
        sum_use_threads = use_threads;
    }
    pme->bUseThreads = (sum_use_threads > 0);

    if (ir->pbcType == PbcType::Screw)
    {
        gmx_fatal(FARGS, "pme does not (yet) work with pbc = screw");
    }

    /* NOTE:
     * It is likely that the current gmx_pme_do() routine supports calculating
     * only Coulomb or LJ while gmx_pme_init() configures for both,
     * but that has never been tested.
     * It is likely that the current gmx_pme_do() routine supports skipping
     * the free-energy calculation for Coulomb and/or LJ while gmx_pme_init()
     * configures with free energy, but that has never been tested.
     */
    pme->doCoulomb = EEL_PME(ir->coulombtype);
    pme->doLJ      = EVDW_PME(ir->vdwtype);
    pme->bFEP_q    = ((ir->efep != FreeEnergyPerturbationType::No) && bFreeEnergy_q);
    pme->bFEP_lj   = ((ir->efep != FreeEnergyPerturbationType::No) && bFreeEnergy_lj);
    pme->bFEP      = (pme->bFEP_q || pme->bFEP_lj);
    pme->nkx       = ir->nkx;
    pme->nky       = ir->nky;
    pme->nkz       = ir->nkz;
    pme->bP3M = (ir->coulombtype == CoulombInteractionType::P3mAD || getenv("GMX_PME_P3M") != nullptr);
    pme->pme_order     = ir->pme_order;
    pme->ewaldcoeff_q  = ewaldcoeff_q;
    pme->ewaldcoeff_lj = ewaldcoeff_lj;

    /* Always constant electrostatics coefficients */
    pme->epsilon_r = ir->epsilon_r;

    /* Always constant LJ coefficients */
    pme->ljpme_combination_rule = ir->ljpme_combination_rule;

    // The box requires scaling with nwalls = 2, we store that condition as well
    // as the scaling factor
    delete pme->boxScaler;
    pme->boxScaler = new EwaldBoxZScaler(inputrecPbcXY2Walls(ir), ir->wall_ewald_zfac);

    /* If we violate restrictions, generate a fatal error here */
    gmx_pme_check_restrictions(
            pme->pme_order, pme->nkx, pme->nky, pme->nkz, pme->nnodes_major, pme->bUseThreads, true);

    if (pme->nnodes > 1)
    {
        double imbal;

#if GMX_MPI
        MPI_Type_contiguous(DIM, GMX_MPI_REAL, &(pme->rvec_mpi));
        MPI_Type_commit(&(pme->rvec_mpi));
#endif

        /* Note that the coefficient spreading and force gathering, which usually
         * take about the same amount of time as FFT+solve_pme,
         * are always fully load balanced
         * (unless the coefficient distribution is inhomogeneous).
         */

        imbal = estimate_pme_load_imbalance(pme.get());
        if (imbal >= 1.2 && pme->nodeid_major == 0 && pme->nodeid_minor == 0)
        {
            GMX_LOG(mdlog.warning)
                    .asParagraph()
                    .appendTextFormatted(
                            "NOTE: The load imbalance in PME FFT and solve is %d%%.\n"
                            "      For optimal PME load balancing\n"
                            "      PME grid_x (%d) and grid_y (%d) should be divisible by "
                            "#PME_ranks_x "
                            "(%d)\n"
                            "      and PME grid_y (%d) and grid_z (%d) should be divisible by "
                            "#PME_ranks_y "
                            "(%d)",
                            gmx::roundToInt((imbal - 1) * 100),
                            pme->nkx,
                            pme->nky,
                            pme->nnodes_major,
                            pme->nky,
                            pme->nkz,
                            pme->nnodes_minor);
        }
    }

    /* For a non-divisible grid we need pme_order instead of pme_order-1 */
    /* In sum_qgrid_dd x overlap is copied in place: take padding into account.
     * y is always copied through a buffer: we don't need padding in z,
     * but we do need the overlap in x because of the communication order.
     */
    init_overlap_comm(&pme->overlap[0],
                      pme->pme_order,
                      pme->mpi_comm_d[0],
                      pme->nnodes_major,
                      pme->nodeid_major,
                      pme->nkx,
                      (div_round_up(pme->nky, pme->nnodes_minor) + pme->pme_order)
                              * (pme->nkz + pme->pme_order - 1));

    /* Along overlap dim 1 we can send in multiple pulses in sum_fftgrid_dd.
     * We do this with an offset buffer of equal size, so we need to allocate
     * extra for the offset. That's what the (+1)*pme->nkz is for.
     */
    init_overlap_comm(&pme->overlap[1],
                      pme->pme_order,
                      pme->mpi_comm_d[1],
                      pme->nnodes_minor,
                      pme->nodeid_minor,
                      pme->nky,
                      (div_round_up(pme->nkx, pme->nnodes_major) + pme->pme_order + 1) * pme->nkz);

    /* Double-check for a limitation of the (current) sum_fftgrid_dd code.
     * Note that gmx_pme_check_restrictions checked for this already.
     */
    if (pme->bUseThreads && (pme->overlap[0].comm_data.size() > 1))
    {
        gmx_incons(
                "More than one communication pulse required for grid overlap communication along "
                "the major dimension while using threads");
    }

    snew(pme->bsp_mod[XX], pme->nkx);
    snew(pme->bsp_mod[YY], pme->nky);
    snew(pme->bsp_mod[ZZ], pme->nkz);

    pme->gpu     = pmeGpu; /* Carrying over the single GPU structure */
    pme->runMode = runMode;

    /* The required size of the interpolation grid, including overlap.
     * The allocated size (pmegrid_n?) might be slightly larger.
     */
    pme->pmegrid_nx = pme->overlap[0].s2g1[pme->nodeid_major] - pme->overlap[0].s2g0[pme->nodeid_major];
    pme->pmegrid_ny = pme->overlap[1].s2g1[pme->nodeid_minor] - pme->overlap[1].s2g0[pme->nodeid_minor];
    pme->pmegrid_nz_base = pme->nkz;
    pme->pmegrid_nz      = pme->pmegrid_nz_base + pme->pme_order - 1;
    set_grid_alignment(&pme->pmegrid_nz, pme->pme_order);
    pme->pmegrid_start_ix = pme->overlap[0].s2g0[pme->nodeid_major];
    pme->pmegrid_start_iy = pme->overlap[1].s2g0[pme->nodeid_minor];
    pme->pmegrid_start_iz = 0;

    make_gridindex_to_localindex(
            pme->nkx, pme->pmegrid_start_ix, pme->pmegrid_nx - (pme->pme_order - 1), &pme->nnx, &pme->fshx);
    make_gridindex_to_localindex(
            pme->nky, pme->pmegrid_start_iy, pme->pmegrid_ny - (pme->pme_order - 1), &pme->nny, &pme->fshy);
    make_gridindex_to_localindex(
            pme->nkz, pme->pmegrid_start_iz, pme->pmegrid_nz_base, &pme->nnz, &pme->fshz);

    pme->spline_work = make_pme_spline_work(pme->pme_order);

    ndata[0] = pme->nkx;
    ndata[1] = pme->nky;
    ndata[2] = pme->nkz;
    /* It doesn't matter if we allocate too many grids here,
     * we only allocate and use the ones we need.
     */
    if (pme->doLJ)
    {
        pme->ngrids = ((ir->ljpme_combination_rule == LongRangeVdW::LB) ? DO_Q_AND_LJ_LB : DO_Q_AND_LJ);
    }
    else
    {
        pme->ngrids = DO_Q;
    }
    snew(pme->fftgrid, pme->ngrids);
    snew(pme->cfftgrid, pme->ngrids);
    snew(pme->pfft_setup, pme->ngrids);

    for (i = 0; i < pme->ngrids; ++i)
    {
        if ((i < DO_Q && pme->doCoulomb && (i == 0 || bFreeEnergy_q))
            || (i >= DO_Q && pme->doLJ
                && (i == 2 || bFreeEnergy_lj || ir->ljpme_combination_rule == LongRangeVdW::LB)))
        {
            pmegrids_init(&pme->pmegrid[i],
                          pme->pmegrid_nx,
                          pme->pmegrid_ny,
                          pme->pmegrid_nz,
                          pme->pmegrid_nz_base,
                          pme->pme_order,
                          pme->bUseThreads,
                          pme->nthread,
                          pme->overlap[0].s2g1[pme->nodeid_major]
                                  - pme->overlap[0].s2g0[pme->nodeid_major + 1],
                          pme->overlap[1].s2g1[pme->nodeid_minor]
                                  - pme->overlap[1].s2g0[pme->nodeid_minor + 1]);
            /* This routine will allocate the grid data to fit the FFTs */
            const auto allocateRealGridForGpu = (pme->runMode == PmeRunMode::Mixed)
                                                        ? gmx::PinningPolicy::PinnedIfSupported
                                                        : gmx::PinningPolicy::CannotBePinned;
            gmx_parallel_3dfft_init(&pme->pfft_setup[i],
                                    ndata,
                                    &pme->fftgrid[i],
                                    &pme->cfftgrid[i],
                                    pme->mpi_comm_d,
                                    bReproducible,
                                    pme->nthread,
                                    allocateRealGridForGpu);
        }
    }

    if (!pme->bP3M)
    {
        /* Use plain SPME B-spline interpolation */
        make_bspline_moduli(pme->bsp_mod, pme->nkx, pme->nky, pme->nkz, pme->pme_order);
    }
    else
    {
        /* Use the P3M grid-optimized influence function */
        make_p3m_bspline_moduli(pme->bsp_mod, pme->nkx, pme->nky, pme->nkz, pme->pme_order);
    }

    /* Use atc[0] for spreading */
    const int firstDimIndex   = (numPmeDomains.x > 1 ? 0 : 1);
    MPI_Comm  mpiCommFirstDim = (pme->nnodes > 1 ? pme->mpi_comm_d[firstDimIndex] : MPI_COMM_NULL);
    bool      doSpread        = true;
    pme->atc.emplace_back(mpiCommFirstDim, pme->nthread, pme->pme_order, firstDimIndex, doSpread);
    if (pme->ndecompdim >= 2)
    {
        const int secondDimIndex = 1;
        doSpread                 = false;
        pme->atc.emplace_back(pme->mpi_comm_d[1], pme->nthread, pme->pme_order, secondDimIndex, doSpread);
    }

    // Initial check of validity of the input for running on the GPU
    if (pme->runMode != PmeRunMode::CPU)
    {
        std::string errorString;
        bool        canRunOnGpu = pme_gpu_check_restrictions(pme.get(), &errorString);
        if (!canRunOnGpu)
        {
            GMX_THROW(gmx::NotImplementedError(errorString));
        }
        pme_gpu_reinit(pme.get(), deviceContext, deviceStream, pmeGpuProgram);
    }
    else
    {
        GMX_ASSERT(pme->gpu == nullptr, "Should not have PME GPU object when PME is on a CPU.");
    }


    pme_init_all_work(&pme->solve_work, pme->nthread, pme->nkx);

    // no exception was thrown during the init, so we hand over the PME structure handle
    return pme.release();
}

void gmx_pme_reinit(struct gmx_pme_t** pmedata,
                    const t_commrec*   cr,
                    struct gmx_pme_t*  pme_src,
                    const t_inputrec*  ir,
                    const ivec         grid_size,
                    real               ewaldcoeff_q,
                    real               ewaldcoeff_lj)
{
    // Create a copy of t_inputrec fields that are used in gmx_pme_init().
    // TODO: This would be better as just copying a sub-structure that contains
    // all the PME parameters and nothing else.
    t_inputrec irc;
    irc.pbcType                = ir->pbcType;
    irc.coulombtype            = ir->coulombtype;
    irc.vdwtype                = ir->vdwtype;
    irc.efep                   = ir->efep;
    irc.pme_order              = ir->pme_order;
    irc.epsilon_r              = ir->epsilon_r;
    irc.ljpme_combination_rule = ir->ljpme_combination_rule;
    irc.nkx                    = grid_size[XX];
    irc.nky                    = grid_size[YY];
    irc.nkz                    = grid_size[ZZ];

    try
    {
        // This is reinit. Any logging should have been done at first init.
        // Here we should avoid writing notes for settings the user did not
        // set directly.
        const gmx::MDLogger dummyLogger;
        GMX_ASSERT(pmedata, "Invalid PME pointer");
        NumPmeDomains numPmeDomains = { pme_src->nnodes_major, pme_src->nnodes_minor };
        *pmedata                    = gmx_pme_init(cr,
                                numPmeDomains,
                                &irc,
                                pme_src->bFEP_q,
                                pme_src->bFEP_lj,
                                FALSE,
                                ewaldcoeff_q,
                                ewaldcoeff_lj,
                                pme_src->nthread,
                                pme_src->runMode,
                                pme_src->gpu,
                                nullptr,
                                nullptr,
                                nullptr,
                                dummyLogger);
        /* When running PME on the CPU not using domain decomposition,
         * the atom data is allocated once only in gmx_pme_(re)init().
         */
        if (!pme_src->gpu && pme_src->nnodes == 1)
        {
            gmx_pme_reinit_atoms(*pmedata, pme_src->atc[0].numAtoms(), {}, {});
        }
        // TODO this is mostly passing around current values
    }
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR

    /* We can easily reuse the allocated pme grids in pme_src */
    reuse_pmegrids(&pme_src->pmegrid[PME_GRID_QA], &(*pmedata)->pmegrid[PME_GRID_QA]);
    /* We would like to reuse the fft grids, but that's harder */
}

real gmx_pme_calc_energy(gmx_pme_t* pme, gmx::ArrayRef<const gmx::RVec> x, gmx::ArrayRef<const real> q)
{
    pmegrids_t* grid;

    if (pme->nnodes > 1)
    {
        gmx_incons("gmx_pme_calc_energy called in parallel");
    }
    if (pme->bFEP_q)
    {
        gmx_incons("gmx_pme_calc_energy with free energy");
    }

    if (!pme->atc_energy)
    {
        pme->atc_energy = std::make_unique<PmeAtomComm>(MPI_COMM_NULL, 1, pme->pme_order, 0, true);
    }
    PmeAtomComm* atc = pme->atc_energy.get();
    atc->setNumAtoms(x.ssize());
    atc->x           = x;
    atc->coefficient = q;

    /* We only use the A-charges grid */
    grid = &pme->pmegrid[PME_GRID_QA];

    /* Only calculate the spline coefficients, don't actually spread */
    spread_on_grid(pme, atc, nullptr, TRUE, FALSE, pme->fftgrid[PME_GRID_QA], FALSE, PME_GRID_QA);

    return gather_energy_bsplines(pme, grid->grid.grid, atc);
}
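
/* Usage sketch (editorial; the variable names are hypothetical and a valid,
 * serially initialized pme handle is assumed):
 *
 *     std::vector<gmx::RVec> positions = ...; // test-charge positions
 *     std::vector<real>      charges   = ...; // matching A-state charges
 *     real reciprocalEnergy = gmx_pme_calc_energy(pme, positions, charges);
 *
 * As asserted above, this is only valid with pme->nnodes == 1 and without
 * perturbed (FEP) charges.
 */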

/*! \brief Calculate initial Lorentz-Berthelot coefficients for LJ-PME */
static void calc_initial_lb_coeffs(gmx::ArrayRef<real>       coefficient,
                                   gmx::ArrayRef<const real> local_c6,
                                   gmx::ArrayRef<const real> local_sigma)
{
    for (gmx::index i = 0; i < coefficient.ssize(); ++i)
    {
        real sigma4    = local_sigma[i];
        sigma4         = sigma4 * sigma4;
        sigma4         = sigma4 * sigma4;
        coefficient[i] = local_c6[i] / sigma4;
    }
}

/*! \brief Calculate next Lorentz-Berthelot coefficients for LJ-PME */
static void calc_next_lb_coeffs(gmx::ArrayRef<real> coefficient, gmx::ArrayRef<const real> local_sigma)
{
    for (gmx::index i = 0; i < coefficient.ssize(); ++i)
    {
        coefficient[i] *= local_sigma[i];
    }
}
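
/* How the two helpers above work together (editorial walk-through): the
 * coefficient of atom i starts at c6_i/sigma_i^4 and is multiplied by sigma_i
 * once before each of the seven LB grids is spread, so term k (k = 1..7)
 * carries c6_i*sigma_i^(k-4). Seven terms are needed because the
 * Lorentz-Berthelot sigma_ij^6 = ((sigma_i + sigma_j)/2)^6 expands into seven
 * binomial terms; see the seven-term loop in gmx_pme_do() below.
 */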

int gmx_pme_do(struct gmx_pme_t*              pme,
               gmx::ArrayRef<const gmx::RVec> coordinates,
               gmx::ArrayRef<gmx::RVec>       forces,
               gmx::ArrayRef<const real>      chargeA,
               gmx::ArrayRef<const real>      chargeB,
               gmx::ArrayRef<const real>      c6A,
               gmx::ArrayRef<const real>      c6B,
               gmx::ArrayRef<const real>      sigmaA,
               gmx::ArrayRef<const real>      sigmaB,
               const matrix                   box,
               const t_commrec*               cr,
               int                            maxshift_x,
               int                            maxshift_y,
               t_nrnb*                        nrnb,
               gmx_wallcycle*                 wcycle,
               matrix                         vir_q,
               matrix                         vir_lj,
               real*                          energy_q,
               real*                          energy_lj,
               real                           lambda_q,
               real                           lambda_lj,
               real*                          dvdlambda_q,
               real*                          dvdlambda_lj,
               const gmx::StepWorkload&       stepWork)
{
    GMX_ASSERT(pme->runMode == PmeRunMode::CPU,
               "gmx_pme_do should not be called on the GPU PME run.");

    int                       d, npme, grid_index, max_grid_index;
    PmeAtomComm&              atc     = pme->atc[0];
    pmegrids_t*               pmegrid = nullptr;
    real*                     grid    = nullptr;
    gmx::ArrayRef<const real> coefficient;
    PmeOutput                 output[2]; // The second is used for the B state with FEP
    real                      scale, lambda;
    gmx_bool                  bClearF;
    gmx_parallel_3dfft_t      pfft_setup;
    real*                     fftgrid;
    t_complex*                cfftgrid;
    int                       thread;
    gmx_bool                  bFirst, bDoSplines;
    int                       fep_state;
    int                       fep_states_lj = pme->bFEP_lj ? 2 : 1;
    // There's no support for computing energy without virial, or vice versa
    const bool computeEnergyAndVirial = (stepWork.computeEnergy || stepWork.computeVirial);

    /* We could be passing lambda!=0 while no q or LJ is actually perturbed */
    if (!pme->bFEP_q)
    {
        lambda_q = 0;
    }
    if (!pme->bFEP_lj)
    {
        lambda_lj = 0;
    }

    assert(pme->nnodes > 0);
    assert(pme->nnodes == 1 || pme->ndecompdim > 0);

    if (pme->nnodes > 1)
    {
        atc.pd.resize(coordinates.ssize());
        for (int d = pme->ndecompdim - 1; d >= 0; d--)
        {
            PmeAtomComm& atc = pme->atc[d];
            atc.maxshift     = (atc.dimind == 0 ? maxshift_x : maxshift_y);
        }
    }
    else
    {
        GMX_ASSERT(coordinates.ssize() == atc.numAtoms(), "We expect atc.numAtoms() coordinates");
        GMX_ASSERT(forces.ssize() >= atc.numAtoms(),
                   "We need a force buffer with at least atc.numAtoms() elements");

        atc.x = coordinates;
        atc.f = forces;
    }

    matrix scaledBox;
    pme->boxScaler->scaleBox(box, scaledBox);

    gmx::invertBoxMatrix(scaledBox, pme->recipbox);
    bFirst = TRUE;

    /* For simplicity, we construct the splines for all particles if
     * more than one PME calculation is needed. Some optimization
     * could be done by keeping track of which atoms have splines
     * constructed, and constructing new splines on each pass for atoms
     * that don't yet have them.
     */

    bDoSplines = pme->bFEP || (pme->doCoulomb && pme->doLJ);

    /* We need a maximum of four separate PME calculations:
     * grid_index=0: Coulomb PME with charges from state A
     * grid_index=1: Coulomb PME with charges from state B
     * grid_index=2: LJ PME with C6 from state A
     * grid_index=3: LJ PME with C6 from state B
     * For Lorentz-Berthelot combination rules, a separate loop is used to
     * calculate all the terms.
     */

    /* If we are doing LJ-PME with LB, we only do Q here */
    max_grid_index = (pme->ljpme_combination_rule == LongRangeVdW::LB) ? DO_Q : DO_Q_AND_LJ;

1158     for (grid_index = 0; grid_index < max_grid_index; ++grid_index)
1159     {
1160         /* Check if we should do calculations at this grid_index
1161          * If grid_index is odd we should be doing FEP
1162          * If grid_index < 2 we should be doing electrostatic PME
1163          * If grid_index >= 2 we should be doing LJ-PME
1164          */
1165         if ((grid_index < DO_Q && (!pme->doCoulomb || (grid_index == 1 && !pme->bFEP_q)))
1166             || (grid_index >= DO_Q && (!pme->doLJ || (grid_index == 3 && !pme->bFEP_lj))))
1167         {
1168             continue;
1169         }
1170         /* Unpack structure */
1171         pmegrid    = &pme->pmegrid[grid_index];
1172         fftgrid    = pme->fftgrid[grid_index];
1173         cfftgrid   = pme->cfftgrid[grid_index];
1174         pfft_setup = pme->pfft_setup[grid_index];
1175         switch (grid_index)
1176         {
1177             case 0: coefficient = chargeA; break;
1178             case 1: coefficient = chargeB; break;
1179             case 2: coefficient = c6A; break;
1180             case 3: coefficient = c6B; break;
1181         }
1182
1183         grid = pmegrid->grid.grid;
1184
1185         if (debug)
1186         {
1187             fprintf(debug, "PME: number of ranks = %d, rank = %d\n", cr->nnodes, cr->nodeid);
1188             fprintf(debug, "Grid = %p\n", static_cast<void*>(grid));
1189             if (grid == nullptr)
1190             {
1191                 gmx_fatal(FARGS, "No grid!");
1192             }
1193         }
1194
1195         if (pme->nnodes == 1)
1196         {
1197             atc.coefficient = coefficient;
1198         }
1199         else
1200         {
1201             wallcycle_start(wcycle, WallCycleCounter::PmeRedistXF);
1202             do_redist_pos_coeffs(pme, cr, bFirst, coordinates, coefficient);
1203
1204             wallcycle_stop(wcycle, WallCycleCounter::PmeRedistXF);
1205         }
1206
1207         if (debug)
1208         {
1209             fprintf(debug, "Rank= %6d, pme local particles=%6d\n", cr->nodeid, atc.numAtoms());
1210         }
1211
1212         wallcycle_start(wcycle, WallCycleCounter::PmeSpread);
1213
1214         /* Spread the coefficients on a grid */
1215         spread_on_grid(pme, &atc, pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);
1216
1217         if (bFirst)
1218         {
1219             inc_nrnb(nrnb, eNR_WEIGHTS, DIM * atc.numAtoms());
1220         }
1221         inc_nrnb(nrnb, eNR_SPREADBSP, pme->pme_order * pme->pme_order * pme->pme_order * atc.numAtoms());
1222
1223         if (!pme->bUseThreads)
1224         {
1225             wrap_periodic_pmegrid(pme, grid);
1226
1227             /* sum contributions to local grid from other nodes */
1228             if (pme->nnodes > 1)
1229             {
1230                 gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
1231             }
1232
1233             copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
1234         }
1235
1236         wallcycle_stop(wcycle, WallCycleCounter::PmeSpread);
1237
1238         /* TODO If the OpenMP and single-threaded implementations
1239            converge, then spread_on_grid() and
1240            copy_pmegrid_to_fftgrid() will perhaps live in the same
1241            source file.
1242         */
1243
1244         /* Here we start a large thread parallel region */
1245 #pragma omp parallel num_threads(pme->nthread) private(thread)
1246         {
1247             try
1248             {
1249                 thread = gmx_omp_get_thread_num();
1250                 int loop_count;
1251
1252                 /* do 3d-fft */
1253                 if (thread == 0)
1254                 {
1255                     wallcycle_start(wcycle, WallCycleCounter::PmeFft);
1256                 }
1257                 gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX, thread, wcycle);
1258                 if (thread == 0)
1259                 {
1260                     wallcycle_stop(wcycle, WallCycleCounter::PmeFft);
1261                 }
1262
1263                 /* solve in k-space for our local cells */
1264                 if (thread == 0)
1265                 {
1266                     wallcycle_start(
1267                             wcycle,
1268                             (grid_index < DO_Q ? WallCycleCounter::PmeSolve : WallCycleCounter::LJPme));
1269                 }
1270                 if (grid_index < DO_Q)
1271                 {
1272                     loop_count = solve_pme_yzx(pme,
1273                                                cfftgrid,
1274                                                scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
1275                                                computeEnergyAndVirial,
1276                                                pme->nthread,
1277                                                thread);
1278                 }
1279                 else
1280                 {
1281                     loop_count =
1282                             solve_pme_lj_yzx(pme,
1283                                              &cfftgrid,
1284                                              FALSE,
1285                                              scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
1286                                              computeEnergyAndVirial,
1287                                              pme->nthread,
1288                                              thread);
1289                 }
1290
1291                 if (thread == 0)
1292                 {
1293                     wallcycle_stop(
1294                             wcycle,
1295                             (grid_index < DO_Q ? WallCycleCounter::PmeSolve : WallCycleCounter::LJPme));
1296                     inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
1297                 }
1298
1299                 /* do 3d-invfft */
1300                 if (thread == 0)
1301                 {
1302                     wallcycle_start(wcycle, WallCycleCounter::PmeFft);
1303                 }
1304                 gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL, thread, wcycle);
1305                 if (thread == 0)
1306                 {
1307                     wallcycle_stop(wcycle, WallCycleCounter::PmeFft);
1308
1309
1310                     if (pme->nodeid == 0)
1311                     {
1312                         real ntot = pme->nkx * pme->nky * pme->nkz;
1313                         npme      = static_cast<int>(ntot * std::log(ntot) / std::log(2.0));
1314                         inc_nrnb(nrnb, eNR_FFT, 2 * npme);
1315                     }
1316
1317                     /* Note: this wallcycle region is closed below
1318                        outside an OpenMP region, so take care if
1319                        refactoring code here. */
1320                     wallcycle_start(wcycle, WallCycleCounter::PmeGather);
1321                 }
1322
1323                 copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
1324             }
1325             GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
1326         }
1327         /* End of thread parallel section.
1328          * With MPI we have to synchronize here before gmx_sum_qgrid_dd.
1329          */
1330
1331         /* distribute local grid to all nodes */
1332         if (pme->nnodes > 1)
1333         {
1334             gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
1335         }
1336
1337         unwrap_periodic_pmegrid(pme, grid);
1338
1339         if (stepWork.computeForces)
1340         {
1341             /* interpolate forces for our local atoms */
1342
1343
1344             /* If we are running without parallelization,
1345              * atc->f is the actual force array, not a buffer,
1346              * therefore we should not clear it.
1347              */
1348             lambda  = grid_index < DO_Q ? lambda_q : lambda_lj;
1349             bClearF = (bFirst && PAR(cr));
1350 #pragma omp parallel for num_threads(pme->nthread) schedule(static)
1351             for (thread = 0; thread < pme->nthread; thread++)
1352             {
1353                 try
1354                 {
1355                     gather_f_bsplines(pme,
1356                                       grid,
1357                                       bClearF,
1358                                       &atc,
1359                                       &atc.spline[thread],
1360                                       pme->bFEP ? (grid_index % 2 == 0 ? 1.0 - lambda : lambda) : 1.0);
1361                 }
1362                 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
1363             }
1364
1365
1366             inc_nrnb(nrnb, eNR_GATHERFBSP, pme->pme_order * pme->pme_order * pme->pme_order * atc.numAtoms());
1367             /* Note: this wallcycle region is opened above inside an OpenMP
1368                region, so take care if refactoring code here. */
1369             wallcycle_stop(wcycle, WallCycleCounter::PmeGather);
1370         }
1371
1372         if (computeEnergyAndVirial)
1373         {
1374             /* This should only be called on the master thread
1375              * and after the threads have synchronized.
1376              */
1377             if (grid_index < 2)
1378             {
1379                 get_pme_ener_vir_q(pme->solve_work, pme->nthread, &output[grid_index % 2]);
1380             }
1381             else
1382             {
1383                 get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &output[grid_index % 2]);
1384             }
1385         }
1386         bFirst = FALSE;
1387     } /* of grid_index-loop */
1388
1389     /* For Lorentz-Berthelot combination rules in LJ-PME, we need to calculate
1390      * seven terms. */

    if (pme->doLJ && pme->ljpme_combination_rule == LongRangeVdW::LB)
    {
        /* Loop over A- and B-state if we are doing FEP */
        for (fep_state = 0; fep_state < fep_states_lj; ++fep_state)
        {
            std::vector<real>         local_c6;
            std::vector<real>         local_sigma;
            gmx::ArrayRef<const real> RedistC6;
            gmx::ArrayRef<const real> RedistSigma;
            gmx::ArrayRef<real>       coefficientBuffer;
            if (pme->nnodes == 1)
            {
                pme->lb_buf1.resize(atc.numAtoms());
                coefficientBuffer = pme->lb_buf1;
                switch (fep_state)
                {
                    case 0:
                        local_c6.assign(c6A.begin(), c6A.end());
                        local_sigma.assign(sigmaA.begin(), sigmaA.end());
                        break;
                    case 1:
                        local_c6.assign(c6B.begin(), c6B.end());
                        local_sigma.assign(sigmaB.begin(), sigmaB.end());
                        break;
                    default: gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
                }
            }
            else
            {
                coefficientBuffer = atc.coefficientBuffer;
                switch (fep_state)
                {
                    case 0:
                        RedistC6    = c6A;
                        RedistSigma = sigmaA;
                        break;
                    case 1:
                        RedistC6    = c6B;
                        RedistSigma = sigmaB;
                        break;
                    default: gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
                }
                wallcycle_start(wcycle, WallCycleCounter::PmeRedistXF);

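                /* Two redistribution passes: each PME rank first receives the
                 * C6 values for its local atoms, then the sigma values; both
                 * are cached in the lb_buf work arrays for the grid loop. */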
                do_redist_pos_coeffs(pme, cr, bFirst, coordinates, RedistC6);
                pme->lb_buf1.resize(atc.numAtoms());
                pme->lb_buf2.resize(atc.numAtoms());
                local_c6.assign(pme->lb_buf1.begin(), pme->lb_buf1.end());
                for (int i = 0; i < atc.numAtoms(); ++i)
                {
                    local_c6[i] = atc.coefficient[i];
                }

                do_redist_pos_coeffs(pme, cr, FALSE, coordinates, RedistSigma);
                local_sigma.assign(pme->lb_buf2.begin(), pme->lb_buf2.end());
                for (int i = 0; i < atc.numAtoms(); ++i)
                {
                    local_sigma[i] = atc.coefficient[i];
                }

                wallcycle_stop(wcycle, WallCycleCounter::PmeRedistXF);
            }
            atc.coefficient = coefficientBuffer;
            calc_initial_lb_coeffs(coefficientBuffer, local_c6, local_sigma);

            /* Seven terms in LJ-PME with LB, grid_index < 2 reserved for electrostatics */
            for (grid_index = 2; grid_index < 9; ++grid_index)
            {
                /* Unpack structure */
                pmegrid    = &pme->pmegrid[grid_index];
                fftgrid    = pme->fftgrid[grid_index];
                pfft_setup = pme->pfft_setup[grid_index];
                calc_next_lb_coeffs(coefficientBuffer, local_sigma);
                grid = pmegrid->grid.grid;

                wallcycle_start(wcycle, WallCycleCounter::PmeSpread);
                /* Spread the c6 on a grid */
                spread_on_grid(pme, &atc, pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);

                if (bFirst)
                {
                    inc_nrnb(nrnb, eNR_WEIGHTS, DIM * atc.numAtoms());
                }

                inc_nrnb(nrnb,
                         eNR_SPREADBSP,
                         pme->pme_order * pme->pme_order * pme->pme_order * atc.numAtoms());
                if (pme->nthread == 1)
                {
                    wrap_periodic_pmegrid(pme, grid);
                    /* sum contributions to local grid from other nodes */
                    if (pme->nnodes > 1)
                    {
                        gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
                    }
                    copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
                }
                wallcycle_stop(wcycle, WallCycleCounter::PmeSpread);

                /* Here we start a large thread parallel region */
#pragma omp parallel num_threads(pme->nthread) private(thread)
                {
                    try
                    {
                        thread = gmx_omp_get_thread_num();
                        /* do 3d-fft */
                        if (thread == 0)
                        {
                            wallcycle_start(wcycle, WallCycleCounter::PmeFft);
                        }

                        gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX, thread, wcycle);
                        if (thread == 0)
                        {
                            wallcycle_stop(wcycle, WallCycleCounter::PmeFft);
                        }
                    }
                    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
                }
                bFirst = FALSE;
            }
            /* solve in k-space for our local cells */
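            /* All seven C6 grids are solved together (the pointer to
             * cfftgrid[2] below); the TRUE argument presumably selects the
             * LB cross-term path in solve_pme_lj_yzx. */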
#pragma omp parallel num_threads(pme->nthread) private(thread)
            {
                try
                {
                    int loop_count;
                    thread = gmx_omp_get_thread_num();
                    if (thread == 0)
                    {
                        wallcycle_start(wcycle, WallCycleCounter::LJPme);
                    }

                    loop_count =
                            solve_pme_lj_yzx(pme,
                                             &pme->cfftgrid[2],
                                             TRUE,
                                             scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
                                             computeEnergyAndVirial,
                                             pme->nthread,
                                             thread);
                    if (thread == 0)
                    {
                        wallcycle_stop(wcycle, WallCycleCounter::LJPme);
                        inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
                    }
                }
                GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
            }

            if (computeEnergyAndVirial)
            {
                /* This should only be called on the master thread and
                 * after the threads have synchronized.
                 */
                get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &output[fep_state]);
            }

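            /* The force buffer must still be cleared on the first gather
             * below unless the Coulomb gather earlier already filled it. */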
            bFirst = !pme->doCoulomb;
            calc_initial_lb_coeffs(coefficientBuffer, local_c6, local_sigma);
            for (grid_index = 8; grid_index >= 2; --grid_index)
            {
                /* Unpack structure */
                pmegrid    = &pme->pmegrid[grid_index];
                fftgrid    = pme->fftgrid[grid_index];
                pfft_setup = pme->pfft_setup[grid_index];
                grid       = pmegrid->grid.grid;
                calc_next_lb_coeffs(coefficientBuffer, local_sigma);
#pragma omp parallel num_threads(pme->nthread) private(thread)
                {
                    try
                    {
                        thread = gmx_omp_get_thread_num();
                        /* do 3d-invfft */
                        if (thread == 0)
                        {
                            wallcycle_start(wcycle, WallCycleCounter::PmeFft);
                        }

                        gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL, thread, wcycle);
                        if (thread == 0)
                        {
                            wallcycle_stop(wcycle, WallCycleCounter::PmeFft);

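                            /* Accounting only: a rough FFT operation count of
                             * order N*log2(N) over the full grid. */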
                            if (pme->nodeid == 0)
                            {
                                real ntot = pme->nkx * pme->nky * pme->nkz;
                                npme      = static_cast<int>(ntot * std::log(ntot) / std::log(2.0));
                                inc_nrnb(nrnb, eNR_FFT, 2 * npme);
                            }
                            wallcycle_start(wcycle, WallCycleCounter::PmeGather);
                        }

                        copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
                    }
                    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
                } /* #pragma omp parallel */

                /* distribute local grid to all nodes */
                if (pme->nnodes > 1)
                {
                    gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
                }

                unwrap_periodic_pmegrid(pme, grid);

                if (stepWork.computeForces)
                {
                    /* interpolate forces for our local atoms */
                    bClearF = (bFirst && PAR(cr));
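                    /* Each grid enters with its binomial weight from the
                     * sigma^6 expansion via lb_scale_factor[]. */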
                    scale   = pme->bFEP ? (fep_state < 1 ? 1.0 - lambda_lj : lambda_lj) : 1.0;
                    scale *= lb_scale_factor[grid_index - 2];

#pragma omp parallel for num_threads(pme->nthread) schedule(static)
                    for (thread = 0; thread < pme->nthread; thread++)
                    {
                        try
                        {
                            gather_f_bsplines(
                                    pme, grid, bClearF, &pme->atc[0], &pme->atc[0].spline[thread], scale);
                        }
                        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
                    }

                    inc_nrnb(nrnb,
                             eNR_GATHERFBSP,
                             pme->pme_order * pme->pme_order * pme->pme_order * pme->atc[0].numAtoms());
                }
                wallcycle_stop(wcycle, WallCycleCounter::PmeGather);

                bFirst = FALSE;
            } /* for (grid_index = 8; grid_index >= 2; --grid_index) */
        }     /* for (fep_state = 0; fep_state < fep_states_lj; ++fep_state) */
    }         /* if (pme->doLJ && pme->ljpme_combination_rule == LongRangeVdW::LB) */

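    /* With more than one PME rank the gathered forces live in the
     * atom-communication buffers; pass them back through each decomposition
     * dimension, the last one writing into the caller's force array. */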
    if (stepWork.computeForces && pme->nnodes > 1)
    {
        wallcycle_start(wcycle, WallCycleCounter::PmeRedistXF);
        for (d = 0; d < pme->ndecompdim; d++)
        {
            gmx::ArrayRef<gmx::RVec> forcesRef;
            if (d == pme->ndecompdim - 1)
            {
                const size_t numAtoms = coordinates.size();
                GMX_ASSERT(forces.size() >= numAtoms, "Need at least numAtoms forces");
                forcesRef = forces.subArray(0, numAtoms);
            }
            else
            {
                forcesRef = pme->atc[d + 1].f;
            }
            if (DOMAINDECOMP(cr))
            {
                dd_pmeredist_f(pme, &pme->atc[d], forcesRef, d == pme->ndecompdim - 1 && pme->bPPnode);
            }
        }

        wallcycle_stop(wcycle, WallCycleCounter::PmeRedistXF);
    }

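    /* Free-energy perturbation mixes the two end states linearly:
     *   E(lambda)  = (1 - lambda)*E_A + lambda*E_B
     *   dE/dlambda = E_B - E_A
     * applied independently to the Coulomb and Lennard-Jones parts below. */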
    if (computeEnergyAndVirial)
    {
        if (pme->doCoulomb)
        {
            if (!pme->bFEP_q)
            {
                *energy_q = output[0].coulombEnergy_;
                m_add(vir_q, output[0].coulombVirial_, vir_q);
            }
            else
            {
                *energy_q = (1.0 - lambda_q) * output[0].coulombEnergy_ + lambda_q * output[1].coulombEnergy_;
                *dvdlambda_q += output[1].coulombEnergy_ - output[0].coulombEnergy_;
                for (int i = 0; i < DIM; i++)
                {
                    for (int j = 0; j < DIM; j++)
                    {
                        vir_q[i][j] += (1.0 - lambda_q) * output[0].coulombVirial_[i][j]
                                       + lambda_q * output[1].coulombVirial_[i][j];
                    }
                }
            }
            if (debug)
            {
                fprintf(debug, "Electrostatic PME mesh energy: %g\n", *energy_q);
            }
        }
        else
        {
            *energy_q = 0;
        }

        if (pme->doLJ)
        {
            if (!pme->bFEP_lj)
            {
                *energy_lj = output[0].lennardJonesEnergy_;
                m_add(vir_lj, output[0].lennardJonesVirial_, vir_lj);
            }
            else
            {
                *energy_lj = (1.0 - lambda_lj) * output[0].lennardJonesEnergy_
                             + lambda_lj * output[1].lennardJonesEnergy_;
                *dvdlambda_lj += output[1].lennardJonesEnergy_ - output[0].lennardJonesEnergy_;
                for (int i = 0; i < DIM; i++)
                {
                    for (int j = 0; j < DIM; j++)
                    {
                        vir_lj[i][j] += (1.0 - lambda_lj) * output[0].lennardJonesVirial_[i][j]
                                        + lambda_lj * output[1].lennardJonesVirial_[i][j];
                    }
                }
            }
            if (debug)
            {
                fprintf(debug, "Lennard-Jones PME mesh energy: %g\n", *energy_lj);
            }
        }
        else
        {
            *energy_lj = 0;
        }
    }
    return 0;
}

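/* Tears down all PME data structures, including the GPU state when present;
 * a nullptr argument is a no-op. */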
void gmx_pme_destroy(gmx_pme_t* pme)
{
    if (!pme)
    {
        return;
    }

    delete pme->boxScaler;

    sfree(pme->nnx);
    sfree(pme->nny);
    sfree(pme->nnz);
    sfree(pme->fshx);
    sfree(pme->fshy);
    sfree(pme->fshz);

    for (int i = 0; i < pme->ngrids; ++i)
    {
        pmegrids_destroy(&pme->pmegrid[i]);
    }
    if (pme->pfft_setup)
    {
        for (int i = 0; i < pme->ngrids; ++i)
        {
            gmx_parallel_3dfft_destroy(pme->pfft_setup[i]);
        }
    }
    sfree(pme->fftgrid);
    sfree(pme->cfftgrid);
    sfree(pme->pfft_setup);

    for (int i = 0; i < DIM; i++)
    {
        sfree(pme->bsp_mod[i]);
    }

    sfree(pme->bufv);
    sfree(pme->bufr);

    if (pme->solve_work)
    {
        pme_free_all_work(&pme->solve_work, pme->nthread);
    }

    sfree(pme->sum_qgrid_tmp);
    sfree(pme->sum_qgrid_dd_tmp);

    destroy_pme_spline_work(pme->spline_work);

    if (pme->gpu != nullptr)
    {
        pme_gpu_destroy(pme->gpu);
    }

    delete pme;
}

void gmx_pme_reinit_atoms(gmx_pme_t*                pme,
                          const int                 numAtoms,
                          gmx::ArrayRef<const real> chargesA,
                          gmx::ArrayRef<const real> chargesB)
{
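    /* Presumably called whenever the local atom set changes (e.g. after
     * domain repartitioning): per-atom storage is resized here without
     * rebuilding the grids. */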
    if (pme->gpu != nullptr)
    {
        GMX_ASSERT(!(pme->bFEP_q && chargesB.empty()),
                   "B state charges must be specified if running Coulomb FEP on the GPU");
        pme_gpu_reinit_atoms(pme->gpu, numAtoms, chargesA.data(), pme->bFEP_q ? chargesB.data() : nullptr);
    }
    else
    {
        pme->atc[0].setNumAtoms(numAtoms);
        // TODO: set the charges here as well
    }
}

bool gmx_pme_grid_matches(const gmx_pme_t& pme, const ivec grid_size)
{
    return (pme.nkx == grid_size[XX] && pme.nky == grid_size[YY] && pme.nkz == grid_size[ZZ]);
}

void gmx::SeparatePmeRanksPermitted::disablePmeRanks(const std::string& reason)
{
    permitSeparatePmeRanks_ = false;

    if (!reason.empty())
    {
        reasons_.push_back(reason);
    }
}

bool gmx::SeparatePmeRanksPermitted::permitSeparatePmeRanks() const
{
    return permitSeparatePmeRanks_;
}

std::string gmx::SeparatePmeRanksPermitted::reasonsWhyDisabled() const
{
    return joinStrings(reasons_, "; ");
}
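
/* Usage sketch for SeparatePmeRanksPermitted (hypothetical caller, shown for
 * illustration only):
 *
 *   gmx::SeparatePmeRanksPermitted separatePmeRanksPermitted;
 *   separatePmeRanksPermitted.disablePmeRanks("feature X is incompatible");
 *   if (!separatePmeRanksPermitted.permitSeparatePmeRanks())
 *   {
 *       fprintf(stderr, "PME ranks disabled: %s\n",
 *               separatePmeRanksPermitted.reasonsWhyDisabled().c_str());
 *   }
 */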