/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017 The GROMACS development team.
 * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief This file contains function definitions necessary for
 * computing energies and forces for the PME long-ranged part (Coulomb
 * and LJ).
 *
 * \author Erik Lindahl <erik@kth.se>
 * \author Berk Hess <hess@kth.se>
 * \ingroup module_ewald
 */
/* IMPORTANT FOR DEVELOPERS:
 *
 * Triclinic pme stuff isn't entirely trivial, and we've experienced
 * some bugs during development (many of them due to me). To avoid
 * this in the future, please check the following things if you make
 * changes in this file:
 *
 * 1. You should obtain identical (at least to the PME precision)
 *    energies, forces, and virial for
 *    a rectangular box and a triclinic one where the z (or y) axis is
 *    tilted a whole box side. For instance you could use these boxes:
 *
 *    rectangular       triclinic
 *     2  0  0           2  0  0
 *     0  2  0           0  2  0
 *     0  0  6           2  2  6
 *
 * 2. You should check the energy conservation in a triclinic box.
 *
 * It might seem like overkill, but better safe than sorry.
 * /Erik 001109
 */
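/* Note on check 1: the triclinic box above is obtained from the rectangular
 * one by the substitution c' = a + b + c. Adding integer multiples of the
 * other box vectors to a box vector leaves the periodic lattice unchanged,
 * so both boxes describe the same infinite system and must therefore yield
 * the same energies, forces, and virial up to PME accuracy.
 */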

#include "gmxpre.h"

#include "pme.h"

#include "config.h"

#include <cassert>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>

#include <algorithm>
#include <list>

#include "gromacs/domdec/domdec.h"
#include "gromacs/ewald/ewald_utils.h"
#include "gromacs/fft/parallel_3dfft.h"
#include "gromacs/fileio/pdbio.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/hardware/hw_info.h"
#include "gromacs/math/gmxcomplex.h"
#include "gromacs/math/invertmatrix.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/forcerec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/real.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/stringutil.h"
#include "gromacs/utility/unique_cptr.h"

#include "calculate_spline_moduli.h"
#include "pme_gather.h"
#include "pme_gpu_internal.h"
#include "pme_grid.h"
#include "pme_internal.h"
#include "pme_redistribute.h"
#include "pme_solve.h"
#include "pme_spline_work.h"
#include "pme_spread.h"

/*! \brief Help build a descriptive message in \c error if there are
 * \c errorReasons why PME on GPU is not supported.
 *
 * \returns Whether the lack of errorReasons indicates there is support. */
static bool addMessageIfNotSupported(const std::list<std::string>& errorReasons, std::string* error)
{
    bool isSupported = errorReasons.empty();
    if (!isSupported && error)
    {
        std::string regressionTestMarker = "PME GPU does not support";
        // this prefix is tested for in the regression-test script gmxtest.pl
        *error = regressionTestMarker;
        if (errorReasons.size() == 1)
        {
            *error += " " + errorReasons.back();
        }
        else
        {
            *error += ": " + gmx::joinStrings(errorReasons, "; ");
        }
        *error += ".";
    }
    return isSupported;
}
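/* For example (illustrative values): a single reason yields the message
 *     "PME GPU does not support a double-precision build."
 * while multiple reasons yield
 *     "PME GPU does not support: a double-precision build; a non-GPU build."
 */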

bool pme_gpu_supports_build(std::string* error)
{
    std::list<std::string> errorReasons;
    if (GMX_DOUBLE)
    {
        errorReasons.emplace_back("a double-precision build");
    }
    if (GMX_GPU == GMX_GPU_NONE)
    {
        errorReasons.emplace_back("a non-GPU build");
    }
    return addMessageIfNotSupported(errorReasons, error);
}

bool pme_gpu_supports_hardware(const gmx_hw_info_t gmx_unused& hwinfo, std::string* error)
{
    std::list<std::string> errorReasons;

    if (GMX_GPU == GMX_GPU_OPENCL)
    {
#ifdef __APPLE__
        errorReasons.emplace_back("Apple OS X operating system");
#endif
    }
    return addMessageIfNotSupported(errorReasons, error);
}

bool pme_gpu_supports_input(const t_inputrec& ir, const gmx_mtop_t& mtop, std::string* error)
{
    std::list<std::string> errorReasons;
    if (!EEL_PME(ir.coulombtype))
    {
        errorReasons.emplace_back("systems that do not use PME for electrostatics");
    }
    if (ir.pme_order != 4)
    {
        errorReasons.emplace_back("interpolation orders other than 4");
    }
    if (ir.efep != efepNO)
    {
        if (gmx_mtop_has_perturbed_charges(mtop))
        {
            errorReasons.emplace_back(
                    "free energy calculations with perturbed charges (multiple grids)");
        }
    }
    if (EVDW_PME(ir.vdwtype))
    {
        errorReasons.emplace_back("Lennard-Jones PME");
    }
    if (!EI_DYNAMICS(ir.eI))
    {
        errorReasons.emplace_back(
                "non-dynamical integrators (PME GPU requires a dynamical integrator: md, sd, "
                "etc.)");
    }
    return addMessageIfNotSupported(errorReasons, error);
}
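/* A minimal usage sketch for a hypothetical caller: the returned flag gates
 * the GPU path and the message is reported to the user on failure.
 *
 *     std::string error;
 *     if (!pme_gpu_supports_input(*ir, mtop, &error))
 *     {
 *         GMX_LOG(mdlog.warning).asParagraph().appendText(error);
 *     }
 */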

/*! \brief \libinternal
 * Finds out whether PME with the given inputs can run on a GPU.
 * This function is the internal final check, validating the whole PME structure on creation;
 * it intentionally duplicates the preliminary checks of the externally exposed
 * pme_gpu_supports_input(), just in case.
 *
 * \param[in]  pme          The PME structure.
 * \param[out] error        The error message if the input is not supported on GPU.
 * \returns                 True if this PME input is possible to run on GPU, false otherwise.
 */
static bool pme_gpu_check_restrictions(const gmx_pme_t* pme, std::string* error)
{
    std::list<std::string> errorReasons;
    if (pme->nnodes != 1)
    {
        errorReasons.emplace_back("PME decomposition");
    }
    if (pme->pme_order != 4)
    {
        errorReasons.emplace_back("interpolation orders other than 4");
    }
    if (pme->bFEP)
    {
        errorReasons.emplace_back("free energy calculations (multiple grids)");
    }
    if (pme->doLJ)
    {
        errorReasons.emplace_back("Lennard-Jones PME");
    }
    if (GMX_DOUBLE)
    {
        errorReasons.emplace_back("double precision");
    }
    if (GMX_GPU == GMX_GPU_NONE)
    {
        errorReasons.emplace_back("non-GPU build of GROMACS");
    }

    return addMessageIfNotSupported(errorReasons, error);
}

PmeRunMode pme_run_mode(const gmx_pme_t* pme)
{
    GMX_ASSERT(pme != nullptr, "Expecting valid PME data pointer");
    return pme->runMode;
}

gmx::PinningPolicy pme_get_pinning_policy()
{
    return gmx::PinningPolicy::PinnedIfSupported;
}

/*! \brief Number of bytes in a cache line.
 *
 * Must also be a multiple of the SIMD and SIMD4 register size, to
 * preserve alignment.
 */
const int gmxCacheLineSize = 64;

//! Set up coordinate communication
static void setup_coordinate_communication(PmeAtomComm* atc)
{
    int nslab, n, i;
    int fw, bw;

    nslab = atc->nslab;

    n = 0;
    for (i = 1; i <= nslab / 2; i++)
    {
        fw = (atc->nodeid + i) % nslab;
        bw = (atc->nodeid - i + nslab) % nslab;
        if (n < nslab - 1)
        {
            atc->slabCommSetup[n].node_dest = fw;
            atc->slabCommSetup[n].node_src  = bw;
            n++;
        }
        if (n < nslab - 1)
        {
            atc->slabCommSetup[n].node_dest = bw;
            atc->slabCommSetup[n].node_src  = fw;
            n++;
        }
    }
}
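/* Worked example (illustrative): for nslab = 4 and nodeid = 0 the loop
 * produces slabCommSetup[0] = {node_dest 1, node_src 3},
 * slabCommSetup[1] = {node_dest 3, node_src 1} and
 * slabCommSetup[2] = {node_dest 2, node_src 2}: communication alternates
 * between increasingly distant slabs in the forward and backward
 * directions around the ring.
 */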

/*! \brief Round \p n up to the next multiple of \p f */
static int mult_up(int n, int f)
{
    return ((n + f - 1) / f) * f;
}
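/* For example, mult_up(10, 4) evaluates to ((10 + 3) / 4) * 4 = 12, while
 * mult_up(12, 4) stays at 12. */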

/*! \brief Return estimate of the load imbalance from the PME grid not being a good match for the number of PME ranks */
static double estimate_pme_load_imbalance(struct gmx_pme_t* pme)
{
    int    nma, nmi;
    double n1, n2, n3;

    nma = pme->nnodes_major;
    nmi = pme->nnodes_minor;

    n1 = mult_up(pme->nkx, nma) * mult_up(pme->nky, nmi) * pme->nkz;
    n2 = mult_up(pme->nkx, nma) * mult_up(pme->nkz, nmi) * pme->nky;
    n3 = mult_up(pme->nky, nma) * mult_up(pme->nkz, nmi) * pme->nkx;

    /* pme_solve is roughly double the cost of an fft */

    return (n1 + n2 + 3 * n3) / static_cast<double>(6 * pme->nkx * pme->nky * pme->nkz);
}
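/* Worked example (illustrative): with nkx = nky = nkz = 60, nnodes_major = 7
 * and nnodes_minor = 1, mult_up(60, 7) = 63, so the estimate is
 * (63 + 63 + 3 * 63) * 60 * 60 / (6 * 60 * 60 * 60) = 0.875, compared with
 * the perfectly divisible baseline of 5/6 ~ 0.833.
 */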

#ifndef DOXYGEN

PmeAtomComm::PmeAtomComm(MPI_Comm   PmeMpiCommunicator,
                         const int  numThreads,
                         const int  pmeOrder,
                         const int  dimIndex,
                         const bool doSpread) :
    dimind(dimIndex),
    bSpread(doSpread),
    pme_order(pmeOrder),
    nthread(numThreads),
    spline(nthread)
{
    if (PmeMpiCommunicator != MPI_COMM_NULL)
    {
        mpi_comm = PmeMpiCommunicator;
#    if GMX_MPI
        MPI_Comm_size(mpi_comm, &nslab);
        MPI_Comm_rank(mpi_comm, &nodeid);
#    endif
    }
    if (debug)
    {
        fprintf(debug, "For PME atom communication in dimind %d: nslab %d rank %d\n", dimind, nslab, nodeid);
    }

    if (nslab > 1)
    {
        slabCommSetup.resize(nslab);
        setup_coordinate_communication(this);

        count_thread.resize(nthread);
        for (auto& countThread : count_thread)
        {
            countThread.resize(nslab);
        }
    }

    if (nthread > 1)
    {
        threadMap.resize(nthread);

#    pragma omp parallel for num_threads(nthread) schedule(static)
        for (int thread = 0; thread < nthread; thread++)
        {
            try
            {
                /* Allocate buffer with padding to avoid cache pollution */
                threadMap[thread].nBuffer.resize(nthread + 2 * gmxCacheLineSize);
                threadMap[thread].n = threadMap[thread].nBuffer.data() + gmxCacheLineSize;
            }
            GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
        }
    }
}

#endif // !DOXYGEN
/*! \brief Initialize data structure for communication */
static void init_overlap_comm(pme_overlap_t* ol, int norder, MPI_Comm comm, int nnodes, int nodeid, int ndata, int commplainsize)
{
    gmx_bool bCont;

    ol->mpi_comm = comm;
    ol->nnodes   = nnodes;
    ol->nodeid   = nodeid;

    /* Linear translation of the PME grid won't affect reciprocal space
     * calculations, so to optimize we only interpolate "upwards",
     * which also means we only have to consider overlap in one direction.
     * I.e., particles on this node might also be spread to grid indices
     * that belong to higher nodes (modulo nnodes)
     */

    ol->s2g0.resize(ol->nnodes + 1);
    ol->s2g1.resize(ol->nnodes);
    if (debug)
    {
        fprintf(debug, "PME slab boundaries:");
    }
    for (int i = 0; i < nnodes; i++)
    {
        /* s2g0 the local interpolation grid start.
         * s2g1 the local interpolation grid end.
         * Since in calc_pidx we divide particles, and not grid lines,
         * spatially uniformly along dimension x or y, we need to round
         * s2g0 down and s2g1 up.
         */
        ol->s2g0[i] = (i * ndata + 0) / nnodes;
        ol->s2g1[i] = ((i + 1) * ndata + nnodes - 1) / nnodes + norder - 1;

        if (debug)
        {
            fprintf(debug, "  %3d %3d", ol->s2g0[i], ol->s2g1[i]);
        }
    }
    ol->s2g0[nnodes] = ndata;
    if (debug)
    {
        fprintf(debug, "\n");
    }

    /* Determine how many ranks we need to communicate the grid overlap with */
    int testRankCount = 0;
    do
    {
        testRankCount++;
        bCont = FALSE;
        for (int i = 0; i < nnodes; i++)
        {
            if ((i + testRankCount < nnodes && ol->s2g1[i] > ol->s2g0[i + testRankCount])
                || (i + testRankCount >= nnodes && ol->s2g1[i] > ol->s2g0[i + testRankCount - nnodes] + ndata))
            {
                bCont = TRUE;
            }
        }
    } while (bCont && testRankCount < nnodes);

    ol->comm_data.resize(testRankCount - 1);
    ol->send_size = 0;

    for (size_t b = 0; b < ol->comm_data.size(); b++)
    {
        pme_grid_comm_t* pgc = &ol->comm_data[b];

        /* Send */
        pgc->send_id  = (ol->nodeid + (b + 1)) % ol->nnodes;
        int fft_start = ol->s2g0[pgc->send_id];
        int fft_end   = ol->s2g0[pgc->send_id + 1];
        if (pgc->send_id < nodeid)
        {
            fft_start += ndata;
            fft_end += ndata;
        }
        int send_index1  = ol->s2g1[nodeid];
        send_index1      = std::min(send_index1, fft_end);
        pgc->send_index0 = fft_start;
        pgc->send_nindex = std::max(0, send_index1 - pgc->send_index0);
        ol->send_size += pgc->send_nindex;

        /* We always start receiving to the first index of our slab */
        pgc->recv_id    = (ol->nodeid - (b + 1) + ol->nnodes) % ol->nnodes;
        fft_start       = ol->s2g0[ol->nodeid];
        fft_end         = ol->s2g0[ol->nodeid + 1];
        int recv_index1 = ol->s2g1[pgc->recv_id];
        if (pgc->recv_id > nodeid)
        {
            recv_index1 -= ndata;
        }
        recv_index1      = std::min(recv_index1, fft_end);
        pgc->recv_index0 = fft_start;
        pgc->recv_nindex = std::max(0, recv_index1 - pgc->recv_index0);
    }

#if GMX_MPI
    /* Communicate the buffer sizes to receive */
    MPI_Status stat;
    for (size_t b = 0; b < ol->comm_data.size(); b++)
    {
        MPI_Sendrecv(&ol->send_size, 1, MPI_INT, ol->comm_data[b].send_id, b, &ol->comm_data[b].recv_size,
                     1, MPI_INT, ol->comm_data[b].recv_id, b, ol->mpi_comm, &stat);
    }
#endif

    /* For a non-divisible grid we need pme_order instead of pme_order-1 */
    ol->sendbuf.resize(norder * commplainsize);
    ol->recvbuf.resize(norder * commplainsize);
}
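/* Worked example (illustrative): for ndata = 20, nnodes = 4 and norder = 4,
 * the slab starts are s2g0 = {0, 5, 10, 15, 20} and the (overlapping) slab
 * ends are s2g1 = {8, 13, 18, 23}. Each slab spreads up to 3 grid lines into
 * the next slab only, so a single communication pulse per rank pair suffices
 * and comm_data.size() == 1.
 */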

int minimalPmeGridSize(int pmeOrder)
{
    /* The actual grid size limitations are:
     *   serial:        >= pme_order
     *   DD, no OpenMP: >= 2*(pme_order - 1)
     *   DD, OpenMP:    >= pme_order + 1
     * But we use the maximum for simplicity since in practice there is not
     * much performance difference between pme_order and 2*(pme_order - 1).
     */
    int minimalSize = 2 * (pmeOrder - 1);

    GMX_RELEASE_ASSERT(pmeOrder >= 3, "pmeOrder has to be >= 3");
    GMX_RELEASE_ASSERT(minimalSize >= pmeOrder + 1, "The grid size should be >= pmeOrder + 1");

    return minimalSize;
}
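/* For the default pmeOrder = 4 this returns 6, which satisfies all three
 * limitations listed above (>= 4, >= 6 and >= 5, respectively). */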

bool gmx_pme_check_restrictions(int pme_order, int nkx, int nky, int nkz, int numPmeDomainsAlongX, bool useThreads, bool errorsAreFatal)
{
    if (pme_order > PME_ORDER_MAX)
    {
        if (!errorsAreFatal)
        {
            return false;
        }

        std::string message = gmx::formatString(
                "pme_order (%d) is larger than the maximum allowed value (%d). Modify and "
                "recompile the code if you really need such a high order.",
                pme_order, PME_ORDER_MAX);
        GMX_THROW(gmx::InconsistentInputError(message));
    }

    const int minGridSize = minimalPmeGridSize(pme_order);
    if (nkx < minGridSize || nky < minGridSize || nkz < minGridSize)
    {
        if (!errorsAreFatal)
        {
            return false;
        }
        std::string message =
                gmx::formatString("The PME grid sizes need to be >= 2*(pme_order-1) (%d)", minGridSize);
        GMX_THROW(gmx::InconsistentInputError(message));
    }

    /* Check for a limitation of the (current) sum_fftgrid_dd code.
     * We only allow multiple communication pulses in dim 1, not in dim 0.
     */
    if (useThreads
        && (nkx < numPmeDomainsAlongX * pme_order && nkx != numPmeDomainsAlongX * (pme_order - 1)))
    {
        if (!errorsAreFatal)
        {
            return false;
        }
        gmx_fatal(FARGS,
                  "The number of PME grid lines per rank along x is %g. But when using OpenMP "
                  "threads, the number of grid lines per rank along x should be >= pme_order (%d) "
                  "or = pme_order-1. To resolve this issue, use fewer ranks along x (and possibly "
                  "more along y and/or z) by specifying -dd manually.",
                  nkx / static_cast<double>(numPmeDomainsAlongX), pme_order);
    }

    return true;
}

/*! \brief Return \p enumerator divided by \p denominator, rounded up to the nearest integer */
static int div_round_up(int enumerator, int denominator)
{
    return (enumerator + denominator - 1) / denominator;
}
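/* For example, div_round_up(20, 8) = (20 + 7) / 8 = 3, where plain integer
 * division would give 2. */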

gmx_pme_t* gmx_pme_init(const t_commrec*     cr,
                        const NumPmeDomains& numPmeDomains,
                        const t_inputrec*    ir,
                        gmx_bool             bFreeEnergy_q,
                        gmx_bool             bFreeEnergy_lj,
                        gmx_bool             bReproducible,
                        real                 ewaldcoeff_q,
                        real                 ewaldcoeff_lj,
                        int                  nthread,
                        PmeRunMode           runMode,
                        PmeGpu*              pmeGpu,
                        const DeviceContext* deviceContext,
                        const DeviceStream*  deviceStream,
                        const PmeGpuProgram* pmeGpuProgram,
                        const gmx::MDLogger& /*mdlog*/)
{
    int  use_threads, sum_use_threads, i;
    ivec ndata;

    if (debug)
    {
        fprintf(debug, "Creating PME data structures.\n");
    }

    gmx::unique_cptr<gmx_pme_t, gmx_pme_destroy> pme(new gmx_pme_t());

    pme->sum_qgrid_tmp    = nullptr;
    pme->sum_qgrid_dd_tmp = nullptr;

    pme->buf_nalloc = 0;

    pme->nnodes  = 1;
    pme->bPPnode = TRUE;

    pme->nnodes_major = numPmeDomains.x;
    pme->nnodes_minor = numPmeDomains.y;

    if (numPmeDomains.x * numPmeDomains.y > 1)
    {
        pme->mpi_comm = cr->mpi_comm_mygroup;

#if GMX_MPI
        MPI_Comm_rank(pme->mpi_comm, &pme->nodeid);
        MPI_Comm_size(pme->mpi_comm, &pme->nnodes);
#endif
        if (pme->nnodes != numPmeDomains.x * numPmeDomains.y)
        {
            gmx_incons("PME rank count mismatch");
        }
    }
    else
    {
        pme->mpi_comm = MPI_COMM_NULL;
    }

    if (pme->nnodes == 1)
    {
        pme->mpi_comm_d[0] = MPI_COMM_NULL;
        pme->mpi_comm_d[1] = MPI_COMM_NULL;
        pme->ndecompdim    = 0;
        pme->nodeid_major  = 0;
        pme->nodeid_minor  = 0;
    }
    else
    {
        if (numPmeDomains.y == 1)
        {
            pme->mpi_comm_d[0] = pme->mpi_comm;
            pme->mpi_comm_d[1] = MPI_COMM_NULL;
            pme->ndecompdim    = 1;
            pme->nodeid_major  = pme->nodeid;
            pme->nodeid_minor  = 0;
        }
        else if (numPmeDomains.x == 1)
        {
            pme->mpi_comm_d[0] = MPI_COMM_NULL;
            pme->mpi_comm_d[1] = pme->mpi_comm;
            pme->ndecompdim    = 1;
            pme->nodeid_major  = 0;
            pme->nodeid_minor  = pme->nodeid;
        }
        else
        {
            if (pme->nnodes % numPmeDomains.x != 0)
            {
                gmx_incons(
                        "For 2D PME decomposition, #PME ranks must be divisible by the number of "
                        "domains along x");
            }
            pme->ndecompdim = 2;

#if GMX_MPI
            MPI_Comm_split(pme->mpi_comm, pme->nodeid % numPmeDomains.y, pme->nodeid,
                           &pme->mpi_comm_d[0]); /* My communicator along major dimension */
            MPI_Comm_split(pme->mpi_comm, pme->nodeid / numPmeDomains.y, pme->nodeid,
                           &pme->mpi_comm_d[1]); /* My communicator along minor dimension */

            MPI_Comm_rank(pme->mpi_comm_d[0], &pme->nodeid_major);
            MPI_Comm_size(pme->mpi_comm_d[0], &pme->nnodes_major);
            MPI_Comm_rank(pme->mpi_comm_d[1], &pme->nodeid_minor);
            MPI_Comm_size(pme->mpi_comm_d[1], &pme->nnodes_minor);
#endif
        }
    }
    // cr is always initialized if there is a PP rank, so we can safely assume
    // that when it is not, like in the ewald tests, we are not on a PP rank.
    pme->bPPnode = ((cr != nullptr && cr->duty != 0) && thisRankHasDuty(cr, DUTY_PP));

    pme->nthread = nthread;

    /* Check if any of the PME MPI ranks uses threads */
    use_threads = (pme->nthread > 1 ? 1 : 0);
#if GMX_MPI
    if (pme->nnodes > 1)
    {
        MPI_Allreduce(&use_threads, &sum_use_threads, 1, MPI_INT, MPI_SUM, pme->mpi_comm);
    }
    else
#endif
    {
        sum_use_threads = use_threads;
    }
    pme->bUseThreads = (sum_use_threads > 0);

    if (ir->pbcType == PbcType::Screw)
    {
        gmx_fatal(FARGS, "pme does not (yet) work with pbc = screw");
    }

    /* NOTE:
     * It is likely that the current gmx_pme_do() routine supports calculating
     * only Coulomb or LJ while gmx_pme_init() configures for both,
     * but that has never been tested.
     * It is likely that the current gmx_pme_do() routine supports skipping
     * the free-energy calculation for Coulomb and/or LJ even when
     * gmx_pme_init() was configured with free energy, but that has never
     * been tested.
     */
    pme->doCoulomb     = EEL_PME(ir->coulombtype);
    pme->doLJ          = EVDW_PME(ir->vdwtype);
    pme->bFEP_q        = ((ir->efep != efepNO) && bFreeEnergy_q);
    pme->bFEP_lj       = ((ir->efep != efepNO) && bFreeEnergy_lj);
    pme->bFEP          = (pme->bFEP_q || pme->bFEP_lj);
    pme->nkx           = ir->nkx;
    pme->nky           = ir->nky;
    pme->nkz           = ir->nkz;
    pme->bP3M          = (ir->coulombtype == eelP3M_AD || getenv("GMX_PME_P3M") != nullptr);
    pme->pme_order     = ir->pme_order;
    pme->ewaldcoeff_q  = ewaldcoeff_q;
    pme->ewaldcoeff_lj = ewaldcoeff_lj;

    /* Always constant electrostatics coefficients */
    pme->epsilon_r = ir->epsilon_r;

    /* Always constant LJ coefficients */
    pme->ljpme_combination_rule = ir->ljpme_combination_rule;

    // The box requires scaling with nwalls = 2, we store that condition as well
    // as the scaling factor
    delete pme->boxScaler;
    pme->boxScaler = new EwaldBoxZScaler(*ir);

    /* If we violate restrictions, generate a fatal error here */
    gmx_pme_check_restrictions(pme->pme_order, pme->nkx, pme->nky, pme->nkz, pme->nnodes_major,
                               pme->bUseThreads, true);

    if (pme->nnodes > 1)
    {
        double imbal;

#if GMX_MPI
        MPI_Type_contiguous(DIM, GMX_MPI_REAL, &(pme->rvec_mpi));
        MPI_Type_commit(&(pme->rvec_mpi));
#endif

        /* Note that the coefficient spreading and force gathering, which usually
         * takes about the same amount of time as FFT+solve_pme,
         * is always fully load balanced
         * (unless the coefficient distribution is inhomogeneous).
         */

        imbal = estimate_pme_load_imbalance(pme.get());
        if (imbal >= 1.2 && pme->nodeid_major == 0 && pme->nodeid_minor == 0)
        {
            fprintf(stderr,
                    "\n"
                    "NOTE: The load imbalance in PME FFT and solve is %d%%.\n"
                    "      For optimal PME load balancing\n"
                    "      PME grid_x (%d) and grid_y (%d) should be divisible by #PME_ranks_x "
                    "(%d)\n"
                    "      and PME grid_y (%d) and grid_z (%d) should be divisible by #PME_ranks_y "
                    "(%d)\n"
                    "\n",
                    gmx::roundToInt((imbal - 1) * 100), pme->nkx, pme->nky, pme->nnodes_major,
                    pme->nky, pme->nkz, pme->nnodes_minor);
        }
    }

    /* For a non-divisible grid we need pme_order instead of pme_order-1 */
    /* In sum_qgrid_dd x overlap is copied in place: take padding into account.
     * y is always copied through a buffer: we don't need padding in z,
     * but we do need the overlap in x because of the communication order.
     */
    init_overlap_comm(&pme->overlap[0], pme->pme_order, pme->mpi_comm_d[0], pme->nnodes_major,
                      pme->nodeid_major, pme->nkx,
                      (div_round_up(pme->nky, pme->nnodes_minor) + pme->pme_order)
                              * (pme->nkz + pme->pme_order - 1));

    /* Along overlap dim 1 we can send in multiple pulses in sum_fftgrid_dd.
     * We do this with an offset buffer of equal size, so we need to allocate
     * extra for the offset. That's what the (+1)*pme->nkz is for.
     */
    init_overlap_comm(&pme->overlap[1], pme->pme_order, pme->mpi_comm_d[1], pme->nnodes_minor,
                      pme->nodeid_minor, pme->nky,
                      (div_round_up(pme->nkx, pme->nnodes_major) + pme->pme_order + 1) * pme->nkz);

    /* Double-check for a limitation of the (current) sum_fftgrid_dd code.
     * Note that gmx_pme_check_restrictions checked for this already.
     */
    if (pme->bUseThreads && (pme->overlap[0].comm_data.size() > 1))
    {
        gmx_incons(
                "More than one communication pulse required for grid overlap communication along "
                "the major dimension while using threads");
    }

    snew(pme->bsp_mod[XX], pme->nkx);
    snew(pme->bsp_mod[YY], pme->nky);
    snew(pme->bsp_mod[ZZ], pme->nkz);

    pme->gpu     = pmeGpu; /* Carrying over the single GPU structure */
    pme->runMode = runMode;

    /* The required size of the interpolation grid, including overlap.
     * The allocated size (pmegrid_n?) might be slightly larger.
     */
    pme->pmegrid_nx = pme->overlap[0].s2g1[pme->nodeid_major] - pme->overlap[0].s2g0[pme->nodeid_major];
    pme->pmegrid_ny = pme->overlap[1].s2g1[pme->nodeid_minor] - pme->overlap[1].s2g0[pme->nodeid_minor];
    pme->pmegrid_nz_base = pme->nkz;
    pme->pmegrid_nz      = pme->pmegrid_nz_base + pme->pme_order - 1;
    set_grid_alignment(&pme->pmegrid_nz, pme->pme_order);
    pme->pmegrid_start_ix = pme->overlap[0].s2g0[pme->nodeid_major];
    pme->pmegrid_start_iy = pme->overlap[1].s2g0[pme->nodeid_minor];
    pme->pmegrid_start_iz = 0;

    make_gridindex_to_localindex(pme->nkx, pme->pmegrid_start_ix,
                                 pme->pmegrid_nx - (pme->pme_order - 1), &pme->nnx, &pme->fshx);
    make_gridindex_to_localindex(pme->nky, pme->pmegrid_start_iy,
                                 pme->pmegrid_ny - (pme->pme_order - 1), &pme->nny, &pme->fshy);
    make_gridindex_to_localindex(pme->nkz, pme->pmegrid_start_iz, pme->pmegrid_nz_base, &pme->nnz,
                                 &pme->fshz);

    pme->spline_work = make_pme_spline_work(pme->pme_order);

    ndata[0] = pme->nkx;
    ndata[1] = pme->nky;
    ndata[2] = pme->nkz;
    /* It doesn't matter if we allocate too many grids here,
     * we only allocate and use the ones we need.
     */
    if (pme->doLJ)
    {
        pme->ngrids = ((ir->ljpme_combination_rule == eljpmeLB) ? DO_Q_AND_LJ_LB : DO_Q_AND_LJ);
    }
    else
    {
        pme->ngrids = DO_Q;
    }
    snew(pme->fftgrid, pme->ngrids);
    snew(pme->cfftgrid, pme->ngrids);
    snew(pme->pfft_setup, pme->ngrids);

    for (i = 0; i < pme->ngrids; ++i)
    {
        if ((i < DO_Q && pme->doCoulomb && (i == 0 || bFreeEnergy_q))
            || (i >= DO_Q && pme->doLJ
                && (i == 2 || bFreeEnergy_lj || ir->ljpme_combination_rule == eljpmeLB)))
        {
            pmegrids_init(&pme->pmegrid[i], pme->pmegrid_nx, pme->pmegrid_ny, pme->pmegrid_nz,
                          pme->pmegrid_nz_base, pme->pme_order, pme->bUseThreads, pme->nthread,
                          pme->overlap[0].s2g1[pme->nodeid_major]
                                  - pme->overlap[0].s2g0[pme->nodeid_major + 1],
                          pme->overlap[1].s2g1[pme->nodeid_minor]
                                  - pme->overlap[1].s2g0[pme->nodeid_minor + 1]);
            /* This routine will allocate the grid data to fit the FFTs */
            const auto allocateRealGridForGpu = (pme->runMode == PmeRunMode::Mixed)
                                                        ? gmx::PinningPolicy::PinnedIfSupported
                                                        : gmx::PinningPolicy::CannotBePinned;
            gmx_parallel_3dfft_init(&pme->pfft_setup[i], ndata, &pme->fftgrid[i], &pme->cfftgrid[i],
                                    pme->mpi_comm_d, bReproducible, pme->nthread, allocateRealGridForGpu);
        }
    }

    if (!pme->bP3M)
    {
        /* Use plain SPME B-spline interpolation */
        make_bspline_moduli(pme->bsp_mod, pme->nkx, pme->nky, pme->nkz, pme->pme_order);
    }
    else
    {
        /* Use the P3M grid-optimized influence function */
        make_p3m_bspline_moduli(pme->bsp_mod, pme->nkx, pme->nky, pme->nkz, pme->pme_order);
    }

    /* Use atc[0] for spreading */
    const int firstDimIndex   = (numPmeDomains.x > 1 ? 0 : 1);
    MPI_Comm  mpiCommFirstDim = (pme->nnodes > 1 ? pme->mpi_comm_d[firstDimIndex] : MPI_COMM_NULL);
    bool      doSpread        = true;
    pme->atc.emplace_back(mpiCommFirstDim, pme->nthread, pme->pme_order, firstDimIndex, doSpread);
    if (pme->ndecompdim >= 2)
    {
        const int secondDimIndex = 1;
        doSpread                 = false;
        pme->atc.emplace_back(pme->mpi_comm_d[1], pme->nthread, pme->pme_order, secondDimIndex, doSpread);
    }

    // Initial check of validity of the input for running on the GPU
    if (pme->runMode != PmeRunMode::CPU)
    {
        std::string errorString;
        bool        canRunOnGpu = pme_gpu_check_restrictions(pme.get(), &errorString);
        if (!canRunOnGpu)
        {
            GMX_THROW(gmx::NotImplementedError(errorString));
        }
        pme_gpu_reinit(pme.get(), deviceContext, deviceStream, pmeGpuProgram);
    }
    else
    {
        GMX_ASSERT(pme->gpu == nullptr, "Should not have PME GPU object when PME is on a CPU.");
    }


    pme_init_all_work(&pme->solve_work, pme->nthread, pme->nkx);

    // No exception was thrown during the init, so we hand over the PME structure handle
    return pme.release();
}
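/* A minimal call sketch (hypothetical caller, serial CPU-only run): with a
 * single PME domain, no GPU objects or device handles are needed.
 *
 *     NumPmeDomains numPmeDomains = { 1, 1 };
 *     gmx_pme_t*    pme           = gmx_pme_init(cr, numPmeDomains, ir, FALSE, FALSE, FALSE,
 *                                                ewaldcoeff_q, ewaldcoeff_lj, 1, PmeRunMode::CPU,
 *                                                nullptr, nullptr, nullptr, nullptr, mdlog);
 */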

void gmx_pme_reinit(struct gmx_pme_t** pmedata,
                    const t_commrec*   cr,
                    struct gmx_pme_t*  pme_src,
                    const t_inputrec*  ir,
                    const ivec         grid_size,
                    real               ewaldcoeff_q,
                    real               ewaldcoeff_lj)
{
    // Create a copy of the t_inputrec fields that are used in gmx_pme_init().
    // TODO: This would be better as just copying a sub-structure that contains
    // all the PME parameters and nothing else.
    t_inputrec irc;
    irc.pbcType                = ir->pbcType;
    irc.coulombtype            = ir->coulombtype;
    irc.vdwtype                = ir->vdwtype;
    irc.efep                   = ir->efep;
    irc.pme_order              = ir->pme_order;
    irc.epsilon_r              = ir->epsilon_r;
    irc.ljpme_combination_rule = ir->ljpme_combination_rule;
    irc.nkx                    = grid_size[XX];
    irc.nky                    = grid_size[YY];
    irc.nkz                    = grid_size[ZZ];

    try
    {
        const gmx::MDLogger dummyLogger;
        // This is a reinit, which currently only changes the grid size/coefficients,
        // so we don't expect any actual logging.
        // TODO: when PME is an object, it should take a reference to mdlog on construction and save it.
        GMX_ASSERT(pmedata, "Invalid PME pointer");
        NumPmeDomains numPmeDomains = { pme_src->nnodes_major, pme_src->nnodes_minor };
        *pmedata = gmx_pme_init(cr, numPmeDomains, &irc, pme_src->bFEP_q, pme_src->bFEP_lj, FALSE,
                                ewaldcoeff_q, ewaldcoeff_lj, pme_src->nthread, pme_src->runMode,
                                pme_src->gpu, nullptr, nullptr, nullptr, dummyLogger);
        /* When running PME on the CPU not using domain decomposition,
         * the atom data is allocated once only in gmx_pme_(re)init().
         */
        if (!pme_src->gpu && pme_src->nnodes == 1)
        {
            gmx_pme_reinit_atoms(*pmedata, pme_src->atc[0].numAtoms(), nullptr);
        }
        // TODO this is mostly passing around current values
    }
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR

    /* We can easily reuse the allocated pme grids in pme_src */
    reuse_pmegrids(&pme_src->pmegrid[PME_GRID_QA], &(*pmedata)->pmegrid[PME_GRID_QA]);
    /* We would like to reuse the fft grids, but that's harder */
}

void gmx_pme_calc_energy(gmx_pme_t* pme, gmx::ArrayRef<const gmx::RVec> x, gmx::ArrayRef<const real> q, real* V)
{
    pmegrids_t* grid;

    if (pme->nnodes > 1)
    {
        gmx_incons("gmx_pme_calc_energy called in parallel");
    }
    if (pme->bFEP_q)
    {
        gmx_incons("gmx_pme_calc_energy with free energy");
    }

    if (!pme->atc_energy)
    {
        pme->atc_energy = std::make_unique<PmeAtomComm>(MPI_COMM_NULL, 1, pme->pme_order, 0, true);
    }
    PmeAtomComm* atc = pme->atc_energy.get();
    atc->setNumAtoms(x.ssize());
    atc->x           = x;
    atc->coefficient = q;

    /* We only use the A-charges grid */
    grid = &pme->pmegrid[PME_GRID_QA];

    /* Only calculate the spline coefficients, don't actually spread */
    spread_on_grid(pme, atc, nullptr, TRUE, FALSE, pme->fftgrid[PME_GRID_QA], FALSE, PME_GRID_QA);

    *V = gather_energy_bsplines(pme, grid->grid.grid, atc);
}

/*! \brief Calculate initial Lorentz-Berthelot coefficients for LJ-PME */
static void calc_initial_lb_coeffs(gmx::ArrayRef<real> coefficient, const real* local_c6, const real* local_sigma)
{
    for (gmx::index i = 0; i < coefficient.ssize(); ++i)
    {
        real sigma4    = local_sigma[i];
        sigma4         = sigma4 * sigma4;
        sigma4         = sigma4 * sigma4;
        coefficient[i] = local_c6[i] / sigma4;
    }
}

/*! \brief Calculate next Lorentz-Berthelot coefficients for LJ-PME */
static void calc_next_lb_coeffs(gmx::ArrayRef<real> coefficient, const real* local_sigma)
{
    for (gmx::index i = 0; i < coefficient.ssize(); ++i)
    {
        coefficient[i] *= local_sigma[i];
    }
}
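/* How these two helpers combine (sketch): calc_initial_lb_coeffs() loads
 * c6_i / sigma_i^4 and every subsequent calc_next_lb_coeffs() call multiplies
 * by sigma_i. The seven LJ-PME grids used with Lorentz-Berthelot combination
 * rules (grid_index 2..8 below) therefore carry per-atom factors proportional
 * to sigma_i^-3 .. sigma_i^+3, one for each of the seven terms in the
 * binomial expansion of the combined sigma_ij^6 = ((sigma_i + sigma_j)/2)^6.
 */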

int gmx_pme_do(struct gmx_pme_t*              pme,
               gmx::ArrayRef<const gmx::RVec> coordinates,
               gmx::ArrayRef<gmx::RVec>       forces,
               real                           chargeA[],
               real                           chargeB[],
               real                           c6A[],
               real                           c6B[],
               real                           sigmaA[],
               real                           sigmaB[],
               const matrix                   box,
               const t_commrec*               cr,
               int                            maxshift_x,
               int                            maxshift_y,
               t_nrnb*                        nrnb,
               gmx_wallcycle*                 wcycle,
               matrix                         vir_q,
               matrix                         vir_lj,
               real*                          energy_q,
               real*                          energy_lj,
               real                           lambda_q,
               real                           lambda_lj,
               real*                          dvdlambda_q,
               real*                          dvdlambda_lj,
               const gmx::StepWorkload&       stepWork)
{
    GMX_ASSERT(pme->runMode == PmeRunMode::CPU,
               "gmx_pme_do should not be called when PME runs on a GPU.");

    int                  d, npme, grid_index, max_grid_index;
    PmeAtomComm&         atc         = pme->atc[0];
    pmegrids_t*          pmegrid     = nullptr;
    real*                grid        = nullptr;
    real*                coefficient = nullptr;
    PmeOutput            output[2]; // The second is used for the B state with FEP
    real                 scale, lambda;
    gmx_bool             bClearF;
    gmx_parallel_3dfft_t pfft_setup;
    real*                fftgrid;
    t_complex*           cfftgrid;
    int                  thread;
    gmx_bool             bFirst, bDoSplines;
    int                  fep_state;
    int                  fep_states_lj = pme->bFEP_lj ? 2 : 1;
    // There's no support for computing energy without virial, or vice versa
    const bool computeEnergyAndVirial = (stepWork.computeEnergy || stepWork.computeVirial);

    /* We could be passing lambda!=0 while no q or LJ is actually perturbed */
    if (!pme->bFEP_q)
    {
        lambda_q = 0;
    }
    if (!pme->bFEP_lj)
    {
        lambda_lj = 0;
    }

    assert(pme->nnodes > 0);
    assert(pme->nnodes == 1 || pme->ndecompdim > 0);

    if (pme->nnodes > 1)
    {
        atc.pd.resize(coordinates.ssize());
        for (int d = pme->ndecompdim - 1; d >= 0; d--)
        {
            PmeAtomComm& atc = pme->atc[d];
            atc.maxshift     = (atc.dimind == 0 ? maxshift_x : maxshift_y);
        }
    }
    else
    {
        GMX_ASSERT(coordinates.ssize() == atc.numAtoms(), "We expect atc.numAtoms() coordinates");
        GMX_ASSERT(forces.ssize() >= atc.numAtoms(),
                   "We need a force buffer with at least atc.numAtoms() elements");

        atc.x = coordinates;
        atc.f = forces;
    }

    matrix scaledBox;
    pme->boxScaler->scaleBox(box, scaledBox);

    gmx::invertBoxMatrix(scaledBox, pme->recipbox);
    bFirst = TRUE;

    /* For simplicity, we construct the splines for all particles if
     * more than one PME calculation is needed. Some optimization
     * could be done by keeping track of which atoms have splines
     * constructed, and construct new splines on each pass for atoms
     * that don't yet have them.
     */

    bDoSplines = pme->bFEP || (pme->doCoulomb && pme->doLJ);

    /* We need a maximum of four separate PME calculations:
     * grid_index=0: Coulomb PME with charges from state A
     * grid_index=1: Coulomb PME with charges from state B
     * grid_index=2: LJ PME with C6 from state A
     * grid_index=3: LJ PME with C6 from state B
     * For Lorentz-Berthelot combination rules, a separate loop is used to
     * calculate all the terms
     */

    /* If we are doing LJ-PME with LB, we only do Q here */
    max_grid_index = (pme->ljpme_combination_rule == eljpmeLB) ? DO_Q : DO_Q_AND_LJ;

    for (grid_index = 0; grid_index < max_grid_index; ++grid_index)
    {
        /* Check if we should do calculations at this grid_index
         * If grid_index is odd we should be doing FEP
         * If grid_index < 2 we should be doing electrostatic PME
         * If grid_index >= 2 we should be doing LJ-PME
         */
        if ((grid_index < DO_Q && (!pme->doCoulomb || (grid_index == 1 && !pme->bFEP_q)))
            || (grid_index >= DO_Q && (!pme->doLJ || (grid_index == 3 && !pme->bFEP_lj))))
        {
            continue;
        }
        /* Unpack structure */
        pmegrid    = &pme->pmegrid[grid_index];
        fftgrid    = pme->fftgrid[grid_index];
        cfftgrid   = pme->cfftgrid[grid_index];
        pfft_setup = pme->pfft_setup[grid_index];
        switch (grid_index)
        {
            case 0: coefficient = chargeA; break;
            case 1: coefficient = chargeB; break;
            case 2: coefficient = c6A; break;
            case 3: coefficient = c6B; break;
        }

        grid = pmegrid->grid.grid;

        if (debug)
        {
            fprintf(debug, "PME: number of ranks = %d, rank = %d\n", cr->nnodes, cr->nodeid);
            fprintf(debug, "Grid = %p\n", static_cast<void*>(grid));
            if (grid == nullptr)
            {
                gmx_fatal(FARGS, "No grid!");
            }
        }

        if (pme->nnodes == 1)
        {
            atc.coefficient = gmx::arrayRefFromArray(coefficient, coordinates.size());
        }
        else
        {
            wallcycle_start(wcycle, ewcPME_REDISTXF);
            do_redist_pos_coeffs(pme, cr, bFirst, coordinates, coefficient);

            wallcycle_stop(wcycle, ewcPME_REDISTXF);
        }

        if (debug)
        {
            fprintf(debug, "Rank= %6d, pme local particles=%6d\n", cr->nodeid, atc.numAtoms());
        }

        wallcycle_start(wcycle, ewcPME_SPREAD);

        /* Spread the coefficients on a grid */
        spread_on_grid(pme, &atc, pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);

        if (bFirst)
        {
            inc_nrnb(nrnb, eNR_WEIGHTS, DIM * atc.numAtoms());
        }
        inc_nrnb(nrnb, eNR_SPREADBSP, pme->pme_order * pme->pme_order * pme->pme_order * atc.numAtoms());

        if (!pme->bUseThreads)
        {
            wrap_periodic_pmegrid(pme, grid);

            /* sum contributions to local grid from other nodes */
            if (pme->nnodes > 1)
            {
                gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
            }

            copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
        }

        wallcycle_stop(wcycle, ewcPME_SPREAD);

        /* TODO If the OpenMP and single-threaded implementations
           converge, then spread_on_grid() and
           copy_pmegrid_to_fftgrid() will perhaps live in the same
           source file.
        */

        /* Here we start a large thread parallel region */
#pragma omp parallel num_threads(pme->nthread) private(thread)
        {
            try
            {
                thread = gmx_omp_get_thread_num();
                int loop_count;

                /* do 3d-fft */
                if (thread == 0)
                {
                    wallcycle_start(wcycle, ewcPME_FFT);
                }
                gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX, thread, wcycle);
                if (thread == 0)
                {
                    wallcycle_stop(wcycle, ewcPME_FFT);
                }

                /* solve in k-space for our local cells */
                if (thread == 0)
                {
                    wallcycle_start(wcycle, (grid_index < DO_Q ? ewcPME_SOLVE : ewcLJPME));
                }
                if (grid_index < DO_Q)
                {
                    loop_count = solve_pme_yzx(
                            pme, cfftgrid, scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
                            computeEnergyAndVirial, pme->nthread, thread);
                }
                else
                {
                    loop_count =
                            solve_pme_lj_yzx(pme, &cfftgrid, FALSE,
                                             scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
                                             computeEnergyAndVirial, pme->nthread, thread);
                }

                if (thread == 0)
                {
                    wallcycle_stop(wcycle, (grid_index < DO_Q ? ewcPME_SOLVE : ewcLJPME));
                    inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
                }

                /* do 3d-invfft */
                if (thread == 0)
                {
                    wallcycle_start(wcycle, ewcPME_FFT);
                }
                gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL, thread, wcycle);
                if (thread == 0)
                {
                    wallcycle_stop(wcycle, ewcPME_FFT);


                    if (pme->nodeid == 0)
                    {
                        real ntot = pme->nkx * pme->nky * pme->nkz;
                        npme      = static_cast<int>(ntot * std::log(ntot) / std::log(2.0));
                        inc_nrnb(nrnb, eNR_FFT, 2 * npme);
                    }

                    /* Note: this wallcycle region is closed below
                       outside an OpenMP region, so take care if
                       refactoring code here. */
                    wallcycle_start(wcycle, ewcPME_GATHER);
                }

                copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
            }
            GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
        }
        /* End of thread parallel section.
         * With MPI we have to synchronize here before gmx_sum_qgrid_dd.
         */

        /* distribute local grid to all nodes */
        if (pme->nnodes > 1)
        {
            gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
        }

        unwrap_periodic_pmegrid(pme, grid);

        if (stepWork.computeForces)
        {
            /* interpolate forces for our local atoms */


            /* If we are running without parallelization,
             * atc->f is the actual force array, not a buffer,
             * therefore we should not clear it.
             */
            lambda  = grid_index < DO_Q ? lambda_q : lambda_lj;
            bClearF = (bFirst && PAR(cr));
#pragma omp parallel for num_threads(pme->nthread) schedule(static)
            for (thread = 0; thread < pme->nthread; thread++)
            {
                try
                {
                    gather_f_bsplines(pme, grid, bClearF, &atc, &atc.spline[thread],
                                      pme->bFEP ? (grid_index % 2 == 0 ? 1.0 - lambda : lambda) : 1.0);
                }
                GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
            }


            inc_nrnb(nrnb, eNR_GATHERFBSP,
                     pme->pme_order * pme->pme_order * pme->pme_order * atc.numAtoms());
            /* Note: this wallcycle region is opened above inside an OpenMP
               region, so take care if refactoring code here. */
            wallcycle_stop(wcycle, ewcPME_GATHER);
        }

        if (computeEnergyAndVirial)
        {
            /* This should only be called on the master thread
             * and after the threads have synchronized.
             */
            if (grid_index < 2)
            {
                get_pme_ener_vir_q(pme->solve_work, pme->nthread, &output[grid_index % 2]);
            }
            else
            {
                get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &output[grid_index % 2]);
            }
        }
        bFirst = FALSE;
    } /* of grid_index-loop */
1326
1327     /* For Lorentz-Berthelot combination rules in LJ-PME, we need to calculate
1328      * seven terms. */
1329
1330     if (pme->doLJ && pme->ljpme_combination_rule == eljpmeLB)
1331     {
1332         /* Loop over A- and B-state if we are doing FEP */
1333         for (fep_state = 0; fep_state < fep_states_lj; ++fep_state)
1334         {
1335             real *local_c6 = nullptr, *local_sigma = nullptr, *RedistC6 = nullptr, *RedistSigma = nullptr;
1336             gmx::ArrayRef<real> coefficientBuffer;
1337             if (pme->nnodes == 1)
1338             {
1339                 pme->lb_buf1.resize(atc.numAtoms());
1340                 coefficientBuffer = pme->lb_buf1;
1341                 switch (fep_state)
1342                 {
1343                     case 0:
1344                         local_c6    = c6A;
1345                         local_sigma = sigmaA;
1346                         break;
1347                     case 1:
1348                         local_c6    = c6B;
1349                         local_sigma = sigmaB;
1350                         break;
1351                     default: gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
1352                 }
1353             }
1354             else
1355             {
1356                 coefficientBuffer = atc.coefficientBuffer;
1357                 switch (fep_state)
1358                 {
1359                     case 0:
1360                         RedistC6    = c6A;
1361                         RedistSigma = sigmaA;
1362                         break;
1363                     case 1:
1364                         RedistC6    = c6B;
1365                         RedistSigma = sigmaB;
1366                         break;
1367                     default: gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
1368                 }
1369                 wallcycle_start(wcycle, ewcPME_REDISTXF);
1370
1371                 do_redist_pos_coeffs(pme, cr, bFirst, coordinates, RedistC6);
1372                 pme->lb_buf1.resize(atc.numAtoms());
1373                 pme->lb_buf2.resize(atc.numAtoms());
1374                 local_c6 = pme->lb_buf1.data();
1375                 for (int i = 0; i < atc.numAtoms(); ++i)
1376                 {
1377                     local_c6[i] = atc.coefficient[i];
1378                 }
1379
1380                 do_redist_pos_coeffs(pme, cr, FALSE, coordinates, RedistSigma);
1381                 local_sigma = pme->lb_buf2.data();
1382                 for (int i = 0; i < atc.numAtoms(); ++i)
1383                 {
1384                     local_sigma[i] = atc.coefficient[i];
1385                 }
1386
1387                 wallcycle_stop(wcycle, ewcPME_REDISTXF);
1388             }
1389             atc.coefficient = coefficientBuffer;
1390             calc_initial_lb_coeffs(coefficientBuffer, local_c6, local_sigma);
1391
1392             /*Seven terms in LJ-PME with LB, grid_index < 2 reserved for electrostatics*/
1393             for (grid_index = 2; grid_index < 9; ++grid_index)
1394             {
1395                 /* Unpack structure */
1396                 pmegrid    = &pme->pmegrid[grid_index];
1397                 fftgrid    = pme->fftgrid[grid_index];
1398                 pfft_setup = pme->pfft_setup[grid_index];
1399                 calc_next_lb_coeffs(coefficientBuffer, local_sigma);
1400                 grid = pmegrid->grid.grid;

                wallcycle_start(wcycle, ewcPME_SPREAD);
                /* Spread the coefficients of the current LB term on the grid */
                spread_on_grid(pme, &atc, pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);

                if (bFirst)
                {
                    inc_nrnb(nrnb, eNR_WEIGHTS, DIM * atc.numAtoms());
                }

                inc_nrnb(nrnb, eNR_SPREADBSP,
                         pme->pme_order * pme->pme_order * pme->pme_order * atc.numAtoms());
                if (pme->nthread == 1)
                {
                    wrap_periodic_pmegrid(pme, grid);
                    /* sum contributions to local grid from other nodes */
                    if (pme->nnodes > 1)
                    {
                        gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
                    }
                    copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
                }
                wallcycle_stop(wcycle, ewcPME_SPREAD);

                /* Here we start a large thread-parallel region */
#pragma omp parallel num_threads(pme->nthread) private(thread)
                {
                    try
                    {
                        thread = gmx_omp_get_thread_num();
                        /* do 3d-fft */
                        if (thread == 0)
                        {
                            wallcycle_start(wcycle, ewcPME_FFT);
                        }

                        gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX, thread, wcycle);
                        if (thread == 0)
                        {
                            wallcycle_stop(wcycle, ewcPME_FFT);
                        }
                    }
                    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
                }
                bFirst = FALSE;
            }
            /* solve in k-space for our local cells */
#pragma omp parallel num_threads(pme->nthread) private(thread)
            {
                try
                {
                    int loop_count;
                    thread = gmx_omp_get_thread_num();
                    if (thread == 0)
                    {
                        wallcycle_start(wcycle, ewcLJPME);
                    }

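                    /* GROMACS box matrices are triangular, so the product of
                     * the diagonal elements of the scaled box is its volume.
                     */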
                    loop_count =
                            solve_pme_lj_yzx(pme, &pme->cfftgrid[2], TRUE,
                                             scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
                                             computeEnergyAndVirial, pme->nthread, thread);
                    if (thread == 0)
                    {
                        wallcycle_stop(wcycle, ewcLJPME);
                        inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
                    }
                }
                GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
            }

            if (computeEnergyAndVirial)
            {
                /* This should only be called on the master thread and
                 * after the threads have synchronized.
                 */
                get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &output[fep_state]);
            }

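            /* When no Coulomb mesh forces were gathered above, the first LJ
             * gather below has to clear, not accumulate into, the force
             * output (see bClearF).
             */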
            bFirst = !pme->doCoulomb;
            calc_initial_lb_coeffs(coefficientBuffer, local_c6, local_sigma);
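            /* Loop backward over the seven LB term grids: transform each back
             * to real space and gather its force contribution.
             */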
            for (grid_index = 8; grid_index >= 2; --grid_index)
            {
                /* Unpack structure */
                pmegrid    = &pme->pmegrid[grid_index];
                fftgrid    = pme->fftgrid[grid_index];
                pfft_setup = pme->pfft_setup[grid_index];
                grid       = pmegrid->grid.grid;
                calc_next_lb_coeffs(coefficientBuffer, local_sigma);
#pragma omp parallel num_threads(pme->nthread) private(thread)
                {
                    try
                    {
                        thread = gmx_omp_get_thread_num();
                        /* do 3d-invfft */
                        if (thread == 0)
                        {
                            wallcycle_start(wcycle, ewcPME_FFT);
                        }

                        gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL, thread, wcycle);
                        if (thread == 0)
                        {
                            wallcycle_stop(wcycle, ewcPME_FFT);

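                            /* Charge the flop counters with an N*log2(N)
                             * estimate per 3D-FFT; the factor 2 accounts for
                             * the forward and back transforms of this grid.
                             */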
                            if (pme->nodeid == 0)
                            {
                                real ntot = pme->nkx * pme->nky * pme->nkz;
                                npme      = static_cast<int>(ntot * std::log(ntot) / std::log(2.0));
                                inc_nrnb(nrnb, eNR_FFT, 2 * npme);
                            }
                            wallcycle_start(wcycle, ewcPME_GATHER);
                        }

                        copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
                    }
                    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
                } /* #pragma omp parallel */

                /* distribute local grid to all nodes */
                if (pme->nnodes > 1)
                {
                    gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
                }

                unwrap_periodic_pmegrid(pme, grid);

                if (stepWork.computeForces)
                {
                    /* interpolate forces for our local atoms */
                    bClearF = (bFirst && PAR(cr));
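                    /* Weight this term by the FEP lambda factor and by its
                     * coefficient in the Lorentz-Berthelot expansion.
                     */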
                    scale   = pme->bFEP ? (fep_state < 1 ? 1.0 - lambda_lj : lambda_lj) : 1.0;
                    scale *= lb_scale_factor[grid_index - 2];

#pragma omp parallel for num_threads(pme->nthread) schedule(static)
                    for (thread = 0; thread < pme->nthread; thread++)
                    {
                        try
                        {
                            gather_f_bsplines(pme, grid, bClearF, &pme->atc[0],
                                              &pme->atc[0].spline[thread], scale);
                        }
                        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
                    }

                    inc_nrnb(nrnb, eNR_GATHERFBSP,
                             pme->pme_order * pme->pme_order * pme->pme_order * pme->atc[0].numAtoms());
                }
                wallcycle_stop(wcycle, ewcPME_GATHER);

                bFirst = FALSE;
            } /* for (grid_index = 8; grid_index >= 2; --grid_index) */
        }     /* for (fep_state = 0; fep_state < fep_states_lj; ++fep_state) */
    }         /* if (pme->doLJ && pme->ljpme_combination_rule == eljpmeLB) */

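    /* Redistribute the mesh forces from the PME decomposition back to the
     * ranks that own the atoms, one decomposition dimension at a time.
     */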
    if (stepWork.computeForces && pme->nnodes > 1)
    {
        wallcycle_start(wcycle, ewcPME_REDISTXF);
        for (d = 0; d < pme->ndecompdim; d++)
        {
            gmx::ArrayRef<gmx::RVec> forcesRef;
            if (d == pme->ndecompdim - 1)
            {
                const size_t numAtoms = coordinates.size();
                GMX_ASSERT(forces.size() >= numAtoms, "Need at least numAtoms forces");
                forcesRef = forces.subArray(0, numAtoms);
            }
            else
            {
                forcesRef = pme->atc[d + 1].f;
            }
            if (DOMAINDECOMP(cr))
            {
                dd_pmeredist_f(pme, &pme->atc[d], forcesRef, d == pme->ndecompdim - 1 && pme->bPPnode);
            }
        }

        wallcycle_stop(wcycle, ewcPME_REDISTXF);
    }

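    /* Reduce the energies and virials. For FEP, the A- and B-state values
     * are interpolated linearly in lambda and the state difference is
     * accumulated into dV/dlambda.
     */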
    if (computeEnergyAndVirial)
    {
        if (pme->doCoulomb)
        {
            if (!pme->bFEP_q)
            {
                *energy_q = output[0].coulombEnergy_;
                m_add(vir_q, output[0].coulombVirial_, vir_q);
            }
            else
            {
                *energy_q = (1.0 - lambda_q) * output[0].coulombEnergy_ + lambda_q * output[1].coulombEnergy_;
                *dvdlambda_q += output[1].coulombEnergy_ - output[0].coulombEnergy_;
                for (int i = 0; i < DIM; i++)
                {
                    for (int j = 0; j < DIM; j++)
                    {
                        vir_q[i][j] += (1.0 - lambda_q) * output[0].coulombVirial_[i][j]
                                       + lambda_q * output[1].coulombVirial_[i][j];
                    }
                }
            }
            if (debug)
            {
                fprintf(debug, "Electrostatic PME mesh energy: %g\n", *energy_q);
            }
        }
        else
        {
            *energy_q = 0;
        }

        if (pme->doLJ)
        {
            if (!pme->bFEP_lj)
            {
                *energy_lj = output[0].lennardJonesEnergy_;
                m_add(vir_lj, output[0].lennardJonesVirial_, vir_lj);
            }
            else
            {
                *energy_lj = (1.0 - lambda_lj) * output[0].lennardJonesEnergy_
                             + lambda_lj * output[1].lennardJonesEnergy_;
                *dvdlambda_lj += output[1].lennardJonesEnergy_ - output[0].lennardJonesEnergy_;
                for (int i = 0; i < DIM; i++)
                {
                    for (int j = 0; j < DIM; j++)
                    {
                        vir_lj[i][j] += (1.0 - lambda_lj) * output[0].lennardJonesVirial_[i][j]
                                        + lambda_lj * output[1].lennardJonesVirial_[i][j];
                    }
                }
            }
            if (debug)
            {
                fprintf(debug, "Lennard-Jones PME mesh energy: %g\n", *energy_lj);
            }
        }
        else
        {
            *energy_lj = 0;
        }
    }
    return 0;
}

void gmx_pme_destroy(gmx_pme_t* pme)
{
    if (!pme)
    {
        return;
    }

    delete pme->boxScaler;

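    /* Grid index (nn[xyz]) and fractional shift (fsh[xyz]) lookup tables */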
    sfree(pme->nnx);
    sfree(pme->nny);
    sfree(pme->nnz);
    sfree(pme->fshx);
    sfree(pme->fshy);
    sfree(pme->fshz);

    for (int i = 0; i < pme->ngrids; ++i)
    {
        pmegrids_destroy(&pme->pmegrid[i]);
    }
    if (pme->pfft_setup)
    {
        for (int i = 0; i < pme->ngrids; ++i)
        {
            gmx_parallel_3dfft_destroy(pme->pfft_setup[i]);
        }
    }
    sfree(pme->fftgrid);
    sfree(pme->cfftgrid);
    sfree(pme->pfft_setup);

    for (int i = 0; i < DIM; i++)
    {
        sfree(pme->bsp_mod[i]);
    }

    sfree(pme->bufv);
    sfree(pme->bufr);

    if (pme->solve_work)
    {
        pme_free_all_work(&pme->solve_work, pme->nthread);
    }

    sfree(pme->sum_qgrid_tmp);
    sfree(pme->sum_qgrid_dd_tmp);

    destroy_pme_spline_work(pme->spline_work);

    if (pme->gpu != nullptr)
    {
        pme_gpu_destroy(pme->gpu);
    }

    delete pme;
}

void gmx_pme_reinit_atoms(gmx_pme_t* pme, const int numAtoms, const real* charges)
{
    if (pme->gpu != nullptr)
    {
        pme_gpu_reinit_atoms(pme->gpu, numAtoms, charges);
    }
    else
    {
        pme->atc[0].setNumAtoms(numAtoms);
        // TODO: set the charges here as well
    }
}

bool gmx_pme_grid_matches(const gmx_pme_t& pme, const ivec grid_size)
{
    return (pme.nkx == grid_size[XX] && pme.nky == grid_size[YY] && pme.nkz == grid_size[ZZ]);
}