Disable PME Mixed mode with FEP
[alexxy/gromacs.git] / src / gromacs / ewald / pme.cpp
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017 The GROMACS development team.
 * Copyright (c) 2018,2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief This file contains function definitions necessary for
 * computing energies and forces for the PME long-ranged part (Coulomb
 * and LJ).
 *
 * \author Erik Lindahl <erik@kth.se>
 * \author Berk Hess <hess@kth.se>
 * \ingroup module_ewald
 */
/* IMPORTANT FOR DEVELOPERS:
 *
 * Triclinic pme stuff isn't entirely trivial, and we've experienced
 * some bugs during development (many of them due to me). To avoid
 * this in the future, please check the following things if you make
 * changes in this file:
 *
 * 1. You should obtain identical (at least to the PME precision)
 *    energies, forces, and virial for
 *    a rectangular box and a triclinic one where the z (or y) axis is
 *    tilted a whole box side. For instance you could use these boxes:
 *
 *    rectangular       triclinic
 *     2  0  0           2  0  0
 *     0  2  0           0  2  0
 *     0  0  6           2  2  6
 *
 * 2. You should check the energy conservation in a triclinic box.
 *
 * It might seem like overkill, but better safe than sorry.
 * /Erik 001109
 */

#include "gmxpre.h"

#include "pme.h"

#include "config.h"

#include <cassert>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>

#include <algorithm>
#include <list>

#include "gromacs/domdec/domdec.h"
#include "gromacs/ewald/ewald_utils.h"
#include "gromacs/fft/parallel_3dfft.h"
#include "gromacs/fileio/pdbio.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/hardware/hw_info.h"
#include "gromacs/math/gmxcomplex.h"
#include "gromacs/math/invertmatrix.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/forcerec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/real.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/stringutil.h"
#include "gromacs/utility/unique_cptr.h"

#include "calculate_spline_moduli.h"
#include "pme_gather.h"
#include "pme_gpu_internal.h"
#include "pme_grid.h"
#include "pme_internal.h"
#include "pme_redistribute.h"
#include "pme_solve.h"
#include "pme_spline_work.h"
#include "pme_spread.h"

/*! \brief Help build a descriptive message in \c error if there are
 * \c errorReasons why PME on GPU is not supported.
 *
 * \returns Whether the lack of errorReasons indicates there is support. */
static bool addMessageIfNotSupported(const std::list<std::string>& errorReasons, std::string* error)
{
    bool isSupported = errorReasons.empty();
    if (!isSupported && error)
    {
        std::string regressionTestMarker = "PME GPU does not support";
        // this prefix is tested for in the regression tests script gmxtest.pl
        *error = regressionTestMarker;
        if (errorReasons.size() == 1)
        {
            *error += " " + errorReasons.back();
        }
        else
        {
            *error += ": " + gmx::joinStrings(errorReasons, "; ");
        }
        *error += ".";
    }
    return isSupported;
}
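
/* For instance, a single reason such as "a double-precision build" yields
 * "PME GPU does not support a double-precision build.", while multiple
 * reasons are joined with semicolons:
 * "PME GPU does not support: a double-precision build; a non-GPU build.".
 */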

bool pme_gpu_supports_build(std::string* error)
{
    std::list<std::string> errorReasons;
    if (GMX_DOUBLE)
    {
        errorReasons.emplace_back("a double-precision build");
    }
    if (!GMX_GPU)
    {
        errorReasons.emplace_back("a non-GPU build");
    }
    if (GMX_GPU_SYCL)
    {
        errorReasons.emplace_back("SYCL build"); // SYCL-TODO
    }
    return addMessageIfNotSupported(errorReasons, error);
}

bool pme_gpu_supports_hardware(const gmx_hw_info_t gmx_unused& hwinfo, std::string* error)
{
    std::list<std::string> errorReasons;

    if (GMX_GPU_OPENCL)
    {
#ifdef __APPLE__
        errorReasons.emplace_back("Apple OS X operating system");
#endif
    }
    return addMessageIfNotSupported(errorReasons, error);
}

bool pme_gpu_supports_input(const t_inputrec& ir, std::string* error)
{
    std::list<std::string> errorReasons;
    if (!EEL_PME(ir.coulombtype))
    {
        errorReasons.emplace_back("systems that do not use PME for electrostatics");
    }
    if (ir.pme_order != 4)
    {
        errorReasons.emplace_back("interpolation orders other than 4");
    }
    if (EVDW_PME(ir.vdwtype))
    {
        errorReasons.emplace_back("Lennard-Jones PME");
    }
    if (!EI_DYNAMICS(ir.eI))
    {
        errorReasons.emplace_back("non-dynamical integrators (use md, sd, etc.)");
    }
    return addMessageIfNotSupported(errorReasons, error);
}

bool pme_gpu_mixed_mode_supports_input(const t_inputrec& ir, std::string* error)
{
    std::list<std::string> errorReasons;
    if (ir.efep != efepNO)
    {
        errorReasons.emplace_back("Free Energy Perturbation (in PME GPU mixed mode)");
    }
    return addMessageIfNotSupported(errorReasons, error);
}
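
/* Sketch of how a caller might combine this check with the general ones to
 * decide on mixed mode (illustrative only, not a verbatim call site):
 *
 *   std::string error;
 *   const bool useMixedMode = pme_gpu_supports_build(&error)
 *                             && pme_gpu_supports_input(ir, &error)
 *                             && pme_gpu_mixed_mode_supports_input(ir, &error);
 *
 * With ir.efep != efepNO the last check fails, which is what disables PME
 * mixed mode for free-energy perturbation runs.
 */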

/*! \brief \libinternal
 * Determines whether PME with the given inputs can run on a GPU.
 * This function is an internal final check, validating the whole PME structure on creation,
 * but it still duplicates the preliminary checks of the (externally exposed)
 * pme_gpu_supports_input() - just in case.
 *
 * \param[in]  pme          The PME structure.
 * \param[out] error        The error message if the input is not supported on GPU.
 * \returns                 True if this PME input can run on a GPU, false otherwise.
 */
static bool pme_gpu_check_restrictions(const gmx_pme_t* pme, std::string* error)
{
    std::list<std::string> errorReasons;
    if (pme->nnodes != 1)
    {
        errorReasons.emplace_back("PME decomposition");
    }
    if (pme->pme_order != 4)
    {
        errorReasons.emplace_back("interpolation orders other than 4");
    }
    if (pme->doLJ)
    {
        errorReasons.emplace_back("Lennard-Jones PME");
    }
    if (GMX_DOUBLE)
    {
        errorReasons.emplace_back("double precision");
    }
    if (!GMX_GPU)
    {
        errorReasons.emplace_back("non-GPU build of GROMACS");
    }
    if (GMX_GPU_SYCL)
    {
        errorReasons.emplace_back("SYCL build of GROMACS"); // SYCL-TODO
    }
    return addMessageIfNotSupported(errorReasons, error);
}

PmeRunMode pme_run_mode(const gmx_pme_t* pme)
{
    GMX_ASSERT(pme != nullptr, "Expecting valid PME data pointer");
    return pme->runMode;
}

gmx::PinningPolicy pme_get_pinning_policy()
{
    return gmx::PinningPolicy::PinnedIfSupported;
}

/*! \brief Number of bytes in a cache line.
 *
 * Must also be a multiple of the SIMD and SIMD4 register size, to
 * preserve alignment.
 */
const int gmxCacheLineSize = 64;

//! Set up coordinate communication
static void setup_coordinate_communication(PmeAtomComm* atc)
{
    int nslab, n, i;
    int fw, bw;

    nslab = atc->nslab;

    n = 0;
    for (i = 1; i <= nslab / 2; i++)
    {
        fw = (atc->nodeid + i) % nslab;
        bw = (atc->nodeid - i + nslab) % nslab;
        if (n < nslab - 1)
        {
            atc->slabCommSetup[n].node_dest = fw;
            atc->slabCommSetup[n].node_src  = bw;
            n++;
        }
        if (n < nslab - 1)
        {
            atc->slabCommSetup[n].node_dest = bw;
            atc->slabCommSetup[n].node_src  = fw;
            n++;
        }
    }
}
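
/* For instance, with nslab = 4 and nodeid = 0 this fills nslab - 1 = 3 slots,
 * alternating between the forward and backward neighbor at each distance:
 * (node_dest, node_src) = (1, 3), (3, 1), (2, 2).
 */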

/*! \brief Round \p n up to the next multiple of \p f */
static int mult_up(int n, int f)
{
    return ((n + f - 1) / f) * f;
}
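
/* E.g. mult_up(25, 8) == 32, while mult_up(24, 8) == 24 is unchanged. */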

/*! \brief Return estimate of the load imbalance from the PME grid not being a good match for the number of PME ranks */
static double estimate_pme_load_imbalance(struct gmx_pme_t* pme)
{
    int    nma, nmi;
    double n1, n2, n3;

    nma = pme->nnodes_major;
    nmi = pme->nnodes_minor;

    n1 = mult_up(pme->nkx, nma) * mult_up(pme->nky, nmi) * pme->nkz;
    n2 = mult_up(pme->nkx, nma) * mult_up(pme->nkz, nmi) * pme->nky;
    n3 = mult_up(pme->nky, nma) * mult_up(pme->nkz, nmi) * pme->nkx;

    /* pme_solve is roughly double the cost of an fft */

    return (n1 + n2 + 3 * n3) / static_cast<double>(6 * pme->nkx * pme->nky * pme->nkz);
}
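
/* Worked example: a 25x25x25 grid on 8x4 PME ranks gives mult_up(25, 8) = 32
 * and mult_up(25, 4) = 28, so n1 = n2 = n3 = 32*28*25 = 22400 and the estimate
 * is 5*22400 / (6*25^3) ~= 1.19, i.e. roughly 19% imbalance, just below the
 * 1.2 threshold at which gmx_pme_init() prints a note.
 */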

#ifndef DOXYGEN

PmeAtomComm::PmeAtomComm(MPI_Comm   PmeMpiCommunicator,
                         const int  numThreads,
                         const int  pmeOrder,
                         const int  dimIndex,
                         const bool doSpread) :
    dimind(dimIndex),
    bSpread(doSpread),
    pme_order(pmeOrder),
    nthread(numThreads),
    spline(nthread)
{
    if (PmeMpiCommunicator != MPI_COMM_NULL)
    {
        mpi_comm = PmeMpiCommunicator;
#    if GMX_MPI
        MPI_Comm_size(mpi_comm, &nslab);
        MPI_Comm_rank(mpi_comm, &nodeid);
#    endif
    }
    if (debug)
    {
        fprintf(debug, "For PME atom communication in dimind %d: nslab %d rank %d\n", dimind, nslab, nodeid);
    }

    if (nslab > 1)
    {
        slabCommSetup.resize(nslab);
        setup_coordinate_communication(this);

        count_thread.resize(nthread);
        for (auto& countThread : count_thread)
        {
            countThread.resize(nslab);
        }
    }

    if (nthread > 1)
    {
        threadMap.resize(nthread);

#    pragma omp parallel for num_threads(nthread) schedule(static)
        for (int thread = 0; thread < nthread; thread++)
        {
            try
            {
                /* Allocate buffer with padding to avoid cache pollution (false sharing) */
                threadMap[thread].nBuffer.resize(nthread + 2 * gmxCacheLineSize);
                threadMap[thread].n = threadMap[thread].nBuffer.data() + gmxCacheLineSize;
            }
            GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
        }
    }
}

#endif // !DOXYGEN

/*! \brief Initialize data structure for communication */
static void init_overlap_comm(pme_overlap_t* ol, int norder, MPI_Comm comm, int nnodes, int nodeid, int ndata, int commplainsize)
{
    gmx_bool bCont;

    ol->mpi_comm = comm;
    ol->nnodes   = nnodes;
    ol->nodeid   = nodeid;

    /* Linear translation of the PME grid won't affect reciprocal space
     * calculations, so to optimize we only interpolate "upwards",
     * which also means we only have to consider overlap in one direction.
     * I.e., particles on this node might also be spread to grid indices
     * that belong to higher nodes (modulo nnodes)
     */

    ol->s2g0.resize(ol->nnodes + 1);
    ol->s2g1.resize(ol->nnodes);
    if (debug)
    {
        fprintf(debug, "PME slab boundaries:");
    }
    for (int i = 0; i < nnodes; i++)
    {
        /* s2g0 the local interpolation grid start.
         * s2g1 the local interpolation grid end.
         * Since in calc_pidx we divide particles, and not grid lines,
         * spatially uniform along dimension x or y, we need to round
         * s2g0 down and s2g1 up.
         */
        ol->s2g0[i] = (i * ndata + 0) / nnodes;
        ol->s2g1[i] = ((i + 1) * ndata + nnodes - 1) / nnodes + norder - 1;

        if (debug)
        {
            fprintf(debug, "  %3d %3d", ol->s2g0[i], ol->s2g1[i]);
        }
    }
    ol->s2g0[nnodes] = ndata;
    if (debug)
    {
        fprintf(debug, "\n");
    }

    /* Determine how many nodes we need to communicate the grid overlap with */
    int testRankCount = 0;
    do
    {
        testRankCount++;
        bCont = FALSE;
        for (int i = 0; i < nnodes; i++)
        {
            if ((i + testRankCount < nnodes && ol->s2g1[i] > ol->s2g0[i + testRankCount])
                || (i + testRankCount >= nnodes && ol->s2g1[i] > ol->s2g0[i + testRankCount - nnodes] + ndata))
            {
                bCont = TRUE;
            }
        }
    } while (bCont && testRankCount < nnodes);

    ol->comm_data.resize(testRankCount - 1);
    ol->send_size = 0;

    for (size_t b = 0; b < ol->comm_data.size(); b++)
    {
        pme_grid_comm_t* pgc = &ol->comm_data[b];

        /* Send */
        pgc->send_id  = (ol->nodeid + (b + 1)) % ol->nnodes;
        int fft_start = ol->s2g0[pgc->send_id];
        int fft_end   = ol->s2g0[pgc->send_id + 1];
        if (pgc->send_id < nodeid)
        {
            fft_start += ndata;
            fft_end += ndata;
        }
        int send_index1  = ol->s2g1[nodeid];
        send_index1      = std::min(send_index1, fft_end);
        pgc->send_index0 = fft_start;
        pgc->send_nindex = std::max(0, send_index1 - pgc->send_index0);
        ol->send_size += pgc->send_nindex;

        /* We always start receiving to the first index of our slab */
        pgc->recv_id    = (ol->nodeid - (b + 1) + ol->nnodes) % ol->nnodes;
        fft_start       = ol->s2g0[ol->nodeid];
        fft_end         = ol->s2g0[ol->nodeid + 1];
        int recv_index1 = ol->s2g1[pgc->recv_id];
        if (pgc->recv_id > nodeid)
        {
            recv_index1 -= ndata;
        }
        recv_index1      = std::min(recv_index1, fft_end);
        pgc->recv_index0 = fft_start;
        pgc->recv_nindex = std::max(0, recv_index1 - pgc->recv_index0);
    }

#if GMX_MPI
    /* Communicate the buffer sizes to receive */
    MPI_Status stat;
    for (size_t b = 0; b < ol->comm_data.size(); b++)
    {
        MPI_Sendrecv(&ol->send_size, 1, MPI_INT, ol->comm_data[b].send_id, b, &ol->comm_data[b].recv_size,
                     1, MPI_INT, ol->comm_data[b].recv_id, b, ol->mpi_comm, &stat);
    }
#endif

    /* For a non-divisible grid we need pme_order instead of pme_order-1 */
    ol->sendbuf.resize(norder * commplainsize);
    ol->recvbuf.resize(norder * commplainsize);
}
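
/* For instance, ndata = 30 grid lines over nnodes = 4 slabs with norder = 4
 * gives s2g0 = {0, 7, 15, 22, 30} and s2g1 = {11, 18, 26, 33}: rank 0 owns
 * grid lines [0, 7) but spreads up to line 10, so its overlap reaches only
 * the next rank and a single communication pulse suffices (testRankCount = 2,
 * i.e. one comm_data entry).
 */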

int minimalPmeGridSize(int pmeOrder)
{
    /* The actual grid size limitations are:
     *   serial:        >= pme_order
     *   DD, no OpenMP: >= 2*(pme_order - 1)
     *   DD, OpenMP:    >= pme_order + 1
     * But we use the maximum for simplicity since in practice there is not
     * much performance difference between pme_order and 2*(pme_order -1).
     */
    int minimalSize = 2 * (pmeOrder - 1);

    GMX_RELEASE_ASSERT(pmeOrder >= 3, "pmeOrder has to be >= 3");
    GMX_RELEASE_ASSERT(minimalSize >= pmeOrder + 1, "The grid size should be >= pmeOrder + 1");

    return minimalSize;
}
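
/* E.g. the default pmeOrder = 4 yields a minimal grid size of 6 in each dimension. */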

bool gmx_pme_check_restrictions(int pme_order, int nkx, int nky, int nkz, int numPmeDomainsAlongX, bool useThreads, bool errorsAreFatal)
{
    if (pme_order > PME_ORDER_MAX)
    {
        if (!errorsAreFatal)
        {
            return false;
        }

        std::string message = gmx::formatString(
                "pme_order (%d) is larger than the maximum allowed value (%d). Modify and "
                "recompile the code if you really need such a high order.",
                pme_order, PME_ORDER_MAX);
        GMX_THROW(gmx::InconsistentInputError(message));
    }

    const int minGridSize = minimalPmeGridSize(pme_order);
    if (nkx < minGridSize || nky < minGridSize || nkz < minGridSize)
    {
        if (!errorsAreFatal)
        {
            return false;
        }
        std::string message =
                gmx::formatString("The PME grid sizes need to be >= 2*(pme_order-1) (%d)", minGridSize);
        GMX_THROW(gmx::InconsistentInputError(message));
    }

    /* Check for a limitation of the (current) sum_fftgrid_dd code.
     * We only allow multiple communication pulses in dim 1, not in dim 0.
     */
    if (useThreads
        && (nkx < numPmeDomainsAlongX * pme_order && nkx != numPmeDomainsAlongX * (pme_order - 1)))
    {
        if (!errorsAreFatal)
        {
            return false;
        }
        gmx_fatal(FARGS,
                  "The number of PME grid lines per rank along x is %g. But when using OpenMP "
                  "threads, the number of grid lines per rank along x should be >= pme_order (%d) "
                  "or = pme_order-1. To resolve this issue, use fewer ranks along x (and possibly "
                  "more along y and/or z) by specifying -dd manually.",
                  nkx / static_cast<double>(numPmeDomainsAlongX), pme_order);
    }

    return true;
}
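
/* For example, with pme_order = 4 and 5 PME domains along x, a threaded run
 * needs nkx >= 20 (or exactly 15) for the x-overlap to fit in one pulse.
 */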

/*! \brief Return \p enumerator divided by \p denominator, rounded up to the nearest integer */
static int div_round_up(int enumerator, int denominator)
{
    return (enumerator + denominator - 1) / denominator;
}
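
/* E.g. div_round_up(30, 4) == 8, where plain integer division would give 7. */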

gmx_pme_t* gmx_pme_init(const t_commrec*     cr,
                        const NumPmeDomains& numPmeDomains,
                        const t_inputrec*    ir,
                        gmx_bool             bFreeEnergy_q,
                        gmx_bool             bFreeEnergy_lj,
                        gmx_bool             bReproducible,
                        real                 ewaldcoeff_q,
                        real                 ewaldcoeff_lj,
                        int                  nthread,
                        PmeRunMode           runMode,
                        PmeGpu*              pmeGpu,
                        const DeviceContext* deviceContext,
                        const DeviceStream*  deviceStream,
                        const PmeGpuProgram* pmeGpuProgram,
                        const gmx::MDLogger& mdlog)
{
    int  use_threads, sum_use_threads, i;
    ivec ndata;

    if (debug)
    {
        fprintf(debug, "Creating PME data structures.\n");
    }

    gmx::unique_cptr<gmx_pme_t, gmx_pme_destroy> pme(new gmx_pme_t());

    pme->sum_qgrid_tmp    = nullptr;
    pme->sum_qgrid_dd_tmp = nullptr;

    pme->buf_nalloc = 0;

    pme->nnodes  = 1;
    pme->bPPnode = TRUE;

    pme->nnodes_major = numPmeDomains.x;
    pme->nnodes_minor = numPmeDomains.y;

    if (numPmeDomains.x * numPmeDomains.y > 1)
    {
        pme->mpi_comm = cr->mpi_comm_mygroup;

#if GMX_MPI
        MPI_Comm_rank(pme->mpi_comm, &pme->nodeid);
        MPI_Comm_size(pme->mpi_comm, &pme->nnodes);
#endif
        if (pme->nnodes != numPmeDomains.x * numPmeDomains.y)
        {
            gmx_incons("PME rank count mismatch");
        }
    }
    else
    {
        pme->mpi_comm = MPI_COMM_NULL;
    }

    if (pme->nnodes == 1)
    {
        pme->mpi_comm_d[0] = MPI_COMM_NULL;
        pme->mpi_comm_d[1] = MPI_COMM_NULL;
        pme->ndecompdim    = 0;
        pme->nodeid_major  = 0;
        pme->nodeid_minor  = 0;
    }
    else
    {
        if (numPmeDomains.y == 1)
        {
            pme->mpi_comm_d[0] = pme->mpi_comm;
            pme->mpi_comm_d[1] = MPI_COMM_NULL;
            pme->ndecompdim    = 1;
            pme->nodeid_major  = pme->nodeid;
            pme->nodeid_minor  = 0;
        }
        else if (numPmeDomains.x == 1)
        {
            pme->mpi_comm_d[0] = MPI_COMM_NULL;
            pme->mpi_comm_d[1] = pme->mpi_comm;
            pme->ndecompdim    = 1;
            pme->nodeid_major  = 0;
            pme->nodeid_minor  = pme->nodeid;
        }
        else
        {
            if (pme->nnodes % numPmeDomains.x != 0)
            {
                gmx_incons(
                        "For 2D PME decomposition, #PME ranks must be divisible by the number of "
                        "domains along x");
            }
            pme->ndecompdim = 2;

#if GMX_MPI
            MPI_Comm_split(pme->mpi_comm, pme->nodeid % numPmeDomains.y, pme->nodeid,
                           &pme->mpi_comm_d[0]); /* My communicator along major dimension */
            MPI_Comm_split(pme->mpi_comm, pme->nodeid / numPmeDomains.y, pme->nodeid,
                           &pme->mpi_comm_d[1]); /* My communicator along minor dimension */

            MPI_Comm_rank(pme->mpi_comm_d[0], &pme->nodeid_major);
            MPI_Comm_size(pme->mpi_comm_d[0], &pme->nnodes_major);
            MPI_Comm_rank(pme->mpi_comm_d[1], &pme->nodeid_minor);
            MPI_Comm_size(pme->mpi_comm_d[1], &pme->nnodes_minor);
#endif
        }
    }
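    /* For instance, with numPmeDomains = { 4, 3 }, rank 7 gets color 7 % 3 = 1
     * for the major communicator (ranks 1, 4, 7, 10) and color 7 / 3 = 2 for
     * the minor one (ranks 6, 7, 8), so nodeid_major = 2 and nodeid_minor = 1.
     */
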
    // cr is always initialized if there is a PP rank, so we can safely assume
    // that when it is not, as in the Ewald tests, we are not on a PP rank.
    pme->bPPnode = ((cr != nullptr && cr->duty != 0) && thisRankHasDuty(cr, DUTY_PP));

    pme->nthread = nthread;

    /* Check if any of the PME MPI ranks uses threads */
    use_threads = (pme->nthread > 1 ? 1 : 0);
#if GMX_MPI
    if (pme->nnodes > 1)
    {
        MPI_Allreduce(&use_threads, &sum_use_threads, 1, MPI_INT, MPI_SUM, pme->mpi_comm);
    }
    else
#endif
    {
        sum_use_threads = use_threads;
    }
    pme->bUseThreads = (sum_use_threads > 0);

    if (ir->pbcType == PbcType::Screw)
    {
        gmx_fatal(FARGS, "pme does not (yet) work with pbc = screw");
    }

    /* NOTE:
     * It is likely that the current gmx_pme_do() routine supports calculating
     * only Coulomb or LJ while gmx_pme_init() configures for both,
     * but that has never been tested.
     * It is likely that the current gmx_pme_do() routine supports skipping
     * the free-energy calculation for Coulomb and/or LJ while gmx_pme_init()
     * configures with free-energy, but that has never been tested.
     */
    pme->doCoulomb     = EEL_PME(ir->coulombtype);
    pme->doLJ          = EVDW_PME(ir->vdwtype);
    pme->bFEP_q        = ((ir->efep != efepNO) && bFreeEnergy_q);
    pme->bFEP_lj       = ((ir->efep != efepNO) && bFreeEnergy_lj);
    pme->bFEP          = (pme->bFEP_q || pme->bFEP_lj);
    pme->nkx           = ir->nkx;
    pme->nky           = ir->nky;
    pme->nkz           = ir->nkz;
    pme->bP3M          = (ir->coulombtype == eelP3M_AD || getenv("GMX_PME_P3M") != nullptr);
    pme->pme_order     = ir->pme_order;
    pme->ewaldcoeff_q  = ewaldcoeff_q;
    pme->ewaldcoeff_lj = ewaldcoeff_lj;

    /* Always constant electrostatics coefficients */
    pme->epsilon_r = ir->epsilon_r;

    /* Always constant LJ coefficients */
    pme->ljpme_combination_rule = ir->ljpme_combination_rule;

    // With nwalls = 2 the box requires scaling; we store that condition as
    // well as the scaling factor
    delete pme->boxScaler;
    pme->boxScaler = new EwaldBoxZScaler(*ir);

    /* If we violate restrictions, generate a fatal error here */
    gmx_pme_check_restrictions(pme->pme_order, pme->nkx, pme->nky, pme->nkz, pme->nnodes_major,
                               pme->bUseThreads, true);

    if (pme->nnodes > 1)
    {
        double imbal;

#if GMX_MPI
        MPI_Type_contiguous(DIM, GMX_MPI_REAL, &(pme->rvec_mpi));
        MPI_Type_commit(&(pme->rvec_mpi));
#endif

        /* Note that the coefficient spreading and force gathering, which usually
         * takes about the same amount of time as FFT+solve_pme,
         * is always fully load balanced
         * (unless the coefficient distribution is inhomogeneous).
         */

        imbal = estimate_pme_load_imbalance(pme.get());
        if (imbal >= 1.2 && pme->nodeid_major == 0 && pme->nodeid_minor == 0)
        {
            GMX_LOG(mdlog.warning)
                    .asParagraph()
                    .appendTextFormatted(
                            "NOTE: The load imbalance in PME FFT and solve is %d%%.\n"
                            "      For optimal PME load balancing\n"
                            "      PME grid_x (%d) and grid_y (%d) should be divisible by "
                            "#PME_ranks_x "
                            "(%d)\n"
                            "      and PME grid_y (%d) and grid_z (%d) should be divisible by "
                            "#PME_ranks_y "
                            "(%d)",
                            gmx::roundToInt((imbal - 1) * 100), pme->nkx, pme->nky,
                            pme->nnodes_major, pme->nky, pme->nkz, pme->nnodes_minor);
        }
    }

    /* For a non-divisible grid we need pme_order instead of pme_order-1 */
    /* In sum_qgrid_dd x overlap is copied in place: take padding into account.
     * y is always copied through a buffer: we don't need padding in z,
     * but we do need the overlap in x because of the communication order.
     */
    init_overlap_comm(&pme->overlap[0], pme->pme_order, pme->mpi_comm_d[0], pme->nnodes_major,
                      pme->nodeid_major, pme->nkx,
                      (div_round_up(pme->nky, pme->nnodes_minor) + pme->pme_order)
                              * (pme->nkz + pme->pme_order - 1));

    /* Along overlap dim 1 we can send in multiple pulses in sum_fftgrid_dd.
     * We do this with an offset buffer of equal size, so we need to allocate
     * extra for the offset. That's what the (+1)*pme->nkz is for.
     */
    init_overlap_comm(&pme->overlap[1], pme->pme_order, pme->mpi_comm_d[1], pme->nnodes_minor,
                      pme->nodeid_minor, pme->nky,
                      (div_round_up(pme->nkx, pme->nnodes_major) + pme->pme_order + 1) * pme->nkz);

    /* Double-check for a limitation of the (current) sum_fftgrid_dd code.
     * Note that gmx_pme_check_restrictions checked for this already.
     */
    if (pme->bUseThreads && (pme->overlap[0].comm_data.size() > 1))
    {
        gmx_incons(
                "More than one communication pulse required for grid overlap communication along "
                "the major dimension while using threads");
    }

    snew(pme->bsp_mod[XX], pme->nkx);
    snew(pme->bsp_mod[YY], pme->nky);
    snew(pme->bsp_mod[ZZ], pme->nkz);

    pme->gpu     = pmeGpu; /* Carrying over the single GPU structure */
    pme->runMode = runMode;

    /* The required size of the interpolation grid, including overlap.
     * The allocated size (pmegrid_n?) might be slightly larger.
     */
    pme->pmegrid_nx = pme->overlap[0].s2g1[pme->nodeid_major] - pme->overlap[0].s2g0[pme->nodeid_major];
    pme->pmegrid_ny = pme->overlap[1].s2g1[pme->nodeid_minor] - pme->overlap[1].s2g0[pme->nodeid_minor];
    pme->pmegrid_nz_base = pme->nkz;
    pme->pmegrid_nz      = pme->pmegrid_nz_base + pme->pme_order - 1;
    set_grid_alignment(&pme->pmegrid_nz, pme->pme_order);
    pme->pmegrid_start_ix = pme->overlap[0].s2g0[pme->nodeid_major];
    pme->pmegrid_start_iy = pme->overlap[1].s2g0[pme->nodeid_minor];
    pme->pmegrid_start_iz = 0;

    make_gridindex_to_localindex(pme->nkx, pme->pmegrid_start_ix,
                                 pme->pmegrid_nx - (pme->pme_order - 1), &pme->nnx, &pme->fshx);
    make_gridindex_to_localindex(pme->nky, pme->pmegrid_start_iy,
                                 pme->pmegrid_ny - (pme->pme_order - 1), &pme->nny, &pme->fshy);
    make_gridindex_to_localindex(pme->nkz, pme->pmegrid_start_iz, pme->pmegrid_nz_base, &pme->nnz,
                                 &pme->fshz);

    pme->spline_work = make_pme_spline_work(pme->pme_order);

    ndata[0] = pme->nkx;
    ndata[1] = pme->nky;
    ndata[2] = pme->nkz;
    /* It doesn't matter if we allocate too many grids here,
     * we only allocate and use the ones we need.
     */
    if (pme->doLJ)
    {
        pme->ngrids = ((ir->ljpme_combination_rule == eljpmeLB) ? DO_Q_AND_LJ_LB : DO_Q_AND_LJ);
    }
    else
    {
        pme->ngrids = DO_Q;
    }
    snew(pme->fftgrid, pme->ngrids);
    snew(pme->cfftgrid, pme->ngrids);
    snew(pme->pfft_setup, pme->ngrids);

    for (i = 0; i < pme->ngrids; ++i)
    {
        if ((i < DO_Q && pme->doCoulomb && (i == 0 || bFreeEnergy_q))
            || (i >= DO_Q && pme->doLJ
                && (i == 2 || bFreeEnergy_lj || ir->ljpme_combination_rule == eljpmeLB)))
        {
            pmegrids_init(&pme->pmegrid[i], pme->pmegrid_nx, pme->pmegrid_ny, pme->pmegrid_nz,
                          pme->pmegrid_nz_base, pme->pme_order, pme->bUseThreads, pme->nthread,
                          pme->overlap[0].s2g1[pme->nodeid_major]
                                  - pme->overlap[0].s2g0[pme->nodeid_major + 1],
                          pme->overlap[1].s2g1[pme->nodeid_minor]
                                  - pme->overlap[1].s2g0[pme->nodeid_minor + 1]);
            /* This routine will allocate the grid data to fit the FFTs */
            const auto allocateRealGridForGpu = (pme->runMode == PmeRunMode::Mixed)
                                                        ? gmx::PinningPolicy::PinnedIfSupported
                                                        : gmx::PinningPolicy::CannotBePinned;
            gmx_parallel_3dfft_init(&pme->pfft_setup[i], ndata, &pme->fftgrid[i], &pme->cfftgrid[i],
                                    pme->mpi_comm_d, bReproducible, pme->nthread, allocateRealGridForGpu);
        }
    }

    if (!pme->bP3M)
    {
        /* Use plain SPME B-spline interpolation */
        make_bspline_moduli(pme->bsp_mod, pme->nkx, pme->nky, pme->nkz, pme->pme_order);
    }
    else
    {
        /* Use the P3M grid-optimized influence function */
        make_p3m_bspline_moduli(pme->bsp_mod, pme->nkx, pme->nky, pme->nkz, pme->pme_order);
    }

    /* Use atc[0] for spreading */
    const int firstDimIndex   = (numPmeDomains.x > 1 ? 0 : 1);
    MPI_Comm  mpiCommFirstDim = (pme->nnodes > 1 ? pme->mpi_comm_d[firstDimIndex] : MPI_COMM_NULL);
    bool      doSpread        = true;
    pme->atc.emplace_back(mpiCommFirstDim, pme->nthread, pme->pme_order, firstDimIndex, doSpread);
    if (pme->ndecompdim >= 2)
    {
        const int secondDimIndex = 1;
        doSpread                 = false;
        pme->atc.emplace_back(pme->mpi_comm_d[1], pme->nthread, pme->pme_order, secondDimIndex, doSpread);
    }

    // Initial check of validity of the input for running on the GPU
    if (pme->runMode != PmeRunMode::CPU)
    {
        std::string errorString;
        bool        canRunOnGpu = pme_gpu_check_restrictions(pme.get(), &errorString);
        if (!canRunOnGpu)
        {
            GMX_THROW(gmx::NotImplementedError(errorString));
        }
        pme_gpu_reinit(pme.get(), deviceContext, deviceStream, pmeGpuProgram);
    }
    else
    {
        GMX_ASSERT(pme->gpu == nullptr, "Should not have PME GPU object when PME is on a CPU.");
    }


    pme_init_all_work(&pme->solve_work, pme->nthread, pme->nkx);

    // no exception was thrown during the init, so we hand over the PME structure handle
    return pme.release();
}
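
/* A minimal serial CPU initialization might look like this (sketch only;
 * error handling and the surrounding setup are omitted):
 *
 *   NumPmeDomains domains = { 1, 1 };
 *   gmx_pme_t*    pme     = gmx_pme_init(cr, domains, ir, FALSE, FALSE, FALSE,
 *                                        ewaldcoeff_q, ewaldcoeff_lj, 1,
 *                                        PmeRunMode::CPU, nullptr, nullptr,
 *                                        nullptr, nullptr, mdlog);
 */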

void gmx_pme_reinit(struct gmx_pme_t** pmedata,
                    const t_commrec*   cr,
                    struct gmx_pme_t*  pme_src,
                    const t_inputrec*  ir,
                    const ivec         grid_size,
                    real               ewaldcoeff_q,
                    real               ewaldcoeff_lj)
{
    // Create a copy of t_inputrec fields that are used in gmx_pme_init().
    // TODO: This would be better as just copying a sub-structure that contains
    // all the PME parameters and nothing else.
    t_inputrec irc;
    irc.pbcType                = ir->pbcType;
    irc.coulombtype            = ir->coulombtype;
    irc.vdwtype                = ir->vdwtype;
    irc.efep                   = ir->efep;
    irc.pme_order              = ir->pme_order;
    irc.epsilon_r              = ir->epsilon_r;
    irc.ljpme_combination_rule = ir->ljpme_combination_rule;
    irc.nkx                    = grid_size[XX];
    irc.nky                    = grid_size[YY];
    irc.nkz                    = grid_size[ZZ];

    try
    {
        // This is reinit. Any logging should have been done at first init.
        // Here we should avoid writing notes for settings the user did not
        // set directly.
        const gmx::MDLogger dummyLogger;
        GMX_ASSERT(pmedata, "Invalid PME pointer");
        NumPmeDomains numPmeDomains = { pme_src->nnodes_major, pme_src->nnodes_minor };
        *pmedata = gmx_pme_init(cr, numPmeDomains, &irc, pme_src->bFEP_q, pme_src->bFEP_lj, FALSE,
                                ewaldcoeff_q, ewaldcoeff_lj, pme_src->nthread, pme_src->runMode,
                                pme_src->gpu, nullptr, nullptr, nullptr, dummyLogger);
        /* When running PME on the CPU not using domain decomposition,
         * the atom data is allocated once only in gmx_pme_(re)init().
         */
        if (!pme_src->gpu && pme_src->nnodes == 1)
        {
            gmx_pme_reinit_atoms(*pmedata, pme_src->atc[0].numAtoms(), nullptr, nullptr);
        }
        // TODO this is mostly passing around current values
    }
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR

    /* We can easily reuse the allocated pme grids in pme_src */
    reuse_pmegrids(&pme_src->pmegrid[PME_GRID_QA], &(*pmedata)->pmegrid[PME_GRID_QA]);
    /* We would like to reuse the fft grids, but that's harder */
}

void gmx_pme_calc_energy(gmx_pme_t* pme, gmx::ArrayRef<const gmx::RVec> x, gmx::ArrayRef<const real> q, real* V)
{
    pmegrids_t* grid;

    if (pme->nnodes > 1)
    {
        gmx_incons("gmx_pme_calc_energy called in parallel");
    }
    if (pme->bFEP_q)
    {
        gmx_incons("gmx_pme_calc_energy with free energy");
    }

    if (!pme->atc_energy)
    {
        pme->atc_energy = std::make_unique<PmeAtomComm>(MPI_COMM_NULL, 1, pme->pme_order, 0, true);
    }
    PmeAtomComm* atc = pme->atc_energy.get();
    atc->setNumAtoms(x.ssize());
    atc->x           = x;
    atc->coefficient = q;

    /* We only use the A-charges grid */
    grid = &pme->pmegrid[PME_GRID_QA];

    /* Only calculate the spline coefficients, don't actually spread */
    spread_on_grid(pme, atc, nullptr, TRUE, FALSE, pme->fftgrid[PME_GRID_QA], FALSE, PME_GRID_QA);

    *V = gather_energy_bsplines(pme, grid->grid.grid, atc);
}
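
/* Sketch of intended use (serial, non-FEP; e.g. when evaluating the energy of
 * inserted test particles):
 *
 *   real V;
 *   gmx_pme_calc_energy(pme, x, q, &V);
 *
 * where x and q hold the positions and charges of the atoms of interest.
 */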

/*! \brief Calculate initial Lorentz-Berthelot coefficients for LJ-PME */
static void calc_initial_lb_coeffs(gmx::ArrayRef<real> coefficient, const real* local_c6, const real* local_sigma)
{
    for (gmx::index i = 0; i < coefficient.ssize(); ++i)
    {
        real sigma4    = local_sigma[i];
        sigma4         = sigma4 * sigma4;
        sigma4         = sigma4 * sigma4;
        coefficient[i] = local_c6[i] / sigma4;
    }
}

/*! \brief Calculate next Lorentz-Berthelot coefficients for LJ-PME */
static void calc_next_lb_coeffs(gmx::ArrayRef<real> coefficient, const real* local_sigma)
{
    for (gmx::index i = 0; i < coefficient.ssize(); ++i)
    {
        coefficient[i] *= local_sigma[i];
    }
}
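
/* Together these two helpers generate the coefficients for the seven-term
 * Lorentz-Berthelot expansion used in gmx_pme_do() below: the buffer is
 * seeded with C6/sigma^4 and multiplied by sigma once before each spread,
 * so grid k (k = 2..8) is spread with C6*sigma^(k-5), i.e. from C6/sigma^3
 * up through C6*sigma^3.
 */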

int gmx_pme_do(struct gmx_pme_t*              pme,
               gmx::ArrayRef<const gmx::RVec> coordinates,
               gmx::ArrayRef<gmx::RVec>       forces,
               real                           chargeA[],
               real                           chargeB[],
               real                           c6A[],
               real                           c6B[],
               real                           sigmaA[],
               real                           sigmaB[],
               const matrix                   box,
               const t_commrec*               cr,
               int                            maxshift_x,
               int                            maxshift_y,
               t_nrnb*                        nrnb,
               gmx_wallcycle*                 wcycle,
               matrix                         vir_q,
               matrix                         vir_lj,
               real*                          energy_q,
               real*                          energy_lj,
               real                           lambda_q,
               real                           lambda_lj,
               real*                          dvdlambda_q,
               real*                          dvdlambda_lj,
               const gmx::StepWorkload&       stepWork)
{
    GMX_ASSERT(pme->runMode == PmeRunMode::CPU,
               "gmx_pme_do should not be called on the GPU PME run.");

    int                  d, npme, grid_index, max_grid_index;
    PmeAtomComm&         atc         = pme->atc[0];
    pmegrids_t*          pmegrid     = nullptr;
    real*                grid        = nullptr;
    real*                coefficient = nullptr;
    PmeOutput            output[2]; // The second is used for the B state with FEP
    real                 scale, lambda;
    gmx_bool             bClearF;
    gmx_parallel_3dfft_t pfft_setup;
    real*                fftgrid;
    t_complex*           cfftgrid;
    int                  thread;
    gmx_bool             bFirst, bDoSplines;
    int                  fep_state;
    int                  fep_states_lj = pme->bFEP_lj ? 2 : 1;
    // There's no support for computing energy without virial, or vice versa
    const bool computeEnergyAndVirial = (stepWork.computeEnergy || stepWork.computeVirial);

    /* We could be passing lambda!=0 while no q or LJ is actually perturbed */
    if (!pme->bFEP_q)
    {
        lambda_q = 0;
    }
    if (!pme->bFEP_lj)
    {
        lambda_lj = 0;
    }

    assert(pme->nnodes > 0);
    assert(pme->nnodes == 1 || pme->ndecompdim > 0);

    if (pme->nnodes > 1)
    {
        atc.pd.resize(coordinates.ssize());
        for (int d = pme->ndecompdim - 1; d >= 0; d--)
        {
            PmeAtomComm& atc = pme->atc[d];
            atc.maxshift     = (atc.dimind == 0 ? maxshift_x : maxshift_y);
        }
    }
    else
    {
        GMX_ASSERT(coordinates.ssize() == atc.numAtoms(), "We expect atc.numAtoms() coordinates");
        GMX_ASSERT(forces.ssize() >= atc.numAtoms(),
                   "We need a force buffer with at least atc.numAtoms() elements");

        atc.x = coordinates;
        atc.f = forces;
    }

    matrix scaledBox;
    pme->boxScaler->scaleBox(box, scaledBox);

    gmx::invertBoxMatrix(scaledBox, pme->recipbox);
    bFirst = TRUE;

    /* For simplicity, we construct the splines for all particles if
     * more than one PME calculation is needed. Some optimization
     * could be done by keeping track of which atoms have splines
     * constructed, and construct new splines on each pass for atoms
     * that don't yet have them.
     */

    bDoSplines = pme->bFEP || (pme->doCoulomb && pme->doLJ);

    /* We need a maximum of four separate PME calculations:
     * grid_index=0: Coulomb PME with charges from state A
     * grid_index=1: Coulomb PME with charges from state B
     * grid_index=2: LJ PME with C6 from state A
     * grid_index=3: LJ PME with C6 from state B
     * For Lorentz-Berthelot combination rules, a separate loop is used to
     * calculate all the terms
     */

    /* If we are doing LJ-PME with LB, we only do Q here */
    max_grid_index = (pme->ljpme_combination_rule == eljpmeLB) ? DO_Q : DO_Q_AND_LJ;

    for (grid_index = 0; grid_index < max_grid_index; ++grid_index)
    {
        /* Check if we should do calculations at this grid_index
         * If grid_index is odd we should be doing FEP
         * If grid_index < 2 we should be doing electrostatic PME
         * If grid_index >= 2 we should be doing LJ-PME
         */
        if ((grid_index < DO_Q && (!pme->doCoulomb || (grid_index == 1 && !pme->bFEP_q)))
            || (grid_index >= DO_Q && (!pme->doLJ || (grid_index == 3 && !pme->bFEP_lj))))
        {
            continue;
        }
        /* Unpack structure */
        pmegrid    = &pme->pmegrid[grid_index];
        fftgrid    = pme->fftgrid[grid_index];
        cfftgrid   = pme->cfftgrid[grid_index];
        pfft_setup = pme->pfft_setup[grid_index];
        switch (grid_index)
        {
            case 0: coefficient = chargeA; break;
            case 1: coefficient = chargeB; break;
            case 2: coefficient = c6A; break;
            case 3: coefficient = c6B; break;
        }

        grid = pmegrid->grid.grid;

        if (debug)
        {
            fprintf(debug, "PME: number of ranks = %d, rank = %d\n", cr->nnodes, cr->nodeid);
            fprintf(debug, "Grid = %p\n", static_cast<void*>(grid));
            if (grid == nullptr)
            {
                gmx_fatal(FARGS, "No grid!");
            }
        }

        if (pme->nnodes == 1)
        {
            atc.coefficient = gmx::arrayRefFromArray(coefficient, coordinates.size());
        }
        else
        {
            wallcycle_start(wcycle, ewcPME_REDISTXF);
            do_redist_pos_coeffs(pme, cr, bFirst, coordinates, coefficient);

            wallcycle_stop(wcycle, ewcPME_REDISTXF);
        }

        if (debug)
        {
            fprintf(debug, "Rank= %6d, pme local particles=%6d\n", cr->nodeid, atc.numAtoms());
        }

        wallcycle_start(wcycle, ewcPME_SPREAD);

        /* Spread the coefficients on a grid */
        spread_on_grid(pme, &atc, pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);

        if (bFirst)
        {
            inc_nrnb(nrnb, eNR_WEIGHTS, DIM * atc.numAtoms());
        }
        inc_nrnb(nrnb, eNR_SPREADBSP, pme->pme_order * pme->pme_order * pme->pme_order * atc.numAtoms());

        if (!pme->bUseThreads)
        {
            wrap_periodic_pmegrid(pme, grid);

            /* sum contributions to local grid from other nodes */
            if (pme->nnodes > 1)
            {
                gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
            }

            copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
        }

        wallcycle_stop(wcycle, ewcPME_SPREAD);

        /* TODO If the OpenMP and single-threaded implementations
           converge, then spread_on_grid() and
           copy_pmegrid_to_fftgrid() will perhaps live in the same
           source file.
        */

        /* Here we start a large thread parallel region */
#pragma omp parallel num_threads(pme->nthread) private(thread)
        {
            try
            {
                thread = gmx_omp_get_thread_num();
                int loop_count;

                /* do 3d-fft */
                if (thread == 0)
                {
                    wallcycle_start(wcycle, ewcPME_FFT);
                }
                gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX, thread, wcycle);
                if (thread == 0)
                {
                    wallcycle_stop(wcycle, ewcPME_FFT);
                }

                /* solve in k-space for our local cells */
                if (thread == 0)
                {
                    wallcycle_start(wcycle, (grid_index < DO_Q ? ewcPME_SOLVE : ewcLJPME));
                }
                if (grid_index < DO_Q)
                {
                    loop_count = solve_pme_yzx(
                            pme, cfftgrid, scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
                            computeEnergyAndVirial, pme->nthread, thread);
                }
                else
                {
                    loop_count =
                            solve_pme_lj_yzx(pme, &cfftgrid, FALSE,
                                             scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
                                             computeEnergyAndVirial, pme->nthread, thread);
                }

                if (thread == 0)
                {
                    wallcycle_stop(wcycle, (grid_index < DO_Q ? ewcPME_SOLVE : ewcLJPME));
                    inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
                }

                /* do 3d-invfft */
                if (thread == 0)
                {
                    wallcycle_start(wcycle, ewcPME_FFT);
                }
                gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL, thread, wcycle);
                if (thread == 0)
                {
                    wallcycle_stop(wcycle, ewcPME_FFT);


                    if (pme->nodeid == 0)
                    {
                        real ntot = pme->nkx * pme->nky * pme->nkz;
                        npme      = static_cast<int>(ntot * std::log(ntot) / std::log(2.0));
                        inc_nrnb(nrnb, eNR_FFT, 2 * npme);
                    }

                    /* Note: this wallcycle region is closed below
                       outside an OpenMP region, so take care if
                       refactoring code here. */
                    wallcycle_start(wcycle, ewcPME_GATHER);
                }

                copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
            }
            GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
        }
        /* End of thread parallel section.
         * With MPI we have to synchronize here before gmx_sum_qgrid_dd.
         */

        /* distribute local grid to all nodes */
        if (pme->nnodes > 1)
        {
            gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
        }

        unwrap_periodic_pmegrid(pme, grid);

        if (stepWork.computeForces)
        {
            /* interpolate forces for our local atoms */


            /* If we are running without parallelization,
             * atc->f is the actual force array, not a buffer,
             * therefore we should not clear it.
             */
            lambda  = grid_index < DO_Q ? lambda_q : lambda_lj;
            bClearF = (bFirst && PAR(cr));
#pragma omp parallel for num_threads(pme->nthread) schedule(static)
            for (thread = 0; thread < pme->nthread; thread++)
            {
                try
                {
                    gather_f_bsplines(pme, grid, bClearF, &atc, &atc.spline[thread],
                                      pme->bFEP ? (grid_index % 2 == 0 ? 1.0 - lambda : lambda) : 1.0);
                }
                GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
            }


            inc_nrnb(nrnb, eNR_GATHERFBSP,
                     pme->pme_order * pme->pme_order * pme->pme_order * atc.numAtoms());
            /* Note: this wallcycle region is opened above inside an OpenMP
               region, so take care if refactoring code here. */
            wallcycle_stop(wcycle, ewcPME_GATHER);
        }

        if (computeEnergyAndVirial)
        {
            /* This should only be called on the master thread
             * and after the threads have synchronized.
             */
            if (grid_index < 2)
            {
                get_pme_ener_vir_q(pme->solve_work, pme->nthread, &output[grid_index % 2]);
            }
            else
            {
                get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &output[grid_index % 2]);
            }
        }
        bFirst = FALSE;
    } /* of grid_index-loop */

    /* For Lorentz-Berthelot combination rules in LJ-PME, we need to calculate
     * seven terms. */

    if (pme->doLJ && pme->ljpme_combination_rule == eljpmeLB)
    {
        /* Loop over A- and B-state if we are doing FEP */
        for (fep_state = 0; fep_state < fep_states_lj; ++fep_state)
        {
            real *local_c6 = nullptr, *local_sigma = nullptr, *RedistC6 = nullptr, *RedistSigma = nullptr;
            gmx::ArrayRef<real> coefficientBuffer;
            if (pme->nnodes == 1)
            {
                pme->lb_buf1.resize(atc.numAtoms());
                coefficientBuffer = pme->lb_buf1;
                switch (fep_state)
                {
                    case 0:
                        local_c6    = c6A;
                        local_sigma = sigmaA;
                        break;
                    case 1:
                        local_c6    = c6B;
                        local_sigma = sigmaB;
                        break;
                    default: gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
                }
            }
            else
            {
                coefficientBuffer = atc.coefficientBuffer;
                switch (fep_state)
                {
                    case 0:
                        RedistC6    = c6A;
                        RedistSigma = sigmaA;
                        break;
                    case 1:
                        RedistC6    = c6B;
                        RedistSigma = sigmaB;
                        break;
                    default: gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
                }
                wallcycle_start(wcycle, ewcPME_REDISTXF);

                do_redist_pos_coeffs(pme, cr, bFirst, coordinates, RedistC6);
                pme->lb_buf1.resize(atc.numAtoms());
                pme->lb_buf2.resize(atc.numAtoms());
                local_c6 = pme->lb_buf1.data();
                for (int i = 0; i < atc.numAtoms(); ++i)
                {
                    local_c6[i] = atc.coefficient[i];
                }

                do_redist_pos_coeffs(pme, cr, FALSE, coordinates, RedistSigma);
                local_sigma = pme->lb_buf2.data();
                for (int i = 0; i < atc.numAtoms(); ++i)
                {
                    local_sigma[i] = atc.coefficient[i];
                }

                wallcycle_stop(wcycle, ewcPME_REDISTXF);
            }
            atc.coefficient = coefficientBuffer;
            calc_initial_lb_coeffs(coefficientBuffer, local_c6, local_sigma);

            /* Seven terms in LJ-PME with LB; grid_index values 0 and 1 are reserved for electrostatics */
            for (grid_index = 2; grid_index < 9; ++grid_index)
            {
                /* Unpack structure */
                pmegrid    = &pme->pmegrid[grid_index];
                fftgrid    = pme->fftgrid[grid_index];
                pfft_setup = pme->pfft_setup[grid_index];
                calc_next_lb_coeffs(coefficientBuffer, local_sigma);
                grid = pmegrid->grid.grid;

                wallcycle_start(wcycle, ewcPME_SPREAD);
                /* Spread the c6 on a grid */
                spread_on_grid(pme, &atc, pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);

                if (bFirst)
                {
                    inc_nrnb(nrnb, eNR_WEIGHTS, DIM * atc.numAtoms());
                }

                inc_nrnb(nrnb, eNR_SPREADBSP,
                         pme->pme_order * pme->pme_order * pme->pme_order * atc.numAtoms());
                if (pme->nthread == 1)
                {
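                    /* Note: with a single thread the periodic wrapping and
                     * node-local reduction happen right here; the spread
                     * stencil is pme_order points wide, so contributions that
                     * spilled over the grid edges are folded back into the
                     * primary cell before copying to FFT layout. */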
                    wrap_periodic_pmegrid(pme, grid);
                    /* sum contributions to local grid from other nodes */
                    if (pme->nnodes > 1)
                    {
                        gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
                    }
                    copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
                }
                wallcycle_stop(wcycle, ewcPME_SPREAD);

                /* Here we start a large thread-parallel region */
#pragma omp parallel num_threads(pme->nthread) private(thread)
                {
                    try
                    {
                        thread = gmx_omp_get_thread_num();
                        /* do 3d-fft */
                        if (thread == 0)
                        {
                            wallcycle_start(wcycle, ewcPME_FFT);
                        }

                        gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX, thread, wcycle);
                        if (thread == 0)
                        {
                            wallcycle_stop(wcycle, ewcPME_FFT);
                        }
                    }
                    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
                }
                bFirst = FALSE;
            }
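            /* At this point all seven coefficient grids are in reciprocal
             * space; the LJ solver below consumes them together (note the
             * &pme->cfftgrid[2] argument) and applies the combined
             * convolution in one k-space sweep. */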
            /* solve in k-space for our local cells */
#pragma omp parallel num_threads(pme->nthread) private(thread)
            {
                try
                {
                    int loop_count;
                    thread = gmx_omp_get_thread_num();
                    if (thread == 0)
                    {
                        wallcycle_start(wcycle, ewcLJPME);
                    }

                    loop_count =
                            solve_pme_lj_yzx(pme, &pme->cfftgrid[2], TRUE,
                                             scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
                                             computeEnergyAndVirial, pme->nthread, thread);
                    if (thread == 0)
                    {
                        wallcycle_stop(wcycle, ewcLJPME);
                        inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
                    }
                }
                GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
            }

            if (computeEnergyAndVirial)
            {
                /* This should only be called on the master thread and
                 * after the threads have synchronized.
                 */
                get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &output[fep_state]);
            }
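
            /* If the Coulomb mesh part already ran above, forces have been
             * accumulated into the output buffers, so the first LJ gather
             * pass must not clear them; hence bFirst is derived from
             * doCoulomb here. */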
            bFirst = !pme->doCoulomb;
            calc_initial_lb_coeffs(coefficientBuffer, local_c6, local_sigma);
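            /* A reading of the index arithmetic: the gather loop below runs
             * backwards (grid_index 8 down to 2) while the coefficient
             * recursion still multiplies by sigma each pass, so grid k ends
             * up paired with the per-atom factor of term 6-k; this appears to
             * be the cross pairing between binomial terms that the forces
             * require. */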
            for (grid_index = 8; grid_index >= 2; --grid_index)
            {
                /* Unpack structure */
                pmegrid    = &pme->pmegrid[grid_index];
                fftgrid    = pme->fftgrid[grid_index];
                pfft_setup = pme->pfft_setup[grid_index];
                grid       = pmegrid->grid.grid;
                calc_next_lb_coeffs(coefficientBuffer, local_sigma);
#pragma omp parallel num_threads(pme->nthread) private(thread)
                {
                    try
                    {
                        thread = gmx_omp_get_thread_num();
                        /* do inverse 3d-fft */
                        if (thread == 0)
                        {
                            wallcycle_start(wcycle, ewcPME_FFT);
                        }

                        gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL, thread, wcycle);
                        if (thread == 0)
                        {
                            wallcycle_stop(wcycle, ewcPME_FFT);

                            if (pme->nodeid == 0)
                            {
                                real ntot = pme->nkx * pme->nky * pme->nkz;
                                npme      = static_cast<int>(ntot * std::log(ntot) / std::log(2.0));
                                inc_nrnb(nrnb, eNR_FFT, 2 * npme);
                            }
                            wallcycle_start(wcycle, ewcPME_GATHER);
                        }

                        copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
                    }
                    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
                } /* #pragma omp parallel */

                /* distribute local grid to all nodes */
                if (pme->nnodes > 1)
                {
                    gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
                }

                unwrap_periodic_pmegrid(pme, grid);

                if (stepWork.computeForces)
                {
                    /* interpolate forces for our local atoms */
                    bClearF = (bFirst && PAR(cr));
                    scale   = pme->bFEP ? (fep_state < 1 ? 1.0 - lambda_lj : lambda_lj) : 1.0;
                    scale *= lb_scale_factor[grid_index - 2];
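                    /* The lambda factor interpolates the A/B FEP end states;
                     * the lb_scale_factor table is expected to carry the
                     * binomial weight of each LB expansion term (see the
                     * comment at the top of the LB block). */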

#pragma omp parallel for num_threads(pme->nthread) schedule(static)
                    for (thread = 0; thread < pme->nthread; thread++)
                    {
                        try
                        {
                            gather_f_bsplines(pme, grid, bClearF, &pme->atc[0],
                                              &pme->atc[0].spline[thread], scale);
                        }
                        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
                    }

                    inc_nrnb(nrnb, eNR_GATHERFBSP,
                             pme->pme_order * pme->pme_order * pme->pme_order * pme->atc[0].numAtoms());
                }
                wallcycle_stop(wcycle, ewcPME_GATHER);

                bFirst = FALSE;
            } /* for (grid_index = 8; grid_index >= 2; --grid_index) */
        }     /* for (fep_state = 0; fep_state < fep_states_lj; ++fep_state) */
    }         /* if (pme->doLJ && pme->ljpme_combination_rule == eljpmeLB) */
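
    /* In parallel runs the mesh forces now live on the PME-internal atom
     * communication structures; hand them back along each decomposition
     * dimension to the ranks that own the atoms. */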
    if (stepWork.computeForces && pme->nnodes > 1)
    {
        wallcycle_start(wcycle, ewcPME_REDISTXF);
        for (d = 0; d < pme->ndecompdim; d++)
        {
            gmx::ArrayRef<gmx::RVec> forcesRef;
            if (d == pme->ndecompdim - 1)
            {
                const size_t numAtoms = coordinates.size();
                GMX_ASSERT(forces.size() >= numAtoms, "Need at least numAtoms forces");
                forcesRef = forces.subArray(0, numAtoms);
            }
            else
            {
                forcesRef = pme->atc[d + 1].f;
            }
            if (DOMAINDECOMP(cr))
            {
                dd_pmeredist_f(pme, &pme->atc[d], forcesRef, d == pme->ndecompdim - 1 && pme->bPPnode);
            }
        }

        wallcycle_stop(wcycle, ewcPME_REDISTXF);
    }

    if (computeEnergyAndVirial)
    {
        if (pme->doCoulomb)
        {
            if (!pme->bFEP_q)
            {
                *energy_q = output[0].coulombEnergy_;
                m_add(vir_q, output[0].coulombVirial_, vir_q);
            }
            else
            {
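                /* Linear coupling between the end states:
                 *   E(lambda) = (1 - lambda) * E_A + lambda * E_B,
                 * so dE/dlambda = E_B - E_A, accumulated into dvdlambda_q
                 * below. The virial is interpolated the same way. */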
                *energy_q = (1.0 - lambda_q) * output[0].coulombEnergy_ + lambda_q * output[1].coulombEnergy_;
                *dvdlambda_q += output[1].coulombEnergy_ - output[0].coulombEnergy_;
                for (int i = 0; i < DIM; i++)
                {
                    for (int j = 0; j < DIM; j++)
                    {
                        vir_q[i][j] += (1.0 - lambda_q) * output[0].coulombVirial_[i][j]
                                       + lambda_q * output[1].coulombVirial_[i][j];
                    }
                }
            }
            if (debug)
            {
                fprintf(debug, "Electrostatic PME mesh energy: %g\n", *energy_q);
            }
        }
        else
        {
            *energy_q = 0;
        }

        if (pme->doLJ)
        {
            if (!pme->bFEP_lj)
            {
                *energy_lj = output[0].lennardJonesEnergy_;
                m_add(vir_lj, output[0].lennardJonesVirial_, vir_lj);
            }
            else
            {
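                /* Same lambda interpolation as for the Coulomb part above,
                 * now with the Lennard-Jones coupling parameter. */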
                *energy_lj = (1.0 - lambda_lj) * output[0].lennardJonesEnergy_
                             + lambda_lj * output[1].lennardJonesEnergy_;
                *dvdlambda_lj += output[1].lennardJonesEnergy_ - output[0].lennardJonesEnergy_;
                for (int i = 0; i < DIM; i++)
                {
                    for (int j = 0; j < DIM; j++)
                    {
                        vir_lj[i][j] += (1.0 - lambda_lj) * output[0].lennardJonesVirial_[i][j]
                                        + lambda_lj * output[1].lennardJonesVirial_[i][j];
                    }
                }
            }
            if (debug)
            {
                fprintf(debug, "Lennard-Jones PME mesh energy: %g\n", *energy_lj);
            }
        }
        else
        {
            *energy_lj = 0;
        }
    }
    return 0;
}

void gmx_pme_destroy(gmx_pme_t* pme)
{
    if (!pme)
    {
        return;
    }

    delete pme->boxScaler;

    sfree(pme->nnx);
    sfree(pme->nny);
    sfree(pme->nnz);
    sfree(pme->fshx);
    sfree(pme->fshy);
    sfree(pme->fshz);

    for (int i = 0; i < pme->ngrids; ++i)
    {
        pmegrids_destroy(&pme->pmegrid[i]);
    }
    if (pme->pfft_setup)
    {
        for (int i = 0; i < pme->ngrids; ++i)
        {
            gmx_parallel_3dfft_destroy(pme->pfft_setup[i]);
        }
    }
    sfree(pme->fftgrid);
    sfree(pme->cfftgrid);
    sfree(pme->pfft_setup);

    for (int i = 0; i < DIM; i++)
    {
        sfree(pme->bsp_mod[i]);
    }

    sfree(pme->bufv);
    sfree(pme->bufr);

    if (pme->solve_work)
    {
        pme_free_all_work(&pme->solve_work, pme->nthread);
    }

    sfree(pme->sum_qgrid_tmp);
    sfree(pme->sum_qgrid_dd_tmp);

    destroy_pme_spline_work(pme->spline_work);

    if (pme->gpu != nullptr)
    {
        pme_gpu_destroy(pme->gpu);
    }

    delete pme;
}

void gmx_pme_reinit_atoms(gmx_pme_t* pme, const int numAtoms, const real* chargesA, const real* chargesB)
{
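    /* Called when the set of atoms handled by this PME instance changes,
     * e.g. after domain repartitioning. Only the GPU path consumes the
     * charges here; the CPU path currently receives its charges elsewhere
     * (see the TODO below). */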
    if (pme->gpu != nullptr)
    {
        GMX_ASSERT(!(pme->bFEP_q && chargesB == nullptr),
                   "B state charges must be specified if running Coulomb FEP on the GPU");
        pme_gpu_reinit_atoms(pme->gpu, numAtoms, chargesA, pme->bFEP_q ? chargesB : nullptr);
    }
    else
    {
        pme->atc[0].setNumAtoms(numAtoms);
        // TODO: set the charges here as well
    }
}

bool gmx_pme_grid_matches(const gmx_pme_t& pme, const ivec grid_size)
{
    return (pme.nkx == grid_size[XX] && pme.nky == grid_size[YY] && pme.nkz == grid_size[ZZ]);
}