Merge release-2019 into master
[alexxy/gromacs.git] src/gromacs/ewald/pme.cpp
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5  * Copyright (c) 2001-2004, The GROMACS development team.
6  * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
7  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8  * and including many others, as listed in the AUTHORS file in the
9  * top-level source directory and at http://www.gromacs.org.
10  *
11  * GROMACS is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public License
13  * as published by the Free Software Foundation; either version 2.1
14  * of the License, or (at your option) any later version.
15  *
16  * GROMACS is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with GROMACS; if not, see
23  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
25  *
26  * If you want to redistribute modifications to GROMACS, please
27  * consider that scientific software is very special. Version
28  * control is crucial - bugs must be traceable. We will be happy to
29  * consider code for inclusion in the official distribution, but
30  * derived work must not be called official GROMACS. Details are found
31  * in the README & COPYING files - if they are missing, get the
32  * official version at http://www.gromacs.org.
33  *
34  * To help us fund GROMACS development, we humbly ask that you cite
35  * the research papers on the package. Check out http://www.gromacs.org.
36  */
37 /*! \internal \file
38  *
39  * \brief This file contains function definitions necessary for
40  * computing energies and forces for the PME long-ranged part (Coulomb
41  * and LJ).
42  *
43  * \author Erik Lindahl <erik@kth.se>
44  * \author Berk Hess <hess@kth.se>
45  * \ingroup module_ewald
46  */
47 /* IMPORTANT FOR DEVELOPERS:
48  *
49  * Triclinic pme stuff isn't entirely trivial, and we've experienced
50  * some bugs during development (many of them due to me). To avoid
51  * this in the future, please check the following things if you make
52  * changes in this file:
53  *
54  * 1. You should obtain identical (at least to the PME precision)
55  *    energies, forces, and virial for
56  *    a rectangular box and a triclinic one where the z (or y) axis is
57  *    tilted by a whole box side. For instance you could use these boxes:
58  *
59  *    rectangular       triclinic
60  *     2  0  0           2  0  0
61  *     0  2  0           0  2  0
62  *     0  0  6           2  2  6
63  *
64  * 2. You should check the energy conservation in a triclinic box.
65  *
66  * It might seem like overkill, but better safe than sorry.
67  * /Erik 001109
68  */
69
70 #include "gmxpre.h"
71
72 #include "pme.h"
73
74 #include "config.h"
75
76 #include <cassert>
77 #include <cmath>
78 #include <cstdio>
79 #include <cstdlib>
80 #include <cstring>
81
82 #include <algorithm>
83 #include <list>
84
85 #include "gromacs/domdec/domdec.h"
86 #include "gromacs/ewald/ewald_utils.h"
87 #include "gromacs/fft/parallel_3dfft.h"
88 #include "gromacs/fileio/pdbio.h"
89 #include "gromacs/gmxlib/network.h"
90 #include "gromacs/gmxlib/nrnb.h"
91 #include "gromacs/hardware/hw_info.h"
92 #include "gromacs/math/gmxcomplex.h"
93 #include "gromacs/math/invertmatrix.h"
94 #include "gromacs/math/units.h"
95 #include "gromacs/math/vec.h"
96 #include "gromacs/math/vectypes.h"
97 #include "gromacs/mdtypes/commrec.h"
98 #include "gromacs/mdtypes/forcerec.h"
99 #include "gromacs/mdtypes/inputrec.h"
100 #include "gromacs/mdtypes/md_enums.h"
101 #include "gromacs/pbcutil/pbc.h"
102 #include "gromacs/timing/cyclecounter.h"
103 #include "gromacs/timing/wallcycle.h"
104 #include "gromacs/timing/walltime_accounting.h"
105 #include "gromacs/topology/topology.h"
106 #include "gromacs/utility/basedefinitions.h"
107 #include "gromacs/utility/cstringutil.h"
108 #include "gromacs/utility/exceptions.h"
109 #include "gromacs/utility/fatalerror.h"
110 #include "gromacs/utility/gmxmpi.h"
111 #include "gromacs/utility/gmxomp.h"
112 #include "gromacs/utility/logger.h"
113 #include "gromacs/utility/real.h"
114 #include "gromacs/utility/smalloc.h"
115 #include "gromacs/utility/stringutil.h"
116 #include "gromacs/utility/unique_cptr.h"
117
118 #include "calculate_spline_moduli.h"
119 #include "pme_gather.h"
120 #include "pme_gpu_internal.h"
121 #include "pme_grid.h"
122 #include "pme_internal.h"
123 #include "pme_redistribute.h"
124 #include "pme_solve.h"
125 #include "pme_spline_work.h"
126 #include "pme_spread.h"
127
128 /*! \brief Help build a descriptive message in \c error if there are
129  * \c errorReasons why PME on GPU is not supported.
130  *
131  * \returns Whether the absence of errorReasons indicates there is support. */
132 static bool
133 addMessageIfNotSupported(const std::list<std::string> &errorReasons,
134                          std::string                  *error)
135 {
136     bool isSupported = errorReasons.empty();
137     if (!isSupported && error)
138     {
139         std::string regressionTestMarker = "PME GPU does not support";
140         // this prefix is tested for in the regression-test script gmxtest.pl
141         *error = regressionTestMarker;
142         if (errorReasons.size() == 1)
143         {
144             *error += " " + errorReasons.back();
145         }
146         else
147         {
148             *error += ": " + gmx::joinStrings(errorReasons, "; ");
149         }
150         *error += ".";
151     }
152     return isSupported;
153 }
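
/* Illustrative sketch (comment only, not part of the build) of the message
 * assembled above, with hypothetical reason strings:
 *
 *   std::list<std::string> reasons = { "a double-precision build",
 *                                      "Lennard-Jones PME" };
 *   std::string error;
 *   addMessageIfNotSupported(reasons, &error);
 *   // error == "PME GPU does not support: a double-precision build; Lennard-Jones PME."
 *
 * With a single reason, a space is used instead of ": ":
 *   // error == "PME GPU does not support a double-precision build."
 */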
154
155 bool pme_gpu_supports_build(std::string *error)
156 {
157     std::list<std::string> errorReasons;
158     if (GMX_DOUBLE)
159     {
160         errorReasons.emplace_back("a double-precision build");
161     }
162     if (GMX_GPU == GMX_GPU_NONE)
163     {
164         errorReasons.emplace_back("a non-GPU build");
165     }
166     return addMessageIfNotSupported(errorReasons, error);
167 }
168
169 bool pme_gpu_supports_hardware(const gmx_hw_info_t &hwinfo,
170                                std::string         *error)
171 {
172     std::list<std::string> errorReasons;
173     if (GMX_GPU == GMX_GPU_OPENCL)
174     {
175         if (!areAllGpuDevicesFromAmd(hwinfo.gpu_info))
176         {
177             errorReasons.emplace_back("non-AMD devices");
178         }
179 #ifdef __APPLE__
180         errorReasons.emplace_back("Apple OS X operating system");
181 #endif
182     }
183     return addMessageIfNotSupported(errorReasons, error);
184 }
185
186 bool pme_gpu_supports_input(const t_inputrec &ir, const gmx_mtop_t &mtop, std::string *error)
187 {
188     std::list<std::string> errorReasons;
189     if (!EEL_PME(ir.coulombtype))
190     {
191         errorReasons.emplace_back("systems that do not use PME for electrostatics");
192     }
193     if (ir.pme_order != 4)
194     {
195         errorReasons.emplace_back("interpolation orders other than 4");
196     }
197     if (ir.efep != efepNO)
198     {
199         if (gmx_mtop_has_perturbed_charges(mtop))
200         {
201             errorReasons.emplace_back("free energy calculations with perturbed charges (multiple grids)");
202         }
203     }
204     if (EVDW_PME(ir.vdwtype))
205     {
206         errorReasons.emplace_back("Lennard-Jones PME");
207     }
208     if (!EI_DYNAMICS(ir.eI))
209     {
210         errorReasons.emplace_back("not a dynamical integrator");
211     }
212     return addMessageIfNotSupported(errorReasons, error);
213 }
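
/* Illustrative usage sketch (comment only, not part of the build): callers
 * would typically combine the three checks above before choosing a GPU PME
 * run; hwinfo, ir and mtop stand in for the caller's structures:
 *
 *   std::string error;
 *   bool canUseGpuPme = pme_gpu_supports_build(&error)
 *                       && pme_gpu_supports_hardware(hwinfo, &error)
 *                       && pme_gpu_supports_input(ir, mtop, &error);
 *   if (!canUseGpuPme) { fprintf(stderr, "%s\n", error.c_str()); }
 *
 * The actual task-assignment call sites may differ.
 */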
214
215 /*! \brief \libinternal
216  * Finds out whether PME with the given inputs can run on a GPU.
217  * This function is an internal final check, validating the whole PME structure on creation,
218  * but it still duplicates the preliminary checks from the (externally exposed) pme_gpu_supports_input() - just in case.
219  *
220  * \param[in]  pme          The PME structure.
221  * \param[out] error        The error message if the input is not supported on a GPU.
222  * \returns                 True if this PME input can run on a GPU, false otherwise.
223  */
224 static bool pme_gpu_check_restrictions(const gmx_pme_t *pme, std::string *error)
225 {
226     std::list<std::string> errorReasons;
227     if (pme->nnodes != 1)
228     {
229         errorReasons.emplace_back("PME decomposition");
230     }
231     if (pme->pme_order != 4)
232     {
233         errorReasons.emplace_back("interpolation orders other than 4");
234     }
235     if (pme->bFEP)
236     {
237         errorReasons.emplace_back("free energy calculations (multiple grids)");
238     }
239     if (pme->doLJ)
240     {
241         errorReasons.emplace_back("Lennard-Jones PME");
242     }
243     if (GMX_DOUBLE)
244     {
245         errorReasons.emplace_back("double precision");
246     }
247     if (GMX_GPU == GMX_GPU_NONE)
248     {
249         errorReasons.emplace_back("non-GPU build of GROMACS");
250     }
251
252     return addMessageIfNotSupported(errorReasons, error);
253 }
254
255 PmeRunMode pme_run_mode(const gmx_pme_t *pme)
256 {
257     GMX_ASSERT(pme != nullptr, "Expecting valid PME data pointer");
258     return pme->runMode;
259 }
260
261 gmx::PinningPolicy pme_get_pinning_policy()
262 {
263     return gmx::PinningPolicy::PinnedIfSupported;
264 }
265
266 /*! \brief Number of bytes in a cache line.
267  *
268  * Must also be a multiple of the SIMD and SIMD4 register size, to
269  * preserve alignment.
270  */
271 const int gmxCacheLineSize = 64;
272
273 //! Set up coordinate communication
274 static void setup_coordinate_communication(PmeAtomComm *atc)
275 {
276     int nslab, n, i;
277     int fw, bw;
278
279     nslab = atc->nslab;
280
281     n = 0;
282     for (i = 1; i <= nslab/2; i++)
283     {
284         fw = (atc->nodeid + i) % nslab;
285         bw = (atc->nodeid - i + nslab) % nslab;
286         if (n < nslab - 1)
287         {
288             atc->slabCommSetup[n].node_dest = fw;
289             atc->slabCommSetup[n].node_src  = bw;
290             n++;
291         }
292         if (n < nslab - 1)
293         {
294             atc->slabCommSetup[n].node_dest = bw;
295             atc->slabCommSetup[n].node_src  = fw;
296             n++;
297         }
298     }
299 }
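
/* Worked example (illustrative): with nslab = 4 and nodeid = 0 the loop
 * above fills slabCommSetup in the order
 *   n = 0: node_dest = 1, node_src = 3   (forward,  distance 1)
 *   n = 1: node_dest = 3, node_src = 1   (backward, distance 1)
 *   n = 2: node_dest = 2, node_src = 2   (distance 2; only nslab-1 = 3
 *                                         entries are needed in total)
 * so communication is set up with the nearest slabs first.
 */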
300
301 /*! \brief Round \p n up to the next multiple of \p f */
302 static int mult_up(int n, int f)
303 {
304     return ((n + f - 1)/f)*f;
305 }
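
/* For example (illustrative): mult_up(10, 4) == 12 and mult_up(12, 4) == 12,
 * i.e. values already at a multiple of f are left unchanged. */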
306
307 /*! \brief Return estimate of the load imbalance from the PME grid not being a good match for the number of PME ranks */
308 static double estimate_pme_load_imbalance(struct gmx_pme_t *pme)
309 {
310     int    nma, nmi;
311     double n1, n2, n3;
312
313     nma = pme->nnodes_major;
314     nmi = pme->nnodes_minor;
315
316     n1 = mult_up(pme->nkx, nma)*mult_up(pme->nky, nmi)*pme->nkz;
317     n2 = mult_up(pme->nkx, nma)*mult_up(pme->nkz, nmi)*pme->nky;
318     n3 = mult_up(pme->nky, nma)*mult_up(pme->nkz, nmi)*pme->nkx;
319
320     /* pme_solve is roughly double the cost of an fft */
321
322     return (n1 + n2 + 3*n3)/static_cast<double>(6*pme->nkx*pme->nky*pme->nkz);
323 }
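
/* Worked example (illustrative): nkx = nky = nkz = 60 with nnodes_major = 7
 * and nnodes_minor = 1 gives mult_up(60, 7) = 63, so n1 = n2 = n3 = 63*60*60
 * and the estimate is (1 + 1 + 3)*63*60*60/(6*60*60*60) = 0.875, compared
 * to 5/6 ~= 0.833 when all grid dimensions divide evenly over the ranks.
 */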
324
325 #ifndef DOXYGEN
326
327 PmeAtomComm::PmeAtomComm(MPI_Comm   PmeMpiCommunicator,
328                          const int  numThreads,
329                          const int  pmeOrder,
330                          const int  dimIndex,
331                          const bool doSpread) :
332     dimind(dimIndex),
333     bSpread(doSpread),
334     pme_order(pmeOrder),
335     nthread(numThreads),
336     spline(nthread)
337 {
338     if (PmeMpiCommunicator != MPI_COMM_NULL)
339     {
340         mpi_comm = PmeMpiCommunicator;
341 #if GMX_MPI
342         MPI_Comm_size(mpi_comm, &nslab);
343         MPI_Comm_rank(mpi_comm, &nodeid);
344 #endif
345     }
346     if (debug)
347     {
348         fprintf(debug, "For PME atom communication in dimind %d: nslab %d rank %d\n", dimind, nslab, nodeid);
349     }
350
351     if (nslab > 1)
352     {
353         slabCommSetup.resize(nslab);
354         setup_coordinate_communication(this);
355
356         count_thread.resize(nthread);
357         for (auto &countThread : count_thread)
358         {
359             countThread.resize(nslab);
360         }
361     }
362
363     if (nthread > 1)
364     {
365         threadMap.resize(nthread);
366
367 #pragma omp parallel for num_threads(nthread) schedule(static)
368         for (int thread = 0; thread < nthread; thread++)
369         {
370             try
371             {
372                 /* Allocate buffer with padding to avoid cache pollution */
373                 threadMap[thread].nBuffer.resize(nthread + 2*gmxCacheLineSize);
374                 threadMap[thread].n = threadMap[thread].nBuffer.data() + gmxCacheLineSize;
375             }
376             GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
377         }
378     }
379 }
380
381 #endif // !DOXYGEN
382
383 /*! \brief Initialize data structure for communication */
384 static void
385 init_overlap_comm(pme_overlap_t *  ol,
386                   int              norder,
387                   MPI_Comm         comm,
388                   int              nnodes,
389                   int              nodeid,
390                   int              ndata,
391                   int              commplainsize)
392 {
393     gmx_bool         bCont;
394
395     ol->mpi_comm = comm;
396     ol->nnodes   = nnodes;
397     ol->nodeid   = nodeid;
398
399     /* Linear translation of the PME grid won't affect reciprocal space
400      * calculations, so to optimize we only interpolate "upwards",
401      * which also means we only have to consider overlap in one direction.
402      * I.e., particles on this node might also be spread to grid indices
403      * that belong to higher nodes (modulo nnodes)
404      */
405
406     ol->s2g0.resize(ol->nnodes + 1);
407     ol->s2g1.resize(ol->nnodes);
408     if (debug)
409     {
410         fprintf(debug, "PME slab boundaries:");
411     }
412     for (int i = 0; i < nnodes; i++)
413     {
414         /* s2g0 the local interpolation grid start.
415          * s2g1 the local interpolation grid end.
416          * Since in calc_pidx we divide the particles, not the grid lines,
417          * uniformly in space along dimension x or y, we need to round
418          * s2g0 down and s2g1 up.
419          */
420         ol->s2g0[i] = (i * ndata + 0) / nnodes;
421         ol->s2g1[i] = ((i + 1) * ndata + nnodes - 1) / nnodes + norder - 1;
422
423         if (debug)
424         {
425             fprintf(debug, "  %3d %3d", ol->s2g0[i], ol->s2g1[i]);
426         }
427     }
428     ol->s2g0[nnodes] = ndata;
429     if (debug)
430     {
431         fprintf(debug, "\n");
432     }
433
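    /* Worked example (illustrative): ndata = 18, nnodes = 4, norder = 4
     * gives s2g0 = {0, 4, 9, 13, 18} and s2g1 = {8, 12, 17, 21}: each slab
     * gets its rounded share of grid lines plus norder-1 lines of upward
     * overlap.
     */
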
434     /* Determine with how many nodes we need to communicate the grid overlap */
435     int testRankCount = 0;
436     do
437     {
438         testRankCount++;
439         bCont = FALSE;
440         for (int i = 0; i < nnodes; i++)
441         {
442             if ((i + testRankCount <  nnodes && ol->s2g1[i] > ol->s2g0[i + testRankCount]) ||
443                 (i + testRankCount >= nnodes && ol->s2g1[i] > ol->s2g0[i + testRankCount - nnodes] + ndata))
444             {
445                 bCont = TRUE;
446             }
447         }
448     }
449     while (bCont && testRankCount < nnodes);
450
451     ol->comm_data.resize(testRankCount - 1);
452     ol->send_size = 0;
453
454     for (size_t b = 0; b < ol->comm_data.size(); b++)
455     {
456         pme_grid_comm_t *pgc = &ol->comm_data[b];
457
458         /* Send */
459         pgc->send_id = (ol->nodeid + (b + 1)) % ol->nnodes;
460         int fft_start = ol->s2g0[pgc->send_id];
461         int fft_end   = ol->s2g0[pgc->send_id + 1];
462         if (pgc->send_id < nodeid)
463         {
464             fft_start += ndata;
465             fft_end   += ndata;
466         }
467         int send_index1  = ol->s2g1[nodeid];
468         send_index1      = std::min(send_index1, fft_end);
469         pgc->send_index0 = fft_start;
470         pgc->send_nindex = std::max(0, send_index1 - pgc->send_index0);
471         ol->send_size   += pgc->send_nindex;
472
473         /* We always start receiving to the first index of our slab */
474         pgc->recv_id     = (ol->nodeid - (b + 1) + ol->nnodes) % ol->nnodes;
475         fft_start        = ol->s2g0[ol->nodeid];
476         fft_end          = ol->s2g0[ol->nodeid + 1];
477         int recv_index1  = ol->s2g1[pgc->recv_id];
478         if (pgc->recv_id > nodeid)
479         {
480             recv_index1 -= ndata;
481         }
482         recv_index1      = std::min(recv_index1, fft_end);
483         pgc->recv_index0 = fft_start;
484         pgc->recv_nindex = std::max(0, recv_index1 - pgc->recv_index0);
485     }
486
487 #if GMX_MPI
488     /* Communicate the buffer sizes to receive */
489     MPI_Status       stat;
490     for (size_t b = 0; b < ol->comm_data.size(); b++)
491     {
492         MPI_Sendrecv(&ol->send_size, 1, MPI_INT, ol->comm_data[b].send_id, b,
493                      &ol->comm_data[b].recv_size, 1, MPI_INT, ol->comm_data[b].recv_id, b,
494                      ol->mpi_comm, &stat);
495     }
496 #endif
497
498     /* For a non-divisible grid we need pme_order instead of pme_order-1 */
499     ol->sendbuf.resize(norder * commplainsize);
500     ol->recvbuf.resize(norder * commplainsize);
501 }
502
503 int minimalPmeGridSize(int pmeOrder)
504 {
505     /* The actual grid size limitations are:
506      *   serial:        >= pme_order
507      *   DD, no OpenMP: >= 2*(pme_order - 1)
508      *   DD, OpenMP:    >= pme_order + 1
509      * But we use the maximum for simplicity since in practice there is not
510      * much performance difference between pme_order and 2*(pme_order -1).
511      */
512     int minimalSize = 2*(pmeOrder - 1);
513
514     GMX_RELEASE_ASSERT(pmeOrder >= 3, "pmeOrder has to be >= 3");
515     GMX_RELEASE_ASSERT(minimalSize >= pmeOrder + 1, "The grid size should be >= pmeOrder + 1");
516
517     return minimalSize;
518 }
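
/* For the default pme_order = 4 (cubic splines) this gives a minimal grid
 * size of 2*(4 - 1) = 6 lines along each dimension. */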
519
520 bool gmx_pme_check_restrictions(int pme_order,
521                                 int nkx, int nky, int nkz,
522                                 int numPmeDomainsAlongX,
523                                 bool useThreads,
524                                 bool errorsAreFatal)
525 {
526     if (pme_order > PME_ORDER_MAX)
527     {
528         if (!errorsAreFatal)
529         {
530             return false;
531         }
532
533         std::string message = gmx::formatString(
534                     "pme_order (%d) is larger than the maximum allowed value (%d). Modify and recompile the code if you really need such a high order.",
535                     pme_order, PME_ORDER_MAX);
536         GMX_THROW(gmx::InconsistentInputError(message));
537     }
538
539     const int minGridSize = minimalPmeGridSize(pme_order);
540     if (nkx < minGridSize ||
541         nky < minGridSize ||
542         nkz < minGridSize)
543     {
544         if (!errorsAreFatal)
545         {
546             return false;
547         }
548         std::string message = gmx::formatString(
549                     "The PME grid sizes need to be >= 2*(pme_order-1) (%d)",
550                     minGridSize);
551         GMX_THROW(gmx::InconsistentInputError(message));
552     }
553
554     /* Check for a limitation of the (current) sum_fftgrid_dd code.
555      * We only allow multiple communication pulses in dim 1, not in dim 0.
556      */
557     if (useThreads && (nkx < numPmeDomainsAlongX*pme_order &&
558                        nkx != numPmeDomainsAlongX*(pme_order - 1)))
559     {
560         if (!errorsAreFatal)
561         {
562             return false;
563         }
564         gmx_fatal(FARGS, "The number of PME grid lines per rank along x is %g. But when using OpenMP threads, the number of grid lines per rank along x should be >= pme_order (%d) or = pme_order-1. To resolve this issue, use fewer ranks along x (and possibly more along y and/or z) by specifying -dd manually.",
565                   nkx/static_cast<double>(numPmeDomainsAlongX), pme_order);
566     }
567
568     return true;
569 }
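
/* Illustrative usage sketch (comment only, not part of the build): probing
 * whether a candidate grid is usable without triggering a fatal error;
 * numDomainsX and useThreads stand in for the caller's values:
 *
 *   bool gridOk = gmx_pme_check_restrictions(4, 48, 48, 48,
 *                                            numDomainsX, useThreads, false);
 *
 * gmx_pme_init() below makes the same call with errorsAreFatal = true.
 */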
570
571 /*! \brief Divide \p enumerator by \p denominator, rounding up to the next integer */
572 static int div_round_up(int enumerator, int denominator)
573 {
574     return (enumerator + denominator - 1)/denominator;
575 }
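
/* For example (illustrative): div_round_up(10, 4) == 3 and
 * div_round_up(12, 4) == 3. */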
576
577 gmx_pme_t *gmx_pme_init(const t_commrec         *cr,
578                         const NumPmeDomains     &numPmeDomains,
579                         const t_inputrec        *ir,
580                         gmx_bool                 bFreeEnergy_q,
581                         gmx_bool                 bFreeEnergy_lj,
582                         gmx_bool                 bReproducible,
583                         real                     ewaldcoeff_q,
584                         real                     ewaldcoeff_lj,
585                         int                      nthread,
586                         PmeRunMode               runMode,
587                         PmeGpu                  *pmeGpu,
588                         const gmx_device_info_t *gpuInfo,
589                         PmeGpuProgramHandle      pmeGpuProgram,
590                         const gmx::MDLogger      & /*mdlog*/)
591 {
592     int               use_threads, sum_use_threads, i;
593     ivec              ndata;
594
595     if (debug)
596     {
597         fprintf(debug, "Creating PME data structures.\n");
598     }
599
600     gmx::unique_cptr<gmx_pme_t, gmx_pme_destroy> pme(new gmx_pme_t());
601
602     pme->sum_qgrid_tmp       = nullptr;
603     pme->sum_qgrid_dd_tmp    = nullptr;
604
605     pme->buf_nalloc          = 0;
606
607     pme->nnodes              = 1;
608     pme->bPPnode             = TRUE;
609
610     pme->nnodes_major        = numPmeDomains.x;
611     pme->nnodes_minor        = numPmeDomains.y;
612
613     if (numPmeDomains.x*numPmeDomains.y > 1)
614     {
615         pme->mpi_comm = cr->mpi_comm_mygroup;
616
617 #if GMX_MPI
618         MPI_Comm_rank(pme->mpi_comm, &pme->nodeid);
619         MPI_Comm_size(pme->mpi_comm, &pme->nnodes);
620 #endif
621         if (pme->nnodes != numPmeDomains.x*numPmeDomains.y)
622         {
623             gmx_incons("PME rank count mismatch");
624         }
625     }
626     else
627     {
628         pme->mpi_comm = MPI_COMM_NULL;
629     }
630
631     if (pme->nnodes == 1)
632     {
633         pme->mpi_comm_d[0] = MPI_COMM_NULL;
634         pme->mpi_comm_d[1] = MPI_COMM_NULL;
635         pme->ndecompdim    = 0;
636         pme->nodeid_major  = 0;
637         pme->nodeid_minor  = 0;
638     }
639     else
640     {
641         if (numPmeDomains.y == 1)
642         {
643             pme->mpi_comm_d[0] = pme->mpi_comm;
644             pme->mpi_comm_d[1] = MPI_COMM_NULL;
645             pme->ndecompdim    = 1;
646             pme->nodeid_major  = pme->nodeid;
647             pme->nodeid_minor  = 0;
648
649         }
650         else if (numPmeDomains.x == 1)
651         {
652             pme->mpi_comm_d[0] = MPI_COMM_NULL;
653             pme->mpi_comm_d[1] = pme->mpi_comm;
654             pme->ndecompdim    = 1;
655             pme->nodeid_major  = 0;
656             pme->nodeid_minor  = pme->nodeid;
657         }
658         else
659         {
660             if (pme->nnodes % numPmeDomains.x != 0)
661             {
662                 gmx_incons("For 2D PME decomposition, #PME ranks must be divisible by the number of domains along x");
663             }
664             pme->ndecompdim = 2;
665
666 #if GMX_MPI
667             MPI_Comm_split(pme->mpi_comm, pme->nodeid % numPmeDomains.y,
668                            pme->nodeid, &pme->mpi_comm_d[0]);  /* My communicator along major dimension */
669             MPI_Comm_split(pme->mpi_comm, pme->nodeid/numPmeDomains.y,
670                            pme->nodeid, &pme->mpi_comm_d[1]);  /* My communicator along minor dimension */
671
672             MPI_Comm_rank(pme->mpi_comm_d[0], &pme->nodeid_major);
673             MPI_Comm_size(pme->mpi_comm_d[0], &pme->nnodes_major);
674             MPI_Comm_rank(pme->mpi_comm_d[1], &pme->nodeid_minor);
675             MPI_Comm_size(pme->mpi_comm_d[1], &pme->nnodes_minor);
676 #endif
677         }
678         pme->bPPnode = thisRankHasDuty(cr, DUTY_PP);
679     }
680
681     pme->nthread = nthread;
682
683     /* Check if any of the PME MPI ranks uses threads */
684     use_threads = (pme->nthread > 1 ? 1 : 0);
685 #if GMX_MPI
686     if (pme->nnodes > 1)
687     {
688         MPI_Allreduce(&use_threads, &sum_use_threads, 1, MPI_INT,
689                       MPI_SUM, pme->mpi_comm);
690     }
691     else
692 #endif
693     {
694         sum_use_threads = use_threads;
695     }
696     pme->bUseThreads = (sum_use_threads > 0);
697
698     if (ir->ePBC == epbcSCREW)
699     {
700         gmx_fatal(FARGS, "pme does not (yet) work with pbc = screw");
701     }
702
703     /* NOTE:
704      * It is likely that the current gmx_pme_do() routine supports calculating
705      * only Coulomb or LJ while gmx_pme_init() configures for both,
706      * but that has never been tested.
707  * It is likely that the current gmx_pme_do() routine supports not calculating
708  * the free energy for Coulomb and/or LJ even when gmx_pme_init()
709  * configures for free energy, but that has never been tested.
710      */
711     pme->doCoulomb     = EEL_PME(ir->coulombtype);
712     pme->doLJ          = EVDW_PME(ir->vdwtype);
713     pme->bFEP_q        = ((ir->efep != efepNO) && bFreeEnergy_q);
714     pme->bFEP_lj       = ((ir->efep != efepNO) && bFreeEnergy_lj);
715     pme->bFEP          = (pme->bFEP_q || pme->bFEP_lj);
716     pme->nkx           = ir->nkx;
717     pme->nky           = ir->nky;
718     pme->nkz           = ir->nkz;
719     pme->bP3M          = (ir->coulombtype == eelP3M_AD || getenv("GMX_PME_P3M") != nullptr);
720     pme->pme_order     = ir->pme_order;
721     pme->ewaldcoeff_q  = ewaldcoeff_q;
722     pme->ewaldcoeff_lj = ewaldcoeff_lj;
723
724     /* Always constant electrostatics coefficients */
725     pme->epsilon_r     = ir->epsilon_r;
726
727     /* Always constant LJ coefficients */
728     pme->ljpme_combination_rule = ir->ljpme_combination_rule;
729
730     // The box requires scaling with nwalls = 2; we store that condition as well
731     // as the scaling factor
732     delete pme->boxScaler;
733     pme->boxScaler = new EwaldBoxZScaler(*ir);
734
735     /* If we violate restrictions, generate a fatal error here */
736     gmx_pme_check_restrictions(pme->pme_order,
737                                pme->nkx, pme->nky, pme->nkz,
738                                pme->nnodes_major,
739                                pme->bUseThreads,
740                                true);
741
742     if (pme->nnodes > 1)
743     {
744         double imbal;
745
746 #if GMX_MPI
747         MPI_Type_contiguous(DIM, GMX_MPI_REAL, &(pme->rvec_mpi));
748         MPI_Type_commit(&(pme->rvec_mpi));
749 #endif
750
751         /* Note that the coefficient spreading and force gathering, which usually
752          * takes about the same amount of time as FFT+solve_pme,
753          * is always fully load balanced
754          * (unless the coefficient distribution is inhomogeneous).
755          */
756
757         imbal = estimate_pme_load_imbalance(pme.get());
758         if (imbal >= 1.2 && pme->nodeid_major == 0 && pme->nodeid_minor == 0)
759         {
760             fprintf(stderr,
761                     "\n"
762                     "NOTE: The load imbalance in PME FFT and solve is %d%%.\n"
763                     "      For optimal PME load balancing\n"
764                     "      PME grid_x (%d) and grid_y (%d) should be divisible by #PME_ranks_x (%d)\n"
765                     "      and PME grid_y (%d) and grid_z (%d) should be divisible by #PME_ranks_y (%d)\n"
766                     "\n",
767                     gmx::roundToInt((imbal-1)*100),
768                     pme->nkx, pme->nky, pme->nnodes_major,
769                     pme->nky, pme->nkz, pme->nnodes_minor);
770         }
771     }
772
773     /* For a non-divisible grid we need pme_order instead of pme_order-1 */
774     /* In sum_qgrid_dd x overlap is copied in place: take padding into account.
775      * y is always copied through a buffer: we don't need padding in z,
776      * but we do need the overlap in x because of the communication order.
777      */
778     init_overlap_comm(&pme->overlap[0], pme->pme_order,
779                       pme->mpi_comm_d[0],
780                       pme->nnodes_major, pme->nodeid_major,
781                       pme->nkx,
782                       (div_round_up(pme->nky, pme->nnodes_minor)+pme->pme_order)*(pme->nkz+pme->pme_order-1));
783
784     /* Along overlap dim 1 we can send in multiple pulses in sum_fftgrid_dd.
785      * We do this with an offset buffer of equal size, so we need to allocate
786      * extra for the offset. That's what the (+1)*pme->nkz is for.
787      */
788     init_overlap_comm(&pme->overlap[1], pme->pme_order,
789                       pme->mpi_comm_d[1],
790                       pme->nnodes_minor, pme->nodeid_minor,
791                       pme->nky,
792                       (div_round_up(pme->nkx, pme->nnodes_major)+pme->pme_order+1)*pme->nkz);
793
794     /* Double-check for a limitation of the (current) sum_fftgrid_dd code.
795      * Note that gmx_pme_check_restrictions checked for this already.
796      */
797     if (pme->bUseThreads && (pme->overlap[0].comm_data.size() > 1))
798     {
799         gmx_incons("More than one communication pulse required for grid overlap communication along the major dimension while using threads");
800     }
801
802     snew(pme->bsp_mod[XX], pme->nkx);
803     snew(pme->bsp_mod[YY], pme->nky);
804     snew(pme->bsp_mod[ZZ], pme->nkz);
805
806     pme->gpu     = pmeGpu; /* Carrying over the single GPU structure */
807     pme->runMode = runMode;
808
809     /* The required size of the interpolation grid, including overlap.
810      * The allocated size (pmegrid_n?) might be slightly larger.
811      */
812     pme->pmegrid_nx = pme->overlap[0].s2g1[pme->nodeid_major] -
813         pme->overlap[0].s2g0[pme->nodeid_major];
814     pme->pmegrid_ny = pme->overlap[1].s2g1[pme->nodeid_minor] -
815         pme->overlap[1].s2g0[pme->nodeid_minor];
816     pme->pmegrid_nz_base = pme->nkz;
817     pme->pmegrid_nz      = pme->pmegrid_nz_base + pme->pme_order - 1;
818     set_grid_alignment(&pme->pmegrid_nz, pme->pme_order);
819     pme->pmegrid_start_ix = pme->overlap[0].s2g0[pme->nodeid_major];
820     pme->pmegrid_start_iy = pme->overlap[1].s2g0[pme->nodeid_minor];
821     pme->pmegrid_start_iz = 0;
822
823     make_gridindex_to_localindex(pme->nkx,
824                                  pme->pmegrid_start_ix,
825                                  pme->pmegrid_nx - (pme->pme_order-1),
826                                  &pme->nnx, &pme->fshx);
827     make_gridindex_to_localindex(pme->nky,
828                                  pme->pmegrid_start_iy,
829                                  pme->pmegrid_ny - (pme->pme_order-1),
830                                  &pme->nny, &pme->fshy);
831     make_gridindex_to_localindex(pme->nkz,
832                                  pme->pmegrid_start_iz,
833                                  pme->pmegrid_nz_base,
834                                  &pme->nnz, &pme->fshz);
835
836     pme->spline_work = make_pme_spline_work(pme->pme_order);
837
838     ndata[0]    = pme->nkx;
839     ndata[1]    = pme->nky;
840     ndata[2]    = pme->nkz;
841     /* It doesn't matter if we reserve space for too many grids here,
842      * as we only allocate and use the ones we need.
843      */
844     if (pme->doLJ)
845     {
846         pme->ngrids = ((ir->ljpme_combination_rule == eljpmeLB) ? DO_Q_AND_LJ_LB : DO_Q_AND_LJ);
847     }
848     else
849     {
850         pme->ngrids = DO_Q;
851     }
852     snew(pme->fftgrid, pme->ngrids);
853     snew(pme->cfftgrid, pme->ngrids);
854     snew(pme->pfft_setup, pme->ngrids);
855
856     for (i = 0; i < pme->ngrids; ++i)
857     {
858         if ((i <  DO_Q && pme->doCoulomb && (i == 0 ||
859                                              bFreeEnergy_q)) ||
860             (i >= DO_Q && pme->doLJ && (i == 2 ||
861                                         bFreeEnergy_lj ||
862                                         ir->ljpme_combination_rule == eljpmeLB)))
863         {
864             pmegrids_init(&pme->pmegrid[i],
865                           pme->pmegrid_nx, pme->pmegrid_ny, pme->pmegrid_nz,
866                           pme->pmegrid_nz_base,
867                           pme->pme_order,
868                           pme->bUseThreads,
869                           pme->nthread,
870                           pme->overlap[0].s2g1[pme->nodeid_major]-pme->overlap[0].s2g0[pme->nodeid_major+1],
871                           pme->overlap[1].s2g1[pme->nodeid_minor]-pme->overlap[1].s2g0[pme->nodeid_minor+1]);
872             /* This routine will allocate the grid data to fit the FFTs */
873             const auto allocateRealGridForGpu = (pme->runMode == PmeRunMode::Mixed) ? gmx::PinningPolicy::PinnedIfSupported : gmx::PinningPolicy::CannotBePinned;
874             gmx_parallel_3dfft_init(&pme->pfft_setup[i], ndata,
875                                     &pme->fftgrid[i], &pme->cfftgrid[i],
876                                     pme->mpi_comm_d,
877                                     bReproducible, pme->nthread, allocateRealGridForGpu);
878
879         }
880     }
881
882     if (!pme->bP3M)
883     {
884         /* Use plain SPME B-spline interpolation */
885         make_bspline_moduli(pme->bsp_mod, pme->nkx, pme->nky, pme->nkz, pme->pme_order);
886     }
887     else
888     {
889         /* Use the P3M grid-optimized influence function */
890         make_p3m_bspline_moduli(pme->bsp_mod, pme->nkx, pme->nky, pme->nkz, pme->pme_order);
891     }
892
893     /* Use atc[0] for spreading */
894     const int firstDimIndex   = (numPmeDomains.x > 1 ? 0 : 1);
895     MPI_Comm  mpiCommFirstDim = (pme->nnodes > 1 ? pme->mpi_comm_d[firstDimIndex] : MPI_COMM_NULL);
896     bool      doSpread        = true;
897     pme->atc.emplace_back(mpiCommFirstDim, pme->nthread,
898                           pme->pme_order,
899                           firstDimIndex, doSpread);
900     if (pme->ndecompdim >= 2)
901     {
902         const int secondDimIndex = 1;
903         doSpread                 = false;
904         pme->atc.emplace_back(pme->mpi_comm_d[1], pme->nthread,
905                               pme->pme_order,
906                               secondDimIndex, doSpread);
907     }
908
909     if (pme_gpu_active(pme.get()))
910     {
911         if (!pme->gpu)
912         {
913             // Initial check of validity of the data
914             std::string errorString;
915             bool        canRunOnGpu = pme_gpu_check_restrictions(pme.get(), &errorString);
916             if (!canRunOnGpu)
917             {
918                 GMX_THROW(gmx::NotImplementedError(errorString));
919             }
920         }
921
922         pme_gpu_reinit(pme.get(), gpuInfo, pmeGpuProgram);
923     }
924
925     pme_init_all_work(&pme->solve_work, pme->nthread, pme->nkx);
926
927     // no exception was thrown during the init, so we hand over the PME structure handle
928     return pme.release();
929 }
930
931 void gmx_pme_reinit(struct gmx_pme_t **pmedata,
932                     const t_commrec   *cr,
933                     struct gmx_pme_t * pme_src,
934                     const t_inputrec * ir,
935                     const ivec         grid_size,
936                     real               ewaldcoeff_q,
937                     real               ewaldcoeff_lj)
938 {
939     // Create a copy of t_inputrec fields that are used in gmx_pme_init().
940     // TODO: This would be better as just copying a sub-structure that contains
941     // all the PME parameters and nothing else.
942     t_inputrec irc;
943     irc.ePBC                   = ir->ePBC;
944     irc.coulombtype            = ir->coulombtype;
945     irc.vdwtype                = ir->vdwtype;
946     irc.efep                   = ir->efep;
947     irc.pme_order              = ir->pme_order;
948     irc.epsilon_r              = ir->epsilon_r;
949     irc.ljpme_combination_rule = ir->ljpme_combination_rule;
950     irc.nkx                    = grid_size[XX];
951     irc.nky                    = grid_size[YY];
952     irc.nkz                    = grid_size[ZZ];
953
954     try
955     {
956         const gmx::MDLogger dummyLogger;
957         // This is reinit which is currently only changing grid size/coefficients,
958         // so we don't expect the actual logging.
959         // TODO: when PME is an object, it should take reference to mdlog on construction and save it.
960         GMX_ASSERT(pmedata, "Invalid PME pointer");
961         NumPmeDomains numPmeDomains = { pme_src->nnodes_major, pme_src->nnodes_minor };
962         *pmedata = gmx_pme_init(cr, numPmeDomains,
963                                 &irc, pme_src->bFEP_q, pme_src->bFEP_lj, FALSE, ewaldcoeff_q, ewaldcoeff_lj,
964                                 pme_src->nthread, pme_src->runMode, pme_src->gpu, nullptr, nullptr, dummyLogger);
965         /* When running PME on the CPU not using domain decomposition,
966          * the atom data is allocated once only in gmx_pme_(re)init().
967          */
968         if (!pme_src->gpu && pme_src->nnodes == 1)
969         {
970             gmx_pme_reinit_atoms(*pmedata, pme_src->atc[0].numAtoms(), nullptr);
971         }
972         //TODO this is mostly passing around current values
973     }
974     GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
975
976     /* We can easily reuse the allocated pme grids in pme_src */
977     reuse_pmegrids(&pme_src->pmegrid[PME_GRID_QA], &(*pmedata)->pmegrid[PME_GRID_QA]);
978     /* We would like to reuse the fft grids, but that's harder */
979 }
980
981 void gmx_pme_calc_energy(gmx_pme_t                      *pme,
982                          gmx::ArrayRef<const gmx::RVec>  x,
983                          gmx::ArrayRef<const real>       q,
984                          real                           *V)
985 {
986     pmegrids_t     *grid;
987
988     if (pme->nnodes > 1)
989     {
990         gmx_incons("gmx_pme_calc_energy called in parallel");
991     }
992     if (pme->bFEP_q)
993     {
994         gmx_incons("gmx_pme_calc_energy with free energy");
995     }
996
997     if (!pme->atc_energy)
998     {
999         pme->atc_energy = std::make_unique<PmeAtomComm>(MPI_COMM_NULL, 1, pme->pme_order,
1000                                                         0, true);
1001     }
1002     PmeAtomComm *atc = pme->atc_energy.get();
1003     atc->setNumAtoms(x.ssize());
1004     atc->x           = x;
1005     atc->coefficient = q;
1006
1007     /* We only use the A-charges grid */
1008     grid = &pme->pmegrid[PME_GRID_QA];
1009
1010     /* Only calculate the spline coefficients, don't actually spread */
1011     spread_on_grid(pme, atc, nullptr, TRUE, FALSE, pme->fftgrid[PME_GRID_QA], FALSE, PME_GRID_QA);
1012
1013     *V = gather_energy_bsplines(pme, grid->grid.grid, atc);
1014 }
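
/* Illustrative usage sketch (comment only, not part of the build): on a
 * single rank, the reciprocal-space Coulomb energy of a set of positions x
 * with charges q (both hypothetical arrays) can be obtained without
 * computing forces:
 *
 *   real V;
 *   gmx_pme_calc_energy(pme, x, q, &V);
 *
 * Only the A-state charge grid is used.
 */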
1015
1016 /*! \brief Calculate initial Lorentz-Berthelot coefficients for LJ-PME */
1017 static void
1018 calc_initial_lb_coeffs(gmx::ArrayRef<real>  coefficient,
1019                        const real          *local_c6,
1020                        const real          *local_sigma)
1021 {
1022     for (gmx::index i = 0; i < coefficient.ssize(); ++i)
1023     {
1024         real sigma4    = local_sigma[i];
1025         sigma4         = sigma4*sigma4;
1026         sigma4         = sigma4*sigma4;
1027         coefficient[i] = local_c6[i] / sigma4;
1028     }
1029 }
1030
1031 /*! \brief Calculate next Lorentz-Berthelot coefficients for LJ-PME */
1032 static void
1033 calc_next_lb_coeffs(gmx::ArrayRef<real>  coefficient,
1034                     const real          *local_sigma)
1035 {
1036     for (gmx::index i = 0; i < coefficient.ssize(); ++i)
1037     {
1038         coefficient[i] *= local_sigma[i];
1039     }
1040 }
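
/* Together these two helpers generate the seven per-atom coefficient sets
 * for LJ-PME with Lorentz-Berthelot rules: starting from c6_i/sigma_i^4 and
 * multiplying by sigma_i before each spread, pass k = 0..6 carries
 * c6_i*sigma_i^(k-3). Assuming the c6 arrays hold sqrt(C6) values (as the
 * callers supply), this is proportional to sqrt(eps_i)*sigma_i^k, and the
 * LJ solve combines pass k with pass 6-k into the binomial terms of the
 * (sigma_i + sigma_j)^6 expansion of C6_ij.
 */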
1041
1042 int gmx_pme_do(struct gmx_pme_t *pme,
1043                gmx::ArrayRef<const gmx::RVec> coordinates,
1044                gmx::ArrayRef<gmx::RVec>       forces,
1045                real chargeA[],  real chargeB[],
1046                real c6A[],      real c6B[],
1047                real sigmaA[],   real sigmaB[],
1048                const matrix box, const t_commrec *cr,
1049                int  maxshift_x, int maxshift_y,
1050                t_nrnb *nrnb,    gmx_wallcycle *wcycle,
1051                matrix vir_q,    matrix vir_lj,
1052                real *energy_q,  real *energy_lj,
1053                real lambda_q,   real lambda_lj,
1054                real *dvdlambda_q, real *dvdlambda_lj,
1055                int flags)
1056 {
1057     GMX_ASSERT(pme->runMode == PmeRunMode::CPU, "gmx_pme_do should not be called on the GPU PME run.");
1058
1059     int                  d, npme, grid_index, max_grid_index;
1060     PmeAtomComm         &atc         = pme->atc[0];
1061     pmegrids_t          *pmegrid     = nullptr;
1062     real                *grid        = nullptr;
1063     real                *coefficient = nullptr;
1064     PmeOutput            output[2]; // The second is used for the B state with FEP
1065     real                 scale, lambda;
1066     gmx_bool             bClearF;
1067     gmx_parallel_3dfft_t pfft_setup;
1068     real              *  fftgrid;
1069     t_complex          * cfftgrid;
1070     int                  thread;
1071     gmx_bool             bFirst, bDoSplines;
1072     int                  fep_state;
1073     int                  fep_states_lj           = pme->bFEP_lj ? 2 : 1;
1074     const gmx_bool       bCalcEnerVir            = (flags & GMX_PME_CALC_ENER_VIR) != 0;
1075     const gmx_bool       bBackFFT                = (flags & (GMX_PME_CALC_F | GMX_PME_CALC_POT)) != 0;
1076     const gmx_bool       bCalcF                  = (flags & GMX_PME_CALC_F) != 0;
1077
1078     /* We could be passing lambda!=1 while no q or LJ is actually perturbed */
1079     if (!pme->bFEP_q)
1080     {
1081         lambda_q  = 1;
1082     }
1083     if (!pme->bFEP_lj)
1084     {
1085         lambda_lj = 1;
1086     }
1087
1088     assert(pme->nnodes > 0);
1089     assert(pme->nnodes == 1 || pme->ndecompdim > 0);
1090
1091     if (pme->nnodes > 1)
1092     {
1093         atc.pd.resize(coordinates.ssize());
1094         for (int d = pme->ndecompdim-1; d >= 0; d--)
1095         {
1096             PmeAtomComm &atc    = pme->atc[d];
1097             atc.maxshift        = (atc.dimind == 0 ? maxshift_x : maxshift_y);
1098         }
1099     }
1100     else
1101     {
1102         GMX_ASSERT(coordinates.ssize() == atc.numAtoms(), "We expect atc.numAtoms() coordinates");
1103         GMX_ASSERT(forces.ssize() >= atc.numAtoms(), "We need a force buffer with at least atc.numAtoms() elements");
1104
1105         atc.x = coordinates;
1106         atc.f = forces;
1107     }
1108
1109     matrix scaledBox;
1110     pme->boxScaler->scaleBox(box, scaledBox);
1111
1112     gmx::invertBoxMatrix(scaledBox, pme->recipbox);
1113     bFirst = TRUE;
1114
1115     /* For simplicity, we construct the splines for all particles if
1116  * more than one PME calculation is needed. Some optimization
1117      * could be done by keeping track of which atoms have splines
1118      * constructed, and construct new splines on each pass for atoms
1119      * that don't yet have them.
1120      */
1121
1122     bDoSplines = pme->bFEP || (pme->doCoulomb && pme->doLJ);
1123
1124     /* We need a maximum of four separate PME calculations:
1125      * grid_index=0: Coulomb PME with charges from state A
1126      * grid_index=1: Coulomb PME with charges from state B
1127      * grid_index=2: LJ PME with C6 from state A
1128      * grid_index=3: LJ PME with C6 from state B
1129      * For Lorentz-Berthelot combination rules, a separate loop is used to
1130      * calculate all the terms
1131      */
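
    /* For example (illustrative): a plain Coulomb run without FEP processes
     * only grid_index 0; with perturbed charges, 0 and 1; with geometric-rule
     * LJ-PME added, also 2 (and 3 when LJ is perturbed).
     */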
1132
1133     /* If we are doing LJ-PME with LB, we only do Q here */
1134     max_grid_index = (pme->ljpme_combination_rule == eljpmeLB) ? DO_Q : DO_Q_AND_LJ;
1135
1136     for (grid_index = 0; grid_index < max_grid_index; ++grid_index)
1137     {
1138         /* Check if we should do calculations at this grid_index
1139          * If grid_index is odd we should be doing FEP
1140          * If grid_index < 2 we should be doing electrostatic PME
1141          * If grid_index >= 2 we should be doing LJ-PME
1142          */
1143         if ((grid_index <  DO_Q && (!pme->doCoulomb ||
1144                                     (grid_index == 1 && !pme->bFEP_q))) ||
1145             (grid_index >= DO_Q && (!pme->doLJ ||
1146                                     (grid_index == 3 && !pme->bFEP_lj))))
1147         {
1148             continue;
1149         }
1150         /* Unpack structure */
1151         pmegrid    = &pme->pmegrid[grid_index];
1152         fftgrid    = pme->fftgrid[grid_index];
1153         cfftgrid   = pme->cfftgrid[grid_index];
1154         pfft_setup = pme->pfft_setup[grid_index];
1155         switch (grid_index)
1156         {
1157             case 0: coefficient = chargeA; break;
1158             case 1: coefficient = chargeB; break;
1159             case 2: coefficient = c6A; break;
1160             case 3: coefficient = c6B; break;
1161         }
1162
1163         grid = pmegrid->grid.grid;
1164
1165         if (debug)
1166         {
1167             fprintf(debug, "PME: number of ranks = %d, rank = %d\n",
1168                     cr->nnodes, cr->nodeid);
1169             fprintf(debug, "Grid = %p\n", static_cast<void*>(grid));
1170             if (grid == nullptr)
1171             {
1172                 gmx_fatal(FARGS, "No grid!");
1173             }
1174         }
1175
1176         if (pme->nnodes == 1)
1177         {
1178             atc.coefficient = gmx::arrayRefFromArray(coefficient, coordinates.size());
1179         }
1180         else
1181         {
1182             wallcycle_start(wcycle, ewcPME_REDISTXF);
1183             do_redist_pos_coeffs(pme, cr, bFirst, coordinates, coefficient);
1184
1185             wallcycle_stop(wcycle, ewcPME_REDISTXF);
1186         }
1187
1188         if (debug)
1189         {
1190             fprintf(debug, "Rank= %6d, pme local particles=%6d\n",
1191                     cr->nodeid, atc.numAtoms());
1192         }
1193
1194         if (flags & GMX_PME_SPREAD)
1195         {
1196             wallcycle_start(wcycle, ewcPME_SPREAD);
1197
1198             /* Spread the coefficients on a grid */
1199             spread_on_grid(pme, &atc, pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);
1200
1201             if (bFirst)
1202             {
1203                 inc_nrnb(nrnb, eNR_WEIGHTS, DIM*atc.numAtoms());
1204             }
1205             inc_nrnb(nrnb, eNR_SPREADBSP,
1206                      pme->pme_order*pme->pme_order*pme->pme_order*atc.numAtoms());
1207
1208             if (!pme->bUseThreads)
1209             {
1210                 wrap_periodic_pmegrid(pme, grid);
1211
1212                 /* sum contributions to local grid from other nodes */
1213                 if (pme->nnodes > 1)
1214                 {
1215                     gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
1216                 }
1217
1218                 copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
1219             }
1220
1221             wallcycle_stop(wcycle, ewcPME_SPREAD);
1222
1223             /* TODO If the OpenMP and single-threaded implementations
1224                converge, then spread_on_grid() and
1225                copy_pmegrid_to_fftgrid() will perhaps live in the same
1226                source file.
1227              */
1228         }
1229
1230         /* Here we start a large thread parallel region */
1231 #pragma omp parallel num_threads(pme->nthread) private(thread)
1232         {
1233             try
1234             {
1235                 thread = gmx_omp_get_thread_num();
1236                 if (flags & GMX_PME_SOLVE)
1237                 {
1238                     int loop_count;
1239
1240                     /* do 3d-fft */
1241                     if (thread == 0)
1242                     {
1243                         wallcycle_start(wcycle, ewcPME_FFT);
1244                     }
1245                     gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX,
1246                                                thread, wcycle);
1247                     if (thread == 0)
1248                     {
1249                         wallcycle_stop(wcycle, ewcPME_FFT);
1250                     }
1251
1252                     /* solve in k-space for our local cells */
1253                     if (thread == 0)
1254                     {
1255                         wallcycle_start(wcycle, (grid_index < DO_Q ? ewcPME_SOLVE : ewcLJPME));
1256                     }
1257                     if (grid_index < DO_Q)
1258                     {
1259                         loop_count =
1260                             solve_pme_yzx(pme, cfftgrid,
1261                                           scaledBox[XX][XX]*scaledBox[YY][YY]*scaledBox[ZZ][ZZ],
1262                                           bCalcEnerVir,
1263                                           pme->nthread, thread);
1264                     }
1265                     else
1266                     {
1267                         loop_count =
1268                             solve_pme_lj_yzx(pme, &cfftgrid, FALSE,
1269                                              scaledBox[XX][XX]*scaledBox[YY][YY]*scaledBox[ZZ][ZZ],
1270                                              bCalcEnerVir,
1271                                              pme->nthread, thread);
1272                     }
1273
1274                     if (thread == 0)
1275                     {
1276                         wallcycle_stop(wcycle, (grid_index < DO_Q ? ewcPME_SOLVE : ewcLJPME));
1277                         inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
1278                     }
1279                 }
1280
1281                 if (bBackFFT)
1282                 {
1283                     /* do 3d-invfft */
1284                     if (thread == 0)
1285                     {
1286                         wallcycle_start(wcycle, ewcPME_FFT);
1287                     }
1288                     gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL,
1289                                                thread, wcycle);
1290                     if (thread == 0)
1291                     {
1292                         wallcycle_stop(wcycle, ewcPME_FFT);
1293
1294
1295                         if (pme->nodeid == 0)
1296                         {
1297                             real ntot = pme->nkx*pme->nky*pme->nkz;
1298                             npme  = static_cast<int>(ntot*std::log(ntot)/std::log(2.0));
1299                             inc_nrnb(nrnb, eNR_FFT, 2*npme);
1300                         }
1301
1302                         /* Note: this wallcycle region is closed below
1303                            outside an OpenMP region, so take care if
1304                            refactoring code here. */
1305                         wallcycle_start(wcycle, ewcPME_GATHER);
1306                     }
1307
1308                     copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
1309                 }
1310             } GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1311         }
1312         /* End of thread parallel section.
1313          * With MPI we have to synchronize here before gmx_sum_qgrid_dd.
1314          */
1315
1316         if (bBackFFT)
1317         {
1318             /* distribute local grid to all nodes */
1319             if (pme->nnodes > 1)
1320             {
1321                 gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
1322             }
1323
1324             unwrap_periodic_pmegrid(pme, grid);
1325         }
1326
1327         if (bCalcF)
1328         {
1329             /* interpolate forces for our local atoms */
1330
1331
1332             /* If we are running without parallelization,
1333              * atc->f is the actual force array, not a buffer,
1334              * therefore we should not clear it.
1335              */
1336             lambda  = grid_index < DO_Q ? lambda_q : lambda_lj;
1337             bClearF = (bFirst && PAR(cr));
1338 #pragma omp parallel for num_threads(pme->nthread) schedule(static)
1339             for (thread = 0; thread < pme->nthread; thread++)
1340             {
1341                 try
1342                 {
1343                     gather_f_bsplines(pme, grid, bClearF, &atc,
1344                                       &atc.spline[thread],
1345                                       pme->bFEP ? (grid_index % 2 == 0 ? 1.0-lambda : lambda) : 1.0);
1346                 }
1347                 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1348             }
1349
1350
1351             inc_nrnb(nrnb, eNR_GATHERFBSP,
1352                      pme->pme_order*pme->pme_order*pme->pme_order*atc.numAtoms());
1353             /* Note: this wallcycle region is opened above inside an OpenMP
1354                region, so take care if refactoring code here. */
1355             wallcycle_stop(wcycle, ewcPME_GATHER);
1356         }
1357
1358         if (bCalcEnerVir)
1359         {
1360             /* This should only be called on the master thread
1361              * and after the threads have synchronized.
1362              */
1363             if (grid_index < 2)
1364             {
1365                 get_pme_ener_vir_q(pme->solve_work, pme->nthread, &output[grid_index % 2]);
1366             }
1367             else
1368             {
1369                 get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &output[grid_index % 2]);
1370             }
1371         }
1372         bFirst = FALSE;
1373     } /* of grid_index-loop */
1374
1375     /* For Lorentz-Berthelot combination rules in LJ-PME, we need to calculate
1376      * seven terms. */
1377
1378     if (pme->doLJ && pme->ljpme_combination_rule == eljpmeLB)
1379     {
1380         /* Loop over A- and B-state if we are doing FEP */
1381         for (fep_state = 0; fep_state < fep_states_lj; ++fep_state)
1382         {
1383             real               *local_c6 = nullptr, *local_sigma = nullptr, *RedistC6 = nullptr, *RedistSigma = nullptr;
1384             gmx::ArrayRef<real> coefficientBuffer;
1385             if (pme->nnodes == 1)
1386             {
1387                 pme->lb_buf1.resize(atc.numAtoms());
1388                 coefficientBuffer = pme->lb_buf1;
1389                 switch (fep_state)
1390                 {
1391                     case 0:
1392                         local_c6      = c6A;
1393                         local_sigma   = sigmaA;
1394                         break;
1395                     case 1:
1396                         local_c6      = c6B;
1397                         local_sigma   = sigmaB;
1398                         break;
1399                     default:
1400                         gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
1401                 }
1402             }
1403             else
1404             {
1405                 coefficientBuffer = atc.coefficientBuffer;
1406                 switch (fep_state)
1407                 {
1408                     case 0:
1409                         RedistC6      = c6A;
1410                         RedistSigma   = sigmaA;
1411                         break;
1412                     case 1:
1413                         RedistC6      = c6B;
1414                         RedistSigma   = sigmaB;
1415                         break;
1416                     default:
1417                         gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
1418                 }
1419                 wallcycle_start(wcycle, ewcPME_REDISTXF);
1420
1421                 do_redist_pos_coeffs(pme, cr, bFirst, coordinates, RedistC6);
1422                 pme->lb_buf1.resize(atc.numAtoms());
1423                 pme->lb_buf2.resize(atc.numAtoms());
1424                 local_c6 = pme->lb_buf1.data();
1425                 for (int i = 0; i < atc.numAtoms(); ++i)
1426                 {
1427                     local_c6[i] = atc.coefficient[i];
1428                 }
1429
1430                 do_redist_pos_coeffs(pme, cr, FALSE, coordinates, RedistSigma);
1431                 local_sigma = pme->lb_buf2.data();
1432                 for (int i = 0; i < atc.numAtoms(); ++i)
1433                 {
1434                     local_sigma[i] = atc.coefficient[i];
1435                 }
1436
1437                 wallcycle_stop(wcycle, ewcPME_REDISTXF);
1438             }
1439             atc.coefficient = coefficientBuffer;
1440             calc_initial_lb_coeffs(coefficientBuffer, local_c6, local_sigma);
1441
1442                 /* Seven terms in LJ-PME with LB; grid_index < 2 is reserved for electrostatics */
1443             for (grid_index = 2; grid_index < 9; ++grid_index)
1444             {
1445                 /* Unpack structure */
1446                 pmegrid    = &pme->pmegrid[grid_index];
1447                 fftgrid    = pme->fftgrid[grid_index];
1448                 pfft_setup = pme->pfft_setup[grid_index];
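                /* calc_next_lb_coeffs() multiplies each coefficient by sigma_i,
                 * stepping the spread coefficients from one term of the LB
                 * expansion to the next. */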
1449                 calc_next_lb_coeffs(coefficientBuffer, local_sigma);
1450                 grid = pmegrid->grid.grid;
1451
1452                 if (flags & GMX_PME_SPREAD)
1453                 {
1454                     wallcycle_start(wcycle, ewcPME_SPREAD);
1455                     /* Spread the coefficients of the current LB term on a grid */
1456                     spread_on_grid(pme, &atc, pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);
1457
1458                     if (bFirst)
1459                     {
1460                         inc_nrnb(nrnb, eNR_WEIGHTS, DIM*atc.numAtoms());
1461                     }
1462
1463                     inc_nrnb(nrnb, eNR_SPREADBSP,
1464                              pme->pme_order*pme->pme_order*pme->pme_order*atc.numAtoms());
1465                     if (pme->nthread == 1)
1466                     {
1467                         wrap_periodic_pmegrid(pme, grid);
1468                         /* sum contributions to local grid from other nodes */
1469                         if (pme->nnodes > 1)
1470                         {
1471                             gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
1472                         }
1473                         copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
1474                     }
1475                     wallcycle_stop(wcycle, ewcPME_SPREAD);
1476                 }
1477                 /* Here we start a large thread-parallel region */
1478 #pragma omp parallel num_threads(pme->nthread) private(thread)
1479                 {
1480                     try
1481                     {
1482                         thread = gmx_omp_get_thread_num();
1483                         if (flags & GMX_PME_SOLVE)
1484                         {
1485                             /* do 3d-fft */
1486                             if (thread == 0)
1487                             {
1488                                 wallcycle_start(wcycle, ewcPME_FFT);
1489                             }
1490
1491                             gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX,
1492                                                        thread, wcycle);
1493                             if (thread == 0)
1494                             {
1495                                 wallcycle_stop(wcycle, ewcPME_FFT);
1496                             }
1497                         }
1498                     }
1499                     GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1500                 }
1501                 bFirst = FALSE;
1502             }
1503             if (flags & GMX_PME_SOLVE)
1504             {
1505                 /* solve in k-space for our local cells */
1506 #pragma omp parallel num_threads(pme->nthread) private(thread)
1507                 {
1508                     try
1509                     {
1510                         int loop_count;
1511                         thread = gmx_omp_get_thread_num();
1512                         if (thread == 0)
1513                         {
1514                             wallcycle_start(wcycle, ewcLJPME);
1515                         }
1516
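                        /* The seven LB grids start at index 2, hence
                         * &pme->cfftgrid[2]. GROMACS box matrices are
                         * lower-triangular, so the product of the diagonal
                         * elements below is the scaled box volume. */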
1517                         loop_count =
1518                             solve_pme_lj_yzx(pme, &pme->cfftgrid[2], TRUE,
1519                                              scaledBox[XX][XX]*scaledBox[YY][YY]*scaledBox[ZZ][ZZ],
1520                                              bCalcEnerVir,
1521                                              pme->nthread, thread);
1522                         if (thread == 0)
1523                         {
1524                             wallcycle_stop(wcycle, ewcLJPME);
1525                             inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
1526                         }
1527                     }
1528                     GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1529                 }
1530             }
1531
1532             if (bCalcEnerVir)
1533             {
1534                 /* This should only be called on the master thread and
1535                  * after the threads have synchronized.
1536                  */
1537                 get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &output[fep_state]);
1538             }
1539
1540             if (bBackFFT)
1541             {
1542                 bFirst = !pme->doCoulomb;
1543                 calc_initial_lb_coeffs(coefficientBuffer, local_c6, local_sigma);
1544                 for (grid_index = 8; grid_index >= 2; --grid_index)
1545                 {
1546                     /* Unpack structure */
1547                     pmegrid    = &pme->pmegrid[grid_index];
1548                     fftgrid    = pme->fftgrid[grid_index];
1549                     pfft_setup = pme->pfft_setup[grid_index];
1550                     grid       = pmegrid->grid.grid;
1551                     calc_next_lb_coeffs(coefficientBuffer, local_sigma);
1552 #pragma omp parallel num_threads(pme->nthread) private(thread)
1553                     {
1554                         try
1555                         {
1556                             thread = gmx_omp_get_thread_num();
1557                             /* do 3d-invfft */
1558                             if (thread == 0)
1559                             {
1560                                 wallcycle_start(wcycle, ewcPME_FFT);
1561                             }
1562
1563                             gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL,
1564                                                        thread, wcycle);
1565                             if (thread == 0)
1566                             {
1567                                 wallcycle_stop(wcycle, ewcPME_FFT);
1568
1569
1570                                 if (pme->nodeid == 0)
1571                                 {
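                                    /* Standard N*log2(N) FFT operation-count
                                     * model; the factor 2 below presumably
                                     * accounts for the forward and back 3D
                                     * transforms together. */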
1572                                     real ntot = pme->nkx*pme->nky*pme->nkz;
1573                                     npme  = static_cast<int>(ntot*std::log(ntot)/std::log(2.0));
1574                                     inc_nrnb(nrnb, eNR_FFT, 2*npme);
1575                                 }
1576                                 wallcycle_start(wcycle, ewcPME_GATHER);
1577                             }
1578
1579                             copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
1580                         }
1581                         GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1582                     } /*#pragma omp parallel*/
1583
1584                     /* distribute local grid to all nodes */
1585                     if (pme->nnodes > 1)
1586                     {
1587                         gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
1588                     }
1589
1590                     unwrap_periodic_pmegrid(pme, grid);
1591
1592                     if (bCalcF)
1593                     {
1594                         /* interpolate forces for our local atoms */
1595                         bClearF = (bFirst && PAR(cr));
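                        /* The gather scale combines the FEP weight of this
                         * lambda state with the binomial weight of the current
                         * LB term; lb_scale_factor[k] is binom(6,k)/64 with
                         * k = grid_index - 2 (editor's note). */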
1596                         scale   = pme->bFEP ? (fep_state < 1 ? 1.0-lambda_lj : lambda_lj) : 1.0;
1597                         scale  *= lb_scale_factor[grid_index-2];
1598
1599 #pragma omp parallel for num_threads(pme->nthread) schedule(static)
1600                         for (thread = 0; thread < pme->nthread; thread++)
1601                         {
1602                             try
1603                             {
1604                                 gather_f_bsplines(pme, grid, bClearF, &pme->atc[0],
1605                                                   &pme->atc[0].spline[thread],
1606                                                   scale);
1607                             }
1608                             GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1609                         }
1610
1611
1612                         inc_nrnb(nrnb, eNR_GATHERFBSP,
1613                                  pme->pme_order*pme->pme_order*pme->pme_order*pme->atc[0].numAtoms());
1614                     }
1615                     wallcycle_stop(wcycle, ewcPME_GATHER);
1616
1617                     bFirst = FALSE;
1618                 } /* for (grid_index = 8; grid_index >= 2; --grid_index) */
1619             }     /* if (bBackFFT) */
1620         }         /* for (fep_state = 0; fep_state < fep_states_lj; ++fep_state) */
1621     }             /* if (pme->doLJ && pme->ljpme_combination_rule == eljpmeLB) */
1622
1623     if (bCalcF && pme->nnodes > 1)
1624     {
1625         wallcycle_start(wcycle, ewcPME_REDISTXF);
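        /* Forces are passed back one decomposition dimension at a time:
         * intermediate stages land in the next dimension's atom-communication
         * buffers, the final stage in the caller-provided force array. */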
1626         for (d = 0; d < pme->ndecompdim; d++)
1627         {
1628             gmx::ArrayRef<gmx::RVec> forcesRef;
1629             if (d == pme->ndecompdim - 1)
1630             {
1631                 const size_t numAtoms = coordinates.size();
1632                 GMX_ASSERT(forces.size() >= numAtoms, "Need at least numAtoms forces");
1633                 forcesRef = forces.subArray(0, numAtoms);
1634             }
1635             else
1636             {
1637                 forcesRef = pme->atc[d + 1].f;
1638             }
1639             if (DOMAINDECOMP(cr))
1640             {
1641                 dd_pmeredist_f(pme, &pme->atc[d], forcesRef,
1642                                d == pme->ndecompdim-1 && pme->bPPnode);
1643             }
1644         }
1645
1646         wallcycle_stop(wcycle, ewcPME_REDISTXF);
1647     }
1648
1649     if (bCalcEnerVir)
1650     {
1651         if (pme->doCoulomb)
1652         {
1653             if (!pme->bFEP_q)
1654             {
1655                 *energy_q = output[0].coulombEnergy_;
1656                 m_add(vir_q, output[0].coulombVirial_, vir_q);
1657             }
1658             else
1659             {
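                /* Standard linear FEP coupling:
                 *   E(lambda) = (1-lambda)*E_A + lambda*E_B,
                 * so dE/dlambda = E_B - E_A; the same scheme is used for the
                 * virial and for the LJ branch below. */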
1660                 *energy_q       = (1.0-lambda_q)*output[0].coulombEnergy_ + lambda_q*output[1].coulombEnergy_;
1661                 *dvdlambda_q   += output[1].coulombEnergy_ - output[0].coulombEnergy_;
1662                 for (int i = 0; i < DIM; i++)
1663                 {
1664                     for (int j = 0; j < DIM; j++)
1665                     {
1666                         vir_q[i][j] += (1.0-lambda_q)*output[0].coulombVirial_[i][j] +
1667                             lambda_q*output[1].coulombVirial_[i][j];
1668                     }
1669                 }
1670             }
1671             if (debug)
1672             {
1673                 fprintf(debug, "Electrostatic PME mesh energy: %g\n", *energy_q);
1674             }
1675         }
1676         else
1677         {
1678             *energy_q = 0;
1679         }
1680
1681         if (pme->doLJ)
1682         {
1683             if (!pme->bFEP_lj)
1684             {
1685                 *energy_lj = output[0].lennardJonesEnergy_;
1686                 m_add(vir_lj, output[0].lennardJonesVirial_, vir_lj);
1687             }
1688             else
1689             {
1690                 *energy_lj     = (1.0-lambda_lj)*output[0].lennardJonesEnergy_ + lambda_lj*output[1].lennardJonesEnergy_;
1691                 *dvdlambda_lj += output[1].lennardJonesEnergy_ - output[0].lennardJonesEnergy_;
1692                 for (int i = 0; i < DIM; i++)
1693                 {
1694                     for (int j = 0; j < DIM; j++)
1695                     {
1696                         vir_lj[i][j] += (1.0-lambda_lj)*output[0].lennardJonesVirial_[i][j] + lambda_lj*output[1].lennardJonesVirial_[i][j];
1697                     }
1698                 }
1699             }
1700             if (debug)
1701             {
1702                 fprintf(debug, "Lennard-Jones PME mesh energy: %g\n", *energy_lj);
1703             }
1704         }
1705         else
1706         {
1707             *energy_lj = 0;
1708         }
1709     }
1710     return 0;
1711 }
1712
1713 void gmx_pme_destroy(gmx_pme_t *pme)
1714 {
1715     if (!pme)
1716     {
1717         return;
1718     }
1719
1720     delete pme->boxScaler;
1721
1722     sfree(pme->nnx);
1723     sfree(pme->nny);
1724     sfree(pme->nnz);
1725     sfree(pme->fshx);
1726     sfree(pme->fshy);
1727     sfree(pme->fshz);
1728
1729     for (int i = 0; i < pme->ngrids; ++i)
1730     {
1731         pmegrids_destroy(&pme->pmegrid[i]);
1732     }
1733     if (pme->pfft_setup)
1734     {
1735         for (int i = 0; i < pme->ngrids; ++i)
1736         {
1737             gmx_parallel_3dfft_destroy(pme->pfft_setup[i]);
1738         }
1739     }
1740     sfree(pme->fftgrid);
1741     sfree(pme->cfftgrid);
1742     sfree(pme->pfft_setup);
1743
1744     for (int i = 0; i < DIM; i++)
1745     {
1746         sfree(pme->bsp_mod[i]);
1747     }
1748
1749     sfree(pme->bufv);
1750     sfree(pme->bufr);
1751
1752     if (pme->solve_work)
1753     {
1754         pme_free_all_work(&pme->solve_work, pme->nthread);
1755     }
1756
1757     sfree(pme->sum_qgrid_tmp);
1758     sfree(pme->sum_qgrid_dd_tmp);
1759
1760     destroy_pme_spline_work(pme->spline_work);
1761
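    /* The extra pme->gpu check guards against destroying a PME object whose
     * GPU state was never (fully) created (editor's reading). */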
1762     if (pme_gpu_active(pme) && pme->gpu)
1763     {
1764         pme_gpu_destroy(pme->gpu);
1765     }
1766
1767     delete pme;
1768 }
1769
1770 void gmx_pme_reinit_atoms(gmx_pme_t  *pme,
1771                           const int   numAtoms,
1772                           const real *charges)
1773 {
1774     if (pme_gpu_active(pme))
1775     {
1776         pme_gpu_reinit_atoms(pme->gpu, numAtoms, charges);
1777     }
1778     else
1779     {
1780         pme->atc[0].setNumAtoms(numAtoms);
1781         // TODO: set the charges here as well
1782     }
1783 }
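
/* Usage sketch (editor's illustration; the caller context and variable names
 * are hypothetical): after repartitioning atoms one would call
 *
 *     gmx_pme_reinit_atoms(pme, newNumAtoms, chargesA);
 *
 * On the GPU path the charges are (re)uploaded immediately; on the CPU path
 * only the atom count is updated here and the coefficients are still passed
 * with each gmx_pme_do() call (see the TODO above). */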