Use MessageStringCollector class to construct error messages
src/gromacs/ewald/pme.cpp
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017 The GROMACS development team.
 * Copyright (c) 2018,2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief This file contains function definitions necessary for
 * computing energies and forces for the PME long-ranged part (Coulomb
 * and LJ).
 *
 * \author Erik Lindahl <erik@kth.se>
 * \author Berk Hess <hess@kth.se>
 * \ingroup module_ewald
 */
/* IMPORTANT FOR DEVELOPERS:
 *
 * Triclinic pme stuff isn't entirely trivial, and we've experienced
 * some bugs during development (many of them due to me). To avoid
 * this in the future, please check the following things if you make
 * changes in this file:
 *
 * 1. You should obtain identical (at least to the PME precision)
 *    energies, forces, and virial for
 *    a rectangular box and a triclinic one where the z (or y) axis is
 *    tilted a whole box side. For instance you could use these boxes:
 *
 *    rectangular       triclinic
 *     2  0  0           2  0  0
 *     0  2  0           0  2  0
 *     0  0  6           2  2  6
 *
 * 2. You should check the energy conservation in a triclinic box.
 *
 * It may seem like overkill, but better safe than sorry.
 * /Erik 001109
 */

#include "gmxpre.h"

#include "pme.h"

#include "config.h"

#include <cassert>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>

#include <algorithm>
#include <list>

#include "gromacs/domdec/domdec.h"
#include "gromacs/ewald/ewald_utils.h"
#include "gromacs/fft/parallel_3dfft.h"
#include "gromacs/fileio/pdbio.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/hardware/hw_info.h"
#include "gromacs/math/gmxcomplex.h"
#include "gromacs/math/invertmatrix.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/forcerec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/message_string_collector.h"
#include "gromacs/utility/real.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/unique_cptr.h"

#include "calculate_spline_moduli.h"
#include "pme_gather.h"
#include "pme_gpu_internal.h"
#include "pme_grid.h"
#include "pme_internal.h"
#include "pme_redistribute.h"
#include "pme_solve.h"
#include "pme_spread.h"

bool pme_gpu_supports_build(std::string* error)
{
    gmx::MessageStringCollector errorReasons;
    // Before changing the prefix string, make sure that it is not searched for in regression tests.
    errorReasons.startContext("PME GPU does not support:");
    errorReasons.appendIf(GMX_DOUBLE, "Double-precision build of GROMACS.");
    errorReasons.appendIf(!GMX_GPU, "Non-GPU build of GROMACS.");
    errorReasons.appendIf(GMX_GPU_SYCL, "SYCL build."); // SYCL-TODO
    errorReasons.finishContext();
    if (error != nullptr)
    {
        *error = errorReasons.toString();
    }
    return errorReasons.isEmpty();
}

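/* A hedged usage sketch (an illustration, not code from this file): callers
 * typically treat an empty collector as "supported" and report the joined
 * reasons otherwise. The mdlog object here is assumed for the example.
 *
 *   std::string error;
 *   if (!pme_gpu_supports_build(&error))
 *   {
 *       GMX_LOG(mdlog.warning).asParagraph().appendText(error);
 *   }
 *
 * MessageStringCollector prefixes the appended reasons with the context
 * string, so the result reads as one multi-line explanation.
 */
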
bool pme_gpu_supports_hardware(const gmx_hw_info_t gmx_unused& hwinfo, std::string* error)
{
    gmx::MessageStringCollector errorReasons;
    // Before changing the prefix string, make sure that it is not searched for in regression tests.
    errorReasons.startContext("PME GPU does not support:");
#ifdef __APPLE__
    errorReasons.appendIf(GMX_GPU_OPENCL, "Apple OS X operating system");
#endif
    errorReasons.finishContext();
    if (error != nullptr)
    {
        *error = errorReasons.toString();
    }
    return errorReasons.isEmpty();
}

bool pme_gpu_supports_input(const t_inputrec& ir, std::string* error)
{
    gmx::MessageStringCollector errorReasons;
    // Before changing the prefix string, make sure that it is not searched for in regression tests.
    errorReasons.startContext("PME GPU does not support:");
    errorReasons.appendIf(!EEL_PME(ir.coulombtype),
                          "Systems that do not use PME for electrostatics.");
    errorReasons.appendIf((ir.pme_order != 4), "Interpolation orders other than 4.");
    errorReasons.appendIf(EVDW_PME(ir.vdwtype), "Lennard-Jones PME.");
    errorReasons.appendIf(!EI_DYNAMICS(ir.eI), "Non-dynamical integrator (use md, sd, etc).");
    errorReasons.finishContext();
    if (error != nullptr)
    {
        *error = errorReasons.toString();
    }
    return errorReasons.isEmpty();
}

/*! \brief \libinternal
 * Checks whether PME with the given inputs can run on a GPU.
 * This function is the final internal check, validating the whole PME structure on creation,
 * but it still duplicates the preliminary checks of the (externally exposed) pme_gpu_supports_input() - just in case.
 *
 * \param[in]  pme          The PME structure.
 * \param[out] error        The error message if the input is not supported on GPU.
 * \returns                 True if this PME input is possible to run on GPU, false otherwise.
 */
static bool pme_gpu_check_restrictions(const gmx_pme_t* pme, std::string* error)
{
    gmx::MessageStringCollector errorReasons;
    // Before changing the prefix string, make sure that it is not searched for in regression tests.
    errorReasons.startContext("PME GPU does not support:");
    errorReasons.appendIf((pme->nnodes != 1), "PME decomposition.");
    errorReasons.appendIf((pme->pme_order != 4), "Interpolation orders other than 4.");
    errorReasons.appendIf(pme->doLJ, "Lennard-Jones PME.");
    errorReasons.appendIf(GMX_DOUBLE, "Double-precision build of GROMACS.");
    errorReasons.appendIf(!GMX_GPU, "Non-GPU build of GROMACS.");
    errorReasons.appendIf(GMX_GPU_SYCL, "SYCL build of GROMACS."); // SYCL-TODO
    errorReasons.finishContext();
    if (error != nullptr)
    {
        *error = errorReasons.toString();
    }
    return errorReasons.isEmpty();
}

PmeRunMode pme_run_mode(const gmx_pme_t* pme)
{
    GMX_ASSERT(pme != nullptr, "Expecting valid PME data pointer");
    return pme->runMode;
}

gmx::PinningPolicy pme_get_pinning_policy()
{
    return gmx::PinningPolicy::PinnedIfSupported;
}

/*! \brief Number of bytes in a cache line.
 *
 * Must also be a multiple of the SIMD and SIMD4 register size, to
 * preserve alignment.
 */
const int gmxCacheLineSize = 64;

//! Set up coordinate communication
static void setup_coordinate_communication(PmeAtomComm* atc)
{
    const int nslab = atc->nslab;

    int n = 0;
    for (int i = 1; i <= nslab / 2; i++)
    {
        const int fw = (atc->nodeid + i) % nslab;
        const int bw = (atc->nodeid - i + nslab) % nslab;
        if (n < nslab - 1)
        {
            atc->slabCommSetup[n].node_dest = fw;
            atc->slabCommSetup[n].node_src  = bw;
            n++;
        }
        if (n < nslab - 1)
        {
            atc->slabCommSetup[n].node_dest = bw;
            atc->slabCommSetup[n].node_src  = fw;
            n++;
        }
    }
}

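/* Worked example (derived from setup_coordinate_communication() above):
 * for nodeid 0 with nslab 5, the nslab - 1 = 4 pulses alternate direction
 * with increasing distance:
 *   pulse 0: dest 1, src 4    pulse 1: dest 4, src 1
 *   pulse 2: dest 2, src 3    pulse 3: dest 3, src 2
 * For even nslab the halfway slab appears only once, which is why both
 * increments are guarded by n < nslab - 1.
 */
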
/*! \brief Round \p n up to the next multiple of \p f */
static int mult_up(int n, int f)
{
    return ((n + f - 1) / f) * f;
}

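/* For example, mult_up(10, 4) == 12 and mult_up(12, 4) == 12. */
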
/*! \brief Return estimate of the load imbalance from the PME grid not being a good match for the number of PME ranks */
static double estimate_pme_load_imbalance(struct gmx_pme_t* pme)
{
    const int nma = pme->nnodes_major;
    const int nmi = pme->nnodes_minor;

    const double n1 = mult_up(pme->nkx, nma) * mult_up(pme->nky, nmi) * pme->nkz;
    const double n2 = mult_up(pme->nkx, nma) * mult_up(pme->nkz, nmi) * pme->nky;
    const double n3 = mult_up(pme->nky, nma) * mult_up(pme->nkz, nmi) * pme->nkx;

    /* pme_solve is roughly double the cost of an fft */

    return (n1 + n2 + 3 * n3) / static_cast<double>(6 * pme->nkx * pme->nky * pme->nkz);
}

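/* A note on the estimate above (a reading of the code, not a specification):
 * n1, n2 and n3 are the padded grid volumes of the three stages of the
 * parallel FFT + solve, with each decomposed dimension rounded up to a
 * multiple of the rank count by mult_up(). The weight of 3 on n3 and the 6
 * in the denominator reflect the comment that pme_solve costs roughly twice
 * as much as one FFT stage; gmx_pme_init() warns when this estimate
 * exceeds 1.2.
 */
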
#ifndef DOXYGEN

PmeAtomComm::PmeAtomComm(MPI_Comm   PmeMpiCommunicator,
                         const int  numThreads,
                         const int  pmeOrder,
                         const int  dimIndex,
                         const bool doSpread) :
    dimind(dimIndex), bSpread(doSpread), pme_order(pmeOrder), nthread(numThreads), spline(nthread)
{
    if (PmeMpiCommunicator != MPI_COMM_NULL)
    {
        mpi_comm = PmeMpiCommunicator;
#    if GMX_MPI
        MPI_Comm_size(mpi_comm, &nslab);
        MPI_Comm_rank(mpi_comm, &nodeid);
#    endif
    }
    if (debug)
    {
        fprintf(debug, "For PME atom communication in dimind %d: nslab %d rank %d\n", dimind, nslab, nodeid);
    }

    if (nslab > 1)
    {
        slabCommSetup.resize(nslab);
        setup_coordinate_communication(this);

        count_thread.resize(nthread);
        for (auto& countThread : count_thread)
        {
            countThread.resize(nslab);
        }
    }

    if (nthread > 1)
    {
        threadMap.resize(nthread);

#    pragma omp parallel for num_threads(nthread) schedule(static)
        for (int thread = 0; thread < nthread; thread++)
        {
            try
            {
                /* Allocate buffer with padding to avoid cache pollution */
                threadMap[thread].nBuffer.resize(nthread + 2 * gmxCacheLineSize);
                threadMap[thread].n = threadMap[thread].nBuffer.data() + gmxCacheLineSize;
            }
            GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
        }
    }
}

#endif // !DOXYGEN

/*! \brief Initialize data structure for communication */
static void init_overlap_comm(pme_overlap_t* ol, int norder, MPI_Comm comm, int nnodes, int nodeid, int ndata, int commplainsize)
{
    gmx_bool bCont;

    ol->mpi_comm = comm;
    ol->nnodes   = nnodes;
    ol->nodeid   = nodeid;

    /* Linear translation of the PME grid won't affect reciprocal space
     * calculations, so to optimize we only interpolate "upwards",
     * which also means we only have to consider overlap in one direction.
     * I.e., particles on this node might also be spread to grid indices
     * that belong to higher nodes (modulo nnodes)
     */

    ol->s2g0.resize(ol->nnodes + 1);
    ol->s2g1.resize(ol->nnodes);
    if (debug)
    {
        fprintf(debug, "PME slab boundaries:");
    }
    for (int i = 0; i < nnodes; i++)
    {
        /* s2g0 the local interpolation grid start.
         * s2g1 the local interpolation grid end.
         * Since in calc_pidx we divide particles, and not grid lines,
         * spatially uniform along dimension x or y, we need to round
         * s2g0 down and s2g1 up.
         */
        ol->s2g0[i] = (i * ndata + 0) / nnodes;
        ol->s2g1[i] = ((i + 1) * ndata + nnodes - 1) / nnodes + norder - 1;

        if (debug)
        {
            fprintf(debug, "  %3d %3d", ol->s2g0[i], ol->s2g1[i]);
        }
    }
    ol->s2g0[nnodes] = ndata;
    if (debug)
    {
        fprintf(debug, "\n");
    }

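    /* Worked example (derived from the formulas above): with nnodes = 3,
     * ndata = 10 and norder = 4 the slab boundaries become
     *   s2g0 = { 0, 3, 6, 10 }  (start indices rounded down, closed by ndata)
     *   s2g1 = { 7, 10, 13 }    (end indices rounded up, plus norder - 1)
     * so each slab's interpolation region reaches norder - 1 grid lines into
     * the slabs above it (modulo the grid size).
     */
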
    /* Determine with how many nodes we need to communicate the grid overlap */
    int testRankCount = 0;
    do
    {
        testRankCount++;
        bCont = FALSE;
        for (int i = 0; i < nnodes; i++)
        {
            if ((i + testRankCount < nnodes && ol->s2g1[i] > ol->s2g0[i + testRankCount])
                || (i + testRankCount >= nnodes && ol->s2g1[i] > ol->s2g0[i + testRankCount - nnodes] + ndata))
            {
                bCont = TRUE;
            }
        }
    } while (bCont && testRankCount < nnodes);

    ol->comm_data.resize(testRankCount - 1);
    ol->send_size = 0;

    for (size_t b = 0; b < ol->comm_data.size(); b++)
    {
        pme_grid_comm_t* pgc = &ol->comm_data[b];

        /* Send */
        pgc->send_id  = (ol->nodeid + (b + 1)) % ol->nnodes;
        int fft_start = ol->s2g0[pgc->send_id];
        int fft_end   = ol->s2g0[pgc->send_id + 1];
        if (pgc->send_id < nodeid)
        {
            fft_start += ndata;
            fft_end += ndata;
        }
        int send_index1  = ol->s2g1[nodeid];
        send_index1      = std::min(send_index1, fft_end);
        pgc->send_index0 = fft_start;
        pgc->send_nindex = std::max(0, send_index1 - pgc->send_index0);
        ol->send_size += pgc->send_nindex;

        /* We always start receiving to the first index of our slab */
        pgc->recv_id    = (ol->nodeid - (b + 1) + ol->nnodes) % ol->nnodes;
        fft_start       = ol->s2g0[ol->nodeid];
        fft_end         = ol->s2g0[ol->nodeid + 1];
        int recv_index1 = ol->s2g1[pgc->recv_id];
        if (pgc->recv_id > nodeid)
        {
            recv_index1 -= ndata;
        }
        recv_index1      = std::min(recv_index1, fft_end);
        pgc->recv_index0 = fft_start;
        pgc->recv_nindex = std::max(0, recv_index1 - pgc->recv_index0);
    }

#if GMX_MPI
    /* Communicate the buffer sizes to receive */
    MPI_Status stat;
    for (size_t b = 0; b < ol->comm_data.size(); b++)
    {
        MPI_Sendrecv(&ol->send_size,
                     1,
                     MPI_INT,
                     ol->comm_data[b].send_id,
                     b,
                     &ol->comm_data[b].recv_size,
                     1,
                     MPI_INT,
                     ol->comm_data[b].recv_id,
                     b,
                     ol->mpi_comm,
                     &stat);
    }
#endif

    /* For a non-divisible grid we need pme_order instead of pme_order-1 */
    ol->sendbuf.resize(norder * commplainsize);
    ol->recvbuf.resize(norder * commplainsize);
}

int minimalPmeGridSize(int pmeOrder)
{
    /* The actual grid size limitations are:
     *   serial:        >= pme_order
     *   DD, no OpenMP: >= 2*(pme_order - 1)
     *   DD, OpenMP:    >= pme_order + 1
     * But we use the maximum for simplicity since in practice there is not
     * much performance difference between pme_order and 2*(pme_order -1).
     */
    int minimalSize = 2 * (pmeOrder - 1);

    GMX_RELEASE_ASSERT(pmeOrder >= 3, "pmeOrder has to be >= 3");
    GMX_RELEASE_ASSERT(minimalSize >= pmeOrder + 1, "The grid size should be >= pmeOrder + 1");

    return minimalSize;
}

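/* For example, with the default pme_order = 4 this gives a minimal grid
 * size of 2 * (4 - 1) = 6 lines per dimension. */
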
bool gmx_pme_check_restrictions(int pme_order, int nkx, int nky, int nkz, int numPmeDomainsAlongX, bool useThreads, bool errorsAreFatal)
{
    if (pme_order > PME_ORDER_MAX)
    {
        if (!errorsAreFatal)
        {
            return false;
        }

        std::string message = gmx::formatString(
                "pme_order (%d) is larger than the maximum allowed value (%d). Modify and "
                "recompile the code if you really need such a high order.",
                pme_order,
                PME_ORDER_MAX);
        GMX_THROW(gmx::InconsistentInputError(message));
    }

    const int minGridSize = minimalPmeGridSize(pme_order);
    if (nkx < minGridSize || nky < minGridSize || nkz < minGridSize)
    {
        if (!errorsAreFatal)
        {
            return false;
        }
        std::string message =
                gmx::formatString("The PME grid sizes need to be >= 2*(pme_order-1) (%d)", minGridSize);
        GMX_THROW(gmx::InconsistentInputError(message));
    }

    /* Check for a limitation of the (current) sum_fftgrid_dd code.
     * We only allow multiple communication pulses in dim 1, not in dim 0.
     */
    if (useThreads
        && (nkx < numPmeDomainsAlongX * pme_order && nkx != numPmeDomainsAlongX * (pme_order - 1)))
    {
        if (!errorsAreFatal)
        {
            return false;
        }
        gmx_fatal(FARGS,
                  "The number of PME grid lines per rank along x is %g. But when using OpenMP "
                  "threads, the number of grid lines per rank along x should be >= pme_order (%d) "
                  "or = pme_order-1. To resolve this issue, use fewer ranks along x (and possibly "
                  "more along y and/or z) by specifying -dd manually.",
                  nkx / static_cast<double>(numPmeDomainsAlongX),
                  pme_order);
    }

    return true;
}

/*! \brief Divide \p enumerator by \p denominator, rounding the result up */
static int div_round_up(int enumerator, int denominator)
{
    return (enumerator + denominator - 1) / denominator;
}

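/* For example, div_round_up(7, 3) == 3, whereas plain integer division gives 7/3 == 2. */
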
gmx_pme_t* gmx_pme_init(const t_commrec*     cr,
                        const NumPmeDomains& numPmeDomains,
                        const t_inputrec*    ir,
                        gmx_bool             bFreeEnergy_q,
                        gmx_bool             bFreeEnergy_lj,
                        gmx_bool             bReproducible,
                        real                 ewaldcoeff_q,
                        real                 ewaldcoeff_lj,
                        int                  nthread,
                        PmeRunMode           runMode,
                        PmeGpu*              pmeGpu,
                        const DeviceContext* deviceContext,
                        const DeviceStream*  deviceStream,
                        const PmeGpuProgram* pmeGpuProgram,
                        const gmx::MDLogger& mdlog)
{
    int  use_threads, sum_use_threads, i;
    ivec ndata;

    if (debug)
    {
        fprintf(debug, "Creating PME data structures.\n");
    }

    gmx::unique_cptr<gmx_pme_t, gmx_pme_destroy> pme(new gmx_pme_t());

    pme->sum_qgrid_tmp    = nullptr;
    pme->sum_qgrid_dd_tmp = nullptr;

    pme->buf_nalloc = 0;

    pme->nnodes  = 1;
    pme->bPPnode = TRUE;

    pme->nnodes_major = numPmeDomains.x;
    pme->nnodes_minor = numPmeDomains.y;

    if (numPmeDomains.x * numPmeDomains.y > 1)
    {
        pme->mpi_comm = cr->mpi_comm_mygroup;

#if GMX_MPI
        MPI_Comm_rank(pme->mpi_comm, &pme->nodeid);
        MPI_Comm_size(pme->mpi_comm, &pme->nnodes);
#endif
        if (pme->nnodes != numPmeDomains.x * numPmeDomains.y)
        {
            gmx_incons("PME rank count mismatch");
        }
    }
    else
    {
        pme->mpi_comm = MPI_COMM_NULL;
    }

    if (pme->nnodes == 1)
    {
        pme->mpi_comm_d[0] = MPI_COMM_NULL;
        pme->mpi_comm_d[1] = MPI_COMM_NULL;
        pme->ndecompdim    = 0;
        pme->nodeid_major  = 0;
        pme->nodeid_minor  = 0;
    }
    else
    {
        if (numPmeDomains.y == 1)
        {
            pme->mpi_comm_d[0] = pme->mpi_comm;
            pme->mpi_comm_d[1] = MPI_COMM_NULL;
            pme->ndecompdim    = 1;
            pme->nodeid_major  = pme->nodeid;
            pme->nodeid_minor  = 0;
        }
        else if (numPmeDomains.x == 1)
        {
            pme->mpi_comm_d[0] = MPI_COMM_NULL;
            pme->mpi_comm_d[1] = pme->mpi_comm;
            pme->ndecompdim    = 1;
            pme->nodeid_major  = 0;
            pme->nodeid_minor  = pme->nodeid;
        }
        else
        {
            if (pme->nnodes % numPmeDomains.x != 0)
            {
                gmx_incons(
                        "For 2D PME decomposition, #PME ranks must be divisible by the number of "
                        "domains along x");
            }
            pme->ndecompdim = 2;

#if GMX_MPI
            MPI_Comm_split(pme->mpi_comm,
                           pme->nodeid % numPmeDomains.y,
                           pme->nodeid,
                           &pme->mpi_comm_d[0]); /* My communicator along major dimension */
            MPI_Comm_split(pme->mpi_comm,
                           pme->nodeid / numPmeDomains.y,
                           pme->nodeid,
                           &pme->mpi_comm_d[1]); /* My communicator along minor dimension */

            MPI_Comm_rank(pme->mpi_comm_d[0], &pme->nodeid_major);
            MPI_Comm_size(pme->mpi_comm_d[0], &pme->nnodes_major);
            MPI_Comm_rank(pme->mpi_comm_d[1], &pme->nodeid_minor);
            MPI_Comm_size(pme->mpi_comm_d[1], &pme->nnodes_minor);
#endif
        }
    }
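    /* A derived note on the 2D case above: the two MPI_Comm_split calls
     * number ranks such that nodeid = nodeid_major * numPmeDomains.y +
     * nodeid_minor. The color nodeid % numPmeDomains.y selects a column of
     * ranks along the major dimension, and nodeid / numPmeDomains.y selects
     * a row along the minor one; e.g. for numPmeDomains = {2, 3}, nodeid 4
     * maps to (nodeid_major, nodeid_minor) = (1, 1).
     */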
    // cr is always initialized if there is a PP rank, so we can safely assume
    // that when it is not, as in the Ewald tests, we are not on a PP rank.
    pme->bPPnode = ((cr != nullptr && cr->duty != 0) && thisRankHasDuty(cr, DUTY_PP));

    pme->nthread = nthread;

    /* Check if any of the PME MPI ranks uses threads */
    use_threads = (pme->nthread > 1 ? 1 : 0);
#if GMX_MPI
    if (pme->nnodes > 1)
    {
        MPI_Allreduce(&use_threads, &sum_use_threads, 1, MPI_INT, MPI_SUM, pme->mpi_comm);
    }
    else
#endif
    {
        sum_use_threads = use_threads;
    }
    pme->bUseThreads = (sum_use_threads > 0);

    if (ir->pbcType == PbcType::Screw)
    {
        gmx_fatal(FARGS, "pme does not (yet) work with pbc = screw");
    }

    /* NOTE:
     * It is likely that the current gmx_pme_do() routine supports calculating
     * only Coulomb or only LJ while gmx_pme_init() configures for both,
     * but that has never been tested.
     * Likewise, gmx_pme_do() probably supports skipping the free-energy
     * calculation for Coulomb and/or LJ while gmx_pme_init() configures
     * with free energy, but that has never been tested either.
     */
    pme->doCoulomb = EEL_PME(ir->coulombtype);
    pme->doLJ      = EVDW_PME(ir->vdwtype);
    pme->bFEP_q    = ((ir->efep != FreeEnergyPerturbationType::No) && bFreeEnergy_q);
    pme->bFEP_lj   = ((ir->efep != FreeEnergyPerturbationType::No) && bFreeEnergy_lj);
    pme->bFEP      = (pme->bFEP_q || pme->bFEP_lj);
    pme->nkx       = ir->nkx;
    pme->nky       = ir->nky;
    pme->nkz       = ir->nkz;
    pme->bP3M = (ir->coulombtype == CoulombInteractionType::P3mAD || getenv("GMX_PME_P3M") != nullptr);
    pme->pme_order     = ir->pme_order;
    pme->ewaldcoeff_q  = ewaldcoeff_q;
    pme->ewaldcoeff_lj = ewaldcoeff_lj;

    /* Always constant electrostatics coefficients */
    pme->epsilon_r = ir->epsilon_r;

    /* Always constant LJ coefficients */
    pme->ljpme_combination_rule = ir->ljpme_combination_rule;

    // The box requires scaling with nwalls = 2, we store that condition as well
    // as the scaling factor
    delete pme->boxScaler;
    pme->boxScaler = new EwaldBoxZScaler(inputrecPbcXY2Walls(ir), ir->wall_ewald_zfac);

    /* If we violate restrictions, generate a fatal error here */
    gmx_pme_check_restrictions(
            pme->pme_order, pme->nkx, pme->nky, pme->nkz, pme->nnodes_major, pme->bUseThreads, true);

    if (pme->nnodes > 1)
    {
        double imbal;

#if GMX_MPI
        MPI_Type_contiguous(DIM, GMX_MPI_REAL, &(pme->rvec_mpi));
        MPI_Type_commit(&(pme->rvec_mpi));
#endif

        /* Note that the coefficient spreading and force gathering, which usually
         * takes about the same amount of time as FFT+solve_pme,
         * is always fully load balanced
         * (unless the coefficient distribution is inhomogeneous).
         */

        imbal = estimate_pme_load_imbalance(pme.get());
        if (imbal >= 1.2 && pme->nodeid_major == 0 && pme->nodeid_minor == 0)
        {
            GMX_LOG(mdlog.warning)
                    .asParagraph()
                    .appendTextFormatted(
                            "NOTE: The load imbalance in PME FFT and solve is %d%%.\n"
                            "      For optimal PME load balancing\n"
                            "      PME grid_x (%d) and grid_y (%d) should be divisible by "
                            "#PME_ranks_x "
                            "(%d)\n"
                            "      and PME grid_y (%d) and grid_z (%d) should be divisible by "
                            "#PME_ranks_y "
                            "(%d)",
                            gmx::roundToInt((imbal - 1) * 100),
                            pme->nkx,
                            pme->nky,
                            pme->nnodes_major,
                            pme->nky,
                            pme->nkz,
                            pme->nnodes_minor);
        }
    }

    /* For a non-divisible grid we need pme_order instead of pme_order-1 */
    /* In sum_qgrid_dd x overlap is copied in place: take padding into account.
     * y is always copied through a buffer: we don't need padding in z,
     * but we do need the overlap in x because of the communication order.
     */
    init_overlap_comm(&pme->overlap[0],
                      pme->pme_order,
                      pme->mpi_comm_d[0],
                      pme->nnodes_major,
                      pme->nodeid_major,
                      pme->nkx,
                      (div_round_up(pme->nky, pme->nnodes_minor) + pme->pme_order)
                              * (pme->nkz + pme->pme_order - 1));

    /* Along overlap dim 1 we can send in multiple pulses in sum_fftgrid_dd.
     * We do this with an offset buffer of equal size, so we need to allocate
     * extra for the offset. That's what the (+1)*pme->nkz is for.
     */
    init_overlap_comm(&pme->overlap[1],
                      pme->pme_order,
                      pme->mpi_comm_d[1],
                      pme->nnodes_minor,
                      pme->nodeid_minor,
                      pme->nky,
                      (div_round_up(pme->nkx, pme->nnodes_major) + pme->pme_order + 1) * pme->nkz);

    /* Double-check for a limitation of the (current) sum_fftgrid_dd code.
     * Note that gmx_pme_check_restrictions checked for this already.
     */
    if (pme->bUseThreads && (pme->overlap[0].comm_data.size() > 1))
    {
        gmx_incons(
                "More than one communication pulse required for grid overlap communication along "
                "the major dimension while using threads");
    }

    snew(pme->bsp_mod[XX], pme->nkx);
    snew(pme->bsp_mod[YY], pme->nky);
    snew(pme->bsp_mod[ZZ], pme->nkz);

    pme->gpu     = pmeGpu; /* Carrying over the single GPU structure */
    pme->runMode = runMode;

    /* The required size of the interpolation grid, including overlap.
     * The allocated size (pmegrid_n?) might be slightly larger.
     */
    pme->pmegrid_nx = pme->overlap[0].s2g1[pme->nodeid_major] - pme->overlap[0].s2g0[pme->nodeid_major];
    pme->pmegrid_ny = pme->overlap[1].s2g1[pme->nodeid_minor] - pme->overlap[1].s2g0[pme->nodeid_minor];
    pme->pmegrid_nz_base = pme->nkz;
    pme->pmegrid_nz      = pme->pmegrid_nz_base + pme->pme_order - 1;
    set_grid_alignment(&pme->pmegrid_nz, pme->pme_order);
    pme->pmegrid_start_ix = pme->overlap[0].s2g0[pme->nodeid_major];
    pme->pmegrid_start_iy = pme->overlap[1].s2g0[pme->nodeid_minor];
    pme->pmegrid_start_iz = 0;

    make_gridindex_to_localindex(
            pme->nkx, pme->pmegrid_start_ix, pme->pmegrid_nx - (pme->pme_order - 1), &pme->nnx, &pme->fshx);
    make_gridindex_to_localindex(
            pme->nky, pme->pmegrid_start_iy, pme->pmegrid_ny - (pme->pme_order - 1), &pme->nny, &pme->fshy);
    make_gridindex_to_localindex(
            pme->nkz, pme->pmegrid_start_iz, pme->pmegrid_nz_base, &pme->nnz, &pme->fshz);

    pme->spline_work = make_pme_spline_work(pme->pme_order);

    ndata[0] = pme->nkx;
    ndata[1] = pme->nky;
    ndata[2] = pme->nkz;
    /* It doesn't matter if we allocate too many grids here,
     * we only allocate and use the ones we need.
     */
    if (pme->doLJ)
    {
        pme->ngrids = ((ir->ljpme_combination_rule == LongRangeVdW::LB) ? DO_Q_AND_LJ_LB : DO_Q_AND_LJ);
    }
    else
    {
        pme->ngrids = DO_Q;
    }
    snew(pme->fftgrid, pme->ngrids);
    snew(pme->cfftgrid, pme->ngrids);
    snew(pme->pfft_setup, pme->ngrids);

    for (i = 0; i < pme->ngrids; ++i)
    {
        if ((i < DO_Q && pme->doCoulomb && (i == 0 || bFreeEnergy_q))
            || (i >= DO_Q && pme->doLJ
                && (i == 2 || bFreeEnergy_lj || ir->ljpme_combination_rule == LongRangeVdW::LB)))
        {
            pmegrids_init(&pme->pmegrid[i],
                          pme->pmegrid_nx,
                          pme->pmegrid_ny,
                          pme->pmegrid_nz,
                          pme->pmegrid_nz_base,
                          pme->pme_order,
                          pme->bUseThreads,
                          pme->nthread,
                          pme->overlap[0].s2g1[pme->nodeid_major]
                                  - pme->overlap[0].s2g0[pme->nodeid_major + 1],
                          pme->overlap[1].s2g1[pme->nodeid_minor]
                                  - pme->overlap[1].s2g0[pme->nodeid_minor + 1]);
            /* This routine will allocate the grid data to fit the FFTs */
            const auto allocateRealGridForGpu = (pme->runMode == PmeRunMode::Mixed)
                                                        ? gmx::PinningPolicy::PinnedIfSupported
                                                        : gmx::PinningPolicy::CannotBePinned;
            gmx_parallel_3dfft_init(&pme->pfft_setup[i],
                                    ndata,
                                    &pme->fftgrid[i],
                                    &pme->cfftgrid[i],
                                    pme->mpi_comm_d,
                                    bReproducible,
                                    pme->nthread,
                                    allocateRealGridForGpu);
        }
    }

    if (!pme->bP3M)
    {
        /* Use plain SPME B-spline interpolation */
        make_bspline_moduli(pme->bsp_mod, pme->nkx, pme->nky, pme->nkz, pme->pme_order);
    }
    else
    {
        /* Use the P3M grid-optimized influence function */
        make_p3m_bspline_moduli(pme->bsp_mod, pme->nkx, pme->nky, pme->nkz, pme->pme_order);
    }

    /* Use atc[0] for spreading */
    const int firstDimIndex   = (numPmeDomains.x > 1 ? 0 : 1);
    MPI_Comm  mpiCommFirstDim = (pme->nnodes > 1 ? pme->mpi_comm_d[firstDimIndex] : MPI_COMM_NULL);
    bool      doSpread        = true;
    pme->atc.emplace_back(mpiCommFirstDim, pme->nthread, pme->pme_order, firstDimIndex, doSpread);
    if (pme->ndecompdim >= 2)
    {
        const int secondDimIndex = 1;
        doSpread                 = false;
        pme->atc.emplace_back(pme->mpi_comm_d[1], pme->nthread, pme->pme_order, secondDimIndex, doSpread);
    }

    // Initial check of validity of the input for running on the GPU
    if (pme->runMode != PmeRunMode::CPU)
    {
        std::string errorString;
        bool        canRunOnGpu = pme_gpu_check_restrictions(pme.get(), &errorString);
        if (!canRunOnGpu)
        {
            GMX_THROW(gmx::NotImplementedError(errorString));
        }
        pme_gpu_reinit(pme.get(), deviceContext, deviceStream, pmeGpuProgram);
    }
    else
    {
        GMX_ASSERT(pme->gpu == nullptr, "Should not have PME GPU object when PME is on a CPU.");
    }

    pme_init_all_work(&pme->solve_work, pme->nthread, pme->nkx);

    // no exception was thrown during the init, so we hand over the PME structure handle
    return pme.release();
}

void gmx_pme_reinit(struct gmx_pme_t** pmedata,
                    const t_commrec*   cr,
                    struct gmx_pme_t*  pme_src,
                    const t_inputrec*  ir,
                    const ivec         grid_size,
                    real               ewaldcoeff_q,
                    real               ewaldcoeff_lj)
{
    // Create a copy of the t_inputrec fields that are used in gmx_pme_init().
    // TODO: This would be better as just copying a sub-structure that contains
    // all the PME parameters and nothing else.
    t_inputrec irc;
    irc.pbcType                = ir->pbcType;
    irc.coulombtype            = ir->coulombtype;
    irc.vdwtype                = ir->vdwtype;
    irc.efep                   = ir->efep;
    irc.pme_order              = ir->pme_order;
    irc.epsilon_r              = ir->epsilon_r;
    irc.ljpme_combination_rule = ir->ljpme_combination_rule;
    irc.nkx                    = grid_size[XX];
    irc.nky                    = grid_size[YY];
    irc.nkz                    = grid_size[ZZ];

    try
    {
        // This is reinit. Any logging should have been done at first init.
        // Here we should avoid writing notes for settings the user did not
        // set directly.
        const gmx::MDLogger dummyLogger;
        GMX_ASSERT(pmedata, "Invalid PME pointer");
        NumPmeDomains numPmeDomains = { pme_src->nnodes_major, pme_src->nnodes_minor };
        *pmedata                    = gmx_pme_init(cr,
                                numPmeDomains,
                                &irc,
                                pme_src->bFEP_q,
                                pme_src->bFEP_lj,
                                FALSE,
                                ewaldcoeff_q,
                                ewaldcoeff_lj,
                                pme_src->nthread,
                                pme_src->runMode,
                                pme_src->gpu,
                                nullptr,
                                nullptr,
                                nullptr,
                                dummyLogger);
        /* When running PME on the CPU not using domain decomposition,
         * the atom data is allocated once only in gmx_pme_(re)init().
         */
        if (!pme_src->gpu && pme_src->nnodes == 1)
        {
            gmx_pme_reinit_atoms(*pmedata, pme_src->atc[0].numAtoms(), {}, {});
        }
        // TODO this is mostly passing around current values
    }
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR

    /* We can easily reuse the allocated pme grids in pme_src */
    reuse_pmegrids(&pme_src->pmegrid[PME_GRID_QA], &(*pmedata)->pmegrid[PME_GRID_QA]);
    /* We would like to reuse the fft grids, but that's harder */
}

real gmx_pme_calc_energy(gmx_pme_t* pme, gmx::ArrayRef<const gmx::RVec> x, gmx::ArrayRef<const real> q)
{
    pmegrids_t* grid;

    if (pme->nnodes > 1)
    {
        gmx_incons("gmx_pme_calc_energy called in parallel");
    }
    if (pme->bFEP_q)
    {
        gmx_incons("gmx_pme_calc_energy with free energy");
    }

    if (!pme->atc_energy)
    {
        pme->atc_energy = std::make_unique<PmeAtomComm>(MPI_COMM_NULL, 1, pme->pme_order, 0, true);
    }
    PmeAtomComm* atc = pme->atc_energy.get();
    atc->setNumAtoms(x.ssize());
    atc->x           = x;
    atc->coefficient = q;

    /* We only use the A-charges grid */
    grid = &pme->pmegrid[PME_GRID_QA];

    /* Only calculate the spline coefficients, don't actually spread */
    spread_on_grid(pme, atc, nullptr, TRUE, FALSE, pme->fftgrid[PME_GRID_QA], FALSE, PME_GRID_QA);

    return gather_energy_bsplines(pme, grid->grid.grid, atc);
}

/*! \brief Calculate initial Lorentz-Berthelot coefficients for LJ-PME */
static void calc_initial_lb_coeffs(gmx::ArrayRef<real>       coefficient,
                                   gmx::ArrayRef<const real> local_c6,
                                   gmx::ArrayRef<const real> local_sigma)
{
    for (gmx::index i = 0; i < coefficient.ssize(); ++i)
    {
        /* Compute sigma^4 by squaring twice, then set coefficient = c6/sigma^4 */
        real sigma4    = local_sigma[i];
        sigma4         = sigma4 * sigma4;
        sigma4         = sigma4 * sigma4;
        coefficient[i] = local_c6[i] / sigma4;
    }
}

/*! \brief Calculate next Lorentz-Berthelot coefficients for LJ-PME */
static void calc_next_lb_coeffs(gmx::ArrayRef<real> coefficient, gmx::ArrayRef<const real> local_sigma)
{
    for (gmx::index i = 0; i < coefficient.ssize(); ++i)
    {
        coefficient[i] *= local_sigma[i];
    }
}

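/* Why seven terms (a hedged reading of the LB code path in gmx_pme_do(), not
 * a derivation given in this file): with Lorentz-Berthelot rules the cross
 * C6 involves (sigma_i + sigma_j)^6, whose binomial expansion has seven
 * terms sigma_i^k * sigma_j^(6-k) with k = 0..6. Starting from c6/sigma^4
 * and multiplying by sigma once per grid pass produces the per-particle
 * factor for each term, which is why grid indices 2..8 are used below.
 */
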
int gmx_pme_do(struct gmx_pme_t*              pme,
               gmx::ArrayRef<const gmx::RVec> coordinates,
               gmx::ArrayRef<gmx::RVec>       forces,
               gmx::ArrayRef<const real>      chargeA,
               gmx::ArrayRef<const real>      chargeB,
               gmx::ArrayRef<const real>      c6A,
               gmx::ArrayRef<const real>      c6B,
               gmx::ArrayRef<const real>      sigmaA,
               gmx::ArrayRef<const real>      sigmaB,
               const matrix                   box,
               const t_commrec*               cr,
               int                            maxshift_x,
               int                            maxshift_y,
               t_nrnb*                        nrnb,
               gmx_wallcycle*                 wcycle,
               matrix                         vir_q,
               matrix                         vir_lj,
               real*                          energy_q,
               real*                          energy_lj,
               real                           lambda_q,
               real                           lambda_lj,
               real*                          dvdlambda_q,
               real*                          dvdlambda_lj,
               const gmx::StepWorkload&       stepWork)
{
    GMX_ASSERT(pme->runMode == PmeRunMode::CPU,
               "gmx_pme_do should not be called on the GPU PME run.");

    int                       d, npme, grid_index, max_grid_index;
    PmeAtomComm&              atc     = pme->atc[0];
    pmegrids_t*               pmegrid = nullptr;
    real*                     grid    = nullptr;
    gmx::ArrayRef<const real> coefficient;
    PmeOutput                 output[2]; // The second is used for the B state with FEP
    real                      scale, lambda;
    gmx_bool                  bClearF;
    gmx_parallel_3dfft_t      pfft_setup;
    real*                     fftgrid;
    t_complex*                cfftgrid;
    int                       thread;
    gmx_bool                  bFirst, bDoSplines;
    int                       fep_state;
    int                       fep_states_lj = pme->bFEP_lj ? 2 : 1;
    // There's no support for computing energy without virial, or vice versa
    const bool computeEnergyAndVirial = (stepWork.computeEnergy || stepWork.computeVirial);

    /* We could be passing lambda!=0 while no q or LJ is actually perturbed */
    if (!pme->bFEP_q)
    {
        lambda_q = 0;
    }
    if (!pme->bFEP_lj)
    {
        lambda_lj = 0;
    }

    assert(pme->nnodes > 0);
    assert(pme->nnodes == 1 || pme->ndecompdim > 0);

    if (pme->nnodes > 1)
    {
        atc.pd.resize(coordinates.ssize());
        for (int d = pme->ndecompdim - 1; d >= 0; d--)
        {
            PmeAtomComm& atc = pme->atc[d];
            atc.maxshift     = (atc.dimind == 0 ? maxshift_x : maxshift_y);
        }
    }
    else
    {
        GMX_ASSERT(coordinates.ssize() == atc.numAtoms(), "We expect atc.numAtoms() coordinates");
        GMX_ASSERT(forces.ssize() >= atc.numAtoms(),
                   "We need a force buffer with at least atc.numAtoms() elements");

        atc.x = coordinates;
        atc.f = forces;
    }

    matrix scaledBox;
    pme->boxScaler->scaleBox(box, scaledBox);

    gmx::invertBoxMatrix(scaledBox, pme->recipbox);
    bFirst = TRUE;

    /* For simplicity, we construct the splines for all particles if
     * more than one PME calculation is needed. Some optimization
     * could be done by keeping track of which atoms have splines
     * constructed, and construct new splines on each pass for atoms
     * that don't yet have them.
     */

    bDoSplines = pme->bFEP || (pme->doCoulomb && pme->doLJ);

    /* We need a maximum of four separate PME calculations:
     * grid_index=0: Coulomb PME with charges from state A
     * grid_index=1: Coulomb PME with charges from state B
     * grid_index=2: LJ PME with C6 from state A
     * grid_index=3: LJ PME with C6 from state B
     * For Lorentz-Berthelot combination rules, a separate loop is used to
     * calculate all the terms
     */

1115     max_grid_index = (pme->ljpme_combination_rule == LongRangeVdW::LB) ? DO_Q : DO_Q_AND_LJ;
1116
1117     for (grid_index = 0; grid_index < max_grid_index; ++grid_index)
1118     {
1119         /* Check if we should do calculations at this grid_index
1120          * If grid_index is odd we should be doing FEP
1121          * If grid_index < 2 we should be doing electrostatic PME
1122          * If grid_index >= 2 we should be doing LJ-PME
1123          */
1124         if ((grid_index < DO_Q && (!pme->doCoulomb || (grid_index == 1 && !pme->bFEP_q)))
1125             || (grid_index >= DO_Q && (!pme->doLJ || (grid_index == 3 && !pme->bFEP_lj))))
1126         {
1127             continue;
1128         }
1129         /* Unpack structure */
1130         pmegrid    = &pme->pmegrid[grid_index];
1131         fftgrid    = pme->fftgrid[grid_index];
1132         cfftgrid   = pme->cfftgrid[grid_index];
1133         pfft_setup = pme->pfft_setup[grid_index];
1134         switch (grid_index)
1135         {
1136             case 0: coefficient = chargeA; break;
1137             case 1: coefficient = chargeB; break;
1138             case 2: coefficient = c6A; break;
1139             case 3: coefficient = c6B; break;
1140         }
1141
1142         grid = pmegrid->grid.grid;
1143
1144         if (debug)
1145         {
1146             fprintf(debug, "PME: number of ranks = %d, rank = %d\n", cr->nnodes, cr->nodeid);
1147             fprintf(debug, "Grid = %p\n", static_cast<void*>(grid));
1148             if (grid == nullptr)
1149             {
1150                 gmx_fatal(FARGS, "No grid!");
1151             }
1152         }
1153
1154         if (pme->nnodes == 1)
1155         {
1156             atc.coefficient = coefficient;
1157         }
1158         else
1159         {
1160             wallcycle_start(wcycle, WallCycleCounter::PmeRedistXF);
1161             do_redist_pos_coeffs(pme, cr, bFirst, coordinates, coefficient);
1162
1163             wallcycle_stop(wcycle, WallCycleCounter::PmeRedistXF);
1164         }
1165
1166         if (debug)
1167         {
1168             fprintf(debug, "Rank= %6d, pme local particles=%6d\n", cr->nodeid, atc.numAtoms());
1169         }
1170
1171         wallcycle_start(wcycle, WallCycleCounter::PmeSpread);
1172
1173         /* Spread the coefficients on a grid */
1174         spread_on_grid(pme, &atc, pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);
1175
1176         if (bFirst)
1177         {
1178             inc_nrnb(nrnb, eNR_WEIGHTS, DIM * atc.numAtoms());
1179         }
1180         inc_nrnb(nrnb, eNR_SPREADBSP, pme->pme_order * pme->pme_order * pme->pme_order * atc.numAtoms());
1181
1182         if (!pme->bUseThreads)
1183         {
1184             wrap_periodic_pmegrid(pme, grid);
1185
1186             /* sum contributions to local grid from other nodes */
1187             if (pme->nnodes > 1)
1188             {
1189                 gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
1190             }
1191
1192             copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
1193         }
1194
1195         wallcycle_stop(wcycle, WallCycleCounter::PmeSpread);
1196
1197         /* TODO If the OpenMP and single-threaded implementations
1198            converge, then spread_on_grid() and
1199            copy_pmegrid_to_fftgrid() will perhaps live in the same
1200            source file.
1201         */
1202
1203         /* Here we start a large thread parallel region */
1204 #pragma omp parallel num_threads(pme->nthread) private(thread)
1205         {
1206             try
1207             {
1208                 thread = gmx_omp_get_thread_num();
1209                 int loop_count;
1210
1211                 /* do 3d-fft */
1212                 if (thread == 0)
1213                 {
1214                     wallcycle_start(wcycle, WallCycleCounter::PmeFft);
1215                 }
1216                 gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX, thread, wcycle);
1217                 if (thread == 0)
1218                 {
1219                     wallcycle_stop(wcycle, WallCycleCounter::PmeFft);
1220                 }
1221
1222                 /* solve in k-space for our local cells */
1223                 if (thread == 0)
1224                 {
1225                     wallcycle_start(
1226                             wcycle,
1227                             (grid_index < DO_Q ? WallCycleCounter::PmeSolve : WallCycleCounter::LJPme));
1228                 }
1229                 if (grid_index < DO_Q)
1230                 {
1231                     loop_count = solve_pme_yzx(pme,
1232                                                cfftgrid,
1233                                                scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
1234                                                computeEnergyAndVirial,
1235                                                pme->nthread,
1236                                                thread);
1237                 }
1238                 else
1239                 {
1240                     loop_count =
1241                             solve_pme_lj_yzx(pme,
1242                                              &cfftgrid,
1243                                              FALSE,
1244                                              scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
1245                                              computeEnergyAndVirial,
1246                                              pme->nthread,
1247                                              thread);
1248                 }
1249
1250                 if (thread == 0)
1251                 {
1252                     wallcycle_stop(
1253                             wcycle,
1254                             (grid_index < DO_Q ? WallCycleCounter::PmeSolve : WallCycleCounter::LJPme));
1255                     inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
1256                 }
1257
1258                 /* do 3d-invfft */
1259                 if (thread == 0)
1260                 {
1261                     wallcycle_start(wcycle, WallCycleCounter::PmeFft);
1262                 }
1263                 gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL, thread, wcycle);
1264                 if (thread == 0)
1265                 {
1266                     wallcycle_stop(wcycle, WallCycleCounter::PmeFft);
1267
1268
1269                     if (pme->nodeid == 0)
1270                     {
1271                         real ntot = pme->nkx * pme->nky * pme->nkz;
1272                         npme      = static_cast<int>(ntot * std::log(ntot) / std::log(2.0));
1273                         inc_nrnb(nrnb, eNR_FFT, 2 * npme);
1274                     }
1275
1276                     /* Note: this wallcycle region is closed below
1277                        outside an OpenMP region, so take care if
1278                        refactoring code here. */
1279                     wallcycle_start(wcycle, WallCycleCounter::PmeGather);
1280                 }
1281
1282                 copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
1283             }
1284             GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
1285         }
1286         /* End of thread parallel section.
1287          * With MPI we have to synchronize here before gmx_sum_qgrid_dd.
1288          */
1289
1290         /* distribute local grid to all nodes */
1291         if (pme->nnodes > 1)
1292         {
1293             gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
1294         }
1295
1296         unwrap_periodic_pmegrid(pme, grid);
1297
1298         if (stepWork.computeForces)
1299         {
1300             /* interpolate forces for our local atoms */
1301
1302
1303             /* If we are running without parallelization,
1304              * atc->f is the actual force array, not a buffer,
1305              * therefore we should not clear it.
1306              */
1307             lambda  = grid_index < DO_Q ? lambda_q : lambda_lj;
1308             bClearF = (bFirst && PAR(cr));
1309 #pragma omp parallel for num_threads(pme->nthread) schedule(static)
1310             for (thread = 0; thread < pme->nthread; thread++)
1311             {
1312                 try
1313                 {
1314                     gather_f_bsplines(pme,
1315                                       grid,
1316                                       bClearF,
1317                                       &atc,
1318                                       &atc.spline[thread],
1319                                       pme->bFEP ? (grid_index % 2 == 0 ? 1.0 - lambda : lambda) : 1.0);
1320                 }
1321                 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
1322             }
1323
1324
1325             inc_nrnb(nrnb, eNR_GATHERFBSP, pme->pme_order * pme->pme_order * pme->pme_order * atc.numAtoms());
1326             /* Note: this wallcycle region is opened above inside an OpenMP
1327                region, so take care if refactoring code here. */
1328             wallcycle_stop(wcycle, WallCycleCounter::PmeGather);
1329         }
1330
1331         if (computeEnergyAndVirial)
1332         {
1333             /* This should only be called on the master thread
1334              * and after the threads have synchronized.
1335              */
1336             if (grid_index < 2)
1337             {
1338                 get_pme_ener_vir_q(pme->solve_work, pme->nthread, &output[grid_index % 2]);
1339             }
1340             else
1341             {
1342                 get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &output[grid_index % 2]);
1343             }
1344         }
1345         bFirst = FALSE;
1346     } /* of grid_index-loop */
1347
1348     /* For Lorentz-Berthelot combination rules in LJ-PME, we need to calculate
1349      * seven terms. */
1350
1351     if (pme->doLJ && pme->ljpme_combination_rule == LongRangeVdW::LB)
1352     {
1353         /* Loop over A- and B-state if we are doing FEP */
1354         for (fep_state = 0; fep_state < fep_states_lj; ++fep_state)
1355         {
1356             std::vector<real>         local_c6;
1357             std::vector<real>         local_sigma;
1358             gmx::ArrayRef<const real> RedistC6;
1359             gmx::ArrayRef<const real> RedistSigma;
1360             gmx::ArrayRef<real>       coefficientBuffer;
1361             if (pme->nnodes == 1)
1362             {
1363                 pme->lb_buf1.resize(atc.numAtoms());
1364                 coefficientBuffer = pme->lb_buf1;
1365                 switch (fep_state)
1366                 {
1367                     case 0:
1368                         local_c6.assign(c6A.begin(), c6A.end());
1369                         local_sigma.assign(sigmaA.begin(), sigmaA.end());
1370                         break;
1371                     case 1:
1372                         local_c6.assign(c6B.begin(), c6B.end());
1373                         local_sigma.assign(sigmaB.begin(), sigmaB.end());
1374                         break;
1375                     default: gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
1376                 }
1377             }
1378             else
1379             {
1380                 coefficientBuffer = atc.coefficientBuffer;
1381                 switch (fep_state)
1382                 {
1383                     case 0:
1384                         RedistC6    = c6A;
1385                         RedistSigma = sigmaA;
1386                         break;
1387                     case 1:
1388                         RedistC6    = c6B;
1389                         RedistSigma = sigmaB;
1390                         break;
1391                     default: gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
1392                 }
1393                 wallcycle_start(wcycle, WallCycleCounter::PmeRedistXF);
1394
1395                 do_redist_pos_coeffs(pme, cr, bFirst, coordinates, RedistC6);
1396                 pme->lb_buf1.resize(atc.numAtoms());
1397                 pme->lb_buf2.resize(atc.numAtoms());
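                     /* Note: lb_buf1/lb_buf2 serve only to size local_c6 and
                      * local_sigma here; every element is overwritten below
                      * with the redistributed values from atc.coefficient. */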
1398                 local_c6.assign(pme->lb_buf1.begin(), pme->lb_buf1.end());
1399                 for (int i = 0; i < atc.numAtoms(); ++i)
1400                 {
1401                     local_c6[i] = atc.coefficient[i];
1402                 }
1403
1404                 do_redist_pos_coeffs(pme, cr, FALSE, coordinates, RedistSigma);
1405                 local_sigma.assign(pme->lb_buf2.begin(), pme->lb_buf2.end());
1406                 for (int i = 0; i < atc.numAtoms(); ++i)
1407                 {
1408                     local_sigma[i] = atc.coefficient[i];
1409                 }
1410
1411                 wallcycle_stop(wcycle, WallCycleCounter::PmeRedistXF);
1412             }
1413             atc.coefficient = coefficientBuffer;
1414             calc_initial_lb_coeffs(coefficientBuffer, local_c6, local_sigma);
1415
1416             /* Seven terms in LJ-PME with LB; grid_index < 2 is reserved for electrostatics */
1417             for (grid_index = 2; grid_index < 9; ++grid_index)
1418             {
1419                 /* Unpack structure */
1420                 pmegrid    = &pme->pmegrid[grid_index];
1421                 fftgrid    = pme->fftgrid[grid_index];
1422                 pfft_setup = pme->pfft_setup[grid_index];
1423                 calc_next_lb_coeffs(coefficientBuffer, local_sigma);
1424                 grid = pmegrid->grid.grid;
1425
1426                 wallcycle_start(wcycle, WallCycleCounter::PmeSpread);
1427                 /* Spread the c6 on a grid */
1428                 spread_on_grid(pme, &atc, pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);
1429
1430                 if (bFirst)
1431                 {
1432                     inc_nrnb(nrnb, eNR_WEIGHTS, DIM * atc.numAtoms());
1433                 }
1434
1435                 inc_nrnb(nrnb,
1436                          eNR_SPREADBSP,
1437                          pme->pme_order * pme->pme_order * pme->pme_order * atc.numAtoms());
1438                 if (pme->nthread == 1)
1439                 {
1440                     wrap_periodic_pmegrid(pme, grid);
1441                     /* sum contributions to local grid from other nodes */
1442                     if (pme->nnodes > 1)
1443                     {
1444                         gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
1445                     }
1446                     copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
1447                 }
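                     /* For pme->nthread > 1 the wrapping, inter-node reduction
                      * and copy to the FFT grid presumably happen inside
                      * spread_on_grid's threaded code path, which is why the
                      * block above runs only in the single-threaded case. */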
1448                 wallcycle_stop(wcycle, WallCycleCounter::PmeSpread);
1449
1450                 /* Here we start a large thread-parallel region */
1451 #pragma omp parallel num_threads(pme->nthread) private(thread)
1452                 {
1453                     try
1454                     {
1455                         thread = gmx_omp_get_thread_num();
1456                         /* do 3d-fft */
1457                         if (thread == 0)
1458                         {
1459                             wallcycle_start(wcycle, WallCycleCounter::PmeFft);
1460                         }
1461
1462                         gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX, thread, wcycle);
1463                         if (thread == 0)
1464                         {
1465                             wallcycle_stop(wcycle, WallCycleCounter::PmeFft);
1466                         }
1467                     }
1468                     GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
1469                 }
1470                 bFirst = FALSE;
1471             }
1472             /* solve in k-space for our local cells */
1473 #pragma omp parallel num_threads(pme->nthread) private(thread)
1474             {
1475                 try
1476                 {
1477                     int loop_count;
1478                     thread = gmx_omp_get_thread_num();
1479                     if (thread == 0)
1480                     {
1481                         wallcycle_start(wcycle, WallCycleCounter::LJPme);
1482                     }
1483
1484                     loop_count =
1485                             solve_pme_lj_yzx(pme,
1486                                              &pme->cfftgrid[2],
1487                                              TRUE,
1488                                              scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
1489                                              computeEnergyAndVirial,
1490                                              pme->nthread,
1491                                              thread);
1492                     if (thread == 0)
1493                     {
1494                         wallcycle_stop(wcycle, WallCycleCounter::LJPme);
1495                         inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
1496                     }
1497                 }
1498                 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
1499             }
1500
1501             if (computeEnergyAndVirial)
1502             {
1503                 /* This should only be called on the master thread and
1504                  * after the threads have synchronized.
1505                  */
1506                 get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &output[fep_state]);
1507             }
1508
1509             bFirst = !pme->doCoulomb;
1510             calc_initial_lb_coeffs(coefficientBuffer, local_c6, local_sigma);
1511             for (grid_index = 8; grid_index >= 2; --grid_index)
1512             {
1513                 /* Unpack structure */
1514                 pmegrid    = &pme->pmegrid[grid_index];
1515                 fftgrid    = pme->fftgrid[grid_index];
1516                 pfft_setup = pme->pfft_setup[grid_index];
1517                 grid       = pmegrid->grid.grid;
1518                 calc_next_lb_coeffs(coefficientBuffer, local_sigma);
1519 #pragma omp parallel num_threads(pme->nthread) private(thread)
1520                 {
1521                     try
1522                     {
1523                         thread = gmx_omp_get_thread_num();
1524                         /* do 3d-invfft */
1525                         if (thread == 0)
1526                         {
1527                             wallcycle_start(wcycle, WallCycleCounter::PmeFft);
1528                         }
1529
1530                         gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL, thread, wcycle);
1531                         if (thread == 0)
1532                         {
1533                             wallcycle_stop(wcycle, WallCycleCounter::PmeFft);
1534
1535
1536                             if (pme->nodeid == 0)
1537                             {
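                                          /* Cost estimate: an N-point 3D FFT
                                           * is counted as N log2(N) operations;
                                           * the factor of 2 below presumably
                                           * covers the forward and backward
                                           * transforms of this grid. */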
1538                                 real ntot = pme->nkx * pme->nky * pme->nkz;
1539                                 npme      = static_cast<int>(ntot * std::log(ntot) / std::log(2.0));
1540                                 inc_nrnb(nrnb, eNR_FFT, 2 * npme);
1541                             }
1542                             wallcycle_start(wcycle, WallCycleCounter::PmeGather);
1543                         }
1544
1545                         copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
1546                     }
1547                     GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
1548                 } /*#pragma omp parallel*/
1549
1550                 /* distribute local grid to all nodes */
1551                 if (pme->nnodes > 1)
1552                 {
1553                     gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
1554                 }
1555
1556                 unwrap_periodic_pmegrid(pme, grid);
1557
1558                 if (stepWork.computeForces)
1559                 {
1560                     /* interpolate forces for our local atoms */
1561                     bClearF = (bFirst && PAR(cr));
1562                     scale   = pme->bFEP ? (fep_state < 1 ? 1.0 - lambda_lj : lambda_lj) : 1.0;
1563                     scale *= lb_scale_factor[grid_index - 2];
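                         /* The lb_scale_factor applied above presumably holds
                          * the binomial weights C(6, grid_index - 2)/64 from
                          * the expansion of (sigma_i + sigma_j)^6 sketched
                          * before the fep_state loop. */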
1564
1565 #pragma omp parallel for num_threads(pme->nthread) schedule(static)
1566                     for (thread = 0; thread < pme->nthread; thread++)
1567                     {
1568                         try
1569                         {
1570                             gather_f_bsplines(
1571                                     pme, grid, bClearF, &pme->atc[0], &pme->atc[0].spline[thread], scale);
1572                         }
1573                         GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
1574                     }
1575
1576
1577                     inc_nrnb(nrnb,
1578                              eNR_GATHERFBSP,
1579                              pme->pme_order * pme->pme_order * pme->pme_order * pme->atc[0].numAtoms());
1580                 }
1581                 wallcycle_stop(wcycle, WallCycleCounter::PmeGather);
1582
1583                 bFirst = FALSE;
1584             } /* for (grid_index = 8; grid_index >= 2; --grid_index) */
1585         }     /* for (fep_state = 0; fep_state < fep_states_lj; ++fep_state) */
1586     }         /* if (pme->doLJ && pme->ljpme_combination_rule == LongRangeVdW::LB) */
1587
1588     if (stepWork.computeForces && pme->nnodes > 1)
1589     {
1590         wallcycle_start(wcycle, WallCycleCounter::PmeRedistXF);
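             /* Forces are passed back one decomposition dimension at a time:
              * intermediate dimensions redistribute into the next atc's force
              * buffer, and the last dimension writes into the caller's forces. */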
1591         for (d = 0; d < pme->ndecompdim; d++)
1592         {
1593             gmx::ArrayRef<gmx::RVec> forcesRef;
1594             if (d == pme->ndecompdim - 1)
1595             {
1596                 const size_t numAtoms = coordinates.size();
1597                 GMX_ASSERT(forces.size() >= numAtoms, "Need at least numAtoms forces");
1598                 forcesRef = forces.subArray(0, numAtoms);
1599             }
1600             else
1601             {
1602                 forcesRef = pme->atc[d + 1].f;
1603             }
1604             if (DOMAINDECOMP(cr))
1605             {
1606                 dd_pmeredist_f(pme, &pme->atc[d], forcesRef, d == pme->ndecompdim - 1 && pme->bPPnode);
1607             }
1608         }
1609
1610         wallcycle_stop(wcycle, WallCycleCounter::PmeRedistXF);
1611     }
1612
1613     if (computeEnergyAndVirial)
1614     {
1615         if (pme->doCoulomb)
1616         {
1617             if (!pme->bFEP_q)
1618             {
1619                 *energy_q = output[0].coulombEnergy_;
1620                 m_add(vir_q, output[0].coulombVirial_, vir_q);
1621             }
1622             else
1623             {
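                     /* Linear FEP mixing of the two end states:
                      * E(lambda) = (1 - lambda) E_A + lambda E_B, so
                      * dE/dlambda = E_B - E_A; the virial below is mixed the
                      * same way (and so are the LJ terms further down). */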
1624                 *energy_q = (1.0 - lambda_q) * output[0].coulombEnergy_ + lambda_q * output[1].coulombEnergy_;
1625                 *dvdlambda_q += output[1].coulombEnergy_ - output[0].coulombEnergy_;
1626                 for (int i = 0; i < DIM; i++)
1627                 {
1628                     for (int j = 0; j < DIM; j++)
1629                     {
1630                         vir_q[i][j] += (1.0 - lambda_q) * output[0].coulombVirial_[i][j]
1631                                        + lambda_q * output[1].coulombVirial_[i][j];
1632                     }
1633                 }
1634             }
1635             if (debug)
1636             {
1637                 fprintf(debug, "Electrostatic PME mesh energy: %g\n", *energy_q);
1638             }
1639         }
1640         else
1641         {
1642             *energy_q = 0;
1643         }
1644
1645         if (pme->doLJ)
1646         {
1647             if (!pme->bFEP_lj)
1648             {
1649                 *energy_lj = output[0].lennardJonesEnergy_;
1650                 m_add(vir_lj, output[0].lennardJonesVirial_, vir_lj);
1651             }
1652             else
1653             {
1654                 *energy_lj = (1.0 - lambda_lj) * output[0].lennardJonesEnergy_
1655                              + lambda_lj * output[1].lennardJonesEnergy_;
1656                 *dvdlambda_lj += output[1].lennardJonesEnergy_ - output[0].lennardJonesEnergy_;
1657                 for (int i = 0; i < DIM; i++)
1658                 {
1659                     for (int j = 0; j < DIM; j++)
1660                     {
1661                         vir_lj[i][j] += (1.0 - lambda_lj) * output[0].lennardJonesVirial_[i][j]
1662                                         + lambda_lj * output[1].lennardJonesVirial_[i][j];
1663                     }
1664                 }
1665             }
1666             if (debug)
1667             {
1668                 fprintf(debug, "Lennard-Jones PME mesh energy: %g\n", *energy_lj);
1669             }
1670         }
1671         else
1672         {
1673             *energy_lj = 0;
1674         }
1675     }
1676     return 0;
1677 }
1678
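     /* Descriptive note: gmx_pme_destroy is null-safe, so callers may invoke
      * it unconditionally; GPU resources are released only when a GPU PME
      * object was actually created. */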
1679 void gmx_pme_destroy(gmx_pme_t* pme)
1680 {
1681     if (!pme)
1682     {
1683         return;
1684     }
1685
1686     delete pme->boxScaler;
1687
1688     sfree(pme->nnx);
1689     sfree(pme->nny);
1690     sfree(pme->nnz);
1691     sfree(pme->fshx);
1692     sfree(pme->fshy);
1693     sfree(pme->fshz);
1694
1695     for (int i = 0; i < pme->ngrids; ++i)
1696     {
1697         pmegrids_destroy(&pme->pmegrid[i]);
1698     }
1699     if (pme->pfft_setup)
1700     {
1701         for (int i = 0; i < pme->ngrids; ++i)
1702         {
1703             gmx_parallel_3dfft_destroy(pme->pfft_setup[i]);
1704         }
1705     }
1706     sfree(pme->fftgrid);
1707     sfree(pme->cfftgrid);
1708     sfree(pme->pfft_setup);
1709
1710     for (int i = 0; i < DIM; i++)
1711     {
1712         sfree(pme->bsp_mod[i]);
1713     }
1714
1715     sfree(pme->bufv);
1716     sfree(pme->bufr);
1717
1718     if (pme->solve_work)
1719     {
1720         pme_free_all_work(&pme->solve_work, pme->nthread);
1721     }
1722
1723     sfree(pme->sum_qgrid_tmp);
1724     sfree(pme->sum_qgrid_dd_tmp);
1725
1726     destroy_pme_spline_work(pme->spline_work);
1727
1728     if (pme->gpu != nullptr)
1729     {
1730         pme_gpu_destroy(pme->gpu);
1731     }
1732
1733     delete pme;
1734 }
1735
1736 void gmx_pme_reinit_atoms(gmx_pme_t*                pme,
1737                           const int                 numAtoms,
1738                           gmx::ArrayRef<const real> chargesA,
1739                           gmx::ArrayRef<const real> chargesB)
1740 {
1741     if (pme->gpu != nullptr)
1742     {
1743         GMX_ASSERT(!(pme->bFEP_q && chargesB.empty()),
1744                    "B state charges must be specified if running Coulomb FEP on the GPU");
1745         pme_gpu_reinit_atoms(pme->gpu, numAtoms, chargesA.data(), pme->bFEP_q ? chargesB.data() : nullptr);
1746     }
1747     else
1748     {
1749         pme->atc[0].setNumAtoms(numAtoms);
1750         // TODO: set the charges here as well
1751     }
1752 }
1753
1754 bool gmx_pme_grid_matches(const gmx_pme_t& pme, const ivec grid_size)
1755 {
1756     return (pme.nkx == grid_size[XX] && pme.nky == grid_size[YY] && pme.nkz == grid_size[ZZ]);
1757 }
1758
1759 void gmx::SeparatePmeRanksPermitted::disablePmeRanks(const std::string& reason)
1760 {
1761     permitSeparatePmeRanks_ = false;
1762
1763     if (!reason.empty())
1764     {
1765         reasons_.push_back(reason);
1766     }
1767 }
1768
1769 bool gmx::SeparatePmeRanksPermitted::permitSeparatePmeRanks() const
1770 {
1771     return permitSeparatePmeRanks_;
1772 }
1773
1774 std::string gmx::SeparatePmeRanksPermitted::reasonsWhyDisabled() const
1775 {
1776     return joinStrings(reasons_, "; ");
1777 }
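     /* Illustrative usage sketch for SeparatePmeRanksPermitted (the caller and
      * the reason string are hypothetical, not taken from this file):
      *
      *   gmx::SeparatePmeRanksPermitted separatePmeRanksPermitted;
      *   separatePmeRanksPermitted.disablePmeRanks("PME can only run on the CPU");
      *   if (!separatePmeRanksPermitted.permitSeparatePmeRanks())
      *   {
      *       fprintf(stderr, "Separate PME ranks are not used because: %s\n",
      *               separatePmeRanksPermitted.reasonsWhyDisabled().c_str());
      *   }
      */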