/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017 The GROMACS development team.
 * Copyright (c) 2018,2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief This file contains function definitions necessary for
 * computing energies and forces for the PME long-ranged part (Coulomb
 * and LJ).
 *
 * \author Erik Lindahl <erik@kth.se>
 * \author Berk Hess <hess@kth.se>
 * \ingroup module_ewald
 */
/* IMPORTANT FOR DEVELOPERS:
 *
 * Triclinic pme stuff isn't entirely trivial, and we've experienced
 * some bugs during development (many of them due to me). To avoid
 * this in the future, please check the following things if you make
 * changes in this file:
 *
 * 1. You should obtain identical (at least to the PME precision)
 *    energies, forces, and virial for
 *    a rectangular box and a triclinic one where the z (or y) axis is
 *    tilted a whole box side. For instance you could use these boxes:
 *
 *    rectangular       triclinic
 *     2  0  0           2  0  0
 *     0  2  0           0  2  0
 *     0  0  6           2  2  6
 *
 * 2. You should check the energy conservation in a triclinic box.
 *
 * It might seem like overkill, but better safe than sorry.
 * /Erik 001109
 */

#include "gmxpre.h"

#include "pme.h"

#include "config.h"

#include <cassert>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>

#include <algorithm>
#include <list>

#include "gromacs/domdec/domdec.h"
#include "gromacs/ewald/ewald_utils.h"
#include "gromacs/fft/parallel_3dfft.h"
#include "gromacs/fileio/pdbio.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/hardware/hw_info.h"
#include "gromacs/math/gmxcomplex.h"
#include "gromacs/math/invertmatrix.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/forcerec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/message_string_collector.h"
#include "gromacs/utility/real.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/unique_cptr.h"

#include "calculate_spline_moduli.h"
#include "pme_gather.h"
#include "pme_gpu_internal.h"
#include "pme_grid.h"
#include "pme_internal.h"
#include "pme_redistribute.h"
#include "pme_solve.h"
#include "pme_spline_work.h"
#include "pme_spread.h"

bool g_allowPmeWithSyclForTesting = false;

bool pme_gpu_supports_build(std::string* error)
{
    gmx::MessageStringCollector errorReasons;
    // Before changing the prefix string, make sure that it is not searched for in regression tests.
    errorReasons.startContext("PME GPU does not support:");
    errorReasons.appendIf(GMX_DOUBLE, "Double-precision build of GROMACS.");
    errorReasons.appendIf(!GMX_GPU, "Non-GPU build of GROMACS.");
    errorReasons.appendIf(GMX_GPU_SYCL && !g_allowPmeWithSyclForTesting, "SYCL build."); // SYCL-TODO
    errorReasons.finishContext();
    if (error != nullptr)
    {
        *error = errorReasons.toString();
    }
    return errorReasons.isEmpty();
}
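
/* Illustrative usage sketch (added commentary; the calling code below is
 * hypothetical): callers typically query these predicates before choosing a
 * run mode, e.g.
 *
 *   std::string error;
 *   if (!pme_gpu_supports_build(&error))
 *   {
 *       // fall back to PmeRunMode::CPU and report 'error' to the user
 *   }
 */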

bool pme_gpu_supports_hardware(const gmx_hw_info_t gmx_unused& hwinfo, std::string* error)
{
    gmx::MessageStringCollector errorReasons;
    // Before changing the prefix string, make sure that it is not searched for in regression tests.
    errorReasons.startContext("PME GPU does not support:");
#ifdef __APPLE__
    errorReasons.appendIf(GMX_GPU_OPENCL, "Apple OS X operating system");
#endif
    errorReasons.finishContext();
    if (error != nullptr)
    {
        *error = errorReasons.toString();
    }
    return errorReasons.isEmpty();
}

bool pme_gpu_supports_input(const t_inputrec& ir, std::string* error)
{
    gmx::MessageStringCollector errorReasons;
    // Before changing the prefix string, make sure that it is not searched for in regression tests.
    errorReasons.startContext("PME GPU does not support:");
    errorReasons.appendIf(!EEL_PME(ir.coulombtype),
                          "Systems that do not use PME for electrostatics.");
    errorReasons.appendIf((ir.pme_order != 4), "Interpolation orders other than 4.");
    errorReasons.appendIf(EVDW_PME(ir.vdwtype), "Lennard-Jones PME.");
    errorReasons.appendIf(!EI_DYNAMICS(ir.eI), "Non-dynamical integrator (use md, sd, etc).");
    errorReasons.finishContext();
    if (error != nullptr)
    {
        *error = errorReasons.toString();
    }
    return errorReasons.isEmpty();
}

/*! \brief \libinternal
 * Finds out whether PME with the given inputs can run on a GPU.
 * This function is a final internal check that validates the whole PME
 * structure on creation; it intentionally duplicates the preliminary checks
 * of the (externally exposed) pme_gpu_supports_input() - just in case.
 *
 * \param[in]  pme          The PME structure.
 * \param[out] error        The error message if the input is not supported on GPU.
 * \returns                 True if this PME input can run on a GPU, false otherwise.
 */
static bool pme_gpu_check_restrictions(const gmx_pme_t* pme, std::string* error)
{
    gmx::MessageStringCollector errorReasons;
    // Before changing the prefix string, make sure that it is not searched for in regression tests.
    errorReasons.startContext("PME GPU does not support:");
    errorReasons.appendIf((pme->nnodes != 1), "PME decomposition.");
    errorReasons.appendIf((pme->pme_order != 4), "Interpolation orders other than 4.");
    errorReasons.appendIf(pme->doLJ, "Lennard-Jones PME.");
    errorReasons.appendIf(GMX_DOUBLE, "Double-precision build of GROMACS.");
    errorReasons.appendIf(!GMX_GPU, "Non-GPU build of GROMACS.");
    errorReasons.appendIf(GMX_GPU_SYCL && !g_allowPmeWithSyclForTesting, "SYCL build of GROMACS."); // SYCL-TODO
    errorReasons.finishContext();
    if (error != nullptr)
    {
        *error = errorReasons.toString();
    }
    return errorReasons.isEmpty();
}

PmeRunMode pme_run_mode(const gmx_pme_t* pme)
{
    GMX_ASSERT(pme != nullptr, "Expecting valid PME data pointer");
    return pme->runMode;
}

gmx::PinningPolicy pme_get_pinning_policy()
{
    return gmx::PinningPolicy::PinnedIfSupported;
}

/*! \brief Number of bytes in a cache line.
 *
 * Must also be a multiple of the SIMD and SIMD4 register size, to
 * preserve alignment.
 */
const int gmxCacheLineSize = 64;

//! Set up coordinate communication
static void setup_coordinate_communication(PmeAtomComm* atc)
{
    int nslab, n, i;
    int fw, bw;

    nslab = atc->nslab;

    n = 0;
    for (i = 1; i <= nslab / 2; i++)
    {
        fw = (atc->nodeid + i) % nslab;
        bw = (atc->nodeid - i + nslab) % nslab;
        if (n < nslab - 1)
        {
            atc->slabCommSetup[n].node_dest = fw;
            atc->slabCommSetup[n].node_src  = bw;
            n++;
        }
        if (n < nslab - 1)
        {
            atc->slabCommSetup[n].node_dest = bw;
            atc->slabCommSetup[n].node_src  = fw;
            n++;
        }
    }
}
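
/* Worked example (added commentary): for nslab = 4 and nodeid = 0 the loop
 * above produces three pulses, interleaving forward and backward neighbors:
 *   slabCommSetup[0]: node_dest = 1, node_src = 3
 *   slabCommSetup[1]: node_dest = 3, node_src = 1
 *   slabCommSetup[2]: node_dest = 2, node_src = 2
 * The n < nslab - 1 guard caps the total at nslab - 1 pulses, so the
 * half-way offset, where forward and backward neighbors coincide, is added
 * only once.
 */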

/*! \brief Round \p n up to the next multiple of \p f */
static int mult_up(int n, int f)
{
    return ((n + f - 1) / f) * f;
}
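
/* Example (added commentary): mult_up(25, 4) == 28 while mult_up(24, 4) == 24,
 * i.e. the padded size that a grid dimension effectively gets when it is not
 * divisible by the rank count. */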

/*! \brief Return estimate of the load imbalance from the PME grid not being a good match for the number of PME ranks */
static double estimate_pme_load_imbalance(struct gmx_pme_t* pme)
{
    int    nma, nmi;
    double n1, n2, n3;

    nma = pme->nnodes_major;
    nmi = pme->nnodes_minor;

    n1 = mult_up(pme->nkx, nma) * mult_up(pme->nky, nmi) * pme->nkz;
    n2 = mult_up(pme->nkx, nma) * mult_up(pme->nkz, nmi) * pme->nky;
    n3 = mult_up(pme->nky, nma) * mult_up(pme->nkz, nmi) * pme->nkx;

    /* pme_solve is roughly double the cost of an fft */
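    /* Added commentary: n1, n2 and n3 are the grid volumes in the three
     * parallel data layouts, with each decomposed dimension padded up to a
     * multiple of the rank count along it (the padding is idle work on some
     * ranks). The n3 layout carries the solve stage, hence its weight of 3
     * (one FFT pass plus a solve costing roughly two), and the sum is
     * normalized by the unpadded grid volume. */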

    return (n1 + n2 + 3 * n3) / static_cast<double>(6 * pme->nkx * pme->nky * pme->nkz);
}

#ifndef DOXYGEN

PmeAtomComm::PmeAtomComm(MPI_Comm   PmeMpiCommunicator,
                         const int  numThreads,
                         const int  pmeOrder,
                         const int  dimIndex,
                         const bool doSpread) :
    dimind(dimIndex), bSpread(doSpread), pme_order(pmeOrder), nthread(numThreads), spline(nthread)
{
    if (PmeMpiCommunicator != MPI_COMM_NULL)
    {
        mpi_comm = PmeMpiCommunicator;
#    if GMX_MPI
        MPI_Comm_size(mpi_comm, &nslab);
        MPI_Comm_rank(mpi_comm, &nodeid);
#    endif
    }
    if (debug)
    {
        fprintf(debug, "For PME atom communication in dimind %d: nslab %d rank %d\n", dimind, nslab, nodeid);
    }

    if (nslab > 1)
    {
        slabCommSetup.resize(nslab);
        setup_coordinate_communication(this);

        count_thread.resize(nthread);
        for (auto& countThread : count_thread)
        {
            countThread.resize(nslab);
        }
    }

    if (nthread > 1)
    {
        threadMap.resize(nthread);

#    pragma omp parallel for num_threads(nthread) schedule(static)
        for (int thread = 0; thread < nthread; thread++)
        {
            try
            {
                /* Allocate buffer with padding to avoid cache pollution */
                threadMap[thread].nBuffer.resize(nthread + 2 * gmxCacheLineSize);
                threadMap[thread].n = threadMap[thread].nBuffer.data() + gmxCacheLineSize;
            }
            GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
        }
    }
}

#endif // !DOXYGEN

/*! \brief Initialize data structure for communication */
static void init_overlap_comm(pme_overlap_t* ol, int norder, MPI_Comm comm, int nnodes, int nodeid, int ndata, int commplainsize)
{
    gmx_bool bCont;

    ol->mpi_comm = comm;
    ol->nnodes   = nnodes;
    ol->nodeid   = nodeid;

    /* Linear translation of the PME grid won't affect reciprocal space
     * calculations, so to optimize we only interpolate "upwards",
     * which also means we only have to consider overlap in one direction.
     * I.e., particles on this node might also be spread to grid indices
     * that belong to higher nodes (modulo nnodes)
     */

    ol->s2g0.resize(ol->nnodes + 1);
    ol->s2g1.resize(ol->nnodes);
    if (debug)
    {
        fprintf(debug, "PME slab boundaries:");
    }
    for (int i = 0; i < nnodes; i++)
    {
        /* s2g0 is the local interpolation grid start;
         * s2g1 is the local interpolation grid end.
         * Since in calc_pidx we divide particles (not grid lines)
         * spatially uniformly along dimension x or y, we need to round
         * s2g0 down and s2g1 up.
         */
        ol->s2g0[i] = (i * ndata + 0) / nnodes;
        ol->s2g1[i] = ((i + 1) * ndata + nnodes - 1) / nnodes + norder - 1;

        if (debug)
        {
            fprintf(debug, "  %3d %3d", ol->s2g0[i], ol->s2g1[i]);
        }
    }
    ol->s2g0[nnodes] = ndata;
    if (debug)
    {
        fprintf(debug, "\n");
    }
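    /* Worked example (added commentary): for ndata = 15, nnodes = 4 and
     * norder = 4 the loop above yields
     *   s2g0 = { 0, 3, 7, 11, 15 }   (slab starts, rounded down)
     *   s2g1 = { 7, 11, 15, 18 }     (slab ends plus norder - 1, rounded up)
     * so each slab overlaps one or more of the following ones, which is what
     * the pulse count below is determined from. */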

    /* Determine with how many nodes we need to communicate the grid overlap */
    int testRankCount = 0;
    do
    {
        testRankCount++;
        bCont = FALSE;
        for (int i = 0; i < nnodes; i++)
        {
            if ((i + testRankCount < nnodes && ol->s2g1[i] > ol->s2g0[i + testRankCount])
                || (i + testRankCount >= nnodes && ol->s2g1[i] > ol->s2g0[i + testRankCount - nnodes] + ndata))
            {
                bCont = TRUE;
            }
        }
    } while (bCont && testRankCount < nnodes);

    ol->comm_data.resize(testRankCount - 1);
    ol->send_size = 0;

    for (size_t b = 0; b < ol->comm_data.size(); b++)
    {
        pme_grid_comm_t* pgc = &ol->comm_data[b];

        /* Send */
        pgc->send_id  = (ol->nodeid + (b + 1)) % ol->nnodes;
        int fft_start = ol->s2g0[pgc->send_id];
        int fft_end   = ol->s2g0[pgc->send_id + 1];
        if (pgc->send_id < nodeid)
        {
            fft_start += ndata;
            fft_end += ndata;
        }
        int send_index1  = ol->s2g1[nodeid];
        send_index1      = std::min(send_index1, fft_end);
        pgc->send_index0 = fft_start;
        pgc->send_nindex = std::max(0, send_index1 - pgc->send_index0);
        ol->send_size += pgc->send_nindex;

        /* We always start receiving to the first index of our slab */
        pgc->recv_id    = (ol->nodeid - (b + 1) + ol->nnodes) % ol->nnodes;
        fft_start       = ol->s2g0[ol->nodeid];
        fft_end         = ol->s2g0[ol->nodeid + 1];
        int recv_index1 = ol->s2g1[pgc->recv_id];
        if (pgc->recv_id > nodeid)
        {
            recv_index1 -= ndata;
        }
        recv_index1      = std::min(recv_index1, fft_end);
        pgc->recv_index0 = fft_start;
        pgc->recv_nindex = std::max(0, recv_index1 - pgc->recv_index0);
    }

#if GMX_MPI
    /* Communicate the buffer sizes to receive */
    MPI_Status stat;
    for (size_t b = 0; b < ol->comm_data.size(); b++)
    {
        MPI_Sendrecv(&ol->send_size,
                     1,
                     MPI_INT,
                     ol->comm_data[b].send_id,
                     b,
                     &ol->comm_data[b].recv_size,
                     1,
                     MPI_INT,
                     ol->comm_data[b].recv_id,
                     b,
                     ol->mpi_comm,
                     &stat);
    }
#endif

    /* For a non-divisible grid we need pme_order instead of pme_order-1 */
    ol->sendbuf.resize(norder * commplainsize);
    ol->recvbuf.resize(norder * commplainsize);
}

int minimalPmeGridSize(int pmeOrder)
{
    /* The actual grid size limitations are:
     *   serial:        >= pme_order
     *   DD, no OpenMP: >= 2*(pme_order - 1)
     *   DD, OpenMP:    >= pme_order + 1
     * But we use the maximum for simplicity since in practice there is not
     * much performance difference between pme_order and 2*(pme_order -1).
     */
    int minimalSize = 2 * (pmeOrder - 1);

    GMX_RELEASE_ASSERT(pmeOrder >= 3, "pmeOrder has to be >= 3");
    GMX_RELEASE_ASSERT(minimalSize >= pmeOrder + 1, "The grid size should be >= pmeOrder + 1");

    return minimalSize;
}
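
/* Example (added commentary): for the default pmeOrder = 4 this gives a
 * minimal grid size of 2 * (4 - 1) = 6 points along each dimension. */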

bool gmx_pme_check_restrictions(int pme_order, int nkx, int nky, int nkz, int numPmeDomainsAlongX, bool useThreads, bool errorsAreFatal)
{
    if (pme_order > PME_ORDER_MAX)
    {
        if (!errorsAreFatal)
        {
            return false;
        }

        std::string message = gmx::formatString(
                "pme_order (%d) is larger than the maximum allowed value (%d). Modify and "
                "recompile the code if you really need such a high order.",
                pme_order,
                PME_ORDER_MAX);
        GMX_THROW(gmx::InconsistentInputError(message));
    }

    const int minGridSize = minimalPmeGridSize(pme_order);
    if (nkx < minGridSize || nky < minGridSize || nkz < minGridSize)
    {
        if (!errorsAreFatal)
        {
            return false;
        }
        std::string message =
                gmx::formatString("The PME grid sizes need to be >= 2*(pme_order-1) (%d)", minGridSize);
        GMX_THROW(gmx::InconsistentInputError(message));
    }

    /* Check for a limitation of the (current) sum_fftgrid_dd code.
     * We only allow multiple communication pulses in dim 1, not in dim 0.
     */
    if (useThreads
        && (nkx < numPmeDomainsAlongX * pme_order && nkx != numPmeDomainsAlongX * (pme_order - 1)))
    {
        if (!errorsAreFatal)
        {
            return false;
        }
        gmx_fatal(FARGS,
                  "The number of PME grid lines per rank along x is %g. But when using OpenMP "
                  "threads, the number of grid lines per rank along x should be >= pme_order (%d) "
                  "or = pme_order-1. To resolve this issue, use fewer ranks along x (and possibly "
                  "more along y and/or z) by specifying -dd manually.",
                  nkx / static_cast<double>(numPmeDomainsAlongX),
                  pme_order);
    }

    return true;
}
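
/* Illustrative non-fatal usage sketch (added commentary; the calling code is
 * hypothetical): grid tuning can probe candidate grids without aborting, e.g.
 *
 *   if (!gmx_pme_check_restrictions(pme_order, nkx, nky, nkz,
 *                                   numPmeDomainsAlongX, useThreads, false))
 *   {
 *       // reject this candidate grid and try another one
 *   }
 */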

/*! \brief Return \p enumerator divided by \p denominator, rounded up */
static int div_round_up(int enumerator, int denominator)
{
    return (enumerator + denominator - 1) / denominator;
}
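
/* Example (added commentary): div_round_up(15, 4) == 4, where plain integer
 * division would give 3. */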

gmx_pme_t* gmx_pme_init(const t_commrec*     cr,
                        const NumPmeDomains& numPmeDomains,
                        const t_inputrec*    ir,
                        gmx_bool             bFreeEnergy_q,
                        gmx_bool             bFreeEnergy_lj,
                        gmx_bool             bReproducible,
                        real                 ewaldcoeff_q,
                        real                 ewaldcoeff_lj,
                        int                  nthread,
                        PmeRunMode           runMode,
                        PmeGpu*              pmeGpu,
                        const DeviceContext* deviceContext,
                        const DeviceStream*  deviceStream,
                        const PmeGpuProgram* pmeGpuProgram,
                        const gmx::MDLogger& mdlog)
{
    int  use_threads, sum_use_threads, i;
    ivec ndata;

    if (debug)
    {
        fprintf(debug, "Creating PME data structures.\n");
    }

    gmx::unique_cptr<gmx_pme_t, gmx_pme_destroy> pme(new gmx_pme_t());

    pme->buf_nalloc = 0;

    pme->nnodes  = 1;
    pme->bPPnode = TRUE;

    pme->nnodes_major = numPmeDomains.x;
    pme->nnodes_minor = numPmeDomains.y;

    if (numPmeDomains.x * numPmeDomains.y > 1)
    {
        pme->mpi_comm = cr->mpi_comm_mygroup;

#if GMX_MPI
        MPI_Comm_rank(pme->mpi_comm, &pme->nodeid);
        MPI_Comm_size(pme->mpi_comm, &pme->nnodes);
#endif
        if (pme->nnodes != numPmeDomains.x * numPmeDomains.y)
        {
            gmx_incons("PME rank count mismatch");
        }
    }
    else
    {
        pme->mpi_comm = MPI_COMM_NULL;
    }

    if (pme->nnodes == 1)
    {
        pme->mpi_comm_d[0] = MPI_COMM_NULL;
        pme->mpi_comm_d[1] = MPI_COMM_NULL;
        pme->ndecompdim    = 0;
        pme->nodeid_major  = 0;
        pme->nodeid_minor  = 0;
    }
    else
    {
        if (numPmeDomains.y == 1)
        {
            pme->mpi_comm_d[0] = pme->mpi_comm;
            pme->mpi_comm_d[1] = MPI_COMM_NULL;
            pme->ndecompdim    = 1;
            pme->nodeid_major  = pme->nodeid;
            pme->nodeid_minor  = 0;
        }
        else if (numPmeDomains.x == 1)
        {
            pme->mpi_comm_d[0] = MPI_COMM_NULL;
            pme->mpi_comm_d[1] = pme->mpi_comm;
            pme->ndecompdim    = 1;
            pme->nodeid_major  = 0;
            pme->nodeid_minor  = pme->nodeid;
        }
        else
        {
            if (pme->nnodes % numPmeDomains.x != 0)
            {
                gmx_incons(
                        "For 2D PME decomposition, #PME ranks must be divisible by the number of "
                        "domains along x");
            }
            pme->ndecompdim = 2;

#if GMX_MPI
            MPI_Comm_split(pme->mpi_comm,
                           pme->nodeid % numPmeDomains.y,
                           pme->nodeid,
                           &pme->mpi_comm_d[0]); /* My communicator along major dimension */
            MPI_Comm_split(pme->mpi_comm,
                           pme->nodeid / numPmeDomains.y,
                           pme->nodeid,
                           &pme->mpi_comm_d[1]); /* My communicator along minor dimension */

            MPI_Comm_rank(pme->mpi_comm_d[0], &pme->nodeid_major);
            MPI_Comm_size(pme->mpi_comm_d[0], &pme->nnodes_major);
            MPI_Comm_rank(pme->mpi_comm_d[1], &pme->nodeid_minor);
            MPI_Comm_size(pme->mpi_comm_d[1], &pme->nnodes_minor);
#endif
        }
    }
    // cr is always initialized if there is a PP rank, so we can safely assume
    // that when it is not, as in the Ewald tests, we are not on a PP rank.
    pme->bPPnode = ((cr != nullptr && cr->duty != 0) && thisRankHasDuty(cr, DUTY_PP));

    pme->nthread = nthread;

    /* Check if any of the PME MPI ranks uses threads */
    use_threads = (pme->nthread > 1 ? 1 : 0);
#if GMX_MPI
    if (pme->nnodes > 1)
    {
        MPI_Allreduce(&use_threads, &sum_use_threads, 1, MPI_INT, MPI_SUM, pme->mpi_comm);
    }
    else
#endif
    {
        sum_use_threads = use_threads;
    }
    pme->bUseThreads = (sum_use_threads > 0);

    if (ir->pbcType == PbcType::Screw)
    {
        gmx_fatal(FARGS, "pme does not (yet) work with pbc = screw");
    }

    /* NOTE:
     * It is likely that the current gmx_pme_do() routine supports calculating
     * only Coulomb or LJ while gmx_pme_init() configures for both,
     * but that has never been tested.
     * It is likely that the current gmx_pme_do() routine supports calculating
     * without free energy for Coulomb and/or LJ while gmx_pme_init()
     * configures with free energy, but that has never been tested.
     */
    pme->doCoulomb = EEL_PME(ir->coulombtype);
    pme->doLJ      = EVDW_PME(ir->vdwtype);
    pme->bFEP_q    = ((ir->efep != FreeEnergyPerturbationType::No) && bFreeEnergy_q);
    pme->bFEP_lj   = ((ir->efep != FreeEnergyPerturbationType::No) && bFreeEnergy_lj);
    pme->bFEP      = (pme->bFEP_q || pme->bFEP_lj);
    pme->nkx       = ir->nkx;
    pme->nky       = ir->nky;
    pme->nkz       = ir->nkz;
    pme->bP3M = (ir->coulombtype == CoulombInteractionType::P3mAD || getenv("GMX_PME_P3M") != nullptr);
    pme->pme_order     = ir->pme_order;
    pme->ewaldcoeff_q  = ewaldcoeff_q;
    pme->ewaldcoeff_lj = ewaldcoeff_lj;

    /* Always constant electrostatics coefficients */
    pme->epsilon_r = ir->epsilon_r;

    /* Always constant LJ coefficients */
    pme->ljpme_combination_rule = ir->ljpme_combination_rule;

    // The box requires scaling with nwalls = 2, we store that condition as well
    // as the scaling factor
    pme->boxScaler = std::make_unique<EwaldBoxZScaler>(
            EwaldBoxZScaler(inputrecPbcXY2Walls(ir), ir->wall_ewald_zfac));

    /* If we violate restrictions, generate a fatal error here */
    gmx_pme_check_restrictions(
            pme->pme_order, pme->nkx, pme->nky, pme->nkz, pme->nnodes_major, pme->bUseThreads, true);

    if (pme->nnodes > 1)
    {
        double imbal;

#if GMX_MPI
        MPI_Type_contiguous(DIM, GMX_MPI_REAL, &(pme->rvec_mpi));
        MPI_Type_commit(&(pme->rvec_mpi));
#endif

        /* Note that the coefficient spreading and force gathering, which usually
         * takes about the same amount of time as FFT+solve_pme,
         * is always fully load balanced
         * (unless the coefficient distribution is inhomogeneous).
         */

        imbal = estimate_pme_load_imbalance(pme.get());
        if (imbal >= 1.2 && pme->nodeid_major == 0 && pme->nodeid_minor == 0)
        {
            GMX_LOG(mdlog.warning)
                    .asParagraph()
                    .appendTextFormatted(
                            "NOTE: The load imbalance in PME FFT and solve is %d%%.\n"
                            "      For optimal PME load balancing\n"
                            "      PME grid_x (%d) and grid_y (%d) should be divisible by "
                            "#PME_ranks_x "
                            "(%d)\n"
                            "      and PME grid_y (%d) and grid_z (%d) should be divisible by "
                            "#PME_ranks_y "
                            "(%d)",
                            gmx::roundToInt((imbal - 1) * 100),
                            pme->nkx,
                            pme->nky,
                            pme->nnodes_major,
                            pme->nky,
                            pme->nkz,
                            pme->nnodes_minor);
        }
    }

    /* For a non-divisible grid we need pme_order instead of pme_order-1 */
    /* In sum_qgrid_dd the x overlap is copied in place, so take padding into
     * account. y is always copied through a buffer: we don't need padding in
     * z, but we do need the overlap in x because of the communication order.
     */
    init_overlap_comm(&pme->overlap[0],
                      pme->pme_order,
                      pme->mpi_comm_d[0],
                      pme->nnodes_major,
                      pme->nodeid_major,
                      pme->nkx,
                      (div_round_up(pme->nky, pme->nnodes_minor) + pme->pme_order)
                              * (pme->nkz + pme->pme_order - 1));

    /* Along overlap dim 1 we can send in multiple pulses in sum_fftgrid_dd.
     * We do this with an offset buffer of equal size, so we need to allocate
     * extra for the offset. That's what the (+1)*pme->nkz is for.
     */
    init_overlap_comm(&pme->overlap[1],
                      pme->pme_order,
                      pme->mpi_comm_d[1],
                      pme->nnodes_minor,
                      pme->nodeid_minor,
                      pme->nky,
                      (div_round_up(pme->nkx, pme->nnodes_major) + pme->pme_order + 1) * pme->nkz);

    /* Double-check for a limitation of the (current) sum_fftgrid_dd code.
     * Note that gmx_pme_check_restrictions checked for this already.
     */
    if (pme->bUseThreads && (pme->overlap[0].comm_data.size() > 1))
    {
        gmx_incons(
                "More than one communication pulse required for grid overlap communication along "
                "the major dimension while using threads");
    }

    snew(pme->bsp_mod[XX], pme->nkx);
    snew(pme->bsp_mod[YY], pme->nky);
    snew(pme->bsp_mod[ZZ], pme->nkz);

    pme->gpu     = pmeGpu; /* Carrying over the single GPU structure */
    pme->runMode = runMode;

    /* The required size of the interpolation grid, including overlap.
     * The allocated size (pmegrid_n?) might be slightly larger.
     */
    pme->pmegrid_nx = pme->overlap[0].s2g1[pme->nodeid_major] - pme->overlap[0].s2g0[pme->nodeid_major];
    pme->pmegrid_ny = pme->overlap[1].s2g1[pme->nodeid_minor] - pme->overlap[1].s2g0[pme->nodeid_minor];
    pme->pmegrid_nz_base = pme->nkz;
    pme->pmegrid_nz      = pme->pmegrid_nz_base + pme->pme_order - 1;
    set_grid_alignment(&pme->pmegrid_nz, pme->pme_order);
    pme->pmegrid_start_ix = pme->overlap[0].s2g0[pme->nodeid_major];
    pme->pmegrid_start_iy = pme->overlap[1].s2g0[pme->nodeid_minor];
    pme->pmegrid_start_iz = 0;

    make_gridindex_to_localindex(
            pme->nkx, pme->pmegrid_start_ix, pme->pmegrid_nx - (pme->pme_order - 1), &pme->nnx, &pme->fshx);
    make_gridindex_to_localindex(
            pme->nky, pme->pmegrid_start_iy, pme->pmegrid_ny - (pme->pme_order - 1), &pme->nny, &pme->fshy);
    make_gridindex_to_localindex(
            pme->nkz, pme->pmegrid_start_iz, pme->pmegrid_nz_base, &pme->nnz, &pme->fshz);

    pme->spline_work = make_pme_spline_work(pme->pme_order);

    ndata[0] = pme->nkx;
    ndata[1] = pme->nky;
    ndata[2] = pme->nkz;
    /* It doesn't matter if we allocate too many grids here,
     * we only allocate and use the ones we need.
     */
    if (pme->doLJ)
    {
        pme->ngrids = ((ir->ljpme_combination_rule == LongRangeVdW::LB) ? DO_Q_AND_LJ_LB : DO_Q_AND_LJ);
    }
    else
    {
        pme->ngrids = DO_Q;
    }
    snew(pme->fftgrid, pme->ngrids);
    snew(pme->cfftgrid, pme->ngrids);
    snew(pme->pfft_setup, pme->ngrids);

    for (i = 0; i < pme->ngrids; ++i)
    {
        if ((i < DO_Q && pme->doCoulomb && (i == 0 || bFreeEnergy_q))
            || (i >= DO_Q && pme->doLJ
                && (i == 2 || bFreeEnergy_lj || ir->ljpme_combination_rule == LongRangeVdW::LB)))
        {
            pmegrids_init(&pme->pmegrid[i],
                          pme->pmegrid_nx,
                          pme->pmegrid_ny,
                          pme->pmegrid_nz,
                          pme->pmegrid_nz_base,
                          pme->pme_order,
                          pme->bUseThreads,
                          pme->nthread,
                          pme->overlap[0].s2g1[pme->nodeid_major]
                                  - pme->overlap[0].s2g0[pme->nodeid_major + 1],
                          pme->overlap[1].s2g1[pme->nodeid_minor]
                                  - pme->overlap[1].s2g0[pme->nodeid_minor + 1]);
            /* This routine will allocate the grid data to fit the FFTs */
            const auto allocateRealGridForGpu = (pme->runMode == PmeRunMode::Mixed)
                                                        ? gmx::PinningPolicy::PinnedIfSupported
                                                        : gmx::PinningPolicy::CannotBePinned;
            gmx_parallel_3dfft_init(&pme->pfft_setup[i],
                                    ndata,
                                    &pme->fftgrid[i],
                                    &pme->cfftgrid[i],
                                    pme->mpi_comm_d,
                                    bReproducible,
                                    pme->nthread,
                                    allocateRealGridForGpu);
        }
    }

    if (!pme->bP3M)
    {
        /* Use plain SPME B-spline interpolation */
        make_bspline_moduli(pme->bsp_mod, pme->nkx, pme->nky, pme->nkz, pme->pme_order);
    }
    else
    {
        /* Use the P3M grid-optimized influence function */
        make_p3m_bspline_moduli(pme->bsp_mod, pme->nkx, pme->nky, pme->nkz, pme->pme_order);
    }

    /* Use atc[0] for spreading */
    const int firstDimIndex   = (numPmeDomains.x > 1 ? 0 : 1);
    MPI_Comm  mpiCommFirstDim = (pme->nnodes > 1 ? pme->mpi_comm_d[firstDimIndex] : MPI_COMM_NULL);
    bool      doSpread        = true;
    pme->atc.emplace_back(mpiCommFirstDim, pme->nthread, pme->pme_order, firstDimIndex, doSpread);
    if (pme->ndecompdim >= 2)
    {
        const int secondDimIndex = 1;
        doSpread                 = false;
        pme->atc.emplace_back(pme->mpi_comm_d[1], pme->nthread, pme->pme_order, secondDimIndex, doSpread);
    }

    // Initial check of validity of the input for running on the GPU
    if (pme->runMode != PmeRunMode::CPU)
    {
        std::string errorString;
        bool        canRunOnGpu = pme_gpu_check_restrictions(pme.get(), &errorString);
        if (!canRunOnGpu)
        {
            GMX_THROW(gmx::NotImplementedError(errorString));
        }
        pme_gpu_reinit(pme.get(), deviceContext, deviceStream, pmeGpuProgram);
    }
    else
    {
        GMX_ASSERT(pme->gpu == nullptr, "Should not have PME GPU object when PME is on a CPU.");
    }


    pme_init_all_work(&pme->solve_work, pme->nthread, pme->nkx);

    // no exception was thrown during the init, so we hand over the PME structure handle
    return pme.release();
}

void gmx_pme_reinit(struct gmx_pme_t** pmedata,
                    const t_commrec*   cr,
                    struct gmx_pme_t*  pme_src,
                    const t_inputrec*  ir,
                    const ivec         grid_size,
                    real               ewaldcoeff_q,
                    real               ewaldcoeff_lj)
{
    // Create a copy of t_inputrec fields that are used in gmx_pme_init().
    // TODO: This would be better as just copying a sub-structure that contains
    // all the PME parameters and nothing else.
    t_inputrec irc;
    irc.pbcType                = ir->pbcType;
    irc.coulombtype            = ir->coulombtype;
    irc.vdwtype                = ir->vdwtype;
    irc.efep                   = ir->efep;
    irc.pme_order              = ir->pme_order;
    irc.epsilon_r              = ir->epsilon_r;
    irc.ljpme_combination_rule = ir->ljpme_combination_rule;
    irc.nkx                    = grid_size[XX];
    irc.nky                    = grid_size[YY];
    irc.nkz                    = grid_size[ZZ];

    try
    {
        // This is reinit. Any logging should have been done at first init.
        // Here we should avoid writing notes for settings the user did not
        // set directly.
        const gmx::MDLogger dummyLogger;
        GMX_ASSERT(pmedata, "Invalid PME pointer");
        NumPmeDomains numPmeDomains = { pme_src->nnodes_major, pme_src->nnodes_minor };
        *pmedata                    = gmx_pme_init(cr,
                                numPmeDomains,
                                &irc,
                                pme_src->bFEP_q,
                                pme_src->bFEP_lj,
                                FALSE,
                                ewaldcoeff_q,
                                ewaldcoeff_lj,
                                pme_src->nthread,
                                pme_src->runMode,
                                pme_src->gpu,
                                nullptr,
                                nullptr,
                                nullptr,
                                dummyLogger);
        /* When running PME on the CPU not using domain decomposition,
         * the atom data is allocated once only in gmx_pme_(re)init().
         */
        if (!pme_src->gpu && pme_src->nnodes == 1)
        {
            gmx_pme_reinit_atoms(*pmedata, pme_src->atc[0].numAtoms(), {}, {});
        }
        // TODO this is mostly passing around current values
    }
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR

    /* We can easily reuse the allocated pme grids in pme_src */
    reuse_pmegrids(&pme_src->pmegrid[PME_GRID_QA], &(*pmedata)->pmegrid[PME_GRID_QA]);
    /* We would like to reuse the fft grids, but that's harder */
}

real gmx_pme_calc_energy(gmx_pme_t* pme, gmx::ArrayRef<const gmx::RVec> x, gmx::ArrayRef<const real> q)
{
    pmegrids_t* grid;

    if (pme->nnodes > 1)
    {
        gmx_incons("gmx_pme_calc_energy called in parallel");
    }
    if (pme->bFEP_q)
    {
        gmx_incons("gmx_pme_calc_energy with free energy");
    }

    if (!pme->atc_energy)
    {
        pme->atc_energy = std::make_unique<PmeAtomComm>(MPI_COMM_NULL, 1, pme->pme_order, 0, true);
    }
    PmeAtomComm* atc = pme->atc_energy.get();
    atc->setNumAtoms(x.ssize());
    atc->x           = x;
    atc->coefficient = q;

    /* We only use the A-charges grid */
    grid = &pme->pmegrid[PME_GRID_QA];

    /* Only calculate the spline coefficients, don't actually spread */
    spread_on_grid(pme, atc, nullptr, TRUE, FALSE, pme->fftgrid[PME_GRID_QA], FALSE, PME_GRID_QA);

    return gather_energy_bsplines(pme, grid->grid.grid, atc);
}
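
/* Illustrative usage sketch (added commentary; the calling code is
 * hypothetical): this entry point serves serial, A-state-only energy
 * evaluations, e.g. by test code:
 *
 *   real energy = gmx_pme_calc_energy(pme, x, q);
 *
 * No forces are produced and only the A-charge grid is used. */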

/*! \brief Calculate initial Lorentz-Berthelot coefficients for LJ-PME */
static void calc_initial_lb_coeffs(gmx::ArrayRef<real>       coefficient,
                                   gmx::ArrayRef<const real> local_c6,
                                   gmx::ArrayRef<const real> local_sigma)
{
    for (gmx::index i = 0; i < coefficient.ssize(); ++i)
    {
        real sigma4    = local_sigma[i];
        sigma4         = sigma4 * sigma4;
        sigma4         = sigma4 * sigma4;
        coefficient[i] = local_c6[i] / sigma4;
    }
}

/*! \brief Calculate next Lorentz-Berthelot coefficients for LJ-PME */
static void calc_next_lb_coeffs(gmx::ArrayRef<real> coefficient, gmx::ArrayRef<const real> local_sigma)
{
    for (gmx::index i = 0; i < coefficient.ssize(); ++i)
    {
        coefficient[i] *= local_sigma[i];
    }
}
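
/* Added commentary: together these two helpers generate the per-atom
 * coefficients for the seven LJ-PME grids used with Lorentz-Berthelot rules.
 * Starting from c6/sigma^4 and multiplying by sigma before each of the seven
 * spreads yields c6 * sigma^(k-3) for k = 0..6, i.e. the per-atom factors of
 * the binomial expansion of (sigma_i + sigma_j)^6 combined in the LB loop of
 * gmx_pme_do() below. */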

int gmx_pme_do(struct gmx_pme_t*              pme,
               gmx::ArrayRef<const gmx::RVec> coordinates,
               gmx::ArrayRef<gmx::RVec>       forces,
               gmx::ArrayRef<const real>      chargeA,
               gmx::ArrayRef<const real>      chargeB,
               gmx::ArrayRef<const real>      c6A,
               gmx::ArrayRef<const real>      c6B,
               gmx::ArrayRef<const real>      sigmaA,
               gmx::ArrayRef<const real>      sigmaB,
               const matrix                   box,
               const t_commrec*               cr,
               int                            maxshift_x,
               int                            maxshift_y,
               t_nrnb*                        nrnb,
               gmx_wallcycle*                 wcycle,
               matrix                         vir_q,
               matrix                         vir_lj,
               real*                          energy_q,
               real*                          energy_lj,
               real                           lambda_q,
               real                           lambda_lj,
               real*                          dvdlambda_q,
               real*                          dvdlambda_lj,
               const gmx::StepWorkload&       stepWork)
{
    GMX_ASSERT(pme->runMode == PmeRunMode::CPU,
               "gmx_pme_do should not be called on the GPU PME run.");

    PmeAtomComm&              atc     = pme->atc[0];
    pmegrids_t*               pmegrid = nullptr;
    real*                     grid    = nullptr;
    gmx::ArrayRef<const real> coefficient;
    std::array<PmeOutput, 2>  output; // The second is used for the B state with FEP
    gmx_parallel_3dfft_t      pfft_setup;
    real*                     fftgrid;
    t_complex*                cfftgrid;
    int                       thread;
    const int                 fep_states_lj = pme->bFEP_lj ? 2 : 1;
    // There's no support for computing energy without virial, or vice versa
    const bool computeEnergyAndVirial = (stepWork.computeEnergy || stepWork.computeVirial);

    /* We could be passing lambda!=0 while no q or LJ is actually perturbed */
    if (!pme->bFEP_q)
    {
        lambda_q = 0;
    }
    if (!pme->bFEP_lj)
    {
        lambda_lj = 0;
    }

    assert(pme->nnodes > 0);
    assert(pme->nnodes == 1 || pme->ndecompdim > 0);

    if (pme->nnodes > 1)
    {
        atc.pd.resize(coordinates.ssize());
        for (int d = pme->ndecompdim - 1; d >= 0; d--)
        {
            PmeAtomComm& atc = pme->atc[d];
            atc.maxshift     = (atc.dimind == 0 ? maxshift_x : maxshift_y);
        }
    }
    else
    {
        GMX_ASSERT(coordinates.ssize() == atc.numAtoms(), "We expect atc.numAtoms() coordinates");
        GMX_ASSERT(forces.ssize() >= atc.numAtoms(),
                   "We need a force buffer with at least atc.numAtoms() elements");

        atc.x = coordinates;
        atc.f = forces;
    }

    matrix scaledBox;
    pme->boxScaler->scaleBox(box, scaledBox);

    gmx::invertBoxMatrix(scaledBox, pme->recipbox);
    bool bFirst = true;

    /* For simplicity, we construct the splines for all particles if
     * more than one PME calculation is needed. Some optimization
     * could be done by keeping track of which atoms have splines
     * constructed, and constructing new splines on each pass for atoms
     * that don't yet have them.
     */
1096
1097     bool bDoSplines = pme->bFEP || (pme->doCoulomb && pme->doLJ);
1098
1099     /* We need a maximum of four separate PME calculations:
1100      * grid_index=0: Coulomb PME with charges from state A
1101      * grid_index=1: Coulomb PME with charges from state B
1102      * grid_index=2: LJ PME with C6 from state A
1103      * grid_index=3: LJ PME with C6 from state B
1104      * For Lorentz-Berthelot combination rules, a separate loop is used to
1105      * calculate all the terms
1106      */
1107
1108     /* If we are doing LJ-PME with LB, we only do Q here */
1109     const int max_grid_index = (pme->ljpme_combination_rule == LongRangeVdW::LB) ? DO_Q : DO_Q_AND_LJ;
1110     bool      bClearF;
1111     for (int grid_index = 0; grid_index < max_grid_index; ++grid_index)
1112     {
1113         /* Check if we should do calculations at this grid_index
1114          * If grid_index is odd we should be doing FEP
1115          * If grid_index < 2 we should be doing electrostatic PME
1116          * If grid_index >= 2 we should be doing LJ-PME
1117          */
1118         if ((grid_index < DO_Q && (!pme->doCoulomb || (grid_index == 1 && !pme->bFEP_q)))
1119             || (grid_index >= DO_Q && (!pme->doLJ || (grid_index == 3 && !pme->bFEP_lj))))
1120         {
1121             continue;
1122         }
1123         /* Unpack structure */
1124         pmegrid    = &pme->pmegrid[grid_index];
1125         fftgrid    = pme->fftgrid[grid_index];
1126         cfftgrid   = pme->cfftgrid[grid_index];
1127         pfft_setup = pme->pfft_setup[grid_index];
1128         switch (grid_index)
1129         {
1130             case 0: coefficient = chargeA; break;
1131             case 1: coefficient = chargeB; break;
1132             case 2: coefficient = c6A; break;
1133             case 3: coefficient = c6B; break;
1134         }
1135
1136         grid = pmegrid->grid.grid;
1137
1138         if (pme->nnodes == 1)
1139         {
1140             atc.coefficient = coefficient;
1141         }
1142         else
1143         {
1144             wallcycle_start(wcycle, WallCycleCounter::PmeRedistXF);
1145             do_redist_pos_coeffs(pme, cr, bFirst, coordinates, coefficient);
1146
1147             wallcycle_stop(wcycle, WallCycleCounter::PmeRedistXF);
1148         }
1149
1150         wallcycle_start(wcycle, WallCycleCounter::PmeSpread);
1151
1152         /* Spread the coefficients on a grid */
1153         spread_on_grid(pme, &atc, pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);
1154
1155         if (bFirst)
1156         {
1157             inc_nrnb(nrnb, eNR_WEIGHTS, DIM * atc.numAtoms());
1158         }
1159         inc_nrnb(nrnb, eNR_SPREADBSP, pme->pme_order * pme->pme_order * pme->pme_order * atc.numAtoms());
1160
1161         if (!pme->bUseThreads)
1162         {
1163             wrap_periodic_pmegrid(pme, grid);
1164
1165             /* sum contributions to local grid from other nodes */
1166             if (pme->nnodes > 1)
1167             {
1168                 gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
1169             }
1170
1171             copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
1172         }
1173
1174         wallcycle_stop(wcycle, WallCycleCounter::PmeSpread);
1175
1176         /* TODO If the OpenMP and single-threaded implementations
1177            converge, then spread_on_grid() and
1178            copy_pmegrid_to_fftgrid() will perhaps live in the same
1179            source file.
1180         */
1181
1182         /* Here we start a large thread parallel region */
1183 #pragma omp parallel num_threads(pme->nthread) private(thread)
1184         {
1185             try
1186             {
1187                 thread = gmx_omp_get_thread_num();
1188                 int loop_count;
1189
1190                 /* do 3d-fft */
1191                 if (thread == 0)
1192                 {
1193                     wallcycle_start(wcycle, WallCycleCounter::PmeFft);
1194                 }
1195                 gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX, thread, wcycle);
1196                 if (thread == 0)
1197                 {
1198                     wallcycle_stop(wcycle, WallCycleCounter::PmeFft);
1199                 }
1200
1201                 /* solve in k-space for our local cells */
1202                 if (thread == 0)
1203                 {
1204                     wallcycle_start(
1205                             wcycle,
1206                             (grid_index < DO_Q ? WallCycleCounter::PmeSolve : WallCycleCounter::LJPme));
1207                 }
1208                 if (grid_index < DO_Q)
1209                 {
1210                     loop_count = solve_pme_yzx(pme,
1211                                                cfftgrid,
1212                                                scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
1213                                                computeEnergyAndVirial,
1214                                                pme->nthread,
1215                                                thread);
1216                 }
1217                 else
1218                 {
1219                     loop_count =
1220                             solve_pme_lj_yzx(pme,
1221                                              &cfftgrid,
1222                                              FALSE,
1223                                              scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
1224                                              computeEnergyAndVirial,
1225                                              pme->nthread,
1226                                              thread);
1227                 }
1228
1229                 if (thread == 0)
1230                 {
1231                     wallcycle_stop(
1232                             wcycle,
1233                             (grid_index < DO_Q ? WallCycleCounter::PmeSolve : WallCycleCounter::LJPme));
1234                     inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
1235                 }
1236
1237                 /* do 3d-invfft */
1238                 if (thread == 0)
1239                 {
1240                     wallcycle_start(wcycle, WallCycleCounter::PmeFft);
1241                 }
1242                 gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL, thread, wcycle);
1243                 if (thread == 0)
1244                 {
1245                     wallcycle_stop(wcycle, WallCycleCounter::PmeFft);
1246
1247
1248                     if (pme->nodeid == 0)
1249                     {
1250                         real      ntot = pme->nkx * pme->nky * pme->nkz;
1251                         const int npme = static_cast<int>(ntot * std::log(ntot) / std::log(2.0));
1252                         inc_nrnb(nrnb, eNR_FFT, 2 * npme);
1253                     }
1254
1255                     /* Note: this wallcycle region is closed below
1256                        outside an OpenMP region, so take care if
1257                        refactoring code here. */
1258                     wallcycle_start(wcycle, WallCycleCounter::PmeGather);
1259                 }
1260
1261                 copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
1262             }
1263             GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
1264         }
1265         /* End of thread parallel section.
1266          * With MPI we have to synchronize here before gmx_sum_qgrid_dd.
1267          */
1268
1269         /* distribute local grid to all nodes */
1270         if (pme->nnodes > 1)
1271         {
1272             gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
1273         }
1274
1275         unwrap_periodic_pmegrid(pme, grid);
1276
1277         if (stepWork.computeForces)
1278         {
1279             /* interpolate forces for our local atoms */
1280
1281
1282             /* If we are running without parallelization,
1283              * atc->f is the actual force array, not a buffer,
1284              * therefore we should not clear it.
1285              */
1286             real lambda = grid_index < DO_Q ? lambda_q : lambda_lj;
1287             bClearF     = (bFirst && PAR(cr));
1288 #pragma omp parallel for num_threads(pme->nthread) schedule(static)
1289             for (thread = 0; thread < pme->nthread; thread++)
1290             {
1291                 try
1292                 {
1293                     gather_f_bsplines(pme,
1294                                       grid,
1295                                       bClearF,
1296                                       &atc,
1297                                       &atc.spline[thread],
1298                                       pme->bFEP ? (grid_index % 2 == 0 ? 1.0 - lambda : lambda) : 1.0);
1299                 }
1300                 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
1301             }
1302
1303
1304             inc_nrnb(nrnb, eNR_GATHERFBSP, pme->pme_order * pme->pme_order * pme->pme_order * atc.numAtoms());
1305             /* Note: this wallcycle region is opened above inside an OpenMP
1306                region, so take care if refactoring code here. */
1307             wallcycle_stop(wcycle, WallCycleCounter::PmeGather);
1308         }
1309
1310         if (computeEnergyAndVirial)
1311         {
1312             /* This should only be called on the master thread
1313              * and after the threads have synchronized.
1314              */
1315             if (grid_index < 2)
1316             {
1317                 get_pme_ener_vir_q(pme->solve_work, pme->nthread, &output[grid_index % 2]);
1318             }
1319             else
1320             {
1321                 get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &output[grid_index % 2]);
1322             }
1323         }
1324         bFirst = FALSE;
1325     } /* of grid_index-loop */
1326
1327     /* For Lorentz-Berthelot combination rules in LJ-PME, we need to calculate
1328      * seven terms. */
1329
    if (pme->doLJ && pme->ljpme_combination_rule == LongRangeVdW::LB)
    {
        /* Loop over A- and B-state if we are doing FEP */
        for (int fep_state = 0; fep_state < fep_states_lj; ++fep_state)
        {
            std::vector<real>         local_c6;
            std::vector<real>         local_sigma;
            gmx::ArrayRef<const real> RedistC6;
            gmx::ArrayRef<const real> RedistSigma;
            gmx::ArrayRef<real>       coefficientBuffer;
            if (pme->nnodes == 1)
            {
                pme->lb_buf1.resize(atc.numAtoms());
                coefficientBuffer = pme->lb_buf1;
                switch (fep_state)
                {
                    case 0:
                        local_c6.assign(c6A.begin(), c6A.end());
                        local_sigma.assign(sigmaA.begin(), sigmaA.end());
                        break;
                    case 1:
                        local_c6.assign(c6B.begin(), c6B.end());
                        local_sigma.assign(sigmaB.begin(), sigmaB.end());
                        break;
                    default: gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
                }
            }
            else
            {
                coefficientBuffer = atc.coefficientBuffer;
                switch (fep_state)
                {
                    case 0:
                        RedistC6    = c6A;
                        RedistSigma = sigmaA;
                        break;
                    case 1:
                        RedistC6    = c6B;
                        RedistSigma = sigmaB;
                        break;
                    default: gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
                }
                wallcycle_start(wcycle, WallCycleCounter::PmeRedistXF);

                do_redist_pos_coeffs(pme, cr, bFirst, coordinates, RedistC6);
                pme->lb_buf1.resize(atc.numAtoms());
                pme->lb_buf2.resize(atc.numAtoms());
                local_c6.assign(pme->lb_buf1.begin(), pme->lb_buf1.end());
                for (int i = 0; i < atc.numAtoms(); ++i)
                {
                    local_c6[i] = atc.coefficient[i];
                }

                do_redist_pos_coeffs(pme, cr, FALSE, coordinates, RedistSigma);
                local_sigma.assign(pme->lb_buf2.begin(), pme->lb_buf2.end());
                for (int i = 0; i < atc.numAtoms(); ++i)
                {
                    local_sigma[i] = atc.coefficient[i];
                }

                wallcycle_stop(wcycle, WallCycleCounter::PmeRedistXF);
            }
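            /* Note: do_redist_pos_coeffs() deposits the redistributed values
             * in atc.coefficient, so local_c6 and local_sigma above are
             * snapshots taken before that buffer is reused for the LB
             * coefficients below. */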
            atc.coefficient = coefficientBuffer;
            calc_initial_lb_coeffs(coefficientBuffer, local_c6, local_sigma);

            /* Seven terms in LJ-PME with LB; grid_index < 2 is reserved for electrostatics */
            for (int grid_index = 2; grid_index < 9; ++grid_index)
            {
                /* Unpack structure */
                pmegrid    = &pme->pmegrid[grid_index];
                fftgrid    = pme->fftgrid[grid_index];
                pfft_setup = pme->pfft_setup[grid_index];
                calc_next_lb_coeffs(coefficientBuffer, local_sigma);
                grid = pmegrid->grid.grid;

                wallcycle_start(wcycle, WallCycleCounter::PmeSpread);
                /* Spread the c6 on a grid */
                spread_on_grid(pme, &atc, pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);

                if (bFirst)
                {
                    inc_nrnb(nrnb, eNR_WEIGHTS, DIM * atc.numAtoms());
                }

                inc_nrnb(nrnb,
                         eNR_SPREADBSP,
                         pme->pme_order * pme->pme_order * pme->pme_order * atc.numAtoms());
                if (pme->nthread == 1)
                {
                    wrap_periodic_pmegrid(pme, grid);
                    /* sum contributions to local grid from other nodes */
                    if (pme->nnodes > 1)
                    {
                        gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
                    }
                    copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
                }
                wallcycle_stop(wcycle, WallCycleCounter::PmeSpread);

                /* Here we start a large thread parallel region */
#pragma omp parallel num_threads(pme->nthread) private(thread)
                {
                    try
                    {
                        thread = gmx_omp_get_thread_num();
                        /* do 3d-fft */
                        if (thread == 0)
                        {
                            wallcycle_start(wcycle, WallCycleCounter::PmeFft);
                        }

                        gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX, thread, wcycle);
                        if (thread == 0)
                        {
                            wallcycle_stop(wcycle, WallCycleCounter::PmeFft);
                        }
                    }
                    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
                }
                bFirst = FALSE;
            }
            /* solve in k-space for our local cells */
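            /* solve_pme_lj_yzx() is called with its LB flag set, so it
             * combines the seven coefficient grids in reciprocal space; this
             * is what couples the binomial terms back into pair interactions. */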
#pragma omp parallel num_threads(pme->nthread) private(thread)
            {
                try
                {
                    int loop_count;
                    thread = gmx_omp_get_thread_num();
                    if (thread == 0)
                    {
                        wallcycle_start(wcycle, WallCycleCounter::LJPme);
                    }

                    loop_count =
                            solve_pme_lj_yzx(pme,
                                             &pme->cfftgrid[2],
                                             TRUE,
                                             scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
                                             computeEnergyAndVirial,
                                             pme->nthread,
                                             thread);
                    if (thread == 0)
                    {
                        wallcycle_stop(wcycle, WallCycleCounter::LJPme);
                        inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
                    }
                }
                GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
            }

            if (computeEnergyAndVirial)
            {
                /* This should only be called on the master thread and
                 * after the threads have synchronized.
                 */
                get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &output[fep_state]);
            }

            bFirst = !pme->doCoulomb;
            calc_initial_lb_coeffs(coefficientBuffer, local_c6, local_sigma);
            for (int grid_index = 8; grid_index >= 2; --grid_index)
            {
                /* Unpack structure */
                pmegrid    = &pme->pmegrid[grid_index];
                fftgrid    = pme->fftgrid[grid_index];
                pfft_setup = pme->pfft_setup[grid_index];
                grid       = pmegrid->grid.grid;
                calc_next_lb_coeffs(coefficientBuffer, local_sigma);
#pragma omp parallel num_threads(pme->nthread) private(thread)
                {
                    try
                    {
                        thread = gmx_omp_get_thread_num();
                        /* do 3d-invfft */
                        if (thread == 0)
                        {
                            wallcycle_start(wcycle, WallCycleCounter::PmeFft);
                        }

                        gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL, thread, wcycle);
                        if (thread == 0)
                        {
                            wallcycle_stop(wcycle, WallCycleCounter::PmeFft);


                            if (pme->nodeid == 0)
                            {
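                                /* Rough operation-count estimate: a 3D FFT
                                 * over N = nkx*nky*nkz points costs on the
                                 * order of N*log2(N) operations; the product
                                 * is formed in floating point to avoid int
                                 * overflow on very large grids. */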
                                const real ntot = static_cast<real>(pme->nkx) * pme->nky * pme->nkz;
                                const int  npme = static_cast<int>(ntot * std::log(ntot) / std::log(2.0));
                                inc_nrnb(nrnb, eNR_FFT, 2 * npme);
                            }
                            wallcycle_start(wcycle, WallCycleCounter::PmeGather);
                        }

                        copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
                    }
                    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
                } /* #pragma omp parallel */

                /* distribute local grid to all nodes */
                if (pme->nnodes > 1)
                {
                    gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
                }

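                /* Copy interior grid values back out to the periodic overlap
                 * region, so that the force gather below can interpolate near
                 * the box edges without explicit wrapping. */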
                unwrap_periodic_pmegrid(pme, grid);

                if (stepWork.computeForces)
                {
                    /* interpolate forces for our local atoms */
                    bClearF    = (bFirst && PAR(cr));
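                    /* Weight this term's forces: linear FEP interpolation
                     * between the A and B states, times the per-term weight
                     * lb_scale_factor[grid_index - 2] (the binomial factors
                     * C(6,k)/2^6, defined elsewhere in this module). */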
                    real scale = pme->bFEP ? (fep_state < 1 ? 1.0 - lambda_lj : lambda_lj) : 1.0;
                    scale *= lb_scale_factor[grid_index - 2];

#pragma omp parallel for num_threads(pme->nthread) schedule(static)
                    for (thread = 0; thread < pme->nthread; thread++)
                    {
                        try
                        {
                            gather_f_bsplines(
                                    pme, grid, bClearF, &pme->atc[0], &pme->atc[0].spline[thread], scale);
                        }
                        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
                    }


                    inc_nrnb(nrnb,
                             eNR_GATHERFBSP,
                             pme->pme_order * pme->pme_order * pme->pme_order * pme->atc[0].numAtoms());
                }
                wallcycle_stop(wcycle, WallCycleCounter::PmeGather);

                bFirst = FALSE;
            } /* for (grid_index = 8; grid_index >= 2; --grid_index) */
        }     /* for (fep_state = 0; fep_state < fep_states_lj; ++fep_state) */
    }         /* if (pme->doLJ && pme->ljpme_combination_rule == LongRangeVdW::LB) */

    if (stepWork.computeForces && pme->nnodes > 1)
    {
        wallcycle_start(wcycle, WallCycleCounter::PmeRedistXF);
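        /* Redistribute forces back over the decomposition dimensions:
         * intermediate dimensions deposit into the next dimension's
         * atom-communication struct, the final one into the caller's
         * force buffer. */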
        for (int d = 0; d < pme->ndecompdim; d++)
        {
            gmx::ArrayRef<gmx::RVec> forcesRef;
            if (d == pme->ndecompdim - 1)
            {
                const size_t numAtoms = coordinates.size();
                GMX_ASSERT(forces.size() >= numAtoms, "Need at least numAtoms forces");
                forcesRef = forces.subArray(0, numAtoms);
            }
            else
            {
                forcesRef = pme->atc[d + 1].f;
            }
            if (DOMAINDECOMP(cr))
            {
                dd_pmeredist_f(pme, &pme->atc[d], forcesRef, d == pme->ndecompdim - 1 && pme->bPPnode);
            }
        }

        wallcycle_stop(wcycle, WallCycleCounter::PmeRedistXF);
    }

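    /* With FEP the grid contributions are combined by linear interpolation
     * in lambda:
     *
     *   E(lambda)  = (1 - lambda) * E_A + lambda * E_B
     *   dE/dlambda = E_B - E_A
     *
     * and the virial tensors are mixed with the same weights. */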
    if (computeEnergyAndVirial)
    {
        if (pme->doCoulomb)
        {
            if (!pme->bFEP_q)
            {
                *energy_q = output[0].coulombEnergy_;
                m_add(vir_q, output[0].coulombVirial_, vir_q);
            }
            else
            {
                *energy_q = (1.0 - lambda_q) * output[0].coulombEnergy_ + lambda_q * output[1].coulombEnergy_;
                *dvdlambda_q += output[1].coulombEnergy_ - output[0].coulombEnergy_;
                for (int i = 0; i < DIM; i++)
                {
                    for (int j = 0; j < DIM; j++)
                    {
                        vir_q[i][j] += (1.0 - lambda_q) * output[0].coulombVirial_[i][j]
                                       + lambda_q * output[1].coulombVirial_[i][j];
                    }
                }
            }
        }
        else
        {
            *energy_q = 0;
        }

        if (pme->doLJ)
        {
            if (!pme->bFEP_lj)
            {
                *energy_lj = output[0].lennardJonesEnergy_;
                m_add(vir_lj, output[0].lennardJonesVirial_, vir_lj);
            }
            else
            {
                *energy_lj = (1.0 - lambda_lj) * output[0].lennardJonesEnergy_
                             + lambda_lj * output[1].lennardJonesEnergy_;
                *dvdlambda_lj += output[1].lennardJonesEnergy_ - output[0].lennardJonesEnergy_;
                for (int i = 0; i < DIM; i++)
                {
                    for (int j = 0; j < DIM; j++)
                    {
                        vir_lj[i][j] += (1.0 - lambda_lj) * output[0].lennardJonesVirial_[i][j]
                                        + lambda_lj * output[1].lennardJonesVirial_[i][j];
                    }
                }
            }
        }
        else
        {
            *energy_lj = 0;
        }
    }
    return 0;
}

void gmx_pme_destroy(gmx_pme_t* pme)
{
    if (!pme)
    {
        return;
    }

    sfree(pme->nnx);
    sfree(pme->nny);
    sfree(pme->nnz);
    sfree(pme->fshx);
    sfree(pme->fshy);
    sfree(pme->fshz);

    for (int i = 0; i < pme->ngrids; ++i)
    {
        pmegrids_destroy(&pme->pmegrid[i]);
    }
    if (pme->pfft_setup)
    {
        for (int i = 0; i < pme->ngrids; ++i)
        {
            gmx_parallel_3dfft_destroy(pme->pfft_setup[i]);
        }
    }
    sfree(pme->fftgrid);
    sfree(pme->cfftgrid);
    sfree(pme->pfft_setup);

    for (int i = 0; i < DIM; i++)
    {
        sfree(pme->bsp_mod[i]);
    }

    sfree(pme->bufv);
    sfree(pme->bufr);

    if (pme->solve_work)
    {
        pme_free_all_work(&pme->solve_work, pme->nthread);
    }

    destroy_pme_spline_work(pme->spline_work);

    if (pme->gpu != nullptr)
    {
        pme_gpu_destroy(pme->gpu);
    }

    delete pme;
}

void gmx_pme_reinit_atoms(gmx_pme_t*                pme,
                          const int                 numAtoms,
                          gmx::ArrayRef<const real> chargesA,
                          gmx::ArrayRef<const real> chargesB)
{
    if (pme->gpu != nullptr)
    {
        GMX_ASSERT(!(pme->bFEP_q && chargesB.empty()),
                   "B state charges must be specified if running Coulomb FEP on the GPU");
        pme_gpu_reinit_atoms(pme->gpu, numAtoms, chargesA.data(), pme->bFEP_q ? chargesB.data() : nullptr);
    }
    else
    {
        pme->atc[0].setNumAtoms(numAtoms);
        // TODO: set the charges here as well
    }
}

bool gmx_pme_grid_matches(const gmx_pme_t& pme, const ivec grid_size)
{
    return (pme.nkx == grid_size[XX] && pme.nky == grid_size[YY] && pme.nkz == grid_size[ZZ]);
}

void gmx::SeparatePmeRanksPermitted::disablePmeRanks(const std::string& reason)
{
    permitSeparatePmeRanks_ = false;

    if (!reason.empty())
    {
        reasons_.push_back(reason);
    }
}

bool gmx::SeparatePmeRanksPermitted::permitSeparatePmeRanks() const
{
    return permitSeparatePmeRanks_;
}

std::string gmx::SeparatePmeRanksPermitted::reasonsWhyDisabled() const
{
    return joinStrings(reasons_, "; ");
}
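
/* Illustrative use of SeparatePmeRanksPermitted (hypothetical caller code;
 * the variable names are examples, not part of this file):
 *
 *   gmx::SeparatePmeRanksPermitted pmeRanksPermitted;
 *   pmeRanksPermitted.disablePmeRanks("running PME on a single GPU rank");
 *   if (!pmeRanksPermitted.permitSeparatePmeRanks())
 *   {
 *       fprintf(stderr, "No separate PME ranks: %s\n",
 *               pmeRanksPermitted.reasonsWhyDisabled().c_str());
 *   }
 */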