Actually fix PME forces with FE without perturbed q/LJ
src/gromacs/ewald/pme.cpp
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017 The GROMACS development team.
 * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief This file contains function definitions necessary for
 * computing energies and forces for the PME long-ranged part (Coulomb
 * and LJ).
 *
 * \author Erik Lindahl <erik@kth.se>
 * \author Berk Hess <hess@kth.se>
 * \ingroup module_ewald
 */
/* IMPORTANT FOR DEVELOPERS:
 *
 * Triclinic pme stuff isn't entirely trivial, and we've experienced
 * some bugs during development (many of them due to me). To avoid
 * this in the future, please check the following things if you make
 * changes in this file:
 *
 * 1. You should obtain identical (at least to the PME precision)
 *    energies, forces, and virial for
 *    a rectangular box and a triclinic one where the z (or y) axis is
 *    tilted a whole box side. For instance you could use these boxes:
 *
 *    rectangular       triclinic
 *     2  0  0           2  0  0
 *     0  2  0           0  2  0
 *     0  0  6           2  2  6
 *
 * 2. You should check the energy conservation in a triclinic box.
 *
 * It might seem like overkill, but better safe than sorry.
 * /Erik 001109
 */
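
/* Illustration (not part of this file's logic; variable names are
 * hypothetical): the two boxes above written as GROMACS matrix
 * initializers for a quick check in a test driver:
 *
 *   matrix rectBox      = { { 2, 0, 0 }, { 0, 2, 0 }, { 0, 0, 6 } };
 *   matrix triclinicBox = { { 2, 0, 0 }, { 0, 2, 0 }, { 2, 2, 6 } };
 *
 * PME energies, forces and virial for the two should agree to PME precision.
 */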

#include "gmxpre.h"

#include "pme.h"

#include "config.h"

#include <cassert>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>

#include <algorithm>
#include <list>

#include "gromacs/domdec/domdec.h"
#include "gromacs/ewald/ewald-utils.h"
#include "gromacs/fft/parallel_3dfft.h"
#include "gromacs/fileio/pdbio.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/hardware/hw_info.h"
#include "gromacs/math/gmxcomplex.h"
#include "gromacs/math/invertmatrix.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/forcerec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/real.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/stringutil.h"
#include "gromacs/utility/unique_cptr.h"

#include "calculate-spline-moduli.h"
#include "pme-gather.h"
#include "pme-gpu-internal.h"
#include "pme-grid.h"
#include "pme-internal.h"
#include "pme-redistribute.h"
#include "pme-solve.h"
#include "pme-spline-work.h"
#include "pme-spread.h"

/*! \brief Help build a descriptive message in \c error if there are
 * \c errorReasons why PME on GPU is not supported.
 *
 * \returns Whether the lack of errorReasons indicates there is support. */
static bool
addMessageIfNotSupported(const std::list<std::string> &errorReasons,
                         std::string                  *error)
{
    bool isSupported = errorReasons.empty();
    if (!isSupported && error)
    {
        std::string regressionTestMarker = "PME GPU does not support";
        // this prefix is tested for in the regression-test script gmxtest.pl
        *error = regressionTestMarker;
        if (errorReasons.size() == 1)
        {
            *error += " " + errorReasons.back();
        }
        else
        {
            *error += ": " + gmx::joinStrings(errorReasons, "; ");
        }
        *error += ".";
    }
    return isSupported;
}
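
/* Message-format illustration, derived from the logic above (reason strings
 * hypothetical): with errorReasons = { "a double-precision build" } the
 * message reads
 *   "PME GPU does not support a double-precision build."
 * while with errorReasons = { "a double-precision build", "Lennard-Jones PME" }
 * it reads
 *   "PME GPU does not support: a double-precision build; Lennard-Jones PME."
 */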

bool pme_gpu_supports_build(std::string *error)
{
    std::list<std::string> errorReasons;
    if (GMX_DOUBLE)
    {
        errorReasons.emplace_back("a double-precision build");
    }
    if (GMX_GPU == GMX_GPU_NONE)
    {
        errorReasons.emplace_back("a non-GPU build");
    }
    return addMessageIfNotSupported(errorReasons, error);
}

bool pme_gpu_supports_hardware(const gmx_hw_info_t &hwinfo,
                               std::string         *error)
{
    std::list<std::string> errorReasons;
    if (GMX_GPU == GMX_GPU_OPENCL)
    {
        if (!areAllGpuDevicesFromAmd(hwinfo.gpu_info))
        {
            errorReasons.emplace_back("non-AMD devices");
        }
#ifdef __APPLE__
        errorReasons.emplace_back("Apple OS X operating system");
#endif
    }
    return addMessageIfNotSupported(errorReasons, error);
}

bool pme_gpu_supports_input(const t_inputrec &ir, const gmx_mtop_t &mtop, std::string *error)
{
    std::list<std::string> errorReasons;
    if (!EEL_PME(ir.coulombtype))
    {
        errorReasons.emplace_back("systems that do not use PME for electrostatics");
    }
    if (ir.pme_order != 4)
    {
        errorReasons.emplace_back("interpolation orders other than 4");
    }
    if (ir.efep != efepNO)
    {
        if (gmx_mtop_has_perturbed_charges(mtop))
        {
            errorReasons.emplace_back("free energy calculations with perturbed charges (multiple grids)");
        }
    }
    if (EVDW_PME(ir.vdwtype))
    {
        errorReasons.emplace_back("Lennard-Jones PME");
    }
    if (ir.cutoff_scheme == ecutsGROUP)
    {
        errorReasons.emplace_back("group cutoff scheme");
    }
    if (!EI_DYNAMICS(ir.eI))
    {
        errorReasons.emplace_back("not a dynamical integrator");
    }
    return addMessageIfNotSupported(errorReasons, error);
}
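
/* Caller sketch (hypothetical call site, not from this file): the three
 * checks above are meant to be chained from the least to the most specific,
 * short-circuiting so that \c error holds the first failing check's message:
 *
 *   std::string error;
 *   const bool  gpuPmeSupported = pme_gpu_supports_build(&error)
 *       && pme_gpu_supports_hardware(hwinfo, &error)
 *       && pme_gpu_supports_input(*ir, mtop, &error);
 */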

/*! \brief \libinternal
 * Finds out whether PME with the given inputs can run on a GPU.
 * This function is an internal final check, validating the whole PME structure on creation,
 * but it still duplicates the preliminary checks from the (externally exposed)
 * pme_gpu_supports_input() above - just in case.
 *
 * \param[in]  pme          The PME structure.
 * \param[out] error        The error message if the input is not supported on GPU.
 * \returns                 True if this PME input is possible to run on GPU, false otherwise.
 */
static bool pme_gpu_check_restrictions(const gmx_pme_t *pme, std::string *error)
{
    std::list<std::string> errorReasons;
    if (pme->nnodes != 1)
    {
        errorReasons.emplace_back("PME decomposition");
    }
    if (pme->pme_order != 4)
    {
        errorReasons.emplace_back("interpolation orders other than 4");
    }
    if (pme->bFEP)
    {
        errorReasons.emplace_back("free energy calculations (multiple grids)");
    }
    if (pme->doLJ)
    {
        errorReasons.emplace_back("Lennard-Jones PME");
    }
    if (GMX_DOUBLE)
    {
        errorReasons.emplace_back("double precision");
    }
    if (GMX_GPU == GMX_GPU_NONE)
    {
        errorReasons.emplace_back("non-GPU build of GROMACS");
    }

    return addMessageIfNotSupported(errorReasons, error);
}

PmeRunMode pme_run_mode(const gmx_pme_t *pme)
{
    GMX_ASSERT(pme != nullptr, "Expecting valid PME data pointer");
    return pme->runMode;
}

gmx::PinningPolicy pme_get_pinning_policy()
{
    return gmx::PinningPolicy::PinnedIfSupported;
}

/*! \brief Number of bytes in a cache line.
 *
 * Must also be a multiple of the SIMD and SIMD4 register size, to
 * preserve alignment.
 */
const int gmxCacheLineSize = 64;

//! Set up coordinate communication
static void setup_coordinate_communication(pme_atomcomm_t *atc)
{
    int nslab, n, i;
    int fw, bw;

    nslab = atc->nslab;

    n = 0;
    for (i = 1; i <= nslab/2; i++)
    {
        fw = (atc->nodeid + i) % nslab;
        bw = (atc->nodeid - i + nslab) % nslab;
        if (n < nslab - 1)
        {
            atc->node_dest[n] = fw;
            atc->node_src[n]  = bw;
            n++;
        }
        if (n < nslab - 1)
        {
            atc->node_dest[n] = bw;
            atc->node_src[n]  = fw;
            n++;
        }
    }
}
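
/* Worked example (hypothetical values): with nslab = 4 and nodeid = 0 the
 * loop above interleaves forward and backward neighbors by increasing
 * distance, giving
 *   node_dest = { 1, 3, 2 },  node_src = { 3, 1, 2 },
 * i.e. nslab - 1 = 3 send/receive pairs in total.
 */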

/*! \brief Round \p n up to the next multiple of \p f */
static int mult_up(int n, int f)
{
    return ((n + f - 1)/f)*f;
}
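
/* For example, mult_up(60, 7) == 63, while mult_up(63, 7) == 63 since values
 * that are already a multiple of \p f are left unchanged.
 */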

/*! \brief Return estimate of the load imbalance from the PME grid not being a good match for the number of PME ranks */
static double estimate_pme_load_imbalance(struct gmx_pme_t *pme)
{
    int    nma, nmi;
    double n1, n2, n3;

    nma = pme->nnodes_major;
    nmi = pme->nnodes_minor;

    n1 = mult_up(pme->nkx, nma)*mult_up(pme->nky, nmi)*pme->nkz;
    n2 = mult_up(pme->nkx, nma)*mult_up(pme->nkz, nmi)*pme->nky;
    n3 = mult_up(pme->nky, nma)*mult_up(pme->nkz, nmi)*pme->nkx;

    /* pme_solve is roughly double the cost of an fft */

    return (n1 + n2 + 3*n3)/static_cast<double>(6*pme->nkx*pme->nky*pme->nkz);
}

/*! \brief Initialize atom communication data structure */
static void init_atomcomm(struct gmx_pme_t *pme, pme_atomcomm_t *atc,
                          int dimind, gmx_bool bSpread)
{
    int thread;

    atc->dimind    = dimind;
    atc->nslab     = 1;
    atc->nodeid    = 0;
    atc->pd_nalloc = 0;
#if GMX_MPI
    if (pme->nnodes > 1)
    {
        atc->mpi_comm = pme->mpi_comm_d[dimind];
        MPI_Comm_size(atc->mpi_comm, &atc->nslab);
        MPI_Comm_rank(atc->mpi_comm, &atc->nodeid);
    }
    if (debug)
    {
        fprintf(debug, "For PME atom communication in dimind %d: nslab %d rank %d\n", atc->dimind, atc->nslab, atc->nodeid);
    }
#endif

    atc->bSpread   = bSpread;
    atc->pme_order = pme->pme_order;

    if (atc->nslab > 1)
    {
        snew(atc->node_dest, atc->nslab);
        snew(atc->node_src, atc->nslab);
        setup_coordinate_communication(atc);

        snew(atc->count_thread, pme->nthread);
        for (thread = 0; thread < pme->nthread; thread++)
        {
            snew(atc->count_thread[thread], atc->nslab);
        }
        atc->count = atc->count_thread[0];
        snew(atc->rcount, atc->nslab);
        snew(atc->buf_index, atc->nslab);
    }

    atc->nthread = pme->nthread;
    if (atc->nthread > 1)
    {
        snew(atc->thread_plist, atc->nthread);
    }
    snew(atc->spline, atc->nthread);
    for (thread = 0; thread < atc->nthread; thread++)
    {
        if (atc->nthread > 1)
        {
            snew(atc->thread_plist[thread].n, atc->nthread+2*gmxCacheLineSize);
            atc->thread_plist[thread].n += gmxCacheLineSize;
        }
    }
}

/*! \brief Destroy an atom communication data structure and its child structs */
static void destroy_atomcomm(pme_atomcomm_t *atc)
{
    sfree(atc->pd);
    if (atc->nslab > 1)
    {
        sfree(atc->node_dest);
        sfree(atc->node_src);
        for (int i = 0; i < atc->nthread; i++)
        {
            sfree(atc->count_thread[i]);
        }
        sfree(atc->count_thread);
        sfree(atc->rcount);
        sfree(atc->buf_index);

        sfree(atc->x);
        sfree(atc->coefficient);
        sfree(atc->f);
    }
    sfree(atc->idx);
    sfree(atc->fractx);

    sfree(atc->thread_idx);
    for (int i = 0; i < atc->nthread; i++)
    {
        if (atc->nthread > 1)
        {
            int *n_ptr = atc->thread_plist[i].n - gmxCacheLineSize;
            sfree(n_ptr);
            sfree(atc->thread_plist[i].i);
        }
        sfree(atc->spline[i].ind);
        for (int d = 0; d < ZZ; d++)
        {
            sfree(atc->spline[i].theta[d]);
            sfree(atc->spline[i].dtheta[d]);
        }
        sfree_aligned(atc->spline[i].ptr_dtheta_z);
        sfree_aligned(atc->spline[i].ptr_theta_z);
    }
    if (atc->nthread > 1)
    {
        sfree(atc->thread_plist);
    }
    sfree(atc->spline);
}

/*! \brief Initialize data structure for communication */
static void
init_overlap_comm(pme_overlap_t *  ol,
                  int              norder,
#if GMX_MPI
                  MPI_Comm         comm,
#endif
                  int              nnodes,
                  int              nodeid,
                  int              ndata,
                  int              commplainsize)
{
    gmx_bool         bCont;
#if GMX_MPI
    MPI_Status       stat;

    ol->mpi_comm = comm;
#endif

    ol->nnodes = nnodes;
    ol->nodeid = nodeid;

    /* Linear translation of the PME grid won't affect reciprocal space
     * calculations, so to optimize we only interpolate "upwards",
     * which also means we only have to consider overlap in one direction.
     * I.e., particles on this node might also be spread to grid indices
     * that belong to higher nodes (modulo nnodes)
     */

    ol->s2g0.resize(ol->nnodes + 1);
    ol->s2g1.resize(ol->nnodes);
    if (debug)
    {
        fprintf(debug, "PME slab boundaries:");
    }
    for (int i = 0; i < nnodes; i++)
    {
        /* s2g0 the local interpolation grid start.
         * s2g1 the local interpolation grid end.
         * Since in calc_pidx we divide particles, not grid lines,
         * uniformly in space along dimension x or y, we need to round
         * s2g0 down and s2g1 up.
         */
        ol->s2g0[i] = (i * ndata + 0) / nnodes;
        ol->s2g1[i] = ((i + 1) * ndata + nnodes - 1) / nnodes + norder - 1;

        if (debug)
        {
            fprintf(debug, "  %3d %3d", ol->s2g0[i], ol->s2g1[i]);
        }
    }
    ol->s2g0[nnodes] = ndata;
    if (debug)
    {
        fprintf(debug, "\n");
    }

    /* Determine with how many nodes we need to communicate the grid overlap */
    int testRankCount = 0;
    do
    {
        testRankCount++;
        bCont = FALSE;
        for (int i = 0; i < nnodes; i++)
        {
            if ((i + testRankCount <  nnodes && ol->s2g1[i] > ol->s2g0[i + testRankCount]) ||
                (i + testRankCount >= nnodes && ol->s2g1[i] > ol->s2g0[i + testRankCount - nnodes] + ndata))
            {
                bCont = TRUE;
            }
        }
    }
    while (bCont && testRankCount < nnodes);

    ol->comm_data.resize(testRankCount - 1);
    ol->send_size = 0;

    for (size_t b = 0; b < ol->comm_data.size(); b++)
    {
        pme_grid_comm_t *pgc = &ol->comm_data[b];

        /* Send */
        pgc->send_id = (ol->nodeid + (b + 1)) % ol->nnodes;
        int fft_start = ol->s2g0[pgc->send_id];
        int fft_end   = ol->s2g0[pgc->send_id + 1];
        if (pgc->send_id < nodeid)
        {
            fft_start += ndata;
            fft_end   += ndata;
        }
        int send_index1  = ol->s2g1[nodeid];
        send_index1      = std::min(send_index1, fft_end);
        pgc->send_index0 = fft_start;
        pgc->send_nindex = std::max(0, send_index1 - pgc->send_index0);
        ol->send_size   += pgc->send_nindex;

        /* We always start receiving at the first index of our slab */
        pgc->recv_id     = (ol->nodeid - (b + 1) + ol->nnodes) % ol->nnodes;
        fft_start        = ol->s2g0[ol->nodeid];
        fft_end          = ol->s2g0[ol->nodeid + 1];
        int recv_index1  = ol->s2g1[pgc->recv_id];
        if (pgc->recv_id > nodeid)
        {
            recv_index1 -= ndata;
        }
        recv_index1      = std::min(recv_index1, fft_end);
        pgc->recv_index0 = fft_start;
        pgc->recv_nindex = std::max(0, recv_index1 - pgc->recv_index0);
    }

#if GMX_MPI
    /* Communicate the buffer sizes to receive */
    for (size_t b = 0; b < ol->comm_data.size(); b++)
    {
        MPI_Sendrecv(&ol->send_size, 1, MPI_INT, ol->comm_data[b].send_id, b,
                     &ol->comm_data[b].recv_size, 1, MPI_INT, ol->comm_data[b].recv_id, b,
                     ol->mpi_comm, &stat);
    }
#endif

    /* For a non-divisible grid we need pme_order instead of pme_order-1 */
    ol->sendbuf.resize(norder * commplainsize);
    ol->recvbuf.resize(norder * commplainsize);
}
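
/* Worked example (hypothetical values): with ndata = 20 grid lines,
 * nnodes = 3 and norder = 4 the loop above gives
 *   s2g0 = { 0, 6, 13, 20 }   (slab starts, rounded down)
 *   s2g1 = { 10, 17, 23 }     (slab ends, rounded up plus norder - 1 overlap)
 * so each slab overlaps the start of the next one, and the overlap of the
 * last slab wraps periodically past ndata.
 */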

int minimalPmeGridSize(int pmeOrder)
{
    /* The actual grid size limitations are:
     *   serial:        >= pme_order
     *   DD, no OpenMP: >= 2*(pme_order - 1)
     *   DD, OpenMP:    >= pme_order + 1
     * But we use the maximum for simplicity since in practice there is not
     * much performance difference between pme_order and 2*(pme_order - 1).
     */
    int minimalSize = 2*(pmeOrder - 1);

    GMX_RELEASE_ASSERT(pmeOrder >= 3, "pmeOrder has to be >= 3");
    GMX_RELEASE_ASSERT(minimalSize >= pmeOrder + 1, "The grid size should be >= pmeOrder + 1");

    return minimalSize;
}
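
/* For the default pme_order = 4 this gives a minimal grid size of 6 along
 * each dimension, which satisfies all three cases listed above.
 */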

bool gmx_pme_check_restrictions(int pme_order,
                                int nkx, int nky, int nkz,
                                int numPmeDomainsAlongX,
                                bool useThreads,
                                bool errorsAreFatal)
{
    if (pme_order > PME_ORDER_MAX)
    {
        if (!errorsAreFatal)
        {
            return false;
        }

        std::string message = gmx::formatString(
                    "pme_order (%d) is larger than the maximum allowed value (%d). Modify and recompile the code if you really need such a high order.",
                    pme_order, PME_ORDER_MAX);
        GMX_THROW(gmx::InconsistentInputError(message));
    }

    const int minGridSize = minimalPmeGridSize(pme_order);
    if (nkx < minGridSize ||
        nky < minGridSize ||
        nkz < minGridSize)
    {
        if (!errorsAreFatal)
        {
            return false;
        }
        std::string message = gmx::formatString(
                    "The PME grid sizes need to be >= 2*(pme_order-1) (%d)",
                    minGridSize);
        GMX_THROW(gmx::InconsistentInputError(message));
    }

    /* Check for a limitation of the (current) sum_fftgrid_dd code.
     * We only allow multiple communication pulses in dim 1, not in dim 0.
     */
    if (useThreads && (nkx < numPmeDomainsAlongX*pme_order &&
                       nkx != numPmeDomainsAlongX*(pme_order - 1)))
    {
        if (!errorsAreFatal)
        {
            return false;
        }
        gmx_fatal(FARGS, "The number of PME grid lines per rank along x is %g. But when using OpenMP threads, the number of grid lines per rank along x should be >= pme_order (%d) or = pme_order-1. To resolve this issue, use fewer ranks along x (and possibly more along y and/or z) by specifying -dd manually.",
                  nkx/static_cast<double>(numPmeDomainsAlongX), pme_order);
    }

    return true;
}
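
/* Usage sketch (hypothetical call site): with errorsAreFatal = false the
 * function only reports, so candidate grids can be probed without aborting,
 * e.g. during PME grid tuning:
 *
 *   if (!gmx_pme_check_restrictions(pme_order, nkx, nky, nkz,
 *                                   numPmeDomainsAlongX, useThreads, false))
 *   {
 *       // reject this candidate grid and try the next one
 *   }
 */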

/*! \brief Return \p enumerator divided by \p denominator, rounded up to the nearest integer */
static int div_round_up(int enumerator, int denominator)
{
    return (enumerator + denominator - 1)/denominator;
}

gmx_pme_t *gmx_pme_init(const t_commrec         *cr,
                        const NumPmeDomains     &numPmeDomains,
                        const t_inputrec        *ir,
                        int                      homenr,
                        gmx_bool                 bFreeEnergy_q,
                        gmx_bool                 bFreeEnergy_lj,
                        gmx_bool                 bReproducible,
                        real                     ewaldcoeff_q,
                        real                     ewaldcoeff_lj,
                        int                      nthread,
                        PmeRunMode               runMode,
                        PmeGpu                  *pmeGpu,
                        const gmx_device_info_t *gpuInfo,
                        PmeGpuProgramHandle      pmeGpuProgram,
                        const gmx::MDLogger      & /*mdlog*/)
{
    int               use_threads, sum_use_threads, i;
    ivec              ndata;

    if (debug)
    {
        fprintf(debug, "Creating PME data structures.\n");
    }

    gmx::unique_cptr<gmx_pme_t, gmx_pme_destroy> pme(new gmx_pme_t());

    pme->sum_qgrid_tmp       = nullptr;
    pme->sum_qgrid_dd_tmp    = nullptr;

    pme->buf_nalloc          = 0;

    pme->nnodes              = 1;
    pme->bPPnode             = TRUE;

    pme->nnodes_major        = numPmeDomains.x;
    pme->nnodes_minor        = numPmeDomains.y;

#if GMX_MPI
    if (numPmeDomains.x*numPmeDomains.y > 1)
    {
        pme->mpi_comm = cr->mpi_comm_mygroup;

        MPI_Comm_rank(pme->mpi_comm, &pme->nodeid);
        MPI_Comm_size(pme->mpi_comm, &pme->nnodes);
        if (pme->nnodes != numPmeDomains.x*numPmeDomains.y)
        {
            gmx_incons("PME rank count mismatch");
        }
    }
    else
    {
        pme->mpi_comm = MPI_COMM_NULL;
    }
#endif

    if (pme->nnodes == 1)
    {
#if GMX_MPI
        pme->mpi_comm_d[0] = MPI_COMM_NULL;
        pme->mpi_comm_d[1] = MPI_COMM_NULL;
#endif
        pme->ndecompdim   = 0;
        pme->nodeid_major = 0;
        pme->nodeid_minor = 0;
    }
    else
    {
        if (numPmeDomains.y == 1)
        {
#if GMX_MPI
            pme->mpi_comm_d[0] = pme->mpi_comm;
            pme->mpi_comm_d[1] = MPI_COMM_NULL;
#endif
            pme->ndecompdim   = 1;
            pme->nodeid_major = pme->nodeid;
            pme->nodeid_minor = 0;

        }
        else if (numPmeDomains.x == 1)
        {
#if GMX_MPI
            pme->mpi_comm_d[0] = MPI_COMM_NULL;
            pme->mpi_comm_d[1] = pme->mpi_comm;
#endif
            pme->ndecompdim   = 1;
            pme->nodeid_major = 0;
            pme->nodeid_minor = pme->nodeid;
        }
        else
        {
            if (pme->nnodes % numPmeDomains.x != 0)
            {
                gmx_incons("For 2D PME decomposition, #PME ranks must be divisible by the number of domains along x");
            }
            pme->ndecompdim = 2;

#if GMX_MPI
            MPI_Comm_split(pme->mpi_comm, pme->nodeid % numPmeDomains.y,
                           pme->nodeid, &pme->mpi_comm_d[0]);  /* My communicator along major dimension */
            MPI_Comm_split(pme->mpi_comm, pme->nodeid/numPmeDomains.y,
                           pme->nodeid, &pme->mpi_comm_d[1]);  /* My communicator along minor dimension */

            MPI_Comm_rank(pme->mpi_comm_d[0], &pme->nodeid_major);
            MPI_Comm_size(pme->mpi_comm_d[0], &pme->nnodes_major);
            MPI_Comm_rank(pme->mpi_comm_d[1], &pme->nodeid_minor);
            MPI_Comm_size(pme->mpi_comm_d[1], &pme->nnodes_minor);
#endif
        }
        pme->bPPnode = thisRankHasDuty(cr, DUTY_PP);
    }

    pme->nthread = nthread;

    /* Check if any of the PME MPI ranks uses threads */
    use_threads = (pme->nthread > 1 ? 1 : 0);
#if GMX_MPI
    if (pme->nnodes > 1)
    {
        MPI_Allreduce(&use_threads, &sum_use_threads, 1, MPI_INT,
                      MPI_SUM, pme->mpi_comm);
    }
    else
#endif
    {
        sum_use_threads = use_threads;
    }
    pme->bUseThreads = (sum_use_threads > 0);

    if (ir->ePBC == epbcSCREW)
    {
        gmx_fatal(FARGS, "pme does not (yet) work with pbc = screw");
    }

    /* NOTE:
     * It is likely that the current gmx_pme_do() routine supports calculating
     * only Coulomb or only LJ while gmx_pme_init() configures for both,
     * but that has never been tested.
     * It is also likely that the current gmx_pme_do() routine supports
     * not calculating free energy for Coulomb and/or LJ while gmx_pme_init()
     * configures with free energy, but that has never been tested.
     */
    pme->doCoulomb     = EEL_PME(ir->coulombtype);
    pme->doLJ          = EVDW_PME(ir->vdwtype);
    pme->bFEP_q        = ((ir->efep != efepNO) && bFreeEnergy_q);
    pme->bFEP_lj       = ((ir->efep != efepNO) && bFreeEnergy_lj);
    pme->bFEP          = (pme->bFEP_q || pme->bFEP_lj);
    pme->nkx           = ir->nkx;
    pme->nky           = ir->nky;
    pme->nkz           = ir->nkz;
    pme->bP3M          = (ir->coulombtype == eelP3M_AD || getenv("GMX_PME_P3M") != nullptr);
    pme->pme_order     = ir->pme_order;
    pme->ewaldcoeff_q  = ewaldcoeff_q;
    pme->ewaldcoeff_lj = ewaldcoeff_lj;

    /* Always constant electrostatics coefficients */
    pme->epsilon_r     = ir->epsilon_r;

    /* Always constant LJ coefficients */
    pme->ljpme_combination_rule = ir->ljpme_combination_rule;

    // The box requires scaling with nwalls = 2, we store that condition as well
    // as the scaling factor
    delete pme->boxScaler;
    pme->boxScaler = new EwaldBoxZScaler(*ir);

    /* If we violate restrictions, generate a fatal error here */
    gmx_pme_check_restrictions(pme->pme_order,
                               pme->nkx, pme->nky, pme->nkz,
                               pme->nnodes_major,
                               pme->bUseThreads,
                               true);

    if (pme->nnodes > 1)
    {
        double imbal;

#if GMX_MPI
        MPI_Type_contiguous(DIM, GMX_MPI_REAL, &(pme->rvec_mpi));
        MPI_Type_commit(&(pme->rvec_mpi));
#endif

        /* Note that the coefficient spreading and force gathering, which usually
         * takes about the same amount of time as FFT+solve_pme,
         * is always fully load balanced
         * (unless the coefficient distribution is inhomogeneous).
         */

        imbal = estimate_pme_load_imbalance(pme.get());
        if (imbal >= 1.2 && pme->nodeid_major == 0 && pme->nodeid_minor == 0)
        {
            fprintf(stderr,
                    "\n"
                    "NOTE: The load imbalance in PME FFT and solve is %d%%.\n"
                    "      For optimal PME load balancing\n"
                    "      PME grid_x (%d) and grid_y (%d) should be divisible by #PME_ranks_x (%d)\n"
                    "      and PME grid_y (%d) and grid_z (%d) should be divisible by #PME_ranks_y (%d)\n"
                    "\n",
                    gmx::roundToInt((imbal-1)*100),
                    pme->nkx, pme->nky, pme->nnodes_major,
                    pme->nky, pme->nkz, pme->nnodes_minor);
        }
    }

    /* For a non-divisible grid we need pme_order instead of pme_order-1 */
    /* In sum_qgrid_dd x overlap is copied in place: take padding into account.
     * y is always copied through a buffer: we don't need padding in z,
     * but we do need the overlap in x because of the communication order.
     */
    init_overlap_comm(&pme->overlap[0], pme->pme_order,
#if GMX_MPI
                      pme->mpi_comm_d[0],
#endif
                      pme->nnodes_major, pme->nodeid_major,
                      pme->nkx,
                      (div_round_up(pme->nky, pme->nnodes_minor)+pme->pme_order)*(pme->nkz+pme->pme_order-1));

    /* Along overlap dim 1 we can send in multiple pulses in sum_fftgrid_dd.
     * We do this with an offset buffer of equal size, so we need to allocate
     * extra for the offset. That's what the (+1)*pme->nkz is for.
     */
    init_overlap_comm(&pme->overlap[1], pme->pme_order,
#if GMX_MPI
                      pme->mpi_comm_d[1],
#endif
                      pme->nnodes_minor, pme->nodeid_minor,
                      pme->nky,
                      (div_round_up(pme->nkx, pme->nnodes_major)+pme->pme_order+1)*pme->nkz);

    /* Double-check for a limitation of the (current) sum_fftgrid_dd code.
     * Note that gmx_pme_check_restrictions checked for this already.
     */
    if (pme->bUseThreads && (pme->overlap[0].comm_data.size() > 1))
    {
        gmx_incons("More than one communication pulse required for grid overlap communication along the major dimension while using threads");
    }

    snew(pme->bsp_mod[XX], pme->nkx);
    snew(pme->bsp_mod[YY], pme->nky);
    snew(pme->bsp_mod[ZZ], pme->nkz);

    pme->gpu     = pmeGpu; /* Carrying over the single GPU structure */
    pme->runMode = runMode;

    /* The required size of the interpolation grid, including overlap.
     * The allocated size (pmegrid_n?) might be slightly larger.
     */
    pme->pmegrid_nx = pme->overlap[0].s2g1[pme->nodeid_major] -
        pme->overlap[0].s2g0[pme->nodeid_major];
    pme->pmegrid_ny = pme->overlap[1].s2g1[pme->nodeid_minor] -
        pme->overlap[1].s2g0[pme->nodeid_minor];
    pme->pmegrid_nz_base = pme->nkz;
    pme->pmegrid_nz      = pme->pmegrid_nz_base + pme->pme_order - 1;
    set_grid_alignment(&pme->pmegrid_nz, pme->pme_order);
    pme->pmegrid_start_ix = pme->overlap[0].s2g0[pme->nodeid_major];
    pme->pmegrid_start_iy = pme->overlap[1].s2g0[pme->nodeid_minor];
    pme->pmegrid_start_iz = 0;

    make_gridindex_to_localindex(pme->nkx,
                                 pme->pmegrid_start_ix,
                                 pme->pmegrid_nx - (pme->pme_order-1),
                                 &pme->nnx, &pme->fshx);
    make_gridindex_to_localindex(pme->nky,
                                 pme->pmegrid_start_iy,
                                 pme->pmegrid_ny - (pme->pme_order-1),
                                 &pme->nny, &pme->fshy);
    make_gridindex_to_localindex(pme->nkz,
                                 pme->pmegrid_start_iz,
                                 pme->pmegrid_nz_base,
                                 &pme->nnz, &pme->fshz);

    pme->spline_work = make_pme_spline_work(pme->pme_order);

    ndata[0]    = pme->nkx;
    ndata[1]    = pme->nky;
    ndata[2]    = pme->nkz;
    /* It doesn't matter if we allocate too many grids here,
     * we only allocate and use the ones we need.
     */
    if (pme->doLJ)
    {
        pme->ngrids = ((ir->ljpme_combination_rule == eljpmeLB) ? DO_Q_AND_LJ_LB : DO_Q_AND_LJ);
    }
    else
    {
        pme->ngrids = DO_Q;
    }
    snew(pme->fftgrid, pme->ngrids);
    snew(pme->cfftgrid, pme->ngrids);
    snew(pme->pfft_setup, pme->ngrids);

    for (i = 0; i < pme->ngrids; ++i)
    {
        if ((i <  DO_Q && pme->doCoulomb && (i == 0 ||
                                             bFreeEnergy_q)) ||
            (i >= DO_Q && pme->doLJ && (i == 2 ||
                                        bFreeEnergy_lj ||
                                        ir->ljpme_combination_rule == eljpmeLB)))
        {
            pmegrids_init(&pme->pmegrid[i],
                          pme->pmegrid_nx, pme->pmegrid_ny, pme->pmegrid_nz,
                          pme->pmegrid_nz_base,
                          pme->pme_order,
                          pme->bUseThreads,
                          pme->nthread,
                          pme->overlap[0].s2g1[pme->nodeid_major]-pme->overlap[0].s2g0[pme->nodeid_major+1],
                          pme->overlap[1].s2g1[pme->nodeid_minor]-pme->overlap[1].s2g0[pme->nodeid_minor+1]);
            /* This routine will allocate the grid data to fit the FFTs */
            const auto allocateRealGridForGpu = (pme->runMode == PmeRunMode::Mixed) ? gmx::PinningPolicy::PinnedIfSupported : gmx::PinningPolicy::CannotBePinned;
            gmx_parallel_3dfft_init(&pme->pfft_setup[i], ndata,
                                    &pme->fftgrid[i], &pme->cfftgrid[i],
                                    pme->mpi_comm_d,
                                    bReproducible, pme->nthread, allocateRealGridForGpu);

        }
    }

    if (!pme->bP3M)
    {
        /* Use plain SPME B-spline interpolation */
        make_bspline_moduli(pme->bsp_mod, pme->nkx, pme->nky, pme->nkz, pme->pme_order);
    }
    else
    {
        /* Use the P3M grid-optimized influence function */
        make_p3m_bspline_moduli(pme->bsp_mod, pme->nkx, pme->nky, pme->nkz, pme->pme_order);
    }

    /* Use atc[0] for spreading */
    init_atomcomm(pme.get(), &pme->atc[0], numPmeDomains.x > 1 ? 0 : 1, TRUE);
    if (pme->ndecompdim >= 2)
    {
        init_atomcomm(pme.get(), &pme->atc[1], 1, FALSE);
    }

    if (pme->nnodes == 1)
    {
        pme->atc[0].n = homenr;
        pme_realloc_atomcomm_things(&pme->atc[0]);
    }

    pme->lb_buf1       = nullptr;
    pme->lb_buf2       = nullptr;
    pme->lb_buf_nalloc = 0;

    if (pme_gpu_active(pme.get()))
    {
        if (!pme->gpu)
        {
            // Initial check of validity of the data
            std::string errorString;
            bool        canRunOnGpu = pme_gpu_check_restrictions(pme.get(), &errorString);
            if (!canRunOnGpu)
            {
                GMX_THROW(gmx::NotImplementedError(errorString));
            }
        }

        pme_gpu_reinit(pme.get(), gpuInfo, pmeGpuProgram);
    }

    pme_init_all_work(&pme->solve_work, pme->nthread, pme->nkx);

    // no exception was thrown during the init, so we hand over the PME structure handle
    return pme.release();
}

void gmx_pme_reinit(struct gmx_pme_t **pmedata,
                    const t_commrec   *cr,
                    struct gmx_pme_t * pme_src,
                    const t_inputrec * ir,
                    const ivec         grid_size,
                    real               ewaldcoeff_q,
                    real               ewaldcoeff_lj)
{
    int        homenr;

    // Create a copy of t_inputrec fields that are used in gmx_pme_init().
    // TODO: This would be better as just copying a sub-structure that contains
    // all the PME parameters and nothing else.
    t_inputrec irc;
    irc.ePBC                   = ir->ePBC;
    irc.coulombtype            = ir->coulombtype;
    irc.vdwtype                = ir->vdwtype;
    irc.efep                   = ir->efep;
    irc.pme_order              = ir->pme_order;
    irc.epsilon_r              = ir->epsilon_r;
    irc.ljpme_combination_rule = ir->ljpme_combination_rule;
    irc.nkx                    = grid_size[XX];
    irc.nky                    = grid_size[YY];
    irc.nkz                    = grid_size[ZZ];

    if (pme_src->nnodes == 1)
    {
        homenr = pme_src->atc[0].n;
    }
    else
    {
        homenr = -1;
    }

    try
    {
        const gmx::MDLogger dummyLogger;
        // This is reinit which is currently only changing grid size/coefficients,
        // so we don't expect the actual logging.
        // TODO: when PME is an object, it should take reference to mdlog on construction and save it.
        GMX_ASSERT(pmedata, "Invalid PME pointer");
        NumPmeDomains numPmeDomains = { pme_src->nnodes_major, pme_src->nnodes_minor };
        *pmedata = gmx_pme_init(cr, numPmeDomains,
                                &irc, homenr, pme_src->bFEP_q, pme_src->bFEP_lj, FALSE, ewaldcoeff_q, ewaldcoeff_lj,
                                pme_src->nthread, pme_src->runMode, pme_src->gpu, nullptr, nullptr, dummyLogger);
        //TODO this is mostly passing around current values
    }
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;

    /* We can easily reuse the allocated pme grids in pme_src */
    reuse_pmegrids(&pme_src->pmegrid[PME_GRID_QA], &(*pmedata)->pmegrid[PME_GRID_QA]);
    /* We would like to reuse the fft grids, but that's harder */
}

void gmx_pme_calc_energy(struct gmx_pme_t *pme, int n, rvec *x, real *q, real *V)
{
    pme_atomcomm_t *atc;
    pmegrids_t     *grid;

    if (pme->nnodes > 1)
    {
        gmx_incons("gmx_pme_calc_energy called in parallel");
    }
    if (pme->bFEP_q)
    {
        gmx_incons("gmx_pme_calc_energy with free energy");
    }

    atc            = &pme->atc_energy;
    atc->nthread   = 1;
    if (atc->spline == nullptr)
    {
        snew(atc->spline, atc->nthread);
    }
    atc->nslab     = 1;
    atc->bSpread   = TRUE;
    atc->pme_order = pme->pme_order;
    atc->n         = n;
    pme_realloc_atomcomm_things(atc);
    atc->x           = x;
    atc->coefficient = q;

    /* We only use the A-charges grid */
    grid = &pme->pmegrid[PME_GRID_QA];

    /* Only calculate the spline coefficients, don't actually spread */
    spread_on_grid(pme, atc, nullptr, TRUE, FALSE, pme->fftgrid[PME_GRID_QA], FALSE, PME_GRID_QA);

    *V = gather_energy_bsplines(pme, grid->grid.grid, atc);
}

/*! \brief Calculate initial Lorentz-Berthelot coefficients for LJ-PME */
static void
calc_initial_lb_coeffs(struct gmx_pme_t *pme, const real *local_c6, const real *local_sigma)
{
    int  i;
    for (i = 0; i < pme->atc[0].n; ++i)
    {
        real sigma4;
        /* Compute sigma^4 by repeated squaring */
        sigma4                     = local_sigma[i];
        sigma4                     = sigma4*sigma4;
        sigma4                     = sigma4*sigma4;
        pme->atc[0].coefficient[i] = local_c6[i] / sigma4;
    }
}

/*! \brief Calculate next Lorentz-Berthelot coefficients for LJ-PME */
static void
calc_next_lb_coeffs(struct gmx_pme_t *pme, const real *local_sigma)
{
    int  i;

    for (i = 0; i < pme->atc[0].n; ++i)
    {
        pme->atc[0].coefficient[i] *= local_sigma[i];
    }
}
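
/* Coefficient progression over the seven LB grids, derived from the two
 * routines above: starting from c6/sigma^4 and multiplying by sigma once per
 * grid, grid_index g = 2..8 spreads
 *   coefficient(g) = c6 * sigma^(g-5),
 * i.e. c6/sigma^3 up to c6*sigma^3. These per-atom factors generate the seven
 * binomial terms of (sigma_i + sigma_j)^6 that the Lorentz-Berthelot
 * combination rule requires; solve_pme_lj_yzx() with bLB = TRUE then combines
 * the grids in reciprocal space.
 */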

int gmx_pme_do(struct gmx_pme_t *pme,
               int start,       int homenr,
               rvec x[],        rvec f[],
               real chargeA[],  real chargeB[],
               real c6A[],      real c6B[],
               real sigmaA[],   real sigmaB[],
               matrix box,      const t_commrec *cr,
               int  maxshift_x, int maxshift_y,
               t_nrnb *nrnb,    gmx_wallcycle *wcycle,
               matrix vir_q,    matrix vir_lj,
               real *energy_q,  real *energy_lj,
               real lambda_q,   real lambda_lj,
               real *dvdlambda_q, real *dvdlambda_lj,
               int flags)
{
    GMX_ASSERT(pme->runMode == PmeRunMode::CPU, "gmx_pme_do should not be called when PME runs on a GPU.");

    int                  d, i, j, npme, grid_index, max_grid_index;
    int                  n_d;
    pme_atomcomm_t      *atc        = nullptr;
    pmegrids_t          *pmegrid    = nullptr;
    real                *grid       = nullptr;
    rvec                *f_d;
    real                *coefficient = nullptr;
    PmeOutput            output[2]; // The second is used for the B state with FEP
    real                 scale, lambda;
    gmx_bool             bClearF;
    gmx_parallel_3dfft_t pfft_setup;
    real              *  fftgrid;
    t_complex          * cfftgrid;
    int                  thread;
    gmx_bool             bFirst, bDoSplines;
    int                  fep_state;
    int                  fep_states_lj           = pme->bFEP_lj ? 2 : 1;
    const gmx_bool       bCalcEnerVir            = (flags & GMX_PME_CALC_ENER_VIR) != 0;
    const gmx_bool       bBackFFT                = (flags & (GMX_PME_CALC_F | GMX_PME_CALC_POT)) != 0;
    const gmx_bool       bCalcF                  = (flags & GMX_PME_CALC_F) != 0;

    /* We could be passing lambda!=0 while no q or LJ is actually perturbed */
    if (!pme->bFEP_q)
    {
        lambda_q = 0;
    }
    if (!pme->bFEP_lj)
    {
        lambda_lj = 0;
    }
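
    /* Note on the fix above (the subject of this commit): when only one of
     * q/LJ is perturbed, pme->bFEP is still TRUE, so gather_f_bsplines()
     * below scales A-state grids by 1-lambda and B-state grids by lambda.
     * Zeroing the unused lambda ensures that the single grid computed for
     * the unperturbed interaction gets the full weight 1-lambda = 1 instead
     * of a spurious partial weight.
     */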

    assert(pme->nnodes > 0);
    assert(pme->nnodes == 1 || pme->ndecompdim > 0);

    if (pme->nnodes > 1)
    {
        atc      = &pme->atc[0];
        atc->npd = homenr;
        if (atc->npd > atc->pd_nalloc)
        {
            atc->pd_nalloc = over_alloc_dd(atc->npd);
            srenew(atc->pd, atc->pd_nalloc);
        }
        for (d = pme->ndecompdim-1; d >= 0; d--)
        {
            atc           = &pme->atc[d];
            atc->maxshift = (atc->dimind == 0 ? maxshift_x : maxshift_y);
        }
    }
    else
    {
        atc = &pme->atc[0];
        /* This could be necessary for TPI */
        pme->atc[0].n = homenr;
        if (DOMAINDECOMP(cr))
        {
            pme_realloc_atomcomm_things(atc);
        }
        atc->x = x;
        atc->f = f;
    }

    matrix scaledBox;
    pme->boxScaler->scaleBox(box, scaledBox);

    gmx::invertBoxMatrix(scaledBox, pme->recipbox);
    bFirst = TRUE;

    /* For simplicity, we construct the splines for all particles if
     * more than one PME calculation is needed. Some optimization
     * could be done by keeping track of which atoms have splines
     * constructed, and construct new splines on each pass for atoms
     * that don't yet have them.
     */

    bDoSplines = pme->bFEP || (pme->doCoulomb && pme->doLJ);

    /* We need a maximum of four separate PME calculations:
     * grid_index=0: Coulomb PME with charges from state A
     * grid_index=1: Coulomb PME with charges from state B
     * grid_index=2: LJ PME with C6 from state A
     * grid_index=3: LJ PME with C6 from state B
     * For Lorentz-Berthelot combination rules, a separate loop is used to
     * calculate all the terms
     */

    /* If we are doing LJ-PME with LB, we only do Q here */
    max_grid_index = (pme->ljpme_combination_rule == eljpmeLB) ? DO_Q : DO_Q_AND_LJ;

    for (grid_index = 0; grid_index < max_grid_index; ++grid_index)
    {
        /* Check if we should do calculations at this grid_index
         * If grid_index is odd we should be doing FEP
         * If grid_index < 2 we should be doing electrostatic PME
         * If grid_index >= 2 we should be doing LJ-PME
         */
        if ((grid_index <  DO_Q && (!pme->doCoulomb ||
                                    (grid_index == 1 && !pme->bFEP_q))) ||
            (grid_index >= DO_Q && (!pme->doLJ ||
                                    (grid_index == 3 && !pme->bFEP_lj))))
        {
            continue;
        }
        /* Unpack structure */
        pmegrid    = &pme->pmegrid[grid_index];
        fftgrid    = pme->fftgrid[grid_index];
        cfftgrid   = pme->cfftgrid[grid_index];
        pfft_setup = pme->pfft_setup[grid_index];
        switch (grid_index)
        {
            case 0: coefficient = chargeA + start; break;
            case 1: coefficient = chargeB + start; break;
            case 2: coefficient = c6A + start; break;
            case 3: coefficient = c6B + start; break;
        }

        grid = pmegrid->grid.grid;

        if (debug)
        {
            fprintf(debug, "PME: number of ranks = %d, rank = %d\n",
                    cr->nnodes, cr->nodeid);
            fprintf(debug, "Grid = %p\n", static_cast<void*>(grid));
            if (grid == nullptr)
            {
                gmx_fatal(FARGS, "No grid!");
            }
        }

        if (pme->nnodes == 1)
        {
            atc->coefficient = coefficient;
        }
        else
        {
            wallcycle_start(wcycle, ewcPME_REDISTXF);
            do_redist_pos_coeffs(pme, cr, start, homenr, bFirst, x, coefficient);

            wallcycle_stop(wcycle, ewcPME_REDISTXF);
        }

        if (debug)
        {
            fprintf(debug, "Rank= %6d, pme local particles=%6d\n",
                    cr->nodeid, atc->n);
        }

        if (flags & GMX_PME_SPREAD)
        {
            wallcycle_start(wcycle, ewcPME_SPREAD);

            /* Spread the coefficients on a grid */
            spread_on_grid(pme, &pme->atc[0], pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);

            if (bFirst)
            {
                inc_nrnb(nrnb, eNR_WEIGHTS, DIM*atc->n);
            }
            inc_nrnb(nrnb, eNR_SPREADBSP,
                     pme->pme_order*pme->pme_order*pme->pme_order*atc->n);

            if (!pme->bUseThreads)
            {
                wrap_periodic_pmegrid(pme, grid);

                /* sum contributions to local grid from other nodes */
#if GMX_MPI
                if (pme->nnodes > 1)
                {
                    gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
                }
#endif

                copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
            }

            wallcycle_stop(wcycle, ewcPME_SPREAD);

            /* TODO If the OpenMP and single-threaded implementations
               converge, then spread_on_grid() and
               copy_pmegrid_to_fftgrid() will perhaps live in the same
               source file.
             */
        }

        /* Here we start a large thread parallel region */
#pragma omp parallel num_threads(pme->nthread) private(thread)
        {
            try
            {
                thread = gmx_omp_get_thread_num();
                if (flags & GMX_PME_SOLVE)
                {
                    int loop_count;

                    /* do 3d-fft */
                    if (thread == 0)
                    {
                        wallcycle_start(wcycle, ewcPME_FFT);
                    }
                    gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX,
                                               thread, wcycle);
                    if (thread == 0)
                    {
                        wallcycle_stop(wcycle, ewcPME_FFT);
                    }

                    /* solve in k-space for our local cells */
                    if (thread == 0)
                    {
                        wallcycle_start(wcycle, (grid_index < DO_Q ? ewcPME_SOLVE : ewcLJPME));
                    }
                    if (grid_index < DO_Q)
                    {
                        loop_count =
                            solve_pme_yzx(pme, cfftgrid,
                                          scaledBox[XX][XX]*scaledBox[YY][YY]*scaledBox[ZZ][ZZ],
                                          bCalcEnerVir,
                                          pme->nthread, thread);
                    }
                    else
                    {
                        loop_count =
                            solve_pme_lj_yzx(pme, &cfftgrid, FALSE,
                                             scaledBox[XX][XX]*scaledBox[YY][YY]*scaledBox[ZZ][ZZ],
                                             bCalcEnerVir,
                                             pme->nthread, thread);
                    }

                    if (thread == 0)
                    {
                        wallcycle_stop(wcycle, (grid_index < DO_Q ? ewcPME_SOLVE : ewcLJPME));
                        inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
                    }
                }

                if (bBackFFT)
                {
                    /* do 3d-invfft */
                    if (thread == 0)
                    {
                        wallcycle_start(wcycle, ewcPME_FFT);
                    }
                    gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL,
                                               thread, wcycle);
                    if (thread == 0)
                    {
                        wallcycle_stop(wcycle, ewcPME_FFT);


                        if (pme->nodeid == 0)
                        {
                            real ntot = pme->nkx*pme->nky*pme->nkz;
                            npme  = static_cast<int>(ntot*std::log(ntot)/std::log(2.0));
                            inc_nrnb(nrnb, eNR_FFT, 2*npme);
                        }

                        /* Note: this wallcycle region is closed below
                           outside an OpenMP region, so take care if
                           refactoring code here. */
                        wallcycle_start(wcycle, ewcPME_GATHER);
                    }

                    copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
                }
            } GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
        }
        /* End of thread parallel section.
         * With MPI we have to synchronize here before gmx_sum_qgrid_dd.
         */

        if (bBackFFT)
        {
            /* distribute local grid to all nodes */
#if GMX_MPI
            if (pme->nnodes > 1)
            {
                gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
            }
#endif

            unwrap_periodic_pmegrid(pme, grid);
        }

        if (bCalcF)
        {
            /* interpolate forces for our local atoms */


            /* If we are running without parallelization,
             * atc->f is the actual force array, not a buffer,
             * therefore we should not clear it.
             */
            lambda  = grid_index < DO_Q ? lambda_q : lambda_lj;
            bClearF = (bFirst && PAR(cr));
#pragma omp parallel for num_threads(pme->nthread) schedule(static)
            for (thread = 0; thread < pme->nthread; thread++)
            {
                try
                {
                    gather_f_bsplines(pme, grid, bClearF, atc,
                                      &atc->spline[thread],
                                      pme->bFEP ? (grid_index % 2 == 0 ? 1.0-lambda : lambda) : 1.0);
                }
                GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
            }


            inc_nrnb(nrnb, eNR_GATHERFBSP,
                     pme->pme_order*pme->pme_order*pme->pme_order*pme->atc[0].n);
            /* Note: this wallcycle region is opened above inside an OpenMP
               region, so take care if refactoring code here. */
            wallcycle_stop(wcycle, ewcPME_GATHER);
        }

        if (bCalcEnerVir)
        {
            /* This should only be called on the master thread
             * and after the threads have synchronized.
             */
            if (grid_index < 2)
            {
                get_pme_ener_vir_q(pme->solve_work, pme->nthread, &output[grid_index % 2]);
            }
            else
            {
                get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &output[grid_index % 2]);
            }
        }
        bFirst = FALSE;
    } /* of grid_index-loop */
1470
1471     /* For Lorentz-Berthelot combination rules in LJ-PME, we need to calculate
1472      * seven terms. */
1473
1474     if (pme->doLJ && pme->ljpme_combination_rule == eljpmeLB)
1475     {
1476         /* Loop over A- and B-state if we are doing FEP */
1477         for (fep_state = 0; fep_state < fep_states_lj; ++fep_state)
1478         {
1479             real *local_c6 = nullptr, *local_sigma = nullptr, *RedistC6 = nullptr, *RedistSigma = nullptr;
1480             if (pme->nnodes == 1)
1481             {
1482                 if (pme->lb_buf1 == nullptr)
1483                 {
1484                     pme->lb_buf_nalloc = pme->atc[0].n;
1485                     snew(pme->lb_buf1, pme->lb_buf_nalloc);
1486                 }
1487                 pme->atc[0].coefficient = pme->lb_buf1;
1488                 switch (fep_state)
1489                 {
1490                     case 0:
1491                         local_c6      = c6A;
1492                         local_sigma   = sigmaA;
1493                         break;
1494                     case 1:
1495                         local_c6      = c6B;
1496                         local_sigma   = sigmaB;
1497                         break;
1498                     default:
1499                         gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
1500                 }
1501             }
1502             else
1503             {
1504                 atc = &pme->atc[0];
1505                 switch (fep_state)
1506                 {
1507                     case 0:
1508                         RedistC6      = c6A;
1509                         RedistSigma   = sigmaA;
1510                         break;
1511                     case 1:
1512                         RedistC6      = c6B;
1513                         RedistSigma   = sigmaB;
1514                         break;
1515                     default:
1516                         gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
1517                 }
1518                 wallcycle_start(wcycle, ewcPME_REDISTXF);
1519
1520                 do_redist_pos_coeffs(pme, cr, start, homenr, bFirst, x, RedistC6);
1521                 if (pme->lb_buf_nalloc < atc->n)
1522                 {
1523                     pme->lb_buf_nalloc = atc->nalloc;
1524                     srenew(pme->lb_buf1, pme->lb_buf_nalloc);
1525                     srenew(pme->lb_buf2, pme->lb_buf_nalloc);
1526                 }
1527                 local_c6 = pme->lb_buf1;
1528                 for (i = 0; i < atc->n; ++i)
1529                 {
1530                     local_c6[i] = atc->coefficient[i];
1531                 }
1532
1533                 do_redist_pos_coeffs(pme, cr, start, homenr, FALSE, x, RedistSigma);
1534                 local_sigma = pme->lb_buf2;
1535                 for (i = 0; i < atc->n; ++i)
1536                 {
1537                     local_sigma[i] = atc->coefficient[i];
1538                 }
1539
1540                 wallcycle_stop(wcycle, ewcPME_REDISTXF);
1541             }
1542             calc_initial_lb_coeffs(pme, local_c6, local_sigma);
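                 /* calc_initial_lb_coeffs() sets the per-atom base coefficient
                  * from local_c6 and local_sigma; each calc_next_lb_coeffs()
                  * call in the grid loop below then multiplies in another
                  * factor of sigma, stepping through the powers of sigma that
                  * the seven binomial terms require.
                  */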
1543
1544             /* Seven terms in LJ-PME with LB; grid_index < 2 is reserved for electrostatics */
1545             for (grid_index = 2; grid_index < 9; ++grid_index)
1546             {
1547                 /* Unpack structure */
1548                 pmegrid    = &pme->pmegrid[grid_index];
1549                 fftgrid    = pme->fftgrid[grid_index];
1550                 pfft_setup = pme->pfft_setup[grid_index];
1551                 calc_next_lb_coeffs(pme, local_sigma);
1552                 grid = pmegrid->grid.grid;
1553
1554                 if (flags & GMX_PME_SPREAD)
1555                 {
1556                     wallcycle_start(wcycle, ewcPME_SPREAD);
1557                     /* Spread the current LB coefficients on a grid */
1558                     spread_on_grid(pme, &pme->atc[0], pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);
1559
1560                     if (bFirst)
1561                     {
1562                         inc_nrnb(nrnb, eNR_WEIGHTS, DIM*atc->n);
1563                     }
1564
1565                     inc_nrnb(nrnb, eNR_SPREADBSP,
1566                              pme->pme_order*pme->pme_order*pme->pme_order*atc->n);
1567                     if (pme->nthread == 1)
1568                     {
1569                         wrap_periodic_pmegrid(pme, grid);
1570                         /* sum contributions to local grid from other nodes */
1571 #if GMX_MPI
1572                         if (pme->nnodes > 1)
1573                         {
1574                             gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
1575                         }
1576 #endif
1577                         copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
1578                     }
1579                     wallcycle_stop(wcycle, ewcPME_SPREAD);
1580                 }
1581                 /* Here we start a large thread-parallel region */
1582 #pragma omp parallel num_threads(pme->nthread) private(thread)
1583                 {
1584                     try
1585                     {
1586                         thread = gmx_omp_get_thread_num();
1587                         if (flags & GMX_PME_SOLVE)
1588                         {
1589                             /* do 3d-fft */
1590                             if (thread == 0)
1591                             {
1592                                 wallcycle_start(wcycle, ewcPME_FFT);
1593                             }
1594
1595                             gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX,
1596                                                        thread, wcycle);
1597                             if (thread == 0)
1598                             {
1599                                 wallcycle_stop(wcycle, ewcPME_FFT);
1600                             }
1601                         }
1602                     }
1603                     GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1604                 }
1605                 bFirst = FALSE;
1606             }
1607             if (flags & GMX_PME_SOLVE)
1608             {
1609                 /* solve in k-space for our local cells */
1610 #pragma omp parallel num_threads(pme->nthread) private(thread)
1611                 {
1612                     try
1613                     {
1614                         int loop_count;
1615                         thread = gmx_omp_get_thread_num();
1616                         if (thread == 0)
1617                         {
1618                             wallcycle_start(wcycle, ewcLJPME);
1619                         }
1620
1621                         loop_count =
1622                             solve_pme_lj_yzx(pme, &pme->cfftgrid[2], TRUE,
1623                                              scaledBox[XX][XX]*scaledBox[YY][YY]*scaledBox[ZZ][ZZ],
1624                                              bCalcEnerVir,
1625                                              pme->nthread, thread);
1626                         if (thread == 0)
1627                         {
1628                             wallcycle_stop(wcycle, ewcLJPME);
1629                             inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
1630                         }
1631                     }
1632                     GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1633                 }
1634             }
1635
1636             if (bCalcEnerVir)
1637             {
1638                 /* This should only be called on the master thread and
1639                  * after the threads have synchronized.
1640                  */
1641                 get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &output[fep_state]);
1642             }
1643
1644             if (bBackFFT)
1645             {
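                     /* If the Coulomb mesh part did not run, the force buffer
                      * is still untouched, so the first LJ gather below must
                      * clear it (via bClearF); hence bFirst is reset here.
                      */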
1646                 bFirst = !pme->doCoulomb;
1647                 calc_initial_lb_coeffs(pme, local_c6, local_sigma);
1648                 for (grid_index = 8; grid_index >= 2; --grid_index)
1649                 {
1650                     /* Unpack structure */
1651                     pmegrid    = &pme->pmegrid[grid_index];
1652                     fftgrid    = pme->fftgrid[grid_index];
1653                     pfft_setup = pme->pfft_setup[grid_index];
1654                     grid       = pmegrid->grid.grid;
1655                     calc_next_lb_coeffs(pme, local_sigma);
1656 #pragma omp parallel num_threads(pme->nthread) private(thread)
1657                     {
1658                         try
1659                         {
1660                             thread = gmx_omp_get_thread_num();
1661                             /* do 3d-invfft */
1662                             if (thread == 0)
1663                             {
1664                                 wallcycle_start(wcycle, ewcPME_FFT);
1665                             }
1666
1667                             gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL,
1668                                                        thread, wcycle);
1669                             if (thread == 0)
1670                             {
1671                                 wallcycle_stop(wcycle, ewcPME_FFT);
1672
1673
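                                     /* Rough FFT operation count:
                                      * ntot*log2(ntot) per 3D transform; the
                                      * factor 2 below presumably covers the
                                      * forward/backward transform pair.
                                      */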
1674                                 if (pme->nodeid == 0)
1675                                 {
1676                                     real ntot = pme->nkx*pme->nky*pme->nkz;
1677                                     npme  = static_cast<int>(ntot*std::log(ntot)/std::log(2.0));
1678                                     inc_nrnb(nrnb, eNR_FFT, 2*npme);
1679                                 }
1680                                 wallcycle_start(wcycle, ewcPME_GATHER);
1681                             }
1682
1683                             copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
1684                         }
1685                         GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1686                     } /* #pragma omp parallel */
1687
1688                     /* distribute local grid to all nodes */
1689 #if GMX_MPI
1690                     if (pme->nnodes > 1)
1691                     {
1692                         gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
1693                     }
1694 #endif
1695
1696                     unwrap_periodic_pmegrid(pme, grid);
1697
1698                     if (bCalcF)
1699                     {
1700                         /* interpolate forces for our local atoms */
1701                         bClearF = (bFirst && PAR(cr));
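                             /* The FEP weight (1-lambda_lj for the A state,
                              * lambda_lj for the B state) is combined with
                              * lb_scale_factor[grid_index-2], which presumably
                              * carries the binomial weight C(6,k)/2^6 of term
                              * k in the LB expansion sketched above.
                              */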
1702                         scale   = pme->bFEP ? (fep_state < 1 ? 1.0-lambda_lj : lambda_lj) : 1.0;
1703                         scale  *= lb_scale_factor[grid_index-2];
1704
1705 #pragma omp parallel for num_threads(pme->nthread) schedule(static)
1706                         for (thread = 0; thread < pme->nthread; thread++)
1707                         {
1708                             try
1709                             {
1710                                 gather_f_bsplines(pme, grid, bClearF, &pme->atc[0],
1711                                                   &pme->atc[0].spline[thread],
1712                                                   scale);
1713                             }
1714                             GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1715                         }
1716
1717
1718                         inc_nrnb(nrnb, eNR_GATHERFBSP,
1719                                  pme->pme_order*pme->pme_order*pme->pme_order*pme->atc[0].n);
1720                     }
1721                     wallcycle_stop(wcycle, ewcPME_GATHER);
1722
1723                     bFirst = FALSE;
1724                 } /* for (grid_index = 8; grid_index >= 2; --grid_index) */
1725     }     /* if (bBackFFT) */
1726         }         /* for (fep_state = 0; fep_state < fep_states_lj; ++fep_state) */
1727     }             /* if (pme->doLJ && pme->ljpme_combination_rule == eljpmeLB) */
1728
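     /* With more than one PME rank the gathered forces live in per-dimension
      * communication buffers; pass them back out through the decomposition
      * dimensions, ending up in the caller's force array f + start.
      */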
1729     if (bCalcF && pme->nnodes > 1)
1730     {
1731         wallcycle_start(wcycle, ewcPME_REDISTXF);
1732         for (d = 0; d < pme->ndecompdim; d++)
1733         {
1734             atc = &pme->atc[d];
1735             if (d == pme->ndecompdim - 1)
1736             {
1737                 n_d = homenr;
1738                 f_d = f + start;
1739             }
1740             else
1741             {
1742                 n_d = pme->atc[d+1].n;
1743                 f_d = pme->atc[d+1].f;
1744             }
1745             if (DOMAINDECOMP(cr))
1746             {
1747                 dd_pmeredist_f(pme, atc, n_d, f_d,
1748                                d == pme->ndecompdim-1 && pme->bPPnode);
1749             }
1750         }
1751
1752         wallcycle_stop(wcycle, ewcPME_REDISTXF);
1753     }
1754
1755     if (bCalcEnerVir)
1756     {
1757         if (pme->doCoulomb)
1758         {
1759             if (!pme->bFEP_q)
1760             {
1761                 *energy_q = output[0].coulombEnergy_;
1762                 m_add(vir_q, output[0].coulombVirial_, vir_q);
1763             }
1764             else
1765             {
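                     /* Linear FEP coupling:
                      *   E(lambda) = (1-lambda)*E_A + lambda*E_B,
                      * so dE/dlambda = E_B - E_A, with output[0] holding the
                      * A state and output[1] the B state; the virial is
                      * interpolated the same way.
                      */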
1766                 *energy_q       = (1.0-lambda_q)*output[0].coulombEnergy_ + lambda_q*output[1].coulombEnergy_;
1767                 *dvdlambda_q   += output[1].coulombEnergy_ - output[0].coulombEnergy_;
1768                 for (i = 0; i < DIM; i++)
1769                 {
1770                     for (j = 0; j < DIM; j++)
1771                     {
1772                         vir_q[i][j] += (1.0-lambda_q)*output[0].coulombVirial_[i][j] +
1773                             lambda_q*output[1].coulombVirial_[i][j];
1774                     }
1775                 }
1776             }
1777             if (debug)
1778             {
1779                 fprintf(debug, "Electrostatic PME mesh energy: %g\n", *energy_q);
1780             }
1781         }
1782         else
1783         {
1784             *energy_q = 0;
1785         }
1786
1787         if (pme->doLJ)
1788         {
1789             if (!pme->bFEP_lj)
1790             {
1791                 *energy_lj = output[0].lennardJonesEnergy_;
1792                 m_add(vir_lj, output[0].lennardJonesVirial_, vir_lj);
1793             }
1794             else
1795             {
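                     /* Same linear lambda interpolation as for the Coulomb
                      * mesh above.
                      */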
1796                 *energy_lj     = (1.0-lambda_lj)*output[0].lennardJonesEnergy_ + lambda_lj*output[1].lennardJonesEnergy_;
1797                 *dvdlambda_lj += output[1].lennardJonesEnergy_ - output[0].lennardJonesEnergy_;
1798                 for (i = 0; i < DIM; i++)
1799                 {
1800                     for (j = 0; j < DIM; j++)
1801                     {
1802                         vir_lj[i][j] += (1.0-lambda_lj)*output[0].lennardJonesVirial_[i][j] + lambda_lj*output[1].lennardJonesVirial_[i][j];
1803                     }
1804                 }
1805             }
1806             if (debug)
1807             {
1808                 fprintf(debug, "Lennard-Jones PME mesh energy: %g\n", *energy_lj);
1809             }
1810         }
1811         else
1812         {
1813             *energy_lj = 0;
1814         }
1815     }
1816     return 0;
1817 }
1818
1819 void gmx_pme_destroy(gmx_pme_t *pme)
1820 {
1821     if (!pme)
1822     {
1823         return;
1824     }
1825
1826     delete pme->boxScaler;
1827
1828     sfree(pme->nnx);
1829     sfree(pme->nny);
1830     sfree(pme->nnz);
1831     sfree(pme->fshx);
1832     sfree(pme->fshy);
1833     sfree(pme->fshz);
1834
1835     for (int i = 0; i < pme->ngrids; ++i)
1836     {
1837         pmegrids_destroy(&pme->pmegrid[i]);
1838     }
1839     if (pme->pfft_setup)
1840     {
1841         for (int i = 0; i < pme->ngrids; ++i)
1842         {
1843             gmx_parallel_3dfft_destroy(pme->pfft_setup[i]);
1844         }
1845     }
1846     sfree(pme->fftgrid);
1847     sfree(pme->cfftgrid);
1848     sfree(pme->pfft_setup);
1849
1850     for (int i = 0; i < std::max(1, pme->ndecompdim); i++) // pme->atc[0] is always allocated
1851     {
1852         destroy_atomcomm(&pme->atc[i]);
1853     }
1854
1855     for (int i = 0; i < DIM; i++)
1856     {
1857         sfree(pme->bsp_mod[i]);
1858     }
1859
1860     sfree(pme->lb_buf1);
1861     sfree(pme->lb_buf2);
1862
1863     sfree(pme->bufv);
1864     sfree(pme->bufr);
1865
1866     if (pme->solve_work)
1867     {
1868         pme_free_all_work(&pme->solve_work, pme->nthread);
1869     }
1870
1871     sfree(pme->sum_qgrid_tmp);
1872     sfree(pme->sum_qgrid_dd_tmp);
1873
1874     destroy_pme_spline_work(pme->spline_work);
1875
1876     if (pme_gpu_active(pme) && pme->gpu)
1877     {
1878         pme_gpu_destroy(pme->gpu);
1879     }
1880
1881     delete pme;
1882 }
1883
1884 void gmx_pme_reinit_atoms(const gmx_pme_t *pme, const int nAtoms, const real *charges)
1885 {
1886     if (pme_gpu_active(pme))
1887     {
1888         pme_gpu_reinit_atoms(pme->gpu, nAtoms, charges);
1889     }
1890     // TODO: handle the CPU case here; handle the whole t_mdatoms
1891 }