/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief This file contains function definitions necessary for
 * computing energies and forces for the PME long-ranged part (Coulomb
 * and LJ).
 *
 * \author Erik Lindahl <erik@kth.se>
 * \author Berk Hess <hess@kth.se>
 * \ingroup module_ewald
 */
/* IMPORTANT FOR DEVELOPERS:
 *
 * Triclinic pme stuff isn't entirely trivial, and we've experienced
 * some bugs during development (many of them due to me). To avoid
 * this in the future, please check the following things if you make
 * changes in this file:
 *
 * 1. You should obtain identical (at least to the PME precision)
 *    energies, forces, and virial for
 *    a rectangular box and a triclinic one where the z (or y) axis is
 *    tilted a whole box side. For instance you could use these boxes:
 *
 *    rectangular       triclinic
 *     2  0  0           2  0  0
 *     0  2  0           0  2  0
 *     0  0  6           2  2  6
 *
 * 2. You should check the energy conservation in a triclinic box.
 *
 * It might seem like overkill, but better safe than sorry.
 * /Erik 001109
 */

#include "gmxpre.h"

#include "pme.h"

#include "config.h"

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <cmath>

#include <algorithm>
#include <list>

#include "gromacs/ewald/ewald-utils.h"
#include "gromacs/fft/parallel_3dfft.h"
#include "gromacs/fileio/pdbio.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/math/gmxcomplex.h"
#include "gromacs/math/invertmatrix.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/forcerec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/real.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/stringutil.h"
#include "gromacs/utility/unique_cptr.h"

#include "calculate-spline-moduli.h"
#include "pme-gather.h"
#include "pme-gpu-internal.h"
#include "pme-grid.h"
#include "pme-internal.h"
#include "pme-redistribute.h"
#include "pme-solve.h"
#include "pme-spread.h"

bool pme_gpu_supports_input(const t_inputrec *ir, std::string *error)
{
    std::list<std::string> errorReasons;
    if (!EEL_PME(ir->coulombtype))
    {
        errorReasons.push_back("systems that do not use PME for electrostatics");
    }
    if (ir->pme_order != 4)
    {
        errorReasons.push_back("interpolation orders other than 4");
    }
    if (ir->efep != efepNO)
    {
        errorReasons.push_back("free energy calculations (multiple grids)");
    }
    if (EVDW_PME(ir->vdwtype))
    {
        errorReasons.push_back("Lennard-Jones PME");
    }
#if GMX_DOUBLE
    {
        errorReasons.push_back("double precision");
    }
#endif
#if GMX_GPU != GMX_GPU_CUDA
    {
        errorReasons.push_back("non-CUDA build of GROMACS");
    }
#endif
    if (ir->cutoff_scheme == ecutsGROUP)
    {
        errorReasons.push_back("group cutoff scheme");
    }
    if (EI_TPI(ir->eI))
    {
        errorReasons.push_back("test particle insertion");
    }

    bool inputSupported = errorReasons.empty();
    if (!inputSupported && error)
    {
        std::string regressionTestMarker = "PME GPU does not support";
        // this prefix is tested for in the regression tests script gmxtest.pl
        *error = regressionTestMarker + ": " + gmx::joinStrings(errorReasons, "; ") + ".";
    }
    return inputSupported;
}
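
/* For reference (illustrative, not output of any particular build): with two
 * failing conditions the error string assembled above would read
 * "PME GPU does not support: free energy calculations (multiple grids);
 * Lennard-Jones PME."
 */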

/*! \brief \libinternal
 * Checks whether PME with the given inputs can run on a GPU.
 * This function is an internal final check, validating the whole PME structure on creation,
 * but it still duplicates the preliminary checks of the (externally exposed) pme_gpu_supports_input() above - just in case.
 *
 * \param[in]  pme          The PME structure.
 * \param[out] error        The error message if the input is not supported on GPU.
 * \returns                 True if this PME input can run on a GPU, false otherwise.
 */
static bool pme_gpu_check_restrictions(const gmx_pme_t *pme, std::string *error)
{
    std::list<std::string> errorReasons;
    if (pme->nnodes != 1)
    {
        errorReasons.push_back("PME decomposition");
    }
    if (pme->pme_order != 4)
    {
        errorReasons.push_back("interpolation orders other than 4");
    }
    if (pme->bFEP)
    {
        errorReasons.push_back("free energy calculations (multiple grids)");
    }
    if (pme->doLJ)
    {
        errorReasons.push_back("Lennard-Jones PME");
    }
#if GMX_DOUBLE
    {
        errorReasons.push_back("double precision");
    }
#endif
#if GMX_GPU != GMX_GPU_CUDA
    {
        errorReasons.push_back("non-CUDA build of GROMACS");
    }
#endif

    bool inputSupported = errorReasons.empty();
    if (!inputSupported && error)
    {
        std::string regressionTestMarker = "PME GPU does not support";
        // this prefix is tested for in the regression tests script gmxtest.pl
        *error = regressionTestMarker + ": " + gmx::joinStrings(errorReasons, "; ") + ".";
    }
    return inputSupported;
}

PmeRunMode pme_run_mode(const gmx_pme_t *pme)
{
    GMX_ASSERT(pme != nullptr, "Expecting valid PME data pointer");
    return pme->runMode;
}

/*! \brief Number of bytes in a cache line.
 *
 * Must also be a multiple of the SIMD and SIMD4 register size, to
 * preserve alignment.
 */
const int gmxCacheLineSize = 64;

//! Set up coordinate communication
static void setup_coordinate_communication(pme_atomcomm_t *atc)
{
    int nslab, n, i;
    int fw, bw;

    nslab = atc->nslab;

    n = 0;
    for (i = 1; i <= nslab/2; i++)
    {
        fw = (atc->nodeid + i) % nslab;
        bw = (atc->nodeid - i + nslab) % nslab;
        if (n < nslab - 1)
        {
            atc->node_dest[n] = fw;
            atc->node_src[n]  = bw;
            n++;
        }
        if (n < nslab - 1)
        {
            atc->node_dest[n] = bw;
            atc->node_src[n]  = fw;
            n++;
        }
    }
}
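
/* For example (illustrative): with nslab = 4 and nodeid = 0 the loop above
 * yields node_dest = {1, 3, 2} and node_src = {3, 1, 2}, i.e. alternating
 * forward and backward ring neighbors, with the opposite slab listed once.
 */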

/*! \brief Round \p n up to the next multiple of \p f */
static int mult_up(int n, int f)
{
    return ((n + f - 1)/f)*f;
}
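/* Example: mult_up(10, 4) == 12 and mult_up(8, 4) == 8. */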

/*! \brief Return estimate of the load imbalance from the PME grid not being a good match for the number of PME ranks */
static double estimate_pme_load_imbalance(struct gmx_pme_t *pme)
{
    int    nma, nmi;
    double n1, n2, n3;

    nma = pme->nnodes_major;
    nmi = pme->nnodes_minor;

    n1 = mult_up(pme->nkx, nma)*mult_up(pme->nky, nmi)*pme->nkz;
    n2 = mult_up(pme->nkx, nma)*mult_up(pme->nkz, nmi)*pme->nky;
    n3 = mult_up(pme->nky, nma)*mult_up(pme->nkz, nmi)*pme->nkx;

    /* pme_solve is roughly double the cost of an fft */

    return (n1 + n2 + 3*n3)/(double)(6*pme->nkx*pme->nky*pme->nkz);
}
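
/* Illustrative arithmetic (hypothetical numbers): for a 25x25x25 grid on a
 * 2x2 PME rank grid, mult_up(25, 2) = 26, so n1 = n2 = n3 = 26*26*25 = 16900
 * and the estimate is 5*16900 / (6*15625) ~= 0.90; for grids divisible by
 * the rank counts the padded and actual sizes coincide.
 */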

/*! \brief Initialize atom communication data structure */
static void init_atomcomm(struct gmx_pme_t *pme, pme_atomcomm_t *atc,
                          int dimind, gmx_bool bSpread)
{
    int thread;

    atc->dimind    = dimind;
    atc->nslab     = 1;
    atc->nodeid    = 0;
    atc->pd_nalloc = 0;
#if GMX_MPI
    if (pme->nnodes > 1)
    {
        atc->mpi_comm = pme->mpi_comm_d[dimind];
        MPI_Comm_size(atc->mpi_comm, &atc->nslab);
        MPI_Comm_rank(atc->mpi_comm, &atc->nodeid);
    }
    if (debug)
    {
        fprintf(debug, "For PME atom communication in dimind %d: nslab %d rank %d\n", atc->dimind, atc->nslab, atc->nodeid);
    }
#endif

    atc->bSpread   = bSpread;
    atc->pme_order = pme->pme_order;

    if (atc->nslab > 1)
    {
        snew(atc->node_dest, atc->nslab);
        snew(atc->node_src, atc->nslab);
        setup_coordinate_communication(atc);

        snew(atc->count_thread, pme->nthread);
        for (thread = 0; thread < pme->nthread; thread++)
        {
            snew(atc->count_thread[thread], atc->nslab);
        }
        atc->count = atc->count_thread[0];
        snew(atc->rcount, atc->nslab);
        snew(atc->buf_index, atc->nslab);
    }

    atc->nthread = pme->nthread;
    if (atc->nthread > 1)
    {
        snew(atc->thread_plist, atc->nthread);
    }
    snew(atc->spline, atc->nthread);
    for (thread = 0; thread < atc->nthread; thread++)
    {
        if (atc->nthread > 1)
        {
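            /* The per-thread counts are padded by gmxCacheLineSize elements
             * on each side, presumably to keep each thread's data on its own
             * cache lines (avoiding false sharing); destroy_atomcomm()
             * subtracts the same offset again before freeing.
             */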
            snew(atc->thread_plist[thread].n, atc->nthread+2*gmxCacheLineSize);
            atc->thread_plist[thread].n += gmxCacheLineSize;
        }
    }
}

/*! \brief Destroy an atom communication data structure and its child structs */
static void destroy_atomcomm(pme_atomcomm_t *atc)
{
    sfree(atc->pd);
    if (atc->nslab > 1)
    {
        sfree(atc->node_dest);
        sfree(atc->node_src);
        for (int i = 0; i < atc->nthread; i++)
        {
            sfree(atc->count_thread[i]);
        }
        sfree(atc->count_thread);
        sfree(atc->rcount);
        sfree(atc->buf_index);

        sfree(atc->x);
        sfree(atc->coefficient);
        sfree(atc->f);
    }
    sfree(atc->idx);
    sfree(atc->fractx);

    sfree(atc->thread_idx);
    for (int i = 0; i < atc->nthread; i++)
    {
        if (atc->nthread > 1)
        {
            int *n_ptr = atc->thread_plist[i].n - gmxCacheLineSize;
            sfree(n_ptr);
            sfree(atc->thread_plist[i].i);
        }
        sfree(atc->spline[i].ind);
        for (int d = 0; d < ZZ; d++)
        {
            sfree(atc->spline[i].theta[d]);
            sfree(atc->spline[i].dtheta[d]);
        }
        sfree_aligned(atc->spline[i].ptr_dtheta_z);
        sfree_aligned(atc->spline[i].ptr_theta_z);
    }
    if (atc->nthread > 1)
    {
        sfree(atc->thread_plist);
    }
    sfree(atc->spline);
}

/*! \brief Initialize data structure for communication */
static void
init_overlap_comm(pme_overlap_t *  ol,
                  int              norder,
#if GMX_MPI
                  MPI_Comm         comm,
#endif
                  int              nnodes,
                  int              nodeid,
                  int              ndata,
                  int              commplainsize)
{
    gmx_bool         bCont;
#if GMX_MPI
    MPI_Status       stat;

    ol->mpi_comm = comm;
#endif

    ol->nnodes = nnodes;
    ol->nodeid = nodeid;

    /* Linear translation of the PME grid won't affect reciprocal space
     * calculations, so to optimize we only interpolate "upwards",
     * which also means we only have to consider overlap in one direction.
     * I.e., particles on this node might also be spread to grid indices
     * that belong to higher nodes (modulo nnodes)
     */

    ol->s2g0.resize(ol->nnodes + 1);
    ol->s2g1.resize(ol->nnodes);
    if (debug)
    {
        fprintf(debug, "PME slab boundaries:");
    }
    for (int i = 0; i < nnodes; i++)
    {
        /* s2g0 the local interpolation grid start.
         * s2g1 the local interpolation grid end.
         * Since in calc_pidx we divide particles, and not grid lines,
         * spatially uniform along dimension x or y, we need to round
         * s2g0 down and s2g1 up.
         */
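        /* Worked example (hypothetical numbers): ndata = 10, nnodes = 3 and
         * norder = 4 give s2g0 = {0, 3, 6} and s2g1 = {7, 10, 13}; each
         * slab's interpolation grid extends norder - 1 lines beyond the
         * rounded-up division point to make room for spline spreading.
         */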
        ol->s2g0[i] = (i * ndata + 0) / nnodes;
        ol->s2g1[i] = ((i + 1) * ndata + nnodes - 1) / nnodes + norder - 1;

        if (debug)
        {
            fprintf(debug, "  %3d %3d", ol->s2g0[i], ol->s2g1[i]);
        }
    }
    ol->s2g0[nnodes] = ndata;
    if (debug)
    {
        fprintf(debug, "\n");
    }

    /* Determine how many ranks we need to communicate the grid overlap with */
    int testRankCount = 0;
    do
    {
        testRankCount++;
        bCont = FALSE;
        for (int i = 0; i < nnodes; i++)
        {
            if ((i + testRankCount <  nnodes && ol->s2g1[i] > ol->s2g0[i + testRankCount]) ||
                (i + testRankCount >= nnodes && ol->s2g1[i] > ol->s2g0[i + testRankCount - nnodes] + ndata))
            {
                bCont = TRUE;
            }
        }
    }
    while (bCont && testRankCount < nnodes);
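
    /* With the worked example above (ndata = 10, nnodes = 3, norder = 4),
     * the overlap of slab 0 (s2g1[0] = 7) reaches past the start of slab 2
     * (s2g0[2] = 6), so testRankCount ends up as 3 and two communication
     * pulses are set up below.
     */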

    ol->comm_data.resize(testRankCount - 1);
    ol->send_size = 0;

    for (size_t b = 0; b < ol->comm_data.size(); b++)
    {
        pme_grid_comm_t *pgc = &ol->comm_data[b];

        /* Send */
        pgc->send_id = (ol->nodeid + (b + 1)) % ol->nnodes;
        int fft_start = ol->s2g0[pgc->send_id];
        int fft_end   = ol->s2g0[pgc->send_id + 1];
        if (pgc->send_id < nodeid)
        {
            fft_start += ndata;
            fft_end   += ndata;
        }
        int send_index1  = ol->s2g1[nodeid];
        send_index1      = std::min(send_index1, fft_end);
        pgc->send_index0 = fft_start;
        pgc->send_nindex = std::max(0, send_index1 - pgc->send_index0);
        ol->send_size   += pgc->send_nindex;

        /* We always start receiving to the first index of our slab */
        pgc->recv_id     = (ol->nodeid - (b + 1) + ol->nnodes) % ol->nnodes;
        fft_start        = ol->s2g0[ol->nodeid];
        fft_end          = ol->s2g0[ol->nodeid + 1];
        int recv_index1  = ol->s2g1[pgc->recv_id];
        if (pgc->recv_id > nodeid)
        {
            recv_index1 -= ndata;
        }
        recv_index1      = std::min(recv_index1, fft_end);
        pgc->recv_index0 = fft_start;
        pgc->recv_nindex = std::max(0, recv_index1 - pgc->recv_index0);
    }

#if GMX_MPI
    /* Communicate the buffer sizes to receive */
    for (size_t b = 0; b < ol->comm_data.size(); b++)
    {
        MPI_Sendrecv(&ol->send_size, 1, MPI_INT, ol->comm_data[b].send_id, b,
                     &ol->comm_data[b].recv_size, 1, MPI_INT, ol->comm_data[b].recv_id, b,
                     ol->mpi_comm, &stat);
    }
#endif

    /* For a non-divisible grid we need pme_order instead of pme_order-1 */
    ol->sendbuf.resize(norder * commplainsize);
    ol->recvbuf.resize(norder * commplainsize);
}

int minimalPmeGridSize(int pmeOrder)
{
    /* The actual grid size limitations are:
     *   serial:        >= pme_order
     *   DD, no OpenMP: >= 2*(pme_order - 1)
     *   DD, OpenMP:    >= pme_order + 1
     * But we use the maximum for simplicity since in practice there is not
     * much performance difference between pme_order and 2*(pme_order -1).
     */
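    /* For example, the default pmeOrder = 4 gives a minimal grid size of 6. */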
    int minimalSize = 2*(pmeOrder - 1);

    GMX_RELEASE_ASSERT(pmeOrder >= 3, "pmeOrder has to be >= 3");
    GMX_RELEASE_ASSERT(minimalSize >= pmeOrder + 1, "The grid size should be >= pmeOrder + 1");

    return minimalSize;
}

bool gmx_pme_check_restrictions(int pme_order,
                                int nkx, int nky, int nkz,
                                int nnodes_major,
                                bool useThreads,
                                bool errorsAreFatal)
{
    if (pme_order > PME_ORDER_MAX)
    {
        if (!errorsAreFatal)
        {
            return false;
        }

        std::string message = gmx::formatString(
                    "pme_order (%d) is larger than the maximum allowed value (%d). Modify and recompile the code if you really need such a high order.",
                    pme_order, PME_ORDER_MAX);
        GMX_THROW(InconsistentInputError(message));
    }

    const int minGridSize = minimalPmeGridSize(pme_order);
    if (nkx < minGridSize ||
        nky < minGridSize ||
        nkz < minGridSize)
    {
        if (!errorsAreFatal)
        {
            return false;
        }
        std::string message = gmx::formatString(
                    "The PME grid sizes need to be >= 2*(pme_order-1) (%d)",
                    minGridSize);
        GMX_THROW(InconsistentInputError(message));
    }

    /* Check for a limitation of the (current) sum_fftgrid_dd code.
     * We only allow multiple communication pulses in dim 1, not in dim 0.
     */
    if (useThreads && (nkx < nnodes_major*pme_order &&
                       nkx != nnodes_major*(pme_order - 1)))
    {
        if (!errorsAreFatal)
        {
            return false;
        }
        gmx_fatal(FARGS, "The number of PME grid lines per rank along x is %g. But when using OpenMP threads, the number of grid lines per rank along x should be >= pme_order (%d) or = pme_order-1. To resolve this issue, use fewer ranks along x (and possibly more along y and/or z) by specifying -dd manually.",
                  nkx/(double)nnodes_major, pme_order);
    }

    return true;
}

/*! \brief Divide \p enumerator by \p denominator, rounding up */
static int div_round_up(int enumerator, int denominator)
{
    return (enumerator + denominator - 1)/denominator;
}
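/* Example: div_round_up(10, 4) == 3. */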

gmx_pme_t *gmx_pme_init(const t_commrec     *cr,
                        int                  nnodes_major,
                        int                  nnodes_minor,
                        const t_inputrec    *ir,
                        int                  homenr,
                        gmx_bool             bFreeEnergy_q,
                        gmx_bool             bFreeEnergy_lj,
                        gmx_bool             bReproducible,
                        real                 ewaldcoeff_q,
                        real                 ewaldcoeff_lj,
                        int                  nthread,
                        PmeRunMode           runMode,
                        PmeGpu              *pmeGpu,
                        gmx_device_info_t   *gpuInfo,
                        const gmx::MDLogger  & /*mdlog*/)
{
    int               use_threads, sum_use_threads, i;
    ivec              ndata;

    if (debug)
    {
        fprintf(debug, "Creating PME data structures.\n");
    }

    unique_cptr<gmx_pme_t, gmx_pme_destroy> pme(new gmx_pme_t());

    pme->sum_qgrid_tmp       = nullptr;
    pme->sum_qgrid_dd_tmp    = nullptr;

    pme->buf_nalloc          = 0;

    pme->nnodes              = 1;
    pme->bPPnode             = TRUE;

    pme->nnodes_major        = nnodes_major;
    pme->nnodes_minor        = nnodes_minor;

#if GMX_MPI
    if (nnodes_major*nnodes_minor > 1)
    {
        pme->mpi_comm = cr->mpi_comm_mygroup;

        MPI_Comm_rank(pme->mpi_comm, &pme->nodeid);
        MPI_Comm_size(pme->mpi_comm, &pme->nnodes);
        if (pme->nnodes != nnodes_major*nnodes_minor)
        {
            gmx_incons("PME rank count mismatch");
        }
    }
    else
    {
        pme->mpi_comm = MPI_COMM_NULL;
    }
#endif

    if (pme->nnodes == 1)
    {
#if GMX_MPI
        pme->mpi_comm_d[0] = pme->mpi_comm_d[1] = MPI_COMM_NULL;
#endif
        pme->ndecompdim   = 0;
        pme->nodeid_major = 0;
        pme->nodeid_minor = 0;
    }
    else
    {
        if (nnodes_minor == 1)
        {
#if GMX_MPI
            pme->mpi_comm_d[0] = pme->mpi_comm;
            pme->mpi_comm_d[1] = MPI_COMM_NULL;
#endif
            pme->ndecompdim   = 1;
            pme->nodeid_major = pme->nodeid;
            pme->nodeid_minor = 0;

        }
        else if (nnodes_major == 1)
        {
#if GMX_MPI
            pme->mpi_comm_d[0] = MPI_COMM_NULL;
            pme->mpi_comm_d[1] = pme->mpi_comm;
#endif
            pme->ndecompdim   = 1;
            pme->nodeid_major = 0;
            pme->nodeid_minor = pme->nodeid;
        }
        else
        {
            if (pme->nnodes % nnodes_major != 0)
            {
                gmx_incons("For 2D PME decomposition, #PME ranks must be divisible by the number of ranks in the major dimension");
            }
            pme->ndecompdim = 2;

#if GMX_MPI
            MPI_Comm_split(pme->mpi_comm, pme->nodeid % nnodes_minor,
                           pme->nodeid, &pme->mpi_comm_d[0]);  /* My communicator along major dimension */
            MPI_Comm_split(pme->mpi_comm, pme->nodeid/nnodes_minor,
                           pme->nodeid, &pme->mpi_comm_d[1]);  /* My communicator along minor dimension */
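
            /* For example (illustrative): with 6 PME ranks, nnodes_major = 3
             * and nnodes_minor = 2, the splits above group ranks {0,2,4} and
             * {1,3,5} along the major dimension, and {0,1}, {2,3}, {4,5}
             * along the minor dimension.
             */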

            MPI_Comm_rank(pme->mpi_comm_d[0], &pme->nodeid_major);
            MPI_Comm_size(pme->mpi_comm_d[0], &pme->nnodes_major);
            MPI_Comm_rank(pme->mpi_comm_d[1], &pme->nodeid_minor);
            MPI_Comm_size(pme->mpi_comm_d[1], &pme->nnodes_minor);
#endif
        }
        pme->bPPnode = thisRankHasDuty(cr, DUTY_PP);
    }

    pme->nthread = nthread;

    /* Check if any of the PME MPI ranks uses threads */
    use_threads = (pme->nthread > 1 ? 1 : 0);
#if GMX_MPI
    if (pme->nnodes > 1)
    {
        MPI_Allreduce(&use_threads, &sum_use_threads, 1, MPI_INT,
                      MPI_SUM, pme->mpi_comm);
    }
    else
#endif
    {
        sum_use_threads = use_threads;
    }
    pme->bUseThreads = (sum_use_threads > 0);

    if (ir->ePBC == epbcSCREW)
    {
        gmx_fatal(FARGS, "pme does not (yet) work with pbc = screw");
    }

    /* NOTE:
     * It is likely that the current gmx_pme_do() routine supports calculating
     * only Coulomb or LJ while gmx_pme_init() configures for both,
     * but that has never been tested.
     * It is also likely that the current gmx_pme_do() routine supports skipping
     * the free-energy calculation for Coulomb and/or LJ while gmx_pme_init()
     * configures with free energy, but that has never been tested.
     */
    pme->doCoulomb     = EEL_PME(ir->coulombtype);
    pme->doLJ          = EVDW_PME(ir->vdwtype);
    pme->bFEP_q        = ((ir->efep != efepNO) && bFreeEnergy_q);
    pme->bFEP_lj       = ((ir->efep != efepNO) && bFreeEnergy_lj);
    pme->bFEP          = (pme->bFEP_q || pme->bFEP_lj);
    pme->nkx           = ir->nkx;
    pme->nky           = ir->nky;
    pme->nkz           = ir->nkz;
    pme->bP3M          = (ir->coulombtype == eelP3M_AD || getenv("GMX_PME_P3M") != nullptr);
    pme->pme_order     = ir->pme_order;
    pme->ewaldcoeff_q  = ewaldcoeff_q;
    pme->ewaldcoeff_lj = ewaldcoeff_lj;

    /* Always constant electrostatics coefficients */
    pme->epsilon_r     = ir->epsilon_r;

    /* Always constant LJ coefficients */
    pme->ljpme_combination_rule = ir->ljpme_combination_rule;

    // The box requires scaling with nwalls = 2, we store that condition as well
    // as the scaling factor
    delete pme->boxScaler;
    pme->boxScaler = new EwaldBoxZScaler(*ir);

    /* If we violate restrictions, generate a fatal error here */
    gmx_pme_check_restrictions(pme->pme_order,
                               pme->nkx, pme->nky, pme->nkz,
                               pme->nnodes_major,
                               pme->bUseThreads,
                               true);

    if (pme->nnodes > 1)
    {
        double imbal;

#if GMX_MPI
        MPI_Type_contiguous(DIM, GMX_MPI_REAL, &(pme->rvec_mpi));
        MPI_Type_commit(&(pme->rvec_mpi));
#endif

        /* Note that the coefficient spreading and force gathering, which usually
         * takes about the same amount of time as FFT+solve_pme,
         * is always fully load balanced
         * (unless the coefficient distribution is inhomogeneous).
         */

        imbal = estimate_pme_load_imbalance(pme.get());
        if (imbal >= 1.2 && pme->nodeid_major == 0 && pme->nodeid_minor == 0)
        {
            fprintf(stderr,
                    "\n"
                    "NOTE: The load imbalance in PME FFT and solve is %d%%.\n"
                    "      For optimal PME load balancing\n"
                    "      PME grid_x (%d) and grid_y (%d) should be divisible by #PME_ranks_x (%d)\n"
                    "      and PME grid_y (%d) and grid_z (%d) should be divisible by #PME_ranks_y (%d)\n"
                    "\n",
                    (int)((imbal-1)*100 + 0.5),
                    pme->nkx, pme->nky, pme->nnodes_major,
                    pme->nky, pme->nkz, pme->nnodes_minor);
        }
    }

    /* For a non-divisible grid we need pme_order instead of pme_order-1 */
    /* In sum_qgrid_dd x overlap is copied in place: take padding into account.
     * y is always copied through a buffer: we don't need padding in z,
     * but we do need the overlap in x because of the communication order.
     */
    init_overlap_comm(&pme->overlap[0], pme->pme_order,
#if GMX_MPI
                      pme->mpi_comm_d[0],
#endif
                      pme->nnodes_major, pme->nodeid_major,
                      pme->nkx,
                      (div_round_up(pme->nky, pme->nnodes_minor)+pme->pme_order)*(pme->nkz+pme->pme_order-1));

    /* Along overlap dim 1 we can send in multiple pulses in sum_fftgrid_dd.
     * We do this with an offset buffer of equal size, so we need to allocate
     * extra for the offset. That's what the (+1)*pme->nkz is for.
     */
    init_overlap_comm(&pme->overlap[1], pme->pme_order,
#if GMX_MPI
                      pme->mpi_comm_d[1],
#endif
                      pme->nnodes_minor, pme->nodeid_minor,
                      pme->nky,
                      (div_round_up(pme->nkx, pme->nnodes_major)+pme->pme_order+1)*pme->nkz);

    /* Double-check for a limitation of the (current) sum_fftgrid_dd code.
     * Note that gmx_pme_check_restrictions checked for this already.
     */
    if (pme->bUseThreads && (pme->overlap[0].comm_data.size() > 1))
    {
        gmx_incons("More than one communication pulse required for grid overlap communication along the major dimension while using threads");
    }

    snew(pme->bsp_mod[XX], pme->nkx);
    snew(pme->bsp_mod[YY], pme->nky);
    snew(pme->bsp_mod[ZZ], pme->nkz);

    pme->gpu     = pmeGpu; /* Carrying over the single GPU structure */
    pme->runMode = runMode;

    /* The required size of the interpolation grid, including overlap.
     * The allocated size (pmegrid_n?) might be slightly larger.
     */
    pme->pmegrid_nx = pme->overlap[0].s2g1[pme->nodeid_major] -
        pme->overlap[0].s2g0[pme->nodeid_major];
    pme->pmegrid_ny = pme->overlap[1].s2g1[pme->nodeid_minor] -
        pme->overlap[1].s2g0[pme->nodeid_minor];
    pme->pmegrid_nz_base = pme->nkz;
    pme->pmegrid_nz      = pme->pmegrid_nz_base + pme->pme_order - 1;
    set_grid_alignment(&pme->pmegrid_nz, pme->pme_order);
    pme->pmegrid_start_ix = pme->overlap[0].s2g0[pme->nodeid_major];
    pme->pmegrid_start_iy = pme->overlap[1].s2g0[pme->nodeid_minor];
    pme->pmegrid_start_iz = 0;

    make_gridindex_to_localindex(pme->nkx,
                                 pme->pmegrid_start_ix,
                                 pme->pmegrid_nx - (pme->pme_order-1),
                                 &pme->nnx, &pme->fshx);
    make_gridindex_to_localindex(pme->nky,
                                 pme->pmegrid_start_iy,
                                 pme->pmegrid_ny - (pme->pme_order-1),
                                 &pme->nny, &pme->fshy);
    make_gridindex_to_localindex(pme->nkz,
                                 pme->pmegrid_start_iz,
                                 pme->pmegrid_nz_base,
                                 &pme->nnz, &pme->fshz);

    pme->spline_work = make_pme_spline_work(pme->pme_order);

    ndata[0]    = pme->nkx;
    ndata[1]    = pme->nky;
    ndata[2]    = pme->nkz;
    /* It doesn't matter if we allocate too many grids here,
     * we only allocate and use the ones we need.
     */
    if (pme->doLJ)
    {
        pme->ngrids = ((ir->ljpme_combination_rule == eljpmeLB) ? DO_Q_AND_LJ_LB : DO_Q_AND_LJ);
    }
    else
    {
        pme->ngrids = DO_Q;
    }
    snew(pme->fftgrid, pme->ngrids);
    snew(pme->cfftgrid, pme->ngrids);
    snew(pme->pfft_setup, pme->ngrids);

    for (i = 0; i < pme->ngrids; ++i)
    {
        if ((i <  DO_Q && pme->doCoulomb && (i == 0 ||
                                             bFreeEnergy_q)) ||
            (i >= DO_Q && pme->doLJ && (i == 2 ||
                                        bFreeEnergy_lj ||
                                        ir->ljpme_combination_rule == eljpmeLB)))
        {
            pmegrids_init(&pme->pmegrid[i],
                          pme->pmegrid_nx, pme->pmegrid_ny, pme->pmegrid_nz,
                          pme->pmegrid_nz_base,
                          pme->pme_order,
                          pme->bUseThreads,
                          pme->nthread,
                          pme->overlap[0].s2g1[pme->nodeid_major]-pme->overlap[0].s2g0[pme->nodeid_major+1],
                          pme->overlap[1].s2g1[pme->nodeid_minor]-pme->overlap[1].s2g0[pme->nodeid_minor+1]);
            /* This routine will allocate the grid data to fit the FFTs */
            const auto allocateRealGridForGpu = (pme->runMode == PmeRunMode::Mixed) ? gmx::PinningPolicy::CanBePinned : gmx::PinningPolicy::CannotBePinned;
            gmx_parallel_3dfft_init(&pme->pfft_setup[i], ndata,
                                    &pme->fftgrid[i], &pme->cfftgrid[i],
                                    pme->mpi_comm_d,
                                    bReproducible, pme->nthread, allocateRealGridForGpu);

        }
    }

    if (!pme->bP3M)
    {
        /* Use plain SPME B-spline interpolation */
        make_bspline_moduli(pme->bsp_mod, pme->nkx, pme->nky, pme->nkz, pme->pme_order);
    }
    else
    {
        /* Use the P3M grid-optimized influence function */
        make_p3m_bspline_moduli(pme->bsp_mod, pme->nkx, pme->nky, pme->nkz, pme->pme_order);
    }

    /* Use atc[0] for spreading */
    init_atomcomm(pme.get(), &pme->atc[0], nnodes_major > 1 ? 0 : 1, TRUE);
    if (pme->ndecompdim >= 2)
    {
        init_atomcomm(pme.get(), &pme->atc[1], 1, FALSE);
    }

    if (pme->nnodes == 1)
    {
        pme->atc[0].n = homenr;
        pme_realloc_atomcomm_things(&pme->atc[0]);
    }

    pme->lb_buf1       = nullptr;
    pme->lb_buf2       = nullptr;
    pme->lb_buf_nalloc = 0;

    if (pme_gpu_active(pme.get()))
    {
        if (!pme->gpu)
        {
            // Initial check of validity of the data
            std::string errorString;
            bool        canRunOnGpu = pme_gpu_check_restrictions(pme.get(), &errorString);
            if (!canRunOnGpu)
            {
                GMX_THROW(gmx::NotImplementedError(errorString));
            }
        }

        pme_gpu_reinit(pme.get(), gpuInfo);
    }

    pme_init_all_work(&pme->solve_work, pme->nthread, pme->nkx);

    // no exception was thrown during the init, so we hand over the PME structure handle
    return pme.release();
}

void gmx_pme_reinit(struct gmx_pme_t **pmedata,
                    const t_commrec   *cr,
                    struct gmx_pme_t * pme_src,
                    const t_inputrec * ir,
                    const ivec         grid_size,
                    real               ewaldcoeff_q,
                    real               ewaldcoeff_lj)
{
    int        homenr;

    // Create a copy of t_inputrec fields that are used in gmx_pme_init().
    // TODO: This would be better as just copying a sub-structure that contains
    // all the PME parameters and nothing else.
    t_inputrec irc;
    irc.ePBC                   = ir->ePBC;
    irc.coulombtype            = ir->coulombtype;
    irc.vdwtype                = ir->vdwtype;
    irc.efep                   = ir->efep;
    irc.pme_order              = ir->pme_order;
    irc.epsilon_r              = ir->epsilon_r;
    irc.ljpme_combination_rule = ir->ljpme_combination_rule;
    irc.nkx                    = grid_size[XX];
    irc.nky                    = grid_size[YY];
    irc.nkz                    = grid_size[ZZ];

    if (pme_src->nnodes == 1)
    {
        homenr = pme_src->atc[0].n;
    }
    else
    {
        homenr = -1;
    }

    try
    {
        const gmx::MDLogger dummyLogger;
        // This is a reinit, which currently only changes the grid size/coefficients,
        // so we don't expect any actual logging.
        // TODO: when PME is an object, it should take a reference to mdlog on construction and save it.
        GMX_ASSERT(pmedata, "Invalid PME pointer");
        *pmedata = gmx_pme_init(cr, pme_src->nnodes_major, pme_src->nnodes_minor,
                                &irc, homenr, pme_src->bFEP_q, pme_src->bFEP_lj, FALSE, ewaldcoeff_q, ewaldcoeff_lj,
                                pme_src->nthread, pme_src->runMode, pme_src->gpu, nullptr, dummyLogger);
        //TODO this is mostly passing around current values
    }
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;

    /* We can easily reuse the allocated pme grids in pme_src */
    reuse_pmegrids(&pme_src->pmegrid[PME_GRID_QA], &(*pmedata)->pmegrid[PME_GRID_QA]);
    /* We would like to reuse the fft grids, but that's harder */
}

void gmx_pme_calc_energy(struct gmx_pme_t *pme, int n, rvec *x, real *q, real *V)
{
    pme_atomcomm_t *atc;
    pmegrids_t     *grid;

    if (pme->nnodes > 1)
    {
        gmx_incons("gmx_pme_calc_energy called in parallel");
    }
    if (pme->bFEP_q)
    {
        gmx_incons("gmx_pme_calc_energy with free energy");
    }

    atc            = &pme->atc_energy;
    atc->nthread   = 1;
    if (atc->spline == nullptr)
    {
        snew(atc->spline, atc->nthread);
    }
    atc->nslab     = 1;
    atc->bSpread   = TRUE;
    atc->pme_order = pme->pme_order;
    atc->n         = n;
    pme_realloc_atomcomm_things(atc);
    atc->x           = x;
    atc->coefficient = q;

    /* We only use the A-charges grid */
    grid = &pme->pmegrid[PME_GRID_QA];

    /* Only calculate the spline coefficients, don't actually spread */
    spread_on_grid(pme, atc, nullptr, TRUE, FALSE, pme->fftgrid[PME_GRID_QA], FALSE, PME_GRID_QA);

    *V = gather_energy_bsplines(pme, grid->grid.grid, atc);
}

/*! \brief Calculate initial Lorentz-Berthelot coefficients for LJ-PME */
static void
calc_initial_lb_coeffs(struct gmx_pme_t *pme, real *local_c6, real *local_sigma)
{
    int  i;
    for (i = 0; i < pme->atc[0].n; ++i)
    {
        real sigma4;
        sigma4                     = local_sigma[i];
        sigma4                     = sigma4*sigma4;
        sigma4                     = sigma4*sigma4;
        pme->atc[0].coefficient[i] = local_c6[i] / sigma4;
    }
}

/*! \brief Calculate next Lorentz-Berthelot coefficients for LJ-PME */
static void
calc_next_lb_coeffs(struct gmx_pme_t *pme, real *local_sigma)
{
    int  i;

    for (i = 0; i < pme->atc[0].n; ++i)
    {
        pme->atc[0].coefficient[i] *= local_sigma[i];
    }
}
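
/* Taken together (summary derived from the two functions above):
 * calc_initial_lb_coeffs sets coefficient[i] = c6[i] / sigma[i]^4 and each
 * subsequent calc_next_lb_coeffs call multiplies by sigma[i], so the seven
 * LJ-PME spreading passes below use c6[i] * sigma[i]^p for p = -3 .. 3.
 */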

int gmx_pme_do(struct gmx_pme_t *pme,
               int start,       int homenr,
               rvec x[],        rvec f[],
               real chargeA[],  real chargeB[],
               real c6A[],      real c6B[],
               real sigmaA[],   real sigmaB[],
               matrix box,      const t_commrec *cr,
               int  maxshift_x, int maxshift_y,
               t_nrnb *nrnb,    gmx_wallcycle *wcycle,
               matrix vir_q,    matrix vir_lj,
               real *energy_q,  real *energy_lj,
               real lambda_q,   real lambda_lj,
               real *dvdlambda_q, real *dvdlambda_lj,
               int flags)
{
    GMX_ASSERT(pme->runMode == PmeRunMode::CPU, "gmx_pme_do should not be called on the GPU PME run.");

    int                  d, i, j, npme, grid_index, max_grid_index;
    int                  n_d;
    pme_atomcomm_t      *atc        = nullptr;
    pmegrids_t          *pmegrid    = nullptr;
    real                *grid       = nullptr;
    rvec                *f_d;
    real                *coefficient = nullptr;
    real                 energy_AB[4];
    matrix               vir_AB[4];
    real                 scale, lambda;
    gmx_bool             bClearF;
    gmx_parallel_3dfft_t pfft_setup;
    real              *  fftgrid;
    t_complex          * cfftgrid;
    int                  thread;
    gmx_bool             bFirst, bDoSplines;
    int                  fep_state;
    int                  fep_states_lj           = pme->bFEP_lj ? 2 : 1;
    const gmx_bool       bCalcEnerVir            = flags & GMX_PME_CALC_ENER_VIR;
    const gmx_bool       bBackFFT                = flags & (GMX_PME_CALC_F | GMX_PME_CALC_POT);
    const gmx_bool       bCalcF                  = flags & GMX_PME_CALC_F;

    assert(pme->nnodes > 0);
    assert(pme->nnodes == 1 || pme->ndecompdim > 0);

    if (pme->nnodes > 1)
    {
        atc      = &pme->atc[0];
        atc->npd = homenr;
        if (atc->npd > atc->pd_nalloc)
        {
            atc->pd_nalloc = over_alloc_dd(atc->npd);
            srenew(atc->pd, atc->pd_nalloc);
        }
        for (d = pme->ndecompdim-1; d >= 0; d--)
        {
            atc           = &pme->atc[d];
            atc->maxshift = (atc->dimind == 0 ? maxshift_x : maxshift_y);
        }
    }
    else
    {
        atc = &pme->atc[0];
        /* This could be necessary for TPI */
        pme->atc[0].n = homenr;
        if (DOMAINDECOMP(cr))
        {
            pme_realloc_atomcomm_things(atc);
        }
        atc->x = x;
        atc->f = f;
    }

    matrix scaledBox;
    pme->boxScaler->scaleBox(box, scaledBox);

    gmx::invertBoxMatrix(scaledBox, pme->recipbox);
    bFirst = TRUE;

    /* For simplicity, we construct the splines for all particles if
     * more than one PME calculation is needed. Some optimization
     * could be done by keeping track of which atoms have splines
     * constructed, and construct new splines on each pass for atoms
     * that don't yet have them.
     */

    bDoSplines = pme->bFEP || (pme->doCoulomb && pme->doLJ);

    /* We need a maximum of four separate PME calculations:
     * grid_index=0: Coulomb PME with charges from state A
     * grid_index=1: Coulomb PME with charges from state B
     * grid_index=2: LJ PME with C6 from state A
     * grid_index=3: LJ PME with C6 from state B
     * For Lorentz-Berthelot combination rules, a separate loop is used to
     * calculate all the terms
     */

    /* If we are doing LJ-PME with LB, we only do Q here */
    max_grid_index = (pme->ljpme_combination_rule == eljpmeLB) ? DO_Q : DO_Q_AND_LJ;

    for (grid_index = 0; grid_index < max_grid_index; ++grid_index)
    {
        /* Check if we should do calculations at this grid_index
         * If grid_index is odd we should be doing FEP
         * If grid_index < 2 we should be doing electrostatic PME
         * If grid_index >= 2 we should be doing LJ-PME
         */
        if ((grid_index <  DO_Q && (!pme->doCoulomb ||
                                    (grid_index == 1 && !pme->bFEP_q))) ||
            (grid_index >= DO_Q && (!pme->doLJ ||
                                    (grid_index == 3 && !pme->bFEP_lj))))
        {
            continue;
        }
        /* Unpack structure */
        pmegrid    = &pme->pmegrid[grid_index];
        fftgrid    = pme->fftgrid[grid_index];
        cfftgrid   = pme->cfftgrid[grid_index];
        pfft_setup = pme->pfft_setup[grid_index];
        switch (grid_index)
        {
            case 0: coefficient = chargeA + start; break;
            case 1: coefficient = chargeB + start; break;
            case 2: coefficient = c6A + start; break;
            case 3: coefficient = c6B + start; break;
        }

        grid = pmegrid->grid.grid;

        if (debug)
        {
            fprintf(debug, "PME: number of ranks = %d, rank = %d\n",
                    cr->nnodes, cr->nodeid);
            fprintf(debug, "Grid = %p\n", (void*)grid);
            if (grid == nullptr)
            {
                gmx_fatal(FARGS, "No grid!");
            }
        }

        if (pme->nnodes == 1)
        {
            atc->coefficient = coefficient;
        }
        else
        {
            wallcycle_start(wcycle, ewcPME_REDISTXF);
            do_redist_pos_coeffs(pme, cr, start, homenr, bFirst, x, coefficient);

            wallcycle_stop(wcycle, ewcPME_REDISTXF);
        }

        if (debug)
        {
            fprintf(debug, "Rank= %6d, pme local particles=%6d\n",
                    cr->nodeid, atc->n);
        }

        if (flags & GMX_PME_SPREAD)
        {
            wallcycle_start(wcycle, ewcPME_SPREAD);

            /* Spread the coefficients on a grid */
            spread_on_grid(pme, &pme->atc[0], pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);

            if (bFirst)
            {
                inc_nrnb(nrnb, eNR_WEIGHTS, DIM*atc->n);
            }
            inc_nrnb(nrnb, eNR_SPREADBSP,
                     pme->pme_order*pme->pme_order*pme->pme_order*atc->n);

            if (!pme->bUseThreads)
            {
                wrap_periodic_pmegrid(pme, grid);

                /* sum contributions to local grid from other nodes */
#if GMX_MPI
                if (pme->nnodes > 1)
                {
                    gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
                }
#endif

                copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
            }

            wallcycle_stop(wcycle, ewcPME_SPREAD);

            /* TODO If the OpenMP and single-threaded implementations
               converge, then spread_on_grid() and
               copy_pmegrid_to_fftgrid() will perhaps live in the same
               source file.
             */
        }

        /* Here we start a large thread parallel region */
#pragma omp parallel num_threads(pme->nthread) private(thread)
        {
            try
            {
                thread = gmx_omp_get_thread_num();
                if (flags & GMX_PME_SOLVE)
                {
                    int loop_count;

                    /* do 3d-fft */
                    if (thread == 0)
                    {
                        wallcycle_start(wcycle, ewcPME_FFT);
                    }
                    gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX,
                                               thread, wcycle);
                    if (thread == 0)
                    {
                        wallcycle_stop(wcycle, ewcPME_FFT);
                    }

                    /* solve in k-space for our local cells */
                    if (thread == 0)
                    {
                        wallcycle_start(wcycle, (grid_index < DO_Q ? ewcPME_SOLVE : ewcLJPME));
                    }
                    if (grid_index < DO_Q)
                    {
                        loop_count =
                            solve_pme_yzx(pme, cfftgrid,
                                          scaledBox[XX][XX]*scaledBox[YY][YY]*scaledBox[ZZ][ZZ],
                                          bCalcEnerVir,
                                          pme->nthread, thread);
                    }
                    else
                    {
                        loop_count =
                            solve_pme_lj_yzx(pme, &cfftgrid, FALSE,
                                             scaledBox[XX][XX]*scaledBox[YY][YY]*scaledBox[ZZ][ZZ],
                                             bCalcEnerVir,
                                             pme->nthread, thread);
                    }

                    if (thread == 0)
                    {
                        wallcycle_stop(wcycle, (grid_index < DO_Q ? ewcPME_SOLVE : ewcLJPME));
                        inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
                    }
                }

                if (bBackFFT)
                {
                    /* do 3d-invfft */
                    if (thread == 0)
                    {
                        wallcycle_start(wcycle, ewcPME_FFT);
                    }
                    gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL,
                                               thread, wcycle);
                    if (thread == 0)
                    {
                        wallcycle_stop(wcycle, ewcPME_FFT);


                        if (pme->nodeid == 0)
                        {
                            real ntot = pme->nkx*pme->nky*pme->nkz;
                            npme  = static_cast<int>(ntot*std::log(ntot)/std::log(2.0));
                            inc_nrnb(nrnb, eNR_FFT, 2*npme);
                        }

                        /* Note: this wallcycle region is closed below
                           outside an OpenMP region, so take care if
                           refactoring code here. */
                        wallcycle_start(wcycle, ewcPME_GATHER);
                    }

                    copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
                }
            } GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
        }
        /* End of thread parallel section.
         * With MPI we have to synchronize here before gmx_sum_qgrid_dd.
         */

        if (bBackFFT)
        {
            /* distribute local grid to all nodes */
#if GMX_MPI
            if (pme->nnodes > 1)
            {
                gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
            }
#endif

            unwrap_periodic_pmegrid(pme, grid);
        }

        if (bCalcF)
        {
            /* interpolate forces for our local atoms */


            /* If we are running without parallelization,
             * atc->f is the actual force array, not a buffer,
             * therefore we should not clear it.
             */
            lambda  = grid_index < DO_Q ? lambda_q : lambda_lj;
            bClearF = (bFirst && PAR(cr));
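            /* With FEP, even grid indices carry A-state coefficients and are
             * scaled by 1-lambda, odd indices carry B-state coefficients and
             * are scaled by lambda (see the scale argument below).
             */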
1381 #pragma omp parallel for num_threads(pme->nthread) schedule(static)
1382             for (thread = 0; thread < pme->nthread; thread++)
1383             {
1384                 try
1385                 {
1386                     gather_f_bsplines(pme, grid, bClearF, atc,
1387                                       &atc->spline[thread],
1388                                       pme->bFEP ? (grid_index % 2 == 0 ? 1.0-lambda : lambda) : 1.0);
1389                 }
1390                 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1391             }
1392
1393
1394             inc_nrnb(nrnb, eNR_GATHERFBSP,
1395                      pme->pme_order*pme->pme_order*pme->pme_order*pme->atc[0].n);
1396             /* Note: this wallcycle region is opened above inside an OpenMP
1397                region, so take care if refactoring code here. */
1398             wallcycle_stop(wcycle, ewcPME_GATHER);
1399         }
1400
1401         if (bCalcEnerVir)
1402         {
1403             /* This should only be called on the master thread
1404              * and after the threads have synchronized.
1405              */
1406             if (grid_index < 2)
1407             {
1408                 get_pme_ener_vir_q(pme->solve_work, pme->nthread, &energy_AB[grid_index], vir_AB[grid_index]);
1409             }
1410             else
1411             {
1412                 get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &energy_AB[grid_index], vir_AB[grid_index]);
1413             }
1414         }
1415         bFirst = FALSE;
1416     } /* of grid_index-loop */
1417
1418     /* For Lorentz-Berthelot combination rules in LJ-PME, we need to calculate
1419      * seven terms. */
1420
1421     if (pme->doLJ && pme->ljpme_combination_rule == eljpmeLB)
1422     {
1423         /* Loop over A- and B-state if we are doing FEP */
1424         for (fep_state = 0; fep_state < fep_states_lj; ++fep_state)
1425         {
1426             real *local_c6 = nullptr, *local_sigma = nullptr, *RedistC6 = nullptr, *RedistSigma = nullptr;
1427             if (pme->nnodes == 1)
1428             {
1429                 if (pme->lb_buf1 == nullptr)
1430                 {
1431                     pme->lb_buf_nalloc = pme->atc[0].n;
1432                     snew(pme->lb_buf1, pme->lb_buf_nalloc);
1433                 }
1434                 pme->atc[0].coefficient = pme->lb_buf1;
1435                 switch (fep_state)
1436                 {
1437                     case 0:
1438                         local_c6      = c6A;
1439                         local_sigma   = sigmaA;
1440                         break;
1441                     case 1:
1442                         local_c6      = c6B;
1443                         local_sigma   = sigmaB;
1444                         break;
1445                     default:
1446                         gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
1447                 }
1448             }
1449             else
1450             {
1451                 atc = &pme->atc[0];
1452                 switch (fep_state)
1453                 {
1454                     case 0:
1455                         RedistC6      = c6A;
1456                         RedistSigma   = sigmaA;
1457                         break;
1458                     case 1:
1459                         RedistC6      = c6B;
1460                         RedistSigma   = sigmaB;
1461                         break;
1462                     default:
1463                         gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
1464                 }
1465                 wallcycle_start(wcycle, ewcPME_REDISTXF);
1466
1467                 do_redist_pos_coeffs(pme, cr, start, homenr, bFirst, x, RedistC6);
1468                 if (pme->lb_buf_nalloc < atc->n)
1469                 {
1470                     pme->lb_buf_nalloc = atc->nalloc;
1471                     srenew(pme->lb_buf1, pme->lb_buf_nalloc);
1472                     srenew(pme->lb_buf2, pme->lb_buf_nalloc);
1473                 }
1474                 local_c6 = pme->lb_buf1;
1475                 for (i = 0; i < atc->n; ++i)
1476                 {
1477                     local_c6[i] = atc->coefficient[i];
1478                 }
1479
1480                 do_redist_pos_coeffs(pme, cr, start, homenr, FALSE, x, RedistSigma);
1481                 local_sigma = pme->lb_buf2;
1482                 for (i = 0; i < atc->n; ++i)
1483                 {
1484                     local_sigma[i] = atc->coefficient[i];
1485                 }
1486
1487                 wallcycle_stop(wcycle, ewcPME_REDISTXF);
1488             }
1489             calc_initial_lb_coeffs(pme, local_c6, local_sigma);
1490
1491             /* Seven terms in LJ-PME with LB; grid_index < 2 is reserved for electrostatics */
1492             for (grid_index = 2; grid_index < 9; ++grid_index)
1493             {
1494                 /* Unpack structure */
1495                 pmegrid    = &pme->pmegrid[grid_index];
1496                 fftgrid    = pme->fftgrid[grid_index];
1497                 pfft_setup = pme->pfft_setup[grid_index];
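                     /* calc_next_lb_coeffs multiplies one more factor of sigma
                      * into the per-atom coefficients, advancing to the next
                      * term of the binomial expansion. */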
1498                 calc_next_lb_coeffs(pme, local_sigma);
1499                 grid = pmegrid->grid.grid;
1500
1501                 if (flags & GMX_PME_SPREAD)
1502                 {
1503                     wallcycle_start(wcycle, ewcPME_SPREAD);
1504                     /* Spread the current LB coefficients on a grid */
1505                     spread_on_grid(pme, &pme->atc[0], pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);
1506
1507                     if (bFirst)
1508                     {
1509                         inc_nrnb(nrnb, eNR_WEIGHTS, DIM*atc->n);
1510                     }
1511
1512                     inc_nrnb(nrnb, eNR_SPREADBSP,
1513                              pme->pme_order*pme->pme_order*pme->pme_order*atc->n);
1514                     if (pme->nthread == 1)
1515                     {
1516                         wrap_periodic_pmegrid(pme, grid);
1517                         /* sum contributions to local grid from other nodes */
1518 #if GMX_MPI
1519                         if (pme->nnodes > 1)
1520                         {
1521                             gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
1522                         }
1523 #endif
1524                         copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
1525                     }
1526                     wallcycle_stop(wcycle, ewcPME_SPREAD);
1527                 }
1528                 /* Here we start a large thread-parallel region */
1529 #pragma omp parallel num_threads(pme->nthread) private(thread)
1530                 {
1531                     try
1532                     {
1533                         thread = gmx_omp_get_thread_num();
1534                         if (flags & GMX_PME_SOLVE)
1535                         {
1536                             /* do the forward (real-to-complex) 3D FFT */
1537                             if (thread == 0)
1538                             {
1539                                 wallcycle_start(wcycle, ewcPME_FFT);
1540                             }
1541
1542                             gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX,
1543                                                        thread, wcycle);
1544                             if (thread == 0)
1545                             {
1546                                 wallcycle_stop(wcycle, ewcPME_FFT);
1547                             }
1548                         }
1549                     }
1550                     GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1551                 }
1552                 bFirst = FALSE;
1553             }
1554             if (flags & GMX_PME_SOLVE)
1555             {
1556                 /* solve in k-space for our local cells */
1557 #pragma omp parallel num_threads(pme->nthread) private(thread)
1558                 {
1559                     try
1560                     {
1561                         int loop_count;
1562                         thread = gmx_omp_get_thread_num();
1563                         if (thread == 0)
1564                         {
1565                             wallcycle_start(wcycle, ewcLJPME);
1566                         }
1567
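                             /* The GROMACS box matrix is triangular, so the
                              * volume passed here is just the product of the
                              * diagonal elements of the scaled box. */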
1568                         loop_count =
1569                             solve_pme_lj_yzx(pme, &pme->cfftgrid[2], TRUE,
1570                                              scaledBox[XX][XX]*scaledBox[YY][YY]*scaledBox[ZZ][ZZ],
1571                                              bCalcEnerVir,
1572                                              pme->nthread, thread);
1573                         if (thread == 0)
1574                         {
1575                             wallcycle_stop(wcycle, ewcLJPME);
1576                             inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
1577                         }
1578                     }
1579                     GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1580                 }
1581             }
1582
1583             if (bCalcEnerVir)
1584             {
1585                 /* This should only be called on the master thread and
1586                  * after the threads have synchronized.
1587                  */
1588                 get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &energy_AB[2+fep_state], vir_AB[2+fep_state]);
1589             }
1590
1591             if (bBackFFT)
1592             {
1593                 bFirst = !pme->doCoulomb;
1594                 calc_initial_lb_coeffs(pme, local_c6, local_sigma);
1595                 for (grid_index = 8; grid_index >= 2; --grid_index)
1596                 {
1597                     /* Unpack structure */
1598                     pmegrid    = &pme->pmegrid[grid_index];
1599                     fftgrid    = pme->fftgrid[grid_index];
1600                     pfft_setup = pme->pfft_setup[grid_index];
1601                     grid       = pmegrid->grid.grid;
1602                     calc_next_lb_coeffs(pme, local_sigma);
1603 #pragma omp parallel num_threads(pme->nthread) private(thread)
1604                     {
1605                         try
1606                         {
1607                             thread = gmx_omp_get_thread_num();
1608                             /* do the inverse (complex-to-real) 3D FFT */
1609                             if (thread == 0)
1610                             {
1611                                 wallcycle_start(wcycle, ewcPME_FFT);
1612                             }
1613
1614                             gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL,
1615                                                        thread, wcycle);
1616                             if (thread == 0)
1617                             {
1618                                 wallcycle_stop(wcycle, ewcPME_FFT);
1619
1621                                 if (pme->nodeid == 0)
1622                                 {
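                                          /* Estimate the FFT operation count
                                           * as ntot*log2(ntot); the factor of
                                           * 2 below presumably covers the
                                           * forward and backward transforms. */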
1623                                     real ntot = pme->nkx*pme->nky*pme->nkz;
1624                                     npme  = static_cast<int>(ntot*std::log(ntot)/std::log(2.0));
1625                                     inc_nrnb(nrnb, eNR_FFT, 2*npme);
1626                                 }
1627                                 wallcycle_start(wcycle, ewcPME_GATHER);
1628                             }
1629
1630                             copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
1631                         }
1632                         GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1633                     } /*#pragma omp parallel*/
1634
1635                     /* distribute local grid to all nodes */
1636 #if GMX_MPI
1637                     if (pme->nnodes > 1)
1638                     {
1639                         gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
1640                     }
1641 #endif
1642
1643                     unwrap_periodic_pmegrid(pme, grid);
1644
1645                     if (bCalcF)
1646                     {
1647                         /* interpolate forces for our local atoms */
1648                         bClearF = (bFirst && PAR(cr));
1649                         scale   = pme->bFEP ? (fep_state < 1 ? 1.0-lambda_lj : lambda_lj) : 1.0;
1650                         scale  *= lb_scale_factor[grid_index-2];
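                             /* scale now combines the FEP lambda weight with
                              * this grid's weight in the (sigma_i + sigma_j)^6
                              * expansion; lb_scale_factor presumably holds the
                              * binomial factors C(6,k)/2^6. */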
1651
1652 #pragma omp parallel for num_threads(pme->nthread) schedule(static)
1653                         for (thread = 0; thread < pme->nthread; thread++)
1654                         {
1655                             try
1656                             {
1657                                 gather_f_bsplines(pme, grid, bClearF, &pme->atc[0],
1658                                                   &pme->atc[0].spline[thread],
1659                                                   scale);
1660                             }
1661                             GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1662                         }
1663
1665                         inc_nrnb(nrnb, eNR_GATHERFBSP,
1666                                  pme->pme_order*pme->pme_order*pme->pme_order*pme->atc[0].n);
1667                     }
1668                     wallcycle_stop(wcycle, ewcPME_GATHER);
1669
1670                     bFirst = FALSE;
1671                 } /* for (grid_index = 8; grid_index >= 2; --grid_index) */
1672             }     /* if (bBackFFT) */
1673         }         /* for (fep_state = 0; fep_state < fep_states_lj; ++fep_state) */
1674     }             /* if (pme->doLJ && pme->ljpme_combination_rule == eljpmeLB) */
1675
1676     if (bCalcF && pme->nnodes > 1)
1677     {
1678         wallcycle_start(wcycle, ewcPME_REDISTXF);
1679         for (d = 0; d < pme->ndecompdim; d++)
1680         {
1681             atc = &pme->atc[d];
1682             if (d == pme->ndecompdim - 1)
1683             {
1684                 n_d = homenr;
1685                 f_d = f + start;
1686             }
1687             else
1688             {
1689                 n_d = pme->atc[d+1].n;
1690                 f_d = pme->atc[d+1].f;
1691             }
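             /* Pass the forces back along the decomposition dimensions; for
              * the last dimension they go directly into the caller's force
              * array via f_d = f + start above. */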
1692             if (DOMAINDECOMP(cr))
1693             {
1694                 dd_pmeredist_f(pme, atc, n_d, f_d,
1695                                d == pme->ndecompdim-1 && pme->bPPnode);
1696             }
1697         }
1698
1699         wallcycle_stop(wcycle, ewcPME_REDISTXF);
1700     }
1701
1702     if (bCalcEnerVir)
1703     {
1704         if (pme->doCoulomb)
1705         {
1706             if (!pme->bFEP_q)
1707             {
1708                 *energy_q = energy_AB[0];
1709                 m_add(vir_q, vir_AB[0], vir_q);
1710             }
1711             else
1712             {
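                 /* Linear interpolation in lambda:
                  * E = (1-lambda)*E_A + lambda*E_B, so dE/dlambda = E_B - E_A;
                  * the virial is mixed with the same weights. */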
1713                 *energy_q       = (1.0-lambda_q)*energy_AB[0] + lambda_q*energy_AB[1];
1714                 *dvdlambda_q   += energy_AB[1] - energy_AB[0];
1715                 for (i = 0; i < DIM; i++)
1716                 {
1717                     for (j = 0; j < DIM; j++)
1718                     {
1719                         vir_q[i][j] += (1.0-lambda_q)*vir_AB[0][i][j] +
1720                             lambda_q*vir_AB[1][i][j];
1721                     }
1722                 }
1723             }
1724             if (debug)
1725             {
1726                 fprintf(debug, "Electrostatic PME mesh energy: %g\n", *energy_q);
1727             }
1728         }
1729         else
1730         {
1731             *energy_q = 0;
1732         }
1733
1734         if (pme->doLJ)
1735         {
1736             if (!pme->bFEP_lj)
1737             {
1738                 *energy_lj = energy_AB[2];
1739                 m_add(vir_lj, vir_AB[2], vir_lj);
1740             }
1741             else
1742             {
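                 /* Same linear lambda interpolation as for the Coulomb terms above. */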
1743                 *energy_lj     = (1.0-lambda_lj)*energy_AB[2] + lambda_lj*energy_AB[3];
1744                 *dvdlambda_lj += energy_AB[3] - energy_AB[2];
1745                 for (i = 0; i < DIM; i++)
1746                 {
1747                     for (j = 0; j < DIM; j++)
1748                     {
1749                         vir_lj[i][j] += (1.0-lambda_lj)*vir_AB[2][i][j] + lambda_lj*vir_AB[3][i][j];
1750                     }
1751                 }
1752             }
1753             if (debug)
1754             {
1755                 fprintf(debug, "Lennard-Jones PME mesh energy: %g\n", *energy_lj);
1756             }
1757         }
1758         else
1759         {
1760             *energy_lj = 0;
1761         }
1762     }
1763     return 0;
1764 }
1765
1766 void gmx_pme_destroy(gmx_pme_t *pme)
1767 {
1768     if (!pme)
1769     {
1770         return;
1771     }
1772
1773     delete pme->boxScaler;
1774
1775     sfree(pme->nnx);
1776     sfree(pme->nny);
1777     sfree(pme->nnz);
1778     sfree(pme->fshx);
1779     sfree(pme->fshy);
1780     sfree(pme->fshz);
1781
1782     for (int i = 0; i < pme->ngrids; ++i)
1783     {
1784         pmegrids_destroy(&pme->pmegrid[i]);
1785     }
1786     if (pme->pfft_setup)
1787     {
1788         for (int i = 0; i < pme->ngrids; ++i)
1789         {
1790             gmx_parallel_3dfft_destroy(pme->pfft_setup[i]);
1791         }
1792     }
1793     sfree(pme->fftgrid);
1794     sfree(pme->cfftgrid);
1795     sfree(pme->pfft_setup);
1796
1797     for (int i = 0; i < std::max(1, pme->ndecompdim); i++) // pme->atc[0] is always allocated
1798     {
1799         destroy_atomcomm(&pme->atc[i]);
1800     }
1801
1802     for (int i = 0; i < DIM; i++)
1803     {
1804         sfree(pme->bsp_mod[i]);
1805     }
1806
1807     sfree(pme->lb_buf1);
1808     sfree(pme->lb_buf2);
1809
1810     sfree(pme->bufv);
1811     sfree(pme->bufr);
1812
1813     if (pme->solve_work)
1814     {
1815         pme_free_all_work(&pme->solve_work, pme->nthread);
1816     }
1817
1818     sfree(pme->sum_qgrid_tmp);
1819     sfree(pme->sum_qgrid_dd_tmp);
1820
1821     destroy_pme_spline_work(pme->spline_work);
1822
1823     if (pme_gpu_active(pme) && pme->gpu)
1824     {
1825         pme_gpu_destroy(pme->gpu);
1826     }
1827
1828     delete pme;
1829 }
1830
1831 void gmx_pme_reinit_atoms(const gmx_pme_t *pme, const int nAtoms, const real *charges)
1832 {
1833     if (pme_gpu_active(pme))
1834     {
1835         pme_gpu_reinit_atoms(pme->gpu, nAtoms, charges);
1836     }
1837     // TODO: handle the CPU case here; handle the whole t_mdatoms
1838 }