src/gromacs/ewald/pme_only.cpp
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/* IMPORTANT FOR DEVELOPERS:
 *
 * Triclinic pme stuff isn't entirely trivial, and we've experienced
 * some bugs during development (many of them due to me). To avoid
 * this in the future, please check the following things if you make
 * changes in this file:
 *
 * 1. You should obtain identical (at least to the PME precision)
 *    energies, forces, and virial for
 *    a rectangular box and a triclinic one where the z (or y) axis is
 *    tilted a whole box side. For instance you could use these boxes:
 *
 *    rectangular       triclinic
 *     2  0  0           2  0  0
 *     0  2  0           0  2  0
 *     0  0  6           2  2  6
 *
 * 2. You should check the energy conservation in a triclinic box.
 *
 * It might seem like overkill, but better safe than sorry.
 * /Erik 001109
 */

#include "gmxpre.h"

#include "config.h"

#include <cassert>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>

#include <memory>
#include <numeric>
#include <vector>

#include "gromacs/domdec/domdec.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/fft/parallel_3dfft.h"
#include "gromacs/fileio/pdbio.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/gpu_utils/hostallocator.h"
#include "gromacs/math/gmxcomplex.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/state_propagator_data_gpu.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/futil.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/smalloc.h"

#include "pme_gpu_internal.h"
#include "pme_internal.h"
#include "pme_pp_communication.h"

//! Contains information about the PP ranks that partner this PME rank.
struct PpRanks
{
    //! The MPI rank ID of this partner PP rank.
    int rankId;
    //! The number of atoms to communicate with this partner PP rank.
    int numAtoms;
};

/*! \brief Master PP-PME communication data structure */
struct gmx_pme_pp {
    MPI_Comm             mpi_comm_mysim; /**< MPI communicator for this simulation */
    std::vector<PpRanks> ppRanks;        /**< The PP partner ranks                 */
    int                  peerRankId;     /**< The peer PP rank id                  */
    //@{
    /**< Vectors of A- and B-state atom parameters received from the PP ranks */
    gmx::PaddedHostVector<real> chargeA;
    std::vector<real>           chargeB;
    std::vector<real>           sqrt_c6A;
    std::vector<real>           sqrt_c6B;
    std::vector<real>           sigmaA;
    std::vector<real>           sigmaB;
    //@}
    gmx::HostVector<gmx::RVec>  x; /**< Vector of atom coordinates received from the PP ranks */
    std::vector<gmx::RVec>      f; /**< Vector of atom forces to send back to the PP ranks */
    //@{
    /**< Vectors of MPI objects used for non-blocking communication with the PP ranks served by this PME rank */
    std::vector<MPI_Request> req;
    std::vector<MPI_Status>  stat;
    //@}
};

/*! \brief Initialize the PME-only side of the PME <-> PP communication */
static std::unique_ptr<gmx_pme_pp> gmx_pme_pp_init(const t_commrec *cr)
{
    auto pme_pp = std::make_unique<gmx_pme_pp>();

#if GMX_MPI
    int rank;

    pme_pp->mpi_comm_mysim = cr->mpi_comm_mysim;
    MPI_Comm_rank(cr->mpi_comm_mygroup, &rank);
    auto ppRanks = get_pme_ddranks(cr, rank);
    pme_pp->ppRanks.reserve(ppRanks.size());
    for (const auto &ppRankId : ppRanks)
    {
        pme_pp->ppRanks.push_back({ppRankId, 0});
    }
    // The peer PP rank is the last one.
    pme_pp->peerRankId = pme_pp->ppRanks.back().rankId;
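    // One request/status slot per communication type and per partner PP rank,
    // so that all non-blocking receives of a step can be in flight at once.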
    pme_pp->req.resize(eCommType_NR*pme_pp->ppRanks.size());
    pme_pp->stat.resize(eCommType_NR*pme_pp->ppRanks.size());
#else
    GMX_UNUSED_VALUE(cr);
#endif

    return pme_pp;
}

static void reset_pmeonly_counters(gmx_wallcycle_t           wcycle,
                                   gmx_walltime_accounting_t walltime_accounting,
                                   t_nrnb                   *nrnb,
                                   int64_t                   step,
                                   bool                      useGpuForPme)
{
    /* Reset all the counters related to performance over the run */
    wallcycle_stop(wcycle, ewcRUN);
    wallcycle_reset_all(wcycle);
    *nrnb = { 0 };
    wallcycle_start(wcycle, ewcRUN);
    walltime_accounting_reset_time(walltime_accounting, step);

    if (useGpuForPme)
    {
        resetGpuProfiler();
    }
}

static gmx_pme_t *gmx_pmeonly_switch(std::vector<gmx_pme_t *> *pmedata,
                                     const ivec grid_size,
                                     real ewaldcoeff_q, real ewaldcoeff_lj,
                                     const t_commrec *cr, const t_inputrec *ir)
{
    GMX_ASSERT(pmedata, "Bad PME tuning list pointer");
    for (auto &pme : *pmedata)
    {
        GMX_ASSERT(pme, "Bad PME tuning list element pointer");
        if (pme->nkx == grid_size[XX] &&
            pme->nky == grid_size[YY] &&
            pme->nkz == grid_size[ZZ])
        {
            /* Here we have found an existing PME data structure that suits us.
             * However, in the GPU case, we have to reinitialize it - there's only one GPU structure.
             * This should not cause actual GPU reallocations, at least (the allocated buffers are never shrunk).
             * So, just some grid size updates in the GPU kernel parameters.
             * TODO: this should be something like gmx_pme_update_split_params()
             */
            gmx_pme_reinit(&pme, cr, pme, ir, grid_size, ewaldcoeff_q, ewaldcoeff_lj);
            return pme;
        }
    }

    const auto &pme          = pmedata->back();
    gmx_pme_t  *newStructure = nullptr;
    // Copy last structure with new grid params
    gmx_pme_reinit(&newStructure, cr, pme, ir, grid_size, ewaldcoeff_q, ewaldcoeff_lj);
    pmedata->push_back(newStructure);
    return newStructure;
}

/*! \brief Called by PME-only ranks to receive coefficients and coordinates
 *
 * \param[in,out] pme_pp      PME-PP communication structure.
 * \param[out] natoms         Number of received atoms.
 * \param[out] box            System box, if received.
 * \param[out] maxshift_x     Maximum shift in X direction, if received.
 * \param[out] maxshift_y     Maximum shift in Y direction, if received.
 * \param[out] lambda_q       Free-energy lambda for electrostatics, if received.
 * \param[out] lambda_lj      Free-energy lambda for Lennard-Jones, if received.
 * \param[out] bEnerVir       Set to true if this is an energy/virial calculation step, otherwise set to false.
 * \param[out] step           MD integration step number.
 * \param[out] grid_size      PME grid size, if received.
 * \param[out] ewaldcoeff_q   Ewald cut-off parameter for electrostatics, if received.
 * \param[out] ewaldcoeff_lj  Ewald cut-off parameter for Lennard-Jones, if received.
 * \param[out] atomSetChanged Set to true only if the local domain atom data (charges/coefficients)
 *                            has been received (after DD) and should be reinitialized. Otherwise not changed.
 *
 * \retval pmerecvqxX             All parameters were set, chargeA and chargeB can be NULL.
 * \retval pmerecvqxFINISH        No parameters were set.
 * \retval pmerecvqxSWITCHGRID    Only grid_size and *ewaldcoeff were set.
 * \retval pmerecvqxRESETCOUNTERS *step was set.
 */
static int gmx_pme_recv_coeffs_coords(gmx_pme_pp        *pme_pp,
                                      int               *natoms,
                                      matrix             box,
                                      int               *maxshift_x,
                                      int               *maxshift_y,
                                      real              *lambda_q,
                                      real              *lambda_lj,
                                      gmx_bool          *bEnerVir,
                                      int64_t           *step,
                                      ivec              *grid_size,
                                      real              *ewaldcoeff_q,
                                      real              *ewaldcoeff_lj,
                                      bool              *atomSetChanged)
{
    int status = -1;
    int nat    = 0;

#if GMX_MPI
    unsigned int flags    = 0;
    int          messages = 0;

    do
    {
        gmx_pme_comm_n_box_t cnb;
        cnb.flags = 0;

        /* Receive the send count, box and time step from the peer PP node */
        MPI_Recv(&cnb, sizeof(cnb), MPI_BYTE,
                 pme_pp->peerRankId, eCommType_CNB,
                 pme_pp->mpi_comm_mysim, MPI_STATUS_IGNORE);

        /* We accumulate all received flags */
        flags |= cnb.flags;

        *step  = cnb.step;

        if (debug)
        {
            fprintf(debug, "PME only rank receiving:%s%s%s%s%s\n",
                    (cnb.flags & PP_PME_CHARGE)        ? " charges" : "",
                    (cnb.flags & PP_PME_COORD )        ? " coordinates" : "",
                    (cnb.flags & PP_PME_FINISH)        ? " finish" : "",
                    (cnb.flags & PP_PME_SWITCHGRID)    ? " switch grid" : "",
                    (cnb.flags & PP_PME_RESETCOUNTERS) ? " reset counters" : "");
        }

        if (cnb.flags & PP_PME_FINISH)
        {
            status = pmerecvqxFINISH;
        }

        if (cnb.flags & PP_PME_SWITCHGRID)
        {
            /* Special case, receive the new parameters and return */
            copy_ivec(cnb.grid_size, *grid_size);
            *ewaldcoeff_q  = cnb.ewaldcoeff_q;
            *ewaldcoeff_lj = cnb.ewaldcoeff_lj;

            status         = pmerecvqxSWITCHGRID;
        }

        if (cnb.flags & PP_PME_RESETCOUNTERS)
        {
            /* Special case, receive the step (set above) and return */
            status = pmerecvqxRESETCOUNTERS;
        }

        if (cnb.flags & (PP_PME_CHARGE | PP_PME_SQRTC6 | PP_PME_SIGMA))
        {
            *atomSetChanged = true;

            /* Receive the send counts from the other PP nodes */
            for (auto &sender : pme_pp->ppRanks)
            {
                if (sender.rankId == pme_pp->peerRankId)
                {
                    sender.numAtoms = cnb.natoms;
                }
                else
                {
                    MPI_Irecv(&sender.numAtoms, sizeof(sender.numAtoms),
                              MPI_BYTE,
                              sender.rankId, eCommType_CNB,
                              pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);
                }
            }
            MPI_Waitall(messages, pme_pp->req.data(), pme_pp->stat.data());
            messages = 0;

            nat = 0;
            for (const auto &sender : pme_pp->ppRanks)
            {
                nat += sender.numAtoms;
            }

            if (cnb.flags & PP_PME_CHARGE)
            {
                pme_pp->chargeA.resizeWithPadding(nat);
            }
            if (cnb.flags & PP_PME_CHARGEB)
            {
                pme_pp->chargeB.resize(nat);
            }
            if (cnb.flags & PP_PME_SQRTC6)
            {
                pme_pp->sqrt_c6A.resize(nat);
            }
            if (cnb.flags & PP_PME_SQRTC6B)
            {
                pme_pp->sqrt_c6B.resize(nat);
            }
            if (cnb.flags & PP_PME_SIGMA)
            {
                pme_pp->sigmaA.resize(nat);
            }
            if (cnb.flags & PP_PME_SIGMAB)
            {
                pme_pp->sigmaB.resize(nat);
            }
            pme_pp->x.resize(nat);
            pme_pp->f.resize(nat);

            /* maxshift is sent when the charges are sent */
            *maxshift_x = cnb.maxshift_x;
            *maxshift_y = cnb.maxshift_y;

            /* Receive the charges in place */
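            /* The flag bit for parameter type q is PP_PME_CHARGE << q, so one
             * loop covers charges, sqrt(C6) and sigma for the A and B states. */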
            for (int q = 0; q < eCommType_NR; q++)
            {
                real *bufferPtr;

                if (!(cnb.flags & (PP_PME_CHARGE<<q)))
                {
                    continue;
                }
                switch (q)
                {
                    case eCommType_ChargeA: bufferPtr = pme_pp->chargeA.data();  break;
                    case eCommType_ChargeB: bufferPtr = pme_pp->chargeB.data();  break;
                    case eCommType_SQRTC6A: bufferPtr = pme_pp->sqrt_c6A.data(); break;
                    case eCommType_SQRTC6B: bufferPtr = pme_pp->sqrt_c6B.data(); break;
                    case eCommType_SigmaA:  bufferPtr = pme_pp->sigmaA.data();   break;
                    case eCommType_SigmaB:  bufferPtr = pme_pp->sigmaB.data();   break;
                    default: gmx_incons("Wrong eCommType");
                }
                nat = 0;
                for (const auto &sender : pme_pp->ppRanks)
                {
                    if (sender.numAtoms > 0)
                    {
                        MPI_Irecv(bufferPtr+nat,
                                  sender.numAtoms*sizeof(real),
                                  MPI_BYTE,
                                  sender.rankId, q,
                                  pme_pp->mpi_comm_mysim,
                                  &pme_pp->req[messages++]);
                        nat += sender.numAtoms;
                        if (debug)
                        {
                            fprintf(debug, "Received from PP rank %d: %d %s\n",
                                    sender.rankId, sender.numAtoms,
                                    (q == eCommType_ChargeA ||
                                     q == eCommType_ChargeB) ? "charges" : "params");
                        }
                    }
                }
            }
        }

        if (cnb.flags & PP_PME_COORD)
        {
            /* The box, FE flag and lambda are sent along with the coordinates */
            copy_mat(cnb.box, box);
            *lambda_q       = cnb.lambda_q;
            *lambda_lj      = cnb.lambda_lj;
            *bEnerVir       = ((cnb.flags & PP_PME_ENER_VIR) != 0U);
            *step           = cnb.step;

            /* Receive the coordinates in place */
            nat = 0;
            for (const auto &sender : pme_pp->ppRanks)
            {
                if (sender.numAtoms > 0)
                {
                    MPI_Irecv(pme_pp->x[nat],
                              sender.numAtoms*sizeof(rvec),
                              MPI_BYTE,
                              sender.rankId, eCommType_COORD,
                              pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);
                    nat += sender.numAtoms;
                    if (debug)
                    {
                        fprintf(debug, "Received from PP rank %d: %d "
                                "coordinates\n",
                                sender.rankId, sender.numAtoms);
                    }
                }
            }

            status = pmerecvqxX;
        }

        /* Wait for the coordinates and/or charges to arrive */
        MPI_Waitall(messages, pme_pp->req.data(), pme_pp->stat.data());
        messages = 0;
    }
    while (status == -1);
#else
    GMX_UNUSED_VALUE(pme_pp);
    GMX_UNUSED_VALUE(box);
    GMX_UNUSED_VALUE(maxshift_x);
    GMX_UNUSED_VALUE(maxshift_y);
    GMX_UNUSED_VALUE(lambda_q);
    GMX_UNUSED_VALUE(lambda_lj);
    GMX_UNUSED_VALUE(bEnerVir);
    GMX_UNUSED_VALUE(step);
    GMX_UNUSED_VALUE(grid_size);
    GMX_UNUSED_VALUE(ewaldcoeff_q);
    GMX_UNUSED_VALUE(ewaldcoeff_lj);
    GMX_UNUSED_VALUE(atomSetChanged);

    status = pmerecvqxX;
#endif

    if (status == pmerecvqxX)
    {
        *natoms   = nat;
    }

    return status;
}

/*! \brief Send the PME mesh force, virial and energy to the PP-only ranks. */
static void gmx_pme_send_force_vir_ener(gmx_pme_pp *pme_pp,
                                        const PmeOutput &output,
                                        real dvdlambda_q, real dvdlambda_lj,
                                        float cycles)
{
#if GMX_MPI
    gmx_pme_comm_vir_ene_t cve;
    int                    messages, ind_start, ind_end;
    cve.cycles = cycles;

    /* Now the evaluated forces have to be transferred to the PP nodes */
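    /* Each PP rank gets back the contiguous slice of the force array that
     * corresponds to the atoms it sent, in the order of pme_pp->ppRanks. */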
    messages = 0;
    ind_end  = 0;
    for (const auto &receiver : pme_pp->ppRanks)
    {
        ind_start = ind_end;
        ind_end   = ind_start + receiver.numAtoms;
        if (MPI_Isend(const_cast<void *>(static_cast<const void *>(output.forces_[ind_start])),
                      (ind_end-ind_start)*sizeof(rvec), MPI_BYTE,
                      receiver.rankId, 0,
                      pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]) != 0)
        {
            gmx_comm("MPI_Isend failed in do_pmeonly");
        }
    }

    /* send virial and energy to our last PP node */
    copy_mat(output.coulombVirial_, cve.vir_q);
    copy_mat(output.lennardJonesVirial_, cve.vir_lj);
    cve.energy_q     = output.coulombEnergy_;
    cve.energy_lj    = output.lennardJonesEnergy_;
    cve.dvdlambda_q  = dvdlambda_q;
    cve.dvdlambda_lj = dvdlambda_lj;
    /* check for the signals to send back to a PP node */
    cve.stop_cond = gmx_get_stop_condition();

    cve.cycles = cycles;

    if (debug)
    {
        fprintf(debug, "PME rank sending to PP rank %d: virial and energy\n",
                pme_pp->peerRankId);
    }
    MPI_Isend(&cve, sizeof(cve), MPI_BYTE,
              pme_pp->peerRankId, 1,
              pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);

    /* Wait for the force and energy sends to complete */
    MPI_Waitall(messages, pme_pp->req.data(), pme_pp->stat.data());
#else
    gmx_call("MPI not enabled");
    GMX_UNUSED_VALUE(pme_pp);
    GMX_UNUSED_VALUE(output);
    GMX_UNUSED_VALUE(dvdlambda_q);
    GMX_UNUSED_VALUE(dvdlambda_lj);
    GMX_UNUSED_VALUE(cycles);
#endif
}

int gmx_pmeonly(struct gmx_pme_t *pme,
                const t_commrec *cr, t_nrnb *mynrnb,
                gmx_wallcycle  *wcycle,
                gmx_walltime_accounting_t walltime_accounting,
                t_inputrec *ir, PmeRunMode runMode)
{
    int                ret;
    int                natoms = 0;
    matrix             box;
    real               lambda_q   = 0;
    real               lambda_lj  = 0;
    int                maxshift_x = 0, maxshift_y = 0;
    real               dvdlambda_q, dvdlambda_lj;
    float              cycles;
    int                count;
    gmx_bool           bEnerVir = FALSE;
    int64_t            step;

    /* This data is only used with PME tuning, i.e. switching PME grids */
    std::vector<gmx_pme_t *> pmedata;
    pmedata.push_back(pme);

    auto        pme_pp       = gmx_pme_pp_init(cr);
    //TODO the variable below should be queried from the task assignment info
    const bool  useGpuForPme   = (runMode == PmeRunMode::GPU) || (runMode == PmeRunMode::Mixed);
    const void *commandStream  = useGpuForPme ? pme_gpu_get_device_stream(pme) : nullptr;
    const void *deviceContext  = useGpuForPme ? pme_gpu_get_device_context(pme) : nullptr;
    const int   paddingSize    = pme_gpu_get_padding_size(pme);
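    // When running PME on a GPU, pin the host-side charge and coordinate
    // buffers so that host-device transfers can use page-locked memory.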
    if (useGpuForPme)
    {
        changePinningPolicy(&pme_pp->chargeA, pme_get_pinning_policy());
        changePinningPolicy(&pme_pp->x, pme_get_pinning_policy());
    }

    // Unconditionally initialize the StatePropagatorDataGpu object to get a more verbose message if it is used from CPU builds
    auto stateGpu = std::make_unique<gmx::StatePropagatorDataGpu>(commandStream, deviceContext, GpuApiCallBehavior::Sync, paddingSize);

    clear_nrnb(mynrnb);

    count = 0;
    do /****** this is a quasi-loop over time steps! */
    {
        /* The reason for having a loop here is PME grid tuning/switching */
        do
        {
            /* Domain decomposition */
            ivec newGridSize;
            bool atomSetChanged = false;
            real ewaldcoeff_q   = 0, ewaldcoeff_lj = 0;
            ret = gmx_pme_recv_coeffs_coords(pme_pp.get(),
                                             &natoms,
                                             box,
                                             &maxshift_x, &maxshift_y,
                                             &lambda_q, &lambda_lj,
                                             &bEnerVir,
                                             &step,
                                             &newGridSize,
                                             &ewaldcoeff_q,
                                             &ewaldcoeff_lj,
                                             &atomSetChanged);

            if (ret == pmerecvqxSWITCHGRID)
            {
                /* Switch the PME grid to newGridSize */
                pme = gmx_pmeonly_switch(&pmedata, newGridSize, ewaldcoeff_q, ewaldcoeff_lj, cr, ir);
            }

            if (atomSetChanged)
            {
                gmx_pme_reinit_atoms(pme, natoms, pme_pp->chargeA.data());
                if (useGpuForPme)
                {
                    stateGpu->reinit(natoms, natoms);
                    pme_gpu_set_device_x(pme, stateGpu->getCoordinates());
                }
            }

            if (ret == pmerecvqxRESETCOUNTERS)
            {
                /* Reset the cycle and flop counters */
                reset_pmeonly_counters(wcycle, walltime_accounting, mynrnb, step, useGpuForPme);
            }
        }
        while (ret == pmerecvqxSWITCHGRID || ret == pmerecvqxRESETCOUNTERS);

        if (ret == pmerecvqxFINISH)
        {
            /* We should stop: break out of the loop */
            break;
        }

        if (count == 0)
        {
            wallcycle_start(wcycle, ewcRUN);
            walltime_accounting_start_time(walltime_accounting);
        }

        wallcycle_start(wcycle, ewcPMEMESH);

        dvdlambda_q  = 0;
        dvdlambda_lj = 0;

        // TODO Make a struct of array refs onto these per-atom fields
        // of pme_pp (maybe box, energy and virial, too; and likewise
        // from mdatoms for the other call to gmx_pme_do), so we have
        // fewer lines of code and less parameter passing.
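        // Forces are always computed; the energy and virial are only requested
        // on steps where the PP ranks asked for them (bEnerVir).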
        const int pmeFlags = GMX_PME_DO_ALL_F | (bEnerVir ? GMX_PME_CALC_ENER_VIR : 0);
        PmeOutput output;
        if (useGpuForPme)
        {
            const bool boxChanged              = false;
            const bool useGpuPmeForceReduction = false;
            //TODO this should be set properly by gmx_pme_recv_coeffs_coords,
            // or maybe use inputrecDynamicBox(ir), at the very least - change this when this codepath is tested!
            pme_gpu_prepare_computation(pme, boxChanged, box, wcycle, pmeFlags, useGpuPmeForceReduction);
            stateGpu->copyCoordinatesToGpu(gmx::ArrayRef<gmx::RVec>(pme_pp->x), gmx::StatePropagatorDataGpu::AtomLocality::All);

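            // Launch spread, FFT + solve, and gather in the PME GPU stream, then
            // block until the forces (and energy/virial, if requested) are on the host.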
            pme_gpu_launch_spread(pme, wcycle);
            pme_gpu_launch_complex_transforms(pme, wcycle);
            pme_gpu_launch_gather(pme, wcycle, PmeForceOutputHandling::Set);
            output = pme_gpu_wait_finish_task(pme, pmeFlags, wcycle);
            pme_gpu_reinit_computation(pme, wcycle);
        }
        else
        {
            GMX_ASSERT(pme_pp->x.size() == static_cast<size_t>(natoms), "The coordinate buffer should have size natoms");

            gmx_pme_do(pme, pme_pp->x, pme_pp->f,
                       pme_pp->chargeA.data(), pme_pp->chargeB.data(),
                       pme_pp->sqrt_c6A.data(), pme_pp->sqrt_c6B.data(),
                       pme_pp->sigmaA.data(), pme_pp->sigmaB.data(), box,
                       cr, maxshift_x, maxshift_y, mynrnb, wcycle,
                       output.coulombVirial_, output.lennardJonesVirial_,
                       &output.coulombEnergy_, &output.lennardJonesEnergy_,
                       lambda_q, lambda_lj, &dvdlambda_q, &dvdlambda_lj,
                       pmeFlags);
            output.forces_ = pme_pp->f;
        }

        cycles = wallcycle_stop(wcycle, ewcPMEMESH);

        gmx_pme_send_force_vir_ener(pme_pp.get(), output,
                                    dvdlambda_q, dvdlambda_lj, cycles);

        count++;
    } /***** end of quasi-loop, we stop with the break above */
    while (TRUE);

    walltime_accounting_end_time(walltime_accounting);

    return 0;
}