/* src/gromacs/ewald/pme-only.cpp (alexxy/gromacs.git) */
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/* IMPORTANT FOR DEVELOPERS:
 *
 * Triclinic pme stuff isn't entirely trivial, and we've experienced
 * some bugs during development (many of them due to me). To avoid
 * this in the future, please check the following things if you make
 * changes in this file:
 *
 * 1. You should obtain identical (at least to the PME precision)
 *    energies, forces, and virial for
 *    a rectangular box and a triclinic one where the z (or y) axis is
 *    tilted by a whole box side. For instance you could use these boxes:
 *
 *    rectangular       triclinic
 *     2  0  0           2  0  0
 *     0  2  0           0  2  0
 *     0  0  6           2  2  6
 *
 * 2. You should check the energy conservation in a triclinic box.
 *
 * It might seem like overkill, but better safe than sorry.
 * /Erik 001109
 */
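/* A hedged, illustrative sketch of the consistency check suggested above; it
 * is not part of the build. The two box matrices are the ones from the comment,
 * and runPmeConsistencyCheck() is a hypothetical test harness, not a GROMACS
 * function.
 */
#if 0
#include "gromacs/math/vectypes.h"

static void checkTriclinicConsistency()
{
    /* Rectangular box and the equivalent triclinic box with the z axis tilted
     * by a whole box side; energies, forces and virial should agree to PME
     * precision between the two. */
    matrix boxRect      = { { 2, 0, 0 }, { 0, 2, 0 }, { 0, 0, 6 } };
    matrix boxTriclinic = { { 2, 0, 0 }, { 0, 2, 0 }, { 2, 2, 6 } };
    runPmeConsistencyCheck(boxRect, boxTriclinic); /* hypothetical harness */
}
#endif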

#include "gmxpre.h"

#include "config.h"

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <cmath>

#include <memory>
#include <numeric>
#include <vector>

#include "gromacs/compat/make_unique.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/fft/parallel_3dfft.h"
#include "gromacs/fileio/pdbio.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/gpu_utils/hostallocator.h"
#include "gromacs/math/gmxcomplex.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/futil.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/smalloc.h"

#include "pme-internal.h"
#include "pme-pp-communication.h"

//! Contains information about the PP ranks that partner this PME rank.
struct PpRanks
{
    //! The MPI rank ID of this partner PP rank.
    int rankId;
    //! The number of atoms to communicate with this partner PP rank.
    int numAtoms;
};

/*! \brief Master PP-PME communication data structure */
struct gmx_pme_pp {
    MPI_Comm             mpi_comm_mysim; /**< MPI communicator for this simulation */
    std::vector<PpRanks> ppRanks;        /**< The PP partner ranks                 */
    int                  peerRankId;     /**< The peer PP rank id                  */
    //@{
    /**< Vectors of A- and B-state atom parameters received from the PP ranks */
    gmx::HostVector<real>      chargeA;
    std::vector<real>          chargeB;
    std::vector<real>          sqrt_c6A;
    std::vector<real>          sqrt_c6B;
    std::vector<real>          sigmaA;
    std::vector<real>          sigmaB;
    //@}
    gmx::HostVector<gmx::RVec> x; /**< Vector of atom coordinates received from the PP ranks */
    std::vector<gmx::RVec>     f; /**< Vector of atom forces to send back to the PP ranks */
    //@{
    /**< Vectors of MPI objects used in non-blocking communication between multiple PP ranks per PME rank */
    std::vector<MPI_Request> req;
    std::vector<MPI_Status>  stat;
    //@}
};
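/* For reference, the number of atoms this PME rank works on is the sum of the
 * per-partner counts above. A minimal sketch of such a helper (not present in
 * the real code; the name totalAtomCount is hypothetical):
 */
#if 0
static int totalAtomCount(const gmx_pme_pp &pmePp)
{
    /* Sum the atom counts announced by the PP partner ranks. */
    return std::accumulate(pmePp.ppRanks.begin(), pmePp.ppRanks.end(), 0,
                           [](int sum, const PpRanks &rank) { return sum + rank.numAtoms; });
}
#endif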

/*! \brief Initialize the PME-only side of the PME <-> PP communication */
static std::unique_ptr<gmx_pme_pp> gmx_pme_pp_init(const t_commrec *cr)
{
    auto pme_pp = gmx::compat::make_unique<gmx_pme_pp>();

#if GMX_MPI
    int rank;

    pme_pp->mpi_comm_mysim = cr->mpi_comm_mysim;
    MPI_Comm_rank(cr->mpi_comm_mygroup, &rank);
    auto ppRanks = get_pme_ddranks(cr, rank);
    pme_pp->ppRanks.reserve(ppRanks.size());
    for (const auto &ppRankId : ppRanks)
    {
        pme_pp->ppRanks.push_back({ppRankId, 0});
    }
    // The peer PP rank is the last one.
    pme_pp->peerRankId = pme_pp->ppRanks.back().rankId;
    pme_pp->req.resize(eCommType_NR*pme_pp->ppRanks.size());
    pme_pp->stat.resize(eCommType_NR*pme_pp->ppRanks.size());
#else
    GMX_UNUSED_VALUE(cr);
#endif

    return pme_pp;
}
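/* The req/stat vectors above are sized for one outstanding request per
 * (PP rank, message type) pair. A minimal sketch of the batching pattern used
 * throughout this file (post several MPI_Irecv calls, then complete them all
 * with one MPI_Waitall); gatherInts() is hypothetical:
 */
#if 0
static void gatherInts(gmx_pme_pp *pme_pp, std::vector<int> *values)
{
    int messages = 0;
    values->resize(pme_pp->ppRanks.size());
    for (size_t i = 0; i < pme_pp->ppRanks.size(); i++)
    {
        /* One non-blocking receive per partner PP rank (illustrative tag 0) */
        MPI_Irecv(&(*values)[i], sizeof(int), MPI_BYTE,
                  pme_pp->ppRanks[i].rankId, 0,
                  pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);
    }
    /* Complete the whole batch before touching the received data */
    MPI_Waitall(messages, pme_pp->req.data(), pme_pp->stat.data());
}
#endif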

static void reset_pmeonly_counters(gmx_wallcycle_t wcycle,
                                   gmx_walltime_accounting_t walltime_accounting,
                                   t_nrnb *nrnb, t_inputrec *ir,
                                   int64_t step,
                                   bool useGpuForPme)
{
    /* Reset all the counters related to performance over the run */
    wallcycle_stop(wcycle, ewcRUN);
    wallcycle_reset_all(wcycle);
    init_nrnb(nrnb);
    if (ir->nsteps >= 0)
    {
        /* ir->nsteps is not used here, but we update it for consistency */
        ir->nsteps -= step - ir->init_step;
    }
    ir->init_step = step;
    wallcycle_start(wcycle, ewcRUN);
    walltime_accounting_start(walltime_accounting);

    if (useGpuForPme)
    {
        resetGpuProfiler();
    }
}
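/* Worked example of the nsteps bookkeeping above, with illustrative numbers:
 * for ir->nsteps = 100000, ir->init_step = 0 and a counter reset arriving at
 * step = 60000, the remaining step count becomes 100000 - (60000 - 0) = 40000
 * and ir->init_step moves to 60000, so subsequent performance accounting only
 * covers the steps after the reset.
 */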

static gmx_pme_t *gmx_pmeonly_switch(std::vector<gmx_pme_t *> *pmedata,
                                     const ivec grid_size,
                                     real ewaldcoeff_q, real ewaldcoeff_lj,
                                     const t_commrec *cr, const t_inputrec *ir)
{
    GMX_ASSERT(pmedata, "Bad PME tuning list pointer");
    for (auto &pme : *pmedata)
    {
        GMX_ASSERT(pme, "Bad PME tuning list element pointer");
        if (pme->nkx == grid_size[XX] &&
            pme->nky == grid_size[YY] &&
            pme->nkz == grid_size[ZZ])
        {
            /* Here we have found an existing PME data structure that suits us.
             * However, in the GPU case, we have to reinitialize it - there's only one GPU structure.
             * This should not cause actual GPU reallocations, at least (the allocated buffers are never shrunk).
             * So, just some grid size updates in the GPU kernel parameters.
             * TODO: this should be something like gmx_pme_update_split_params()
             */
            gmx_pme_reinit(&pme, cr, pme, ir, grid_size, ewaldcoeff_q, ewaldcoeff_lj);
            return pme;
        }
    }

    const auto &pme          = pmedata->back();
    gmx_pme_t  *newStructure = nullptr;
    // Copy last structure with new grid params
    gmx_pme_reinit(&newStructure, cr, pme, ir, grid_size, ewaldcoeff_q, ewaldcoeff_lj);
    pmedata->push_back(newStructure);
    return newStructure;
}
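/* Illustrative only: during PME tuning the list accumulates one gmx_pme_t per
 * distinct grid size, and repeating a grid size reuses (and reinitializes) the
 * existing entry rather than appending a new one. The snippet below assumes
 * pmedata, ewaldcoeff_q, ewaldcoeff_lj, cr and ir from a caller such as
 * gmx_pmeonly() further down in this file.
 */
#if 0
    ivec       grid32 = { 32, 32, 32 };
    ivec       grid28 = { 28, 28, 28 };
    gmx_pme_t *a = gmx_pmeonly_switch(&pmedata, grid32, ewaldcoeff_q, ewaldcoeff_lj, cr, ir);
    gmx_pme_t *b = gmx_pmeonly_switch(&pmedata, grid28, ewaldcoeff_q, ewaldcoeff_lj, cr, ir); /* appends */
    gmx_pme_t *c = gmx_pmeonly_switch(&pmedata, grid32, ewaldcoeff_q, ewaldcoeff_lj, cr, ir); /* reuses a's entry */
#endif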

/*! \brief Called by PME-only ranks to receive coefficients and coordinates
 *
 * \param[in,out] pme_pp      PME-PP communication structure.
 * \param[out] natoms         Number of received atoms.
 * \param[out] box            System box, if received.
 * \param[out] maxshift_x     Maximum shift in X direction, if received.
 * \param[out] maxshift_y     Maximum shift in Y direction, if received.
 * \param[out] lambda_q       Free-energy lambda for electrostatics, if received.
 * \param[out] lambda_lj      Free-energy lambda for Lennard-Jones, if received.
 * \param[out] bEnerVir       Set to true if this is an energy/virial calculation step, otherwise set to false.
 * \param[out] step           MD integration step number.
 * \param[out] grid_size      PME grid size, if received.
 * \param[out] ewaldcoeff_q   Ewald cut-off parameter for electrostatics, if received.
 * \param[out] ewaldcoeff_lj  Ewald cut-off parameter for Lennard-Jones, if received.
 * \param[out] atomSetChanged Set to true only if the local domain atom data (charges/coefficients)
 *                            has been received (after DD) and should be reinitialized. Otherwise not changed.
 *
 * \retval pmerecvqxX             All parameters were set, chargeA and chargeB can be NULL.
 * \retval pmerecvqxFINISH        No parameters were set.
 * \retval pmerecvqxSWITCHGRID    Only grid_size and *ewaldcoeff were set.
 * \retval pmerecvqxRESETCOUNTERS *step was set.
 */
static int gmx_pme_recv_coeffs_coords(gmx_pme_pp        *pme_pp,
                                      int               *natoms,
                                      matrix             box,
                                      int               *maxshift_x,
                                      int               *maxshift_y,
                                      real              *lambda_q,
                                      real              *lambda_lj,
                                      gmx_bool          *bEnerVir,
                                      int64_t           *step,
                                      ivec              *grid_size,
                                      real              *ewaldcoeff_q,
                                      real              *ewaldcoeff_lj,
                                      bool              *atomSetChanged)
{
    int status = -1;
    int nat    = 0;

#if GMX_MPI
    unsigned int flags    = 0;
    int          messages = 0;

    do
    {
        gmx_pme_comm_n_box_t cnb;
        cnb.flags = 0;

        /* Receive the send count, box and time step from the peer PP node */
        MPI_Recv(&cnb, sizeof(cnb), MPI_BYTE,
                 pme_pp->peerRankId, eCommType_CNB,
                 pme_pp->mpi_comm_mysim, MPI_STATUS_IGNORE);

        /* We accumulate all received flags */
        flags |= cnb.flags;

        *step  = cnb.step;

        if (debug)
        {
            fprintf(debug, "PME only rank receiving:%s%s%s%s%s\n",
                    (cnb.flags & PP_PME_CHARGE)        ? " charges" : "",
                    (cnb.flags & PP_PME_COORD )        ? " coordinates" : "",
                    (cnb.flags & PP_PME_FINISH)        ? " finish" : "",
                    (cnb.flags & PP_PME_SWITCHGRID)    ? " switch grid" : "",
                    (cnb.flags & PP_PME_RESETCOUNTERS) ? " reset counters" : "");
        }

        if (cnb.flags & PP_PME_FINISH)
        {
            status = pmerecvqxFINISH;
        }

        if (cnb.flags & PP_PME_SWITCHGRID)
        {
            /* Special case, receive the new parameters and return */
            copy_ivec(cnb.grid_size, *grid_size);
            *ewaldcoeff_q  = cnb.ewaldcoeff_q;
            *ewaldcoeff_lj = cnb.ewaldcoeff_lj;

            status         = pmerecvqxSWITCHGRID;
        }

        if (cnb.flags & PP_PME_RESETCOUNTERS)
        {
            /* Special case, receive the step (set above) and return */
            status = pmerecvqxRESETCOUNTERS;
        }

        if (cnb.flags & (PP_PME_CHARGE | PP_PME_SQRTC6 | PP_PME_SIGMA))
        {
            *atomSetChanged = true;

            /* Receive the send counts from the other PP nodes */
            for (auto &sender : pme_pp->ppRanks)
            {
                if (sender.rankId == pme_pp->peerRankId)
                {
                    sender.numAtoms = cnb.natoms;
                }
                else
                {
                    MPI_Irecv(&sender.numAtoms, sizeof(sender.numAtoms),
                              MPI_BYTE,
                              sender.rankId, eCommType_CNB,
                              pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);
                }
            }
            MPI_Waitall(messages, pme_pp->req.data(), pme_pp->stat.data());
            messages = 0;

            nat = 0;
            for (const auto &sender : pme_pp->ppRanks)
            {
                nat += sender.numAtoms;
            }

            if (cnb.flags & PP_PME_CHARGE)
            {
                pme_pp->chargeA.resize(nat);
            }
            if (cnb.flags & PP_PME_CHARGEB)
            {
                pme_pp->chargeB.resize(nat);
            }
            if (cnb.flags & PP_PME_SQRTC6)
            {
                pme_pp->sqrt_c6A.resize(nat);
            }
            if (cnb.flags & PP_PME_SQRTC6B)
            {
                pme_pp->sqrt_c6B.resize(nat);
            }
            if (cnb.flags & PP_PME_SIGMA)
            {
                pme_pp->sigmaA.resize(nat);
            }
            if (cnb.flags & PP_PME_SIGMAB)
            {
                pme_pp->sigmaB.resize(nat);
            }
            pme_pp->x.resize(nat);
            pme_pp->f.resize(nat);

            /* maxshift is sent when the charges are sent */
            *maxshift_x = cnb.maxshift_x;
            *maxshift_y = cnb.maxshift_y;

            /* Receive the charges in place */
            for (int q = 0; q < eCommType_NR; q++)
            {
                real *bufferPtr;

                if (!(cnb.flags & (PP_PME_CHARGE<<q)))
                {
                    continue;
                }
                switch (q)
                {
                    case eCommType_ChargeA: bufferPtr = pme_pp->chargeA.data();  break;
                    case eCommType_ChargeB: bufferPtr = pme_pp->chargeB.data();  break;
                    case eCommType_SQRTC6A: bufferPtr = pme_pp->sqrt_c6A.data(); break;
                    case eCommType_SQRTC6B: bufferPtr = pme_pp->sqrt_c6B.data(); break;
                    case eCommType_SigmaA:  bufferPtr = pme_pp->sigmaA.data();   break;
                    case eCommType_SigmaB:  bufferPtr = pme_pp->sigmaB.data();   break;
                    default: gmx_incons("Wrong eCommType");
                }
                nat = 0;
                for (const auto &sender : pme_pp->ppRanks)
                {
                    if (sender.numAtoms > 0)
                    {
                        MPI_Irecv(bufferPtr+nat,
                                  sender.numAtoms*sizeof(real),
                                  MPI_BYTE,
                                  sender.rankId, q,
                                  pme_pp->mpi_comm_mysim,
                                  &pme_pp->req[messages++]);
                        nat += sender.numAtoms;
                        if (debug)
                        {
                            fprintf(debug, "Received from PP rank %d: %d %s\n",
                                    sender.rankId, sender.numAtoms,
                                    (q == eCommType_ChargeA ||
                                     q == eCommType_ChargeB) ? "charges" : "params");
                        }
                    }
                }
            }
        }

        if (cnb.flags & PP_PME_COORD)
        {
            /* The box, FE flag and lambda are sent along with the coordinates */
            copy_mat(cnb.box, box);
            *lambda_q       = cnb.lambda_q;
            *lambda_lj      = cnb.lambda_lj;
            *bEnerVir       = (cnb.flags & PP_PME_ENER_VIR);
            *step           = cnb.step;

            /* Receive the coordinates in place */
            nat = 0;
            for (const auto &sender : pme_pp->ppRanks)
            {
                if (sender.numAtoms > 0)
                {
                    MPI_Irecv(pme_pp->x[nat], sender.numAtoms*sizeof(rvec),
                              MPI_BYTE,
                              sender.rankId, eCommType_COORD,
                              pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);
                    nat += sender.numAtoms;
                    if (debug)
                    {
                        fprintf(debug, "Received from PP rank %d: %d "
                                "coordinates\n",
                                sender.rankId, sender.numAtoms);
                    }
                }
            }

            status = pmerecvqxX;
        }

        /* Wait for the coordinates and/or charges to arrive */
        MPI_Waitall(messages, pme_pp->req.data(), pme_pp->stat.data());
        messages = 0;
    }
    while (status == -1);
#else
    GMX_UNUSED_VALUE(pme_pp);
    GMX_UNUSED_VALUE(box);
    GMX_UNUSED_VALUE(maxshift_x);
    GMX_UNUSED_VALUE(maxshift_y);
    GMX_UNUSED_VALUE(lambda_q);
    GMX_UNUSED_VALUE(lambda_lj);
    GMX_UNUSED_VALUE(bEnerVir);
    GMX_UNUSED_VALUE(step);
    GMX_UNUSED_VALUE(grid_size);
    GMX_UNUSED_VALUE(ewaldcoeff_q);
    GMX_UNUSED_VALUE(ewaldcoeff_lj);
    GMX_UNUSED_VALUE(atomSetChanged);

    status = pmerecvqxX;
#endif

    if (status == pmerecvqxX)
    {
        *natoms   = nat;
    }

    return status;
}
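/* For orientation, a much simplified sketch of what a peer PP rank sends for a
 * normal MD step; the real sender lives on the PP side of the code, not here.
 * The function name sendCoordinatesToPme is hypothetical, and the real code
 * handles error checking and the parameter/charge messages as well.
 */
#if 0
static void sendCoordinatesToPme(MPI_Comm comm, int pmeRank, int64_t step,
                                 const matrix box, const rvec *x, int natoms,
                                 real lambda_q, real lambda_lj, bool energyStep)
{
    gmx_pme_comm_n_box_t cnb;
    cnb.flags     = PP_PME_COORD | (energyStep ? PP_PME_ENER_VIR : 0);
    cnb.natoms    = natoms;
    cnb.step      = step;
    cnb.lambda_q  = lambda_q;
    cnb.lambda_lj = lambda_lj;
    copy_mat(box, cnb.box);

    /* Header first; in the real code only the last (peer) PP rank sends this,
     * matching the MPI_Recv with tag eCommType_CNB above, ... */
    MPI_Send(&cnb, sizeof(cnb), MPI_BYTE, pmeRank, eCommType_CNB, comm);
    /* ... then the coordinates themselves, matching the eCommType_COORD MPI_Irecv. */
    MPI_Send(const_cast<rvec *>(x), natoms*sizeof(rvec), MPI_BYTE,
             pmeRank, eCommType_COORD, comm);
}
#endif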

/*! \brief Send the PME mesh force, virial and energy to the PP-only ranks. */
static void gmx_pme_send_force_vir_ener(gmx_pme_pp *pme_pp,
                                        const rvec *f,
                                        matrix vir_q, real energy_q,
                                        matrix vir_lj, real energy_lj,
                                        real dvdlambda_q, real dvdlambda_lj,
                                        float cycles)
{
#if GMX_MPI
    gmx_pme_comm_vir_ene_t cve;
    int                    messages, ind_start, ind_end;
    cve.cycles = cycles;

    /* Now the evaluated forces have to be transferred to the PP nodes */
    messages = 0;
    ind_end  = 0;
    for (const auto &receiver : pme_pp->ppRanks)
    {
        ind_start = ind_end;
        ind_end   = ind_start + receiver.numAtoms;
        if (MPI_Isend(const_cast<void *>(static_cast<const void *>(f[ind_start])),
                      (ind_end-ind_start)*sizeof(rvec), MPI_BYTE,
                      receiver.rankId, 0,
                      pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]) != 0)
        {
            gmx_comm("MPI_Isend failed in do_pmeonly");
        }
    }

    /* send virial and energy to our last PP node */
    copy_mat(vir_q, cve.vir_q);
    copy_mat(vir_lj, cve.vir_lj);
    cve.energy_q     = energy_q;
    cve.energy_lj    = energy_lj;
    cve.dvdlambda_q  = dvdlambda_q;
    cve.dvdlambda_lj = dvdlambda_lj;
    /* check for the signals to send back to a PP node */
    cve.stop_cond = gmx_get_stop_condition();

    cve.cycles = cycles;

    if (debug)
    {
        fprintf(debug, "PME rank sending to PP rank %d: virial and energy\n",
                pme_pp->peerRankId);
    }
    MPI_Isend(&cve, sizeof(cve), MPI_BYTE,
              pme_pp->peerRankId, 1,
              pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);

    /* Wait for the sends of forces, virial and energy to complete */
    MPI_Waitall(messages, pme_pp->req.data(), pme_pp->stat.data());
#else
    gmx_call("MPI not enabled");
    GMX_UNUSED_VALUE(pme_pp);
    GMX_UNUSED_VALUE(f);
    GMX_UNUSED_VALUE(vir_q);
    GMX_UNUSED_VALUE(energy_q);
    GMX_UNUSED_VALUE(vir_lj);
    GMX_UNUSED_VALUE(energy_lj);
    GMX_UNUSED_VALUE(dvdlambda_q);
    GMX_UNUSED_VALUE(dvdlambda_lj);
    GMX_UNUSED_VALUE(cycles);
#endif
}
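/* Again for orientation only: on the PP side each rank posts a matching receive
 * for its own slice of the forces (tag 0), and the peer PP rank additionally
 * receives the gmx_pme_comm_vir_ene_t message (tag 1). A hypothetical,
 * simplified sketch, not the real PP-side code:
 */
#if 0
static void receivePmeForces(MPI_Comm comm, int pmeRank, rvec *f, int natoms,
                             bool isPeerRank, gmx_pme_comm_vir_ene_t *cve)
{
    /* Force slice for this PP rank; tag 0 matches the MPI_Isend above. */
    MPI_Recv(f, natoms*sizeof(rvec), MPI_BYTE, pmeRank, 0, comm, MPI_STATUS_IGNORE);
    if (isPeerRank)
    {
        /* Only the peer (last) PP rank gets the virial/energy message. */
        MPI_Recv(cve, sizeof(*cve), MPI_BYTE, pmeRank, 1, comm, MPI_STATUS_IGNORE);
    }
}
#endif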

int gmx_pmeonly(struct gmx_pme_t *pme,
                const t_commrec *cr, t_nrnb *mynrnb,
                gmx_wallcycle  *wcycle,
                gmx_walltime_accounting_t walltime_accounting,
                t_inputrec *ir, PmeRunMode runMode)
{
    int                ret;
    int                natoms = 0;
    matrix             box;
    real               lambda_q   = 0;
    real               lambda_lj  = 0;
    int                maxshift_x = 0, maxshift_y = 0;
    real               energy_q, energy_lj, dvdlambda_q, dvdlambda_lj;
    matrix             vir_q, vir_lj;
    float              cycles;
    int                count;
    gmx_bool           bEnerVir = FALSE;
    int64_t            step;

    /* This data will only be used with PME tuning, i.e. switching PME grids */
    std::vector<gmx_pme_t *> pmedata;
    pmedata.push_back(pme);

    auto       pme_pp       = gmx_pme_pp_init(cr);
    //TODO the variable below should be queried from the task assignment info
    const bool useGpuForPme = (runMode == PmeRunMode::GPU) || (runMode == PmeRunMode::Mixed);
    if (useGpuForPme)
    {
        changePinningPolicy(&pme_pp->chargeA, pme_get_pinning_policy());
        changePinningPolicy(&pme_pp->x, pme_get_pinning_policy());
    }

    init_nrnb(mynrnb);

    count = 0;
    do /****** this is a quasi-loop over time steps! */
    {
        /* The reason for having a loop here is PME grid tuning/switching */
        do
        {
            /* Domain decomposition */
            ivec newGridSize;
            bool atomSetChanged = false;
            real ewaldcoeff_q   = 0, ewaldcoeff_lj = 0;
            ret = gmx_pme_recv_coeffs_coords(pme_pp.get(),
                                             &natoms,
                                             box,
                                             &maxshift_x, &maxshift_y,
                                             &lambda_q, &lambda_lj,
                                             &bEnerVir,
                                             &step,
                                             &newGridSize,
                                             &ewaldcoeff_q,
                                             &ewaldcoeff_lj,
                                             &atomSetChanged);

            if (ret == pmerecvqxSWITCHGRID)
            {
                /* Switch the PME grid to newGridSize */
                pme = gmx_pmeonly_switch(&pmedata, newGridSize, ewaldcoeff_q, ewaldcoeff_lj, cr, ir);
            }

            if (atomSetChanged)
            {
                gmx_pme_reinit_atoms(pme, natoms, pme_pp->chargeA.data());
            }

            if (ret == pmerecvqxRESETCOUNTERS)
            {
                /* Reset the cycle and flop counters */
                reset_pmeonly_counters(wcycle, walltime_accounting, mynrnb, ir, step, useGpuForPme);
            }
        }
        while (ret == pmerecvqxSWITCHGRID || ret == pmerecvqxRESETCOUNTERS);

        if (ret == pmerecvqxFINISH)
        {
            /* We should stop: break out of the loop */
            break;
        }

        if (count == 0)
        {
            wallcycle_start(wcycle, ewcRUN);
            walltime_accounting_start(walltime_accounting);
        }

        wallcycle_start(wcycle, ewcPMEMESH);

        dvdlambda_q  = 0;
        dvdlambda_lj = 0;
        clear_mat(vir_q);
        clear_mat(vir_lj);
        energy_q  = 0;
        energy_lj = 0;

        // TODO Make a struct of array refs onto these per-atom fields
        // of pme_pp (maybe box, energy and virial, too; and likewise
        // from mdatoms for the other call to gmx_pme_do), so we have
        // fewer lines of code and less parameter passing.
        const int pmeFlags = GMX_PME_DO_ALL_F | (bEnerVir ? GMX_PME_CALC_ENER_VIR : 0);
        gmx::ArrayRef<const gmx::RVec> forces;
        if (useGpuForPme)
        {
            const bool boxChanged = false;
            //TODO this should be set properly by gmx_pme_recv_coeffs_coords,
            // or maybe use inputrecDynamicBox(ir), at the very least - change this when this codepath is tested!
            pme_gpu_prepare_computation(pme, boxChanged, box, wcycle, pmeFlags);
            pme_gpu_launch_spread(pme, as_rvec_array(pme_pp->x.data()), wcycle);
            pme_gpu_launch_complex_transforms(pme, wcycle);
            pme_gpu_launch_gather(pme, wcycle, PmeForceOutputHandling::Set);
            pme_gpu_wait_finish_task(pme, wcycle, &forces, vir_q, &energy_q);
            pme_gpu_reinit_computation(pme, wcycle);
        }
        else
        {
            gmx_pme_do(pme, 0, natoms, as_rvec_array(pme_pp->x.data()), as_rvec_array(pme_pp->f.data()),
                       pme_pp->chargeA.data(), pme_pp->chargeB.data(),
                       pme_pp->sqrt_c6A.data(), pme_pp->sqrt_c6B.data(),
                       pme_pp->sigmaA.data(), pme_pp->sigmaB.data(), box,
                       cr, maxshift_x, maxshift_y, mynrnb, wcycle,
                       vir_q, vir_lj,
                       &energy_q, &energy_lj, lambda_q, lambda_lj, &dvdlambda_q, &dvdlambda_lj,
                       pmeFlags);
            forces = pme_pp->f;
        }

        cycles = wallcycle_stop(wcycle, ewcPMEMESH);

        gmx_pme_send_force_vir_ener(pme_pp.get(), as_rvec_array(forces.data()),
                                    vir_q, energy_q, vir_lj, energy_lj,
                                    dvdlambda_q, dvdlambda_lj, cycles);

        count++;
    } /***** end of quasi-loop, we stop with the break above */
    while (TRUE);

    walltime_accounting_end(walltime_accounting);

    return 0;
}
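/* Finally, a hedged sketch of how a dedicated PME rank typically ends up in the
 * loop above: in the runner, ranks without particle-particle duty call
 * gmx_pmeonly() and stay inside it until the PP ranks send PP_PME_FINISH. The
 * duty check shown here is schematic, not the exact mdrun code.
 */
#if 0
    if (!thisRankHasPPDuty)  /* schematic condition for a PME-only rank */
    {
        /* Blocks until a finish message arrives from the peer PP rank. */
        gmx_pmeonly(pme, cr, nrnb, wcycle, walltime_accounting, inputrec, pmeRunMode);
    }
#endif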