/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017 by the GROMACS development team.
 * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/* IMPORTANT FOR DEVELOPERS:
 *
 * Triclinic pme stuff isn't entirely trivial, and we've experienced
 * some bugs during development (many of them due to me). To avoid
 * this in the future, please check the following things if you make
 * changes in this file:
 *
 * 1. You should obtain identical (at least to the PME precision)
 *    energies, forces, and virial for
 *    a rectangular box and a triclinic one where the z (or y) axis is
 *    tilted a whole box side. For instance you could use these boxes:
 *
 *    rectangular       triclinic
 *     2  0  0           2  0  0
 *     0  2  0           0  2  0
 *     0  0  6           2  2  6
 *
 * 2. You should check the energy conservation in a triclinic box.
 *
 * It might seem like overkill, but better safe than sorry.
 */
#include "gmxpre.h"

#include "config.h"

#include <cstdio>
#include <cstdlib>

#include <memory>
#include <vector>

#include "gromacs/domdec/domdec.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/ewald/pme_coordinate_receiver_gpu.h"
#include "gromacs/ewald/pme_force_sender_gpu.h"
#include "gromacs/fft/parallel_3dfft.h"
#include "gromacs/fileio/pdbio.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/gpu_utils/hostallocator.h"
#include "gromacs/math/gmxcomplex.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/state_propagator_data_gpu.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/futil.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/smalloc.h"
#include "pme_gpu_internal.h"
#include "pme_internal.h"
#include "pme_output.h"
#include "pme_pp_communication.h"
/*! \brief environment variable to enable GPU P2P communication */
static const bool c_enableGpuPmePpComms =
        (getenv("GMX_GPU_PME_PP_COMMS") != nullptr) && GMX_THREAD_MPI && (GMX_GPU == GMX_GPU_CUDA);
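/* Note: even when GMX_GPU_PME_PP_COMMS is set in the environment, the expression above
 * only evaluates to true for thread-MPI builds with CUDA; in any other configuration the
 * direct-communication path stays disabled and the regular MPI path below is used. */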
/*! \brief Master PP-PME communication data structure */
struct gmx_pme_pp
{
    MPI_Comm             mpi_comm_mysim; /**< MPI communicator for this simulation */
    std::vector<PpRanks> ppRanks;        /**< The PP partner ranks                  */
    int                  peerRankId;     /**< The peer PP rank id                   */

    /**< Vectors of A- and B-state parameters used to transfer vectors to PME ranks */
    gmx::PaddedHostVector<real> chargeA;
    std::vector<real>           chargeB;
    std::vector<real>           sqrt_c6A;
    std::vector<real>           sqrt_c6B;
    std::vector<real>           sigmaA;
    std::vector<real>           sigmaB;

    gmx::HostVector<gmx::RVec> x; /**< Vector of atom coordinates to transfer to PME ranks */
    std::vector<gmx::RVec>     f; /**< Vector of atom forces received from PME ranks */

    /**< Vectors of MPI objects used in non-blocking communication between multiple PP ranks per PME rank */
    std::vector<MPI_Request> req;
    std::vector<MPI_Status>  stat;

    /*! \brief object for receiving coordinates using communications operating on GPU memory space */
    std::unique_ptr<gmx::PmeCoordinateReceiverGpu> pmeCoordinateReceiverGpu;
    /*! \brief object for sending PME force using communications operating on GPU memory space */
    std::unique_ptr<gmx::PmeForceSenderGpu> pmeForceSenderGpu;

    /*! \brief whether GPU direct communications are active for PME-PP transfers */
    bool useGpuDirectComm = false;
};
/*! \brief Initialize the PME-only side of the PME <-> PP communication */
static std::unique_ptr<gmx_pme_pp> gmx_pme_pp_init(const t_commrec* cr)
{
    auto pme_pp = std::make_unique<gmx_pme_pp>();

#if GMX_MPI
    int rank;

    pme_pp->mpi_comm_mysim = cr->mpi_comm_mysim;
    MPI_Comm_rank(cr->mpi_comm_mygroup, &rank);
    auto ppRanks = get_pme_ddranks(cr, rank);
    pme_pp->ppRanks.reserve(ppRanks.size());
    for (const auto& ppRankId : ppRanks)
    {
        pme_pp->ppRanks.push_back({ ppRankId, 0 });
    }
    // The peer PP rank is the last one.
    pme_pp->peerRankId = pme_pp->ppRanks.back().rankId;
    pme_pp->req.resize(eCommType_NR * pme_pp->ppRanks.size());
    pme_pp->stat.resize(eCommType_NR * pme_pp->ppRanks.size());
#else
    GMX_UNUSED_VALUE(cr);
#endif

    return pme_pp;
}
static void reset_pmeonly_counters(gmx_wallcycle_t           wcycle,
                                   gmx_walltime_accounting_t walltime_accounting,
                                   t_nrnb*                   nrnb,
                                   int64_t                   step,
                                   bool                      useGpuForPme)
{
    /* Reset all the counters related to performance over the run */
    wallcycle_stop(wcycle, ewcRUN);
    wallcycle_reset_all(wcycle);
    init_nrnb(nrnb);
    wallcycle_start(wcycle, ewcRUN);
    walltime_accounting_reset_time(walltime_accounting, step);

    if (useGpuForPme)
    {
        resetGpuProfiler();
    }
}
static gmx_pme_t* gmx_pmeonly_switch(std::vector<gmx_pme_t*>* pmedata,
                                     const ivec               grid_size,
                                     real                     ewaldcoeff_q,
                                     real                     ewaldcoeff_lj,
                                     const t_commrec*         cr,
                                     const t_inputrec*        ir)
{
    GMX_ASSERT(pmedata, "Bad PME tuning list pointer");
    for (auto& pme : *pmedata)
    {
        GMX_ASSERT(pme, "Bad PME tuning list element pointer");
        if (pme->nkx == grid_size[XX] && pme->nky == grid_size[YY] && pme->nkz == grid_size[ZZ])
        {
            /* Here we have found an existing PME data structure that suits us.
             * However, in the GPU case, we have to reinitialize it - there's only one GPU structure.
             * This should not cause actual GPU reallocations, at least (the allocated buffers are never shrunk).
             * So, just some grid size updates in the GPU kernel parameters.
             * TODO: this should be something like gmx_pme_update_split_params()
             */
            gmx_pme_reinit(&pme, cr, pme, ir, grid_size, ewaldcoeff_q, ewaldcoeff_lj);
            return pme;
        }
    }

    const auto& pme          = pmedata->back();
    gmx_pme_t*  newStructure = nullptr;
    // Copy last structure with new grid params
    gmx_pme_reinit(&newStructure, cr, pme, ir, grid_size, ewaldcoeff_q, ewaldcoeff_lj);
    pmedata->push_back(newStructure);
    return newStructure;
}
/*! \brief Called by PME-only ranks to receive coefficients and coordinates
 *
 * \param[in]     pme           PME data structure.
 * \param[in,out] pme_pp        PME-PP communication structure.
 * \param[out]    natoms        Number of received atoms.
 * \param[out]    box           System box, if received.
 * \param[out]    maxshift_x    Maximum shift in X direction, if received.
 * \param[out]    maxshift_y    Maximum shift in Y direction, if received.
 * \param[out]    lambda_q      Free-energy lambda for electrostatics, if received.
 * \param[out]    lambda_lj     Free-energy lambda for Lennard-Jones, if received.
 * \param[out]    bEnerVir      Set to true if this is an energy/virial calculation step, otherwise set to false.
 * \param[out]    step          MD integration step number.
 * \param[out]    grid_size     PME grid size, if received.
 * \param[out]    ewaldcoeff_q  Ewald cut-off parameter for electrostatics, if received.
 * \param[out]    ewaldcoeff_lj Ewald cut-off parameter for Lennard-Jones, if received.
 * \param[in]     useGpuForPme  Flag on whether PME is on GPU.
 * \param[in]     stateGpu      GPU state propagator object.
 * \param[in]     runMode       PME run mode.
 *
 * \retval pmerecvqxX             All parameters were set, chargeA and chargeB can be NULL.
 * \retval pmerecvqxFINISH        No parameters were set.
 * \retval pmerecvqxSWITCHGRID    Only grid_size and *ewaldcoeff were set.
 * \retval pmerecvqxRESETCOUNTERS *step was set.
 */
static int gmx_pme_recv_coeffs_coords(struct gmx_pme_t*            pme,
                                      gmx_pme_pp*                  pme_pp,
                                      int*                         natoms,
                                      matrix                       box,
                                      int*                         maxshift_x,
                                      int*                         maxshift_y,
                                      real*                        lambda_q,
                                      real*                        lambda_lj,
                                      gmx_bool*                    bEnerVir,
                                      int64_t*                     step,
                                      ivec*                        grid_size,
                                      real*                        ewaldcoeff_q,
                                      real*                        ewaldcoeff_lj,
                                      bool                         useGpuForPme,
                                      gmx::StatePropagatorDataGpu* stateGpu,
                                      PmeRunMode gmx_unused        runMode)
{
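    /* The loop below keeps receiving messages from the peer PP rank until it either gets
     * coordinates for a new step, a grid-switch or counter-reset request, or the signal
     * that the run is finished; each iteration starts with one gmx_pme_comm_n_box_t message. */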
    int status = -1;
    int nat    = 0;

#if GMX_MPI
    unsigned int flags          = 0;
    int          messages       = 0;
    bool         atomSetChanged = false;

    do
    {
        gmx_pme_comm_n_box_t cnb;

        /* Receive the send count, box and time step from the peer PP node */
        MPI_Recv(&cnb, sizeof(cnb), MPI_BYTE, pme_pp->peerRankId, eCommType_CNB,
                 pme_pp->mpi_comm_mysim, MPI_STATUS_IGNORE);
        /* We accumulate all received flags */
        flags |= cnb.flags;

        *step = cnb.step;

        if (debug)
        {
            fprintf(debug, "PME only rank receiving:%s%s%s%s%s\n",
                    (cnb.flags & PP_PME_CHARGE) ? " charges" : "",
                    (cnb.flags & PP_PME_COORD) ? " coordinates" : "",
                    (cnb.flags & PP_PME_FINISH) ? " finish" : "",
                    (cnb.flags & PP_PME_SWITCHGRID) ? " switch grid" : "",
                    (cnb.flags & PP_PME_RESETCOUNTERS) ? " reset counters" : "");
        }
        pme_pp->useGpuDirectComm = ((cnb.flags & PP_PME_GPUCOMMS) != 0);
        GMX_ASSERT(!pme_pp->useGpuDirectComm || (pme_pp->pmeForceSenderGpu != nullptr),
                   "The use of GPU direct communication for PME-PP is enabled, "
                   "but the PME GPU force receiver object does not exist");
        if (cnb.flags & PP_PME_FINISH)
        {
            status = pmerecvqxFINISH;
        }

        if (cnb.flags & PP_PME_SWITCHGRID)
        {
            /* Special case, receive the new parameters and return */
            copy_ivec(cnb.grid_size, *grid_size);
            *ewaldcoeff_q  = cnb.ewaldcoeff_q;
            *ewaldcoeff_lj = cnb.ewaldcoeff_lj;

            status = pmerecvqxSWITCHGRID;
        }

        if (cnb.flags & PP_PME_RESETCOUNTERS)
        {
            /* Special case, receive the step (set above) and return */
            status = pmerecvqxRESETCOUNTERS;
        }
        if (cnb.flags & (PP_PME_CHARGE | PP_PME_SQRTC6 | PP_PME_SIGMA))
        {
            atomSetChanged = true;

            /* Receive the send counts from the other PP nodes */
            for (auto& sender : pme_pp->ppRanks)
            {
                if (sender.rankId == pme_pp->peerRankId)
                {
                    sender.numAtoms = cnb.natoms;
                }
                else
                {
                    MPI_Irecv(&sender.numAtoms, sizeof(sender.numAtoms), MPI_BYTE, sender.rankId,
                              eCommType_CNB, pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);
                }
            }
            MPI_Waitall(messages, pme_pp->req.data(), pme_pp->stat.data());
            messages = 0;
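            /* Sum the per-sender atom counts so that the parameter and coordinate buffers
             * below can be sized for all PP ranks that send to this PME rank. */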
            nat = 0;
            for (const auto& sender : pme_pp->ppRanks)
            {
                nat += sender.numAtoms;
            }
            if (cnb.flags & PP_PME_CHARGE)
            {
                pme_pp->chargeA.resizeWithPadding(nat);
            }
            if (cnb.flags & PP_PME_CHARGEB)
            {
                pme_pp->chargeB.resize(nat);
            }
            if (cnb.flags & PP_PME_SQRTC6)
            {
                pme_pp->sqrt_c6A.resize(nat);
            }
            if (cnb.flags & PP_PME_SQRTC6B)
            {
                pme_pp->sqrt_c6B.resize(nat);
            }
            if (cnb.flags & PP_PME_SIGMA)
            {
                pme_pp->sigmaA.resize(nat);
            }
            if (cnb.flags & PP_PME_SIGMAB)
            {
                pme_pp->sigmaB.resize(nat);
            }
            pme_pp->x.resize(nat);
            pme_pp->f.resize(nat);
            /* maxshift is sent when the charges are sent */
            *maxshift_x = cnb.maxshift_x;
            *maxshift_y = cnb.maxshift_y;

            /* Receive the charges in place */
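            /* Each parameter type is received directly into its final buffer; within one type,
             * every PP sender's block starts at the running atom offset nat. */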
            for (int q = 0; q < eCommType_NR; q++)
            {
                real* bufferPtr;

                if (!(cnb.flags & (PP_PME_CHARGE << q)))
                {
                    continue;
                }
                switch (q)
                {
                    case eCommType_ChargeA: bufferPtr = pme_pp->chargeA.data(); break;
                    case eCommType_ChargeB: bufferPtr = pme_pp->chargeB.data(); break;
                    case eCommType_SQRTC6A: bufferPtr = pme_pp->sqrt_c6A.data(); break;
                    case eCommType_SQRTC6B: bufferPtr = pme_pp->sqrt_c6B.data(); break;
                    case eCommType_SigmaA: bufferPtr = pme_pp->sigmaA.data(); break;
                    case eCommType_SigmaB: bufferPtr = pme_pp->sigmaB.data(); break;
                    default: gmx_incons("Wrong eCommType");
                }
                nat = 0;
                for (const auto& sender : pme_pp->ppRanks)
                {
                    if (sender.numAtoms > 0)
                    {
                        MPI_Irecv(bufferPtr + nat, sender.numAtoms * sizeof(real), MPI_BYTE,
                                  sender.rankId, q, pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);
                        nat += sender.numAtoms;
                        if (debug)
                        {
                            fprintf(debug, "Received from PP rank %d: %d %s\n", sender.rankId,
                                    sender.numAtoms,
                                    (q == eCommType_ChargeA || q == eCommType_ChargeB) ? "charges"
                                                                                       : "params");
                        }
                    }
                }
            }
        }
        if (cnb.flags & PP_PME_COORD)
        {
            if (atomSetChanged)
            {
                gmx_pme_reinit_atoms(pme, nat, pme_pp->chargeA.data());
                if (useGpuForPme)
                {
                    stateGpu->reinit(nat, nat);
                    pme_gpu_set_device_x(pme, stateGpu->getCoordinates());
                }

                if (pme_pp->useGpuDirectComm)
                {
                    GMX_ASSERT(runMode == PmeRunMode::GPU,
                               "GPU Direct PME-PP communication has been enabled, "
                               "but PME run mode is not PmeRunMode::GPU\n");

                    // This rank will have its data accessed directly by PP rank, so needs to send the remote addresses.
                    pme_pp->pmeCoordinateReceiverGpu->sendCoordinateBufferAddressToPpRanks(
                            pme_gpu_get_device_x(pme));
                    pme_pp->pmeForceSenderGpu->sendForceBufferAddressToPpRanks(
                            reinterpret_cast<rvec*>(pme_gpu_get_device_f(pme)));
                }
            }
            /* The box, FE flag and lambda are sent along with the coordinates
             */
            copy_mat(cnb.box, box);
            *lambda_q  = cnb.lambda_q;
            *lambda_lj = cnb.lambda_lj;
            *bEnerVir  = ((cnb.flags & PP_PME_ENER_VIR) != 0U);
            /* Receive the coordinates in place */
            nat = 0;
            for (const auto& sender : pme_pp->ppRanks)
            {
                if (sender.numAtoms > 0)
                {
                    if (pme_pp->useGpuDirectComm)
                    {
                        pme_pp->pmeCoordinateReceiverGpu->launchReceiveCoordinatesFromPpCudaDirect(
                                sender.rankId);
                    }
                    else
                    {
                        MPI_Irecv(pme_pp->x[nat], sender.numAtoms * sizeof(rvec), MPI_BYTE, sender.rankId,
                                  eCommType_COORD, pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);
                    }
                    nat += sender.numAtoms;
                    if (debug)
                    {
                        fprintf(debug,
                                "Received from PP rank %d: %d "
                                "coordinates\n",
                                sender.rankId, sender.numAtoms);
                    }
                }
            }
            if (pme_pp->useGpuDirectComm)
            {
                pme_pp->pmeCoordinateReceiverGpu->enqueueWaitReceiveCoordinatesFromPpCudaDirect();
            }

            status = pmerecvqxX;
        }

        /* Wait for the coordinates and/or charges to arrive */
        MPI_Waitall(messages, pme_pp->req.data(), pme_pp->stat.data());
        messages = 0;
    } while (status == -1);
#else
    GMX_UNUSED_VALUE(pme);
    GMX_UNUSED_VALUE(pme_pp);
    GMX_UNUSED_VALUE(box);
    GMX_UNUSED_VALUE(maxshift_x);
    GMX_UNUSED_VALUE(maxshift_y);
    GMX_UNUSED_VALUE(lambda_q);
    GMX_UNUSED_VALUE(lambda_lj);
    GMX_UNUSED_VALUE(bEnerVir);
    GMX_UNUSED_VALUE(step);
    GMX_UNUSED_VALUE(grid_size);
    GMX_UNUSED_VALUE(ewaldcoeff_q);
    GMX_UNUSED_VALUE(ewaldcoeff_lj);
    GMX_UNUSED_VALUE(useGpuForPme);
    GMX_UNUSED_VALUE(stateGpu);

    status = pmerecvqxX;
#endif

    if (status == pmerecvqxX)
    {
        *natoms = nat;
    }

    return status;
}
/*! \brief Send force data to PP ranks */
static void sendFToPP(void* sendbuf, PpRanks receiver, gmx_pme_pp* pme_pp, int* messages)
{
    if (pme_pp->useGpuDirectComm)
    {
        GMX_ASSERT((pme_pp->pmeForceSenderGpu != nullptr),
                   "The use of GPU direct communication for PME-PP is enabled, "
                   "but the PME GPU force receiver object does not exist");
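        /* With direct GPU communication the forces are pushed straight from the PME rank's
         * GPU buffer to the PP rank, so no MPI message needs to be posted for this receiver. */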
        pme_pp->pmeForceSenderGpu->sendFToPpCudaDirect(receiver.rankId);
    }
    else
    {
        MPI_Isend(sendbuf, receiver.numAtoms * sizeof(rvec), MPI_BYTE, receiver.rankId, 0,
                  pme_pp->mpi_comm_mysim, &pme_pp->req[*messages]);
        *messages = *messages + 1;
    }
}
/*! \brief Send the PME mesh force, virial and energy to the PP-only ranks. */
static void gmx_pme_send_force_vir_ener(const gmx_pme_t& pme,
                                        gmx_pme_pp*      pme_pp,
                                        const PmeOutput& output,
                                        real             dvdlambda_q,
                                        real             dvdlambda_lj,
                                        float            cycles)
{
#if GMX_MPI
    gmx_pme_comm_vir_ene_t cve;
    int                    messages, ind_start, ind_end;

    /* Now the evaluated forces have to be transferred to the PP nodes */
    messages = 0;
    ind_end  = 0;
    for (const auto& receiver : pme_pp->ppRanks)
    {
        ind_start = ind_end;
        ind_end   = ind_start + receiver.numAtoms;
        void* sendbuf = const_cast<void*>(static_cast<const void*>(output.forces_[ind_start]));
        if (pme_pp->useGpuDirectComm)
        {
            // Data will be transferred directly from GPU.
            rvec* d_f = reinterpret_cast<rvec*>(pme_gpu_get_device_f(&pme));
            sendbuf   = reinterpret_cast<void*>(&d_f[ind_start]);
        }
        sendFToPP(sendbuf, receiver, pme_pp, &messages);
    }
    /* send virial and energy to our last PP node */
    copy_mat(output.coulombVirial_, cve.vir_q);
    copy_mat(output.lennardJonesVirial_, cve.vir_lj);
    cve.energy_q     = output.coulombEnergy_;
    cve.energy_lj    = output.lennardJonesEnergy_;
    cve.dvdlambda_q  = dvdlambda_q;
    cve.dvdlambda_lj = dvdlambda_lj;
    /* check for the signals to send back to a PP node */
    cve.stop_cond = gmx_get_stop_condition();

    cve.cycles = cycles;

    if (debug)
    {
        fprintf(debug, "PME rank sending to PP rank %d: virial and energy\n", pme_pp->peerRankId);
    }
    MPI_Isend(&cve, sizeof(cve), MPI_BYTE, pme_pp->peerRankId, 1, pme_pp->mpi_comm_mysim,
              &pme_pp->req[messages++]);

    /* Wait for the forces to arrive */
    MPI_Waitall(messages, pme_pp->req.data(), pme_pp->stat.data());
#else
    gmx_call("MPI not enabled");
    GMX_UNUSED_VALUE(pme);
    GMX_UNUSED_VALUE(pme_pp);
    GMX_UNUSED_VALUE(output);
    GMX_UNUSED_VALUE(dvdlambda_q);
    GMX_UNUSED_VALUE(dvdlambda_lj);
    GMX_UNUSED_VALUE(cycles);
#endif
}
int gmx_pmeonly(struct gmx_pme_t*         pme,
                const t_commrec*          cr,
                t_nrnb*                   mynrnb,
                gmx_wallcycle*            wcycle,
                gmx_walltime_accounting_t walltime_accounting,
                t_inputrec*               ir,
                PmeRunMode                runMode)
{
    int      ret;
    int      natoms = 0;
    matrix   box;
    real     lambda_q = 0, lambda_lj = 0;
    int      maxshift_x = 0, maxshift_y = 0;
    real     dvdlambda_q, dvdlambda_lj;
    float    cycles;
    int      count;
    int64_t  step;
    gmx_bool bEnerVir = FALSE;

    /* This data will only be used with PME tuning, i.e. switching PME grids */
    std::vector<gmx_pme_t*> pmedata;
    pmedata.push_back(pme);
    auto pme_pp = gmx_pme_pp_init(cr);
    // TODO the variable below should be queried from the task assignment info
    const bool  useGpuForPme  = (runMode == PmeRunMode::GPU) || (runMode == PmeRunMode::Mixed);
    const void* commandStream = useGpuForPme ? pme_gpu_get_device_stream(pme) : nullptr;
    const void* deviceContext = useGpuForPme ? pme_gpu_get_device_context(pme) : nullptr;
    const int   paddingSize   = pme_gpu_get_padding_size(pme);
    if (useGpuForPme)
    {
        changePinningPolicy(&pme_pp->chargeA, pme_get_pinning_policy());
        changePinningPolicy(&pme_pp->x, pme_get_pinning_policy());
        if (c_enableGpuPmePpComms)
        {
            pme_pp->pmeCoordinateReceiverGpu = std::make_unique<gmx::PmeCoordinateReceiverGpu>(
                    pme_gpu_get_device_stream(pme), pme_pp->mpi_comm_mysim, pme_pp->ppRanks);
            pme_pp->pmeForceSenderGpu = std::make_unique<gmx::PmeForceSenderGpu>(
                    pme_gpu_get_device_stream(pme), pme_pp->mpi_comm_mysim, pme_pp->ppRanks);
        }
    }
    std::unique_ptr<gmx::StatePropagatorDataGpu> stateGpu;
    if (useGpuForPme)
    {
        // TODO: Special PME-only constructor is used here. There is no mechanism to prevent from using the other constructor here.
        //       This should be made safer.
        stateGpu = std::make_unique<gmx::StatePropagatorDataGpu>(
                commandStream, deviceContext, GpuApiCallBehavior::Async, paddingSize, wcycle);
    }

    count = 0;
    do /****** this is a quasi-loop over time steps! */
    {
        /* The reason for having a loop here is PME grid tuning/switching */
        do
        {
            /* Domain decomposition */
            ivec newGridSize;
            real ewaldcoeff_q = 0, ewaldcoeff_lj = 0;
            ret = gmx_pme_recv_coeffs_coords(pme, pme_pp.get(), &natoms, box, &maxshift_x,
                                             &maxshift_y, &lambda_q, &lambda_lj, &bEnerVir, &step,
                                             &newGridSize, &ewaldcoeff_q, &ewaldcoeff_lj,
                                             useGpuForPme, stateGpu.get(), runMode);

            if (ret == pmerecvqxSWITCHGRID)
            {
                /* Switch the PME grid to newGridSize */
                pme = gmx_pmeonly_switch(&pmedata, newGridSize, ewaldcoeff_q, ewaldcoeff_lj, cr, ir);
            }

            if (ret == pmerecvqxRESETCOUNTERS)
            {
                /* Reset the cycle and flop counters */
                reset_pmeonly_counters(wcycle, walltime_accounting, mynrnb, step, useGpuForPme);
            }
        } while (ret == pmerecvqxSWITCHGRID || ret == pmerecvqxRESETCOUNTERS);
        if (ret == pmerecvqxFINISH)
        {
            /* We should stop: break out of the loop */
            break;
        }

        if (count == 0)
        {
            wallcycle_start(wcycle, ewcRUN);
            walltime_accounting_start_time(walltime_accounting);
        }

        wallcycle_start(wcycle, ewcPMEMESH);

        dvdlambda_q  = 0;
        dvdlambda_lj = 0;

        // TODO Make a struct of array refs onto these per-atom fields
        // of pme_pp (maybe box, energy and virial, too; and likewise
        // from mdatoms for the other call to gmx_pme_do), so we have
        // fewer lines of code and less parameter passing.
        const int pmeFlags = GMX_PME_DO_ALL_F | (bEnerVir ? GMX_PME_CALC_ENER_VIR : 0);
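        // Energies and the virial are computed only on steps where the PP rank asked for
        // them (bEnerVir), which keeps the common force-only steps cheaper.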
        PmeOutput output;
        if (useGpuForPme)
        {
            const bool boxChanged              = false;
            const bool useGpuPmeForceReduction = pme_pp->useGpuDirectComm;
            // TODO this should be set properly by gmx_pme_recv_coeffs_coords,
            // or maybe use inputrecDynamicBox(ir), at the very least - change this when this codepath is tested!
            pme_gpu_prepare_computation(pme, boxChanged, box, wcycle, pmeFlags, useGpuPmeForceReduction);
            if (!pme_pp->useGpuDirectComm)
            {
                stateGpu->copyCoordinatesToGpu(gmx::ArrayRef<gmx::RVec>(pme_pp->x), gmx::AtomLocality::All);
            }
            // On the separate PME rank we do not need a synchronizer as we schedule everything in a single stream
            // TODO: with pme on GPU the receive should make a list of synchronizers and pass it here #3157
            auto xReadyOnDevice = nullptr;

            pme_gpu_launch_spread(pme, xReadyOnDevice, wcycle);
            pme_gpu_launch_complex_transforms(pme, wcycle);
            pme_gpu_launch_gather(pme, wcycle, PmeForceOutputHandling::Set);
            output = pme_gpu_wait_finish_task(pme, pmeFlags, wcycle);
            pme_gpu_reinit_computation(pme, wcycle);
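            // The calls above schedule the whole PME pipeline on the GPU for this step
            // (spread, complex transforms and solve, force gather), wait for the requested
            // outputs, and then reset the grids ready for the next step.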
        }
        else
        {
            GMX_ASSERT(pme_pp->x.size() == static_cast<size_t>(natoms),
                       "The coordinate buffer should have size natoms");

            gmx_pme_do(pme, pme_pp->x, pme_pp->f, pme_pp->chargeA.data(), pme_pp->chargeB.data(),
                       pme_pp->sqrt_c6A.data(), pme_pp->sqrt_c6B.data(), pme_pp->sigmaA.data(),
                       pme_pp->sigmaB.data(), box, cr, maxshift_x, maxshift_y, mynrnb, wcycle,
                       output.coulombVirial_, output.lennardJonesVirial_, &output.coulombEnergy_,
                       &output.lennardJonesEnergy_, lambda_q, lambda_lj, &dvdlambda_q,
                       &dvdlambda_lj, pmeFlags);
            output.forces_ = pme_pp->f;
        }
        cycles = wallcycle_stop(wcycle, ewcPMEMESH);
        gmx_pme_send_force_vir_ener(*pme, pme_pp.get(), output, dvdlambda_q, dvdlambda_lj, cycles);

        count++;
    } /***** end of quasi-loop, we stop with the break above */
    while (TRUE);

    walltime_accounting_end_time(walltime_accounting);

    return 0;
}