/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief Declaration of class which receives coordinates to GPU memory on PME task
 *
 * \author Alan Gray <alang@nvidia.com>
 *
 * \ingroup module_ewald
 */
#ifndef GMX_PMECOORDINATERECEIVERGPU_IMPL_H
#define GMX_PMECOORDINATERECEIVERGPU_IMPL_H

#include <memory>
#include <tuple>
#include <vector>

#include "gromacs/ewald/pme_coordinate_receiver_gpu.h"
#include "gromacs/utility/arrayref.h"

class GpuEventSynchronizer;

namespace gmx
{

/*! \brief Object to manage communications with a specific PP rank */
struct PpCommManager
{
    //! Details of PP rank that may be updated after repartitioning
    const PpRanks& ppRank;
    //! Stream used for communication with PP rank
    std::unique_ptr<DeviceStream> stream;
    //! Synchronization event to receive from PP rank
    GpuEventSynchronizer* sync = nullptr;
    //! Range of atoms corresponding to PP rank
    std::tuple<int, int> atomRange = { 0, 0 };
};
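
/*
 * Illustrative sketch (not part of the original header; names are assumptions):
 * one PpCommManager entry is expected per sending PP rank, e.g. built from the
 * PpRanks list passed to the Impl constructor below:
 *
 *     for (const PpRanks& ppRank : ppRanks)
 *     {
 *         ppCommManagers_.emplace_back(PpCommManager{ ppRank, nullptr, nullptr, { 0, 0 } });
 *     }
 */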

/*! \internal \brief Class with interfaces and data for CUDA version of PME coordinate receiving functionality */
class PmeCoordinateReceiverGpu::Impl
{

public:
    /*! \brief Creates PME GPU coordinate receiver object
     * \param[in] comm          Communicator used for simulation
     * \param[in] deviceContext GPU context
     * \param[in] ppRanks       List of PP ranks
     */
    Impl(MPI_Comm comm, const DeviceContext& deviceContext, gmx::ArrayRef<const PpRanks> ppRanks);

    /*! \brief
     * Re-initialize: set atom ranges and, for the thread-MPI case,
     * send the coordinates buffer address to the PP ranks.
     * This is required after repartitioning since atom ranges and
     * buffer allocations may have changed.
     * \param[in] d_x  coordinates buffer in GPU memory
     */
    void reinitCoordinateReceiver(DeviceBuffer<RVec> d_x);
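
    /*
     * Usage sketch (illustrative only; caller-side names such as stateGpu are
     * assumptions, not part of this header). After each domain repartitioning
     * the owner of this object would call, e.g.:
     *
     *     pmeCoordinateReceiver->reinitCoordinateReceiver(stateGpu->getCoordinates());
     *
     * so that atom ranges and (for thread-MPI) the receive-buffer address sent
     * to the PP ranks match the new decomposition.
     */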

    /*! \brief
     * Receive coordinate synchronizer pointer from the PP ranks.
     * \param[in] ppRank  PP rank to receive the synchronizer from.
     */
    void receiveCoordinatesSynchronizerFromPpCudaDirect(int ppRank);

    /*! \brief
     * Used for lib-MPI, receives coordinates from PP ranks
     * \param[in] recvbuf   coordinates buffer in GPU memory
     * \param[in] numAtoms  starting element in buffer
     * \param[in] numBytes  number of bytes to transfer
     * \param[in] ppRank    PP rank sending the data
     */
    void launchReceiveCoordinatesFromPpCudaMpi(DeviceBuffer<RVec> recvbuf, int numAtoms, int numBytes, int ppRank);
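
    /*
     * Call-pattern sketch for the lib-MPI path (illustrative; local variable
     * names are assumptions). The byte count is derived from the atom range
     * owned by the sending PP rank, e.g.:
     *
     *     auto [atomStart, atomEnd] = ppCommAtomRange(senderIndex);
     *     launchReceiveCoordinatesFromPpCudaMpi(
     *             d_x, atomStart, (atomEnd - atomStart) * sizeof(RVec), ppRank);
     */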

    /*! \brief
     * For lib-MPI, wait for coordinates from any PP rank.
     * For thread-MPI, enqueue the PP coordinate transfer event received from the PP
     * rank determined from the pipeline stage into the given stream.
     * \param[in] pipelineStage  stage of pipeline corresponding to this transfer
     * \param[in] deviceStream   stream in which to enqueue the wait event
     * \returns                  rank of sending PP task
     */
    int synchronizeOnCoordinatesFromPpRank(int pipelineStage, const DeviceStream& deviceStream);
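
    /*
     * Pipelining sketch (illustrative; loop and stream names are assumptions).
     * PME spread work can be overlapped with per-rank coordinate transfers by
     * waiting on one pipeline stage at a time, e.g.:
     *
     *     for (int stage = 0; stage < ppCommNumSenderRanks(); stage++)
     *     {
     *         int senderRank = synchronizeOnCoordinatesFromPpRank(stage, spreadStream);
     *         // ... launch spread work for the atom range of senderRank ...
     *     }
     */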

    /*! \brief Perform the above synchronizeOnCoordinatesFromPpRank for all PP ranks,
     * enqueueing all events to a single stream
     * \param[in] deviceStream  stream in which to enqueue the wait events
     */
    void synchronizeOnCoordinatesFromAllPpRanks(const DeviceStream& deviceStream);

    /*! \brief
     * Return pointer to the stream associated with a specific PP rank sender index
     * \param[in] senderIndex  Index of sender PP rank.
     */
    DeviceStream* ppCommStream(int senderIndex);

    /*! \brief
     * Returns the range of atoms involved in communication associated with a specific PP rank sender index
     * \param[in] senderIndex  Index of sender PP rank.
     */
    std::tuple<int, int> ppCommAtomRange(int senderIndex);

    /*! \brief
     * Return the number of PP ranks involved in PME-PP communication
     */
    int ppCommNumSenderRanks();
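
    /*
     * Accessor sketch (illustrative; names are assumptions): per-sender streams
     * and atom ranges can be queried together when setting up pipelined spread
     * kernels, e.g.:
     *
     *     for (int sender = 0; sender < ppCommNumSenderRanks(); sender++)
     *     {
     *         DeviceStream* stream      = ppCommStream(sender);
     *         auto [atomStart, atomEnd] = ppCommAtomRange(sender);
     *         // ... use stream and atom range ...
     *     }
     */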

private:
    //! communicator for simulation
    MPI_Comm comm_;
    //! MPI requests, one per PP rank
    std::vector<MPI_Request> requests_;
    //! GPU context handle (not used in CUDA)
    const DeviceContext& deviceContext_;
    //! Communication manager objects corresponding to multiple sending PP ranks
    std::vector<PpCommManager> ppCommManagers_;
};

} // namespace gmx

#endif