/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Implements class which receives coordinates to GPU memory on PME task using CUDA
 *
 * \author Alan Gray <alang@nvidia.com>
 *
 * \ingroup module_ewald
 */
#include "gmxpre.h"

#include "gromacs/ewald/pme_pp_communication.h"
#include "pme_coordinate_receiver_gpu_impl.h"

#include "config.h"

#include "gromacs/ewald/pme_force_sender_gpu.h"
#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/gpueventsynchronizer.h"
#include "gromacs/utility/gmxmpi.h"

namespace gmx
{
PmeCoordinateReceiverGpu::Impl::Impl(MPI_Comm                     comm,
                                     const DeviceContext&         deviceContext,
                                     gmx::ArrayRef<const PpRanks> ppRanks) :
    comm_(comm), requests_(ppRanks.size(), MPI_REQUEST_NULL), deviceContext_(deviceContext)
{
    // Create one high-priority stream per PP sender rank, to manage pipelining
    ppCommManagers_.reserve(ppRanks.size());
    for (const auto& ppRank : ppRanks)
    {
        ppCommManagers_.emplace_back(PpCommManager{
                ppRank,
                std::make_unique<DeviceStream>(deviceContext_, DeviceStreamPriority::High, false),
                nullptr,
                { 0, 0 } });
    }
}

PmeCoordinateReceiverGpu::Impl::~Impl() = default;
void PmeCoordinateReceiverGpu::Impl::reinitCoordinateReceiver(DeviceBuffer<RVec> d_x)
{
    int indEnd = 0;
    for (auto& ppCommManager : ppCommManagers_)
    {
        int indStart = indEnd;
        indEnd       = indStart + ppCommManager.ppRank.numAtoms;

        ppCommManager.atomRange = std::make_tuple(indStart, indEnd);

        // Need to send the address to the PP rank only for thread-MPI, as the PP rank pushes data using cudaMemcpy
        if (GMX_THREAD_MPI)
        {
            // Data will be transferred directly from GPU.
            void* sendBuf = reinterpret_cast<void*>(&d_x[indStart]);
#if GMX_MPI
            MPI_Send(&sendBuf, sizeof(void**), MPI_BYTE, ppCommManager.ppRank.rankId, 0, comm_);
#else
            GMX_UNUSED_VALUE(sendBuf);
#endif
        }
    }
}
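/* Illustrative sketch (not part of this translation unit) of the PP-side
 * counterpart implied by the handshake above, under the assumption that the
 * PP rank simply receives the device address and later pushes its coordinates
 * into that buffer; all variable names here are hypothetical:
 *
 *   void* pmeRemoteXPtr = nullptr;
 *   MPI_Recv(&pmeRemoteXPtr, sizeof(void*), MPI_BYTE, pmeRank, 0, comm, MPI_STATUS_IGNORE);
 *   // Each step: push local coordinates straight into PME's buffer ...
 *   cudaMemcpyAsync(pmeRemoteXPtr, d_xLocal, numAtoms * sizeof(float3),
 *                   cudaMemcpyDeviceToDevice, stream);
 *   // ... then record an event and send its GpuEventSynchronizer* to the PME
 *   // rank, which receives it in receiveCoordinatesSynchronizerFromPpCudaDirect below.
 */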
/*! \brief Receive coordinate synchronizer pointer from the PP ranks. */
void PmeCoordinateReceiverGpu::Impl::receiveCoordinatesSynchronizerFromPpCudaDirect(int ppRank)
{
    GMX_ASSERT(GMX_THREAD_MPI,
               "receiveCoordinatesSynchronizerFromPpCudaDirect is expected to be called only for "
               "Thread-MPI");

    // Data will be pushed directly from PP task
#if GMX_MPI
    // Receive event from PP task
    MPI_Irecv(&ppCommManagers_[ppRank].sync,
              sizeof(GpuEventSynchronizer*), // NOLINT(bugprone-sizeof-expression)
              MPI_BYTE, ppRank, 0, comm_,
              &(requests_[ppRank]));
#else
    GMX_UNUSED_VALUE(ppRank);
#endif
}
/*! \brief Receive coordinate data using CUDA-aware MPI */
void PmeCoordinateReceiverGpu::Impl::launchReceiveCoordinatesFromPpCudaMpi(DeviceBuffer<RVec> recvbuf,
                                                                           int numAtoms,
                                                                           int numBytes,
                                                                           int ppRank)
{
    GMX_ASSERT(GMX_LIB_MPI,
               "launchReceiveCoordinatesFromPpCudaMpi is expected to be called only for Lib-MPI");

#if GMX_MPI
    MPI_Irecv(&recvbuf[numAtoms], numBytes, MPI_BYTE, ppRank, eCommType_COORD_GPU, comm_, &(requests_[ppRank]));
#else
    GMX_UNUSED_VALUE(recvbuf);
    GMX_UNUSED_VALUE(numAtoms);
    GMX_UNUSED_VALUE(numBytes);
    GMX_UNUSED_VALUE(ppRank);
#endif
}
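/* For reference: with CUDA-aware MPI the matching PP-side send can pass a device
 * pointer directly, along the lines of (hypothetical counterpart, not part of
 * this file):
 *
 *   MPI_Send(d_xLocal, numBytes, MPI_BYTE, pmeRank, eCommType_COORD_GPU, comm);
 *
 * so the MPI library moves the data GPU-to-GPU without an intermediate host copy,
 * and the MPI_Irecv above completes only once the coordinate data has arrived.
 */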
int PmeCoordinateReceiverGpu::Impl::synchronizeOnCoordinatesFromPpRank(int pipelineStage,
                                                                       const DeviceStream& deviceStream)
{
#if GMX_MPI
    int senderRank = -1; // Rank of PP task that is associated with this invocation.
#    if (!GMX_THREAD_MPI)
    // Wait on data from any one of the PP sender GPUs
    MPI_Waitany(requests_.size(), requests_.data(), &senderRank, MPI_STATUS_IGNORE);
    GMX_ASSERT(senderRank >= 0, "Rank of sending PP task must be 0 or greater");
    GMX_UNUSED_VALUE(pipelineStage);
    GMX_UNUSED_VALUE(deviceStream);
#    else
    // MPI_Waitany is not available in thread-MPI. However, the
    // MPI_Wait here is not associated with data but is host-side
    // scheduling code to receive a CUDA event, and will be executed
    // in advance of the actual data transfer. Therefore we can
    // receive in order of pipeline stage, still allowing the
    // scheduled GPU-direct comms to initiate out-of-order in their
    // respective streams. For cases with CPU force computations, the
    // scheduling is less asynchronous (done on a per-step basis), so
    // host-side improvements should be investigated as tracked in
    // the corresponding GROMACS issue.
    senderRank = pipelineStage;
    MPI_Wait(&(requests_[senderRank]), MPI_STATUS_IGNORE);
    ppCommManagers_[senderRank].sync->enqueueWaitEvent(deviceStream);
#    endif
    return senderRank;
#else
    GMX_UNUSED_VALUE(pipelineStage);
    GMX_UNUSED_VALUE(deviceStream);
    return -1;
#endif
}
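/* Illustrative sketch of how a caller might consume coordinates in the pipelined
 * path, assuming one pipeline stage per PP sender rank (names other than the
 * methods of this class are hypothetical):
 *
 *   for (int stage = 0; stage < receiver->ppCommNumSenderRanks(); stage++)
 *   {
 *       int sender        = receiver->synchronizeOnCoordinatesFromPpRank(stage, spreadStream);
 *       auto [begin, end] = receiver->ppCommAtomRange(sender);
 *       // ... launch spread work for atoms [begin, end) in *receiver->ppCommStream(sender) ...
 *   }
 */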
void PmeCoordinateReceiverGpu::Impl::synchronizeOnCoordinatesFromAllPpRanks(const DeviceStream& deviceStream)
{
    for (int i = 0; i < static_cast<int>(ppCommManagers_.size()); i++)
    {
        synchronizeOnCoordinatesFromPpRank(i, deviceStream);
    }
}
DeviceStream* PmeCoordinateReceiverGpu::Impl::ppCommStream(int senderIndex)
{
    return ppCommManagers_[senderIndex].stream.get();
}

std::tuple<int, int> PmeCoordinateReceiverGpu::Impl::ppCommAtomRange(int senderIndex)
{
    return ppCommManagers_[senderIndex].atomRange;
}

int PmeCoordinateReceiverGpu::Impl::ppCommNumSenderRanks()
{
    return ppCommManagers_.size();
}
PmeCoordinateReceiverGpu::PmeCoordinateReceiverGpu(MPI_Comm               comm,
                                                   const DeviceContext&   deviceContext,
                                                   gmx::ArrayRef<PpRanks> ppRanks) :
    impl_(new Impl(comm, deviceContext, ppRanks))
{
}

PmeCoordinateReceiverGpu::~PmeCoordinateReceiverGpu() = default;
void PmeCoordinateReceiverGpu::reinitCoordinateReceiver(DeviceBuffer<RVec> d_x)
{
    impl_->reinitCoordinateReceiver(d_x);
}

void PmeCoordinateReceiverGpu::receiveCoordinatesSynchronizerFromPpCudaDirect(int ppRank)
{
    impl_->receiveCoordinatesSynchronizerFromPpCudaDirect(ppRank);
}

void PmeCoordinateReceiverGpu::launchReceiveCoordinatesFromPpCudaMpi(DeviceBuffer<RVec> recvbuf,
                                                                     int numAtoms,
                                                                     int numBytes,
                                                                     int ppRank)
{
    impl_->launchReceiveCoordinatesFromPpCudaMpi(recvbuf, numAtoms, numBytes, ppRank);
}

int PmeCoordinateReceiverGpu::synchronizeOnCoordinatesFromPpRank(int senderIndex,
                                                                 const DeviceStream& deviceStream)
{
    return impl_->synchronizeOnCoordinatesFromPpRank(senderIndex, deviceStream);
}

void PmeCoordinateReceiverGpu::synchronizeOnCoordinatesFromAllPpRanks(const DeviceStream& deviceStream)
{
    impl_->synchronizeOnCoordinatesFromAllPpRanks(deviceStream);
}

DeviceStream* PmeCoordinateReceiverGpu::ppCommStream(int senderIndex)
{
    return impl_->ppCommStream(senderIndex);
}

std::tuple<int, int> PmeCoordinateReceiverGpu::ppCommAtomRange(int senderIndex)
{
    return impl_->ppCommAtomRange(senderIndex);
}

int PmeCoordinateReceiverGpu::ppCommNumSenderRanks()
{
    return impl_->ppCommNumSenderRanks();
}

} // namespace gmx