/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Implements PME-PP communication using CUDA
 *
 * \author Alan Gray <alang@nvidia.com>
 *
 * \ingroup module_ewald
 */
#include "gmxpre.h"

#include "pme_force_sender_gpu_impl.h"

#include "config.h"

#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
#include "gromacs/gpu_utils/typecasts.cuh"
#include "gromacs/utility/gmxmpi.h"

namespace gmx
{
/*! \brief Create PME-PP GPU communication object */
PmeForceSenderGpu::Impl::Impl(GpuEventSynchronizer*  pmeForcesReady,
                              MPI_Comm               comm,
                              const DeviceContext&   deviceContext,
                              gmx::ArrayRef<PpRanks> ppRanks) :
    pmeForcesReady_(pmeForcesReady), comm_(comm), ppRanks_(ppRanks), deviceContext_(deviceContext)
{
    // Create streams and events to manage pushing of force buffers to remote PP ranks
    std::unique_ptr<DeviceStream>         stream;
    std::unique_ptr<GpuEventSynchronizer> event;
    ppCommStream_.resize(ppRanks_.size());
    ppCommEvent_.resize(ppRanks_.size());
    for (size_t i = 0; i < ppRanks_.size(); i++)
    {
        stream = std::make_unique<DeviceStream>(deviceContext_, DeviceStreamPriority::High, false);
        ppCommStream_[i] = std::move(stream);
        event            = std::make_unique<GpuEventSynchronizer>();
        ppCommEvent_[i]  = std::move(event);
    }
}
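
// Design note: each PP rank gets its own high-priority stream and its own event so that force
// pushes to different PP ranks (see sendFToPpCudaDirect() below) can be enqueued and
// synchronized independently of one another.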

PmeForceSenderGpu::Impl::~Impl() = default;

/*! \brief Sets location of force to be sent to each PP rank */
void PmeForceSenderGpu::Impl::setForceSendBuffer(DeviceBuffer<Float3> d_f)
{
    // The per-PP-rank force pointers are only needed for thread-MPI, where forces are
    // pushed directly between GPUs with cudaMemcpyAsync (see sendFToPpCudaDirect())
#if GMX_THREAD_MPI
    if (localForcePtr_.empty())
    {
        localForcePtr_.resize(ppRanks_.size());
    }

    int ind_start = 0;
    int ind_end   = 0;
    int i         = 0;
    for (const auto& receiver : ppRanks_)
    {
        ind_start = ind_end;
        ind_end   = ind_start + receiver.numAtoms;
        localForcePtr_[i++] = &d_f[ind_start];
    }
#else
    GMX_UNUSED_VALUE(d_f);
#endif
}
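
// With thread-MPI, localForcePtr_[i] ends up pointing at the start of PP rank i's atom range
// within the PME force buffer; it is used as the copy source in sendFToPpCudaDirect(). The
// lib-MPI path (sendFToPpCudaMpi()) instead sends ranges of the force buffer with MPI_Isend
// and does not use this mapping.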

/*! \brief Send PME force data directly to a PP rank using CUDA memory copy */
void PmeForceSenderGpu::Impl::sendFToPpCudaDirect(int ppRank, int numAtoms)
{
    GMX_ASSERT(GMX_THREAD_MPI, "sendFToPpCudaDirect is expected to be called only for Thread-MPI");

#if GMX_MPI
    // Receive the address of the destination force buffer on the PP rank
    void* pmeRemoteForcePtr;
    // NOLINTNEXTLINE(bugprone-sizeof-expression)
    MPI_Recv(&pmeRemoteForcePtr, sizeof(void*), MPI_BYTE, ppRank, 0, comm_, MPI_STATUS_IGNORE);

    // Ensure the PME forces are ready on the device before enqueueing the copy
    pmeForcesReady_->enqueueWaitEvent(*ppCommStream_[ppRank]);

    cudaError_t stat = cudaMemcpyAsync(pmeRemoteForcePtr,
                                       localForcePtr_[ppRank],
                                       numAtoms * sizeof(rvec),
                                       cudaMemcpyDefault,
                                       ppCommStream_[ppRank]->stream());
    CU_RET_ERR(stat, "cudaMemcpyAsync on Recv from PME CUDA direct data transfer failed");

    // Record an event after the copy and send its address, so that the PP rank can
    // enqueue a wait on it before using the forces
    ppCommEvent_[ppRank]->markEvent(*ppCommStream_[ppRank]);
    GpuEventSynchronizer* pmeSyncPtr = ppCommEvent_[ppRank].get();
    // NOLINTNEXTLINE(bugprone-sizeof-expression)
    MPI_Send(&pmeSyncPtr, sizeof(GpuEventSynchronizer*), MPI_BYTE, ppRank, 0, comm_);
#else
    GMX_UNUSED_VALUE(ppRank);
    GMX_UNUSED_VALUE(numAtoms);
#endif
}
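
/* Sketch of the matching handshake expected on the PP-rank side for the thread-MPI path above
 * (illustrative only; the receiving class and the variable names are not shown in this file):
 *
 *   void* d_recvBuf = ...;  // device pointer to this PP rank's force receive buffer
 *   MPI_Send(&d_recvBuf, sizeof(void*), MPI_BYTE, pmeRank, 0, comm);
 *
 *   GpuEventSynchronizer* pmeSync = nullptr;
 *   MPI_Recv(&pmeSync, sizeof(GpuEventSynchronizer*), MPI_BYTE, pmeRank, 0, comm, MPI_STATUS_IGNORE);
 *   pmeSync->enqueueWaitEvent(ppStream);  // only consume the forces after the PME-side copy completes
 */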

/*! \brief Send PME force data directly using CUDA-aware MPI */
void PmeForceSenderGpu::Impl::sendFToPpCudaMpi(DeviceBuffer<RVec> sendbuf,
                                               int                offset,
                                               int                numBytes,
                                               int                ppRank,
                                               MPI_Request*       request)
{
    GMX_ASSERT(GMX_LIB_MPI, "sendFToPpCudaMpi is expected to be called only for Lib-MPI");

#if GMX_MPI
    // When using GPU-direct communication with CUDA-aware MPI, make sure the forces are ready
    // on the device before sending them to the PP ranks
    pmeForcesReady_->waitForEvent();

    MPI_Isend(sendbuf[offset], numBytes, MPI_BYTE, ppRank, 0, comm_, request);
#else
    GMX_UNUSED_VALUE(sendbuf);
    GMX_UNUSED_VALUE(offset);
    GMX_UNUSED_VALUE(numBytes);
    GMX_UNUSED_VALUE(ppRank);
    GMX_UNUSED_VALUE(request);
#endif
}
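
// Note: the two send paths are mutually exclusive. With thread-MPI, sendFToPpCudaDirect() pushes
// forces with cudaMemcpyAsync into a device buffer whose address the PP rank supplied; with
// lib-MPI, sendFToPpCudaMpi() passes the device buffer straight to a CUDA-aware MPI_Isend.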

PmeForceSenderGpu::PmeForceSenderGpu(GpuEventSynchronizer*  pmeForcesReady,
                                     MPI_Comm               comm,
                                     const DeviceContext&   deviceContext,
                                     gmx::ArrayRef<PpRanks> ppRanks) :
    impl_(new Impl(pmeForcesReady, comm, deviceContext, ppRanks))
{
}

PmeForceSenderGpu::~PmeForceSenderGpu() = default;

void PmeForceSenderGpu::setForceSendBuffer(DeviceBuffer<RVec> d_f)
{
    // Convert to the Float3 view expected by the implementation class
    impl_->setForceSendBuffer(asFloat3(d_f));
}

void PmeForceSenderGpu::sendFToPpCudaMpi(DeviceBuffer<RVec> sendbuf,
                                         int                offset,
                                         int                numBytes,
                                         int                ppRank,
                                         MPI_Request*       request)
{
    impl_->sendFToPpCudaMpi(sendbuf, offset, numBytes, ppRank, request);
}

void PmeForceSenderGpu::sendFToPpCudaDirect(int ppRank, int numAtoms)
{
    impl_->sendFToPpCudaDirect(ppRank, numAtoms);
}

} // namespace gmx