/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Implements PME-PP communication using CUDA
 *
 * \author Alan Gray <alang@nvidia.com>
 *
 * \ingroup module_ewald
 */
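
// Calling-sequence sketch for context (the caller-side names below, such as
// d_x, numAtoms and xReadyOnDevice, are illustrative assumptions, not
// identifiers from this module): each MD step the PP rank pushes coordinates
// to the PME rank and later collects the PME force,
//
//   pmePpCommGpu->sendCoordinatesToPmeFromGpu(d_x, numAtoms, xReadyOnDevice);
//   // ... local short-range force work ...
//   pmePpCommGpu->receiveForceFromPme(h_forces, numAtoms, receivePmeForceToGpu);
//
// with getGpuForceStagingPtr()/getForcesReadySynchronizer() exposing the
// staged device forces and their readiness event to the GPU force reduction.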
#include "gmxpre.h"

#include "gromacs/ewald/pme_pp_communication.h"

#include "pme_pp_comm_gpu_impl.h"

#include "config.h"

#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/device_context.h"
#include "gromacs/gpu_utils/device_stream.h"
#include "gromacs/gpu_utils/devicebuffer.h"
#include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
#include "gromacs/gpu_utils/typecasts.cuh"
#include "gromacs/utility/gmxmpi.h"
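
// The transfers below count DIM floats per atom while the buffers hold
// float3/RVec elements; this compile-time check (a sanity-check sketch of
// that layout assumption, not load-bearing) makes the packing explicit.
static_assert(sizeof(float3) == DIM * sizeof(float),
              "PME-PP GPU communication assumes float3 is DIM tightly packed floats");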
namespace gmx
{

PmePpCommGpu::Impl::Impl(MPI_Comm                comm,
                         int                     pmeRank,
                         std::vector<gmx::RVec>* pmeCpuForceBuffer,
                         const DeviceContext&    deviceContext,
                         const DeviceStream&     deviceStream) :
    deviceContext_(deviceContext),
    pmePpCommStream_(deviceStream),
    comm_(comm),
    pmeRank_(pmeRank),
    pmeCpuForceBuffer_(pmeCpuForceBuffer)
{
}

PmePpCommGpu::Impl::~Impl() = default;
void PmePpCommGpu::Impl::reinit(int size)
{
    // Reallocate device buffer used for staging PME force
    reallocateDeviceBuffer(&d_pmeForces_, size, &d_pmeForcesSize_, &d_pmeForcesSizeAlloc_, deviceContext_);

    // This rank will access PME rank memory directly, so needs to receive the remote PME buffer addresses.
#if GMX_MPI
    if (GMX_THREAD_MPI)
    {
        // receive device coordinate buffer address from PME rank
        MPI_Recv(&remotePmeXBuffer_, sizeof(float3*), MPI_BYTE, pmeRank_, 0, comm_, MPI_STATUS_IGNORE);
        // send host and device force buffer addresses to PME rank
        MPI_Send(&d_pmeForces_, sizeof(float3*), MPI_BYTE, pmeRank_, 0, comm_);
        RVec* pmeCpuForceBufferData = pmeCpuForceBuffer_->data();
        MPI_Send(&pmeCpuForceBufferData, sizeof(RVec*), MPI_BYTE, pmeRank_, 0, comm_);
    }
#endif
}
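
// Note on the exchange in reinit() above: it is only meaningful under
// thread-MPI, where all ranks share one process address space, so the raw
// device and host pointers stay valid on the receiving rank. The PME rank is
// assumed to post the mirror-image operations (send its coordinate-buffer
// address, then receive the two force-buffer addresses) in the same order,
// since these blocking calls would otherwise deadlock.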
// TODO make this asynchronous by splitting this into
// launchRecvForceFromPmeCudaDirect() and syncRecvForceFromPmeCudaDirect()
void PmePpCommGpu::Impl::receiveForceFromPmeCudaDirect(bool receivePmeForceToGpu)
{
#if GMX_MPI
    // Remote PME task pushes GPU data directly to this PP task.

    // Receive event from PME task after the PME->PP force data push has
    // been scheduled, and enqueue a wait on it in the PP stream.
    GpuEventSynchronizer* eventptr;
    // NOLINTNEXTLINE(bugprone-sizeof-expression)
    MPI_Recv(&eventptr, sizeof(GpuEventSynchronizer*), MPI_BYTE, pmeRank_, 0, comm_, MPI_STATUS_IGNORE);
    eventptr->enqueueWaitEvent(pmePpCommStream_);

    if (receivePmeForceToGpu)
    {
        // Record event to be enqueued in the GPU local buffer operations, to
        // satisfy dependency on receiving the PME force data before
        // reducing it with the other force contributions.
        forcesReadySynchronizer_.markEvent(pmePpCommStream_);
    }
    else
    {
        // Ensure CPU waits for PME forces to be copied before reducing
        // them with other forces on the CPU
        pmePpCommStream_.synchronize();
    }
#endif
}
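
// The event handshake in receiveForceFromPmeCudaDirect() relies on the same
// shared-address-space property as the pointer exchange in reinit(): the PME
// rank sends a pointer to its own GpuEventSynchronizer, which this rank can
// enqueue directly under thread-MPI. Synchronization is thus kept on the GPU
// streams; the only host-side blocking is the MPI_Recv of the pointer itself.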
void PmePpCommGpu::Impl::receiveForceFromPmeCudaMpi(float3* pmeForcePtr, int recvSize)
{
#if GMX_MPI
    MPI_Recv(pmeForcePtr, recvSize * DIM, MPI_FLOAT, pmeRank_, 0, comm_, MPI_STATUS_IGNORE);
#else
    GMX_UNUSED_VALUE(pmeForcePtr);
    GMX_UNUSED_VALUE(recvSize);
#endif
}
void PmePpCommGpu::Impl::receiveForceFromPme(float3* recvPtr, int recvSize, bool receivePmeForceToGpu)
{
    float3* pmeForcePtr = receivePmeForceToGpu ? asFloat3(d_pmeForces_) : recvPtr;
    if (GMX_THREAD_MPI)
    {
        receiveForceFromPmeCudaDirect(receivePmeForceToGpu);
    }
    else
    {
        receiveForceFromPmeCudaMpi(pmeForcePtr, recvSize);
    }
}
void PmePpCommGpu::Impl::sendCoordinatesToPmeCudaDirect(float3*               sendPtr,
                                                        int                   sendSize,
                                                        GpuEventSynchronizer* coordinatesReadyOnDeviceEvent)
{
    // ensure stream waits until coordinate data is available on device
    coordinatesReadyOnDeviceEvent->enqueueWaitEvent(pmePpCommStream_);

    cudaError_t stat = cudaMemcpyAsync(remotePmeXBuffer_,
                                       sendPtr,
                                       sendSize * DIM * sizeof(float),
                                       cudaMemcpyDefault,
                                       pmePpCommStream_.stream());
    CU_RET_ERR(stat, "cudaMemcpyAsync on Send to PME CUDA direct data transfer failed");

#if GMX_MPI
    // Record and send event to allow PME task to sync to above transfer before commencing force calculations
    pmeCoordinatesSynchronizer_.markEvent(pmePpCommStream_);
    GpuEventSynchronizer* pmeSync = &pmeCoordinatesSynchronizer_;
    // NOLINTNEXTLINE(bugprone-sizeof-expression)
    MPI_Send(&pmeSync, sizeof(GpuEventSynchronizer*), MPI_BYTE, pmeRank_, 0, comm_);
#endif
}
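
// The cudaMemcpyAsync in sendCoordinatesToPmeCudaDirect() writes straight
// into the PME rank's coordinate buffer (remotePmeXBuffer_, obtained in
// reinit()). Because both ranks share a process under thread-MPI,
// cudaMemcpyDefault lets the CUDA runtime resolve source and destination via
// unified virtual addressing, including a peer-to-peer copy when the two
// ranks drive different GPUs.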
void PmePpCommGpu::Impl::sendCoordinatesToPmeCudaMpi(float3*               sendPtr,
                                                     int                   sendSize,
                                                     GpuEventSynchronizer* coordinatesReadyOnDeviceEvent)
{
    // ensure coordinate data is available on device before we start transfer
    coordinatesReadyOnDeviceEvent->waitForEvent();

#if GMX_MPI
    float3* sendptr_x = sendPtr;

    MPI_Send(sendptr_x, sendSize * DIM, MPI_FLOAT, pmeRank_, eCommType_COORD_GPU, comm_);
#else
    GMX_UNUSED_VALUE(sendPtr);
    GMX_UNUSED_VALUE(sendSize);
#endif
}
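
// In the lib-MPI path above, sendPtr is typically a device pointer, so a
// CUDA-aware MPI implementation is assumed: MPI_Send/MPI_Recv then operate on
// GPU memory directly, and the blocking MPI call itself provides the
// synchronization that the event handshake supplies in the thread-MPI path.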
void PmePpCommGpu::Impl::sendCoordinatesToPme(float3*               sendPtr,
                                              int                   sendSize,
                                              GpuEventSynchronizer* coordinatesReadyOnDeviceEvent)
{
    if (GMX_THREAD_MPI)
    {
        sendCoordinatesToPmeCudaDirect(sendPtr, sendSize, coordinatesReadyOnDeviceEvent);
    }
    else
    {
        sendCoordinatesToPmeCudaMpi(sendPtr, sendSize, coordinatesReadyOnDeviceEvent);
    }
}
DeviceBuffer<Float3> PmePpCommGpu::Impl::getGpuForceStagingPtr()
{
    return d_pmeForces_;
}
GpuEventSynchronizer* PmePpCommGpu::Impl::getForcesReadySynchronizer()
{
    if (GMX_THREAD_MPI)
    {
        return &forcesReadySynchronizer_;
    }
    else
    {
        return nullptr;
    }
}
PmePpCommGpu::PmePpCommGpu(MPI_Comm                comm,
                           int                     pmeRank,
                           std::vector<gmx::RVec>* pmeCpuForceBuffer,
                           const DeviceContext&    deviceContext,
                           const DeviceStream&     deviceStream) :
    impl_(new Impl(comm, pmeRank, pmeCpuForceBuffer, deviceContext, deviceStream))
{
}

PmePpCommGpu::~PmePpCommGpu() = default;
void PmePpCommGpu::reinit(int size)
{
    impl_->reinit(size);
}
void PmePpCommGpu::receiveForceFromPme(RVec* recvPtr, int recvSize, bool receivePmeForceToGpu)
{
    impl_->receiveForceFromPme(asFloat3(recvPtr), recvSize, receivePmeForceToGpu);
}
void PmePpCommGpu::sendCoordinatesToPmeFromGpu(DeviceBuffer<RVec>    sendPtr,
                                               int                   sendSize,
                                               GpuEventSynchronizer* coordinatesReadyOnDeviceEvent)
{
    impl_->sendCoordinatesToPme(asFloat3(sendPtr), sendSize, coordinatesReadyOnDeviceEvent);
}
void PmePpCommGpu::sendCoordinatesToPmeFromCpu(RVec*                 sendPtr,
                                               int                   sendSize,
                                               GpuEventSynchronizer* coordinatesReadyOnDeviceEvent)
{
    impl_->sendCoordinatesToPme(asFloat3(sendPtr), sendSize, coordinatesReadyOnDeviceEvent);
}
DeviceBuffer<Float3> PmePpCommGpu::getGpuForceStagingPtr()
{
    return impl_->getGpuForceStagingPtr();
}
GpuEventSynchronizer* PmePpCommGpu::getForcesReadySynchronizer()
{
    return impl_->getForcesReadySynchronizer();
}

} // namespace gmx