/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Implements PME-PP communication using CUDA
 *
 * \author Alan Gray <alang@nvidia.com>
 *
 * \ingroup module_ewald
 */
#include "gmxpre.h"

#include "pme_pp_comm_gpu_impl.h"

#include "config.h"

#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/devicebuffer.h"
#include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
#include "gromacs/utility/gmxmpi.h"

namespace gmx
{
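// The communicator and PME rank passed to the constructor identify the peer PME
// task with which this PP rank exchanges buffer addresses and synchronization
// events; a dedicated CUDA stream is created for these PME-PP transfers.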
PmePpCommGpu::Impl::Impl(MPI_Comm comm, int pmeRank)
    : comm_(comm),
      pmeRank_(pmeRank)
{
    GMX_RELEASE_ASSERT(GMX_THREAD_MPI,
                       "PME-PP GPU Communication is currently only supported with thread-MPI enabled");
    cudaStreamCreate(&pmePpCommStream_);
}

PmePpCommGpu::Impl::~Impl() = default;
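// Receives the remote PME coordinate- and force-buffer addresses from the PME
// rank and (re)allocates the local GPU buffer used to stage received PME forces.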
void PmePpCommGpu::Impl::reinit(int size)
{
    // This rank will access PME rank memory directly, so it needs to receive the remote PME buffer addresses.
    MPI_Recv(&remotePmeXBuffer_, sizeof(void**), MPI_BYTE, pmeRank_, 0, comm_, MPI_STATUS_IGNORE);
    MPI_Recv(&remotePmeFBuffer_, sizeof(void**), MPI_BYTE, pmeRank_, 0, comm_, MPI_STATUS_IGNORE);

    // Reallocate buffer used for staging PME forces on the GPU
    reallocateDeviceBuffer(&d_pmeForces_, size, &d_pmeForcesSize_, &d_pmeForcesSizeAlloc_, nullptr);
}
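// Pulls the PME forces directly from the memory of the PME task's GPU, either
// into a GPU staging buffer (for later reduction on the GPU) or into the
// provided CPU buffer, synchronizing as appropriate in each case.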
// TODO make this asynchronous by splitting this into
// launchRecvForceFromPmeCudaDirect() and syncRecvForceFromPmeCudaDirect()
void PmePpCommGpu::Impl::receiveForceFromPmeCudaDirect(void* recvPtr, int recvSize, bool receivePmeForceToGpu)
{
    // Receive event from PME task and add to stream, to ensure pull of data doesn't
    // occur before PME force calculation is completed
    GpuEventSynchronizer* pmeSync;
    MPI_Recv(&pmeSync, sizeof(GpuEventSynchronizer*), MPI_BYTE, pmeRank_, 0, comm_, MPI_STATUS_IGNORE);
    pmeSync->enqueueWaitEvent(pmePpCommStream_);

    // Pull force data from remote GPU
    void*       pmeForcePtr = receivePmeForceToGpu ? static_cast<void*>(d_pmeForces_) : recvPtr;
    cudaError_t stat        = cudaMemcpyAsync(pmeForcePtr, remotePmeFBuffer_,
                                              recvSize * DIM * sizeof(float), cudaMemcpyDefault,
                                              pmePpCommStream_);
    CU_RET_ERR(stat, "cudaMemcpyAsync on Recv from PME CUDA direct data transfer failed");

    if (receivePmeForceToGpu)
    {
        // Record event to be enqueued in the GPU local buffer operations, to
        // satisfy dependency on receiving the PME force data before
        // reducing it with the other force contributions.
        forcesReadySynchronizer_.markEvent(pmePpCommStream_);
    }
    else
    {
        // Ensure CPU waits for PME forces to be copied before reducing
        // them with other forces on the CPU
        cudaStreamSynchronize(pmePpCommStream_);
    }
}
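// Pushes coordinates directly to the memory of the PME task's GPU, after
// waiting for them to be ready on the device, and notifies the PME task
// through an event so it can synchronize before starting its force calculation.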
void PmePpCommGpu::Impl::sendCoordinatesToPmeCudaDirect(void* sendPtr, int sendSize,
                                                         bool gmx_unused sendPmeCoordinatesFromGpu,
                                                         GpuEventSynchronizer* coordinatesReadyOnDeviceEvent)
{
    // Ensure stream waits until coordinate data is available on device
    coordinatesReadyOnDeviceEvent->enqueueWaitEvent(pmePpCommStream_);

    cudaError_t stat = cudaMemcpyAsync(remotePmeXBuffer_, sendPtr,
                                       sendSize * DIM * sizeof(float), cudaMemcpyDefault,
                                       pmePpCommStream_);
    CU_RET_ERR(stat, "cudaMemcpyAsync on Send to PME CUDA direct data transfer failed");

    // Record and send event to allow PME task to sync to above transfer before commencing force calculations
    pmeCoordinatesSynchronizer_.markEvent(pmePpCommStream_);
    GpuEventSynchronizer* pmeSync = &pmeCoordinatesSynchronizer_;
    MPI_Send(&pmeSync, sizeof(GpuEventSynchronizer*), MPI_BYTE, pmeRank_, 0, comm_);
}
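// The following accessors expose the GPU force staging buffer and its
// associated ready event to the GPU force-reduction code.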
void* PmePpCommGpu::Impl::getGpuForceStagingPtr()
{
    return static_cast<void*>(d_pmeForces_);
}

void* PmePpCommGpu::Impl::getForcesReadySynchronizer()
{
    return static_cast<void*>(&forcesReadySynchronizer_);
}
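// Public PmePpCommGpu methods below simply forward to the implementation class
// (PImpl idiom).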
PmePpCommGpu::PmePpCommGpu(MPI_Comm comm, int pmeRank)
    : impl_(new Impl(comm, pmeRank))
{
}

PmePpCommGpu::~PmePpCommGpu() = default;

void PmePpCommGpu::reinit(int size)
{
    impl_->reinit(size);
}

void PmePpCommGpu::receiveForceFromPmeCudaDirect(void* recvPtr, int recvSize, bool receivePmeForceToGpu)
{
    impl_->receiveForceFromPmeCudaDirect(recvPtr, recvSize, receivePmeForceToGpu);
}

void PmePpCommGpu::sendCoordinatesToPmeCudaDirect(void* sendPtr, int sendSize,
                                                  bool sendPmeCoordinatesFromGpu,
                                                  GpuEventSynchronizer* coordinatesReadyOnDeviceEvent)
{
    impl_->sendCoordinatesToPmeCudaDirect(sendPtr, sendSize, sendPmeCoordinatesFromGpu, coordinatesReadyOnDeviceEvent);
}

void* PmePpCommGpu::getGpuForceStagingPtr()
{
    return impl_->getGpuForceStagingPtr();
}

void* PmePpCommGpu::getForcesReadySynchronizer()
{
    return impl_->getForcesReadySynchronizer();
}

} // namespace gmx