/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \internal \file
 *
 * \brief Implements GPU halo exchange using CUDA.
 *
 * \author Alan Gray <alang@nvidia.com>
 *
 * \ingroup module_domdec
 */

#include "gmxpre.h"

#include "gpuhaloexchange_impl.cuh"

#include "config.h"

#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/domdec/gpuhaloexchange.h"
#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/devicebuffer.h"
#include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
#include "gromacs/gpu_utils/vectype_ops.cuh"
#include "gromacs/pbcutil/ishift.h"

#include "domdec_internal.h"

namespace gmx
{

//! Number of CUDA threads in a block
// TODO Optimize this through experimentation
constexpr static int c_threadsPerBlock = 256;

template <bool usePBC>
__global__ void packSendBufKernel(float3 * __restrict__       dataPacked,
                                  const float3 * __restrict__ data,
                                  const int * __restrict__    map,
                                  const int                   mapSize,
                                  const float3                coordinateShift)
{
    int threadIndex = blockIdx.x*blockDim.x + threadIdx.x;

    // The bounds check also guards the map[] lookup, so that out-of-range
    // threads do not read beyond the end of the index map.
    if (threadIndex < mapSize)
    {
        float3       *gm_dataDest = &dataPacked[threadIndex];
        const float3 *gm_dataSrc  = &data[map[threadIndex]];

        if (usePBC)
        {
            *gm_dataDest = *gm_dataSrc + coordinateShift;
        }
        else
        {
            *gm_dataDest = *gm_dataSrc;
        }
    }
}
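
/* For reference, the gather performed by packSendBufKernel above is
 * equivalent to this serial host-side loop (an illustrative sketch only;
 * the h_-prefixed names are hypothetical host copies of the device arrays,
 * not variables in this file):
 *
 * \code
 *   for (int i = 0; i < mapSize; i++)
 *   {
 *       h_dataPacked[i] = usePBC ? h_data[h_map[i]] + coordinateShift : h_data[h_map[i]];
 *   }
 * \endcode
 *
 * One thread handles one index-map entry, so the writes to the send buffer
 * coalesce while the reads gather from scattered locations.
 */
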
void GpuHaloExchange::Impl::reinitHalo(float3 *d_coordinatesBuffer)
{
    d_x_ = d_coordinatesBuffer;

    cudaStream_t                 stream  = nonLocalStream_;
    int                          nzone   = 1;
    const gmx_domdec_comm_t     &comm    = *dd_->comm;
    const gmx_domdec_comm_dim_t &cd      = comm.cd[0];
    const gmx_domdec_ind_t      &ind     = cd.ind[0];
    int                          newSize = ind.nsend[nzone+1];

    GMX_RELEASE_ASSERT(cd.numPulses() == 1, "Multiple pulses are not yet supported in GPU halo exchange");
    GMX_ASSERT(cd.receiveInPlace, "Out-of-place receive is not yet supported in GPU halo exchange");

    // reallocates only if needed
    h_indexMap_.resize(newSize);
    // reallocate on device only if needed
    if (newSize > maxPackedBufferSize_)
    {
        reallocateDeviceBuffer(&d_indexMap_, newSize, &indexMapSize_, &indexMapSizeAlloc_, nullptr);
        reallocateDeviceBuffer(&d_sendBuf_, newSize, &sendBufSize_, &sendBufSizeAlloc_, nullptr);
        reallocateDeviceBuffer(&d_recvBuf_, newSize, &recvBufSize_, &recvBufSizeAlloc_, nullptr);
        maxPackedBufferSize_ = newSize;
    }

    xSendSize_ = newSize;
    MPI_Sendrecv(&xSendSize_, sizeof(int), MPI_BYTE, sendRankX_, 0,
                 &xRecvSize_, sizeof(int), MPI_BYTE, recvRankX_, 0,
                 mpi_comm_mysim_, MPI_STATUS_IGNORE);
    fSendSize_ = xRecvSize_;
    fRecvSize_ = xSendSize_;

    localOffset_ = comm.atomRanges.numHomeAtoms(); // offset for data received by this rank

    GMX_ASSERT(ind.index.size() == h_indexMap_.size(), "Size mismatch");
    std::copy(ind.index.begin(), ind.index.end(), h_indexMap_.begin());

    copyToDeviceBuffer(&d_indexMap_, h_indexMap_.data(), 0, newSize, stream, GpuApiCallBehavior::Async, nullptr);

    // This rank will push data to its neighbor, so it needs to know the
    // remote receive address, and it must send its own receive address to
    // the other neighbor. This can be done here in the reinit function,
    // since the pointers will not change until the next NS step.

    // Coordinates buffer:
    void *recvPtr = static_cast<void*>(&d_coordinatesBuffer[localOffset_]);
    MPI_Sendrecv(&recvPtr, sizeof(void*), MPI_BYTE, recvRankX_, 0,
                 &remoteXPtr_, sizeof(void*), MPI_BYTE, sendRankX_, 0,
                 mpi_comm_mysim_, MPI_STATUS_IGNORE);

    // Force buffer:
    recvPtr = static_cast<void*>(d_recvBuf_);
    MPI_Sendrecv(&recvPtr, sizeof(void*), MPI_BYTE, recvRankF_, 0,
                 &remoteFPtr_, sizeof(void*), MPI_BYTE, sendRankF_, 0,
                 mpi_comm_mysim_, MPI_STATUS_IGNORE);
}
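
/* The two MPI_Sendrecv calls above implement a peer handshake: each rank
 * sends the address of its own receive region to the neighbor that will push
 * data to it (recvRankX_/recvRankF_), and in the same call obtains the remote
 * destination address (remoteXPtr_/remoteFPtr_) from the neighbor it will
 * itself push to (sendRankX_/sendRankF_). Exchanging raw device pointers
 * between ranks is valid here only because thread-MPI ranks share a single
 * address space; see the GMX_THREAD_MPI assertion in the constructor below.
 */
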
// The following method should be called after the local setCoordinates (which records the
// coordinatesOnDeviceEvent_ event when the coordinate data has been copied to the device).
void GpuHaloExchange::Impl::communicateHaloCoordinates(const matrix box)
{
    // ensure stream waits until coordinate data is available on device
    coordinatesOnDeviceEvent_->enqueueWaitEvent(nonLocalStream_);

    // launch kernel to pack send buffer
    KernelLaunchConfig config;
    config.blockSize[0]     = c_threadsPerBlock;
    config.blockSize[1]     = 1;
    config.blockSize[2]     = 1;
    config.gridSize[0]      = (xSendSize_ + c_threadsPerBlock - 1)/c_threadsPerBlock;
    config.gridSize[1]      = 1;
    config.gridSize[2]      = 1;
    config.sharedMemorySize = 0;
    config.stream           = nonLocalStream_;

    // sendBuf is written by the packing kernel, so it is not const-qualified
    float3       *sendBuf  = d_sendBuf_;
    const float3 *d_x      = d_x_;
    const int    *indexMap = d_indexMap_;
    const int     size     = xSendSize_;

    // The coordinateShift changes between steps when we have
    // performed a DD partition, or have updated the box e.g. when
    // performing pressure coupling. So, for simplicity, the box
    // is used every step to pass the shift vector as an argument of
    // the packing kernel.

    // Because only one-dimensional DD is supported, the coordinate
    // shift only needs to handle that dimension.
    const int    dimensionIndex = dd_->dim[0];
    const float3 coordinateShift {
        box[dimensionIndex][XX], box[dimensionIndex][YY], box[dimensionIndex][ZZ]
    };

    // Avoid launching kernel when there is no work to do
    if (size > 0)
    {
        auto       kernelFn   = usePBC_ ? packSendBufKernel<true> : packSendBufKernel<false>;
        const auto kernelArgs = prepareGpuKernelArguments(kernelFn, config, &sendBuf, &d_x, &indexMap,
                                                          &size, &coordinateShift);
        launchGpuKernel(kernelFn, config, nullptr, "Domdec GPU Apply X Halo Exchange", kernelArgs);
    }

    communicateHaloData(d_x_, HaloQuantity::HaloCoordinates);
}
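
/* Summary of the per-step coordinate halo pipeline enqueued above on the
 * non-local stream (assuming 1D DD with a single pulse):
 *   1. wait on coordinatesOnDeviceEvent_ (local H2D coordinate copy complete);
 *   2. packSendBufKernel gathers the halo coordinates via d_indexMap_ into
 *      d_sendBuf_, applying the periodic shift where required;
 *   3. communicateHaloData pushes d_sendBuf_ directly into the remote rank's
 *      coordinate buffer.
 */
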
void GpuHaloExchange::Impl::communicateHaloData(float3      *d_ptr,
                                                HaloQuantity haloQuantity)
{
    void *sendPtr;
    int   sendSize;
    void *remotePtr;
    int   sendRank;
    int   recvRank;

    if (haloQuantity == HaloQuantity::HaloCoordinates)
    {
        sendPtr   = static_cast<void*>(d_sendBuf_);
        sendSize  = xSendSize_;
        remotePtr = remoteXPtr_;
        sendRank  = sendRankX_;
        recvRank  = recvRankX_;
    }
    else
    {
        sendPtr   = static_cast<void*>(&(d_ptr[localOffset_]));
        sendSize  = fSendSize_;
        remotePtr = remoteFPtr_;
        sendRank  = sendRankF_;
        recvRank  = recvRankF_;
    }

    communicateHaloDataWithCudaDirect(sendPtr, sendSize, sendRank, remotePtr, recvRank);
}

void GpuHaloExchange::Impl::communicateHaloDataWithCudaDirect(void *sendPtr,
                                                              int   sendSize,
                                                              int   sendRank,
                                                              void *remotePtr,
                                                              int   recvRank)
{
    cudaError_t stat;

    // We asynchronously push data to the remote rank. The remote
    // destination pointer has already been set in the init fn. We
    // don't need to worry about overwriting data the remote rank
    // still needs, since the halo exchange is only done once per
    // timestep for each of X and F.

    // send data to neighbor, if any data exists to send
    if (sendSize > 0)
    {
        stat = cudaMemcpyAsync(remotePtr, sendPtr, sendSize*DIM*sizeof(float), cudaMemcpyDeviceToDevice, nonLocalStream_);
        CU_RET_ERR(stat, "cudaMemcpyAsync on GPU Domdec CUDA direct data transfer failed");
    }

    // Ensure the pushed data has arrived before the remote rank progresses.
    // This rank records an event and sends it to the remote rank, which has just been pushed data.
    // This rank receives the event from the remote rank that has pushed data here, and enqueues
    // that event to its stream.
    GpuEventSynchronizer *haloDataTransferRemote;

    haloDataTransferLaunched_->markEvent(nonLocalStream_);

    MPI_Sendrecv(&haloDataTransferLaunched_, sizeof(GpuEventSynchronizer*), MPI_BYTE, sendRank, 0,
                 &haloDataTransferRemote, sizeof(GpuEventSynchronizer*), MPI_BYTE, recvRank, 0,
                 mpi_comm_mysim_, MPI_STATUS_IGNORE);

    haloDataTransferRemote->enqueueWaitEvent(nonLocalStream_);
}
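
/* The synchronization handshake above, from the point of view of one rank:
 *   1. the cudaMemcpyAsync push into the remote buffer is enqueued;
 *   2. markEvent() records haloDataTransferLaunched_ after that push;
 *   3. MPI_Sendrecv swaps GpuEventSynchronizer pointers with the neighbors;
 *   4. enqueueWaitEvent() makes this rank's stream wait on the neighbor's
 *      event, so nothing that consumes the received halo data can run before
 *      the neighbor's push has completed.
 * As with the pointer exchange in reinitHalo, passing event object addresses
 * through MPI relies on the shared address space of thread-MPI ranks.
 */
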
/*! \brief Create Domdec GPU object */
GpuHaloExchange::Impl::Impl(gmx_domdec_t *dd,
                            MPI_Comm      mpi_comm_mysim,
                            void         *nonLocalStream,
                            void         *coordinatesOnDeviceEvent)
    : dd_(dd),
      sendRankX_(dd->neighbor[0][1]),
      recvRankX_(dd->neighbor[0][0]),
      sendRankF_(dd->neighbor[0][0]),
      recvRankF_(dd->neighbor[0][1]),
      usePBC_(dd->ci[dd->dim[0]] == 0),
      haloDataTransferLaunched_(new GpuEventSynchronizer()),
      mpi_comm_mysim_(mpi_comm_mysim),
      nonLocalStream_(*static_cast<cudaStream_t*>(nonLocalStream)),
      coordinatesOnDeviceEvent_(static_cast<GpuEventSynchronizer*>(coordinatesOnDeviceEvent))
{
    GMX_RELEASE_ASSERT(GMX_THREAD_MPI, "GPU Halo exchange is currently only supported with thread-MPI enabled");

    if (dd->ndim > 1)
    {
        gmx_fatal(FARGS, "Error: dd->ndim > 1 is not yet supported in GPU halo exchange");
    }

    if (usePBC_ && dd->unitCellInfo.haveScrewPBC)
    {
        gmx_fatal(FARGS, "Error: screw is not yet supported in GPU halo exchange\n");
    }

    changePinningPolicy(&h_indexMap_, gmx::PinningPolicy::PinnedIfSupported);

    allocateDeviceBuffer(&d_fShift_, 1, nullptr);
}

GpuHaloExchange::Impl::~Impl()
{
    freeDeviceBuffer(&d_indexMap_);
    freeDeviceBuffer(&d_sendBuf_);
    freeDeviceBuffer(&d_recvBuf_);
    freeDeviceBuffer(&d_fShift_);
    delete haloDataTransferLaunched_;
}

GpuHaloExchange::GpuHaloExchange(gmx_domdec_t *dd,
                                 MPI_Comm      mpi_comm_mysim,
                                 void         *nonLocalStream,
                                 void         *coordinatesOnDeviceEvent)
    : impl_(new Impl(dd, mpi_comm_mysim, nonLocalStream, coordinatesOnDeviceEvent))
{
}

GpuHaloExchange::~GpuHaloExchange() = default;

void GpuHaloExchange::reinitHalo(rvec *d_coordinatesBuffer)
{
    impl_->reinitHalo(reinterpret_cast<float3*>(d_coordinatesBuffer));
}

void GpuHaloExchange::communicateHaloCoordinates(const matrix box)
{
    impl_->communicateHaloCoordinates(box);
}

} // namespace gmx
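
/* A minimal usage sketch of the public API (illustrative only; the domdec
 * struct, communicator, stream, event, and coordinate-buffer variables are
 * assumed to be provided by the caller and do not appear in this file):
 *
 * \code
 *   gmx::GpuHaloExchange gpuHaloExchange(dd, mpi_comm_mysim,
 *                                        &nonLocalStream, xReadyOnDeviceEvent);
 *   // After each domain (re)partitioning:
 *   gpuHaloExchange.reinitHalo(d_coordinates);
 *   // Every step, once the local coordinates are on the device:
 *   gpuHaloExchange.communicateHaloCoordinates(box);
 * \endcode
 */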