/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Implements GPU halo exchange using CUDA.
 *
 * \author Alan Gray <alang@nvidia.com>
 *
 * \ingroup module_domdec
 */
#include "gmxpre.h"

#include "gpuhaloexchange_impl.cuh"

#include "config.h"

#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/domdec/gpuhaloexchange.h"
#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/devicebuffer.h"
#include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
#include "gromacs/gpu_utils/typecasts.cuh"
#include "gromacs/gpu_utils/vectype_ops.cuh"
#include "gromacs/math/vectypes.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/utility/gmxmpi.h"

#include "domdec_internal.h"

namespace gmx
{
//! Number of CUDA threads in a block
// TODO Optimize this through experimentation
constexpr static int c_threadsPerBlock = 256;
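// Editorial note (not in the original source): the kernel launch configurations below derive
// the grid size as ceil(numElements / c_threadsPerBlock), i.e.
//     config.gridSize[0] = (numElements + c_threadsPerBlock - 1) / c_threadsPerBlock;
// For example, 1000 elements with 256 threads per block yields 4 blocks (1024 threads), and
// the kernels mask the 24 surplus threads with their (threadIndex < mapSize) bounds check.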
//! \brief Pack non-local coordinate data buffer on the GPU using a pre-populated "map" of indices
template<bool usePBC>
__global__ void packSendBufKernel(float3* __restrict__ dataPacked,
                                  const float3* __restrict__ data,
                                  const int* __restrict__ map,
                                  const int    mapSize,
                                  const float3 coordinateShift)
{
    int           threadIndex = blockIdx.x * blockDim.x + threadIdx.x;
    float3*       gm_dataDest = &dataPacked[threadIndex];
    const float3* gm_dataSrc  = &data[map[threadIndex]];

    if (threadIndex < mapSize)
    {
        // Apply the periodic coordinate shift only when PBC is in use (compile-time constant)
        *gm_dataDest = usePBC ? (*gm_dataSrc + coordinateShift) : *gm_dataSrc;
    }
}
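// Worked example (illustrative values only): with map = {8, 3, 6}, mapSize = 3 and usePBC = true,
// threads 0..2 gather data[8], data[3] and data[6] into dataPacked[0..2], each shifted by
// coordinateShift; the remaining threads in the block fail the bounds check and do nothing.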
/*! \brief Unpack non-local force data buffer on the GPU using a pre-populated "map" containing
 * index information.
 *
 * \param[out] data        full array of force values
 * \param[in]  dataPacked  packed array of force values to be transferred
 * \param[in]  map         array of indices defining mapping from full to packed array
 * \param[in]  mapSize     number of elements in map array
 */
template<bool accumulate>
__global__ void unpackRecvBufKernel(float3* __restrict__ data,
                                    const float3* __restrict__ dataPacked,
                                    const int* __restrict__ map,
                                    const int mapSize)
{
    int           threadIndex = blockIdx.x * blockDim.x + threadIdx.x;
    const float3* gm_dataSrc  = &dataPacked[threadIndex];
    float3*       gm_dataDest = &data[map[threadIndex]];

    if (threadIndex < mapSize)
    {
        // Accumulate into, or overwrite, the full force array (compile-time constant choice)
        *gm_dataDest = accumulate ? (*gm_dataDest + *gm_dataSrc) : *gm_dataSrc;
    }
}
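// Worked example (illustrative values only): with map = {8, 3, 6} and mapSize = 3, threads 0..2
// scatter dataPacked[0..2] back to data[8], data[3] and data[6]; with accumulate = true the
// received forces are added to the existing values rather than overwriting them.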
void GpuHaloExchange::Impl::reinitHalo(float3* d_coordinatesBuffer, float3* d_forcesBuffer)
{
    d_x_ = d_coordinatesBuffer;
    d_f_ = d_forcesBuffer;

    cudaStream_t                 stream  = nonLocalStream_;
    const gmx_domdec_comm_t&     comm    = *dd_->comm;
    const gmx_domdec_comm_dim_t& cd      = comm.cd[0];
    const gmx_domdec_ind_t&      ind     = cd.ind[pulse_];
    int                          newSize = ind.nsend[nzone_ + 1];

    GMX_ASSERT(cd.receiveInPlace, "Out-of-place receive is not yet supported in GPU halo exchange");

    // reallocates only if needed
    h_indexMap_.resize(newSize);
    // reallocate on device only if needed
    if (newSize > maxPackedBufferSize_)
    {
        reallocateDeviceBuffer(&d_indexMap_, newSize, &indexMapSize_, &indexMapSizeAlloc_, deviceContext_);
        reallocateDeviceBuffer(&d_sendBuf_, newSize, &sendBufSize_, &sendBufSizeAlloc_, deviceContext_);
        reallocateDeviceBuffer(&d_recvBuf_, newSize, &recvBufSize_, &recvBufSizeAlloc_, deviceContext_);
        maxPackedBufferSize_ = newSize;
    }
    xSendSize_ = newSize;
    MPI_Sendrecv(&xSendSize_, sizeof(int), MPI_BYTE, sendRankX_, 0, &xRecvSize_, sizeof(int),
                 MPI_BYTE, recvRankX_, 0, mpi_comm_mysim_, MPI_STATUS_IGNORE);
    fSendSize_ = xRecvSize_;
    fRecvSize_ = xSendSize_;

    numHomeAtoms_ = comm.atomRanges.numHomeAtoms(); // offset for data received by this rank

    GMX_ASSERT(ind.index.size() == h_indexMap_.size(), "Size mismatch");
    std::copy(ind.index.begin(), ind.index.end(), h_indexMap_.begin());

    copyToDeviceBuffer(&d_indexMap_, h_indexMap_.data(), 0, newSize, stream,
                       GpuApiCallBehavior::Async, nullptr);
    // This rank will push data to its neighbor, so it needs to know the
    // remote receive address, and it similarly sends its own receive
    // address to the other neighbor. We can do this here in the reinit
    // function since the pointers will not change until the next NS step.
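    // Editorial note (not in the original source): exchanging raw device pointers like this is
    // valid because, with thread-MPI (asserted in the constructor), all ranks are threads of a
    // single process, so a CUDA device pointer obtained on one rank can be used as the
    // destination of a peer copy issued by another rank.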
    // Coordinates buffer:
    int pulseOffset = 0;
    for (int p = pulse_ - 1; p >= 0; p--)
    {
        pulseOffset += cd.ind[p].nrecv[nzone_ + 1];
    }
    void* recvPtr = static_cast<void*>(&d_x_[numHomeAtoms_ + pulseOffset]);
    MPI_Sendrecv(&recvPtr, sizeof(void*), MPI_BYTE, recvRankX_, 0, &remoteXPtr_, sizeof(void*),
                 MPI_BYTE, sendRankX_, 0, mpi_comm_mysim_, MPI_STATUS_IGNORE);

    // Force buffer:
    recvPtr = static_cast<void*>(d_recvBuf_);
    MPI_Sendrecv(&recvPtr, sizeof(void*), MPI_BYTE, recvRankF_, 0, &remoteFPtr_, sizeof(void*),
                 MPI_BYTE, sendRankF_, 0, mpi_comm_mysim_, MPI_STATUS_IGNORE);
}
void GpuHaloExchange::Impl::communicateHaloCoordinates(const matrix          box,
                                                       GpuEventSynchronizer* coordinatesReadyOnDeviceEvent)
{
    // ensure stream waits until coordinate data is available on device
    coordinatesReadyOnDeviceEvent->enqueueWaitEvent(nonLocalStream_);
    // launch kernel to pack send buffer
    KernelLaunchConfig config;
    config.blockSize[0]     = c_threadsPerBlock;
    config.blockSize[1]     = 1;
    config.blockSize[2]     = 1;
    config.gridSize[0]      = (xSendSize_ + c_threadsPerBlock - 1) / c_threadsPerBlock;
    config.gridSize[1]      = 1;
    config.gridSize[2]      = 1;
    config.sharedMemorySize = 0;
    config.stream           = nonLocalStream_;

    float3*       sendBuf  = d_sendBuf_;
    const float3* d_x      = d_x_;
    const int*    indexMap = d_indexMap_;
    const int     size     = xSendSize_;
    // The coordinateShift changes between steps when we have
    // performed a DD partition, or have updated the box e.g. when
    // performing pressure coupling. So, for simplicity, the shift
    // vector is recomputed from the box and passed as a kernel
    // argument every step.

    // Because only one-dimensional DD is supported, the coordinate
    // shift only needs to handle that dimension.
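    // Worked example (illustrative): with 1D DD along z in a triclinic box, dimensionIndex == ZZ
    // and coordinateShift is the full third box vector (box[ZZ][XX], box[ZZ][YY], box[ZZ][ZZ]),
    // so the off-diagonal components are handled; for a rectangular box only box[ZZ][ZZ] is
    // non-zero.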
    const int    dimensionIndex = dd_->dim[0];
    const float3 coordinateShift{ box[dimensionIndex][XX], box[dimensionIndex][YY],
                                  box[dimensionIndex][ZZ] };

    // Avoid launching kernel when there is no work to do
    if (size > 0)
    {
        auto kernelFn = usePBC_ ? packSendBufKernel<true> : packSendBufKernel<false>;

        const auto kernelArgs = prepareGpuKernelArguments(kernelFn, config, &sendBuf, &d_x,
                                                          &indexMap, &size, &coordinateShift);

        launchGpuKernel(kernelFn, config, nullptr, "Domdec GPU Apply X Halo Exchange", kernelArgs);
    }
    communicateHaloData(d_x_, HaloQuantity::HaloCoordinates, coordinatesReadyOnDeviceEvent);
}
// The following method should be called after non-local buffer operations,
// and before the local buffer operations. It operates in the non-local stream.
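// Illustrative caller-side ordering (hypothetical pseudocode; the function names are not
// actual mdrun call sites):
//
//     reduceNonLocalForcesOnGpu();                              // non-local buffer operations
//     gpuHaloExchange.communicateHaloForces(accumulateForces);
//     reduceLocalForcesOnGpu();                                 // local buffer operations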
void GpuHaloExchange::Impl::communicateHaloForces(bool accumulateForces)
{
    // Communicate halo data (in non-local stream)
    communicateHaloData(d_f_, HaloQuantity::HaloForces, nullptr);

    float3* d_f = d_f_;

    if (pulse_ == (dd_->comm->cd[0].numPulses() - 1))
    {
        if (!accumulateForces)
        {
            // Clear local portion of force array (in local stream)
            cudaMemsetAsync(d_f, 0, numHomeAtoms_ * sizeof(rvec), localStream_);
        }

        // ensure non-local stream waits for local stream, due to dependence on
        // the previous H2D copy of CPU forces (if accumulateForces is true)
        // or the above clearing.
        // TODO remove this dependency on localStream - see Redmine issue #3093
        GpuEventSynchronizer eventLocal;
        eventLocal.markEvent(localStream_);
        eventLocal.enqueueWaitEvent(nonLocalStream_);
    }
    // Unpack halo buffer into force array

    KernelLaunchConfig config;
    config.blockSize[0]     = c_threadsPerBlock;
    config.blockSize[1]     = 1;
    config.blockSize[2]     = 1;
    config.gridSize[0]      = (fRecvSize_ + c_threadsPerBlock - 1) / c_threadsPerBlock;
    config.gridSize[1]      = 1;
    config.gridSize[2]      = 1;
    config.sharedMemorySize = 0;
    config.stream           = nonLocalStream_;

    const float3* recvBuf  = d_recvBuf_;
    const int*    indexMap = d_indexMap_;
    const int     size     = fRecvSize_;
    if (pulse_ > 0)
    {
        // We need to accumulate rather than set, since it is possible
        // that, in this pulse, a value could be written to a location
        // corresponding to the halo region of a following pulse.
        accumulateForces = true;
    }
    // Avoid launching kernel when there is no work to do
    if (size > 0)
    {
        auto kernelFn = accumulateForces ? unpackRecvBufKernel<true> : unpackRecvBufKernel<false>;

        const auto kernelArgs =
                prepareGpuKernelArguments(kernelFn, config, &d_f, &recvBuf, &indexMap, &size);

        launchGpuKernel(kernelFn, config, nullptr, "Domdec GPU Apply F Halo Exchange", kernelArgs);
    }

    fReadyOnDevice_.markEvent(nonLocalStream_);
}
void GpuHaloExchange::Impl::communicateHaloData(float3*               d_ptr,
                                                HaloQuantity          haloQuantity,
                                                GpuEventSynchronizer* coordinatesReadyOnDeviceEvent)
{
    void* sendPtr;
    int   sendSize;
    void* remotePtr;
    int   sendRank;
    int   recvRank;

    if (haloQuantity == HaloQuantity::HaloCoordinates)
    {
        sendPtr   = static_cast<void*>(d_sendBuf_);
        sendSize  = xSendSize_;
        remotePtr = remoteXPtr_;
        sendRank  = sendRankX_;
        recvRank  = recvRankX_;
#if GMX_MPI
        // Wait for the event from the receiving task signalling that the remote coordinates are
        // ready, and enqueue that event into the stream used for the subsequent data push. This
        // avoids a race condition with the remote data being written in the previous timestep.
        // Similarly, send our event to the task that will push data to this task.
        GpuEventSynchronizer* remoteCoordinatesReadyOnDeviceEvent;
        MPI_Sendrecv(&coordinatesReadyOnDeviceEvent, sizeof(GpuEventSynchronizer*), MPI_BYTE,
                     recvRank, 0, &remoteCoordinatesReadyOnDeviceEvent, sizeof(GpuEventSynchronizer*),
                     MPI_BYTE, sendRank, 0, mpi_comm_mysim_, MPI_STATUS_IGNORE);
        remoteCoordinatesReadyOnDeviceEvent->enqueueWaitEvent(nonLocalStream_);
#else
        GMX_UNUSED_VALUE(coordinatesReadyOnDeviceEvent);
#endif
    }
    else
    {
        int recvOffset = dd_->comm->atomRanges.end(DDAtomRanges::Type::Zones);
        for (int p = pulse_; p < dd_->comm->cd[0].numPulses(); p++)
        {
            recvOffset -= dd_->comm->cd[0].ind[p].nrecv[nzone_ + 1];
        }
        sendPtr   = static_cast<void*>(&(d_ptr[recvOffset]));
        sendSize  = fSendSize_;
        remotePtr = remoteFPtr_;
        sendRank  = sendRankF_;
        recvRank  = recvRankF_;
    }

    communicateHaloDataWithCudaDirect(sendPtr, sendSize, sendRank, remotePtr, recvRank);
}
void GpuHaloExchange::Impl::communicateHaloDataWithCudaDirect(void* sendPtr,
                                                              int   sendSize,
                                                              int   sendRank,
                                                              void* remotePtr,
                                                              int   recvRank)
{
    cudaError_t  stat;
    cudaStream_t stream = nonLocalStream_;
    // We asynchronously push data to the remote rank. The remote
    // destination pointer has already been set in the init function. We
    // don't need to worry about overwriting data the remote rank
    // still needs, since the halo exchange is done just once per
    // timestep, for each of X and F.
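    // Editorial note (not in the original source): the cudaMemcpyDeviceToDevice copy below
    // relies on CUDA unified virtual addressing to resolve remotePtr, which may live on a peer
    // GPU. The copy is enqueued in this rank's non-local stream, so its completion must be
    // signalled to the remote rank via the event exchange that follows.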
    // send data to neighbor, if any data exists to send
    if (sendSize > 0)
    {
        stat = cudaMemcpyAsync(remotePtr, sendPtr, sendSize * DIM * sizeof(float),
                               cudaMemcpyDeviceToDevice, stream);
        CU_RET_ERR(stat, "cudaMemcpyAsync on GPU Domdec CUDA direct data transfer failed");
    }
#if GMX_MPI
    // Ensure the pushed data has arrived before the remote rank progresses.
    // This rank records an event and sends it to the remote rank which has just been pushed data.
    // This rank receives the event from the remote rank which has pushed data here, and enqueues
    // that event into its stream.
    GpuEventSynchronizer* haloDataTransferRemote;

    haloDataTransferLaunched_->markEvent(stream);

    MPI_Sendrecv(&haloDataTransferLaunched_, sizeof(GpuEventSynchronizer*), MPI_BYTE, sendRank, 0,
                 &haloDataTransferRemote, sizeof(GpuEventSynchronizer*), MPI_BYTE, recvRank, 0,
                 mpi_comm_mysim_, MPI_STATUS_IGNORE);

    haloDataTransferRemote->enqueueWaitEvent(stream);
#else
    GMX_UNUSED_VALUE(sendRank);
    GMX_UNUSED_VALUE(recvRank);
#endif
}
GpuEventSynchronizer* GpuHaloExchange::Impl::getForcesReadyOnDeviceEvent()
{
    return &fReadyOnDevice_;
}
/*! \brief Create Domdec GPU object */
GpuHaloExchange::Impl::Impl(gmx_domdec_t* dd,
                            MPI_Comm      mpi_comm_mysim,
                            void*         localStream,
                            void*         nonLocalStream,
                            int           pulse) :
    dd_(dd),
    sendRankX_(dd->neighbor[0][1]),
    recvRankX_(dd->neighbor[0][0]),
    sendRankF_(dd->neighbor[0][0]),
    recvRankF_(dd->neighbor[0][1]),
    usePBC_(dd->ci[dd->dim[0]] == 0),
    haloDataTransferLaunched_(new GpuEventSynchronizer()),
    mpi_comm_mysim_(mpi_comm_mysim),
    localStream_(*static_cast<cudaStream_t*>(localStream)),
    nonLocalStream_(*static_cast<cudaStream_t*>(nonLocalStream)),
    pulse_(pulse)
{
    GMX_RELEASE_ASSERT(GMX_THREAD_MPI,
                       "GPU Halo exchange is currently only supported with thread-MPI enabled");

    if (dd->ndim > 1)
    {
        gmx_fatal(FARGS, "Error: dd->ndim > 1 is not yet supported in GPU halo exchange");
    }
    if (usePBC_ && dd->unitCellInfo.haveScrewPBC)
    {
        gmx_fatal(FARGS, "Error: screw is not yet supported in GPU halo exchange\n");
    }

    changePinningPolicy(&h_indexMap_, gmx::PinningPolicy::PinnedIfSupported);

    allocateDeviceBuffer(&d_fShift_, 1, deviceContext_);
}
GpuHaloExchange::Impl::~Impl()
{
    freeDeviceBuffer(&d_indexMap_);
    freeDeviceBuffer(&d_sendBuf_);
    freeDeviceBuffer(&d_recvBuf_);
    freeDeviceBuffer(&d_fShift_);
    delete haloDataTransferLaunched_;
}
GpuHaloExchange::GpuHaloExchange(gmx_domdec_t* dd,
                                 MPI_Comm      mpi_comm_mysim,
                                 void*         localStream,
                                 void*         nonLocalStream,
                                 int           pulse) :
    impl_(new Impl(dd, mpi_comm_mysim, localStream, nonLocalStream, pulse))
{
}
GpuHaloExchange::~GpuHaloExchange() = default;
void GpuHaloExchange::reinitHalo(DeviceBuffer<RVec> d_coordinatesBuffer, DeviceBuffer<RVec> d_forcesBuffer)
{
    impl_->reinitHalo(asFloat3(d_coordinatesBuffer), asFloat3(d_forcesBuffer));
}
void GpuHaloExchange::communicateHaloCoordinates(const matrix          box,
                                                 GpuEventSynchronizer* coordinatesReadyOnDeviceEvent)
{
    impl_->communicateHaloCoordinates(box, coordinatesReadyOnDeviceEvent);
}
void GpuHaloExchange::communicateHaloForces(bool accumulateForces)
{
    impl_->communicateHaloForces(accumulateForces);
}
GpuEventSynchronizer* GpuHaloExchange::getForcesReadyOnDeviceEvent()
{
    return impl_->getForcesReadyOnDeviceEvent();
}

} // namespace gmx