/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Implements GPU halo exchange using CUDA.
 *
 * \author Alan Gray <alang@nvidia.com>
 *
 * \ingroup module_domdec
 */
#include "gmxpre.h"

#include "gpuhaloexchange_impl.cuh"

#include "config.h"

#include <algorithm>
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/domdec/gpuhaloexchange.h"
#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/device_context.h"
#include "gromacs/gpu_utils/devicebuffer.h"
#include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
#include "gromacs/gpu_utils/typecasts.cuh"
#include "gromacs/gpu_utils/vectype_ops.cuh"
#include "gromacs/math/vectypes.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/utility/gmxmpi.h"

#include "domdec_internal.h"

namespace gmx
{
//! Number of CUDA threads in a block
// TODO Optimize this through experimentation
constexpr static int c_threadsPerBlock = 256;
/*! \brief Pack non-local coordinate data buffer on the GPU using a pre-populated "map" containing
 * index information.
 *
 * \param[out] dataPacked       packed array of coordinate data to be transferred
 * \param[in]  data             full array of coordinate data
 * \param[in]  map              array of indices defining mapping from full to packed array
 * \param[in]  mapSize          number of elements in map array
 * \param[in]  coordinateShift  shift to apply to coordinate values when usePBC is true
 */
template<bool usePBC>
__global__ void packSendBufKernel(float3* __restrict__ dataPacked,
                                  const float3* __restrict__ data,
                                  const int* __restrict__ map,
                                  const int mapSize,
                                  const float3 coordinateShift)
{
    int           threadIndex = blockIdx.x * blockDim.x + threadIdx.x;
    float3*       gm_dataDest = &dataPacked[threadIndex];
    const float3* gm_dataSrc  = &data[map[threadIndex]];

    if (threadIndex < mapSize)
    {
        if (usePBC)
        {
            *gm_dataDest = *gm_dataSrc + coordinateShift;
        }
        else
        {
            *gm_dataDest = *gm_dataSrc;
        }
    }
}
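
/* Example of the gather performed by packSendBufKernel (illustrative values,
 * not taken from a run): with map = { 3, 17, 42 } and mapSize = 3, threads
 * 0..2 write dataPacked[0] = data[3], dataPacked[1] = data[17] and
 * dataPacked[2] = data[42], each shifted by coordinateShift when usePBC is
 * true; the surplus threads of the final block do no work.
 */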
/*! \brief Unpack non-local force data buffer on the GPU using a pre-populated "map" containing
 * index information.
 *
 * \param[out] data        full array of force values
 * \param[in]  dataPacked  packed array of force values to be transferred
 * \param[in]  map         array of indices defining mapping from full to packed array
 * \param[in]  mapSize     number of elements in map array
 */
template<bool accumulate>
__global__ void unpackRecvBufKernel(float3* __restrict__ data,
                                    const float3* __restrict__ dataPacked,
                                    const int* __restrict__ map,
                                    const int mapSize)
{
    int           threadIndex = blockIdx.x * blockDim.x + threadIdx.x;
    const float3* gm_dataSrc  = &dataPacked[threadIndex];
    float3*       gm_dataDest = &data[map[threadIndex]];

    if (threadIndex < mapSize)
    {
        if (accumulate)
        {
            *gm_dataDest += *gm_dataSrc;
        }
        else
        {
            *gm_dataDest = *gm_dataSrc;
        }
    }
}
void GpuHaloExchange::Impl::reinitHalo(float3* d_coordinatesBuffer, float3* d_forcesBuffer)
{
    wallcycle_start(wcycle_, ewcDOMDEC);
    wallcycle_sub_start(wcycle_, ewcsDD_GPU);

    d_x_ = d_coordinatesBuffer;
    d_f_ = d_forcesBuffer;
    const gmx_domdec_comm_t&     comm = *dd_->comm;
    const gmx_domdec_comm_dim_t& cd   = comm.cd[dimIndex_];
    const gmx_domdec_ind_t&      ind  = cd.ind[pulse_];

    numHomeAtoms_ = comm.atomRanges.numHomeAtoms(); // offset for data received by this rank
    // Determine receive offset for the dimension index and pulse of this halo exchange object
    int numZoneTemp   = 1;
    int numZone       = 0;
    int numAtomsTotal = numHomeAtoms_;
    for (int i = 0; i <= dimIndex_; i++)
    {
        int pulseMax = (i == dimIndex_) ? pulse_ : (comm.cd[i].numPulses() - 1);
        for (int p = 0; p <= pulseMax; p++)
        {
            atomOffset_                     = numAtomsTotal;
            const gmx_domdec_ind_t& indTemp = comm.cd[i].ind[p];
            numAtomsTotal += indTemp.nrecv[numZoneTemp + 1];
        }
        numZone = numZoneTemp;
        numZoneTemp += numZoneTemp;
    }
    int newSize = ind.nsend[numZone + 1];

    GMX_ASSERT(cd.receiveInPlace, "Out-of-place receive is not yet supported in GPU halo exchange");
    // reallocates only if needed
    h_indexMap_.resize(newSize);
    // reallocate on device only if needed
    if (newSize > maxPackedBufferSize_)
    {
        reallocateDeviceBuffer(&d_indexMap_, newSize, &indexMapSize_, &indexMapSizeAlloc_, deviceContext_);
        reallocateDeviceBuffer(&d_sendBuf_, newSize, &sendBufSize_, &sendBufSizeAlloc_, deviceContext_);
        reallocateDeviceBuffer(&d_recvBuf_, newSize, &recvBufSize_, &recvBufSizeAlloc_, deviceContext_);
        maxPackedBufferSize_ = newSize;
    }
    xSendSize_ = newSize;
#if GMX_MPI
    MPI_Sendrecv(&xSendSize_, sizeof(int), MPI_BYTE, sendRankX_, 0, &xRecvSize_, sizeof(int),
                 MPI_BYTE, recvRankX_, 0, mpi_comm_mysim_, MPI_STATUS_IGNORE);
#endif
    fSendSize_ = xRecvSize_;
    fRecvSize_ = xSendSize_;
    if (newSize > 0)
    {
        GMX_ASSERT(ind.index.size() == h_indexMap_.size(),
                   "Size mismatch between domain decomposition communication index array and GPU "
                   "halo exchange index mapping array");
        std::copy(ind.index.begin(), ind.index.end(), h_indexMap_.begin());

        copyToDeviceBuffer(&d_indexMap_, h_indexMap_.data(), 0, newSize, nonLocalStream_,
                           GpuApiCallBehavior::Async, nullptr);
    }
    // This rank will push data to its neighbor, so it needs to know the
    // remote receive address, and it similarly sends its own receive
    // address to the other neighbor. We can do this here in the reinit
    // fn since the pointers will not change until the next NS step.
    // Coordinates buffer:
    void* recvPtr = static_cast<void*>(&d_x_[atomOffset_]);
#if GMX_MPI
    MPI_Sendrecv(&recvPtr, sizeof(void*), MPI_BYTE, recvRankX_, 0, &remoteXPtr_, sizeof(void*),
                 MPI_BYTE, sendRankX_, 0, mpi_comm_mysim_, MPI_STATUS_IGNORE);

    // Force buffer:
    recvPtr = static_cast<void*>(d_recvBuf_);
    MPI_Sendrecv(&recvPtr, sizeof(void*), MPI_BYTE, recvRankF_, 0, &remoteFPtr_, sizeof(void*),
                 MPI_BYTE, sendRankF_, 0, mpi_comm_mysim_, MPI_STATUS_IGNORE);
#endif
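
    // Note: exchanging raw device pointers between ranks is only valid here
    // because thread-MPI ranks are threads of a single process sharing one
    // CUDA context (enforced by the GMX_THREAD_MPI assertion in the
    // constructor). A process-MPI implementation would instead need CUDA IPC
    // memory handles or CUDA-aware MPI to address remote device memory.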

    wallcycle_sub_stop(wcycle_, ewcsDD_GPU);
    wallcycle_stop(wcycle_, ewcDOMDEC);
}
void GpuHaloExchange::Impl::communicateHaloCoordinates(const matrix          box,
                                                       GpuEventSynchronizer* coordinatesReadyOnDeviceEvent)
{
    wallcycle_start(wcycle_, ewcLAUNCH_GPU);

    // ensure stream waits until coordinate data is available on device
    coordinatesReadyOnDeviceEvent->enqueueWaitEvent(nonLocalStream_);

    wallcycle_sub_start(wcycle_, ewcsLAUNCH_GPU_MOVEX);
    // launch kernel to pack send buffer
    KernelLaunchConfig config;
    config.blockSize[0]     = c_threadsPerBlock;
    config.blockSize[1]     = 1;
    config.blockSize[2]     = 1;
    config.gridSize[0]      = (xSendSize_ + c_threadsPerBlock - 1) / c_threadsPerBlock;
    config.gridSize[1]      = 1;
    config.gridSize[2]      = 1;
    config.sharedMemorySize = 0;
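    // Worked example of the grid sizing above (illustrative numbers): with
    // xSendSize_ = 1000 and c_threadsPerBlock = 256, gridSize[0] becomes
    // (1000 + 255) / 256 = 4 blocks, i.e. 1024 threads, and the kernel's
    // mapSize bound leaves the surplus 24 threads idle.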

    const float3* sendBuf  = d_sendBuf_;
    const float3* d_x      = d_x_;
    const int*    indexMap = d_indexMap_;
    const int     size     = xSendSize_;
    // The coordinateShift changes between steps when we have
    // performed a DD partition, or have updated the box e.g. when
    // performing pressure coupling. So, for simplicity, the box
    // is used every step to pass the shift vector as an argument of
    // the packing kernel.
    const int    boxDimensionIndex = dd_->dim[dimIndex_];
    const float3 coordinateShift{ box[boxDimensionIndex][XX], box[boxDimensionIndex][YY],
                                  box[boxDimensionIndex][ZZ] };
    // Avoid launching kernel when there is no work to do
    if (size > 0)
    {
        auto kernelFn = usePBC_ ? packSendBufKernel<true> : packSendBufKernel<false>;

        const auto kernelArgs = prepareGpuKernelArguments(kernelFn, config, &sendBuf, &d_x,
                                                          &indexMap, &size, &coordinateShift);

        launchGpuKernel(kernelFn, config, nonLocalStream_, nullptr,
                        "Domdec GPU Apply X Halo Exchange", kernelArgs);
    }
    wallcycle_sub_stop(wcycle_, ewcsLAUNCH_GPU_MOVEX);
    wallcycle_stop(wcycle_, ewcLAUNCH_GPU);

    // Consider time spent in communicateHaloData as the Comm.X counter
    // TODO: We need further refinement here, as communicateHaloData includes launch time for cudaMemcpyAsync
    wallcycle_start(wcycle_, ewcMOVEX);

    communicateHaloData(d_x_, HaloQuantity::HaloCoordinates, coordinatesReadyOnDeviceEvent);

    wallcycle_stop(wcycle_, ewcMOVEX);
}
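
/* Illustrative call sequence (a sketch, not code from this file; the loop
 * variable and event name are hypothetical): each MD step, once the event
 * signalling that local coordinates are ready on the device has been
 * recorded, a caller drives all halo exchange objects in dimension/pulse
 * order:
 *
 *   for (auto& haloExchange : gpuHaloExchanges)
 *   {
 *       haloExchange.communicateHaloCoordinates(box, xReadyOnDeviceEvent);
 *   }
 */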
// The following method should be called after non-local buffer operations,
// and before the local buffer operations. It operates in the non-local stream.
void GpuHaloExchange::Impl::communicateHaloForces(bool accumulateForces)
{
    // Consider time spent in communicateHaloData as the Comm.F counter
    // TODO: We need further refinement here, as communicateHaloData includes launch time for cudaMemcpyAsync
    wallcycle_start(wcycle_, ewcMOVEF);

    // Communicate halo data (in non-local stream)
    communicateHaloData(d_f_, HaloQuantity::HaloForces, nullptr);

    wallcycle_stop(wcycle_, ewcMOVEF);

    wallcycle_start_nocount(wcycle_, ewcLAUNCH_GPU);
    wallcycle_sub_start(wcycle_, ewcsLAUNCH_GPU_MOVEF);

    float3* d_f = d_f_;
    // If this is the last pulse and index (noting that the force halo
    // exchanges across multiple pulses and indices are called in
    // reverse order), then perform the following preparation activities:
    if ((pulse_ == (dd_->comm->cd[dimIndex_].numPulses() - 1)) && (dimIndex_ == (dd_->ndim - 1)))
    {
        if (!accumulateForces)
        {
            // Clear local portion of force array (in local stream)
            cudaMemsetAsync(d_f, 0, numHomeAtoms_ * sizeof(rvec), localStream_.stream());
        }

        // Ensure non-local stream waits for local stream, due to dependence on
        // the previous H2D copy of CPU forces (if accumulateForces is true)
        // or the above clearing.
        // TODO remove this dependency on localStream - Redmine Issue #3093
        GpuEventSynchronizer eventLocal;
        eventLocal.markEvent(localStream_);
        eventLocal.enqueueWaitEvent(nonLocalStream_);
    }
    // Unpack halo buffer into force array

    KernelLaunchConfig config;
    config.blockSize[0]     = c_threadsPerBlock;
    config.blockSize[1]     = 1;
    config.blockSize[2]     = 1;
    config.gridSize[0]      = (fRecvSize_ + c_threadsPerBlock - 1) / c_threadsPerBlock;
    config.gridSize[1]      = 1;
    config.gridSize[2]      = 1;
    config.sharedMemorySize = 0;

    const float3* recvBuf  = d_recvBuf_;
    const int*    indexMap = d_indexMap_;
    const int     size     = fRecvSize_;
    if (pulse_ > 0 || dd_->ndim > 1)
    {
        // We need to accumulate rather than set, since it is possible
        // that, in this pulse/dim, a value could be written to a location
        // corresponding to the halo region of a following pulse/dim.
        accumulateForces = true;
    }
    if (size > 0)
    {
        auto kernelFn = accumulateForces ? unpackRecvBufKernel<true> : unpackRecvBufKernel<false>;

        const auto kernelArgs =
                prepareGpuKernelArguments(kernelFn, config, &d_f, &recvBuf, &indexMap, &size);

        launchGpuKernel(kernelFn, config, nonLocalStream_, nullptr,
                        "Domdec GPU Apply F Halo Exchange", kernelArgs);
    }
    if (pulse_ == 0)
    {
        fReadyOnDevice_.markEvent(nonLocalStream_);
    }

    wallcycle_sub_stop(wcycle_, ewcsLAUNCH_GPU_MOVEF);
    wallcycle_stop(wcycle_, ewcLAUNCH_GPU);
}
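
/* Illustrative ordering (hypothetical setup): with a 2D decomposition where
 * dim 0 has two pulses and dim 1 has one, coordinate halos are communicated
 * as (dim 0, pulse 0), (dim 0, pulse 1), (dim 1, pulse 0), while force halos
 * run in the reverse order. The first force exchange to execute is therefore
 * the one with the highest dimension index and pulse, which is why that
 * object performs the force-clearing and stream-synchronization preparation
 * above.
 */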
void GpuHaloExchange::Impl::communicateHaloData(float3*               d_ptr,
                                                HaloQuantity          haloQuantity,
                                                GpuEventSynchronizer* coordinatesReadyOnDeviceEvent)
{
    void* sendPtr;
    int   sendSize;
    void* remotePtr;
    int   sendRank;
    int   recvRank;
    if (haloQuantity == HaloQuantity::HaloCoordinates)
    {
        sendPtr   = static_cast<void*>(d_sendBuf_);
        sendSize  = xSendSize_;
        remotePtr = remoteXPtr_;
        sendRank  = sendRankX_;
        recvRank  = recvRankX_;
#if GMX_MPI
        // Wait for the event signalling that the remote coordinates are ready, and enqueue that
        // event in the stream used for the subsequent data push. This avoids a race condition
        // with the remote data being written in the previous timestep.
        // Similarly, send the local event to the task that will push data to this task.
        GpuEventSynchronizer* remoteCoordinatesReadyOnDeviceEvent;
        MPI_Sendrecv(&coordinatesReadyOnDeviceEvent, sizeof(GpuEventSynchronizer*), MPI_BYTE,
                     recvRank, 0, &remoteCoordinatesReadyOnDeviceEvent, sizeof(GpuEventSynchronizer*),
                     MPI_BYTE, sendRank, 0, mpi_comm_mysim_, MPI_STATUS_IGNORE);
        remoteCoordinatesReadyOnDeviceEvent->enqueueWaitEvent(nonLocalStream_);
#else
        GMX_UNUSED_VALUE(coordinatesReadyOnDeviceEvent);
#endif
    }
    else
    {
        sendPtr   = static_cast<void*>(&(d_ptr[atomOffset_]));
        sendSize  = fSendSize_;
        remotePtr = remoteFPtr_;
        sendRank  = sendRankF_;
        recvRank  = recvRankF_;
    }

    communicateHaloDataWithCudaDirect(sendPtr, sendSize, sendRank, remotePtr, recvRank);
}
void GpuHaloExchange::Impl::communicateHaloDataWithCudaDirect(void* sendPtr,
                                                              int   sendSize,
                                                              int   sendRank,
                                                              void* remotePtr,
                                                              int   recvRank)
{
    cudaError_t stat;

    // We asynchronously push data to the remote rank. The remote
    // destination pointer has already been set in the init fn. We
    // don't need to worry about overwriting data the remote rank
    // still needs, since the halo exchange is done just once per
    // timestep for each of X and F.

    // Send data to neighbor, if any data exists to send
    if (sendSize > 0)
    {
        stat = cudaMemcpyAsync(remotePtr, sendPtr, sendSize * DIM * sizeof(float),
                               cudaMemcpyDeviceToDevice, nonLocalStream_.stream());

        CU_RET_ERR(stat, "cudaMemcpyAsync on GPU Domdec CUDA direct data transfer failed");
    }
#if GMX_MPI
    // Ensure the pushed data has arrived before the remote rank progresses.
    // This rank records an event and sends it to the remote rank which has just been pushed data.
    // This rank receives the event from the remote rank which has pushed data here, and enqueues
    // that event in its stream.
    GpuEventSynchronizer* haloDataTransferRemote;

    haloDataTransferLaunched_->markEvent(nonLocalStream_);

    MPI_Sendrecv(&haloDataTransferLaunched_, sizeof(GpuEventSynchronizer*), MPI_BYTE, sendRank, 0,
                 &haloDataTransferRemote, sizeof(GpuEventSynchronizer*), MPI_BYTE, recvRank, 0,
                 mpi_comm_mysim_, MPI_STATUS_IGNORE);

    haloDataTransferRemote->enqueueWaitEvent(nonLocalStream_);
#else
    GMX_UNUSED_VALUE(sendRank);
    GMX_UNUSED_VALUE(recvRank);
#endif
}
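
/* Sketch of the handshake above for two neighboring ranks A and B
 * (illustrative, single pulse): A records its event after enqueueing the push
 * into B's buffer and sends the event's address to B, and vice versa. Each
 * rank then enqueues a wait on the other's event in its non-local stream, so
 * no subsequent work in that stream can consume the local halo region before
 * the neighbor's push into it has completed.
 */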
GpuEventSynchronizer* GpuHaloExchange::Impl::getForcesReadyOnDeviceEvent()
{
    return &fReadyOnDevice_;
}
/*! \brief Create Domdec GPU object */
GpuHaloExchange::Impl::Impl(gmx_domdec_t*        dd,
                            int                  dimIndex,
                            MPI_Comm             mpi_comm_mysim,
                            const DeviceContext& deviceContext,
                            const DeviceStream&  localStream,
                            const DeviceStream&  nonLocalStream,
                            int                  pulse,
                            gmx_wallcycle*       wcycle) :
    dd_(dd),
    sendRankX_(dd->neighbor[dimIndex][1]),
    recvRankX_(dd->neighbor[dimIndex][0]),
    sendRankF_(dd->neighbor[dimIndex][0]),
    recvRankF_(dd->neighbor[dimIndex][1]),
    usePBC_(dd->ci[dd->dim[dimIndex]] == 0),
    haloDataTransferLaunched_(new GpuEventSynchronizer()),
    mpi_comm_mysim_(mpi_comm_mysim),
    deviceContext_(deviceContext),
    localStream_(localStream),
    nonLocalStream_(nonLocalStream),
    dimIndex_(dimIndex),
    pulse_(pulse),
    wcycle_(wcycle)
{
    GMX_RELEASE_ASSERT(GMX_THREAD_MPI,
                       "GPU Halo exchange is currently only supported with thread-MPI enabled");

    if (usePBC_ && dd->unitCellInfo.haveScrewPBC)
    {
        gmx_fatal(FARGS, "Error: screw is not yet supported in GPU halo exchange\n");
    }

    changePinningPolicy(&h_indexMap_, gmx::PinningPolicy::PinnedIfSupported);

    allocateDeviceBuffer(&d_fShift_, 1, deviceContext_);
}
GpuHaloExchange::Impl::~Impl()
{
    freeDeviceBuffer(&d_indexMap_);
    freeDeviceBuffer(&d_sendBuf_);
    freeDeviceBuffer(&d_recvBuf_);
    freeDeviceBuffer(&d_fShift_);
    delete haloDataTransferLaunched_;
}
GpuHaloExchange::GpuHaloExchange(gmx_domdec_t*        dd,
                                 int                  dimIndex,
                                 MPI_Comm             mpi_comm_mysim,
                                 const DeviceContext& deviceContext,
                                 const DeviceStream&  localStream,
                                 const DeviceStream&  nonLocalStream,
                                 int                  pulse,
                                 gmx_wallcycle*       wcycle) :
    impl_(new Impl(dd, dimIndex, mpi_comm_mysim, deviceContext, localStream, nonLocalStream, pulse, wcycle))
{
}
GpuHaloExchange::~GpuHaloExchange() = default;
void GpuHaloExchange::reinitHalo(DeviceBuffer<RVec> d_coordinatesBuffer, DeviceBuffer<RVec> d_forcesBuffer)
{
    impl_->reinitHalo(asFloat3(d_coordinatesBuffer), asFloat3(d_forcesBuffer));
}
void GpuHaloExchange::communicateHaloCoordinates(const matrix          box,
                                                 GpuEventSynchronizer* coordinatesReadyOnDeviceEvent)
{
    impl_->communicateHaloCoordinates(box, coordinatesReadyOnDeviceEvent);
}
void GpuHaloExchange::communicateHaloForces(bool accumulateForces)
{
    impl_->communicateHaloForces(accumulateForces);
}
GpuEventSynchronizer* GpuHaloExchange::getForcesReadyOnDeviceEvent()
{
    return impl_->getForcesReadyOnDeviceEvent();
}

} // namespace gmx