/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Implements GPU halo exchange using CUDA.
 *
 * \author Alan Gray <alang@nvidia.com>
 *
 * \ingroup module_domdec
 */
#include "gmxpre.h"

#include "gpuhaloexchange_impl.cuh"

#include "config.h"

#include <assert.h>
#include <stdio.h>

#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/domdec/gpuhaloexchange.h"
#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/devicebuffer.h"
#include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
#include "gromacs/gpu_utils/vectype_ops.cuh"
#include "gromacs/math/vectypes.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/utility/gmxmpi.h"

#include "domdec_internal.h"

namespace gmx
{

//! Number of CUDA threads in a block
// TODO Optimize this through experimentation
constexpr static int c_threadsPerBlock = 256;

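/*! \brief pack non-local coordinate data buffer on the GPU using pre-populated "map" containing index
 * information
 * \param[out] dataPacked       packed array of coordinate values to be transferred
 * \param[in]  data             full array of coordinate values
 * \param[in]  map              array of indices defining mapping from full to packed array
 * \param[in]  mapSize          number of elements in map array
 * \param[in]  coordinateShift  shift to apply to coordinates when PBC is in use
 */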
template<bool usePBC>
__global__ void packSendBufKernel(float3* __restrict__ dataPacked,
                                  const float3* __restrict__ data,
                                  const int* __restrict__ map,
                                  const int    mapSize,
                                  const float3 coordinateShift)
{
    int threadIndex = blockIdx.x * blockDim.x + threadIdx.x;

    if (threadIndex < mapSize)
    {
        // Look up the map entry only for in-range threads, so that map[] is never
        // read beyond mapSize by threads in the last, partially-filled block.
        float3*       gm_dataDest = &dataPacked[threadIndex];
        const float3* gm_dataSrc  = &data[map[threadIndex]];

        if (usePBC)
        {
            *gm_dataDest = *gm_dataSrc + coordinateShift;
        }
        else
        {
            *gm_dataDest = *gm_dataSrc;
        }
    }
}

/*! \brief unpack non-local force data buffer on the GPU using pre-populated "map" containing index
 * information
 * \param[out] data        full array of force values
 * \param[in]  dataPacked  packed array of force values to be transferred
 * \param[in]  map         array of indices defining mapping from full to packed array
 * \param[in]  mapSize     number of elements in map array
 */
template<bool accumulate>
__global__ void unpackRecvBufKernel(float3* __restrict__ data,
                                    const float3* __restrict__ dataPacked,
                                    const int* __restrict__ map,
                                    const int mapSize)
{
    int threadIndex = blockIdx.x * blockDim.x + threadIdx.x;

    if (threadIndex < mapSize)
    {
        // Look up the map entry only for in-range threads, so that map[] is never
        // read beyond mapSize by threads in the last, partially-filled block.
        const float3* gm_dataSrc  = &dataPacked[threadIndex];
        float3*       gm_dataDest = &data[map[threadIndex]];

        if (accumulate)
        {
            *gm_dataDest += *gm_dataSrc;
        }
        else
        {
            *gm_dataDest = *gm_dataSrc;
        }
    }
}

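/*! \brief (Re)initialize the halo exchange after a domain-decomposition repartitioning:
 * store the coordinate and force buffer pointers, rebuild the index map, and exchange
 * buffer sizes and remote destination pointers with the neighbor ranks.
 */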
void GpuHaloExchange::Impl::reinitHalo(float3* d_coordinatesBuffer, float3* d_forcesBuffer)
{

    d_x_ = d_coordinatesBuffer;
    d_f_ = d_forcesBuffer;

    cudaStream_t                 stream  = nonLocalStream_;
    const gmx_domdec_comm_t&     comm    = *dd_->comm;
    const gmx_domdec_comm_dim_t& cd      = comm.cd[0];
    const gmx_domdec_ind_t&      ind     = cd.ind[pulse_];
    int                          newSize = ind.nsend[nzone_ + 1];

    GMX_ASSERT(cd.receiveInPlace, "Out-of-place receive is not yet supported in GPU halo exchange");

    // reallocates only if needed
    h_indexMap_.resize(newSize);
    // reallocate on device only if needed
    if (newSize > maxPackedBufferSize_)
    {
        reallocateDeviceBuffer(&d_indexMap_, newSize, &indexMapSize_, &indexMapSizeAlloc_, nullptr);
        reallocateDeviceBuffer(&d_sendBuf_, newSize, &sendBufSize_, &sendBufSizeAlloc_, nullptr);
        reallocateDeviceBuffer(&d_recvBuf_, newSize, &recvBufSize_, &recvBufSizeAlloc_, nullptr);
        maxPackedBufferSize_ = newSize;
    }

    xSendSize_ = newSize;
#if GMX_MPI
    MPI_Sendrecv(&xSendSize_, sizeof(int), MPI_BYTE, sendRankX_, 0, &xRecvSize_, sizeof(int),
                 MPI_BYTE, recvRankX_, 0, mpi_comm_mysim_, MPI_STATUS_IGNORE);
#endif
    fSendSize_ = xRecvSize_;
    fRecvSize_ = xSendSize_;

    numHomeAtoms_ = comm.atomRanges.numHomeAtoms(); // offset for data received by this rank

    GMX_ASSERT(ind.index.size() == h_indexMap_.size(), "Size mismatch");
    std::copy(ind.index.begin(), ind.index.end(), h_indexMap_.begin());

    copyToDeviceBuffer(&d_indexMap_, h_indexMap_.data(), 0, newSize, stream,
                       GpuApiCallBehavior::Async, nullptr);

    // This rank will push data to its neighbor, so it needs to know the remote
    // receive address, and it must likewise send its own receive address to the
    // other neighbor. We can do this here in the reinit function since the
    // pointers will not change until the next NS step.

    // Coordinates buffer:
#if GMX_MPI
    int pulseOffset = 0;
    for (int p = pulse_ - 1; p >= 0; p--)
    {
        pulseOffset += cd.ind[p].nrecv[nzone_ + 1];
    }
    void* recvPtr = static_cast<void*>(&d_x_[numHomeAtoms_ + pulseOffset]);
    MPI_Sendrecv(&recvPtr, sizeof(void*), MPI_BYTE, recvRankX_, 0, &remoteXPtr_, sizeof(void*),
                 MPI_BYTE, sendRankX_, 0, mpi_comm_mysim_, MPI_STATUS_IGNORE);

    // Force buffer:
    recvPtr = static_cast<void*>(d_recvBuf_);
    MPI_Sendrecv(&recvPtr, sizeof(void*), MPI_BYTE, recvRankF_, 0, &remoteFPtr_, sizeof(void*),
                 MPI_BYTE, sendRankF_, 0, mpi_comm_mysim_, MPI_STATUS_IGNORE);
#endif
}

void GpuHaloExchange::Impl::communicateHaloCoordinates(const matrix          box,
                                                       GpuEventSynchronizer* coordinatesReadyOnDeviceEvent)
{

    if (pulse_ == 0)
    {
        // ensure stream waits until coordinate data is available on device
        coordinatesReadyOnDeviceEvent->enqueueWaitEvent(nonLocalStream_);
    }

    // launch kernel to pack send buffer
    KernelLaunchConfig config;
    config.blockSize[0]     = c_threadsPerBlock;
    config.blockSize[1]     = 1;
    config.blockSize[2]     = 1;
    config.gridSize[0]      = (xSendSize_ + c_threadsPerBlock - 1) / c_threadsPerBlock;
    config.gridSize[1]      = 1;
    config.gridSize[2]      = 1;
    config.sharedMemorySize = 0;
    config.stream           = nonLocalStream_;

    const float3* sendBuf  = d_sendBuf_;
    const float3* d_x      = d_x_;
    const int*    indexMap = d_indexMap_;
    const int     size     = xSendSize_;
    // The coordinateShift changes between steps when we have
    // performed a DD partition, or have updated the box e.g. when
    // performing pressure coupling. So, for simplicity, the box
    // is used every step to pass the shift vector as an argument of
    // the packing kernel.
    //
    // Because only one-dimensional DD is supported, the coordinate
    // shift only needs to handle that dimension.
    const int    dimensionIndex = dd_->dim[0];
    const float3 coordinateShift{ box[dimensionIndex][XX], box[dimensionIndex][YY],
                                  box[dimensionIndex][ZZ] };

    // Avoid launching kernel when there is no work to do
    if (size > 0)
    {
        auto kernelFn = usePBC_ ? packSendBufKernel<true> : packSendBufKernel<false>;

        const auto kernelArgs = prepareGpuKernelArguments(kernelFn, config, &sendBuf, &d_x,
                                                          &indexMap, &size, &coordinateShift);

        launchGpuKernel(kernelFn, config, nullptr, "Domdec GPU Apply X Halo Exchange", kernelArgs);
    }

    communicateHaloData(d_x_, HaloQuantity::HaloCoordinates, coordinatesReadyOnDeviceEvent);
}

// The following method should be called after non-local buffer operations,
// and before the local buffer operations. It operates in the non-local stream.
void GpuHaloExchange::Impl::communicateHaloForces(bool accumulateForces)
{

    // Communicate halo data (in non-local stream)
    communicateHaloData(d_f_, HaloQuantity::HaloForces, nullptr);

    float3* d_f = d_f_;

    if (pulse_ == (dd_->comm->cd[0].numPulses() - 1))
    {
        if (!accumulateForces)
        {
            // Clear local portion of force array (in local stream)
            cudaMemsetAsync(d_f, 0, numHomeAtoms_ * sizeof(rvec), localStream_);
        }

        // Ensure the non-local stream waits for the local stream, due to its dependence on
        // the previous H2D copy of CPU forces (if accumulateForces is true)
        // or the above clearing.
        // TODO remove this dependency on localStream - Redmine issue #3093
        GpuEventSynchronizer eventLocal;
        eventLocal.markEvent(localStream_);
        eventLocal.enqueueWaitEvent(nonLocalStream_);
    }

    // Unpack halo buffer into force array

    KernelLaunchConfig config;
    config.blockSize[0]     = c_threadsPerBlock;
    config.blockSize[1]     = 1;
    config.blockSize[2]     = 1;
    config.gridSize[0]      = (fRecvSize_ + c_threadsPerBlock - 1) / c_threadsPerBlock;
    config.gridSize[1]      = 1;
    config.gridSize[2]      = 1;
    config.sharedMemorySize = 0;
    config.stream           = nonLocalStream_;

    const float3* recvBuf  = d_recvBuf_;
    const int*    indexMap = d_indexMap_;
    const int     size     = fRecvSize_;

    if (pulse_ > 0)
    {
        // We need to accumulate rather than set, since it is possible
        // that, in this pulse, a value could be written to a location
        // corresponding to the halo region of a following pulse.
        accumulateForces = true;
    }

    if (size > 0)
    {
        auto kernelFn = accumulateForces ? unpackRecvBufKernel<true> : unpackRecvBufKernel<false>;

        const auto kernelArgs =
                prepareGpuKernelArguments(kernelFn, config, &d_f, &recvBuf, &indexMap, &size);

        launchGpuKernel(kernelFn, config, nullptr, "Domdec GPU Apply F Halo Exchange", kernelArgs);
    }

    if (pulse_ == 0)
    {
        fReadyOnDevice_.markEvent(nonLocalStream_);
    }
}

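/*! \brief Exchange halo data with the neighbor rank for either coordinates or forces,
 * selecting the send buffer, transfer size, ranks and remote destination pointer accordingly.
 */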
void GpuHaloExchange::Impl::communicateHaloData(float3*               d_ptr,
                                                HaloQuantity          haloQuantity,
                                                GpuEventSynchronizer* coordinatesReadyOnDeviceEvent)
{

    void* sendPtr;
    int   sendSize;
    void* remotePtr;
    int   sendRank;
    int   recvRank;

    if (haloQuantity == HaloQuantity::HaloCoordinates)
    {
        sendPtr   = static_cast<void*>(d_sendBuf_);
        sendSize  = xSendSize_;
        remotePtr = remoteXPtr_;
        sendRank  = sendRankX_;
        recvRank  = recvRankX_;

#if GMX_MPI
        // Wait for event from receiving task that remote coordinates are ready, and enqueue that event to stream used
        // for subsequent data push. This avoids a race condition with the remote data being written in the previous timestep.
        // Similarly send event to task that will push data to this task.
        GpuEventSynchronizer* remoteCoordinatesReadyOnDeviceEvent;
        MPI_Sendrecv(&coordinatesReadyOnDeviceEvent, sizeof(GpuEventSynchronizer*), MPI_BYTE,
                     recvRank, 0, &remoteCoordinatesReadyOnDeviceEvent, sizeof(GpuEventSynchronizer*),
                     MPI_BYTE, sendRank, 0, mpi_comm_mysim_, MPI_STATUS_IGNORE);
        remoteCoordinatesReadyOnDeviceEvent->enqueueWaitEvent(nonLocalStream_);
#else
        GMX_UNUSED_VALUE(coordinatesReadyOnDeviceEvent);
#endif
    }
    else
    {
        int recvOffset = dd_->comm->atomRanges.end(DDAtomRanges::Type::Zones);
        for (int p = pulse_; p < dd_->comm->cd[0].numPulses(); p++)
        {
            recvOffset -= dd_->comm->cd[0].ind[p].nrecv[nzone_ + 1];
        }
        sendPtr   = static_cast<void*>(&(d_ptr[recvOffset]));
        sendSize  = fSendSize_;
        remotePtr = remoteFPtr_;
        sendRank  = sendRankF_;
        recvRank  = recvRankF_;
    }

    communicateHaloDataWithCudaDirect(sendPtr, sendSize, sendRank, remotePtr, recvRank);
}

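/*! \brief Push the packed halo data directly into the destination buffer on the neighbor
 * GPU with a peer-to-peer cudaMemcpyAsync, then exchange and enqueue events so that each
 * rank's stream waits until the data pushed to it has arrived.
 */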
void GpuHaloExchange::Impl::communicateHaloDataWithCudaDirect(void* sendPtr,
                                                              int   sendSize,
                                                              int   sendRank,
                                                              void* remotePtr,
                                                              int   recvRank)
{

    cudaError_t  stat;
    cudaStream_t stream = nonLocalStream_;

    // We asynchronously push data to the remote rank. The remote
    // destination pointer has already been set in the init fn. We
    // don't need to worry about overwriting data the remote rank
    // still needs, since the halo exchange is only done once per
    // timestep, for each of X and F.

    // send data to neighbor, if any data exists to send
    if (sendSize > 0)
    {
        stat = cudaMemcpyAsync(remotePtr, sendPtr, sendSize * DIM * sizeof(float),
                               cudaMemcpyDeviceToDevice, stream);
        CU_RET_ERR(stat, "cudaMemcpyAsync on GPU Domdec CUDA direct data transfer failed");
    }

#if GMX_MPI
    // Ensure the pushed data has arrived before the remote rank progresses.
    // This rank records an event and sends it to the remote rank which has just been pushed data.
    // This rank receives an event from the remote rank which has pushed data here, and enqueues
    // that event to its stream.
    GpuEventSynchronizer* haloDataTransferRemote;

    haloDataTransferLaunched_->markEvent(stream);

    MPI_Sendrecv(&haloDataTransferLaunched_, sizeof(GpuEventSynchronizer*), MPI_BYTE, sendRank, 0,
                 &haloDataTransferRemote, sizeof(GpuEventSynchronizer*), MPI_BYTE, recvRank, 0,
                 mpi_comm_mysim_, MPI_STATUS_IGNORE);

    haloDataTransferRemote->enqueueWaitEvent(stream);
#else
    GMX_UNUSED_VALUE(sendRank);
    GMX_UNUSED_VALUE(recvRank);
#endif
}

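/*! \brief Return the event that is marked in the non-local stream when the halo force
 * data is ready on the device.
 */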
GpuEventSynchronizer* GpuHaloExchange::Impl::getForcesReadyOnDeviceEvent()
{
    return &fReadyOnDevice_;
}

/*! \brief Create Domdec GPU object */
GpuHaloExchange::Impl::Impl(gmx_domdec_t* dd,
                            MPI_Comm      mpi_comm_mysim,
                            void*         localStream,
                            void*         nonLocalStream,
                            int           pulse) :
    dd_(dd),
    sendRankX_(dd->neighbor[0][1]),
    recvRankX_(dd->neighbor[0][0]),
    sendRankF_(dd->neighbor[0][0]),
    recvRankF_(dd->neighbor[0][1]),
    usePBC_(dd->ci[dd->dim[0]] == 0),
    haloDataTransferLaunched_(new GpuEventSynchronizer()),
    mpi_comm_mysim_(mpi_comm_mysim),
    localStream_(*static_cast<cudaStream_t*>(localStream)),
    nonLocalStream_(*static_cast<cudaStream_t*>(nonLocalStream)),
    pulse_(pulse)
{

    GMX_RELEASE_ASSERT(GMX_THREAD_MPI,
                       "GPU Halo exchange is currently only supported with thread-MPI enabled");

    if (dd->ndim > 1)
    {
        gmx_fatal(FARGS, "Error: dd->ndim > 1 is not yet supported in GPU halo exchange");
    }

    if (usePBC_ && dd->unitCellInfo.haveScrewPBC)
    {
        gmx_fatal(FARGS, "Error: screw is not yet supported in GPU halo exchange\n");
    }

    changePinningPolicy(&h_indexMap_, gmx::PinningPolicy::PinnedIfSupported);

    allocateDeviceBuffer(&d_fShift_, 1, nullptr);
}

GpuHaloExchange::Impl::~Impl()
{
    freeDeviceBuffer(&d_indexMap_);
    freeDeviceBuffer(&d_sendBuf_);
    freeDeviceBuffer(&d_recvBuf_);
    freeDeviceBuffer(&d_fShift_);
    delete haloDataTransferLaunched_;
}

GpuHaloExchange::GpuHaloExchange(gmx_domdec_t* dd,
                                 MPI_Comm      mpi_comm_mysim,
                                 void*         localStream,
                                 void*         nonLocalStream,
                                 int           pulse) :
    impl_(new Impl(dd, mpi_comm_mysim, localStream, nonLocalStream, pulse))
{
}

GpuHaloExchange::~GpuHaloExchange() = default;

void GpuHaloExchange::reinitHalo(DeviceBuffer<RVec> d_coordinatesBuffer, DeviceBuffer<RVec> d_forcesBuffer)
{
    impl_->reinitHalo(asFloat3(d_coordinatesBuffer), asFloat3(d_forcesBuffer));
}

void GpuHaloExchange::communicateHaloCoordinates(const matrix          box,
                                                 GpuEventSynchronizer* coordinatesReadyOnDeviceEvent)
{
    impl_->communicateHaloCoordinates(box, coordinatesReadyOnDeviceEvent);
}

void GpuHaloExchange::communicateHaloForces(bool accumulateForces)
{
    impl_->communicateHaloForces(accumulateForces);
}

GpuEventSynchronizer* GpuHaloExchange::getForcesReadyOnDeviceEvent()
{
    return impl_->getForcesReadyOnDeviceEvent();
}
} // namespace gmx