/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Implements GPU halo exchange using CUDA.
 *
 * \author Alan Gray <alang@nvidia.com>
 *
 * \ingroup module_domdec
 */
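/* Typical call sequence (a rough sketch based only on the public API defined
 * in this file; variable names are illustrative and actual call sites
 * elsewhere in GROMACS may differ):
 *
 *   gmx::GpuHaloExchange haloExchange(dd, mpi_comm_mysim, &localStream, &nonLocalStream);
 *   // After each domain repartitioning (NS step):
 *   haloExchange.reinitHalo(d_coordinatesBuffer, d_forcesBuffer);
 *   // Every step:
 *   haloExchange.communicateHaloCoordinates(box, coordinatesReadyOnDeviceEvent);
 *   // ... non-local force computation ...
 *   haloExchange.communicateHaloForces(accumulateForces);
 */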
#include "gmxpre.h"

#include "gpuhaloexchange_impl.cuh"

#include "config.h"

#include <assert.h>
#include <stdio.h>

#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/domdec/gpuhaloexchange.h"
#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/devicebuffer.h"
#include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
#include "gromacs/gpu_utils/vectype_ops.cuh"
#include "gromacs/pbcutil/ishift.h"

#include "domdec_internal.h"

namespace gmx
{

//! Number of CUDA threads in a block
// TODO Optimize this through experimentation
constexpr static int c_threadsPerBlock = 256;

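/*! \brief Pack non-local coordinate data buffer on the GPU using a pre-populated "map" containing index information
 * \param[out] dataPacked       packed array of coordinate values to be transferred
 * \param[in]  data             full array of coordinate values
 * \param[in]  map              array of indices defining mapping from full to packed array
 * \param[in]  mapSize          number of elements in map array
 * \param[in]  coordinateShift  shift applied to the coordinates when PBC is in use
 */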
template <bool usePBC>
__global__ void packSendBufKernel(float3 * __restrict__       dataPacked,
                                  const float3 * __restrict__ data,
                                  const int * __restrict__    map,
                                  const int                   mapSize,
                                  const float3                coordinateShift)
{
    int threadIndex = blockIdx.x*blockDim.x + threadIdx.x;

    // Check bounds before reading map[threadIndex], to avoid an
    // out-of-bounds access by threads in the final, partially-filled block
    if (threadIndex < mapSize)
    {
        float3       *gm_dataDest = &dataPacked[threadIndex];
        const float3 *gm_dataSrc  = &data[map[threadIndex]];

        if (usePBC)
        {
            *gm_dataDest = *gm_dataSrc + coordinateShift;
        }
        else
        {
            *gm_dataDest = *gm_dataSrc;
        }
    }
}

/*! \brief Unpack non-local force data buffer on the GPU using a pre-populated "map" containing index information
 * \param[out] data        full array of force values
 * \param[in]  dataPacked  packed array of force values to be transferred
 * \param[in]  map         array of indices defining mapping from full to packed array
 * \param[in]  mapSize     number of elements in map array
 */
template <bool accumulate>
__global__ void unpackRecvBufKernel(float3 * __restrict__       data,
                                    const float3 * __restrict__ dataPacked,
                                    const int * __restrict__    map,
                                    const int                   mapSize)
{
    int threadIndex = blockIdx.x*blockDim.x + threadIdx.x;

    // As in the pack kernel, check bounds before reading map[threadIndex]
    if (threadIndex < mapSize)
    {
        const float3 *gm_dataSrc  = &dataPacked[threadIndex];
        float3       *gm_dataDest = &data[map[threadIndex]];

        if (accumulate)
        {
            *gm_dataDest += *gm_dataSrc;
        }
        else
        {
            *gm_dataDest = *gm_dataSrc;
        }
    }
}

void GpuHaloExchange::Impl::reinitHalo(float3 *d_coordinatesBuffer,
                                       float3 *d_forcesBuffer)
{
    d_x_ = d_coordinatesBuffer;
    d_f_ = d_forcesBuffer;

    cudaStream_t                 stream  = nonLocalStream_;
    int                          nzone   = 1;
    const gmx_domdec_comm_t     &comm    = *dd_->comm;
    const gmx_domdec_comm_dim_t &cd      = comm.cd[0];
    const gmx_domdec_ind_t      &ind     = cd.ind[0];
    int                          newSize = ind.nsend[nzone+1];

    GMX_RELEASE_ASSERT(cd.numPulses() == 1, "Multiple pulses are not yet supported in GPU halo exchange");
    GMX_ASSERT(cd.receiveInPlace, "Out-of-place receive is not yet supported in GPU halo exchange");

    // reallocates only if needed
    h_indexMap_.resize(newSize);
    // reallocate on device only if needed
    if (newSize > maxPackedBufferSize_)
    {
        reallocateDeviceBuffer(&d_indexMap_, newSize, &indexMapSize_, &indexMapSizeAlloc_, nullptr);
        reallocateDeviceBuffer(&d_sendBuf_, newSize, &sendBufSize_, &sendBufSizeAlloc_, nullptr);
        reallocateDeviceBuffer(&d_recvBuf_, newSize, &recvBufSize_, &recvBufSizeAlloc_, nullptr);
        maxPackedBufferSize_ = newSize;
    }

    xSendSize_ = newSize;
#if GMX_MPI
    MPI_Sendrecv(&xSendSize_, sizeof(int), MPI_BYTE, sendRankX_, 0,
                 &xRecvSize_, sizeof(int), MPI_BYTE, recvRankX_, 0,
                 mpi_comm_mysim_, MPI_STATUS_IGNORE);
#endif
    fSendSize_ = xRecvSize_;
    fRecvSize_ = xSendSize_;

    numHomeAtoms_ = comm.atomRanges.numHomeAtoms();  // offset for data received by this rank

    GMX_ASSERT(ind.index.size() == h_indexMap_.size(), "Size mismatch");
    std::copy(ind.index.begin(), ind.index.end(), h_indexMap_.begin());

    copyToDeviceBuffer(&d_indexMap_, h_indexMap_.data(), 0, newSize, stream, GpuApiCallBehavior::Async, nullptr);

    // This rank will push data to its neighbor, so it needs to know the
    // remote receive address, and it must similarly send its own receive
    // address to the other neighbor. We can do this here in the reinit
    // function since the pointers will not change until the next NS step.

    // Coordinates buffer:
#if GMX_MPI
    void* recvPtr = static_cast<void*>(&d_coordinatesBuffer[numHomeAtoms_]);
    MPI_Sendrecv(&recvPtr, sizeof(void*), MPI_BYTE, recvRankX_, 0,
                 &remoteXPtr_, sizeof(void*), MPI_BYTE, sendRankX_, 0,
                 mpi_comm_mysim_, MPI_STATUS_IGNORE);

    // Force buffer:
    recvPtr = static_cast<void*>(d_recvBuf_);
    MPI_Sendrecv(&recvPtr, sizeof(void*), MPI_BYTE, recvRankF_, 0,
                 &remoteFPtr_, sizeof(void*), MPI_BYTE, sendRankF_, 0,
                 mpi_comm_mysim_, MPI_STATUS_IGNORE);
#endif
}

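/*! \brief Communicate the coordinate halo to the neighboring rank
 * \param[in] box                            simulation box, from which the PBC shift vector is taken
 * \param[in] coordinatesReadyOnDeviceEvent  event recorded when the coordinates are ready on the device
 */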
void GpuHaloExchange::Impl::communicateHaloCoordinates(const matrix          box,
                                                       GpuEventSynchronizer *coordinatesReadyOnDeviceEvent)
{
    // ensure stream waits until coordinate data is available on device
    coordinatesReadyOnDeviceEvent->enqueueWaitEvent(nonLocalStream_);

    // launch kernel to pack send buffer
    KernelLaunchConfig config;
    config.blockSize[0]     = c_threadsPerBlock;
    config.blockSize[1]     = 1;
    config.blockSize[2]     = 1;
    config.gridSize[0]      = (xSendSize_+c_threadsPerBlock-1)/c_threadsPerBlock;
    config.gridSize[1]      = 1;
    config.gridSize[2]      = 1;
    config.sharedMemorySize = 0;
    config.stream           = nonLocalStream_;

    const float3 *sendBuf  = d_sendBuf_;
    const float3 *d_x      = d_x_;
    const int    *indexMap = d_indexMap_;
    const int     size     = xSendSize_;
    // The coordinateShift changes between steps when we have
    // performed a DD partition, or have updated the box e.g. when
    // performing pressure coupling. So, for simplicity, the box
    // is used every step to pass the shift vector as an argument of
    // the packing kernel.
    //
    // Because only one-dimensional DD is supported, the coordinate
    // shift only needs to handle that dimension.
    const int    dimensionIndex = dd_->dim[0];
    const float3 coordinateShift {
        box[dimensionIndex][XX], box[dimensionIndex][YY], box[dimensionIndex][ZZ]
    };

    // Avoid launching kernel when there is no work to do
    if (size > 0)
    {
        auto       kernelFn   = usePBC_ ? packSendBufKernel<true> : packSendBufKernel<false>;

        const auto kernelArgs = prepareGpuKernelArguments(kernelFn, config, &sendBuf, &d_x, &indexMap,
                                                          &size, &coordinateShift);

        launchGpuKernel(kernelFn, config, nullptr, "Domdec GPU Apply X Halo Exchange", kernelArgs);
    }

    communicateHaloData(d_x_, HaloQuantity::HaloCoordinates);
}

// The following method should be called after non-local buffer operations,
// and before the local buffer operations. It operates in the non-local stream.
void GpuHaloExchange::Impl::communicateHaloForces(bool accumulateForces)
{
    // Communicate halo data (in non-local stream)
    communicateHaloData(d_f_, HaloQuantity::HaloForces);

    float3 *d_f = d_f_;

    if (!accumulateForces)
    {
        // Clear local portion of force array (in local stream). d_f is a
        // float3 buffer, so clear numHomeAtoms_ float3 elements.
        cudaMemsetAsync(d_f, 0, numHomeAtoms_*sizeof(float3), localStream_);
    }

    // ensure non-local stream waits for local stream, due to dependence on
    // the previous H2D copy of CPU forces (if accumulateForces is true)
    // or the above clearing.
    // TODO remove this dependency on localStream - Redmine issue #3093
    GpuEventSynchronizer eventLocal;
    eventLocal.markEvent(localStream_);
    eventLocal.enqueueWaitEvent(nonLocalStream_);

    // Unpack halo buffer into force array

    KernelLaunchConfig config;
    config.blockSize[0]     = c_threadsPerBlock;
    config.blockSize[1]     = 1;
    config.blockSize[2]     = 1;
    config.gridSize[0]      = (fRecvSize_+c_threadsPerBlock-1)/c_threadsPerBlock;
    config.gridSize[1]      = 1;
    config.gridSize[2]      = 1;
    config.sharedMemorySize = 0;
    config.stream           = nonLocalStream_;

    const float3 *recvBuf  = d_recvBuf_;
    const int    *indexMap = d_indexMap_;
    const int     size     = fRecvSize_;

    if (size > 0)
    {
        auto       kernelFn   = accumulateForces ? unpackRecvBufKernel<true> : unpackRecvBufKernel<false>;

        const auto kernelArgs = prepareGpuKernelArguments(kernelFn, config, &d_f,
                                                          &recvBuf, &indexMap,
                                                          &size);

        launchGpuKernel(kernelFn, config, nullptr, "Domdec GPU Apply F Halo Exchange", kernelArgs);
    }
}

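/*! \brief Communicate halo data for the given quantity to the neighboring rank
 * \param[in] d_ptr         device buffer holding the quantity (coordinates or forces)
 * \param[in] haloQuantity  whether coordinate or force halo data is communicated
 */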
void GpuHaloExchange::Impl::communicateHaloData(float3      *d_ptr,
                                                HaloQuantity haloQuantity)
{
    void *sendPtr;
    int   sendSize;
    void *remotePtr;
    int   sendRank;
    int   recvRank;

    if (haloQuantity == HaloQuantity::HaloCoordinates)
    {
        sendPtr   = static_cast<void*>(d_sendBuf_);
        sendSize  = xSendSize_;
        remotePtr = remoteXPtr_;
        sendRank  = sendRankX_;
        recvRank  = recvRankX_;

#if GMX_MPI
        // Wait for a signal from the receiving task that it is ready, and
        // similarly signal the task that will push data to this one. The
        // message contents are unused; this is purely a synchronization
        // handshake.
        char thisTaskIsReady, remoteTaskIsReady;
        MPI_Sendrecv(&thisTaskIsReady, sizeof(char), MPI_BYTE, recvRank, 0,
                     &remoteTaskIsReady, sizeof(char), MPI_BYTE, sendRank, 0,
                     mpi_comm_mysim_, MPI_STATUS_IGNORE);
#endif
    }
    else
    {
        sendPtr   = static_cast<void*>(&(d_ptr[numHomeAtoms_]));
        sendSize  = fSendSize_;
        remotePtr = remoteFPtr_;
        sendRank  = sendRankF_;
        recvRank  = recvRankF_;
    }

    communicateHaloDataWithCudaDirect(sendPtr, sendSize, sendRank, remotePtr, recvRank);
}

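/*! \brief Push data directly to the remote rank's GPU memory and synchronize
 * \param[in] sendPtr    device pointer to the local send buffer
 * \param[in] sendSize   number of elements to send
 * \param[in] sendRank   rank to which this rank pushes data
 * \param[in] remotePtr  remote device pointer that receives the pushed data
 * \param[in] recvRank   rank that pushes data to this rank
 */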
void GpuHaloExchange::Impl::communicateHaloDataWithCudaDirect(void *sendPtr,
                                                              int   sendSize,
                                                              int   sendRank,
                                                              void *remotePtr,
                                                              int   recvRank)
{
    cudaError_t  stat;
    cudaStream_t stream = nonLocalStream_;

    // We asynchronously push data to the remote rank. The remote
    // destination pointer has already been set in the init fn. We
    // don't need to worry about overwriting data the remote rank
    // still needs, since the halo exchange is done just once per
    // timestep for each of X and F.

    // send data to neighbor, if any data exists to send
    if (sendSize > 0)
    {
        stat = cudaMemcpyAsync(remotePtr, sendPtr, sendSize*DIM*sizeof(float), cudaMemcpyDeviceToDevice, stream);
        CU_RET_ERR(stat, "cudaMemcpyAsync on GPU Domdec CUDA direct data transfer failed");
    }

#if GMX_MPI
    // Ensure the pushed data has arrived before the remote rank progresses.
    // This rank records an event and sends it to the remote rank that has
    // just been pushed data. This rank receives an event from the remote
    // rank that has pushed data here, and enqueues that event in its stream.
    GpuEventSynchronizer *haloDataTransferRemote;

    haloDataTransferLaunched_->markEvent(stream);

    MPI_Sendrecv(&haloDataTransferLaunched_, sizeof(GpuEventSynchronizer*), MPI_BYTE, sendRank, 0,
                 &haloDataTransferRemote, sizeof(GpuEventSynchronizer*), MPI_BYTE, recvRank, 0,
                 mpi_comm_mysim_, MPI_STATUS_IGNORE);

    haloDataTransferRemote->enqueueWaitEvent(stream);
#else
    GMX_UNUSED_VALUE(sendRank);
    GMX_UNUSED_VALUE(recvRank);
#endif
}

/*! \brief Create Domdec GPU object */
GpuHaloExchange::Impl::Impl(gmx_domdec_t *dd,
                            MPI_Comm      mpi_comm_mysim,
                            void         *localStream,
                            void         *nonLocalStream)
    : dd_(dd),
      sendRankX_(dd->neighbor[0][1]),
      recvRankX_(dd->neighbor[0][0]),
      sendRankF_(dd->neighbor[0][0]),
      recvRankF_(dd->neighbor[0][1]),
      usePBC_(dd->ci[dd->dim[0]] == 0),
      haloDataTransferLaunched_(new GpuEventSynchronizer()),
      mpi_comm_mysim_(mpi_comm_mysim),
      localStream_(*static_cast<cudaStream_t*>(localStream)),
      nonLocalStream_(*static_cast<cudaStream_t*>(nonLocalStream))
{
    GMX_RELEASE_ASSERT(GMX_THREAD_MPI, "GPU halo exchange is currently only supported with thread-MPI enabled");

    if (dd->ndim > 1)
    {
        gmx_fatal(FARGS, "Error: dd->ndim > 1 is not yet supported in GPU halo exchange");
    }

    if (usePBC_ && dd->unitCellInfo.haveScrewPBC)
    {
        gmx_fatal(FARGS, "Error: screw PBC is not yet supported in GPU halo exchange\n");
    }

    changePinningPolicy(&h_indexMap_, gmx::PinningPolicy::PinnedIfSupported);

    allocateDeviceBuffer(&d_fShift_, 1, nullptr);
}

GpuHaloExchange::Impl::~Impl()
{
    freeDeviceBuffer(&d_indexMap_);
    freeDeviceBuffer(&d_sendBuf_);
    freeDeviceBuffer(&d_recvBuf_);
    freeDeviceBuffer(&d_fShift_);
    delete haloDataTransferLaunched_;
}

GpuHaloExchange::GpuHaloExchange(gmx_domdec_t *dd,
                                 MPI_Comm      mpi_comm_mysim,
                                 void         *localStream,
                                 void         *nonLocalStream)
    : impl_(new Impl(dd, mpi_comm_mysim, localStream, nonLocalStream))
{
}

GpuHaloExchange::~GpuHaloExchange() = default;

void GpuHaloExchange::reinitHalo(DeviceBuffer<float> d_coordinatesBuffer,
                                 DeviceBuffer<float> d_forcesBuffer)
{
    impl_->reinitHalo(reinterpret_cast<float3*>(d_coordinatesBuffer), reinterpret_cast<float3*>(d_forcesBuffer));
}

void GpuHaloExchange::communicateHaloCoordinates(const matrix box, GpuEventSynchronizer *coordinatesReadyOnDeviceEvent)
{
    impl_->communicateHaloCoordinates(box, coordinatesReadyOnDeviceEvent);
}

void GpuHaloExchange::communicateHaloForces(bool accumulateForces)
{
    impl_->communicateHaloForces(accumulateForces);
}

} // namespace gmx