/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Implements GPU halo exchange using CUDA.
 *
 * \author Alan Gray <alang@nvidia.com>
 *
 * \ingroup module_domdec
 */
#include "gmxpre.h"

#include "gpuhaloexchange_impl.cuh"

#include "config.h"

#include <assert.h>
#include <stdio.h>

#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/domdec/gpuhaloexchange.h"
#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/devicebuffer.h"
#include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
#include "gromacs/gpu_utils/vectype_ops.cuh"
#include "gromacs/pbcutil/ishift.h"

#include "domdec_internal.h"

namespace gmx
{

//! Number of CUDA threads in a block
// TODO: Optimize this through experimentation
constexpr static int c_threadsPerBlock = 256;

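/*! \brief Pack non-local coordinate data buffer on the GPU using a pre-populated "map" containing index information.
 * \param[out] dataPacked       packed array of coordinate values to be transferred
 * \param[in]  data             full array of coordinate values
 * \param[in]  map              array of indices defining mapping from full to packed array
 * \param[in]  mapSize          number of elements in map array
 * \param[in]  coordinateShift  periodic shift applied to each packed value when usePBC is true
 */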
template <bool usePBC>
__global__ void packSendBufKernel(float3 * __restrict__       dataPacked,
                                  const float3 * __restrict__ data,
                                  const int * __restrict__    map,
                                  const int                   mapSize,
                                  const float3                coordinateShift)
{
    int threadIndex = blockIdx.x*blockDim.x + threadIdx.x;

    // Guard before dereferencing map, so that out-of-range threads
    // do not read beyond the end of the map array.
    if (threadIndex < mapSize)
    {
        float3       *gm_dataDest = &dataPacked[threadIndex];
        const float3 *gm_dataSrc  = &data[map[threadIndex]];

        if (usePBC)
        {
            *gm_dataDest = *gm_dataSrc + coordinateShift;
        }
        else
        {
            *gm_dataDest = *gm_dataSrc;
        }
    }
}
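
/* For reference, the packing above is equivalent to the following serial
 * gather (an illustrative sketch only, not part of the build; host-side
 * float3 addition is assumed for brevity):
 *
 * \code
 * for (int i = 0; i < mapSize; i++)
 * {
 *     dataPacked[i] = data[map[i]];                         // gather through the index map
 *     if (usePBC)
 *     {
 *         dataPacked[i] = dataPacked[i] + coordinateShift;  // apply periodic shift
 *     }
 * }
 * \endcode
 */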

/*! \brief Unpack non-local force data buffer on the GPU using a pre-populated "map" containing index information.
 * \param[out] data        full array of force values
 * \param[in]  dataPacked  packed array of force values to be transferred
 * \param[in]  map         array of indices defining mapping from full to packed array
 * \param[in]  mapSize     number of elements in map array
 */
template <bool accumulate>
__global__ void unpackRecvBufKernel(float3 * __restrict__       data,
                                    const float3 * __restrict__ dataPacked,
                                    const int * __restrict__    map,
                                    const int                   mapSize)
{
    int threadIndex = blockIdx.x*blockDim.x + threadIdx.x;

    // Guard before dereferencing map, so that out-of-range threads
    // do not read beyond the end of the map array.
    if (threadIndex < mapSize)
    {
        const float3 *gm_dataSrc  = &dataPacked[threadIndex];
        float3       *gm_dataDest = &data[map[threadIndex]];

        if (accumulate)
        {
            *gm_dataDest += *gm_dataSrc;
        }
        else
        {
            *gm_dataDest = *gm_dataSrc;
        }
    }
}
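
/* The corresponding serial scatter (illustrative sketch only, not part of
 * the build) is:
 *
 * \code
 * for (int i = 0; i < mapSize; i++)
 * {
 *     if (accumulate)
 *     {
 *         data[map[i]] += dataPacked[i];  // add halo forces to existing values
 *     }
 *     else
 *     {
 *         data[map[i]]  = dataPacked[i];  // overwrite with halo forces
 *     }
 * }
 * \endcode
 *
 * Note that the non-atomic += in the kernel would race if two map entries
 * referenced the same element, so the map is assumed to address distinct
 * elements of data.
 */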

void GpuHaloExchange::Impl::reinitHalo(float3 *d_coordinatesBuffer,
                                       float3 *d_forcesBuffer)
{
    d_x_ = d_coordinatesBuffer;
    d_f_ = d_forcesBuffer;

    cudaStream_t                 stream  = nonLocalStream_;
    int                          nzone   = 1;
    const gmx_domdec_comm_t     &comm    = *dd_->comm;
    const gmx_domdec_comm_dim_t &cd      = comm.cd[0];
    const gmx_domdec_ind_t      &ind     = cd.ind[0];
    int                          newSize = ind.nsend[nzone+1];

    GMX_RELEASE_ASSERT(cd.numPulses() == 1, "Multiple pulses are not yet supported in GPU halo exchange");
    GMX_ASSERT(cd.receiveInPlace, "Out-of-place receive is not yet supported in GPU halo exchange");

    // reallocates only if needed
    h_indexMap_.resize(newSize);
    // reallocate on device only if needed
    if (newSize > maxPackedBufferSize_)
    {
        reallocateDeviceBuffer(&d_indexMap_, newSize, &indexMapSize_, &indexMapSizeAlloc_, nullptr);
        reallocateDeviceBuffer(&d_sendBuf_, newSize, &sendBufSize_, &sendBufSizeAlloc_, nullptr);
        reallocateDeviceBuffer(&d_recvBuf_, newSize, &recvBufSize_, &recvBufSizeAlloc_, nullptr);
        maxPackedBufferSize_ = newSize;
    }

    xSendSize_ = newSize;
#if GMX_MPI
    MPI_Sendrecv(&xSendSize_, sizeof(int), MPI_BYTE, sendRankX_, 0,
                 &xRecvSize_, sizeof(int), MPI_BYTE, recvRankX_, 0,
                 mpi_comm_mysim_, MPI_STATUS_IGNORE);
#endif
    fSendSize_ = xRecvSize_;
    fRecvSize_ = xSendSize_;

    numHomeAtoms_ = comm.atomRanges.numHomeAtoms();  // offset for data received by this rank

    GMX_ASSERT(ind.index.size() == h_indexMap_.size(), "Size mismatch");
    std::copy(ind.index.begin(), ind.index.end(), h_indexMap_.begin());

    copyToDeviceBuffer(&d_indexMap_, h_indexMap_.data(), 0, newSize, stream, GpuApiCallBehavior::Async, nullptr);

    // This rank will push data to its neighbor, so it needs to know the
    // remote receive address, and it must similarly send its own receive
    // address to its other neighbor. We can do this here in the reinit
    // function, since the pointers will not change until the next NS step.

    // Coordinates buffer:
#if GMX_MPI
    void* recvPtr = static_cast<void*>(&d_coordinatesBuffer[numHomeAtoms_]);
    MPI_Sendrecv(&recvPtr, sizeof(void*), MPI_BYTE, recvRankX_, 0,
                 &remoteXPtr_, sizeof(void*), MPI_BYTE, sendRankX_, 0,
                 mpi_comm_mysim_, MPI_STATUS_IGNORE);

    // Force buffer:
    recvPtr = static_cast<void*>(d_recvBuf_);
    MPI_Sendrecv(&recvPtr, sizeof(void*), MPI_BYTE, recvRankF_, 0,
                 &remoteFPtr_, sizeof(void*), MPI_BYTE, sendRankF_, 0,
                 mpi_comm_mysim_, MPI_STATUS_IGNORE);
#endif
}
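
/* The address exchange above follows a simple pattern: each rank sends the
 * device address into which its neighbor should push data, and in return
 * receives the corresponding remote address for its own pushes. A minimal
 * standalone sketch of the handshake (illustrative only; d_myRecvBuffer,
 * upstreamRank, and downstreamRank are placeholder names, not part of this
 * class):
 *
 * \code
 * void* localRecvPtr = static_cast<void*>(d_myRecvBuffer);
 * void* remoteRecvPtr;
 * // Send our receive address to the rank that pushes to us, and obtain
 * // the receive address of the rank that we push to.
 * MPI_Sendrecv(&localRecvPtr, sizeof(void*), MPI_BYTE, upstreamRank, 0,
 *              &remoteRecvPtr, sizeof(void*), MPI_BYTE, downstreamRank, 0,
 *              comm, MPI_STATUS_IGNORE);
 * \endcode
 *
 * Exchanging raw device pointers is valid here because, with thread-MPI
 * (asserted in the constructor), all ranks share one process and hence one
 * CUDA address space.
 */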

// The following method should be called after the local setCoordinates
// (which records the coordinatesOnDeviceEvent_ event when the coordinate
// data has been copied to the device).
void GpuHaloExchange::Impl::communicateHaloCoordinates(const matrix box)
{
    // ensure stream waits until coordinate data is available on device
    coordinatesOnDeviceEvent_->enqueueWaitEvent(nonLocalStream_);

    // launch kernel to pack send buffer
    KernelLaunchConfig config;
    config.blockSize[0]     = c_threadsPerBlock;
    config.blockSize[1]     = 1;
    config.blockSize[2]     = 1;
    config.gridSize[0]      = (xSendSize_+c_threadsPerBlock-1)/c_threadsPerBlock;
    config.gridSize[1]      = 1;
    config.gridSize[2]      = 1;
    config.sharedMemorySize = 0;
    config.stream           = nonLocalStream_;

    const float3 *sendBuf  = d_sendBuf_;
    const float3 *d_x      = d_x_;
    const int    *indexMap = d_indexMap_;
    const int     size     = xSendSize_;
    // The coordinateShift changes between steps when we have performed a
    // DD partition, or have updated the box, e.g. when performing pressure
    // coupling. So, for simplicity, the box is used every step to pass the
    // shift vector as an argument of the packing kernel.
    //
    // Because only one-dimensional DD is supported, the coordinate
    // shift only needs to handle that dimension.
    const int    dimensionIndex = dd_->dim[0];
    const float3 coordinateShift {
        box[dimensionIndex][XX], box[dimensionIndex][YY], box[dimensionIndex][ZZ]
    };

    // Avoid launching kernel when there is no work to do
    if (size > 0)
    {
        auto kernelFn = usePBC_ ? packSendBufKernel<true> : packSendBufKernel<false>;

        const auto kernelArgs = prepareGpuKernelArguments(kernelFn, config, &sendBuf, &d_x, &indexMap,
                                                          &size, &coordinateShift);

        launchGpuKernel(kernelFn, config, nullptr, "Domdec GPU Apply X Halo Exchange", kernelArgs);
    }

    communicateHaloData(d_x_, HaloQuantity::HaloCoordinates);
}

// The following method should be called after non-local buffer operations,
// and before the local buffer operations. It operates in the non-local stream.
void GpuHaloExchange::Impl::communicateHaloForces(bool accumulateForces)
{
    // Communicate halo data (in non-local stream)
    communicateHaloData(d_f_, HaloQuantity::HaloForces);

    float3 *d_f = d_f_;

    if (!accumulateForces)
    {
        // Clear local portion of force array (in local stream)
        cudaMemsetAsync(d_f, 0, numHomeAtoms_*sizeof(rvec), localStream_);
    }

    // Ensure the non-local stream waits for the local stream, due to its
    // dependence on the previous H2D copy of CPU forces (if accumulateForces
    // is true) or on the above clearing.
    // TODO: remove this dependency on localStream - Redmine issue #3093
    GpuEventSynchronizer eventLocal;
    eventLocal.markEvent(localStream_);
    eventLocal.enqueueWaitEvent(nonLocalStream_);

    // Unpack halo buffer into force array

    KernelLaunchConfig config;
    config.blockSize[0]     = c_threadsPerBlock;
    config.blockSize[1]     = 1;
    config.blockSize[2]     = 1;
    config.gridSize[0]      = (fRecvSize_+c_threadsPerBlock-1)/c_threadsPerBlock;
    config.gridSize[1]      = 1;
    config.gridSize[2]      = 1;
    config.sharedMemorySize = 0;
    config.stream           = nonLocalStream_;

    const float3 *recvBuf  = d_recvBuf_;
    const int    *indexMap = d_indexMap_;
    const int     size     = fRecvSize_;

    if (size > 0)
    {
        auto kernelFn = accumulateForces ? unpackRecvBufKernel<true> : unpackRecvBufKernel<false>;

        const auto kernelArgs = prepareGpuKernelArguments(kernelFn, config, &d_f,
                                                          &recvBuf, &indexMap,
                                                          &size);

        launchGpuKernel(kernelFn, config, nullptr, "Domdec GPU Apply F Halo Exchange", kernelArgs);
    }
}


void GpuHaloExchange::Impl::communicateHaloData(float3      *d_ptr,
                                                HaloQuantity haloQuantity)
{
    void *sendPtr;
    int   sendSize;
    void *remotePtr;
    int   sendRank;
    int   recvRank;

    if (haloQuantity == HaloQuantity::HaloCoordinates)
    {
        sendPtr   = static_cast<void*>(d_sendBuf_);
        sendSize  = xSendSize_;
        remotePtr = remoteXPtr_;
        sendRank  = sendRankX_;
        recvRank  = recvRankX_;

#if GMX_MPI
        // Wait for a signal from the receiving task that it is ready, and
        // similarly signal the task that will push data to this task.
        char thisTaskIsReady, remoteTaskIsReady;
        MPI_Sendrecv(&thisTaskIsReady, sizeof(char), MPI_BYTE, recvRank, 0,
                     &remoteTaskIsReady, sizeof(char), MPI_BYTE, sendRank, 0,
                     mpi_comm_mysim_, MPI_STATUS_IGNORE);
#endif
    }
    else
    {
        sendPtr   = static_cast<void*>(&(d_ptr[numHomeAtoms_]));
        sendSize  = fSendSize_;
        remotePtr = remoteFPtr_;
        sendRank  = sendRankF_;
        recvRank  = recvRankF_;
    }

    communicateHaloDataWithCudaDirect(sendPtr, sendSize, sendRank, remotePtr, recvRank);
}

void GpuHaloExchange::Impl::communicateHaloDataWithCudaDirect(void *sendPtr,
                                                              int   sendSize,
                                                              int   sendRank,
                                                              void *remotePtr,
                                                              int   recvRank)
{
    cudaError_t  stat;
    cudaStream_t stream = nonLocalStream_;

    // We asynchronously push data to the remote rank. The remote
    // destination pointer has already been set in the init function. We
    // don't need to worry about overwriting data that the remote rank
    // still needs, since the halo exchange is done just once per
    // timestep for each of X and F.

    // send data to neighbor, if any data exists to send
    if (sendSize > 0)
    {
        stat = cudaMemcpyAsync(remotePtr, sendPtr, sendSize*DIM*sizeof(float), cudaMemcpyDeviceToDevice, stream);
        CU_RET_ERR(stat, "cudaMemcpyAsync on GPU Domdec CUDA direct data transfer failed");
    }

#if GMX_MPI
    // Ensure the pushed data has arrived before the remote rank progresses.
    // This rank records an event and sends it to the remote rank to which it
    // has just pushed data. This rank receives an event from the remote rank
    // that has pushed data here, and enqueues that event in its own stream.
    GpuEventSynchronizer *haloDataTransferRemote;

    haloDataTransferLaunched_->markEvent(stream);

    MPI_Sendrecv(&haloDataTransferLaunched_, sizeof(GpuEventSynchronizer*), MPI_BYTE, sendRank, 0,
                 &haloDataTransferRemote, sizeof(GpuEventSynchronizer*), MPI_BYTE, recvRank, 0,
                 mpi_comm_mysim_, MPI_STATUS_IGNORE);

    haloDataTransferRemote->enqueueWaitEvent(stream);
#else
    GMX_UNUSED_VALUE(sendRank);
    GMX_UNUSED_VALUE(recvRank);
#endif
}
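
/* The synchronization above is a lightweight rendezvous: each rank records
 * an event in its stream after launching the push, swaps event handles with
 * its peer over MPI, and then makes its own stream wait on the peer's event.
 * An illustrative sketch of the pattern (placeholder names; the event
 * objects remain valid across ranks because thread-MPI ranks share one
 * process):
 *
 * \code
 * GpuEventSynchronizer *remoteEvent;
 * localEvent->markEvent(myStream);   // record after launching the push
 * MPI_Sendrecv(&localEvent, sizeof(GpuEventSynchronizer*), MPI_BYTE, peerA, 0,
 *              &remoteEvent, sizeof(GpuEventSynchronizer*), MPI_BYTE, peerB, 0,
 *              comm, MPI_STATUS_IGNORE);
 * remoteEvent->enqueueWaitEvent(myStream);  // wait for the peer's push
 * \endcode
 */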

/*! \brief Create GPU halo exchange implementation object */
GpuHaloExchange::Impl::Impl(gmx_domdec_t *dd,
                            MPI_Comm      mpi_comm_mysim,
                            void         *localStream,
                            void         *nonLocalStream,
                            void         *coordinatesOnDeviceEvent)
    : dd_(dd),
      sendRankX_(dd->neighbor[0][1]),
      recvRankX_(dd->neighbor[0][0]),
      sendRankF_(dd->neighbor[0][0]),
      recvRankF_(dd->neighbor[0][1]),
      usePBC_(dd->ci[dd->dim[0]] == 0),
      haloDataTransferLaunched_(new GpuEventSynchronizer()),
      mpi_comm_mysim_(mpi_comm_mysim),
      localStream_(*static_cast<cudaStream_t*>(localStream)),
      nonLocalStream_(*static_cast<cudaStream_t*>(nonLocalStream)),
      coordinatesOnDeviceEvent_(static_cast<GpuEventSynchronizer*>(coordinatesOnDeviceEvent))
{
    GMX_RELEASE_ASSERT(GMX_THREAD_MPI, "GPU halo exchange is currently only supported with thread-MPI enabled");

    if (dd->ndim > 1)
    {
        gmx_fatal(FARGS, "Error: dd->ndim > 1 is not yet supported in GPU halo exchange");
    }

    if (usePBC_ && dd->unitCellInfo.haveScrewPBC)
    {
        gmx_fatal(FARGS, "Error: screw PBC is not yet supported in GPU halo exchange\n");
    }

    changePinningPolicy(&h_indexMap_, gmx::PinningPolicy::PinnedIfSupported);

    allocateDeviceBuffer(&d_fShift_, 1, nullptr);
}

GpuHaloExchange::Impl::~Impl()
{
    freeDeviceBuffer(&d_indexMap_);
    freeDeviceBuffer(&d_sendBuf_);
    freeDeviceBuffer(&d_recvBuf_);
    freeDeviceBuffer(&d_fShift_);
    delete haloDataTransferLaunched_;
}

GpuHaloExchange::GpuHaloExchange(gmx_domdec_t *dd,
                                 MPI_Comm      mpi_comm_mysim,
                                 void         *localStream,
                                 void         *nonLocalStream,
                                 void         *coordinatesOnDeviceEvent)
    : impl_(new Impl(dd, mpi_comm_mysim, localStream, nonLocalStream, coordinatesOnDeviceEvent))
{
}

GpuHaloExchange::~GpuHaloExchange() = default;

void GpuHaloExchange::reinitHalo(DeviceBuffer<float> d_coordinatesBuffer,
                                 DeviceBuffer<float> d_forcesBuffer)
{
    impl_->reinitHalo(reinterpret_cast<float3*>(d_coordinatesBuffer), reinterpret_cast<float3*>(d_forcesBuffer));
}

void GpuHaloExchange::communicateHaloCoordinates(const matrix box)
{
    impl_->communicateHaloCoordinates(box);
}

void GpuHaloExchange::communicateHaloForces(bool accumulateForces)
{
    impl_->communicateHaloForces(accumulateForces);
}
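
/* Typical usage of the public interface (an illustrative sketch; the actual
 * call sites live in the domain-decomposition setup and the MD loop):
 *
 * \code
 * gmx::GpuHaloExchange haloExchange(dd, mpiComm, &localStream, &nonLocalStream,
 *                                   &coordinatesReadyEvent);
 * haloExchange.reinitHalo(d_x, d_f);                     // after each DD (re)partitioning
 * haloExchange.communicateHaloCoordinates(box);          // each step, before force computation
 * haloExchange.communicateHaloForces(accumulateForces);  // each step, during force reduction
 * \endcode
 */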

} // namespace gmx