src/gromacs/domdec/gpuhaloexchange_impl.cu
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Implements GPU halo exchange using CUDA.
 *
 * \author Alan Gray <alang@nvidia.com>
 *
 * \ingroup module_domdec
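 *
 * Typical use (a sketch, not a prescription; one object is needed per
 * domain-decomposition dimension and pulse, and the streams, events and
 * domain-decomposition state are assumed to be set up elsewhere):
 * \code
 *   GpuHaloExchange halo(dd, dimIndex, mpiComm, deviceContext, localStream, nonLocalStream, pulse, wcycle);
 *   halo.reinitHalo(d_x, d_f);                            // after each domain repartitioning
 *   halo.communicateHaloCoordinates(box, xReadyOnDevice); // each step, before force computation
 *   halo.communicateHaloForces(accumulateForces);         // each step, after non-local force work
 * \endcode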
 */
#include "gmxpre.h"

#include "gpuhaloexchange_impl.cuh"

#include "config.h"

#include <assert.h>
#include <stdio.h>

#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/domdec/gpuhaloexchange.h"
#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/device_context.h"
#include "gromacs/gpu_utils/devicebuffer.h"
#include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
#include "gromacs/gpu_utils/typecasts.cuh"
#include "gromacs/gpu_utils/vectype_ops.cuh"
#include "gromacs/math/vectypes.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/utility/gmxmpi.h"

#include "domdec_internal.h"

namespace gmx
{

//! Number of CUDA threads in a block
// TODO Optimize this through experimentation
constexpr static int c_threadsPerBlock = 256;
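
/*! \brief Pack non-local coordinate data buffer on the GPU using pre-populated "map" containing index
 * information.
 * \param[out] dataPacked       packed array of coordinate values to be transferred
 * \param[in]  data             full array of coordinate values
 * \param[in]  map              array of indices defining mapping from full to packed array
 * \param[in]  mapSize          number of elements in map array
 * \param[in]  coordinateShift  box shift to apply under periodic boundary conditions
 */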
template<bool usePBC>
__global__ void packSendBufKernel(float3* __restrict__ dataPacked,
                                  const float3* __restrict__ data,
                                  const int* __restrict__ map,
                                  const int    mapSize,
                                  const float3 coordinateShift)
{
    int threadIndex = blockIdx.x * blockDim.x + threadIdx.x;

    if (threadIndex < mapSize)
    {
        // Look up source and destination addresses only after the bounds
        // check, to avoid an out-of-bounds read of map[] in the final,
        // partially filled thread block.
        float3*       gm_dataDest = &dataPacked[threadIndex];
        const float3* gm_dataSrc  = &data[map[threadIndex]];

        if (usePBC)
        {
            *gm_dataDest = *gm_dataSrc + coordinateShift;
        }
        else
        {
            *gm_dataDest = *gm_dataSrc;
        }
    }

    return;
}

/*! \brief Unpack non-local force data buffer on the GPU using pre-populated "map" containing index
 * information.
 * \param[out] data        full array of force values
 * \param[in]  dataPacked  packed array of force values to be transferred
 * \param[in]  map         array of indices defining mapping from full to packed array
 * \param[in]  mapSize     number of elements in map array
 */
template<bool accumulate>
__global__ void unpackRecvBufKernel(float3* __restrict__ data,
                                    const float3* __restrict__ dataPacked,
                                    const int* __restrict__ map,
                                    const int mapSize)
{
    int threadIndex = blockIdx.x * blockDim.x + threadIdx.x;

    if (threadIndex < mapSize)
    {
        // As in the packing kernel, defer the map[] lookup until after the
        // bounds check to avoid an out-of-bounds read.
        const float3* gm_dataSrc  = &dataPacked[threadIndex];
        float3*       gm_dataDest = &data[map[threadIndex]];

        if (accumulate)
        {
            *gm_dataDest += *gm_dataSrc;
        }
        else
        {
            *gm_dataDest = *gm_dataSrc;
        }
    }

    return;
}

void GpuHaloExchange::Impl::reinitHalo(float3* d_coordinatesBuffer, float3* d_forcesBuffer)
{
    wallcycle_start(wcycle_, ewcDOMDEC);
    wallcycle_sub_start(wcycle_, ewcsDD_GPU);

    d_x_ = d_coordinatesBuffer;
    d_f_ = d_forcesBuffer;

    const gmx_domdec_comm_t&     comm = *dd_->comm;
    const gmx_domdec_comm_dim_t& cd   = comm.cd[dimIndex_];
    const gmx_domdec_ind_t&      ind  = cd.ind[pulse_];

    numHomeAtoms_ = comm.atomRanges.numHomeAtoms(); // offset for data received by this rank

    // Determine receive offset for the dimension index and pulse of this halo exchange object
    int numZoneTemp   = 1;
    int numZone       = 0;
    int numAtomsTotal = numHomeAtoms_;
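    // Walk over every earlier exchange (all pulses of the preceding
    // dimensions plus the earlier pulses of this dimension), accumulating
    // the atom counts they receive, so that atomOffset_ ends up at the
    // start of the region received by this object; the zone count doubles
    // with each dimension of the decomposition.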
    for (int i = 0; i <= dimIndex_; i++)
    {
        int pulseMax = (i == dimIndex_) ? pulse_ : (comm.cd[i].numPulses() - 1);
        for (int p = 0; p <= pulseMax; p++)
        {
            atomOffset_                     = numAtomsTotal;
            const gmx_domdec_ind_t& indTemp = comm.cd[i].ind[p];
            numAtomsTotal += indTemp.nrecv[numZoneTemp + 1];
        }
        numZone = numZoneTemp;
        numZoneTemp += numZoneTemp;
    }

    int newSize = ind.nsend[numZone + 1];

    GMX_ASSERT(cd.receiveInPlace, "Out-of-place receive is not yet supported in GPU halo exchange");

    // reallocates only if needed
    h_indexMap_.resize(newSize);
    // reallocate on device only if needed
    if (newSize > maxPackedBufferSize_)
    {
        reallocateDeviceBuffer(&d_indexMap_, newSize, &indexMapSize_, &indexMapSizeAlloc_, deviceContext_);
        reallocateDeviceBuffer(&d_sendBuf_, newSize, &sendBufSize_, &sendBufSizeAlloc_, deviceContext_);
        reallocateDeviceBuffer(&d_recvBuf_, newSize, &recvBufSize_, &recvBufSizeAlloc_, deviceContext_);
        maxPackedBufferSize_ = newSize;
    }

    xSendSize_ = newSize;
#if GMX_MPI
    MPI_Sendrecv(&xSendSize_, sizeof(int), MPI_BYTE, sendRankX_, 0, &xRecvSize_, sizeof(int),
                 MPI_BYTE, recvRankX_, 0, mpi_comm_mysim_, MPI_STATUS_IGNORE);
#endif
    fSendSize_ = xRecvSize_;
    fRecvSize_ = xSendSize_;
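    // Forces travel in the opposite direction to coordinates, so the force
    // send/receive sizes are simply the coordinate sizes swapped.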

    if (newSize > 0)
    {
        GMX_ASSERT(ind.index.size() == h_indexMap_.size(),
                   "Size mismatch between domain decomposition communication index array and GPU "
                   "halo exchange index mapping array");
        std::copy(ind.index.begin(), ind.index.end(), h_indexMap_.begin());

        copyToDeviceBuffer(&d_indexMap_, h_indexMap_.data(), 0, newSize, nonLocalStream_,
                           GpuApiCallBehavior::Async, nullptr);
    }
    // This rank will push data to its neighbor, so it needs to know the
    // remote receive address, and it similarly sends its own receive
    // address to the other neighbor. We can do this here, in this reinit
    // function, since the pointers will not change until the next
    // neighbor-search (NS) step.
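    // Exchanging raw device pointers between ranks is valid only because
    // this build runs with thread-MPI (asserted in the constructor), so all
    // ranks share a single address space.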

    // Coordinates buffer:
    void* recvPtr = static_cast<void*>(&d_x_[atomOffset_]);
#if GMX_MPI
    MPI_Sendrecv(&recvPtr, sizeof(void*), MPI_BYTE, recvRankX_, 0, &remoteXPtr_, sizeof(void*),
                 MPI_BYTE, sendRankX_, 0, mpi_comm_mysim_, MPI_STATUS_IGNORE);

    // Force buffer:
    recvPtr = static_cast<void*>(d_recvBuf_);
    MPI_Sendrecv(&recvPtr, sizeof(void*), MPI_BYTE, recvRankF_, 0, &remoteFPtr_, sizeof(void*),
                 MPI_BYTE, sendRankF_, 0, mpi_comm_mysim_, MPI_STATUS_IGNORE);
#endif

    wallcycle_sub_stop(wcycle_, ewcsDD_GPU);
    wallcycle_stop(wcycle_, ewcDOMDEC);

    return;
}

void GpuHaloExchange::Impl::communicateHaloCoordinates(const matrix          box,
                                                       GpuEventSynchronizer* coordinatesReadyOnDeviceEvent)
{

    wallcycle_start(wcycle_, ewcLAUNCH_GPU);
    if (pulse_ == 0)
    {
        // ensure stream waits until coordinate data is available on device
        coordinatesReadyOnDeviceEvent->enqueueWaitEvent(nonLocalStream_);
    }

    wallcycle_sub_start(wcycle_, ewcsLAUNCH_GPU_MOVEX);

    // launch kernel to pack send buffer
    KernelLaunchConfig config;
    config.blockSize[0]     = c_threadsPerBlock;
    config.blockSize[1]     = 1;
    config.blockSize[2]     = 1;
    config.gridSize[0]      = (xSendSize_ + c_threadsPerBlock - 1) / c_threadsPerBlock;
    config.gridSize[1]      = 1;
    config.gridSize[2]      = 1;
    config.sharedMemorySize = 0;

    const float3* sendBuf  = d_sendBuf_;
    const float3* d_x      = d_x_;
    const int*    indexMap = d_indexMap_;
    const int     size     = xSendSize_;
    // The coordinateShift changes between steps when we have
    // performed a DD partition, or have updated the box e.g. when
    // performing pressure coupling. So, for simplicity, the box
    // is used every step to pass the shift vector as an argument to
    // the packing kernel.
    const int    boxDimensionIndex = dd_->dim[dimIndex_];
    const float3 coordinateShift{ box[boxDimensionIndex][XX], box[boxDimensionIndex][YY],
                                  box[boxDimensionIndex][ZZ] };
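    // Note that for a triclinic box this row of the box matrix can have
    // more than one non-zero component, so all three are passed.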

    // Avoid launching kernel when there is no work to do
    if (size > 0)
    {
        auto kernelFn = usePBC_ ? packSendBufKernel<true> : packSendBufKernel<false>;

        const auto kernelArgs = prepareGpuKernelArguments(kernelFn, config, &sendBuf, &d_x,
                                                          &indexMap, &size, &coordinateShift);

        launchGpuKernel(kernelFn, config, nonLocalStream_, nullptr,
                        "Domdec GPU Apply X Halo Exchange", kernelArgs);
    }

    wallcycle_sub_stop(wcycle_, ewcsLAUNCH_GPU_MOVEX);
    wallcycle_stop(wcycle_, ewcLAUNCH_GPU);

    // Consider time spent in communicateHaloData as Comm.X counter
    // TODO: We need further refinement here, as communicateHaloData includes launch time for cudaMemcpyAsync
    wallcycle_start(wcycle_, ewcMOVEX);

    communicateHaloData(d_x_, HaloQuantity::HaloCoordinates, coordinatesReadyOnDeviceEvent);

    wallcycle_stop(wcycle_, ewcMOVEX);

    return;
}

// The following method should be called after non-local buffer operations,
// and before the local buffer operations. It operates in the non-local stream.
void GpuHaloExchange::Impl::communicateHaloForces(bool accumulateForces)
{
    // Consider time spent in communicateHaloData as Comm.F counter
    // TODO: We need further refinement here, as communicateHaloData includes launch time for cudaMemcpyAsync
    wallcycle_start(wcycle_, ewcMOVEF);

    // Communicate halo data (in non-local stream)
    communicateHaloData(d_f_, HaloQuantity::HaloForces, nullptr);

    wallcycle_stop(wcycle_, ewcMOVEF);

    wallcycle_start_nocount(wcycle_, ewcLAUNCH_GPU);
    wallcycle_sub_start(wcycle_, ewcsLAUNCH_GPU_MOVEF);

    float3* d_f = d_f_;
    // If this is the last pulse and index (noting that the force halo
    // exchanges across multiple pulses and indices are called in
    // reverse order) then perform the following preparation
    // activities
    if ((pulse_ == (dd_->comm->cd[dimIndex_].numPulses() - 1)) && (dimIndex_ == (dd_->ndim - 1)))
    {
        if (!accumulateForces)
        {
            // Clear local portion of force array (in local stream)
            cudaMemsetAsync(d_f, 0, numHomeAtoms_ * sizeof(rvec), localStream_.stream());
        }

        // ensure non-local stream waits for local stream, due to dependence on
        // the previous H2D copy of CPU forces (if accumulateForces is true)
        // or the above clearing.
        // TODO remove this dependency on localStream - Redmine Issue #3093
        GpuEventSynchronizer eventLocal;
        eventLocal.markEvent(localStream_);
        eventLocal.enqueueWaitEvent(nonLocalStream_);
    }

    // Unpack halo buffer into force array

    KernelLaunchConfig config;
    config.blockSize[0]     = c_threadsPerBlock;
    config.blockSize[1]     = 1;
    config.blockSize[2]     = 1;
    config.gridSize[0]      = (fRecvSize_ + c_threadsPerBlock - 1) / c_threadsPerBlock;
    config.gridSize[1]      = 1;
    config.gridSize[2]      = 1;
    config.sharedMemorySize = 0;

    const float3* recvBuf  = d_recvBuf_;
    const int*    indexMap = d_indexMap_;
    const int     size     = fRecvSize_;

    if (pulse_ > 0 || dd_->ndim > 1)
    {
        // We need to accumulate rather than set, since it is possible
        // that, in this pulse/dim, a value could be written to a location
        // corresponding to the halo region of a following pulse/dim.
        accumulateForces = true;
    }

    if (size > 0)
    {
        auto kernelFn = accumulateForces ? unpackRecvBufKernel<true> : unpackRecvBufKernel<false>;

        const auto kernelArgs =
                prepareGpuKernelArguments(kernelFn, config, &d_f, &recvBuf, &indexMap, &size);

        launchGpuKernel(kernelFn, config, nonLocalStream_, nullptr,
                        "Domdec GPU Apply F Halo Exchange", kernelArgs);
    }

    if (pulse_ == 0)
    {
        fReadyOnDevice_.markEvent(nonLocalStream_);
    }

    wallcycle_sub_stop(wcycle_, ewcsLAUNCH_GPU_MOVEF);
    wallcycle_stop(wcycle_, ewcLAUNCH_GPU);
}

void GpuHaloExchange::Impl::communicateHaloData(float3*               d_ptr,
                                                HaloQuantity          haloQuantity,
                                                GpuEventSynchronizer* coordinatesReadyOnDeviceEvent)
{

    void* sendPtr;
    int   sendSize;
    void* remotePtr;
    int   sendRank;
    int   recvRank;

    if (haloQuantity == HaloQuantity::HaloCoordinates)
    {
        sendPtr   = static_cast<void*>(d_sendBuf_);
        sendSize  = xSendSize_;
        remotePtr = remoteXPtr_;
        sendRank  = sendRankX_;
        recvRank  = recvRankX_;

#if GMX_MPI
        // Wait for event from receiving task that remote coordinates are ready, and enqueue that event to stream used
        // for subsequent data push. This avoids a race condition with the remote data being written in the previous timestep.
        // Similarly send event to task that will push data to this task.
        GpuEventSynchronizer* remoteCoordinatesReadyOnDeviceEvent;
        MPI_Sendrecv(&coordinatesReadyOnDeviceEvent, sizeof(GpuEventSynchronizer*), MPI_BYTE,
                     recvRank, 0, &remoteCoordinatesReadyOnDeviceEvent, sizeof(GpuEventSynchronizer*),
                     MPI_BYTE, sendRank, 0, mpi_comm_mysim_, MPI_STATUS_IGNORE);
        remoteCoordinatesReadyOnDeviceEvent->enqueueWaitEvent(nonLocalStream_);
#else
        GMX_UNUSED_VALUE(coordinatesReadyOnDeviceEvent);
#endif
    }
    else
    {
        sendPtr   = static_cast<void*>(&(d_ptr[atomOffset_]));
        sendSize  = fSendSize_;
        remotePtr = remoteFPtr_;
        sendRank  = sendRankF_;
        recvRank  = recvRankF_;
    }

    communicateHaloDataWithCudaDirect(sendPtr, sendSize, sendRank, remotePtr, recvRank);
}

void GpuHaloExchange::Impl::communicateHaloDataWithCudaDirect(void* sendPtr,
                                                              int   sendSize,
                                                              int   sendRank,
                                                              void* remotePtr,
                                                              int   recvRank)
{

    cudaError_t stat;

    // We asynchronously push data to the remote rank. The remote
    // destination pointer has already been set in the init fn. We
    // don't need to worry about overwriting data the remote rank
    // still needs, since the halo exchange is done just once per
    // timestep for each of X and F.

    // send data to neighbor, if any data exists to send
    if (sendSize > 0)
    {
        stat = cudaMemcpyAsync(remotePtr, sendPtr, sendSize * DIM * sizeof(float),
                               cudaMemcpyDeviceToDevice, nonLocalStream_.stream());

        CU_RET_ERR(stat, "cudaMemcpyAsync on GPU Domdec CUDA direct data transfer failed");
    }

#if GMX_MPI
    // Ensure the pushed data has arrived before the remote rank progresses.
    // This rank records an event and sends it to the remote rank which has just been pushed data.
    // This rank receives an event from the remote rank which has pushed data here, and enqueues
    // that event to its stream.
    GpuEventSynchronizer* haloDataTransferRemote;

    haloDataTransferLaunched_->markEvent(nonLocalStream_);

    MPI_Sendrecv(&haloDataTransferLaunched_, sizeof(GpuEventSynchronizer*), MPI_BYTE, sendRank, 0,
                 &haloDataTransferRemote, sizeof(GpuEventSynchronizer*), MPI_BYTE, recvRank, 0,
                 mpi_comm_mysim_, MPI_STATUS_IGNORE);

    haloDataTransferRemote->enqueueWaitEvent(nonLocalStream_);
#else
    GMX_UNUSED_VALUE(sendRank);
    GMX_UNUSED_VALUE(recvRank);
#endif
}

GpuEventSynchronizer* GpuHaloExchange::Impl::getForcesReadyOnDeviceEvent()
{
    return &fReadyOnDevice_;
}

/*! \brief Create Domdec GPU object */
GpuHaloExchange::Impl::Impl(gmx_domdec_t*        dd,
                            int                  dimIndex,
                            MPI_Comm             mpi_comm_mysim,
                            const DeviceContext& deviceContext,
                            const DeviceStream&  localStream,
                            const DeviceStream&  nonLocalStream,
                            int                  pulse,
                            gmx_wallcycle*       wcycle) :
    dd_(dd),
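    // Forces are communicated in the opposite direction to coordinates,
    // so the force send/receive ranks are the coordinate ranks swapped.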
    sendRankX_(dd->neighbor[dimIndex][1]),
    recvRankX_(dd->neighbor[dimIndex][0]),
    sendRankF_(dd->neighbor[dimIndex][0]),
    recvRankF_(dd->neighbor[dimIndex][1]),
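    // Only the rank whose cell is first along this dimension exchanges data
    // across the periodic boundary, so only it applies the box-vector shift
    // when packing coordinates.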
    usePBC_(dd->ci[dd->dim[dimIndex]] == 0),
    haloDataTransferLaunched_(new GpuEventSynchronizer()),
    mpi_comm_mysim_(mpi_comm_mysim),
    deviceContext_(deviceContext),
    localStream_(localStream),
    nonLocalStream_(nonLocalStream),
    dimIndex_(dimIndex),
    pulse_(pulse),
    wcycle_(wcycle)
{

    GMX_RELEASE_ASSERT(GMX_THREAD_MPI,
                       "GPU Halo exchange is currently only supported with thread-MPI enabled");

    if (usePBC_ && dd->unitCellInfo.haveScrewPBC)
    {
        gmx_fatal(FARGS, "Error: screw PBC is not yet supported in GPU halo exchange\n");
    }

    changePinningPolicy(&h_indexMap_, gmx::PinningPolicy::PinnedIfSupported);

    allocateDeviceBuffer(&d_fShift_, 1, deviceContext_);
}

GpuHaloExchange::Impl::~Impl()
{
    freeDeviceBuffer(&d_indexMap_);
    freeDeviceBuffer(&d_sendBuf_);
    freeDeviceBuffer(&d_recvBuf_);
    freeDeviceBuffer(&d_fShift_);
    delete haloDataTransferLaunched_;
}

GpuHaloExchange::GpuHaloExchange(gmx_domdec_t*        dd,
                                 int                  dimIndex,
                                 MPI_Comm             mpi_comm_mysim,
                                 const DeviceContext& deviceContext,
                                 const DeviceStream&  localStream,
                                 const DeviceStream&  nonLocalStream,
                                 int                  pulse,
                                 gmx_wallcycle*       wcycle) :
    impl_(new Impl(dd, dimIndex, mpi_comm_mysim, deviceContext, localStream, nonLocalStream, pulse, wcycle))
{
}

GpuHaloExchange::~GpuHaloExchange() = default;

void GpuHaloExchange::reinitHalo(DeviceBuffer<RVec> d_coordinatesBuffer, DeviceBuffer<RVec> d_forcesBuffer)
{
    impl_->reinitHalo(asFloat3(d_coordinatesBuffer), asFloat3(d_forcesBuffer));
}

void GpuHaloExchange::communicateHaloCoordinates(const matrix          box,
                                                 GpuEventSynchronizer* coordinatesReadyOnDeviceEvent)
{
    impl_->communicateHaloCoordinates(box, coordinatesReadyOnDeviceEvent);
}

void GpuHaloExchange::communicateHaloForces(bool accumulateForces)
{
    impl_->communicateHaloForces(accumulateForces);
}

GpuEventSynchronizer* GpuHaloExchange::getForcesReadyOnDeviceEvent()
{
    return impl_->getForcesReadyOnDeviceEvent();
}
} // namespace gmx