/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Implements GPU halo exchange using CUDA.
 *
 * \author Alan Gray <alang@nvidia.com>
 *
 * \ingroup module_domdec
 */
#include "gmxpre.h"

#include "gpuhaloexchange_impl.cuh"

#include "config.h"

#include <assert.h>
#include <stdio.h>

#include <utility>

#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/domdec/gpuhaloexchange.h"
#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/device_context.h"
#include "gromacs/gpu_utils/devicebuffer.h"
#include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
#include "gromacs/gpu_utils/typecasts.cuh"
#include "gromacs/gpu_utils/vectype_ops.cuh"
#include "gromacs/math/vectypes.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/utility/gmxmpi.h"

#include "domdec_internal.h"

namespace gmx
{

//! Number of CUDA threads in a block
// TODO Optimize this through experimentation
constexpr static int c_threadsPerBlock = 256;

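/*! \brief Pack non-local coordinate data buffer on the GPU using a pre-populated "map" containing index information.
 *
 * \param[out] dataPacked       packed array of coordinate values to be transferred
 * \param[in]  data             full array of coordinate values
 * \param[in]  map              array of indices defining mapping from full to packed array
 * \param[in]  mapSize          number of elements in map array
 * \param[in]  coordinateShift  shift vector to apply when periodic boundary conditions are in use
 */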
template<bool usePBC>
__global__ void packSendBufKernel(float3* __restrict__ dataPacked,
                                  const float3* __restrict__ data,
                                  const int* __restrict__ map,
                                  const int    mapSize,
                                  const float3 coordinateShift)
{
    int threadIndex = blockIdx.x * blockDim.x + threadIdx.x;

    // Check the bound before reading map[threadIndex], so that out-of-range
    // threads do not perform an out-of-bounds access on the map array
    if (threadIndex < mapSize)
    {
        float3*       gm_dataDest = &dataPacked[threadIndex];
        const float3* gm_dataSrc  = &data[map[threadIndex]];

        if (usePBC)
        {
            *gm_dataDest = *gm_dataSrc + coordinateShift;
        }
        else
        {
            *gm_dataDest = *gm_dataSrc;
        }
    }
}

/*! \brief Unpack non-local force data buffer on the GPU using a pre-populated "map" containing index information.
 *
 * \param[out] data        full array of force values
 * \param[in]  dataPacked  packed array of force values to be transferred
 * \param[in]  map         array of indices defining mapping from full to packed array
 * \param[in]  mapSize     number of elements in map array
 */
template<bool accumulate>
__global__ void unpackRecvBufKernel(float3* __restrict__ data,
                                    const float3* __restrict__ dataPacked,
                                    const int* __restrict__ map,
                                    const int mapSize)
{
    int threadIndex = blockIdx.x * blockDim.x + threadIdx.x;

    // Check the bound before reading map[threadIndex], so that out-of-range
    // threads do not perform an out-of-bounds access on the map array
    if (threadIndex < mapSize)
    {
        const float3* gm_dataSrc  = &dataPacked[threadIndex];
        float3*       gm_dataDest = &data[map[threadIndex]];

        if (accumulate)
        {
            *gm_dataDest += *gm_dataSrc;
        }
        else
        {
            *gm_dataDest = *gm_dataSrc;
        }
    }
}

void GpuHaloExchange::Impl::reinitHalo(float3* d_coordinatesBuffer, float3* d_forcesBuffer)
{
    wallcycle_start(wcycle_, ewcDOMDEC);
    wallcycle_sub_start(wcycle_, ewcsDD_GPU);

    d_x_ = d_coordinatesBuffer;
    d_f_ = d_forcesBuffer;

    const gmx_domdec_comm_t&     comm = *dd_->comm;
    const gmx_domdec_comm_dim_t& cd   = comm.cd[dimIndex_];
    const gmx_domdec_ind_t&      ind  = cd.ind[pulse_];

    numHomeAtoms_ = comm.atomRanges.numHomeAtoms(); // offset for data received by this rank

    // Determine receive offset for the dimension index and pulse of this halo exchange object
    int numZoneTemp   = 1;
    int numZone       = 0;
    int numAtomsTotal = numHomeAtoms_;
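    // Note: the number of communication zones doubles with each successive
    // domain-decomposition dimension, hence numZoneTemp is doubled per iteration below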
    for (int i = 0; i <= dimIndex_; i++)
    {
        int pulseMax = (i == dimIndex_) ? pulse_ : (comm.cd[i].numPulses() - 1);
        for (int p = 0; p <= pulseMax; p++)
        {
            atomOffset_                     = numAtomsTotal;
            const gmx_domdec_ind_t& indTemp = comm.cd[i].ind[p];
            numAtomsTotal += indTemp.nrecv[numZoneTemp + 1];
        }
        numZone = numZoneTemp;
        numZoneTemp += numZoneTemp;
    }

    int newSize = ind.nsend[numZone + 1];

    GMX_ASSERT(cd.receiveInPlace, "Out-of-place receive is not yet supported in GPU halo exchange");

    // reallocates only if needed
    h_indexMap_.resize(newSize);
    // reallocate on device only if needed
    if (newSize > maxPackedBufferSize_)
    {
        reallocateDeviceBuffer(&d_indexMap_, newSize, &indexMapSize_, &indexMapSizeAlloc_, deviceContext_);
        reallocateDeviceBuffer(&d_sendBuf_, newSize, &sendBufSize_, &sendBufSizeAlloc_, deviceContext_);
        reallocateDeviceBuffer(&d_recvBuf_, newSize, &recvBufSize_, &recvBufSizeAlloc_, deviceContext_);
        maxPackedBufferSize_ = newSize;
    }

    xSendSize_ = newSize;
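    // Exchange the send size with the neighbor ranks, so that each rank
    // knows how many halo atoms it will receive in the coordinate transfer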
#if GMX_MPI
    MPI_Sendrecv(&xSendSize_,
                 sizeof(int),
                 MPI_BYTE,
                 sendRankX_,
                 0,
                 &xRecvSize_,
                 sizeof(int),
                 MPI_BYTE,
                 recvRankX_,
                 0,
                 mpi_comm_mysim_,
                 MPI_STATUS_IGNORE);
#endif
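    // The force halo travels in the opposite direction to the coordinate halo,
    // so the force send/receive sizes are the coordinate sizes swapped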
    fSendSize_ = xRecvSize_;
    fRecvSize_ = xSendSize_;

    if (newSize > 0)
    {
        GMX_ASSERT(ind.index.size() == h_indexMap_.size(),
                   "Size mismatch between domain decomposition communication index array and GPU "
                   "halo exchange index mapping array");
        std::copy(ind.index.begin(), ind.index.end(), h_indexMap_.begin());

        copyToDeviceBuffer(
                &d_indexMap_, h_indexMap_.data(), 0, newSize, nonLocalStream_, GpuApiCallBehavior::Async, nullptr);
    }
    // This rank will push data to its neighbor, so it needs to know the remote
    // receive address, and it must similarly send its own receive address to the
    // other neighbor. We can do this here, in the reinit function, since the
    // pointers will not change until the next NS step.

    // Coordinates buffer:
    void* recvPtr = static_cast<void*>(&d_x_[atomOffset_]);
#if GMX_MPI
    MPI_Sendrecv(&recvPtr,
                 sizeof(void*),
                 MPI_BYTE,
                 recvRankX_,
                 0,
                 &remoteXPtr_,
                 sizeof(void*),
                 MPI_BYTE,
                 sendRankX_,
                 0,
                 mpi_comm_mysim_,
                 MPI_STATUS_IGNORE);

    // Force buffer:
    recvPtr = static_cast<void*>(d_recvBuf_);
    MPI_Sendrecv(&recvPtr,
                 sizeof(void*),
                 MPI_BYTE,
                 recvRankF_,
                 0,
                 &remoteFPtr_,
                 sizeof(void*),
                 MPI_BYTE,
                 sendRankF_,
                 0,
                 mpi_comm_mysim_,
                 MPI_STATUS_IGNORE);
#endif

    wallcycle_sub_stop(wcycle_, ewcsDD_GPU);
    wallcycle_stop(wcycle_, ewcDOMDEC);
}

void GpuHaloExchange::Impl::communicateHaloCoordinates(const matrix          box,
                                                       GpuEventSynchronizer* coordinatesReadyOnDeviceEvent)
{
    wallcycle_start(wcycle_, ewcLAUNCH_GPU);
    if (pulse_ == 0)
    {
        // ensure stream waits until coordinate data is available on device
        coordinatesReadyOnDeviceEvent->enqueueWaitEvent(nonLocalStream_);
    }

    wallcycle_sub_start(wcycle_, ewcsLAUNCH_GPU_MOVEX);

    // launch kernel to pack send buffer
    KernelLaunchConfig config;
    config.blockSize[0]     = c_threadsPerBlock;
    config.blockSize[1]     = 1;
    config.blockSize[2]     = 1;
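    // One thread per element to pack; integer ceiling division rounds up to whole blocks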
    config.gridSize[0]      = (xSendSize_ + c_threadsPerBlock - 1) / c_threadsPerBlock;
    config.gridSize[1]      = 1;
    config.gridSize[2]      = 1;
    config.sharedMemorySize = 0;

    const float3* sendBuf  = d_sendBuf_;
    const float3* d_x      = d_x_;
    const int*    indexMap = d_indexMap_;
    const int     size     = xSendSize_;
    // The coordinateShift changes between steps when we have
    // performed a DD partition, or have updated the box e.g. when
    // performing pressure coupling. So, for simplicity, the shift
    // vector is taken from the box and passed as an argument to the
    // packing kernel on every step.
    const int    boxDimensionIndex = dd_->dim[dimIndex_];
    const float3 coordinateShift{ box[boxDimensionIndex][XX],
                                  box[boxDimensionIndex][YY],
                                  box[boxDimensionIndex][ZZ] };

    // Avoid launching kernel when there is no work to do
    if (size > 0)
    {
        auto kernelFn = usePBC_ ? packSendBufKernel<true> : packSendBufKernel<false>;

        const auto kernelArgs = prepareGpuKernelArguments(
                kernelFn, config, &sendBuf, &d_x, &indexMap, &size, &coordinateShift);

        launchGpuKernel(
                kernelFn, config, nonLocalStream_, nullptr, "Domdec GPU Apply X Halo Exchange", kernelArgs);
    }

    wallcycle_sub_stop(wcycle_, ewcsLAUNCH_GPU_MOVEX);
    wallcycle_stop(wcycle_, ewcLAUNCH_GPU);

    // Consider time spent in communicateHaloData as Comm.X counter
    // TODO: We need further refinement here, as communicateHaloData includes launch time for cudaMemcpyAsync
    wallcycle_start(wcycle_, ewcMOVEX);

    communicateHaloData(d_x_, HaloQuantity::HaloCoordinates, coordinatesReadyOnDeviceEvent);

    wallcycle_stop(wcycle_, ewcMOVEX);
}

// The following method should be called after non-local buffer operations,
// and before the local buffer operations. It operates in the non-local stream.
void GpuHaloExchange::Impl::communicateHaloForces(bool accumulateForces)
{
    // Consider time spent in communicateHaloData as Comm.F counter
    // TODO: We need further refinement here, as communicateHaloData includes launch time for cudaMemcpyAsync
    wallcycle_start(wcycle_, ewcMOVEF);

    // Communicate halo data (in non-local stream)
    communicateHaloData(d_f_, HaloQuantity::HaloForces, nullptr);

    wallcycle_stop(wcycle_, ewcMOVEF);

    wallcycle_start_nocount(wcycle_, ewcLAUNCH_GPU);
    wallcycle_sub_start(wcycle_, ewcsLAUNCH_GPU_MOVEF);

    float3* d_f = d_f_;
    // If this is the last pulse and dimension index (noting that force halo
    // exchanges across multiple pulses and dimension indices are called in
    // reverse order), then perform the following preparation activities
    if ((pulse_ == (dd_->comm->cd[dimIndex_].numPulses() - 1)) && (dimIndex_ == (dd_->ndim - 1)))
    {
        if (!accumulateForces)
        {
            // Clear local portion of force array (in local stream)
            cudaMemsetAsync(d_f, 0, numHomeAtoms_ * sizeof(rvec), localStream_.stream());
        }

        // ensure non-local stream waits for local stream, due to dependence on
        // the previous H2D copy of CPU forces (if accumulateForces is true)
        // or the above clearing.
        // TODO remove this dependency on localStream - Redmine Issue #3093
        GpuEventSynchronizer eventLocal;
        eventLocal.markEvent(localStream_);
        eventLocal.enqueueWaitEvent(nonLocalStream_);
    }

    // Unpack halo buffer into force array

    KernelLaunchConfig config;
    config.blockSize[0]     = c_threadsPerBlock;
    config.blockSize[1]     = 1;
    config.blockSize[2]     = 1;
    config.gridSize[0]      = (fRecvSize_ + c_threadsPerBlock - 1) / c_threadsPerBlock;
    config.gridSize[1]      = 1;
    config.gridSize[2]      = 1;
    config.sharedMemorySize = 0;

    const float3* recvBuf  = d_recvBuf_;
    const int*    indexMap = d_indexMap_;
    const int     size     = fRecvSize_;

    if (pulse_ > 0 || dd_->ndim > 1)
    {
        // We need to accumulate rather than set, since it is possible
        // that, in this pulse/dim, a value could be written to a location
        // corresponding to the halo region of a following pulse/dim.
        accumulateForces = true;
    }

    if (size > 0)
    {
        auto kernelFn = accumulateForces ? unpackRecvBufKernel<true> : unpackRecvBufKernel<false>;

        const auto kernelArgs =
                prepareGpuKernelArguments(kernelFn, config, &d_f, &recvBuf, &indexMap, &size);

        launchGpuKernel(
                kernelFn, config, nonLocalStream_, nullptr, "Domdec GPU Apply F Halo Exchange", kernelArgs);
    }

    if (pulse_ == 0)
    {
        fReadyOnDevice_.markEvent(nonLocalStream_);
    }

    wallcycle_sub_stop(wcycle_, ewcsLAUNCH_GPU_MOVEF);
    wallcycle_stop(wcycle_, ewcLAUNCH_GPU);
}


void GpuHaloExchange::Impl::communicateHaloData(float3*               d_ptr,
                                                HaloQuantity          haloQuantity,
                                                GpuEventSynchronizer* coordinatesReadyOnDeviceEvent)
{
    void* sendPtr;
    int   sendSize;
    void* remotePtr;
    int   sendRank;
    int   recvRank;

    if (haloQuantity == HaloQuantity::HaloCoordinates)
    {
        sendPtr   = static_cast<void*>(d_sendBuf_);
        sendSize  = xSendSize_;
        remotePtr = remoteXPtr_;
        sendRank  = sendRankX_;
        recvRank  = recvRankX_;

#if GMX_MPI
        // Wait for the event from the receiving task signalling that the remote coordinates are
        // ready, and enqueue that event in the stream used for the subsequent data push. This
        // avoids a race condition with the remote data being written in the previous timestep.
        // Similarly, send an event to the task that will push data to this task.
        GpuEventSynchronizer* remoteCoordinatesReadyOnDeviceEvent;
        MPI_Sendrecv(&coordinatesReadyOnDeviceEvent,
                     sizeof(GpuEventSynchronizer*),
                     MPI_BYTE,
                     recvRank,
                     0,
                     &remoteCoordinatesReadyOnDeviceEvent,
                     sizeof(GpuEventSynchronizer*),
                     MPI_BYTE,
                     sendRank,
                     0,
                     mpi_comm_mysim_,
                     MPI_STATUS_IGNORE);
        remoteCoordinatesReadyOnDeviceEvent->enqueueWaitEvent(nonLocalStream_);
#else
        GMX_UNUSED_VALUE(coordinatesReadyOnDeviceEvent);
#endif
    }
    else
    {
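        // Force halo: push the halo region of the force buffer into the
        // remote rank's packed receive buffer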
        sendPtr   = static_cast<void*>(&(d_ptr[atomOffset_]));
        sendSize  = fSendSize_;
        remotePtr = remoteFPtr_;
        sendRank  = sendRankF_;
        recvRank  = recvRankF_;
    }

    communicateHaloDataWithCudaDirect(sendPtr, sendSize, sendRank, remotePtr, recvRank);
}

void GpuHaloExchange::Impl::communicateHaloDataWithCudaDirect(void* sendPtr,
                                                              int   sendSize,
                                                              int   sendRank,
                                                              void* remotePtr,
                                                              int   recvRank)
{
    cudaError_t stat;

    // We asynchronously push data to the remote rank. The remote
    // destination pointer has already been set in the init function. We
    // don't need to worry about overwriting data the remote rank
    // still needs, since the halo exchange is only done once per
    // timestep, for each of X and F.

    // send data to neighbor, if any data exists to send
    if (sendSize > 0)
    {
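        // sendSize counts float3 elements, i.e. DIM floats per element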
        stat = cudaMemcpyAsync(remotePtr,
                               sendPtr,
                               sendSize * DIM * sizeof(float),
                               cudaMemcpyDeviceToDevice,
                               nonLocalStream_.stream());

        CU_RET_ERR(stat, "cudaMemcpyAsync on GPU Domdec CUDA direct data transfer failed");
    }

#if GMX_MPI
    // Ensure the pushed data has arrived before the remote rank progresses.
    // This rank records an event and sends it to the remote rank which has just been pushed data.
    // This rank receives the event from the remote rank which has pushed data here, and enqueues
    // that event in its stream.
    GpuEventSynchronizer* haloDataTransferRemote;

    haloDataTransferLaunched_->markEvent(nonLocalStream_);

    MPI_Sendrecv(&haloDataTransferLaunched_,
                 sizeof(GpuEventSynchronizer*),
                 MPI_BYTE,
                 sendRank,
                 0,
                 &haloDataTransferRemote,
                 sizeof(GpuEventSynchronizer*),
                 MPI_BYTE,
                 recvRank,
                 0,
                 mpi_comm_mysim_,
                 MPI_STATUS_IGNORE);

    haloDataTransferRemote->enqueueWaitEvent(nonLocalStream_);
#else
    GMX_UNUSED_VALUE(sendRank);
    GMX_UNUSED_VALUE(recvRank);
#endif
}

GpuEventSynchronizer* GpuHaloExchange::Impl::getForcesReadyOnDeviceEvent()
{
    return &fReadyOnDevice_;
}

/*! \brief Create Domdec GPU object */
GpuHaloExchange::Impl::Impl(gmx_domdec_t*        dd,
                            int                  dimIndex,
                            MPI_Comm             mpi_comm_mysim,
                            const DeviceContext& deviceContext,
                            const DeviceStream&  localStream,
                            const DeviceStream&  nonLocalStream,
                            int                  pulse,
                            gmx_wallcycle*       wcycle) :
    dd_(dd),
    sendRankX_(dd->neighbor[dimIndex][1]),
    recvRankX_(dd->neighbor[dimIndex][0]),
    sendRankF_(dd->neighbor[dimIndex][0]),
    recvRankF_(dd->neighbor[dimIndex][1]),
    usePBC_(dd->ci[dd->dim[dimIndex]] == 0),
    haloDataTransferLaunched_(new GpuEventSynchronizer()),
    mpi_comm_mysim_(mpi_comm_mysim),
    deviceContext_(deviceContext),
    localStream_(localStream),
    nonLocalStream_(nonLocalStream),
    dimIndex_(dimIndex),
    pulse_(pulse),
    wcycle_(wcycle)
{
    GMX_RELEASE_ASSERT(GMX_THREAD_MPI,
                       "GPU Halo exchange is currently only supported with thread-MPI enabled");

    if (usePBC_ && dd->unitCellInfo.haveScrewPBC)
    {
        gmx_fatal(FARGS, "Error: screw is not yet supported in GPU halo exchange\n");
    }

    changePinningPolicy(&h_indexMap_, gmx::PinningPolicy::PinnedIfSupported);

    allocateDeviceBuffer(&d_fShift_, 1, deviceContext_);
}

GpuHaloExchange::Impl::~Impl()
{
    freeDeviceBuffer(&d_indexMap_);
    freeDeviceBuffer(&d_sendBuf_);
    freeDeviceBuffer(&d_recvBuf_);
    freeDeviceBuffer(&d_fShift_);
    delete haloDataTransferLaunched_;
}

GpuHaloExchange::GpuHaloExchange(gmx_domdec_t*        dd,
                                 int                  dimIndex,
                                 MPI_Comm             mpi_comm_mysim,
                                 const DeviceContext& deviceContext,
                                 const DeviceStream&  localStream,
                                 const DeviceStream&  nonLocalStream,
                                 int                  pulse,
                                 gmx_wallcycle*       wcycle) :
    impl_(new Impl(dd, dimIndex, mpi_comm_mysim, deviceContext, localStream, nonLocalStream, pulse, wcycle))
{
}

GpuHaloExchange::GpuHaloExchange(GpuHaloExchange&&) noexcept = default;

GpuHaloExchange& GpuHaloExchange::operator=(GpuHaloExchange&& other) noexcept
{
    std::swap(impl_, other.impl_);
    return *this;
}

GpuHaloExchange::~GpuHaloExchange() = default;

void GpuHaloExchange::reinitHalo(DeviceBuffer<RVec> d_coordinatesBuffer, DeviceBuffer<RVec> d_forcesBuffer)
{
    impl_->reinitHalo(asFloat3(d_coordinatesBuffer), asFloat3(d_forcesBuffer));
}

void GpuHaloExchange::communicateHaloCoordinates(const matrix          box,
                                                 GpuEventSynchronizer* coordinatesReadyOnDeviceEvent)
{
    impl_->communicateHaloCoordinates(box, coordinatesReadyOnDeviceEvent);
}

void GpuHaloExchange::communicateHaloForces(bool accumulateForces)
{
    impl_->communicateHaloForces(accumulateForces);
}

GpuEventSynchronizer* GpuHaloExchange::getForcesReadyOnDeviceEvent()
{
    return impl_->getForcesReadyOnDeviceEvent();
}
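
// A minimal usage sketch (hypothetical caller, not part of GROMACS itself; names
// such as dd, box, and xReadyEvent are illustrative placeholders). One
// GpuHaloExchange object is created per dimension index and pulse, reinitialized
// after each domain-decomposition repartitioning, and driven once per step:
//
//     gmx::GpuHaloExchange haloExchange(
//             dd, dimIndex, mpiComm, deviceContext, localStream, nonLocalStream, pulse, wcycle);
//     haloExchange.reinitHalo(d_x, d_f);                          // after each (re)partitioning
//     haloExchange.communicateHaloCoordinates(box, xReadyEvent);  // before non-local force work
//     haloExchange.communicateHaloForces(accumulateForces);       // after non-local force work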
} // namespace gmx