/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Implements GPU halo exchange using CUDA.
 *
 * \author Alan Gray <alang@nvidia.com>
 *
 * \ingroup module_domdec
 */
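
/* A minimal usage sketch of the public API implemented below (illustrative
 * only; the surrounding variable names are assumptions, not part of this file).
 * One GpuHaloExchange object serves one DD dimension and pulse:
 *
 *   gmx::GpuHaloExchange halo(dd, dimIndex, mpi_comm_mysim, deviceContext, pulse, wcycle);
 *   halo.reinitHalo(d_x, d_f); // after each domain (re)partitioning
 *   GpuEventSynchronizer* xHaloDone = halo.communicateHaloCoordinates(box, xReadyOnDevice);
 *   // ... non-local force computation ...
 *   halo.communicateHaloForces(accumulateForces, &fDependencyEvents);
 *   GpuEventSynchronizer* fReady = halo.getForcesReadyOnDeviceEvent();
 */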
#include "gmxpre.h"

#include "gpuhaloexchange_impl.cuh"

#include "config.h"

#include <assert.h>
#include <stdio.h>

#include <utility>

#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/domdec/gpuhaloexchange.h"
#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/device_context.h"
#include "gromacs/gpu_utils/devicebuffer.h"
#include "gromacs/gpu_utils/gpueventsynchronizer.h"
#include "gromacs/gpu_utils/typecasts.cuh"
#include "gromacs/gpu_utils/vectype_ops.cuh"
#include "gromacs/math/vectypes.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/utility/gmxmpi.h"

#include "domdec_internal.h"

namespace gmx
{

//! Number of CUDA threads in a block
// TODO Optimize this through experimentation
constexpr static int c_threadsPerBlock = 256;

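/*! \brief pack non-local coordinate data buffer on the GPU using pre-populated "map" containing index
 * information
 * \param[out] dataPacked       packed array of coordinate values to be transferred
 * \param[in]  data             full array of coordinate values
 * \param[in]  map              array of indices defining mapping from full to packed array
 * \param[in]  mapSize          number of elements in map array
 * \param[in]  coordinateShift  shift to apply to coordinates that cross the periodic boundary
 * \tparam     usePBC           whether to apply the coordinate shift
 */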
template<bool usePBC>
__global__ void packSendBufKernel(float3* __restrict__ dataPacked,
                                  const float3* __restrict__ data,
                                  const int* __restrict__ map,
                                  const int    mapSize,
                                  const float3 coordinateShift)
{
    int threadIndex = blockIdx.x * blockDim.x + threadIdx.x;

    if (threadIndex < mapSize)
    {
        // Compute the addresses only for in-range threads, to avoid an
        // out-of-bounds read of the map array by threads in the last block.
        float3*       gm_dataDest = &dataPacked[threadIndex];
        const float3* gm_dataSrc  = &data[map[threadIndex]];

        if (usePBC)
        {
            *gm_dataDest = *gm_dataSrc + coordinateShift;
        }
        else
        {
            *gm_dataDest = *gm_dataSrc;
        }
    }
}
101
102 /*! \brief unpack non-local force data buffer on the GPU using pre-populated "map" containing index
103  * information
104  * \param[out] data        full array of force values
105  * \param[in]  dataPacked  packed array of force values to be transferred
106  * \param[in]  map         array of indices defining mapping from full to packed array
107  * \param[in]  mapSize     number of elements in map array
108  */
109 template<bool accumulate>
110 __global__ void unpackRecvBufKernel(float3* __restrict__ data,
111                                     const float3* __restrict__ dataPacked,
112                                     const int* __restrict__ map,
113                                     const int mapSize)
114 {
115
116     int           threadIndex = blockIdx.x * blockDim.x + threadIdx.x;
117     const float3* gm_dataSrc  = &dataPacked[threadIndex];
118     float3*       gm_dataDest = &data[map[threadIndex]];
119
120     if (threadIndex < mapSize)
121     {
122         if (accumulate)
123         {
124             *gm_dataDest += *gm_dataSrc;
125         }
126         else
127         {
128             *gm_dataDest = *gm_dataSrc;
129         }
130     }
131 }

void GpuHaloExchange::Impl::reinitHalo(float3* d_coordinatesBuffer, float3* d_forcesBuffer)
{
    wallcycle_start(wcycle_, WallCycleCounter::Domdec);
    wallcycle_sub_start(wcycle_, WallCycleSubCounter::DDGpu);

    d_x_ = d_coordinatesBuffer;
    d_f_ = d_forcesBuffer;

    const gmx_domdec_comm_t&     comm = *dd_->comm;
    const gmx_domdec_comm_dim_t& cd   = comm.cd[dimIndex_];
    const gmx_domdec_ind_t&      ind  = cd.ind[pulse_];

    numHomeAtoms_ = comm.atomRanges.numHomeAtoms(); // offset for data received by this rank

    // Determine receive offset for the dimension index and pulse of this halo exchange object
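    // The zone count doubles with each DD dimension, and nrecv[] holds cumulative
    // receive counts over zones, so nrecv[numZoneTemp + 1] gives the total number of
    // atoms received in a pulse. atomOffset_ thus ends up as the home-atom count plus
    // all halo atoms received in earlier dimensions and in earlier pulses of this one.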
    int numZoneTemp   = 1;
    int numZone       = 0;
    int numAtomsTotal = numHomeAtoms_;
    for (int i = 0; i <= dimIndex_; i++)
    {
        int pulseMax = (i == dimIndex_) ? pulse_ : (comm.cd[i].numPulses() - 1);
        for (int p = 0; p <= pulseMax; p++)
        {
            atomOffset_                     = numAtomsTotal;
            const gmx_domdec_ind_t& indTemp = comm.cd[i].ind[p];
            numAtomsTotal += indTemp.nrecv[numZoneTemp + 1];
        }
        numZone = numZoneTemp;
        numZoneTemp += numZoneTemp;
    }

    int newSize = ind.nsend[numZone + 1];

    GMX_ASSERT(cd.receiveInPlace, "Out-of-place receive is not yet supported in GPU halo exchange");

    // reallocates only if needed
    h_indexMap_.resize(newSize);
    // reallocate on device only if needed
    if (newSize > maxPackedBufferSize_)
    {
        reallocateDeviceBuffer(&d_indexMap_, newSize, &indexMapSize_, &indexMapSizeAlloc_, deviceContext_);
        reallocateDeviceBuffer(&d_sendBuf_, newSize, &sendBufSize_, &sendBufSizeAlloc_, deviceContext_);
        reallocateDeviceBuffer(&d_recvBuf_, newSize, &recvBufSize_, &recvBufSizeAlloc_, deviceContext_);
        maxPackedBufferSize_ = newSize;
    }

    xSendSize_ = newSize;
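    // Exchange halo sizes with the neighbor ranks, so that each rank also
    // knows how many atoms it will receive.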
#if GMX_MPI
    MPI_Sendrecv(&xSendSize_,
                 sizeof(int),
                 MPI_BYTE,
                 sendRankX_,
                 0,
                 &xRecvSize_,
                 sizeof(int),
                 MPI_BYTE,
                 recvRankX_,
                 0,
                 mpi_comm_mysim_,
                 MPI_STATUS_IGNORE);
#endif
    fSendSize_ = xRecvSize_;
    fRecvSize_ = xSendSize_;

    if (newSize > 0)
    {
        GMX_ASSERT(ind.index.size() == h_indexMap_.size(),
                   "Size mismatch between domain decomposition communication index array and GPU "
                   "halo exchange index mapping array");
        std::copy(ind.index.begin(), ind.index.end(), h_indexMap_.begin());

        copyToDeviceBuffer(
                &d_indexMap_, h_indexMap_.data(), 0, newSize, *haloStream_, GpuApiCallBehavior::Async, nullptr);
    }

#if GMX_MPI
    // Exchange of remote addresses between neighboring ranks is needed only with CUDA-direct
    // transfers, since cudaMemcpy needs both the source and the destination pointer.
    // MPI calls such as MPI_Send don't need the receiving address; that is taken care of by the
    // matching MPI_Recv call on the neighboring rank.
    if (GMX_THREAD_MPI)
    {
        // This rank will push data to its neighbor, so it needs to know
        // the remote receive address, and similarly it sends its own receive
        // address to the other neighbour. We can do this here in the reinit function,
        // since the pointers will not change until the next neighbour-search (NS) step.

        // Coordinates buffer:
        float3* recvPtr = &d_x_[atomOffset_];
        MPI_Sendrecv(&recvPtr,
                     sizeof(void*),
                     MPI_BYTE,
                     recvRankX_,
                     0,
                     &remoteXPtr_,
                     sizeof(void*),
                     MPI_BYTE,
                     sendRankX_,
                     0,
                     mpi_comm_mysim_,
                     MPI_STATUS_IGNORE);

        // Force buffer:
        recvPtr = d_recvBuf_;
        MPI_Sendrecv(&recvPtr,
                     sizeof(void*),
                     MPI_BYTE,
                     recvRankF_,
                     0,
                     &remoteFPtr_,
                     sizeof(void*),
                     MPI_BYTE,
                     sendRankF_,
                     0,
                     mpi_comm_mysim_,
                     MPI_STATUS_IGNORE);
    }
#endif

    wallcycle_sub_stop(wcycle_, WallCycleSubCounter::DDGpu);
    wallcycle_stop(wcycle_, WallCycleCounter::Domdec);
}

void GpuHaloExchange::Impl::enqueueWaitRemoteCoordinatesReadyEvent(GpuEventSynchronizer* coordinatesReadyOnDeviceEvent)
{
    GMX_ASSERT(coordinatesReadyOnDeviceEvent != nullptr,
               "Coordinate halo exchange requires a valid coordinates-ready event");

    // Wait for the event from the receiving task signalling that its remote coordinates are ready,
    // and enqueue that event to the stream used for the subsequent data push. This avoids a race
    // condition with the remote data being written in the previous timestep.
    // Similarly, send our event to the task that will push data to this task.
    GpuEventSynchronizer* remoteCoordinatesReadyOnDeviceEvent;
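    // Note: with thread-MPI all ranks are threads of a single process and share an
    // address space, which is what makes exchanging raw event and device pointers
    // between ranks valid here.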
    MPI_Sendrecv(&coordinatesReadyOnDeviceEvent,
                 sizeof(GpuEventSynchronizer*), //NOLINT(bugprone-sizeof-expression)
                 MPI_BYTE,
                 recvRankX_,
                 0,
                 &remoteCoordinatesReadyOnDeviceEvent,
                 sizeof(GpuEventSynchronizer*), //NOLINT(bugprone-sizeof-expression)
                 MPI_BYTE,
                 sendRankX_,
                 0,
                 mpi_comm_mysim_,
                 MPI_STATUS_IGNORE);
    remoteCoordinatesReadyOnDeviceEvent->enqueueWaitEvent(*haloStream_);
}

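// The following method packs the coordinate halo into the send buffer using the
// index map, pushes it to the neighbor rank, and returns an event marking the
// launched halo operations in the halo stream.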
GpuEventSynchronizer* GpuHaloExchange::Impl::communicateHaloCoordinates(const matrix box,
                                                                        GpuEventSynchronizer* dependencyEvent)
{
    wallcycle_start(wcycle_, WallCycleCounter::LaunchGpu);

    // ensure stream waits until dependency has been satisfied
    dependencyEvent->enqueueWaitEvent(*haloStream_);

    wallcycle_sub_start(wcycle_, WallCycleSubCounter::LaunchGpuMoveX);

    // launch kernel to pack send buffer
    KernelLaunchConfig config;
    config.blockSize[0]     = c_threadsPerBlock;
    config.blockSize[1]     = 1;
    config.blockSize[2]     = 1;
    config.gridSize[0]      = (xSendSize_ + c_threadsPerBlock - 1) / c_threadsPerBlock;
    config.gridSize[1]      = 1;
    config.gridSize[2]      = 1;
    config.sharedMemorySize = 0;

    const float3* sendBuf  = d_sendBuf_;
    const float3* d_x      = d_x_;
    const int*    indexMap = d_indexMap_;
    const int     size     = xSendSize_;
    // The coordinateShift changes between steps when we have
    // performed a DD partition, or have updated the box e.g. when
    // performing pressure coupling. So, for simplicity, the box
    // is used every step to pass the shift vector as an argument of
    // the packing kernel.
    const int    boxDimensionIndex = dd_->dim[dimIndex_];
    const float3 coordinateShift{ box[boxDimensionIndex][XX],
                                  box[boxDimensionIndex][YY],
                                  box[boxDimensionIndex][ZZ] };

    // Avoid launching kernel when there is no work to do
    if (size > 0)
    {
        auto kernelFn = usePBC_ ? packSendBufKernel<true> : packSendBufKernel<false>;

        const auto kernelArgs = prepareGpuKernelArguments(
                kernelFn, config, &sendBuf, &d_x, &indexMap, &size, &coordinateShift);

        launchGpuKernel(kernelFn, config, *haloStream_, nullptr, "Domdec GPU Apply X Halo Exchange", kernelArgs);
    }

    wallcycle_sub_stop(wcycle_, WallCycleSubCounter::LaunchGpuMoveX);
    wallcycle_stop(wcycle_, WallCycleCounter::LaunchGpu);

    // Consider time spent in communicateHaloData as Comm.X counter
    // TODO: We need further refinement here, as communicateHaloData includes launch time for cudaMemcpyAsync
    wallcycle_start(wcycle_, WallCycleCounter::MoveX);

    // The wait for remote coordinates is implicit with process MPI, since each rank synchronizes
    // its halo stream before the MPI calls, and the MPI_Wait call ensures the data transfer
    // completes only after both neighboring ranks' streams have been synchronized.
    if (GMX_THREAD_MPI && dimIndex_ == 0 && pulse_ == 0)
    {
        enqueueWaitRemoteCoordinatesReadyEvent(dependencyEvent);
    }

    float3* recvPtr = GMX_THREAD_MPI ? remoteXPtr_ : &d_x_[atomOffset_];
    communicateHaloData(d_sendBuf_, xSendSize_, sendRankX_, recvPtr, xRecvSize_, recvRankX_);

    coordinateHaloLaunched_.markEvent(*haloStream_);

    wallcycle_stop(wcycle_, WallCycleCounter::MoveX);

    return &coordinateHaloLaunched_;
}

// The following method should be called after non-local buffer operations,
// and before the local buffer operations.
void GpuHaloExchange::Impl::communicateHaloForces(bool accumulateForces,
                                                  FixedCapacityVector<GpuEventSynchronizer*, 2>* dependencyEvents)
{
    // Consider time spent in communicateHaloData as Comm.F counter
    // TODO: We need further refinement here, as communicateHaloData includes launch time for cudaMemcpyAsync
    wallcycle_start(wcycle_, WallCycleCounter::MoveF);

    // Ensure the halo stream waits on all dependencies before the transfer is initiated
    while (!dependencyEvents->empty())
    {
        auto* dependency = dependencyEvents->back();
        dependency->enqueueWaitEvent(*haloStream_);
        dependencyEvents->pop_back();
    }

    float3* recvPtr = GMX_THREAD_MPI ? remoteFPtr_ : d_recvBuf_;

    // Communicate halo data
    communicateHaloData(&(d_f_[atomOffset_]), fSendSize_, sendRankF_, recvPtr, fRecvSize_, recvRankF_);

    wallcycle_stop(wcycle_, WallCycleCounter::MoveF);

    wallcycle_start_nocount(wcycle_, WallCycleCounter::LaunchGpu);
    wallcycle_sub_start(wcycle_, WallCycleSubCounter::LaunchGpuMoveF);

    float3* d_f = d_f_;

    // Unpack halo buffer into force array

    KernelLaunchConfig config;
    config.blockSize[0]     = c_threadsPerBlock;
    config.blockSize[1]     = 1;
    config.blockSize[2]     = 1;
    config.gridSize[0]      = (fRecvSize_ + c_threadsPerBlock - 1) / c_threadsPerBlock;
    config.gridSize[1]      = 1;
    config.gridSize[2]      = 1;
    config.sharedMemorySize = 0;

    const float3* recvBuf  = d_recvBuf_;
    const int*    indexMap = d_indexMap_;
    const int     size     = fRecvSize_;

    if (pulse_ > 0 || dd_->ndim > 1)
    {
        // We need to accumulate rather than set, since it is possible
        // that, in this pulse/dim, a value could be written to a location
        // corresponding to the halo region of a following pulse/dim.
        accumulateForces = true;
    }

    if (size > 0)
    {
        auto kernelFn = accumulateForces ? unpackRecvBufKernel<true> : unpackRecvBufKernel<false>;

        const auto kernelArgs =
                prepareGpuKernelArguments(kernelFn, config, &d_f, &recvBuf, &indexMap, &size);

        launchGpuKernel(kernelFn, config, *haloStream_, nullptr, "Domdec GPU Apply F Halo Exchange", kernelArgs);
    }

    fReadyOnDevice_.markEvent(*haloStream_);

    wallcycle_sub_stop(wcycle_, WallCycleSubCounter::LaunchGpuMoveF);
    wallcycle_stop(wcycle_, WallCycleCounter::LaunchGpu);
}

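// Dispatch the halo data transfer to the CUDA-direct path (thread-MPI) or to the
// CUDA-aware MPI path (process MPI).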
void GpuHaloExchange::Impl::communicateHaloData(float3* sendPtr,
                                                int     sendSize,
                                                int     sendRank,
                                                float3* recvPtr,
                                                int     recvSize,
                                                int     recvRank)
{
    if (GMX_THREAD_MPI)
    {
        // No need to explicitly sync with GMX_THREAD_MPI, as all operations are
        // launched in the correct stream anyway.
        communicateHaloDataWithCudaDirect(sendPtr, sendSize, sendRank, recvPtr, recvRank);
    }
    else
    {
        communicateHaloDataWithCudaMPI(sendPtr, sendSize, sendRank, recvPtr, recvSize, recvRank);
    }
}

void GpuHaloExchange::Impl::communicateHaloDataWithCudaMPI(float3* sendPtr,
                                                           int     sendSize,
                                                           int     sendRank,
                                                           float3* recvPtr,
                                                           int     recvSize,
                                                           int     recvRank)
{
    // no need to wait for haloDataReadyOnDevice event if this rank is not sending any data
    if (sendSize > 0)
    {
        // wait for halo stream to complete all outstanding
        // activities, to ensure that the buffer is up to date in GPU memory
        // before transferring to remote rank

        // ToDo: Replace stream synchronize with event synchronize
        haloStream_->synchronize();
    }

    // perform halo exchange directly in device buffers
#if GMX_MPI
    MPI_Request request;

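    // Post the non-blocking receive before the blocking send, so that both
    // neighboring ranks can exchange halo data simultaneously without deadlocking.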
    // recv remote data into halo region
    MPI_Irecv(recvPtr, recvSize * DIM, MPI_FLOAT, recvRank, 0, mpi_comm_mysim_, &request);

    // send data to remote halo region
    MPI_Send(sendPtr, sendSize * DIM, MPI_FLOAT, sendRank, 0, mpi_comm_mysim_);

    MPI_Wait(&request, MPI_STATUS_IGNORE);
#endif
}

void GpuHaloExchange::Impl::communicateHaloDataWithCudaDirect(float3* sendPtr,
                                                              int     sendSize,
                                                              int     sendRank,
                                                              float3* remotePtr,
                                                              int     recvRank)
{
    cudaError_t stat;

    // We asynchronously push data to the remote rank. The remote
    // destination pointer has already been set in the init fn. We
    // don't need to worry about overwriting data the remote rank
    // still needs, since the halo exchange is only done once per
    // timestep, for each of X and F.

    // send data to neighbor, if any data exists to send
    if (sendSize > 0)
    {
        stat = cudaMemcpyAsync(remotePtr,
                               sendPtr,
                               sendSize * DIM * sizeof(float),
                               cudaMemcpyDeviceToDevice,
                               haloStream_->stream());

        CU_RET_ERR(stat, "cudaMemcpyAsync on GPU Domdec CUDA direct data transfer failed");
    }

#if GMX_MPI
    // Ensure the pushed data has arrived before the remote rank progresses.
    // This rank records an event and sends it to the remote rank to which it has just pushed data.
    // This rank receives the event from the remote rank which has pushed data here, and enqueues
    // that event to its own stream.
    GpuEventSynchronizer* haloDataTransferRemote;

    GMX_ASSERT(haloDataTransferLaunched_ != nullptr,
               "Halo exchange requires valid event to synchronize data transfer initiated in "
               "remote rank");
    haloDataTransferLaunched_->markEvent(*haloStream_);

    MPI_Sendrecv(&haloDataTransferLaunched_,
                 sizeof(GpuEventSynchronizer*), //NOLINT(bugprone-sizeof-expression)
                 MPI_BYTE,
                 sendRank,
                 0,
                 &haloDataTransferRemote,
                 sizeof(GpuEventSynchronizer*), //NOLINT(bugprone-sizeof-expression)
                 MPI_BYTE,
                 recvRank,
                 0,
                 mpi_comm_mysim_,
                 MPI_STATUS_IGNORE);

    haloDataTransferRemote->enqueueWaitEvent(*haloStream_);
#else
    GMX_UNUSED_VALUE(sendRank);
    GMX_UNUSED_VALUE(recvRank);
#endif
}

GpuEventSynchronizer* GpuHaloExchange::Impl::getForcesReadyOnDeviceEvent()
{
    return &fReadyOnDevice_;
}

/*! \brief Create Domdec GPU object
 *
 * \param[in] dd              domdec structure
 * \param[in] dimIndex        the dimension index for this instance
 * \param[in] mpi_comm_mysim  communicator used for simulation
 * \param[in] deviceContext   GPU device context
 * \param[in] pulse           the communication pulse for this instance
 * \param[in] wcycle          the wallclock counter
 */
GpuHaloExchange::Impl::Impl(gmx_domdec_t*        dd,
                            int                  dimIndex,
                            MPI_Comm             mpi_comm_mysim,
                            const DeviceContext& deviceContext,
                            int                  pulse,
                            gmx_wallcycle*       wcycle) :
    dd_(dd),
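    // Coordinates are sent to the neighbor in one direction along this dimension,
    // while forces travel back the opposite way, hence the mirrored send/receive ranks.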
    sendRankX_(dd->neighbor[dimIndex][1]),
    recvRankX_(dd->neighbor[dimIndex][0]),
    sendRankF_(dd->neighbor[dimIndex][0]),
    recvRankF_(dd->neighbor[dimIndex][1]),
    usePBC_(dd->ci[dd->dim[dimIndex]] == 0),
    haloDataTransferLaunched_(GMX_THREAD_MPI ? new GpuEventSynchronizer() : nullptr),
    mpi_comm_mysim_(mpi_comm_mysim),
    deviceContext_(deviceContext),
    haloStream_(new DeviceStream(deviceContext, DeviceStreamPriority::High, false)),
    dimIndex_(dimIndex),
    pulse_(pulse),
    wcycle_(wcycle)
{
    if (usePBC_ && dd->unitCellInfo.haveScrewPBC)
    {
        gmx_fatal(FARGS, "Error: screw is not yet supported in GPU halo exchange\n");
    }

    changePinningPolicy(&h_indexMap_, gmx::PinningPolicy::PinnedIfSupported);

    allocateDeviceBuffer(&d_fShift_, 1, deviceContext_);
}

GpuHaloExchange::Impl::~Impl()
{
    freeDeviceBuffer(&d_indexMap_);
    freeDeviceBuffer(&d_sendBuf_);
    freeDeviceBuffer(&d_recvBuf_);
    freeDeviceBuffer(&d_fShift_);
    delete haloDataTransferLaunched_;
}

GpuHaloExchange::GpuHaloExchange(gmx_domdec_t*        dd,
                                 int                  dimIndex,
                                 MPI_Comm             mpi_comm_mysim,
                                 const DeviceContext& deviceContext,
                                 int                  pulse,
                                 gmx_wallcycle*       wcycle) :
    impl_(new Impl(dd, dimIndex, mpi_comm_mysim, deviceContext, pulse, wcycle))
{
}

GpuHaloExchange::GpuHaloExchange(GpuHaloExchange&&) noexcept = default;

GpuHaloExchange& GpuHaloExchange::operator=(GpuHaloExchange&& other) noexcept
{
    std::swap(impl_, other.impl_);
    return *this;
}

GpuHaloExchange::~GpuHaloExchange() = default;

void GpuHaloExchange::reinitHalo(DeviceBuffer<RVec> d_coordinatesBuffer, DeviceBuffer<RVec> d_forcesBuffer)
{
    impl_->reinitHalo(asFloat3(d_coordinatesBuffer), asFloat3(d_forcesBuffer));
}

GpuEventSynchronizer* GpuHaloExchange::communicateHaloCoordinates(const matrix          box,
                                                                  GpuEventSynchronizer* dependencyEvent)
{
    return impl_->communicateHaloCoordinates(box, dependencyEvent);
}

void GpuHaloExchange::communicateHaloForces(bool accumulateForces,
                                            FixedCapacityVector<GpuEventSynchronizer*, 2>* dependencyEvents)
{
    impl_->communicateHaloForces(accumulateForces, dependencyEvents);
}

GpuEventSynchronizer* GpuHaloExchange::getForcesReadyOnDeviceEvent()
{
    return impl_->getForcesReadyOnDeviceEvent();
}
} // namespace gmx