/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Implements PME-PP communication using CUDA
 *
 * \author Alan Gray <alang@nvidia.com>
 *
 * \ingroup module_ewald
 */
#include "gmxpre.h"

#include "pme_force_sender_gpu_impl.h"

#include "config.h"

#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/gpueventsynchronizer.h"
#include "gromacs/utility/gmxmpi.h"

namespace gmx
{

/*! \brief Create PME-PP GPU communication object */
PmeForceSenderGpu::Impl::Impl(GpuEventSynchronizer*  pmeForcesReady,
                              MPI_Comm               comm,
                              const DeviceContext&   deviceContext,
                              gmx::ArrayRef<PpRanks> ppRanks) :
    pmeForcesReady_(pmeForcesReady),
    comm_(comm),
    ppRanks_(ppRanks),
    ppCommStream_(ppRanks.size()),
    ppCommEvent_(ppRanks.size()),
    ppCommEventRecorded_(ppRanks.size()),
    deviceContext_(deviceContext),
    pmeRemoteCpuForcePtr_(ppRanks.size()),
    pmeRemoteGpuForcePtr_(ppRanks.size())
{
    // Create streams and events to manage pushing of force buffers to remote PP ranks
    for (size_t i = 0; i < ppRanks_.size(); i++)
    {
        ppCommStream_[i] = std::make_unique<DeviceStream>(deviceContext_, DeviceStreamPriority::High, false);
        ppCommEvent_[i]  = std::make_unique<GpuEventSynchronizer>();
    }
}

PmeForceSenderGpu::Impl::~Impl() = default;

/*! \brief Sets location of forces to be sent to each PP rank */
void PmeForceSenderGpu::Impl::setForceSendBuffer(DeviceBuffer<Float3> d_f)
{

    // The address exchange below is needed only for thread-MPI, where force
    // data is transferred between ranks directly with cudaMemcpy rather than
    // through MPI messages
    if (!GMX_THREAD_MPI)
    {
        return;
    }

#if GMX_MPI

    if (localForcePtr_.empty())
    {
        localForcePtr_.resize(ppRanks_.size());
    }
    int ind_start = 0;
    int ind_end   = 0;
    int i         = 0;
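    // For each PP rank: receive the addresses of its GPU-resident and
    // CPU-staged force buffers, and send back the addresses of the
    // synchronization event and the "event recorded" flag owned by this PME
    // rank, so that the PP rank can later enqueue a wait on the force push.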
    for (const auto& receiver : ppRanks_)
    {
        ind_start = ind_end;
        ind_end   = ind_start + receiver.numAtoms;

        localForcePtr_[i] = &d_f[ind_start];
        // NOLINTNEXTLINE(bugprone-sizeof-expression)
        MPI_Recv(&pmeRemoteGpuForcePtr_[i], sizeof(float3*), MPI_BYTE, receiver.rankId, 0, comm_, MPI_STATUS_IGNORE);
        // NOLINTNEXTLINE(bugprone-sizeof-expression)
        MPI_Recv(&pmeRemoteCpuForcePtr_[i], sizeof(float3*), MPI_BYTE, receiver.rankId, 0, comm_, MPI_STATUS_IGNORE);
        // Send address of event and associated flag to PP rank, to allow remote enqueueing
        // NOLINTNEXTLINE(bugprone-sizeof-expression)
        MPI_Send(&ppCommEvent_[i], sizeof(GpuEventSynchronizer*), MPI_BYTE, receiver.rankId, 0, comm_);

        std::atomic<bool>* tmpPpCommEventRecordedPtr =
                reinterpret_cast<std::atomic<bool>*>(&(ppCommEventRecorded_[i]));
        tmpPpCommEventRecordedPtr->store(false, std::memory_order_release);
        // NOLINTNEXTLINE(bugprone-sizeof-expression)
        MPI_Send(&tmpPpCommEventRecordedPtr, sizeof(std::atomic<bool>*), MPI_BYTE, receiver.rankId, 0, comm_);
        i++;
    }

#else
    GMX_UNUSED_VALUE(d_f);
#endif
}
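
/* Illustrative sketch (not part of this file): the matching handshake on each
 * thread-MPI PP rank is expected to mirror the loop above, roughly as
 *
 *     // send addresses of the PP rank's GPU and CPU force buffers to PME
 *     MPI_Send(&d_ppForces, sizeof(float3*), MPI_BYTE, pmeRank, 0, comm);
 *     MPI_Send(&h_ppForces, sizeof(float3*), MPI_BYTE, pmeRank, 0, comm);
 *     // receive addresses of the PME-side event and "event recorded" flag
 *     MPI_Recv(&pmeForceSendEvent, sizeof(GpuEventSynchronizer*), MPI_BYTE, pmeRank, 0, comm, MPI_STATUS_IGNORE);
 *     MPI_Recv(&pmeForceSendEventRecorded, sizeof(std::atomic<bool>*), MPI_BYTE, pmeRank, 0, comm, MPI_STATUS_IGNORE);
 *
 * where d_ppForces, h_ppForces, pmeForceSendEvent, pmeForceSendEventRecorded
 * and pmeRank are placeholder names; the real counterpart lives in the
 * PP-side PmePpCommGpu implementation.
 */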

/*! \brief Send PME force data directly using CUDA memory copy */
void PmeForceSenderGpu::Impl::sendFToPpCudaDirect(int ppRank, int numAtoms, bool sendForcesDirectToPpGpu)
{
    GMX_ASSERT(GMX_THREAD_MPI, "sendFToPpCudaDirect is expected to be called only for Thread-MPI");

#if GMX_MPI
    float3* pmeRemoteForcePtr =
            sendForcesDirectToPpGpu ? pmeRemoteGpuForcePtr_[ppRank] : pmeRemoteCpuForcePtr_[ppRank];

    pmeForcesReady_->enqueueWaitEvent(*ppCommStream_[ppRank]);

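    // With thread-MPI all ranks live in the same process, so the remote
    // pointer received in setForceSendBuffer() can be used directly as the
    // destination of an asynchronous copy; cudaMemcpyDefault lets the driver
    // choose the transfer kind (device-to-device peer copy, or device-to-host
    // when the forces go to a CPU-staged buffer).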
    cudaError_t stat = cudaMemcpyAsync(pmeRemoteForcePtr,
                                       localForcePtr_[ppRank],
                                       numAtoms * sizeof(rvec),
                                       cudaMemcpyDefault,
                                       ppCommStream_[ppRank]->stream());
    CU_RET_ERR(stat, "cudaMemcpyAsync on PME-to-PP CUDA direct force transfer failed");
    ppCommEvent_[ppRank]->markEvent(*ppCommStream_[ppRank]);
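    // Publish that the event has been recorded; the release store is intended
    // to pair with an acquire load on the PP rank, which polls this flag
    // before enqueueing a wait on the event.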
    std::atomic<bool>* tmpPpCommEventRecordedPtr =
            reinterpret_cast<std::atomic<bool>*>(&(ppCommEventRecorded_[ppRank]));
    tmpPpCommEventRecordedPtr->store(true, std::memory_order_release);
#else
    GMX_UNUSED_VALUE(ppRank);
    GMX_UNUSED_VALUE(numAtoms);
    GMX_UNUSED_VALUE(sendForcesDirectToPpGpu);
#endif
}
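
/* Illustrative sketch (not part of this file): before consuming the pushed
 * forces, the PP rank is expected to poll the flag whose address it received
 * during the handshake and make its local stream wait on the remote event,
 * roughly as
 *
 *     while (!pmeForceSendEventRecorded->load(std::memory_order_acquire)) {}
 *     pmeForceSendEvent->enqueueWaitEvent(localPpStream);
 *     pmeForceSendEventRecorded->store(false, std::memory_order_release);
 *
 * with placeholder names as in the sketch after setForceSendBuffer(); the
 * flag reset prepares the next step's exchange.
 */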

/*! \brief Send PME force data directly using CUDA-aware MPI */
void PmeForceSenderGpu::Impl::sendFToPpCudaMpi(DeviceBuffer<RVec> sendbuf,
                                               int                offset,
                                               int                numBytes,
                                               int                ppRank,
                                               MPI_Request*       request)
{
    GMX_ASSERT(GMX_LIB_MPI, "sendFToPpCudaMpi is expected to be called only for Lib-MPI");

#if GMX_MPI
    // When using GPU-direct communication with CUDA-aware MPI, make sure the
    // forces are ready on the device before sending them to the PP rank
    pmeForcesReady_->waitForEvent();

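    // Non-blocking send straight from device memory; a CUDA-aware MPI
    // implementation handles the device pointer without an intermediate
    // host copy.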
    MPI_Isend(sendbuf[offset], numBytes, MPI_BYTE, ppRank, 0, comm_, request);

#else
    GMX_UNUSED_VALUE(sendbuf);
    GMX_UNUSED_VALUE(offset);
    GMX_UNUSED_VALUE(numBytes);
    GMX_UNUSED_VALUE(ppRank);
    GMX_UNUSED_VALUE(request);
#endif
}
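
/* Illustrative sketch (not part of this file): the PP rank is expected to
 * post the matching non-blocking receive into its own force buffer, roughly
 *
 *     MPI_Irecv(d_ppForces, numBytes, MPI_BYTE, pmeRank, 0, comm, &request);
 *
 * where d_ppForces and pmeRank are placeholder names; the request is
 * completed on the PP side before the received forces are reduced.
 */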

PmeForceSenderGpu::PmeForceSenderGpu(GpuEventSynchronizer*  pmeForcesReady,
                                     MPI_Comm               comm,
                                     const DeviceContext&   deviceContext,
                                     gmx::ArrayRef<PpRanks> ppRanks) :
    impl_(new Impl(pmeForcesReady, comm, deviceContext, ppRanks))
{
}

PmeForceSenderGpu::~PmeForceSenderGpu() = default;

void PmeForceSenderGpu::setForceSendBuffer(DeviceBuffer<RVec> d_f)
{
    impl_->setForceSendBuffer(d_f);
}

void PmeForceSenderGpu::sendFToPpCudaMpi(DeviceBuffer<RVec> sendbuf,
                                         int                offset,
                                         int                numBytes,
                                         int                ppRank,
                                         MPI_Request*       request)
{
    impl_->sendFToPpCudaMpi(sendbuf, offset, numBytes, ppRank, request);
}

void PmeForceSenderGpu::sendFToPpCudaDirect(int ppRank, int numAtoms, bool sendForcesDirectToPpGpu)
{
    impl_->sendFToPpCudaDirect(ppRank, numAtoms, sendForcesDirectToPpGpu);
}

} // namespace gmx