/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Implements the class which receives coordinates into GPU memory on the PME task, using CUDA
 *
 * \author Alan Gray <alang@nvidia.com>
 *
 * \ingroup module_ewald
 */
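/*
 * A minimal sketch of how the PME task might drive this class, based only on the
 * public interface defined in this file. The surrounding setup code and the names
 * pmeStream, comm, ppRanks, d_coordinates, atomOffset and numBytes are assumptions
 * for illustration, not taken from the actual calling code.
 *
 *   gmx::PmeCoordinateReceiverGpu receiver(pmeStream, comm, ppRanks);
 *
 *   // Thread-MPI path: publish the device coordinate buffer once, then each step
 *   // collect a synchronizer from every PP rank.
 *   receiver.sendCoordinateBufferAddressToPpRanks(d_coordinates);
 *   for (const auto& ppRank : ppRanks)
 *   {
 *       receiver.receiveCoordinatesSynchronizerFromPpCudaDirect(ppRank.rankId);
 *   }
 *
 *   // Lib-MPI (CUDA-aware) path: instead post one receive per PP rank.
 *   // receiver.launchReceiveCoordinatesFromPpCudaMpi(d_coordinates, atomOffset, numBytes, ppRank.rankId);
 *
 *   // In either case, block the PME stream until all coordinates have arrived.
 *   receiver.synchronizeOnCoordinatesFromPpRanks();
 */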
#include "gmxpre.h"

#include "gromacs/ewald/pme_pp_communication.h"
#include "pme_coordinate_receiver_gpu_impl.h"

#include "config.h"

#include "gromacs/ewald/pme_force_sender_gpu.h"
#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/gpueventsynchronizer.h"
#include "gromacs/utility/gmxmpi.h"

namespace gmx
{

PmeCoordinateReceiverGpu::Impl::Impl(const DeviceStream&    pmeStream,
                                     MPI_Comm               comm,
                                     gmx::ArrayRef<PpRanks> ppRanks) :
    pmeStream_(pmeStream), comm_(comm), ppRanks_(ppRanks)
{
    request_.resize(ppRanks.size());
    ppSync_.resize(ppRanks.size());
}

PmeCoordinateReceiverGpu::Impl::~Impl() = default;

void PmeCoordinateReceiverGpu::Impl::sendCoordinateBufferAddressToPpRanks(DeviceBuffer<RVec> d_x)
{
    // The buffer address needs to be sent to the PP rank only for thread-MPI,
    // since the PP rank pushes the data directly using cudaMemcpy
    if (GMX_THREAD_MPI)
    {
        int ind_start = 0;
        int ind_end   = 0;
        for (const auto& receiver : ppRanks_)
        {
            ind_start = ind_end;
            ind_end   = ind_start + receiver.numAtoms;

            // Data will be transferred directly from GPU.
            void* sendBuf = reinterpret_cast<void*>(&d_x[ind_start]);
#if GMX_MPI
            MPI_Send(&sendBuf, sizeof(void**), MPI_BYTE, receiver.rankId, 0, comm_);
#else
            GMX_UNUSED_VALUE(sendBuf);
#endif
        }
    }
}
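
/*
 * For illustration only: the matching receive on the PP-rank side is implemented
 * outside this file. A rough sketch of that counterpart under the protocol above
 * (all names here are assumptions, not the actual GROMACS implementation):
 *
 *   // Receive the remote PME device-buffer address pushed by the code above (tag 0).
 *   void* remotePmeXBuffer = nullptr;
 *   MPI_Recv(&remotePmeXBuffer, sizeof(void*), MPI_BYTE, pmeRank, 0, comm, MPI_STATUS_IGNORE);
 *
 *   // Each step, push local coordinates straight into PME's GPU buffer,
 *   // device-to-device, without staging through host memory.
 *   cudaMemcpyAsync(remotePmeXBuffer, d_xLocal, numAtoms * sizeof(float3),
 *                   cudaMemcpyDeviceToDevice, ppStream);
 */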

/*! \brief Receive coordinate synchronizer pointer from the PP ranks. */
void PmeCoordinateReceiverGpu::Impl::receiveCoordinatesSynchronizerFromPpCudaDirect(int ppRank)
{
    GMX_ASSERT(GMX_THREAD_MPI,
               "receiveCoordinatesSynchronizerFromPpCudaDirect is expected to be called only for "
               "Thread-MPI");

    // Data will be pushed directly from PP task

#if GMX_MPI
    // Receive event from PP task
    // NOLINTNEXTLINE(bugprone-sizeof-expression)
    MPI_Irecv(&ppSync_[recvCount_], sizeof(GpuEventSynchronizer*), MPI_BYTE, ppRank, 0, comm_, &request_[recvCount_]);
    recvCount_++;
#else
    GMX_UNUSED_VALUE(ppRank);
#endif
}
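
/*
 * For illustration only: the pointer received above refers to a GpuEventSynchronizer
 * that the PP rank marks after enqueueing its device-to-device coordinate copy. A
 * rough sketch of that sending side (names are assumptions, not the actual PP code):
 *
 *   // Record the copy-done event in the PP stream, then hand its address to the PME rank.
 *   coordinateCopyDoneEvent->markEvent(ppStream);
 *   GpuEventSynchronizer* syncPtr = coordinateCopyDoneEvent;
 *   MPI_Send(&syncPtr, sizeof(GpuEventSynchronizer*), MPI_BYTE, pmeRank, 0, comm);
 */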

/*! \brief Receive coordinate data using CUDA-aware MPI */
void PmeCoordinateReceiverGpu::Impl::launchReceiveCoordinatesFromPpCudaMpi(DeviceBuffer<RVec> recvbuf,
                                                                           int numAtoms,
                                                                           int numBytes,
                                                                           int ppRank)
{
    GMX_ASSERT(GMX_LIB_MPI,
               "launchReceiveCoordinatesFromPpCudaMpi is expected to be called only for Lib-MPI");

#if GMX_MPI
    MPI_Irecv(&recvbuf[numAtoms], numBytes, MPI_BYTE, ppRank, eCommType_COORD_GPU, comm_, &request_[recvCount_++]);
#else
    GMX_UNUSED_VALUE(recvbuf);
    GMX_UNUSED_VALUE(numAtoms);
    GMX_UNUSED_VALUE(numBytes);
    GMX_UNUSED_VALUE(ppRank);
#endif
}
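
/*
 * For illustration only: with CUDA-aware MPI, device pointers are handed to MPI directly,
 * so the matching send on the PP rank is conceptually a single call (a sketch; d_xLocal,
 * numAtoms, pmeRank and comm are assumptions):
 *
 *   // d_xLocal is a device pointer; a CUDA-aware MPI library transfers it GPU-to-GPU
 *   // where possible, matching the MPI_Irecv with tag eCommType_COORD_GPU above.
 *   MPI_Send(d_xLocal, numAtoms * sizeof(float3), MPI_BYTE, pmeRank, eCommType_COORD_GPU, comm);
 */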

void PmeCoordinateReceiverGpu::Impl::synchronizeOnCoordinatesFromPpRanks()
{
    if (recvCount_ > 0)
    {
        // Ensure the PME calculation doesn't commence until the coordinate data/remote events
        // have been transferred
#if GMX_MPI
        MPI_Waitall(recvCount_, request_.data(), MPI_STATUSES_IGNORE);
#endif

        // Make the PME stream wait on the PP to PME data transfer events
        if (GMX_THREAD_MPI)
        {
            for (int i = 0; i < recvCount_; i++)
            {
                ppSync_[i]->enqueueWaitEvent(pmeStream_);
            }
        }

        // Reset the receive counter
        recvCount_ = 0;
    }
}
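
/*
 * For illustration only: the enqueueWaitEvent() call above is a stream-ordered wait rather
 * than a host-side block. Conceptually, in a CUDA build it amounts to something like the
 * following (a sketch of the idea, not the GpuEventSynchronizer implementation):
 *
 *   // The PME stream waits for the recorded copy-done event; the host thread continues.
 *   cudaStreamWaitEvent(pmeCudaStream, coordinateCopyDoneCudaEvent, 0);
 */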

PmeCoordinateReceiverGpu::PmeCoordinateReceiverGpu(const DeviceStream&    pmeStream,
                                                   MPI_Comm               comm,
                                                   gmx::ArrayRef<PpRanks> ppRanks) :
    impl_(new Impl(pmeStream, comm, ppRanks))
{
}

PmeCoordinateReceiverGpu::~PmeCoordinateReceiverGpu() = default;

void PmeCoordinateReceiverGpu::sendCoordinateBufferAddressToPpRanks(DeviceBuffer<RVec> d_x)
{
    impl_->sendCoordinateBufferAddressToPpRanks(d_x);
}

void PmeCoordinateReceiverGpu::receiveCoordinatesSynchronizerFromPpCudaDirect(int ppRank)
{
    impl_->receiveCoordinatesSynchronizerFromPpCudaDirect(ppRank);
}

void PmeCoordinateReceiverGpu::launchReceiveCoordinatesFromPpCudaMpi(DeviceBuffer<RVec> recvbuf,
                                                                     int                numAtoms,
                                                                     int                numBytes,
                                                                     int                ppRank)
{
    impl_->launchReceiveCoordinatesFromPpCudaMpi(recvbuf, numAtoms, numBytes, ppRank);
}

void PmeCoordinateReceiverGpu::synchronizeOnCoordinatesFromPpRanks()
{
    impl_->synchronizeOnCoordinatesFromPpRanks();
}

} // namespace gmx