/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \internal \file
 *
 * \brief Declares CUDA implementation class for PME-PP communications
 *
 * \author Alan Gray <alang@nvidia.com>
 *
 * \ingroup module_ewald
 */
#ifndef GMX_PME_PP_COMM_GPU_IMPL_H
#define GMX_PME_PP_COMM_GPU_IMPL_H

#include <atomic>

#include "gromacs/ewald/pme_pp_comm_gpu.h"
#include "gromacs/gpu_utils/gpueventsynchronizer.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/utility/gmxmpi.h"

namespace gmx
{

/*! \internal \brief Class with interfaces and data for CUDA version of PME-PP Communication */
class PmePpCommGpu::Impl
{

public:
    /*! \brief Creates PME-PP GPU communication object.
     *
     * \param[in] comm              Communicator used for simulation
     * \param[in] pmeRank           Rank of PME task
     * \param[in] pmeCpuForceBuffer Buffer for PME force in CPU memory
     * \param[in] deviceContext     GPU context.
     * \param[in] deviceStream      GPU stream.
     */
    Impl(MPI_Comm                comm,
         int                     pmeRank,
         std::vector<gmx::RVec>* pmeCpuForceBuffer,
         const DeviceContext&    deviceContext,
         const DeviceStream&     deviceStream);

    /*! \brief Perform steps required when buffer size changes
     * \param[in] size Number of elements in buffer
     */
    void reinit(int size);
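
    /* Illustrative sketch only (not part of the GROMACS sources): how a PP
     * rank might construct and (re)initialise this object. The names
     * mpiCommMySim, pmeRank, pmeCpuForce, deviceContext, deviceStream and
     * numHomeAtoms are hypothetical caller-owned objects.
     *
     *   PmePpCommGpu::Impl pmePpComm(mpiCommMySim, pmeRank, &pmeCpuForce, deviceContext, deviceStream);
     *   // Re-allocate the force staging buffer whenever the local atom count
     *   // changes, e.g. after domain-decomposition repartitioning.
     *   pmePpComm.reinit(numHomeAtoms);
     */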

    /*! \brief Pull force buffer directly from GPU memory on PME
     * rank to either GPU or CPU memory on PP task using CUDA
     * Memory copy or CUDA-aware MPI.
     *
     * recvPtr should be in GPU or CPU memory if receivePmeForceToGpu
     * is true or false, respectively. If receiving to GPU, this
     * method should be called before the local GPU buffer
     * operations. If receiving to CPU it should be called
     * before forces are reduced with the other force
     * contributions on the CPU. It will automatically wait for
     * remote PME force data to be ready.
     *
     * \param[out] recvPtr CPU buffer to receive PME force data
     * \param[in] recvSize Number of elements to receive
     * \param[in] receivePmeForceToGpu Whether receive is to GPU, otherwise CPU
     */
    void receiveForceFromPme(float3* recvPtr, int recvSize, bool receivePmeForceToGpu);
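
    /* Illustrative sketch only (assumption, not GROMACS code): receiving the
     * PME forces on a PP rank. h_pmeForces, numHomeAtoms and
     * receivePmeForceToGpu are hypothetical caller-owned variables.
     *
     *   // If receivePmeForceToGpu is true the forces land in the GPU staging
     *   // buffer (call before the GPU force-buffer reduction); otherwise they
     *   // land in h_pmeForces (call before the CPU force reduction).
     *   pmePpComm.receiveForceFromPme(h_pmeForces, numHomeAtoms, receivePmeForceToGpu);
     */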

    /*! \brief Push coordinates buffer directly to GPU memory on PME
     * task, from either GPU or CPU memory on PP task using CUDA
     * Memory copy or CUDA-aware MPI. If sending from GPU, this method should
     * be called after the local GPU coordinate buffer operations.
     * The remote PME task will automatically wait for data to be copied
     * before commencing PME force calculations.
     *
     * \param[in] sendPtr Buffer with coordinate data
     * \param[in] sendSize Number of elements to send
     * \param[in] coordinatesReadyOnDeviceEvent Event recorded when coordinates are available on device
     */
    void sendCoordinatesToPme(float3* sendPtr, int sendSize, GpuEventSynchronizer* coordinatesReadyOnDeviceEvent);
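
    /* Illustrative sketch only (assumption, not GROMACS code): sending PP
     * coordinates to the PME task once they are available on the device.
     * d_coordinates and xReadyOnDeviceEvent are hypothetical caller-owned
     * objects (device coordinate buffer and its ready event).
     *
     *   pmePpComm.sendCoordinatesToPme(d_coordinates, numHomeAtoms, xReadyOnDeviceEvent);
     */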

    /*! \brief
     * Return pointer to buffer used for staging PME force on GPU
     */
    DeviceBuffer<Float3> getGpuForceStagingPtr();

    /*! \brief
     * Return pointer to event recorded when forces are ready
     */
    GpuEventSynchronizer* getForcesReadySynchronizer();
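
    /* Illustrative sketch only (assumption, not GROMACS code): consuming the
     * staged PME forces in a GPU force reduction. forceReductionStream is a
     * hypothetical caller-owned DeviceStream; enqueueWaitEvent() is assumed to
     * make that stream wait for the forces-ready event before the reduction
     * kernel that reads the staging buffer is launched.
     *
     *   DeviceBuffer<Float3>  d_pmeForceStaging = pmePpComm.getGpuForceStagingPtr();
     *   GpuEventSynchronizer* pmeForcesReady    = pmePpComm.getForcesReadySynchronizer();
     *   pmeForcesReady->enqueueWaitEvent(forceReductionStream);
     *   // ... launch the reduction kernel that adds d_pmeForceStaging to the local forces ...
     */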

private:
    /*! \brief Pull force buffer directly from GPU memory on PME
     * rank to either GPU or CPU memory on PP task using CUDA
     * Memory copy. This method is used with Thread-MPI.
     * \param[in] receivePmeForceToGpu Whether receive is to GPU, otherwise CPU
     */
    void receiveForceFromPmeCudaDirect(bool receivePmeForceToGpu);

    /*! \brief Pull force buffer directly from GPU memory on PME
     * rank to either GPU or CPU memory on PP task using CUDA-aware
     * MPI. This method is used with process-MPI.
     * \param[out] recvPtr CPU buffer to receive PME force data
     * \param[in] recvSize Number of elements to receive
     */
    void receiveForceFromPmeCudaMpi(float3* recvPtr, int recvSize);

    /*! \brief Push coordinates buffer directly to GPU memory on PME
     * task, from either GPU or CPU memory on PP task using CUDA Memory copy.
     * This method is used with Thread-MPI.
     * \param[in] sendPtr Buffer with coordinate data
     * \param[in] sendSize Number of elements to send
     * \param[in] coordinatesReadyOnDeviceEvent Event recorded when coordinates are available on device
     */
    void sendCoordinatesToPmeCudaDirect(float3*               sendPtr,
                                        int                   sendSize,
                                        GpuEventSynchronizer* coordinatesReadyOnDeviceEvent);

    /*! \brief Push coordinates buffer directly to GPU memory on PME
     * task, from either GPU or CPU memory on PP task using CUDA-aware MPI.
     * This method is used with process-MPI.
     * \param[in] sendPtr Buffer with coordinate data
     * \param[in] sendSize Number of elements to send
     * \param[in] coordinatesReadyOnDeviceEvent Event recorded when coordinates are available on device
     */
    void sendCoordinatesToPmeCudaMpi(float3*               sendPtr,
                                     int                   sendSize,
                                     GpuEventSynchronizer* coordinatesReadyOnDeviceEvent);

    //! GPU context handle (not used in CUDA)
    const DeviceContext& deviceContext_;
    //! Handle for CUDA stream used for the communication operations in this class
    const DeviceStream& pmePpCommStream_;
    //! Remote location of PME coordinate data buffer
    float3* remotePmeXBuffer_ = nullptr;
    //! communicator for simulation
    MPI_Comm comm_;
    //! Buffer for PME force on CPU
    std::vector<gmx::RVec>* pmeCpuForceBuffer_;
    //! Buffer for staging PME force on GPU
    DeviceBuffer<gmx::RVec> d_pmeForces_;
    //! number of atoms in PME force staging array
    int d_pmeForcesSize_ = -1;
    //! number of atoms allocated in PME force staging array
    int d_pmeForcesSizeAlloc_ = -1;
    //! Event recorded when PME forces are ready on PME task
    GpuEventSynchronizer forcesReadySynchronizer_;
    //! Event recorded when coordinates have been transferred to PME task
    GpuEventSynchronizer pmeCoordinatesSynchronizer_;
    //! Event recorded by remote PME task when forces have been transferred
    GpuEventSynchronizer* remotePmeForceSendEvent_;
    //! Flag to track when the remote PME event has been recorded, ready for enqueueing
    volatile std::atomic<bool>* remotePmeForceSendEventRecorded_;