/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Declares CUDA implementation class for PME-PP communications
 *
 * \author Alan Gray <alang@nvidia.com>
 *
 * \ingroup module_ewald
 */
#ifndef GMX_PME_PP_COMM_GPU_IMPL_H
#define GMX_PME_PP_COMM_GPU_IMPL_H

#include "gromacs/ewald/pme_pp_comm_gpu.h"
#include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
#include "gromacs/math/vectypes.h"
#include "gromacs/utility/gmxmpi.h"

namespace gmx
{

/*! \internal \brief Class with interfaces and data for CUDA version of PME-PP Communication */
class PmePpCommGpu::Impl
{

public:
    /*! \brief Creates PME-PP GPU communication object.
     * \param[in] comm    Communicator used for simulation
     * \param[in] pmeRank Rank of PME task
     */
    Impl(MPI_Comm comm, int pmeRank);
    ~Impl();

    /*! \brief Perform steps required when buffer size changes
     * \param[in] size Number of elements in buffer
     */
    void reinit(int size);

    /*! \brief Pull force buffer directly from GPU memory on PME
     * rank to either GPU or CPU memory on PP task using CUDA
     * Memory copy.
     *
     * recvPtr should be in GPU or CPU memory if receivePmeForceToGpu
     * is true or false, respectively. If receiving to GPU, this
     * method should be called before the local GPU buffer
     * operations. If receiving to CPU it should be called
     * before forces are reduced with the other force
     * contributions on the CPU. It will automatically wait for
     * remote PME force data to be ready.
     *
     * \param[out] recvPtr              Buffer to receive PME force data (GPU or CPU memory)
     * \param[in]  recvSize             Number of elements to receive
     * \param[in]  receivePmeForceToGpu Whether receive is to GPU, otherwise CPU
     */
    void receiveForceFromPmeCudaDirect(void* recvPtr, int recvSize, bool receivePmeForceToGpu);
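
    /* A minimal usage sketch of the receive path (the names pmePpCommGpu,
     * d_pmeForceRecvBuf and nAtomsLocal are illustrative assumptions, not
     * part of the GROMACS API): a PP task pulls the PME forces into a
     * device buffer before launching its local GPU force-buffer operations.
     *
     * \code
     *   // Receive directly into GPU memory; the call waits internally on
     *   // the remote PME-forces-ready event before enqueueing the copy.
     *   pmePpCommGpu->receiveForceFromPmeCudaDirect(d_pmeForceRecvBuf, nAtomsLocal, true);
     *   // ... now safe to launch the local GPU force buffer operations ...
     * \endcode
     */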

    /*! \brief Push coordinates buffer directly to GPU memory on PME
     * task, from either GPU or CPU memory on PP task using CUDA
     * Memory copy. sendPtr should be in GPU or CPU memory if
     * sendPmeCoordinatesFromGpu is true or false, respectively. If
     * sending from GPU, this method should be called after the
     * local GPU coordinate buffer operations. The remote PME task will
     * automatically wait for data to be copied before commencing PME force calculations.
     *
     * \param[in] sendPtr                       Buffer with coordinate data
     * \param[in] sendSize                      Number of elements to send
     * \param[in] sendPmeCoordinatesFromGpu     Whether send is from GPU, otherwise CPU
     * \param[in] coordinatesReadyOnDeviceEvent Event recorded when coordinates are available on device
     */
    void sendCoordinatesToPmeCudaDirect(void*                 sendPtr,
                                        int                   sendSize,
                                        bool                  sendPmeCoordinatesFromGpu,
                                        GpuEventSynchronizer* coordinatesReadyOnDeviceEvent);
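
    /* The matching send-side sketch (d_coordinatesBuf, nAtomsLocal and
     * xReadyOnDeviceEvent are again illustrative assumptions): coordinates
     * already resident on the PP GPU are pushed to the PME task, gated on
     * the event recorded when they became available on the device.
     *
     * \code
     *   // Event recorded by whichever operation produced the coordinates
     *   // on the device, e.g. an H2D copy or an update kernel.
     *   GpuEventSynchronizer* xReadyOnDeviceEvent = ...;
     *   pmePpCommGpu->sendCoordinatesToPmeCudaDirect(d_coordinatesBuf, nAtomsLocal,
     *                                                true, xReadyOnDeviceEvent);
     * \endcode
     */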

    /*! \brief
     * Return pointer to buffer used for staging PME force on GPU
     */
    void* getGpuForceStagingPtr();

    /*! \brief
     * Return pointer to event recorded when forces are ready
     */
    void* getForcesReadySynchronizer();
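
    /* Putting the calls together, a hedged per-step sketch from the PP
     * rank's point of view (a sketch under assumptions, not the definitive
     * mdrun call sequence; all buffer and count names are hypothetical):
     *
     * \code
     *   pmePpCommGpu->reinit(nAtomsLocal); // after each (re)partitioning
     *   pmePpCommGpu->sendCoordinatesToPmeCudaDirect(d_coordinatesBuf, nAtomsLocal,
     *                                                true, xReadyOnDeviceEvent);
     *   // ... PP rank computes its own force contributions ...
     *   pmePpCommGpu->receiveForceFromPmeCudaDirect(d_pmeForceRecvBuf, nAtomsLocal, true);
     *   // ... then launch the local GPU force-buffer reduction ...
     * \endcode
     */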

private:
    //! CUDA stream used for the communication operations in this class
    cudaStream_t pmePpCommStream_ = nullptr;
    //! Remote location of PME coordinate data buffer
    void* remotePmeXBuffer_ = nullptr;
    //! Remote location of PME force data buffer
    void* remotePmeFBuffer_ = nullptr;
    //! Communicator for simulation
    MPI_Comm comm_;
    //! Rank of PME task
    int pmeRank_ = -1;
    //! Buffer for staging PME force on GPU
    rvec* d_pmeForces_ = nullptr;
    //! Number of atoms in PME force staging array
    int d_pmeForcesSize_ = -1;
    //! Number of atoms allocated in PME force staging array
    int d_pmeForcesSizeAlloc_ = -1;
    //! Event recorded when PME forces are ready on PME task
    GpuEventSynchronizer forcesReadySynchronizer_;
    //! Event recorded when coordinates have been transferred to PME task
    GpuEventSynchronizer pmeCoordinatesSynchronizer_;
};

} // namespace gmx

#endif