/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Declares CUDA implementation of GPU Halo Exchange.
 *
 * This header file is needed to include from both the device-side
 * kernels file, and the host-side management code.
 *
 * \author Alan Gray <alang@nvidia.com>
 *
 * \ingroup module_domdec
 */
#ifndef GMX_DOMDEC_GPUHALOEXCHANGE_IMPL_H
#define GMX_DOMDEC_GPUHALOEXCHANGE_IMPL_H
#include "gromacs/domdec/gpuhaloexchange.h"
#include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
#include "gromacs/gpu_utils/hostallocator.h"
#include "gromacs/utility/gmxmpi.h"
namespace gmx
{

/*! \brief switch for whether coordinates or force halo is being applied */
enum class HaloQuantity
{
    HaloCoordinates,
    HaloForces
};
/*! \internal \brief Class with interfaces and data for GPU Halo Exchange */
class GpuHaloExchange::Impl
{

public:
    /*! \brief Creates GPU Halo Exchange object.
     *
     * \param [inout] dd              domdec structure
     * \param [in]    mpi_comm_mysim  communicator used for simulation
     * \param [in]    localStream     local NB CUDA stream
     * \param [in]    nonLocalStream  non-local NB CUDA stream
     */
    Impl(gmx_domdec_t* dd, MPI_Comm mpi_comm_mysim, void* localStream, void* nonLocalStream);
    ~Impl();
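    /* A minimal construction sketch (illustrative only; the stream variable
     * names are assumptions, not part of this header):
     *
     *   GpuHaloExchange::Impl impl(dd, mpi_comm_mysim, localNbStream, nonLocalNbStream);
     *
     * where localNbStream and nonLocalNbStream are the cudaStream_t handles
     * of the local and non-local non-bonded streams, passed here as void*.
     */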
    /*! \brief
     * (Re-) Initialization for GPU halo exchange
     * \param [in] d_coordinatesBuffer  pointer to coordinates buffer in GPU memory
     * \param [in] d_forcesBuffer       pointer to forces buffer in GPU memory
     */
    void reinitHalo(float3* d_coordinatesBuffer, float3* d_forcesBuffer);
    /*! \brief
     * GPU halo exchange of coordinates buffer
     * \param [in] box                            Coordinate box (from which shifts will be constructed)
     * \param [in] coordinatesReadyOnDeviceEvent  event recorded when coordinates have been copied to device
     */
    void communicateHaloCoordinates(const matrix box, GpuEventSynchronizer* coordinatesReadyOnDeviceEvent);
    /*! \brief GPU halo exchange of force buffer
     * \param[in] accumulateForces  True if forces should accumulate, otherwise they are set
     */
    void communicateHaloForces(bool accumulateForces);
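    /* A sketch of the per-step call sequence, assuming illustrative names
     * d_x, d_f and xReadyOnDeviceEvent for the device buffers and the
     * coordinate-ready event (reinitHalo must be called again after each
     * domain repartitioning):
     *
     *   impl.reinitHalo(d_x, d_f);
     *   impl.communicateHaloCoordinates(box, xReadyOnDeviceEvent);
     *   // ... force computation ...
     *   impl.communicateHaloForces(true); // accumulate received forces
     */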
private:
    /*! \brief Data transfer wrapper for GPU halo exchange
     * \param [inout] d_ptr                          pointer to coordinates or force buffer in GPU memory
     * \param [in]    haloQuantity                   switch on whether X or F halo exchange is being performed
     * \param [in]    coordinatesReadyOnDeviceEvent  event recorded when coordinates have been copied to device
     */
    void communicateHaloData(float3* d_ptr,
                             HaloQuantity haloQuantity,
                             GpuEventSynchronizer* coordinatesReadyOnDeviceEvent);
    /*! \brief Data transfer for GPU halo exchange using CUDA memcopies
     * \param [inout] sendPtr    address to send data from
     * \param [in]    sendSize   number of atoms to be sent
     * \param [in]    sendRank   rank to send data to
     * \param [inout] remotePtr  remote address to recv data
     * \param [in]    recvRank   rank to recv data from
     */
    void communicateHaloDataWithCudaDirect(void* sendPtr, int sendSize, int sendRank, void* remotePtr, int recvRank);
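    /* The CUDA-direct path pushes data straight into the remote rank's
     * device buffer, whose address (remoteXPtr_ or remoteFPtr_ below) has
     * been exchanged between ranks beforehand. A simplified sketch of the
     * copy it issues (the real implementation also records
     * haloDataTransferLaunched_ and coordinates with the remote rank):
     *
     *   cudaMemcpyAsync(remotePtr, sendPtr, sendSize * sizeof(float3),
     *                   cudaMemcpyDeviceToDevice, nonLocalStream_);
     */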
    //! Domain decomposition object
    gmx_domdec_t* dd_ = nullptr;
    //! map of indices to be sent from this rank
    gmx::HostVector<int> h_indexMap_;
    //! device copy of index map
    int* d_indexMap_ = nullptr;
    //! number of elements in index map array
    int indexMapSize_ = -1;
    //! number of elements allocated in index map array
    int indexMapSizeAlloc_ = -1;
    //! device buffer for sending packed data
    float3* d_sendBuf_ = nullptr;
    //! number of atoms in sendbuf array
    int sendBufSize_ = -1;
    //! number of atoms allocated in sendbuf array
    int sendBufSizeAlloc_ = -1;
    //! device buffer for receiving packed data
    float3* d_recvBuf_ = nullptr;
    //! maximum size of packed buffer
    int maxPackedBufferSize_ = 0;
    //! number of atoms in recvbuf array
    int recvBufSize_ = -1;
    //! number of atoms allocated in recvbuf array
    int recvBufSizeAlloc_ = -1;
    //! rank to send data to for X
    int sendRankX_ = 0;
    //! rank to recv data from for X
    int recvRankX_ = 0;
    //! rank to send data to for F
    int sendRankF_ = 0;
    //! rank to recv data from for F
    int recvRankF_ = 0;
    //! send copy size from this rank for X
    int xSendSize_ = 0;
    //! recv copy size to this rank for X
    int xRecvSize_ = 0;
    //! send copy size from this rank for F
    int fSendSize_ = 0;
    //! recv copy size to this rank for F
    int fRecvSize_ = 0;
    //! number of home atoms - offset of local halo region
    int numHomeAtoms_ = 0;
    //! remote GPU coordinates buffer pointer for pushing data
    void* remoteXPtr_ = nullptr;
    //! remote GPU force buffer pointer for pushing data
    void* remoteFPtr_ = nullptr;
    //! Periodic Boundary Conditions for this rank
    bool usePBC_ = false;
    //! force shift buffer on device
    float3* d_fShift_ = nullptr;
    //! Event triggered when halo transfer has been launched with direct CUDA memory copy
    GpuEventSynchronizer* haloDataTransferLaunched_ = nullptr;
    //! MPI communicator used for simulation
    MPI_Comm mpi_comm_mysim_;
    //! CUDA stream for local non-bonded calculations
    cudaStream_t localStream_ = nullptr;
    //! CUDA stream for non-local non-bonded calculations
    cudaStream_t nonLocalStream_ = nullptr;
    //! full coordinates buffer in GPU memory
    float3* d_x_ = nullptr;
    //! full forces buffer in GPU memory
    float3* d_f_ = nullptr;
};

} // namespace gmx

#endif