/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Declares CUDA implementation of GPU Halo Exchange.
 *
 * This header file is needed to include from both the device-side
 * kernels file, and the host-side management code.
 *
 * \author Alan Gray <alang@nvidia.com>
 *
 * \ingroup module_domdec
 */
#ifndef GMX_DOMDEC_GPUHALOEXCHANGE_IMPL_H
#define GMX_DOMDEC_GPUHALOEXCHANGE_IMPL_H
#include "gromacs/domdec/gpuhaloexchange.h"
#include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
#include "gromacs/gpu_utils/hostallocator.h"
#include "gromacs/utility/gmxmpi.h"

namespace gmx
{
/*! \brief switch for whether coordinates or force halo is being applied */
enum class HaloQuantity
{
    HaloCoordinates, HaloForces
};
/*! \internal \brief Class with interfaces and data for GPU Halo Exchange */
class GpuHaloExchange::Impl
{

    public:
        /*! \brief Creates GPU Halo Exchange object.
         *
         * \param [inout] dd              domdec structure
         * \param [in]    mpi_comm_mysim  communicator used for simulation
         * \param [in]    localStream     local NB CUDA stream
         * \param [in]    nonLocalStream  non-local NB CUDA stream
         */
        Impl(gmx_domdec_t *dd,
             MPI_Comm      mpi_comm_mysim,
             void         *localStream,
             void         *nonLocalStream);
        ~Impl();
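
        /* Illustrative only: a minimal sketch of how host-side setup code might
         * construct this object. The variable names (cr, localNbStream,
         * nonLocalNbStream) and the way the NB CUDA streams are obtained are
         * assumptions made for the example, not part of this header.
         *
         *   GpuHaloExchange::Impl gpuHaloExchange(cr->dd,              // domdec structure
         *                                         cr->mpi_comm_mysim,  // simulation MPI communicator
         *                                         localNbStream,       // local NB CUDA stream, passed as void*
         *                                         nonLocalNbStream);   // non-local NB CUDA stream, passed as void*
         */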
        /*! \brief
         * (Re-) Initialization for GPU halo exchange
         * \param [in] d_coordinatesBuffer  pointer to coordinates buffer in GPU memory
         * \param [in] d_forcesBuffer       pointer to forces buffer in GPU memory
         */
        void reinitHalo(float3 *d_coordinatesBuffer,
                        float3 *d_forcesBuffer);
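
        /* Illustrative only: a hedged sketch of when re-initialization would
         * typically be called. The buffer names d_x and d_f stand for the device
         * coordinate and force buffers owned elsewhere and are assumptions made
         * for the example.
         *
         *   // After each domain-decomposition repartitioning step:
         *   gpuHaloExchange.reinitHalo(d_x, d_f);
         */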
        /*! \brief
         * GPU halo exchange of coordinates buffer
         * \param [in] box  Coordinate box (from which shifts will be constructed)
         * \param [in] coordinatesReadyOnDeviceEvent  event recorded when coordinates have been copied to device
         */
        void communicateHaloCoordinates(const matrix          box,
                                        GpuEventSynchronizer *coordinatesReadyOnDeviceEvent);
        /*! \brief GPU halo exchange of force buffer
         * \param[in] accumulateForces  True if forces should accumulate, otherwise they are set
         */
        void communicateHaloForces(bool accumulateForces);
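
        /* Illustrative only: a hedged sketch of the per-step call sequence the
         * two methods above are designed for. The event and flag names are
         * assumptions made for the example.
         *
         *   // Once this step's local coordinates are available on the GPU:
         *   gpuHaloExchange.communicateHaloCoordinates(box, xReadyOnDeviceEvent);
         *
         *   // ... non-local force kernels run ...
         *
         *   // Once the non-local forces are available on the GPU:
         *   gpuHaloExchange.communicateHaloForces(accumulateForces);
         */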
    private:
        /*! \brief Data transfer wrapper for GPU halo exchange
         * \param [inout] d_ptr         pointer to coordinates or force buffer in GPU memory
         * \param [in]    haloQuantity  switch on whether X or F halo exchange is being performed
         */
        void communicateHaloData(float3      *d_ptr,
                                 HaloQuantity haloQuantity);
        /*! \brief Data transfer for GPU halo exchange using CUDA memcopies
         * \param [inout] sendPtr    address to send data from
         * \param [in]    sendSize   number of atoms to be sent
         * \param [in]    sendRank   rank to send data to
         * \param [inout] remotePtr  remote address to recv data
         * \param [in]    recvRank   rank to recv data from
         */
        void communicateHaloDataWithCudaDirect(void *sendPtr,
                                               int   sendSize,
                                               int   sendRank,
                                               void *remotePtr,
                                               int   recvRank);
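
        /* Illustrative only: a minimal sketch, under the assumption of
         * peer-to-peer access between the GPUs, of the push-style device-to-device
         * copy that such a method could issue on the non-local stream:
         *
         *   cudaError_t stat = cudaMemcpyAsync(remotePtr,                  // destination buffer on remote GPU
         *                                      sendPtr,                    // packed send buffer on this GPU
         *                                      sendSize * sizeof(float3),  // bytes to transfer
         *                                      cudaMemcpyDeviceToDevice,
         *                                      nonLocalStream_);
         *
         * Obtaining the remote destination pointer and coordinating with the
         * receiving rank involve MPI, as reflected by the sendRank and recvRank
         * arguments.
         */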
        //! Domain decomposition object
        gmx_domdec_t *dd_ = nullptr;
        //! map of indices to be sent from this rank
        gmx::HostVector<int> h_indexMap_;
        //! device copy of index map
        int *d_indexMap_ = nullptr;
        //! number of elements in index map array
        int indexMapSize_ = -1;
        //! number of elements allocated in index map array
        int indexMapSizeAlloc_ = -1;
        //! device buffer for sending packed data
        float3 *d_sendBuf_ = nullptr;
        //! number of atoms in sendbuf array
        int sendBufSize_ = -1;
        //! number of atoms allocated in sendbuf array
        int sendBufSizeAlloc_ = -1;
        //! device buffer for receiving packed data
        float3 *d_recvBuf_ = nullptr;
        //! maximum size of packed buffer
        int maxPackedBufferSize_ = 0;
        //! number of atoms in recvbuf array
        int recvBufSize_ = -1;
        //! number of atoms allocated in recvbuf array
        int recvBufSizeAlloc_ = -1;
        //! rank to send data to for X
        int sendRankX_ = 0;
        //! rank to recv data from for X
        int recvRankX_ = 0;
        //! rank to send data to for F
        int sendRankF_ = 0;
        //! rank to recv data from for F
        int recvRankF_ = 0;
        //! send copy size from this rank for X
        int xSendSize_ = 0;
        //! recv copy size to this rank for X
        int xRecvSize_ = 0;
        //! send copy size from this rank for F
        int fSendSize_ = 0;
        //! recv copy size to this rank for F
        int fRecvSize_ = 0;
        //! number of home atoms - offset of local halo region
        int numHomeAtoms_ = 0;
        //! remote GPU coordinates buffer pointer for pushing data
        void *remoteXPtr_ = nullptr;
        //! remote GPU force buffer pointer for pushing data
        void *remoteFPtr_ = nullptr;
        //! Periodic Boundary Conditions for this rank
        bool usePBC_ = false;
        //! force shift buffer on device
        float3 *d_fShift_ = nullptr;
        //! Event triggered when halo transfer has been launched with direct CUDA memory copy
        GpuEventSynchronizer *haloDataTransferLaunched_ = nullptr;
        //! MPI communicator used for simulation
        MPI_Comm mpi_comm_mysim_;
        //! CUDA stream for local non-bonded calculations
        cudaStream_t localStream_ = nullptr;
        //! CUDA stream for non-local non-bonded calculations
        cudaStream_t nonLocalStream_ = nullptr;
        //! full coordinates buffer in GPU memory
        float3 *d_x_ = nullptr;
        //! full forces buffer in GPU memory
        float3 *d_f_ = nullptr;