/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Declaration of class which sends PME Force from GPU memory to PP task
 *
 * \author Alan Gray <alang@nvidia.com>
 *
 * \ingroup module_ewald
 */
#ifndef GMX_PMEFORCESENDERGPU_IMPL_H
#define GMX_PMEFORCESENDERGPU_IMPL_H
#include <atomic>
#include <cstddef>
#include <memory>
#include <new>
#include <vector>

#include "gromacs/ewald/pme_force_sender_gpu.h"
#include "gromacs/gpu_utils/devicebuffer_datatype.h"
#include "gromacs/gpu_utils/gputraits.h"
#include "gromacs/utility/arrayref.h"
// Portable definition of cache line size
#ifdef __cpp_lib_hardware_interference_size
using std::hardware_destructive_interference_size;
#else
constexpr std::size_t hardware_destructive_interference_size = 64;
#endif

class GpuEventSynchronizer;

namespace gmx
{
/*! \internal \brief Helper struct holding a flag padded to a full cache line, so that per-rank flags do not share cache lines */
typedef struct CacheLineAlignedFlag
{
    alignas(hardware_destructive_interference_size) bool flag;
} CacheLineAlignedFlag;
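
/* Illustrative sketch (not part of the GROMACS sources): because the flag is padded to
 * hardware_destructive_interference_size, neighbouring elements of a container of such
 * flags occupy distinct cache lines, so concurrent updates from different threads do
 * not suffer false sharing:
 *
 *   std::vector<std::atomic<CacheLineAlignedFlag>> flags(2);
 *   static_assert(alignof(CacheLineAlignedFlag) >= hardware_destructive_interference_size,
 *                 "each flag occupies its own cache line");
 *   // flags[0] and flags[1] can be written concurrently without cache-line ping-pong
 */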
/*! \internal \brief Class with interfaces and data for CUDA version of PME Force sending functionality */
class PmeForceSenderGpu::Impl
{
public:
    /*! \brief Creates PME GPU Force sender object
     * \param[in] pmeForcesReady  Event synchronizer marked when PME forces are ready on the GPU
     * \param[in] comm            Communicator used for simulation
     * \param[in] deviceContext   GPU context
     * \param[in] ppRanks         List of PP ranks
     */
    Impl(GpuEventSynchronizer*  pmeForcesReady,
         MPI_Comm               comm,
         const DeviceContext&   deviceContext,
         gmx::ArrayRef<PpRanks> ppRanks);
    // NOLINTNEXTLINE(performance-trivially-destructible)
    ~Impl();
    /*! \brief
     * Sets location of force to be sent to each PP rank
     * \param[in] d_f  force buffer in GPU memory
     */
    void setForceSendBuffer(DeviceBuffer<Float3> d_f);
    /*! \brief
     * Send force to PP rank (used with Thread-MPI)
     * \param[in] ppRank                  PP rank to receive data
     * \param[in] numAtoms                number of atoms to send
     * \param[in] sendForcesDirectToPpGpu whether forces are transferred directly to remote GPU memory
     */
    void sendFToPpCudaDirect(int ppRank, int numAtoms, bool sendForcesDirectToPpGpu);
    /*! \brief
     * Send force to PP rank (used with Lib-MPI)
     * \param[in] sendbuf  force buffer in GPU memory
     * \param[in] offset   starting element in buffer
     * \param[in] numBytes number of bytes to transfer
     * \param[in] ppRank   PP rank to receive data
     * \param[in] request  MPI request to track asynchronous MPI call status
     */
    void sendFToPpCudaMpi(DeviceBuffer<RVec> sendbuf, int offset, int numBytes, int ppRank, MPI_Request* request);
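
    /* Illustrative call sequence (a sketch only, not the actual GROMACS call sites;
     * names such as d_forces, atomOffset and request are assumptions for the example):
     *
     *   PmeForceSenderGpu::Impl sender(pmeForcesReadyEvent, comm, deviceContext, ppRanks);
     *   sender.setForceSendBuffer(d_forces);
     *   // Thread-MPI build: push forces straight into the remote PP rank's memory
     *   sender.sendFToPpCudaDirect(ppRank, numAtoms, sendForcesDirectToPpGpu);
     *   // Lib-MPI build: CUDA-aware MPI send from GPU memory; the caller completes the request
     *   sender.sendFToPpCudaMpi(d_forces, atomOffset, numAtoms * sizeof(RVec), ppRank, &request);
     */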
private:
    //! Event indicating when PME forces are ready on the GPU in order for PP stream to sync with the PME stream
    GpuEventSynchronizer* pmeForcesReady_;
    //! Communicator for the simulation
    MPI_Comm comm_;
    //! List of PP ranks
    gmx::ArrayRef<PpRanks> ppRanks_;
    //! Streams used for pushing force to remote PP ranks
    std::vector<std::unique_ptr<DeviceStream>> ppCommStream_;
    //! Events used for managing sync with remote PP ranks
    std::vector<std::unique_ptr<GpuEventSynchronizer>> ppCommEvent_;
    //! Vector of flags to track when PP transfer events have been recorded
    std::vector<std::atomic<CacheLineAlignedFlag>> ppCommEventRecorded_;
    //! Addresses of local force buffers to send to remote PP ranks
    std::vector<DeviceBuffer<RVec>> localForcePtr_;
    //! GPU context handle (not used in CUDA)
    const DeviceContext& deviceContext_;
    //! Vector of CPU force buffer pointers for multiple remote PP tasks
    std::vector<float3*> pmeRemoteCpuForcePtr_;
    //! Vector of GPU force buffer pointers for multiple remote PP tasks
    std::vector<float3*> pmeRemoteGpuForcePtr_;