/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Declares GPU implementation class for CUDA bonded
 * interactions.
 *
 * This header file is needed to include from both the device-side
 * kernels file, and the host-side management code.
 *
 * \author Berk Hess <hess@kth.se>
 * \author Szilárd Páll <pall.szilard@gmail.com>
 * \author Mark Abraham <mark.j.abraham@gmail.com>
 *
 * \ingroup module_listed_forces
 */
#ifndef GMX_LISTED_FORCES_GPUBONDED_IMPL_H
#define GMX_LISTED_FORCES_GPUBONDED_IMPL_H
#include "gromacs/gpu_utils/device_context.h"
#include "gromacs/gpu_utils/gputraits.cuh"
#include "gromacs/gpu_utils/hostallocator.h"
#include "gromacs/listed_forces/gpubonded.h"
#include "gromacs/pbcutil/pbc_aiuc.h"
struct gmx_ffparams_t;

namespace gmx
{
/*! \internal \brief Version of InteractionList that supports pinning */
struct HostInteractionList
{
    /*! \brief Returns the total number of elements in iatoms */
    int size() const { return iatoms.size(); }

    //! List of interactions, see \c HostInteractionLists
    HostVector<int> iatoms = { {}, gmx::HostAllocationPolicy(gmx::PinningPolicy::PinnedIfSupported) };
};
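
/* A minimal usage sketch (illustrative only, not code from this module;
 * srcIatoms, d_dst and stream are hypothetical): assigning into iatoms
 * keeps the storage pinned when supported, so a later host-to-device
 * copy of it can run asynchronously.
 *
 *   HostInteractionList hostList;
 *   hostList.iatoms.assign(srcIatoms.begin(), srcIatoms.end());
 *   // hostList.iatoms.data() is now a valid source for
 *   // cudaMemcpyAsync(d_dst, hostList.iatoms.data(),
 *   //                 hostList.size() * sizeof(int),
 *   //                 cudaMemcpyHostToDevice, stream);
 */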
/*! \internal \brief Bonded parameters and GPU pointers
 *
 * This is used to accumulate all the parameters and pointers so they can be passed
 * to the GPU as a single structure.
 */
struct BondedCudaKernelParameters
{
    //! Periodic boundary data
    PbcAiuc pbcAiuc;
    //! The bonded types on GPU
    int fTypesOnGpu[numFTypesOnGpu];
    //! The number of interaction atom (iatom) elements for every function type
    int numFTypeIAtoms[numFTypesOnGpu];
    //! The number of bonds for every function type
    int numFTypeBonds[numFTypesOnGpu];
    //! The start index in the range of each interaction type
    int fTypeRangeStart[numFTypesOnGpu];
    //! The end index in the range of each interaction type
    int fTypeRangeEnd[numFTypesOnGpu];
    //! Force parameters (on GPU)
    t_iparams* d_forceParams;
    //! Coordinates before the timestep (on GPU)
    const float4* d_xq;
    //! Forces on atoms (on GPU)
    float3* d_f;
    //! Force shifts on atoms (on GPU)
    float3* d_fShift;
    //! Total energy (on GPU)
    float* d_vTot;
    //! Interaction list atoms (on GPU)
    t_iatom* d_iatoms[numFTypesOnGpu];
    BondedCudaKernelParameters()
    {
        matrix boxDummy = { { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } };

        setPbcAiuc(0, boxDummy, &pbcAiuc);

        d_forceParams = nullptr;
        d_xq          = nullptr;
        d_f           = nullptr;
        d_fShift      = nullptr;
        d_vTot        = nullptr;
    }
};
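
/* Because all pointers and ranges live in one plain struct, a kernel can
 * take them as a single by-value launch argument. A sketch (the kernel
 * name is hypothetical; the real kernels live in the device-side
 * kernels file):
 *
 *   template<bool calcVir, bool calcEner>
 *   __global__ void bondedKernel(BondedCudaKernelParameters kernelParams)
 *   {
 *       // One launch argument carries the force parameters, coordinate,
 *       // force and energy pointers, and the per-function-type ranges.
 *   }
 *
 *   bondedKernel<calcVir, calcEner><<<numBlocks, blockSize, 0, stream>>>(kernelParams);
 */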
/*! \internal \brief Implements GPU bondeds */
class GpuBonded::Impl
{
public:
    //! Constructor
    Impl(const gmx_ffparams_t& ffparams, void* streamPtr, gmx_wallcycle* wcycle);
    /*! \brief Destructor, non-default needed for freeing
     * device-side buffers */
    ~Impl();
    /*! \brief Update lists of interactions from idef suitable for the GPU,
     * using the data structures prepared for PP work.
     *
     * Intended to be called after each neighbour search
     * stage. Copies the bonded interactions assigned to the GPU
     * to device data structures, and updates device buffers that
     * may have been updated after search. */
    void updateInteractionListsAndDeviceBuffers(ArrayRef<const int>           nbnxnAtomOrder,
                                                const InteractionDefinitions& idef,
                                                void*                         xqDevice,
                                                DeviceBuffer<RVec>            forceDevice,
                                                DeviceBuffer<RVec>            fshiftDevice);
    /*! \brief Launches bonded kernel on a GPU */
    template<bool calcVir, bool calcEner>
    void launchKernel(const t_forcerec* fr, const matrix box);
    /*! \brief Returns whether there are bonded interactions
     * assigned to the GPU */
    bool haveInteractions() const;
    /*! \brief Launches the transfer of computed bonded energies. */
    void launchEnergyTransfer();
    /*! \brief Waits on the energy transfer, and accumulates bonded energies to \c enerd. */
    void waitAccumulateEnergyTerms(gmx_enerdata_t* enerd);
    /*! \brief Clears the device side energy buffer */
    void clearEnergies();
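
    /* Typical call sequence, based on the method documentation above
     * (illustrative only; the surrounding variables are hypothetical):
     *
     *   // After each neighbour-search step:
     *   impl.updateInteractionListsAndDeviceBuffers(nbnxnAtomOrder, idef,
     *                                               xqDevice, forceDevice, fshiftDevice);
     *   // On MD steps with bonded work present:
     *   if (impl.haveInteractions())
     *   {
     *       impl.launchKernel<true, true>(fr, box); // calcVir, calcEner
     *       impl.launchEnergyTransfer();
     *       impl.waitAccumulateEnergyTerms(enerd);
     *       impl.clearEnergies();
     *   }
     */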
private:
    /*! \brief The interaction lists
     *
     * \todo This is potentially several pinned allocations, which
     * could contribute to exhausting such pages. */
    std::array<HostInteractionList, F_NRE> iLists_;

    //! Tells whether there are any interactions in iLists.
    bool haveInteractions_;
    //! Interaction lists on the device.
    t_ilist d_iLists_[F_NRE] = {};
    //! Bonded parameters for device-side use.
    t_iparams* d_forceParams_ = nullptr;
    //! Position-charge vector on the device.
    const float4* d_xq_ = nullptr;
    //! Force vector on the device.
    float3* d_f_ = nullptr;
    //! Shift force vector on the device.
    float3* d_fShift_ = nullptr;
    //! \brief Host-side virial buffer
    HostVector<float> vTot_ = { {}, gmx::HostAllocationPolicy(gmx::PinningPolicy::PinnedIfSupported) };
    //! \brief Device-side total virial
    float* d_vTot_ = nullptr;
    //! Dummy GPU context object
    const DeviceContext deviceContext_;
    //! \brief Bonded GPU stream, not owned by this module
    CommandStream stream_;

    //! Parameters and pointers, passed to the CUDA kernel
    BondedCudaKernelParameters kernelParams_;

    //! \brief Pointer to wallcycle structure.
    gmx_wallcycle* wcycle_;
};

} // namespace gmx

#endif // GMX_LISTED_FORCES_GPUBONDED_IMPL_H