/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2012, The GROMACS development team.
 * Copyright (c) 2013-2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief
 * Data types used internally in the nbnxn_cuda module.
 *
 * \author Szilárd Páll <pall.szilard@gmail.com>
 * \ingroup module_nbnxm
 */

#ifndef NBNXM_CUDA_TYPES_H
#define NBNXM_CUDA_TYPES_H

#include "gromacs/gpu_utils/cuda_arch_utils.cuh"
#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/devicebuffer.h"
#include "gromacs/gpu_utils/devicebuffer_datatype.h"
#include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
#include "gromacs/gpu_utils/gputraits.cuh"
#include "gromacs/mdtypes/interaction_const.h"
#include "gromacs/nbnxm/gpu_types_common.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/pairlist.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/enumerationhelpers.h"

/*! \brief Macro defining default for the prune kernel's j4 processing concurrency.
 *
 * The GMX_NBNXN_PRUNE_KERNEL_J4_CONCURRENCY macro allows compile-time override.
 */
#ifndef GMX_NBNXN_PRUNE_KERNEL_J4_CONCURRENCY
#    define GMX_NBNXN_PRUNE_KERNEL_J4_CONCURRENCY 4
#endif

/*! \brief Default for the prune kernel's j4 processing concurrency.
 *
 * Initialized using the #GMX_NBNXN_PRUNE_KERNEL_J4_CONCURRENCY macro which allows compile-time override.
 */
const int c_cudaPruneKernelJ4Concurrency = GMX_NBNXN_PRUNE_KERNEL_J4_CONCURRENCY;
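
/* Example of the compile-time override (illustrative; the value 2 is
 * arbitrary): defining the macro on the compiler command line changes the
 * default concurrency for the whole module:
 *
 * \code
 *   nvcc -DGMX_NBNXN_PRUNE_KERNEL_J4_CONCURRENCY=2 ...
 * \endcode
 *
 * after which c_cudaPruneKernelJ4Concurrency evaluates to 2 everywhere.
 */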

/* TODO: consider moving this to kernel_utils */
/* Convenience defines */
/*! \brief cluster size = number of atoms per cluster. */
static constexpr int c_clSize = c_nbnxnGpuClusterSize;
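
/* Note (assumption, based on how the cluster-pair kernels are organized, not
 * a definition from this header): the nonbonded CUDA kernels are typically
 * launched with c_clSize x c_clSize threads per block, one thread per
 * (i-atom, j-atom) pair within a cluster pair:
 *
 * \code
 *   dim3 blockSize(c_clSize, c_clSize, 1); // hypothetical launch geometry
 * \endcode
 */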

/* All structs prefixed with "cu_" hold data used in GPU calculations and
 * are passed to the kernels, except cu_timers_t. */

typedef struct cu_atomdata cu_atomdata_t;

/** \internal
 * \brief Staging area for temporary data downloaded from the GPU.
 *
 * The energies/shift forces get downloaded here first, before getting added
 * to the CPU-side aggregate values.
 */
struct nb_staging_t
{
    //! LJ energy
    float* e_lj = nullptr;
    //! electrostatic energy
    float* e_el = nullptr;
    //! shift forces
    float3* fshift = nullptr;
};
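
/* Illustrative sketch of the intended staging flow (variable names here are
 * hypothetical; real code would go through the devicebuffer.h transfer
 * helpers): energies are copied D2H into the staging pointers and, only
 * after synchronization, folded into the CPU-side accumulators.
 *
 * \code
 *   // In CUDA builds DeviceBuffer<float> is a plain device pointer, so a raw
 *   // async copy is enough to illustrate the idea:
 *   cudaMemcpyAsync(nbst.e_lj, adat->e_lj, sizeof(float), cudaMemcpyDeviceToHost, stream);
 *   cudaMemcpyAsync(nbst.e_el, adat->e_el, sizeof(float), cudaMemcpyDeviceToHost, stream);
 *   cudaStreamSynchronize(stream);
 *   *enerLjTotal += *nbst.e_lj; // add to the CPU-side aggregate values
 *   *enerElTotal += *nbst.e_el;
 * \endcode
 */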

/** \internal
 * \brief Nonbonded atom data - both inputs and outputs.
 */
struct cu_atomdata
{
    //! number of atoms
    int natoms;
    //! number of local atoms
    int natoms_local;
    //! allocation size for the atom data (xq, f)
    int nalloc;

    //! atom coordinates + charges, size natoms
    DeviceBuffer<float4> xq;
    //! force output array, size natoms
    DeviceBuffer<float3> f;

    //! LJ energy output, size 1
    DeviceBuffer<float> e_lj;
    //! Electrostatics energy output, size 1
    DeviceBuffer<float> e_el;

    //! shift forces
    DeviceBuffer<float3> fshift;

    //! number of atom types
    int ntypes;
    //! atom type indices, size natoms
    DeviceBuffer<int> atom_types;
    //! sqrt(c6),sqrt(c12), size natoms
    DeviceBuffer<float2> lj_comb;
    //! shift vectors
    DeviceBuffer<float3> shift_vec;
    //! true if the shift vector has been uploaded
    bool bShiftVecUploaded;
};
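
/* Illustrative kernel-side sketch (hypothetical variable names): xq packs
 * coordinates and charge into one float4 per atom, so both are fetched with
 * a single load:
 *
 * \code
 *   const float4 xqbuf = atdat.xq[ai];  // ai: global atom index
 *   const float3 xi    = make_float3(xqbuf.x, xqbuf.y, xqbuf.z);
 *   const float  qi    = xqbuf.w;       // charge rides in the .w component
 * \endcode
 */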

/** \internal
 * \brief Typedef of actual timer type.
 */
typedef struct Nbnxm::gpu_timers_t cu_timers_t;

/** \internal
 * \brief Main data structure for CUDA nonbonded force calculations.
 */
struct NbnxmGpu
{
    /*! \brief GPU device context.
     *
     * \todo Make it constant reference, once NbnxmGpu is a proper class.
     */
    const DeviceContext* deviceContext_;
    /*! \brief true if doing both local/non-local NB work on GPU */
    bool bUseTwoStreams = false;
    //! true indicates that the nonlocal_done event was marked
    bool bNonLocalStreamDoneMarked = false;

    /*! \brief atom data */
    cu_atomdata_t* atdat = nullptr;
    /*! \brief array of atom indices */
    int* atomIndices = nullptr;
    /*! \brief size of atom indices */
    int atomIndicesSize = 0;
    /*! \brief size of atom indices allocated in device buffer */
    int atomIndicesSize_alloc = 0;
    /*! \brief x buf ops num of atoms */
    int* cxy_na = nullptr;
    /*! \brief number of elements in cxy_na */
    int ncxy_na = 0;
    /*! \brief number of elements allocated in device buffer */
    int ncxy_na_alloc = 0;
    /*! \brief x buf ops cell index mapping */
    int* cxy_ind = nullptr;
    /*! \brief number of elements in cxy_ind */
    int ncxy_ind = 0;
    /*! \brief number of elements allocated in device buffer */
    int ncxy_ind_alloc = 0;

    /*! \brief parameters required for the non-bonded calc. */
    NBParamGpu* nbparam = nullptr;
    /*! \brief pair-list data structures (local and non-local) */
    gmx::EnumerationArray<Nbnxm::InteractionLocality, Nbnxm::gpu_plist*> plist = { { nullptr } };
    /*! \brief staging area where fshift/energies get downloaded */
    nb_staging_t nbst;
    /*! \brief local and non-local GPU streams */
    gmx::EnumerationArray<Nbnxm::InteractionLocality, const DeviceStream*> deviceStreams;

    /*! \brief Event triggered when the non-local non-bonded
     * kernel is done (and the local transfer can proceed) */
    GpuEventSynchronizer nonlocal_done;
    /*! \brief Event triggered when the tasks issued in the local
     * stream that need to precede the non-local force or buffer
     * operation calculations are done (e.g. f buffer 0-ing, local
     * x/q H2D, buffer op initialization in local stream that is
     * required also by nonlocal stream) */
    GpuEventSynchronizer misc_ops_and_local_H2D_done;

    /*! \brief True if there is work for the current domain in the
     * respective locality.
     *
     * This includes local/nonlocal GPU work, either bonded or
     * nonbonded, scheduled to be executed in the current
     * domain. As long as bonded work is not split up into
     * local/nonlocal, if there is bonded GPU work, both flags
     * will be true. */
    gmx::EnumerationArray<Nbnxm::InteractionLocality, bool> haveWork = { { false } };

    /* NOTE: With current CUDA versions (<=5.0) timing doesn't work with multiple
     * concurrent streams, so we won't time if both l/nl work is done on GPUs.
     * Timer init/uninit is still done even with timing off so only the condition
     * setting bDoTime needs to be changed if this CUDA "feature" gets fixed. */
    /*! \brief True if event-based timing is enabled. */
    bool bDoTime = false;
    /*! \brief CUDA event-based timers. */
    cu_timers_t* timers = nullptr;
    /*! \brief Timing data. TODO: deprecate this and query timers for accumulated data instead */
    gmx_wallclock_gpu_nbnxn_t* timings = nullptr;
};
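
/* Illustrative sketch (hypothetical helper, not part of this header): the
 * EnumerationArray members are indexed directly by interaction locality,
 * which keeps the local/non-local bookkeeping uniform at the call site:
 *
 * \code
 *   static bool haveGpuWork(const NbnxmGpu& nb, Nbnxm::InteractionLocality iloc)
 *   {
 *       return nb.haveWork[iloc]; // same pattern applies to plist and deviceStreams
 *   }
 * \endcode
 */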

#endif /* NBNXM_CUDA_TYPES_H */