/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
 * Copyright (c) 2017,2018,2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *  \brief Define OpenCL implementation of nbnxm_gpu.h
 *
 *  \author Anca Hamuraru <anca@streamcomputing.eu>
 *  \author Teemu Virolainen <teemu@streamcomputing.eu>
 *  \author Dimitrios Karkoulis <dimitris.karkoulis@gmail.com>
 *  \author Szilárd Páll <pall.szilard@gmail.com>
 *  \ingroup module_nbnxm
 *
 *  TODO:
 *  - Add a static const cl_uint c_pruneKernelWorkDim / c_nbnxnKernelWorkDim = 3;
 *  - Rework the copying of OCL data structures done before every invocation of both
 *    nb and prune kernels (using fillin_ocl_structures); also consider at the same
 *    time calling clSetKernelArg only on the updated parameters (if tracking changed
 *    parameters is feasible);
 *  - Consider using the event_wait_list argument to clEnqueueNDRangeKernel to mark
 *    dependencies of the kernel launched: e.g. the non-local nb kernel's dependency
 *    on the misc_ops_and_local_H2D_done event could be better expressed this way.
 *  - Consider extracting common sections of the OpenCL and CUDA nbnxn logic, e.g.:
 *    - in nbnxn_gpu_launch_kernel_pruneonly() the pre- and post-kernel launch logic
 *      is identical in the two implementations, so a 3-way split might allow sharing
 *      most of the code.
 */
#include "gromacs/gpu_utils/device_context.h"
#include "gromacs/gpu_utils/gputraits_ocl.h"
#include "gromacs/gpu_utils/oclutils.h"
#include "gromacs/hardware/device_information.h"
#include "gromacs/hardware/hw_info.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/nbnxm/atomdata.h"
#include "gromacs/nbnxm/gpu_common.h"
#include "gromacs/nbnxm/gpu_common_utils.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/nbnxm_gpu.h"
#include "gromacs/nbnxm/pairlist.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"

#include "nbnxm_ocl_types.h"

namespace Nbnxm
{
/*! \brief Convenience constants */
static constexpr int c_clSize = c_nbnxnGpuClusterSize;
/*! \brief Validates the input global work size parameter.
 */
static inline void validate_global_work_size(const KernelLaunchConfig& config,
                                             int                       work_dim,
                                             const DeviceInformation*  dinfo)
{
    cl_uint device_size_t_size_bits;
    cl_uint host_size_t_size_bits;

    GMX_ASSERT(dinfo, "Need a valid device info object");

    size_t global_work_size[3];
    GMX_ASSERT(work_dim <= 3, "Not supporting hyper-grids just yet");
    for (int i = 0; i < work_dim; i++)
    {
        global_work_size[i] = config.blockSize[i] * config.gridSize[i];
    }

    /* Each component of a global_work_size must not exceed the range given by the
       sizeof(device size_t) for the device on which the kernel execution will
       be enqueued. See:
       https://www.khronos.org/registry/cl/sdk/1.0/docs/man/xhtml/clEnqueueNDRangeKernel.html
     */
    device_size_t_size_bits = dinfo->adress_bits;
    host_size_t_size_bits   = static_cast<cl_uint>(sizeof(size_t) * 8);

    /* If sizeof(host size_t) <= sizeof(device size_t)
     *   => global_work_size components will always be valid
     * else
     *   => get device limit for global work size and
     *      compare it against each component of global_work_size.
     */
    if (host_size_t_size_bits > device_size_t_size_bits)
    {
        size_t device_limit;

        device_limit = (1ULL << device_size_t_size_bits) - 1;

        for (int i = 0; i < work_dim; i++)
        {
            if (global_work_size[i] > device_limit)
            {
                gmx_fatal(
                        FARGS,
                        "Watch out, the input system is too large to simulate!\n"
                        "The number of nonbonded work units (=number of super-clusters) exceeds the "
                        "device capabilities. Global work size limit exceeded (%zu > %zu)!",
                        global_work_size[i],
                        device_limit);
            }
        }
    }
}
/* Constant arrays listing non-bonded kernel function names. The arrays are
 * organized as 2-dimensional arrays indexed by the electrostatics and VDW type.
 *
 * Note that the row- and column-order of function names has to match the
 * order of corresponding enumerated electrostatics and vdw types, resp.,
 * defined in nbnxm_ocl_types.h.
 */
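
/* For example, assuming the enum ordering from nbnxm_ocl_types.h (where, e.g.,
 * ElecType::Cut maps to row 0 and VdwType::LJFsw to column 3), a force-only,
 * non-pruning step would look up
 *   nb_kfunc_noener_noprune_ptr[0][3] == "nbnxn_kernel_ElecCut_VdwLJFsw_F_opencl"
 * from the first table below.
 */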
/*! \brief Force-only kernel function names. */
static const char* nb_kfunc_noener_noprune_ptr[c_numElecTypes][c_numVdwTypes] = {
    { "nbnxn_kernel_ElecCut_VdwLJ_F_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombLB_F_opencl",
      "nbnxn_kernel_ElecCut_VdwLJFsw_F_opencl",
      "nbnxn_kernel_ElecCut_VdwLJPsw_F_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_F_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombLB_F_opencl",
      "nbnxn_kernel_ElecRF_VdwLJFsw_F_opencl",
      "nbnxn_kernel_ElecRF_VdwLJPsw_F_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_F_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombLB_F_opencl",
      "nbnxn_kernel_ElecEw_VdwLJFsw_F_opencl",
      "nbnxn_kernel_ElecEw_VdwLJPsw_F_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_opencl" }
};

/*! \brief Force + energy kernel function names. */
static const char* nb_kfunc_ener_noprune_ptr[c_numElecTypes][c_numVdwTypes] = {
    { "nbnxn_kernel_ElecCut_VdwLJ_VF_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_opencl",
      "nbnxn_kernel_ElecCut_VdwLJFsw_VF_opencl",
      "nbnxn_kernel_ElecCut_VdwLJPsw_VF_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_VF_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_opencl",
      "nbnxn_kernel_ElecRF_VdwLJFsw_VF_opencl",
      "nbnxn_kernel_ElecRF_VdwLJPsw_VF_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_VF_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_opencl",
      "nbnxn_kernel_ElecEw_VdwLJFsw_VF_opencl",
      "nbnxn_kernel_ElecEw_VdwLJPsw_VF_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_opencl" }
};

/*! \brief Force + pruning kernel function names. */
static const char* nb_kfunc_noener_prune_ptr[c_numElecTypes][c_numVdwTypes] = {
    { "nbnxn_kernel_ElecCut_VdwLJ_F_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_F_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_F_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_opencl" }
};

/*! \brief Force + energy + pruning kernel function names. */
static const char* nb_kfunc_ener_prune_ptr[c_numElecTypes][c_numVdwTypes] = {
    { "nbnxn_kernel_ElecCut_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_opencl" }
};

/*! \brief Return a pointer to the prune kernel version to be executed at the current invocation.
 *
 * \param[in] kernel_pruneonly  array of prune kernel objects
 * \param[in] firstPrunePass    true if the first pruning pass is being executed
 */
static inline cl_kernel selectPruneKernel(cl_kernel kernel_pruneonly[], bool firstPrunePass)
{
    cl_kernel* kernelPtr;

    if (firstPrunePass)
    {
        kernelPtr = &(kernel_pruneonly[epruneFirst]);
    }
    else
    {
        kernelPtr = &(kernel_pruneonly[epruneRolling]);
    }
    // TODO: consider creating the prune kernel object here to avoid a
    // clCreateKernel for the rolling prune kernel if this is not needed.
    return *kernelPtr;
}
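
// Illustrative use: right after pair-list (re)generation plist->haveFreshList is
// true and the first-prune variant is selected; subsequent rolling-prune passes
// within the lifetime of the same list select the rolling variant:
//
//     cl_kernel kernel = selectPruneKernel(nb->kernel_pruneonly, plist->haveFreshList);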

/*! \brief Return a pointer to the kernel version to be executed at the current step.
 *  OpenCL kernel objects are cached in nb. If the requested kernel is not
 *  found in the cache, it will be created and the cache will be updated.
 */
static inline cl_kernel
select_nbnxn_kernel(NbnxmGpu* nb, enum ElecType elecType, enum VdwType vdwType, bool bDoEne, bool bDoPrune)
{
    const char* kernel_name_to_run;
    cl_kernel*  kernel_ptr;
    cl_int      cl_error;

    const int elecTypeIdx = static_cast<int>(elecType);
    const int vdwTypeIdx  = static_cast<int>(vdwType);

    GMX_ASSERT(elecTypeIdx < c_numElecTypes,
               "The electrostatics type requested is not implemented in the OpenCL kernels.");
    GMX_ASSERT(vdwTypeIdx < c_numVdwTypes,
               "The VdW type requested is not implemented in the OpenCL kernels.");

    if (bDoEne)
    {
        if (bDoPrune)
        {
            kernel_name_to_run = nb_kfunc_ener_prune_ptr[elecTypeIdx][vdwTypeIdx];
            kernel_ptr         = &(nb->kernel_ener_prune_ptr[elecTypeIdx][vdwTypeIdx]);
        }
        else
        {
            kernel_name_to_run = nb_kfunc_ener_noprune_ptr[elecTypeIdx][vdwTypeIdx];
            kernel_ptr         = &(nb->kernel_ener_noprune_ptr[elecTypeIdx][vdwTypeIdx]);
        }
    }
    else
    {
        if (bDoPrune)
        {
            kernel_name_to_run = nb_kfunc_noener_prune_ptr[elecTypeIdx][vdwTypeIdx];
            kernel_ptr         = &(nb->kernel_noener_prune_ptr[elecTypeIdx][vdwTypeIdx]);
        }
        else
        {
            kernel_name_to_run = nb_kfunc_noener_noprune_ptr[elecTypeIdx][vdwTypeIdx];
            kernel_ptr         = &(nb->kernel_noener_noprune_ptr[elecTypeIdx][vdwTypeIdx]);
        }
    }

    if (nullptr == kernel_ptr[0])
    {
        *kernel_ptr = clCreateKernel(nb->dev_rundata->program, kernel_name_to_run, &cl_error);
        GMX_ASSERT(cl_error == CL_SUCCESS,
                   ("clCreateKernel failed: " + ocl_get_error_string(cl_error)
                    + " for kernel named " + kernel_name_to_run)
                           .c_str());
    }

    return *kernel_ptr;
}

/*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use.
 */
static inline int calc_shmem_required_nonbonded(enum VdwType vdwType, bool bPrefetchLjParam)
{
    int shmem;

    /* size of shmem (force-buffers/xq/atom type preloading) */
    /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
    /* i-atom x+q in shared memory */
    shmem = c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(float) * 4; /* xqib */
    /* cj in shared memory, for both warps separately
     * TODO: in the "nowarp" kernels we load cj only once, so the factor 2 is not needed.
     */
    shmem += 2 * c_nbnxnGpuJgroupSize * sizeof(int); /* cjs */
    if (bPrefetchLjParam)
    {
        if (useLjCombRule(vdwType))
        {
            /* i-atom LJ combination parameters in shared memory */
            shmem += c_nbnxnGpuNumClusterPerSupercluster * c_clSize * 2
                     * sizeof(float); /* atib abused for ljcp, float2 */
        }
        else
        {
            /* i-atom types in shared memory */
            shmem += c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(int); /* atib */
        }
    }
    /* force reduction buffers in shared memory */
    shmem += c_clSize * c_clSize * 3 * sizeof(float); /* f_buf */
    /* Warp vote; in fact it must be multiplied by the number of warps in the block. */
    shmem += sizeof(cl_uint) * 2; /* warp_any */

    return shmem;
}
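
// Worked example (illustrative), assuming the common OpenCL cluster setup with
// c_clSize = 8, c_nbnxnGpuNumClusterPerSupercluster = 8, c_nbnxnGpuJgroupSize = 4,
// LJ parameter prefetching on, and a combination rule in use:
//   xqib:     8 * 8 * 4 * sizeof(float)   = 1024 bytes
//   cjs:      2 * 4     * sizeof(int)     =   32 bytes
//   ljcp:     8 * 8 * 2 * sizeof(float)   =  512 bytes
//   f_buf:    8 * 8 * 3 * sizeof(float)   =  768 bytes
//   warp_any: 2         * sizeof(cl_uint) =    8 bytes
//   total local memory: 2344 bytes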

/*! \brief Initializes data structures that are going to be sent to the OpenCL device.
 *
 * The device can't use the same data structures as the host for two main reasons:
 * - OpenCL restrictions (pointers are not accepted inside data structures)
 * - some host side fields are not needed for the OpenCL kernels.
 *
 * This function is called before the launch of both nbnxn and prune kernels.
 */
static void fillin_ocl_structures(NBParamGpu* nbp, cl_nbparam_params_t* nbparams_params)
{
    nbparams_params->coulomb_tab_scale = nbp->coulomb_tab_scale;
    nbparams_params->c_rf              = nbp->c_rf;
    nbparams_params->dispersion_shift  = nbp->dispersion_shift;
    nbparams_params->elecType          = nbp->elecType;
    nbparams_params->epsfac            = nbp->epsfac;
    nbparams_params->ewaldcoeff_lj     = nbp->ewaldcoeff_lj;
    nbparams_params->ewald_beta        = nbp->ewald_beta;
    nbparams_params->rcoulomb_sq       = nbp->rcoulomb_sq;
    nbparams_params->repulsion_shift   = nbp->repulsion_shift;
    nbparams_params->rlistOuter_sq     = nbp->rlistOuter_sq;
    nbparams_params->rvdw_sq           = nbp->rvdw_sq;
    nbparams_params->rlistInner_sq     = nbp->rlistInner_sq;
    nbparams_params->rvdw_switch       = nbp->rvdw_switch;
    nbparams_params->sh_ewald          = nbp->sh_ewald;
    nbparams_params->sh_lj_ewald       = nbp->sh_lj_ewald;
    nbparams_params->two_k_rf          = nbp->two_k_rf;
    nbparams_params->vdwType           = nbp->vdwType;
    nbparams_params->vdw_switch        = nbp->vdw_switch;
}
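
// Illustrative call pattern (a sketch, not additional functionality): the flat
// struct produced here contains only plain scalars/enums, so it can be passed
// to the kernels by value, e.g.:
//
//     cl_nbparam_params_t params;
//     fillin_ocl_structures(nbp, &params);
//     clSetKernelArg(kernel, argIndex, sizeof(params), &params);
//
// (kernel and argIndex are placeholders here), which is what makes it usable
// where the pointer-containing host-side NBParamGpu is not.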

void nbnxnInsertNonlocalGpuDependency(NbnxmGpu* nb, const InteractionLocality interactionLocality)
{
    const DeviceStream& deviceStream = *nb->deviceStreams[interactionLocality];

    /* When we get here all misc operations issued in the local stream as well as
       the local xq H2D are done,
       so we record that in the local stream and wait for it in the nonlocal one.
       This wait needs to precede any PP tasks, bonded or nonbonded, that may
       compute on interactions between local and nonlocal atoms.
     */
    if (nb->bUseTwoStreams)
    {
        if (interactionLocality == InteractionLocality::Local)
        {
            nb->misc_ops_and_local_H2D_done.markEvent(deviceStream);

            /* Based on the v1.2 section 5.13 of the OpenCL spec, a flush is needed
             * in the local stream in order to be able to sync with the above event
             * from the non-local stream.
             */
            cl_int gmx_used_in_debug cl_error = clFlush(deviceStream.stream());
            GMX_ASSERT(cl_error == CL_SUCCESS,
                       ("clFlush failed: " + ocl_get_error_string(cl_error)).c_str());
        }
        else
        {
            nb->misc_ops_and_local_H2D_done.enqueueWaitEvent(deviceStream);
        }
    }
}
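
// Resulting ordering when two streams are in use (illustrative timeline):
//
//   local stream:      [misc ops][local xq H2D][markEvent E][clFlush] ...
//   non-local stream:                           [wait on E][non-local work] ...
//
// so no non-local task that reads local/non-local interaction data can start
// before the local-stream operations recorded in E have completed.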

/*! \brief Launch asynchronously the xq buffer host to device copy. */
void gpu_copy_xq_to_gpu(NbnxmGpu* nb, const nbnxn_atomdata_t* nbatom, const AtomLocality atomLocality)
{
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");

    const InteractionLocality iloc = gpuAtomToInteractionLocality(atomLocality);

    NBAtomData*         adat         = nb->atdat;
    gpu_plist*          plist        = nb->plist[iloc];
    cl_timers_t*        t            = nb->timers;
    const DeviceStream& deviceStream = *nb->deviceStreams[iloc];

    bool bDoTime = nb->bDoTime;

    /* Don't launch the non-local H2D copy if there is no dependent
       work to do: neither non-local nor other (e.g. bonded) work
       to do that has as input the nbnxn coordinates.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local x+q copy (and the rest of the local
       work in nbnxn_gpu_launch_kernel()).
     */
    if ((iloc == InteractionLocality::NonLocal) && !haveGpuShortRangeWork(*nb, iloc))
    {
        plist->haveFreshList = false;

        // The event is marked for Local interactions unconditionally,
        // so it has to be released here because of the early return
        // for NonLocal interactions.
        nb->misc_ops_and_local_H2D_done.reset();

        return;
    }

    /* local/nonlocal offset and length used for xq and f */
    auto atomsRange = getGpuAtomRange(adat, atomLocality);

    /* beginning of timed HtoD section */
    if (bDoTime)
    {
        t->xf[atomLocality].nb_h2d.openTimingRegion(deviceStream);
    }

    /* HtoD x, q */
    static_assert(sizeof(float) == sizeof(*nbatom->x().data()),
                  "The size of the xyzq buffer element should be equal to the size of float4.");
    copyToDeviceBuffer(&adat->xq,
                       reinterpret_cast<const Float4*>(nbatom->x().data()) + atomsRange.begin(),
                       atomsRange.begin(),
                       atomsRange.size(),
                       deviceStream,
                       GpuApiCallBehavior::Async,
                       bDoTime ? t->xf[atomLocality].nb_h2d.fetchNextEvent() : nullptr);

    if (bDoTime)
    {
        t->xf[atomLocality].nb_h2d.closeTimingRegion(deviceStream);
    }

    /* When we get here all misc operations issued in the local stream as well as
       the local xq H2D are done,
       so we record that in the local stream and wait for it in the nonlocal one.
       This wait needs to precede any PP tasks, bonded or nonbonded, that may
       compute on interactions between local and nonlocal atoms.
     */
    nbnxnInsertNonlocalGpuDependency(nb, iloc);
}

/*! \brief Launch GPU kernel

   As we execute nonbonded workload in separate queues, before launching
   the kernel we need to make sure that the following operations have completed:
   - atomdata allocation and related H2D transfers (every nstlist step);
   - pair list H2D transfer (every nstlist step);
   - shift vector H2D transfer (every nstlist step);
   - force (+shift force and energy) output clearing (every step).

   These operations are issued in the local queue at the beginning of the step
   and therefore always complete before the local kernel launch. The non-local
   kernel is launched after the local on the same device/context, so this is
   inherently scheduled after the operations in the local stream (including the
   above "misc_ops_done").
   However, for the sake of having a future-proof implementation, we use the
   misc_ops_done event to record the point in time when the above operations
   are finished and synchronize with this event in the non-local stream.
 */
void gpu_launch_kernel(NbnxmGpu* nb, const gmx::StepWorkload& stepWork, const Nbnxm::InteractionLocality iloc)
{
    NBAtomData*         adat         = nb->atdat;
    NBParamGpu*         nbp          = nb->nbparam;
    gpu_plist*          plist        = nb->plist[iloc];
    cl_timers_t*        t            = nb->timers;
    const DeviceStream& deviceStream = *nb->deviceStreams[iloc];

    bool bDoTime = nb->bDoTime;

    cl_nbparam_params_t nbparams_params;

    /* Don't launch the non-local kernel if there is no work to do.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local kernel and later (not in
       this function) the stream wait, local f copyback and the f buffer
       clearing. All these operations, except for the local interaction kernel,
       are needed for the non-local interactions. The skip of the local kernel
       call is taken care of later in this function. */
    if (canSkipNonbondedWork(*nb, iloc))
    {
        plist->haveFreshList = false;

        return;
    }

    if (nbp->useDynamicPruning && plist->haveFreshList)
    {
        /* Prunes for rlistOuter and rlistInner, sets plist->haveFreshList=false
           (that's the way the timing accounting can distinguish between
           separate prune kernel and combined force+prune).
         */
        Nbnxm::gpu_launch_kernel_pruneonly(nb, iloc, 1);
    }

    if (plist->nsci == 0)
    {
        /* Don't launch an empty local kernel (not allowed with OpenCL). */
        return;
    }

    /* beginning of timed nonbonded calculation section */
    if (bDoTime)
    {
        t->interaction[iloc].nb_k.openTimingRegion(deviceStream);
    }

    /* kernel launch config */
    KernelLaunchConfig config;
    config.sharedMemorySize = calc_shmem_required_nonbonded(nbp->vdwType, nb->bPrefetchLjParam);
    config.blockSize[0]     = c_clSize;
    config.blockSize[1]     = c_clSize;
    config.gridSize[0]      = plist->nsci;

    validate_global_work_size(config, 3, &nb->deviceContext_->deviceInfo());

    if (debug)
    {
        fprintf(debug,
                "Non-bonded GPU launch configuration:\n\tLocal work size: %zux%zux%zu\n\t"
                "Global work size : %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n",
                config.blockSize[0],
                config.blockSize[1],
                config.blockSize[2],
                config.blockSize[0] * config.gridSize[0],
                config.blockSize[1] * config.gridSize[1],
                plist->nsci * c_nbnxnGpuNumClusterPerSupercluster,
                c_nbnxnGpuNumClusterPerSupercluster,
                plist->na_c);
    }

    fillin_ocl_structures(nbp, &nbparams_params);

    auto*          timingEvent  = bDoTime ? t->interaction[iloc].nb_k.fetchNextEvent() : nullptr;
    constexpr char kernelName[] = "k_calc_nb";
    const auto     kernel =
            select_nbnxn_kernel(nb,
                                nbp->elecType,
                                nbp->vdwType,
                                stepWork.computeEnergy,
                                (plist->haveFreshList && !nb->timers->interaction[iloc].didPrune));

    // The OpenCL kernel takes int as second to last argument because bool is
    // not supported as a kernel argument type (sizeof(bool) is implementation defined).
    const int computeFshift = static_cast<int>(stepWork.computeVirial);
    if (useLjCombRule(nb->nbparam->vdwType))
    {
        const auto kernelArgs = prepareGpuKernelArguments(kernel,
                                                          config,
                                                          /* ... */);

        launchGpuKernel(kernel, config, deviceStream, timingEvent, kernelName, kernelArgs);
    }
    else
    {
        const auto kernelArgs = prepareGpuKernelArguments(kernel,
                                                          config,
                                                          /* ... */);

        launchGpuKernel(kernel, config, deviceStream, timingEvent, kernelName, kernelArgs);
    }

    if (bDoTime)
    {
        t->interaction[iloc].nb_k.closeTimingRegion(deviceStream);
    }
}
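
// Launch-geometry sketch (illustrative): with c_clSize = 8 and plist->nsci = 5000,
// gpu_launch_kernel() above enqueues the nonbonded kernel with a local work size
// of 8x8x1 (64 work items per super-cluster) and a global work size of
// 40000x8x1, i.e. a 1D grid with one work group per super-cluster.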

/*! \brief Calculates the amount of shared memory required by the prune kernel.
 *
 *  Note that for the sake of simplicity we use the CUDA terminology "shared memory"
 *  for OpenCL local memory.
 *
 * \param[in] num_threads_z  cj4 concurrency equal to the number of threads/work items in the
 *                           3rd dimension.
 * \returns   the amount of local memory in bytes required by the pruning kernel
 */
static inline int calc_shmem_required_prune(const int num_threads_z)
{
    int shmem;

    /* i-atom x in shared memory (for convenience we load all 4 components including q) */
    shmem = c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(float) * 4;
    /* cj in shared memory, for each warp separately
     * Note: only need to load once per wavefront, but to keep the code simple,
     * for now we load twice on AMD.
     */
    shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);
    /* Warp vote, requires one uint per warp/32 threads per block. */
    shmem += sizeof(cl_uint) * 2 * num_threads_z;

    return shmem;
}
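
// Worked example (illustrative), assuming c_clSize = 8,
// c_nbnxnGpuNumClusterPerSupercluster = 8, c_nbnxnGpuClusterpairSplit = 2,
// c_nbnxnGpuJgroupSize = 4, and num_threads_z = 4:
//   xib:      8 * 8 * 4 * sizeof(float)   = 1024 bytes
//   cj:       4 * 2 * 4 * sizeof(int)     =  128 bytes
//   warp_any: 2 * 4     * sizeof(cl_uint) =   32 bytes
//   total local memory: 1184 bytes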

/*! \brief
 * Launch the pairlist prune only kernel for the given locality.
 * \p numParts tells in how many parts, i.e. calls, the list will be pruned.
 */
void gpu_launch_kernel_pruneonly(NbnxmGpu* nb, const InteractionLocality iloc, const int numParts)
{
    NBAtomData*         adat         = nb->atdat;
    NBParamGpu*         nbp          = nb->nbparam;
    gpu_plist*          plist        = nb->plist[iloc];
    cl_timers_t*        t            = nb->timers;
    const DeviceStream& deviceStream = *nb->deviceStreams[iloc];
    bool                bDoTime      = nb->bDoTime;

    if (plist->haveFreshList)
    {
        GMX_ASSERT(numParts == 1, "With first pruning we expect 1 part");

        /* Set rollingPruningNumParts to signal that it is not set */
        plist->rollingPruningNumParts = 0;
        plist->rollingPruningPart     = 0;
    }
    else
    {
        if (plist->rollingPruningNumParts == 0)
        {
            plist->rollingPruningNumParts = numParts;
        }
        else
        {
            GMX_ASSERT(numParts == plist->rollingPruningNumParts,
                       "It is not allowed to change numParts in between list generation steps");
        }
    }

    /* Use a local variable for part and update in plist, so we can return here
     * without duplicating the part increment code.
     */
    int part = plist->rollingPruningPart;

    plist->rollingPruningPart++;
    if (plist->rollingPruningPart >= plist->rollingPruningNumParts)
    {
        plist->rollingPruningPart = 0;
    }

    /* Compute the number of list entries to prune in this pass */
    int numSciInPart = (plist->nsci - part) / numParts;

    /* Don't launch the kernel if there is no work to do. */
    if (numSciInPart <= 0)
    {
        plist->haveFreshList = false;

        return;
    }
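
    // Worked example (illustrative): with plist->nsci = 1000 and numParts = 4,
    // the four rolling passes prune numSciInPart = (1000 - part) / 4 list
    // entries, i.e. 250, 249, 249, and 249 entries for parts 0..3, each pass
    // covering a different slice of the sci list.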

    GpuRegionTimer* timer = nullptr;
    if (bDoTime)
    {
        timer = &(plist->haveFreshList ? t->interaction[iloc].prune_k : t->interaction[iloc].rollingPrune_k);
    }

    /* beginning of timed prune calculation section */
    if (bDoTime)
    {
        timer->openTimingRegion(deviceStream);
    }

    /* Kernel launch config:
     * - The thread block dimensions match the size of i-clusters, j-clusters,
     *   and j-cluster concurrency, in x, y, and z, respectively.
     * - The 1D block-grid contains as many blocks as super-clusters.
     */
    int num_threads_z = c_pruneKernelJ4Concurrency;

    /* kernel launch config */
    KernelLaunchConfig config;
    config.sharedMemorySize = calc_shmem_required_prune(num_threads_z);
    config.blockSize[0]     = c_clSize;
    config.blockSize[1]     = c_clSize;
    config.blockSize[2]     = num_threads_z;
    config.gridSize[0]      = numSciInPart;

    validate_global_work_size(config, 3, &nb->deviceContext_->deviceInfo());

    if (debug)
    {
        fprintf(debug,
                "Pruning GPU kernel launch configuration:\n\tLocal work size: %zux%zux%zu\n\t"
                "\tGlobal work size: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n"
                "\tShMem: %zu\n",
                config.blockSize[0],
                config.blockSize[1],
                config.blockSize[2],
                config.blockSize[0] * config.gridSize[0],
                config.blockSize[1] * config.gridSize[1],
                plist->nsci * c_nbnxnGpuNumClusterPerSupercluster,
                c_nbnxnGpuNumClusterPerSupercluster,
                plist->na_c,
                config.sharedMemorySize);
    }

    cl_nbparam_params_t nbparams_params;
    fillin_ocl_structures(nbp, &nbparams_params);

    auto*          timingEvent  = bDoTime ? timer->fetchNextEvent() : nullptr;
    constexpr char kernelName[] = "k_pruneonly";
    const auto     pruneKernel  = selectPruneKernel(nb->kernel_pruneonly, plist->haveFreshList);
    const auto     kernelArgs   = prepareGpuKernelArguments(pruneKernel,
                                                            config,
                                                            /* ... */);

    launchGpuKernel(pruneKernel, config, deviceStream, timingEvent, kernelName, kernelArgs);

    if (plist->haveFreshList)
    {
        plist->haveFreshList = false;
        /* Mark that pruning has been done */
        nb->timers->interaction[iloc].didPrune = true;
    }
    else
    {
        /* Mark that rolling pruning has been done */
        nb->timers->interaction[iloc].didRollingPrune = true;
    }

    if (bDoTime)
    {
        timer->closeTimingRegion(deviceStream);
    }
}

/*! \brief
 * Launch asynchronously the download of nonbonded forces from the GPU
 * (and energies/shift forces if required).
 */
void gpu_launch_cpyback(NbnxmGpu*                nb,
                        struct nbnxn_atomdata_t* nbatom,
                        const gmx::StepWorkload& stepWork,
                        const AtomLocality       atomLocality)
{
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");

    cl_int gmx_unused cl_error;

    /* determine interaction locality from atom locality */
    const InteractionLocality iloc = gpuAtomToInteractionLocality(atomLocality);
    GMX_ASSERT(iloc == InteractionLocality::Local
                       || (iloc == InteractionLocality::NonLocal && nb->bNonLocalStreamDoneMarked == false),
               "Non-local stream is indicating that the copy back event is enqueued at the "
               "beginning of the copy back function.");

    NBAtomData*         adat         = nb->atdat;
    cl_timers_t*        t            = nb->timers;
    bool                bDoTime      = nb->bDoTime;
    const DeviceStream& deviceStream = *nb->deviceStreams[iloc];

    /* don't launch non-local copy-back if there was no non-local work to do */
    if ((iloc == InteractionLocality::NonLocal) && !haveGpuShortRangeWork(*nb, iloc))
    {
        /* TODO An alternative way to signal that non-local work is
           complete is to use a clEnqueueMarker+clEnqueueBarrier
           pair. However, the use of bNonLocalStreamDoneMarked has the
           advantage of being local to the host, so probably minimizes
           overhead. Curiously, for NVIDIA OpenCL with an empty-domain
           test case, overall simulation performance was higher with
           the API calls, but this has not been tested on AMD OpenCL,
           so could be worth considering in future. */
        nb->bNonLocalStreamDoneMarked = false;
        return;
    }

    /* local/nonlocal offset and length used for xq and f */
    auto atomsRange = getGpuAtomRange(adat, atomLocality);

    /* beginning of timed D2H section */
    if (bDoTime)
    {
        t->xf[atomLocality].nb_d2h.openTimingRegion(deviceStream);
    }

    /* With DD the local D2H transfer can only start after the non-local
       has been launched. */
    if (iloc == InteractionLocality::Local && nb->bNonLocalStreamDoneMarked)
    {
        nb->nonlocal_done.enqueueWaitEvent(deviceStream);
        nb->bNonLocalStreamDoneMarked = false;
    }

    /* DtoH f */
    GMX_ASSERT(sizeof(*nbatom->out[0].f.data()) == sizeof(float),
               "The host force buffer should be in single precision to match device data size.");
    copyFromDeviceBuffer(reinterpret_cast<Float3*>(nbatom->out[0].f.data()) + atomsRange.begin(),
                         &adat->f,
                         atomsRange.begin(),
                         atomsRange.size(),
                         deviceStream,
                         GpuApiCallBehavior::Async,
                         bDoTime ? t->xf[atomLocality].nb_d2h.fetchNextEvent() : nullptr);

    /* kick off work */
    cl_error = clFlush(deviceStream.stream());
    GMX_ASSERT(cl_error == CL_SUCCESS, ("clFlush failed: " + ocl_get_error_string(cl_error)).c_str());

    /* After the non-local D2H is launched the nonlocal_done event can be
       recorded which signals that the local D2H can proceed. This event is not
       placed after the non-local kernel because we first need the non-local
       data back. */
    if (iloc == InteractionLocality::NonLocal)
    {
        nb->nonlocal_done.markEvent(deviceStream);
        nb->bNonLocalStreamDoneMarked = true;
    }

    /* only transfer energies in the local stream */
    if (iloc == InteractionLocality::Local)
    {
        /* DtoH fshift when virial is needed */
        if (stepWork.computeVirial)
        {
            static_assert(sizeof(*nb->nbst.fShift) == sizeof(Float3),
                          "Sizes of host- and device-side shift vector elements should be the same.");
            copyFromDeviceBuffer(nb->nbst.fShift,
                                 &adat->fShift,
                                 0,
                                 SHIFTS,
                                 deviceStream,
                                 GpuApiCallBehavior::Async,
                                 bDoTime ? t->xf[atomLocality].nb_d2h.fetchNextEvent() : nullptr);
        }

        /* DtoH energies */
        if (stepWork.computeEnergy)
        {
            static_assert(sizeof(*nb->nbst.eLJ) == sizeof(float),
                          "Sizes of host- and device-side LJ energy terms should be the same.");
            copyFromDeviceBuffer(nb->nbst.eLJ,
                                 &adat->eLJ,
                                 0,
                                 1,
                                 deviceStream,
                                 GpuApiCallBehavior::Async,
                                 bDoTime ? t->xf[atomLocality].nb_d2h.fetchNextEvent() : nullptr);
            static_assert(sizeof(*nb->nbst.eElec) == sizeof(float),
                          "Sizes of host- and device-side electrostatic energy terms should be the "
                          "same.");
            copyFromDeviceBuffer(nb->nbst.eElec,
                                 &adat->eElec,
                                 0,
                                 1,
                                 deviceStream,
                                 GpuApiCallBehavior::Async,
                                 bDoTime ? t->xf[atomLocality].nb_d2h.fetchNextEvent() : nullptr);
        }
    }

    if (bDoTime)
    {
        t->xf[atomLocality].nb_d2h.closeTimingRegion(deviceStream);
    }
}

} // namespace Nbnxm