/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
 * Copyright (c) 2017,2018,2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief Define OpenCL implementation of nbnxm_gpu.h
 *
 * \author Anca Hamuraru <anca@streamcomputing.eu>
 * \author Teemu Virolainen <teemu@streamcomputing.eu>
 * \author Dimitrios Karkoulis <dimitris.karkoulis@gmail.com>
 * \author Szilárd Páll <pall.szilard@gmail.com>
 * \ingroup module_nbnxm
 *
 * TODO:
 * - Add a static const cl_uint c_pruneKernelWorkDim / c_nbnxnKernelWorkDim = 3;
 * - Rework the copying of OCL data structures done before every invocation of both
 *   nb and prune kernels (using fillin_ocl_structures); also consider at the same
 *   time calling clSetKernelArg only on the updated parameters (if tracking changed
 *   parameters is feasible);
 * - Consider using the event_wait_list argument to clEnqueueNDRangeKernel to mark
 *   dependencies on the kernel launched: e.g. the non-local nb kernel's dependency
 *   on the misc_ops_and_local_H2D_done event could be better expressed this way.
 * - Consider extracting common sections of the OpenCL and CUDA nbnxn logic, e.g.:
 *   - in nbnxn_gpu_launch_kernel_pruneonly() the pre- and post-kernel launch logic
 *     is identical in the two implementations, so a 3-way split might allow sharing
 *     most of that code.
 */
#include "gromacs/gpu_utils/device_context.h"
#include "gromacs/gpu_utils/gputraits_ocl.h"
#include "gromacs/gpu_utils/oclutils.h"
#include "gromacs/hardware/device_information.h"
#include "gromacs/hardware/hw_info.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/nbnxm/atomdata.h"
#include "gromacs/nbnxm/gpu_common.h"
#include "gromacs/nbnxm/gpu_common_utils.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/nbnxm_gpu.h"
#include "gromacs/nbnxm/pairlist.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"

#include "nbnxm_ocl_types.h"

namespace Nbnxm
{
/*! \brief Convenience constants */
static constexpr int c_clSize = c_nbnxnGpuClusterSize;
/*! \brief Validates the input global work size parameter. */
static inline void validate_global_work_size(const KernelLaunchConfig& config,
                                             int                       work_dim,
                                             const DeviceInformation*  dinfo)
{
    cl_uint device_size_t_size_bits;
    cl_uint host_size_t_size_bits;

    GMX_ASSERT(dinfo, "Need a valid device info object");

    size_t global_work_size[3];
    GMX_ASSERT(work_dim <= 3, "Not supporting hyper-grids just yet");
    for (int i = 0; i < work_dim; i++)
    {
        global_work_size[i] = config.blockSize[i] * config.gridSize[i];
    }

    /* Each component of a global_work_size must not exceed the range given by the
       sizeof(device size_t) for the device on which the kernel execution will
       be enqueued. See:
       https://www.khronos.org/registry/cl/sdk/1.0/docs/man/xhtml/clEnqueueNDRangeKernel.html
     */
    device_size_t_size_bits = dinfo->adress_bits;
    host_size_t_size_bits   = static_cast<cl_uint>(sizeof(size_t) * 8);

    /* If sizeof(host size_t) <= sizeof(device size_t)
       => global_work_size components will always be valid;
       else
       => get the device limit for the global work size and
       compare it against each component of global_work_size.
     */
    if (host_size_t_size_bits > device_size_t_size_bits)
    {
        size_t device_limit;

        device_limit = (1ULL << device_size_t_size_bits) - 1;

        for (int i = 0; i < work_dim; i++)
        {
            if (global_work_size[i] > device_limit)
            {
                gmx_fatal(FARGS,
                          "Watch out, the input system is too large to simulate!\n"
                          "The number of nonbonded work units (=number of super-clusters) exceeds "
                          "the device capabilities. Global work size limit exceeded (%zu > %zu)!",
                          global_work_size[i],
                          device_limit);
            }
        }
    }
}
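// Worked check (illustrative, values assumed): a device reporting 32 address
// bits driven from a 64-bit host gets device_limit = (1ULL << 32) - 1 =
// 4294967295. With c_clSize = 8 and a hypothetical pair list of 6.0e8
// super-clusters, global_work_size[0] = 8 * 6.0e8 = 4.8e9 > device_limit,
// so the gmx_fatal() above fires.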
/* Constant arrays listing non-bonded kernel function names. The arrays are
 * organized as 2-dim arrays indexed by electrostatics and VDW type.
 *
 * Note that the row- and column-order of the function names has to match the
 * order of the corresponding enumerated electrostatics and vdw types,
 * respectively, as defined in nbnxm_ocl_types.h.
 */
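// Lookup sketch (illustrative; enumerator names assumed from nbnxm_ocl_types.h):
// a kernel name is selected by casting the enum values to row/column indices, e.g.
//
//     const char* name = nb_kfunc_ener_prune_ptr[static_cast<int>(ElecType::RF)]
//                                               [static_cast<int>(VdwType::CutCombGeom)];
//     // name == "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_opencl"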
/*! \brief Force-only kernel function names. */
static const char* nb_kfunc_noener_noprune_ptr[c_numElecTypes][c_numVdwTypes] = {
    { "nbnxn_kernel_ElecCut_VdwLJ_F_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombLB_F_opencl",
      "nbnxn_kernel_ElecCut_VdwLJFsw_F_opencl",
      "nbnxn_kernel_ElecCut_VdwLJPsw_F_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_F_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombLB_F_opencl",
      "nbnxn_kernel_ElecRF_VdwLJFsw_F_opencl",
      "nbnxn_kernel_ElecRF_VdwLJPsw_F_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_F_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombLB_F_opencl",
      "nbnxn_kernel_ElecEw_VdwLJFsw_F_opencl",
      "nbnxn_kernel_ElecEw_VdwLJPsw_F_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_opencl" }
};
/*! \brief Force + energy kernel function names. */
static const char* nb_kfunc_ener_noprune_ptr[c_numElecTypes][c_numVdwTypes] = {
    { "nbnxn_kernel_ElecCut_VdwLJ_VF_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_opencl",
      "nbnxn_kernel_ElecCut_VdwLJFsw_VF_opencl",
      "nbnxn_kernel_ElecCut_VdwLJPsw_VF_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_VF_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_opencl",
      "nbnxn_kernel_ElecRF_VdwLJFsw_VF_opencl",
      "nbnxn_kernel_ElecRF_VdwLJPsw_VF_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_VF_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_opencl",
      "nbnxn_kernel_ElecEw_VdwLJFsw_VF_opencl",
      "nbnxn_kernel_ElecEw_VdwLJPsw_VF_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_opencl" }
};
/*! \brief Force + pruning kernel function names. */
static const char* nb_kfunc_noener_prune_ptr[c_numElecTypes][c_numVdwTypes] = {
    { "nbnxn_kernel_ElecCut_VdwLJ_F_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_F_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_F_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_opencl" }
};
/*! \brief Force + energy + pruning kernel function names. */
static const char* nb_kfunc_ener_prune_ptr[c_numElecTypes][c_numVdwTypes] = {
    { "nbnxn_kernel_ElecCut_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_opencl" }
};
/*! \brief Return a pointer to the prune kernel version to be executed at the current invocation.
 *
 * \param[in] kernel_pruneonly  array of prune kernel objects
 * \param[in] firstPrunePass    true if the first pruning pass is being executed
 */
static inline cl_kernel selectPruneKernel(cl_kernel kernel_pruneonly[], bool firstPrunePass)
{
    cl_kernel* kernelPtr;

    if (firstPrunePass)
    {
        kernelPtr = &(kernel_pruneonly[epruneFirst]);
    }
    else
    {
        kernelPtr = &(kernel_pruneonly[epruneRolling]);
    }
    // TODO: consider creating the prune kernel object here to avoid a
    // clCreateKernel for the rolling prune kernel if this is not needed.
    return *kernelPtr;
}
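// Selection recap (illustrative): epruneFirst/epruneRolling index the two cached
// prune-kernel objects, so over one pair-list lifetime the calls resolve as
//
//     selectPruneKernel(nb->kernel_pruneonly, true);  // fresh list   -> kernel_pruneonly[epruneFirst]
//     selectPruneKernel(nb->kernel_pruneonly, false); // rolling pass -> kernel_pruneonly[epruneRolling]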
/*! \brief Return a pointer to the kernel version to be executed at the current step.
 *
 * OpenCL kernel objects are cached in nb. If the requested kernel is not
 * found in the cache, it will be created and the cache will be updated.
 */
static inline cl_kernel
select_nbnxn_kernel(NbnxmGpu* nb, enum ElecType elecType, enum VdwType vdwType, bool bDoEne, bool bDoPrune)
{
    const char* kernel_name_to_run;
    cl_kernel*  kernel_ptr;
    cl_int      cl_error;

    const int elecTypeIdx = static_cast<int>(elecType);
    const int vdwTypeIdx  = static_cast<int>(vdwType);

    GMX_ASSERT(elecTypeIdx < c_numElecTypes,
               "The electrostatics type requested is not implemented in the OpenCL kernels.");
    GMX_ASSERT(vdwTypeIdx < c_numVdwTypes,
               "The VdW type requested is not implemented in the OpenCL kernels.");

    if (bDoEne)
    {
        if (bDoPrune)
        {
            kernel_name_to_run = nb_kfunc_ener_prune_ptr[elecTypeIdx][vdwTypeIdx];
            kernel_ptr         = &(nb->kernel_ener_prune_ptr[elecTypeIdx][vdwTypeIdx]);
        }
        else
        {
            kernel_name_to_run = nb_kfunc_ener_noprune_ptr[elecTypeIdx][vdwTypeIdx];
            kernel_ptr         = &(nb->kernel_ener_noprune_ptr[elecTypeIdx][vdwTypeIdx]);
        }
    }
    else
    {
        if (bDoPrune)
        {
            kernel_name_to_run = nb_kfunc_noener_prune_ptr[elecTypeIdx][vdwTypeIdx];
            kernel_ptr         = &(nb->kernel_noener_prune_ptr[elecTypeIdx][vdwTypeIdx]);
        }
        else
        {
            kernel_name_to_run = nb_kfunc_noener_noprune_ptr[elecTypeIdx][vdwTypeIdx];
            kernel_ptr         = &(nb->kernel_noener_noprune_ptr[elecTypeIdx][vdwTypeIdx]);
        }
    }

    if (nullptr == kernel_ptr[0])
    {
        *kernel_ptr = clCreateKernel(nb->dev_rundata->program, kernel_name_to_run, &cl_error);
        GMX_ASSERT(cl_error == CL_SUCCESS,
                   ("clCreateKernel failed: " + ocl_get_error_string(cl_error)
                    + " for kernel named " + kernel_name_to_run)
                           .c_str());
    }

    return *kernel_ptr;
}
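// Caching behavior (illustrative sketch; enumerator names assumed from
// nbnxm_ocl_types.h): the first step that needs a given kernel flavor pays one
// clCreateKernel() call, and later steps reuse the cached cl_kernel object:
//
//     cl_kernel k1 = select_nbnxn_kernel(nb, ElecType::RF, VdwType::CutCombGeom,
//                                        true, false); // creates and caches
//     cl_kernel k2 = select_nbnxn_kernel(nb, ElecType::RF, VdwType::CutCombGeom,
//                                        true, false); // cache hit: k2 == k1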
/*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use. */
static inline int calc_shmem_required_nonbonded(enum VdwType vdwType, bool bPrefetchLjParam)
{
    int shmem;

    /* size of shmem (force-buffers/xq/atom type preloading) */
    /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
    /* i-atom x+q in shared memory */
    shmem = c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(float) * 4; /* xqib */
    /* cj in shared memory, for both warps separately
     * TODO: in the "nowarp" kernels we load cj only once, so the factor 2 is not needed.
     */
    shmem += 2 * c_nbnxnGpuJgroupSize * sizeof(int); /* cjs */
    if (bPrefetchLjParam)
    {
        if (useLjCombRule(vdwType))
        {
            /* i-atom LJ combination parameters in shared memory */
            shmem += c_nbnxnGpuNumClusterPerSupercluster * c_clSize * 2
                     * sizeof(float); /* atib abused for ljcp, float2 */
        }
        else
        {
            /* i-atom types in shared memory */
            shmem += c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(int); /* atib */
        }
    }
    /* force reduction buffers in shared memory */
    shmem += c_clSize * c_clSize * 3 * sizeof(float); /* f_buf */
    /* Warp vote; in fact it must be one uint per warp in the block, i.e. 2 here. */
    shmem += sizeof(cl_uint) * 2; /* warp_any */

    return shmem;
}
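// Worked example (illustrative, assuming the common GPU geometry
// c_nbnxnGpuNumClusterPerSupercluster = 8, c_clSize = 8, c_nbnxnGpuJgroupSize = 4,
// and LJ parameters prefetched with a combination rule in use):
//   xqib:     8 * 8 * 4 * sizeof(float) = 1024 bytes
//   cjs:      2 * 4 * sizeof(int)       =   32 bytes
//   ljcp:     8 * 8 * 2 * sizeof(float) =  512 bytes
//   f_buf:    8 * 8 * 3 * sizeof(float) =  768 bytes
//   warp_any: 2 * sizeof(cl_uint)       =    8 bytes
// for a total of 2344 bytes of local memory per work-group.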
/*! \brief Initializes data structures that are going to be sent to the OpenCL device.
 *
 * The device can't use the same data structures as the host for two main reasons:
 * - OpenCL restrictions (pointers are not accepted inside data structures);
 * - some host-side fields are not needed for the OpenCL kernels.
 *
 * This function is called before the launch of both nbnxn and prune kernels.
 */
static void fillin_ocl_structures(NBParamGpu* nbp, cl_nbparam_params_t* nbparams_params)
{
    nbparams_params->coulomb_tab_scale = nbp->coulomb_tab_scale;
    nbparams_params->c_rf              = nbp->c_rf;
    nbparams_params->dispersion_shift  = nbp->dispersion_shift;
    nbparams_params->elecType          = nbp->elecType;
    nbparams_params->epsfac            = nbp->epsfac;
    nbparams_params->ewaldcoeff_lj     = nbp->ewaldcoeff_lj;
    nbparams_params->ewald_beta        = nbp->ewald_beta;
    nbparams_params->rcoulomb_sq       = nbp->rcoulomb_sq;
    nbparams_params->repulsion_shift   = nbp->repulsion_shift;
    nbparams_params->rlistOuter_sq     = nbp->rlistOuter_sq;
    nbparams_params->rvdw_sq           = nbp->rvdw_sq;
    nbparams_params->rlistInner_sq     = nbp->rlistInner_sq;
    nbparams_params->rvdw_switch       = nbp->rvdw_switch;
    nbparams_params->sh_ewald          = nbp->sh_ewald;
    nbparams_params->sh_lj_ewald       = nbp->sh_lj_ewald;
    nbparams_params->two_k_rf          = nbp->two_k_rf;
    nbparams_params->vdwType           = nbp->vdwType;
    nbparams_params->vdw_switch        = nbp->vdw_switch;
}
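// Rationale sketch (illustrative): cl_nbparam_params_t must stay a flat POD so
// that it can be passed by value as a kernel argument; the hypothetical layout
// below shows the idea (the real definition lives in nbnxm_ocl_types.h):
//
//     typedef struct cl_nbparam_params
//     {
//         int   elecType;   // enums flattened to int, no host pointers and no
//         int   vdwType;    // C++-only types, so the struct is bitwise-copyable
//         float epsfac;     // to the device
//         // ... further scalar parameters mirrored from NBParamGpu
//     } cl_nbparam_params_t;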
/*! \brief Launch GPU kernel
 *
 * As we execute nonbonded workload in separate queues, before launching
 * the kernel we need to make sure that the following operations have completed:
 * - atomdata allocation and related H2D transfers (every nstlist step);
 * - pair list H2D transfer (every nstlist step);
 * - shift vector H2D transfer (every nstlist step);
 * - force (+shift force and energy) output clearing (every step).
 *
 * These operations are issued in the local queue at the beginning of the step
 * and therefore always complete before the local kernel launch. The non-local
 * kernel is launched after the local one on the same device/context, so it is
 * inherently scheduled after the operations in the local stream (including the
 * ones listed above).
 * However, for the sake of having a future-proof implementation, we use the
 * misc_ops_done event to record the point in time when the above operations
 * are finished and synchronize with this event in the non-local stream.
 */
void gpu_launch_kernel(NbnxmGpu* nb, const gmx::StepWorkload& stepWork, const Nbnxm::InteractionLocality iloc)
{
    NBAtomData*         adat         = nb->atdat;
    NBParamGpu*         nbp          = nb->nbparam;
    gpu_plist*          plist        = nb->plist[iloc];
    Nbnxm::GpuTimers*   timers       = nb->timers;
    const DeviceStream& deviceStream = *nb->deviceStreams[iloc];

    bool bDoTime = nb->bDoTime;

    cl_nbparam_params_t nbparams_params;

    /* Don't launch the non-local kernel if there is no work to do.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local kernel and later (not in this function)
       the stream wait, local f copyback and the f buffer clearing.
       All these operations, except for the local interaction kernel,
       are needed for the non-local interactions. The skip of the local
       kernel call is taken care of later in this function. */
    if (canSkipNonbondedWork(*nb, iloc))
    {
        plist->haveFreshList = false;

        return;
    }

    if (nbp->useDynamicPruning && plist->haveFreshList)
    {
        /* Prunes for rlistOuter and rlistInner, sets plist->haveFreshList=false
           (that's the way the timing accounting can distinguish between
           separate prune kernel and combined force+prune).
        */
        Nbnxm::gpu_launch_kernel_pruneonly(nb, iloc, 1);
    }

    if (plist->nsci == 0)
    {
        /* Don't launch an empty local kernel (this is not allowed with OpenCL). */
        return;
    }

    /* beginning of timed nonbonded calculation section */
    if (bDoTime)
    {
        timers->interaction[iloc].nb_k.openTimingRegion(deviceStream);
    }

    /* kernel launch config */
    KernelLaunchConfig config;
    config.sharedMemorySize = calc_shmem_required_nonbonded(nbp->vdwType, nb->bPrefetchLjParam);
    config.blockSize[0]     = c_clSize;
    config.blockSize[1]     = c_clSize;
    config.gridSize[0]      = plist->nsci;

    validate_global_work_size(config, 3, &nb->deviceContext_->deviceInfo());

    if (debug)
    {
        fprintf(debug,
                "Non-bonded GPU launch configuration:\n\tLocal work size: %zux%zux%zu\n\t"
                "Global work size : %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n",
                config.blockSize[0],
                config.blockSize[1],
                config.blockSize[2],
                config.blockSize[0] * config.gridSize[0],
                config.blockSize[1] * config.gridSize[1],
                plist->nsci * c_nbnxnGpuNumClusterPerSupercluster,
                c_nbnxnGpuNumClusterPerSupercluster,
                plist->na_c);
    }

    fillin_ocl_structures(nbp, &nbparams_params);

    auto*          timingEvent  = bDoTime ? timers->interaction[iloc].nb_k.fetchNextEvent() : nullptr;
    constexpr char kernelName[] = "k_calc_nb";
    const auto     kernel =
            select_nbnxn_kernel(nb,
                                nbp->elecType,
                                nbp->vdwType,
                                stepWork.computeEnergy,
                                (plist->haveFreshList && !nb->timers->interaction[iloc].didPrune));

    // The OpenCL kernel takes int as second to last argument because bool is
    // not supported as a kernel argument type (sizeof(bool) is implementation defined).
    const int computeFshift = static_cast<int>(stepWork.computeVirial);
    if (useLjCombRule(nb->nbparam->vdwType))
    {
        const auto kernelArgs = prepareGpuKernelArguments(kernel,
                                                          config,
                                                          &nbparams_params,
                                                          &adat->xq,
                                                          &adat->f,
                                                          &adat->eLJ,
                                                          &adat->eElec,
                                                          &adat->fShift,
                                                          &adat->ljComb,
                                                          &adat->shiftVec,
                                                          &nbp->nbfp,
                                                          &nbp->nbfp_comb,
                                                          &nbp->coulomb_tab,
                                                          &plist->sci,
                                                          &plist->cj4,
                                                          &plist->excl,
                                                          &computeFshift);

        launchGpuKernel(kernel, config, deviceStream, timingEvent, kernelName, kernelArgs);
    }
    else
    {
        const auto kernelArgs = prepareGpuKernelArguments(kernel,
                                                          config,
                                                          &nbparams_params,
                                                          &adat->xq,
                                                          &adat->f,
                                                          &adat->eLJ,
                                                          &adat->eElec,
                                                          &adat->fShift,
                                                          &adat->atomTypes,
                                                          &adat->shiftVec,
                                                          &nbp->nbfp,
                                                          &nbp->nbfp_comb,
                                                          &nbp->coulomb_tab,
                                                          &plist->sci,
                                                          &plist->cj4,
                                                          &plist->excl,
                                                          &computeFshift);

        launchGpuKernel(kernel, config, deviceStream, timingEvent, kernelName, kernelArgs);
    }

    if (bDoTime)
    {
        timers->interaction[iloc].nb_k.closeTimingRegion(deviceStream);
    }
}
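// Launch-geometry sketch (illustrative, assuming c_clSize = 8): the work-group
// is 8 x 8 = 64 work-items (i-cluster x j-cluster atoms, i.e. two 32-wide
// warps/half-wavefronts), and the 1D grid has one work-group per super-cluster:
//
//     blockSize = {8, 8, 1}
//     gridSize  = {plist->nsci, 1, 1}
//
// so a pair list with 10000 sci entries launches 640000 work-items in total.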
/*! \brief Calculates the amount of shared memory required by the prune kernel.
 *
 * Note that for the sake of simplicity we use the CUDA terminology "shared memory"
 * for OpenCL local memory.
 *
 * \param[in] num_threads_z  cj4 concurrency equal to the number of threads/work items in the
 *                           3rd dimension.
 * \returns   the amount of local memory in bytes required by the pruning kernel
 */
static inline int calc_shmem_required_prune(const int num_threads_z)
{
    int shmem;

    /* i-atom x in shared memory (for convenience we load all 4 components including q) */
    shmem = c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(float) * 4;
    /* cj in shared memory, for each warp separately
     * Note: it only needs to be loaded once per wavefront, but to keep the code
     * simple, for now we load it twice on AMD.
     */
    shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);
    /* Warp vote: requires one uint per warp/32 threads per block. */
    shmem += sizeof(cl_uint) * 2 * num_threads_z;

    return shmem;
}
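// Worked example (illustrative, assuming c_nbnxnGpuNumClusterPerSupercluster = 8,
// c_clSize = 8, c_nbnxnGpuClusterpairSplit = 2, c_nbnxnGpuJgroupSize = 4, and
// num_threads_z = 4):
//   x4 prefetch: 8 * 8 * 4 * sizeof(float) = 1024 bytes
//   cj:          4 * 2 * 4 * sizeof(int)   =  128 bytes
//   warp_any:    2 * 4 * sizeof(cl_uint)   =   32 bytes
// for a total of 1184 bytes of local memory per work-group.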
/*! \brief
 * Launch the pairlist prune-only kernel for the given locality.
 * \p numParts tells in how many parts, i.e. calls, the list will be pruned.
 */
void gpu_launch_kernel_pruneonly(NbnxmGpu* nb, const InteractionLocality iloc, const int numParts)
{
    NBAtomData*         adat         = nb->atdat;
    NBParamGpu*         nbp          = nb->nbparam;
    gpu_plist*          plist        = nb->plist[iloc];
    Nbnxm::GpuTimers*   timers       = nb->timers;
    const DeviceStream& deviceStream = *nb->deviceStreams[iloc];
    bool                bDoTime      = nb->bDoTime;

    if (plist->haveFreshList)
    {
        GMX_ASSERT(numParts == 1, "With first pruning we expect 1 part");

        /* Set rollingPruningNumParts to signal that it is not set */
        plist->rollingPruningNumParts = 0;
        plist->rollingPruningPart     = 0;
    }
    else
    {
        if (plist->rollingPruningNumParts == 0)
        {
            plist->rollingPruningNumParts = numParts;
        }
        else
        {
            GMX_ASSERT(numParts == plist->rollingPruningNumParts,
                       "It is not allowed to change numParts in between list generation steps");
        }
    }

    /* Use a local variable for part and update it in plist, so we can return here
     * without duplicating the part increment code.
     */
    int part = plist->rollingPruningPart;

    plist->rollingPruningPart++;
    if (plist->rollingPruningPart >= plist->rollingPruningNumParts)
    {
        plist->rollingPruningPart = 0;
    }

    /* Compute the number of list entries to prune in this pass */
    int numSciInPart = (plist->nsci - part) / numParts;
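    // Scheduling example (illustrative): with plist->nsci = 1001 and numParts = 2,
    // the pass with part = 0 prunes (1001 - 0) / 2 = 500 list entries and the pass
    // with part = 1 prunes (1001 - 1) / 2 = 500, so the rolling passes sweep the
    // whole list across the nstlist interval instead of in one large launch.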
    /* Don't launch the kernel if there is no work to do. */
    if (numSciInPart <= 0)
    {
        plist->haveFreshList = false;

        return;
    }

    GpuRegionTimer* timer = nullptr;
    if (bDoTime)
    {
        timer = &(plist->haveFreshList ? timers->interaction[iloc].prune_k
                                       : timers->interaction[iloc].rollingPrune_k);
    }

    /* beginning of timed prune calculation section */
    if (bDoTime)
    {
        timer->openTimingRegion(deviceStream);
    }

    /* Kernel launch config:
     * - The thread block dimensions match the size of i-clusters, j-clusters,
     *   and j-cluster concurrency, in x, y, and z, respectively.
     * - The 1D block-grid contains as many blocks as super-clusters.
     */
    int num_threads_z = c_pruneKernelJ4Concurrency;
    /* kernel launch config */
    KernelLaunchConfig config;
    config.sharedMemorySize = calc_shmem_required_prune(num_threads_z);
    config.blockSize[0]     = c_clSize;
    config.blockSize[1]     = c_clSize;
    config.blockSize[2]     = num_threads_z;
    config.gridSize[0]      = numSciInPart;

    validate_global_work_size(config, 3, &nb->deviceContext_->deviceInfo());

    if (debug)
    {
        fprintf(debug,
                "Pruning GPU kernel launch configuration:\n\tLocal work size: %zux%zux%zu\n\t"
                "Global work size: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n"
                "\tShMem: %zu\n",
                config.blockSize[0],
                config.blockSize[1],
                config.blockSize[2],
                config.blockSize[0] * config.gridSize[0],
                config.blockSize[1] * config.gridSize[1],
                plist->nsci * c_nbnxnGpuNumClusterPerSupercluster,
                c_nbnxnGpuNumClusterPerSupercluster,
                plist->na_c,
                config.sharedMemorySize);
    }

    cl_nbparam_params_t nbparams_params;
    fillin_ocl_structures(nbp, &nbparams_params);

    auto*          timingEvent  = bDoTime ? timer->fetchNextEvent() : nullptr;
    constexpr char kernelName[] = "k_pruneonly";
    const auto     pruneKernel  = selectPruneKernel(nb->kernel_pruneonly, plist->haveFreshList);
    const auto     kernelArgs   = prepareGpuKernelArguments(pruneKernel,
                                                            config,
                                                            &nbparams_params,
                                                            &adat->xq,
                                                            &adat->shiftVec,
                                                            &plist->sci,
                                                            &plist->cj4,
                                                            &plist->imask,
                                                            &numParts,
                                                            &part);

    launchGpuKernel(pruneKernel, config, deviceStream, timingEvent, kernelName, kernelArgs);

    if (plist->haveFreshList)
    {
        plist->haveFreshList = false;
        /* Mark that pruning has been done */
        nb->timers->interaction[iloc].didPrune = true;
    }
    else
    {
        /* Mark that rolling pruning has been done */
        nb->timers->interaction[iloc].didRollingPrune = true;
    }

    if (bDoTime)
    {
        timer->closeTimingRegion(deviceStream);
    }
}
/*! \brief
 * Launch asynchronously the download of nonbonded forces from the GPU
 * (and energies/shift forces if required).
 */
void gpu_launch_cpyback(NbnxmGpu*                nb,
                        struct nbnxn_atomdata_t* nbatom,
                        const gmx::StepWorkload& stepWork,
                        const AtomLocality       atomLocality)
{
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");

    cl_int gmx_unused cl_error;

    /* determine interaction locality from atom locality */
    const InteractionLocality iloc = gpuAtomToInteractionLocality(atomLocality);
    GMX_ASSERT(iloc == InteractionLocality::Local
                       || (iloc == InteractionLocality::NonLocal && nb->bNonLocalStreamDoneMarked == false),
               "Non-local stream is indicating that the copy back event is enqueued at the "
               "beginning of the copy back function.");

    NBAtomData*         adat         = nb->atdat;
    Nbnxm::GpuTimers*   timers       = nb->timers;
    bool                bDoTime      = nb->bDoTime;
    const DeviceStream& deviceStream = *nb->deviceStreams[iloc];

    /* don't launch non-local copy-back if there was no non-local work to do */
    if ((iloc == InteractionLocality::NonLocal) && !haveGpuShortRangeWork(*nb, iloc))
    {
        /* TODO An alternative way to signal that non-local work is
           complete is to use a clEnqueueMarker+clEnqueueBarrier
           pair. However, the use of bNonLocalStreamDoneMarked has the
           advantage of being local to the host, so it probably minimizes
           overhead. Curiously, for NVIDIA OpenCL with an empty-domain
           test case, overall simulation performance was higher with
           the API calls, but this has not been tested on AMD OpenCL,
           so it could be worth considering in the future. */
        nb->bNonLocalStreamDoneMarked = false;
        return;
    }

    /* local/nonlocal offset and length used for xq and f */
    auto atomsRange = getGpuAtomRange(adat, atomLocality);

    /* beginning of timed D2H section */
    if (bDoTime)
    {
        timers->xf[atomLocality].nb_d2h.openTimingRegion(deviceStream);
    }

    /* With DD the local D2H transfer can only start after the non-local
       one has been launched. */
    if (iloc == InteractionLocality::Local && nb->bNonLocalStreamDoneMarked)
    {
        nb->nonlocal_done.enqueueWaitEvent(deviceStream);
        nb->bNonLocalStreamDoneMarked = false;
    }

    /* DtoH f */
    GMX_ASSERT(sizeof(*nbatom->out[0].f.data()) == sizeof(float),
               "The host force buffer should be in single precision to match device data size.");
    copyFromDeviceBuffer(reinterpret_cast<Float3*>(nbatom->out[0].f.data()) + atomsRange.begin(),
                         &adat->f,
                         atomsRange.begin(),
                         atomsRange.size(),
                         deviceStream,
                         GpuApiCallBehavior::Async,
                         bDoTime ? timers->xf[atomLocality].nb_d2h.fetchNextEvent() : nullptr);

    /* kick off the D2H transfer */
    cl_error = clFlush(deviceStream.stream());
    GMX_ASSERT(cl_error == CL_SUCCESS, ("clFlush failed: " + ocl_get_error_string(cl_error)).c_str());

    /* After the non-local D2H is launched the nonlocal_done event can be
       recorded which signals that the local D2H can proceed. This event is not
       placed after the non-local kernel because we first need the non-local
       data back. */
    if (iloc == InteractionLocality::NonLocal)
    {
        nb->nonlocal_done.markEvent(deviceStream);
        nb->bNonLocalStreamDoneMarked = true;
    }
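    // Ordering recap (illustrative): per step the two streams interleave roughly as
    //   non-local stream: ... nb kernel -> D2H(f non-local) -> markEvent(nonlocal_done)
    //   local stream:     ... nb kernel -> enqueueWaitEvent(nonlocal_done) -> D2H(f local)
    // so the local force download never overtakes the non-local one it depends on.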
    /* only transfer energies in the local stream */
    if (iloc == InteractionLocality::Local)
    {
        /* DtoH fshift when virial is needed */
        if (stepWork.computeVirial)
        {
            static_assert(sizeof(*nb->nbst.fShift) == sizeof(Float3),
                          "Sizes of host- and device-side shift vector elements should be the same.");
            copyFromDeviceBuffer(nb->nbst.fShift,
                                 &adat->fShift,
                                 0,
                                 SHIFTS,
                                 deviceStream,
                                 GpuApiCallBehavior::Async,
                                 bDoTime ? timers->xf[atomLocality].nb_d2h.fetchNextEvent() : nullptr);
        }

        /* DtoH energies */
        if (stepWork.computeEnergy)
        {
            static_assert(sizeof(*nb->nbst.eLJ) == sizeof(float),
                          "Sizes of host- and device-side LJ energy terms should be the same.");
            copyFromDeviceBuffer(nb->nbst.eLJ,
                                 &adat->eLJ,
                                 0,
                                 1,
                                 deviceStream,
                                 GpuApiCallBehavior::Async,
                                 bDoTime ? timers->xf[atomLocality].nb_d2h.fetchNextEvent() : nullptr);
            static_assert(sizeof(*nb->nbst.eElec) == sizeof(float),
                          "Sizes of host- and device-side electrostatic energy terms should be the "
                          "same.");
            copyFromDeviceBuffer(nb->nbst.eElec,
                                 &adat->eElec,
                                 0,
                                 1,
                                 deviceStream,
                                 GpuApiCallBehavior::Async,
                                 bDoTime ? timers->xf[atomLocality].nb_d2h.fetchNextEvent() : nullptr);
        }
    }

    if (bDoTime)
    {
        timers->xf[atomLocality].nb_d2h.closeTimingRegion(deviceStream);
    }
}

} // namespace Nbnxm