/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
 * Copyright (c) 2017,2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *  \brief Define OpenCL implementation of nbnxm_gpu.h
 *
 *  \author Anca Hamuraru <anca@streamcomputing.eu>
 *  \author Teemu Virolainen <teemu@streamcomputing.eu>
 *  \author Dimitrios Karkoulis <dimitris.karkoulis@gmail.com>
 *  \author Szilárd Páll <pall.szilard@gmail.com>
 *  \ingroup module_nbnxm
 *
 *  TODO:
 *  - Add a static const cl_uint c_pruneKernelWorkDim / c_nbnxnKernelWorkDim = 3;
 *  - Rework the copying of OCL data structures done before every invocation of both
 *    nb and prune kernels (using fillin_ocl_structures); also consider at the same
 *    time calling clSetKernelArg only on the updated parameters (if tracking changed
 *    parameters is feasible);
 *  - Consider using the event_wait_list argument to clEnqueueNDRangeKernel to mark
 *    dependencies on the kernels launched: e.g. the non-local nb kernel's dependency
 *    on the misc_ops_and_local_H2D_done event could be better expressed this way
 *    (see the illustrative sketch after this comment);
 *  - Consider extracting common sections of the OpenCL and CUDA nbnxn logic, e.g.:
 *    - in nbnxn_gpu_launch_kernel_pruneonly() the pre- and post-kernel launch logic
 *      is identical in the two implementations, so a 3-way split might allow sharing
 *      code.
 */
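/* Illustrative sketch (not part of the build) for the event_wait_list TODO
 * above: instead of enqueueing a barrier in the non-local queue, the
 * dependency on misc_ops_and_local_H2D_done could be passed directly at
 * launch. Hypothetical raw launch, assuming global/local work sizes are
 * already set up:
 *
 *   cl_event deps[1]  = { nb->misc_ops_and_local_H2D_done };
 *   cl_int   status   = clEnqueueNDRangeKernel(nonLocalStream, kernel, 3, nullptr,
 *                                              global_work_size, local_work_size,
 *                                              1, deps, nullptr);
 */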
#include <assert.h>
#include <stdlib.h>

#include "thread_mpi/atomic.h"

#include "gromacs/gpu_utils/gputraits_ocl.h"
#include "gromacs/gpu_utils/oclutils.h"
#include "gromacs/hardware/hw_info.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/nbnxm/atomdata.h"
#include "gromacs/nbnxm/gpu_common.h"
#include "gromacs/nbnxm/gpu_common_utils.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/nbnxm_gpu.h"
#include "gromacs/nbnxm/pairlist.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"

#include "nbnxm_ocl_internal.h"
#include "nbnxm_ocl_types.h"
namespace Nbnxm
{

/*! \brief Convenience constants */
static constexpr int c_clSize = c_nbnxnGpuClusterSize;
/*! \brief Validates the input global work size parameter.
 */
static inline void validate_global_work_size(const KernelLaunchConfig& config,
                                             int                       work_dim,
                                             const gmx_device_info_t*  dinfo)
{
    cl_uint device_size_t_size_bits;
    cl_uint host_size_t_size_bits;

    size_t global_work_size[3];
    GMX_ASSERT(work_dim <= 3, "Not supporting hyper-grids just yet");
    for (int i = 0; i < work_dim; i++)
    {
        global_work_size[i] = config.blockSize[i] * config.gridSize[i];
    }

    /* Each component of a global_work_size must not exceed the range given by the
       sizeof(device size_t) for the device on which the kernel execution will
       be enqueued. See:
       https://www.khronos.org/registry/cl/sdk/1.0/docs/man/xhtml/clEnqueueNDRangeKernel.html
     */
    device_size_t_size_bits = dinfo->adress_bits;
    host_size_t_size_bits   = static_cast<cl_uint>(sizeof(size_t) * 8);

    /* If sizeof(host size_t) <= sizeof(device size_t)
       => global_work_size components will always be valid;
       else
       => get the device limit for global work size and
       compare it against each component of global_work_size.
     */
    if (host_size_t_size_bits > device_size_t_size_bits)
    {
        size_t device_limit = (1ULL << device_size_t_size_bits) - 1;

        for (int i = 0; i < work_dim; i++)
        {
            if (global_work_size[i] > device_limit)
            {
                gmx_fatal(FARGS,
                          "Watch out, the input system is too large to simulate!\n"
                          "The number of nonbonded work units (=number of super-clusters) exceeds the "
                          "device capabilities. Global work size limit exceeded (%zu > %zu)!",
                          global_work_size[i], device_limit);
            }
        }
    }
}
/* Constant arrays listing non-bonded kernel function names. The arrays are
 * organized as 2-dimensional arrays indexed by electrostatics and VDW type.
 *
 * Note that the row- and column-order of function names has to match the
 * order of the corresponding enumerated electrostatics and vdw types, resp.,
 * defined in nbnxm_ocl_types.h.
 */
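/* Illustrative (not part of the build): the tables are indexed as
 * table[eeltype][evdwtype]; e.g., assuming the enum values eelOclCUT == 0 and
 * evdwOclCUT == 0 from nbnxm_ocl_types.h:
 *
 *   const char* name = nb_kfunc_noener_noprune_ptr[eelOclCUT][evdwOclCUT];
 *   // name == "nbnxn_kernel_ElecCut_VdwLJ_F_opencl"
 */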
/*! \brief Force-only kernel function names. */
static const char* nb_kfunc_noener_noprune_ptr[eelOclNR][evdwOclNR] = {
    { "nbnxn_kernel_ElecCut_VdwLJ_F_opencl", "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecCut_VdwLJFsw_F_opencl",
      "nbnxn_kernel_ElecCut_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_F_opencl", "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecRF_VdwLJFsw_F_opencl",
      "nbnxn_kernel_ElecRF_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_F_opencl", "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecEw_VdwLJFsw_F_opencl",
      "nbnxn_kernel_ElecEw_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_opencl" }
};
/*! \brief Force + energy kernel function names. */
static const char* nb_kfunc_ener_noprune_ptr[eelOclNR][evdwOclNR] = {
    { "nbnxn_kernel_ElecCut_VdwLJ_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJFsw_VF_opencl",
      "nbnxn_kernel_ElecCut_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJFsw_VF_opencl",
      "nbnxn_kernel_ElecRF_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJFsw_VF_opencl",
      "nbnxn_kernel_ElecEw_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_opencl" }
};
/*! \brief Force + pruning kernel function names. */
static const char* nb_kfunc_noener_prune_ptr[eelOclNR][evdwOclNR] = {
    { "nbnxn_kernel_ElecCut_VdwLJ_F_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_opencl" }
};
/*! \brief Force + energy + pruning kernel function names. */
static const char* nb_kfunc_ener_prune_ptr[eelOclNR][evdwOclNR] = {
    { "nbnxn_kernel_ElecCut_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_opencl" }
};
/*! \brief Return a pointer to the prune kernel version to be executed at the current invocation.
 *
 * \param[in] kernel_pruneonly  array of prune kernel objects
 * \param[in] firstPrunePass    true if the first pruning pass is being executed
 */
static inline cl_kernel selectPruneKernel(cl_kernel kernel_pruneonly[], bool firstPrunePass)
{
    cl_kernel* kernelPtr;

    if (firstPrunePass)
    {
        kernelPtr = &(kernel_pruneonly[epruneFirst]);
    }
    else
    {
        kernelPtr = &(kernel_pruneonly[epruneRolling]);
    }
    // TODO: consider creating the prune kernel object here to avoid a
    // clCreateKernel for the rolling prune kernel if this is not needed.
    return *kernelPtr;
}
/*! \brief Return a pointer to the kernel version to be executed at the current step.
 *
 *  OpenCL kernel objects are cached in nb. If the requested kernel is not
 *  found in the cache, it will be created and the cache will be updated.
 */
static inline cl_kernel select_nbnxn_kernel(NbnxmGpu* nb, int eeltype, int evdwtype, bool bDoEne, bool bDoPrune)
{
    const char* kernel_name_to_run;
    cl_kernel*  kernel_ptr;
    cl_int      cl_error;

    assert(eeltype < eelOclNR);
    assert(evdwtype < evdwOclNR);

    if (bDoEne)
    {
        if (bDoPrune)
        {
            kernel_name_to_run = nb_kfunc_ener_prune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_ener_prune_ptr[eeltype][evdwtype]);
        }
        else
        {
            kernel_name_to_run = nb_kfunc_ener_noprune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_ener_noprune_ptr[eeltype][evdwtype]);
        }
    }
    else
    {
        if (bDoPrune)
        {
            kernel_name_to_run = nb_kfunc_noener_prune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_noener_prune_ptr[eeltype][evdwtype]);
        }
        else
        {
            kernel_name_to_run = nb_kfunc_noener_noprune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_noener_noprune_ptr[eeltype][evdwtype]);
        }
    }

    if (nullptr == kernel_ptr[0])
    {
        *kernel_ptr = clCreateKernel(nb->dev_rundata->program, kernel_name_to_run, &cl_error);
        assert(cl_error == CL_SUCCESS);
        // TODO: handle errors
    }

    return *kernel_ptr;
}
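// Usage sketch (illustrative): selecting the force+energy, no-prune kernel for
// the current parameter set, as done in gpu_launch_kernel() below:
//
//   cl_kernel kernel = select_nbnxn_kernel(nb, nbp->eeltype, nbp->vdwtype,
//                                          true /*bDoEne*/, false /*bDoPrune*/);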
/*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use.
 */
static inline int calc_shmem_required_nonbonded(int vdwType, bool bPrefetchLjParam)
{
    int shmem;

    /* size of shmem (force-buffers/xq/atom type preloading) */
    /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
    /* i-atom x+q in shared memory */
    shmem = c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(float) * 4; /* xqib */
    /* cj in shared memory, for both warps separately
     * TODO: in the "nowarp" kernels we load cj only once, so the factor 2 is not needed.
     */
    shmem += 2 * c_nbnxnGpuJgroupSize * sizeof(int); /* cjs */
    if (bPrefetchLjParam)
    {
        if (useLjCombRule(vdwType))
        {
            /* i-atom LJ combination parameters in shared memory */
            shmem += c_nbnxnGpuNumClusterPerSupercluster * c_clSize * 2
                     * sizeof(float); /* atib abused for ljcp, float2 */
        }
        else
        {
            /* i-atom types in shared memory */
            shmem += c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(int); /* atib */
        }
    }
    /* force reduction buffers in shared memory */
    shmem += c_clSize * c_clSize * 3 * sizeof(float); /* f_buf */
    /* Warp vote. In fact it must be multiplied by the number of warps in the block. */
    shmem += sizeof(cl_uint) * 2; /* warp_any */

    return shmem;
}
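/* Worked example (illustrative; the constant values are the common defaults
 * and are assumed here, not guaranteed): with c_clSize == 8,
 * c_nbnxnGpuNumClusterPerSupercluster == 8 and c_nbnxnGpuJgroupSize == 4, the
 * force-only path with atom-type prefetching needs
 *   xqib:     8 * 8 * 4 * sizeof(float) = 1024 bytes
 *   cjs:      2 * 4 * sizeof(int)       =   32 bytes
 *   atib:     8 * 8 * sizeof(int)       =  256 bytes
 *   f_buf:    8 * 8 * 3 * sizeof(float) =  768 bytes
 *   warp_any: 2 * sizeof(cl_uint)       =    8 bytes
 * for a total of 2088 bytes of local memory per work-group.
 */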
/*! \brief Initializes data structures that are going to be sent to the OpenCL device.
 *
 * The device can't use the same data structures as the host for two main reasons:
 * - OpenCL restrictions (pointers are not accepted inside data structures)
 * - some host side fields are not needed for the OpenCL kernels.
 *
 * This function is called before the launch of both nbnxn and prune kernels.
 */
static void fillin_ocl_structures(cl_nbparam_t* nbp, cl_nbparam_params_t* nbparams_params)
{
    nbparams_params->coulomb_tab_scale = nbp->coulomb_tab_scale;
    nbparams_params->c_rf              = nbp->c_rf;
    nbparams_params->dispersion_shift  = nbp->dispersion_shift;
    nbparams_params->eeltype           = nbp->eeltype;
    nbparams_params->epsfac            = nbp->epsfac;
    nbparams_params->ewaldcoeff_lj     = nbp->ewaldcoeff_lj;
    nbparams_params->ewald_beta        = nbp->ewald_beta;
    nbparams_params->rcoulomb_sq       = nbp->rcoulomb_sq;
    nbparams_params->repulsion_shift   = nbp->repulsion_shift;
    nbparams_params->rlistOuter_sq     = nbp->rlistOuter_sq;
    nbparams_params->rvdw_sq           = nbp->rvdw_sq;
    nbparams_params->rlistInner_sq     = nbp->rlistInner_sq;
    nbparams_params->rvdw_switch       = nbp->rvdw_switch;
    nbparams_params->sh_ewald          = nbp->sh_ewald;
    nbparams_params->sh_lj_ewald       = nbp->sh_lj_ewald;
    nbparams_params->two_k_rf          = nbp->two_k_rf;
    nbparams_params->vdwtype           = nbp->vdwtype;
    nbparams_params->vdw_switch        = nbp->vdw_switch;
}
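// Illustrative (hypothetical call, not how this file launches kernels): the
// filled-in struct is later passed to the kernel by value, since OpenCL
// kernels cannot follow host pointers embedded in structs:
//
//   clSetKernelArg(kernel, 0, sizeof(cl_nbparam_params_t), &nbparams_params);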
/*! \brief Enqueues a wait for event completion.
 *
 * Then it releases the event and sets it to 0.
 * Don't use this function when more than one wait will be issued for the event.
 * Equivalent to a CUDA stream synchronization. */
static void sync_ocl_event(cl_command_queue stream, cl_event* ocl_event)
{
    cl_int gmx_unused cl_error;

    /* Enqueue wait */
    cl_error = clEnqueueBarrierWithWaitList(stream, 1, ocl_event, nullptr);
    GMX_RELEASE_ASSERT(CL_SUCCESS == cl_error, ocl_get_error_string(cl_error).c_str());

    /* Release event and reset it to 0. It is OK to release it, as
       clEnqueueBarrierWithWaitList performs an implicit retain for events. */
    cl_error = clReleaseEvent(*ocl_event);
    assert(CL_SUCCESS == cl_error);
    *ocl_event = nullptr;
}
/*! \brief Launch asynchronously the xq buffer host to device copy. */
void gpu_copy_xq_to_gpu(NbnxmGpu* nb, const nbnxn_atomdata_t* nbatom, const AtomLocality atomLocality)
{
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");

    const InteractionLocality iloc = gpuAtomToInteractionLocality(atomLocality);

    /* local/nonlocal offset and length used for xq and f */
    int adat_begin, adat_len;

    cl_atomdata_t*   adat   = nb->atdat;
    cl_plist_t*      plist  = nb->plist[iloc];
    cl_timers_t*     t      = nb->timers;
    cl_command_queue stream = nb->stream[iloc];

    bool bDoTime = (nb->bDoTime) != 0;

    /* Don't launch the non-local H2D copy if there is no dependent
       work to do: neither non-local nor other (e.g. bonded) work
       to do that has as input the nbnxn coordinates.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local x+q copy (and the rest of the local
       work in nbnxn_gpu_launch_kernel()).
     */
    if ((iloc == InteractionLocality::NonLocal) && !haveGpuShortRangeWork(*nb, iloc))
    {
        plist->haveFreshList = false;

        return;
    }

    /* calculate the atom data index range based on locality */
    if (atomLocality == AtomLocality::Local)
    {
        adat_begin = 0;
        adat_len   = adat->natoms_local;
    }
    else
    {
        adat_begin = adat->natoms_local;
        adat_len   = adat->natoms - adat->natoms_local;
    }

    /* beginning of timed HtoD section */
    if (bDoTime)
    {
        t->xf[atomLocality].nb_h2d.openTimingRegion(stream);
    }

    /* HtoD x, q */
    ocl_copy_H2D_async(adat->xq, nbatom->x().data() + adat_begin * 4,
                       adat_begin * sizeof(float) * 4, adat_len * sizeof(float) * 4, stream,
                       bDoTime ? t->xf[atomLocality].nb_h2d.fetchNextEvent() : nullptr);

    if (bDoTime)
    {
        t->xf[atomLocality].nb_h2d.closeTimingRegion(stream);
    }

    /* When we get here all misc operations issued in the local stream as well as
       the local xq H2D are done,
       so we record that in the local stream and wait for it in the nonlocal one. */
    if (nb->bUseTwoStreams)
    {
        if (iloc == InteractionLocality::Local)
        {
            cl_int gmx_used_in_debug cl_error = clEnqueueMarkerWithWaitList(
                    stream, 0, nullptr, &(nb->misc_ops_and_local_H2D_done));
            assert(CL_SUCCESS == cl_error);

            /* Based on the v1.2 section 5.13 of the OpenCL spec, a flush is needed
             * in the local stream in order to be able to sync with the above event
             * from the non-local stream.
             */
            cl_error = clFlush(stream);
            assert(CL_SUCCESS == cl_error);
        }
        else
        {
            sync_ocl_event(stream, &(nb->misc_ops_and_local_H2D_done));
        }
    }
}
/*! \brief Launch GPU kernel

   As we execute nonbonded workload in separate queues, before launching
   the kernel we need to make sure that the following operations have completed:
   - atomdata allocation and related H2D transfers (every nstlist step);
   - pair list H2D transfer (every nstlist step);
   - shift vector H2D transfer (every nstlist step);
   - force (+shift force and energy) output clearing (every step).

   These operations are issued in the local queue at the beginning of the step
   and therefore always complete before the local kernel launch. The non-local
   kernel is launched after the local on the same device/context, so it is
   inherently scheduled after the operations in the local stream (including the
   above misc. operations).
   However, for the sake of having a future-proof implementation, we use the
   misc_ops_and_local_H2D_done event to record the point in time when the above
   operations are finished and synchronize with this event in the non-local stream.
 */
void gpu_launch_kernel(NbnxmGpu* nb, const gmx::StepWorkload& stepWork, const Nbnxm::InteractionLocality iloc)
{
    cl_atomdata_t*   adat   = nb->atdat;
    cl_nbparam_t*    nbp    = nb->nbparam;
    cl_plist_t*      plist  = nb->plist[iloc];
    cl_timers_t*     t      = nb->timers;
    cl_command_queue stream = nb->stream[iloc];

    bool bDoTime = (nb->bDoTime) != 0;

    cl_nbparam_params_t nbparams_params;

    /* Don't launch the non-local kernel if there is no work to do.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local kernel and later (not in
       this function) the stream wait, local f copyback and the f buffer
       clearing. All these operations, except for the local interaction kernel,
       are needed for the non-local interactions. The skip of the local kernel
       call is taken care of later in this function. */
    if (canSkipNonbondedWork(*nb, iloc))
    {
        plist->haveFreshList = false;

        return;
    }

    if (nbp->useDynamicPruning && plist->haveFreshList)
    {
        /* Prunes for rlistOuter and rlistInner, sets plist->haveFreshList=false
           (that's the way the timing accounting can distinguish between
           separate prune kernel and combined force+prune).
         */
        Nbnxm::gpu_launch_kernel_pruneonly(nb, iloc, 1);
    }

    if (plist->nsci == 0)
    {
        /* Don't launch an empty local kernel (not allowed with OpenCL). */
        return;
    }
    /* beginning of timed nonbonded calculation section */
    if (bDoTime)
    {
        t->interaction[iloc].nb_k.openTimingRegion(stream);
    }

    /* kernel launch config */
    KernelLaunchConfig config;
    config.sharedMemorySize = calc_shmem_required_nonbonded(nbp->vdwtype, nb->bPrefetchLjParam);
    config.stream           = stream;
    config.blockSize[0]     = c_clSize;
    config.blockSize[1]     = c_clSize;
    config.gridSize[0]      = plist->nsci;

    validate_global_work_size(config, 3, nb->dev_info);

    if (debug)
    {
        fprintf(debug,
                "Non-bonded GPU launch configuration:\n\tLocal work size: %zux%zux%zu\n\t"
                "Global work size : %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n",
                config.blockSize[0], config.blockSize[1], config.blockSize[2],
                config.blockSize[0] * config.gridSize[0], config.blockSize[1] * config.gridSize[1],
                plist->nsci * c_nbnxnGpuNumClusterPerSupercluster,
                c_nbnxnGpuNumClusterPerSupercluster, plist->na_c);
    }

    fillin_ocl_structures(nbp, &nbparams_params);

    auto*          timingEvent  = bDoTime ? t->interaction[iloc].nb_k.fetchNextEvent() : nullptr;
    constexpr char kernelName[] = "k_calc_nb";
    const auto     kernel =
            select_nbnxn_kernel(nb, nbp->eeltype, nbp->vdwtype, stepWork.computeEnergy,
                                (plist->haveFreshList && !nb->timers->interaction[iloc].didPrune));

    // The OpenCL kernel takes int as second to last argument because bool is
    // not supported as a kernel argument type (sizeof(bool) is implementation defined).
    const int computeFshift = static_cast<int>(stepWork.computeVirial);
    if (useLjCombRule(nb->nbparam->vdwtype))
    {
        const auto kernelArgs = prepareGpuKernelArguments(
                kernel, config, &nbparams_params, &adat->xq, &adat->f, &adat->e_lj, &adat->e_el,
                &adat->fshift, &adat->lj_comb, &adat->shift_vec, &nbp->nbfp_climg2d, &nbp->nbfp_comb_climg2d,
                &nbp->coulomb_tab_climg2d, &plist->sci, &plist->cj4, &plist->excl, &computeFshift);

        launchGpuKernel(kernel, config, timingEvent, kernelName, kernelArgs);
    }
    else
    {
        const auto kernelArgs = prepareGpuKernelArguments(
                kernel, config, &adat->ntypes, &nbparams_params, &adat->xq, &adat->f, &adat->e_lj,
                &adat->e_el, &adat->fshift, &adat->atom_types, &adat->shift_vec, &nbp->nbfp_climg2d,
                &nbp->nbfp_comb_climg2d, &nbp->coulomb_tab_climg2d, &plist->sci, &plist->cj4,
                &plist->excl, &computeFshift);

        launchGpuKernel(kernel, config, timingEvent, kernelName, kernelArgs);
    }

    if (bDoTime)
    {
        t->interaction[iloc].nb_k.closeTimingRegion(stream);
    }
}
/*! \brief Calculates the amount of shared memory required by the prune kernel.
 *
 *  Note that for the sake of simplicity we use the CUDA terminology "shared memory"
 *  for OpenCL local memory.
 *
 * \param[in] num_threads_z  cj4 concurrency equal to the number of threads/work items in the
 *                           3rd dimension.
 * \returns   the amount of local memory in bytes required by the pruning kernel
 */
static inline int calc_shmem_required_prune(const int num_threads_z)
{
    int shmem;

    /* i-atom x in shared memory (for convenience we load all 4 components including q) */
    shmem = c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(float) * 4;
    /* cj in shared memory, for each warp separately
     * Note: only need to load once per wavefront, but to keep the code simple,
     * for now we load twice on AMD.
     */
    shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);
    /* Warp vote, requires one uint per warp/32 threads per block. */
    shmem += sizeof(cl_uint) * 2 * num_threads_z;

    return shmem;
}
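/* Worked example (illustrative; constant values assumed, not guaranteed): with
 * num_threads_z == 4, c_clSize == 8, c_nbnxnGpuNumClusterPerSupercluster == 8,
 * c_nbnxnGpuClusterpairSplit == 2 and c_nbnxnGpuJgroupSize == 4:
 *   xib:      8 * 8 * 4 * sizeof(float) = 1024 bytes
 *   cjs:      4 * 2 * 4 * sizeof(int)   =  128 bytes
 *   warp_any: 2 * 4 * sizeof(cl_uint)   =   32 bytes
 * for a total of 1184 bytes of local memory per work-group.
 */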
/*! \brief
 * Launch the pairlist prune only kernel for the given locality.
 * \p numParts tells in how many parts, i.e. calls, the list will be pruned.
 */
void gpu_launch_kernel_pruneonly(NbnxmGpu* nb, const InteractionLocality iloc, const int numParts)
{
    cl_atomdata_t*   adat    = nb->atdat;
    cl_nbparam_t*    nbp     = nb->nbparam;
    cl_plist_t*      plist   = nb->plist[iloc];
    cl_timers_t*     t       = nb->timers;
    cl_command_queue stream  = nb->stream[iloc];
    bool             bDoTime = nb->bDoTime == CL_TRUE;

    if (plist->haveFreshList)
    {
        GMX_ASSERT(numParts == 1, "With first pruning we expect 1 part");

        /* Set rollingPruningNumParts to signal that it is not set */
        plist->rollingPruningNumParts = 0;
        plist->rollingPruningPart     = 0;
    }
    else
    {
        if (plist->rollingPruningNumParts == 0)
        {
            plist->rollingPruningNumParts = numParts;
        }
        else
        {
            GMX_ASSERT(numParts == plist->rollingPruningNumParts,
                       "It is not allowed to change numParts in between list generation steps");
        }
    }

    /* Use a local variable for part and update in plist, so we can return here
     * without duplicating the part increment code.
     */
    int part = plist->rollingPruningPart;

    plist->rollingPruningPart++;
    if (plist->rollingPruningPart >= plist->rollingPruningNumParts)
    {
        plist->rollingPruningPart = 0;
    }

    /* Compute the number of list entries to prune in this pass */
    int numSciInPart = (plist->nsci - part) / numParts;
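    /* Example: with plist->nsci == 1000 and numParts == 2, the formula above
     * yields 500 entries for part 0 and 499 for part 1 (integer division). */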
    /* Don't launch the kernel if there is no work to do. */
    if (numSciInPart <= 0)
    {
        plist->haveFreshList = false;

        return;
    }

    GpuRegionTimer* timer = nullptr;
    if (bDoTime)
    {
        timer = &(plist->haveFreshList ? t->interaction[iloc].prune_k : t->interaction[iloc].rollingPrune_k);
    }

    /* beginning of timed prune calculation section */
    if (bDoTime)
    {
        timer->openTimingRegion(stream);
    }

    /* Kernel launch config:
     * - The thread block dimensions match the size of i-clusters, j-clusters,
     *   and j-cluster concurrency, in x, y, and z, respectively.
     * - The 1D block-grid contains as many blocks as super-clusters.
     */
    int num_threads_z = getOclPruneKernelJ4Concurrency(nb->dev_info->vendor_e);

    /* kernel launch config */
    KernelLaunchConfig config;
    config.sharedMemorySize = calc_shmem_required_prune(num_threads_z);
    config.stream           = stream;
    config.blockSize[0]     = c_clSize;
    config.blockSize[1]     = c_clSize;
    config.blockSize[2]     = num_threads_z;
    config.gridSize[0]      = numSciInPart;

    validate_global_work_size(config, 3, nb->dev_info);

    if (debug)
    {
        fprintf(debug,
                "Pruning GPU kernel launch configuration:\n\tLocal work size: %zux%zux%zu\n\t"
                "\tGlobal work size: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n"
                "\tShMem: %zu\n",
                config.blockSize[0], config.blockSize[1], config.blockSize[2],
                config.blockSize[0] * config.gridSize[0], config.blockSize[1] * config.gridSize[1],
                plist->nsci * c_nbnxnGpuNumClusterPerSupercluster,
                c_nbnxnGpuNumClusterPerSupercluster, plist->na_c, config.sharedMemorySize);
    }

    cl_nbparam_params_t nbparams_params;
    fillin_ocl_structures(nbp, &nbparams_params);

    auto*          timingEvent  = bDoTime ? timer->fetchNextEvent() : nullptr;
    constexpr char kernelName[] = "k_pruneonly";
    const auto     pruneKernel  = selectPruneKernel(nb->kernel_pruneonly, plist->haveFreshList);
    const auto     kernelArgs   = prepareGpuKernelArguments(pruneKernel, config, &nbparams_params,
                                                            &adat->xq, &adat->shift_vec, &plist->sci,
                                                            &plist->cj4, &plist->imask, &numParts, &part);
    launchGpuKernel(pruneKernel, config, timingEvent, kernelName, kernelArgs);

    if (plist->haveFreshList)
    {
        plist->haveFreshList = false;
        /* Mark that pruning has been done */
        nb->timers->interaction[iloc].didPrune = true;
    }
    else
    {
        /* Mark that rolling pruning has been done */
        nb->timers->interaction[iloc].didRollingPrune = true;
    }

    if (bDoTime)
    {
        timer->closeTimingRegion(stream);
    }
}
/*! \brief
 * Launch asynchronously the download of nonbonded forces from the GPU
 * (and energies/shift forces if required).
 */
void gpu_launch_cpyback(NbnxmGpu*                nb,
                        struct nbnxn_atomdata_t* nbatom,
                        const gmx::StepWorkload& stepWork,
                        const AtomLocality       aloc)
{
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");

    cl_int gmx_unused cl_error;
    int               adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */

    /* determine interaction locality from atom locality */
    const InteractionLocality iloc = gpuAtomToInteractionLocality(aloc);

    cl_atomdata_t*   adat    = nb->atdat;
    cl_timers_t*     t       = nb->timers;
    bool             bDoTime = nb->bDoTime == CL_TRUE;
    cl_command_queue stream  = nb->stream[iloc];

    /* don't launch non-local copy-back if there was no non-local work to do */
    if ((iloc == InteractionLocality::NonLocal) && !haveGpuShortRangeWork(*nb, iloc))
    {
        /* TODO An alternative way to signal that non-local work is
           complete is to use a clEnqueueMarker+clEnqueueBarrier
           pair. However, the use of bNonLocalStreamActive has the
           advantage of being local to the host, so probably minimizes
           overhead. Curiously, for NVIDIA OpenCL with an empty-domain
           test case, overall simulation performance was higher with
           the API calls, but this has not been tested on AMD OpenCL,
           so could be worth considering in future. */
        nb->bNonLocalStreamActive = CL_FALSE;
        return;
    }

    getGpuAtomRange(adat, aloc, &adat_begin, &adat_len);

    /* beginning of timed D2H section */
    if (bDoTime)
    {
        t->xf[aloc].nb_d2h.openTimingRegion(stream);
    }

    /* With DD the local D2H transfer can only start after the non-local
       has been launched. */
    if (iloc == InteractionLocality::Local && nb->bNonLocalStreamActive)
    {
        sync_ocl_event(stream, &(nb->nonlocal_done));
    }

    /* DtoH f */
    ocl_copy_D2H_async(nbatom->out[0].f.data() + adat_begin * DIM, adat->f,
                       adat_begin * DIM * sizeof(nbatom->out[0].f[0]),
                       adat_len * DIM * sizeof(nbatom->out[0].f[0]), stream,
                       bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);

    /* kick off work */
    cl_error = clFlush(stream);
    assert(CL_SUCCESS == cl_error);

    /* After the non-local D2H is launched the nonlocal_done event can be
       recorded which signals that the local D2H can proceed. This event is not
       placed after the non-local kernel because we first need the non-local
       data back. */
    if (iloc == InteractionLocality::NonLocal)
    {
        cl_error = clEnqueueMarkerWithWaitList(stream, 0, nullptr, &(nb->nonlocal_done));
        assert(CL_SUCCESS == cl_error);
        nb->bNonLocalStreamActive = CL_TRUE;
    }

    /* only transfer energies in the local stream */
    if (iloc == InteractionLocality::Local)
    {
        /* DtoH fshift when virial is needed */
        if (stepWork.computeVirial)
        {
            ocl_copy_D2H_async(nb->nbst.fshift, adat->fshift, 0, SHIFTS * sizeof(nb->nbst.fshift[0]),
                               stream, bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);
        }

        /* DtoH energies */
        if (stepWork.computeEnergy)
        {
            ocl_copy_D2H_async(nb->nbst.e_lj, adat->e_lj, 0, sizeof(float), stream,
                               bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);

            ocl_copy_D2H_async(nb->nbst.e_el, adat->e_el, 0, sizeof(float), stream,
                               bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);
        }
    }

    if (bDoTime)
    {
        t->xf[aloc].nb_d2h.closeTimingRegion(stream);
    }
}
/*! \brief Selects the Ewald kernel type, analytical or tabulated, single or twin cut-off. */
int nbnxn_gpu_pick_ewald_kernel_type(const interaction_const_t& ic)
{
    bool bTwinCut = (ic.rcoulomb != ic.rvdw);
    bool bUseAnalyticalEwald, bForceAnalyticalEwald, bForceTabulatedEwald;
    int  kernel_type;

    /* Benchmarking/development environment variables to force the use of
       analytical or tabulated Ewald kernel. */
    bForceAnalyticalEwald = (getenv("GMX_OCL_NB_ANA_EWALD") != nullptr);
    bForceTabulatedEwald  = (getenv("GMX_OCL_NB_TAB_EWALD") != nullptr);

    if (bForceAnalyticalEwald && bForceTabulatedEwald)
    {
        gmx_incons(
                "Both analytical and tabulated Ewald OpenCL non-bonded kernels "
                "requested through environment variables.");
    }
    /* OpenCL: By default, use analytical Ewald
     * TODO: tabulated does not work, it needs fixing, see init_nbparam() in nbnxn_ocl_data_mgmt.cpp
     *
     * TODO: decide if dev_info parameter should be added to recognize NVIDIA CC>=3.0 devices.
     */

    /* By default use analytical Ewald. */
    bUseAnalyticalEwald = true;
    if (bForceAnalyticalEwald)
    {
        if (debug)
        {
            fprintf(debug, "Using analytical Ewald OpenCL kernels\n");
        }
    }
    else if (bForceTabulatedEwald)
    {
        bUseAnalyticalEwald = false;

        if (debug)
        {
            fprintf(debug, "Using tabulated Ewald OpenCL kernels\n");
        }
    }

    /* Use twin cut-off kernels if requested by bTwinCut or the env. var.
       forces it (use it for debugging/benchmarking only). */
    if (!bTwinCut && (getenv("GMX_OCL_NB_EWALD_TWINCUT") == nullptr))
    {
        kernel_type = bUseAnalyticalEwald ? eelOclEWALD_ANA : eelOclEWALD_TAB;
    }
    else
    {
        kernel_type = bUseAnalyticalEwald ? eelOclEWALD_ANA_TWIN : eelOclEWALD_TAB_TWIN;
    }

    return kernel_type;
}