/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
 * Copyright (c) 2017,2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *  \brief Define OpenCL implementation of nbnxm_gpu.h
 *
 *  \author Anca Hamuraru <anca@streamcomputing.eu>
 *  \author Teemu Virolainen <teemu@streamcomputing.eu>
 *  \author Dimitrios Karkoulis <dimitris.karkoulis@gmail.com>
 *  \author Szilárd Páll <pall.szilard@gmail.com>
 *  \ingroup module_nbnxm
 *
 *  TODO:
 *  - Add a static const cl_uint c_pruneKernelWorkDim / c_nbnxnKernelWorkDim = 3;
 *  - Rework the copying of OCL data structures done before every invocation of both
 *    nb and prune kernels (using fillin_ocl_structures); also consider at the same
 *    time calling clSetKernelArg only on the updated parameters (if tracking changed
 *    parameters is feasible);
 *  - Consider using the event_wait_list argument to clEnqueueNDRangeKernel to mark
 *    dependencies on the kernel launched: e.g. the non-local nb kernel's dependency
 *    on the misc_ops_and_local_H2D_done event could be better expressed this way.
 *  - Consider extracting common sections of the OpenCL and CUDA nbnxn logic, e.g.:
 *    - in nbnxn_gpu_launch_kernel_pruneonly() the pre- and post-kernel launch logic
 *      is identical in the two implementations, so a 3-way split might allow sharing
 *      code.
 */
#include "gmxpre.h"

#include <assert.h>
#include <stdlib.h>

#include "thread_mpi/atomic.h"

#include "gromacs/gpu_utils/gputraits_ocl.h"
#include "gromacs/gpu_utils/oclutils.h"
#include "gromacs/hardware/hw_info.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/nbnxm/atomdata.h"
#include "gromacs/nbnxm/gpu_common.h"
#include "gromacs/nbnxm/gpu_common_utils.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/nbnxm_gpu.h"
#include "gromacs/nbnxm/pairlist.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"

#include "nbnxm_ocl_internal.h"
#include "nbnxm_ocl_types.h"

namespace Nbnxm
{
/*! \brief Convenience constants */
//@{
static const int c_numClPerSupercl = c_nbnxnGpuNumClusterPerSupercluster;
static const int c_clSize          = c_nbnxnGpuClusterSize;
//@}
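/* Orientation note (not authoritative; the defining constants live in the
 * pairlist headers referenced above): for the OpenCL nbnxm kernels both
 * values are typically 8, i.e. 8x8 cluster pairs and 64 work items per
 * work-group.
 */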
/*! \brief Validates the input global work size parameter.
 */
static inline void validate_global_work_size(const KernelLaunchConfig& config,
                                             int                       work_dim,
                                             const gmx_device_info_t*  dinfo)
{
    cl_uint device_size_t_size_bits;
    cl_uint host_size_t_size_bits;

    size_t global_work_size[3];
    GMX_ASSERT(work_dim <= 3, "Not supporting hyper-grids just yet");
    for (int i = 0; i < work_dim; i++)
    {
        global_work_size[i] = config.blockSize[i] * config.gridSize[i];
    }

    /* Each component of a global_work_size must not exceed the range given by the
       sizeof(device size_t) for the device on which the kernel execution will
       be enqueued. See:
       https://www.khronos.org/registry/cl/sdk/1.0/docs/man/xhtml/clEnqueueNDRangeKernel.html
     */
    device_size_t_size_bits = dinfo->adress_bits;
    host_size_t_size_bits   = static_cast<cl_uint>(sizeof(size_t) * 8);

    /* If sizeof(host size_t) <= sizeof(device size_t)
       => global_work_size components will always be valid
       else
       => get device limit for global work size and
       compare it against each component of global_work_size.
     */
    if (host_size_t_size_bits > device_size_t_size_bits)
    {
        size_t device_limit;

        device_limit = (1ULL << device_size_t_size_bits) - 1;

        for (int i = 0; i < work_dim; i++)
        {
            if (global_work_size[i] > device_limit)
            {
                gmx_fatal(
                        FARGS,
                        "Watch out, the input system is too large to simulate!\n"
                        "The number of nonbonded work units (=number of super-clusters) exceeds the "
                        "device capabilities. Global work size limit exceeded (%zu > %zu)!",
                        global_work_size[i], device_limit);
            }
        }
    }
}
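/* Illustrative example (not part of the launch path): on a device reporting
 * adress_bits == 32 the per-component limit is 2^32 - 1. A grid of 100000
 * super-clusters with an 8x8 work-group gives
 * global_work_size[0] = 8 * 100000 = 800000, which passes easily; only
 * extremely large systems on 32-bit devices can trip the gmx_fatal() above.
 */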
/* Constant arrays listing non-bonded kernel function names. The arrays are
 * organized in 2-dim arrays by: electrostatics and VDW type.
 *
 * Note that the row- and column-order of function names has to match the
 * order of corresponding enumerated electrostatics and vdw types, resp.,
 * defined in nbnxm_ocl_types.h.
 */
/*! \brief Force-only kernel function names. */
static const char* nb_kfunc_noener_noprune_ptr[eelOclNR][evdwOclNR] = {
    { "nbnxn_kernel_ElecCut_VdwLJ_F_opencl", "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecCut_VdwLJFsw_F_opencl",
      "nbnxn_kernel_ElecCut_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_F_opencl", "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecRF_VdwLJFsw_F_opencl",
      "nbnxn_kernel_ElecRF_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_F_opencl", "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecEw_VdwLJFsw_F_opencl",
      "nbnxn_kernel_ElecEw_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_opencl" }
};
/*! \brief Force + energy kernel function names. */
static const char* nb_kfunc_ener_noprune_ptr[eelOclNR][evdwOclNR] = {
    { "nbnxn_kernel_ElecCut_VdwLJ_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJFsw_VF_opencl",
      "nbnxn_kernel_ElecCut_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJFsw_VF_opencl",
      "nbnxn_kernel_ElecRF_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJFsw_VF_opencl",
      "nbnxn_kernel_ElecEw_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_opencl" }
};
/*! \brief Force + pruning kernel function names. */
static const char* nb_kfunc_noener_prune_ptr[eelOclNR][evdwOclNR] = {
    { "nbnxn_kernel_ElecCut_VdwLJ_F_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_opencl" }
};
/*! \brief Force + energy + pruning kernel function names. */
static const char* nb_kfunc_ener_prune_ptr[eelOclNR][evdwOclNR] = {
    { "nbnxn_kernel_ElecCut_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_opencl" }
};
/*! \brief Return a pointer to the prune kernel version to be executed at the current invocation.
 *
 * \param[in] kernel_pruneonly  array of prune kernel objects
 * \param[in] firstPrunePass    true if the first pruning pass is being executed
 */
static inline cl_kernel selectPruneKernel(cl_kernel kernel_pruneonly[], bool firstPrunePass)
{
    cl_kernel* kernelPtr;

    if (firstPrunePass)
    {
        kernelPtr = &(kernel_pruneonly[epruneFirst]);
    }
    else
    {
        kernelPtr = &(kernel_pruneonly[epruneRolling]);
    }
    // TODO: consider creating the prune kernel object here to avoid a
    // clCreateKernel for the rolling prune kernel if this is not needed.
    return *kernelPtr;
}
/*! \brief Return a pointer to the kernel version to be executed at the current step.
 *  OpenCL kernel objects are cached in nb. If the requested kernel is not
 *  found in the cache, it will be created and the cache will be updated.
 */
static inline cl_kernel select_nbnxn_kernel(gmx_nbnxn_ocl_t* nb, int eeltype, int evdwtype, bool bDoEne, bool bDoPrune)
{
    const char* kernel_name_to_run;
    cl_kernel*  kernel_ptr;
    cl_int      cl_error;

    assert(eeltype < eelOclNR);
    assert(evdwtype < evdwOclNR);

    if (bDoEne)
    {
        if (bDoPrune)
        {
            kernel_name_to_run = nb_kfunc_ener_prune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_ener_prune_ptr[eeltype][evdwtype]);
        }
        else
        {
            kernel_name_to_run = nb_kfunc_ener_noprune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_ener_noprune_ptr[eeltype][evdwtype]);
        }
    }
    else
    {
        if (bDoPrune)
        {
            kernel_name_to_run = nb_kfunc_noener_prune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_noener_prune_ptr[eeltype][evdwtype]);
        }
        else
        {
            kernel_name_to_run = nb_kfunc_noener_noprune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_noener_noprune_ptr[eeltype][evdwtype]);
        }
    }

    if (nullptr == kernel_ptr[0])
    {
        *kernel_ptr = clCreateKernel(nb->dev_rundata->program, kernel_name_to_run, &cl_error);
        assert(cl_error == CL_SUCCESS);
    }
    // TODO: handle errors

    return *kernel_ptr;
}
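/* Usage note: e.g. select_nbnxn_kernel(nb, nbp->eeltype, nbp->vdwtype,
 * stepWork.computeEnergy, pruneNeeded); only the first request for a given
 * (eeltype, vdwtype, energy, prune) combination pays the clCreateKernel()
 * cost, later steps reuse the cl_kernel cached in nb.
 */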
/*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use. */
static inline int calc_shmem_required_nonbonded(int vdwType, bool bPrefetchLjParam)
{
    int shmem;

    /* size of shmem (force-buffers/xq/atom type preloading) */
    /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
    /* i-atom x+q in shared memory */
    shmem = c_numClPerSupercl * c_clSize * sizeof(float) * 4; /* xqib */
    /* cj in shared memory, for both warps separately
     * TODO: in the "nowarp" kernels we load cj only once, so the factor 2 is not needed.
     */
    shmem += 2 * c_nbnxnGpuJgroupSize * sizeof(int); /* cjs */
    if (bPrefetchLjParam)
    {
        if (useLjCombRule(vdwType))
        {
            /* i-atom LJ combination parameters in shared memory */
            shmem += c_numClPerSupercl * c_clSize * 2 * sizeof(float); /* atib abused for ljcp, float2 */
        }
        else
        {
            /* i-atom types in shared memory */
            shmem += c_numClPerSupercl * c_clSize * sizeof(int); /* atib */
        }
    }
    /* force reduction buffers in shared memory */
    shmem += c_clSize * c_clSize * 3 * sizeof(float); /* f_buf */
    /* Warp vote; strictly this should be multiplied by the number of warps per block. */
    shmem += sizeof(cl_uint) * 2; /* warp_any */

    return shmem;
}
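/* Worked size example (assuming the typical c_clSize == 8,
 * c_numClPerSupercl == 8 and c_nbnxnGpuJgroupSize == 4):
 * xqib = 8*8*4*4 = 4096 B, cjs = 2*4*4 = 32 B, LJ combination parameters
 * = 8*8*2*4 = 512 B, f_buf = 8*8*3*4 = 768 B, warp_any = 8 B; in total about
 * 5.3 KiB of local memory per work-group with LJ parameter prefetching.
 */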
/*! \brief Initializes data structures that are going to be sent to the OpenCL device.
 *
 * The device can't use the same data structures as the host for two main reasons:
 * - OpenCL restrictions (pointers are not accepted inside data structures)
 * - some host side fields are not needed for the OpenCL kernels.
 *
 * This function is called before the launch of both nbnxn and prune kernels.
 */
static void fillin_ocl_structures(cl_nbparam_t* nbp, cl_nbparam_params_t* nbparams_params)
{
    nbparams_params->coulomb_tab_scale = nbp->coulomb_tab_scale;
    nbparams_params->c_rf              = nbp->c_rf;
    nbparams_params->dispersion_shift  = nbp->dispersion_shift;
    nbparams_params->eeltype           = nbp->eeltype;
    nbparams_params->epsfac            = nbp->epsfac;
    nbparams_params->ewaldcoeff_lj     = nbp->ewaldcoeff_lj;
    nbparams_params->ewald_beta        = nbp->ewald_beta;
    nbparams_params->rcoulomb_sq       = nbp->rcoulomb_sq;
    nbparams_params->repulsion_shift   = nbp->repulsion_shift;
    nbparams_params->rlistOuter_sq     = nbp->rlistOuter_sq;
    nbparams_params->rvdw_sq           = nbp->rvdw_sq;
    nbparams_params->rlistInner_sq     = nbp->rlistInner_sq;
    nbparams_params->rvdw_switch       = nbp->rvdw_switch;
    nbparams_params->sh_ewald          = nbp->sh_ewald;
    nbparams_params->sh_lj_ewald       = nbp->sh_lj_ewald;
    nbparams_params->two_k_rf          = nbp->two_k_rf;
    nbparams_params->vdwtype           = nbp->vdwtype;
    nbparams_params->vdw_switch        = nbp->vdw_switch;
}
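/* Design note: cl_nbparam_params_t mirrors the scalar fields of cl_nbparam_t
 * while omitting the cl_mem members, which is what allows it to be passed by
 * value as a kernel argument below. If a field is added to one struct, the
 * other struct and this copy routine presumably need updating in sync.
 */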
/*! \brief Enqueues a wait for event completion.
 *
 * Then it releases the event and resets the pointer to nullptr.
 * Don't use this function when more than one wait will be issued for the event.
 * Equivalent to a CUDA stream synchronization. */
static void sync_ocl_event(cl_command_queue stream, cl_event* ocl_event)
{
    cl_int gmx_unused cl_error;

    /* Enqueue wait */
    cl_error = clEnqueueBarrierWithWaitList(stream, 1, ocl_event, nullptr);
    GMX_RELEASE_ASSERT(CL_SUCCESS == cl_error, ocl_get_error_string(cl_error).c_str());

    /* Release event and reset it to nullptr. It is OK to release it, as the
       enqueued barrier performs an implicit retain of the event. */
    cl_error = clReleaseEvent(*ocl_event);
    assert(CL_SUCCESS == cl_error);
    *ocl_event = nullptr;
}
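/* Typical call site (see gpu_copy_xq_to_gpu below): make the non-local queue
 * wait for the marker recorded in the local stream, consuming the event in
 * the process:
 *   sync_ocl_event(stream, &(nb->misc_ops_and_local_H2D_done));
 */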
/*! \brief Launch asynchronously the xq buffer host to device copy. */
void gpu_copy_xq_to_gpu(gmx_nbnxn_ocl_t* nb, const nbnxn_atomdata_t* nbatom, const AtomLocality atomLocality)
{
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");

    const InteractionLocality iloc = gpuAtomToInteractionLocality(atomLocality);

    /* local/nonlocal offset and length used for xq and f */
    int adat_begin, adat_len;

    cl_atomdata_t*   adat   = nb->atdat;
    cl_plist_t*      plist  = nb->plist[iloc];
    cl_timers_t*     t      = nb->timers;
    cl_command_queue stream = nb->stream[iloc];

    bool bDoTime = (nb->bDoTime) != 0;

    /* Don't launch the non-local H2D copy if there is no dependent
       work to do: neither non-local nor other (e.g. bonded) work
       that has the nbnxn coordinates as input.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local x+q copy (and the rest of the local
       work in nbnxn_gpu_launch_kernel()).
     */
    if ((iloc == InteractionLocality::NonLocal) && !haveGpuShortRangeWork(*nb, iloc))
    {
        plist->haveFreshList = false;

        return;
    }

    /* calculate the atom data index range based on locality */
    if (atomLocality == AtomLocality::Local)
    {
        adat_begin = 0;
        adat_len   = adat->natoms_local;
    }
    else
    {
        adat_begin = adat->natoms_local;
        adat_len   = adat->natoms - adat->natoms_local;
    }

    /* beginning of timed HtoD section */
    if (bDoTime)
    {
        t->xf[atomLocality].nb_h2d.openTimingRegion(stream);
    }

    /* HtoD x, q: the xq buffer holds 4 floats per atom (x, y, z, q) */
    ocl_copy_H2D_async(adat->xq, nbatom->x().data() + adat_begin * 4,
                       adat_begin * sizeof(float) * 4, adat_len * sizeof(float) * 4, stream,
                       bDoTime ? t->xf[atomLocality].nb_h2d.fetchNextEvent() : nullptr);

    if (bDoTime)
    {
        t->xf[atomLocality].nb_h2d.closeTimingRegion(stream);
    }

    /* When we get here all misc operations issued in the local stream as well as
       the local xq H2D are done, so we record that in the local stream and wait
       for it in the non-local one. */
    if (nb->bUseTwoStreams)
    {
        if (iloc == InteractionLocality::Local)
        {
            cl_int gmx_used_in_debug cl_error = clEnqueueMarkerWithWaitList(
                    stream, 0, nullptr, &(nb->misc_ops_and_local_H2D_done));
            assert(CL_SUCCESS == cl_error);

            /* Based on section 5.13 of the OpenCL v1.2 spec, a flush is needed
             * in the local stream in order to be able to sync with the above event
             * from the non-local stream.
             */
            cl_error = clFlush(stream);
            assert(CL_SUCCESS == cl_error);
        }
        else
        {
            sync_ocl_event(stream, &(nb->misc_ops_and_local_H2D_done));
        }
    }
}
/*! \brief Launch GPU kernel

   As we execute nonbonded workload in separate queues, before launching
   the kernel we need to make sure that the following operations have completed:
   - atomdata allocation and related H2D transfers (every nstlist step);
   - pair list H2D transfer (every nstlist step);
   - shift vector H2D transfer (every nstlist step);
   - force (+shift force and energy) output clearing (every step).

   These operations are issued in the local queue at the beginning of the step
   and therefore always complete before the local kernel launch. The non-local
   kernel is launched after the local on the same device/context, so this is
   inherently scheduled after the operations in the local stream (including the
   above misc. operations).
   However, for the sake of having a future-proof implementation, we use the
   misc_ops_and_local_H2D_done event to record the point in time when the above
   operations are finished and synchronize with this event in the non-local stream.
 */
void gpu_launch_kernel(gmx_nbnxn_ocl_t* nb, const gmx::StepWorkload& stepWork, const Nbnxm::InteractionLocality iloc)
{
    cl_atomdata_t*   adat   = nb->atdat;
    cl_nbparam_t*    nbp    = nb->nbparam;
    cl_plist_t*      plist  = nb->plist[iloc];
    cl_timers_t*     t      = nb->timers;
    cl_command_queue stream = nb->stream[iloc];

    bool bDoTime = (nb->bDoTime) != 0;

    cl_nbparam_params_t nbparams_params;

    /* Don't launch the non-local kernel if there is no work to do.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local kernel and later (not in
       this function) the stream wait, local f copyback and the f buffer
       clearing. All these operations, except for the local interaction kernel,
       are needed for the non-local interactions. The skip of the local kernel
       call is taken care of later in this function. */
    if (canSkipNonbondedWork(*nb, iloc))
    {
        plist->haveFreshList = false;

        return;
    }

    if (nbp->useDynamicPruning && plist->haveFreshList)
    {
        /* Prunes for rlistOuter and rlistInner, sets plist->haveFreshList=false
           (that's the way the timing accounting can distinguish between
           separate prune kernel and combined force+prune).
         */
        Nbnxm::gpu_launch_kernel_pruneonly(nb, iloc, 1);
    }

    if (plist->nsci == 0)
    {
        /* Don't launch an empty local kernel (not allowed with OpenCL). */
        return;
    }

    /* beginning of timed nonbonded calculation section */
    if (bDoTime)
    {
        t->interaction[iloc].nb_k.openTimingRegion(stream);
    }

    /* kernel launch config */
    KernelLaunchConfig config;
    config.sharedMemorySize = calc_shmem_required_nonbonded(nbp->vdwtype, nb->bPrefetchLjParam);
    config.stream           = stream;
    config.blockSize[0]     = c_clSize;
    config.blockSize[1]     = c_clSize;
    config.gridSize[0]      = plist->nsci;

    validate_global_work_size(config, 3, nb->dev_info);
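    /* Launch geometry sketch (assuming the typical c_clSize == 8): each
     * work-group is an 8x8(x1) tile of 64 work items handling one
     * i-super-cluster, and the 1D grid contains one work-group per pair-list
     * entry (plist->nsci).
     */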
642 "Non-bonded GPU launch configuration:\n\tLocal work size: %zux%zux%zu\n\t"
643 "Global work size : %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n",
644 config.blockSize[0], config.blockSize[1], config.blockSize[2],
645 config.blockSize[0] * config.gridSize[0], config.blockSize[1] * config.gridSize[1],
646 plist->nsci * c_numClPerSupercl, c_numClPerSupercl, plist->na_c);
649 fillin_ocl_structures(nbp, &nbparams_params);
651 auto* timingEvent = bDoTime ? t->interaction[iloc].nb_k.fetchNextEvent() : nullptr;
652 constexpr char kernelName[] = "k_calc_nb";
654 select_nbnxn_kernel(nb, nbp->eeltype, nbp->vdwtype, stepWork.computeEnergy,
655 (plist->haveFreshList && !nb->timers->interaction[iloc].didPrune));
658 // The OpenCL kernel takes int as second to last argument because bool is
659 // not supported as a kernel argument type (sizeof(bool) is implementation defined).
660 const int computeFshift = static_cast<int>(stepWork.computeVirial);
661 if (useLjCombRule(nb->nbparam->vdwtype))
663 const auto kernelArgs = prepareGpuKernelArguments(
664 kernel, config, &nbparams_params, &adat->xq, &adat->f, &adat->e_lj, &adat->e_el,
665 &adat->fshift, &adat->lj_comb, &adat->shift_vec, &nbp->nbfp_climg2d, &nbp->nbfp_comb_climg2d,
666 &nbp->coulomb_tab_climg2d, &plist->sci, &plist->cj4, &plist->excl, &computeFshift);
668 launchGpuKernel(kernel, config, timingEvent, kernelName, kernelArgs);
672 const auto kernelArgs = prepareGpuKernelArguments(
673 kernel, config, &adat->ntypes, &nbparams_params, &adat->xq, &adat->f, &adat->e_lj,
674 &adat->e_el, &adat->fshift, &adat->atom_types, &adat->shift_vec, &nbp->nbfp_climg2d,
675 &nbp->nbfp_comb_climg2d, &nbp->coulomb_tab_climg2d, &plist->sci, &plist->cj4,
676 &plist->excl, &computeFshift);
677 launchGpuKernel(kernel, config, timingEvent, kernelName, kernelArgs);
682 t->interaction[iloc].nb_k.closeTimingRegion(stream);
/*! \brief Calculates the amount of shared memory required by the prune kernel.
 *
 *  Note that for the sake of simplicity we use the CUDA terminology "shared memory"
 *  for OpenCL local memory.
 *
 * \param[in] num_threads_z  cj4 concurrency equal to the number of threads/work items in the
 *                           3rd dimension.
 * \returns   the amount of local memory in bytes required by the pruning kernel
 */
static inline int calc_shmem_required_prune(const int num_threads_z)
{
    int shmem;

    /* i-atom x in shared memory (for convenience we load all 4 components including q) */
    shmem = c_numClPerSupercl * c_clSize * sizeof(float) * 4;
    /* cj in shared memory, for each warp separately
     * Note: this only needs to be loaded once per wavefront, but to keep the
     * code simple, for now we load it twice on AMD.
     */
    shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);
    /* Warp vote, requires one uint per warp/32 threads per block. */
    shmem += sizeof(cl_uint) * 2 * num_threads_z;

    return shmem;
}
/*! \brief
 * Launch the pairlist prune-only kernel for the given locality.
 * \p numParts tells in how many parts, i.e. calls, the list will be pruned.
 */
void gpu_launch_kernel_pruneonly(gmx_nbnxn_gpu_t* nb, const InteractionLocality iloc, const int numParts)
{
    cl_atomdata_t*   adat    = nb->atdat;
    cl_nbparam_t*    nbp     = nb->nbparam;
    cl_plist_t*      plist   = nb->plist[iloc];
    cl_timers_t*     t       = nb->timers;
    cl_command_queue stream  = nb->stream[iloc];
    bool             bDoTime = nb->bDoTime == CL_TRUE;

    if (plist->haveFreshList)
    {
        GMX_ASSERT(numParts == 1, "With first pruning we expect 1 part");

        /* Set rollingPruningNumParts to signal that it is not set */
        plist->rollingPruningNumParts = 0;
        plist->rollingPruningPart     = 0;
    }
    else
    {
        if (plist->rollingPruningNumParts == 0)
        {
            plist->rollingPruningNumParts = numParts;
        }
        else
        {
            GMX_ASSERT(numParts == plist->rollingPruningNumParts,
                       "It is not allowed to change numParts in between list generation steps");
        }
    }

    /* Use a local variable for part and update in plist, so we can return here
     * without duplicating the part increment code.
     */
    int part = plist->rollingPruningPart;

    plist->rollingPruningPart++;
    if (plist->rollingPruningPart >= plist->rollingPruningNumParts)
    {
        plist->rollingPruningPart = 0;
    }
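    /* e.g. with numParts == 2, successive rolling-prune calls see
     * part = 0, 1, 0, 1, ..., so each sub-range of the list is revisited
     * every second invocation within a list generation interval.
     */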
    /* Compute the number of list entries to prune in this pass */
    int numSciInPart = (plist->nsci - part) / numParts;

    /* Don't launch the kernel if there is no work to do. */
    if (numSciInPart <= 0)
    {
        plist->haveFreshList = false;

        return;
    }

    GpuRegionTimer* timer = nullptr;
    if (bDoTime)
    {
        timer = &(plist->haveFreshList ? t->interaction[iloc].prune_k : t->interaction[iloc].rollingPrune_k);
    }

    /* beginning of timed prune calculation section */
    if (bDoTime)
    {
        timer->openTimingRegion(stream);
    }

    /* Kernel launch config:
     * - The thread block dimensions match the size of i-clusters, j-clusters,
     *   and j-cluster concurrency, in x, y, and z, respectively.
     * - The 1D block-grid contains as many blocks as super-clusters.
     */
    int num_threads_z = getOclPruneKernelJ4Concurrency(nb->dev_info->vendor_e);

    /* kernel launch config */
    KernelLaunchConfig config;
    config.sharedMemorySize = calc_shmem_required_prune(num_threads_z);
    config.stream           = stream;
    config.blockSize[0]     = c_clSize;
    config.blockSize[1]     = c_clSize;
    config.blockSize[2]     = num_threads_z;
    config.gridSize[0]      = numSciInPart;

    validate_global_work_size(config, 3, nb->dev_info);

    if (debug)
    {
        fprintf(debug,
                "Pruning GPU kernel launch configuration:\n\tLocal work size: %zux%zux%zu\n"
                "\tGlobal work size: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n"
                "\tShMem: %zu\n",
                config.blockSize[0], config.blockSize[1], config.blockSize[2],
                config.blockSize[0] * config.gridSize[0], config.blockSize[1] * config.gridSize[1],
                plist->nsci * c_numClPerSupercl, c_numClPerSupercl, plist->na_c, config.sharedMemorySize);
    }

    cl_nbparam_params_t nbparams_params;
    fillin_ocl_structures(nbp, &nbparams_params);

    auto*          timingEvent  = bDoTime ? timer->fetchNextEvent() : nullptr;
    constexpr char kernelName[] = "k_pruneonly";
    const auto     pruneKernel  = selectPruneKernel(nb->kernel_pruneonly, plist->haveFreshList);
    const auto     kernelArgs   = prepareGpuKernelArguments(pruneKernel, config, &nbparams_params,
                                                            &adat->xq, &adat->shift_vec, &plist->sci,
                                                            &plist->cj4, &plist->imask, &numParts, &part);
    launchGpuKernel(pruneKernel, config, timingEvent, kernelName, kernelArgs);

    if (plist->haveFreshList)
    {
        plist->haveFreshList = false;
        /* Mark that pruning has been done */
        nb->timers->interaction[iloc].didPrune = true;
    }
    else
    {
        /* Mark that rolling pruning has been done */
        nb->timers->interaction[iloc].didRollingPrune = true;
    }

    if (bDoTime)
    {
        timer->closeTimingRegion(stream);
    }
}
/*! \brief
 * Launch asynchronously the download of nonbonded forces from the GPU
 * (and energies/shift forces if required).
 */
void gpu_launch_cpyback(gmx_nbnxn_ocl_t*         nb,
                        struct nbnxn_atomdata_t* nbatom,
                        const gmx::StepWorkload& stepWork,
                        const AtomLocality       aloc)
{
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");

    cl_int gmx_unused cl_error;
    int               adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */

    /* determine interaction locality from atom locality */
    const InteractionLocality iloc = gpuAtomToInteractionLocality(aloc);

    cl_atomdata_t*   adat    = nb->atdat;
    cl_timers_t*     t       = nb->timers;
    bool             bDoTime = nb->bDoTime == CL_TRUE;
    cl_command_queue stream  = nb->stream[iloc];

    /* don't launch non-local copy-back if there was no non-local work to do */
    if ((iloc == InteractionLocality::NonLocal) && !haveGpuShortRangeWork(*nb, iloc))
    {
        /* TODO An alternative way to signal that non-local work is
           complete is to use a clEnqueueMarker+clEnqueueBarrier
           pair. However, the use of bNonLocalStreamActive has the
           advantage of being local to the host, so probably minimizes
           overhead. Curiously, for NVIDIA OpenCL with an empty-domain
           test case, overall simulation performance was higher with
           the API calls, but this has not been tested on AMD OpenCL,
           so could be worth considering in future. */
        nb->bNonLocalStreamActive = CL_FALSE;
        return;
    }

    getGpuAtomRange(adat, aloc, &adat_begin, &adat_len);

    /* beginning of timed D2H section */
    if (bDoTime)
    {
        t->xf[aloc].nb_d2h.openTimingRegion(stream);
    }

    /* With DD the local D2H transfer can only start after the non-local
       one has been launched. */
    if (iloc == InteractionLocality::Local && nb->bNonLocalStreamActive)
    {
        sync_ocl_event(stream, &(nb->nonlocal_done));
    }

    /* DtoH f */
    ocl_copy_D2H_async(nbatom->out[0].f.data() + adat_begin * 3, adat->f,
                       adat_begin * 3 * sizeof(float), adat_len * adat->f_elem_size, stream,
                       bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);

    /* kick off the D2H transfer */
    cl_error = clFlush(stream);
    assert(CL_SUCCESS == cl_error);

    /* After the non-local D2H is launched the nonlocal_done event can be
       recorded which signals that the local D2H can proceed. This event is not
       placed after the non-local kernel because we first need the non-local
       data back. */
    if (iloc == InteractionLocality::NonLocal)
    {
        cl_error = clEnqueueMarkerWithWaitList(stream, 0, nullptr, &(nb->nonlocal_done));
        assert(CL_SUCCESS == cl_error);
        nb->bNonLocalStreamActive = CL_TRUE;
    }

    /* only transfer energies and shift forces in the local stream */
    if (iloc == InteractionLocality::Local)
    {
        /* DtoH fshift when virial is needed */
        if (stepWork.computeVirial)
        {
            ocl_copy_D2H_async(nb->nbst.fshift, adat->fshift, 0, SHIFTS * adat->fshift_elem_size,
                               stream, bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);
        }

        /* DtoH energies */
        if (stepWork.computeEnergy)
        {
            ocl_copy_D2H_async(nb->nbst.e_lj, adat->e_lj, 0, sizeof(float), stream,
                               bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);

            ocl_copy_D2H_async(nb->nbst.e_el, adat->e_el, 0, sizeof(float), stream,
                               bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);
        }
    }

    if (bDoTime)
    {
        t->xf[aloc].nb_d2h.closeTimingRegion(stream);
    }
}
/*! \brief Selects the Ewald kernel type, analytical or tabulated, single or twin cut-off. */
int nbnxn_gpu_pick_ewald_kernel_type(const interaction_const_t& ic)
{
    bool bTwinCut = (ic.rcoulomb != ic.rvdw);
    bool bUseAnalyticalEwald, bForceAnalyticalEwald, bForceTabulatedEwald;
    int  kernel_type;

    /* Benchmarking/development environment variables to force the use of
       analytical or tabulated Ewald kernels. */
    bForceAnalyticalEwald = (getenv("GMX_OCL_NB_ANA_EWALD") != nullptr);
    bForceTabulatedEwald  = (getenv("GMX_OCL_NB_TAB_EWALD") != nullptr);
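    /* For example, launching a run as
     *   GMX_OCL_NB_ANA_EWALD=1 gmx mdrun ...
     * forces the analytical Ewald kernels regardless of the default choice
     * below (the variable only needs to be set, its value is ignored;
     * intended for benchmarking and development only).
     */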
    if (bForceAnalyticalEwald && bForceTabulatedEwald)
    {
        gmx_incons(
                "Both analytical and tabulated Ewald OpenCL non-bonded kernels "
                "requested through environment variables.");
    }

    /* OpenCL: By default, use analytical Ewald.
     * TODO: tabulated does not work, it needs fixing, see init_nbparam() in nbnxn_ocl_data_mgmt.cpp.
     *
     * TODO: decide if dev_info parameter should be added to recognize NVIDIA CC>=3.0 devices.
     */
    bUseAnalyticalEwald = true;
    if (bForceAnalyticalEwald)
    {
        if (debug)
        {
            fprintf(debug, "Using analytical Ewald OpenCL kernels\n");
        }
    }
    else if (bForceTabulatedEwald)
    {
        bUseAnalyticalEwald = false;

        if (debug)
        {
            fprintf(debug, "Using tabulated Ewald OpenCL kernels\n");
        }
    }

    /* Use twin cut-off kernels if requested by bTwinCut or the env. var.
       forces it (use it for debugging/benchmarking only). */
    if (!bTwinCut && (getenv("GMX_OCL_NB_EWALD_TWINCUT") == nullptr))
    {
        kernel_type = bUseAnalyticalEwald ? eelOclEWALD_ANA : eelOclEWALD_TAB;
    }
    else
    {
        kernel_type = bUseAnalyticalEwald ? eelOclEWALD_ANA_TWIN : eelOclEWALD_TAB_TWIN;
    }

    return kernel_type;
}