/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
 * Copyright (c) 2017,2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *  \brief Define OpenCL implementation of nbnxm_gpu.h
 *
 *  \author Anca Hamuraru <anca@streamcomputing.eu>
 *  \author Teemu Virolainen <teemu@streamcomputing.eu>
 *  \author Dimitrios Karkoulis <dimitris.karkoulis@gmail.com>
 *  \author Szilárd Páll <pall.szilard@gmail.com>
 *  \ingroup module_nbnxm
 *
 *  TODO:
 *  - Add a static const cl_uint c_pruneKernelWorkDim / c_nbnxnKernelWorkDim = 3;
 *  - Rework the copying of OCL data structures done before every invocation of both
 *    nb and prune kernels (using fillin_ocl_structures); also consider at the same
 *    time calling clSetKernelArg only on the updated parameters (if tracking changed
 *    parameters is feasible);
 *  - Consider using the event_wait_list argument to clEnqueueNDRangeKernel to mark
 *    dependencies on the kernel launched: e.g. the non-local nb kernel's dependency
 *    on the misc_ops_and_local_H2D_done event could be better expressed this way.
 *
 *  - Consider extracting common sections of the OpenCL and CUDA nbnxn logic, e.g.:
 *    - in nbnxn_gpu_launch_kernel_pruneonly() the pre- and post-kernel launch logic
 *      is identical in the two implementations, so a 3-way split might allow sharing
 *      code.
 */
#include "gmxpre.h"

#include "thread_mpi/atomic.h"

#include "gromacs/gpu_utils/device_context.h"
#include "gromacs/gpu_utils/gputraits_ocl.h"
#include "gromacs/gpu_utils/oclutils.h"
#include "gromacs/hardware/hw_info.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/nbnxm/atomdata.h"
#include "gromacs/nbnxm/gpu_common.h"
#include "gromacs/nbnxm/gpu_common_utils.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/nbnxm_gpu.h"
#include "gromacs/nbnxm/pairlist.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"

#include "nbnxm_ocl_internal.h"
#include "nbnxm_ocl_types.h"
namespace Nbnxm
{

/*! \brief Convenience constants */
static constexpr int c_clSize = c_nbnxnGpuClusterSize;
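/* Sketch of how this constant is used below (see the KernelLaunchConfig setups
 * in gpu_launch_kernel() and gpu_launch_kernel_pruneonly()):
 *
 *   config.blockSize[0] = c_clSize;
 *   config.blockSize[1] = c_clSize;
 *
 * i.e. each work group is c_clSize x c_clSize work items in x/y, roughly one
 * work item per pair of i-/j-cluster atoms. */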
/*! \brief Validates the input global work size parameter.
 */
static inline void validate_global_work_size(const KernelLaunchConfig& config,
                                             int                       work_dim,
                                             const DeviceInformation*  dinfo)
{
    cl_uint device_size_t_size_bits;
    cl_uint host_size_t_size_bits;

    GMX_ASSERT(dinfo, "Need a valid device info object");

    size_t global_work_size[3];
    GMX_ASSERT(work_dim <= 3, "Not supporting hyper-grids just yet");
    for (int i = 0; i < work_dim; i++)
    {
        global_work_size[i] = config.blockSize[i] * config.gridSize[i];
    }

    /* Each component of a global_work_size must not exceed the range given by the
       sizeof(device size_t) for the device on which the kernel execution will
       be enqueued. See:
       https://www.khronos.org/registry/cl/sdk/1.0/docs/man/xhtml/clEnqueueNDRangeKernel.html
     */
    device_size_t_size_bits = dinfo->adress_bits;
    host_size_t_size_bits   = static_cast<cl_uint>(sizeof(size_t) * 8);

    /* If sizeof(host size_t) <= sizeof(device size_t)
       => global_work_size components will always be valid
       else
       => get device limit for global work size and
       compare it against each component of global_work_size.
     */
    if (host_size_t_size_bits > device_size_t_size_bits)
    {
        size_t device_limit = (1ULL << device_size_t_size_bits) - 1;

        for (int i = 0; i < work_dim; i++)
        {
            if (global_work_size[i] > device_limit)
            {
                gmx_fatal(FARGS,
                          "Watch out, the input system is too large to simulate!\n"
                          "The number of nonbonded work units (=number of super-clusters) exceeds "
                          "the device capabilities. Global work size limit exceeded (%zu > %zu)!",
                          global_work_size[i], device_limit);
            }
        }
    }
}
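/* Usage sketch: the launch functions below call
 *
 *   validate_global_work_size(config, 3, &nb->deviceContext_->deviceInfo());
 *
 * e.g. on a device reporting adress_bits == 32, each component of the global
 * work size is checked against 2^32 - 1. */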
/* Constant arrays listing non-bonded kernel function names. The arrays are
 * organized in 2-dim arrays by: electrostatics and VDW type.
 *
 * Note that the row- and column-order of function names has to match the
 * order of the corresponding enumerated electrostatics and vdw types, resp.,
 * defined in nbnxm_ocl_types.h.
 */
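/* Worked example (assuming the enum ordering from nbnxm_ocl_types.h, e.g.
 * eelOclRF for reaction-field and evdwOclCUT for plain LJ cut-off):
 *
 *   const char* name = nb_kfunc_noener_noprune_ptr[eelOclRF][evdwOclCUT];
 *   // name == "nbnxn_kernel_ElecRF_VdwLJ_F_opencl"
 */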
/*! \brief Force-only kernel function names. */
static const char* nb_kfunc_noener_noprune_ptr[eelOclNR][evdwOclNR] = {
    { "nbnxn_kernel_ElecCut_VdwLJ_F_opencl", "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecCut_VdwLJFsw_F_opencl",
      "nbnxn_kernel_ElecCut_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_F_opencl", "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecRF_VdwLJFsw_F_opencl",
      "nbnxn_kernel_ElecRF_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_F_opencl", "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecEw_VdwLJFsw_F_opencl",
      "nbnxn_kernel_ElecEw_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_opencl" }
};
/*! \brief Force + energy kernel function names. */
static const char* nb_kfunc_ener_noprune_ptr[eelOclNR][evdwOclNR] = {
    { "nbnxn_kernel_ElecCut_VdwLJ_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJFsw_VF_opencl",
      "nbnxn_kernel_ElecCut_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJFsw_VF_opencl",
      "nbnxn_kernel_ElecRF_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJFsw_VF_opencl",
      "nbnxn_kernel_ElecEw_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_opencl" }
};
/*! \brief Force + pruning kernel function names. */
static const char* nb_kfunc_noener_prune_ptr[eelOclNR][evdwOclNR] = {
    { "nbnxn_kernel_ElecCut_VdwLJ_F_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_opencl" }
};
/*! \brief Force + energy + pruning kernel function names. */
static const char* nb_kfunc_ener_prune_ptr[eelOclNR][evdwOclNR] = {
    { "nbnxn_kernel_ElecCut_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_opencl" }
};
/*! \brief Return a pointer to the prune kernel version to be executed at the current invocation.
 *
 * \param[in] kernel_pruneonly  array of prune kernel objects
 * \param[in] firstPrunePass    true if the first pruning pass is being executed
 */
static inline cl_kernel selectPruneKernel(cl_kernel kernel_pruneonly[], bool firstPrunePass)
{
    cl_kernel* kernelPtr;

    if (firstPrunePass)
    {
        kernelPtr = &(kernel_pruneonly[epruneFirst]);
    }
    else
    {
        kernelPtr = &(kernel_pruneonly[epruneRolling]);
    }
    // TODO: consider creating the prune kernel object here to avoid a
    // clCreateKernel for the rolling prune kernel if this is not needed.
    return *kernelPtr;
}
/*! \brief Return a pointer to the kernel version to be executed at the current step.
 *
 *  OpenCL kernel objects are cached in nb. If the requested kernel is not
 *  found in the cache, it will be created and the cache will be updated.
 */
static inline cl_kernel select_nbnxn_kernel(NbnxmGpu* nb, int eeltype, int evdwtype, bool bDoEne, bool bDoPrune)
{
    const char* kernel_name_to_run;
    cl_kernel*  kernel_ptr;
    cl_int      cl_error;

    GMX_ASSERT(eeltype < eelOclNR,
               "The electrostatics type requested is not implemented in the OpenCL kernels.");
    GMX_ASSERT(evdwtype < evdwOclNR,
               "The VdW type requested is not implemented in the OpenCL kernels.");

    if (bDoEne)
    {
        if (bDoPrune)
        {
            kernel_name_to_run = nb_kfunc_ener_prune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_ener_prune_ptr[eeltype][evdwtype]);
        }
        else
        {
            kernel_name_to_run = nb_kfunc_ener_noprune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_ener_noprune_ptr[eeltype][evdwtype]);
        }
    }
    else
    {
        if (bDoPrune)
        {
            kernel_name_to_run = nb_kfunc_noener_prune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_noener_prune_ptr[eeltype][evdwtype]);
        }
        else
        {
            kernel_name_to_run = nb_kfunc_noener_noprune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_noener_noprune_ptr[eeltype][evdwtype]);
        }
    }

    if (nullptr == kernel_ptr[0])
    {
        *kernel_ptr = clCreateKernel(nb->dev_rundata->program, kernel_name_to_run, &cl_error);
        GMX_ASSERT(cl_error == CL_SUCCESS,
                   ("clCreateKernel failed: " + ocl_get_error_string(cl_error)).c_str());
    }

    return *kernel_ptr;
}
/*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use.
 */
static inline int calc_shmem_required_nonbonded(int vdwType, bool bPrefetchLjParam)
{
    int shmem;

    /* size of shmem (force-buffers/xq/atom type preloading) */
    /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
    /* i-atom x+q in shared memory */
    shmem = c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(float) * 4; /* xqib */
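    /* Size sketch, assuming the usual constants c_nbnxnGpuNumClusterPerSupercluster = 8
     * and c_clSize = 8: the xqib buffer alone is 8 * 8 * sizeof(float) * 4 = 1 KiB. */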
    /* cj in shared memory, for both warps separately
     * TODO: in the "nowarp" kernels we load cj only once, so the factor 2 is not needed.
     */
    shmem += 2 * c_nbnxnGpuJgroupSize * sizeof(int); /* cjs */
    if (bPrefetchLjParam)
    {
        if (useLjCombRule(vdwType))
        {
            /* i-atom LJ combination parameters in shared memory */
            shmem += c_nbnxnGpuNumClusterPerSupercluster * c_clSize * 2
                     * sizeof(float); /* atib abused for ljcp, float2 */
        }
        else
        {
            /* i-atom types in shared memory */
            shmem += c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(int); /* atib */
        }
    }
    /* force reduction buffers in shared memory */
    shmem += c_clSize * c_clSize * 3 * sizeof(float); /* f_buf */
    /* Warp vote; in fact it must be one cl_uint per warp in the block (two warps here). */
    shmem += sizeof(cl_uint) * 2; /* warp_any */

    return shmem;
}
/*! \brief Initializes data structures that are going to be sent to the OpenCL device.
 *
 * The device can't use the same data structures as the host for two main reasons:
 * - OpenCL restrictions (pointers are not accepted inside data structures);
 * - some host-side fields are not needed for the OpenCL kernels.
 *
 * This function is called before the launch of both nbnxn and prune kernels.
 */
static void fillin_ocl_structures(cl_nbparam_t* nbp, cl_nbparam_params_t* nbparams_params)
{
    nbparams_params->coulomb_tab_scale = nbp->coulomb_tab_scale;
    nbparams_params->c_rf              = nbp->c_rf;
    nbparams_params->dispersion_shift  = nbp->dispersion_shift;
    nbparams_params->eeltype           = nbp->eeltype;
    nbparams_params->epsfac            = nbp->epsfac;
    nbparams_params->ewaldcoeff_lj     = nbp->ewaldcoeff_lj;
    nbparams_params->ewald_beta        = nbp->ewald_beta;
    nbparams_params->rcoulomb_sq       = nbp->rcoulomb_sq;
    nbparams_params->repulsion_shift   = nbp->repulsion_shift;
    nbparams_params->rlistOuter_sq     = nbp->rlistOuter_sq;
    nbparams_params->rvdw_sq           = nbp->rvdw_sq;
    nbparams_params->rlistInner_sq     = nbp->rlistInner_sq;
    nbparams_params->rvdw_switch       = nbp->rvdw_switch;
    nbparams_params->sh_ewald          = nbp->sh_ewald;
    nbparams_params->sh_lj_ewald       = nbp->sh_lj_ewald;
    nbparams_params->two_k_rf          = nbp->two_k_rf;
    nbparams_params->vdwtype           = nbp->vdwtype;
    nbparams_params->vdw_switch        = nbp->vdw_switch;
}
/*! \brief Enqueues a wait for event completion.
 *
 * Then it releases the event and sets it to 0.
 * Don't use this function when more than one wait will be issued for the event.
 * Equivalent to CUDA stream synchronization. */
static void sync_ocl_event(cl_command_queue stream, cl_event* ocl_event)
{
    cl_int gmx_unused cl_error;

    /* Enqueue wait */
    cl_error = clEnqueueBarrierWithWaitList(stream, 1, ocl_event, nullptr);
    GMX_RELEASE_ASSERT(CL_SUCCESS == cl_error, ocl_get_error_string(cl_error).c_str());

    /* Release event and reset it to 0. It is OK to release it here, as the
       enqueued barrier performs an implicit retain of the events in its wait list. */
    cl_error = clReleaseEvent(*ocl_event);
    GMX_ASSERT(cl_error == CL_SUCCESS,
               ("clReleaseEvent failed: " + ocl_get_error_string(cl_error)).c_str());
    *ocl_event = nullptr;
}
/*! \brief Launch asynchronously the xq buffer host to device copy. */
void gpu_copy_xq_to_gpu(NbnxmGpu* nb, const nbnxn_atomdata_t* nbatom, const AtomLocality atomLocality)
{
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");

    const InteractionLocality iloc = gpuAtomToInteractionLocality(atomLocality);

    /* local/nonlocal offset and length used for xq and f */
    int adat_begin, adat_len;

    cl_atomdata_t*      adat         = nb->atdat;
    cl_plist_t*         plist        = nb->plist[iloc];
    cl_timers_t*        t            = nb->timers;
    const DeviceStream& deviceStream = *nb->deviceStreams[iloc];

    bool bDoTime = nb->bDoTime;

    /* Don't launch the non-local H2D copy if there is no dependent
       work to do: neither non-local nor other (e.g. bonded) work
       to do that has as input the nbnxn coordinates.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local x+q copy (and the rest of the local
       work in nbnxn_gpu_launch_kernel()).
     */
    if ((iloc == InteractionLocality::NonLocal) && !haveGpuShortRangeWork(*nb, iloc))
    {
        plist->haveFreshList = false;

        return;
    }

    /* calculate the atom data index range based on locality */
    if (atomLocality == AtomLocality::Local)
    {
        adat_begin = 0;
        adat_len   = adat->natoms_local;
    }
    else
    {
        adat_begin = adat->natoms_local;
        adat_len   = adat->natoms - adat->natoms_local;
    }

    /* beginning of timed HtoD section */
    if (bDoTime)
    {
        t->xf[atomLocality].nb_h2d.openTimingRegion(deviceStream);
    }

    /* HtoD x, q */
    ocl_copy_H2D_async(adat->xq, nbatom->x().data() + adat_begin * 4, adat_begin * sizeof(float) * 4,
                       adat_len * sizeof(float) * 4, deviceStream.stream(),
                       bDoTime ? t->xf[atomLocality].nb_h2d.fetchNextEvent() : nullptr);

    if (bDoTime)
    {
        t->xf[atomLocality].nb_h2d.closeTimingRegion(deviceStream);
    }

    /* When we get here all misc operations issued in the local stream as well as
       the local xq H2D are done,
       so we record that in the local stream and wait for it in the nonlocal one. */
    if (nb->bUseTwoStreams)
    {
        if (iloc == InteractionLocality::Local)
        {
            cl_int gmx_used_in_debug cl_error = clEnqueueMarkerWithWaitList(
                    deviceStream.stream(), 0, nullptr, &(nb->misc_ops_and_local_H2D_done));
            GMX_ASSERT(cl_error == CL_SUCCESS,
                       ("clEnqueueMarkerWithWaitList failed: " + ocl_get_error_string(cl_error)).c_str());

            /* Based on the v1.2 section 5.13 of the OpenCL spec, a flush is needed
             * in the local stream in order to be able to sync with the above event
             * from the non-local stream.
             */
            cl_error = clFlush(deviceStream.stream());
            GMX_ASSERT(cl_error == CL_SUCCESS,
                       ("clFlush failed: " + ocl_get_error_string(cl_error)).c_str());
        }
        else
        {
            sync_ocl_event(deviceStream.stream(), &(nb->misc_ops_and_local_H2D_done));
        }
    }
}
/*! \brief Launch GPU kernel

   As we execute nonbonded workload in separate queues, before launching
   the kernel we need to make sure that the following operations have completed:
   - atomdata allocation and related H2D transfers (every nstlist step);
   - pair list H2D transfer (every nstlist step);
   - shift vector H2D transfer (every nstlist step);
   - force (+shift force and energy) output clearing (every step).

   These operations are issued in the local queue at the beginning of the step
   and therefore always complete before the local kernel launch. The non-local
   kernel is launched after the local on the same device/context, so this is
   inherently scheduled after the operations in the local stream (including the
   above "misc_ops").
   However, for the sake of having a future-proof implementation, we use the
   misc_ops_done event to record the point in time when the above operations
   are finished and synchronize with this event in the non-local stream.
 */
void gpu_launch_kernel(NbnxmGpu* nb, const gmx::StepWorkload& stepWork, const Nbnxm::InteractionLocality iloc)
{
    cl_atomdata_t*      adat         = nb->atdat;
    cl_nbparam_t*       nbp          = nb->nbparam;
    cl_plist_t*         plist        = nb->plist[iloc];
    cl_timers_t*        t            = nb->timers;
    const DeviceStream& deviceStream = *nb->deviceStreams[iloc];

    bool bDoTime = nb->bDoTime;

    cl_nbparam_params_t nbparams_params;

    /* Don't launch the non-local kernel if there is no work to do.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local kernel and later (not in
       this function) the stream wait, local f copyback and the f buffer
       clearing. All these operations, except for the local interaction kernel,
       are needed for the non-local interactions. The skip of the local kernel
       call is taken care of later in this function. */
    if (canSkipNonbondedWork(*nb, iloc))
    {
        plist->haveFreshList = false;

        return;
    }

    if (nbp->useDynamicPruning && plist->haveFreshList)
    {
        /* Prunes for rlistOuter and rlistInner, sets plist->haveFreshList=false
           (that's the way the timing accounting can distinguish between
           separate prune kernel and combined force+prune).
         */
        Nbnxm::gpu_launch_kernel_pruneonly(nb, iloc, 1);
    }

    if (plist->nsci == 0)
    {
        /* Don't launch an empty local kernel (is not allowed with OpenCL).
         */
        return;
    }

    /* beginning of timed nonbonded calculation section */
    if (bDoTime)
    {
        t->interaction[iloc].nb_k.openTimingRegion(deviceStream);
    }

    /* kernel launch config */

    KernelLaunchConfig config;
    config.sharedMemorySize = calc_shmem_required_nonbonded(nbp->vdwtype, nb->bPrefetchLjParam);
    config.blockSize[0]     = c_clSize;
    config.blockSize[1]     = c_clSize;
    config.gridSize[0]      = plist->nsci;

    validate_global_work_size(config, 3, &nb->deviceContext_->deviceInfo());

    if (debug)
    {
        fprintf(debug,
                "Non-bonded GPU launch configuration:\n\tLocal work size: %zux%zux%zu\n\t"
                "Global work size: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n",
                config.blockSize[0], config.blockSize[1], config.blockSize[2],
                config.blockSize[0] * config.gridSize[0], config.blockSize[1] * config.gridSize[1],
                plist->nsci * c_nbnxnGpuNumClusterPerSupercluster,
                c_nbnxnGpuNumClusterPerSupercluster, plist->na_c);
    }

    fillin_ocl_structures(nbp, &nbparams_params);

    auto*          timingEvent  = bDoTime ? t->interaction[iloc].nb_k.fetchNextEvent() : nullptr;
    constexpr char kernelName[] = "k_calc_nb";
    const auto     kernel =
            select_nbnxn_kernel(nb, nbp->eeltype, nbp->vdwtype, stepWork.computeEnergy,
                                (plist->haveFreshList && !nb->timers->interaction[iloc].didPrune));

    // The OpenCL kernel takes int as second to last argument because bool is
    // not supported as a kernel argument type (sizeof(bool) is implementation defined).
    const int computeFshift = static_cast<int>(stepWork.computeVirial);
    if (useLjCombRule(nb->nbparam->vdwtype))
    {
        const auto kernelArgs = prepareGpuKernelArguments(
                kernel, config, &nbparams_params, &adat->xq, &adat->f, &adat->e_lj, &adat->e_el,
                &adat->fshift, &adat->lj_comb, &adat->shift_vec, &nbp->nbfp_climg2d, &nbp->nbfp_comb_climg2d,
                &nbp->coulomb_tab_climg2d, &plist->sci, &plist->cj4, &plist->excl, &computeFshift);

        launchGpuKernel(kernel, config, deviceStream, timingEvent, kernelName, kernelArgs);
    }
    else
    {
        const auto kernelArgs = prepareGpuKernelArguments(
                kernel, config, &adat->ntypes, &nbparams_params, &adat->xq, &adat->f, &adat->e_lj,
                &adat->e_el, &adat->fshift, &adat->atom_types, &adat->shift_vec, &nbp->nbfp_climg2d,
                &nbp->nbfp_comb_climg2d, &nbp->coulomb_tab_climg2d, &plist->sci, &plist->cj4,
                &plist->excl, &computeFshift);
        launchGpuKernel(kernel, config, deviceStream, timingEvent, kernelName, kernelArgs);
    }

    if (bDoTime)
    {
        t->interaction[iloc].nb_k.closeTimingRegion(deviceStream);
    }
}
/*! \brief Calculates the amount of shared memory required by the prune kernel.
 *
 *  Note that for the sake of simplicity we use the CUDA terminology "shared memory"
 *  for OpenCL local memory.
 *
 * \param[in] num_threads_z cj4 concurrency equal to the number of threads/work items in the
 *                          3rd dimension.
 * \returns   the amount of local memory in bytes required by the pruning kernel
 */
static inline int calc_shmem_required_prune(const int num_threads_z)
{
    int shmem;

    /* i-atom x in shared memory (for convenience we load all 4 components including q) */
    shmem = c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(float) * 4;
    /* cj in shared memory, for each warp separately
     * Note: only need to load once per wavefront, but to keep the code simple,
     * for now we load twice on AMD.
     */
    shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);
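    /* Size sketch, assuming c_nbnxnGpuClusterpairSplit = 2, c_nbnxnGpuJgroupSize = 4,
     * and the default num_threads_z = 4: the cj buffer above is
     * 4 * 2 * 4 * sizeof(int) = 128 bytes of local memory. */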
    /* Warp vote, requires one uint per warp/32 threads per block. */
    shmem += sizeof(cl_uint) * 2 * num_threads_z;

    return shmem;
}
/*! \brief
 * Launch the pairlist prune-only kernel for the given locality.
 * \p numParts tells in how many parts, i.e. calls, the list will be pruned.
 */
void gpu_launch_kernel_pruneonly(NbnxmGpu* nb, const InteractionLocality iloc, const int numParts)
{
    cl_atomdata_t*      adat         = nb->atdat;
    cl_nbparam_t*       nbp          = nb->nbparam;
    cl_plist_t*         plist        = nb->plist[iloc];
    cl_timers_t*        t            = nb->timers;
    const DeviceStream& deviceStream = *nb->deviceStreams[iloc];
    bool                bDoTime      = nb->bDoTime;

    if (plist->haveFreshList)
    {
        GMX_ASSERT(numParts == 1, "With first pruning we expect 1 part");

        /* Set rollingPruningNumParts to signal that it is not set */
        plist->rollingPruningNumParts = 0;
        plist->rollingPruningPart     = 0;
    }
    else
    {
        if (plist->rollingPruningNumParts == 0)
        {
            plist->rollingPruningNumParts = numParts;
        }
        else
        {
            GMX_ASSERT(numParts == plist->rollingPruningNumParts,
                       "It is not allowed to change numParts in between list generation steps");
        }
    }
    /* Use a local variable for part and update in plist, so we can return here
     * without duplicating the part increment code.
     */
    int part = plist->rollingPruningPart;

    plist->rollingPruningPart++;
    if (plist->rollingPruningPart >= plist->rollingPruningNumParts)
    {
        plist->rollingPruningPart = 0;
    }

    /* Compute the number of list entries to prune in this pass */
    int numSciInPart = (plist->nsci - part) / numParts;

    /* Don't launch the kernel if there is no work to do. */
    if (numSciInPart <= 0)
    {
        plist->haveFreshList = false;

        return;
    }

    GpuRegionTimer* timer = nullptr;
    if (bDoTime)
    {
        timer = &(plist->haveFreshList ? t->interaction[iloc].prune_k : t->interaction[iloc].rollingPrune_k);
    }

    /* beginning of timed prune calculation section */
    if (bDoTime)
    {
        timer->openTimingRegion(deviceStream);
    }

    /* Kernel launch config:
     * - The thread block dimensions match the size of i-clusters, j-clusters,
     *   and j-cluster concurrency, in x, y, and z, respectively.
     * - The 1D block-grid contains as many blocks as super-clusters.
     */
    int num_threads_z = c_oclPruneKernelJ4ConcurrencyDEFAULT;

    /* kernel launch config */
    KernelLaunchConfig config;
    config.sharedMemorySize = calc_shmem_required_prune(num_threads_z);
    config.blockSize[0]     = c_clSize;
    config.blockSize[1]     = c_clSize;
    config.blockSize[2]     = num_threads_z;
    config.gridSize[0]      = numSciInPart;

    validate_global_work_size(config, 3, &nb->deviceContext_->deviceInfo());

    if (debug)
    {
        fprintf(debug,
                "Pruning GPU kernel launch configuration:\n\tLocal work size: %zux%zux%zu\n\t"
                "\tGlobal work size: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n"
                "\tShMem: %zu\n",
                config.blockSize[0], config.blockSize[1], config.blockSize[2],
                config.blockSize[0] * config.gridSize[0], config.blockSize[1] * config.gridSize[1],
                plist->nsci * c_nbnxnGpuNumClusterPerSupercluster,
                c_nbnxnGpuNumClusterPerSupercluster, plist->na_c, config.sharedMemorySize);
    }

    cl_nbparam_params_t nbparams_params;
    fillin_ocl_structures(nbp, &nbparams_params);

    auto*          timingEvent  = bDoTime ? timer->fetchNextEvent() : nullptr;
    constexpr char kernelName[] = "k_pruneonly";
    const auto     pruneKernel  = selectPruneKernel(nb->kernel_pruneonly, plist->haveFreshList);
    const auto     kernelArgs   = prepareGpuKernelArguments(pruneKernel, config, &nbparams_params,
                                                            &adat->xq, &adat->shift_vec, &plist->sci,
                                                            &plist->cj4, &plist->imask, &numParts, &part);
    launchGpuKernel(pruneKernel, config, deviceStream, timingEvent, kernelName, kernelArgs);

    if (plist->haveFreshList)
    {
        plist->haveFreshList = false;
        /* Mark that pruning has been done */
        nb->timers->interaction[iloc].didPrune = true;
    }
    else
    {
        /* Mark that rolling pruning has been done */
        nb->timers->interaction[iloc].didRollingPrune = true;
    }

    if (bDoTime)
    {
        timer->closeTimingRegion(deviceStream);
    }
}
/*! \brief
 * Launch asynchronously the download of nonbonded forces from the GPU
 * (and energies/shift forces if required).
 */
void gpu_launch_cpyback(NbnxmGpu*                nb,
                        struct nbnxn_atomdata_t* nbatom,
                        const gmx::StepWorkload& stepWork,
                        const AtomLocality       aloc)
{
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");

    cl_int gmx_unused cl_error;
    int               adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */

    /* determine interaction locality from atom locality */
    const InteractionLocality iloc = gpuAtomToInteractionLocality(aloc);

    cl_atomdata_t*      adat         = nb->atdat;
    cl_timers_t*        t            = nb->timers;
    bool                bDoTime      = nb->bDoTime;
    const DeviceStream& deviceStream = *nb->deviceStreams[iloc];

    /* don't launch non-local copy-back if there was no non-local work to do */
    if ((iloc == InteractionLocality::NonLocal) && !haveGpuShortRangeWork(*nb, iloc))
    {
        /* TODO An alternative way to signal that non-local work is
           complete is to use a clEnqueueMarker+clEnqueueBarrier
           pair. However, the use of bNonLocalStreamActive has the
           advantage of being local to the host, so probably minimizes
           overhead. Curiously, for NVIDIA OpenCL with an empty-domain
           test case, overall simulation performance was higher with
           the API calls, but this has not been tested on AMD OpenCL,
           so could be worth considering in future. */
        nb->bNonLocalStreamActive = CL_FALSE;
        return;
    }

    getGpuAtomRange(adat, aloc, &adat_begin, &adat_len);

    /* beginning of timed D2H section */
    if (bDoTime)
    {
        t->xf[aloc].nb_d2h.openTimingRegion(deviceStream);
    }

    /* With DD the local D2H transfer can only start after the non-local
       has been launched. */
    if (iloc == InteractionLocality::Local && nb->bNonLocalStreamActive)
    {
        sync_ocl_event(deviceStream.stream(), &(nb->nonlocal_done));
    }

    /* DtoH f */
    ocl_copy_D2H_async(nbatom->out[0].f.data() + adat_begin * DIM, adat->f,
                       adat_begin * DIM * sizeof(nbatom->out[0].f[0]),
                       adat_len * DIM * sizeof(nbatom->out[0].f[0]), deviceStream.stream(),
                       bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);

    /* kick off work */
    cl_error = clFlush(deviceStream.stream());
    GMX_ASSERT(cl_error == CL_SUCCESS, ("clFlush failed: " + ocl_get_error_string(cl_error)).c_str());

    /* After the non-local D2H is launched the nonlocal_done event can be
       recorded which signals that the local D2H can proceed. This event is not
       placed after the non-local kernel because we first need the non-local
       data back. */
    if (iloc == InteractionLocality::NonLocal)
    {
        cl_error = clEnqueueMarkerWithWaitList(deviceStream.stream(), 0, nullptr, &(nb->nonlocal_done));
        GMX_ASSERT(cl_error == CL_SUCCESS,
                   ("clEnqueueMarkerWithWaitList failed: " + ocl_get_error_string(cl_error)).c_str());
        nb->bNonLocalStreamActive = CL_TRUE;
    }

    /* only transfer energies in the local stream */
    if (iloc == InteractionLocality::Local)
    {
        /* DtoH fshift when virial is needed */
        if (stepWork.computeVirial)
        {
            ocl_copy_D2H_async(nb->nbst.fshift, adat->fshift, 0,
                               SHIFTS * sizeof(nb->nbst.fshift[0]), deviceStream.stream(),
                               bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);
        }

        /* DtoH energies */
        if (stepWork.computeEnergy)
        {
            ocl_copy_D2H_async(nb->nbst.e_lj, adat->e_lj, 0, sizeof(float), deviceStream.stream(),
                               bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);

            ocl_copy_D2H_async(nb->nbst.e_el, adat->e_el, 0, sizeof(float), deviceStream.stream(),
                               bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);
        }
    }

    if (bDoTime)
    {
        t->xf[aloc].nb_d2h.closeTimingRegion(deviceStream);
    }
}
/*! \brief Selects the Ewald kernel type, analytical or tabulated, single or twin cut-off. */
int nbnxn_gpu_pick_ewald_kernel_type(const interaction_const_t& ic)
{
    bool bTwinCut = (ic.rcoulomb != ic.rvdw);
    bool bUseAnalyticalEwald, bForceAnalyticalEwald, bForceTabulatedEwald;
    int  kernel_type;

    /* Benchmarking/development environment variables to force the use of
       analytical or tabulated Ewald kernel. */
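    /* Usage sketch: e.g. running mdrun with GMX_OCL_NB_ANA_EWALD=1 set in the
       environment forces the analytical flavor; only the presence of the
       variable is checked below, not its value. */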
    bForceAnalyticalEwald = (getenv("GMX_OCL_NB_ANA_EWALD") != nullptr);
    bForceTabulatedEwald  = (getenv("GMX_OCL_NB_TAB_EWALD") != nullptr);

    if (bForceAnalyticalEwald && bForceTabulatedEwald)
    {
        gmx_incons(
                "Both analytical and tabulated Ewald OpenCL non-bonded kernels "
                "requested through environment variables.");
    }

    /* By default, use analytical Ewald.
     * TODO: tabulated does not work, it needs fixing, see init_nbparam() in nbnxn_ocl_data_mgmt.cpp
     */
    bUseAnalyticalEwald = true;
    if (bForceAnalyticalEwald)
    {
        if (debug)
        {
            fprintf(debug, "Using analytical Ewald OpenCL kernels\n");
        }
    }
    else if (bForceTabulatedEwald)
    {
        bUseAnalyticalEwald = false;

        if (debug)
        {
            fprintf(debug, "Using tabulated Ewald OpenCL kernels\n");
        }
    }

    /* Use twin cut-off kernels if requested by bTwinCut or forced by the env. var.
       (use the latter for debugging/benchmarking only). */
    if (!bTwinCut && (getenv("GMX_OCL_NB_EWALD_TWINCUT") == nullptr))
    {
        kernel_type = bUseAnalyticalEwald ? eelOclEWALD_ANA : eelOclEWALD_TAB;
    }
    else
    {
        kernel_type = bUseAnalyticalEwald ? eelOclEWALD_ANA_TWIN : eelOclEWALD_TAB_TWIN;
    }

    return kernel_type;
}

} // namespace Nbnxm