 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief Define OpenCL implementation of nbnxn_gpu.h
 *
 * \author Anca Hamuraru <anca@streamcomputing.eu>
 * \author Teemu Virolainen <teemu@streamcomputing.eu>
 * \author Dimitrios Karkoulis <dimitris.karkoulis@gmail.com>
 * \author Szilárd Páll <pall.szilard@gmail.com>
 * \ingroup module_mdlib
 *
 * TODO:
 * - Add a static const cl_uint c_pruneKernelWorkDim / c_nbnxnKernelWorkDim = 3;
 * - Rework the copying of OCL data structures done before every invocation of both
 *   nb and prune kernels (using fillin_ocl_structures); also consider at the same
 *   time calling clSetKernelArg only on the updated parameters (if tracking changed
 *   parameters is feasible);
 * - Consider using the event_wait_list argument to clEnqueueNDRangeKernel to mark
 *   dependencies on the kernel launched: e.g. the non-local nb kernel's dependency
 *   on the misc_ops_and_local_H2D_done event could be better expressed this way
 *   (an illustrative sketch is included next to the kernel launch below);
 * - Consider extracting common sections of the OpenCL and CUDA nbnxn logic, e.g.:
 *   - in nbnxn_gpu_launch_kernel_pruneonly() the pre- and post-kernel launch logic
 *     is identical in the two implementations, so a 3-way split might allow sharing
 *     code.
 */
#include "thread_mpi/atomic.h"

#include "gromacs/gpu_utils/oclutils.h"
#include "gromacs/hardware/hw_info.h"
#include "gromacs/mdlib/force_flags.h"
#include "gromacs/mdlib/nb_verlet.h"
#include "gromacs/mdlib/nbnxn_consts.h"
#include "gromacs/mdlib/nbnxn_gpu.h"
#include "gromacs/mdlib/nbnxn_gpu_common.h"
#include "gromacs/mdlib/nbnxn_gpu_common_utils.h"
#include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
#include "gromacs/mdlib/nbnxn_pairlist.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"

#include "nbnxn_ocl_internal.h"
#include "nbnxn_ocl_types.h"
/*! \brief Convenience constants */
static const int c_numClPerSupercl = c_nbnxnGpuNumClusterPerSupercluster;
static const int c_clSize          = c_nbnxnGpuClusterSize;
/* Uncomment this define to enable kernel debugging */
//#define DEBUG_OCL

/*! \brief Specifies which kernel run to debug */
#define DEBUG_RUN_STEP 2
/*! \brief Validates the input global work size parameter. */
static inline void validate_global_work_size(size_t *global_work_size, int work_dim, const gmx_device_info_t *dinfo)
    cl_uint device_size_t_size_bits;
    cl_uint host_size_t_size_bits;

    /* Each component of a global_work_size must not exceed the range given by the
       sizeof(device size_t) for the device on which the kernel execution will
       be enqueued. See:
       https://www.khronos.org/registry/cl/sdk/1.0/docs/man/xhtml/clEnqueueNDRangeKernel.html
     */
    device_size_t_size_bits = dinfo->adress_bits;
    host_size_t_size_bits   = (cl_uint)(sizeof(size_t) * 8);
    /* If sizeof(host size_t) <= sizeof(device size_t)
       => the global_work_size components will always be valid;
       else
       => get the device limit for the global work size and
       compare it against each component of global_work_size.
     */
    if (host_size_t_size_bits > device_size_t_size_bits)
        size_t device_limit;

        device_limit = (((size_t)1) << device_size_t_size_bits) - 1;
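        /* Illustrative numbers: a device reporting a 32-bit size_t gives
         * device_limit = 2^32 - 1 = 4294967295; since global_work_size[0] is
         * plist->nsci * c_clSize, only an enormous pair list can exceed it. */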
        for (int i = 0; i < work_dim; i++)
            if (global_work_size[i] > device_limit)
                gmx_fatal(FARGS, "Watch out, the input system is too large to simulate!\n"
                          "The number of nonbonded work units (= number of super-clusters) exceeds the "
                          "device capabilities. Global work size limit exceeded (%zu > %zu)!",
                          global_work_size[i], device_limit);
/* Constant arrays listing non-bonded kernel function names. The arrays are
 * organized in 2-dim arrays by: electrostatics and VDW type.
 *
 * Note that the row- and column-order of function names has to match the
 * order of the corresponding enumerated electrostatics and vdw types, resp.,
 * defined in nbnxn_ocl_types.h.
 */
/*! \brief Force-only kernel function names. */
static const char* nb_kfunc_noener_noprune_ptr[eelOclNR][evdwOclNR] =
{
    { "nbnxn_kernel_ElecCut_VdwLJ_F_opencl", "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecCut_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecCut_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecCut_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_F_opencl", "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecRF_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecRF_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecRF_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_F_opencl", "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecEw_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecEw_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecEw_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_opencl" }
};
/*! \brief Force + energy kernel function names. */
static const char* nb_kfunc_ener_noprune_ptr[eelOclNR][evdwOclNR] =
{
    { "nbnxn_kernel_ElecCut_VdwLJ_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_opencl" }
};
/*! \brief Force + pruning kernel function names. */
static const char* nb_kfunc_noener_prune_ptr[eelOclNR][evdwOclNR] =
{
    { "nbnxn_kernel_ElecCut_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_opencl" }
};
/*! \brief Force + energy + pruning kernel function names. */
static const char* nb_kfunc_ener_prune_ptr[eelOclNR][evdwOclNR] =
{
    { "nbnxn_kernel_ElecCut_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_opencl" }
};
/*! \brief Returns the prune kernel object to be executed at the current invocation.
 *
 * \param[in] kernel_pruneonly  array of prune kernel objects
 * \param[in] firstPrunePass    true if the first pruning pass is being executed
 */
static inline cl_kernel selectPruneKernel(cl_kernel kernel_pruneonly[],
                                          bool      firstPrunePass)
    cl_kernel *kernelPtr;

    if (firstPrunePass)
    {
        kernelPtr = &(kernel_pruneonly[epruneFirst]);
    }
    else
    {
        kernelPtr = &(kernel_pruneonly[epruneRolling]);
    }
    // TODO: consider creating the prune kernel object here to avoid a
    // clCreateKernel for the rolling prune kernel if this is not needed.
    return *kernelPtr;
/*! \brief Returns the kernel object to be executed at the current step.
 *
 * OpenCL kernel objects are cached in nb. If the requested kernel is not
 * found in the cache, it will be created and the cache will be updated.
 */
static inline cl_kernel select_nbnxn_kernel(gmx_nbnxn_ocl_t *nb,
                                            int              eeltype,
                                            int              evdwtype,
                                            bool             bDoEne,
                                            bool             bDoPrune)
    const char *kernel_name_to_run;
    cl_kernel  *kernel_ptr;
    cl_int      cl_error;

    assert(eeltype  < eelOclNR);
    assert(evdwtype < evdwOclNR);
    if (bDoEne)
    {
        if (bDoPrune)
        {
            kernel_name_to_run = nb_kfunc_ener_prune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_ener_prune_ptr[eeltype][evdwtype]);
        }
        else
        {
            kernel_name_to_run = nb_kfunc_ener_noprune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_ener_noprune_ptr[eeltype][evdwtype]);
        }
    }
    else
    {
        if (bDoPrune)
        {
            kernel_name_to_run = nb_kfunc_noener_prune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_noener_prune_ptr[eeltype][evdwtype]);
        }
        else
        {
            kernel_name_to_run = nb_kfunc_noener_noprune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_noener_noprune_ptr[eeltype][evdwtype]);
        }
    }
    if (NULL == kernel_ptr[0])
    {
        *kernel_ptr = clCreateKernel(nb->dev_rundata->program, kernel_name_to_run, &cl_error);
        assert(cl_error == CL_SUCCESS);
        // TODO: handle errors
    }

    return *kernel_ptr;
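/* A possible shape for the error-handling TODO in select_nbnxn_kernel above
 * (illustrative sketch only, kept disabled; not the current code path):
 * report a readable message instead of relying on assert. */
#if 0
    if (CL_SUCCESS != cl_error)
    {
        gmx_fatal(FARGS, "clCreateKernel failed for kernel '%s': %s",
                  kernel_name_to_run, ocl_get_error_string(cl_error).c_str());
    }
#endif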
/*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use. */
static inline int calc_shmem_required_nonbonded(int  vdwType,
                                                bool bPrefetchLjParam)
    int shmem;

    /* size of shmem (force-buffers/xq/atom type preloading) */
    /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
    /* i-atom x+q in shared memory */
    shmem  = c_numClPerSupercl * c_clSize * sizeof(float) * 4; /* xqib */
    /* cj in shared memory, for both warps separately */
    shmem += 2 * c_nbnxnGpuJgroupSize * sizeof(int);           /* cjs */
    if (bPrefetchLjParam)
    {
        if (useLjCombRule(vdwType))
        {
            /* i-atom LJ combination parameters in shared memory */
            shmem += c_numClPerSupercl * c_clSize * 2 * sizeof(float); /* atib abused for ljcp, float2 */
        }
        else
        {
            /* i-atom types in shared memory */
            shmem += c_numClPerSupercl * c_clSize * sizeof(int); /* atib */
        }
    }
    /* force reduction buffers in shared memory */
    shmem += c_clSize * c_clSize * 3 * sizeof(float); /* f_buf */
    /* Warp vote; in fact this must be multiplied by the number of warps in the block. */
    shmem += sizeof(cl_uint) * 2; /* warp_any */

    return shmem;
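/* Worked example (assuming the default values c_clSize == 8,
 * c_numClPerSupercl == 8 and c_nbnxnGpuJgroupSize == 4, with LJ parameter
 * prefetching and a combination rule in use): xqib = 8*8*4*4 = 1024 B,
 * cjs = 2*4*4 = 32 B, ljcp = 8*8*2*4 = 512 B, f_buf = 8*8*3*4 = 768 B and
 * warp_any = 8 B, i.e. roughly 2.3 kB of local memory per work-group. */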
/*! \brief Initializes data structures that are going to be sent to the OpenCL device.
 *
 * The device can't use the same data structures as the host for two main reasons:
 * - OpenCL restrictions (pointers are not accepted inside data structures);
 * - some host-side fields are not needed for the OpenCL kernels.
 *
 * This function is called before the launch of both nbnxn and prune kernels.
 */
static void fillin_ocl_structures(cl_nbparam_t        *nbp,
                                  cl_nbparam_params_t *nbparams_params)
    nbparams_params->coulomb_tab_scale = nbp->coulomb_tab_scale;
    nbparams_params->c_rf              = nbp->c_rf;
    nbparams_params->dispersion_shift  = nbp->dispersion_shift;
    nbparams_params->eeltype           = nbp->eeltype;
    nbparams_params->epsfac            = nbp->epsfac;
    nbparams_params->ewaldcoeff_lj     = nbp->ewaldcoeff_lj;
    nbparams_params->ewald_beta        = nbp->ewald_beta;
    nbparams_params->rcoulomb_sq       = nbp->rcoulomb_sq;
    nbparams_params->repulsion_shift   = nbp->repulsion_shift;
    nbparams_params->rlistOuter_sq     = nbp->rlistOuter_sq;
    nbparams_params->rvdw_sq           = nbp->rvdw_sq;
    nbparams_params->rlistInner_sq     = nbp->rlistInner_sq;
    nbparams_params->rvdw_switch       = nbp->rvdw_switch;
    nbparams_params->sh_ewald          = nbp->sh_ewald;
    nbparams_params->sh_lj_ewald       = nbp->sh_lj_ewald;
    nbparams_params->two_k_rf          = nbp->two_k_rf;
    nbparams_params->vdwtype           = nbp->vdwtype;
    nbparams_params->vdw_switch        = nbp->vdw_switch;
/*! \brief Enqueues a wait for event completion.
 *
 * Then it releases the event and sets it to 0.
 * Don't use this function when more than one wait will be issued for the event.
 * Equivalent to a CUDA stream synchronization. */
static void sync_ocl_event(cl_command_queue stream, cl_event *ocl_event)
    cl_int gmx_unused cl_error;

#ifdef CL_VERSION_1_2
    cl_error = clEnqueueBarrierWithWaitList(stream, 1, ocl_event, NULL);
#else
    cl_error = clEnqueueWaitForEvents(stream, 1, ocl_event);
#endif
    GMX_RELEASE_ASSERT(CL_SUCCESS == cl_error, ocl_get_error_string(cl_error).c_str());

    /* Release the event and reset it to 0. It is OK to release it, as the
       enqueued wait performs an implicit retain on the event. */
    cl_error = clReleaseEvent(*ocl_event);
    assert(CL_SUCCESS == cl_error);
    *ocl_event = 0;
/*! \brief Launch GPU kernel

   As we execute the nonbonded workload in separate queues, before launching
   the kernel we need to make sure that the following operations have completed:
   - atomdata allocation and related H2D transfers (every nstlist step);
   - pair list H2D transfer (every nstlist step);
   - shift vector H2D transfer (every nstlist step);
   - force (+shift force and energy) output clearing (every step).

   These operations are issued in the local queue at the beginning of the step
   and therefore always complete before the local kernel launch. The non-local
   kernel is launched after the local one on the same device/context, so it is
   inherently scheduled after the operations in the local stream (including the
   above "misc_ops").
   However, for the sake of having a future-proof implementation, we use the
   misc_ops_and_local_H2D_done event to record the point in time when the above
   operations are finished and synchronize with this event in the non-local stream.
 */
void nbnxn_gpu_launch_kernel(gmx_nbnxn_ocl_t               *nb,
                             const struct nbnxn_atomdata_t *nbatom,
                             int                            flags,
                             int                            iloc)
    int adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */

    /* OpenCL kernel launch-related stuff */
    int       shmem;
    size_t    local_work_size[3], global_work_size[3];
    cl_kernel nb_kernel = NULL; /* the nonbonded kernel object */

    cl_atomdata_t    *adat   = nb->atdat;
    cl_nbparam_t     *nbp    = nb->nbparam;
    cl_plist_t       *plist  = nb->plist[iloc];
    cl_timers_t      *t      = nb->timers;
    cl_command_queue  stream = nb->stream[iloc];

    bool bCalcEner   = flags & GMX_FORCE_ENERGY;
    int  bCalcFshift = flags & GMX_FORCE_VIRIAL;
    bool bDoTime     = nb->bDoTime;

    cl_uint             arg_no;
    cl_int              cl_error;
    cl_nbparam_params_t nbparams_params;
#ifdef DEBUG_OCL
    float  *debug_buffer_h;
    size_t  debug_buffer_size;
#endif
    /* Don't launch the non-local kernel if there is no work to do.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local kernel, the local x+q copy and later (not in
       this function) the stream wait, local f copyback and the f buffer
       clearing. All these operations, except for the local interaction kernel,
       are needed for the non-local interactions. The skip of the local kernel
       call is taken care of later in this function. */
    if (canSkipWork(nb, iloc))
    {
        plist->haveFreshList = false;

        return;
    }
    /* calculate the atom data index range based on locality */
    if (iloc == eintLocal)
    {
        adat_begin = 0;
        adat_len   = adat->natoms_local;
    }
    else
    {
        adat_begin = adat->natoms_local;
        adat_len   = adat->natoms - adat->natoms_local;
    }
    /* beginning of timed HtoD section */
    if (bDoTime)
    {
        t->nb_h2d[iloc].openTimingRegion(stream);
    }

    /* HtoD x, q */
    ocl_copy_H2D_async(adat->xq, nbatom->x + adat_begin * 4, adat_begin * sizeof(float) * 4,
                       adat_len * sizeof(float) * 4, stream, bDoTime ? t->nb_h2d[iloc].fetchNextEvent() : nullptr);

    if (bDoTime)
    {
        t->nb_h2d[iloc].closeTimingRegion(stream);
    }
    /* When we get here all misc operations issued in the local stream as well
       as the local xq H2D are done, so we record that in the local stream and
       wait for it in the non-local one. */
    if (nb->bUseTwoStreams)
    {
        if (iloc == eintLocal)
        {
#ifdef CL_VERSION_1_2
            cl_error = clEnqueueMarkerWithWaitList(stream, 0, NULL, &(nb->misc_ops_and_local_H2D_done));
#else
            cl_error = clEnqueueMarker(stream, &(nb->misc_ops_and_local_H2D_done));
#endif
            assert(CL_SUCCESS == cl_error);

            /* Based on section 5.13 of the OpenCL v1.2 spec, a flush is needed
             * in the local stream in order to be able to sync with the above
             * event from the non-local stream. */
            cl_error = clFlush(stream);
            assert(CL_SUCCESS == cl_error);
        }
        else
        {
            sync_ocl_event(stream, &(nb->misc_ops_and_local_H2D_done));
        }
    }
    if (nbp->useDynamicPruning && plist->haveFreshList)
    {
        /* Prunes for rlistOuter and rlistInner, and sets plist->haveFreshList = false
           (that is how the timing accounting can distinguish between the
           separate prune kernel and the combined force+prune kernel). */
        nbnxn_gpu_launch_kernel_pruneonly(nb, iloc, 1);
    }

    if (plist->nsci == 0)
    {
        /* Don't launch an empty local kernel (not allowed with OpenCL).
         * TODO: Separate H2D and kernel launch into separate functions.
         */
        return;
    }
504 t->nb_k[iloc].openTimingRegion(stream);
507 /* get the pointer to the kernel flavor we need to use */
508 nb_kernel = select_nbnxn_kernel(nb,
512 (plist->haveFreshList && !nb->timers->didPrune[iloc]));
514 /* kernel launch config */
515 local_work_size[0] = c_clSize;
516 local_work_size[1] = c_clSize;
517 local_work_size[2] = 1;
519 global_work_size[0] = plist->nsci * local_work_size[0];
520 global_work_size[1] = 1 * local_work_size[1];
521 global_work_size[2] = 1 * local_work_size[2];
523 validate_global_work_size(global_work_size, 3, nb->dev_info);
525 shmem = calc_shmem_required_nonbonded(nbp->vdwtype, nb->bPrefetchLjParam);
#ifdef DEBUG_OCL
    {
        static int run_step = 1;

        if (DEBUG_RUN_STEP == run_step)
        {
            debug_buffer_size = global_work_size[0] * global_work_size[1] * global_work_size[2] * sizeof(float);
            debug_buffer_h    = (float*)calloc(1, debug_buffer_size);
            assert(NULL != debug_buffer_h);

            if (NULL == nb->debug_buffer)
            {
                nb->debug_buffer = clCreateBuffer(nb->dev_rundata->context, CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR,
                                                  debug_buffer_size, debug_buffer_h, &cl_error);
                assert(CL_SUCCESS == cl_error);
            }
        }
    }
#endif
    if (debug)
    {
        fprintf(debug, "Non-bonded GPU launch configuration:\n\tLocal work size: %dx%dx%d\n\t"
                "Global work size: %dx%d\n\t#Super-clusters/clusters: %d/%d (%d)\n",
                (int)(local_work_size[0]), (int)(local_work_size[1]), (int)(local_work_size[2]),
                (int)(global_work_size[0]), (int)(global_work_size[1]), plist->nsci*c_numClPerSupercl,
                c_numClPerSupercl, plist->na_c);
    }
    fillin_ocl_structures(nbp, &nbparams_params);

    arg_no   = 0;
    cl_error = CL_SUCCESS;
    if (!useLjCombRule(nb->nbparam->vdwtype))
    {
        cl_error  = clSetKernelArg(nb_kernel, arg_no++, sizeof(int), &(adat->ntypes));
    }
    cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(nbparams_params), &(nbparams_params));
    cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(adat->xq));
    cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(adat->f));
    cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(adat->e_lj));
    cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(adat->e_el));
    cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(adat->fshift));
    if (useLjCombRule(nb->nbparam->vdwtype))
    {
        cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(adat->lj_comb));
    }
    else
    {
        cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(adat->atom_types));
    }
    cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(adat->shift_vec));
    cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(nbp->nbfp_climg2d));
    cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(nbp->nbfp_comb_climg2d));
    cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(nbp->coulomb_tab_climg2d));
    cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(plist->sci));
    cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(plist->cj4));
    cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(plist->excl));
    cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(int), &bCalcFshift);
    cl_error |= clSetKernelArg(nb_kernel, arg_no++, shmem, NULL);
    cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(nb->debug_buffer));

    assert(cl_error == CL_SUCCESS);
    if (cl_error != CL_SUCCESS)
    {
        printf("OpenCL error: %s\n", ocl_get_error_string(cl_error).c_str());
    }

    cl_error = clEnqueueNDRangeKernel(stream, nb_kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, bDoTime ? t->nb_k[iloc].fetchNextEvent() : nullptr);
    assert(cl_error == CL_SUCCESS);
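    /* Sketch for the file-level TODO on using event_wait_list (illustrative
     * only, kept disabled; not the current code path): the non-local kernel
     * could declare its dependency on the local H2D/misc ops directly at
     * enqueue time, instead of the explicit sync_ocl_event() call issued
     * earlier in this function. */
#if 0
    if (iloc == eintNonlocal)
    {
        cl_event deps[1] = { nb->misc_ops_and_local_H2D_done };
        cl_error = clEnqueueNDRangeKernel(stream, nb_kernel, 3, NULL,
                                          global_work_size, local_work_size,
                                          1, deps, bDoTime ? t->nb_k[iloc].fetchNextEvent() : nullptr);
    }
#endif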
    if (bDoTime)
    {
        t->nb_k[iloc].closeTimingRegion(stream);
    }
#ifdef DEBUG_OCL
    {
        static int run_step = 1;

        if (DEBUG_RUN_STEP == run_step)
        {
            FILE *pf;
            char  file_name[256] = {0};

            ocl_copy_D2H_async(debug_buffer_h, nb->debug_buffer, 0,
                               debug_buffer_size, stream, NULL);

            /* Make sure all data has been transferred back from the device */
            clFinish(stream);

            printf("\nWriting debug_buffer to debug_buffer_ocl.txt...");

            sprintf(file_name, "debug_buffer_ocl_%d.txt", DEBUG_RUN_STEP);
            pf = fopen(file_name, "wt");
            assert(pf != NULL);

            fprintf(pf, "%20s", "");
            for (int j = 0; j < (int)global_work_size[0]; j++)
            {
                char label[20];
                sprintf(label, "(wIdx=%2d thIdx=%2d)", (int)(j / local_work_size[0]), (int)(j % local_work_size[0]));
                fprintf(pf, "%20s", label);
            }

            for (int i = 0; i < (int)global_work_size[1]; i++)
            {
                char label[20];
                sprintf(label, "(wIdy=%2d thIdy=%2d)", (int)(i / local_work_size[1]), (int)(i % local_work_size[1]));
                fprintf(pf, "\n%20s", label);

                for (int j = 0; j < (int)global_work_size[0]; j++)
                {
                    fprintf(pf, "%20.5f", debug_buffer_h[i * global_work_size[0] + j]);
                }
            }

            fclose(pf);

            free(debug_buffer_h);
            debug_buffer_h = NULL;
        }

        run_step++;
    }
#endif
/*! \brief Calculates the amount of shared memory required by the prune kernel.
 *
 * Note that for the sake of simplicity we use the CUDA terminology "shared memory"
 * for OpenCL local memory.
 *
 * \param[in] num_threads_z  cj4 concurrency, equal to the number of threads/work items in the 3rd dimension
 * \returns   the amount of local memory in bytes required by the pruning kernel
 */
static inline int calc_shmem_required_prune(const int num_threads_z)
    int shmem;

    /* i-atom x in shared memory (for convenience we load all 4 components including q) */
    shmem  = c_numClPerSupercl * c_clSize * sizeof(float) * 4;
    /* cj in shared memory, for each warp separately */
    shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);
    /* Warp vote; requires one uint per warp/32 threads per block. */
    shmem += sizeof(cl_uint) * 2 * num_threads_z;

    return shmem;
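/* Worked example (assuming the default values c_clSize == 8,
 * c_numClPerSupercl == 8, c_nbnxnGpuClusterpairSplit == 2 and
 * c_nbnxnGpuJgroupSize == 4, with num_threads_z == 4): x buffer =
 * 8*8*4*4 = 1024 B, cj = 4*2*4*4 = 128 B and warp flags = 4*2*4 = 32 B,
 * i.e. about 1.2 kB of local memory per work-group. */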
void nbnxn_gpu_launch_kernel_pruneonly(gmx_nbnxn_gpu_t *nb,
                                       int              iloc,
                                       int              numParts)
    cl_atomdata_t    *adat    = nb->atdat;
    cl_nbparam_t     *nbp     = nb->nbparam;
    cl_plist_t       *plist   = nb->plist[iloc];
    cl_timers_t      *t       = nb->timers;
    cl_command_queue  stream  = nb->stream[iloc];
    bool              bDoTime = nb->bDoTime;
    if (plist->haveFreshList)
    {
        GMX_ASSERT(numParts == 1, "With first pruning we expect 1 part");

        /* Set rollingPruningNumParts to signal that it is not set */
        plist->rollingPruningNumParts = 0;
        plist->rollingPruningPart     = 0;
    }
    else
    {
        if (plist->rollingPruningNumParts == 0)
        {
            plist->rollingPruningNumParts = numParts;
        }
        else
        {
            GMX_ASSERT(numParts == plist->rollingPruningNumParts, "It is not allowed to change numParts in between list generation steps");
        }
    }
    /* Use a local variable for part and update it in plist, so we can return here
     * without duplicating the part increment code.
     */
    int part = plist->rollingPruningPart;

    plist->rollingPruningPart++;
    if (plist->rollingPruningPart >= plist->rollingPruningNumParts)
    {
        plist->rollingPruningPart = 0;
    }

    /* Compute the number of list entries to prune in this pass */
    int numSciInPart = (plist->nsci - part)/numParts;
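    /* Example: with nsci = 10000 and numParts = 4, a pass launches
     * (10000 - part)/4 work units, i.e. 2500 for part 0 and 2499 for
     * parts 1-3; each pass prunes a different quarter of the list. */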
    /* Don't launch the kernel if there is no work to do. */
    if (numSciInPart <= 0)
    {
        plist->haveFreshList = false;

        return;
    }

    GpuRegionTimer *timer = nullptr;
    if (bDoTime)
    {
        timer = &(plist->haveFreshList ? t->prune_k[iloc] : t->rollingPrune_k[iloc]);
    }

    /* beginning of timed prune calculation section */
    if (bDoTime)
    {
        timer->openTimingRegion(stream);
    }
    /* Kernel launch config:
     * - The thread block dimensions match the size of i-clusters, j-clusters,
     *   and j-cluster concurrency, in x, y, and z, respectively.
     * - The 1D block-grid contains as many blocks as super-clusters.
     */
    int       num_threads_z = getOclPruneKernelJ4Concurrency(nb->dev_info->vendor_e);
    cl_kernel pruneKernel   = selectPruneKernel(nb->kernel_pruneonly, plist->haveFreshList);
    /* kernel launch config */
    size_t local_work_size[3], global_work_size[3];
    local_work_size[0] = c_clSize;
    local_work_size[1] = c_clSize;
    local_work_size[2] = num_threads_z;

    global_work_size[0] = numSciInPart * local_work_size[0];
    global_work_size[1] = 1 * local_work_size[1];
    global_work_size[2] = 1 * local_work_size[2];

    validate_global_work_size(global_work_size, 3, nb->dev_info);

    int shmem = calc_shmem_required_prune(num_threads_z);
    if (debug)
    {
        fprintf(debug, "Pruning GPU kernel launch configuration:\n\tLocal work size: %dx%dx%d\n"
                "\tGlobal work size: %dx%d\n\t#Super-clusters/clusters: %d/%d (%d)\n"
                "\tShMem: %d\n",
                (int)(local_work_size[0]), (int)(local_work_size[1]), (int)(local_work_size[2]),
                (int)(global_work_size[0]), (int)(global_work_size[1]), plist->nsci*c_numClPerSupercl,
                c_numClPerSupercl, plist->na_c, shmem);
    }
    cl_nbparam_params_t nbparams_params;
    fillin_ocl_structures(nbp, &nbparams_params);

    cl_int  cl_error = CL_SUCCESS;
    cl_uint arg_no   = 0;
    cl_error |= clSetKernelArg(pruneKernel, arg_no++, sizeof(nbparams_params), &(nbparams_params));
    cl_error |= clSetKernelArg(pruneKernel, arg_no++, sizeof(cl_mem), &(adat->xq));
    cl_error |= clSetKernelArg(pruneKernel, arg_no++, sizeof(cl_mem), &(adat->shift_vec));
    cl_error |= clSetKernelArg(pruneKernel, arg_no++, sizeof(cl_mem), &(plist->sci));
    cl_error |= clSetKernelArg(pruneKernel, arg_no++, sizeof(cl_mem), &(plist->cj4));
    cl_error |= clSetKernelArg(pruneKernel, arg_no++, sizeof(cl_mem), &(plist->imask));
    cl_error |= clSetKernelArg(pruneKernel, arg_no++, sizeof(int), &(numParts));
    cl_error |= clSetKernelArg(pruneKernel, arg_no++, sizeof(int), &(part));
    cl_error |= clSetKernelArg(pruneKernel, arg_no++, shmem, nullptr);
    assert(cl_error == CL_SUCCESS);
    cl_error = clEnqueueNDRangeKernel(stream, pruneKernel, 3,
                                      nullptr, global_work_size, local_work_size,
                                      0, nullptr, bDoTime ? timer->fetchNextEvent() : nullptr);
    GMX_RELEASE_ASSERT(CL_SUCCESS == cl_error, ocl_get_error_string(cl_error).c_str());
    if (plist->haveFreshList)
    {
        plist->haveFreshList = false;
        /* Mark that pruning has been done */
        nb->timers->didPrune[iloc] = true;
    }
    else
    {
        /* Mark that rolling pruning has been done */
        nb->timers->didRollingPrune[iloc] = true;
    }

    if (bDoTime)
    {
        timer->closeTimingRegion(stream);
    }
/*! \brief
 * Launch asynchronously the download of nonbonded forces from the GPU
 * (and energies/shift forces if required).
 */
void nbnxn_gpu_launch_cpyback(gmx_nbnxn_ocl_t               *nb,
                              const struct nbnxn_atomdata_t *nbatom,
                              int                            flags,
                              int                            aloc)
    cl_int gmx_unused cl_error;
    int               adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */

    /* determine interaction locality from atom locality */
    int iloc = gpuAtomToInteractionLocality(aloc);

    cl_atomdata_t    *adat    = nb->atdat;
    cl_timers_t      *t       = nb->timers;
    bool              bDoTime = nb->bDoTime;
    cl_command_queue  stream  = nb->stream[iloc];

    bool bCalcEner   = flags & GMX_FORCE_ENERGY;
    int  bCalcFshift = flags & GMX_FORCE_VIRIAL;
    /* don't launch the non-local copy-back if there was no non-local work to do */
    if (canSkipWork(nb, iloc))
    {
        /* TODO An alternative way to signal that non-local work is
           complete is to use a clEnqueueMarker+clEnqueueBarrier
           pair. However, the use of bNonLocalStreamActive has the
           advantage of being local to the host, so it probably minimizes
           overhead. Curiously, for NVIDIA OpenCL with an empty-domain
           test case, overall simulation performance was higher with
           the API calls, but this has not been tested on AMD OpenCL,
           so it could be worth considering in the future. */
        nb->bNonLocalStreamActive = false;
        return;
    }
    getGpuAtomRange(adat, aloc, adat_begin, adat_len);

    /* beginning of timed D2H section */
    if (bDoTime)
    {
        t->nb_d2h[iloc].openTimingRegion(stream);
    }

    /* With DD the local D2H transfer can only start after the non-local
       one has been launched. */
    if (iloc == eintLocal && nb->bNonLocalStreamActive)
    {
        sync_ocl_event(stream, &(nb->nonlocal_done));
    }

    /* DtoH f */
    ocl_copy_D2H_async(nbatom->out[0].f + adat_begin * 3, adat->f, adat_begin * 3 * sizeof(float),
                       adat_len * adat->f_elem_size, stream, bDoTime ? t->nb_d2h[iloc].fetchNextEvent() : nullptr);

    cl_error = clFlush(stream);
    assert(CL_SUCCESS == cl_error);
    /* After the non-local D2H is launched the nonlocal_done event can be
       recorded which signals that the local D2H can proceed. This event is not
       placed after the non-local kernel because we first need the non-local
       data back. */
    if (iloc == eintNonlocal)
    {
#ifdef CL_VERSION_1_2
        cl_error = clEnqueueMarkerWithWaitList(stream, 0, NULL, &(nb->nonlocal_done));
#else
        cl_error = clEnqueueMarker(stream, &(nb->nonlocal_done));
#endif
        assert(CL_SUCCESS == cl_error);
        nb->bNonLocalStreamActive = true;
    }
    /* only transfer energies in the local stream */
    if (iloc == eintLocal)
    {
        /* DtoH fshift */
        if (bCalcFshift)
        {
            ocl_copy_D2H_async(nb->nbst.fshift, adat->fshift, 0,
                               SHIFTS * adat->fshift_elem_size, stream, bDoTime ? t->nb_d2h[iloc].fetchNextEvent() : nullptr);
        }

        /* DtoH energies */
        if (bCalcEner)
        {
            ocl_copy_D2H_async(nb->nbst.e_lj, adat->e_lj, 0,
                               sizeof(float), stream, bDoTime ? t->nb_d2h[iloc].fetchNextEvent() : nullptr);
            ocl_copy_D2H_async(nb->nbst.e_el, adat->e_el, 0,
                               sizeof(float), stream, bDoTime ? t->nb_d2h[iloc].fetchNextEvent() : nullptr);
        }
    }

    if (bDoTime)
    {
        t->nb_d2h[iloc].closeTimingRegion(stream);
    }
/*! \brief Selects the Ewald kernel type: analytical or tabulated, single or twin cut-off. */
int nbnxn_gpu_pick_ewald_kernel_type(bool bTwinCut)
    bool bUseAnalyticalEwald, bForceAnalyticalEwald, bForceTabulatedEwald;
    int  kernel_type;

    /* Benchmarking/development environment variables to force the use of the
       analytical or tabulated Ewald kernels. */
    bForceAnalyticalEwald = (getenv("GMX_OCL_NB_ANA_EWALD") != NULL);
    bForceTabulatedEwald  = (getenv("GMX_OCL_NB_TAB_EWALD") != NULL);
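    /* Example: running "GMX_OCL_NB_ANA_EWALD=1 gmx mdrun ..." forces the
       analytical Ewald flavor regardless of the device, which is useful when
       benchmarking the two kernel families against each other. */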
    if (bForceAnalyticalEwald && bForceTabulatedEwald)
    {
        gmx_incons("Both analytical and tabulated Ewald OpenCL non-bonded kernels "
                   "requested through environment variables.");
    }
    /* OpenCL: by default, use analytical Ewald.
     * TODO: tabulated does not work and needs fixing; see init_nbparam() in nbnxn_ocl_data_mgmt.cpp.
     * TODO: decide if a dev_info parameter should be added to recognize NVIDIA CC >= 3.0 devices.
     */
    //if ((dev_info->prop.major >= 3 || bForceAnalyticalEwald) && !bForceTabulatedEwald)
    if ((1 || bForceAnalyticalEwald) && !bForceTabulatedEwald)
    {
        bUseAnalyticalEwald = true;

        if (debug)
        {
            fprintf(debug, "Using analytical Ewald OpenCL kernels\n");
        }
    }
    else
    {
        bUseAnalyticalEwald = false;

        if (debug)
        {
            fprintf(debug, "Using tabulated Ewald OpenCL kernels\n");
        }
    }
    /* Use twin cut-off kernels if requested by bTwinCut or if the env. var.
       forces it (use it for debugging/benchmarking only). */
    if (!bTwinCut && (getenv("GMX_OCL_NB_EWALD_TWINCUT") == NULL))
    {
        kernel_type = bUseAnalyticalEwald ? eelOclEWALD_ANA : eelOclEWALD_TAB;
    }
    else
    {
        kernel_type = bUseAnalyticalEwald ? eelOclEWALD_ANA_TWIN : eelOclEWALD_TAB_TWIN;
    }

    return kernel_type;