/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *  \brief Define OpenCL implementation of nbnxm_gpu.h
 *
 *  \author Anca Hamuraru <anca@streamcomputing.eu>
 *  \author Teemu Virolainen <teemu@streamcomputing.eu>
 *  \author Dimitrios Karkoulis <dimitris.karkoulis@gmail.com>
 *  \author Szilárd Páll <pall.szilard@gmail.com>
 *  \ingroup module_nbnxm
 *
 *  TODO (psz):
 *  - Add a static const cl_uint c_pruneKernelWorkDim / c_nbnxnKernelWorkDim = 3;
 *  - Rework the copying of OCL data structures done before every invocation of both
 *    nb and prune kernels (using fillin_ocl_structures); also consider at the same
 *    time calling clSetKernelArg only on the updated parameters (if tracking changed
 *    parameters is feasible);
 *  - Consider using the event_wait_list argument to clEnqueueNDRangeKernel to mark
 *    dependencies on the kernels launched: e.g. the non-local nb kernel's dependency
 *    on the misc_ops_and_local_H2D_done event could be better expressed this way.
 *
 *  - Consider extracting common sections of the OpenCL and CUDA nbnxn logic, e.g.:
 *    - in nbnxn_gpu_launch_kernel_pruneonly() the pre- and post-kernel launch logic
 *      is identical in the two implementations, so a 3-way split might allow sharing
 *      code;
 */
70 #include "thread_mpi/atomic.h"
72 #include "gromacs/gpu_utils/gputraits_ocl.h"
73 #include "gromacs/gpu_utils/oclutils.h"
74 #include "gromacs/hardware/hw_info.h"
75 #include "gromacs/mdlib/force_flags.h"
76 #include "gromacs/nbnxm/gpu_common.h"
77 #include "gromacs/nbnxm/gpu_common_utils.h"
78 #include "gromacs/nbnxm/gpu_data_mgmt.h"
79 #include "gromacs/nbnxm/nbnxm.h"
80 #include "gromacs/nbnxm/nbnxm_gpu.h"
81 #include "gromacs/nbnxm/pairlist.h"
82 #include "gromacs/pbcutil/ishift.h"
83 #include "gromacs/timing/gpu_timing.h"
84 #include "gromacs/utility/cstringutil.h"
85 #include "gromacs/utility/fatalerror.h"
86 #include "gromacs/utility/gmxassert.h"
88 #include "nbnxm_ocl_internal.h"
89 #include "nbnxm_ocl_types.h"
/*! \brief Convenience constants */
static const int c_numClPerSupercl = c_nbnxnGpuNumClusterPerSupercluster;
static const int c_clSize          = c_nbnxnGpuClusterSize;
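
/* Illustrative note (the authoritative values come from the nbnxm headers):
 * with the values these constants currently take (c_clSize == 8 and
 * c_numClPerSupercl == 8), a super-cluster groups 8 clusters of 8 atoms each,
 * and the kernels below use c_clSize x c_clSize work-groups.
 */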
/*! \brief Validates the input global work size parameter.
 */
static inline void validate_global_work_size(const KernelLaunchConfig &config, int work_dim, const gmx_device_info_t *dinfo)
{
    cl_uint device_size_t_size_bits;
    cl_uint host_size_t_size_bits;

    size_t global_work_size[3];
    GMX_ASSERT(work_dim <= 3, "Not supporting hyper-grids just yet");
    for (int i = 0; i < work_dim; i++)
    {
        global_work_size[i] = config.blockSize[i] * config.gridSize[i];
    }

    /* Each component of a global_work_size must not exceed the range given by the
       sizeof(device size_t) for the device on which the kernel execution will
       be enqueued. See:
       https://www.khronos.org/registry/cl/sdk/1.0/docs/man/xhtml/clEnqueueNDRangeKernel.html
     */
    device_size_t_size_bits = dinfo->adress_bits;
    host_size_t_size_bits   = static_cast<cl_uint>(sizeof(size_t) * 8);

    /* If sizeof(host size_t) <= sizeof(device size_t)
       => global_work_size components will always be valid;
       else
       => get the device limit for the global work size and
       compare it against each component of global_work_size.
     */
    if (host_size_t_size_bits > device_size_t_size_bits)
    {
        size_t device_limit;

        device_limit = (1ull << device_size_t_size_bits) - 1;

        for (int i = 0; i < work_dim; i++)
        {
            if (global_work_size[i] > device_limit)
            {
                gmx_fatal(FARGS, "Watch out, the input system is too large to simulate!\n"
                          "The number of nonbonded work units (=number of super-clusters) exceeds the "
                          "device capabilities. Global work size limit exceeded (%zu > %zu)!",
                          global_work_size[i], device_limit);
            }
        }
    }
}
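
/* Illustrative: for a device with a 32-bit size_t (dinfo->adress_bits == 32)
 * driven from a 64-bit host, device_limit = 2^32 - 1, so any global work size
 * component above ~4.3e9 work items aborts with the fatal error above.
 */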
/* Constant arrays listing non-bonded kernel function names. The arrays are
 * organized in 2-dim arrays by: electrostatics and VDW type.
 *
 * Note that the row- and column-order of function pointers has to match the
 * order of corresponding enumerated electrostatics and vdw types, resp.,
 * defined in nbnxm_ocl_types.h.
 */
/*! \brief Force-only kernel function names. */
static const char* nb_kfunc_noener_noprune_ptr[eelOclNR][evdwOclNR] =
{
    { "nbnxn_kernel_ElecCut_VdwLJ_F_opencl", "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecCut_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecCut_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecCut_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_F_opencl", "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecRF_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecRF_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecRF_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_F_opencl", "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecEw_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecEw_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecEw_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_opencl" }
};
/*! \brief Force + energy kernel function pointers. */
static const char* nb_kfunc_ener_noprune_ptr[eelOclNR][evdwOclNR] =
{
    { "nbnxn_kernel_ElecCut_VdwLJ_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_opencl" }
};
/*! \brief Force + pruning kernel function pointers. */
static const char* nb_kfunc_noener_prune_ptr[eelOclNR][evdwOclNR] =
{
    { "nbnxn_kernel_ElecCut_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_opencl" }
};
/*! \brief Force + energy + pruning kernel function pointers. */
static const char* nb_kfunc_ener_prune_ptr[eelOclNR][evdwOclNR] =
{
    { "nbnxn_kernel_ElecCut_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_opencl" }
};
/*! \brief Return a pointer to the prune kernel version to be executed at the current invocation.
 *
 * \param[in] kernel_pruneonly  array of prune kernel objects
 * \param[in] firstPrunePass    true if the first pruning pass is being executed
 */
static inline cl_kernel selectPruneKernel(cl_kernel kernel_pruneonly[],
                                          bool      firstPrunePass)
{
    cl_kernel *kernelPtr;

    if (firstPrunePass)
    {
        kernelPtr = &(kernel_pruneonly[epruneFirst]);
    }
    else
    {
        kernelPtr = &(kernel_pruneonly[epruneRolling]);
    }
    // TODO: consider creating the prune kernel object here to avoid a
    // clCreateKernel for the rolling prune kernel if this is not needed.
    return *kernelPtr;
}
/*! \brief Return a pointer to the kernel version to be executed at the current step.
 *  OpenCL kernel objects are cached in nb. If the requested kernel is not
 *  found in the cache, it will be created and the cache will be updated.
 */
static inline cl_kernel select_nbnxn_kernel(gmx_nbnxn_ocl_t *nb,
                                            int              eeltype,
                                            int              evdwtype,
                                            bool             bDoEne,
                                            bool             bDoPrune)
{
    const char *kernel_name_to_run;
    cl_kernel  *kernel_ptr;
    cl_int      cl_error;

    assert(eeltype  < eelOclNR);
    assert(evdwtype < evdwOclNR);

    if (bDoEne)
    {
        if (bDoPrune)
        {
            kernel_name_to_run = nb_kfunc_ener_prune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_ener_prune_ptr[eeltype][evdwtype]);
        }
        else
        {
            kernel_name_to_run = nb_kfunc_ener_noprune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_ener_noprune_ptr[eeltype][evdwtype]);
        }
    }
    else
    {
        if (bDoPrune)
        {
            kernel_name_to_run = nb_kfunc_noener_prune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_noener_prune_ptr[eeltype][evdwtype]);
        }
        else
        {
            kernel_name_to_run = nb_kfunc_noener_noprune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_noener_noprune_ptr[eeltype][evdwtype]);
        }
    }

    if (nullptr == kernel_ptr[0])
    {
        *kernel_ptr = clCreateKernel(nb->dev_rundata->program, kernel_name_to_run, &cl_error);
        assert(cl_error == CL_SUCCESS);
    }
    // TODO: handle errors

    return *kernel_ptr;
}
/*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use. */
static inline int calc_shmem_required_nonbonded(int  vdwType,
                                                bool bPrefetchLjParam)
{
    int shmem;

    /* size of shmem (force-buffers/xq/atom type preloading) */
    /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
    /* i-atom x+q in shared memory */
    shmem  = c_numClPerSupercl * c_clSize * sizeof(float) * 4; /* xqib */
    /* cj in shared memory, for both warps separately
     * TODO: in the "nowarp" kernels we load cj only once, so the factor 2 is not needed.
     */
    shmem += 2 * c_nbnxnGpuJgroupSize * sizeof(int);           /* cjs */
    if (bPrefetchLjParam)
    {
        if (useLjCombRule(vdwType))
        {
            /* i-atom LJ combination parameters in shared memory */
            shmem += c_numClPerSupercl * c_clSize * 2*sizeof(float); /* atib abused for ljcp, float2 */
        }
        else
        {
            /* i-atom types in shared memory */
            shmem += c_numClPerSupercl * c_clSize * sizeof(int); /* atib */
        }
    }
    /* force reduction buffers in shared memory */
    shmem += c_clSize * c_clSize * 3 * sizeof(float); /* f_buf */
    /* Warp vote; in fact it must be multiplied by the number of warps in a block. */
    shmem += sizeof(cl_uint) * 2;                     /* warp_any */

    return shmem;
}
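
/* Worked example (illustrative, assuming c_numClPerSupercl == 8, c_clSize == 8
 * and c_nbnxnGpuJgroupSize == 4): xqib = 8*8*4*sizeof(float) = 1024 bytes,
 * cjs = 2*4*sizeof(int) = 32 bytes, the LJ combination parameters
 * 8*8*2*sizeof(float) = 512 bytes (or the atom types 8*8*sizeof(int) = 256
 * bytes), f_buf = 8*8*3*sizeof(float) = 768 bytes and warp_any = 8 bytes,
 * i.e. about 2.3 kB of local memory per work-group.
 */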
/*! \brief Initializes data structures that are going to be sent to the OpenCL device.
 *
 *  The device can't use the same data structures as the host for two main reasons:
 *  - OpenCL restrictions (pointers are not accepted inside data structures)
 *  - some host side fields are not needed for the OpenCL kernels.
 *
 *  This function is called before the launch of both nbnxn and prune kernels.
 */
static void fillin_ocl_structures(cl_nbparam_t        *nbp,
                                  cl_nbparam_params_t *nbparams_params)
{
    nbparams_params->coulomb_tab_scale = nbp->coulomb_tab_scale;
    nbparams_params->c_rf              = nbp->c_rf;
    nbparams_params->dispersion_shift  = nbp->dispersion_shift;
    nbparams_params->eeltype           = nbp->eeltype;
    nbparams_params->epsfac            = nbp->epsfac;
    nbparams_params->ewaldcoeff_lj     = nbp->ewaldcoeff_lj;
    nbparams_params->ewald_beta        = nbp->ewald_beta;
    nbparams_params->rcoulomb_sq       = nbp->rcoulomb_sq;
    nbparams_params->repulsion_shift   = nbp->repulsion_shift;
    nbparams_params->rlistOuter_sq     = nbp->rlistOuter_sq;
    nbparams_params->rvdw_sq           = nbp->rvdw_sq;
    nbparams_params->rlistInner_sq     = nbp->rlistInner_sq;
    nbparams_params->rvdw_switch       = nbp->rvdw_switch;
    nbparams_params->sh_ewald          = nbp->sh_ewald;
    nbparams_params->sh_lj_ewald       = nbp->sh_lj_ewald;
    nbparams_params->two_k_rf          = nbp->two_k_rf;
    nbparams_params->vdwtype           = nbp->vdwtype;
    nbparams_params->vdw_switch        = nbp->vdw_switch;
}
/*! \brief Enqueues a wait for event completion.
 *
 * Then it releases the event and sets it to 0.
 * Don't use this function when more than one wait will be issued for the event.
 * Equivalent to a CUDA stream synchronization. */
static void sync_ocl_event(cl_command_queue stream, cl_event *ocl_event)
{
    cl_int gmx_unused cl_error;

    /* Enqueue wait */
    cl_error = clEnqueueBarrierWithWaitList(stream, 1, ocl_event, nullptr);
    GMX_RELEASE_ASSERT(CL_SUCCESS == cl_error, ocl_get_error_string(cl_error).c_str());

    /* Release the event and reset it to 0. It is OK to release it, as the
       enqueued barrier performs an implicit retain on the events it waits on. */
    cl_error = clReleaseEvent(*ocl_event);
    assert(CL_SUCCESS == cl_error);
    *ocl_event = nullptr;
}
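
/* Typical use (mirroring the call sites below): make the non-local queue wait
 * for the event recorded in the local queue, releasing the event afterwards:
 *
 *     sync_ocl_event(stream, &(nb->misc_ops_and_local_H2D_done));
 */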
/*! \brief Launch asynchronously the xq buffer host to device copy. */
void nbnxn_gpu_copy_xq_to_gpu(gmx_nbnxn_ocl_t        *nb,
                              const nbnxn_atomdata_t *nbatom,
                              int                     aloc,
                              bool                    haveOtherWork)
{
    int adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */

    /* determine interaction locality from atom locality */
    int iloc = gpuAtomToInteractionLocality(aloc);

    cl_atomdata_t   *adat   = nb->atdat;
    cl_plist_t      *plist  = nb->plist[iloc];
    cl_timers_t     *t      = nb->timers;
    cl_command_queue stream = nb->stream[iloc];

    bool             bDoTime = (nb->bDoTime) != 0;

    /* Don't launch the non-local H2D copy if there is no dependent
       work to do: neither non-local nor other (e.g. bonded) work
       to do that has as input the nbnxn coordinates.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local x+q copy (and the rest of the local
       work in nbnxn_gpu_launch_kernel()).
     */
    if (!haveOtherWork && canSkipWork(nb, iloc))
    {
        plist->haveFreshList = false;

        return;
    }

    /* calculate the atom data index range based on locality */
    if (LOCAL_I(iloc))
    {
        adat_begin = 0;
        adat_len   = adat->natoms_local;
    }
    else
    {
        adat_begin = adat->natoms_local;
        adat_len   = adat->natoms - adat->natoms_local;
    }
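
    /* Note on the ranges above (hypothetical numbers for illustration): with
     * adat->natoms_local == 6000 and adat->natoms == 8000, the local copy
     * spans xq[0..6000) and the non-local copy xq[6000..8000); each element
     * is 4 floats (x, y, z, q), hence the factor sizeof(float)*4 below.
     */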
    /* beginning of timed HtoD section */
    if (bDoTime)
    {
        t->nb_h2d[iloc].openTimingRegion(stream);
    }

    /* HtoD x, q */
    ocl_copy_H2D_async(adat->xq, nbatom->x().data() + adat_begin * 4, adat_begin*sizeof(float)*4,
                       adat_len * sizeof(float) * 4, stream, bDoTime ? t->nb_h2d[iloc].fetchNextEvent() : nullptr);

    if (bDoTime)
    {
        t->nb_h2d[iloc].closeTimingRegion(stream);
    }

    /* When we get here all misc operations issued in the local stream as well as
       the local xq H2D are done,
       so we record that in the local stream and wait for it in the nonlocal one. */
    if (nb->bUseTwoStreams)
    {
        if (iloc == eintLocal)
        {
            cl_int gmx_used_in_debug cl_error = clEnqueueMarkerWithWaitList(stream, 0, nullptr, &(nb->misc_ops_and_local_H2D_done));
            assert(CL_SUCCESS == cl_error);

            /* Based on the v1.2 section 5.13 of the OpenCL spec, a flush is needed
             * in the local stream in order to be able to sync with the above event
             * from the non-local stream.
             */
            cl_error = clFlush(stream);
            assert(CL_SUCCESS == cl_error);
        }
        else
        {
            sync_ocl_event(stream, &(nb->misc_ops_and_local_H2D_done));
        }
    }
}
/*! \brief Launch GPU kernel

   As we execute nonbonded workload in separate queues, before launching
   the kernel we need to make sure that the following operations have completed:
   - atomdata allocation and related H2D transfers (every nstlist step);
   - pair list H2D transfer (every nstlist step);
   - shift vector H2D transfer (every nstlist step);
   - force (+shift force and energy) output clearing (every step).

   These operations are issued in the local queue at the beginning of the step
   and therefore always complete before the local kernel launch. The non-local
   kernel is launched after the local on the same device/context, so this is
   inherently scheduled after the operations in the local stream (including the
   above "misc_ops").
   However, for the sake of having a future-proof implementation, we use the
   misc_ops_done event to record the point in time when the above operations
   are finished and synchronize with this event in the non-local stream.
 */
void nbnxn_gpu_launch_kernel(gmx_nbnxn_ocl_t *nb,
                             int              flags,
                             int              iloc)
{
    /* OpenCL kernel launch-related stuff */
    cl_kernel        nb_kernel = nullptr; /* kernel object for the nonbonded kernel flavor in use */

    cl_atomdata_t   *adat   = nb->atdat;
    cl_nbparam_t    *nbp    = nb->nbparam;
    cl_plist_t      *plist  = nb->plist[iloc];
    cl_timers_t     *t      = nb->timers;
    cl_command_queue stream = nb->stream[iloc];

    bool             bCalcEner   = (flags & GMX_FORCE_ENERGY) != 0;
    int              bCalcFshift = flags & GMX_FORCE_VIRIAL;
    bool             bDoTime     = (nb->bDoTime) != 0;

    cl_nbparam_params_t nbparams_params;

    /* Don't launch the non-local kernel if there is no work to do.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local kernel and later (not in
       this function) the stream wait, local f copyback and the f buffer
       clearing. All these operations, except for the local interaction kernel,
       are needed for the non-local interactions. The skip of the local kernel
       call is taken care of later in this function. */
    if (canSkipWork(nb, iloc))
    {
        plist->haveFreshList = false;

        return;
    }

    if (nbp->useDynamicPruning && plist->haveFreshList)
    {
        /* Prunes for rlistOuter and rlistInner, sets plist->haveFreshList=false
           (that's the way the timing accounting can distinguish between
           separate prune kernel and combined force+prune).
         */
        nbnxn_gpu_launch_kernel_pruneonly(nb, iloc, 1);
    }

    if (plist->nsci == 0)
    {
        /* Don't launch an empty local kernel (this is not allowed with OpenCL). */
        return;
    }

    /* beginning of timed nonbonded calculation section */
    if (bDoTime)
    {
        t->nb_k[iloc].openTimingRegion(stream);
    }

    /* get the pointer to the kernel flavor we need to use */
    nb_kernel = select_nbnxn_kernel(nb,
                                    nbp->eeltype,
                                    nbp->vdwtype,
                                    bCalcEner,
                                    (plist->haveFreshList && !nb->timers->didPrune[iloc]));

    /* kernel launch config */
    KernelLaunchConfig config;
    config.sharedMemorySize = calc_shmem_required_nonbonded(nbp->vdwtype, nb->bPrefetchLjParam);
    config.stream           = stream;
    config.blockSize[0]     = c_clSize;
    config.blockSize[1]     = c_clSize;
    config.gridSize[0]      = plist->nsci;
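
    /* Resulting launch geometry (illustrative): the work-group is
     * c_clSize x c_clSize work items (8x8 = 64 with the current cluster size)
     * on a 1D grid with one work-group per super-cluster, i.e. a global work
     * size of (c_clSize * nsci) x c_clSize x 1.
     */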
    validate_global_work_size(config, 3, nb->dev_info);

    if (debug)
    {
        fprintf(debug, "Non-bonded GPU launch configuration:\n\tLocal work size: %zux%zux%zu\n\t"
                "Global work size : %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n",
                config.blockSize[0], config.blockSize[1], config.blockSize[2],
                config.blockSize[0] * config.gridSize[0], config.blockSize[1] * config.gridSize[1], plist->nsci*c_numClPerSupercl,
                c_numClPerSupercl, plist->na_c);
    }
    fillin_ocl_structures(nbp, &nbparams_params);

    auto          *timingEvent  = bDoTime ? t->nb_k[iloc].fetchNextEvent() : nullptr;
    constexpr char kernelName[] = "k_calc_nb";
    if (useLjCombRule(nb->nbparam->vdwtype))
    {
        const auto kernelArgs = prepareGpuKernelArguments(nb_kernel, config,
                                                          &nbparams_params, &adat->xq, &adat->f, &adat->e_lj, &adat->e_el, &adat->fshift,
                                                          &adat->lj_comb,
                                                          &adat->shift_vec, &nbp->nbfp_climg2d, &nbp->nbfp_comb_climg2d, &nbp->coulomb_tab_climg2d,
                                                          &plist->sci, &plist->cj4, &plist->excl, &bCalcFshift);

        launchGpuKernel(nb_kernel, config, timingEvent, kernelName, kernelArgs);
    }
    else
    {
        const auto kernelArgs = prepareGpuKernelArguments(nb_kernel, config,
                                                          &adat->ntypes,
                                                          &nbparams_params, &adat->xq, &adat->f, &adat->e_lj, &adat->e_el, &adat->fshift,
                                                          &adat->atom_types,
                                                          &adat->shift_vec, &nbp->nbfp_climg2d, &nbp->nbfp_comb_climg2d, &nbp->coulomb_tab_climg2d,
                                                          &plist->sci, &plist->cj4, &plist->excl, &bCalcFshift);
        launchGpuKernel(nb_kernel, config, timingEvent, kernelName, kernelArgs);
    }

    if (bDoTime)
    {
        t->nb_k[iloc].closeTimingRegion(stream);
    }
}
/*! \brief Calculates the amount of shared memory required by the prune kernel.
 *
 *  Note that for the sake of simplicity we use the CUDA terminology "shared memory"
 *  for OpenCL local memory.
 *
 * \param[in] num_threads_z cj4 concurrency, equal to the number of threads/work items in the third dimension.
 * \returns   the amount of local memory in bytes required by the pruning kernel
 */
static inline int calc_shmem_required_prune(const int num_threads_z)
{
    int shmem;

    /* i-atom x in shared memory (for convenience we load all 4 components including q) */
    shmem  = c_numClPerSupercl * c_clSize * sizeof(float)*4;
    /* cj in shared memory, for each warp separately
     * Note: only need to load once per wavefront, but to keep the code simple,
     * for now we load twice on AMD.
     */
    shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);
    /* Warp vote, requires one uint per warp/32 threads per block. */
    shmem += sizeof(cl_uint) * 2*num_threads_z;

    return shmem;
}
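
/* Worked example (illustrative, assuming c_numClPerSupercl == 8, c_clSize == 8,
 * c_nbnxnGpuClusterpairSplit == 2, c_nbnxnGpuJgroupSize == 4 and
 * num_threads_z == 4): xq staging = 8*8*4*sizeof(float) = 1024 bytes,
 * cj buffers = 4*2*4*sizeof(int) = 128 bytes and warp-vote flags =
 * sizeof(cl_uint)*2*4 = 32 bytes, i.e. about 1.2 kB of local memory
 * per work-group.
 */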
void nbnxn_gpu_launch_kernel_pruneonly(gmx_nbnxn_gpu_t *nb,
                                       int              iloc,
                                       int              numParts)
{
    cl_atomdata_t   *adat    = nb->atdat;
    cl_nbparam_t    *nbp     = nb->nbparam;
    cl_plist_t      *plist   = nb->plist[iloc];
    cl_timers_t     *t       = nb->timers;
    cl_command_queue stream  = nb->stream[iloc];
    bool             bDoTime = nb->bDoTime == CL_TRUE;

    if (plist->haveFreshList)
    {
        GMX_ASSERT(numParts == 1, "With first pruning we expect 1 part");

        /* Set rollingPruningNumParts to signal that it is not set */
        plist->rollingPruningNumParts = 0;
        plist->rollingPruningPart     = 0;
    }
    else
    {
        if (plist->rollingPruningNumParts == 0)
        {
            plist->rollingPruningNumParts = numParts;
        }
        else
        {
            GMX_ASSERT(numParts == plist->rollingPruningNumParts, "It is not allowed to change numParts in between list generation steps");
        }
    }

    /* Use a local variable for part and update it in plist, so we can return here
     * without duplicating the part increment code.
     */
    int part = plist->rollingPruningPart;

    plist->rollingPruningPart++;
    if (plist->rollingPruningPart >= plist->rollingPruningNumParts)
    {
        plist->rollingPruningPart = 0;
    }

    /* Compute the number of list entries to prune in this pass */
    int numSciInPart = (plist->nsci - part)/numParts;
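
    /* Illustrative: with numParts == 4 and plist->nsci == 1000, consecutive
     * rolling-prune calls cycle part = 0,1,2,3,0,... and each pass prunes
     * roughly 1000/4 = 250 super-cluster list entries.
     */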
    /* Don't launch the kernel if there is no work to do. */
    if (numSciInPart <= 0)
    {
        plist->haveFreshList = false;

        return;
    }

    GpuRegionTimer *timer = nullptr;
    if (bDoTime)
    {
        timer = &(plist->haveFreshList ? t->prune_k[iloc] : t->rollingPrune_k[iloc]);
    }

    /* beginning of timed prune calculation section */
    if (bDoTime)
    {
        timer->openTimingRegion(stream);
    }

    /* Kernel launch config:
     * - The thread block dimensions match the size of i-clusters, j-clusters,
     *   and j-cluster concurrency, in x, y, and z, respectively.
     * - The 1D block-grid contains as many blocks as super-clusters.
     */
    int       num_threads_z = getOclPruneKernelJ4Concurrency(nb->dev_info->vendor_e);
    cl_kernel pruneKernel   = selectPruneKernel(nb->kernel_pruneonly, plist->haveFreshList);

    /* kernel launch config */
    KernelLaunchConfig config;
    config.sharedMemorySize = calc_shmem_required_prune(num_threads_z);
    config.stream           = stream;
    config.blockSize[0]     = c_clSize;
    config.blockSize[1]     = c_clSize;
    config.blockSize[2]     = num_threads_z;
    config.gridSize[0]      = numSciInPart;

    validate_global_work_size(config, 3, nb->dev_info);

    if (debug)
    {
        fprintf(debug, "Pruning GPU kernel launch configuration:\n\tLocal work size: %zux%zux%zu\n\t"
                "\tGlobal work size: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n"
                "\tShMem: %zu\n",
                config.blockSize[0], config.blockSize[1], config.blockSize[2],
                config.blockSize[0] * config.gridSize[0], config.blockSize[1] * config.gridSize[1], plist->nsci*c_numClPerSupercl,
                c_numClPerSupercl, plist->na_c, config.sharedMemorySize);
    }

    cl_nbparam_params_t nbparams_params;
    fillin_ocl_structures(nbp, &nbparams_params);

    auto          *timingEvent  = bDoTime ? timer->fetchNextEvent() : nullptr;
    constexpr char kernelName[] = "k_pruneonly";
    const auto     kernelArgs   = prepareGpuKernelArguments(pruneKernel, config,
                                                            &nbparams_params, &adat->xq, &adat->shift_vec,
                                                            &plist->sci, &plist->cj4, &plist->imask, &numParts, &part);
    launchGpuKernel(pruneKernel, config, timingEvent, kernelName, kernelArgs);
    if (plist->haveFreshList)
    {
        plist->haveFreshList = false;
        /* Mark that pruning has been done */
        nb->timers->didPrune[iloc] = true;
    }
    else
    {
        /* Mark that rolling pruning has been done */
        nb->timers->didRollingPrune[iloc] = true;
    }

    if (bDoTime)
    {
        timer->closeTimingRegion(stream);
    }
}
/*! \brief
 * Launch asynchronously the download of nonbonded forces from the GPU
 * (and energies/shift forces if required).
 */
void nbnxn_gpu_launch_cpyback(gmx_nbnxn_ocl_t         *nb,
                              struct nbnxn_atomdata_t *nbatom,
                              int                      flags,
                              int                      aloc,
                              bool                     haveOtherWork)
{
    cl_int gmx_unused cl_error;
    int               adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */

    /* determine interaction locality from atom locality */
    int iloc = gpuAtomToInteractionLocality(aloc);

    cl_atomdata_t   *adat    = nb->atdat;
    cl_timers_t     *t       = nb->timers;
    bool             bDoTime = nb->bDoTime == CL_TRUE;
    cl_command_queue stream  = nb->stream[iloc];

    bool             bCalcEner   = (flags & GMX_FORCE_ENERGY) != 0;
    int              bCalcFshift = flags & GMX_FORCE_VIRIAL;

    /* don't launch non-local copy-back if there was no non-local work to do */
    if (!haveOtherWork && canSkipWork(nb, iloc))
    {
        /* TODO An alternative way to signal that non-local work is
           complete is to use a clEnqueueMarker+clEnqueueBarrier
           pair. However, the use of bNonLocalStreamActive has the
           advantage of being local to the host, so probably minimizes
           overhead. Curiously, for NVIDIA OpenCL with an empty-domain
           test case, overall simulation performance was higher with
           the API calls, but this has not been tested on AMD OpenCL,
           so could be worth considering in future. */
        nb->bNonLocalStreamActive = CL_FALSE;

        return;
    }

    getGpuAtomRange(adat, aloc, &adat_begin, &adat_len);
    /* beginning of timed D2H section */
    if (bDoTime)
    {
        t->nb_d2h[iloc].openTimingRegion(stream);
    }

    /* With DD the local D2H transfer can only start after the non-local
       transfer has been launched. */
    if (iloc == eintLocal && nb->bNonLocalStreamActive)
    {
        sync_ocl_event(stream, &(nb->nonlocal_done));
    }

    /* DtoH f */
    ocl_copy_D2H_async(nbatom->out[0].f.data() + adat_begin * 3, adat->f, adat_begin*3*sizeof(float),
                       (adat_len)* adat->f_elem_size, stream, bDoTime ? t->nb_d2h[iloc].fetchNextEvent() : nullptr);

    /* kick off the work enqueued so far */
    cl_error = clFlush(stream);
    assert(CL_SUCCESS == cl_error);

    /* After the non-local D2H is launched the nonlocal_done event can be
       recorded which signals that the local D2H can proceed. This event is not
       placed after the non-local kernel because we first need the non-local
       data back. */
    if (iloc == eintNonlocal)
    {
        cl_error = clEnqueueMarkerWithWaitList(stream, 0, nullptr, &(nb->nonlocal_done));
        assert(CL_SUCCESS == cl_error);
        nb->bNonLocalStreamActive = CL_TRUE;
    }

    /* only transfer energies in the local stream */
    if (LOCAL_I(iloc))
    {
        /* DtoH fshift */
        if (bCalcFshift)
        {
            ocl_copy_D2H_async(nb->nbst.fshift, adat->fshift, 0,
                               SHIFTS * adat->fshift_elem_size, stream, bDoTime ? t->nb_d2h[iloc].fetchNextEvent() : nullptr);
        }

        /* DtoH energies */
        if (bCalcEner)
        {
            ocl_copy_D2H_async(nb->nbst.e_lj, adat->e_lj, 0,
                               sizeof(float), stream, bDoTime ? t->nb_d2h[iloc].fetchNextEvent() : nullptr);

            ocl_copy_D2H_async(nb->nbst.e_el, adat->e_el, 0,
                               sizeof(float), stream, bDoTime ? t->nb_d2h[iloc].fetchNextEvent() : nullptr);
        }
    }

    if (bDoTime)
    {
        t->nb_d2h[iloc].closeTimingRegion(stream);
    }
}
/*! \brief Selects the Ewald kernel type, analytical or tabulated, single or twin cut-off. */
int nbnxn_gpu_pick_ewald_kernel_type(bool bTwinCut)
{
    bool bUseAnalyticalEwald, bForceAnalyticalEwald, bForceTabulatedEwald;
    int  kernel_type;

    /* Benchmarking/development environment variables to force the use of
       analytical or tabulated Ewald kernel. */
    bForceAnalyticalEwald = (getenv("GMX_OCL_NB_ANA_EWALD") != nullptr);
    bForceTabulatedEwald  = (getenv("GMX_OCL_NB_TAB_EWALD") != nullptr);

    if (bForceAnalyticalEwald && bForceTabulatedEwald)
    {
        gmx_incons("Both analytical and tabulated Ewald OpenCL non-bonded kernels "
                   "requested through environment variables.");
    }

    /* OpenCL: by default, use analytical Ewald.
     * TODO: tabulated does not work, it needs fixing, see init_nbparam() in nbnxn_ocl_data_mgmt.cpp
     *
     * TODO: decide if dev_info parameter should be added to recognize NVIDIA CC>=3.0 devices.
     */
    bUseAnalyticalEwald = true;
    if (bForceAnalyticalEwald)
    {
        if (debug)
        {
            fprintf(debug, "Using analytical Ewald OpenCL kernels\n");
        }
    }
    else if (bForceTabulatedEwald)
    {
        bUseAnalyticalEwald = false;

        if (debug)
        {
            fprintf(debug, "Using tabulated Ewald OpenCL kernels\n");
        }
    }

    /* Use twin cut-off kernels if requested via bTwinCut or if the env. var.
       forces it (use the latter for debugging/benchmarking only). */
    if (!bTwinCut && (getenv("GMX_OCL_NB_EWALD_TWINCUT") == nullptr))
    {
        kernel_type = bUseAnalyticalEwald ? eelOclEWALD_ANA : eelOclEWALD_TAB;
    }
    else
    {
        kernel_type = bUseAnalyticalEwald ? eelOclEWALD_ANA_TWIN : eelOclEWALD_TAB_TWIN;
    }

    return kernel_type;
}
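
/* Note for benchmarking/debugging (using the environment variables read above):
 * e.g. setting GMX_OCL_NB_TAB_EWALD=1 in the environment forces the tabulated
 * kernel flavor, and GMX_OCL_NB_EWALD_TWINCUT=1 forces the twin cut-off
 * kernels even when bTwinCut is false.
 */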