/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *  \brief Define OpenCL implementation of nbnxm_gpu.h
 *
 *  \author Anca Hamuraru <anca@streamcomputing.eu>
 *  \author Teemu Virolainen <teemu@streamcomputing.eu>
 *  \author Dimitrios Karkoulis <dimitris.karkoulis@gmail.com>
 *  \author Szilárd Páll <pall.szilard@gmail.com>
 *  \ingroup module_nbnxm
 *
 *  TODO:
 *  - Add a static const cl_uint c_pruneKernelWorkDim / c_nbnxnKernelWorkDim = 3;
 *  - Rework the copying of OCL data structures done before every invocation of both
 *    nb and prune kernels (using fillin_ocl_structures); also consider at the same
 *    time calling clSetKernelArg only on the updated parameters (if tracking changed
 *    parameters is feasible);
 *  - Consider using the event_wait_list argument to clEnqueueNDRangeKernel to mark
 *    dependencies on the kernel launched: e.g. the non-local nb kernel's dependency
 *    on the misc_ops_and_local_H2D_done event could be better expressed this way.
 *  - Consider extracting common sections of the OpenCL and CUDA nbnxn logic, e.g.:
 *    - in nbnxn_gpu_launch_kernel_pruneonly() the pre- and post-kernel launch logic
 *      is identical in the two implementations, so a 3-way split might allow sharing
 *      the common pre- and post-launch logic.
 */
#include "thread_mpi/atomic.h"

#include "gromacs/gpu_utils/gputraits_ocl.h"
#include "gromacs/gpu_utils/oclutils.h"
#include "gromacs/hardware/hw_info.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/nbnxm/atomdata.h"
#include "gromacs/nbnxm/gpu_common.h"
#include "gromacs/nbnxm/gpu_common_utils.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/nbnxm_gpu.h"
#include "gromacs/nbnxm/pairlist.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"

#include "nbnxm_ocl_internal.h"
#include "nbnxm_ocl_types.h"
/*! \brief Convenience constants */
static const int c_numClPerSupercl = c_nbnxnGpuNumClusterPerSupercluster;
static const int c_clSize          = c_nbnxnGpuClusterSize;
/*! \brief Validates the input global work size parameter.
 */
static inline void validate_global_work_size(const KernelLaunchConfig &config, int work_dim, const gmx_device_info_t *dinfo)
{
    cl_uint device_size_t_size_bits;
    cl_uint host_size_t_size_bits;

    size_t global_work_size[3];
    GMX_ASSERT(work_dim <= 3, "Not supporting hyper-grids just yet");
    for (int i = 0; i < work_dim; i++)
    {
        global_work_size[i] = config.blockSize[i] * config.gridSize[i];
    }

    /* Each component of a global_work_size must not exceed the range given by the
       sizeof(device size_t) for the device on which the kernel execution will
       be enqueued. See:
       https://www.khronos.org/registry/cl/sdk/1.0/docs/man/xhtml/clEnqueueNDRangeKernel.html
     */
    device_size_t_size_bits = dinfo->adress_bits;
    host_size_t_size_bits   = static_cast<cl_uint>(sizeof(size_t) * 8);

    /* If sizeof(host size_t) <= sizeof(device size_t)
            => global_work_size components will always be valid
       else
            => get device limit for global work size and
               compare it against each component of global_work_size.
     */
    if (host_size_t_size_bits > device_size_t_size_bits)
    {
        size_t device_limit;

        device_limit = (1ULL << device_size_t_size_bits) - 1;

        for (int i = 0; i < work_dim; i++)
        {
            if (global_work_size[i] > device_limit)
            {
                gmx_fatal(FARGS,
                          "Watch out, the input system is too large to simulate!\n"
                          "The number of nonbonded work units (=number of super-clusters) exceeds the "
                          "device capabilities. Global work size limit exceeded (%zu > %zu)!",
                          global_work_size[i], device_limit);
            }
        }
    }
}
/* Constant arrays listing non-bonded kernel function names. The arrays are
 * organized in 2-dim arrays by: electrostatics and VDW type.
 *
 * Note that the row- and column-order of function pointers has to match the
 * order of corresponding enumerated electrostatics and vdw types, resp.,
 * defined in nbnxm_ocl_types.h.
 */
/*! \brief Force-only kernel function names. */
static const char* nb_kfunc_noener_noprune_ptr[eelOclNR][evdwOclNR] =
{
    { "nbnxn_kernel_ElecCut_VdwLJ_F_opencl", "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecCut_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecCut_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecCut_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_F_opencl", "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecRF_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecRF_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecRF_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_F_opencl", "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecEw_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecEw_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecEw_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_opencl" }
};
/*! \brief Force + energy kernel function names. */
static const char* nb_kfunc_ener_noprune_ptr[eelOclNR][evdwOclNR] =
{
    { "nbnxn_kernel_ElecCut_VdwLJ_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_opencl" }
};
/*! \brief Force + pruning kernel function names. */
static const char* nb_kfunc_noener_prune_ptr[eelOclNR][evdwOclNR] =
{
    { "nbnxn_kernel_ElecCut_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_opencl" }
};
/*! \brief Force + energy + pruning kernel function names. */
static const char* nb_kfunc_ener_prune_ptr[eelOclNR][evdwOclNR] =
{
    { "nbnxn_kernel_ElecCut_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_opencl" }
};
/*! \brief Return a pointer to the prune kernel version to be executed at the current invocation.
 *
 * \param[in] kernel_pruneonly  array of prune kernel objects
 * \param[in] firstPrunePass    true if the first pruning pass is being executed
 */
static inline cl_kernel selectPruneKernel(cl_kernel kernel_pruneonly[], bool firstPrunePass)
{
    cl_kernel *kernelPtr;
    if (firstPrunePass)
    {
        kernelPtr = &(kernel_pruneonly[epruneFirst]);
    }
    else
    {
        kernelPtr = &(kernel_pruneonly[epruneRolling]);
    }
    // TODO: consider creating the prune kernel object here to avoid a
    // clCreateKernel for the rolling prune kernel if this is not needed.
    return *kernelPtr;
}
/*! \brief Return a pointer to the kernel version to be executed at the current step.
 *
 * OpenCL kernel objects are cached in nb. If the requested kernel is not
 * found in the cache, it will be created and the cache will be updated.
 */
static inline cl_kernel select_nbnxn_kernel(gmx_nbnxn_ocl_t *nb, int eeltype, int evdwtype, bool bDoEne, bool bDoPrune)
{
    const char *kernel_name_to_run;
    cl_kernel  *kernel_ptr;
    cl_int      cl_error;

    assert(eeltype < eelOclNR);
    assert(evdwtype < evdwOclNR);

    if (bDoEne)
    {
        if (bDoPrune)
        {
            kernel_name_to_run = nb_kfunc_ener_prune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_ener_prune_ptr[eeltype][evdwtype]);
        }
        else
        {
            kernel_name_to_run = nb_kfunc_ener_noprune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_ener_noprune_ptr[eeltype][evdwtype]);
        }
    }
    else
    {
        if (bDoPrune)
        {
            kernel_name_to_run = nb_kfunc_noener_prune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_noener_prune_ptr[eeltype][evdwtype]);
        }
        else
        {
            kernel_name_to_run = nb_kfunc_noener_noprune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_noener_noprune_ptr[eeltype][evdwtype]);
        }
    }

    if (nullptr == kernel_ptr[0])
    {
        *kernel_ptr = clCreateKernel(nb->dev_rundata->program, kernel_name_to_run, &cl_error);
        assert(cl_error == CL_SUCCESS);
        // TODO: handle errors
    }

    return *kernel_ptr;
}
/*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use.
 */
static inline int calc_shmem_required_nonbonded(int vdwType, bool bPrefetchLjParam)
{
    int shmem;

    /* size of shmem (force-buffers/xq/atom type preloading) */
    /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
    /* i-atom x+q in shared memory */
    shmem = c_numClPerSupercl * c_clSize * sizeof(float) * 4; /* xqib */
    /* cj in shared memory, for both warps separately
     * TODO: in the "nowarp" kernels we load cj only once, so the factor 2 is not needed.
     */
    shmem += 2 * c_nbnxnGpuJgroupSize * sizeof(int); /* cjs */
    if (bPrefetchLjParam)
    {
        if (useLjCombRule(vdwType))
        {
            /* i-atom LJ combination parameters in shared memory */
            shmem += c_numClPerSupercl * c_clSize * 2 * sizeof(float); /* atib abused for ljcp, float2 */
        }
        else
        {
            /* i-atom types in shared memory */
            shmem += c_numClPerSupercl * c_clSize * sizeof(int); /* atib */
        }
    }
    /* force reduction buffers in shared memory */
    shmem += c_clSize * c_clSize * 3 * sizeof(float); /* f_buf */
    /* Warp vote. In fact it must be sized by the number of warps in the block. */
    shmem += sizeof(cl_uint) * 2; /* warp_any */

    return shmem;
}
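/* Worked example (a sketch, not part of the original source; assumes the
 * defaults c_numClPerSupercl == 8, c_clSize == 8 and c_nbnxnGpuJgroupSize == 4):
 * with LJ combination-rule prefetching the request adds up as
 *     xqib     8 * 8 * sizeof(float) * 4 = 1024 B
 *     cjs      2 * 4 * sizeof(int)       =   32 B
 *     ljcp     8 * 8 * 2 * sizeof(float) =  512 B
 *     f_buf    8 * 8 * 3 * sizeof(float) =  768 B
 *     warp_any 2 * sizeof(cl_uint)       =    8 B
 * i.e. roughly 2.3 KiB of local memory per work-group. */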
/*! \brief Initializes data structures that are going to be sent to the OpenCL device.
 *
 * The device can't use the same data structures as the host for two main reasons:
 * - OpenCL restrictions (pointers are not accepted inside data structures)
 * - some host side fields are not needed for the OpenCL kernels.
 *
 * This function is called before the launch of both nbnxn and prune kernels.
 */
static void fillin_ocl_structures(cl_nbparam_t *nbp, cl_nbparam_params_t *nbparams_params)
{
    nbparams_params->coulomb_tab_scale = nbp->coulomb_tab_scale;
    nbparams_params->c_rf              = nbp->c_rf;
    nbparams_params->dispersion_shift  = nbp->dispersion_shift;
    nbparams_params->eeltype           = nbp->eeltype;
    nbparams_params->epsfac            = nbp->epsfac;
    nbparams_params->ewaldcoeff_lj     = nbp->ewaldcoeff_lj;
    nbparams_params->ewald_beta        = nbp->ewald_beta;
    nbparams_params->rcoulomb_sq       = nbp->rcoulomb_sq;
    nbparams_params->repulsion_shift   = nbp->repulsion_shift;
    nbparams_params->rlistOuter_sq     = nbp->rlistOuter_sq;
    nbparams_params->rvdw_sq           = nbp->rvdw_sq;
    nbparams_params->rlistInner_sq     = nbp->rlistInner_sq;
    nbparams_params->rvdw_switch       = nbp->rvdw_switch;
    nbparams_params->sh_ewald          = nbp->sh_ewald;
    nbparams_params->sh_lj_ewald       = nbp->sh_lj_ewald;
    nbparams_params->two_k_rf          = nbp->two_k_rf;
    nbparams_params->vdwtype           = nbp->vdwtype;
    nbparams_params->vdw_switch        = nbp->vdw_switch;
}
/*! \brief Enqueues a wait for event completion.
 *
 * Then it releases the event and sets it to 0.
 * Don't use this function when more than one wait will be issued for the event.
 * Equivalent to a CUDA stream synchronization. */
static void sync_ocl_event(cl_command_queue stream, cl_event *ocl_event)
{
    cl_int gmx_unused cl_error;

    cl_error = clEnqueueBarrierWithWaitList(stream, 1, ocl_event, nullptr);
    GMX_RELEASE_ASSERT(CL_SUCCESS == cl_error, ocl_get_error_string(cl_error).c_str());

    /* Release event and reset it to 0. It is ok to release it, as the enqueue
       call above performs an implicit retain on the event. */
    cl_error = clReleaseEvent(*ocl_event);
    assert(CL_SUCCESS == cl_error);
    *ocl_event = nullptr;
}
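/* Usage sketch: in this file the event recorded in the local queue is waited
 * on exactly once from the non-local queue, e.g. in gpu_copy_xq_to_gpu() below:
 *     sync_ocl_event(stream, &(nb->misc_ops_and_local_H2D_done));
 * after which the event handle has been released and reset to nullptr. */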
/*! \brief Launch asynchronously the xq buffer host to device copy. */
void gpu_copy_xq_to_gpu(gmx_nbnxn_ocl_t *nb, const nbnxn_atomdata_t *nbatom, const AtomLocality atomLocality)
{
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");

    const InteractionLocality iloc = gpuAtomToInteractionLocality(atomLocality);

    /* local/nonlocal offset and length used for xq and f */
    int adat_begin, adat_len;

    cl_atomdata_t   *adat   = nb->atdat;
    cl_plist_t      *plist  = nb->plist[iloc];
    cl_timers_t     *t      = nb->timers;
    cl_command_queue stream = nb->stream[iloc];

    bool bDoTime = (nb->bDoTime) != 0;

    /* Don't launch the non-local H2D copy if there is no dependent
       work to do: neither non-local nor other (e.g. bonded) work
       to do that has as input the nbnxn coordinates.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local x+q copy (and the rest of the local
       work in nbnxn_gpu_launch_kernel()).
     */
    if ((iloc == InteractionLocality::NonLocal) && !haveGpuShortRangeWork(*nb, iloc))
    {
        plist->haveFreshList = false;

        return;
    }

    /* calculate the atom data index range based on locality */
    if (atomLocality == AtomLocality::Local)
    {
        adat_begin = 0;
        adat_len   = adat->natoms_local;
    }
    else
    {
        adat_begin = adat->natoms_local;
        adat_len   = adat->natoms - adat->natoms_local;
    }

    /* beginning of timed HtoD section */
    if (bDoTime)
    {
        t->xf[atomLocality].nb_h2d.openTimingRegion(stream);
    }

    /* HtoD x, q */
    ocl_copy_H2D_async(adat->xq, nbatom->x().data() + adat_begin * 4, adat_begin * sizeof(float) * 4,
                       adat_len * sizeof(float) * 4, stream,
                       bDoTime ? t->xf[atomLocality].nb_h2d.fetchNextEvent() : nullptr);

    if (bDoTime)
    {
        t->xf[atomLocality].nb_h2d.closeTimingRegion(stream);
    }

    /* When we get here all misc operations issued in the local stream as well as
       the local xq H2D are done,
       so we record that in the local stream and wait for it in the nonlocal one. */
    if (nb->bUseTwoStreams)
    {
        if (iloc == InteractionLocality::Local)
        {
            cl_int gmx_used_in_debug cl_error = clEnqueueMarkerWithWaitList(
                    stream, 0, nullptr, &(nb->misc_ops_and_local_H2D_done));
            assert(CL_SUCCESS == cl_error);

            /* Based on the v1.2 section 5.13 of the OpenCL spec, a flush is needed
             * in the local stream in order to be able to sync with the above event
             * from the non-local stream.
             */
            cl_error = clFlush(stream);
            assert(CL_SUCCESS == cl_error);
        }
        else
        {
            sync_ocl_event(stream, &(nb->misc_ops_and_local_H2D_done));
        }
    }
}
/*! \brief Launch GPU kernel

   As we execute nonbonded workload in separate queues, before launching
   the kernel we need to make sure that the following operations have completed:
   - atomdata allocation and related H2D transfers (every nstlist step);
   - pair list H2D transfer (every nstlist step);
   - shift vector H2D transfer (every nstlist step);
   - force (+shift force and energy) output clearing (every step).

   These operations are issued in the local queue at the beginning of the step
   and therefore always complete before the local kernel launch. The non-local
   kernel is launched after the local on the same device/context, so this is
   inherently scheduled after the operations in the local stream (including the
   above "misc_ops").
   However, for the sake of having a future-proof implementation, we use the
   misc_ops_done event to record the point in time when the above operations
   are finished and synchronize with this event in the non-local stream.
 */
void gpu_launch_kernel(gmx_nbnxn_ocl_t *nb, const gmx::StepWorkload &stepWork, const Nbnxm::InteractionLocality iloc)
{
    cl_atomdata_t   *adat   = nb->atdat;
    cl_nbparam_t    *nbp    = nb->nbparam;
    cl_plist_t      *plist  = nb->plist[iloc];
    cl_timers_t     *t      = nb->timers;
    cl_command_queue stream = nb->stream[iloc];

    bool bDoTime = (nb->bDoTime) != 0;

    cl_nbparam_params_t nbparams_params;

    /* Don't launch the non-local kernel if there is no work to do.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local kernel and later (not in
       this function) the stream wait, local f copyback and the f buffer
       clearing. All these operations, except for the local interaction kernel,
       are needed for the non-local interactions. The skip of the local kernel
       call is taken care of later in this function. */
    if (canSkipNonbondedWork(*nb, iloc))
    {
        plist->haveFreshList = false;

        return;
    }

    if (nbp->useDynamicPruning && plist->haveFreshList)
    {
        /* Prunes for rlistOuter and rlistInner, sets plist->haveFreshList=false
           (that's the way the timing accounting can distinguish between
           separate prune kernel and combined force+prune).
         */
        Nbnxm::gpu_launch_kernel_pruneonly(nb, iloc, 1);
    }

    if (plist->nsci == 0)
    {
        /* Don't launch an empty local kernel (not allowed with OpenCL).
         */
        return;
    }

    /* beginning of timed nonbonded calculation section */
    if (bDoTime)
    {
        t->interaction[iloc].nb_k.openTimingRegion(stream);
    }

    /* kernel launch config */
    KernelLaunchConfig config;
    config.sharedMemorySize = calc_shmem_required_nonbonded(nbp->vdwtype, nb->bPrefetchLjParam);
    config.stream           = stream;
    config.blockSize[0]     = c_clSize;
    config.blockSize[1]     = c_clSize;
    config.gridSize[0]      = plist->nsci;

    validate_global_work_size(config, 3, nb->dev_info);

    if (debug)
    {
        fprintf(debug, "Non-bonded GPU launch configuration:\n\tLocal work size: %zux%zux%zu\n\t"
                "Global work size : %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n",
                config.blockSize[0], config.blockSize[1], config.blockSize[2],
                config.blockSize[0] * config.gridSize[0], config.blockSize[1] * config.gridSize[1],
                plist->nsci * c_numClPerSupercl, c_numClPerSupercl, plist->na_c);
    }

    fillin_ocl_structures(nbp, &nbparams_params);

    auto          *timingEvent  = bDoTime ? t->interaction[iloc].nb_k.fetchNextEvent() : nullptr;
    constexpr char kernelName[] = "k_calc_nb";
    const auto     kernel       = select_nbnxn_kernel(nb, nbp->eeltype, nbp->vdwtype,
                                                      stepWork.computeEnergy,
                                                      (plist->haveFreshList && !nb->timers->interaction[iloc].didPrune));

    // The OpenCL kernel takes int as second to last argument because bool is
    // not supported as a kernel argument type (sizeof(bool) is implementation defined).
    const int computeFshift = static_cast<int>(stepWork.computeVirial);
    if (useLjCombRule(nb->nbparam->vdwtype))
    {
        const auto kernelArgs = prepareGpuKernelArguments(kernel, config,
                                                          &nbparams_params, &adat->xq, &adat->f, &adat->e_lj, &adat->e_el, &adat->fshift,
                                                          &adat->lj_comb,
                                                          &adat->shift_vec, &nbp->nbfp_climg2d, &nbp->nbfp_comb_climg2d, &nbp->coulomb_tab_climg2d,
                                                          &plist->sci, &plist->cj4, &plist->excl, &computeFshift);

        launchGpuKernel(kernel, config, timingEvent, kernelName, kernelArgs);
    }
    else
    {
        const auto kernelArgs = prepareGpuKernelArguments(kernel, config,
                                                          &adat->ntypes,
                                                          &nbparams_params, &adat->xq, &adat->f, &adat->e_lj, &adat->e_el, &adat->fshift,
                                                          &adat->atom_types,
                                                          &adat->shift_vec, &nbp->nbfp_climg2d, &nbp->nbfp_comb_climg2d, &nbp->coulomb_tab_climg2d,
                                                          &plist->sci, &plist->cj4, &plist->excl, &computeFshift);

        launchGpuKernel(kernel, config, timingEvent, kernelName, kernelArgs);
    }

    if (bDoTime)
    {
        t->interaction[iloc].nb_k.closeTimingRegion(stream);
    }
}
/*! \brief Calculates the amount of shared memory required by the prune kernel.
 *
 *  Note that for the sake of simplicity we use the CUDA terminology "shared memory"
 *  for OpenCL local memory.
 *
 * \param[in] num_threads_z cj4 concurrency equal to the number of threads/work items in the 3rd dimension.
 * \returns   the amount of local memory in bytes required by the pruning kernel
 */
static inline int calc_shmem_required_prune(const int num_threads_z)
{
    int shmem;

    /* i-atom x in shared memory (for convenience we load all 4 components including q) */
    shmem = c_numClPerSupercl * c_clSize * sizeof(float) * 4;
    /* cj in shared memory, for each warp separately
     * Note: only need to load once per wavefront, but to keep the code simple,
     * for now we load twice on AMD.
     */
    shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);
    /* Warp vote, requires one uint per warp/32 threads per block. */
    shmem += sizeof(cl_uint) * 2 * num_threads_z;

    return shmem;
}
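/* Worked example (a sketch, not part of the original source; assumes
 * c_numClPerSupercl == 8, c_clSize == 8, c_nbnxnGpuClusterpairSplit == 2,
 * c_nbnxnGpuJgroupSize == 4 and num_threads_z == 4):
 *     xq        8 * 8 * sizeof(float) * 4 = 1024 B
 *     cj        4 * 2 * 4 * sizeof(int)   =  128 B
 *     warp vote 2 * 4 * sizeof(cl_uint)   =   32 B
 * i.e. under 1.2 KiB of local memory per pruning work-group. */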
void gpu_launch_kernel_pruneonly(gmx_nbnxn_gpu_t *nb, const InteractionLocality iloc, const int numParts)
{
    cl_atomdata_t   *adat    = nb->atdat;
    cl_nbparam_t    *nbp     = nb->nbparam;
    cl_plist_t      *plist   = nb->plist[iloc];
    cl_timers_t     *t       = nb->timers;
    cl_command_queue stream  = nb->stream[iloc];
    bool             bDoTime = nb->bDoTime == CL_TRUE;

    if (plist->haveFreshList)
    {
        GMX_ASSERT(numParts == 1, "With first pruning we expect 1 part");

        /* Set rollingPruningNumParts to signal that it is not set */
        plist->rollingPruningNumParts = 0;
        plist->rollingPruningPart     = 0;
    }
    else
    {
        if (plist->rollingPruningNumParts == 0)
        {
            plist->rollingPruningNumParts = numParts;
        }
        else
        {
            GMX_ASSERT(numParts == plist->rollingPruningNumParts,
                       "It is not allowed to change numParts in between list generation steps");
        }
    }

    /* Use a local variable for part and update in plist, so we can return here
     * without duplicating the part increment code.
     */
    int part = plist->rollingPruningPart;

    plist->rollingPruningPart++;
    if (plist->rollingPruningPart >= plist->rollingPruningNumParts)
    {
        plist->rollingPruningPart = 0;
    }

    /* Compute the number of list entries to prune in this pass */
    int numSciInPart = (plist->nsci - part) / numParts;

    /* Don't launch the kernel if there is no work to do. */
    if (numSciInPart <= 0)
    {
        plist->haveFreshList = false;

        return;
    }

    GpuRegionTimer *timer = nullptr;
    if (bDoTime)
    {
        timer = &(plist->haveFreshList ? t->interaction[iloc].prune_k : t->interaction[iloc].rollingPrune_k);
    }

    /* beginning of timed prune calculation section */
    if (bDoTime)
    {
        timer->openTimingRegion(stream);
    }

    /* Kernel launch config:
     * - The thread block dimensions match the size of i-clusters, j-clusters,
     *   and j-cluster concurrency, in x, y, and z, respectively.
     * - The 1D block-grid contains as many blocks as super-clusters.
     */
    int num_threads_z = getOclPruneKernelJ4Concurrency(nb->dev_info->vendor_e);

    /* kernel launch config */
    KernelLaunchConfig config;
    config.sharedMemorySize = calc_shmem_required_prune(num_threads_z);
    config.stream           = stream;
    config.blockSize[0]     = c_clSize;
    config.blockSize[1]     = c_clSize;
    config.blockSize[2]     = num_threads_z;
    config.gridSize[0]      = numSciInPart;

    validate_global_work_size(config, 3, nb->dev_info);

    if (debug)
    {
        fprintf(debug, "Pruning GPU kernel launch configuration:\n\tLocal work size: %zux%zux%zu\n\t"
                "\tGlobal work size: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n"
                "\tShMem: %zu\n",
                config.blockSize[0], config.blockSize[1], config.blockSize[2],
                config.blockSize[0] * config.gridSize[0], config.blockSize[1] * config.gridSize[1],
                plist->nsci * c_numClPerSupercl, c_numClPerSupercl, plist->na_c, config.sharedMemorySize);
    }

    cl_nbparam_params_t nbparams_params;
    fillin_ocl_structures(nbp, &nbparams_params);

    auto          *timingEvent  = bDoTime ? timer->fetchNextEvent() : nullptr;
    constexpr char kernelName[] = "k_pruneonly";
    const auto     pruneKernel  = selectPruneKernel(nb->kernel_pruneonly, plist->haveFreshList);
    const auto     kernelArgs   = prepareGpuKernelArguments(pruneKernel, config,
                                                            &nbparams_params, &adat->xq, &adat->shift_vec,
                                                            &plist->sci, &plist->cj4, &plist->imask, &numParts, &part);
    launchGpuKernel(pruneKernel, config, timingEvent, kernelName, kernelArgs);

    if (plist->haveFreshList)
    {
        plist->haveFreshList = false;
        /* Mark that pruning has been done */
        nb->timers->interaction[iloc].didPrune = true;
    }
    else
    {
        /* Mark that rolling pruning has been done */
        nb->timers->interaction[iloc].didRollingPrune = true;
    }

    if (bDoTime)
    {
        timer->closeTimingRegion(stream);
    }
}
/*! \brief
 * Launch asynchronously the download of nonbonded forces from the GPU
 * (and energies/shift forces if required).
 */
void gpu_launch_cpyback(gmx_nbnxn_ocl_t *nb,
                        struct nbnxn_atomdata_t *nbatom,
                        const gmx::StepWorkload &stepWork,
                        const AtomLocality aloc,
                        const bool gmx_unused copyBackNbForce)
{
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");

    cl_int gmx_unused cl_error;
    int               adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */

    /* determine interaction locality from atom locality */
    const InteractionLocality iloc = gpuAtomToInteractionLocality(aloc);

    cl_atomdata_t   *adat    = nb->atdat;
    cl_timers_t     *t       = nb->timers;
    bool             bDoTime = nb->bDoTime == CL_TRUE;
    cl_command_queue stream  = nb->stream[iloc];

    /* don't launch non-local copy-back if there was no non-local work to do */
    if ((iloc == InteractionLocality::NonLocal) && !haveGpuShortRangeWork(*nb, iloc))
    {
        /* TODO An alternative way to signal that non-local work is
           complete is to use a clEnqueueMarker+clEnqueueBarrier
           pair. However, the use of bNonLocalStreamActive has the
           advantage of being local to the host, so probably minimizes
           overhead. Curiously, for NVIDIA OpenCL with an empty-domain
           test case, overall simulation performance was higher with
           the API calls, but this has not been tested on AMD OpenCL,
           so could be worth considering in future. */
        nb->bNonLocalStreamActive = CL_FALSE;

        return;
    }

    getGpuAtomRange(adat, aloc, &adat_begin, &adat_len);

    /* beginning of timed D2H section */
    if (bDoTime)
    {
        t->xf[aloc].nb_d2h.openTimingRegion(stream);
    }

    /* With DD the local D2H transfer can only start after the non-local
       has been launched. */
    if (iloc == InteractionLocality::Local && nb->bNonLocalStreamActive)
    {
        sync_ocl_event(stream, &(nb->nonlocal_done));
    }

    /* DtoH f */
    ocl_copy_D2H_async(nbatom->out[0].f.data() + adat_begin * 3, adat->f,
                       adat_begin * 3 * sizeof(float), (adat_len) * adat->f_elem_size, stream,
                       bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);

    /* kick off work */
    cl_error = clFlush(stream);
    assert(CL_SUCCESS == cl_error);

    /* After the non-local D2H is launched the nonlocal_done event can be
       recorded which signals that the local D2H can proceed. This event is not
       placed after the non-local kernel because we first need the non-local
       data back. */
    if (iloc == InteractionLocality::NonLocal)
    {
        cl_error = clEnqueueMarkerWithWaitList(stream, 0, nullptr, &(nb->nonlocal_done));
        assert(CL_SUCCESS == cl_error);
        nb->bNonLocalStreamActive = CL_TRUE;
    }

    /* only transfer energies in the local stream */
    if (iloc == InteractionLocality::Local)
    {
        /* DtoH fshift when virial is needed */
        if (stepWork.computeVirial)
        {
            ocl_copy_D2H_async(nb->nbst.fshift, adat->fshift, 0, SHIFTS * adat->fshift_elem_size,
                               stream, bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);
        }

        /* DtoH energies */
        if (stepWork.computeEnergy)
        {
            ocl_copy_D2H_async(nb->nbst.e_lj, adat->e_lj, 0, sizeof(float), stream,
                               bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);

            ocl_copy_D2H_async(nb->nbst.e_el, adat->e_el, 0, sizeof(float), stream,
                               bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);
        }
    }

    if (bDoTime)
    {
        t->xf[aloc].nb_d2h.closeTimingRegion(stream);
    }
}
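/* Typical per-step call sequence (illustrative): for each interaction locality
 * the host issues gpu_copy_xq_to_gpu(), then gpu_launch_kernel() (preceded by
 * gpu_launch_kernel_pruneonly() on dynamic-pruning steps, as handled above),
 * and finally gpu_launch_cpyback(); the shared gpu_common.h helpers later
 * block on the recorded events to collect the results. */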
/*! \brief Selects the Ewald kernel type, analytical or tabulated, single or twin cut-off. */
int nbnxn_gpu_pick_ewald_kernel_type(const interaction_const_t &ic)
{
    bool bTwinCut = (ic.rcoulomb != ic.rvdw);
    bool bUseAnalyticalEwald, bForceAnalyticalEwald, bForceTabulatedEwald;
    int  kernel_type;

    /* Benchmarking/development environment variables to force the use of
       analytical or tabulated Ewald kernel. */
    bForceAnalyticalEwald = (getenv("GMX_OCL_NB_ANA_EWALD") != nullptr);
    bForceTabulatedEwald  = (getenv("GMX_OCL_NB_TAB_EWALD") != nullptr);

    if (bForceAnalyticalEwald && bForceTabulatedEwald)
    {
        gmx_incons("Both analytical and tabulated Ewald OpenCL non-bonded kernels "
                   "requested through environment variables.");
    }

    /* OpenCL: By default, use analytical Ewald
     * TODO: tabulated does not work, it needs fixing, see init_nbparam() in nbnxn_ocl_data_mgmt.cpp
     *
     * TODO: decide if dev_info parameter should be added to recognize NVIDIA CC>=3.0 devices.
     */
    /* By default use analytical Ewald. */
    bUseAnalyticalEwald = true;
    if (bForceAnalyticalEwald)
    {
        if (debug)
        {
            fprintf(debug, "Using analytical Ewald OpenCL kernels\n");
        }
    }
    else if (bForceTabulatedEwald)
    {
        bUseAnalyticalEwald = false;
        if (debug)
        {
            fprintf(debug, "Using tabulated Ewald OpenCL kernels\n");
        }
    }

    /* Use twin cut-off kernels if requested by bTwinCut or the env. var.
       forces it (use it for debugging/benchmarking only). */
    if (!bTwinCut && (getenv("GMX_OCL_NB_EWALD_TWINCUT") == nullptr))
    {
        kernel_type = bUseAnalyticalEwald ? eelOclEWALD_ANA : eelOclEWALD_TAB;
    }
    else
    {
        kernel_type = bUseAnalyticalEwald ? eelOclEWALD_ANA_TWIN : eelOclEWALD_TAB_TWIN;
    }

    return kernel_type;
}
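/* Example (illustrative): the selection above can be steered from the shell
 * for benchmarking, e.g.
 *     GMX_OCL_NB_ANA_EWALD=1      force the analytical Ewald kernels
 *     GMX_OCL_NB_TAB_EWALD=1      force the tabulated Ewald kernels
 *     GMX_OCL_NB_EWALD_TWINCUT=1  force the twin cut-off kernel flavor
 * (setting both ANA and TAB triggers the gmx_incons() above). */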