 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 * \brief Define OpenCL implementation of nbnxn_gpu.h
 *
 * \author Anca Hamuraru <anca@streamcomputing.eu>
 * \author Teemu Virolainen <teemu@streamcomputing.eu>
 * \author Dimitrios Karkoulis <dimitris.karkoulis@gmail.com>
 * \author Szilárd Páll <pall.szilard@gmail.com>
 * \ingroup module_mdlib
 * - Add a static const cl_uint c_pruneKernelWorkDim / c_nbnxnKernelWorkDim = 3;
 * - Rework the copying of OCL data structures done before every invocation of both
 *   nb and prune kernels (using fillin_ocl_structures); also consider at the same
 *   time calling clSetKernelArg only on the updated parameters (if tracking changed
 *   parameters is feasible);
 * - Consider using the event_wait_list argument to clEnqueueNDRangeKernel to mark
 *   dependencies on the kernel launched: e.g. the non-local nb kernel's dependency
 *   on the misc_ops_and_local_H2D_done event could be better expressed this way
 *   (see the sketch at the end of this comment);
 * - Consider extracting common sections of the OpenCL and CUDA nbnxn logic, e.g.:
 *   - in nbnxn_gpu_launch_kernel_pruneonly() the pre- and post-kernel launch logic
 *     is identical in the two implementations, so a 3-way split might allow sharing
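 *
 * A hypothetical sketch of the event_wait_list idea from the TODO above (not
 * the current implementation; the names match the members used in this file):
 * \code
 * cl_event deps[1] = { nb->misc_ops_and_local_H2D_done };
 * cl_error = clEnqueueNDRangeKernel(stream, nb_kernel, 3, NULL,
 *                                   global_work_size, local_work_size,
 *                                   1, deps, NULL);
 * \endcode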
#include "thread_mpi/atomic.h"

#include "gromacs/gpu_utils/oclutils.h"
#include "gromacs/hardware/hw_info.h"
#include "gromacs/mdlib/force_flags.h"
#include "gromacs/mdlib/nb_verlet.h"
#include "gromacs/mdlib/nbnxn_consts.h"
#include "gromacs/mdlib/nbnxn_gpu.h"
#include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
#include "gromacs/mdlib/nbnxn_pairlist.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"

#include "nbnxn_ocl_internal.h"
#include "nbnxn_ocl_types.h"
/*! \brief Convenience constants */
static const int c_numClPerSupercl = c_nbnxnGpuNumClusterPerSupercluster;
static const int c_clSize          = c_nbnxnGpuClusterSize;
/*! \brief Always/never run the energy/pruning kernels -- only for benchmarking purposes */
static bool always_ener  = (getenv("GMX_GPU_ALWAYS_ENER") != NULL);
static bool never_ener   = (getenv("GMX_GPU_NEVER_ENER") != NULL);
static bool always_prune = (getenv("GMX_GPU_ALWAYS_PRUNE") != NULL);
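/* Usage sketch for the switches above (shell command line, not part of this
 * file; for benchmarking/testing only):
 *     GMX_GPU_ALWAYS_ENER=1  gmx mdrun ...   force the energy kernel flavor every step
 *     GMX_GPU_ALWAYS_PRUNE=1 gmx mdrun ...   force pruning every step
 */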
/* Uncomment this define to enable kernel debugging */

/*! \brief Specifies which kernel run to debug */
#define DEBUG_RUN_STEP 2
/*! \brief Validates the input global work size parameter.
 */
static inline void validate_global_work_size(size_t *global_work_size, int work_dim, const gmx_device_info_t *dinfo)
    cl_uint device_size_t_size_bits;
    cl_uint host_size_t_size_bits;

    /* Each component of a global_work_size must not exceed the range given by the
       sizeof(device size_t) for the device on which the kernel execution will
       be enqueued. See:
       https://www.khronos.org/registry/cl/sdk/1.0/docs/man/xhtml/clEnqueueNDRangeKernel.html
     */
    device_size_t_size_bits = dinfo->adress_bits;
    host_size_t_size_bits   = (cl_uint)(sizeof(size_t) * 8);

    /* If sizeof(host size_t) <= sizeof(device size_t)
       => global_work_size components will always be valid
       else
       => get device limit for global work size and
       compare it against each component of global_work_size.
     */
    if (host_size_t_size_bits > device_size_t_size_bits)
        size_t device_limit = (((size_t)1) << device_size_t_size_bits) - 1;

        for (int i = 0; i < work_dim; i++)
            if (global_work_size[i] > device_limit)
                gmx_fatal(FARGS, "Watch out, the input system is too large to simulate!\n"
                          "The number of nonbonded work units (=number of super-clusters) exceeds the "
                          "device capabilities. Global work size limit exceeded (%zu > %zu)!",
                          global_work_size[i], device_limit);
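/* Usage note: this is called with the full 3-dimensional launch configuration
 * right before each kernel enqueue, e.g.
 *     validate_global_work_size(global_work_size, 3, nb->dev_info);
 */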
/* Constant arrays listing non-bonded kernel function names. The arrays are
 * organized as 2-dimensional arrays indexed by electrostatics and VDW type.
 *
 * Note that the row- and column-order of function names has to match the
 * order of the corresponding enumerated electrostatics and vdw types, resp.,
 * defined in nbnxn_ocl_types.h.
 */
/*! \brief Force-only kernel function names. */
static const char* nb_kfunc_noener_noprune_ptr[eelOclNR][evdwOclNR] =
{
    { "nbnxn_kernel_ElecCut_VdwLJ_F_opencl", "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecCut_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecCut_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecCut_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_F_opencl", "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecRF_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecRF_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecRF_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_F_opencl", "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecEw_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecEw_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecEw_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_opencl" }
};
/*! \brief Force + energy kernel function names. */
static const char* nb_kfunc_ener_noprune_ptr[eelOclNR][evdwOclNR] =
{
    { "nbnxn_kernel_ElecCut_VdwLJ_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_opencl" }
};
/*! \brief Force + pruning kernel function names. */
static const char* nb_kfunc_noener_prune_ptr[eelOclNR][evdwOclNR] =
{
    { "nbnxn_kernel_ElecCut_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_opencl" }
};
/*! \brief Force + energy + pruning kernel function names. */
static const char* nb_kfunc_ener_prune_ptr[eelOclNR][evdwOclNR] =
{
    { "nbnxn_kernel_ElecCut_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_opencl" }
};
/*! \brief Return a pointer to the prune kernel version to be executed at the current invocation.
 *
 * \param[in] kernel_pruneonly  array of prune kernel objects
 * \param[in] firstPrunePass    true if the first pruning pass is being executed
 */
static inline cl_kernel selectPruneKernel(cl_kernel kernel_pruneonly[],
                                          bool      firstPrunePass)
    cl_kernel *kernelPtr;

    if (firstPrunePass)
        kernelPtr = &(kernel_pruneonly[epruneFirst]);
    else
        kernelPtr = &(kernel_pruneonly[epruneRolling]);

    // TODO: consider creating the prune kernel object here to avoid a
    // clCreateKernel for the rolling prune kernel if this is not needed.
    return *kernelPtr;
/*! \brief Return a pointer to the kernel version to be executed at the current step.
 *
 * OpenCL kernel objects are cached in nb. If the requested kernel is not
 * found in the cache, it will be created and the cache will be updated.
 */
static inline cl_kernel select_nbnxn_kernel(gmx_nbnxn_ocl_t *nb,
                                            int              eeltype,
                                            int              evdwtype,
                                            bool             bDoEne,
                                            bool             bDoPrune)
    const char *kernel_name_to_run;
    cl_kernel  *kernel_ptr;
    cl_int      cl_error;

    assert(eeltype  < eelOclNR);
    assert(evdwtype < evdwOclNR);

    if (bDoEne)
        if (bDoPrune)
            kernel_name_to_run = nb_kfunc_ener_prune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_ener_prune_ptr[eeltype][evdwtype]);
        else
            kernel_name_to_run = nb_kfunc_ener_noprune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_ener_noprune_ptr[eeltype][evdwtype]);
    else
        if (bDoPrune)
            kernel_name_to_run = nb_kfunc_noener_prune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_noener_prune_ptr[eeltype][evdwtype]);
        else
            kernel_name_to_run = nb_kfunc_noener_noprune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_noener_noprune_ptr[eeltype][evdwtype]);

    if (NULL == kernel_ptr[0])
        *kernel_ptr = clCreateKernel(nb->dev_rundata->program, kernel_name_to_run, &cl_error);
        assert(cl_error == CL_SUCCESS);
        // TODO: handle errors

    return *kernel_ptr;
/*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use.
 */
static inline int calc_shmem_required_nonbonded(int  vdwType,
                                                bool bPrefetchLjParam)
    int shmem;

    /* size of shmem (force-buffers/xq/atom type preloading) */
    /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
    /* i-atom x+q in shared memory */
    shmem  = c_numClPerSupercl * c_clSize * sizeof(float) * 4; /* xqib */
    /* cj in shared memory, for both warps separately */
    shmem += 2 * c_nbnxnGpuJgroupSize * sizeof(int);           /* cjs */
    if (bPrefetchLjParam)
        if (useLjCombRule(vdwType))
            /* i-atom LJ combination parameters in shared memory */
            shmem += c_numClPerSupercl * c_clSize * 2*sizeof(float); /* atib abused for ljcp, float2 */
        else
            /* i-atom types in shared memory */
            shmem += c_numClPerSupercl * c_clSize * sizeof(int);     /* atib */
    /* force reduction buffers in shared memory */
    shmem += c_clSize * c_clSize * 3 * sizeof(float); /* f_buf */
    /* Warp vote. In fact it must be one uint per warp in the block. */
    shmem += sizeof(cl_uint) * 2; /* warp_any */

    return shmem;
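/* Worked example (assuming the typical values c_clSize == 8,
 * c_numClPerSupercl == 8, c_nbnxnGpuJgroupSize == 4 and 4-byte float/int):
 * xqib = 8*8*4*4 = 1024 B, cjs = 2*4*4 = 32 B, atib = 8*8*4 = 256 B,
 * f_buf = 8*8*3*4 = 768 B, warp_any = 8 B, i.e. roughly 2 KiB of local
 * memory per work-group for the non-LJ-combination-rule kernels. */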
/*! \brief Initializes data structures that are going to be sent to the OpenCL device.
 *
 * The device can't use the same data structures as the host for two main reasons:
 * - OpenCL restrictions (pointers are not accepted inside data structures)
 * - some host side fields are not needed for the OpenCL kernels.
 *
 * This function is called before the launch of both nbnxn and prune kernels.
 */
static void fillin_ocl_structures(cl_nbparam_t        *nbp,
                                  cl_nbparam_params_t *nbparams_params)
    nbparams_params->coulomb_tab_scale = nbp->coulomb_tab_scale;
    nbparams_params->coulomb_tab_size  = nbp->coulomb_tab_size;
    nbparams_params->c_rf              = nbp->c_rf;
    nbparams_params->dispersion_shift  = nbp->dispersion_shift;
    nbparams_params->eeltype           = nbp->eeltype;
    nbparams_params->epsfac            = nbp->epsfac;
    nbparams_params->ewaldcoeff_lj     = nbp->ewaldcoeff_lj;
    nbparams_params->ewald_beta        = nbp->ewald_beta;
    nbparams_params->rcoulomb_sq       = nbp->rcoulomb_sq;
    nbparams_params->repulsion_shift   = nbp->repulsion_shift;
    nbparams_params->rlistOuter_sq     = nbp->rlistOuter_sq;
    nbparams_params->rvdw_sq           = nbp->rvdw_sq;
    nbparams_params->rlistInner_sq     = nbp->rlistInner_sq;
    nbparams_params->rvdw_switch       = nbp->rvdw_switch;
    nbparams_params->sh_ewald          = nbp->sh_ewald;
    nbparams_params->sh_lj_ewald       = nbp->sh_lj_ewald;
    nbparams_params->two_k_rf          = nbp->two_k_rf;
    nbparams_params->vdwtype           = nbp->vdwtype;
    nbparams_params->vdw_switch        = nbp->vdw_switch;
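/* Note: the struct filled in here is passed to the kernels by value, e.g.
 *     clSetKernelArg(nb_kernel, arg_no++, sizeof(nbparams_params), &(nbparams_params));
 * which is what makes this pointer-free mirror of cl_nbparam_t necessary. */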
/*! \brief Waits for the commands associated with the input event to finish.
 *
 * Then it releases the event and sets it to 0.
 * Don't use this function when more than one wait will be issued for the event.
 */
void wait_ocl_event(cl_event *ocl_event)
    cl_int gmx_unused cl_error;

    /* Blocking wait for the event */
    cl_error = clWaitForEvents(1, ocl_event);
    assert(CL_SUCCESS == cl_error);

    /* Release event and reset it to 0 */
    cl_error = clReleaseEvent(*ocl_event);
    assert(CL_SUCCESS == cl_error);
    *ocl_event = 0;
/*! \brief Enqueues a wait for event completion.
 *
 * Then it releases the event and sets it to 0.
 * Don't use this function when more than one wait will be issued for the event.
 * Equivalent to a CUDA stream synchronization. */
void sync_ocl_event(cl_command_queue stream, cl_event *ocl_event)
    cl_int gmx_unused cl_error;

#ifdef CL_VERSION_1_2
    cl_error = clEnqueueBarrierWithWaitList(stream, 1, ocl_event, NULL);
#else
    cl_error = clEnqueueWaitForEvents(stream, 1, ocl_event);
#endif
    GMX_RELEASE_ASSERT(CL_SUCCESS == cl_error, ocl_get_error_string(cl_error).c_str());

    /* Release event and reset it to 0. It is ok to release it as enqueuewaitforevents performs implicit retain for events. */
    cl_error = clReleaseEvent(*ocl_event);
    assert(CL_SUCCESS == cl_error);
    *ocl_event = 0;
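/* Usage sketch (this is the two-stream pattern used in nbnxn_gpu_launch_kernel
 * below, OpenCL 1.2 variant): record a marker in the local stream, flush it,
 * then have the non-local stream wait on it:
 *     clEnqueueMarkerWithWaitList(localStream, 0, NULL, &ev);
 *     clFlush(localStream);
 *     sync_ocl_event(nonLocalStream, &ev);
 */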
/*! \brief Returns the duration in milliseconds for the command associated with the event.
 *
 * It then releases the event and sets it to 0.
 * Before calling this function, make sure the command has finished either by
 * calling clFinish or clWaitForEvents.
 * The function returns 0.0 if the input event, *ocl_event, is 0.
 * Don't use this function when more than one wait will be issued for the event.
 */
double ocl_event_elapsed_ms(cl_event *ocl_event)
    cl_int gmx_unused cl_error;
    cl_ulong          start_ns, end_ns;
    double            elapsed_ms = 0.0;

    assert(NULL != ocl_event);

    cl_error = clGetEventProfilingInfo(*ocl_event, CL_PROFILING_COMMAND_START,
                                       sizeof(cl_ulong), &start_ns, NULL);
    assert(CL_SUCCESS == cl_error);

    cl_error = clGetEventProfilingInfo(*ocl_event, CL_PROFILING_COMMAND_END,
                                       sizeof(cl_ulong), &end_ns, NULL);
    assert(CL_SUCCESS == cl_error);

    clReleaseEvent(*ocl_event);
    *ocl_event = 0;

    elapsed_ms = (end_ns - start_ns) / 1000000.0;

    return elapsed_ms;
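/* Usage note: profiling info is only available if the queue was created with
 * the CL_QUEUE_PROFILING_ENABLE property; a typical call site first ensures
 * completion, e.g.
 *     clWaitForEvents(1, &ev);
 *     double ms = ocl_event_elapsed_ms(&ev);
 */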
/*! \brief Launch GPU kernel

   As we execute the nonbonded workload in separate queues, before launching
   the kernel we need to make sure that the following operations have completed:
   - atomdata allocation and related H2D transfers (every nstlist step);
   - pair list H2D transfer (every nstlist step);
   - shift vector H2D transfer (every nstlist step);
   - force (+shift force and energy) output clearing (every step).

   These operations are issued in the local queue at the beginning of the step
   and therefore always complete before the local kernel launch. The non-local
   kernel is launched after the local on the same device/context, so this is
   inherently scheduled after the operations in the local stream (including the
   above operations).
   However, for the sake of having a future-proof implementation, we use the
   misc_ops_and_local_H2D_done event to record the point in time when the above
   operations are finished and synchronize with this event in the non-local stream.
void nbnxn_gpu_launch_kernel(gmx_nbnxn_ocl_t               *nb,
                             const struct nbnxn_atomdata_t *nbatom,
                             int                            flags,
                             int                            iloc)
    cl_int               cl_error;
    int                  adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */

    /* OpenCL kernel launch-related stuff */
    size_t               local_work_size[3], global_work_size[3];
    cl_kernel            nb_kernel = NULL; /* fn pointer to the nonbonded kernel */

    cl_atomdata_t       *adat   = nb->atdat;
    cl_nbparam_t        *nbp    = nb->nbparam;
    cl_plist_t          *plist  = nb->plist[iloc];
    cl_timers_t         *t      = nb->timers;
    cl_command_queue     stream = nb->stream[iloc];

    bool                 bCalcEner   = flags & GMX_FORCE_ENERGY;
    int                  bCalcFshift = flags & GMX_FORCE_VIRIAL;
    bool                 bDoTime     = nb->bDoTime;

    cl_nbparam_params_t  nbparams_params;

    float               *debug_buffer_h;
    size_t               debug_buffer_size;
    /* turn energy calculation always on/off (for debugging/testing only) */
    bCalcEner = (bCalcEner || always_ener) && !never_ener;

    /* Don't launch the non-local kernel if there is no work to do.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local kernel, the local x+q copy and later (not in
       this function) the stream wait, local f copyback and the f buffer
       clearing. All these operations, except for the local interaction kernel,
       are needed for the non-local interactions. The skip of the local kernel
       call is taken care of later in this function. */
    if (iloc == eintNonlocal && plist->nsci == 0)
        plist->haveFreshList = false;
        return;
    /* calculate the atom data index range based on locality */
    if (LOCAL_I(iloc))
        adat_begin = 0;
        adat_len   = adat->natoms_local;
    else
        adat_begin = adat->natoms_local;
        adat_len   = adat->natoms - adat->natoms_local;

    /* beginning of timed HtoD section */
    ocl_copy_H2D_async(adat->xq, nbatom->x + adat_begin * 4, adat_begin*sizeof(float)*4,
                       adat_len * sizeof(float) * 4, stream, bDoTime ? (&(t->nb_h2d[iloc])) : NULL);
    /* When we get here all misc operations issued in the local stream as well as
       the local xq H2D are done,
       so we record that in the local stream and wait for it in the nonlocal one. */
    if (nb->bUseTwoStreams)
        if (iloc == eintLocal)
#ifdef CL_VERSION_1_2
            cl_error = clEnqueueMarkerWithWaitList(stream, 0, NULL, &(nb->misc_ops_and_local_H2D_done));
#else
            cl_error = clEnqueueMarker(stream, &(nb->misc_ops_and_local_H2D_done));
#endif
            assert(CL_SUCCESS == cl_error);

            /* Based on the v1.2 section 5.13 of the OpenCL spec, a flush is needed
             * in the local stream in order to be able to sync with the above event
             * from the non-local stream.
             */
            cl_error = clFlush(stream);
            assert(CL_SUCCESS == cl_error);
        else
            sync_ocl_event(stream, &(nb->misc_ops_and_local_H2D_done));
    if (nbp->useDynamicPruning && plist->haveFreshList)
        /* Prunes for rlistOuter and rlistInner, sets plist->haveFreshList=false
           (that's the way the timing accounting can distinguish between
           separate prune kernel and combined force+prune).
         */
        nbnxn_gpu_launch_kernel_pruneonly(nb, iloc, 1);

    if (plist->nsci == 0)
        /* Don't launch an empty local kernel (not allowed with OpenCL).
         * TODO: Separate H2D and kernel launch into separate functions.
         */
        return;
    /* beginning of timed nonbonded calculation section */

    /* get the pointer to the kernel flavor we need to use */
    nb_kernel = select_nbnxn_kernel(nb,
                                    nbp->eeltype,
                                    nbp->vdwtype,
                                    bCalcEner,
                                    (plist->haveFreshList && !nb->timers->didPrune[iloc]) || always_prune);

    /* kernel launch config */
    local_work_size[0] = c_clSize;
    local_work_size[1] = c_clSize;
    local_work_size[2] = 1;

    global_work_size[0] = plist->nsci * local_work_size[0];
    global_work_size[1] = 1 * local_work_size[1];
    global_work_size[2] = 1 * local_work_size[2];
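    /* Example: with plist->nsci == 1000 super-clusters and c_clSize == 8 this
       yields an 8000x8x1 global range of 8x8x1 work-groups, i.e. one
       work-group (a "thread block" in CUDA terms) per super-cluster. */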
    validate_global_work_size(global_work_size, 3, nb->dev_info);

    int shmem = calc_shmem_required_nonbonded(nbp->vdwtype, nb->bPrefetchLjParam);
    static int run_step = 1;

    if (DEBUG_RUN_STEP == run_step)
        debug_buffer_size = global_work_size[0] * global_work_size[1] * global_work_size[2] * sizeof(float);
        debug_buffer_h    = (float*)calloc(1, debug_buffer_size);
        assert(NULL != debug_buffer_h);

        if (NULL == nb->debug_buffer)
            nb->debug_buffer = clCreateBuffer(nb->dev_rundata->context, CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR,
                                              debug_buffer_size, debug_buffer_h, &cl_error);

            assert(CL_SUCCESS == cl_error);
        fprintf(debug, "Non-bonded GPU launch configuration:\n\tLocal work size: %dx%dx%d\n\t"
                "Global work size: %dx%d\n\t#Super-clusters/clusters: %d/%d (%d)\n",
                (int)(local_work_size[0]), (int)(local_work_size[1]), (int)(local_work_size[2]),
                (int)(global_work_size[0]), (int)(global_work_size[1]), plist->nsci*c_numClPerSupercl,
                c_numClPerSupercl, plist->na_c);
    fillin_ocl_structures(nbp, &nbparams_params);

    int arg_no = 0;
    cl_error   = CL_SUCCESS;
    if (!useLjCombRule(nb->nbparam->vdwtype))
        cl_error = clSetKernelArg(nb_kernel, arg_no++, sizeof(int), &(adat->ntypes));

    cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(nbparams_params), &(nbparams_params));
    cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(adat->xq));
    cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(adat->f));
    cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(adat->e_lj));
    cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(adat->e_el));
    cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(adat->fshift));
    if (useLjCombRule(nb->nbparam->vdwtype))
        cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(adat->lj_comb));
    else
        cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(adat->atom_types));
    cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(adat->shift_vec));
    cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(nbp->nbfp_climg2d));
    cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(nbp->nbfp_comb_climg2d));
    cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(nbp->coulomb_tab_climg2d));
    cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(plist->sci));
    cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(plist->cj4));
    cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(plist->excl));
    cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(int), &bCalcFshift);
    cl_error |= clSetKernelArg(nb_kernel, arg_no++, shmem, NULL);
    cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(nb->debug_buffer));

    assert(cl_error == CL_SUCCESS);
    printf("OpenCL error: %s\n", ocl_get_error_string(cl_error).c_str());

    cl_error = clEnqueueNDRangeKernel(stream, nb_kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, bDoTime ? &(t->nb_k[iloc]) : NULL);
    assert(cl_error == CL_SUCCESS);
    static int run_step = 1;

    if (DEBUG_RUN_STEP == run_step)
        FILE *pf;
        char  file_name[256] = {0};
        char  label[20];

        ocl_copy_D2H_async(debug_buffer_h, nb->debug_buffer, 0,
                           debug_buffer_size, stream, NULL);

        // Make sure all data has been transferred back from the device
        clFinish(stream);

        printf("\nWriting debug_buffer to debug_buffer_ocl.txt...");

        sprintf(file_name, "debug_buffer_ocl_%d.txt", DEBUG_RUN_STEP);
        pf = fopen(file_name, "wt");

        fprintf(pf, "%20s", "");
        for (int j = 0; j < (int)global_work_size[0]; j++)
            sprintf(label, "(wIdx=%2d thIdx=%2d)", j / (int)local_work_size[0], j % (int)local_work_size[0]);
            fprintf(pf, "%20s", label);

        for (int i = 0; i < (int)global_work_size[1]; i++)
            sprintf(label, "(wIdy=%2d thIdy=%2d)", i / (int)local_work_size[1], i % (int)local_work_size[1]);
            fprintf(pf, "\n%20s", label);

            for (int j = 0; j < (int)global_work_size[0]; j++)
                fprintf(pf, "%20.5f", debug_buffer_h[i * global_work_size[0] + j]);

        free(debug_buffer_h);
        debug_buffer_h = NULL;
/*! \brief Calculates the amount of shared memory required by the prune kernel.
 *
 * Note that for the sake of simplicity we use the CUDA terminology "shared memory"
 * for OpenCL local memory.
 *
 * \param[in] num_threads_z  cj4 concurrency equal to the number of threads/work items in the third dimension.
 * \returns   the amount of local memory in bytes required by the pruning kernel
 */
static inline int calc_shmem_required_prune(const int num_threads_z)
    int shmem;

    /* i-atom x in shared memory (for convenience we load all 4 components including q) */
    shmem  = c_numClPerSupercl * c_clSize * sizeof(float)*4;
    /* cj in shared memory, for each warp separately */
    shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);
    /* Warp vote, requires one uint per warp/32 threads per block. */
    shmem += sizeof(cl_uint) * 2*num_threads_z;

    return shmem;
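/* Worked example (assuming c_clSize == 8, c_numClPerSupercl == 8,
 * c_nbnxnGpuClusterpairSplit == 2, c_nbnxnGpuJgroupSize == 4 and
 * num_threads_z == 4): 8*8*16 + 4*2*4*4 + 4*2*4 = 1024 + 128 + 32 = 1184 B
 * of local memory per pruning work-group. */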
void nbnxn_gpu_launch_kernel_pruneonly(gmx_nbnxn_gpu_t *nb,
                                       int              iloc,
                                       int              numParts)
    cl_int            cl_error;

    cl_atomdata_t    *adat    = nb->atdat;
    cl_nbparam_t     *nbp     = nb->nbparam;
    cl_plist_t       *plist   = nb->plist[iloc];
    cl_timers_t      *t       = nb->timers;
    cl_command_queue  stream  = nb->stream[iloc];
    bool              bDoTime = nb->bDoTime;
    if (plist->haveFreshList)
        GMX_ASSERT(numParts == 1, "With first pruning we expect 1 part");

        /* Set rollingPruningNumParts to signal that it is not set */
        plist->rollingPruningNumParts = 0;
        plist->rollingPruningPart     = 0;
    else
        if (plist->rollingPruningNumParts == 0)
            plist->rollingPruningNumParts = numParts;
        else
            GMX_ASSERT(numParts == plist->rollingPruningNumParts, "It is not allowed to change numParts in between list generation steps");

    /* Use a local variable for part and update in plist, so we can return here
     * without duplicating the part increment code.
     */
    int part = plist->rollingPruningPart;

    plist->rollingPruningPart++;
    if (plist->rollingPruningPart >= plist->rollingPruningNumParts)
        plist->rollingPruningPart = 0;
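    /* Example: with numParts == 2, successive rolling-prune launches cycle
       part = 0, 1, 0, 1, ..., so consecutive passes cover complementary
       portions of the pair list between two list-generation steps. */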
    /* Compute the number of list entries to prune in this pass */
    int numSciInPart = (plist->nsci - part)/numParts;

    /* Don't launch the kernel if there is no work to do. */
    if (numSciInPart <= 0)
        plist->haveFreshList = false;
        return;
    /* Kernel launch config:
     * - The thread block dimensions match the size of i-clusters, j-clusters,
     *   and j-cluster concurrency, in x, y, and z, respectively.
     * - The 1D block-grid contains as many blocks as super-clusters.
     */
    int       num_threads_z = getOclPruneKernelJ4Concurrency(nb->dev_info->vendor_e);
    cl_kernel pruneKernel   = selectPruneKernel(nb->kernel_pruneonly, plist->haveFreshList);

    /* kernel launch config */
    size_t local_work_size[3], global_work_size[3];
    local_work_size[0] = c_clSize;
    local_work_size[1] = c_clSize;
    local_work_size[2] = num_threads_z;

    global_work_size[0] = numSciInPart * local_work_size[0];
    global_work_size[1] = 1 * local_work_size[1];
    global_work_size[2] = 1 * local_work_size[2];

    validate_global_work_size(global_work_size, 3, nb->dev_info);

    int shmem = calc_shmem_required_prune(num_threads_z);
        fprintf(debug, "Pruning GPU kernel launch configuration:\n\tLocal work size: %dx%dx%d\n"
                "\tGlobal work size: %dx%d\n\t#Super-clusters/clusters: %d/%d (%d)\n"
                "\tShMem: %d\n",
                (int)(local_work_size[0]), (int)(local_work_size[1]), (int)(local_work_size[2]),
                (int)(global_work_size[0]), (int)(global_work_size[1]), plist->nsci*c_numClPerSupercl,
                c_numClPerSupercl, plist->na_c, shmem);
    cl_nbparam_params_t nbparams_params;
    fillin_ocl_structures(nbp, &nbparams_params);

    int arg_no = 0;
    cl_error   = CL_SUCCESS;
    cl_error  |= clSetKernelArg(pruneKernel, arg_no++, sizeof(nbparams_params), &(nbparams_params));
    cl_error  |= clSetKernelArg(pruneKernel, arg_no++, sizeof(cl_mem), &(adat->xq));
    cl_error  |= clSetKernelArg(pruneKernel, arg_no++, sizeof(cl_mem), &(adat->shift_vec));
    cl_error  |= clSetKernelArg(pruneKernel, arg_no++, sizeof(cl_mem), &(plist->sci));
    cl_error  |= clSetKernelArg(pruneKernel, arg_no++, sizeof(cl_mem), &(plist->cj4));
    cl_error  |= clSetKernelArg(pruneKernel, arg_no++, sizeof(cl_mem), &(plist->imask));
    cl_error  |= clSetKernelArg(pruneKernel, arg_no++, sizeof(int), &(numParts));
    cl_error  |= clSetKernelArg(pruneKernel, arg_no++, sizeof(int), &(part));
    cl_error  |= clSetKernelArg(pruneKernel, arg_no++, shmem, nullptr);
    assert(cl_error == CL_SUCCESS);
    cl_event *pruneEventPtr = nullptr;
    if (bDoTime)
        pruneEventPtr = plist->haveFreshList ? &t->prune_k[iloc] : &t->rollingPrune_k[iloc];

    cl_error = clEnqueueNDRangeKernel(stream, pruneKernel, 3,
                                      nullptr, global_work_size, local_work_size,
                                      0, nullptr, pruneEventPtr);
    GMX_RELEASE_ASSERT(CL_SUCCESS == cl_error, ocl_get_error_string(cl_error).c_str());

    if (plist->haveFreshList)
        plist->haveFreshList = false;
        /* Mark that pruning has been done */
        nb->timers->didPrune[iloc] = true;
    else
        /* Mark that rolling pruning has been done */
        nb->timers->didRollingPrune[iloc] = true;
/*! \brief
 * Launch asynchronously the download of nonbonded forces from the GPU
 * (and energies/shift forces if required).
 */
void nbnxn_gpu_launch_cpyback(gmx_nbnxn_ocl_t               *nb,
                              const struct nbnxn_atomdata_t *nbatom,
                              int                            flags,
                              int                            aloc)
    cl_int gmx_unused cl_error;
    int               adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */
    int               iloc = -1;

    /* determine interaction locality from atom locality */
    if (LOCAL_A(aloc))
        iloc = eintLocal;
    else if (NONLOCAL_A(aloc))
        iloc = eintNonlocal;
    else
        char stmp[STRLEN];
        sprintf(stmp, "Invalid atom locality passed (%d); valid here is only "
                "local (%d) or nonlocal (%d)", aloc, eatLocal, eatNonlocal);
        gmx_incons(stmp);
    cl_atomdata_t   *adat    = nb->atdat;
    cl_timers_t     *t       = nb->timers;
    bool             bDoTime = nb->bDoTime;
    cl_command_queue stream  = nb->stream[iloc];

    bool bCalcEner   = flags & GMX_FORCE_ENERGY;
    int  bCalcFshift = flags & GMX_FORCE_VIRIAL;
    /* don't launch non-local copy-back if there was no non-local work to do */
    if (iloc == eintNonlocal && nb->plist[iloc]->nsci == 0)
        /* TODO An alternative way to signal that non-local work is
           complete is to use a clEnqueueMarker+clEnqueueBarrier
           pair. However, the use of bNonLocalStreamActive has the
           advantage of being local to the host, so probably minimizes
           overhead. Curiously, for NVIDIA OpenCL with an empty-domain
           test case, overall simulation performance was higher with
           the API calls, but this has not been tested on AMD OpenCL,
           so could be worth considering in future. */
        nb->bNonLocalStreamActive = false;
        return;
    /* calculate the atom data index range based on locality */
    if (LOCAL_A(aloc))
        adat_begin = 0;
        adat_len   = adat->natoms_local;
    else
        adat_begin = adat->natoms_local;
        adat_len   = adat->natoms - adat->natoms_local;
    /* beginning of timed D2H section */

    /* With DD the local D2H transfer can only start after the non-local
       has been launched. */
    if (iloc == eintLocal && nb->bNonLocalStreamActive)
        sync_ocl_event(stream, &(nb->nonlocal_done));

    ocl_copy_D2H_async(nbatom->out[0].f + adat_begin * 3, adat->f, adat_begin*3*sizeof(float),
                       (adat_len)*adat->f_elem_size, stream, bDoTime ? &(t->nb_d2h_f[iloc]) : NULL);

    cl_error = clFlush(stream);
    assert(CL_SUCCESS == cl_error);
    /* After the non-local D2H is launched the nonlocal_done event can be
       recorded which signals that the local D2H can proceed. This event is not
       placed after the non-local kernel because we first need the non-local
       data back. */
    if (iloc == eintNonlocal)
#ifdef CL_VERSION_1_2
        cl_error = clEnqueueMarkerWithWaitList(stream, 0, NULL, &(nb->nonlocal_done));
#else
        cl_error = clEnqueueMarker(stream, &(nb->nonlocal_done));
#endif
        assert(CL_SUCCESS == cl_error);
        nb->bNonLocalStreamActive = true;
    /* only transfer energies in the local stream */
    if (LOCAL_I(iloc))
        if (bCalcFshift)
            ocl_copy_D2H_async(nb->nbst.fshift, adat->fshift, 0,
                               SHIFTS * adat->fshift_elem_size, stream, bDoTime ? &(t->nb_d2h_fshift[iloc]) : NULL);

        if (bCalcEner)
            ocl_copy_D2H_async(nb->nbst.e_lj, adat->e_lj, 0,
                               sizeof(float), stream, bDoTime ? &(t->nb_d2h_e_lj[iloc]) : NULL);

            ocl_copy_D2H_async(nb->nbst.e_el, adat->e_el, 0,
                               sizeof(float), stream, bDoTime ? &(t->nb_d2h_e_el[iloc]) : NULL);
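    /* Note: all the D2H copies above are asynchronous; their completion is
       only guaranteed by the blocking clFinish() on this stream issued later
       in nbnxn_gpu_wait_for_gpu(). */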
/*! \brief Count pruning kernel time if either kernel has been triggered
 *
 * We do the accounting for either of the two pruning kernel flavors:
 *  - 1st pass prune: ran during the current step (prior to the force kernel);
 *  - rolling prune:  ran at the end of the previous step (prior to the current step H2D xq);
 *
 * Note that the resetting of cl_timers_t::didPrune and cl_timers_t::didRollingPrune should happen
 * after calling this function.
 *
 * \param[inout] timers   structs with OCL timer objects
 * \param[inout] timings  GPU task timing data
 * \param[in]    iloc     interaction locality
 */
static void countPruneKernelTime(cl_timers_t         *timers,
                                 gmx_wallclock_gpu_t *timings,
                                 int                  iloc)
    // We might have not done any pruning (e.g. if we skipped with empty domains).
    if (!timers->didPrune[iloc] && !timers->didRollingPrune[iloc])
        return;

    if (timers->didPrune[iloc])
        timings->pruneTime.c++;
        timings->pruneTime.t += ocl_event_elapsed_ms(&timers->prune_k[iloc]);

    if (timers->didRollingPrune[iloc])
        timings->dynamicPruneTime.c++;
        timings->dynamicPruneTime.t += ocl_event_elapsed_ms(&timers->rollingPrune_k[iloc]);
/*! \brief
 * Wait for the asynchronously launched nonbonded calculations and data
 * transfers to finish.
 */
void nbnxn_gpu_wait_for_gpu(gmx_nbnxn_ocl_t *nb,
                            int flags, int aloc,
                            real *e_lj, real *e_el, rvec *fshift)
    /* NOTE: only implemented for single-precision at this time */
    cl_int gmx_unused cl_error;
    int               iloc = -1;

    /* determine interaction locality from atom locality */
    if (LOCAL_A(aloc))
        iloc = eintLocal;
    else if (NONLOCAL_A(aloc))
        iloc = eintNonlocal;
    else
        char stmp[STRLEN];
        sprintf(stmp, "Invalid atom locality passed (%d); valid here is only "
                "local (%d) or nonlocal (%d)", aloc, eatLocal, eatNonlocal);
        gmx_incons(stmp);
    cl_plist_t                 *plist   = nb->plist[iloc];
    cl_timers_t                *timers  = nb->timers;
    struct gmx_wallclock_gpu_t *timings = nb->timings;
    cl_nb_staging               nbst    = nb->nbst;

    bool bCalcEner   = flags & GMX_FORCE_ENERGY;
    int  bCalcFshift = flags & GMX_FORCE_VIRIAL;
    /* turn energy calculation always on/off (for debugging/testing only) */
    bCalcEner = (bCalcEner || always_ener) && !never_ener;

    /* Launch wait/update timers & counters and do reduction into staging buffers
       BUT skip it when there was no actual work to do during the non-local phase.
       This is consistent with nbnxn_gpu_launch_kernel.

       NOTE: if timing with multiple GPUs (streams) becomes possible, the
       counters could end up being inconsistent due to not being incremented
       on some of the nodes! */
    if (!(iloc == eintNonlocal && nb->plist[iloc]->nsci == 0))
        /* Actual sync point. Waits for everything to be finished in the command queue. TODO: Find out if a more fine-grained solution is needed */
        cl_error = clFinish(nb->stream[iloc]);
        assert(CL_SUCCESS == cl_error);
        /* timing data accumulation */
        if (nb->bDoTime)
            /* only increase counter once (at local F wait) */
            if (LOCAL_I(iloc))
                timings->ktime[plist->haveFreshList ? 1 : 0][bCalcEner ? 1 : 0].c += 1;

            /* kernel timings */
            timings->ktime[plist->haveFreshList ? 1 : 0][bCalcEner ? 1 : 0].t +=
                ocl_event_elapsed_ms(timers->nb_k + iloc);

            /* X/q H2D and F D2H timings */
            timings->nb_h2d_t += ocl_event_elapsed_ms(timers->nb_h2d        + iloc);
            timings->nb_d2h_t += ocl_event_elapsed_ms(timers->nb_d2h_f      + iloc);
            timings->nb_d2h_t += ocl_event_elapsed_ms(timers->nb_d2h_fshift + iloc);
            timings->nb_d2h_t += ocl_event_elapsed_ms(timers->nb_d2h_e_el   + iloc);
            timings->nb_d2h_t += ocl_event_elapsed_ms(timers->nb_d2h_e_lj   + iloc);

            /* Count the pruning kernel times for both cases: 1st pass (at search step)
               and rolling pruning (if called at the previous step).
               We do the accounting here as this is the only sync point where we
               know (without checking or additional sync-ing) that prune tasks
               in the current stream have completed (having just blocking-waited
               for the force D2H). */
            countPruneKernelTime(timers, timings, iloc);
            /* only count atdat and pair-list H2D at pair-search step */
            if (timers->didPairlistH2D[iloc])
                /* atdat transfer timing (add only once, at local F wait) */
                if (LOCAL_I(iloc))
                    timings->pl_h2d_c++;
                    timings->pl_h2d_t += ocl_event_elapsed_ms(&(timers->atdat));

                timings->pl_h2d_t +=
                    ocl_event_elapsed_ms(timers->pl_h2d_sci  + iloc) +
                    ocl_event_elapsed_ms(timers->pl_h2d_cj4  + iloc) +
                    ocl_event_elapsed_ms(timers->pl_h2d_excl + iloc);

                /* Clear the timing flag for the next step */
                timers->didPairlistH2D[iloc] = false;
        /* add up energies and shift forces (only once at local F wait) */
        if (LOCAL_I(iloc))
            if (bCalcEner)
                *e_lj += *nbst.e_lj;
                *e_el += *nbst.e_el;

            if (bCalcFshift)
                for (int i = 0; i < SHIFTS; i++)
                    fshift[i][0] += (nbst.fshift)[i][0];
                    fshift[i][1] += (nbst.fshift)[i][1];
                    fshift[i][2] += (nbst.fshift)[i][2];
    /* Always reset both pruning flags (doesn't hurt doing it even when timing is off). */
    timers->didPrune[iloc] = timers->didRollingPrune[iloc] = false;

    /* Turn off initial list pruning (doesn't hurt if this is not pair-search step). */
    plist->haveFreshList = false;
/*! \brief Selects the Ewald kernel type, analytical or tabulated, single or twin cut-off. */
int nbnxn_gpu_pick_ewald_kernel_type(bool bTwinCut)
    bool bUseAnalyticalEwald, bForceAnalyticalEwald, bForceTabulatedEwald;
    int  kernel_type;

    /* Benchmarking/development environment variables to force the use of
       analytical or tabulated Ewald kernel. */
    bForceAnalyticalEwald = (getenv("GMX_OCL_NB_ANA_EWALD") != NULL);
    bForceTabulatedEwald  = (getenv("GMX_OCL_NB_TAB_EWALD") != NULL);

    if (bForceAnalyticalEwald && bForceTabulatedEwald)
        gmx_incons("Both analytical and tabulated Ewald OpenCL non-bonded kernels "
                   "requested through environment variables.");

    /* OpenCL: By default, use analytical Ewald.
     * TODO: tabulated does not work, it needs fixing, see init_nbparam() in nbnxn_ocl_data_mgmt.cpp
     *
     * TODO: decide if dev_info parameter should be added to recognize NVIDIA CC>=3.0 devices.
     */
    //if ((dev_info->prop.major >= 3 || bForceAnalyticalEwald) && !bForceTabulatedEwald)
    if ((1 || bForceAnalyticalEwald) && !bForceTabulatedEwald)
        bUseAnalyticalEwald = true;

        if (debug)
            fprintf(debug, "Using analytical Ewald OpenCL kernels\n");
    else
        bUseAnalyticalEwald = false;

        if (debug)
            fprintf(debug, "Using tabulated Ewald OpenCL kernels\n");

    /* Use twin cut-off kernels if requested by bTwinCut or the env. var.
       forces it (use it for debugging/benchmarking only). */
    if (!bTwinCut && (getenv("GMX_OCL_NB_EWALD_TWINCUT") == NULL))
        kernel_type = bUseAnalyticalEwald ? eelOclEWALD_ANA : eelOclEWALD_TAB;
    else
        kernel_type = bUseAnalyticalEwald ? eelOclEWALD_ANA_TWIN : eelOclEWALD_TAB_TWIN;

    return kernel_type;