/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
 * Copyright (c) 2017,2018,2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *  \brief Define OpenCL implementation of nbnxm_gpu.h
 *
 *  \author Anca Hamuraru <anca@streamcomputing.eu>
 *  \author Teemu Virolainen <teemu@streamcomputing.eu>
 *  \author Dimitrios Karkoulis <dimitris.karkoulis@gmail.com>
 *  \author Szilárd Páll <pall.szilard@gmail.com>
 *  \ingroup module_nbnxm
 *
 *  TODO (psz):
 *  - Add a static const cl_uint c_pruneKernelWorkDim / c_nbnxnKernelWorkDim = 3;
 *  - Rework the copying of OCL data structures done before every invocation of both
 *    nb and prune kernels (using fillin_ocl_structures); also consider at the same
 *    time calling clSetKernelArg only on the updated parameters (if tracking changed
 *    parameters is feasible);
 *  - Consider using the event_wait_list argument to clEnqueueNDRangeKernel to mark
 *    dependencies on the kernel launched: e.g. the non-local nb kernel's dependency
 *    on the misc_ops_and_local_H2D_done event could be better expressed this way
 *    (see the illustrative sketch below this comment);
 *
 *  - Consider extracting common sections of the OpenCL and CUDA nbnxn logic, e.g.:
 *    - in nbnxn_gpu_launch_kernel_pruneonly() the pre- and post-kernel launch logic
 *      is identical in the two implementations, so a 3-way split might allow sharing
 *      code.
 */
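/* A minimal sketch (not used in this file) of the event_wait_list idea from the
 * TODO above, assuming a hypothetical cl_event recorded in the local queue after
 * the misc_ops and local H2D transfers; all names below are illustrative only:
 *
 *   cl_event miscOpsAndLocalH2dDone = ...; // recorded via the local queue
 *   cl_int status = clEnqueueNDRangeKernel(nonLocalQueue, nbKernel,
 *                                          3, nullptr,
 *                                          globalWorkSize, localWorkSize,
 *                                          1, &miscOpsAndLocalH2dDone, // wait list
 *                                          nullptr);
 */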
#include "gmxpre.h"

#include <assert.h>
#include <stdlib.h>

#if defined(_MSC_VER)
#    include <limits>
#endif

#include "gromacs/gpu_utils/device_context.h"
#include "gromacs/gpu_utils/gputraits_ocl.h"
#include "gromacs/gpu_utils/oclutils.h"
#include "gromacs/hardware/device_information.h"
#include "gromacs/hardware/hw_info.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/nbnxm/atomdata.h"
#include "gromacs/nbnxm/gpu_common.h"
#include "gromacs/nbnxm/gpu_common_utils.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/nbnxm_gpu.h"
#include "gromacs/nbnxm/pairlist.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"

#include "nbnxm_ocl_types.h"

namespace Nbnxm
{

/*! \brief Convenience constants */
//@{
static constexpr int c_clSize = c_nbnxnGpuClusterSize;
//@}


/*! \brief Validates the input global work size parameter.
 */
static inline void validate_global_work_size(const KernelLaunchConfig& config,
                                             int                       work_dim,
                                             const DeviceInformation*  dinfo)
{
    cl_uint device_size_t_size_bits;
    cl_uint host_size_t_size_bits;

    GMX_ASSERT(dinfo, "Need a valid device info object");

    size_t global_work_size[3];
    GMX_ASSERT(work_dim <= 3, "Not supporting hyper-grids just yet");
    for (int i = 0; i < work_dim; i++)
    {
        global_work_size[i] = config.blockSize[i] * config.gridSize[i];
    }

    /* Each component of a global_work_size must not exceed the range given by the
       sizeof(device size_t) for the device on which the kernel execution will
       be enqueued. See:
       https://www.khronos.org/registry/cl/sdk/1.0/docs/man/xhtml/clEnqueueNDRangeKernel.html
     */
    device_size_t_size_bits = dinfo->adress_bits;
    host_size_t_size_bits   = static_cast<cl_uint>(sizeof(size_t) * 8);

    /* If sizeof(host size_t) <= sizeof(device size_t)
            => global_work_size components will always be valid
       else
            => get device limit for global work size and
            compare it against each component of global_work_size.
     */
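    /* Example: a device reporting 32 address bits on a 64-bit host caps each
     * global_work_size component at 2^32 - 1, i.e. about 4.3e9 work items. */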
    if (host_size_t_size_bits > device_size_t_size_bits)
    {
        size_t device_limit;

        device_limit = (1ULL << device_size_t_size_bits) - 1;

        for (int i = 0; i < work_dim; i++)
        {
            if (global_work_size[i] > device_limit)
            {
                gmx_fatal(
                        FARGS,
                        "Watch out, the input system is too large to simulate!\n"
                        "The number of nonbonded work units (=number of super-clusters) exceeds "
                        "the device capabilities. Global work size limit exceeded (%zu > %zu)!",
                        global_work_size[i],
                        device_limit);
            }
        }
    }
}

/* Constant arrays listing non-bonded kernel function names. The arrays are
 * organized as 2-dimensional arrays indexed by electrostatics and VdW type.
 *
 *  Note that the row- and column-order of the names has to match the
 *  order of the corresponding enumerated electrostatics and VdW types,
 *  respectively, defined in nbnxm_ocl_types.h.
 */

/*! \brief Force-only kernel function names. */
static const char* nb_kfunc_noener_noprune_ptr[c_numElecTypes][c_numVdwTypes] = {
    { "nbnxn_kernel_ElecCut_VdwLJ_F_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombLB_F_opencl",
      "nbnxn_kernel_ElecCut_VdwLJFsw_F_opencl",
      "nbnxn_kernel_ElecCut_VdwLJPsw_F_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_F_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombLB_F_opencl",
      "nbnxn_kernel_ElecRF_VdwLJFsw_F_opencl",
      "nbnxn_kernel_ElecRF_VdwLJPsw_F_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_F_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombLB_F_opencl",
      "nbnxn_kernel_ElecEw_VdwLJFsw_F_opencl",
      "nbnxn_kernel_ElecEw_VdwLJPsw_F_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_opencl" }
};

/*! \brief Force + energy kernel function names. */
static const char* nb_kfunc_ener_noprune_ptr[c_numElecTypes][c_numVdwTypes] = {
    { "nbnxn_kernel_ElecCut_VdwLJ_VF_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_opencl",
      "nbnxn_kernel_ElecCut_VdwLJFsw_VF_opencl",
      "nbnxn_kernel_ElecCut_VdwLJPsw_VF_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_VF_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_opencl",
      "nbnxn_kernel_ElecRF_VdwLJFsw_VF_opencl",
      "nbnxn_kernel_ElecRF_VdwLJPsw_VF_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_VF_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_opencl",
      "nbnxn_kernel_ElecEw_VdwLJFsw_VF_opencl",
      "nbnxn_kernel_ElecEw_VdwLJPsw_VF_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_opencl" }
};

/*! \brief Force + pruning kernel function names. */
static const char* nb_kfunc_noener_prune_ptr[c_numElecTypes][c_numVdwTypes] = {
    { "nbnxn_kernel_ElecCut_VdwLJ_F_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_F_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_F_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_opencl" }
};

/*! \brief Force + energy + pruning kernel function names. */
static const char* nb_kfunc_ener_prune_ptr[c_numElecTypes][c_numVdwTypes] = {
    { "nbnxn_kernel_ElecCut_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_opencl",
      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_opencl" }
};
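
/* Example lookup (values taken from the tables above, whose ordering matches the
 * enums in nbnxm_ocl_types.h): tabulated Ewald electrostatics (row index 2) with
 * plain cut-off LJ (column index 0), with energies and pruning, selects
 * nb_kfunc_ener_prune_ptr[2][0] == "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_opencl". */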

/*! \brief Return the prune kernel to be executed at the current invocation.
 *
 * \param[in] kernel_pruneonly  array of prune kernel objects
 * \param[in] firstPrunePass    true if the first pruning pass is being executed
 */
static inline cl_kernel selectPruneKernel(cl_kernel kernel_pruneonly[], bool firstPrunePass)
{
    cl_kernel* kernelPtr;

    if (firstPrunePass)
    {
        kernelPtr = &(kernel_pruneonly[epruneFirst]);
    }
    else
    {
        kernelPtr = &(kernel_pruneonly[epruneRolling]);
    }
    // TODO: consider creating the prune kernel object here to avoid a
    // clCreateKernel for the rolling prune kernel if this is not needed.
    return *kernelPtr;
}

/*! \brief Return the non-bonded kernel to be executed at the current step.
 *  OpenCL kernel objects are cached in nb. If the requested kernel is not
 *  found in the cache, it will be created and the cache will be updated.
 */
static inline cl_kernel
select_nbnxn_kernel(NbnxmGpu* nb, enum ElecType elecType, enum VdwType vdwType, bool bDoEne, bool bDoPrune)
{
    const char* kernel_name_to_run;
    cl_kernel*  kernel_ptr;
    cl_int      cl_error;

    const int elecTypeIdx = static_cast<int>(elecType);
    const int vdwTypeIdx  = static_cast<int>(vdwType);

    GMX_ASSERT(elecTypeIdx < c_numElecTypes,
               "The electrostatics type requested is not implemented in the OpenCL kernels.");
    GMX_ASSERT(vdwTypeIdx < c_numVdwTypes,
               "The VdW type requested is not implemented in the OpenCL kernels.");

    if (bDoEne)
    {
        if (bDoPrune)
        {
            kernel_name_to_run = nb_kfunc_ener_prune_ptr[elecTypeIdx][vdwTypeIdx];
            kernel_ptr         = &(nb->kernel_ener_prune_ptr[elecTypeIdx][vdwTypeIdx]);
        }
        else
        {
            kernel_name_to_run = nb_kfunc_ener_noprune_ptr[elecTypeIdx][vdwTypeIdx];
            kernel_ptr         = &(nb->kernel_ener_noprune_ptr[elecTypeIdx][vdwTypeIdx]);
        }
    }
    else
    {
        if (bDoPrune)
        {
            kernel_name_to_run = nb_kfunc_noener_prune_ptr[elecTypeIdx][vdwTypeIdx];
            kernel_ptr         = &(nb->kernel_noener_prune_ptr[elecTypeIdx][vdwTypeIdx]);
        }
        else
        {
            kernel_name_to_run = nb_kfunc_noener_noprune_ptr[elecTypeIdx][vdwTypeIdx];
            kernel_ptr         = &(nb->kernel_noener_noprune_ptr[elecTypeIdx][vdwTypeIdx]);
        }
    }

    if (nullptr == kernel_ptr[0])
    {
        *kernel_ptr = clCreateKernel(nb->dev_rundata->program, kernel_name_to_run, &cl_error);
        GMX_ASSERT(cl_error == CL_SUCCESS,
                   ("clCreateKernel failed: " + ocl_get_error_string(cl_error)
                    + " for kernel named " + kernel_name_to_run)
                           .c_str());
    }

    return *kernel_ptr;
}

/*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use.
 */
static inline int calc_shmem_required_nonbonded(enum VdwType vdwType, bool bPrefetchLjParam)
{
    int shmem;

    /* size of shmem (force-buffers/xq/atom type preloading) */
    /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
    /* i-atom x+q in shared memory */
    shmem = c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(float) * 4; /* xqib */
    /* cj in shared memory, for both warps separately
     * TODO: in the "nowarp" kernels we load cj only once, so the factor 2 is not needed.
     */
    shmem += 2 * c_nbnxnGpuJgroupSize * sizeof(int); /* cjs  */
    if (bPrefetchLjParam)
    {
        if (useLjCombRule(vdwType))
        {
            /* i-atom LJ combination parameters in shared memory */
            shmem += c_nbnxnGpuNumClusterPerSupercluster * c_clSize * 2
                     * sizeof(float); /* atib abused for ljcp, float2 */
        }
        else
        {
            /* i-atom types in shared memory */
            shmem += c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(int); /* atib */
        }
    }
    /* force reduction buffers in shared memory */
    shmem += c_clSize * c_clSize * 3 * sizeof(float); /* f_buf */
    /* Warp vote. In fact it must be sizeof(cl_uint) * the number of warps in the block. */
    shmem += sizeof(cl_uint) * 2; /* warp_any */
    return shmem;
}
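
/* Illustrative arithmetic for calc_shmem_required_nonbonded(), assuming the
 * typical values c_clSize = 8, c_nbnxnGpuNumClusterPerSupercluster = 8 and
 * c_nbnxnGpuJgroupSize = 4 (an assumption, not asserted here): xqib is
 * 8*8*4*sizeof(float) = 1024 B, cjs is 2*4*sizeof(int) = 32 B, atib (atom-type
 * path) is 8*8*sizeof(int) = 256 B, f_buf is 8*8*3*sizeof(float) = 768 B and
 * warp_any is 2*sizeof(cl_uint) = 8 B, i.e. 2088 bytes of local memory in total. */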

/*! \brief Initializes data structures that are going to be sent to the OpenCL device.
 *
 *  The device can't use the same data structures as the host for two main reasons:
 *  - OpenCL restrictions (pointers are not accepted inside data structures)
 *  - some host side fields are not needed for the OpenCL kernels.
 *
 *  This function is called before the launch of both nbnxn and prune kernels.
 */
static void fillin_ocl_structures(NBParamGpu* nbp, cl_nbparam_params_t* nbparams_params)
{
    nbparams_params->coulomb_tab_scale = nbp->coulomb_tab_scale;
    nbparams_params->c_rf              = nbp->c_rf;
    nbparams_params->dispersion_shift  = nbp->dispersion_shift;
    nbparams_params->elecType          = nbp->elecType;
    nbparams_params->epsfac            = nbp->epsfac;
    nbparams_params->ewaldcoeff_lj     = nbp->ewaldcoeff_lj;
    nbparams_params->ewald_beta        = nbp->ewald_beta;
    nbparams_params->rcoulomb_sq       = nbp->rcoulomb_sq;
    nbparams_params->repulsion_shift   = nbp->repulsion_shift;
    nbparams_params->rlistOuter_sq     = nbp->rlistOuter_sq;
    nbparams_params->rvdw_sq           = nbp->rvdw_sq;
    nbparams_params->rlistInner_sq     = nbp->rlistInner_sq;
    nbparams_params->rvdw_switch       = nbp->rvdw_switch;
    nbparams_params->sh_ewald          = nbp->sh_ewald;
    nbparams_params->sh_lj_ewald       = nbp->sh_lj_ewald;
    nbparams_params->two_k_rf          = nbp->two_k_rf;
    nbparams_params->vdwType           = nbp->vdwType;
    nbparams_params->vdw_switch        = nbp->vdw_switch;
}

/*! \brief Launch GPU kernel

   As we execute nonbonded workload in separate queues, before launching
   the kernel we need to make sure that the following operations have completed:
   - atomdata allocation and related H2D transfers (every nstlist step);
   - pair list H2D transfer (every nstlist step);
   - shift vector H2D transfer (every nstlist step);
   - force (+shift force and energy) output clearing (every step).

   These operations are issued in the local queue at the beginning of the step
   and therefore always complete before the local kernel launch. The non-local
   kernel is launched after the local one on the same device/context, so it is
   inherently scheduled after the operations in the local stream (including the
   above "misc_ops").
   However, for the sake of having a future-proof implementation, we use the
   misc_ops_done event to record the point in time when the above operations
   are finished and synchronize with this event in the non-local stream.
 */
void gpu_launch_kernel(NbnxmGpu* nb, const gmx::StepWorkload& stepWork, const Nbnxm::InteractionLocality iloc)
{
    NBAtomDataGpu*      adat         = nb->atdat;
    NBParamGpu*         nbp          = nb->nbparam;
    gpu_plist*          plist        = nb->plist[iloc];
    Nbnxm::GpuTimers*   timers       = nb->timers;
    const DeviceStream& deviceStream = *nb->deviceStreams[iloc];

    bool bDoTime = nb->bDoTime;

    cl_nbparam_params_t nbparams_params;

    /* Don't launch the non-local kernel if there is no work to do.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local kernel and later (not in
       this function) the stream wait, local f copyback and the f buffer
       clearing. All these operations, except for the local interaction kernel,
       are needed for the non-local interactions. The skip of the local kernel
       call is taken care of later in this function. */
    if (canSkipNonbondedWork(*nb, iloc))
    {
        plist->haveFreshList = false;

        return;
    }

    if (nbp->useDynamicPruning && plist->haveFreshList)
    {
        /* Prunes for rlistOuter and rlistInner, sets plist->haveFreshList=false
           (that's the way the timing accounting can distinguish between
           separate prune kernel and combined force+prune).
         */
        Nbnxm::gpu_launch_kernel_pruneonly(nb, iloc, 1);
    }

    if (plist->nsci == 0)
    {
        /* Don't launch an empty local kernel (this is not allowed with OpenCL). */
        return;
    }

    /* beginning of timed nonbonded calculation section */
    if (bDoTime)
    {
        timers->interaction[iloc].nb_k.openTimingRegion(deviceStream);
    }

    /* kernel launch config */

    KernelLaunchConfig config;
    config.sharedMemorySize = calc_shmem_required_nonbonded(nbp->vdwType, nb->bPrefetchLjParam);
    config.blockSize[0]     = c_clSize;
    config.blockSize[1]     = c_clSize;
    config.gridSize[0]      = plist->nsci;

    validate_global_work_size(config, 3, &nb->deviceContext_->deviceInfo());

    if (debug)
    {
        fprintf(debug,
                "Non-bonded GPU launch configuration:\n\tLocal work size: %zux%zux%zu\n\t"
                "Global work size: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n",
                config.blockSize[0],
                config.blockSize[1],
                config.blockSize[2],
                config.blockSize[0] * config.gridSize[0],
                config.blockSize[1] * config.gridSize[1],
                plist->nsci * c_nbnxnGpuNumClusterPerSupercluster,
                c_nbnxnGpuNumClusterPerSupercluster,
                plist->na_c);
    }

    fillin_ocl_structures(nbp, &nbparams_params);

    auto* timingEvent = bDoTime ? timers->interaction[iloc].nb_k.fetchNextEvent() : nullptr;
    constexpr char kernelName[] = "k_calc_nb";
    const auto     kernel =
            select_nbnxn_kernel(nb,
                                nbp->elecType,
                                nbp->vdwType,
                                stepWork.computeEnergy,
                                (plist->haveFreshList && !nb->timers->interaction[iloc].didPrune));


    // The OpenCL kernel takes int as second to last argument because bool is
    // not supported as a kernel argument type (sizeof(bool) is implementation defined).
    const int computeFshift = static_cast<int>(stepWork.computeVirial);
    if (useLjCombRule(nb->nbparam->vdwType))
    {
        const auto kernelArgs = prepareGpuKernelArguments(kernel,
                                                          config,
                                                          &nbparams_params,
                                                          &adat->xq,
                                                          &adat->f,
                                                          &adat->eLJ,
                                                          &adat->eElec,
                                                          &adat->fShift,
                                                          &adat->ljComb,
                                                          &adat->shiftVec,
                                                          &nbp->nbfp,
                                                          &nbp->nbfp_comb,
                                                          &nbp->coulomb_tab,
                                                          &plist->sci,
                                                          &plist->cj4,
                                                          &plist->excl,
                                                          &computeFshift);

        launchGpuKernel(kernel, config, deviceStream, timingEvent, kernelName, kernelArgs);
    }
    else
    {
        const auto kernelArgs = prepareGpuKernelArguments(kernel,
                                                          config,
                                                          &adat->numTypes,
                                                          &nbparams_params,
                                                          &adat->xq,
                                                          &adat->f,
                                                          &adat->eLJ,
                                                          &adat->eElec,
                                                          &adat->fShift,
                                                          &adat->atomTypes,
                                                          &adat->shiftVec,
                                                          &nbp->nbfp,
                                                          &nbp->nbfp_comb,
                                                          &nbp->coulomb_tab,
                                                          &plist->sci,
                                                          &plist->cj4,
                                                          &plist->excl,
                                                          &computeFshift);
        launchGpuKernel(kernel, config, deviceStream, timingEvent, kernelName, kernelArgs);
    }

    if (bDoTime)
    {
        timers->interaction[iloc].nb_k.closeTimingRegion(deviceStream);
    }
}


/*! \brief Calculates the amount of shared memory required by the prune kernel.
 *
 *  Note that for the sake of simplicity we use the CUDA terminology "shared memory"
 *  for OpenCL local memory.
 *
 * \param[in] num_threads_z cj4 concurrency, equal to the number of threads/work items
 *                          in the third dimension.
 * \returns   the amount of local memory in bytes required by the pruning kernel
 */
static inline int calc_shmem_required_prune(const int num_threads_z)
{
    int shmem;

    /* i-atom x in shared memory (for convenience we load all 4 components including q) */
    shmem = c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(float) * 4;
    /* cj in shared memory, for each warp separately
     * Note: only need to load once per wavefront, but to keep the code simple,
     * for now we load twice on AMD.
     */
    shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);
    /* Warp vote, requires one uint per warp/32 threads per block. */
    shmem += sizeof(cl_uint) * 2 * num_threads_z;

    return shmem;
}
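
/* Illustrative arithmetic for calc_shmem_required_prune(), assuming the typical
 * values c_clSize = 8, c_nbnxnGpuNumClusterPerSupercluster = 8,
 * c_nbnxnGpuJgroupSize = 4, c_nbnxnGpuClusterpairSplit = 2 and num_threads_z = 4
 * (assumptions, not asserted here): the i-atom x+q buffer is 8*8*4*sizeof(float)
 * = 1024 B, cj storage is 4*2*4*sizeof(int) = 128 B and the warp-vote buffer is
 * 2*4*sizeof(cl_uint) = 32 B, i.e. 1184 bytes of local memory in total. */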

/*! \brief
 * Launch the pairlist prune only kernel for the given locality.
 * \p numParts tells in how many parts, i.e. calls, the list will be pruned;
 * e.g. with \p numParts = 2 the list is pruned in two consecutive calls, each
 * covering roughly half of the pair-list entries.
 */
void gpu_launch_kernel_pruneonly(NbnxmGpu* nb, const InteractionLocality iloc, const int numParts)
{
    NBAtomDataGpu*      adat         = nb->atdat;
    NBParamGpu*         nbp          = nb->nbparam;
    gpu_plist*          plist        = nb->plist[iloc];
    Nbnxm::GpuTimers*   timers       = nb->timers;
    const DeviceStream& deviceStream = *nb->deviceStreams[iloc];
    bool                bDoTime      = nb->bDoTime;

    if (plist->haveFreshList)
    {
        GMX_ASSERT(numParts == 1, "With first pruning we expect 1 part");

        /* Set rollingPruningNumParts to 0 to signal that it is not set */
        plist->rollingPruningNumParts = 0;
        plist->rollingPruningPart     = 0;
    }
    else
    {
        if (plist->rollingPruningNumParts == 0)
        {
            plist->rollingPruningNumParts = numParts;
        }
        else
        {
            GMX_ASSERT(numParts == plist->rollingPruningNumParts,
                       "It is not allowed to change numParts in between list generation steps");
        }
    }

    /* Use a local variable for part and update in plist, so we can return here
     * without duplicating the part increment code.
     */
    int part = plist->rollingPruningPart;

    plist->rollingPruningPart++;
    if (plist->rollingPruningPart >= plist->rollingPruningNumParts)
    {
        plist->rollingPruningPart = 0;
    }

    /* Compute the number of list entries to prune in this pass */
    int numSciInPart = (plist->nsci - part) / numParts;

    /* Don't launch the kernel if there is no work to do. */
    if (numSciInPart <= 0)
    {
        plist->haveFreshList = false;

        return;
    }

    GpuRegionTimer* timer = nullptr;
    if (bDoTime)
    {
        timer = &(plist->haveFreshList ? timers->interaction[iloc].prune_k
                                       : timers->interaction[iloc].rollingPrune_k);
    }

    /* beginning of timed prune calculation section */
    if (bDoTime)
    {
        timer->openTimingRegion(deviceStream);
    }

    /* Kernel launch config:
     * - The thread block dimensions match the size of i-clusters, j-clusters,
     *   and j-cluster concurrency, in x, y, and z, respectively.
     * - The 1D block-grid contains as many blocks as super-clusters.
     */
    int num_threads_z = c_pruneKernelJ4Concurrency;
    /* kernel launch config */
    KernelLaunchConfig config;
    config.sharedMemorySize = calc_shmem_required_prune(num_threads_z);
    config.blockSize[0]     = c_clSize;
    config.blockSize[1]     = c_clSize;
    config.blockSize[2]     = num_threads_z;
    config.gridSize[0]      = numSciInPart;

    validate_global_work_size(config, 3, &nb->deviceContext_->deviceInfo());

    if (debug)
    {
        fprintf(debug,
                "Pruning GPU kernel launch configuration:\n\tLocal work size: %zux%zux%zu\n\t"
                "Global work size: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n"
                "\tShMem: %zu\n",
                config.blockSize[0],
                config.blockSize[1],
                config.blockSize[2],
                config.blockSize[0] * config.gridSize[0],
                config.blockSize[1] * config.gridSize[1],
                plist->nsci * c_nbnxnGpuNumClusterPerSupercluster,
                c_nbnxnGpuNumClusterPerSupercluster,
                plist->na_c,
                config.sharedMemorySize);
    }

    cl_nbparam_params_t nbparams_params;
    fillin_ocl_structures(nbp, &nbparams_params);

    auto*          timingEvent  = bDoTime ? timer->fetchNextEvent() : nullptr;
    constexpr char kernelName[] = "k_pruneonly";
    const auto     pruneKernel  = selectPruneKernel(nb->kernel_pruneonly, plist->haveFreshList);
    const auto     kernelArgs   = prepareGpuKernelArguments(pruneKernel,
                                                      config,
                                                      &nbparams_params,
                                                      &adat->xq,
                                                      &adat->shiftVec,
                                                      &plist->sci,
                                                      &plist->cj4,
                                                      &plist->imask,
                                                      &numParts,
                                                      &part);
    launchGpuKernel(pruneKernel, config, deviceStream, timingEvent, kernelName, kernelArgs);

    if (plist->haveFreshList)
    {
        plist->haveFreshList = false;
        /* Mark that pruning has been done */
        nb->timers->interaction[iloc].didPrune = true;
    }
    else
    {
        /* Mark that rolling pruning has been done */
        nb->timers->interaction[iloc].didRollingPrune = true;
    }

    if (bDoTime)
    {
        timer->closeTimingRegion(deviceStream);
    }
}

} // namespace Nbnxm