src/gromacs/nbnxm/opencl/nbnxm_ocl.cpp
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
5  * Copyright (c) 2017,2018,2019,2020,2021, by the GROMACS development team, led by
6  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
7  * and including many others, as listed in the AUTHORS file in the
8  * top-level source directory and at http://www.gromacs.org.
9  *
10  * GROMACS is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public License
12  * as published by the Free Software Foundation; either version 2.1
13  * of the License, or (at your option) any later version.
14  *
15  * GROMACS is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with GROMACS; if not, see
22  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
23  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
24  *
25  * If you want to redistribute modifications to GROMACS, please
26  * consider that scientific software is very special. Version
27  * control is crucial - bugs must be traceable. We will be happy to
28  * consider code for inclusion in the official distribution, but
29  * derived work must not be called official GROMACS. Details are found
30  * in the README & COPYING files - if they are missing, get the
31  * official version at http://www.gromacs.org.
32  *
33  * To help us fund GROMACS development, we humbly ask that you cite
34  * the research papers on the package. Check out http://www.gromacs.org.
35  */
36 /*! \internal \file
37  *  \brief Define OpenCL implementation of nbnxm_gpu.h
38  *
39  *  \author Anca Hamuraru <anca@streamcomputing.eu>
40  *  \author Teemu Virolainen <teemu@streamcomputing.eu>
41  *  \author Dimitrios Karkoulis <dimitris.karkoulis@gmail.com>
42  *  \author Szilárd Páll <pall.szilard@gmail.com>
43  *  \ingroup module_nbnxm
44  *
45  *  TODO (psz):
46  *  - Add a static const cl_uint c_pruneKernelWorkDim / c_nbnxnKernelWorkDim = 3;
47  *  - Rework the copying of OCL data structures done before every invocation of both
48  *    nb and prune kernels (using fillin_ocl_structures); also consider at the same
49  *    time calling clSetKernelArg only on the updated parameters (if tracking changed
50  *    parameters is feasible);
51  *  - Consider using the event_wait_list argument to clEnqueueNDRangeKernel to mark
52  *    dependencies on the kernel launched: e.g. the non-local nb kernel's dependency
53  *    on the misc_ops_and_local_H2D_done event could be better expressed this way.
54  *
55  *  - Consider extracting common sections of the OpenCL and CUDA nbnxn logic, e.g:
56  *    - in nbnxn_gpu_launch_kernel_pruneonly() the pre- and post-kernel launch logic
57  *      is identical in the two implementations, so a 3-way split might allow sharing
58  *      code;
59  *    -
60  *
61  */
62 #include "gmxpre.h"
63
64 #include <assert.h>
65 #include <stdlib.h>
66
67 #if defined(_MSVC)
68 #    include <limits>
69 #endif
70
71 #include "gromacs/gpu_utils/device_context.h"
72 #include "gromacs/gpu_utils/gputraits_ocl.h"
73 #include "gromacs/gpu_utils/oclutils.h"
74 #include "gromacs/hardware/device_information.h"
75 #include "gromacs/hardware/hw_info.h"
76 #include "gromacs/mdtypes/simulation_workload.h"
77 #include "gromacs/nbnxm/atomdata.h"
78 #include "gromacs/nbnxm/gpu_common.h"
79 #include "gromacs/nbnxm/gpu_common_utils.h"
80 #include "gromacs/nbnxm/gpu_data_mgmt.h"
81 #include "gromacs/nbnxm/nbnxm.h"
82 #include "gromacs/nbnxm/nbnxm_gpu.h"
83 #include "gromacs/nbnxm/pairlist.h"
84 #include "gromacs/pbcutil/ishift.h"
85 #include "gromacs/timing/gpu_timing.h"
86 #include "gromacs/utility/cstringutil.h"
87 #include "gromacs/utility/fatalerror.h"
88 #include "gromacs/utility/gmxassert.h"
89
90 #include "nbnxm_ocl_types.h"
91
92 namespace Nbnxm
93 {
94
95 /*! \brief Convenience constants */
96 //@{
97 static constexpr int c_clSize = c_nbnxnGpuClusterSize;
98 //@}
99
100
101 /*! \brief Validates the input global work size parameter.
102  */
103 static inline void validate_global_work_size(const KernelLaunchConfig& config,
104                                              int                       work_dim,
105                                              const DeviceInformation*  dinfo)
106 {
107     cl_uint device_size_t_size_bits;
108     cl_uint host_size_t_size_bits;
109
110     GMX_ASSERT(dinfo, "Need a valid device info object");
111
112     size_t global_work_size[3];
113     GMX_ASSERT(work_dim <= 3, "Not supporting hyper-grids just yet");
114     for (int i = 0; i < work_dim; i++)
115     {
116         global_work_size[i] = config.blockSize[i] * config.gridSize[i];
117     }
118
119     /* Each component of a global_work_size must not exceed the range given by the
120        sizeof(device size_t) for the device on which the kernel execution will
121        be enqueued. See:
122        https://www.khronos.org/registry/cl/sdk/1.0/docs/man/xhtml/clEnqueueNDRangeKernel.html
123      */
124     device_size_t_size_bits = dinfo->adress_bits;
125     host_size_t_size_bits   = static_cast<cl_uint>(sizeof(size_t) * 8);
126
127     /* If sizeof(host size_t) <= sizeof(device size_t)
128             => global_work_size components will always be valid
129        else
130             => get device limit for global work size and
131             compare it against each component of global_work_size.
132      */
133     if (host_size_t_size_bits > device_size_t_size_bits)
134     {
135         size_t device_limit;
136
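        /* Largest value representable by the device-side size_t, i.e. 2^adress_bits - 1
           (e.g. 2^32 - 1 on a device with 32-bit addressing). */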
137         device_limit = (1ULL << device_size_t_size_bits) - 1;
138
139         for (int i = 0; i < work_dim; i++)
140         {
141             if (global_work_size[i] > device_limit)
142             {
143                 gmx_fatal(
144                         FARGS,
145                         "Watch out, the input system is too large to simulate!\n"
146                         "The number of nonbonded work units (=number of super-clusters) exceeds the "
147                         "device capabilities. Global work size limit exceeded (%zu > %zu)!",
148                         global_work_size[i],
149                         device_limit);
150             }
151         }
152     }
153 }
154
155 /* Constant arrays listing the non-bonded kernel function names, organized as
156  * two-dimensional arrays indexed by electrostatics and VdW type.
157  *
158  *  Note that the row- and column-order of the kernel names has to match the
159  *  order of the corresponding enumerated electrostatics and VdW types, respectively,
160  *  defined in nbnxm_ocl_types.h.
161  */
162
163 /*! \brief Force-only kernel function names. */
164 static const char* nb_kfunc_noener_noprune_ptr[c_numElecTypes][c_numVdwTypes] = {
165     { "nbnxn_kernel_ElecCut_VdwLJ_F_opencl",
166       "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_opencl",
167       "nbnxn_kernel_ElecCut_VdwLJCombLB_F_opencl",
168       "nbnxn_kernel_ElecCut_VdwLJFsw_F_opencl",
169       "nbnxn_kernel_ElecCut_VdwLJPsw_F_opencl",
170       "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_opencl",
171       "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_opencl" },
172     { "nbnxn_kernel_ElecRF_VdwLJ_F_opencl",
173       "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_opencl",
174       "nbnxn_kernel_ElecRF_VdwLJCombLB_F_opencl",
175       "nbnxn_kernel_ElecRF_VdwLJFsw_F_opencl",
176       "nbnxn_kernel_ElecRF_VdwLJPsw_F_opencl",
177       "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_opencl",
178       "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_opencl" },
179     { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_opencl",
180       "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_opencl",
181       "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_opencl",
182       "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_opencl",
183       "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_opencl",
184       "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_opencl",
185       "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_opencl" },
186     { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_opencl",
187       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_opencl",
188       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_opencl",
189       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_opencl",
190       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_opencl",
191       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_opencl",
192       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_opencl" },
193     { "nbnxn_kernel_ElecEw_VdwLJ_F_opencl",
194       "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_opencl",
195       "nbnxn_kernel_ElecEw_VdwLJCombLB_F_opencl",
196       "nbnxn_kernel_ElecEw_VdwLJFsw_F_opencl",
197       "nbnxn_kernel_ElecEw_VdwLJPsw_F_opencl",
198       "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_opencl",
199       "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_opencl" },
200     { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_opencl",
201       "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_opencl",
202       "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_opencl",
203       "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_opencl",
204       "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_opencl",
205       "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_opencl",
206       "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_opencl" }
207 };
208
209 /*! \brief Force + energy kernel function names. */
210 static const char* nb_kfunc_ener_noprune_ptr[c_numElecTypes][c_numVdwTypes] = {
211     { "nbnxn_kernel_ElecCut_VdwLJ_VF_opencl",
212       "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_opencl",
213       "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_opencl",
214       "nbnxn_kernel_ElecCut_VdwLJFsw_VF_opencl",
215       "nbnxn_kernel_ElecCut_VdwLJPsw_VF_opencl",
216       "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_opencl",
217       "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_opencl" },
218     { "nbnxn_kernel_ElecRF_VdwLJ_VF_opencl",
219       "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_opencl",
220       "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_opencl",
221       "nbnxn_kernel_ElecRF_VdwLJFsw_VF_opencl",
222       "nbnxn_kernel_ElecRF_VdwLJPsw_VF_opencl",
223       "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_opencl",
224       "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_opencl" },
225     { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_opencl",
226       "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_opencl",
227       "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_opencl",
228       "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_opencl",
229       "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_opencl",
230       "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_opencl",
231       "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_opencl" },
232     { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_opencl",
233       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_opencl",
234       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_opencl",
235       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_opencl",
236       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_opencl",
237       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_opencl",
238       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_opencl" },
239     { "nbnxn_kernel_ElecEw_VdwLJ_VF_opencl",
240       "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_opencl",
241       "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_opencl",
242       "nbnxn_kernel_ElecEw_VdwLJFsw_VF_opencl",
243       "nbnxn_kernel_ElecEw_VdwLJPsw_VF_opencl",
244       "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_opencl",
245       "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_opencl" },
246     { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_opencl",
247       "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_opencl",
248       "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_opencl",
249       "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_opencl",
250       "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_opencl",
251       "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_opencl",
252       "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_opencl" }
253 };
254
255 /*! \brief Force + pruning kernel function names. */
256 static const char* nb_kfunc_noener_prune_ptr[c_numElecTypes][c_numVdwTypes] = {
257     { "nbnxn_kernel_ElecCut_VdwLJ_F_prune_opencl",
258       "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_opencl",
259       "nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_opencl",
260       "nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_opencl",
261       "nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_opencl",
262       "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_opencl",
263       "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_opencl" },
264     { "nbnxn_kernel_ElecRF_VdwLJ_F_prune_opencl",
265       "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_opencl",
266       "nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_opencl",
267       "nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_opencl",
268       "nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_opencl",
269       "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_opencl",
270       "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_opencl" },
271     { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_opencl",
272       "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_opencl",
273       "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_opencl",
274       "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_opencl",
275       "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_opencl",
276       "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_opencl",
277       "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_opencl" },
278     { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_opencl",
279       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_opencl",
280       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_opencl",
281       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_opencl",
282       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_opencl",
283       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_opencl",
284       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_opencl" },
285     { "nbnxn_kernel_ElecEw_VdwLJ_F_prune_opencl",
286       "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_opencl",
287       "nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_opencl",
288       "nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_opencl",
289       "nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_opencl",
290       "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_opencl",
291       "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_opencl" },
292     { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_opencl",
293       "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_opencl",
294       "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_opencl",
295       "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_opencl",
296       "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_opencl",
297       "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_opencl",
298       "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_opencl" }
299 };
300
301 /*! \brief Force + energy + pruning kernel function names. */
302 static const char* nb_kfunc_ener_prune_ptr[c_numElecTypes][c_numVdwTypes] = {
303     { "nbnxn_kernel_ElecCut_VdwLJ_VF_prune_opencl",
304       "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_opencl",
305       "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_opencl",
306       "nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_opencl",
307       "nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_opencl",
308       "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_opencl",
309       "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_opencl" },
310     { "nbnxn_kernel_ElecRF_VdwLJ_VF_prune_opencl",
311       "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_opencl",
312       "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_opencl",
313       "nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_opencl",
314       "nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_opencl",
315       "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_opencl",
316       "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_opencl" },
317     { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_opencl",
318       "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_opencl",
319       "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_opencl",
320       "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_opencl",
321       "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_opencl",
322       "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_opencl",
323       "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_opencl" },
324     { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_opencl",
325       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_opencl",
326       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_opencl",
327       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_opencl",
328       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_opencl",
329       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_opencl",
330       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_opencl" },
331     { "nbnxn_kernel_ElecEw_VdwLJ_VF_prune_opencl",
332       "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_opencl",
333       "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_opencl",
334       "nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_opencl",
335       "nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_opencl",
336       "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_opencl",
337       "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_opencl" },
338     { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_opencl",
339       "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_opencl",
340       "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_opencl",
341       "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_opencl",
342       "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_opencl",
343       "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_opencl",
344       "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_opencl" }
345 };
346
347 /*! \brief Returns the prune kernel to be executed at the current invocation.
348  *
349  * \param[in] kernel_pruneonly  array of prune kernel objects
350  * \param[in] firstPrunePass    true if the first pruning pass is being executed
351  */
352 static inline cl_kernel selectPruneKernel(cl_kernel kernel_pruneonly[], bool firstPrunePass)
353 {
354     cl_kernel* kernelPtr;
355
356     if (firstPrunePass)
357     {
358         kernelPtr = &(kernel_pruneonly[epruneFirst]);
359     }
360     else
361     {
362         kernelPtr = &(kernel_pruneonly[epruneRolling]);
363     }
364     // TODO: consider creating the prune kernel object here to avoid a
365     // clCreateKernel for the rolling prune kernel if this is not needed.
366     return *kernelPtr;
367 }
368
369 /*! \brief Returns the nonbonded kernel to be executed at the current step.
370  *  OpenCL kernel objects are cached in nb. If the requested kernel is not
371  *  found in the cache, it will be created and the cache will be updated.
372  */
373 static inline cl_kernel
374 select_nbnxn_kernel(NbnxmGpu* nb, enum ElecType elecType, enum VdwType vdwType, bool bDoEne, bool bDoPrune)
375 {
376     const char* kernel_name_to_run;
377     cl_kernel*  kernel_ptr;
378     cl_int      cl_error;
379
380     const int elecTypeIdx = static_cast<int>(elecType);
381     const int vdwTypeIdx  = static_cast<int>(vdwType);
382
383     GMX_ASSERT(elecTypeIdx < c_numElecTypes,
384                "The electrostatics type requested is not implemented in the OpenCL kernels.");
385     GMX_ASSERT(vdwTypeIdx < c_numVdwTypes,
386                "The VdW type requested is not implemented in the OpenCL kernels.");
387
388     if (bDoEne)
389     {
390         if (bDoPrune)
391         {
392             kernel_name_to_run = nb_kfunc_ener_prune_ptr[elecTypeIdx][vdwTypeIdx];
393             kernel_ptr         = &(nb->kernel_ener_prune_ptr[elecTypeIdx][vdwTypeIdx]);
394         }
395         else
396         {
397             kernel_name_to_run = nb_kfunc_ener_noprune_ptr[elecTypeIdx][vdwTypeIdx];
398             kernel_ptr         = &(nb->kernel_ener_noprune_ptr[elecTypeIdx][vdwTypeIdx]);
399         }
400     }
401     else
402     {
403         if (bDoPrune)
404         {
405             kernel_name_to_run = nb_kfunc_noener_prune_ptr[elecTypeIdx][vdwTypeIdx];
406             kernel_ptr         = &(nb->kernel_noener_prune_ptr[elecTypeIdx][vdwTypeIdx]);
407         }
408         else
409         {
410             kernel_name_to_run = nb_kfunc_noener_noprune_ptr[elecTypeIdx][vdwTypeIdx];
411             kernel_ptr         = &(nb->kernel_noener_noprune_ptr[elecTypeIdx][vdwTypeIdx]);
412         }
413     }
414
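    /* Kernel objects are created lazily: on first use of a given flavor, the kernel is
       created from the already-built program and cached in nb for subsequent steps. */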
415     if (nullptr == kernel_ptr[0])
416     {
417         *kernel_ptr = clCreateKernel(nb->dev_rundata->program, kernel_name_to_run, &cl_error);
418         GMX_ASSERT(cl_error == CL_SUCCESS,
419                    ("clCreateKernel failed: " + ocl_get_error_string(cl_error)
420                     + " for kernel named " + kernel_name_to_run)
421                            .c_str());
422     }
423
424     return *kernel_ptr;
425 }
426
427 /*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use.
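 *
 *  As a rough illustration, assuming the typical GPU cluster setup (c_clSize = 8,
 *  c_nbnxnGpuNumClusterPerSupercluster = 8, c_nbnxnGpuJgroupSize = 4): the i-atom x+q
 *  preload alone takes 8*8*4*sizeof(float) = 1 KiB per work-group, and the total is
 *  on the order of 2 KiB when LJ-parameter prefetching is enabled.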
428  */
429 static inline int calc_shmem_required_nonbonded(enum VdwType vdwType, bool bPrefetchLjParam)
430 {
431     int shmem;
432
433     /* size of shmem (force-buffers/xq/atom type preloading) */
434     /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
435     /* i-atom x+q in shared memory */
436     shmem = c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(float) * 4; /* xqib */
437     /* cj in shared memory, for both warps separately
438      * TODO: in the "nowarp" kernels we load cj only once, so the factor 2 is not needed.
439      */
440     shmem += 2 * c_nbnxnGpuJgroupSize * sizeof(int); /* cjs  */
441     if (bPrefetchLjParam)
442     {
443         if (useLjCombRule(vdwType))
444         {
445             /* i-atom LJ combination parameters in shared memory */
446             shmem += c_nbnxnGpuNumClusterPerSupercluster * c_clSize * 2
447                      * sizeof(float); /* atib abused for ljcp, float2 */
448         }
449         else
450         {
451             /* i-atom types in shared memory */
452             shmem += c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(int); /* atib */
453         }
454     }
455     /* force reduction buffers in shared memory */
456     shmem += c_clSize * c_clSize * 3 * sizeof(float); /* f_buf */
457     /* Warp vote flags. Strictly, this should be multiplied by the number of warps in the block. */
458     shmem += sizeof(cl_uint) * 2; /* warp_any */
459     return shmem;
460 }
461
462 /*! \brief Initializes data structures that are going to be sent to the OpenCL device.
463  *
464  *  The device can't use the same data structures as the host for two main reasons:
465  *  - OpenCL restrictions (pointers are not accepted inside data structures)
466  *  - some host side fields are not needed for the OpenCL kernels.
467  *
468  *  This function is called before the launch of both nbnxn and prune kernels.
469  */
470 static void fillin_ocl_structures(NBParamGpu* nbp, cl_nbparam_params_t* nbparams_params)
471 {
472     nbparams_params->coulomb_tab_scale = nbp->coulomb_tab_scale;
473     nbparams_params->c_rf              = nbp->c_rf;
474     nbparams_params->dispersion_shift  = nbp->dispersion_shift;
475     nbparams_params->elecType          = nbp->elecType;
476     nbparams_params->epsfac            = nbp->epsfac;
477     nbparams_params->ewaldcoeff_lj     = nbp->ewaldcoeff_lj;
478     nbparams_params->ewald_beta        = nbp->ewald_beta;
479     nbparams_params->rcoulomb_sq       = nbp->rcoulomb_sq;
480     nbparams_params->repulsion_shift   = nbp->repulsion_shift;
481     nbparams_params->rlistOuter_sq     = nbp->rlistOuter_sq;
482     nbparams_params->rvdw_sq           = nbp->rvdw_sq;
483     nbparams_params->rlistInner_sq     = nbp->rlistInner_sq;
484     nbparams_params->rvdw_switch       = nbp->rvdw_switch;
485     nbparams_params->sh_ewald          = nbp->sh_ewald;
486     nbparams_params->sh_lj_ewald       = nbp->sh_lj_ewald;
487     nbparams_params->two_k_rf          = nbp->two_k_rf;
488     nbparams_params->vdwType           = nbp->vdwType;
489     nbparams_params->vdw_switch        = nbp->vdw_switch;
490 }
491
492 /*! \brief Enqueues a wait for event completion.
493  *
494  * Then it releases the event and resets the pointer to nullptr.
495  * Don't use this function when more than one wait will be issued for the event.
496  * Roughly equivalent to cudaStreamWaitEvent(). */
497 static void sync_ocl_event(cl_command_queue stream, cl_event* ocl_event)
498 {
499     cl_int gmx_unused cl_error;
500
501     /* Enqueue wait */
502     cl_error = clEnqueueBarrierWithWaitList(stream, 1, ocl_event, nullptr);
503     GMX_RELEASE_ASSERT(CL_SUCCESS == cl_error, ocl_get_error_string(cl_error).c_str());
504
505     /* Release the event and reset the pointer to nullptr. It is OK to release it here, as the enqueued barrier performs an implicit retain on the events it waits on. */
506     cl_error = clReleaseEvent(*ocl_event);
507     GMX_ASSERT(cl_error == CL_SUCCESS,
508                ("clReleaseEvent failed: " + ocl_get_error_string(cl_error)).c_str());
509     *ocl_event = nullptr;
510 }
511
512 /*! \brief Asynchronously launches the host-to-device copy of the xq buffer. */
513 void gpu_copy_xq_to_gpu(NbnxmGpu* nb, const nbnxn_atomdata_t* nbatom, const AtomLocality atomLocality)
514 {
515     GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");
516
517     const InteractionLocality iloc = gpuAtomToInteractionLocality(atomLocality);
518
519     /* local/nonlocal offset and length used for xq and f */
520     int adat_begin, adat_len;
521
522     cl_atomdata_t*      adat         = nb->atdat;
523     gpu_plist*          plist        = nb->plist[iloc];
524     cl_timers_t*        t            = nb->timers;
525     const DeviceStream& deviceStream = *nb->deviceStreams[iloc];
526
527     bool bDoTime = nb->bDoTime;
528
529     /* Don't launch the non-local H2D copy if there is no dependent
530        work to do: neither non-local nor other (e.g. bonded) work
531        to do that has as input the nbnxn coordinates.
532        Doing the same for the local kernel is more complicated, since the
533        local part of the force array also depends on the non-local kernel.
534        So to avoid complicating the code and to reduce the risk of bugs,
535        we always call the local x+q copy (and the rest of the local
536        work in nbnxn_gpu_launch_kernel()).
537      */
538     if ((iloc == InteractionLocality::NonLocal) && !haveGpuShortRangeWork(*nb, iloc))
539     {
540         plist->haveFreshList = false;
541
542         return;
543     }
544
545     /* calculate the atom data index range based on locality */
546     if (atomLocality == AtomLocality::Local)
547     {
548         adat_begin = 0;
549         adat_len   = adat->natoms_local;
550     }
551     else
552     {
553         adat_begin = adat->natoms_local;
554         adat_len   = adat->natoms - adat->natoms_local;
555     }
556
557     /* beginning of timed HtoD section */
558     if (bDoTime)
559     {
560         t->xf[atomLocality].nb_h2d.openTimingRegion(deviceStream);
561     }
562
563     /* HtoD x, q */
564     GMX_ASSERT(sizeof(float) == sizeof(*nbatom->x().data()),
565                "The size of the xyzq buffer element should be equal to the size of float4.");
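    /* Note that the offsets and lengths below are counted in floats: each atom stores
       4 floats (x, y, z, q), hence the factor 4. */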
566     copyToDeviceBuffer(&adat->xq,
567                        nbatom->x().data() + adat_begin * 4,
568                        adat_begin * 4,
569                        adat_len * 4,
570                        deviceStream,
571                        GpuApiCallBehavior::Async,
572                        bDoTime ? t->xf[atomLocality].nb_h2d.fetchNextEvent() : nullptr);
573
574     if (bDoTime)
575     {
576         t->xf[atomLocality].nb_h2d.closeTimingRegion(deviceStream);
577     }
578
579     /* When we get here, all misc operations issued in the local stream as well as
580        the local xq H2D are done,
581        so we record that in the local stream and wait for it in the nonlocal one. */
582     if (nb->bUseTwoStreams)
583     {
584         if (iloc == InteractionLocality::Local)
585         {
586             cl_int gmx_used_in_debug cl_error = clEnqueueMarkerWithWaitList(
587                     deviceStream.stream(), 0, nullptr, &(nb->misc_ops_and_local_H2D_done));
588             GMX_ASSERT(cl_error == CL_SUCCESS,
589                        ("clEnqueueMarkerWithWaitList failed: " + ocl_get_error_string(cl_error)).c_str());
590
591             /* Based on the v1.2 section 5.13 of the OpenCL spec, a flush is needed
592              * in the local stream in order to be able to sync with the above event
593              * from the non-local stream.
594              */
595             cl_error = clFlush(deviceStream.stream());
596             GMX_ASSERT(cl_error == CL_SUCCESS,
597                        ("clFlush failed: " + ocl_get_error_string(cl_error)).c_str());
598         }
599         else
600         {
601             sync_ocl_event(deviceStream.stream(), &(nb->misc_ops_and_local_H2D_done));
602         }
603     }
604 }
605
606
607 /*! \brief Launch GPU kernel
608
609    As we execute the nonbonded workload in separate queues, before launching
610    the kernel we need to make sure that the following operations have completed:
611    - atomdata allocation and related H2D transfers (every nstlist step);
612    - pair list H2D transfer (every nstlist step);
613    - shift vector H2D transfer (every nstlist step);
614    - force (+shift force and energy) output clearing (every step).
615
616    These operations are issued in the local queue at the beginning of the step
617    and therefore always complete before the local kernel launch. The non-local
618    kernel is launched after the local on the same device/context, so this is
619    inherently scheduled after the operations in the local stream (including the
620    above "misc_ops").
621    However, for the sake of having a future-proof implementation, we use the
622    misc_ops_and_local_H2D_done event to record the point in time when the above operations
623    are finished and synchronize with this event in the non-local stream.
624  */
625 void gpu_launch_kernel(NbnxmGpu* nb, const gmx::StepWorkload& stepWork, const Nbnxm::InteractionLocality iloc)
626 {
627     cl_atomdata_t*      adat         = nb->atdat;
628     NBParamGpu*         nbp          = nb->nbparam;
629     gpu_plist*          plist        = nb->plist[iloc];
630     cl_timers_t*        t            = nb->timers;
631     const DeviceStream& deviceStream = *nb->deviceStreams[iloc];
632
633     bool bDoTime = nb->bDoTime;
634
635     cl_nbparam_params_t nbparams_params;
636
637     /* Don't launch the non-local kernel if there is no work to do.
638        Doing the same for the local kernel is more complicated, since the
639        local part of the force array also depends on the non-local kernel.
640        So to avoid complicating the code and to reduce the risk of bugs,
641        we always call the local kernel and later (not in
642        this function) the stream wait, local f copyback and the f buffer
643        clearing. All these operations, except for the local interaction kernel,
644        are needed for the non-local interactions. The skip of the local kernel
645        call is taken care of later in this function. */
646     if (canSkipNonbondedWork(*nb, iloc))
647     {
648         plist->haveFreshList = false;
649
650         return;
651     }
652
653     if (nbp->useDynamicPruning && plist->haveFreshList)
654     {
655         /* Prunes for rlistOuter and rlistInner, sets plist->haveFreshList=false
656            (that's the way the timing accounting can distinguish between
657            separate prune kernel and combined force+prune).
658          */
659         Nbnxm::gpu_launch_kernel_pruneonly(nb, iloc, 1);
660     }
661
662     if (plist->nsci == 0)
663     {
664         /* Don't launch an empty local kernel (an empty launch is not allowed with OpenCL).
665          */
666         return;
667     }
668
669     /* beginning of timed nonbonded calculation section */
670     if (bDoTime)
671     {
672         t->interaction[iloc].nb_k.openTimingRegion(deviceStream);
673     }
674
675     /* kernel launch config */
676
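    /* The work-group is c_clSize x c_clSize work items, matching an i-cluster/j-cluster
       pair, and the 1D grid contains one work-group per pair-list super-cluster entry. */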
677     KernelLaunchConfig config;
678     config.sharedMemorySize = calc_shmem_required_nonbonded(nbp->vdwType, nb->bPrefetchLjParam);
679     config.blockSize[0]     = c_clSize;
680     config.blockSize[1]     = c_clSize;
681     config.gridSize[0]      = plist->nsci;
682
683     validate_global_work_size(config, 3, &nb->deviceContext_->deviceInfo());
684
685     if (debug)
686     {
687         fprintf(debug,
688                 "Non-bonded GPU launch configuration:\n\tLocal work size: %zux%zux%zu\n\t"
689                 "Global work size : %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n",
690                 config.blockSize[0],
691                 config.blockSize[1],
692                 config.blockSize[2],
693                 config.blockSize[0] * config.gridSize[0],
694                 config.blockSize[1] * config.gridSize[1],
695                 plist->nsci * c_nbnxnGpuNumClusterPerSupercluster,
696                 c_nbnxnGpuNumClusterPerSupercluster,
697                 plist->na_c);
698     }
699
700     fillin_ocl_structures(nbp, &nbparams_params);
701
702     auto*          timingEvent  = bDoTime ? t->interaction[iloc].nb_k.fetchNextEvent() : nullptr;
703     constexpr char kernelName[] = "k_calc_nb";
704     const auto     kernel =
705             select_nbnxn_kernel(nb,
706                                 nbp->elecType,
707                                 nbp->vdwType,
708                                 stepWork.computeEnergy,
709                                 (plist->haveFreshList && !nb->timers->interaction[iloc].didPrune));
710
711
712     // The OpenCL kernel takes int as second to last argument because bool is
713     // not supported as a kernel argument type (sizeof(bool) is implementation defined).
714     const int computeFshift = static_cast<int>(stepWork.computeVirial);
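    /* The two argument lists below differ only in the per-atom Lennard-Jones data passed
       to the kernel: LJ combination-rule parameters vs. atom types plus the number of types. */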
715     if (useLjCombRule(nb->nbparam->vdwType))
716     {
717         const auto kernelArgs = prepareGpuKernelArguments(kernel,
718                                                           config,
719                                                           &nbparams_params,
720                                                           &adat->xq,
721                                                           &adat->f,
722                                                           &adat->e_lj,
723                                                           &adat->e_el,
724                                                           &adat->fshift,
725                                                           &adat->lj_comb,
726                                                           &adat->shift_vec,
727                                                           &nbp->nbfp,
728                                                           &nbp->nbfp_comb,
729                                                           &nbp->coulomb_tab,
730                                                           &plist->sci,
731                                                           &plist->cj4,
732                                                           &plist->excl,
733                                                           &computeFshift);
734
735         launchGpuKernel(kernel, config, deviceStream, timingEvent, kernelName, kernelArgs);
736     }
737     else
738     {
739         const auto kernelArgs = prepareGpuKernelArguments(kernel,
740                                                           config,
741                                                           &adat->ntypes,
742                                                           &nbparams_params,
743                                                           &adat->xq,
744                                                           &adat->f,
745                                                           &adat->e_lj,
746                                                           &adat->e_el,
747                                                           &adat->fshift,
748                                                           &adat->atom_types,
749                                                           &adat->shift_vec,
750                                                           &nbp->nbfp,
751                                                           &nbp->nbfp_comb,
752                                                           &nbp->coulomb_tab,
753                                                           &plist->sci,
754                                                           &plist->cj4,
755                                                           &plist->excl,
756                                                           &computeFshift);
757         launchGpuKernel(kernel, config, deviceStream, timingEvent, kernelName, kernelArgs);
758     }
759
760     if (bDoTime)
761     {
762         t->interaction[iloc].nb_k.closeTimingRegion(deviceStream);
763     }
764 }
765
766
767 /*! \brief Calculates the amount of shared memory required by the prune kernel.
768  *
769  *  Note that for the sake of simplicity we use the CUDA terminology "shared memory"
770  *  for OpenCL local memory.
771  *
772  * \param[in] num_threads_z  cj4 concurrency, i.e. the number of threads/work items in the 3rd dimension
773  * \returns   the amount of local memory in bytes required by the pruning kernel
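 *
 * As a rough illustration, assuming num_threads_z = 4 and the typical 8x8 cluster setup,
 * this amounts to roughly 1.2 KiB of local memory per work-group.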
774  */
775 static inline int calc_shmem_required_prune(const int num_threads_z)
776 {
777     int shmem;
778
779     /* i-atom x in shared memory (for convenience we load all 4 components including q) */
780     shmem = c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(float) * 4;
781     /* cj in shared memory, for each warp separately
782      * Note: only need to load once per wavefront, but to keep the code simple,
783      * for now we load twice on AMD.
784      */
785     shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);
786     /* Warp vote, requires one uint per warp/32 threads per block. */
787     shmem += sizeof(cl_uint) * 2 * num_threads_z;
788
789     return shmem;
790 }
791
792 /*! \brief
793  * Launch the pairlist prune-only kernel for the given locality.
794  * \p numParts tells in how many parts, i.e. calls, the list will be pruned.
795  */
796 void gpu_launch_kernel_pruneonly(NbnxmGpu* nb, const InteractionLocality iloc, const int numParts)
797 {
798     cl_atomdata_t*      adat         = nb->atdat;
799     NBParamGpu*         nbp          = nb->nbparam;
800     gpu_plist*          plist        = nb->plist[iloc];
801     cl_timers_t*        t            = nb->timers;
802     const DeviceStream& deviceStream = *nb->deviceStreams[iloc];
803     bool                bDoTime      = nb->bDoTime;
804
805     if (plist->haveFreshList)
806     {
807         GMX_ASSERT(numParts == 1, "With first pruning we expect 1 part");
808
809         /* Set rollingPruningNumParts to signal that it is not set */
810         plist->rollingPruningNumParts = 0;
811         plist->rollingPruningPart     = 0;
812     }
813     else
814     {
815         if (plist->rollingPruningNumParts == 0)
816         {
817             plist->rollingPruningNumParts = numParts;
818         }
819         else
820         {
821             GMX_ASSERT(numParts == plist->rollingPruningNumParts,
822                        "It is not allowed to change numParts in between list generation steps");
823         }
824     }
825
826     /* Use a local variable for part and update in plist, so we can return here
827      * without duplicating the part increment code.
828      */
829     int part = plist->rollingPruningPart;
830
831     plist->rollingPruningPart++;
832     if (plist->rollingPruningPart >= plist->rollingPruningNumParts)
833     {
834         plist->rollingPruningPart = 0;
835     }
836
837     /* Compute the number of list entries to prune in this pass */
838     int numSciInPart = (plist->nsci - part) / numParts;
839
840     /* Don't launch the kernel if there is no work to do. */
841     if (numSciInPart <= 0)
842     {
843         plist->haveFreshList = false;
844
845         return;
846     }
847
848     GpuRegionTimer* timer = nullptr;
849     if (bDoTime)
850     {
851         timer = &(plist->haveFreshList ? t->interaction[iloc].prune_k : t->interaction[iloc].rollingPrune_k);
852     }
853
854     /* beginning of timed prune calculation section */
855     if (bDoTime)
856     {
857         timer->openTimingRegion(deviceStream);
858     }
859
860     /* Kernel launch config:
861      * - The thread block dimensions match the size of i-clusters, j-clusters,
862      *   and j-cluster concurrency, in x, y, and z, respectively.
863      * - The 1D block-grid contains as many blocks as super-clusters.
864      */
865     int num_threads_z = c_oclPruneKernelJ4ConcurrencyDEFAULT;
866
867
868     /* kernel launch config */
869     KernelLaunchConfig config;
870     config.sharedMemorySize = calc_shmem_required_prune(num_threads_z);
871     config.blockSize[0]     = c_clSize;
872     config.blockSize[1]     = c_clSize;
873     config.blockSize[2]     = num_threads_z;
874     config.gridSize[0]      = numSciInPart;
875
876     validate_global_work_size(config, 3, &nb->deviceContext_->deviceInfo());
877
878     if (debug)
879     {
880         fprintf(debug,
881                 "Pruning GPU kernel launch configuration:\n\tLocal work size: %zux%zux%zu\n\t"
882                 "\tGlobal work size: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n"
883                 "\tShMem: %zu\n",
884                 config.blockSize[0],
885                 config.blockSize[1],
886                 config.blockSize[2],
887                 config.blockSize[0] * config.gridSize[0],
888                 config.blockSize[1] * config.gridSize[1],
889                 plist->nsci * c_nbnxnGpuNumClusterPerSupercluster,
890                 c_nbnxnGpuNumClusterPerSupercluster,
891                 plist->na_c,
892                 config.sharedMemorySize);
893     }
894
895     cl_nbparam_params_t nbparams_params;
896     fillin_ocl_structures(nbp, &nbparams_params);
897
898     auto*          timingEvent  = bDoTime ? timer->fetchNextEvent() : nullptr;
899     constexpr char kernelName[] = "k_pruneonly";
900     const auto     pruneKernel  = selectPruneKernel(nb->kernel_pruneonly, plist->haveFreshList);
901     const auto     kernelArgs   = prepareGpuKernelArguments(pruneKernel,
902                                                       config,
903                                                       &nbparams_params,
904                                                       &adat->xq,
905                                                       &adat->shift_vec,
906                                                       &plist->sci,
907                                                       &plist->cj4,
908                                                       &plist->imask,
909                                                       &numParts,
910                                                       &part);
911     launchGpuKernel(pruneKernel, config, deviceStream, timingEvent, kernelName, kernelArgs);
912
913     if (plist->haveFreshList)
914     {
915         plist->haveFreshList = false;
916         /* Mark that pruning has been done */
917         nb->timers->interaction[iloc].didPrune = true;
918     }
919     else
920     {
921         /* Mark that rolling pruning has been done */
922         nb->timers->interaction[iloc].didRollingPrune = true;
923     }
924
925     if (bDoTime)
926     {
927         timer->closeTimingRegion(deviceStream);
928     }
929 }
930
931 /*! \brief
932  * Launch asynchronously the download of nonbonded forces from the GPU
933  * (and energies/shift forces if required).
934  */
935 void gpu_launch_cpyback(NbnxmGpu*                nb,
936                         struct nbnxn_atomdata_t* nbatom,
937                         const gmx::StepWorkload& stepWork,
938                         const AtomLocality       aloc)
939 {
940     GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");
941
942     cl_int gmx_unused cl_error;
943     int               adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */
944
945     /* determine interaction locality from atom locality */
946     const InteractionLocality iloc = gpuAtomToInteractionLocality(aloc);
947
948     cl_atomdata_t*      adat         = nb->atdat;
949     cl_timers_t*        t            = nb->timers;
950     bool                bDoTime      = nb->bDoTime;
951     const DeviceStream& deviceStream = *nb->deviceStreams[iloc];
952
953     /* don't launch non-local copy-back if there was no non-local work to do */
954     if ((iloc == InteractionLocality::NonLocal) && !haveGpuShortRangeWork(*nb, iloc))
955     {
956         /* TODO An alternative way to signal that non-local work is
957            complete is to use a clEnqueueMarker+clEnqueueBarrier
958            pair. However, the use of bNonLocalStreamActive has the
959            advantage of being local to the host, so probably minimizes
960            overhead. Curiously, for NVIDIA OpenCL with an empty-domain
961            test case, overall simulation performance was higher with
962            the API calls, but this has not been tested on AMD OpenCL,
963            so could be worth considering in future. */
964         nb->bNonLocalStreamActive = CL_FALSE;
965         return;
966     }
967
968     getGpuAtomRange(adat, aloc, &adat_begin, &adat_len);
969
970     /* beginning of timed D2H section */
971     if (bDoTime)
972     {
973         t->xf[aloc].nb_d2h.openTimingRegion(deviceStream);
974     }
975
976     /* With DD the local D2H transfer can only start after the non-local
977        has been launched. */
978     if (iloc == InteractionLocality::Local && nb->bNonLocalStreamActive)
979     {
980         sync_ocl_event(deviceStream.stream(), &(nb->nonlocal_done));
981     }
982
983     /* DtoH f */
984     GMX_ASSERT(sizeof(*nbatom->out[0].f.data()) == sizeof(float),
985                "The host force buffer should be in single precision to match device data size.");
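    /* The device force buffer holds DIM (= 3) float components per atom, hence the
       offsets and lengths below are scaled by DIM. */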
986     copyFromDeviceBuffer(&nbatom->out[0].f[adat_begin * DIM],
987                          &adat->f,
988                          adat_begin * DIM,
989                          adat_len * DIM,
990                          deviceStream,
991                          GpuApiCallBehavior::Async,
992                          bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);
993
994     /* kick off work */
995     cl_error = clFlush(deviceStream.stream());
996     GMX_ASSERT(cl_error == CL_SUCCESS, ("clFlush failed: " + ocl_get_error_string(cl_error)).c_str());
997
998     /* After the non-local D2H is launched the nonlocal_done event can be
999        recorded which signals that the local D2H can proceed. This event is not
1000        placed after the non-local kernel because we first need the non-local
1001        data back. */
1002     if (iloc == InteractionLocality::NonLocal)
1003     {
1004         cl_error = clEnqueueMarkerWithWaitList(deviceStream.stream(), 0, nullptr, &(nb->nonlocal_done));
1005         GMX_ASSERT(cl_error == CL_SUCCESS,
1006                    ("clEnqueueMarkerWithWaitList failed: " + ocl_get_error_string(cl_error)).c_str());
1007         nb->bNonLocalStreamActive = CL_TRUE;
1008     }
1009
1010     /* only transfer energies in the local stream */
1011     if (iloc == InteractionLocality::Local)
1012     {
1013         /* DtoH fshift when virial is needed */
1014         if (stepWork.computeVirial)
1015         {
1016             GMX_ASSERT(sizeof(*nb->nbst.fshift) == DIM * sizeof(float),
1017                        "Sizes of host- and device-side shift vector elements should be the same.");
1018             copyFromDeviceBuffer(reinterpret_cast<float*>(nb->nbst.fshift),
1019                                  &adat->fshift,
1020                                  0,
1021                                  SHIFTS * DIM,
1022                                  deviceStream,
1023                                  GpuApiCallBehavior::Async,
1024                                  bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);
1025         }
1026
1027         /* DtoH energies */
1028         if (stepWork.computeEnergy)
1029         {
1030             GMX_ASSERT(sizeof(*nb->nbst.e_lj) == sizeof(float),
1031                        "Sizes of host- and device-side LJ energy terms should be the same.");
1032             copyFromDeviceBuffer(nb->nbst.e_lj,
1033                                  &adat->e_lj,
1034                                  0,
1035                                  1,
1036                                  deviceStream,
1037                                  GpuApiCallBehavior::Async,
1038                                  bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);
1039             GMX_ASSERT(sizeof(*nb->nbst.e_el) == sizeof(float),
1040                        "Sizes of host- and device-side electrostatic energy terms should be the "
1041                        "same.");
1042             copyFromDeviceBuffer(nb->nbst.e_el,
1043                                  &adat->e_el,
1044                                  0,
1045                                  1,
1046                                  deviceStream,
1047                                  GpuApiCallBehavior::Async,
1048                                  bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);
1049         }
1050     }
1051
1052     if (bDoTime)
1053     {
1054         t->xf[aloc].nb_d2h.closeTimingRegion(deviceStream);
1055     }
1056 }
1057
1058 } // namespace Nbnxm