src/gromacs/nbnxm/opencl/nbnxm_ocl.cpp
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
5  * Copyright (c) 2017,2018,2019,2020, by the GROMACS development team, led by
6  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
7  * and including many others, as listed in the AUTHORS file in the
8  * top-level source directory and at http://www.gromacs.org.
9  *
10  * GROMACS is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public License
12  * as published by the Free Software Foundation; either version 2.1
13  * of the License, or (at your option) any later version.
14  *
15  * GROMACS is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with GROMACS; if not, see
22  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
23  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
24  *
25  * If you want to redistribute modifications to GROMACS, please
26  * consider that scientific software is very special. Version
27  * control is crucial - bugs must be traceable. We will be happy to
28  * consider code for inclusion in the official distribution, but
29  * derived work must not be called official GROMACS. Details are found
30  * in the README & COPYING files - if they are missing, get the
31  * official version at http://www.gromacs.org.
32  *
33  * To help us fund GROMACS development, we humbly ask that you cite
34  * the research papers on the package. Check out http://www.gromacs.org.
35  */
36 /*! \internal \file
37  *  \brief Define OpenCL implementation of nbnxm_gpu.h
38  *
39  *  \author Anca Hamuraru <anca@streamcomputing.eu>
40  *  \author Teemu Virolainen <teemu@streamcomputing.eu>
41  *  \author Dimitrios Karkoulis <dimitris.karkoulis@gmail.com>
42  *  \author Szilárd Páll <pall.szilard@gmail.com>
43  *  \ingroup module_nbnxm
44  *
45  *  TODO (psz):
46  *  - Add a static const cl_uint c_pruneKernelWorkDim / c_nbnxnKernelWorkDim = 3;
47  *  - Rework the copying of OCL data structures done before every invocation of both
48  *    nb and prune kernels (using fillin_ocl_structures); also consider at the same
49  *    time calling clSetKernelArg only on the updated parameters (if tracking changed
50  *    parameters is feasible);
51  *  - Consider using the event_wait_list argument to clEnqueueNDRangeKernel to mark
52  *    dependencies on the kernel launched: e.g. the non-local nb kernel's dependency
53  *    on the misc_ops_and_local_H2D_done event could be better expressed this way.
54  *
55  *  - Consider extracting common sections of the OpenCL and CUDA nbnxn logic, e.g.:
56  *    - in nbnxn_gpu_launch_kernel_pruneonly() the pre- and post-kernel launch logic
57  *      is identical in the two implementations, so a 3-way split might allow sharing
58  *      code;
59  *
61  */
62 #include "gmxpre.h"
63
64 #include <assert.h>
65 #include <stdlib.h>
66
67 #if defined(_MSVC)
68 #    include <limits>
69 #endif
70
71 #include "thread_mpi/atomic.h"
72
73 #include "gromacs/gpu_utils/device_context.h"
74 #include "gromacs/gpu_utils/gputraits_ocl.h"
75 #include "gromacs/gpu_utils/oclutils.h"
76 #include "gromacs/hardware/device_information.h"
77 #include "gromacs/hardware/hw_info.h"
78 #include "gromacs/mdtypes/simulation_workload.h"
79 #include "gromacs/nbnxm/atomdata.h"
80 #include "gromacs/nbnxm/gpu_common.h"
81 #include "gromacs/nbnxm/gpu_common_utils.h"
82 #include "gromacs/nbnxm/gpu_data_mgmt.h"
83 #include "gromacs/nbnxm/nbnxm.h"
84 #include "gromacs/nbnxm/nbnxm_gpu.h"
85 #include "gromacs/nbnxm/pairlist.h"
86 #include "gromacs/pbcutil/ishift.h"
87 #include "gromacs/timing/gpu_timing.h"
88 #include "gromacs/utility/cstringutil.h"
89 #include "gromacs/utility/fatalerror.h"
90 #include "gromacs/utility/gmxassert.h"
91
92 #include "nbnxm_ocl_types.h"
93
94 namespace Nbnxm
95 {
96
97 /*! \brief Convenience constants */
98 //@{
99 static constexpr int c_clSize = c_nbnxnGpuClusterSize;
100 //@}
101
102
103 /*! \brief Validates the input global work size parameter.
104  */
105 static inline void validate_global_work_size(const KernelLaunchConfig& config,
106                                              int                       work_dim,
107                                              const DeviceInformation*  dinfo)
108 {
109     cl_uint device_size_t_size_bits;
110     cl_uint host_size_t_size_bits;
111
112     GMX_ASSERT(dinfo, "Need a valid device info object");
113
114     size_t global_work_size[3];
115     GMX_ASSERT(work_dim <= 3, "Not supporting hyper-grids just yet");
116     for (int i = 0; i < work_dim; i++)
117     {
118         global_work_size[i] = config.blockSize[i] * config.gridSize[i];
119     }
120
121     /* Each component of a global_work_size must not exceed the range given by the
122        sizeof(device size_t) for the device on which the kernel execution will
123        be enqueued. See:
124        https://www.khronos.org/registry/cl/sdk/1.0/docs/man/xhtml/clEnqueueNDRangeKernel.html
125      */
126     device_size_t_size_bits = dinfo->adress_bits;
127     host_size_t_size_bits   = static_cast<cl_uint>(sizeof(size_t) * 8);
128
129     /* If sizeof(host size_t) <= sizeof(device size_t)
130             => global_work_size components will always be valid
131        else
132             => get device limit for global work size and
133             compare it against each component of global_work_size.
134      */
135     if (host_size_t_size_bits > device_size_t_size_bits)
136     {
137         size_t device_limit;
138
139         device_limit = (1ULL << device_size_t_size_bits) - 1;
140
141         for (int i = 0; i < work_dim; i++)
142         {
143             if (global_work_size[i] > device_limit)
144             {
145                 gmx_fatal(
146                         FARGS,
147                         "Watch out, the input system is too large to simulate!\n"
148                         "The number of nonbonded work units (=number of super-clusters) exceeds the "
149                         "device capabilities. Global work size limit exceeded (%zu > %zu)!",
150                         global_work_size[i],
151                         device_limit);
152             }
153         }
154     }
155 }
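/* Worked example of the check above (purely illustrative): on a 64-bit host
 * driving a device that reports adress_bits == 32, device_limit evaluates to
 * (1ULL << 32) - 1 = 4294967295, so any dimension with
 * blockSize[i] * gridSize[i] above that value triggers the fatal error here
 * rather than an obscure failure inside the OpenCL runtime.
 */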
156
157 /* Constant arrays listing non-bonded kernel function names. The arrays are
158  * organized as 2-dimensional arrays indexed by electrostatics and VdW type.
159  *
160  *  Note that the row- and column-order of the kernel names has to match the
161  *  order of the corresponding enumerated electrostatics and VdW types, resp.,
162  *  defined in nbnxm_ocl_types.h.
163  */
164
165 /*! \brief Force-only kernel function names. */
166 static const char* nb_kfunc_noener_noprune_ptr[c_numElecTypes][c_numVdwTypes] = {
167     { "nbnxn_kernel_ElecCut_VdwLJ_F_opencl",
168       "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_opencl",
169       "nbnxn_kernel_ElecCut_VdwLJCombLB_F_opencl",
170       "nbnxn_kernel_ElecCut_VdwLJFsw_F_opencl",
171       "nbnxn_kernel_ElecCut_VdwLJPsw_F_opencl",
172       "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_opencl",
173       "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_opencl" },
174     { "nbnxn_kernel_ElecRF_VdwLJ_F_opencl",
175       "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_opencl",
176       "nbnxn_kernel_ElecRF_VdwLJCombLB_F_opencl",
177       "nbnxn_kernel_ElecRF_VdwLJFsw_F_opencl",
178       "nbnxn_kernel_ElecRF_VdwLJPsw_F_opencl",
179       "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_opencl",
180       "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_opencl" },
181     { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_opencl",
182       "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_opencl",
183       "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_opencl",
184       "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_opencl",
185       "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_opencl",
186       "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_opencl",
187       "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_opencl" },
188     { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_opencl",
189       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_opencl",
190       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_opencl",
191       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_opencl",
192       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_opencl",
193       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_opencl",
194       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_opencl" },
195     { "nbnxn_kernel_ElecEw_VdwLJ_F_opencl",
196       "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_opencl",
197       "nbnxn_kernel_ElecEw_VdwLJCombLB_F_opencl",
198       "nbnxn_kernel_ElecEw_VdwLJFsw_F_opencl",
199       "nbnxn_kernel_ElecEw_VdwLJPsw_F_opencl",
200       "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_opencl",
201       "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_opencl" },
202     { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_opencl",
203       "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_opencl",
204       "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_opencl",
205       "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_opencl",
206       "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_opencl",
207       "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_opencl",
208       "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_opencl" }
209 };
210
211 /*! \brief Force + energy kernel function names. */
212 static const char* nb_kfunc_ener_noprune_ptr[c_numElecTypes][c_numVdwTypes] = {
213     { "nbnxn_kernel_ElecCut_VdwLJ_VF_opencl",
214       "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_opencl",
215       "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_opencl",
216       "nbnxn_kernel_ElecCut_VdwLJFsw_VF_opencl",
217       "nbnxn_kernel_ElecCut_VdwLJPsw_VF_opencl",
218       "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_opencl",
219       "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_opencl" },
220     { "nbnxn_kernel_ElecRF_VdwLJ_VF_opencl",
221       "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_opencl",
222       "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_opencl",
223       "nbnxn_kernel_ElecRF_VdwLJFsw_VF_opencl",
224       "nbnxn_kernel_ElecRF_VdwLJPsw_VF_opencl",
225       "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_opencl",
226       "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_opencl" },
227     { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_opencl",
228       "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_opencl",
229       "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_opencl",
230       "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_opencl",
231       "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_opencl",
232       "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_opencl",
233       "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_opencl" },
234     { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_opencl",
235       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_opencl",
236       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_opencl",
237       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_opencl",
238       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_opencl",
239       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_opencl",
240       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_opencl" },
241     { "nbnxn_kernel_ElecEw_VdwLJ_VF_opencl",
242       "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_opencl",
243       "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_opencl",
244       "nbnxn_kernel_ElecEw_VdwLJFsw_VF_opencl",
245       "nbnxn_kernel_ElecEw_VdwLJPsw_VF_opencl",
246       "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_opencl",
247       "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_opencl" },
248     { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_opencl",
249       "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_opencl",
250       "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_opencl",
251       "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_opencl",
252       "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_opencl",
253       "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_opencl",
254       "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_opencl" }
255 };
256
257 /*! \brief Force + pruning kernel function names. */
258 static const char* nb_kfunc_noener_prune_ptr[c_numElecTypes][c_numVdwTypes] = {
259     { "nbnxn_kernel_ElecCut_VdwLJ_F_prune_opencl",
260       "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_opencl",
261       "nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_opencl",
262       "nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_opencl",
263       "nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_opencl",
264       "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_opencl",
265       "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_opencl" },
266     { "nbnxn_kernel_ElecRF_VdwLJ_F_prune_opencl",
267       "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_opencl",
268       "nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_opencl",
269       "nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_opencl",
270       "nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_opencl",
271       "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_opencl",
272       "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_opencl" },
273     { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_opencl",
274       "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_opencl",
275       "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_opencl",
276       "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_opencl",
277       "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_opencl",
278       "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_opencl",
279       "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_opencl" },
280     { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_opencl",
281       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_opencl",
282       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_opencl",
283       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_opencl",
284       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_opencl",
285       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_opencl",
286       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_opencl" },
287     { "nbnxn_kernel_ElecEw_VdwLJ_F_prune_opencl",
288       "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_opencl",
289       "nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_opencl",
290       "nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_opencl",
291       "nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_opencl",
292       "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_opencl",
293       "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_opencl" },
294     { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_opencl",
295       "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_opencl",
296       "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_opencl",
297       "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_opencl",
298       "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_opencl",
299       "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_opencl",
300       "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_opencl" }
301 };
302
303 /*! \brief Force + energy + pruning kernel function names. */
304 static const char* nb_kfunc_ener_prune_ptr[c_numElecTypes][c_numVdwTypes] = {
305     { "nbnxn_kernel_ElecCut_VdwLJ_VF_prune_opencl",
306       "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_opencl",
307       "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_opencl",
308       "nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_opencl",
309       "nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_opencl",
310       "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_opencl",
311       "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_opencl" },
312     { "nbnxn_kernel_ElecRF_VdwLJ_VF_prune_opencl",
313       "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_opencl",
314       "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_opencl",
315       "nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_opencl",
316       "nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_opencl",
317       "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_opencl",
318       "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_opencl" },
319     { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_opencl",
320       "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_opencl",
321       "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_opencl",
322       "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_opencl",
323       "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_opencl",
324       "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_opencl",
325       "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_opencl" },
326     { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_opencl",
327       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_opencl",
328       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_opencl",
329       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_opencl",
330       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_opencl",
331       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_opencl",
332       "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_opencl" },
333     { "nbnxn_kernel_ElecEw_VdwLJ_VF_prune_opencl",
334       "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_opencl",
335       "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_opencl",
336       "nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_opencl",
337       "nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_opencl",
338       "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_opencl",
339       "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_opencl" },
340     { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_opencl",
341       "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_opencl",
342       "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_opencl",
343       "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_opencl",
344       "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_opencl",
345       "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_opencl",
346       "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_opencl" }
347 };
348
349 /*! \brief Return the prune kernel flavor to be executed at the current invocation.
350  *
351  * \param[in] kernel_pruneonly  array of prune kernel objects
352  * \param[in] firstPrunePass    true if the first pruning pass is being executed
353  */
354 static inline cl_kernel selectPruneKernel(cl_kernel kernel_pruneonly[], bool firstPrunePass)
355 {
356     cl_kernel* kernelPtr;
357
358     if (firstPrunePass)
359     {
360         kernelPtr = &(kernel_pruneonly[epruneFirst]);
361     }
362     else
363     {
364         kernelPtr = &(kernel_pruneonly[epruneRolling]);
365     }
366     // TODO: consider creating the prune kernel object here to avoid a
367     // clCreateKernel for the rolling prune kernel if this is not needed.
368     return *kernelPtr;
369 }
370
371 /*! \brief Return the kernel flavor to be executed at the current step.
372  *  OpenCL kernel objects are cached in nb. If the requested kernel is not
373  *  found in the cache, it will be created and the cache will be updated.
374  */
375 static inline cl_kernel
376 select_nbnxn_kernel(NbnxmGpu* nb, enum ElecType elecType, enum VdwType vdwType, bool bDoEne, bool bDoPrune)
377 {
378     const char* kernel_name_to_run;
379     cl_kernel*  kernel_ptr;
380     cl_int      cl_error;
381
382     const int elecTypeIdx = static_cast<int>(elecType);
383     const int vdwTypeIdx  = static_cast<int>(vdwType);
384
385     GMX_ASSERT(elecTypeIdx < c_numElecTypes,
386                "The electrostatics type requested is not implemented in the OpenCL kernels.");
387     GMX_ASSERT(vdwTypeIdx < c_numVdwTypes,
388                "The VdW type requested is not implemented in the OpenCL kernels.");
389
390     if (bDoEne)
391     {
392         if (bDoPrune)
393         {
394             kernel_name_to_run = nb_kfunc_ener_prune_ptr[elecTypeIdx][vdwTypeIdx];
395             kernel_ptr         = &(nb->kernel_ener_prune_ptr[elecTypeIdx][vdwTypeIdx]);
396         }
397         else
398         {
399             kernel_name_to_run = nb_kfunc_ener_noprune_ptr[elecTypeIdx][vdwTypeIdx];
400             kernel_ptr         = &(nb->kernel_ener_noprune_ptr[elecTypeIdx][vdwTypeIdx]);
401         }
402     }
403     else
404     {
405         if (bDoPrune)
406         {
407             kernel_name_to_run = nb_kfunc_noener_prune_ptr[elecTypeIdx][vdwTypeIdx];
408             kernel_ptr         = &(nb->kernel_noener_prune_ptr[elecTypeIdx][vdwTypeIdx]);
409         }
410         else
411         {
412             kernel_name_to_run = nb_kfunc_noener_noprune_ptr[elecTypeIdx][vdwTypeIdx];
413             kernel_ptr         = &(nb->kernel_noener_noprune_ptr[elecTypeIdx][vdwTypeIdx]);
414         }
415     }
416
417     if (nullptr == kernel_ptr[0])
418     {
419         *kernel_ptr = clCreateKernel(nb->dev_rundata->program, kernel_name_to_run, &cl_error);
420         GMX_ASSERT(cl_error == CL_SUCCESS,
421                    ("clCreateKernel failed: " + ocl_get_error_string(cl_error)
422                     + " for kernel named " + kernel_name_to_run)
423                            .c_str());
424     }
425
426     return *kernel_ptr;
427 }
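/* Illustration of the kernel caching above (a sketch, assuming ElecType::Cut and
 * VdwType::Cut are the first enumerators and thus map to index 0): the first call
 * with those types and bDoEne == bDoPrune == false creates
 * "nbnxn_kernel_ElecCut_VdwLJ_F_opencl" via clCreateKernel and stores the handle
 * in nb->kernel_noener_noprune_ptr[0][0]; later calls with the same flavor reuse
 * the cached kernel object instead of creating it again.
 */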
428
429 /*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use.
430  */
431 static inline int calc_shmem_required_nonbonded(enum VdwType vdwType, bool bPrefetchLjParam)
432 {
433     int shmem;
434
435     /* size of shmem (force-buffers/xq/atom type preloading) */
436     /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
437     /* i-atom x+q in shared memory */
438     shmem = c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(float) * 4; /* xqib */
439     /* cj in shared memory, for both warps separately
440      * TODO: in the "nowarp" kernels we load cj only once, so the factor 2 is not needed.
441      */
442     shmem += 2 * c_nbnxnGpuJgroupSize * sizeof(int); /* cjs  */
443     if (bPrefetchLjParam)
444     {
445         if (useLjCombRule(vdwType))
446         {
447             /* i-atom LJ combination parameters in shared memory */
448             shmem += c_nbnxnGpuNumClusterPerSupercluster * c_clSize * 2
449                      * sizeof(float); /* atib abused for ljcp, float2 */
450         }
451         else
452         {
453             /* i-atom types in shared memory */
454             shmem += c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(int); /* atib */
455         }
456     }
457     /* force reduction buffers in shared memory */
458     shmem += c_clSize * c_clSize * 3 * sizeof(float); /* f_buf */
459     /* Warp vote; strictly this needs sizeof(cl_uint) * (number of warps in the block). */
460     shmem += sizeof(cl_uint) * 2; /* warp_any */
461     return shmem;
462 }
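/* Rough sizing sketch, assuming the typical GPU values c_clSize = 8,
 * c_nbnxnGpuNumClusterPerSupercluster = 8 and c_nbnxnGpuJgroupSize = 4:
 *   xqib:     8 * 8 * 4 * sizeof(float) = 1024 bytes
 *   cjs:      2 * 4 * sizeof(int)       =   32 bytes
 *   atib:     8 * 8 * sizeof(int)       =  256 bytes (512 bytes with LJ comb. parameters)
 *   f_buf:    8 * 8 * 3 * sizeof(float) =  768 bytes
 *   warp_any: 2 * sizeof(cl_uint)       =    8 bytes
 * i.e. roughly 2 KiB of local memory per work-group.
 */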
463
464 /*! \brief Initializes data structures that are going to be sent to the OpenCL device.
465  *
466  *  The device can't use the same data structures as the host for two main reasons:
467  *  - OpenCL restrictions (pointers are not accepted inside data structures)
468  *  - some host side fields are not needed for the OpenCL kernels.
469  *
470  *  This function is called before the launch of both nbnxn and prune kernels.
471  */
472 static void fillin_ocl_structures(NBParamGpu* nbp, cl_nbparam_params_t* nbparams_params)
473 {
474     nbparams_params->coulomb_tab_scale = nbp->coulomb_tab_scale;
475     nbparams_params->c_rf              = nbp->c_rf;
476     nbparams_params->dispersion_shift  = nbp->dispersion_shift;
477     nbparams_params->elecType          = nbp->elecType;
478     nbparams_params->epsfac            = nbp->epsfac;
479     nbparams_params->ewaldcoeff_lj     = nbp->ewaldcoeff_lj;
480     nbparams_params->ewald_beta        = nbp->ewald_beta;
481     nbparams_params->rcoulomb_sq       = nbp->rcoulomb_sq;
482     nbparams_params->repulsion_shift   = nbp->repulsion_shift;
483     nbparams_params->rlistOuter_sq     = nbp->rlistOuter_sq;
484     nbparams_params->rvdw_sq           = nbp->rvdw_sq;
485     nbparams_params->rlistInner_sq     = nbp->rlistInner_sq;
486     nbparams_params->rvdw_switch       = nbp->rvdw_switch;
487     nbparams_params->sh_ewald          = nbp->sh_ewald;
488     nbparams_params->sh_lj_ewald       = nbp->sh_lj_ewald;
489     nbparams_params->two_k_rf          = nbp->two_k_rf;
490     nbparams_params->vdwType           = nbp->vdwType;
491     nbparams_params->vdw_switch        = nbp->vdw_switch;
492 }
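/* Note: the filled-in cl_nbparam_params_t is a flat, pointer-free struct, so it
 * can be passed by value as a kernel argument (clSetKernelArg copies it) in the
 * prepareGpuKernelArguments() calls below; this is exactly what the OpenCL
 * restriction mentioned in the comment above requires.
 */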
493
494 /*! \brief Enqueues a wait for event completion.
495  *
496  * Afterwards the event is released and reset to nullptr.
497  * Don't use this function when more than one wait will be issued for the event.
498  * Equivalent to a CUDA stream synchronization. */
499 static void sync_ocl_event(cl_command_queue stream, cl_event* ocl_event)
500 {
501     cl_int gmx_unused cl_error;
502
503     /* Enqueue wait */
504     cl_error = clEnqueueBarrierWithWaitList(stream, 1, ocl_event, nullptr);
505     GMX_RELEASE_ASSERT(CL_SUCCESS == cl_error, ocl_get_error_string(cl_error).c_str());
506
507     /* Release the event and reset it to nullptr. It is OK to release it here, as clEnqueueBarrierWithWaitList performs an implicit retain on the events in its wait list. */
508     cl_error = clReleaseEvent(*ocl_event);
509     GMX_ASSERT(cl_error == CL_SUCCESS,
510                ("clReleaseEvent failed: " + ocl_get_error_string(cl_error)).c_str());
511     *ocl_event = nullptr;
512 }
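/* In this file sync_ocl_event() is used in two places: to make the non-local
 * stream wait on nb->misc_ops_and_local_H2D_done (in gpu_copy_xq_to_gpu) and to
 * make the local stream wait on nb->nonlocal_done (in gpu_launch_cpyback).
 */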
513
514 /*! \brief Launch asynchronously the xq buffer host to device copy. */
515 void gpu_copy_xq_to_gpu(NbnxmGpu* nb, const nbnxn_atomdata_t* nbatom, const AtomLocality atomLocality)
516 {
517     GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");
518
519     const InteractionLocality iloc = gpuAtomToInteractionLocality(atomLocality);
520
521     /* local/nonlocal offset and length used for xq and f */
522     int adat_begin, adat_len;
523
524     cl_atomdata_t*      adat         = nb->atdat;
525     gpu_plist*          plist        = nb->plist[iloc];
526     cl_timers_t*        t            = nb->timers;
527     const DeviceStream& deviceStream = *nb->deviceStreams[iloc];
528
529     bool bDoTime = nb->bDoTime;
530
531     /* Don't launch the non-local H2D copy if there is no dependent
532        work to do: neither non-local nor other (e.g. bonded) work
533        to do that has as input the nbnxn coordinates.
534        Doing the same for the local kernel is more complicated, since the
535        local part of the force array also depends on the non-local kernel.
536        So to avoid complicating the code and to reduce the risk of bugs,
537        we always call the local x+q copy (and the rest of the local
538        work in nbnxn_gpu_launch_kernel()).
539      */
540     if ((iloc == InteractionLocality::NonLocal) && !haveGpuShortRangeWork(*nb, iloc))
541     {
542         plist->haveFreshList = false;
543
544         return;
545     }
546
547     /* calculate the atom data index range based on locality */
548     if (atomLocality == AtomLocality::Local)
549     {
550         adat_begin = 0;
551         adat_len   = adat->natoms_local;
552     }
553     else
554     {
555         adat_begin = adat->natoms_local;
556         adat_len   = adat->natoms - adat->natoms_local;
557     }
558
559     /* beginning of timed HtoD section */
560     if (bDoTime)
561     {
562         t->xf[atomLocality].nb_h2d.openTimingRegion(deviceStream);
563     }
564
565     /* HtoD x, q */
566     GMX_ASSERT(sizeof(float) == sizeof(*nbatom->x().data()),
567                "The size of the xyzq buffer element should be equal to the size of float4.");
568     copyToDeviceBuffer(&adat->xq,
569                        nbatom->x().data() + adat_begin * 4,
570                        adat_begin * 4,
571                        adat_len * 4,
572                        deviceStream,
573                        GpuApiCallBehavior::Async,
574                        bDoTime ? t->xf[atomLocality].nb_h2d.fetchNextEvent() : nullptr);
575
576     if (bDoTime)
577     {
578         t->xf[atomLocality].nb_h2d.closeTimingRegion(deviceStream);
579     }
580
581     /* When we get here, all misc operations issued in the local stream, as well as
582        the local xq H2D copy, are done,
583        so we record that in the local stream and wait for it in the nonlocal one. */
584     if (nb->bUseTwoStreams)
585     {
586         if (iloc == InteractionLocality::Local)
587         {
588             cl_int gmx_used_in_debug cl_error = clEnqueueMarkerWithWaitList(
589                     deviceStream.stream(), 0, nullptr, &(nb->misc_ops_and_local_H2D_done));
590             GMX_ASSERT(cl_error == CL_SUCCESS,
591                        ("clEnqueueMarkerWithWaitList failed: " + ocl_get_error_string(cl_error)).c_str());
592
593             /* Based on the v1.2 section 5.13 of the OpenCL spec, a flush is needed
594              * in the local stream in order to be able to sync with the above event
595              * from the non-local stream.
596              */
597             cl_error = clFlush(deviceStream.stream());
598             GMX_ASSERT(cl_error == CL_SUCCESS,
599                        ("clFlush failed: " + ocl_get_error_string(cl_error)).c_str());
600         }
601         else
602         {
603             sync_ocl_event(deviceStream.stream(), &(nb->misc_ops_and_local_H2D_done));
604         }
605     }
606 }
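/* Usage sketch (assumed per-step call order with domain decomposition, not taken
 * from this file): the local copy is issued before the non-local one, e.g.
 *
 *   gpu_copy_xq_to_gpu(nb, nbatom, AtomLocality::Local);
 *   gpu_copy_xq_to_gpu(nb, nbatom, AtomLocality::NonLocal);
 *
 * Note that the offsets above are in float units: each atom occupies 4 floats
 * (x, y, z, q), hence the adat_begin * 4 and adat_len * 4 arithmetic.
 */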
607
608
609 /*! \brief Launch GPU kernel
610
611    As we execute the nonbonded workload in separate queues, before launching
612    the kernel we need to make sure that the following operations have completed:
613    - atomdata allocation and related H2D transfers (every nstlist step);
614    - pair list H2D transfer (every nstlist step);
615    - shift vector H2D transfer (every nstlist step);
616    - force (+shift force and energy) output clearing (every step).
617
618    These operations are issued in the local queue at the beginning of the step
619    and therefore always complete before the local kernel launch. The non-local
620    kernel is launched after the local on the same device/context, so this is
621    inherently scheduled after the operations in the local stream (including the
622    above "misc_ops").
623    However, for the sake of having a future-proof implementation, we use the
624    misc_ops_and_local_H2D_done event to record the point in time when the above operations
625    are finished and synchronize with this event in the non-local stream.
626  */
627 void gpu_launch_kernel(NbnxmGpu* nb, const gmx::StepWorkload& stepWork, const Nbnxm::InteractionLocality iloc)
628 {
629     cl_atomdata_t*      adat         = nb->atdat;
630     NBParamGpu*         nbp          = nb->nbparam;
631     gpu_plist*          plist        = nb->plist[iloc];
632     cl_timers_t*        t            = nb->timers;
633     const DeviceStream& deviceStream = *nb->deviceStreams[iloc];
634
635     bool bDoTime = nb->bDoTime;
636
637     cl_nbparam_params_t nbparams_params;
638
639     /* Don't launch the non-local kernel if there is no work to do.
640        Doing the same for the local kernel is more complicated, since the
641        local part of the force array also depends on the non-local kernel.
642        So to avoid complicating the code and to reduce the risk of bugs,
643        we always call the local kernel and later (not in
644        this function) the stream wait, local f copyback and the f buffer
645        clearing. All these operations, except for the local interaction kernel,
646        are needed for the non-local interactions. The skip of the local kernel
647        call is taken care of later in this function. */
648     if (canSkipNonbondedWork(*nb, iloc))
649     {
650         plist->haveFreshList = false;
651
652         return;
653     }
654
655     if (nbp->useDynamicPruning && plist->haveFreshList)
656     {
657         /* Prunes for rlistOuter and rlistInner, sets plist->haveFreshList=false
658            (that's the way the timing accounting can distinguish between
659            separate prune kernel and combined force+prune).
660          */
661         Nbnxm::gpu_launch_kernel_pruneonly(nb, iloc, 1);
662     }
663
664     if (plist->nsci == 0)
665     {
666         /* Don't launch an empty kernel (an empty launch is not allowed with OpenCL).
667          */
668         return;
669     }
670
671     /* beginning of timed nonbonded calculation section */
672     if (bDoTime)
673     {
674         t->interaction[iloc].nb_k.openTimingRegion(deviceStream);
675     }
676
677     /* kernel launch config */
678
679     KernelLaunchConfig config;
680     config.sharedMemorySize = calc_shmem_required_nonbonded(nbp->vdwType, nb->bPrefetchLjParam);
681     config.blockSize[0]     = c_clSize;
682     config.blockSize[1]     = c_clSize;
683     config.gridSize[0]      = plist->nsci;
684
685     validate_global_work_size(config, 3, &nb->deviceContext_->deviceInfo());
686
687     if (debug)
688     {
689         fprintf(debug,
690                 "Non-bonded GPU launch configuration:\n\tLocal work size: %zux%zux%zu\n\t"
691                 "Global work size : %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n",
692                 config.blockSize[0],
693                 config.blockSize[1],
694                 config.blockSize[2],
695                 config.blockSize[0] * config.gridSize[0],
696                 config.blockSize[1] * config.gridSize[1],
697                 plist->nsci * c_nbnxnGpuNumClusterPerSupercluster,
698                 c_nbnxnGpuNumClusterPerSupercluster,
699                 plist->na_c);
700     }
701
702     fillin_ocl_structures(nbp, &nbparams_params);
703
704     auto*          timingEvent  = bDoTime ? t->interaction[iloc].nb_k.fetchNextEvent() : nullptr;
705     constexpr char kernelName[] = "k_calc_nb";
706     const auto     kernel =
707             select_nbnxn_kernel(nb,
708                                 nbp->elecType,
709                                 nbp->vdwType,
710                                 stepWork.computeEnergy,
711                                 (plist->haveFreshList && !nb->timers->interaction[iloc].didPrune));
712
713
714     // The OpenCL kernel takes int as second to last argument because bool is
715     // not supported as a kernel argument type (sizeof(bool) is implementation defined).
716     const int computeFshift = static_cast<int>(stepWork.computeVirial);
717     if (useLjCombRule(nb->nbparam->vdwType))
718     {
719         const auto kernelArgs = prepareGpuKernelArguments(kernel,
720                                                           config,
721                                                           &nbparams_params,
722                                                           &adat->xq,
723                                                           &adat->f,
724                                                           &adat->e_lj,
725                                                           &adat->e_el,
726                                                           &adat->fshift,
727                                                           &adat->lj_comb,
728                                                           &adat->shift_vec,
729                                                           &nbp->nbfp,
730                                                           &nbp->nbfp_comb,
731                                                           &nbp->coulomb_tab,
732                                                           &plist->sci,
733                                                           &plist->cj4,
734                                                           &plist->excl,
735                                                           &computeFshift);
736
737         launchGpuKernel(kernel, config, deviceStream, timingEvent, kernelName, kernelArgs);
738     }
739     else
740     {
741         const auto kernelArgs = prepareGpuKernelArguments(kernel,
742                                                           config,
743                                                           &adat->ntypes,
744                                                           &nbparams_params,
745                                                           &adat->xq,
746                                                           &adat->f,
747                                                           &adat->e_lj,
748                                                           &adat->e_el,
749                                                           &adat->fshift,
750                                                           &adat->atom_types,
751                                                           &adat->shift_vec,
752                                                           &nbp->nbfp,
753                                                           &nbp->nbfp_comb,
754                                                           &nbp->coulomb_tab,
755                                                           &plist->sci,
756                                                           &plist->cj4,
757                                                           &plist->excl,
758                                                           &computeFshift);
759         launchGpuKernel(kernel, config, deviceStream, timingEvent, kernelName, kernelArgs);
760     }
761
762     if (bDoTime)
763     {
764         t->interaction[iloc].nb_k.closeTimingRegion(deviceStream);
765     }
766 }
767
768
769 /*! \brief Calculates the amount of shared memory required by the prune kernel.
770  *
771  *  Note that for the sake of simplicity we use the CUDA terminology "shared memory"
772  *  for OpenCL local memory.
773  *
774  * \param[in] num_threads_z cj4 concurrency, equal to the number of threads/work items in the 3rd dimension
775  * \returns   the amount of local memory in bytes required by the pruning kernel
776  */
777 static inline int calc_shmem_required_prune(const int num_threads_z)
778 {
779     int shmem;
780
781     /* i-atom x in shared memory (for convenience we load all 4 components including q) */
782     shmem = c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(float) * 4;
783     /* cj in shared memory, for each warp separately
784      * Note: only need to load once per wavefront, but to keep the code simple,
785      * for now we load twice on AMD.
786      */
787     shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);
788     /* Warp vote, requires one uint per warp/32 threads per block. */
789     shmem += sizeof(cl_uint) * 2 * num_threads_z;
790
791     return shmem;
792 }
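/* Rough sizing sketch, assuming the typical GPU values c_clSize = 8,
 * c_nbnxnGpuNumClusterPerSupercluster = 8, c_nbnxnGpuJgroupSize = 4,
 * c_nbnxnGpuClusterpairSplit = 2 and num_threads_z = 1:
 *   x-preload: 8 * 8 * 4 * sizeof(float) = 1024 bytes
 *   cjs:       1 * 2 * 4 * sizeof(int)   =   32 bytes
 *   warp_any:  2 * 1 * sizeof(cl_uint)   =    8 bytes
 * i.e. about 1 KiB of local memory per work-group.
 */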
793
794 /*! \brief
795  * Launch the pairlist prune-only kernel for the given locality.
796  * \p numParts tells in how many parts, i.e. calls, the list will be pruned.
797  */
798 void gpu_launch_kernel_pruneonly(NbnxmGpu* nb, const InteractionLocality iloc, const int numParts)
799 {
800     cl_atomdata_t*      adat         = nb->atdat;
801     NBParamGpu*         nbp          = nb->nbparam;
802     gpu_plist*          plist        = nb->plist[iloc];
803     cl_timers_t*        t            = nb->timers;
804     const DeviceStream& deviceStream = *nb->deviceStreams[iloc];
805     bool                bDoTime      = nb->bDoTime;
806
807     if (plist->haveFreshList)
808     {
809         GMX_ASSERT(numParts == 1, "With first pruning we expect 1 part");
810
811         /* Set rollingPruningNumParts to signal that it is not set */
812         plist->rollingPruningNumParts = 0;
813         plist->rollingPruningPart     = 0;
814     }
815     else
816     {
817         if (plist->rollingPruningNumParts == 0)
818         {
819             plist->rollingPruningNumParts = numParts;
820         }
821         else
822         {
823             GMX_ASSERT(numParts == plist->rollingPruningNumParts,
824                        "It is not allowed to change numParts in between list generation steps");
825         }
826     }
827
828     /* Use a local variable for part and update in plist, so we can return here
829      * without duplicating the part increment code.
830      */
831     int part = plist->rollingPruningPart;
832
833     plist->rollingPruningPart++;
834     if (plist->rollingPruningPart >= plist->rollingPruningNumParts)
835     {
836         plist->rollingPruningPart = 0;
837     }
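    /* Example: with numParts == 2, consecutive rolling-prune invocations use
     * part = 0, 1, 0, 1, ... so the whole list is revisited every numParts calls. */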
838
839     /* Compute the number of list entries to prune in this pass */
840     int numSciInPart = (plist->nsci - part) / numParts;
841
842     /* Don't launch the kernel if there is no work to do. */
843     if (numSciInPart <= 0)
844     {
845         plist->haveFreshList = false;
846
847         return;
848     }
849
850     GpuRegionTimer* timer = nullptr;
851     if (bDoTime)
852     {
853         timer = &(plist->haveFreshList ? t->interaction[iloc].prune_k : t->interaction[iloc].rollingPrune_k);
854     }
855
856     /* beginning of timed prune calculation section */
857     if (bDoTime)
858     {
859         timer->openTimingRegion(deviceStream);
860     }
861
862     /* Kernel launch config:
863      * - The thread block dimensions match the size of i-clusters, j-clusters,
864      *   and j-cluster concurrency, in x, y, and z, respectively.
865      * - The 1D block-grid contains as many blocks as super-clusters.
866      */
867     int num_threads_z = c_oclPruneKernelJ4ConcurrencyDEFAULT;
868
869
870     /* kernel launch config */
871     KernelLaunchConfig config;
872     config.sharedMemorySize = calc_shmem_required_prune(num_threads_z);
873     config.blockSize[0]     = c_clSize;
874     config.blockSize[1]     = c_clSize;
875     config.blockSize[2]     = num_threads_z;
876     config.gridSize[0]      = numSciInPart;
877
878     validate_global_work_size(config, 3, &nb->deviceContext_->deviceInfo());
879
880     if (debug)
881     {
882         fprintf(debug,
883                 "Pruning GPU kernel launch configuration:\n\tLocal work size: %zux%zux%zu\n\t"
884                 "\tGlobal work size: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n"
885                 "\tShMem: %zu\n",
886                 config.blockSize[0],
887                 config.blockSize[1],
888                 config.blockSize[2],
889                 config.blockSize[0] * config.gridSize[0],
890                 config.blockSize[1] * config.gridSize[1],
891                 plist->nsci * c_nbnxnGpuNumClusterPerSupercluster,
892                 c_nbnxnGpuNumClusterPerSupercluster,
893                 plist->na_c,
894                 config.sharedMemorySize);
895     }
896
897     cl_nbparam_params_t nbparams_params;
898     fillin_ocl_structures(nbp, &nbparams_params);
899
900     auto*          timingEvent  = bDoTime ? timer->fetchNextEvent() : nullptr;
901     constexpr char kernelName[] = "k_pruneonly";
902     const auto     pruneKernel  = selectPruneKernel(nb->kernel_pruneonly, plist->haveFreshList);
903     const auto     kernelArgs   = prepareGpuKernelArguments(pruneKernel,
904                                                       config,
905                                                       &nbparams_params,
906                                                       &adat->xq,
907                                                       &adat->shift_vec,
908                                                       &plist->sci,
909                                                       &plist->cj4,
910                                                       &plist->imask,
911                                                       &numParts,
912                                                       &part);
913     launchGpuKernel(pruneKernel, config, deviceStream, timingEvent, kernelName, kernelArgs);
914
915     if (plist->haveFreshList)
916     {
917         plist->haveFreshList = false;
918         /* Mark that pruning has been done */
919         nb->timers->interaction[iloc].didPrune = true;
920     }
921     else
922     {
923         /* Mark that rolling pruning has been done */
924         nb->timers->interaction[iloc].didRollingPrune = true;
925     }
926
927     if (bDoTime)
928     {
929         timer->closeTimingRegion(deviceStream);
930     }
931 }
932
933 /*! \brief
934  * Launch asynchronously the download of nonbonded forces from the GPU
935  * (and energies/shift forces if required).
936  */
937 void gpu_launch_cpyback(NbnxmGpu*                nb,
938                         struct nbnxn_atomdata_t* nbatom,
939                         const gmx::StepWorkload& stepWork,
940                         const AtomLocality       aloc)
941 {
942     GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");
943
944     cl_int gmx_unused cl_error;
945     int               adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */
946
947     /* determine interaction locality from atom locality */
948     const InteractionLocality iloc = gpuAtomToInteractionLocality(aloc);
949
950     cl_atomdata_t*      adat         = nb->atdat;
951     cl_timers_t*        t            = nb->timers;
952     bool                bDoTime      = nb->bDoTime;
953     const DeviceStream& deviceStream = *nb->deviceStreams[iloc];
954
955     /* don't launch non-local copy-back if there was no non-local work to do */
956     if ((iloc == InteractionLocality::NonLocal) && !haveGpuShortRangeWork(*nb, iloc))
957     {
958         /* TODO An alternative way to signal that non-local work is
959            complete is to use a clEnqueueMarker+clEnqueueBarrier
960            pair. However, the use of bNonLocalStreamActive has the
961            advantage of being local to the host, so probably minimizes
962            overhead. Curiously, for NVIDIA OpenCL with an empty-domain
963            test case, overall simulation performance was higher with
964            the API calls, but this has not been tested on AMD OpenCL,
965            so could be worth considering in future. */
966         nb->bNonLocalStreamActive = CL_FALSE;
967         return;
968     }
969
970     getGpuAtomRange(adat, aloc, &adat_begin, &adat_len);
971
972     /* beginning of timed D2H section */
973     if (bDoTime)
974     {
975         t->xf[aloc].nb_d2h.openTimingRegion(deviceStream);
976     }
977
978     /* With DD the local D2H transfer can only start after the non-local
979        has been launched. */
980     if (iloc == InteractionLocality::Local && nb->bNonLocalStreamActive)
981     {
982         sync_ocl_event(deviceStream.stream(), &(nb->nonlocal_done));
983     }
984
985     /* DtoH f */
986     GMX_ASSERT(sizeof(*nbatom->out[0].f.data()) == sizeof(float),
987                "The host force buffer should be in single precision to match device data size.");
988     copyFromDeviceBuffer(&nbatom->out[0].f[adat_begin * DIM],
989                          &adat->f,
990                          adat_begin * DIM,
991                          adat_len * DIM,
992                          deviceStream,
993                          GpuApiCallBehavior::Async,
994                          bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);
995
996     /* kick off work */
997     cl_error = clFlush(deviceStream.stream());
998     GMX_ASSERT(cl_error == CL_SUCCESS, ("clFlush failed: " + ocl_get_error_string(cl_error)).c_str());
999
1000     /* After the non-local D2H is launched the nonlocal_done event can be
1001        recorded which signals that the local D2H can proceed. This event is not
1002        placed after the non-local kernel because we need the non-local
1003        data transferred back first. */
1004     if (iloc == InteractionLocality::NonLocal)
1005     {
1006         cl_error = clEnqueueMarkerWithWaitList(deviceStream.stream(), 0, nullptr, &(nb->nonlocal_done));
1007         GMX_ASSERT(cl_error == CL_SUCCESS,
1008                    ("clEnqueueMarkerWithWaitList failed: " + ocl_get_error_string(cl_error)).c_str());
1009         nb->bNonLocalStreamActive = CL_TRUE;
1010     }
1011
1012     /* only transfer energies in the local stream */
1013     if (iloc == InteractionLocality::Local)
1014     {
1015         /* DtoH fshift when virial is needed */
1016         if (stepWork.computeVirial)
1017         {
1018             GMX_ASSERT(sizeof(*nb->nbst.fshift) == DIM * sizeof(float),
1019                        "Sizes of host- and device-side shift vector elements should be the same.");
1020             copyFromDeviceBuffer(reinterpret_cast<float*>(nb->nbst.fshift),
1021                                  &adat->fshift,
1022                                  0,
1023                                  SHIFTS * DIM,
1024                                  deviceStream,
1025                                  GpuApiCallBehavior::Async,
1026                                  bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);
1027         }
1028
1029         /* DtoH energies */
1030         if (stepWork.computeEnergy)
1031         {
1032             GMX_ASSERT(sizeof(*nb->nbst.e_lj) == sizeof(float),
1033                        "Sizes of host- and device-side LJ energy terms should be the same.");
1034             copyFromDeviceBuffer(nb->nbst.e_lj,
1035                                  &adat->e_lj,
1036                                  0,
1037                                  1,
1038                                  deviceStream,
1039                                  GpuApiCallBehavior::Async,
1040                                  bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);
1041             GMX_ASSERT(sizeof(*nb->nbst.e_el) == sizeof(float),
1042                        "Sizes of host- and device-side electrostatic energy terms should be the "
1043                        "same.");
1044             copyFromDeviceBuffer(nb->nbst.e_el,
1045                                  &adat->e_el,
1046                                  0,
1047                                  1,
1048                                  deviceStream,
1049                                  GpuApiCallBehavior::Async,
1050                                  bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);
1051         }
1052     }
1053
1054     if (bDoTime)
1055     {
1056         t->xf[aloc].nb_d2h.closeTimingRegion(deviceStream);
1057     }
1058 }
1059
1060 } // namespace Nbnxm