1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2012,2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35 /*! \internal \file
36  *  \brief Define OpenCL implementation of nbnxm_gpu.h
37  *
38  *  \author Anca Hamuraru <anca@streamcomputing.eu>
39  *  \author Teemu Virolainen <teemu@streamcomputing.eu>
40  *  \author Dimitrios Karkoulis <dimitris.karkoulis@gmail.com>
41  *  \author Szilárd Páll <pall.szilard@gmail.com>
42  *  \ingroup module_nbnxm
43  *
44  *  TODO (psz):
45  *  - Add a static const cl_uint c_pruneKernelWorkDim / c_nbnxnKernelWorkDim = 3;
46  *  - Rework the copying of OCL data structures done before every invocation of both
47  *    nb and prune kernels (using fillin_ocl_structures); also consider at the same
48  *    time calling clSetKernelArg only on the updated parameters (if tracking changed
49  *    parameters is feasible);
50  *  - Consider using the event_wait_list argument to clEnqueueNDRangeKernel to mark
51  *    dependencies on the kernel launched: e.g. the non-local nb kernel's dependency
52  *    on the misc_ops_and_local_H2D_done event could be better expressed this way.
53  *
54  *  - Consider extracting common sections of the OpenCL and CUDA nbnxn logic, e.g:
55  *    - in nbnxn_gpu_launch_kernel_pruneonly() the pre- and post-kernel launch logic
56  *      is identical in the two implementations, so a 3-way split might allow sharing
57  *      code;
59  *
60  */
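/* Illustrative sketch for the event_wait_list TODO above (not compiled; the queue and
 * kernel variable names are hypothetical, only the standard OpenCL C API is assumed).
 * Instead of enqueueing a separate barrier in the non-local queue, the non-local
 * kernel's dependency on the local misc-ops/H2D work could be expressed directly at
 * launch time through the wait list of clEnqueueNDRangeKernel:
 *
 *     const size_t localSize[3]  = { c_clSize, c_clSize, 1 };
 *     const size_t globalSize[3] = { c_clSize * numSci, c_clSize, 1 };
 *     cl_event     deps[]        = { misc_ops_and_local_H2D_done };
 *     cl_int       clError       = clEnqueueNDRangeKernel(nonLocalQueue, nbKernel, 3,
 *                                                         nullptr, globalSize, localSize,
 *                                                         1, deps, nullptr);
 *
 * The runtime retains events passed in the wait list, so the host-side reference can
 * still be released afterwards with clReleaseEvent().
 */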
61 #include "gmxpre.h"
62
63 #include <assert.h>
64 #include <stdlib.h>
65
66 #if defined(_MSVC)
67 #include <limits>
68 #endif
69
70 #include "thread_mpi/atomic.h"
71
72 #include "gromacs/gpu_utils/gputraits_ocl.h"
73 #include "gromacs/gpu_utils/oclutils.h"
74 #include "gromacs/hardware/hw_info.h"
75 #include "gromacs/mdlib/force_flags.h"
76 #include "gromacs/nbnxm/atomdata.h"
77 #include "gromacs/nbnxm/gpu_common.h"
78 #include "gromacs/nbnxm/gpu_common_utils.h"
79 #include "gromacs/nbnxm/gpu_data_mgmt.h"
80 #include "gromacs/nbnxm/nbnxm.h"
81 #include "gromacs/nbnxm/nbnxm_gpu.h"
82 #include "gromacs/nbnxm/pairlist.h"
83 #include "gromacs/pbcutil/ishift.h"
84 #include "gromacs/timing/gpu_timing.h"
85 #include "gromacs/utility/cstringutil.h"
86 #include "gromacs/utility/fatalerror.h"
87 #include "gromacs/utility/gmxassert.h"
88
89 #include "nbnxm_ocl_internal.h"
90 #include "nbnxm_ocl_types.h"
91
92 namespace Nbnxm
93 {
94
95 /*! \brief Convenience constants */
96 //@{
97 static const int c_numClPerSupercl = c_nbnxnGpuNumClusterPerSupercluster;
98 static const int c_clSize          = c_nbnxnGpuClusterSize;
99 //@}
100
101
102 /*! \brief Validates the input global work size parameter.
103  */
104 static inline void validate_global_work_size(const KernelLaunchConfig &config, int work_dim, const gmx_device_info_t *dinfo)
105 {
106     cl_uint device_size_t_size_bits;
107     cl_uint host_size_t_size_bits;
108
109     assert(dinfo);
110
111     size_t global_work_size[3];
112     GMX_ASSERT(work_dim <= 3, "Not supporting hyper-grids just yet");
113     for (int i = 0; i < work_dim; i++)
114     {
115         global_work_size[i] = config.blockSize[i] * config.gridSize[i];
116     }
117
118     /* Each component of a global_work_size must not exceed the range given by the
119        sizeof(device size_t) for the device on which the kernel execution will
120        be enqueued. See:
121        https://www.khronos.org/registry/cl/sdk/1.0/docs/man/xhtml/clEnqueueNDRangeKernel.html
122      */
123     device_size_t_size_bits = dinfo->adress_bits;
124     host_size_t_size_bits   = static_cast<cl_uint>(sizeof(size_t) * 8);
125
126     /* If sizeof(host size_t) <= sizeof(device size_t)
127             => global_work_size components will always be valid
128        else
129             => get device limit for global work size and
130             compare it against each component of global_work_size.
131      */
132     if (host_size_t_size_bits > device_size_t_size_bits)
133     {
134         size_t device_limit;
135
136         device_limit = (1ull << device_size_t_size_bits) - 1;
137
138         for (int i = 0; i < work_dim; i++)
139         {
140             if (global_work_size[i] > device_limit)
141             {
142                 gmx_fatal(FARGS, "Watch out, the input system is too large to simulate!\n"
143                           "The number of nonbonded work units (=number of super-clusters) exceeds the "
144                           "device capabilities. Global work size limit exceeded (%zu > %zu)!",
145                           global_work_size[i], device_limit);
146             }
147         }
148     }
149 }
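/* Worked example for the check above (assuming the usual c_clSize = 8 block edge):
 * a device reporting 32 address bits has a per-dimension limit of 2^32 - 1 =
 * 4294967295 work items, so with global_work_size[0] = c_clSize * nsci the fatal
 * error only triggers for pair lists with more than ~5.4e8 super-cluster entries. */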
150
151 /* Constant arrays listing the non-bonded kernel function names. The arrays are
152  * organized as two-dimensional tables indexed by electrostatics and VDW type.
153  *
154  *  Note that the row- and column-order of the function names has to match the
155  *  order of the corresponding enumerated electrostatics and vdw types, respectively,
156  *  defined in nbnxm_ocl_types.h.
157  */
158
159 /*! \brief Force-only kernel function names. */
160 static const char* nb_kfunc_noener_noprune_ptr[eelOclNR][evdwOclNR] =
161 {
162     { "nbnxn_kernel_ElecCut_VdwLJ_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombLB_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJFsw_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJPsw_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_opencl"            },
163     { "nbnxn_kernel_ElecRF_VdwLJ_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombLB_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJFsw_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJPsw_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_opencl"             },
164     { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_opencl"        },
165     { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_opencl" },
166     { "nbnxn_kernel_ElecEw_VdwLJ_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombLB_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJFsw_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJPsw_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_opencl"             },
167     { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_opencl"      }
168 };
169
170 /*! \brief Force + energy kernel function names. */
171 static const char* nb_kfunc_ener_noprune_ptr[eelOclNR][evdwOclNR] =
172 {
173     { "nbnxn_kernel_ElecCut_VdwLJ_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJFsw_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJPsw_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_opencl"            },
174     { "nbnxn_kernel_ElecRF_VdwLJ_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJFsw_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJPsw_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_opencl"             },
175     { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_opencl"        },
176     { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_opencl" },
177     { "nbnxn_kernel_ElecEw_VdwLJ_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJFsw_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJPsw_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_opencl"             },
178     { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_opencl"      }
179 };
180
181 /*! \brief Force + pruning kernel function names. */
182 static const char* nb_kfunc_noener_prune_ptr[eelOclNR][evdwOclNR] =
183 {
184     { "nbnxn_kernel_ElecCut_VdwLJ_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_opencl"             },
185     { "nbnxn_kernel_ElecRF_VdwLJ_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_opencl"              },
186     { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_opencl"         },
187     { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_opencl"  },
188     { "nbnxn_kernel_ElecEw_VdwLJ_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_opencl"              },
189     { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_opencl"       }
190 };
191
192 /*! \brief Force + energy + pruning kernel function names. */
193 static const char* nb_kfunc_ener_prune_ptr[eelOclNR][evdwOclNR] =
194 {
195     { "nbnxn_kernel_ElecCut_VdwLJ_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_opencl"            },
196     { "nbnxn_kernel_ElecRF_VdwLJ_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_opencl"             },
197     { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_opencl"        },
198     { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_opencl" },
199     { "nbnxn_kernel_ElecEw_VdwLJ_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_opencl"             },
200     { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_opencl"      }
201 };
202
203 /*! \brief Return the prune kernel to be executed at the current invocation.
204  *
205  * \param[in] kernel_pruneonly  array of prune kernel objects
206  * \param[in] firstPrunePass    true if the first pruning pass is being executed
207  */
208 static inline cl_kernel selectPruneKernel(cl_kernel kernel_pruneonly[],
209                                           bool      firstPrunePass)
210 {
211     cl_kernel  *kernelPtr;
212
213     if (firstPrunePass)
214     {
215         kernelPtr = &(kernel_pruneonly[epruneFirst]);
216     }
217     else
218     {
219         kernelPtr = &(kernel_pruneonly[epruneRolling]);
220     }
221     // TODO: consider creating the prune kernel object here to avoid a
222     // clCreateKernel for the rolling prune kernel if this is not needed.
223     return *kernelPtr;
224 }
225
226 /*! \brief Return the kernel version to be executed at the current step.
227  *  OpenCL kernel objects are cached in nb. If the requested kernel is not
228  *  found in the cache, it will be created and the cache will be updated.
229  */
230 static inline cl_kernel select_nbnxn_kernel(gmx_nbnxn_ocl_t   *nb,
231                                             int                eeltype,
232                                             int                evdwtype,
233                                             bool               bDoEne,
234                                             bool               bDoPrune)
235 {
236     const char* kernel_name_to_run;
237     cl_kernel  *kernel_ptr;
238     cl_int      cl_error;
239
240     assert(eeltype  < eelOclNR);
241     assert(evdwtype < evdwOclNR);
242
243     if (bDoEne)
244     {
245         if (bDoPrune)
246         {
247             kernel_name_to_run = nb_kfunc_ener_prune_ptr[eeltype][evdwtype];
248             kernel_ptr         = &(nb->kernel_ener_prune_ptr[eeltype][evdwtype]);
249         }
250         else
251         {
252             kernel_name_to_run = nb_kfunc_ener_noprune_ptr[eeltype][evdwtype];
253             kernel_ptr         = &(nb->kernel_ener_noprune_ptr[eeltype][evdwtype]);
254         }
255     }
256     else
257     {
258         if (bDoPrune)
259         {
260             kernel_name_to_run = nb_kfunc_noener_prune_ptr[eeltype][evdwtype];
261             kernel_ptr         = &(nb->kernel_noener_prune_ptr[eeltype][evdwtype]);
262         }
263         else
264         {
265             kernel_name_to_run = nb_kfunc_noener_noprune_ptr[eeltype][evdwtype];
266             kernel_ptr         = &(nb->kernel_noener_noprune_ptr[eeltype][evdwtype]);
267         }
268     }
269
270     if (nullptr == kernel_ptr[0])
271     {
272         *kernel_ptr = clCreateKernel(nb->dev_rundata->program, kernel_name_to_run, &cl_error);
273         assert(cl_error == CL_SUCCESS);
274     }
275     // TODO: handle errors
276
277     return *kernel_ptr;
278 }
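/* Illustrative sketch for the "handle errors" TODO above (not compiled): rather than
 * an assert() that disappears in release builds, a clCreateKernel() failure could be
 * turned into a fatal error with a readable message, e.g.:
 *
 *     *kernel_ptr = clCreateKernel(nb->dev_rundata->program, kernel_name_to_run, &cl_error);
 *     if (cl_error != CL_SUCCESS)
 *     {
 *         gmx_fatal(FARGS, "Failed to create OpenCL kernel %s: %s",
 *                   kernel_name_to_run, ocl_get_error_string(cl_error).c_str());
 *     }
 */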
279
280 /*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use.
281  */
282 static inline int calc_shmem_required_nonbonded(int  vdwType,
283                                                 bool bPrefetchLjParam)
284 {
285     int shmem;
286
287     /* size of shmem (force-buffers/xq/atom type preloading) */
288     /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
289     /* i-atom x+q in shared memory */
290     shmem  = c_numClPerSupercl * c_clSize * sizeof(float) * 4; /* xqib */
291     /* cj in shared memory, for both warps separately
292      * TODO: in the "nowarp" kernels we load cj only once, so the factor 2 is not needed.
293      */
294     shmem += 2 * c_nbnxnGpuJgroupSize * sizeof(int);           /* cjs  */
295     if (bPrefetchLjParam)
296     {
297         if (useLjCombRule(vdwType))
298         {
299             /* i-atom LJ combination parameters in shared memory */
300             shmem += c_numClPerSupercl * c_clSize * 2*sizeof(float); /* atib abused for ljcp, float2 */
301         }
302         else
303         {
304             /* i-atom types in shared memory */
305             shmem += c_numClPerSupercl * c_clSize * sizeof(int); /* atib */
306         }
307     }
308     /* force reduction buffers in shared memory */
309     shmem += c_clSize * c_clSize * 3 * sizeof(float);    /* f_buf */
310     /* Warp vote: strictly this needs one cl_uint per warp in the block (here assumed to be 2 warps). */
311     shmem += sizeof(cl_uint) * 2;                        /* warp_any */
312     return shmem;
313 }
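/* Worked example for the sizes above (assuming the usual constants
 * c_numClPerSupercl = 8, c_clSize = 8, c_nbnxnGpuJgroupSize = 4, with LJ parameter
 * prefetching and a combination rule in use):
 *     xqib     8 * 8 * 4 * sizeof(float)   = 1024 B
 *     cjs      2 * 4     * sizeof(int)     =   32 B
 *     ljcp     8 * 8 * 2 * sizeof(float)   =  512 B
 *     f_buf    8 * 8 * 3 * sizeof(float)   =  768 B
 *     warp_any 2         * sizeof(cl_uint) =    8 B
 * i.e. 2344 bytes of local memory per work-group. */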
314
315 /*! \brief Initializes data structures that are going to be sent to the OpenCL device.
316  *
317  *  The device can't use the same data structures as the host for two main reasons:
318  *  - OpenCL restrictions (pointers are not accepted inside data structures)
319  *  - some host side fields are not needed for the OpenCL kernels.
320  *
321  *  This function is called before the launch of both nbnxn and prune kernels.
322  */
323 static void fillin_ocl_structures(cl_nbparam_t        *nbp,
324                                   cl_nbparam_params_t *nbparams_params)
325 {
326     nbparams_params->coulomb_tab_scale = nbp->coulomb_tab_scale;
327     nbparams_params->c_rf              = nbp->c_rf;
328     nbparams_params->dispersion_shift  = nbp->dispersion_shift;
329     nbparams_params->eeltype           = nbp->eeltype;
330     nbparams_params->epsfac            = nbp->epsfac;
331     nbparams_params->ewaldcoeff_lj     = nbp->ewaldcoeff_lj;
332     nbparams_params->ewald_beta        = nbp->ewald_beta;
333     nbparams_params->rcoulomb_sq       = nbp->rcoulomb_sq;
334     nbparams_params->repulsion_shift   = nbp->repulsion_shift;
335     nbparams_params->rlistOuter_sq     = nbp->rlistOuter_sq;
336     nbparams_params->rvdw_sq           = nbp->rvdw_sq;
337     nbparams_params->rlistInner_sq     = nbp->rlistInner_sq;
338     nbparams_params->rvdw_switch       = nbp->rvdw_switch;
339     nbparams_params->sh_ewald          = nbp->sh_ewald;
340     nbparams_params->sh_lj_ewald       = nbp->sh_lj_ewald;
341     nbparams_params->two_k_rf          = nbp->two_k_rf;
342     nbparams_params->vdwtype           = nbp->vdwtype;
343     nbparams_params->vdw_switch        = nbp->vdw_switch;
344 }
345
346 /*! \brief Enqueues a wait for event completion.
347  *
348  * Then it releases the event and resets it to nullptr.
349  * Don't use this function when more than one wait will be issued for the event.
350  * Equivalent to a CUDA stream synchronization. */
351 static void sync_ocl_event(cl_command_queue stream, cl_event *ocl_event)
352 {
353     cl_int gmx_unused cl_error;
354
355     /* Enqueue wait */
356     cl_error = clEnqueueBarrierWithWaitList(stream, 1, ocl_event, nullptr);
357     GMX_RELEASE_ASSERT(CL_SUCCESS == cl_error, ocl_get_error_string(cl_error).c_str());
358
359     /* Release the event and reset it to nullptr. It is OK to release it here, as the enqueued barrier performs an implicit retain of the events in its wait list. */
360     cl_error = clReleaseEvent(*ocl_event);
361     assert(CL_SUCCESS == cl_error);
362     *ocl_event = nullptr;
363 }
364
365 /*! \brief Launch asynchronously the xq buffer host to device copy. */
366 void gpu_copy_xq_to_gpu(gmx_nbnxn_ocl_t        *nb,
367                         const nbnxn_atomdata_t *nbatom,
368                         const AtomLocality      atomLocality)
369 {
370     GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");
371
372     const InteractionLocality iloc = gpuAtomToInteractionLocality(atomLocality);
373
374     /* local/nonlocal offset and length used for xq and f */
375     int                  adat_begin, adat_len;
376
377     cl_atomdata_t       *adat    = nb->atdat;
378     cl_plist_t          *plist   = nb->plist[iloc];
379     cl_timers_t         *t       = nb->timers;
380     cl_command_queue     stream  = nb->stream[iloc];
381
382     bool                 bDoTime = (nb->bDoTime) != 0;
383
384     /* Don't launch the non-local H2D copy if there is no dependent
385        work to do: neither non-local nor other (e.g. bonded) work
386        that takes the nbnxn coordinates as input.
387        Doing the same for the local kernel is more complicated, since the
388        local part of the force array also depends on the non-local kernel.
389        So to avoid complicating the code and to reduce the risk of bugs,
390        we always call the local x+q copy (and the rest of the local
391        work in nbnxn_gpu_launch_kernel()).
392      */
393     if ((iloc == InteractionLocality::NonLocal) && !haveGpuShortRangeWork(*nb, iloc))
394     {
395         plist->haveFreshList = false;
396
397         return;
398     }
399
400     /* calculate the atom data index range based on locality */
401     if (atomLocality == AtomLocality::Local)
402     {
403         adat_begin  = 0;
404         adat_len    = adat->natoms_local;
405     }
406     else
407     {
408         adat_begin  = adat->natoms_local;
409         adat_len    = adat->natoms - adat->natoms_local;
410     }
411
412     /* beginning of timed HtoD section */
413     if (bDoTime)
414     {
415         t->xf[atomLocality].nb_h2d.openTimingRegion(stream);
416     }
417
418     /* HtoD x, q */
419     ocl_copy_H2D_async(adat->xq, nbatom->x().data() + adat_begin * 4, adat_begin*sizeof(float)*4,
420                        adat_len * sizeof(float) * 4, stream, bDoTime ? t->xf[atomLocality].nb_h2d.fetchNextEvent() : nullptr);
421
422     if (bDoTime)
423     {
424         t->xf[atomLocality].nb_h2d.closeTimingRegion(stream);
425     }
426
427     /* When we get here all misc operations issued in the local stream as well as
428        the local xq H2D are done,
429        so we record that in the local stream and wait for it in the nonlocal one. */
430     if (nb->bUseTwoStreams)
431     {
432         if (iloc == InteractionLocality::Local)
433         {
434             cl_int gmx_used_in_debug cl_error = clEnqueueMarkerWithWaitList(stream, 0, nullptr, &(nb->misc_ops_and_local_H2D_done));
435             assert(CL_SUCCESS == cl_error);
436
437             /* Based on the v1.2 section 5.13 of the OpenCL spec, a flush is needed
438              * in the local stream in order to be able to sync with the above event
439              * from the non-local stream.
440              */
441             cl_error = clFlush(stream);
442             assert(CL_SUCCESS == cl_error);
443         }
444         else
445         {
446             sync_ocl_event(stream, &(nb->misc_ops_and_local_H2D_done));
447         }
448     }
449 }
450
451
452 /*! \brief Launch GPU kernel
453
454    As we execute the nonbonded workload in separate queues, before launching
455    the kernel we need to make sure that the following operations have completed:
456    - atomdata allocation and related H2D transfers (every nstlist step);
457    - pair list H2D transfer (every nstlist step);
458    - shift vector H2D transfer (every nstlist step);
459    - force (+shift force and energy) output clearing (every step).
460
461    These operations are issued in the local queue at the beginning of the step
462    and therefore always complete before the local kernel launch. The non-local
463    kernel is launched after the local on the same device/context, so this is
464    inherently scheduled after the operations in the local stream (including the
465    above "misc_ops").
466    However, for the sake of having a future-proof implementation, we use the
467    misc_ops_and_local_H2D_done event to record the point in time when the above
468    operations are finished and synchronize with this event in the non-local stream.
469  */
470 void gpu_launch_kernel(gmx_nbnxn_ocl_t                  *nb,
471                        const int                         flags,
472                        const Nbnxm::InteractionLocality  iloc)
473 {
474     cl_atomdata_t       *adat    = nb->atdat;
475     cl_nbparam_t        *nbp     = nb->nbparam;
476     cl_plist_t          *plist   = nb->plist[iloc];
477     cl_timers_t         *t       = nb->timers;
478     cl_command_queue     stream  = nb->stream[iloc];
479
480     bool                 bCalcEner   = (flags & GMX_FORCE_ENERGY) != 0;
481     int                  bCalcFshift = flags & GMX_FORCE_VIRIAL;
482     bool                 bDoTime     = (nb->bDoTime) != 0;
483
484     cl_nbparam_params_t  nbparams_params;
485
486     /* Don't launch the non-local kernel if there is no work to do.
487        Doing the same for the local kernel is more complicated, since the
488        local part of the force array also depends on the non-local kernel.
489        So to avoid complicating the code and to reduce the risk of bugs,
490        we always call the local kernel and later (not in
491        this function) the stream wait, local f copyback and the f buffer
492        clearing. All these operations, except for the local interaction kernel,
493        are needed for the non-local interactions. The skip of the local kernel
494        call is taken care of later in this function. */
495     if (canSkipNonbondedWork(*nb, iloc))
496     {
497         plist->haveFreshList = false;
498
499         return;
500     }
501
502     if (nbp->useDynamicPruning && plist->haveFreshList)
503     {
504         /* Prunes for rlistOuter and rlistInner, sets plist->haveFreshList=false
505            (that's the way the timing accounting can distinguish between
506            separate prune kernel and combined force+prune).
507          */
508         Nbnxm::gpu_launch_kernel_pruneonly(nb, iloc, 1);
509     }
510
511     if (plist->nsci == 0)
512     {
513         /* Don't launch an empty local kernel (this is not allowed with OpenCL).
514          */
515         return;
516     }
517
518     /* beginning of timed nonbonded calculation section */
519     if (bDoTime)
520     {
521         t->interaction[iloc].nb_k.openTimingRegion(stream);
522     }
523
524     /* kernel launch config */
525
526     KernelLaunchConfig config;
527     config.sharedMemorySize = calc_shmem_required_nonbonded(nbp->vdwtype, nb->bPrefetchLjParam);
528     config.stream           = stream;
529     config.blockSize[0]     = c_clSize;
530     config.blockSize[1]     = c_clSize;
531     config.gridSize[0]      = plist->nsci;
532
533     validate_global_work_size(config, 3, nb->dev_info);
534
535     if (debug)
536     {
537         fprintf(debug, "Non-bonded GPU launch configuration:\n\tLocal work size: %zux%zux%zu\n\t"
538                 "Global work size : %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n",
539                 config.blockSize[0], config.blockSize[1], config.blockSize[2],
540                 config.blockSize[0] * config.gridSize[0], config.blockSize[1] * config.gridSize[1], plist->nsci*c_numClPerSupercl,
541                 c_numClPerSupercl, plist->na_c);
542     }
543
544     fillin_ocl_structures(nbp, &nbparams_params);
545
546     auto          *timingEvent  = bDoTime ? t->interaction[iloc].nb_k.fetchNextEvent() : nullptr;
547     constexpr char kernelName[] = "k_calc_nb";
548     const auto     kernel       = select_nbnxn_kernel(nb,
549                                                       nbp->eeltype,
550                                                       nbp->vdwtype,
551                                                       bCalcEner,
552                                                       (plist->haveFreshList && !nb->timers->interaction[iloc].didPrune));
553
554
555     if (useLjCombRule(nb->nbparam->vdwtype))
556     {
557         const auto kernelArgs = prepareGpuKernelArguments(kernel, config,
558                                                           &nbparams_params, &adat->xq, &adat->f, &adat->e_lj, &adat->e_el, &adat->fshift,
559                                                           &adat->lj_comb,
560                                                           &adat->shift_vec, &nbp->nbfp_climg2d, &nbp->nbfp_comb_climg2d, &nbp->coulomb_tab_climg2d,
561                                                           &plist->sci, &plist->cj4, &plist->excl, &bCalcFshift);
562
563         launchGpuKernel(kernel, config, timingEvent, kernelName, kernelArgs);
564     }
565     else
566     {
567         const auto kernelArgs = prepareGpuKernelArguments(kernel, config,
568                                                           &adat->ntypes,
569                                                           &nbparams_params, &adat->xq, &adat->f, &adat->e_lj, &adat->e_el, &adat->fshift,
570                                                           &adat->atom_types,
571                                                           &adat->shift_vec, &nbp->nbfp_climg2d, &nbp->nbfp_comb_climg2d, &nbp->coulomb_tab_climg2d,
572                                                           &plist->sci, &plist->cj4, &plist->excl, &bCalcFshift);
573         launchGpuKernel(kernel, config, timingEvent, kernelName, kernelArgs);
574     }
575
576     if (bDoTime)
577     {
578         t->interaction[iloc].nb_k.closeTimingRegion(stream);
579     }
580 }
581
582
583 /*! \brief Calculates the amount of shared memory required by the prune kernel.
584  *
585  *  Note that for the sake of simplicity we use the CUDA terminology "shared memory"
586  *  for OpenCL local memory.
587  *
588  * \param[in] num_threads_z cj4 concurrency, equal to the number of threads/work items in the third dimension.
589  * \returns   the amount of local memory in bytes required by the pruning kernel
590  */
591 static inline int calc_shmem_required_prune(const int num_threads_z)
592 {
593     int shmem;
594
595     /* i-atom x in shared memory (for convenience we load all 4 components including q) */
596     shmem  = c_numClPerSupercl * c_clSize * sizeof(float)*4;
597     /* cj in shared memory, for each warp separately
598      * Note: only need to load once per wavefront, but to keep the code simple,
599      * for now we load twice on AMD.
600      */
601     shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);
602     /* Warp vote, requires one uint per warp/32 threads per block. */
603     shmem += sizeof(cl_uint) * 2*num_threads_z;
604
605     return shmem;
606 }
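/* Worked example for the sizes above (assuming c_numClPerSupercl = 8, c_clSize = 8,
 * c_nbnxnGpuClusterpairSplit = 2 and c_nbnxnGpuJgroupSize = 4):
 *     xib      8 * 8 * 4 * sizeof(float)           = 1024 B
 *     cjs      num_threads_z * 2 * 4 * sizeof(int) =   32 B * num_threads_z
 *     warp_any 2 * num_threads_z * sizeof(cl_uint) =    8 B * num_threads_z
 * i.e. 1024 + 40 * num_threads_z bytes; for num_threads_z = 4 that is 1184 bytes. */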
607
608 void gpu_launch_kernel_pruneonly(gmx_nbnxn_gpu_t           *nb,
609                                  const InteractionLocality  iloc,
610                                  const int                  numParts)
611 {
612     cl_atomdata_t       *adat    = nb->atdat;
613     cl_nbparam_t        *nbp     = nb->nbparam;
614     cl_plist_t          *plist   = nb->plist[iloc];
615     cl_timers_t         *t       = nb->timers;
616     cl_command_queue     stream  = nb->stream[iloc];
617     bool                 bDoTime = nb->bDoTime == CL_TRUE;
618
619     if (plist->haveFreshList)
620     {
621         GMX_ASSERT(numParts == 1, "With first pruning we expect 1 part");
622
623         /* Set rollingPruningNumParts to signal that it is not set */
624         plist->rollingPruningNumParts = 0;
625         plist->rollingPruningPart     = 0;
626     }
627     else
628     {
629         if (plist->rollingPruningNumParts == 0)
630         {
631             plist->rollingPruningNumParts = numParts;
632         }
633         else
634         {
635             GMX_ASSERT(numParts == plist->rollingPruningNumParts, "It is not allowed to change numParts in between list generation steps");
636         }
637     }
638
639     /* Use a local variable for part and update it in plist, so we can return here
640      * without duplicating the part increment code.
641      */
642     int part = plist->rollingPruningPart;
643
644     plist->rollingPruningPart++;
645     if (plist->rollingPruningPart >= plist->rollingPruningNumParts)
646     {
647         plist->rollingPruningPart = 0;
648     }
649
650     /* Compute the number of list entries to prune in this pass */
651     int numSciInPart = (plist->nsci - part)/numParts;
652
653     /* Don't launch the kernel if there is no work to do. */
654     if (numSciInPart <= 0)
655     {
656         plist->haveFreshList = false;
657
658         return;
659     }
660
661     GpuRegionTimer *timer = nullptr;
662     if (bDoTime)
663     {
664         timer = &(plist->haveFreshList ? t->interaction[iloc].prune_k : t->interaction[iloc].rollingPrune_k);
665     }
666
667     /* beginning of timed prune calculation section */
668     if (bDoTime)
669     {
670         timer->openTimingRegion(stream);
671     }
672
673     /* Kernel launch config:
674      * - The thread block dimensions match the size of i-clusters, j-clusters,
675      *   and j-cluster concurrency, in x, y, and z, respectively.
676      * - The 1D block-grid contains as many blocks as super-clusters.
677      */
678     int       num_threads_z = getOclPruneKernelJ4Concurrency(nb->dev_info->vendor_e);
679
680     /* kernel launch config */
681     KernelLaunchConfig config;
682     config.sharedMemorySize = calc_shmem_required_prune(num_threads_z);
683     config.stream           = stream;
684     config.blockSize[0]     = c_clSize;
685     config.blockSize[1]     = c_clSize;
686     config.blockSize[2]     = num_threads_z;
687     config.gridSize[0]      = numSciInPart;
688
689     validate_global_work_size(config, 3, nb->dev_info);
690
691     if (debug)
692     {
693         fprintf(debug, "Pruning GPU kernel launch configuration:\n\tLocal work size: %zux%zux%zu\n\t"
694                 "\tGlobal work size: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n"
695                 "\tShMem: %zu\n",
696                 config.blockSize[0], config.blockSize[1], config.blockSize[2],
697                 config.blockSize[0] * config.gridSize[0], config.blockSize[1] * config.gridSize[1], plist->nsci*c_numClPerSupercl,
698                 c_numClPerSupercl, plist->na_c, config.sharedMemorySize);
699     }
700
701     cl_nbparam_params_t  nbparams_params;
702     fillin_ocl_structures(nbp, &nbparams_params);
703
704     auto          *timingEvent  = bDoTime ? timer->fetchNextEvent() : nullptr;
705     constexpr char kernelName[] = "k_pruneonly";
706     const auto     pruneKernel  = selectPruneKernel(nb->kernel_pruneonly, plist->haveFreshList);
707     const auto     kernelArgs   = prepareGpuKernelArguments(pruneKernel, config,
708                                                             &nbparams_params, &adat->xq, &adat->shift_vec,
709                                                             &plist->sci, &plist->cj4, &plist->imask, &numParts, &part);
710     launchGpuKernel(pruneKernel, config, timingEvent, kernelName, kernelArgs);
711
712     if (plist->haveFreshList)
713     {
714         plist->haveFreshList         = false;
715         /* Mark that pruning has been done */
716         nb->timers->interaction[iloc].didPrune = true;
717     }
718     else
719     {
720         /* Mark that rolling pruning has been done */
721         nb->timers->interaction[iloc].didRollingPrune = true;
722     }
723
724     if (bDoTime)
725     {
726         timer->closeTimingRegion(stream);
727     }
728 }
729
730 /*! \brief
731  * Launch asynchronously the download of nonbonded forces from the GPU
732  * (and energies/shift forces if required).
733  */
734 void gpu_launch_cpyback(gmx_nbnxn_ocl_t                          *nb,
735                         struct nbnxn_atomdata_t                  *nbatom,
736                         const int                                 flags,
737                         const AtomLocality                        aloc,
738                         const bool                    gmx_unused  copyBackNbForce)
739 {
740     GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");
741
742     cl_int gmx_unused cl_error;
743     int               adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */
744
745     /* determine interaction locality from atom locality */
746     const InteractionLocality iloc = gpuAtomToInteractionLocality(aloc);
747
748     cl_atomdata_t            *adat    = nb->atdat;
749     cl_timers_t              *t       = nb->timers;
750     bool                      bDoTime = nb->bDoTime == CL_TRUE;
751     cl_command_queue          stream  = nb->stream[iloc];
752
753     bool                      bCalcEner   = (flags & GMX_FORCE_ENERGY) != 0;
754     int                       bCalcFshift = flags & GMX_FORCE_VIRIAL;
755
756
757     /* don't launch non-local copy-back if there was no non-local work to do */
758     if ((iloc == InteractionLocality::NonLocal) && !haveGpuShortRangeWork(*nb, iloc))
759     {
760         /* TODO An alternative way to signal that non-local work is
761            complete is to use a clEnqueueMarker+clEnqueueBarrier
762            pair. However, the use of bNonLocalStreamActive has the
763            advantage of being local to the host, so probably minimizes
764            overhead. Curiously, for NVIDIA OpenCL with an empty-domain
765            test case, overall simulation performance was higher with
766            the API calls, but this has not been tested on AMD OpenCL,
767            so could be worth considering in future. */
768         nb->bNonLocalStreamActive = CL_FALSE;
769         return;
770     }
771
772     getGpuAtomRange(adat, aloc, &adat_begin, &adat_len);
773
774     /* beginning of timed D2H section */
775     if (bDoTime)
776     {
777         t->xf[aloc].nb_d2h.openTimingRegion(stream);
778     }
779
780     /* With DD the local D2H transfer can only start after the non-local
781        has been launched. */
782     if (iloc == InteractionLocality::Local && nb->bNonLocalStreamActive)
783     {
784         sync_ocl_event(stream, &(nb->nonlocal_done));
785     }
786
787     /* DtoH f */
788     ocl_copy_D2H_async(nbatom->out[0].f.data() + adat_begin * 3, adat->f, adat_begin*3*sizeof(float),
789                        (adat_len)* adat->f_elem_size, stream, bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);
790
791     /* kick off work */
792     cl_error = clFlush(stream);
793     assert(CL_SUCCESS == cl_error);
794
795     /* After the non-local D2H is launched the nonlocal_done event can be
796        recorded which signals that the local D2H can proceed. This event is not
797        placed after the non-local kernel because we first need the non-local
798        data back. */
799     if (iloc == InteractionLocality::NonLocal)
800     {
801         cl_error = clEnqueueMarkerWithWaitList(stream, 0, nullptr, &(nb->nonlocal_done));
802         assert(CL_SUCCESS == cl_error);
803         nb->bNonLocalStreamActive = CL_TRUE;
804     }
805
806     /* only transfer energies in the local stream */
807     if (iloc == InteractionLocality::Local)
808     {
809         /* DtoH fshift */
810         if (bCalcFshift)
811         {
812             ocl_copy_D2H_async(nb->nbst.fshift, adat->fshift, 0,
813                                SHIFTS * adat->fshift_elem_size, stream, bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);
814         }
815
816         /* DtoH energies */
817         if (bCalcEner)
818         {
819             ocl_copy_D2H_async(nb->nbst.e_lj, adat->e_lj, 0,
820                                sizeof(float), stream, bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);
821
822             ocl_copy_D2H_async(nb->nbst.e_el, adat->e_el, 0,
823                                sizeof(float), stream, bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);
824         }
825     }
826
827     if (bDoTime)
828     {
829         t->xf[aloc].nb_d2h.closeTimingRegion(stream);
830     }
831 }
832
833
834 /*! \brief Selects the Ewald kernel type, analytical or tabulated, single or twin cut-off. */
835 int gpu_pick_ewald_kernel_type(const bool bTwinCut)
836 {
837     bool bUseAnalyticalEwald, bForceAnalyticalEwald, bForceTabulatedEwald;
838     int  kernel_type;
839
840     /* Benchmarking/development environment variables to force the use of
841        analytical or tabulated Ewald kernel. */
842     bForceAnalyticalEwald = (getenv("GMX_OCL_NB_ANA_EWALD") != nullptr);
843     bForceTabulatedEwald  = (getenv("GMX_OCL_NB_TAB_EWALD") != nullptr);
844
845     if (bForceAnalyticalEwald && bForceTabulatedEwald)
846     {
847         gmx_incons("Both analytical and tabulated Ewald OpenCL non-bonded kernels "
848                    "requested through environment variables.");
849     }
850
851     /* OpenCL: by default, use analytical Ewald.
852      * TODO: tabulated Ewald does not work and needs fixing, see init_nbparam() in nbnxn_ocl_data_mgmt.cpp.
853      * TODO: decide if a dev_info parameter should be added to recognize NVIDIA CC>=3.0 devices.
854      */
858     bUseAnalyticalEwald = true;
859     if (bForceAnalyticalEwald)
860     {
861         if (debug)
862         {
863             fprintf(debug, "Using analytical Ewald OpenCL kernels\n");
864         }
865     }
866     else if (bForceTabulatedEwald)
867     {
868         bUseAnalyticalEwald = false;
869
870         if (debug)
871         {
872             fprintf(debug, "Using tabulated Ewald OpenCL kernels\n");
873         }
874     }
875
876     /* Use twin cut-off kernels if requested through bTwinCut or if the env. var.
877        forces it (use this for debugging/benchmarking only). */
878     if (!bTwinCut && (getenv("GMX_OCL_NB_EWALD_TWINCUT") == nullptr))
879     {
880         kernel_type = bUseAnalyticalEwald ? eelOclEWALD_ANA : eelOclEWALD_TAB;
881     }
882     else
883     {
884         kernel_type = bUseAnalyticalEwald ? eelOclEWALD_ANA_TWIN : eelOclEWALD_TAB_TWIN;
885     }
886
887     return kernel_type;
888 }
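/* Usage note: the environment variables above are read with getenv(), so a kernel
 * flavour can be forced from the shell for benchmarking, e.g. (hypothetical command
 * line, any mdrun invocation works):
 *
 *     GMX_OCL_NB_ANA_EWALD=1 gmx mdrun -deffnm bench
 *
 * Setting both GMX_OCL_NB_ANA_EWALD and GMX_OCL_NB_TAB_EWALD triggers the
 * gmx_incons() fatal error above. */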
889
890 } // namespace Nbnxm