1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35 /*! \internal \file
36  *  \brief Define OpenCL implementation of nbnxn_gpu.h
37  *
38  *  \author Anca Hamuraru <anca@streamcomputing.eu>
39  *  \author Teemu Virolainen <teemu@streamcomputing.eu>
40  *  \author Dimitrios Karkoulis <dimitris.karkoulis@gmail.com>
41  *  \author Szilárd Páll <pall.szilard@gmail.com>
42  *  \ingroup module_mdlib
43  *
44  *  TODO (psz):
45  *  - Add a static const cl_uint c_pruneKernelWorkDim / c_nbnxnKernelWorkDim = 3;
46  *  - Rework the copying of OCL data structures done before every invocation of both
47  *    nb and prune kernels (using fillin_ocl_structures); also consider at the same
48  *    time calling clSetKernelArg only on the updated parameters (if tracking changed
49  *    parameters is feasible);
50  *  - Consider using the event_wait_list argument to clEnqueueNDRangeKernel to mark
51  *    dependencies on the kernel launched: e.g. the non-local nb kernel's dependency
52  *    on the misc_ops_and_local_H2D_done event could be better expressed this way (see the sketch after this comment block).
53  *
54  *  - Consider extracting common sections of the OpenCL and CUDA nbnxn logic, e.g:
55  *    - in nbnxn_gpu_launch_kernel_pruneonly() the pre- and post-kernel launch logic
56  *      is identical in the two implementations, so a 3-way split might allow sharing
57  *      code;
58  *    -
59  *
60  */
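/* Illustration only (not part of the build): a minimal sketch of the event_wait_list idea
 * from the TODO list above. Instead of enqueueing a separate barrier in the non-local queue,
 * the non-local kernel launch itself could declare its dependency on the local misc-ops/H2D
 * work. The kernel handle and work sizes below are placeholders; only clEnqueueNDRangeKernel
 * and its argument list are standard OpenCL.
 *
 *     cl_event deps[1]       = { nb->misc_ops_and_local_H2D_done };
 *     size_t   localSize[3]  = { clusterSize, clusterSize, 1 };          // placeholder sizes
 *     size_t   globalSize[3] = { clusterSize * numSci, clusterSize, 1 };
 *     cl_int   err = clEnqueueNDRangeKernel(nb->stream[eintNonlocal], nb_kernel, 3,
 *                                           nullptr, globalSize, localSize,
 *                                           1, deps, nullptr);           // wait on local work
 */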
61 #include "gmxpre.h"
62
63 #include <assert.h>
64 #include <stdlib.h>
65
66 #if defined(_MSVC)
67 #include <limits>
68 #endif
69
70 #include "thread_mpi/atomic.h"
71
72 #include "gromacs/gpu_utils/gputraits_ocl.h"
73 #include "gromacs/gpu_utils/oclutils.h"
74 #include "gromacs/hardware/hw_info.h"
75 #include "gromacs/mdlib/force_flags.h"
76 #include "gromacs/mdlib/nb_verlet.h"
77 #include "gromacs/mdlib/nbnxn_consts.h"
78 #include "gromacs/mdlib/nbnxn_gpu.h"
79 #include "gromacs/mdlib/nbnxn_gpu_common.h"
80 #include "gromacs/mdlib/nbnxn_gpu_common_utils.h"
81 #include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
82 #include "gromacs/mdlib/nbnxn_pairlist.h"
83 #include "gromacs/pbcutil/ishift.h"
84 #include "gromacs/timing/gpu_timing.h"
85 #include "gromacs/utility/cstringutil.h"
86 #include "gromacs/utility/fatalerror.h"
87 #include "gromacs/utility/gmxassert.h"
88
89 #include "nbnxn_ocl_internal.h"
90 #include "nbnxn_ocl_types.h"
91
92
93 /*! \brief Convenience constants */
94 //@{
95 static const int c_numClPerSupercl = c_nbnxnGpuNumClusterPerSupercluster;
96 static const int c_clSize          = c_nbnxnGpuClusterSize;
97 //@}
98
99
100 /*! \brief Validates the input global work size parameter.
101  */
102 static inline void validate_global_work_size(const KernelLaunchConfig &config, int work_dim, const gmx_device_info_t *dinfo)
103 {
104     cl_uint device_size_t_size_bits;
105     cl_uint host_size_t_size_bits;
106
107     assert(dinfo);
108
109     size_t global_work_size[3];
110     GMX_ASSERT(work_dim <= 3, "Not supporting hyper-grids just yet");
111     for (int i = 0; i < work_dim; i++)
112     {
113         global_work_size[i] = config.blockSize[i] * config.gridSize[i];
114     }
115
116     /* Each component of a global_work_size must not exceed the range given by the
117        sizeof(device size_t) for the device on which the kernel execution will
118        be enqueued. See:
119        https://www.khronos.org/registry/cl/sdk/1.0/docs/man/xhtml/clEnqueueNDRangeKernel.html
120      */
121     device_size_t_size_bits = dinfo->adress_bits;
122     host_size_t_size_bits   = static_cast<cl_uint>(sizeof(size_t) * 8);
123
124     /* If sizeof(host size_t) <= sizeof(device size_t)
125             => global_work_size components will always be valid
126        else
127             => get device limit for global work size and
128             compare it against each component of global_work_size.
129      */
130     if (host_size_t_size_bits > device_size_t_size_bits)
131     {
132         size_t device_limit;
133
134         device_limit = (1ull << device_size_t_size_bits) - 1;
135
136         for (int i = 0; i < work_dim; i++)
137         {
138             if (global_work_size[i] > device_limit)
139             {
140                 gmx_fatal(FARGS, "Watch out, the input system is too large to simulate!\n"
141                           "The number of nonbonded work units (=number of super-clusters) exceeds the "
142                           "device capabilities. Global work size limit exceeded (%zu > %zu)!",
143                           global_work_size[i], device_limit);
144             }
145         }
146     }
147 }
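/* Worked example (illustrative, assuming a device that reports 32-bit addressing and the
 * usual cluster size c_clSize == 8): device_limit = 2^32 - 1 = 4294967295, and the
 * x-dimension of the global work size is c_clSize * nsci, so the check above only triggers
 * for pair lists with more than ~5.4e8 super-cluster entries, far beyond current system
 * sizes, but cheap to verify on every launch.
 */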
148
149 /* Constant arrays listing non-bonded kernel function names. The arrays are
150  * organized as 2-dimensional arrays indexed by electrostatics and VDW type.
151  *
152  *  Note that the row- and column-order of the kernel names has to match the
153  *  order of the corresponding enumerated electrostatics and vdw types
154  *  defined in nbnxn_ocl_types.h.
155  */
156
157 /*! \brief Force-only kernel function names. */
158 static const char* nb_kfunc_noener_noprune_ptr[eelOclNR][evdwOclNR] =
159 {
160     { "nbnxn_kernel_ElecCut_VdwLJ_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombLB_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJFsw_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJPsw_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_opencl"            },
161     { "nbnxn_kernel_ElecRF_VdwLJ_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombLB_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJFsw_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJPsw_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_opencl"             },
162     { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_opencl"        },
163     { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_opencl" },
164     { "nbnxn_kernel_ElecEw_VdwLJ_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombLB_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJFsw_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJPsw_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_opencl"             },
165     { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_opencl"      }
166 };
167
168 /*! \brief Force + energy kernel function names. */
169 static const char* nb_kfunc_ener_noprune_ptr[eelOclNR][evdwOclNR] =
170 {
171     { "nbnxn_kernel_ElecCut_VdwLJ_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJFsw_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJPsw_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_opencl"            },
172     { "nbnxn_kernel_ElecRF_VdwLJ_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJFsw_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJPsw_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_opencl"             },
173     { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_opencl"        },
174     { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_opencl" },
175     { "nbnxn_kernel_ElecEw_VdwLJ_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJFsw_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJPsw_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_opencl"             },
176     { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_opencl"      }
177 };
178
179 /*! \brief Force + pruning kernel function names. */
180 static const char* nb_kfunc_noener_prune_ptr[eelOclNR][evdwOclNR] =
181 {
182     { "nbnxn_kernel_ElecCut_VdwLJ_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_opencl"             },
183     { "nbnxn_kernel_ElecRF_VdwLJ_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_opencl"              },
184     { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_opencl"         },
185     { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_opencl"  },
186     { "nbnxn_kernel_ElecEw_VdwLJ_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_opencl"              },
187     { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_opencl"       }
188 };
189
190 /*! \brief Force + energy + pruning kernel function names. */
191 static const char* nb_kfunc_ener_prune_ptr[eelOclNR][evdwOclNR] =
192 {
193     { "nbnxn_kernel_ElecCut_VdwLJ_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_opencl"            },
194     { "nbnxn_kernel_ElecRF_VdwLJ_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_opencl"             },
195     { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_opencl"        },
196     { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_opencl" },
197     { "nbnxn_kernel_ElecEw_VdwLJ_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_opencl"             },
198     { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_opencl"      }
199 };
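/* As the tables above show, the kernel names follow the pattern
 * "nbnxn_kernel_<Elec...>_<VdwLJ...>_<F|VF>[_prune]_opencl"; e.g. the reaction-field
 * electrostatics + geometric-combination-rule LJ, force-only flavor is
 * "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_opencl". Selecting a name (illustrative sketch,
 * mirroring what select_nbnxn_kernel() below does; the enumerator spellings are assumed
 * from nbnxn_ocl_types.h) reduces to indexing with the two enum values:
 *
 *     const char *name = nb_kfunc_noener_noprune_ptr[eelOclRF][evdwOclCUTCOMBGEOM];
 */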
200
201 /*! \brief Returns the prune kernel to be executed at the current invocation.
202  *
203  * \param[in] kernel_pruneonly  array of prune kernel objects
204  * \param[in] firstPrunePass    true if the first pruning pass is being executed
205  */
206 static inline cl_kernel selectPruneKernel(cl_kernel kernel_pruneonly[],
207                                           bool      firstPrunePass)
208 {
209     cl_kernel  *kernelPtr;
210
211     if (firstPrunePass)
212     {
213         kernelPtr = &(kernel_pruneonly[epruneFirst]);
214     }
215     else
216     {
217         kernelPtr = &(kernel_pruneonly[epruneRolling]);
218     }
219     // TODO: consider creating the prune kernel object here to avoid a
220     // clCreateKernel for the rolling prune kernel if this is not needed.
221     return *kernelPtr;
222 }
223
224 /*! \brief Returns the nonbonded kernel to be executed at the current step.
225  *  OpenCL kernel objects are cached in nb. If the requested kernel is not
226  *  found in the cache, it will be created and the cache will be updated.
227  */
228 static inline cl_kernel select_nbnxn_kernel(gmx_nbnxn_ocl_t   *nb,
229                                             int                eeltype,
230                                             int                evdwtype,
231                                             bool               bDoEne,
232                                             bool               bDoPrune)
233 {
234     const char* kernel_name_to_run;
235     cl_kernel  *kernel_ptr;
236     cl_int      cl_error;
237
238     assert(eeltype  < eelOclNR);
239     assert(evdwtype < evdwOclNR);
240
241     if (bDoEne)
242     {
243         if (bDoPrune)
244         {
245             kernel_name_to_run = nb_kfunc_ener_prune_ptr[eeltype][evdwtype];
246             kernel_ptr         = &(nb->kernel_ener_prune_ptr[eeltype][evdwtype]);
247         }
248         else
249         {
250             kernel_name_to_run = nb_kfunc_ener_noprune_ptr[eeltype][evdwtype];
251             kernel_ptr         = &(nb->kernel_ener_noprune_ptr[eeltype][evdwtype]);
252         }
253     }
254     else
255     {
256         if (bDoPrune)
257         {
258             kernel_name_to_run = nb_kfunc_noener_prune_ptr[eeltype][evdwtype];
259             kernel_ptr         = &(nb->kernel_noener_prune_ptr[eeltype][evdwtype]);
260         }
261         else
262         {
263             kernel_name_to_run = nb_kfunc_noener_noprune_ptr[eeltype][evdwtype];
264             kernel_ptr         = &(nb->kernel_noener_noprune_ptr[eeltype][evdwtype]);
265         }
266     }
267
268     if (nullptr == kernel_ptr[0])
269     {
270         *kernel_ptr = clCreateKernel(nb->dev_rundata->program, kernel_name_to_run, &cl_error);
271         assert(cl_error == CL_SUCCESS);
272     }
273     // TODO: handle errors
274
275     return *kernel_ptr;
276 }
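/* Illustration only (not compiled): one possible way to address the "handle errors" TODO
 * above would be to replace the assert on the clCreateKernel status with a proper check,
 * reusing helpers already used elsewhere in this file:
 *
 *     *kernel_ptr = clCreateKernel(nb->dev_rundata->program, kernel_name_to_run, &cl_error);
 *     if (CL_SUCCESS != cl_error)
 *     {
 *         gmx_fatal(FARGS, "Failed to create OpenCL kernel %s: %s",
 *                   kernel_name_to_run, ocl_get_error_string(cl_error).c_str());
 *     }
 */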
277
278 /*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use.
279  */
280 static inline int calc_shmem_required_nonbonded(int  vdwType,
281                                                 bool bPrefetchLjParam)
282 {
283     int shmem;
284
285     /* size of shmem (force-buffers/xq/atom type preloading) */
286     /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
287     /* i-atom x+q in shared memory */
288     shmem  = c_numClPerSupercl * c_clSize * sizeof(float) * 4; /* xqib */
289     /* cj in shared memory, for both warps separately
290      * TODO: in the "nowarp" kernels we load cj only once, so the factor 2 is not needed.
291      */
292     shmem += 2 * c_nbnxnGpuJgroupSize * sizeof(int);           /* cjs  */
293     if (bPrefetchLjParam)
294     {
295         if (useLjCombRule(vdwType))
296         {
297             /* i-atom LJ combination parameters in shared memory */
298             shmem += c_numClPerSupercl * c_clSize * 2*sizeof(float); /* atib abused for ljcp, float2 */
299         }
300         else
301         {
302             /* i-atom types in shared memory */
303             shmem += c_numClPerSupercl * c_clSize * sizeof(int); /* atib */
304         }
305     }
306     /* force reduction buffers in shared memory */
307     shmem += c_clSize * c_clSize * 3 * sizeof(float);    /* f_buf */
308     /* Warp-any vote flags; strictly this should be sizeof(cl_uint) times the number of warps in the block. */
309     shmem += sizeof(cl_uint) * 2;                        /* warp_any */
310     return shmem;
311 }
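/* Worked example (illustrative, assuming the usual values c_clSize == 8,
 * c_numClPerSupercl == 8 and c_nbnxnGpuJgroupSize == 4): the non-combination-rule path
 * with LJ parameter prefetching adds up to
 *     xqib:     8 * 8 * 4 * sizeof(float)   = 1024 bytes
 *     cjs:      2 * 4     * sizeof(int)     =   32 bytes
 *     atib:     8 * 8     * sizeof(int)     =  256 bytes
 *     f_buf:    8 * 8 * 3 * sizeof(float)   =  768 bytes
 *     warp_any: 2         * sizeof(cl_uint) =    8 bytes
 * i.e. roughly 2 kB of local memory per work-group.
 */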
312
313 /*! \brief Initializes data structures that are going to be sent to the OpenCL device.
314  *
315  *  The device can't use the same data structures as the host for two main reasons:
316  *  - OpenCL restrictions (pointers are not accepted inside data structures)
317  *  - some host side fields are not needed for the OpenCL kernels.
318  *
319  *  This function is called before the launch of both nbnxn and prune kernels.
320  */
321 static void fillin_ocl_structures(cl_nbparam_t        *nbp,
322                                   cl_nbparam_params_t *nbparams_params)
323 {
324     nbparams_params->coulomb_tab_scale = nbp->coulomb_tab_scale;
325     nbparams_params->c_rf              = nbp->c_rf;
326     nbparams_params->dispersion_shift  = nbp->dispersion_shift;
327     nbparams_params->eeltype           = nbp->eeltype;
328     nbparams_params->epsfac            = nbp->epsfac;
329     nbparams_params->ewaldcoeff_lj     = nbp->ewaldcoeff_lj;
330     nbparams_params->ewald_beta        = nbp->ewald_beta;
331     nbparams_params->rcoulomb_sq       = nbp->rcoulomb_sq;
332     nbparams_params->repulsion_shift   = nbp->repulsion_shift;
333     nbparams_params->rlistOuter_sq     = nbp->rlistOuter_sq;
334     nbparams_params->rvdw_sq           = nbp->rvdw_sq;
335     nbparams_params->rlistInner_sq     = nbp->rlistInner_sq;
336     nbparams_params->rvdw_switch       = nbp->rvdw_switch;
337     nbparams_params->sh_ewald          = nbp->sh_ewald;
338     nbparams_params->sh_lj_ewald       = nbp->sh_lj_ewald;
339     nbparams_params->two_k_rf          = nbp->two_k_rf;
340     nbparams_params->vdwtype           = nbp->vdwtype;
341     nbparams_params->vdw_switch        = nbp->vdw_switch;
342 }
343
344 /*! \brief Enqueues a wait for event completion.
345  *
346  * Then it releases the event and resets it to nullptr.
347  * Don't use this function when more than one wait will be issued for the event.
348  * Serves the same purpose as a CUDA stream wait on an event. */
349 static void sync_ocl_event(cl_command_queue stream, cl_event *ocl_event)
350 {
351     cl_int gmx_unused cl_error;
352
353     /* Enqueue wait */
354     cl_error = clEnqueueBarrierWithWaitList(stream, 1, ocl_event, nullptr);
355     GMX_RELEASE_ASSERT(CL_SUCCESS == cl_error, ocl_get_error_string(cl_error).c_str());
356
357     /* Release the event and reset it to nullptr. It is OK to release it here, as clEnqueueBarrierWithWaitList performs an implicit retain on the events in its wait list. */
358     cl_error = clReleaseEvent(*ocl_event);
359     assert(CL_SUCCESS == cl_error);
360     *ocl_event = nullptr;
361 }
362
363 /*! \brief Launch asynchronously the xq buffer host to device copy. */
364 void nbnxn_gpu_copy_xq_to_gpu(gmx_nbnxn_ocl_t        *nb,
365                               const nbnxn_atomdata_t *nbatom,
366                               int                     iloc,
367                               bool                    haveOtherWork)
368 {
369     int                  adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */
370
371     cl_atomdata_t       *adat    = nb->atdat;
372     cl_plist_t          *plist   = nb->plist[iloc];
373     cl_timers_t         *t       = nb->timers;
374     cl_command_queue     stream  = nb->stream[iloc];
375
376     bool                 bDoTime     = (nb->bDoTime) != 0;
377
378     /* Don't launch the non-local H2D copy if there is no dependent
379        work to do: neither non-local nor other (e.g. bonded) work
380        that takes the nbnxn coordinates as input.
381        Doing the same for the local kernel is more complicated, since the
382        local part of the force array also depends on the non-local kernel.
383        So to avoid complicating the code and to reduce the risk of bugs,
384        we always call the local x+q copy (and the rest of the local
385        work in nbnxn_gpu_launch_kernel()).
386      */
387     if (!haveOtherWork && canSkipWork(nb, iloc))
388     {
389         plist->haveFreshList = false;
390
391         return;
392     }
393
394     /* calculate the atom data index range based on locality */
395     if (LOCAL_I(iloc))
396     {
397         adat_begin  = 0;
398         adat_len    = adat->natoms_local;
399     }
400     else
401     {
402         adat_begin  = adat->natoms_local;
403         adat_len    = adat->natoms - adat->natoms_local;
404     }
405
406     /* beginning of timed HtoD section */
407     if (bDoTime)
408     {
409         t->nb_h2d[iloc].openTimingRegion(stream);
410     }
411
412     /* HtoD x, q */
413     ocl_copy_H2D_async(adat->xq, nbatom->x + adat_begin * 4, adat_begin*sizeof(float)*4,
414                        adat_len * sizeof(float) * 4, stream, bDoTime ? t->nb_h2d[iloc].fetchNextEvent() : nullptr);
415
416     if (bDoTime)
417     {
418         t->nb_h2d[iloc].closeTimingRegion(stream);
419     }
420
421     /* When we get here all misc operations issued in the local stream as well as
422        the local xq H2D are done,
423        so we record that in the local stream and wait for it in the nonlocal one. */
424     if (nb->bUseTwoStreams)
425     {
426         if (iloc == eintLocal)
427         {
428             cl_int gmx_used_in_debug cl_error = clEnqueueMarkerWithWaitList(stream, 0, nullptr, &(nb->misc_ops_and_local_H2D_done));
429             assert(CL_SUCCESS == cl_error);
430
431             /* According to section 5.13 of the OpenCL 1.2 specification, a flush is needed
432              * in the local stream in order to be able to sync with the above event
433              * from the non-local stream.
434              */
435             cl_error = clFlush(stream);
436             assert(CL_SUCCESS == cl_error);
437         }
438         else
439         {
440             sync_ocl_event(stream, &(nb->misc_ops_and_local_H2D_done));
441         }
442     }
443 }
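/* Illustration only (not compiled): the inter-queue synchronization above boils down to the
 * standard OpenCL marker + flush + barrier pattern between two command queues. The names
 * localQueue, nonlocalQueue and doneEvent are placeholders for nb->stream[eintLocal],
 * nb->stream[eintNonlocal] and nb->misc_ops_and_local_H2D_done, respectively.
 *
 *     // local queue: record an event once the misc ops and the local x+q H2D are enqueued
 *     clEnqueueMarkerWithWaitList(localQueue, 0, nullptr, &doneEvent);
 *     clFlush(localQueue);                // needed so the other queue can observe the event
 *     // non-local queue: everything enqueued afterwards waits for that event
 *     clEnqueueBarrierWithWaitList(nonlocalQueue, 1, &doneEvent, nullptr);
 *     clReleaseEvent(doneEvent);          // the barrier holds its own reference
 */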
444
445
446 /*! \brief Launch GPU kernel
447
448    As we execute the nonbonded workload in separate queues, before launching
449    the kernel we need to make sure that the following operations have completed:
450    - atomdata allocation and related H2D transfers (every nstlist step);
451    - pair list H2D transfer (every nstlist step);
452    - shift vector H2D transfer (every nstlist step);
453    - force (+shift force and energy) output clearing (every step).
454
455    These operations are issued in the local queue at the beginning of the step
456    and therefore always complete before the local kernel launch. The non-local
457    kernel is launched after the local on the same device/context, so this is
458    inherently scheduled after the operations in the local stream (including the
459    above "misc_ops").
460    However, for the sake of having a future-proof implementation, we use the
461    misc_ops_and_local_H2D_done event to record the point in time when the above
462    operations are finished and synchronize with this event in the non-local stream.
463  */
464 void nbnxn_gpu_launch_kernel(gmx_nbnxn_ocl_t               *nb,
465                              int                            flags,
466                              int                            iloc)
467 {
468     /* OpenCL kernel launch-related stuff */
469     cl_kernel            nb_kernel = nullptr;  /* fn pointer to the nonbonded kernel */
470
471     cl_atomdata_t       *adat    = nb->atdat;
472     cl_nbparam_t        *nbp     = nb->nbparam;
473     cl_plist_t          *plist   = nb->plist[iloc];
474     cl_timers_t         *t       = nb->timers;
475     cl_command_queue     stream  = nb->stream[iloc];
476
477     bool                 bCalcEner   = (flags & GMX_FORCE_ENERGY) != 0;
478     int                  bCalcFshift = flags & GMX_FORCE_VIRIAL;
479     bool                 bDoTime     = (nb->bDoTime) != 0;
480
481     cl_nbparam_params_t  nbparams_params;
482
483     /* Don't launch the non-local kernel if there is no work to do.
484        Doing the same for the local kernel is more complicated, since the
485        local part of the force array also depends on the non-local kernel.
486        So to avoid complicating the code and to reduce the risk of bugs,
487        we always call the local kernel and later (not in
488        this function) the stream wait, local f copyback and the f buffer
489        clearing. All these operations, except for the local interaction kernel,
490        are needed for the non-local interactions. The skip of the local kernel
491        call is taken care of later in this function. */
492     if (canSkipWork(nb, iloc))
493     {
494         plist->haveFreshList = false;
495
496         return;
497     }
498
499     if (nbp->useDynamicPruning && plist->haveFreshList)
500     {
501         /* Prunes for rlistOuter and rlistInner, sets plist->haveFreshList=false
502            (that's the way the timing accounting can distinguish between
503            separate prune kernel and combined force+prune).
504          */
505         nbnxn_gpu_launch_kernel_pruneonly(nb, iloc, 1);
506     }
507
508     if (plist->nsci == 0)
509     {
510         /* Don't launch an empty kernel, as that is not allowed with OpenCL.
511          */
512         return;
513     }
514
515     /* beginning of timed nonbonded calculation section */
516     if (bDoTime)
517     {
518         t->nb_k[iloc].openTimingRegion(stream);
519     }
520
521     /* get the pointer to the kernel flavor we need to use */
522     nb_kernel = select_nbnxn_kernel(nb,
523                                     nbp->eeltype,
524                                     nbp->vdwtype,
525                                     bCalcEner,
526                                     (plist->haveFreshList && !nb->timers->didPrune[iloc]));
527
528     /* kernel launch config */
529
530     KernelLaunchConfig config;
531     config.sharedMemorySize = calc_shmem_required_nonbonded(nbp->vdwtype, nb->bPrefetchLjParam);
532     config.stream           = stream;
533     config.blockSize[0]     = c_clSize;
534     config.blockSize[1]     = c_clSize;
535     config.gridSize[0]      = plist->nsci;
536
537     validate_global_work_size(config, 3, nb->dev_info);
538
539     if (debug)
540     {
541         fprintf(debug, "Non-bonded GPU launch configuration:\n\tLocal work size: %zux%zux%zu\n\t"
542                 "Global work size: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n",
543                 config.blockSize[0], config.blockSize[1], config.blockSize[2],
544                 config.blockSize[0] * config.gridSize[0], config.blockSize[1] * config.gridSize[1], plist->nsci*c_numClPerSupercl,
545                 c_numClPerSupercl, plist->na_c);
546     }
547
548     fillin_ocl_structures(nbp, &nbparams_params);
549
550     auto          *timingEvent  = bDoTime ? t->nb_k[iloc].fetchNextEvent() : nullptr;
551     constexpr char kernelName[] = "k_calc_nb";
552     if (useLjCombRule(nb->nbparam->vdwtype))
553     {
554         const auto kernelArgs = prepareGpuKernelArguments(nb_kernel, config,
555                                                           &nbparams_params, &adat->xq, &adat->f, &adat->e_lj, &adat->e_el, &adat->fshift,
556                                                           &adat->lj_comb,
557                                                           &adat->shift_vec, &nbp->nbfp_climg2d, &nbp->nbfp_comb_climg2d, &nbp->coulomb_tab_climg2d,
558                                                           &plist->sci, &plist->cj4, &plist->excl, &bCalcFshift);
559
560         launchGpuKernel(nb_kernel, config, timingEvent, kernelName, kernelArgs);
561     }
562     else
563     {
564         const auto kernelArgs = prepareGpuKernelArguments(nb_kernel, config,
565                                                           &adat->ntypes,
566                                                           &nbparams_params, &adat->xq, &adat->f, &adat->e_lj, &adat->e_el, &adat->fshift,
567                                                           &adat->atom_types,
568                                                           &adat->shift_vec, &nbp->nbfp_climg2d, &nbp->nbfp_comb_climg2d, &nbp->coulomb_tab_climg2d,
569                                                           &plist->sci, &plist->cj4, &plist->excl, &bCalcFshift);
570         launchGpuKernel(nb_kernel, config, timingEvent, kernelName, kernelArgs);
571     }
572
573     if (bDoTime)
574     {
575         t->nb_k[iloc].closeTimingRegion(stream);
576     }
577 }
578
579
580 /*! \brief Calculates the amount of shared memory required by the prune kernel.
581  *
582  *  Note that for the sake of simplicity we use the CUDA terminology "shared memory"
583  *  for OpenCL local memory.
584  *
585  * \param[in] num_threads_z cj4 concurrency equal to the number of threads/work items in the 3rd dimension.
586  * \returns   the amount of local memory in bytes required by the pruning kernel
587  */
588 static inline int calc_shmem_required_prune(const int num_threads_z)
589 {
590     int shmem;
591
592     /* i-atom x in shared memory (for convenience we load all 4 components including q) */
593     shmem  = c_numClPerSupercl * c_clSize * sizeof(float)*4;
594     /* cj in shared memory, for each warp separately
595      * Note: only need to load once per wavefront, but to keep the code simple,
596      * for now we load twice on AMD.
597      */
598     shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);
599     /* Warp vote, requires one uint per warp/32 threads per block. */
600     shmem += sizeof(cl_uint) * 2*num_threads_z;
601
602     return shmem;
603 }
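/* Worked example (illustrative, assuming c_clSize == 8, c_numClPerSupercl == 8,
 * c_nbnxnGpuClusterpairSplit == 2, c_nbnxnGpuJgroupSize == 4 and a j4 concurrency of
 * num_threads_z == 4):
 *     xib:      8 * 8 * 4 * sizeof(float)   = 1024 bytes
 *     cj:       4 * 2 * 4 * sizeof(int)     =  128 bytes
 *     warp_any: 2 * 4     * sizeof(cl_uint) =   32 bytes
 * i.e. under 1.2 kB of local memory per work-group.
 */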
604
605 void nbnxn_gpu_launch_kernel_pruneonly(gmx_nbnxn_gpu_t       *nb,
606                                        int                    iloc,
607                                        int                    numParts)
608 {
609     cl_atomdata_t       *adat    = nb->atdat;
610     cl_nbparam_t        *nbp     = nb->nbparam;
611     cl_plist_t          *plist   = nb->plist[iloc];
612     cl_timers_t         *t       = nb->timers;
613     cl_command_queue     stream  = nb->stream[iloc];
614     bool                 bDoTime = nb->bDoTime == CL_TRUE;
615
616     if (plist->haveFreshList)
617     {
618         GMX_ASSERT(numParts == 1, "With first pruning we expect 1 part");
619
620         /* Set rollingPruningNumParts to signal that it is not set */
621         plist->rollingPruningNumParts = 0;
622         plist->rollingPruningPart     = 0;
623     }
624     else
625     {
626         if (plist->rollingPruningNumParts == 0)
627         {
628             plist->rollingPruningNumParts = numParts;
629         }
630         else
631         {
632             GMX_ASSERT(numParts == plist->rollingPruningNumParts, "It is not allowed to change numParts in between list generation steps");
633         }
634     }
635
636     /* Use a local variable for part and update in plist, so we can return here
637      * without duplicating the part increment code.
638      */
639     int part = plist->rollingPruningPart;
640
641     plist->rollingPruningPart++;
642     if (plist->rollingPruningPart >= plist->rollingPruningNumParts)
643     {
644         plist->rollingPruningPart = 0;
645     }
646
647     /* Compute the number of list entries to prune in this pass */
648     int numSciInPart = (plist->nsci - part)/numParts;
649
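    /* Worked example (illustrative): with plist->nsci = 1000 and numParts = 4, part 0
     * launches (1000 - 0)/4 = 250 pruning work units and parts 1-3 launch
     * (1000 - part)/4 = 249 each (integer division), i.e. each rolling pass handles
     * roughly nsci/numParts super-cluster entries. */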
650     /* Don't launch the kernel if there is no work to do. */
651     if (numSciInPart <= 0)
652     {
653         plist->haveFreshList = false;
654
655         return;
656     }
657
658     GpuRegionTimer *timer = nullptr;
659     if (bDoTime)
660     {
661         timer = &(plist->haveFreshList ? t->prune_k[iloc] : t->rollingPrune_k[iloc]);
662     }
663
664     /* beginning of timed prune calculation section */
665     if (bDoTime)
666     {
667         timer->openTimingRegion(stream);
668     }
669
670     /* Kernel launch config:
671      * - The thread block dimensions match the size of i-clusters, j-clusters,
672      *   and j-cluster concurrency, in x, y, and z, respectively.
673      * - The 1D block-grid contains as many blocks as super-clusters.
674      */
675     int       num_threads_z = getOclPruneKernelJ4Concurrency(nb->dev_info->vendor_e);
676     cl_kernel pruneKernel   = selectPruneKernel(nb->kernel_pruneonly, plist->haveFreshList);
677
678     /* kernel launch config */
679     KernelLaunchConfig config;
680     config.sharedMemorySize = calc_shmem_required_prune(num_threads_z);
681     config.stream           = stream;
682     config.blockSize[0]     = c_clSize;
683     config.blockSize[1]     = c_clSize;
684     config.blockSize[2]     = num_threads_z;
685     config.gridSize[0]      = numSciInPart;
686
687     validate_global_work_size(config, 3, nb->dev_info);
688
689     if (debug)
690     {
691         fprintf(debug, "Pruning GPU kernel launch configuration:\n\tLocal work size: %zux%zux%zu\n\t"
692                 "\tGlobal work size: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n"
693                 "\tShMem: %zu\n",
694                 config.blockSize[0], config.blockSize[1], config.blockSize[2],
695                 config.blockSize[0] * config.gridSize[0], config.blockSize[1] * config.gridSize[1], plist->nsci*c_numClPerSupercl,
696                 c_numClPerSupercl, plist->na_c, config.sharedMemorySize);
697     }
698
699     cl_nbparam_params_t  nbparams_params;
700     fillin_ocl_structures(nbp, &nbparams_params);
701
702     auto          *timingEvent  = bDoTime ? timer->fetchNextEvent() : nullptr;
703     constexpr char kernelName[] = "k_pruneonly";
704     const auto     kernelArgs   = prepareGpuKernelArguments(pruneKernel, config,
705                                                             &nbparams_params, &adat->xq, &adat->shift_vec,
706                                                             &plist->sci, &plist->cj4, &plist->imask, &numParts, &part);
707     launchGpuKernel(pruneKernel, config, timingEvent, kernelName, kernelArgs);
708
709     if (plist->haveFreshList)
710     {
711         plist->haveFreshList         = false;
712         /* Mark that pruning has been done */
713         nb->timers->didPrune[iloc] = true;
714     }
715     else
716     {
717         /* Mark that rolling pruning has been done */
718         nb->timers->didRollingPrune[iloc] = true;
719     }
720
721     if (bDoTime)
722     {
723         timer->closeTimingRegion(stream);
724     }
725 }
726
727 /*! \brief
728  * Asynchronously launches the download of nonbonded forces from the GPU
729  * (and of energies/shift forces if required).
730  */
731 void nbnxn_gpu_launch_cpyback(gmx_nbnxn_ocl_t               *nb,
732                               const struct nbnxn_atomdata_t *nbatom,
733                               int                            flags,
734                               int                            aloc,
735                               bool                           haveOtherWork)
736 {
737     cl_int gmx_unused cl_error;
738     int               adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */
739
740     /* determine interaction locality from atom locality */
741     int              iloc = gpuAtomToInteractionLocality(aloc);
742
743     cl_atomdata_t   *adat    = nb->atdat;
744     cl_timers_t     *t       = nb->timers;
745     bool             bDoTime = nb->bDoTime == CL_TRUE;
746     cl_command_queue stream  = nb->stream[iloc];
747
748     bool             bCalcEner   = (flags & GMX_FORCE_ENERGY) != 0;
749     int              bCalcFshift = flags & GMX_FORCE_VIRIAL;
750
751
752     /* don't launch non-local copy-back if there was no non-local work to do */
753     if (!haveOtherWork && canSkipWork(nb, iloc))
754     {
755         /* TODO An alternative way to signal that non-local work is
756            complete is to use a clEnqueueMarker+clEnqueueBarrier
757            pair. However, the use of bNonLocalStreamActive has the
758            advantage of being local to the host, so probably minimizes
759            overhead. Curiously, for NVIDIA OpenCL with an empty-domain
760            test case, overall simulation performance was higher with
761            the API calls, but this has not been tested on AMD OpenCL,
762            so could be worth considering in future. */
763         nb->bNonLocalStreamActive = CL_FALSE;
764         return;
765     }
766
767     getGpuAtomRange(adat, aloc, &adat_begin, &adat_len);
768
769     /* beginning of timed D2H section */
770     if (bDoTime)
771     {
772         t->nb_d2h[iloc].openTimingRegion(stream);
773     }
774
775     /* With DD the local D2H transfer can only start after the non-local
776        has been launched. */
777     if (iloc == eintLocal && nb->bNonLocalStreamActive)
778     {
779         sync_ocl_event(stream, &(nb->nonlocal_done));
780     }
781
782     /* DtoH f */
783     ocl_copy_D2H_async(nbatom->out[0].f + adat_begin * 3, adat->f, adat_begin*3*sizeof(float),
784                        (adat_len)* adat->f_elem_size, stream, bDoTime ? t->nb_d2h[iloc].fetchNextEvent() : nullptr);
785
786     /* kick off work */
787     cl_error = clFlush(stream);
788     assert(CL_SUCCESS == cl_error);
789
790     /* After the non-local D2H is launched the nonlocal_done event can be
791        recorded which signals that the local D2H can proceed. This event is not
792        placed after the non-local kernel because we need the non-local
793        data to be copied back to the host first. */
794     if (iloc == eintNonlocal)
795     {
796         cl_error = clEnqueueMarkerWithWaitList(stream, 0, nullptr, &(nb->nonlocal_done));
797         assert(CL_SUCCESS == cl_error);
798         nb->bNonLocalStreamActive = CL_TRUE;
799     }
800
801     /* only transfer energies and shift forces in the local stream */
802     if (LOCAL_I(iloc))
803     {
804         /* DtoH fshift */
805         if (bCalcFshift)
806         {
807             ocl_copy_D2H_async(nb->nbst.fshift, adat->fshift, 0,
808                                SHIFTS * adat->fshift_elem_size, stream, bDoTime ? t->nb_d2h[iloc].fetchNextEvent() : nullptr);
809         }
810
811         /* DtoH energies */
812         if (bCalcEner)
813         {
814             ocl_copy_D2H_async(nb->nbst.e_lj, adat->e_lj, 0,
815                                sizeof(float), stream, bDoTime ? t->nb_d2h[iloc].fetchNextEvent() : nullptr);
816
817             ocl_copy_D2H_async(nb->nbst.e_el, adat->e_el, 0,
818                                sizeof(float), stream, bDoTime ? t->nb_d2h[iloc].fetchNextEvent() : nullptr);
819         }
820     }
821
822     if (bDoTime)
823     {
824         t->nb_d2h[iloc].closeTimingRegion(stream);
825     }
826 }
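/* Illustration only (not compiled): a schematic of how a hypothetical caller would sequence
 * the entry points in this file for one locality within an MD step, based on the doc
 * comments above; the real driver logic lives in the higher-level nonbonded/force code.
 *
 *     nbnxn_gpu_copy_xq_to_gpu(nb, nbatom, iloc, haveOtherWork);          // H2D x+q
 *     nbnxn_gpu_launch_kernel(nb, flags, iloc);                           // F (+E) kernel
 *     // on steps with rolling pruning, additionally:
 *     //     nbnxn_gpu_launch_kernel_pruneonly(nb, iloc, numRollingParts);
 *     nbnxn_gpu_launch_cpyback(nb, nbatom, flags, aloc, haveOtherWork);   // D2H f (+E, fshift)
 *     // ...followed elsewhere by a wait/poll on the GPU results before reduction.
 */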
827
828
829 /*! \brief Selects the Ewald kernel type, analytical or tabulated, single or twin cut-off. */
830 int nbnxn_gpu_pick_ewald_kernel_type(bool bTwinCut)
831 {
832     bool bUseAnalyticalEwald, bForceAnalyticalEwald, bForceTabulatedEwald;
833     int  kernel_type;
834
835     /* Benchmarking/development environment variables to force the use of
836        analytical or tabulated Ewald kernel. */
837     bForceAnalyticalEwald = (getenv("GMX_OCL_NB_ANA_EWALD") != nullptr);
838     bForceTabulatedEwald  = (getenv("GMX_OCL_NB_TAB_EWALD") != nullptr);
839
840     if (bForceAnalyticalEwald && bForceTabulatedEwald)
841     {
842         gmx_incons("Both analytical and tabulated Ewald OpenCL non-bonded kernels "
843                    "requested through environment variables.");
844     }
845
846     /* OpenCL: By default, use analytical Ewald
847      * TODO: tabulated does not work, it needs fixing, see init_nbparam() in nbnxn_ocl_data_mgmt.cpp
848      *
849      * TODO: decide if dev_info parameter should be added to recognize NVIDIA CC>=3.0 devices.
850      *
851      */
852     /* By default use analytical Ewald. */
853     bUseAnalyticalEwald = true;
854     if (bForceAnalyticalEwald)
855     {
856         if (debug)
857         {
858             fprintf(debug, "Using analytical Ewald OpenCL kernels\n");
859         }
860     }
861     else if (bForceTabulatedEwald)
862     {
863         bUseAnalyticalEwald = false;
864
865         if (debug)
866         {
867             fprintf(debug, "Using tabulated Ewald OpenCL kernels\n");
868         }
869     }
870
871     /* Use twin cut-off kernels if requested by bTwinCut or if the env. var.
872        forces it (use it for debugging/benchmarking only). */
873     if (!bTwinCut && (getenv("GMX_OCL_NB_EWALD_TWINCUT") == nullptr))
874     {
875         kernel_type = bUseAnalyticalEwald ? eelOclEWALD_ANA : eelOclEWALD_TAB;
876     }
877     else
878     {
879         kernel_type = bUseAnalyticalEwald ? eelOclEWALD_ANA_TWIN : eelOclEWALD_TAB_TWIN;
880     }
881
882     return kernel_type;
883 }
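/* Summary of the selection logic above (illustrative):
 *
 *     environment variable set     bTwinCut    resulting kernel_type
 *     (none)                       false       eelOclEWALD_ANA
 *     (none)                       true        eelOclEWALD_ANA_TWIN
 *     GMX_OCL_NB_TAB_EWALD         false       eelOclEWALD_TAB
 *     GMX_OCL_NB_TAB_EWALD         true        eelOclEWALD_TAB_TWIN
 *     GMX_OCL_NB_EWALD_TWINCUT     (any)       twin cut-off variant of the chosen flavor
 */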