Move nbnxn files to nbnxm directory
[alexxy/gromacs.git] / src / gromacs / nbnxm / opencl / nbnxm_ocl.cpp
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2012,2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35 /*! \internal \file
36  *  \brief Define OpenCL implementation of nbnxm_gpu.h
37  *
38  *  \author Anca Hamuraru <anca@streamcomputing.eu>
39  *  \author Teemu Virolainen <teemu@streamcomputing.eu>
40  *  \author Dimitrios Karkoulis <dimitris.karkoulis@gmail.com>
41  *  \author Szilárd Páll <pall.szilard@gmail.com>
42  *  \ingroup module_nbnxm
43  *
44  *  TODO (psz):
45  *  - Add a static const cl_uint c_pruneKernelWorkDim / c_nbnxnKernelWorkDim = 3;
46  *  - Rework the copying of OCL data structures done before every invocation of both
47  *    nb and prune kernels (using fillin_ocl_structures); also consider at the same
48  *    time calling clSetKernelArg only on the updated parameters (if tracking changed
49  *    parameters is feasible);
50  *  - Consider using the event_wait_list argument to clEnqueueNDRangeKernel to mark
51  *    dependencies on the kernel launched: e.g. the non-local nb kernel's dependency
52  *    on the misc_ops_and_local_H2D_done event could be better expressed this way.
53  *
54  *  - Consider extracting common sections of the OpenCL and CUDA nbnxn logic, e.g.:
55  *    - in nbnxn_gpu_launch_kernel_pruneonly() the pre- and post-kernel launch logic
56  *      is identical in the two implementations, so a 3-way split might allow sharing
57  *      code;
58  *
59  *
60  */
61 #include "gmxpre.h"
62
63 #include <assert.h>
64 #include <stdlib.h>
65
66 #if defined(_MSC_VER)
67 #include <limits>
68 #endif
69
70 #include "thread_mpi/atomic.h"
71
72 #include "gromacs/gpu_utils/gputraits_ocl.h"
73 #include "gromacs/gpu_utils/oclutils.h"
74 #include "gromacs/hardware/hw_info.h"
75 #include "gromacs/mdlib/force_flags.h"
76 #include "gromacs/nbnxm/gpu_common.h"
77 #include "gromacs/nbnxm/gpu_common_utils.h"
78 #include "gromacs/nbnxm/gpu_data_mgmt.h"
79 #include "gromacs/nbnxm/nbnxm.h"
80 #include "gromacs/nbnxm/nbnxm_gpu.h"
81 #include "gromacs/nbnxm/pairlist.h"
82 #include "gromacs/pbcutil/ishift.h"
83 #include "gromacs/timing/gpu_timing.h"
84 #include "gromacs/utility/cstringutil.h"
85 #include "gromacs/utility/fatalerror.h"
86 #include "gromacs/utility/gmxassert.h"
87
88 #include "nbnxm_ocl_internal.h"
89 #include "nbnxm_ocl_types.h"
90
91 /*! \brief Convenience constants */
92 //@{
93 static const int c_numClPerSupercl = c_nbnxnGpuNumClusterPerSupercluster;
94 static const int c_clSize          = c_nbnxnGpuClusterSize;
95 //@}
96
97
98 /*! \brief Validates the input global work size parameter.
99  */
100 static inline void validate_global_work_size(const KernelLaunchConfig &config, int work_dim, const gmx_device_info_t *dinfo)
101 {
102     cl_uint device_size_t_size_bits;
103     cl_uint host_size_t_size_bits;
104
105     assert(dinfo);
106
107     size_t global_work_size[3];
108     GMX_ASSERT(work_dim <= 3, "Not supporting hyper-grids just yet");
109     for (int i = 0; i < work_dim; i++)
110     {
111         global_work_size[i] = config.blockSize[i] * config.gridSize[i];
112     }
113
114     /* Each component of global_work_size must not exceed the range representable
115        by size_t on the device on which the kernel execution will be enqueued.
116        See:
117        https://www.khronos.org/registry/cl/sdk/1.0/docs/man/xhtml/clEnqueueNDRangeKernel.html
118      */
119     device_size_t_size_bits = dinfo->adress_bits;
120     host_size_t_size_bits   = static_cast<cl_uint>(sizeof(size_t) * 8);
121
122     /* If sizeof(host size_t) <= sizeof(device size_t)
123             => global_work_size components will always be valid
124        else
125             => get device limit for global work size and
126             compare it against each component of global_work_size.
127      */
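    /* Illustration (values as used in the check below): on a 64-bit host with a
     * device reporting 32 address bits, device_limit = (1ull << 32) - 1 = 4294967295,
     * so any global_work_size component above that triggers the fatal error. */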
128     if (host_size_t_size_bits > device_size_t_size_bits)
129     {
130         size_t device_limit;
131
132         device_limit = (1ull << device_size_t_size_bits) - 1;
133
134         for (int i = 0; i < work_dim; i++)
135         {
136             if (global_work_size[i] > device_limit)
137             {
138                 gmx_fatal(FARGS, "Watch out, the input system is too large to simulate!\n"
139                           "The number of nonbonded work units (=number of super-clusters) exceeds the "
140                           "device capabilities. Global work size limit exceeded (%zu > %zu)!",
141                           global_work_size[i], device_limit);
142             }
143         }
144     }
145 }
146
147 /* Constant arrays listing non-bonded kernel function names. The arrays are
148  * organized as two-dimensional arrays indexed by electrostatics and VdW type.
149  *
150  *  Note that the row and column order of the kernel names has to match the
151  *  order of the corresponding enumerated electrostatics and vdw types, resp.,
152  *  defined in nbnxm_ocl_types.h.
153  */
154
155 /*! \brief Force-only kernel function names. */
156 static const char* nb_kfunc_noener_noprune_ptr[eelOclNR][evdwOclNR] =
157 {
158     { "nbnxn_kernel_ElecCut_VdwLJ_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombLB_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJFsw_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJPsw_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_opencl"            },
159     { "nbnxn_kernel_ElecRF_VdwLJ_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombLB_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJFsw_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJPsw_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_opencl"             },
160     { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_opencl"        },
161     { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_opencl" },
162     { "nbnxn_kernel_ElecEw_VdwLJ_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombLB_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJFsw_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJPsw_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_opencl"             },
163     { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_opencl"      }
164 };
165
166 /*! \brief Force + energy kernel function names. */
167 static const char* nb_kfunc_ener_noprune_ptr[eelOclNR][evdwOclNR] =
168 {
169     { "nbnxn_kernel_ElecCut_VdwLJ_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJFsw_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJPsw_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_opencl"            },
170     { "nbnxn_kernel_ElecRF_VdwLJ_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJFsw_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJPsw_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_opencl"             },
171     { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_opencl"        },
172     { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_opencl" },
173     { "nbnxn_kernel_ElecEw_VdwLJ_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJFsw_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJPsw_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_opencl"             },
174     { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_opencl"      }
175 };
176
177 /*! \brief Force + pruning kernel function names. */
178 static const char* nb_kfunc_noener_prune_ptr[eelOclNR][evdwOclNR] =
179 {
180     { "nbnxn_kernel_ElecCut_VdwLJ_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_opencl"             },
181     { "nbnxn_kernel_ElecRF_VdwLJ_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_opencl"              },
182     { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_opencl"         },
183     { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_opencl"  },
184     { "nbnxn_kernel_ElecEw_VdwLJ_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_opencl"              },
185     { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_opencl"       }
186 };
187
188 /*! \brief Force + energy + pruning kernel function names. */
189 static const char* nb_kfunc_ener_prune_ptr[eelOclNR][evdwOclNR] =
190 {
191     { "nbnxn_kernel_ElecCut_VdwLJ_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_opencl"            },
192     { "nbnxn_kernel_ElecRF_VdwLJ_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_opencl"             },
193     { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_opencl"        },
194     { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_opencl" },
195     { "nbnxn_kernel_ElecEw_VdwLJ_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_opencl"             },
196     { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_opencl"      }
197 };
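/* Illustration of the table lookup done in select_nbnxn_kernel() below: the row is
 * the electrostatics type and the column the VdW type, in the enum order defined in
 * nbnxm_ocl_types.h. Assuming that order, analytical Ewald electrostatics
 * (eelOclEWALD_ANA) with plain cut-off LJ, force+energy and no pruning selects
 * nb_kfunc_ener_noprune_ptr[eelOclEWALD_ANA][0], i.e. "nbnxn_kernel_ElecEw_VdwLJ_VF_opencl".
 */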
198
199 /*! \brief Return the prune kernel to be executed at the current invocation.
200  *
201  * \param[in] kernel_pruneonly  array of prune kernel objects
202  * \param[in] firstPrunePass    true if the first pruning pass is being executed
203  */
204 static inline cl_kernel selectPruneKernel(cl_kernel kernel_pruneonly[],
205                                           bool      firstPrunePass)
206 {
207     cl_kernel  *kernelPtr;
208
209     if (firstPrunePass)
210     {
211         kernelPtr = &(kernel_pruneonly[epruneFirst]);
212     }
213     else
214     {
215         kernelPtr = &(kernel_pruneonly[epruneRolling]);
216     }
217     // TODO: consider creating the prune kernel object here to avoid a
218     // clCreateKernel for the rolling prune kernel if this is not needed.
219     return *kernelPtr;
220 }
221
222 /*! \brief Return the kernel to be executed at the current step.
223  *  OpenCL kernel objects are cached in nb. If the requested kernel is not
224  *  found in the cache, it will be created and the cache will be updated.
225  */
226 static inline cl_kernel select_nbnxn_kernel(gmx_nbnxn_ocl_t   *nb,
227                                             int                eeltype,
228                                             int                evdwtype,
229                                             bool               bDoEne,
230                                             bool               bDoPrune)
231 {
232     const char* kernel_name_to_run;
233     cl_kernel  *kernel_ptr;
234     cl_int      cl_error;
235
236     assert(eeltype  < eelOclNR);
237     assert(evdwtype < evdwOclNR);
238
239     if (bDoEne)
240     {
241         if (bDoPrune)
242         {
243             kernel_name_to_run = nb_kfunc_ener_prune_ptr[eeltype][evdwtype];
244             kernel_ptr         = &(nb->kernel_ener_prune_ptr[eeltype][evdwtype]);
245         }
246         else
247         {
248             kernel_name_to_run = nb_kfunc_ener_noprune_ptr[eeltype][evdwtype];
249             kernel_ptr         = &(nb->kernel_ener_noprune_ptr[eeltype][evdwtype]);
250         }
251     }
252     else
253     {
254         if (bDoPrune)
255         {
256             kernel_name_to_run = nb_kfunc_noener_prune_ptr[eeltype][evdwtype];
257             kernel_ptr         = &(nb->kernel_noener_prune_ptr[eeltype][evdwtype]);
258         }
259         else
260         {
261             kernel_name_to_run = nb_kfunc_noener_noprune_ptr[eeltype][evdwtype];
262             kernel_ptr         = &(nb->kernel_noener_noprune_ptr[eeltype][evdwtype]);
263         }
264     }
265
266     if (nullptr == kernel_ptr[0])
267     {
268         *kernel_ptr = clCreateKernel(nb->dev_rundata->program, kernel_name_to_run, &cl_error);
269         assert(cl_error == CL_SUCCESS);
270     }
271     // TODO: handle errors
272
273     return *kernel_ptr;
274 }
275
276 /*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use.
277  */
278 static inline int calc_shmem_required_nonbonded(int  vdwType,
279                                                 bool bPrefetchLjParam)
280 {
281     int shmem;
282
283     /* size of shmem (force-buffers/xq/atom type preloading) */
284     /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
285     /* i-atom x+q in shared memory */
286     shmem  = c_numClPerSupercl * c_clSize * sizeof(float) * 4; /* xqib */
287     /* cj in shared memory, for both warps separately
288      * TODO: in the "nowarp" kernels we load cj only once, so the factor 2 is not needed.
289      */
290     shmem += 2 * c_nbnxnGpuJgroupSize * sizeof(int);           /* cjs  */
291     if (bPrefetchLjParam)
292     {
293         if (useLjCombRule(vdwType))
294         {
295             /* i-atom LJ combination parameters in shared memory */
296             shmem += c_numClPerSupercl * c_clSize * 2*sizeof(float); /* atib abused for ljcp, float2 */
297         }
298         else
299         {
300             /* i-atom types in shared memory */
301             shmem += c_numClPerSupercl * c_clSize * sizeof(int); /* atib */
302         }
303     }
304     /* force reduction buffers in shared memory */
305     shmem += c_clSize * c_clSize * 3 * sizeof(float);    /* f_buf */
306     /* Warp vote. Strictly this should be sizeof(cl_uint) * the number of warps per work-group. */
307     shmem += sizeof(cl_uint) * 2;                        /* warp_any */
308     return shmem;
309 }
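/* For orientation only: with the usual GPU cluster setup of c_clSize = 8,
 * c_numClPerSupercl = 8 and c_nbnxnGpuJgroupSize = 4, the terms above amount to
 * 1024 B (xqib) + 32 B (cjs) + 256 B or 512 B (atib: atom types vs. LJ comb. params)
 * + 768 B (f_buf) + 8 B (warp_any), i.e. roughly 1.8-2.3 kB of local memory per
 * work-group depending on the prefetch settings. */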
310
311 /*! \brief Initializes data structures that are going to be sent to the OpenCL device.
312  *
313  *  The device can't use the same data structures as the host for two main reasons:
314  *  - OpenCL restrictions (pointers are not accepted inside data structures)
315  *  - some host side fields are not needed for the OpenCL kernels.
316  *
317  *  This function is called before the launch of both nbnxn and prune kernels.
318  */
319 static void fillin_ocl_structures(cl_nbparam_t        *nbp,
320                                   cl_nbparam_params_t *nbparams_params)
321 {
322     nbparams_params->coulomb_tab_scale = nbp->coulomb_tab_scale;
323     nbparams_params->c_rf              = nbp->c_rf;
324     nbparams_params->dispersion_shift  = nbp->dispersion_shift;
325     nbparams_params->eeltype           = nbp->eeltype;
326     nbparams_params->epsfac            = nbp->epsfac;
327     nbparams_params->ewaldcoeff_lj     = nbp->ewaldcoeff_lj;
328     nbparams_params->ewald_beta        = nbp->ewald_beta;
329     nbparams_params->rcoulomb_sq       = nbp->rcoulomb_sq;
330     nbparams_params->repulsion_shift   = nbp->repulsion_shift;
331     nbparams_params->rlistOuter_sq     = nbp->rlistOuter_sq;
332     nbparams_params->rvdw_sq           = nbp->rvdw_sq;
333     nbparams_params->rlistInner_sq     = nbp->rlistInner_sq;
334     nbparams_params->rvdw_switch       = nbp->rvdw_switch;
335     nbparams_params->sh_ewald          = nbp->sh_ewald;
336     nbparams_params->sh_lj_ewald       = nbp->sh_lj_ewald;
337     nbparams_params->two_k_rf          = nbp->two_k_rf;
338     nbparams_params->vdwtype           = nbp->vdwtype;
339     nbparams_params->vdw_switch        = nbp->vdw_switch;
340 }
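/* Note that cl_nbparam_params_t mirrors only the plain scalar/struct fields of
 * cl_nbparam_t; the cl_mem objects the latter also holds (LJ parameter and Ewald
 * coulomb tables) are not copied here but passed to the kernels as separate
 * arguments, see the prepareGpuKernelArguments() calls below. */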
341
342 /*! \brief Enqueues a wait for event completion.
343  *
344  * Then it releases the event and sets it to nullptr.
345  * Don't use this function when more than one wait will be issued for the event.
346  * Equivalent to a CUDA stream synchronization. */
347 static void sync_ocl_event(cl_command_queue stream, cl_event *ocl_event)
348 {
349     cl_int gmx_unused cl_error;
350
351     /* Enqueue wait */
352     cl_error = clEnqueueBarrierWithWaitList(stream, 1, ocl_event, nullptr);
353     GMX_RELEASE_ASSERT(CL_SUCCESS == cl_error, ocl_get_error_string(cl_error).c_str());
354
355     /* Release the event and reset it to nullptr. It is OK to release it here, as clEnqueueBarrierWithWaitList performs an implicit retain on the events in its wait list. */
356     cl_error = clReleaseEvent(*ocl_event);
357     assert(CL_SUCCESS == cl_error);
358     *ocl_event = nullptr;
359 }
360
361 /*! \brief Asynchronously launches the host-to-device copy of the xq buffer. */
362 void nbnxn_gpu_copy_xq_to_gpu(gmx_nbnxn_ocl_t        *nb,
363                               const nbnxn_atomdata_t *nbatom,
364                               int                     iloc,
365                               bool                    haveOtherWork)
366 {
367     int                  adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */
368
369     cl_atomdata_t       *adat    = nb->atdat;
370     cl_plist_t          *plist   = nb->plist[iloc];
371     cl_timers_t         *t       = nb->timers;
372     cl_command_queue     stream  = nb->stream[iloc];
373
374     bool                 bDoTime     = (nb->bDoTime) != 0;
375
376     /* Don't launch the non-local H2D copy if there is no dependent
377        work to do: neither non-local nor other (e.g. bonded) work
378        that has the nbnxn coordinates as input.
379        Doing the same for the local kernel is more complicated, since the
380        local part of the force array also depends on the non-local kernel.
381        So to avoid complicating the code and to reduce the risk of bugs,
382        we always call the local x+q copy (and the rest of the local
383        work in nbnxn_gpu_launch_kernel()).
384      */
385     if (!haveOtherWork && canSkipWork(nb, iloc))
386     {
387         plist->haveFreshList = false;
388
389         return;
390     }
391
392     /* calculate the atom data index range based on locality */
393     if (LOCAL_I(iloc))
394     {
395         adat_begin  = 0;
396         adat_len    = adat->natoms_local;
397     }
398     else
399     {
400         adat_begin  = adat->natoms_local;
401         adat_len    = adat->natoms - adat->natoms_local;
402     }
403
404     /* beginning of timed HtoD section */
405     if (bDoTime)
406     {
407         t->nb_h2d[iloc].openTimingRegion(stream);
408     }
409
410     /* HtoD x, q */
411     ocl_copy_H2D_async(adat->xq, nbatom->x().data() + adat_begin * 4, adat_begin*sizeof(float)*4,
412                        adat_len * sizeof(float) * 4, stream, bDoTime ? t->nb_h2d[iloc].fetchNextEvent() : nullptr);
413
414     if (bDoTime)
415     {
416         t->nb_h2d[iloc].closeTimingRegion(stream);
417     }
418
419     /* When we get here all misc operations issued in the local stream as well as
420        the local xq H2D are done,
421        so we record that in the local stream and wait for it in the nonlocal one. */
422     if (nb->bUseTwoStreams)
423     {
424         if (iloc == eintLocal)
425         {
426             cl_int gmx_used_in_debug cl_error = clEnqueueMarkerWithWaitList(stream, 0, nullptr, &(nb->misc_ops_and_local_H2D_done));
427             assert(CL_SUCCESS == cl_error);
428
429             /* Based on the v1.2 section 5.13 of the OpenCL spec, a flush is needed
430              * in the local stream in order to be able to sync with the above event
431              * from the non-local stream.
432              */
433             cl_error = clFlush(stream);
434             assert(CL_SUCCESS == cl_error);
435         }
436         else
437         {
438             sync_ocl_event(stream, &(nb->misc_ops_and_local_H2D_done));
439         }
440     }
441 }
442
443
444 /*! \brief Launch GPU kernel
445
446    As we execute the nonbonded workload in separate queues, before launching
447    the kernel we need to make sure that the following operations have completed:
448    - atomdata allocation and related H2D transfers (every nstlist step);
449    - pair list H2D transfer (every nstlist step);
450    - shift vector H2D transfer (every nstlist step);
451    - force (+shift force and energy) output clearing (every step).
452
453    These operations are issued in the local queue at the beginning of the step
454    and therefore always complete before the local kernel launch. The non-local
455    kernel is launched after the local on the same device/context, so this is
456    inherently scheduled after the operations in the local stream (including the
457    above "misc_ops").
458    However, for the sake of having a future-proof implementation, we use the
459    misc_ops_and_local_H2D_done event to record the point in time when the above operations
460    are finished and synchronize with this event in the non-local stream.
461  */
462 void nbnxn_gpu_launch_kernel(gmx_nbnxn_ocl_t               *nb,
463                              int                            flags,
464                              int                            iloc)
465 {
466     /* OpenCL kernel launch-related stuff */
467     cl_kernel            nb_kernel = nullptr;  /* the nonbonded kernel object to launch */
468
469     cl_atomdata_t       *adat    = nb->atdat;
470     cl_nbparam_t        *nbp     = nb->nbparam;
471     cl_plist_t          *plist   = nb->plist[iloc];
472     cl_timers_t         *t       = nb->timers;
473     cl_command_queue     stream  = nb->stream[iloc];
474
475     bool                 bCalcEner   = (flags & GMX_FORCE_ENERGY) != 0;
476     int                  bCalcFshift = flags & GMX_FORCE_VIRIAL;
477     bool                 bDoTime     = (nb->bDoTime) != 0;
478
479     cl_nbparam_params_t  nbparams_params;
480
481     /* Don't launch the non-local kernel if there is no work to do.
482        Doing the same for the local kernel is more complicated, since the
483        local part of the force array also depends on the non-local kernel.
484        So to avoid complicating the code and to reduce the risk of bugs,
485        we always call the local kernel and later (not in
486        this function) the stream wait, local f copyback and the f buffer
487        clearing. All these operations, except for the local interaction kernel,
488        are needed for the non-local interactions. The skip of the local kernel
489        call is taken care of later in this function. */
490     if (canSkipWork(nb, iloc))
491     {
492         plist->haveFreshList = false;
493
494         return;
495     }
496
497     if (nbp->useDynamicPruning && plist->haveFreshList)
498     {
499         /* Prunes for rlistOuter and rlistInner, sets plist->haveFreshList=false
500            (that's the way the timing accounting can distinguish between
501            separate prune kernel and combined force+prune).
502          */
503         nbnxn_gpu_launch_kernel_pruneonly(nb, iloc, 1);
504     }
505
506     if (plist->nsci == 0)
507     {
508         /* Don't launch an empty local kernel (this is not allowed with OpenCL).
509          */
510         return;
511     }
512
513     /* beginning of timed nonbonded calculation section */
514     if (bDoTime)
515     {
516         t->nb_k[iloc].openTimingRegion(stream);
517     }
518
519     /* get the pointer to the kernel flavor we need to use */
520     nb_kernel = select_nbnxn_kernel(nb,
521                                     nbp->eeltype,
522                                     nbp->vdwtype,
523                                     bCalcEner,
524                                     (plist->haveFreshList && !nb->timers->didPrune[iloc]));
525
526     /* kernel launch config */
527
528     KernelLaunchConfig config;
529     config.sharedMemorySize = calc_shmem_required_nonbonded(nbp->vdwtype, nb->bPrefetchLjParam);
530     config.stream           = stream;
531     config.blockSize[0]     = c_clSize;
532     config.blockSize[1]     = c_clSize;
533     config.gridSize[0]      = plist->nsci;
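    /* I.e. one c_clSize x c_clSize work-group per pair-list entry (super-cluster);
     * with the usual c_clSize of 8 that is 64 work items per group, launched on a
     * 1D grid of plist->nsci groups. */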
534
535     validate_global_work_size(config, 3, nb->dev_info);
536
537     if (debug)
538     {
539         fprintf(debug, "Non-bonded GPU launch configuration:\n\tLocal work size: %zux%zux%zu\n\t"
540                 "Global work size : %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n",
541                 config.blockSize[0], config.blockSize[1], config.blockSize[2],
542                 config.blockSize[0] * config.gridSize[0], config.blockSize[1] * config.gridSize[1], plist->nsci*c_numClPerSupercl,
543                 c_numClPerSupercl, plist->na_c);
544     }
545
546     fillin_ocl_structures(nbp, &nbparams_params);
547
548     auto          *timingEvent  = bDoTime ? t->nb_k[iloc].fetchNextEvent() : nullptr;
549     constexpr char kernelName[] = "k_calc_nb";
550     if (useLjCombRule(nb->nbparam->vdwtype))
551     {
552         const auto kernelArgs = prepareGpuKernelArguments(nb_kernel, config,
553                                                           &nbparams_params, &adat->xq, &adat->f, &adat->e_lj, &adat->e_el, &adat->fshift,
554                                                           &adat->lj_comb,
555                                                           &adat->shift_vec, &nbp->nbfp_climg2d, &nbp->nbfp_comb_climg2d, &nbp->coulomb_tab_climg2d,
556                                                           &plist->sci, &plist->cj4, &plist->excl, &bCalcFshift);
557
558         launchGpuKernel(nb_kernel, config, timingEvent, kernelName, kernelArgs);
559     }
560     else
561     {
562         const auto kernelArgs = prepareGpuKernelArguments(nb_kernel, config,
563                                                           &adat->ntypes,
564                                                           &nbparams_params, &adat->xq, &adat->f, &adat->e_lj, &adat->e_el, &adat->fshift,
565                                                           &adat->atom_types,
566                                                           &adat->shift_vec, &nbp->nbfp_climg2d, &nbp->nbfp_comb_climg2d, &nbp->coulomb_tab_climg2d,
567                                                           &plist->sci, &plist->cj4, &plist->excl, &bCalcFshift);
568         launchGpuKernel(nb_kernel, config, timingEvent, kernelName, kernelArgs);
569     }
570
571     if (bDoTime)
572     {
573         t->nb_k[iloc].closeTimingRegion(stream);
574     }
575 }
576
577
578 /*! \brief Calculates the amount of shared memory required by the prune kernel.
579  *
580  *  Note that for the sake of simplicity we use the CUDA terminology "shared memory"
581  *  for OpenCL local memory.
582  *
583  * \param[in] num_threads_z cj4 concurrency, equal to the number of threads/work items in the 3rd dimension.
584  * \returns   the amount of local memory in bytes required by the pruning kernel
585  */
586 static inline int calc_shmem_required_prune(const int num_threads_z)
587 {
588     int shmem;
589
590     /* i-atom x in shared memory (for convenience we load all 4 components including q) */
591     shmem  = c_numClPerSupercl * c_clSize * sizeof(float)*4;
592     /* cj in shared memory, for each warp separately
593      * Note: only need to load once per wavefront, but to keep the code simple,
594      * for now we load twice on AMD.
595      */
596     shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);
597     /* Warp vote, requires one uint per warp/32 threads per block. */
598     shmem += sizeof(cl_uint) * 2*num_threads_z;
599
600     return shmem;
601 }
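/* For orientation only: assuming c_clSize = 8, c_numClPerSupercl = 8,
 * c_nbnxnGpuJgroupSize = 4, c_nbnxnGpuClusterpairSplit = 2 and num_threads_z = 4,
 * this evaluates to 1024 + 128 + 32 = 1184 bytes of local memory per work-group. */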
602
603 void nbnxn_gpu_launch_kernel_pruneonly(gmx_nbnxn_gpu_t       *nb,
604                                        int                    iloc,
605                                        int                    numParts)
606 {
607     cl_atomdata_t       *adat    = nb->atdat;
608     cl_nbparam_t        *nbp     = nb->nbparam;
609     cl_plist_t          *plist   = nb->plist[iloc];
610     cl_timers_t         *t       = nb->timers;
611     cl_command_queue     stream  = nb->stream[iloc];
612     bool                 bDoTime = nb->bDoTime == CL_TRUE;
613
614     if (plist->haveFreshList)
615     {
616         GMX_ASSERT(numParts == 1, "With first pruning we expect 1 part");
617
618         /* Set rollingPruningNumParts to signal that it is not set */
619         plist->rollingPruningNumParts = 0;
620         plist->rollingPruningPart     = 0;
621     }
622     else
623     {
624         if (plist->rollingPruningNumParts == 0)
625         {
626             plist->rollingPruningNumParts = numParts;
627         }
628         else
629         {
630             GMX_ASSERT(numParts == plist->rollingPruningNumParts, "It is not allowed to change numParts in between list generation steps");
631         }
632     }
633
634     /* Use a local variable for part and update in plist, so we can return here
635      * without duplicating the part increment code.
636      */
637     int part = plist->rollingPruningPart;
638
639     plist->rollingPruningPart++;
640     if (plist->rollingPruningPart >= plist->rollingPruningNumParts)
641     {
642         plist->rollingPruningPart = 0;
643     }
644
645     /* Compute the number of list entries to prune in this pass */
646     int numSciInPart = (plist->nsci - part)/numParts;
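    /* Each rolling-prune pass therefore handles roughly nsci/numParts pair-list
     * entries; the kernel picks its subset based on the part and numParts arguments
     * passed below, and since part cycles through 0..numParts-1 on consecutive calls,
     * the whole list gets pruned once every numParts invocations. */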
647
648     /* Don't launch the kernel if there is no work to do. */
649     if (numSciInPart <= 0)
650     {
651         plist->haveFreshList = false;
652
653         return;
654     }
655
656     GpuRegionTimer *timer = nullptr;
657     if (bDoTime)
658     {
659         timer = &(plist->haveFreshList ? t->prune_k[iloc] : t->rollingPrune_k[iloc]);
660     }
661
662     /* beginning of timed prune calculation section */
663     if (bDoTime)
664     {
665         timer->openTimingRegion(stream);
666     }
667
668     /* Kernel launch config:
669      * - The thread block dimensions match the size of i-clusters, j-clusters,
670      *   and j-cluster concurrency, in x, y, and z, respectively.
671      * - The 1D block-grid contains as many blocks as super-clusters.
672      */
673     int       num_threads_z = getOclPruneKernelJ4Concurrency(nb->dev_info->vendor_e);
674     cl_kernel pruneKernel   = selectPruneKernel(nb->kernel_pruneonly, plist->haveFreshList);
675
676     /* kernel launch config */
677     KernelLaunchConfig config;
678     config.sharedMemorySize = calc_shmem_required_prune(num_threads_z);
679     config.stream           = stream;
680     config.blockSize[0]     = c_clSize;
681     config.blockSize[1]     = c_clSize;
682     config.blockSize[2]     = num_threads_z;
683     config.gridSize[0]      = numSciInPart;
684
685     validate_global_work_size(config, 3, nb->dev_info);
686
687     if (debug)
688     {
689         fprintf(debug, "Pruning GPU kernel launch configuration:\n\tLocal work size: %zux%zux%zu\n\t"
690                 "\tGlobal work size: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n"
691                 "\tShMem: %zu\n",
692                 config.blockSize[0], config.blockSize[1], config.blockSize[2],
693                 config.blockSize[0] * config.gridSize[0], config.blockSize[1] * config.gridSize[1], plist->nsci*c_numClPerSupercl,
694                 c_numClPerSupercl, plist->na_c, config.sharedMemorySize);
695     }
696
697     cl_nbparam_params_t  nbparams_params;
698     fillin_ocl_structures(nbp, &nbparams_params);
699
700     auto          *timingEvent  = bDoTime ? timer->fetchNextEvent() : nullptr;
701     constexpr char kernelName[] = "k_pruneonly";
702     const auto     kernelArgs   = prepareGpuKernelArguments(pruneKernel, config,
703                                                             &nbparams_params, &adat->xq, &adat->shift_vec,
704                                                             &plist->sci, &plist->cj4, &plist->imask, &numParts, &part);
705     launchGpuKernel(pruneKernel, config, timingEvent, kernelName, kernelArgs);
706
707     if (plist->haveFreshList)
708     {
709         plist->haveFreshList         = false;
710         /* Mark that pruning has been done */
711         nb->timers->didPrune[iloc] = true;
712     }
713     else
714     {
715         /* Mark that rolling pruning has been done */
716         nb->timers->didRollingPrune[iloc] = true;
717     }
718
719     if (bDoTime)
720     {
721         timer->closeTimingRegion(stream);
722     }
723 }
724
725 /*! \brief
726  * Asynchronously launches the download of nonbonded forces from the GPU
727  * (and energies/shift forces if required).
728  */
729 void nbnxn_gpu_launch_cpyback(gmx_nbnxn_ocl_t               *nb,
730                               struct nbnxn_atomdata_t       *nbatom,
731                               int                            flags,
732                               int                            aloc,
733                               bool                           haveOtherWork)
734 {
735     cl_int gmx_unused cl_error;
736     int               adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */
737
738     /* determine interaction locality from atom locality */
739     int              iloc = gpuAtomToInteractionLocality(aloc);
740
741     cl_atomdata_t   *adat    = nb->atdat;
742     cl_timers_t     *t       = nb->timers;
743     bool             bDoTime = nb->bDoTime == CL_TRUE;
744     cl_command_queue stream  = nb->stream[iloc];
745
746     bool             bCalcEner   = (flags & GMX_FORCE_ENERGY) != 0;
747     int              bCalcFshift = flags & GMX_FORCE_VIRIAL;
748
749
750     /* don't launch non-local copy-back if there was no non-local work to do */
751     if (!haveOtherWork && canSkipWork(nb, iloc))
752     {
753         /* TODO An alternative way to signal that non-local work is
754            complete is to use a clEnqueueMarker+clEnqueueBarrier
755            pair. However, the use of bNonLocalStreamActive has the
756            advantage of being local to the host, so probably minimizes
757            overhead. Curiously, for NVIDIA OpenCL with an empty-domain
758            test case, overall simulation performance was higher with
759            the API calls, but this has not been tested on AMD OpenCL,
760            so could be worth considering in future. */
761         nb->bNonLocalStreamActive = CL_FALSE;
762         return;
763     }
764
765     getGpuAtomRange(adat, aloc, &adat_begin, &adat_len);
766
767     /* beginning of timed D2H section */
768     if (bDoTime)
769     {
770         t->nb_d2h[iloc].openTimingRegion(stream);
771     }
772
773     /* With DD the local D2H transfer can only start after the non-local
774        has been launched. */
775     if (iloc == eintLocal && nb->bNonLocalStreamActive)
776     {
777         sync_ocl_event(stream, &(nb->nonlocal_done));
778     }
779
780     /* DtoH f */
781     ocl_copy_D2H_async(nbatom->out[0].f.data() + adat_begin * 3, adat->f, adat_begin*3*sizeof(float),
782                        (adat_len)* adat->f_elem_size, stream, bDoTime ? t->nb_d2h[iloc].fetchNextEvent() : nullptr);
783
784     /* kick off work */
785     cl_error = clFlush(stream);
786     assert(CL_SUCCESS == cl_error);
787
788     /* After the non-local D2H is launched the nonlocal_done event can be
789        recorded which signals that the local D2H can proceed. This event is not
790        placed after the non-local kernel because we need the non-local
791        data back first. */
792     if (iloc == eintNonlocal)
793     {
794         cl_error = clEnqueueMarkerWithWaitList(stream, 0, nullptr, &(nb->nonlocal_done));
795         assert(CL_SUCCESS == cl_error);
796         nb->bNonLocalStreamActive = CL_TRUE;
797     }
798
799     /* only transfer energies in the local stream */
800     if (LOCAL_I(iloc))
801     {
802         /* DtoH fshift */
803         if (bCalcFshift)
804         {
805             ocl_copy_D2H_async(nb->nbst.fshift, adat->fshift, 0,
806                                SHIFTS * adat->fshift_elem_size, stream, bDoTime ? t->nb_d2h[iloc].fetchNextEvent() : nullptr);
807         }
808
809         /* DtoH energies */
810         if (bCalcEner)
811         {
812             ocl_copy_D2H_async(nb->nbst.e_lj, adat->e_lj, 0,
813                                sizeof(float), stream, bDoTime ? t->nb_d2h[iloc].fetchNextEvent() : nullptr);
814
815             ocl_copy_D2H_async(nb->nbst.e_el, adat->e_el, 0,
816                                sizeof(float), stream, bDoTime ? t->nb_d2h[iloc].fetchNextEvent() : nullptr);
817         }
818     }
819
820     if (bDoTime)
821     {
822         t->nb_d2h[iloc].closeTimingRegion(stream);
823     }
824 }
825
826
827 /*! \brief Selects the Ewald kernel type, analytical or tabulated, single or twin cut-off. */
828 int nbnxn_gpu_pick_ewald_kernel_type(bool bTwinCut)
829 {
830     bool bUseAnalyticalEwald, bForceAnalyticalEwald, bForceTabulatedEwald;
831     int  kernel_type;
832
833     /* Benchmarking/development environment variables to force the use of
834        analytical or tabulated Ewald kernel. */
835     bForceAnalyticalEwald = (getenv("GMX_OCL_NB_ANA_EWALD") != nullptr);
836     bForceTabulatedEwald  = (getenv("GMX_OCL_NB_TAB_EWALD") != nullptr);
837
838     if (bForceAnalyticalEwald && bForceTabulatedEwald)
839     {
840         gmx_incons("Both analytical and tabulated Ewald OpenCL non-bonded kernels "
841                    "requested through environment variables.");
842     }
843
844     /* OpenCL: By default, use analytical Ewald
845      * TODO: tabulated does not work, it needs fixing, see init_nbparam() in nbnxn_ocl_data_mgmt.cpp
846      *
847      * TODO: decide if dev_info parameter should be added to recognize NVIDIA CC>=3.0 devices.
848      *
849      */
850     /* By default use analytical Ewald. */
851     bUseAnalyticalEwald = true;
852     if (bForceAnalyticalEwald)
853     {
854         if (debug)
855         {
856             fprintf(debug, "Using analytical Ewald OpenCL kernels\n");
857         }
858     }
859     else if (bForceTabulatedEwald)
860     {
861         bUseAnalyticalEwald = false;
862
863         if (debug)
864         {
865             fprintf(debug, "Using tabulated Ewald OpenCL kernels\n");
866         }
867     }
868
869     /* Use twin cut-off kernels if requested by bTwinCut or the env. var.
870        forces it (use it for debugging/benchmarking only). */
871     if (!bTwinCut && (getenv("GMX_OCL_NB_EWALD_TWINCUT") == nullptr))
872     {
873         kernel_type = bUseAnalyticalEwald ? eelOclEWALD_ANA : eelOclEWALD_TAB;
874     }
875     else
876     {
877         kernel_type = bUseAnalyticalEwald ? eelOclEWALD_ANA_TWIN : eelOclEWALD_TAB_TWIN;
878     }
879
880     return kernel_type;
881 }