1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2012,2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35 /*! \internal \file
36  *  \brief Define OpenCL implementation of nbnxm_gpu.h
37  *
38  *  \author Anca Hamuraru <anca@streamcomputing.eu>
39  *  \author Teemu Virolainen <teemu@streamcomputing.eu>
40  *  \author Dimitrios Karkoulis <dimitris.karkoulis@gmail.com>
41  *  \author Szilárd Páll <pall.szilard@gmail.com>
42  *  \ingroup module_nbnxm
43  *
44  *  TODO (psz):
45  *  - Add a static const cl_uint c_pruneKernelWorkDim / c_nbnxnKernelWorkDim = 3;
46  *  - Rework the copying of OCL data structures done before every invocation of both
47  *    nb and prune kernels (using fillin_ocl_structures); also consider at the same
48  *    time calling clSetKernelArg only on the updated parameters (if tracking changed
49  *    parameters is feasible);
50  *  - Consider using the event_wait_list argument to clEnqueueNDRangeKernel to mark
51  *    dependencies of the kernel launched: e.g. the non-local nb kernel's dependency
52  *    on the misc_ops_and_local_H2D_done event could be better expressed this way.
53  *
54  *  - Consider extracting common sections of the OpenCL and CUDA nbnxn logic, e.g:
55  *    - in nbnxn_gpu_launch_kernel_pruneonly() the pre- and post-kernel launch logic
56  *      is identical in the two implementations, so a 3-way split might allow sharing
57  *      code;
58  *
59  *
60  */
61 #include "gmxpre.h"
62
63 #include <assert.h>
64 #include <stdlib.h>
65
66 #if defined(_MSVC)
67 #include <limits>
68 #endif
69
70 #include "thread_mpi/atomic.h"
71
72 #include "gromacs/gpu_utils/gputraits_ocl.h"
73 #include "gromacs/gpu_utils/oclutils.h"
74 #include "gromacs/hardware/hw_info.h"
75 #include "gromacs/mdlib/force_flags.h"
76 #include "gromacs/nbnxm/gpu_common.h"
77 #include "gromacs/nbnxm/gpu_common_utils.h"
78 #include "gromacs/nbnxm/gpu_data_mgmt.h"
79 #include "gromacs/nbnxm/nbnxm.h"
80 #include "gromacs/nbnxm/nbnxm_gpu.h"
81 #include "gromacs/nbnxm/pairlist.h"
82 #include "gromacs/pbcutil/ishift.h"
83 #include "gromacs/timing/gpu_timing.h"
84 #include "gromacs/utility/cstringutil.h"
85 #include "gromacs/utility/fatalerror.h"
86 #include "gromacs/utility/gmxassert.h"
87
88 #include "nbnxm_ocl_internal.h"
89 #include "nbnxm_ocl_types.h"
90
91 namespace Nbnxm
92 {
93
94 /*! \brief Convenience constants */
95 //@{
96 static const int c_numClPerSupercl = c_nbnxnGpuNumClusterPerSupercluster;
97 static const int c_clSize          = c_nbnxnGpuClusterSize;
98 //@}
99
100
101 /*! \brief Validates the input global work size parameter.
102  */
103 static inline void validate_global_work_size(const KernelLaunchConfig &config, int work_dim, const gmx_device_info_t *dinfo)
104 {
105     cl_uint device_size_t_size_bits;
106     cl_uint host_size_t_size_bits;
107
108     assert(dinfo);
109
110     size_t global_work_size[3];
111     GMX_ASSERT(work_dim <= 3, "Not supporting hyper-grids just yet");
112     for (int i = 0; i < work_dim; i++)
113     {
114         global_work_size[i] = config.blockSize[i] * config.gridSize[i];
115     }
116
117     /* Each component of a global_work_size must not exceed the range given by the
118        sizeof(device size_t) for the device on which the kernel execution will
119        be enqueued. See:
120        https://www.khronos.org/registry/cl/sdk/1.0/docs/man/xhtml/clEnqueueNDRangeKernel.html
121      */
122     device_size_t_size_bits = dinfo->adress_bits;
123     host_size_t_size_bits   = static_cast<cl_uint>(sizeof(size_t) * 8);
124
125     /* If sizeof(host size_t) <= sizeof(device size_t)
126             => global_work_size components will always be valid
127        else
128             => get device limit for global work size and
129             compare it against each component of global_work_size.
130      */
131     if (host_size_t_size_bits > device_size_t_size_bits)
132     {
133         size_t device_limit;
134
135         device_limit = (1ull << device_size_t_size_bits) - 1;
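        /* For example, a device with a 32-bit size_t yields
         * device_limit = 2^32 - 1, i.e. roughly 4.3e9 work items per dimension. */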
136
137         for (int i = 0; i < work_dim; i++)
138         {
139             if (global_work_size[i] > device_limit)
140             {
141                 gmx_fatal(FARGS, "Watch out, the input system is too large to simulate!\n"
142                           "The number of nonbonded work units (=number of super-clusters) exceeds the "
143                           "device capabilities. Global work size limit exceeded (%zu > %zu)!",
144                           global_work_size[i], device_limit);
145             }
146         }
147     }
148 }
149
150 /* Constant arrays listing the non-bonded kernel function names. The arrays are
151  * organized as two-dimensional arrays indexed by electrostatics and VDW type.
152  *
153  *  Note that the row- and column-order of the kernel names has to match the
154  *  order of the corresponding enumerated electrostatics and vdw types, resp.,
155  *  defined in nbnxm_ocl_types.h.
156  */
157
158 /*! \brief Force-only kernel function names. */
159 static const char* nb_kfunc_noener_noprune_ptr[eelOclNR][evdwOclNR] =
160 {
161     { "nbnxn_kernel_ElecCut_VdwLJ_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombLB_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJFsw_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJPsw_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_opencl"            },
162     { "nbnxn_kernel_ElecRF_VdwLJ_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombLB_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJFsw_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJPsw_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_opencl"             },
163     { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_opencl"        },
164     { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_opencl" },
165     { "nbnxn_kernel_ElecEw_VdwLJ_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombLB_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJFsw_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJPsw_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_opencl"             },
166     { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_opencl"      }
167 };
168
169 /*! \brief Force + energy kernel function names. */
170 static const char* nb_kfunc_ener_noprune_ptr[eelOclNR][evdwOclNR] =
171 {
172     { "nbnxn_kernel_ElecCut_VdwLJ_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJFsw_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJPsw_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_opencl"            },
173     { "nbnxn_kernel_ElecRF_VdwLJ_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJFsw_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJPsw_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_opencl"             },
174     { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_opencl"        },
175     { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_opencl" },
176     { "nbnxn_kernel_ElecEw_VdwLJ_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJFsw_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJPsw_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_opencl"             },
177     { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_opencl"      }
178 };
179
180 /*! \brief Force + pruning kernel function names. */
181 static const char* nb_kfunc_noener_prune_ptr[eelOclNR][evdwOclNR] =
182 {
183     { "nbnxn_kernel_ElecCut_VdwLJ_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_opencl"             },
184     { "nbnxn_kernel_ElecRF_VdwLJ_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_opencl"              },
185     { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_opencl"         },
186     { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_opencl"  },
187     { "nbnxn_kernel_ElecEw_VdwLJ_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_opencl"              },
188     { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_opencl"       }
189 };
190
191 /*! \brief Force + energy + pruning kernel function names. */
192 static const char* nb_kfunc_ener_prune_ptr[eelOclNR][evdwOclNR] =
193 {
194     { "nbnxn_kernel_ElecCut_VdwLJ_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_opencl"            },
195     { "nbnxn_kernel_ElecRF_VdwLJ_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_opencl"             },
196     { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_opencl"        },
197     { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_opencl" },
198     { "nbnxn_kernel_ElecEw_VdwLJ_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_opencl"             },
199     { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_opencl"      }
200 };
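/* Example of how the tables above are indexed (a sketch assuming the enum order
 * defined in nbnxm_ocl_types.h, i.e. eelOclEWALD_ANA selecting the analytical
 * Ewald row and column 0 the plain LJ cut-off):
 *   nb_kfunc_noener_noprune_ptr[eelOclEWALD_ANA][0] == "nbnxn_kernel_ElecEw_VdwLJ_F_opencl"
 */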
201
202 /*! \brief Return the prune kernel to be executed at the current invocation.
203  *
204  * \param[in] kernel_pruneonly  array of prune kernel objects
205  * \param[in] firstPrunePass    true if the first pruning pass is being executed
206  */
207 static inline cl_kernel selectPruneKernel(cl_kernel kernel_pruneonly[],
208                                           bool      firstPrunePass)
209 {
210     cl_kernel  *kernelPtr;
211
212     if (firstPrunePass)
213     {
214         kernelPtr = &(kernel_pruneonly[epruneFirst]);
215     }
216     else
217     {
218         kernelPtr = &(kernel_pruneonly[epruneRolling]);
219     }
220     // TODO: consider creating the prune kernel object here to avoid a
221     // clCreateKernel for the rolling prune kernel if this is not needed.
222     return *kernelPtr;
223 }
224
225 /*! \brief Return the nonbonded kernel flavor to be executed at the current step.
226  *  OpenCL kernel objects are cached in nb. If the requested kernel is not
227  *  found in the cache, it will be created and the cache will be updated.
228  */
229 static inline cl_kernel select_nbnxn_kernel(gmx_nbnxn_ocl_t   *nb,
230                                             int                eeltype,
231                                             int                evdwtype,
232                                             bool               bDoEne,
233                                             bool               bDoPrune)
234 {
235     const char* kernel_name_to_run;
236     cl_kernel  *kernel_ptr;
237     cl_int      cl_error;
238
239     assert(eeltype  < eelOclNR);
240     assert(evdwtype < evdwOclNR);
241
242     if (bDoEne)
243     {
244         if (bDoPrune)
245         {
246             kernel_name_to_run = nb_kfunc_ener_prune_ptr[eeltype][evdwtype];
247             kernel_ptr         = &(nb->kernel_ener_prune_ptr[eeltype][evdwtype]);
248         }
249         else
250         {
251             kernel_name_to_run = nb_kfunc_ener_noprune_ptr[eeltype][evdwtype];
252             kernel_ptr         = &(nb->kernel_ener_noprune_ptr[eeltype][evdwtype]);
253         }
254     }
255     else
256     {
257         if (bDoPrune)
258         {
259             kernel_name_to_run = nb_kfunc_noener_prune_ptr[eeltype][evdwtype];
260             kernel_ptr         = &(nb->kernel_noener_prune_ptr[eeltype][evdwtype]);
261         }
262         else
263         {
264             kernel_name_to_run = nb_kfunc_noener_noprune_ptr[eeltype][evdwtype];
265             kernel_ptr         = &(nb->kernel_noener_noprune_ptr[eeltype][evdwtype]);
266         }
267     }
268
269     if (nullptr == kernel_ptr[0])
270     {
271         *kernel_ptr = clCreateKernel(nb->dev_rundata->program, kernel_name_to_run, &cl_error);
272         assert(cl_error == CL_SUCCESS);
273     }
274     // TODO: handle errors
275
276     return *kernel_ptr;
277 }
278
279 /*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use.
280  */
281 static inline int calc_shmem_required_nonbonded(int  vdwType,
282                                                 bool bPrefetchLjParam)
283 {
284     int shmem;
285
286     /* size of shmem (force-buffers/xq/atom type preloading) */
287     /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
288     /* i-atom x+q in shared memory */
289     shmem  = c_numClPerSupercl * c_clSize * sizeof(float) * 4; /* xqib */
290     /* cj in shared memory, for both warps separately
291      * TODO: in the "nowarp" kernels we load cj only once, so the factor 2 is not needed.
292      */
293     shmem += 2 * c_nbnxnGpuJgroupSize * sizeof(int);           /* cjs  */
294     if (bPrefetchLjParam)
295     {
296         if (useLjCombRule(vdwType))
297         {
298             /* i-atom LJ combination parameters in shared memory */
299             shmem += c_numClPerSupercl * c_clSize * 2*sizeof(float); /* atib abused for ljcp, float2 */
300         }
301         else
302         {
303             /* i-atom types in shared memory */
304             shmem += c_numClPerSupercl * c_clSize * sizeof(int); /* atib */
305         }
306     }
307     /* force reduction buffers in shared memory */
308     shmem += c_clSize * c_clSize * 3 * sizeof(float);    /* f_buf */
309     /* Warp vote; strictly this should be sizeof(cl_uint) times the number of warps per block. */
310     shmem += sizeof(cl_uint) * 2;                        /* warp_any */
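    /* Worked example (a sketch assuming the usual GPU cluster geometry of
     * c_clSize == 8, c_numClPerSupercl == 8 and c_nbnxnGpuJgroupSize == 4;
     * see the pairlist headers for the actual values):
     *   xqib     8*8*4*sizeof(float) = 1024 B
     *   cjs      2*4*sizeof(int)     =   32 B
     *   f_buf    8*8*3*sizeof(float) =  768 B
     *   warp_any 2*sizeof(cl_uint)   =    8 B
     * i.e. ~1.8 kB without LJ-parameter prefetching, well within typical
     * OpenCL local memory limits.
     */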
311     return shmem;
312 }
313
314 /*! \brief Initializes data structures that are going to be sent to the OpenCL device.
315  *
316  *  The device can't use the same data structures as the host for two main reasons:
317  *  - OpenCL restrictions (pointers are not accepted inside data structures)
318  *  - some host side fields are not needed for the OpenCL kernels.
319  *
320  *  This function is called before the launch of both nbnxn and prune kernels.
321  */
322 static void fillin_ocl_structures(cl_nbparam_t        *nbp,
323                                   cl_nbparam_params_t *nbparams_params)
324 {
325     nbparams_params->coulomb_tab_scale = nbp->coulomb_tab_scale;
326     nbparams_params->c_rf              = nbp->c_rf;
327     nbparams_params->dispersion_shift  = nbp->dispersion_shift;
328     nbparams_params->eeltype           = nbp->eeltype;
329     nbparams_params->epsfac            = nbp->epsfac;
330     nbparams_params->ewaldcoeff_lj     = nbp->ewaldcoeff_lj;
331     nbparams_params->ewald_beta        = nbp->ewald_beta;
332     nbparams_params->rcoulomb_sq       = nbp->rcoulomb_sq;
333     nbparams_params->repulsion_shift   = nbp->repulsion_shift;
334     nbparams_params->rlistOuter_sq     = nbp->rlistOuter_sq;
335     nbparams_params->rvdw_sq           = nbp->rvdw_sq;
336     nbparams_params->rlistInner_sq     = nbp->rlistInner_sq;
337     nbparams_params->rvdw_switch       = nbp->rvdw_switch;
338     nbparams_params->sh_ewald          = nbp->sh_ewald;
339     nbparams_params->sh_lj_ewald       = nbp->sh_lj_ewald;
340     nbparams_params->two_k_rf          = nbp->two_k_rf;
341     nbparams_params->vdwtype           = nbp->vdwtype;
342     nbparams_params->vdw_switch        = nbp->vdw_switch;
343 }
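/* Note: cl_nbparam_params_t is a flat, pointer-free copy of the parameters, so
 * it can be passed by value as a single kernel argument (see the
 * prepareGpuKernelArguments() calls below that pass &nbparams_params). */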
344
345 /*! \brief Enqueues a wait for event completion.
346  *
347  * Then it releases the event and sets it to nullptr.
348  * Don't use this function when more than one wait will be issued for the event.
349  * Analogous to a CUDA stream synchronization. */
350 static void sync_ocl_event(cl_command_queue stream, cl_event *ocl_event)
351 {
352     cl_int gmx_unused cl_error;
353
354     /* Enqueue wait */
355     cl_error = clEnqueueBarrierWithWaitList(stream, 1, ocl_event, nullptr);
356     GMX_RELEASE_ASSERT(CL_SUCCESS == cl_error, ocl_get_error_string(cl_error).c_str());
357
358     /* Release the event and reset it to nullptr. It is OK to release it here, as clEnqueueBarrierWithWaitList performs an implicit retain on the events in its wait list. */
359     cl_error = clReleaseEvent(*ocl_event);
360     assert(CL_SUCCESS == cl_error);
361     *ocl_event = nullptr;
362 }
363
364 /*! \brief Asynchronously launches the host-to-device copy of the xq buffer. */
365 void gpu_copy_xq_to_gpu(gmx_nbnxn_ocl_t        *nb,
366                         const nbnxn_atomdata_t *nbatom,
367                         const AtomLocality      atomLocality,
368                         const bool              haveOtherWork)
369 {
370     const InteractionLocality iloc = gpuAtomToInteractionLocality(atomLocality);
371
372     /* local/nonlocal offset and length used for xq and f */
373     int                  adat_begin, adat_len;
374
375     cl_atomdata_t       *adat    = nb->atdat;
376     cl_plist_t          *plist   = nb->plist[iloc];
377     cl_timers_t         *t       = nb->timers;
378     cl_command_queue     stream  = nb->stream[iloc];
379
380     bool                 bDoTime = (nb->bDoTime) != 0;
381
382     /* Don't launch the non-local H2D copy if there is no dependent
383        work to do: neither non-local nor other (e.g. bonded) work
384        to do that has as input the nbnxn coordinates.
385        Doing the same for the local kernel is more complicated, since the
386        local part of the force array also depends on the non-local kernel.
387        So to avoid complicating the code and to reduce the risk of bugs,
388        we always call the local x+q copy (and the rest of the local
389        work in nbnxn_gpu_launch_kernel()).
390      */
391     if (!haveOtherWork && canSkipWork(*nb, iloc))
392     {
393         plist->haveFreshList = false;
394
395         return;
396     }
397
398     /* calculate the atom data index range based on locality */
399     if (atomLocality == AtomLocality::Local)
400     {
401         adat_begin  = 0;
402         adat_len    = adat->natoms_local;
403     }
404     else
405     {
406         adat_begin  = adat->natoms_local;
407         adat_len    = adat->natoms - adat->natoms_local;
408     }
409
410     /* beginning of timed HtoD section */
411     if (bDoTime)
412     {
413         t->xf[atomLocality].nb_h2d.openTimingRegion(stream);
414     }
415
416     /* HtoD x, q */
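    /* (each atom's x, y, z and q are stored as four consecutive floats, hence
     *  the factor 4 in the offset and size below) */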
417     ocl_copy_H2D_async(adat->xq, nbatom->x().data() + adat_begin * 4, adat_begin*sizeof(float)*4,
418                        adat_len * sizeof(float) * 4, stream, bDoTime ? t->xf[atomLocality].nb_h2d.fetchNextEvent() : nullptr);
419
420     if (bDoTime)
421     {
422         t->xf[atomLocality].nb_h2d.closeTimingRegion(stream);
423     }
424
425     /* When we get here, all misc operations issued in the local stream as well as
426        the local xq H2D copy are done,
427        so we record that in the local stream and wait for it in the non-local one. */
428     if (nb->bUseTwoStreams)
429     {
430         if (iloc == InteractionLocality::Local)
431         {
432             cl_int gmx_used_in_debug cl_error = clEnqueueMarkerWithWaitList(stream, 0, nullptr, &(nb->misc_ops_and_local_H2D_done));
433             assert(CL_SUCCESS == cl_error);
434
435             /* Based on the v1.2 section 5.13 of the OpenCL spec, a flush is needed
436              * in the local stream in order to be able to sync with the above event
437              * from the non-local stream.
438              */
439             cl_error = clFlush(stream);
440             assert(CL_SUCCESS == cl_error);
441         }
442         else
443         {
444             sync_ocl_event(stream, &(nb->misc_ops_and_local_H2D_done));
445         }
446     }
447 }
448
449
450 /*! \brief Launch GPU kernel
451
452    As we execute nonbonded workload in separate queues, before launching
453    the kernel we need to make sure that the following operations have completed:
454    - atomdata allocation and related H2D transfers (every nstlist step);
455    - pair list H2D transfer (every nstlist step);
456    - shift vector H2D transfer (every nstlist step);
457    - force (+shift force and energy) output clearing (every step).
458
459    These operations are issued in the local queue at the beginning of the step
460    and therefore always complete before the local kernel launch. The non-local
461    kernel is launched after the local on the same device/context, so this is
462    inherently scheduled after the operations in the local stream (including the
463    above "misc_ops").
464    However, for the sake of having a future-proof implementation, we use the
465    misc_ops_and_local_H2D_done event to record the point in time when the above
466    operations are finished and synchronize with this event in the non-local stream.
467  */
468 void gpu_launch_kernel(gmx_nbnxn_ocl_t                  *nb,
469                        const int                         flags,
470                        const Nbnxm::InteractionLocality  iloc)
471 {
472     /* OpenCL kernel launch-related stuff */
473     cl_kernel            nb_kernel = nullptr;  /* kernel object for the nonbonded kernel flavor in use */
474
475     cl_atomdata_t       *adat    = nb->atdat;
476     cl_nbparam_t        *nbp     = nb->nbparam;
477     cl_plist_t          *plist   = nb->plist[iloc];
478     cl_timers_t         *t       = nb->timers;
479     cl_command_queue     stream  = nb->stream[iloc];
480
481     bool                 bCalcEner   = (flags & GMX_FORCE_ENERGY) != 0;
482     int                  bCalcFshift = flags & GMX_FORCE_VIRIAL;
483     bool                 bDoTime     = (nb->bDoTime) != 0;
484
485     cl_nbparam_params_t  nbparams_params;
486
487     /* Don't launch the non-local kernel if there is no work to do.
488        Doing the same for the local kernel is more complicated, since the
489        local part of the force array also depends on the non-local kernel.
490        So to avoid complicating the code and to reduce the risk of bugs,
491        we always call the local kernel and later (not in
492        this function) the stream wait, local f copyback and the f buffer
493        clearing. All these operations, except for the local interaction kernel,
494        are needed for the non-local interactions. The skip of the local kernel
495        call is taken care of later in this function. */
496     if (canSkipWork(*nb, iloc))
497     {
498         plist->haveFreshList = false;
499
500         return;
501     }
502
503     if (nbp->useDynamicPruning && plist->haveFreshList)
504     {
505         /* Prunes for rlistOuter and rlistInner, sets plist->haveFreshList=false
506            (that's the way the timing accounting can distinguish between
507            separate prune kernel and combined force+prune).
508          */
509         Nbnxm::gpu_launch_kernel_pruneonly(nb, iloc, 1);
510     }
511
512     if (plist->nsci == 0)
513     {
514         /* Don't launch an empty local kernel (this is not allowed with OpenCL).
515          */
516         return;
517     }
518
519     /* beginning of timed nonbonded calculation section */
520     if (bDoTime)
521     {
522         t->interaction[iloc].nb_k.openTimingRegion(stream);
523     }
524
525     /* get the pointer to the kernel flavor we need to use */
526     nb_kernel = select_nbnxn_kernel(nb,
527                                     nbp->eeltype,
528                                     nbp->vdwtype,
529                                     bCalcEner,
530                                     (plist->haveFreshList && !nb->timers->interaction[iloc].didPrune));
531
532     /* kernel launch config */
533
534     KernelLaunchConfig config;
535     config.sharedMemorySize = calc_shmem_required_nonbonded(nbp->vdwtype, nb->bPrefetchLjParam);
536     config.stream           = stream;
537     config.blockSize[0]     = c_clSize;
538     config.blockSize[1]     = c_clSize;
539     config.gridSize[0]      = plist->nsci;
540
541     validate_global_work_size(config, 3, nb->dev_info);
542
543     if (debug)
544     {
545         fprintf(debug, "Non-bonded GPU launch configuration:\n\tLocal work size: %zux%zux%zu\n\t"
546                 "Global work size : %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n",
547                 config.blockSize[0], config.blockSize[1], config.blockSize[2],
548                 config.blockSize[0] * config.gridSize[0], config.blockSize[1] * config.gridSize[1], plist->nsci*c_numClPerSupercl,
549                 c_numClPerSupercl, plist->na_c);
550     }
551
552     fillin_ocl_structures(nbp, &nbparams_params);
553
554     auto          *timingEvent  = bDoTime ? t->interaction[iloc].nb_k.fetchNextEvent() : nullptr;
555     constexpr char kernelName[] = "k_calc_nb";
556     if (useLjCombRule(nb->nbparam->vdwtype))
557     {
558         const auto kernelArgs = prepareGpuKernelArguments(nb_kernel, config,
559                                                           &nbparams_params, &adat->xq, &adat->f, &adat->e_lj, &adat->e_el, &adat->fshift,
560                                                           &adat->lj_comb,
561                                                           &adat->shift_vec, &nbp->nbfp_climg2d, &nbp->nbfp_comb_climg2d, &nbp->coulomb_tab_climg2d,
562                                                           &plist->sci, &plist->cj4, &plist->excl, &bCalcFshift);
563
564         launchGpuKernel(nb_kernel, config, timingEvent, kernelName, kernelArgs);
565     }
566     else
567     {
568         const auto kernelArgs = prepareGpuKernelArguments(nb_kernel, config,
569                                                           &adat->ntypes,
570                                                           &nbparams_params, &adat->xq, &adat->f, &adat->e_lj, &adat->e_el, &adat->fshift,
571                                                           &adat->atom_types,
572                                                           &adat->shift_vec, &nbp->nbfp_climg2d, &nbp->nbfp_comb_climg2d, &nbp->coulomb_tab_climg2d,
573                                                           &plist->sci, &plist->cj4, &plist->excl, &bCalcFshift);
574         launchGpuKernel(nb_kernel, config, timingEvent, kernelName, kernelArgs);
575     }
576
577     if (bDoTime)
578     {
579         t->interaction[iloc].nb_k.closeTimingRegion(stream);
580     }
581 }
582
583
584 /*! \brief Calculates the amount of shared memory required by the prune kernel.
585  *
586  *  Note that for the sake of simplicity we use the CUDA terminology "shared memory"
587  *  for OpenCL local memory.
588  *
589  * \param[in] num_threads_z cj4 concurrency, equal to the number of threads/work items in the 3rd dimension.
590  * \returns   the amount of local memory in bytes required by the pruning kernel
591  */
592 static inline int calc_shmem_required_prune(const int num_threads_z)
593 {
594     int shmem;
595
596     /* i-atom x in shared memory (for convenience we load all 4 components including q) */
597     shmem  = c_numClPerSupercl * c_clSize * sizeof(float)*4;
598     /* cj in shared memory, for each warp separately
599      * Note: only need to load once per wavefront, but to keep the code simple,
600      * for now we load twice on AMD.
601      */
602     shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);
603     /* Warp vote, requires one uint per warp/32 threads per block. */
604     shmem += sizeof(cl_uint) * 2*num_threads_z;
605
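    /* Worked example (a sketch assuming c_clSize == 8, c_numClPerSupercl == 8,
     * c_nbnxnGpuJgroupSize == 4, c_nbnxnGpuClusterpairSplit == 2 and, e.g.,
     * num_threads_z == 4; see the pairlist headers for the actual values):
     *   xq       8*8*4*sizeof(float) = 1024 B
     *   cj       4*2*4*sizeof(int)   =  128 B
     *   warp_any 2*4*sizeof(cl_uint) =   32 B
     * i.e. ~1.2 kB of local memory.
     */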
606     return shmem;
607 }
608
609 void gpu_launch_kernel_pruneonly(gmx_nbnxn_gpu_t           *nb,
610                                  const InteractionLocality  iloc,
611                                  const int                  numParts)
612 {
613     cl_atomdata_t       *adat    = nb->atdat;
614     cl_nbparam_t        *nbp     = nb->nbparam;
615     cl_plist_t          *plist   = nb->plist[iloc];
616     cl_timers_t         *t       = nb->timers;
617     cl_command_queue     stream  = nb->stream[iloc];
618     bool                 bDoTime = nb->bDoTime == CL_TRUE;
619
620     if (plist->haveFreshList)
621     {
622         GMX_ASSERT(numParts == 1, "With first pruning we expect 1 part");
623
624         /* Set rollingPruningNumParts to signal that it is not set */
625         plist->rollingPruningNumParts = 0;
626         plist->rollingPruningPart     = 0;
627     }
628     else
629     {
630         if (plist->rollingPruningNumParts == 0)
631         {
632             plist->rollingPruningNumParts = numParts;
633         }
634         else
635         {
636             GMX_ASSERT(numParts == plist->rollingPruningNumParts, "It is not allowed to change numParts in between list generation steps");
637         }
638     }
639
640     /* Use a local variable for part and update it in plist, so we can return here
641      * without duplicating the part increment code.
642      */
643     int part = plist->rollingPruningPart;
644
645     plist->rollingPruningPart++;
646     if (plist->rollingPruningPart >= plist->rollingPruningNumParts)
647     {
648         plist->rollingPruningPart = 0;
649     }
650
651     /* Compute the number of list entries to prune in this pass */
652     int numSciInPart = (plist->nsci - part)/numParts;
653
654     /* Don't launch the kernel if there is no work to do. */
655     if (numSciInPart <= 0)
656     {
657         plist->haveFreshList = false;
658
659         return;
660     }
661
662     GpuRegionTimer *timer = nullptr;
663     if (bDoTime)
664     {
665         timer = &(plist->haveFreshList ? t->interaction[iloc].prune_k : t->interaction[iloc].rollingPrune_k);
666     }
667
668     /* beginning of timed prune calculation section */
669     if (bDoTime)
670     {
671         timer->openTimingRegion(stream);
672     }
673
674     /* Kernel launch config:
675      * - The thread block dimensions match the size of i-clusters, j-clusters,
676      *   and j-cluster concurrency, in x, y, and z, respectively.
677      * - The 1D block-grid contains as many blocks as super-clusters.
678      */
679     int       num_threads_z = getOclPruneKernelJ4Concurrency(nb->dev_info->vendor_e);
680     cl_kernel pruneKernel   = selectPruneKernel(nb->kernel_pruneonly, plist->haveFreshList);
681
682     /* kernel launch config */
683     KernelLaunchConfig config;
684     config.sharedMemorySize = calc_shmem_required_prune(num_threads_z);
685     config.stream           = stream;
686     config.blockSize[0]     = c_clSize;
687     config.blockSize[1]     = c_clSize;
688     config.blockSize[2]     = num_threads_z;
689     config.gridSize[0]      = numSciInPart;
690
691     validate_global_work_size(config, 3, nb->dev_info);
692
693     if (debug)
694     {
695         fprintf(debug, "Pruning GPU kernel launch configuration:\n\tLocal work size: %zux%zux%zu\n\t"
696                 "\tGlobal work size: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n"
697                 "\tShMem: %zu\n",
698                 config.blockSize[0], config.blockSize[1], config.blockSize[2],
699                 config.blockSize[0] * config.gridSize[0], config.blockSize[1] * config.gridSize[1], plist->nsci*c_numClPerSupercl,
700                 c_numClPerSupercl, plist->na_c, config.sharedMemorySize);
701     }
702
703     cl_nbparam_params_t  nbparams_params;
704     fillin_ocl_structures(nbp, &nbparams_params);
705
706     auto          *timingEvent  = bDoTime ? timer->fetchNextEvent() : nullptr;
707     constexpr char kernelName[] = "k_pruneonly";
708     const auto     kernelArgs   = prepareGpuKernelArguments(pruneKernel, config,
709                                                             &nbparams_params, &adat->xq, &adat->shift_vec,
710                                                             &plist->sci, &plist->cj4, &plist->imask, &numParts, &part);
711     launchGpuKernel(pruneKernel, config, timingEvent, kernelName, kernelArgs);
712
713     if (plist->haveFreshList)
714     {
715         plist->haveFreshList         = false;
716         /* Mark that pruning has been done */
717         nb->timers->interaction[iloc].didPrune = true;
718     }
719     else
720     {
721         /* Mark that rolling pruning has been done */
722         nb->timers->interaction[iloc].didRollingPrune = true;
723     }
724
725     if (bDoTime)
726     {
727         timer->closeTimingRegion(stream);
728     }
729 }
730
731 /*! \brief
732  * Asynchronously launches the download of nonbonded forces from the GPU
733  * (and energies/shift forces if required).
734  */
735 void gpu_launch_cpyback(gmx_nbnxn_ocl_t               *nb,
736                         struct nbnxn_atomdata_t       *nbatom,
737                         const int                      flags,
738                         const AtomLocality             aloc,
739                         const bool                     haveOtherWork)
740 {
741     cl_int gmx_unused cl_error;
742     int               adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */
743
744     /* determine interaction locality from atom locality */
745     const InteractionLocality iloc = gpuAtomToInteractionLocality(aloc);
746
747     cl_atomdata_t            *adat    = nb->atdat;
748     cl_timers_t              *t       = nb->timers;
749     bool                      bDoTime = nb->bDoTime == CL_TRUE;
750     cl_command_queue          stream  = nb->stream[iloc];
751
752     bool                      bCalcEner   = (flags & GMX_FORCE_ENERGY) != 0;
753     int                       bCalcFshift = flags & GMX_FORCE_VIRIAL;
754
755
756     /* don't launch non-local copy-back if there was no non-local work to do */
757     if (!haveOtherWork && canSkipWork(*nb, iloc))
758     {
759         /* TODO An alternative way to signal that non-local work is
760            complete is to use a clEnqueueMarker+clEnqueueBarrier
761            pair. However, the use of bNonLocalStreamActive has the
762            advantage of being local to the host, so probably minimizes
763            overhead. Curiously, for NVIDIA OpenCL with an empty-domain
764            test case, overall simulation performance was higher with
765            the API calls, but this has not been tested on AMD OpenCL,
766            so could be worth considering in future. */
767         nb->bNonLocalStreamActive = CL_FALSE;
768         return;
769     }
770
771     getGpuAtomRange(adat, aloc, &adat_begin, &adat_len);
772
773     /* beginning of timed D2H section */
774     if (bDoTime)
775     {
776         t->xf[aloc].nb_d2h.openTimingRegion(stream);
777     }
778
779     /* With DD the local D2H transfer can only start after the non-local
780        has been launched. */
781     if (iloc == InteractionLocality::Local && nb->bNonLocalStreamActive)
782     {
783         sync_ocl_event(stream, &(nb->nonlocal_done));
784     }
785
786     /* DtoH f */
787     ocl_copy_D2H_async(nbatom->out[0].f.data() + adat_begin * 3, adat->f, adat_begin*3*sizeof(float),
788                        (adat_len)* adat->f_elem_size, stream, bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);
789
790     /* kick off work */
791     cl_error = clFlush(stream);
792     assert(CL_SUCCESS == cl_error);
793
794     /* After the non-local D2H is launched the nonlocal_done event can be
795        recorded which signals that the local D2H can proceed. This event is not
796        placed after the non-local kernel because we need the non-local
797        data back first. */
798     if (iloc == InteractionLocality::NonLocal)
799     {
800         cl_error = clEnqueueMarkerWithWaitList(stream, 0, nullptr, &(nb->nonlocal_done));
801         assert(CL_SUCCESS == cl_error);
802         nb->bNonLocalStreamActive = CL_TRUE;
803     }
804
805     /* only transfer energies in the local stream */
806     if (iloc == InteractionLocality::Local)
807     {
808         /* DtoH fshift */
809         if (bCalcFshift)
810         {
811             ocl_copy_D2H_async(nb->nbst.fshift, adat->fshift, 0,
812                                SHIFTS * adat->fshift_elem_size, stream, bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);
813         }
814
815         /* DtoH energies */
816         if (bCalcEner)
817         {
818             ocl_copy_D2H_async(nb->nbst.e_lj, adat->e_lj, 0,
819                                sizeof(float), stream, bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);
820
821             ocl_copy_D2H_async(nb->nbst.e_el, adat->e_el, 0,
822                                sizeof(float), stream, bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);
823         }
824     }
825
826     if (bDoTime)
827     {
828         t->xf[aloc].nb_d2h.closeTimingRegion(stream);
829     }
830 }
831
832
833 /*! \brief Selects the Ewald kernel type, analytical or tabulated, single or twin cut-off. */
834 int gpu_pick_ewald_kernel_type(const bool bTwinCut)
835 {
836     bool bUseAnalyticalEwald, bForceAnalyticalEwald, bForceTabulatedEwald;
837     int  kernel_type;
838
839     /* Benchmarking/development environment variables to force the use of
840        analytical or tabulated Ewald kernel. */
841     bForceAnalyticalEwald = (getenv("GMX_OCL_NB_ANA_EWALD") != nullptr);
842     bForceTabulatedEwald  = (getenv("GMX_OCL_NB_TAB_EWALD") != nullptr);
843
844     if (bForceAnalyticalEwald && bForceTabulatedEwald)
845     {
846         gmx_incons("Both analytical and tabulated Ewald OpenCL non-bonded kernels "
847                    "requested through environment variables.");
848     }
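    /* Example (benchmarking/debugging only): the tabulated flavor can be forced
     * from the shell, e.g. with a typical run (a sketch, command line assumed):
     *   GMX_OCL_NB_TAB_EWALD=1 gmx mdrun -nb gpu ...
     */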
849
850     /* OpenCL: By default, use analytical Ewald
851      * TODO: tabulated does not work, it needs fixing, see init_nbparam() in nbnxn_ocl_data_mgmt.cpp
852      *
853      * TODO: decide if dev_info parameter should be added to recognize NVIDIA CC>=3.0 devices.
854      *
855      */
857     bUseAnalyticalEwald = true;
858     if (bForceAnalyticalEwald)
859     {
860         if (debug)
861         {
862             fprintf(debug, "Using analytical Ewald OpenCL kernels\n");
863         }
864     }
865     else if (bForceTabulatedEwald)
866     {
867         bUseAnalyticalEwald = false;
868
869         if (debug)
870         {
871             fprintf(debug, "Using tabulated Ewald OpenCL kernels\n");
872         }
873     }
874
875     /* Use twin cut-off kernels if requested through bTwinCut or if the env. var.
876        forces it (use it for debugging/benchmarking only). */
877     if (!bTwinCut && (getenv("GMX_OCL_NB_EWALD_TWINCUT") == nullptr))
878     {
879         kernel_type = bUseAnalyticalEwald ? eelOclEWALD_ANA : eelOclEWALD_TAB;
880     }
881     else
882     {
883         kernel_type = bUseAnalyticalEwald ? eelOclEWALD_ANA_TWIN : eelOclEWALD_TAB_TWIN;
884     }
885
886     return kernel_type;
887 }
888
889 } // namespace Nbnxm