/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *  \brief Define OpenCL implementation of nbnxm_gpu.h
 *
 *  \author Anca Hamuraru <anca@streamcomputing.eu>
 *  \author Teemu Virolainen <teemu@streamcomputing.eu>
 *  \author Dimitrios Karkoulis <dimitris.karkoulis@gmail.com>
 *  \author Szilárd Páll <pall.szilard@gmail.com>
 *  \ingroup module_nbnxm
 *
 *  TODO (psz):
 *  - Add a static const cl_uint c_pruneKernelWorkDim / c_nbnxnKernelWorkDim = 3;
 *  - Rework the copying of OCL data structures done before every invocation of both
 *    nb and prune kernels (using fillin_ocl_structures); also consider at the same
 *    time calling clSetKernelArg only on the updated parameters (if tracking changed
 *    parameters is feasible);
 *  - Consider using the event_wait_list argument to clEnqueueNDRangeKernel to mark
 *    dependencies on the kernel launched: e.g. the non-local nb kernel's dependency
 *    on the misc_ops_and_local_H2D_done event could be better expressed this way.
 *
 *  - Consider extracting common sections of the OpenCL and CUDA nbnxn logic, e.g.:
 *    - in nbnxn_gpu_launch_kernel_pruneonly() the pre- and post-kernel launch logic
 *      is identical in the two implementations, so a 3-way split might allow sharing
 *      code.
 */
#include "gmxpre.h"

#include <assert.h>
#include <stdlib.h>

#if defined(_MSC_VER)
#include <limits>
#endif

#include "thread_mpi/atomic.h"

#include "gromacs/gpu_utils/gputraits_ocl.h"
#include "gromacs/gpu_utils/oclutils.h"
#include "gromacs/hardware/hw_info.h"
#include "gromacs/mdlib/force_flags.h"
#include "gromacs/nbnxm/atomdata.h"
#include "gromacs/nbnxm/gpu_common.h"
#include "gromacs/nbnxm/gpu_common_utils.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/nbnxm_gpu.h"
#include "gromacs/nbnxm/pairlist.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"

#include "nbnxm_ocl_internal.h"
#include "nbnxm_ocl_types.h"

namespace Nbnxm
{

/*! \brief Convenience constants */
//@{
static const int c_numClPerSupercl = c_nbnxnGpuNumClusterPerSupercluster;
static const int c_clSize          = c_nbnxnGpuClusterSize;
//@}


/*! \brief Validates the input global work size parameter.
 */
static inline void validate_global_work_size(const KernelLaunchConfig &config, int work_dim, const gmx_device_info_t *dinfo)
{
    cl_uint device_size_t_size_bits;
    cl_uint host_size_t_size_bits;

    assert(dinfo);

    size_t global_work_size[3];
    GMX_ASSERT(work_dim <= 3, "Not supporting hyper-grids just yet");
    for (int i = 0; i < work_dim; i++)
    {
        global_work_size[i] = config.blockSize[i] * config.gridSize[i];
    }

    /* Each component of a global_work_size must not exceed the range given by the
       sizeof(device size_t) for the device on which the kernel execution will
       be enqueued. See:
       https://www.khronos.org/registry/cl/sdk/1.0/docs/man/xhtml/clEnqueueNDRangeKernel.html
     */
    device_size_t_size_bits = dinfo->adress_bits;
    host_size_t_size_bits   = static_cast<cl_uint>(sizeof(size_t) * 8);

    /* If sizeof(host size_t) <= sizeof(device size_t)
            => global_work_size components will always be valid
       else
            => get the device limit for the global work size and
            compare it against each component of global_work_size.
     */
    if (host_size_t_size_bits > device_size_t_size_bits)
    {
        size_t device_limit;

        device_limit = (1ull << device_size_t_size_bits) - 1;

        for (int i = 0; i < work_dim; i++)
        {
            if (global_work_size[i] > device_limit)
            {
                gmx_fatal(FARGS, "Watch out, the input system is too large to simulate!\n"
                          "The number of nonbonded work units (=number of super-clusters) exceeds the "
                          "device capabilities. Global work size limit exceeded (%zu > %zu)!",
                          global_work_size[i], device_limit);
            }
        }
    }
}
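
/* Illustrative numbers (not used by the code above; the constants here are
 * hypothetical): on a device reporting 32-bit addressing (adress_bits == 32)
 * driven from a 64-bit host, the per-dimension limit is 2^32 - 1 = 4294967295
 * work items. With blockSize[0] == 8, any gridSize[0] above 536870911
 * super-clusters would overflow that limit and trigger the gmx_fatal() above.
 */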

/* Constant arrays listing non-bonded kernel function names. The arrays are
 * organized as 2-dimensional arrays indexed by the electrostatics and VDW type.
 *
 *  Note that the row- and column-order of function names has to match the
 *  order of the corresponding enumerated electrostatics and vdw types,
 *  respectively, defined in nbnxm_ocl_types.h.
 */

/*! \brief Force-only kernel function names. */
static const char* nb_kfunc_noener_noprune_ptr[eelOclNR][evdwOclNR] =
{
    { "nbnxn_kernel_ElecCut_VdwLJ_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombLB_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJFsw_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJPsw_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_opencl"            },
    { "nbnxn_kernel_ElecRF_VdwLJ_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombLB_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJFsw_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJPsw_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_opencl"             },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_opencl"        },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombLB_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJFsw_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJPsw_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_opencl"             },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_opencl"      }
};
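
/* Illustrative lookup (not used by the code): with reaction-field
 * electrostatics (second row) and Lorentz-Berthelot combination rules
 * (third column), the force-only, no-prune table above yields
 * "nbnxn_kernel_ElecRF_VdwLJCombLB_F_opencl"; the row/column indices come
 * from the enums defined in nbnxm_ocl_types.h.
 */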

/*! \brief Force + energy kernel function names. */
static const char* nb_kfunc_ener_noprune_ptr[eelOclNR][evdwOclNR] =
{
    { "nbnxn_kernel_ElecCut_VdwLJ_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJFsw_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJPsw_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_opencl"            },
    { "nbnxn_kernel_ElecRF_VdwLJ_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJFsw_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJPsw_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_opencl"             },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_opencl"        },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJFsw_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJPsw_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_opencl"             },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_opencl"      }
};

/*! \brief Force + pruning kernel function names. */
static const char* nb_kfunc_noener_prune_ptr[eelOclNR][evdwOclNR] =
{
    { "nbnxn_kernel_ElecCut_VdwLJ_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_opencl"             },
    { "nbnxn_kernel_ElecRF_VdwLJ_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_opencl"              },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_opencl"         },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_opencl"  },
    { "nbnxn_kernel_ElecEw_VdwLJ_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_opencl"              },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_opencl"       }
};

/*! \brief Force + energy + pruning kernel function names. */
static const char* nb_kfunc_ener_prune_ptr[eelOclNR][evdwOclNR] =
{
    { "nbnxn_kernel_ElecCut_VdwLJ_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_opencl"            },
    { "nbnxn_kernel_ElecRF_VdwLJ_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_opencl"             },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_opencl"        },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_opencl"             },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_opencl"      }
};

/*! \brief Returns the prune kernel to be executed at the current invocation.
 *
 * \param[in] kernel_pruneonly  array of prune kernel objects
 * \param[in] firstPrunePass    true if the first pruning pass is being executed
 */
static inline cl_kernel selectPruneKernel(cl_kernel kernel_pruneonly[],
                                          bool      firstPrunePass)
{
    cl_kernel  *kernelPtr;

    if (firstPrunePass)
    {
        kernelPtr = &(kernel_pruneonly[epruneFirst]);
    }
    else
    {
        kernelPtr = &(kernel_pruneonly[epruneRolling]);
    }
    // TODO: consider creating the prune kernel object here to avoid a
    // clCreateKernel for the rolling prune kernel if this is not needed.
    return *kernelPtr;
}

/*! \brief Returns the kernel flavor to be executed at the current step.
 *  OpenCL kernel objects are cached in nb. If the requested kernel is not
 *  found in the cache, it will be created and the cache will be updated.
 */
static inline cl_kernel select_nbnxn_kernel(gmx_nbnxn_ocl_t   *nb,
                                            int                eeltype,
                                            int                evdwtype,
                                            bool               bDoEne,
                                            bool               bDoPrune)
{
    const char* kernel_name_to_run;
    cl_kernel  *kernel_ptr;
    cl_int      cl_error;

    assert(eeltype  < eelOclNR);
    assert(evdwtype < evdwOclNR);

    if (bDoEne)
    {
        if (bDoPrune)
        {
            kernel_name_to_run = nb_kfunc_ener_prune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_ener_prune_ptr[eeltype][evdwtype]);
        }
        else
        {
            kernel_name_to_run = nb_kfunc_ener_noprune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_ener_noprune_ptr[eeltype][evdwtype]);
        }
    }
    else
    {
        if (bDoPrune)
        {
            kernel_name_to_run = nb_kfunc_noener_prune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_noener_prune_ptr[eeltype][evdwtype]);
        }
        else
        {
            kernel_name_to_run = nb_kfunc_noener_noprune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_noener_noprune_ptr[eeltype][evdwtype]);
        }
    }

    if (nullptr == kernel_ptr[0])
    {
        *kernel_ptr = clCreateKernel(nb->dev_rundata->program, kernel_name_to_run, &cl_error);
        assert(cl_error == CL_SUCCESS);
    }
    // TODO: handle errors

    return *kernel_ptr;
}
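
/* Illustrative call (not part of the build; the enum constants are assumed to
 * be the ones defined in nbnxm_ocl_types.h): selecting the reaction-field,
 * plain LJ cut-off, energy + no-prune flavor would look like
 *   cl_kernel k = select_nbnxn_kernel(nb, eelOclRF, evdwOclCUT, true, false);
 * which resolves to "nbnxn_kernel_ElecRF_VdwLJ_VF_opencl" in the tables above.
 */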

/*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use.
 */
static inline int calc_shmem_required_nonbonded(int  vdwType,
                                                bool bPrefetchLjParam)
{
    int shmem;

    /* size of shmem (force-buffers/xq/atom type preloading) */
    /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
    /* i-atom x+q in shared memory */
    shmem  = c_numClPerSupercl * c_clSize * sizeof(float) * 4; /* xqib */
    /* cj in shared memory, for both warps separately
     * TODO: in the "nowarp" kernels we load cj only once, so the factor 2 is not needed.
     */
    shmem += 2 * c_nbnxnGpuJgroupSize * sizeof(int);           /* cjs  */
    if (bPrefetchLjParam)
    {
        if (useLjCombRule(vdwType))
        {
            /* i-atom LJ combination parameters in shared memory */
            shmem += c_numClPerSupercl * c_clSize * 2*sizeof(float); /* atib abused for ljcp, float2 */
        }
        else
        {
            /* i-atom types in shared memory */
            shmem += c_numClPerSupercl * c_clSize * sizeof(int); /* atib */
        }
    }
    /* force reduction buffers in shared memory */
    shmem += c_clSize * c_clSize * 3 * sizeof(float);    /* f_buf */
    /* Warp vote; in fact this should scale with the number of warps in the block. */
    shmem += sizeof(cl_uint) * 2;                        /* warp_any */
    return shmem;
}
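
/* Worked example (illustrative only, assuming the usual GPU pair-list setup
 * of c_numClPerSupercl = 8, c_clSize = 8 and c_nbnxnGpuJgroupSize = 4):
 *   xqib:     8 * 8 * 4 B * 4 = 1024 B
 *   cjs:      2 * 4 * 4 B     =   32 B
 *   atib:     8 * 8 * 4 B     =  256 B (type prefetch; 512 B for float2 LJ params)
 *   f_buf:    8 * 8 * 3 * 4 B =  768 B
 *   warp_any: 2 * 4 B         =    8 B
 * i.e. roughly 2 KiB of local memory per work-group.
 */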

/*! \brief Initializes data structures that are going to be sent to the OpenCL device.
 *
 *  The device can't use the same data structures as the host for two main reasons:
 *  - OpenCL restrictions (pointers are not accepted inside data structures)
 *  - some host side fields are not needed for the OpenCL kernels.
 *
 *  This function is called before the launch of both nbnxn and prune kernels.
 */
static void fillin_ocl_structures(cl_nbparam_t        *nbp,
                                  cl_nbparam_params_t *nbparams_params)
{
    nbparams_params->coulomb_tab_scale = nbp->coulomb_tab_scale;
    nbparams_params->c_rf              = nbp->c_rf;
    nbparams_params->dispersion_shift  = nbp->dispersion_shift;
    nbparams_params->eeltype           = nbp->eeltype;
    nbparams_params->epsfac            = nbp->epsfac;
    nbparams_params->ewaldcoeff_lj     = nbp->ewaldcoeff_lj;
    nbparams_params->ewald_beta        = nbp->ewald_beta;
    nbparams_params->rcoulomb_sq       = nbp->rcoulomb_sq;
    nbparams_params->repulsion_shift   = nbp->repulsion_shift;
    nbparams_params->rlistOuter_sq     = nbp->rlistOuter_sq;
    nbparams_params->rvdw_sq           = nbp->rvdw_sq;
    nbparams_params->rlistInner_sq     = nbp->rlistInner_sq;
    nbparams_params->rvdw_switch       = nbp->rvdw_switch;
    nbparams_params->sh_ewald          = nbp->sh_ewald;
    nbparams_params->sh_lj_ewald       = nbp->sh_lj_ewald;
    nbparams_params->two_k_rf          = nbp->two_k_rf;
    nbparams_params->vdwtype           = nbp->vdwtype;
    nbparams_params->vdw_switch        = nbp->vdw_switch;
}

/*! \brief Enqueues a wait for event completion.
 *
 * Then it releases the event and sets it to nullptr.
 * Don't use this function when more than one wait will be issued for the event.
 * Equivalent to a CUDA stream synchronization. */
static void sync_ocl_event(cl_command_queue stream, cl_event *ocl_event)
{
    cl_int gmx_unused cl_error;

    /* Enqueue wait */
    cl_error = clEnqueueBarrierWithWaitList(stream, 1, ocl_event, nullptr);
    GMX_RELEASE_ASSERT(CL_SUCCESS == cl_error, ocl_get_error_string(cl_error).c_str());

    /* Release the event and reset it to nullptr. It is OK to release it here,
       as the enqueued barrier performs an implicit retain on the events it waits on. */
    cl_error = clReleaseEvent(*ocl_event);
    assert(CL_SUCCESS == cl_error);
    *ocl_event = nullptr;
}

/*! \brief Asynchronously launches the host-to-device copy of the xq buffer. */
void gpu_copy_xq_to_gpu(gmx_nbnxn_ocl_t        *nb,
                        const nbnxn_atomdata_t *nbatom,
                        const AtomLocality      atomLocality,
                        const bool              haveOtherWork)
{
    const InteractionLocality iloc = gpuAtomToInteractionLocality(atomLocality);

    /* local/nonlocal offset and length used for xq and f */
    int                  adat_begin, adat_len;

    cl_atomdata_t       *adat    = nb->atdat;
    cl_plist_t          *plist   = nb->plist[iloc];
    cl_timers_t         *t       = nb->timers;
    cl_command_queue     stream  = nb->stream[iloc];

    bool                 bDoTime = (nb->bDoTime) != 0;

    /* Don't launch the non-local H2D copy if there is no dependent
       work to do: neither non-local nor other (e.g. bonded) work
       that has the nbnxn coordinates as input.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local x+q copy (and the rest of the local
       work in nbnxn_gpu_launch_kernel()).
     */
    if (!haveOtherWork && canSkipWork(*nb, iloc))
    {
        plist->haveFreshList = false;

        return;
    }

    /* calculate the atom data index range based on locality */
    if (atomLocality == AtomLocality::Local)
    {
        adat_begin  = 0;
        adat_len    = adat->natoms_local;
    }
    else
    {
        adat_begin  = adat->natoms_local;
        adat_len    = adat->natoms - adat->natoms_local;
    }
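
    /* Illustrative ranges (example numbers only): with natoms_local = 6000
       and natoms = 9000, the local copy covers atoms [0, 6000) and the
       non-local copy atoms [6000, 9000). */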

    /* beginning of timed HtoD section */
    if (bDoTime)
    {
        t->xf[atomLocality].nb_h2d.openTimingRegion(stream);
    }

    /* HtoD x, q */
    ocl_copy_H2D_async(adat->xq, nbatom->x().data() + adat_begin * 4, adat_begin*sizeof(float)*4,
                       adat_len * sizeof(float) * 4, stream, bDoTime ? t->xf[atomLocality].nb_h2d.fetchNextEvent() : nullptr);

    if (bDoTime)
    {
        t->xf[atomLocality].nb_h2d.closeTimingRegion(stream);
    }

    /* When we get here, all misc operations issued in the local stream as well as
       the local xq H2D are done,
       so we record that in the local stream and wait for it in the nonlocal one. */
    if (nb->bUseTwoStreams)
    {
        if (iloc == InteractionLocality::Local)
        {
            cl_int gmx_used_in_debug cl_error = clEnqueueMarkerWithWaitList(stream, 0, nullptr, &(nb->misc_ops_and_local_H2D_done));
            assert(CL_SUCCESS == cl_error);

            /* Based on section 5.13 of the OpenCL v1.2 spec, a flush is needed
             * in the local stream in order to be able to sync with the above event
             * from the non-local stream.
             */
            cl_error = clFlush(stream);
            assert(CL_SUCCESS == cl_error);
        }
        else
        {
            sync_ocl_event(stream, &(nb->misc_ops_and_local_H2D_done));
        }
    }
}


/*! \brief Launch GPU kernel

   As we execute nonbonded workload in separate queues, before launching
   the kernel we need to make sure that the following operations have completed:
   - atomdata allocation and related H2D transfers (every nstlist step);
   - pair list H2D transfer (every nstlist step);
   - shift vector H2D transfer (every nstlist step);
   - force (+shift force and energy) output clearing (every step).

   These operations are issued in the local queue at the beginning of the step
   and therefore always complete before the local kernel launch. The non-local
   kernel is launched after the local one on the same device/context, so it is
   inherently scheduled after the operations in the local stream (including the
   above "misc_ops").
   However, for the sake of having a future-proof implementation, we use the
   misc_ops_done event to record the point in time when the above operations
   are finished and synchronize with this event in the non-local stream.
 */
void gpu_launch_kernel(gmx_nbnxn_ocl_t                  *nb,
                       const int                         flags,
                       const Nbnxm::InteractionLocality  iloc)
{
    /* OpenCL kernel launch-related stuff */
    cl_kernel            nb_kernel = nullptr;  /* the nonbonded kernel flavor to launch */

    cl_atomdata_t       *adat    = nb->atdat;
    cl_nbparam_t        *nbp     = nb->nbparam;
    cl_plist_t          *plist   = nb->plist[iloc];
    cl_timers_t         *t       = nb->timers;
    cl_command_queue     stream  = nb->stream[iloc];

    bool                 bCalcEner   = (flags & GMX_FORCE_ENERGY) != 0;
    int                  bCalcFshift = flags & GMX_FORCE_VIRIAL;
    bool                 bDoTime     = (nb->bDoTime) != 0;

    cl_nbparam_params_t  nbparams_params;

    /* Don't launch the non-local kernel if there is no work to do.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local kernel and later (not in
       this function) the stream wait, local f copyback and the f buffer
       clearing. All these operations, except for the local interaction kernel,
       are needed for the non-local interactions. The skip of the local kernel
       call is taken care of later in this function. */
    if (canSkipWork(*nb, iloc))
    {
        plist->haveFreshList = false;

        return;
    }

    if (nbp->useDynamicPruning && plist->haveFreshList)
    {
        /* Prunes for rlistOuter and rlistInner, sets plist->haveFreshList=false
           (that's the way the timing accounting can distinguish between
           separate prune kernel and combined force+prune).
         */
        Nbnxm::gpu_launch_kernel_pruneonly(nb, iloc, 1);
    }

    if (plist->nsci == 0)
    {
        /* Don't launch an empty local kernel (not allowed with OpenCL). */
        return;
    }

    /* beginning of timed nonbonded calculation section */
    if (bDoTime)
    {
        t->interaction[iloc].nb_k.openTimingRegion(stream);
    }

    /* get the pointer to the kernel flavor we need to use */
    nb_kernel = select_nbnxn_kernel(nb,
                                    nbp->eeltype,
                                    nbp->vdwtype,
                                    bCalcEner,
                                    (plist->haveFreshList && !nb->timers->interaction[iloc].didPrune));

    /* kernel launch config */

    KernelLaunchConfig config;
    config.sharedMemorySize = calc_shmem_required_nonbonded(nbp->vdwtype, nb->bPrefetchLjParam);
    config.stream           = stream;
    config.blockSize[0]     = c_clSize;
    config.blockSize[1]     = c_clSize;
    config.gridSize[0]      = plist->nsci;

    validate_global_work_size(config, 3, nb->dev_info);

    if (debug)
    {
        fprintf(debug, "Non-bonded GPU launch configuration:\n\tLocal work size: %zux%zux%zu\n\t"
                "Global work size: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n",
                config.blockSize[0], config.blockSize[1], config.blockSize[2],
                config.blockSize[0] * config.gridSize[0], config.blockSize[1] * config.gridSize[1], plist->nsci*c_numClPerSupercl,
                c_numClPerSupercl, plist->na_c);
    }

    fillin_ocl_structures(nbp, &nbparams_params);

    auto          *timingEvent  = bDoTime ? t->interaction[iloc].nb_k.fetchNextEvent() : nullptr;
    constexpr char kernelName[] = "k_calc_nb";
    if (useLjCombRule(nb->nbparam->vdwtype))
    {
        const auto kernelArgs = prepareGpuKernelArguments(nb_kernel, config,
                                                          &nbparams_params, &adat->xq, &adat->f, &adat->e_lj, &adat->e_el, &adat->fshift,
                                                          &adat->lj_comb,
                                                          &adat->shift_vec, &nbp->nbfp_climg2d, &nbp->nbfp_comb_climg2d, &nbp->coulomb_tab_climg2d,
                                                          &plist->sci, &plist->cj4, &plist->excl, &bCalcFshift);

        launchGpuKernel(nb_kernel, config, timingEvent, kernelName, kernelArgs);
    }
    else
    {
        const auto kernelArgs = prepareGpuKernelArguments(nb_kernel, config,
                                                          &adat->ntypes,
                                                          &nbparams_params, &adat->xq, &adat->f, &adat->e_lj, &adat->e_el, &adat->fshift,
                                                          &adat->atom_types,
                                                          &adat->shift_vec, &nbp->nbfp_climg2d, &nbp->nbfp_comb_climg2d, &nbp->coulomb_tab_climg2d,
                                                          &plist->sci, &plist->cj4, &plist->excl, &bCalcFshift);
        launchGpuKernel(nb_kernel, config, timingEvent, kernelName, kernelArgs);
    }

    if (bDoTime)
    {
        t->interaction[iloc].nb_k.closeTimingRegion(stream);
    }
}


/*! \brief Calculates the amount of shared memory required by the prune kernel.
 *
 *  Note that for the sake of simplicity we use the CUDA terminology "shared memory"
 *  for OpenCL local memory.
 *
 * \param[in] num_threads_z cj4 concurrency equal to the number of threads/work items in the third dimension.
 * \returns   the amount of local memory in bytes required by the pruning kernel
 */
static inline int calc_shmem_required_prune(const int num_threads_z)
{
    int shmem;

    /* i-atom x in shared memory (for convenience we load all 4 components including q) */
    shmem  = c_numClPerSupercl * c_clSize * sizeof(float)*4;
    /* cj in shared memory, for each warp separately
     * Note: only need to load once per wavefront, but to keep the code simple,
     * for now we load twice on AMD.
     */
    shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);
    /* Warp vote, requires one uint per warp/32 threads per block. */
    shmem += sizeof(cl_uint) * 2*num_threads_z;

    return shmem;
}
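
/* Worked example (illustrative only, assuming c_numClPerSupercl = 8,
 * c_clSize = 8, c_nbnxnGpuClusterpairSplit = 2, c_nbnxnGpuJgroupSize = 4
 * and num_threads_z = 4):
 *   xib:      8 * 8 * 4 B * 4 = 1024 B
 *   cjs:      4 * 2 * 4 * 4 B =  128 B
 *   warp_any: 4 B * 2 * 4     =   32 B
 * i.e. about 1.2 KiB of local memory per work-group.
 */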

void gpu_launch_kernel_pruneonly(gmx_nbnxn_gpu_t           *nb,
                                 const InteractionLocality  iloc,
                                 const int                  numParts)
{
    cl_atomdata_t       *adat    = nb->atdat;
    cl_nbparam_t        *nbp     = nb->nbparam;
    cl_plist_t          *plist   = nb->plist[iloc];
    cl_timers_t         *t       = nb->timers;
    cl_command_queue     stream  = nb->stream[iloc];
    bool                 bDoTime = nb->bDoTime == CL_TRUE;

    if (plist->haveFreshList)
    {
        GMX_ASSERT(numParts == 1, "With first pruning we expect 1 part");

        /* Set rollingPruningNumParts to signal that it is not set */
        plist->rollingPruningNumParts = 0;
        plist->rollingPruningPart     = 0;
    }
    else
    {
        if (plist->rollingPruningNumParts == 0)
        {
            plist->rollingPruningNumParts = numParts;
        }
        else
        {
            GMX_ASSERT(numParts == plist->rollingPruningNumParts, "It is not allowed to change numParts in between list generation steps");
        }
    }

    /* Use a local variable for part and update in plist, so we can return here
     * without duplicating the part increment code.
     */
    int part = plist->rollingPruningPart;

    plist->rollingPruningPart++;
    if (plist->rollingPruningPart >= plist->rollingPruningNumParts)
    {
        plist->rollingPruningPart = 0;
    }

    /* Compute the number of list entries to prune in this pass */
    int numSciInPart = (plist->nsci - part)/numParts;
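
    /* Illustrative numbers (example values only): with plist->nsci = 1000 and
       numParts = 4, consecutive rolling-prune calls cycle through parts
       0, 1, 2, 3, 0, ... and each pass covers roughly 1000/4 = 250 pair-list
       entries. */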

    /* Don't launch the kernel if there is no work to do. */
    if (numSciInPart <= 0)
    {
        plist->haveFreshList = false;

        return;
    }

    GpuRegionTimer *timer = nullptr;
    if (bDoTime)
    {
        timer = &(plist->haveFreshList ? t->interaction[iloc].prune_k : t->interaction[iloc].rollingPrune_k);
    }

    /* beginning of timed prune calculation section */
    if (bDoTime)
    {
        timer->openTimingRegion(stream);
    }

    /* Kernel launch config:
     * - The thread block dimensions match the size of i-clusters, j-clusters,
     *   and j-cluster concurrency, in x, y, and z, respectively.
     * - The 1D block-grid contains as many blocks as super-clusters.
     */
    int       num_threads_z = getOclPruneKernelJ4Concurrency(nb->dev_info->vendor_e);
    cl_kernel pruneKernel   = selectPruneKernel(nb->kernel_pruneonly, plist->haveFreshList);

    /* kernel launch config */
    KernelLaunchConfig config;
    config.sharedMemorySize = calc_shmem_required_prune(num_threads_z);
    config.stream           = stream;
    config.blockSize[0]     = c_clSize;
    config.blockSize[1]     = c_clSize;
    config.blockSize[2]     = num_threads_z;
    config.gridSize[0]      = numSciInPart;

    validate_global_work_size(config, 3, nb->dev_info);

    if (debug)
    {
        fprintf(debug, "Pruning GPU kernel launch configuration:\n"
                "\tLocal work size: %zux%zux%zu\n"
                "\tGlobal work size: %zux%zu\n"
                "\t#Super-clusters/clusters: %d/%d (%d)\n"
                "\tShMem: %zu\n",
                config.blockSize[0], config.blockSize[1], config.blockSize[2],
                config.blockSize[0] * config.gridSize[0], config.blockSize[1] * config.gridSize[1], plist->nsci*c_numClPerSupercl,
                c_numClPerSupercl, plist->na_c, config.sharedMemorySize);
    }

    cl_nbparam_params_t  nbparams_params;
    fillin_ocl_structures(nbp, &nbparams_params);

    auto          *timingEvent  = bDoTime ? timer->fetchNextEvent() : nullptr;
    constexpr char kernelName[] = "k_pruneonly";
    const auto     kernelArgs   = prepareGpuKernelArguments(pruneKernel, config,
                                                            &nbparams_params, &adat->xq, &adat->shift_vec,
                                                            &plist->sci, &plist->cj4, &plist->imask, &numParts, &part);
    launchGpuKernel(pruneKernel, config, timingEvent, kernelName, kernelArgs);

    if (plist->haveFreshList)
    {
        plist->haveFreshList                   = false;
        /* Mark that pruning has been done */
        nb->timers->interaction[iloc].didPrune = true;
    }
    else
    {
        /* Mark that rolling pruning has been done */
        nb->timers->interaction[iloc].didRollingPrune = true;
    }

    if (bDoTime)
    {
        timer->closeTimingRegion(stream);
    }
}

/*! \brief
 * Asynchronously launches the download of nonbonded forces from the GPU
 * (and of energies/shift forces, if required).
 */
void gpu_launch_cpyback(gmx_nbnxn_ocl_t               *nb,
                        struct nbnxn_atomdata_t       *nbatom,
                        const int                      flags,
                        const AtomLocality             aloc,
                        const bool                     haveOtherWork)
{
    cl_int gmx_unused cl_error;
    int               adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */

    /* determine interaction locality from atom locality */
    const InteractionLocality iloc = gpuAtomToInteractionLocality(aloc);

    cl_atomdata_t            *adat    = nb->atdat;
    cl_timers_t              *t       = nb->timers;
    bool                      bDoTime = nb->bDoTime == CL_TRUE;
    cl_command_queue          stream  = nb->stream[iloc];

    bool                      bCalcEner   = (flags & GMX_FORCE_ENERGY) != 0;
    int                       bCalcFshift = flags & GMX_FORCE_VIRIAL;


    /* don't launch non-local copy-back if there was no non-local work to do */
    if (!haveOtherWork && canSkipWork(*nb, iloc))
    {
        /* TODO An alternative way to signal that non-local work is
           complete is to use a clEnqueueMarker+clEnqueueBarrier
           pair. However, the use of bNonLocalStreamActive has the
           advantage of being local to the host, so probably minimizes
           overhead. Curiously, for NVIDIA OpenCL with an empty-domain
           test case, overall simulation performance was higher with
           the API calls, but this has not been tested on AMD OpenCL,
           so could be worth considering in future. */
        nb->bNonLocalStreamActive = CL_FALSE;
        return;
    }

    getGpuAtomRange(adat, aloc, &adat_begin, &adat_len);

    /* beginning of timed D2H section */
    if (bDoTime)
    {
        t->xf[aloc].nb_d2h.openTimingRegion(stream);
    }

    /* With DD the local D2H transfer can only start after the non-local
       one has been launched. */
    if (iloc == InteractionLocality::Local && nb->bNonLocalStreamActive)
    {
        sync_ocl_event(stream, &(nb->nonlocal_done));
    }

    /* DtoH f */
    ocl_copy_D2H_async(nbatom->out[0].f.data() + adat_begin * 3, adat->f, adat_begin*3*sizeof(float),
                       (adat_len)* adat->f_elem_size, stream, bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);

    /* kick off work */
    cl_error = clFlush(stream);
    assert(CL_SUCCESS == cl_error);

    /* After the non-local D2H is launched the nonlocal_done event can be
       recorded which signals that the local D2H can proceed. This event is not
       placed after the non-local kernel because we need the non-local data
       back first. */
    if (iloc == InteractionLocality::NonLocal)
    {
        cl_error = clEnqueueMarkerWithWaitList(stream, 0, nullptr, &(nb->nonlocal_done));
        assert(CL_SUCCESS == cl_error);
        nb->bNonLocalStreamActive = CL_TRUE;
    }

    /* only transfer energies in the local stream */
    if (iloc == InteractionLocality::Local)
    {
        /* DtoH fshift */
        if (bCalcFshift)
        {
            ocl_copy_D2H_async(nb->nbst.fshift, adat->fshift, 0,
                               SHIFTS * adat->fshift_elem_size, stream, bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);
        }

        /* DtoH energies */
        if (bCalcEner)
        {
            ocl_copy_D2H_async(nb->nbst.e_lj, adat->e_lj, 0,
                               sizeof(float), stream, bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);

            ocl_copy_D2H_async(nb->nbst.e_el, adat->e_el, 0,
                               sizeof(float), stream, bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);
        }
    }

    if (bDoTime)
    {
        t->xf[aloc].nb_d2h.closeTimingRegion(stream);
    }
}


/*! \brief Selects the Ewald kernel type: analytical or tabulated, single or twin cut-off. */
int gpu_pick_ewald_kernel_type(const bool bTwinCut)
{
    bool bUseAnalyticalEwald, bForceAnalyticalEwald, bForceTabulatedEwald;
    int  kernel_type;

    /* Benchmarking/development environment variables to force the use of
       analytical or tabulated Ewald kernels. */
    bForceAnalyticalEwald = (getenv("GMX_OCL_NB_ANA_EWALD") != nullptr);
    bForceTabulatedEwald  = (getenv("GMX_OCL_NB_TAB_EWALD") != nullptr);

    if (bForceAnalyticalEwald && bForceTabulatedEwald)
    {
        gmx_incons("Both analytical and tabulated Ewald OpenCL non-bonded kernels "
                   "requested through environment variables.");
    }

    /* By default, use analytical Ewald.
     * TODO: tabulated does not work, it needs fixing, see init_nbparam() in nbnxn_ocl_data_mgmt.cpp
     *
     * TODO: decide if a dev_info parameter should be added to recognize NVIDIA CC>=3.0 devices.
     */
    bUseAnalyticalEwald = true;
    if (bForceAnalyticalEwald)
    {
        if (debug)
        {
            fprintf(debug, "Using analytical Ewald OpenCL kernels\n");
        }
    }
    else if (bForceTabulatedEwald)
    {
        bUseAnalyticalEwald = false;

        if (debug)
        {
            fprintf(debug, "Using tabulated Ewald OpenCL kernels\n");
        }
    }

    /* Use twin cut-off kernels if requested through bTwinCut or if the environment
       variable forces it (use the latter for debugging/benchmarking only). */
    if (!bTwinCut && (getenv("GMX_OCL_NB_EWALD_TWINCUT") == nullptr))
    {
        kernel_type = bUseAnalyticalEwald ? eelOclEWALD_ANA : eelOclEWALD_TAB;
    }
    else
    {
        kernel_type = bUseAnalyticalEwald ? eelOclEWALD_ANA_TWIN : eelOclEWALD_TAB_TWIN;
    }

    return kernel_type;
}
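
/* Example usage of the override variables checked above (illustrative shell
 * invocations only; the variable names are the ones read in
 * gpu_pick_ewald_kernel_type()):
 *
 *   GMX_OCL_NB_TAB_EWALD=1     gmx mdrun ...   # force tabulated Ewald kernels
 *   GMX_OCL_NB_ANA_EWALD=1     gmx mdrun ...   # force analytical Ewald kernels
 *   GMX_OCL_NB_EWALD_TWINCUT=1 gmx mdrun ...   # force twin cut-off kernels
 */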

} // namespace Nbnxm