Merge common nbnxn CUDA/OpenCL GPU wait code-paths
src/gromacs/mdlib/nbnxn_ocl/nbnxn_ocl.cpp  (alexxy/gromacs.git)
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35 /*! \internal \file
36  *  \brief Define OpenCL implementation of nbnxn_gpu.h
37  *
38  *  \author Anca Hamuraru <anca@streamcomputing.eu>
39  *  \author Teemu Virolainen <teemu@streamcomputing.eu>
40  *  \author Dimitrios Karkoulis <dimitris.karkoulis@gmail.com>
41  *  \author Szilárd Páll <pall.szilard@gmail.com>
42  *  \ingroup module_mdlib
43  *
44  *  TODO (psz):
45  *  - Add a static const cl_uint c_pruneKernelWorkDim / c_nbnxnKernelWorkDim = 3 (a sketch follows the convenience constants below);
46  *  - Rework the copying of OCL data structures done before every invocation of both
47  *    nb and prune kernels (using fillin_ocl_structures); also consider at the same
48  *    time calling clSetKernelArg only on the updated parameters (if tracking changed
49  *    parameters is feasible);
50  *  - Consider using the event_wait_list argument to clEnqueueNDRangeKernel to mark
51  *    dependencies on the kernel launched: e.g. the non-local nb kernel's dependency
52  *    on the misc_ops_and_local_H2D_done event could be better expressed this way (see the sketch after this comment).
53  *
54  *  - Consider extracting common sections of the OpenCL and CUDA nbnxn logic, e.g:
55  *    - in nbnxn_gpu_launch_kernel_pruneonly() the pre- and post-kernel launch logic
56  *      is identical in the two implementations, so a 3-way split might allow sharing
57  *      code;
58  *    -
59  *
60  */
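
/* A possible shape of the event_wait_list approach mentioned in the TODO above (a minimal
 * sketch, not how this file currently works; nb_kernel and the work sizes are assumed to be
 * set up as in nbnxn_gpu_launch_kernel() below):
 *
 *     cl_event deps[] = { nb->misc_ops_and_local_H2D_done };
 *     clEnqueueNDRangeKernel(stream, nb_kernel, 3, nullptr, global_work_size, local_work_size,
 *                            1, deps, nullptr);
 *
 * The launch then waits on the event directly, so the separate barrier enqueued by
 * sync_ocl_event() would not be needed; the event would still have to be released
 * explicitly, as sync_ocl_event() does now.
 */
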
61 #include "gmxpre.h"
62
63 #include <assert.h>
64 #include <stdlib.h>
65
66 #if defined(_MSVC)
67 #include <limits>
68 #endif
69
70 #include "thread_mpi/atomic.h"
71
72 #include "gromacs/gpu_utils/oclutils.h"
73 #include "gromacs/hardware/hw_info.h"
74 #include "gromacs/mdlib/force_flags.h"
75 #include "gromacs/mdlib/nb_verlet.h"
76 #include "gromacs/mdlib/nbnxn_consts.h"
77 #include "gromacs/mdlib/nbnxn_gpu.h"
78 #include "gromacs/mdlib/nbnxn_gpu_common.h"
79 #include "gromacs/mdlib/nbnxn_gpu_common_utils.h"
80 #include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
81 #include "gromacs/mdlib/nbnxn_pairlist.h"
82 #include "gromacs/pbcutil/ishift.h"
83 #include "gromacs/timing/gpu_timing.h"
84 #include "gromacs/utility/cstringutil.h"
85 #include "gromacs/utility/fatalerror.h"
86 #include "gromacs/utility/gmxassert.h"
87
88 #include "nbnxn_ocl_internal.h"
89 #include "nbnxn_ocl_types.h"
90
91 #if defined TEXOBJ_SUPPORTED && __CUDA_ARCH__ >= 300
92 #define USE_TEXOBJ
93 #endif
94
95 /*! \brief Convenience constants */
96 //@{
97 static const int c_numClPerSupercl = c_nbnxnGpuNumClusterPerSupercluster;
98 static const int c_clSize          = c_nbnxnGpuClusterSize;
99 //@}
100
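/* One possible form of the work-dimension constants suggested in the file-level TODO above
 * (sketch only, not used below):
 *
 *     static const cl_uint c_nbnxnKernelWorkDim = 3;
 *     static const cl_uint c_pruneKernelWorkDim = 3;
 *
 * These would replace the literal 3 passed to clEnqueueNDRangeKernel() and
 * validate_global_work_size() in the launch functions further down.
 */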
101
102 /* Uncomment this define to enable kernel debugging */
103 //#define DEBUG_OCL
104
105 /*! \brief Specifies which kernel run to debug */
106 #define DEBUG_RUN_STEP 2
107
108 /*! \brief Validates the input global work size parameter.
109  */
110 static inline void validate_global_work_size(size_t *global_work_size, int work_dim, const gmx_device_info_t *dinfo)
111 {
112     cl_uint device_size_t_size_bits;
113     cl_uint host_size_t_size_bits;
114
115     assert(dinfo);
116
117     /* Each component of a global_work_size must not exceed the range given by the
118        sizeof(device size_t) for the device on which the kernel execution will
119        be enqueued. See:
120        https://www.khronos.org/registry/cl/sdk/1.0/docs/man/xhtml/clEnqueueNDRangeKernel.html
121      */
122     device_size_t_size_bits = dinfo->adress_bits;
123     host_size_t_size_bits   = (cl_uint)(sizeof(size_t) * 8);
124
125     /* If sizeof(host size_t) <= sizeof(device size_t)
126             => global_work_size components will always be valid
127        else
128             => get device limit for global work size and
129             compare it against each component of global_work_size.
130      */
131     if (host_size_t_size_bits > device_size_t_size_bits)
132     {
133         size_t device_limit;
134
135         device_limit = (((size_t)1) << device_size_t_size_bits) - 1;
136
137         for (int i = 0; i < work_dim; i++)
138         {
139             if (global_work_size[i] > device_limit)
140             {
141                 gmx_fatal(FARGS, "Watch out, the input system is too large to simulate!\n"
142                           "The number of nonbonded work units (=number of super-clusters) exceeds the "
143                           "device capabilities. Global work size limit exceeded (%lu > %lu)!",
144                           (unsigned long)global_work_size[i], (unsigned long)device_limit);
145             }
146         }
147     }
148 }
149
150 /* Constant arrays listing the non-bonded kernel function names. The arrays are
151  * two-dimensional, indexed by electrostatics and VDW type.
152  *
153  *  Note that the row- and column-order of the kernel names has to match the
154  *  order of the corresponding enumerated electrostatics and vdw types
155  *  defined in nbnxn_ocl_types.h.
156  */
157
158 /*! \brief Force-only kernel function names. */
159 static const char* nb_kfunc_noener_noprune_ptr[eelOclNR][evdwOclNR] =
160 {
161     { "nbnxn_kernel_ElecCut_VdwLJ_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombLB_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJFsw_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJPsw_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_opencl"            },
162     { "nbnxn_kernel_ElecRF_VdwLJ_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombLB_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJFsw_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJPsw_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_opencl"             },
163     { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_opencl"        },
164     { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_opencl" },
165     { "nbnxn_kernel_ElecEw_VdwLJ_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombLB_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJFsw_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJPsw_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_opencl"             },
166     { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_opencl"      }
167 };
168
169 /*! \brief Force + energy kernel function names. */
170 static const char* nb_kfunc_ener_noprune_ptr[eelOclNR][evdwOclNR] =
171 {
172     { "nbnxn_kernel_ElecCut_VdwLJ_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJFsw_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJPsw_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_opencl"            },
173     { "nbnxn_kernel_ElecRF_VdwLJ_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJFsw_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJPsw_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_opencl"             },
174     { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_opencl"        },
175     { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_opencl" },
176     { "nbnxn_kernel_ElecEw_VdwLJ_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJFsw_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJPsw_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_opencl"             },
177     { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_opencl"      }
178 };
179
180 /*! \brief Force + pruning kernel function names. */
181 static const char* nb_kfunc_noener_prune_ptr[eelOclNR][evdwOclNR] =
182 {
183     { "nbnxn_kernel_ElecCut_VdwLJ_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_opencl"             },
184     { "nbnxn_kernel_ElecRF_VdwLJ_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_opencl"              },
185     { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_opencl"         },
186     { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_opencl"  },
187     { "nbnxn_kernel_ElecEw_VdwLJ_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_opencl"              },
188     { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_opencl"       }
189 };
190
191 /*! \brief Force + energy + pruning kernel function names. */
192 static const char* nb_kfunc_ener_prune_ptr[eelOclNR][evdwOclNR] =
193 {
194     { "nbnxn_kernel_ElecCut_VdwLJ_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_opencl",            "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_opencl"            },
195     { "nbnxn_kernel_ElecRF_VdwLJ_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_opencl",             "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_opencl"             },
196     { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_opencl",        "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_opencl"        },
197     { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_opencl" },
198     { "nbnxn_kernel_ElecEw_VdwLJ_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_opencl",             "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_opencl"             },
199     { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_opencl",      "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_opencl"      }
200 };
201
202 /*! \brief Return the prune kernel to be executed at the current invocation.
203  *
204  * \param[in] kernel_pruneonly  array of prune kernel objects
205  * \param[in] firstPrunePass    true if the first pruning pass is being executed
206  */
207 static inline cl_kernel selectPruneKernel(cl_kernel kernel_pruneonly[],
208                                           bool      firstPrunePass)
209 {
210     cl_kernel  *kernelPtr;
211
212     if (firstPrunePass)
213     {
214         kernelPtr = &(kernel_pruneonly[epruneFirst]);
215     }
216     else
217     {
218         kernelPtr = &(kernel_pruneonly[epruneRolling]);
219     }
220     // TODO: consider creating the prune kernel object here to avoid a
221     // clCreateKernel for the rolling prune kernel if this is not needed (see the sketch below).
222     return *kernelPtr;
223 }
224
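/* One possible shape of the lazy kernel creation suggested by the TODO in selectPruneKernel()
 * above (a minimal sketch; the prune kernel names and the extra nb parameter are assumptions,
 * only the cached program handle is used in the same way as elsewhere in this file):
 *
 *     if (*kernelPtr == nullptr)
 *     {
 *         cl_int cl_error;
 *         *kernelPtr = clCreateKernel(nb->dev_rundata->program,
 *                                     firstPrunePass ? "nbnxn_kernel_prune_opencl"
 *                                                    : "nbnxn_kernel_prune_rolling_opencl",
 *                                     &cl_error);
 *         assert(cl_error == CL_SUCCESS);
 *     }
 *
 * This would require passing nb (or at least the program handle) into selectPruneKernel().
 */
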
225 /*! \brief Return the kernel to be executed at the current step.
226  *  OpenCL kernel objects are cached in nb. If the requested kernel is not
227  *  found in the cache, it will be created and the cache will be updated.
228  */
229 static inline cl_kernel select_nbnxn_kernel(gmx_nbnxn_ocl_t   *nb,
230                                             int                eeltype,
231                                             int                evdwtype,
232                                             bool               bDoEne,
233                                             bool               bDoPrune)
234 {
235     const char* kernel_name_to_run;
236     cl_kernel  *kernel_ptr;
237     cl_int      cl_error;
238
239     assert(eeltype  < eelOclNR);
240     assert(evdwtype < evdwOclNR);
241
242     if (bDoEne)
243     {
244         if (bDoPrune)
245         {
246             kernel_name_to_run = nb_kfunc_ener_prune_ptr[eeltype][evdwtype];
247             kernel_ptr         = &(nb->kernel_ener_prune_ptr[eeltype][evdwtype]);
248         }
249         else
250         {
251             kernel_name_to_run = nb_kfunc_ener_noprune_ptr[eeltype][evdwtype];
252             kernel_ptr         = &(nb->kernel_ener_noprune_ptr[eeltype][evdwtype]);
253         }
254     }
255     else
256     {
257         if (bDoPrune)
258         {
259             kernel_name_to_run = nb_kfunc_noener_prune_ptr[eeltype][evdwtype];
260             kernel_ptr         = &(nb->kernel_noener_prune_ptr[eeltype][evdwtype]);
261         }
262         else
263         {
264             kernel_name_to_run = nb_kfunc_noener_noprune_ptr[eeltype][evdwtype];
265             kernel_ptr         = &(nb->kernel_noener_noprune_ptr[eeltype][evdwtype]);
266         }
267     }
268
269     if (NULL == kernel_ptr[0])
270     {
271         *kernel_ptr = clCreateKernel(nb->dev_rundata->program, kernel_name_to_run, &cl_error);
272         assert(cl_error == CL_SUCCESS);
273     }
274     // TODO: handle errors
275
276     return *kernel_ptr;
277 }
278
279 /*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use.
280  */
281 static inline int calc_shmem_required_nonbonded(int  vdwType,
282                                                 bool bPrefetchLjParam)
283 {
284     int shmem;
285
286     /* size of shmem (force-buffers/xq/atom type preloading) */
287     /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
288     /* i-atom x+q in shared memory */
289     shmem  = c_numClPerSupercl * c_clSize * sizeof(float) * 4; /* xqib */
290     /* cj in shared memory, for both warps separately */
291     shmem += 2 * c_nbnxnGpuJgroupSize * sizeof(int);           /* cjs  */
292     if (bPrefetchLjParam)
293     {
294         if (useLjCombRule(vdwType))
295         {
296             /* i-atom LJ combination parameters in shared memory */
297             shmem += c_numClPerSupercl * c_clSize * 2*sizeof(float); /* atib abused for ljcp, float2 */
298         }
299         else
300         {
301             /* i-atom types in shared memory */
302             shmem += c_numClPerSupercl * c_clSize * sizeof(int); /* atib */
303         }
304     }
305     /* force reduction buffers in shared memory */
306     shmem += c_clSize * c_clSize * 3 * sizeof(float);    /* f_buf */
307     /* Warp vote; strictly, the factor of 2 below should be the number of warps per work-group. */
308     shmem += sizeof(cl_uint) * 2;                        /* warp_any */
309     return shmem;
310 }
311
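/* For orientation, a worked example of the size computed by calc_shmem_required_nonbonded()
 * above (assuming the typical values c_clSize = 8, c_numClPerSupercl = 8,
 * c_nbnxnGpuJgroupSize = 4 and 4-byte float/int/cl_uint; these values are assumptions):
 *
 *     xqib:     8*8*4 * sizeof(float)   = 1024 bytes
 *     cjs:      2*4   * sizeof(int)     =   32 bytes
 *     atib:     8*8   * sizeof(int)     =  256 bytes  (512 bytes for the LJ-comb variant)
 *     f_buf:    8*8*3 * sizeof(float)   =  768 bytes
 *     warp_any: 2     * sizeof(cl_uint) =    8 bytes
 *
 * i.e. roughly 2 kB of local memory per work-group with i-atom parameter prefetching enabled.
 */
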
312 /*! \brief Initializes data structures that are going to be sent to the OpenCL device.
313  *
314  *  The device can't use the same data structures as the host for two main reasons:
315  *  - OpenCL restrictions (pointers are not accepted inside data structures)
316  *  - some host side fields are not needed for the OpenCL kernels.
317  *
318  *  This function is called before the launch of both nbnxn and prune kernels.
319  */
320 static void fillin_ocl_structures(cl_nbparam_t        *nbp,
321                                   cl_nbparam_params_t *nbparams_params)
322 {
323     nbparams_params->coulomb_tab_scale = nbp->coulomb_tab_scale;
324     nbparams_params->c_rf              = nbp->c_rf;
325     nbparams_params->dispersion_shift  = nbp->dispersion_shift;
326     nbparams_params->eeltype           = nbp->eeltype;
327     nbparams_params->epsfac            = nbp->epsfac;
328     nbparams_params->ewaldcoeff_lj     = nbp->ewaldcoeff_lj;
329     nbparams_params->ewald_beta        = nbp->ewald_beta;
330     nbparams_params->rcoulomb_sq       = nbp->rcoulomb_sq;
331     nbparams_params->repulsion_shift   = nbp->repulsion_shift;
332     nbparams_params->rlistOuter_sq     = nbp->rlistOuter_sq;
333     nbparams_params->rvdw_sq           = nbp->rvdw_sq;
334     nbparams_params->rlistInner_sq     = nbp->rlistInner_sq;
335     nbparams_params->rvdw_switch       = nbp->rvdw_switch;
336     nbparams_params->sh_ewald          = nbp->sh_ewald;
337     nbparams_params->sh_lj_ewald       = nbp->sh_lj_ewald;
338     nbparams_params->two_k_rf          = nbp->two_k_rf;
339     nbparams_params->vdwtype           = nbp->vdwtype;
340     nbparams_params->vdw_switch        = nbp->vdw_switch;
341 }
342
343 /*! \brief Enqueues a wait for event completion.
344  *
345  * Then it releases the event and sets it to 0.
346  * Don't use this function when more than one wait will be issued for the event.
347  * Equivalent to making a CUDA stream wait on an event (cudaStreamWaitEvent). */
348 static void sync_ocl_event(cl_command_queue stream, cl_event *ocl_event)
349 {
350     cl_int gmx_unused cl_error;
351
352     /* Enqueue wait */
353 #ifdef CL_VERSION_1_2
354     cl_error = clEnqueueBarrierWithWaitList(stream, 1, ocl_event, NULL);
355 #else
356     cl_error = clEnqueueWaitForEvents(stream, 1, ocl_event);
357 #endif
358
359     GMX_RELEASE_ASSERT(CL_SUCCESS == cl_error, ocl_get_error_string(cl_error).c_str());
360
361     /* Release the event and reset it to 0. It is ok to release it, as clEnqueueWaitForEvents performs an implicit retain on the event. */
362     cl_error = clReleaseEvent(*ocl_event);
363     assert(CL_SUCCESS == cl_error);
364     *ocl_event = 0;
365 }
366
367 /*! \brief Launch GPU kernel
368
369    As we execute nonbonded workload in separate queues, before launching
370    the kernel we need to make sure that the following operations have completed:
371    - atomdata allocation and related H2D transfers (every nstlist step);
372    - pair list H2D transfer (every nstlist step);
373    - shift vector H2D transfer (every nstlist step);
374    - force (+shift force and energy) output clearing (every step).
375
376    These operations are issued in the local queue at the beginning of the step
377    and therefore always complete before the local kernel launch. The non-local
378    kernel is launched after the local on the same device/context, so this is
379    inherently scheduled after the operations in the local stream (including the
380    above "misc_ops").
381    However, for the sake of having a future-proof implementation, we use the
382    misc_ops_and_local_H2D_done event to record the point in time when the above
383    operations are finished and synchronize with this event in the non-local stream.
384  */
385 void nbnxn_gpu_launch_kernel(gmx_nbnxn_ocl_t               *nb,
386                              const struct nbnxn_atomdata_t *nbatom,
387                              int                            flags,
388                              int                            iloc)
389 {
390     cl_int               cl_error;
391     int                  adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */
392     /* OpenCL kernel launch-related stuff */
393     int                  shmem;
394     size_t               local_work_size[3], global_work_size[3];
395     cl_kernel            nb_kernel = NULL; /* kernel object for the nonbonded kernel in use */
396
397     cl_atomdata_t       *adat    = nb->atdat;
398     cl_nbparam_t        *nbp     = nb->nbparam;
399     cl_plist_t          *plist   = nb->plist[iloc];
400     cl_timers_t         *t       = nb->timers;
401     cl_command_queue     stream  = nb->stream[iloc];
402
403     bool                 bCalcEner   = flags & GMX_FORCE_ENERGY;
404     int                  bCalcFshift = flags & GMX_FORCE_VIRIAL;
405     bool                 bDoTime     = nb->bDoTime;
406     cl_uint              arg_no;
407
408     cl_nbparam_params_t  nbparams_params;
409 #ifdef DEBUG_OCL
410     float              * debug_buffer_h;
411     size_t               debug_buffer_size;
412 #endif
413
414     /* Don't launch the non-local kernel if there is no work to do.
415        Doing the same for the local kernel is more complicated, since the
416        local part of the force array also depends on the non-local kernel.
417        So to avoid complicating the code and to reduce the risk of bugs,
418        we always call the local kernel, the local x+q copy and later (not in
419        this function) the stream wait, local f copyback and the f buffer
420        clearing. All these operations, except for the local interaction kernel,
421        are needed for the non-local interactions. The skip of the local kernel
422        call is taken care of later in this function. */
423     if (canSkipWork(nb, iloc))
424     {
425         plist->haveFreshList = false;
426
427         return;
428     }
429
430     /* calculate the atom data index range based on locality */
431     if (LOCAL_I(iloc))
432     {
433         adat_begin  = 0;
434         adat_len    = adat->natoms_local;
435     }
436     else
437     {
438         adat_begin  = adat->natoms_local;
439         adat_len    = adat->natoms - adat->natoms_local;
440     }
441
442     /* beginning of timed HtoD section */
443     if (bDoTime)
444     {
445         t->nb_h2d[iloc].openTimingRegion(stream);
446     }
447
448     /* HtoD x, q */
449     ocl_copy_H2D_async(adat->xq, nbatom->x + adat_begin * 4, adat_begin*sizeof(float)*4,
450                        adat_len * sizeof(float) * 4, stream, bDoTime ? t->nb_h2d[iloc].fetchNextEvent() : nullptr);
451
452     if (bDoTime)
453     {
454         t->nb_h2d[iloc].closeTimingRegion(stream);
455     }
456
457     /* When we get here all misc operations issued in the local stream as well as
458        the local xq H2D are done,
459        so we record that in the local stream and wait for it in the nonlocal one. */
460     if (nb->bUseTwoStreams)
461     {
462         if (iloc == eintLocal)
463         {
464 #ifdef CL_VERSION_1_2
465             cl_error = clEnqueueMarkerWithWaitList(stream, 0, NULL, &(nb->misc_ops_and_local_H2D_done));
466 #else
467             cl_error = clEnqueueMarker(stream, &(nb->misc_ops_and_local_H2D_done));
468 #endif
469             assert(CL_SUCCESS == cl_error);
470
471             /* Based on section 5.13 of the OpenCL 1.2 spec, a flush is needed
472              * in the local stream in order to be able to sync with the above event
473              * from the non-local stream.
474              */
475             cl_error = clFlush(stream);
476             assert(CL_SUCCESS == cl_error);
477         }
478         else
479         {
480             sync_ocl_event(stream, &(nb->misc_ops_and_local_H2D_done));
481         }
482     }
483
484     if (nbp->useDynamicPruning && plist->haveFreshList)
485     {
486         /* Prunes for rlistOuter and rlistInner, sets plist->haveFreshList=false
487            (this is how the timing accounting can distinguish between the
488            separate prune kernel and the combined force+prune kernel).
489          */
490         nbnxn_gpu_launch_kernel_pruneonly(nb, iloc, 1);
491     }
492
493     if (plist->nsci == 0)
494     {
495         /* Don't launch an empty local kernel (not allowed with OpenCL).
496          * TODO: Separate H2D and kernel launch into separate functions.
497          */
498         return;
499     }
500
501     /* beginning of timed nonbonded calculation section */
502     if (bDoTime)
503     {
504         t->nb_k[iloc].openTimingRegion(stream);
505     }
506
507     /* get the pointer to the kernel flavor we need to use */
508     nb_kernel = select_nbnxn_kernel(nb,
509                                     nbp->eeltype,
510                                     nbp->vdwtype,
511                                     bCalcEner,
512                                     (plist->haveFreshList && !nb->timers->didPrune[iloc]));
513
514     /* kernel launch config */
515     local_work_size[0] = c_clSize;
516     local_work_size[1] = c_clSize;
517     local_work_size[2] = 1;
518
519     global_work_size[0] = plist->nsci * local_work_size[0];
520     global_work_size[1] = 1 * local_work_size[1];
521     global_work_size[2] = 1 * local_work_size[2];
522
523     validate_global_work_size(global_work_size, 3, nb->dev_info);
524
525     shmem     = calc_shmem_required_nonbonded(nbp->vdwtype, nb->bPrefetchLjParam);
526
527 #ifdef DEBUG_OCL
528     {
529         static int run_step = 1;
530
531         if (DEBUG_RUN_STEP == run_step)
532         {
533             debug_buffer_size = global_work_size[0] * global_work_size[1] * global_work_size[2] * sizeof(float);
534             debug_buffer_h    = (float*)calloc(1, debug_buffer_size);
535             assert(NULL != debug_buffer_h);
536
537             if (NULL == nb->debug_buffer)
538             {
539                 nb->debug_buffer = clCreateBuffer(nb->dev_rundata->context, CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR,
540                                                   debug_buffer_size, debug_buffer_h, &cl_error);
541
542                 assert(CL_SUCCESS == cl_error);
543             }
544         }
545
546         run_step++;
547     }
548 #endif
549     if (debug)
550     {
551         fprintf(debug, "Non-bonded GPU launch configuration:\n\tLocal work size: %dx%dx%d\n\t"
552                 "Global work size : %dx%d\n\t#Super-clusters/clusters: %d/%d (%d)\n",
553                 (int)(local_work_size[0]), (int)(local_work_size[1]), (int)(local_work_size[2]),
554                 (int)(global_work_size[0]), (int)(global_work_size[1]), plist->nsci*c_numClPerSupercl,
555                 c_numClPerSupercl, plist->na_c);
556     }
557
558     fillin_ocl_structures(nbp, &nbparams_params);
559
560     arg_no    = 0;
561     cl_error  = CL_SUCCESS;
562     if (!useLjCombRule(nb->nbparam->vdwtype))
563     {
564         cl_error  = clSetKernelArg(nb_kernel, arg_no++, sizeof(int), &(adat->ntypes));
565     }
566     cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(nbparams_params), &(nbparams_params));
567     cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(adat->xq));
568     cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(adat->f));
569     cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(adat->e_lj));
570     cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(adat->e_el));
571     cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(adat->fshift));
572     if (useLjCombRule(nb->nbparam->vdwtype))
573     {
574         cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(adat->lj_comb));
575     }
576     else
577     {
578         cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(adat->atom_types));
579     }
580     cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(adat->shift_vec));
581     cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(nbp->nbfp_climg2d));
582     cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(nbp->nbfp_comb_climg2d));
583     cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(nbp->coulomb_tab_climg2d));
584     cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(plist->sci));
585     cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(plist->cj4));
586     cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(plist->excl));
587     cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(int), &bCalcFshift);
588     cl_error |= clSetKernelArg(nb_kernel, arg_no++, shmem, NULL);
589     cl_error |= clSetKernelArg(nb_kernel, arg_no++, sizeof(cl_mem), &(nb->debug_buffer));
590
591     assert(cl_error == CL_SUCCESS);
592
593     if (cl_error)
594     {
595         printf("OpenCL error: %s\n", ocl_get_error_string(cl_error).c_str());
596     }
597     cl_error = clEnqueueNDRangeKernel(stream, nb_kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, bDoTime ? t->nb_k[iloc].fetchNextEvent() : nullptr);
598     assert(cl_error == CL_SUCCESS);
599
600     if (bDoTime)
601     {
602         t->nb_k[iloc].closeTimingRegion(stream);
603     }
604
605 #ifdef DEBUG_OCL
606     {
607         static int run_step = 1;
608
609         if (DEBUG_RUN_STEP == run_step)
610         {
611             FILE *pf;
612             char  file_name[256] = {0};
613
614             ocl_copy_D2H_async(debug_buffer_h, nb->debug_buffer, 0,
615                                debug_buffer_size, stream, NULL);
616
617             // Make sure all data has been transferred back from the device
618             clFinish(stream);
619
620             printf("\nWriting debug_buffer to debug_buffer_ocl.txt...");
621
622             sprintf(file_name, "debug_buffer_ocl_%d.txt", DEBUG_RUN_STEP);
623             pf = fopen(file_name, "wt");
624             assert(pf != NULL);
625
626             fprintf(pf, "%20s", "");
627             for (int j = 0; j < global_work_size[0]; j++)
628             {
629                 char label[20];
630                 sprintf(label, "(wIdx=%2d thIdx=%2d)", j / local_work_size[0], j % local_work_size[0]);
631                 fprintf(pf, "%20s", label);
632             }
633
634             for (int i = 0; i < global_work_size[1]; i++)
635             {
636                 char label[20];
637                 sprintf(label, "(wIdy=%2d thIdy=%2d)", i / local_work_size[1], i % local_work_size[1]);
638                 fprintf(pf, "\n%20s", label);
639
640                 for (int j = 0; j < global_work_size[0]; j++)
641                 {
642                     fprintf(pf, "%20.5f", debug_buffer_h[i * global_work_size[0] + j]);
643                 }
644
645                 //fprintf(pf, "\n");
646             }
647
648             fclose(pf);
649
650             printf(" done.\n");
651
652
653             free(debug_buffer_h);
654             debug_buffer_h = NULL;
655         }
656
657         run_step++;
658     }
659 #endif
660 }
661
662
663 /*! \brief Calculates the amount of shared memory required by the prune kernel.
664  *
665  *  Note that for the sake of simplicity we use the CUDA terminology "shared memory"
666  *  for OpenCL local memory.
667  *
668  * \param[in] num_threads_z cj4 concurrency, i.e. the number of threads/work items in the 3rd dimension.
669  * \returns   the amount of local memory in bytes required by the pruning kernel
670  */
671 static inline int calc_shmem_required_prune(const int num_threads_z)
672 {
673     int shmem;
674
675     /* i-atom x in shared memory (for convenience we load all 4 components including q) */
676     shmem  = c_numClPerSupercl * c_clSize * sizeof(float)*4;
677     /* cj in shared memory, for each warp separately */
678     shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);
679     /* Warp vote, requires one uint per warp/32 threads per block. */
680     shmem += sizeof(cl_uint) * 2*num_threads_z;
681
682     return shmem;
683 }
684
685 void nbnxn_gpu_launch_kernel_pruneonly(gmx_nbnxn_gpu_t       *nb,
686                                        int                    iloc,
687                                        int                    numParts)
688 {
689     cl_int               cl_error;
690
691     cl_atomdata_t       *adat    = nb->atdat;
692     cl_nbparam_t        *nbp     = nb->nbparam;
693     cl_plist_t          *plist   = nb->plist[iloc];
694     cl_timers_t         *t       = nb->timers;
695     cl_command_queue     stream  = nb->stream[iloc];
696     bool                 bDoTime = nb->bDoTime;
697
698     if (plist->haveFreshList)
699     {
700         GMX_ASSERT(numParts == 1, "With first pruning we expect 1 part");
701
702         /* Set rollingPruningNumParts to 0 to signal that it has not been set yet */
703         plist->rollingPruningNumParts = 0;
704         plist->rollingPruningPart     = 0;
705     }
706     else
707     {
708         if (plist->rollingPruningNumParts == 0)
709         {
710             plist->rollingPruningNumParts = numParts;
711         }
712         else
713         {
714             GMX_ASSERT(numParts == plist->rollingPruningNumParts, "It is not allowed to change numParts in between list generation steps");
715         }
716     }
717
718     /* Use a local variable for part and update it in plist right away, so that the
719      * early return below does not need to duplicate the part increment code.
720      */
721     int part = plist->rollingPruningPart;
722
723     plist->rollingPruningPart++;
724     if (plist->rollingPruningPart >= plist->rollingPruningNumParts)
725     {
726         plist->rollingPruningPart = 0;
727     }
728
729     /* Compute the number of list entries to prune in this pass */
730     int numSciInPart = (plist->nsci - part)/numParts;
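    /* A worked example of the formula above (values invented): with nsci = 1000 and
     * numParts = 2, the pass for part 0 computes (1000 - 0)/2 = 500 entries and the
     * pass for part 1 computes (1000 - 1)/2 = 499. */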
731
732     /* Don't launch the kernel if there is no work to do. */
733     if (numSciInPart <= 0)
734     {
735         plist->haveFreshList = false;
736
737         return;
738     }
739
740     GpuRegionTimer *timer = nullptr;
741     if (bDoTime)
742     {
743         timer = &(plist->haveFreshList ? t->prune_k[iloc] : t->rollingPrune_k[iloc]);
744     }
745
746     /* beginning of timed prune calculation section */
747     if (bDoTime)
748     {
749         timer->openTimingRegion(stream);
750     }
751
752     /* Kernel launch config:
753      * - The thread block dimensions match the size of i-clusters, j-clusters,
754      *   and j-cluster concurrency, in x, y, and z, respectively.
755      * - The 1D block-grid contains as many blocks as super-clusters.
756      */
757     int       num_threads_z = getOclPruneKernelJ4Concurrency(nb->dev_info->vendor_e);
758     cl_kernel pruneKernel   = selectPruneKernel(nb->kernel_pruneonly, plist->haveFreshList);
759
760     /* kernel launch config */
761     size_t  local_work_size[3], global_work_size[3];
762     local_work_size[0] = c_clSize;
763     local_work_size[1] = c_clSize;
764     local_work_size[2] = num_threads_z;
765
766     global_work_size[0] = numSciInPart * local_work_size[0];
767     global_work_size[1] = 1 * local_work_size[1];
768     global_work_size[2] = 1 * local_work_size[2];
769
770     validate_global_work_size(global_work_size, 3, nb->dev_info);
771
772     int shmem = calc_shmem_required_prune(num_threads_z);
773
774     if (debug)
775     {
776         fprintf(debug, "Pruning GPU kernel launch configuration:\n\tLocal work size: %dx%dx%d\n\t"
777                 "\tGlobal work size: %dx%d\n\t#Super-clusters/clusters: %d/%d (%d)\n"
778                 "\tShMem: %d\n",
779                 (int)(local_work_size[0]), (int)(local_work_size[1]), (int)(local_work_size[2]),
780                 (int)(global_work_size[0]), (int)(global_work_size[1]), plist->nsci*c_numClPerSupercl,
781                 c_numClPerSupercl, plist->na_c, shmem);
782     }
783
784     cl_nbparam_params_t  nbparams_params;
785     fillin_ocl_structures(nbp, &nbparams_params);
786
787     cl_uint  arg_no = 0;
788     cl_error = CL_SUCCESS;
789
790     cl_error |= clSetKernelArg(pruneKernel, arg_no++, sizeof(nbparams_params), &(nbparams_params));
791     cl_error |= clSetKernelArg(pruneKernel, arg_no++, sizeof(cl_mem), &(adat->xq));
792     cl_error |= clSetKernelArg(pruneKernel, arg_no++, sizeof(cl_mem), &(adat->shift_vec));
793     cl_error |= clSetKernelArg(pruneKernel, arg_no++, sizeof(cl_mem), &(plist->sci));
794     cl_error |= clSetKernelArg(pruneKernel, arg_no++, sizeof(cl_mem), &(plist->cj4));
795     cl_error |= clSetKernelArg(pruneKernel, arg_no++, sizeof(cl_mem), &(plist->imask));
796     cl_error |= clSetKernelArg(pruneKernel, arg_no++, sizeof(int), &(numParts));
797     cl_error |= clSetKernelArg(pruneKernel, arg_no++, sizeof(int), &(part));
798     cl_error |= clSetKernelArg(pruneKernel, arg_no++, shmem, nullptr);
799     assert(cl_error == CL_SUCCESS);
800
801     cl_error = clEnqueueNDRangeKernel(stream, pruneKernel, 3,
802                                       nullptr, global_work_size, local_work_size,
803                                       0, nullptr, bDoTime ? timer->fetchNextEvent() : nullptr);
804     GMX_RELEASE_ASSERT(CL_SUCCESS == cl_error, ocl_get_error_string(cl_error).c_str());
805
806     if (plist->haveFreshList)
807     {
808         plist->haveFreshList         = false;
809         /* Mark that pruning has been done */
810         nb->timers->didPrune[iloc] = true;
811     }
812     else
813     {
814         /* Mark that rolling pruning has been done */
815         nb->timers->didRollingPrune[iloc] = true;
816     }
817
818     if (bDoTime)
819     {
820         timer->closeTimingRegion(stream);
821     }
822 }
823
824 /*! \brief
825  * Asynchronously launch the download of nonbonded forces from the GPU
826  * (and energies/shift forces if required).
827  */
828 void nbnxn_gpu_launch_cpyback(gmx_nbnxn_ocl_t               *nb,
829                               const struct nbnxn_atomdata_t *nbatom,
830                               int                            flags,
831                               int                            aloc)
832 {
833     cl_int gmx_unused cl_error;
834     int               adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */
835
836     /* determine interaction locality from atom locality */
837     int              iloc = gpuAtomToInteractionLocality(aloc);
838
839     cl_atomdata_t   *adat    = nb->atdat;
840     cl_timers_t     *t       = nb->timers;
841     bool             bDoTime = nb->bDoTime;
842     cl_command_queue stream  = nb->stream[iloc];
843
844     bool             bCalcEner   = flags & GMX_FORCE_ENERGY;
845     int              bCalcFshift = flags & GMX_FORCE_VIRIAL;
846
847
848     /* don't launch non-local copy-back if there was no non-local work to do */
849     if (canSkipWork(nb, iloc))
850     {
851         /* TODO An alternative way to signal that non-local work is
852            complete is to use a clEnqueueMarker+clEnqueueBarrier
853            pair. However, the use of bNonLocalStreamActive has the
854            advantage of being local to the host, so probably minimizes
855            overhead. Curiously, for NVIDIA OpenCL with an empty-domain
856            test case, overall simulation performance was higher with
857            the API calls, but this has not been tested on AMD OpenCL,
858            so could be worth considering in future. */
859         nb->bNonLocalStreamActive = false;
860         return;
861     }
862
863     getGpuAtomRange(adat, aloc, adat_begin, adat_len);
864
865     /* beginning of timed D2H section */
866     if (bDoTime)
867     {
868         t->nb_d2h[iloc].openTimingRegion(stream);
869     }
870
871     /* With DD the local D2H transfer can only start after the non-local
872        has been launched. */
873     if (iloc == eintLocal && nb->bNonLocalStreamActive)
874     {
875         sync_ocl_event(stream, &(nb->nonlocal_done));
876     }
877
878     /* DtoH f */
879     ocl_copy_D2H_async(nbatom->out[0].f + adat_begin * 3, adat->f, adat_begin*3*sizeof(float),
880                        (adat_len)* adat->f_elem_size, stream, bDoTime ? t->nb_d2h[iloc].fetchNextEvent() : nullptr);
881
882     /* kick off work */
883     cl_error = clFlush(stream);
884     assert(CL_SUCCESS == cl_error);
885
886     /* After the non-local D2H is launched the nonlocal_done event can be
887        recorded which signals that the local D2H can proceed. This event is not
888        placed after the non-local kernel because we need the non-local
889        data back first. */
890     if (iloc == eintNonlocal)
891     {
892 #ifdef CL_VERSION_1_2
893         cl_error = clEnqueueMarkerWithWaitList(stream, 0, NULL, &(nb->nonlocal_done));
894 #else
895         cl_error = clEnqueueMarker(stream, &(nb->nonlocal_done));
896 #endif
897         assert(CL_SUCCESS == cl_error);
898         nb->bNonLocalStreamActive = true;
899     }
900
901     /* only transfer energies in the local stream */
902     if (LOCAL_I(iloc))
903     {
904         /* DtoH fshift */
905         if (bCalcFshift)
906         {
907             ocl_copy_D2H_async(nb->nbst.fshift, adat->fshift, 0,
908                                SHIFTS * adat->fshift_elem_size, stream, bDoTime ? t->nb_d2h[iloc].fetchNextEvent() : nullptr);
909         }
910
911         /* DtoH energies */
912         if (bCalcEner)
913         {
914             ocl_copy_D2H_async(nb->nbst.e_lj, adat->e_lj, 0,
915                                sizeof(float), stream, bDoTime ? t->nb_d2h[iloc].fetchNextEvent() : nullptr);
916
917             ocl_copy_D2H_async(nb->nbst.e_el, adat->e_el, 0,
918                                sizeof(float), stream, bDoTime ? t->nb_d2h[iloc].fetchNextEvent() : nullptr);
919         }
920     }
921
922     if (bDoTime)
923     {
924         t->nb_d2h[iloc].closeTimingRegion(stream);
925     }
926 }
927
928
929 /*! \brief Selects the Ewald kernel type, analytical or tabulated, single or twin cut-off. */
930 int nbnxn_gpu_pick_ewald_kernel_type(bool bTwinCut)
931 {
932     bool bUseAnalyticalEwald, bForceAnalyticalEwald, bForceTabulatedEwald;
933     int  kernel_type;
934
935     /* Benchmarking/development environment variables to force the use of
936        analytical or tabulated Ewald kernel. */
937     bForceAnalyticalEwald = (getenv("GMX_OCL_NB_ANA_EWALD") != NULL);
938     bForceTabulatedEwald  = (getenv("GMX_OCL_NB_TAB_EWALD") != NULL);
939
940     if (bForceAnalyticalEwald && bForceTabulatedEwald)
941     {
942         gmx_incons("Both analytical and tabulated Ewald OpenCL non-bonded kernels "
943                    "requested through environment variables.");
944     }
945
946     /* OpenCL: By default, use analytical Ewald
947      * TODO: tabulated does not work, it needs fixing, see init_nbparam() in nbnxn_ocl_data_mgmt.cpp
948      *
949      * TODO: decide if dev_info parameter should be added to recognize NVIDIA CC>=3.0 devices.
950      *
951      */
952     //if ((dev_info->prop.major >= 3 || bForceAnalyticalEwald) && !bForceTabulatedEwald)
953     if ((1                         || bForceAnalyticalEwald) && !bForceTabulatedEwald)
954     {
955         bUseAnalyticalEwald = true;
956
957         if (debug)
958         {
959             fprintf(debug, "Using analytical Ewald OpenCL kernels\n");
960         }
961     }
962     else
963     {
964         bUseAnalyticalEwald = false;
965
966         if (debug)
967         {
968             fprintf(debug, "Using tabulated Ewald OpenCL kernels\n");
969         }
970     }
971
972     /* Use twin cut-off kernels if requested by bTwinCut or the env. var.
973        forces it (use it for debugging/benchmarking only). */
974     if (!bTwinCut && (getenv("GMX_OCL_NB_EWALD_TWINCUT") == NULL))
975     {
976         kernel_type = bUseAnalyticalEwald ? eelOclEWALD_ANA : eelOclEWALD_TAB;
977     }
978     else
979     {
980         kernel_type = bUseAnalyticalEwald ? eelOclEWALD_ANA_TWIN : eelOclEWALD_TAB_TWIN;
981     }
982
983     return kernel_type;
984 }