/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
 * Copyright (c) 2017,2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *  \brief Define CUDA implementation of nbnxn_gpu.h
 *
 *  \author Szilard Pall <pall.szilard@gmail.com>
 */
#include "gmxpre.h"

#include <assert.h>
#include <stdio.h>

#include "gromacs/nbnxm/nbnxm_gpu.h"

#include "nbnxm_cuda.h"

#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
#include "gromacs/gpu_utils/typecasts.cuh"
#include "gromacs/gpu_utils/vectype_ops.cuh"
#include "gromacs/hardware/device_information.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/nbnxm/atomdata.h"
#include "gromacs/nbnxm/gpu_common.h"
#include "gromacs/nbnxm/gpu_common_utils.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/grid.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/pairlist.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"

#include "nbnxm_buffer_ops_kernels.cuh"
#include "nbnxm_cuda_types.h"
/***** The kernel declarations/definitions come here *****/

/* Top-level kernel declaration generation: will generate through multiple
 * inclusion the following flavors for all kernel declarations:
 * - force-only output;
 * - force and energy output;
 * - force-only with pair list pruning;
 * - force and energy output with pair list pruning.
 */
#define FUNCTION_DECLARATION_ONLY
/** Force only **/
#include "nbnxm_cuda_kernels.cuh"
/** Force & energy **/
#define CALC_ENERGIES
#include "nbnxm_cuda_kernels.cuh"
#undef CALC_ENERGIES

/*** Pair-list pruning kernels ***/
/** Force only **/
#define PRUNE_NBL
#include "nbnxm_cuda_kernels.cuh"
/** Force & energy **/
#define CALC_ENERGIES
#include "nbnxm_cuda_kernels.cuh"
#undef CALC_ENERGIES
#undef PRUNE_NBL

/* Prune-only kernels */
#include "nbnxm_cuda_kernel_pruneonly.cuh"
#undef FUNCTION_DECLARATION_ONLY
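
/* The multiple inclusion above works because the kernel header carries no
 * include guard and derives each kernel flavor from the macros defined at the
 * point of inclusion. A minimal sketch of the pattern (illustrative only; the
 * FLAVOR_SUFFIX macro and the header shown are hypothetical, not the actual
 * contents of nbnxm_cuda_kernels.cuh):
 * \code
 * // flavor_kernels.cuh -- intentionally has no include guard
 * #ifdef CALC_ENERGIES
 * #    define FLAVOR_SUFFIX _VF
 * #else
 * #    define FLAVOR_SUFFIX _F
 * #endif
 * // ... declare/define kernels whose names embed FLAVOR_SUFFIX ...
 * #undef FLAVOR_SUFFIX
 * \endcode
 */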
/* Now generate the function definitions if we are using a single compilation unit. */
#if GMX_CUDA_NB_SINGLE_COMPILATION_UNIT
#    include "nbnxm_cuda_kernel_F_noprune.cu"
#    include "nbnxm_cuda_kernel_F_prune.cu"
#    include "nbnxm_cuda_kernel_VF_noprune.cu"
#    include "nbnxm_cuda_kernel_VF_prune.cu"
#    include "nbnxm_cuda_kernel_pruneonly.cu"
#endif /* GMX_CUDA_NB_SINGLE_COMPILATION_UNIT */
//! Number of CUDA threads in a block
// TODO Optimize this through experimentation
constexpr static int c_bufOpsThreadsPerBlock = 128;

/*! Nonbonded kernel function pointer type */
typedef void (*nbnxn_cu_kfunc_ptr_t)(const cu_atomdata_t, const NBParamGpu, const gpu_plist, bool);

/*********************************/
/*! Returns the number of blocks to be used for the nonbonded GPU kernel. */
static inline int calc_nb_kernel_nblock(int nwork_units, const DeviceInformation* deviceInfo)
{
    int max_grid_x_size;

    assert(deviceInfo);
    /* CUDA does not accept grid dimension of 0 (which can happen e.g. with an
       empty domain) and that case should be handled before this point. */
    assert(nwork_units > 0);

    max_grid_x_size = deviceInfo->prop.maxGridSize[0];

    /* do we exceed the grid x dimension limit? */
    if (nwork_units > max_grid_x_size)
    {
        gmx_fatal(FARGS,
                  "Watch out, the input system is too large to simulate!\n"
                  "The number of nonbonded work units (=number of super-clusters) exceeds the "
                  "maximum grid size in x dimension (%d > %d)!",
                  nwork_units, max_grid_x_size);
    }

    return nwork_units;
}
/* Constant arrays listing all kernel function pointers and enabling selection
   of a kernel in an elegant manner. */

/*! Pointers to the non-bonded kernels organized in 2-dim arrays by:
 *  electrostatics and VDW type.
 *
 *  Note that the row- and column-order of function pointers has to match the
 *  order of corresponding enumerated electrostatics and vdw types, resp.,
 *  defined in nbnxn_cuda_types.h.
 */
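
/* For example, assuming the enum ordering described above (enum value names
 * illustrative of the scheme in nbnxn_cuda_types.h),
 * nb_kfunc_noener_noprune_ptr[eelTypeCUT][evdwTypeCUT] selects
 * nbnxn_kernel_ElecCut_VdwLJ_F_cuda, i.e. the plain cut-off electrostatics +
 * cut-off LJ, force-only flavor. */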
/*! Force-only kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_noprune_ptr[eelTypeNR][evdwTypeNR] = {
    { nbnxn_kernel_ElecCut_VdwLJ_F_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_F_cuda,
      nbnxn_kernel_ElecCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_F_cuda,
      nbnxn_kernel_ElecCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_cuda,
      nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_F_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_F_cuda,
      nbnxn_kernel_ElecRF_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_F_cuda,
      nbnxn_kernel_ElecRF_VdwLJPsw_F_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_cuda,
      nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_F_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_F_cuda,
      nbnxn_kernel_ElecEw_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_F_cuda,
      nbnxn_kernel_ElecEw_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_cuda,
      nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_cuda }
};
/*! Force + energy kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_noprune_ptr[eelTypeNR][evdwTypeNR] = {
    { nbnxn_kernel_ElecCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_cuda,
      nbnxn_kernel_ElecCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_VF_cuda,
      nbnxn_kernel_ElecCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_cuda,
      nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_VF_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_cuda,
      nbnxn_kernel_ElecRF_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_VF_cuda,
      nbnxn_kernel_ElecRF_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_cuda,
      nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_VF_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_cuda,
      nbnxn_kernel_ElecEw_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_VF_cuda,
      nbnxn_kernel_ElecEw_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_cuda,
      nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_cuda }
};
/*! Force + pruning kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_prune_ptr[eelTypeNR][evdwTypeNR] = {
    { nbnxn_kernel_ElecCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_cuda,
      nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_cuda,
      nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_cuda,
      nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_cuda }
};
/*! Force + energy + pruning kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_prune_ptr[eelTypeNR][evdwTypeNR] = {
    { nbnxn_kernel_ElecCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_cuda,
      nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_cuda,
      nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_cuda,
      nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_cuda }
};
/*! Return a pointer to the kernel version to be executed at the current step. */
static inline nbnxn_cu_kfunc_ptr_t select_nbnxn_kernel(int  eeltype,
                                                       int  evdwtype,
                                                       bool bDoEne,
                                                       bool bDoPrune,
                                                       const DeviceInformation gmx_unused* deviceInfo)
{
    nbnxn_cu_kfunc_ptr_t res;

    GMX_ASSERT(eeltype < eelTypeNR,
               "The electrostatics type requested is not implemented in the CUDA kernels.");
    GMX_ASSERT(evdwtype < evdwTypeNR,
               "The VdW type requested is not implemented in the CUDA kernels.");

    /* assert assumptions made by the kernels */
    GMX_ASSERT(c_nbnxnGpuClusterSize * c_nbnxnGpuClusterSize / c_nbnxnGpuClusterpairSplit
                       == deviceInfo->prop.warpSize,
               "The CUDA kernels require the "
               "cluster_size_i*cluster_size_j/nbnxn_gpu_clusterpair_split to match the warp size "
               "of the architecture targeted.");

    if (bDoEne)
    {
        res = bDoPrune ? nb_kfunc_ener_prune_ptr[eeltype][evdwtype]
                       : nb_kfunc_ener_noprune_ptr[eeltype][evdwtype];
    }
    else
    {
        res = bDoPrune ? nb_kfunc_noener_prune_ptr[eeltype][evdwtype]
                       : nb_kfunc_noener_noprune_ptr[eeltype][evdwtype];
    }

    return res;
}
/*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use. */
static inline int calc_shmem_required_nonbonded(const int num_threads_z,
                                                const DeviceInformation gmx_unused* deviceInfo,
                                                const NBParamGpu* nbp)
{
    int shmem;

    assert(deviceInfo);
    assert(nbp);

    /* size of shmem (force-buffers/xq/atom type preloading) */
    /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
    /* i-atom x+q in shared memory */
    shmem = c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(float4);
    /* cj in shared memory, for each warp separately */
    shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);
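
    /* As a worked illustration (assuming the defaults
     * c_nbnxnGpuNumClusterPerSupercluster == 8 and c_clSize == 8): the xq
     * preload above amounts to 8 * 8 * sizeof(float4) = 1024 bytes per block,
     * and each thread-block slice in z adds another
     * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize ints for the cj
     * preload. */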

    if (nbp->vdwtype == evdwTypeCUTCOMBGEOM || nbp->vdwtype == evdwTypeCUTCOMBLB)
    {
        /* i-atom LJ combination parameters in shared memory */
        shmem += c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(float2);
    }
    else
    {
        /* i-atom types in shared memory */
        shmem += c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(int);
    }

    return shmem;
}
/*! \brief Sync the nonlocal stream with dependent tasks in the local queue.
 *
 *  As the point where the local stream tasks can be considered complete happens
 *  at the same call point where the nonlocal stream should be synced with the
 *  local, this function records the event if called with the local stream as
 *  argument and inserts a wait on that event into the nonlocal GPU stream.
 */
void nbnxnInsertNonlocalGpuDependency(const NbnxmGpu* nb, const InteractionLocality interactionLocality)
{
    const DeviceStream& deviceStream = *nb->deviceStreams[interactionLocality];

    /* When we get here all misc operations issued in the local stream as well as
       the local xq H2D are done,
       so we record that in the local stream and wait for it in the nonlocal one.
       This wait needs to precede any PP tasks, bonded or nonbonded, that may
       compute on interactions between local and nonlocal atoms.
     */
    if (nb->bUseTwoStreams)
    {
        if (interactionLocality == InteractionLocality::Local)
        {
            cudaError_t stat = cudaEventRecord(nb->misc_ops_and_local_H2D_done, deviceStream.stream());
            CU_RET_ERR(stat, "cudaEventRecord on misc_ops_and_local_H2D_done failed");
        }
        else
        {
            cudaError_t stat =
                    cudaStreamWaitEvent(deviceStream.stream(), nb->misc_ops_and_local_H2D_done, 0);
            CU_RET_ERR(stat, "cudaStreamWaitEvent on misc_ops_and_local_H2D_done failed");
        }
    }
}
/*! \brief Asynchronously launch the host-to-device copy of the xq buffer. */
void gpu_copy_xq_to_gpu(NbnxmGpu* nb, const nbnxn_atomdata_t* nbatom, const AtomLocality atomLocality)
{
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");

    GMX_ASSERT(atomLocality == AtomLocality::Local || atomLocality == AtomLocality::NonLocal,
               "Only local and non-local xq transfers are supported");

    const InteractionLocality iloc = gpuAtomToInteractionLocality(atomLocality);

    int adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */

    cu_atomdata_t*      adat         = nb->atdat;
    gpu_plist*          plist        = nb->plist[iloc];
    cu_timers_t*        t            = nb->timers;
    const DeviceStream& deviceStream = *nb->deviceStreams[iloc];

    bool bDoTime = nb->bDoTime;

    /* Don't launch the non-local H2D copy if there is no dependent
       work to do: neither non-local nor other (e.g. bonded) work
       to do that has as input the nbnxn coordinates.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local x+q copy (and the rest of the local
       work in nbnxn_gpu_launch_kernel()).
     */
    if ((iloc == InteractionLocality::NonLocal) && !haveGpuShortRangeWork(*nb, iloc))
    {
        plist->haveFreshList = false;

        return;
    }

    /* calculate the atom data index range based on locality */
    if (atomLocality == AtomLocality::Local)
    {
        adat_begin = 0;
        adat_len   = adat->natoms_local;
    }
    else
    {
        adat_begin = adat->natoms_local;
        adat_len   = adat->natoms - adat->natoms_local;
    }

    /* beginning of timed HtoD section */
    if (bDoTime)
    {
        t->xf[atomLocality].nb_h2d.openTimingRegion(deviceStream);
    }

    /* HtoD x, q */
    static_assert(sizeof(adat->xq[0]) == sizeof(float4),
                  "The size of the xyzq buffer element should be equal to the size of float4.");
    copyToDeviceBuffer(&adat->xq, reinterpret_cast<const float4*>(nbatom->x().data()) + adat_begin,
                       adat_begin, adat_len, deviceStream, GpuApiCallBehavior::Async, nullptr);

    if (bDoTime)
    {
        t->xf[atomLocality].nb_h2d.closeTimingRegion(deviceStream);
    }

    /* When we get here all misc operations issued in the local stream as well as
       the local xq H2D are done,
       so we record that in the local stream and wait for it in the nonlocal one.
       This wait needs to precede any PP tasks, bonded or nonbonded, that may
       compute on interactions between local and nonlocal atoms.
     */
    nbnxnInsertNonlocalGpuDependency(nb, iloc);
}
/*! As we execute nonbonded workload in separate streams, before launching
    the kernel we need to make sure that the following operations have completed:
    - atomdata allocation and related H2D transfers (every nstlist step);
    - pair list H2D transfer (every nstlist step);
    - shift vector H2D transfer (every nstlist step);
    - force (+shift force and energy) output clearing (every step).

    These operations are issued in the local stream at the beginning of the step
    and therefore always complete before the local kernel launch. The non-local
    kernel is launched after the local one on the same device/context, hence it is
    inherently scheduled after the operations in the local stream (including the
    above "misc_ops") on pre-GK110 devices with a single hardware queue, but on later
    devices with multiple hardware queues the dependency needs to be enforced.
    We use the misc_ops_and_local_H2D_done event to record the point where
    the local x+q H2D (and all preceding) tasks are complete and synchronize
    with this event in the non-local stream before launching the non-bonded kernel.
 */
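/* A bare-CUDA sketch (not the GROMACS wrapper API) of the record/wait pattern
 * described above; the event and stream names are illustrative:
 * \code
 * cudaEventRecord(miscOpsAndLocalH2dDone, localStream);           // end of local H2D
 * cudaStreamWaitEvent(nonLocalStream, miscOpsAndLocalH2dDone, 0); // before non-local launch
 * \endcode
 */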
void gpu_launch_kernel(NbnxmGpu* nb, const gmx::StepWorkload& stepWork, const InteractionLocality iloc)
{
    cu_atomdata_t*      adat         = nb->atdat;
    NBParamGpu*         nbp          = nb->nbparam;
    gpu_plist*          plist        = nb->plist[iloc];
    cu_timers_t*        t            = nb->timers;
    const DeviceStream& deviceStream = *nb->deviceStreams[iloc];

    bool bDoTime = nb->bDoTime;

    /* Don't launch the non-local kernel if there is no work to do.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local kernel, and later (not in
       this function) the stream wait, local f copyback and the f buffer
       clearing. All these operations, except for the local interaction kernel,
       are needed for the non-local interactions. The skip of the local kernel
       call is taken care of later in this function. */
    if (canSkipNonbondedWork(*nb, iloc))
    {
        plist->haveFreshList = false;

        return;
    }

    if (nbp->useDynamicPruning && plist->haveFreshList)
    {
        /* Prunes for rlistOuter and rlistInner, sets plist->haveFreshList=false
           (TODO: ATM that's the way the timing accounting can distinguish between
           separate prune kernel and combined force+prune, maybe we need a better way?).
         */
        gpu_launch_kernel_pruneonly(nb, iloc, 1);
    }

    if (plist->nsci == 0)
    {
        /* Don't launch an empty local kernel (not allowed with CUDA) */
        return;
    }

    /* beginning of timed nonbonded calculation section */
    if (bDoTime)
    {
        t->interaction[iloc].nb_k.openTimingRegion(deviceStream);
    }

    /* Kernel launch config:
     * - The thread block dimensions match the size of i-clusters, j-clusters,
     *   and j-cluster concurrency, in x, y, and z, respectively.
     * - The 1D block-grid contains as many blocks as super-clusters.
     */
    int num_threads_z = 1;
    if (nb->deviceContext_->deviceInfo().prop.major == 3 && nb->deviceContext_->deviceInfo().prop.minor == 7)
    {
        num_threads_z = 2;
    }
    int nblock = calc_nb_kernel_nblock(plist->nsci, &nb->deviceContext_->deviceInfo());

    KernelLaunchConfig config;
    config.blockSize[0] = c_clSize;
    config.blockSize[1] = c_clSize;
    config.blockSize[2] = num_threads_z;
    config.gridSize[0]  = nblock;
    config.sharedMemorySize =
            calc_shmem_required_nonbonded(num_threads_z, &nb->deviceContext_->deviceInfo(), nbp);

    if (debug)
    {
        fprintf(debug,
                "Non-bonded GPU launch configuration:\n\tThread block: %zux%zux%zu\n\t"
                "\tGrid: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n"
                "\tShMem: %zu\n",
                config.blockSize[0], config.blockSize[1], config.blockSize[2], config.gridSize[0],
                config.gridSize[1], plist->nsci * c_nbnxnGpuNumClusterPerSupercluster,
                c_nbnxnGpuNumClusterPerSupercluster, plist->na_c, config.sharedMemorySize);
    }

    auto*      timingEvent = bDoTime ? t->interaction[iloc].nb_k.fetchNextEvent() : nullptr;
    const auto kernel =
            select_nbnxn_kernel(nbp->eeltype, nbp->vdwtype, stepWork.computeEnergy,
                                (plist->haveFreshList && !nb->timers->interaction[iloc].didPrune),
                                &nb->deviceContext_->deviceInfo());
    const auto kernelArgs =
            prepareGpuKernelArguments(kernel, config, adat, nbp, plist, &stepWork.computeVirial);
    launchGpuKernel(kernel, config, deviceStream, timingEvent, "k_calc_nb", kernelArgs);

    if (bDoTime)
    {
        t->interaction[iloc].nb_k.closeTimingRegion(deviceStream);
    }

    if (GMX_NATIVE_WINDOWS)
    {
        /* Windows: force flushing WDDM queue */
        cudaStreamQuery(deviceStream.stream());
    }
}
/*! Calculates the amount of shared memory required by the CUDA kernel in use. */
static inline int calc_shmem_required_prune(const int num_threads_z)
{
    int shmem;

    /* i-atom x in shared memory */
    shmem = c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(float4);
    /* cj in shared memory, for each warp separately */
    shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);

    return shmem;
}
void gpu_launch_kernel_pruneonly(NbnxmGpu* nb, const InteractionLocality iloc, const int numParts)
{
    cu_atomdata_t*      adat         = nb->atdat;
    NBParamGpu*         nbp          = nb->nbparam;
    gpu_plist*          plist        = nb->plist[iloc];
    cu_timers_t*        t            = nb->timers;
    const DeviceStream& deviceStream = *nb->deviceStreams[iloc];

    bool bDoTime = nb->bDoTime;

    if (plist->haveFreshList)
    {
        GMX_ASSERT(numParts == 1, "With first pruning we expect 1 part");

        /* Set rollingPruningNumParts to signal that it is not set */
        plist->rollingPruningNumParts = 0;
        plist->rollingPruningPart     = 0;
    }
    else
    {
        if (plist->rollingPruningNumParts == 0)
        {
            plist->rollingPruningNumParts = numParts;
        }
        else
        {
            GMX_ASSERT(numParts == plist->rollingPruningNumParts,
                       "It is not allowed to change numParts in between list generation steps");
        }
    }

    /* Use a local variable for part and update in plist, so we can return here
     * without duplicating the part increment code.
     */
    int part = plist->rollingPruningPart;

    plist->rollingPruningPart++;
    if (plist->rollingPruningPart >= plist->rollingPruningNumParts)
    {
        plist->rollingPruningPart = 0;
    }

    /* Compute the number of list entries to prune in this pass */
    int numSciInPart = (plist->nsci - part) / numParts;
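
    /* Worked example: with plist->nsci == 1024 and numParts == 4, part cycles
     * through 0..3 on successive calls and numSciInPart evaluates to 256, 255,
     * 255 and 255, i.e. roughly a quarter of the list is pruned per pass. */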

    /* Don't launch the kernel if there is no work to do (not allowed with CUDA) */
    if (numSciInPart <= 0)
    {
        plist->haveFreshList = false;

        return;
    }
    GpuRegionTimer* timer = nullptr;
    if (bDoTime)
    {
        timer = &(plist->haveFreshList ? t->interaction[iloc].prune_k : t->interaction[iloc].rollingPrune_k);
    }

    /* beginning of timed prune calculation section */
    if (bDoTime)
    {
        timer->openTimingRegion(deviceStream);
    }

    /* Kernel launch config:
     * - The thread block dimensions match the size of i-clusters, j-clusters,
     *   and j-cluster concurrency, in x, y, and z, respectively.
     * - The 1D block-grid contains as many blocks as super-clusters.
     */
    int                num_threads_z = c_cudaPruneKernelJ4Concurrency;
    int                nblock = calc_nb_kernel_nblock(numSciInPart, &nb->deviceContext_->deviceInfo());
    KernelLaunchConfig config;
    config.blockSize[0]     = c_clSize;
    config.blockSize[1]     = c_clSize;
    config.blockSize[2]     = num_threads_z;
    config.gridSize[0]      = nblock;
    config.sharedMemorySize = calc_shmem_required_prune(num_threads_z);

    if (debug)
    {
        fprintf(debug,
                "Pruning GPU kernel launch configuration:\n\tThread block: %zux%zux%zu\n\t"
                "\tGrid: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n"
                "\tShMem: %zu\n",
                config.blockSize[0], config.blockSize[1], config.blockSize[2], config.gridSize[0],
                config.gridSize[1], numSciInPart * c_nbnxnGpuNumClusterPerSupercluster,
                c_nbnxnGpuNumClusterPerSupercluster, plist->na_c, config.sharedMemorySize);
    }

    auto*          timingEvent  = bDoTime ? timer->fetchNextEvent() : nullptr;
    constexpr char kernelName[] = "k_pruneonly";
    const auto     kernel =
            plist->haveFreshList ? nbnxn_kernel_prune_cuda<true> : nbnxn_kernel_prune_cuda<false>;
    const auto kernelArgs = prepareGpuKernelArguments(kernel, config, adat, nbp, plist, &numParts, &part);
    launchGpuKernel(kernel, config, deviceStream, timingEvent, kernelName, kernelArgs);

    /* TODO: consider a more elegant way to track which kernel has been called
       (combined or separate 1st pass prune, rolling prune). */
    if (plist->haveFreshList)
    {
        plist->haveFreshList = false;
        /* Mark that pruning has been done */
        nb->timers->interaction[iloc].didPrune = true;
    }
    else
    {
        /* Mark that rolling pruning has been done */
        nb->timers->interaction[iloc].didRollingPrune = true;
    }

    if (bDoTime)
    {
        timer->closeTimingRegion(deviceStream);
    }

    if (GMX_NATIVE_WINDOWS)
    {
        /* Windows: force flushing WDDM queue */
        cudaStreamQuery(deviceStream.stream());
    }
}
void gpu_launch_cpyback(NbnxmGpu*                nb,
                        nbnxn_atomdata_t*        nbatom,
                        const gmx::StepWorkload& stepWork,
                        const AtomLocality       atomLocality)
{
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");

    cudaError_t stat;
    int         adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */

    /* determine interaction locality from atom locality */
    const InteractionLocality iloc = gpuAtomToInteractionLocality(atomLocality);

    /* extract the data */
    cu_atomdata_t*      adat         = nb->atdat;
    cu_timers_t*        t            = nb->timers;
    bool                bDoTime      = nb->bDoTime;
    const DeviceStream& deviceStream = *nb->deviceStreams[iloc];

    /* don't launch non-local copy-back if there was no non-local work to do */
    if ((iloc == InteractionLocality::NonLocal) && !haveGpuShortRangeWork(*nb, iloc))
    {
        return;
    }

    getGpuAtomRange(adat, atomLocality, &adat_begin, &adat_len);

    /* beginning of timed D2H section */
    if (bDoTime)
    {
        t->xf[atomLocality].nb_d2h.openTimingRegion(deviceStream);
    }

    /* With DD the local D2H transfer can only start after the non-local
       kernel has finished. */
    if (iloc == InteractionLocality::Local && nb->bUseTwoStreams)
    {
        stat = cudaStreamWaitEvent(deviceStream.stream(), nb->nonlocal_done, 0);
        CU_RET_ERR(stat, "cudaStreamWaitEvent on nonlocal_done failed");
    }

    /* DtoH f
     * Skip if buffer ops / reduction is offloaded to the GPU.
     */
    if (!stepWork.useGpuFBufferOps)
    {
        static_assert(
                sizeof(adat->f[0]) == sizeof(float3),
                "The size of the force buffer element should be equal to the size of float3.");
        copyFromDeviceBuffer(reinterpret_cast<float3*>(nbatom->out[0].f.data()) + adat_begin, &adat->f,
                             adat_begin, adat_len, deviceStream, GpuApiCallBehavior::Async, nullptr);
    }

    /* After the non-local D2H is launched the nonlocal_done event can be
       recorded which signals that the local D2H can proceed. This event is not
       placed after the non-local kernel because we want the non-local data
       back first. */
    if (iloc == InteractionLocality::NonLocal)
    {
        stat = cudaEventRecord(nb->nonlocal_done, deviceStream.stream());
        CU_RET_ERR(stat, "cudaEventRecord on nonlocal_done failed");
    }

    /* only transfer energies in the local stream */
    if (iloc == InteractionLocality::Local)
    {
        /* DtoH fshift when virial is needed */
        if (stepWork.computeVirial)
        {
            static_assert(sizeof(nb->nbst.fshift[0]) == sizeof(adat->fshift[0]),
                          "Sizes of host- and device-side shift vectors should be the same.");
            copyFromDeviceBuffer(nb->nbst.fshift, &adat->fshift, 0, SHIFTS, deviceStream,
                                 GpuApiCallBehavior::Async, nullptr);
        }

        /* DtoH energies */
        if (stepWork.computeEnergy)
        {
            static_assert(sizeof(nb->nbst.e_lj[0]) == sizeof(adat->e_lj[0]),
                          "Sizes of host- and device-side LJ energy terms should be the same.");
            copyFromDeviceBuffer(nb->nbst.e_lj, &adat->e_lj, 0, 1, deviceStream,
                                 GpuApiCallBehavior::Async, nullptr);
            static_assert(sizeof(nb->nbst.e_el[0]) == sizeof(adat->e_el[0]),
                          "Sizes of host- and device-side electrostatic energy terms should be the "
                          "same.");
            copyFromDeviceBuffer(nb->nbst.e_el, &adat->e_el, 0, 1, deviceStream,
                                 GpuApiCallBehavior::Async, nullptr);
        }
    }

    if (bDoTime)
    {
        t->xf[atomLocality].nb_d2h.closeTimingRegion(deviceStream);
    }
}
void cuda_set_cacheconfig()
{
    cudaError_t stat;

    for (int i = 0; i < eelTypeNR; i++)
    {
        for (int j = 0; j < evdwTypeNR; j++)
        {
            /* Default kernel 32/32 kB Shared/L1 */
            cudaFuncSetCacheConfig(nb_kfunc_ener_prune_ptr[i][j], cudaFuncCachePreferEqual);
            cudaFuncSetCacheConfig(nb_kfunc_ener_noprune_ptr[i][j], cudaFuncCachePreferEqual);
            cudaFuncSetCacheConfig(nb_kfunc_noener_prune_ptr[i][j], cudaFuncCachePreferEqual);
            stat = cudaFuncSetCacheConfig(nb_kfunc_noener_noprune_ptr[i][j], cudaFuncCachePreferEqual);
            CU_RET_ERR(stat, "cudaFuncSetCacheConfig failed");
        }
    }
}
/* X buffer operations on GPU: performs conversion from rvec to nb format. */
void nbnxn_gpu_x_to_nbat_x(const Nbnxm::Grid&        grid,
                           bool                      setFillerCoords,
                           NbnxmGpu*                 nb,
                           DeviceBuffer<gmx::RVec>   d_x,
                           GpuEventSynchronizer*     xReadyOnDevice,
                           const Nbnxm::AtomLocality locality,
                           int                       gridId,
                           int                       numColumnsMax)
{
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");

    cu_atomdata_t* adat = nb->atdat;

    const int                  numColumns      = grid.numColumns();
    const int                  cellOffset      = grid.cellOffset();
    const int                  numAtomsPerCell = grid.numAtomsPerCell();
    Nbnxm::InteractionLocality interactionLoc  = gpuAtomToInteractionLocality(locality);

    const DeviceStream& deviceStream = *nb->deviceStreams[interactionLoc];

    int numAtoms = grid.srcAtomEnd() - grid.srcAtomBegin();
    // avoid empty kernel launch, skip to inserting stream dependency
    if (numAtoms != 0)
    {
        // TODO: This will only work with CUDA
        GMX_ASSERT(d_x, "Need a valid device pointer");

        // ensure that coordinates are ready on the device before launching the kernel
        GMX_ASSERT(xReadyOnDevice, "Need a valid GpuEventSynchronizer object");
        xReadyOnDevice->enqueueWaitEvent(deviceStream);

        KernelLaunchConfig config;
        config.blockSize[0] = c_bufOpsThreadsPerBlock;
        config.blockSize[1] = 1;
        config.blockSize[2] = 1;
        config.gridSize[0] = (grid.numCellsColumnMax() * numAtomsPerCell + c_bufOpsThreadsPerBlock - 1)
                             / c_bufOpsThreadsPerBlock;
        config.gridSize[1] = numColumns;
        config.gridSize[2] = 1;
        GMX_ASSERT(config.gridSize[0] > 0,
                   "Can not have empty grid, early return above avoids this");
        config.sharedMemorySize = 0;

        auto       kernelFn      = setFillerCoords ? nbnxn_gpu_x_to_nbat_x_kernel<true>
                                                   : nbnxn_gpu_x_to_nbat_x_kernel<false>;
        float4*    d_xq          = adat->xq;
        float3*    d_xFloat3     = asFloat3(d_x);
        const int* d_atomIndices = nb->atomIndices;
        const int* d_cxy_na      = &nb->cxy_na[numColumnsMax * gridId];
        const int* d_cxy_ind     = &nb->cxy_ind[numColumnsMax * gridId];
        const auto kernelArgs    = prepareGpuKernelArguments(kernelFn, config, &numColumns, &d_xq,
                                                             &d_xFloat3, &d_atomIndices, &d_cxy_na,
                                                             &d_cxy_ind, &cellOffset, &numAtomsPerCell);
        launchGpuKernel(kernelFn, config, deviceStream, nullptr, "XbufferOps", kernelArgs);
    }

    // TODO: note that this is not necessary when there are no local atoms, that is:
    // (numAtoms == 0 && interactionLoc == InteractionLocality::Local)
    // but for now we avoid that optimization
    nbnxnInsertNonlocalGpuDependency(nb, interactionLoc);
}
/* F buffer operations on GPU: performs force summations and conversion from nb to rvec format.
 *
 * NOTE: When the total force device buffer is reallocated and its size increases, it is cleared in
 * the local stream. Hence, if accumulateForce is true, the non-local stream should start
 * accumulating forces only after the local stream has already done so.
 */
void nbnxn_gpu_add_nbat_f_to_f(const AtomLocality                         atomLocality,
                               DeviceBuffer<gmx::RVec>                    totalForcesDevice,
                               NbnxmGpu*                                  nb,
                               void*                                      pmeForcesDevice,
                               gmx::ArrayRef<GpuEventSynchronizer* const> dependencyList,
                               int                                        atomStart,
                               int                                        numAtoms,
                               bool                                       useGpuFPmeReduction,
                               bool                                       accumulateForce)
{
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");
    GMX_ASSERT(numAtoms != 0, "Cannot call function with no atoms");
    GMX_ASSERT(totalForcesDevice, "Need a valid totalForcesDevice pointer");

    const InteractionLocality iLocality    = gpuAtomToInteractionLocality(atomLocality);
    const DeviceStream&       deviceStream = *nb->deviceStreams[iLocality];
    cu_atomdata_t*            adat         = nb->atdat;

    size_t gmx_used_in_debug numDependency =
            static_cast<size_t>(useGpuFPmeReduction) + static_cast<size_t>(accumulateForce);
    GMX_ASSERT(numDependency >= dependencyList.size(),
               "Mismatching number of dependencies and call signature");

    // Enqueue wait on all dependencies passed
    for (auto const synchronizer : dependencyList)
    {
        synchronizer->enqueueWaitEvent(deviceStream);
    }

    /* launch kernel */

    KernelLaunchConfig config;
    config.blockSize[0]     = c_bufOpsThreadsPerBlock;
    config.blockSize[1]     = 1;
    config.blockSize[2]     = 1;
    config.gridSize[0]      = ((numAtoms + 1) + c_bufOpsThreadsPerBlock - 1) / c_bufOpsThreadsPerBlock;
    config.gridSize[1]      = 1;
    config.gridSize[2]      = 1;
    config.sharedMemorySize = 0;

    auto kernelFn = accumulateForce ? nbnxn_gpu_add_nbat_f_to_f_kernel<true, false>
                                    : nbnxn_gpu_add_nbat_f_to_f_kernel<false, false>;

    if (useGpuFPmeReduction)
    {
        GMX_ASSERT(pmeForcesDevice, "Need a valid pmeForcesDevice pointer");
        kernelFn = accumulateForce ? nbnxn_gpu_add_nbat_f_to_f_kernel<true, true>
                                   : nbnxn_gpu_add_nbat_f_to_f_kernel<false, true>;
    }

    const float3* d_fNB    = adat->f;
    const float3* d_fPme   = static_cast<float3*>(pmeForcesDevice);
    float3*       d_fTotal = asFloat3(totalForcesDevice);
    const int*    d_cell   = nb->cell;

    const auto kernelArgs = prepareGpuKernelArguments(kernelFn, config, &d_fNB, &d_fPme, &d_fTotal,
                                                      &d_cell, &atomStart, &numAtoms);

    launchGpuKernel(kernelFn, config, deviceStream, nullptr, "FbufferOps", kernelArgs);

    if (atomLocality == AtomLocality::Local)
    {
        GMX_ASSERT(nb->localFReductionDone != nullptr,
                   "localFReductionDone has to be a valid pointer");
        nb->localFReductionDone->markEvent(deviceStream);
    }
}
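
/* Example call site (a sketch; the variable names are illustrative and the
 * argument order follows the signature above):
 * \code
 * // Reduce nonbonded + PME forces into d_fTotal for the local atoms, waiting
 * // on the events in dependencyList (e.g. PME forces ready) first:
 * nbnxn_gpu_add_nbat_f_to_f(AtomLocality::Local, d_fTotal, nb, d_fPme,
 *                           dependencyList, 0, numLocalAtoms,
 *                           true,  // useGpuFPmeReduction
 *                           true); // accumulateForce
 * \endcode
 */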