1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
5  * Copyright (c) 2017,2018,2019,2020,2021, by the GROMACS development team, led by
6  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
7  * and including many others, as listed in the AUTHORS file in the
8  * top-level source directory and at http://www.gromacs.org.
9  *
10  * GROMACS is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public License
12  * as published by the Free Software Foundation; either version 2.1
13  * of the License, or (at your option) any later version.
14  *
15  * GROMACS is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with GROMACS; if not, see
22  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
23  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
24  *
25  * If you want to redistribute modifications to GROMACS, please
26  * consider that scientific software is very special. Version
27  * control is crucial - bugs must be traceable. We will be happy to
28  * consider code for inclusion in the official distribution, but
29  * derived work must not be called official GROMACS. Details are found
30  * in the README & COPYING files - if they are missing, get the
31  * official version at http://www.gromacs.org.
32  *
33  * To help us fund GROMACS development, we humbly ask that you cite
34  * the research papers on the package. Check out http://www.gromacs.org.
35  */
36 /*! \file
37  *  \brief Define CUDA implementation of nbnxn_gpu.h
38  *
39  *  \author Szilard Pall <pall.szilard@gmail.com>
40  */
41 #include "gmxpre.h"
42
43 #include "config.h"
44
45 #include <assert.h>
46 #include <stdlib.h>
47
48 #include "gromacs/nbnxm/nbnxm_gpu.h"
49
50 #if defined(_MSVC)
51 #    include <limits>
52 #endif
53
54
55 #include "nbnxm_cuda.h"
56
57 #include "gromacs/gpu_utils/gpu_utils.h"
58 #include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
59 #include "gromacs/gpu_utils/typecasts.cuh"
60 #include "gromacs/gpu_utils/vectype_ops.cuh"
61 #include "gromacs/hardware/device_information.h"
62 #include "gromacs/mdtypes/simulation_workload.h"
63 #include "gromacs/nbnxm/atomdata.h"
64 #include "gromacs/nbnxm/gpu_common.h"
65 #include "gromacs/nbnxm/gpu_common_utils.h"
66 #include "gromacs/nbnxm/gpu_data_mgmt.h"
67 #include "gromacs/nbnxm/grid.h"
68 #include "gromacs/nbnxm/nbnxm.h"
69 #include "gromacs/nbnxm/pairlist.h"
70 #include "gromacs/timing/gpu_timing.h"
71 #include "gromacs/utility/cstringutil.h"
72 #include "gromacs/utility/gmxassert.h"
73
74 #include "nbnxm_buffer_ops_kernels.cuh"
75 #include "nbnxm_cuda_types.h"
76
77 /***** The kernel declarations/definitions come here *****/
78
79 /* Top-level kernel declaration generation: will generate through multiple
80  * inclusion the following flavors for all kernel declarations:
81  * - force-only output;
82  * - force and energy output;
83  * - force-only with pair list pruning;
84  * - force and energy output with pair list pruning.
85  */
86 #define FUNCTION_DECLARATION_ONLY
87 /** Force only **/
88 #include "nbnxm_cuda_kernels.cuh"
89 /** Force & energy **/
90 #define CALC_ENERGIES
91 #include "nbnxm_cuda_kernels.cuh"
92 #undef CALC_ENERGIES
93
94 /*** Pair-list pruning kernels ***/
95 /** Force only **/
96 #define PRUNE_NBL
97 #include "nbnxm_cuda_kernels.cuh"
98 /** Force & energy **/
99 #define CALC_ENERGIES
100 #include "nbnxm_cuda_kernels.cuh"
101 #undef CALC_ENERGIES
102 #undef PRUNE_NBL
103
104 /* Prune-only kernels */
105 #include "nbnxm_cuda_kernel_pruneonly.cuh"
106 #undef FUNCTION_DECLARATION_ONLY
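/* Illustrative sketch of the multiple-inclusion technique used above (hypothetical
 * names, not part of this translation unit): the kernel header derives the function
 * name and body from preprocessor state, so each inclusion with a different macro
 * combination instantiates another flavor.
 *
 *   // flavor_kernel.cuh (hypothetical)
 *   #ifdef CALC_ENERGIES
 *   #    define FLAVOR_SUFFIX _VF
 *   #else
 *   #    define FLAVOR_SUFFIX _F
 *   #endif
 *   #define CONCAT2(a, b) a##b
 *   #define FLAVOR_NAME(base, suffix) CONCAT2(base, suffix)
 *   __global__ void FLAVOR_NAME(example_nb_kernel, FLAVOR_SUFFIX)(const float4* xq, float3* f)
 *   {
 *       // force accumulation; energy accumulation only when CALC_ENERGIES is set
 *   }
 *   #undef FLAVOR_NAME
 *   #undef CONCAT2
 *   #undef FLAVOR_SUFFIX
 */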
107
108 /* Now generate the function definitions if we are using a single compilation unit. */
109 #if GMX_CUDA_NB_SINGLE_COMPILATION_UNIT
110 #    include "nbnxm_cuda_kernel_F_noprune.cu"
111 #    include "nbnxm_cuda_kernel_F_prune.cu"
112 #    include "nbnxm_cuda_kernel_VF_noprune.cu"
113 #    include "nbnxm_cuda_kernel_VF_prune.cu"
114 #    include "nbnxm_cuda_kernel_pruneonly.cu"
115 #endif /* GMX_CUDA_NB_SINGLE_COMPILATION_UNIT */
116
117 namespace Nbnxm
118 {
119
120 //! Number of CUDA threads in a block
121 // TODO Optimize this through experimentation
122 constexpr static int c_bufOpsThreadsPerBlock = 128;
123
124 /*! Nonbonded kernel function pointer type */
125 typedef void (*nbnxn_cu_kfunc_ptr_t)(const cu_atomdata_t, const NBParamGpu, const gpu_plist, bool);
126
127 /*********************************/
128
129 /*! Returns the number of blocks to be used for the nonbonded GPU kernel. */
130 static inline int calc_nb_kernel_nblock(int nwork_units, const DeviceInformation* deviceInfo)
131 {
132     int max_grid_x_size;
133
134     assert(deviceInfo);
135     /* CUDA does not accept grid dimension of 0 (which can happen e.g. with an
136        empty domain) and that case should be handled before this point. */
137     assert(nwork_units > 0);
138
139     max_grid_x_size = deviceInfo->prop.maxGridSize[0];
140
141     /* do we exceed the grid x dimension limit? */
142     if (nwork_units > max_grid_x_size)
143     {
144         gmx_fatal(FARGS,
145                   "Watch out, the input system is too large to simulate!\n"
146                   "The number of nonbonded work units (=number of super-clusters) exceeds the "
147                   "maximum grid size in x dimension (%d > %d)!",
148                   nwork_units,
149                   max_grid_x_size);
150     }
151
152     return nwork_units;
153 }
154
155
156 /* Constant arrays listing all kernel function pointers and enabling selection
157    of a kernel in an elegant manner. */
158
159 /*! Pointers to the non-bonded kernels organized in 2-dim arrays by:
160  *  electrostatics and VDW type.
161  *
162  *  Note that the row- and column-order of function pointers has to match the
163  *  order of the corresponding enumerated electrostatics and vdw types, respectively,
164  *  defined in nbnxn_cuda_types.h.
165  */
166
167 /*! Force-only kernel function pointers. */
168 static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_noprune_ptr[c_numElecTypes][c_numVdwTypes] = {
169     { nbnxn_kernel_ElecCut_VdwLJ_F_cuda,
170       nbnxn_kernel_ElecCut_VdwLJCombGeom_F_cuda,
171       nbnxn_kernel_ElecCut_VdwLJCombLB_F_cuda,
172       nbnxn_kernel_ElecCut_VdwLJFsw_F_cuda,
173       nbnxn_kernel_ElecCut_VdwLJPsw_F_cuda,
174       nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_cuda,
175       nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_cuda },
176     { nbnxn_kernel_ElecRF_VdwLJ_F_cuda,
177       nbnxn_kernel_ElecRF_VdwLJCombGeom_F_cuda,
178       nbnxn_kernel_ElecRF_VdwLJCombLB_F_cuda,
179       nbnxn_kernel_ElecRF_VdwLJFsw_F_cuda,
180       nbnxn_kernel_ElecRF_VdwLJPsw_F_cuda,
181       nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_cuda,
182       nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_cuda },
183     { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_cuda,
184       nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_cuda,
185       nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_cuda,
186       nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_cuda,
187       nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_cuda,
188       nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_cuda,
189       nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_cuda },
190     { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_cuda,
191       nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_cuda,
192       nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_cuda,
193       nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_cuda,
194       nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_cuda,
195       nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_cuda,
196       nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_cuda },
197     { nbnxn_kernel_ElecEw_VdwLJ_F_cuda,
198       nbnxn_kernel_ElecEw_VdwLJCombGeom_F_cuda,
199       nbnxn_kernel_ElecEw_VdwLJCombLB_F_cuda,
200       nbnxn_kernel_ElecEw_VdwLJFsw_F_cuda,
201       nbnxn_kernel_ElecEw_VdwLJPsw_F_cuda,
202       nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_cuda,
203       nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_cuda },
204     { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_cuda,
205       nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_cuda,
206       nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_cuda,
207       nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_cuda,
208       nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_cuda,
209       nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_cuda,
210       nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_cuda }
211 };
212
213 /*! Force + energy kernel function pointers. */
214 static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_noprune_ptr[c_numElecTypes][c_numVdwTypes] = {
215     { nbnxn_kernel_ElecCut_VdwLJ_VF_cuda,
216       nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_cuda,
217       nbnxn_kernel_ElecCut_VdwLJCombLB_VF_cuda,
218       nbnxn_kernel_ElecCut_VdwLJFsw_VF_cuda,
219       nbnxn_kernel_ElecCut_VdwLJPsw_VF_cuda,
220       nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_cuda,
221       nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_cuda },
222     { nbnxn_kernel_ElecRF_VdwLJ_VF_cuda,
223       nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_cuda,
224       nbnxn_kernel_ElecRF_VdwLJCombLB_VF_cuda,
225       nbnxn_kernel_ElecRF_VdwLJFsw_VF_cuda,
226       nbnxn_kernel_ElecRF_VdwLJPsw_VF_cuda,
227       nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_cuda,
228       nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_cuda },
229     { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_cuda,
230       nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_cuda,
231       nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_cuda,
232       nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_cuda,
233       nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_cuda,
234       nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_cuda,
235       nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_cuda },
236     { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_cuda,
237       nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_cuda,
238       nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_cuda,
239       nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_cuda,
240       nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_cuda,
241       nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_cuda,
242       nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_cuda },
243     { nbnxn_kernel_ElecEw_VdwLJ_VF_cuda,
244       nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_cuda,
245       nbnxn_kernel_ElecEw_VdwLJCombLB_VF_cuda,
246       nbnxn_kernel_ElecEw_VdwLJFsw_VF_cuda,
247       nbnxn_kernel_ElecEw_VdwLJPsw_VF_cuda,
248       nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_cuda,
249       nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_cuda },
250     { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_cuda,
251       nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_cuda,
252       nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_cuda,
253       nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_cuda,
254       nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_cuda,
255       nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_cuda,
256       nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_cuda }
257 };
258
259 /*! Force + pruning kernel function pointers. */
260 static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_prune_ptr[c_numElecTypes][c_numVdwTypes] = {
261     { nbnxn_kernel_ElecCut_VdwLJ_F_prune_cuda,
262       nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_cuda,
263       nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_cuda,
264       nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_cuda,
265       nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_cuda,
266       nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_cuda,
267       nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_cuda },
268     { nbnxn_kernel_ElecRF_VdwLJ_F_prune_cuda,
269       nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_cuda,
270       nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_cuda,
271       nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_cuda,
272       nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_cuda,
273       nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_cuda,
274       nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_cuda },
275     { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_cuda,
276       nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_cuda,
277       nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_cuda,
278       nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_cuda,
279       nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_cuda,
280       nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_cuda,
281       nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_cuda },
282     { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_cuda,
283       nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_cuda,
284       nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_cuda,
285       nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_cuda,
286       nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_cuda,
287       nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_cuda,
288       nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_cuda },
289     { nbnxn_kernel_ElecEw_VdwLJ_F_prune_cuda,
290       nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_cuda,
291       nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_cuda,
292       nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_cuda,
293       nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_cuda,
294       nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_cuda,
295       nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_cuda },
296     { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_cuda,
297       nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_cuda,
298       nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_cuda,
299       nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_cuda,
300       nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_cuda,
301       nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_cuda,
302       nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_cuda }
303 };
304
305 /*! Force + energy + pruning kernel function pointers. */
306 static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_prune_ptr[c_numElecTypes][c_numVdwTypes] = {
307     { nbnxn_kernel_ElecCut_VdwLJ_VF_prune_cuda,
308       nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_cuda,
309       nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_cuda,
310       nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_cuda,
311       nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_cuda,
312       nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_cuda,
313       nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_cuda },
314     { nbnxn_kernel_ElecRF_VdwLJ_VF_prune_cuda,
315       nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_cuda,
316       nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_cuda,
317       nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_cuda,
318       nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_cuda,
319       nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_cuda,
320       nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_cuda },
321     { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_cuda,
322       nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_cuda,
323       nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_cuda,
324       nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_cuda,
325       nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_cuda,
326       nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_cuda,
327       nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_cuda },
328     { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_cuda,
329       nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_cuda,
330       nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_cuda,
331       nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_cuda,
332       nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_cuda,
333       nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_cuda,
334       nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_cuda },
335     { nbnxn_kernel_ElecEw_VdwLJ_VF_prune_cuda,
336       nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_cuda,
337       nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_cuda,
338       nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_cuda,
339       nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_cuda,
340       nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_cuda,
341       nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_cuda },
342     { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_cuda,
343       nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_cuda,
344       nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_cuda,
345       nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_cuda,
346       nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_cuda,
347       nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_cuda,
348       nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_cuda }
349 };
350
351 /*! Return a pointer to the kernel version to be executed at the current step. */
352 static inline nbnxn_cu_kfunc_ptr_t select_nbnxn_kernel(enum ElecType           elecType,
353                                                        enum VdwType            vdwType,
354                                                        bool                    bDoEne,
355                                                        bool                    bDoPrune,
356                                                        const DeviceInformation gmx_unused* deviceInfo)
357 {
358     const int elecTypeIdx = static_cast<int>(elecType);
359     const int vdwTypeIdx  = static_cast<int>(vdwType);
360
361     GMX_ASSERT(elecTypeIdx < c_numElecTypes,
362                "The electrostatics type requested is not implemented in the CUDA kernels.");
363     GMX_ASSERT(vdwTypeIdx < c_numVdwTypes,
364                "The VdW type requested is not implemented in the CUDA kernels.");
365
366     /* assert assumptions made by the kernels */
367     GMX_ASSERT(c_nbnxnGpuClusterSize * c_nbnxnGpuClusterSize / c_nbnxnGpuClusterpairSplit
368                        == deviceInfo->prop.warpSize,
369                "The CUDA kernels require the "
370                "cluster_size_i*cluster_size_j/nbnxn_gpu_clusterpair_split to match the warp size "
371                "of the architecture targeted.");
372
373     if (bDoEne)
374     {
375         if (bDoPrune)
376         {
377             return nb_kfunc_ener_prune_ptr[elecTypeIdx][vdwTypeIdx];
378         }
379         else
380         {
381             return nb_kfunc_ener_noprune_ptr[elecTypeIdx][vdwTypeIdx];
382         }
383     }
384     else
385     {
386         if (bDoPrune)
387         {
388             return nb_kfunc_noener_prune_ptr[elecTypeIdx][vdwTypeIdx];
389         }
390         else
391         {
392             return nb_kfunc_noener_noprune_ptr[elecTypeIdx][vdwTypeIdx];
393         }
394     }
395 }
396
397 /*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use. */
398 static inline int calc_shmem_required_nonbonded(const int               num_threads_z,
399                                                 const DeviceInformation gmx_unused* deviceInfo,
400                                                 const NBParamGpu*                   nbp)
401 {
402     int shmem;
403
404     assert(deviceInfo);
405
406     /* size of shmem (force-buffers/xq/atom type preloading) */
407     /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
408     /* i-atom x+q in shared memory */
409     shmem = c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(float4);
410     /* cj in shared memory, for each warp separately */
411     shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);
412
413     if (nbp->vdwType == VdwType::CutCombGeom || nbp->vdwType == VdwType::CutCombLB)
414     {
415         /* i-atom LJ combination parameters in shared memory */
416         shmem += c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(float2);
417     }
418     else
419     {
420         /* i-atom types in shared memory */
421         shmem += c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(int);
422     }
423
424     return shmem;
425 }
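/* Worked example (assuming the typical CUDA values c_clSize = 8,
 * c_nbnxnGpuNumClusterPerSupercluster = 8, c_nbnxnGpuClusterpairSplit = 2,
 * c_nbnxnGpuJgroupSize = 4 and num_threads_z = 1):
 *   xq preload:   8 * 8 * sizeof(float4)    = 1024 bytes
 *   cj preload:   1 * 2 * 4 * sizeof(int)   =   32 bytes
 *   atom types:   8 * 8 * sizeof(int)       =  256 bytes  (or 512 bytes as float2 LJ
 *                                                          combination parameters)
 * i.e. roughly 1.3-1.5 kB per thread block, far below the per-block shared memory limit.
 */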
426
427 void nbnxnInsertNonlocalGpuDependency(NbnxmGpu* nb, const InteractionLocality interactionLocality)
428 {
429     const DeviceStream& deviceStream = *nb->deviceStreams[interactionLocality];
430
431     /* When we get here all misc operations issued in the local stream as well as
432        the local xq H2D are done,
433        so we record that in the local stream and wait for it in the nonlocal one.
434        This wait needs to precede any PP tasks, bonded or nonbonded, that may
435        compute on interactions between local and nonlocal atoms.
436      */
437     if (nb->bUseTwoStreams)
438     {
439         if (interactionLocality == InteractionLocality::Local)
440         {
441             nb->misc_ops_and_local_H2D_done.markEvent(deviceStream);
442         }
443         else
444         {
445             nb->misc_ops_and_local_H2D_done.enqueueWaitEvent(deviceStream);
446         }
447     }
448 }
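/* For orientation, with a CUDA build the event logic above corresponds roughly to the
 * raw CUDA pattern sketched here (hypothetical stream/event variables; the real code
 * goes through GpuEventSynchronizer instead of calling the CUDA API directly):
 *
 *   cudaEvent_t localDone;  // created once via cudaEventCreateWithFlags(&localDone, cudaEventDisableTiming)
 *   cudaEventRecord(localDone, localStream);            // local path: mark misc ops + xq H2D issued
 *   cudaStreamWaitEvent(nonLocalStream, localDone, 0);  // non-local path: wait for that point
 */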
449
450 /*! \brief Launch asynchronously the xq buffer host to device copy. */
451 void gpu_copy_xq_to_gpu(NbnxmGpu* nb, const nbnxn_atomdata_t* nbatom, const AtomLocality atomLocality)
452 {
453     GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");
454
455     const InteractionLocality iloc = gpuAtomToInteractionLocality(atomLocality);
456
457     int adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */
458
459     cu_atomdata_t*      adat         = nb->atdat;
460     gpu_plist*          plist        = nb->plist[iloc];
461     cu_timers_t*        t            = nb->timers;
462     const DeviceStream& deviceStream = *nb->deviceStreams[iloc];
463
464     bool bDoTime = nb->bDoTime;
465
466     /* Don't launch the non-local H2D copy if there is no dependent
467        work to do: neither non-local nor other (e.g. bonded) work
468        takes the nbnxn coordinates as input.
469        Doing the same for the local kernel is more complicated, since the
470        local part of the force array also depends on the non-local kernel.
471        So to avoid complicating the code and to reduce the risk of bugs,
472        we always call the local x+q copy (and the rest of the local
473        work in nbnxn_gpu_launch_kernel()).
474      */
475     if ((iloc == InteractionLocality::NonLocal) && !haveGpuShortRangeWork(*nb, iloc))
476     {
477         plist->haveFreshList = false;
478
479         // The event is marked for Local interactions unconditionally,
480         // so it has to be released here because of the early return
481         // for NonLocal interactions.
482         nb->misc_ops_and_local_H2D_done.reset();
483
484         return;
485     }
486
487     /* calculate the atom data index range based on locality */
488     if (atomLocality == AtomLocality::Local)
489     {
490         adat_begin = 0;
491         adat_len   = adat->natoms_local;
492     }
493     else
494     {
495         adat_begin = adat->natoms_local;
496         adat_len   = adat->natoms - adat->natoms_local;
497     }
498
499     /* HtoD x, q */
500     /* beginning of timed HtoD section */
501     if (bDoTime)
502     {
503         t->xf[atomLocality].nb_h2d.openTimingRegion(deviceStream);
504     }
505
506     static_assert(sizeof(adat->xq[0]) == sizeof(float4),
507                   "The size of the xyzq buffer element should be equal to the size of float4.");
508     copyToDeviceBuffer(&adat->xq,
509                        reinterpret_cast<const float4*>(nbatom->x().data()) + adat_begin,
510                        adat_begin,
511                        adat_len,
512                        deviceStream,
513                        GpuApiCallBehavior::Async,
514                        nullptr);
515
516     if (bDoTime)
517     {
518         t->xf[atomLocality].nb_h2d.closeTimingRegion(deviceStream);
519     }
520
521     /* When we get here all misc operations issued in the local stream as well as
522        the local xq H2D are done,
523        so we record that in the local stream and wait for it in the nonlocal one.
524        This wait needs to precede any PP tasks, bonded or nonbonded, that may
525        compute on interactions between local and nonlocal atoms.
526      */
527     nbnxnInsertNonlocalGpuDependency(nb, iloc);
528 }
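/* Note: in the CUDA build the copyToDeviceBuffer() call above amounts to an asynchronous
 * host-to-device copy of adat_len xq elements at offset adat_begin, conceptually (sketch,
 * simplified; h_xq stands for the host-side xq array):
 *
 *   cudaMemcpyAsync(d_xq + adat_begin, h_xq + adat_begin,
 *                   adat_len * sizeof(float4),
 *                   cudaMemcpyHostToDevice, deviceStream.stream());
 */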
529
530 /*! As we execute the nonbonded workload in separate streams, before launching
531    the kernel we need to make sure that the following operations have completed:
532    - atomdata allocation and related H2D transfers (every nstlist step);
533    - pair list H2D transfer (every nstlist step);
534    - shift vector H2D transfer (every nstlist step);
535    - force (+shift force and energy) output clearing (every step).
536
537    These operations are issued in the local stream at the beginning of the step
538    and therefore always complete before the local kernel launch. The non-local
539    kernel is launched after the local on the same device/context hence it is
540    inherently scheduled after the operations in the local stream (including the
541    above "misc_ops") on pre-GK110 devices with single hardware queue, but on later
542    devices with multiple hardware queues the dependency needs to be enforced.
543    We use the misc_ops_and_local_H2D_done event to record the point where
544    the local x+q H2D (and all preceding) tasks are complete and synchronize
545    with this event in the non-local stream before launching the non-bonded kernel.
546  */
547 void gpu_launch_kernel(NbnxmGpu* nb, const gmx::StepWorkload& stepWork, const InteractionLocality iloc)
548 {
549     cu_atomdata_t*      adat         = nb->atdat;
550     NBParamGpu*         nbp          = nb->nbparam;
551     gpu_plist*          plist        = nb->plist[iloc];
552     cu_timers_t*        t            = nb->timers;
553     const DeviceStream& deviceStream = *nb->deviceStreams[iloc];
554
555     bool bDoTime = nb->bDoTime;
556
557     /* Don't launch the non-local kernel if there is no work to do.
558        Doing the same for the local kernel is more complicated, since the
559        local part of the force array also depends on the non-local kernel.
560        So to avoid complicating the code and to reduce the risk of bugs,
561        we always call the local kernel, and later (not in
562        this function) the stream wait, local f copyback and the f buffer
563        clearing. All these operations, except for the local interaction kernel,
564        are needed for the non-local interactions. The skip of the local kernel
565        call is taken care of later in this function. */
566     if (canSkipNonbondedWork(*nb, iloc))
567     {
568         plist->haveFreshList = false;
569
570         return;
571     }
572
573     if (nbp->useDynamicPruning && plist->haveFreshList)
574     {
575         /* Prunes for rlistOuter and rlistInner, sets plist->haveFreshList=false
576            (TODO: ATM that's the way the timing accounting can distinguish between
577            separate prune kernel and combined force+prune, maybe we need a better way?).
578          */
579         gpu_launch_kernel_pruneonly(nb, iloc, 1);
580     }
581
582     if (plist->nsci == 0)
583     {
584         /* Don't launch an empty local kernel (not allowed with CUDA) */
585         return;
586     }
587
588     /* beginning of timed nonbonded calculation section */
589     if (bDoTime)
590     {
591         t->interaction[iloc].nb_k.openTimingRegion(deviceStream);
592     }
593
594     /* Kernel launch config:
595      * - The thread block dimensions match the size of i-clusters, j-clusters,
596      *   and j-cluster concurrency, in x, y, and z, respectively.
597      * - The 1D block-grid contains as many blocks as super-clusters.
598      */
599     int num_threads_z = 1;
600     if (nb->deviceContext_->deviceInfo().prop.major == 3 && nb->deviceContext_->deviceInfo().prop.minor == 7)
601     {
602         num_threads_z = 2;
603     }
604     int nblock = calc_nb_kernel_nblock(plist->nsci, &nb->deviceContext_->deviceInfo());
605
606
607     KernelLaunchConfig config;
608     config.blockSize[0] = c_clSize;
609     config.blockSize[1] = c_clSize;
610     config.blockSize[2] = num_threads_z;
611     config.gridSize[0]  = nblock;
612     config.sharedMemorySize =
613             calc_shmem_required_nonbonded(num_threads_z, &nb->deviceContext_->deviceInfo(), nbp);
614
615     if (debug)
616     {
617         fprintf(debug,
618                 "Non-bonded GPU launch configuration:\n\tThread block: %zux%zux%zu\n\t"
619                 "\tGrid: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n"
620                 "\tShMem: %zu\n",
621                 config.blockSize[0],
622                 config.blockSize[1],
623                 config.blockSize[2],
624                 config.gridSize[0],
625                 config.gridSize[1],
626                 plist->nsci * c_nbnxnGpuNumClusterPerSupercluster,
627                 c_nbnxnGpuNumClusterPerSupercluster,
628                 plist->na_c,
629                 config.sharedMemorySize);
630     }
631
632     auto*      timingEvent = bDoTime ? t->interaction[iloc].nb_k.fetchNextEvent() : nullptr;
633     const auto kernel =
634             select_nbnxn_kernel(nbp->elecType,
635                                 nbp->vdwType,
636                                 stepWork.computeEnergy,
637                                 (plist->haveFreshList && !nb->timers->interaction[iloc].didPrune),
638                                 &nb->deviceContext_->deviceInfo());
639     const auto kernelArgs =
640             prepareGpuKernelArguments(kernel, config, adat, nbp, plist, &stepWork.computeVirial);
641     launchGpuKernel(kernel, config, deviceStream, timingEvent, "k_calc_nb", kernelArgs);
642
643     if (bDoTime)
644     {
645         t->interaction[iloc].nb_k.closeTimingRegion(deviceStream);
646     }
647
648     if (GMX_NATIVE_WINDOWS)
649     {
650         /* Windows: force flushing WDDM queue */
651         cudaStreamQuery(deviceStream.stream());
652     }
653 }
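/* For context, a simplified sketch of how a caller typically drives these functions each
 * MD step (assumed ordering based on the comments above; locality handling, conditionals
 * and error checking omitted):
 *
 *   Nbnxm::gpu_copy_xq_to_gpu(nb, nbat, AtomLocality::Local);     // H2D x+q, local stream
 *   Nbnxm::gpu_launch_kernel(nb, stepWork, InteractionLocality::Local);
 *   Nbnxm::gpu_copy_xq_to_gpu(nb, nbat, AtomLocality::NonLocal);  // with domain decomposition
 *   Nbnxm::gpu_launch_kernel(nb, stepWork, InteractionLocality::NonLocal);
 *   Nbnxm::gpu_launch_cpyback(nb, nbat, stepWork, AtomLocality::NonLocal);
 *   Nbnxm::gpu_launch_cpyback(nb, nbat, stepWork, AtomLocality::Local);
 */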
654
655 /*! Calculates the amount of shared memory required by the CUDA kernel in use. */
656 static inline int calc_shmem_required_prune(const int num_threads_z)
657 {
658     int shmem;
659
660     /* i-atom x in shared memory */
661     shmem = c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(float4);
662     /* cj in shared memory, for each warp separately */
663     shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);
664
665     return shmem;
666 }
667
668 void gpu_launch_kernel_pruneonly(NbnxmGpu* nb, const InteractionLocality iloc, const int numParts)
669 {
670     cu_atomdata_t*      adat         = nb->atdat;
671     NBParamGpu*         nbp          = nb->nbparam;
672     gpu_plist*          plist        = nb->plist[iloc];
673     cu_timers_t*        t            = nb->timers;
674     const DeviceStream& deviceStream = *nb->deviceStreams[iloc];
675
676     bool bDoTime = nb->bDoTime;
677
678     if (plist->haveFreshList)
679     {
680         GMX_ASSERT(numParts == 1, "With first pruning we expect 1 part");
681
682         /* Set rollingPruningNumParts to signal that it is not set */
683         plist->rollingPruningNumParts = 0;
684         plist->rollingPruningPart     = 0;
685     }
686     else
687     {
688         if (plist->rollingPruningNumParts == 0)
689         {
690             plist->rollingPruningNumParts = numParts;
691         }
692         else
693         {
694             GMX_ASSERT(numParts == plist->rollingPruningNumParts,
695                        "It is not allowed to change numParts in between list generation steps");
696         }
697     }
698
699     /* Use a local variable for part and update it in plist, so we can return here
700      * without duplicating the part increment code.
701      */
702     int part = plist->rollingPruningPart;
703
704     plist->rollingPruningPart++;
705     if (plist->rollingPruningPart >= plist->rollingPruningNumParts)
706     {
707         plist->rollingPruningPart = 0;
708     }
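    /* Example: with numParts = 2 the part index used by successive rolling-prune calls
     * within one pair-list lifetime cycles 0, 1, 0, 1, ..., so each part of the list is
     * revisited once per full cycle. */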
709
710     /* Compute the number of list entries to prune in this pass */
711     int numSciInPart = (plist->nsci - part) / numParts;
712
713     /* Don't launch the kernel if there is no work to do (not allowed with CUDA) */
714     if (numSciInPart <= 0)
715     {
716         plist->haveFreshList = false;
717
718         return;
719     }
720
721     GpuRegionTimer* timer = nullptr;
722     if (bDoTime)
723     {
724         timer = &(plist->haveFreshList ? t->interaction[iloc].prune_k : t->interaction[iloc].rollingPrune_k);
725     }
726
727     /* beginning of timed prune calculation section */
728     if (bDoTime)
729     {
730         timer->openTimingRegion(deviceStream);
731     }
732
733     /* Kernel launch config:
734      * - The thread block dimensions match the size of i-clusters, j-clusters,
735      *   and j-cluster concurrency, in x, y, and z, respectively.
736      * - The 1D block-grid contains as many blocks as super-clusters.
737      */
738     int num_threads_z = c_cudaPruneKernelJ4Concurrency;
739     int nblock        = calc_nb_kernel_nblock(numSciInPart, &nb->deviceContext_->deviceInfo());
740     KernelLaunchConfig config;
741     config.blockSize[0]     = c_clSize;
742     config.blockSize[1]     = c_clSize;
743     config.blockSize[2]     = num_threads_z;
744     config.gridSize[0]      = nblock;
745     config.sharedMemorySize = calc_shmem_required_prune(num_threads_z);
746
747     if (debug)
748     {
749         fprintf(debug,
750                 "Pruning GPU kernel launch configuration:\n\tThread block: %zux%zux%zu\n\t"
751                 "\tGrid: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n"
752                 "\tShMem: %zu\n",
753                 config.blockSize[0],
754                 config.blockSize[1],
755                 config.blockSize[2],
756                 config.gridSize[0],
757                 config.gridSize[1],
758                 numSciInPart * c_nbnxnGpuNumClusterPerSupercluster,
759                 c_nbnxnGpuNumClusterPerSupercluster,
760                 plist->na_c,
761                 config.sharedMemorySize);
762     }
763
764     auto*          timingEvent  = bDoTime ? timer->fetchNextEvent() : nullptr;
765     constexpr char kernelName[] = "k_pruneonly";
766     const auto     kernel =
767             plist->haveFreshList ? nbnxn_kernel_prune_cuda<true> : nbnxn_kernel_prune_cuda<false>;
768     const auto kernelArgs = prepareGpuKernelArguments(kernel, config, adat, nbp, plist, &numParts, &part);
769     launchGpuKernel(kernel, config, deviceStream, timingEvent, kernelName, kernelArgs);
770
771     /* TODO: consider a more elegant way to track which kernel has been called
772        (combined or separate 1st pass prune, rolling prune). */
773     if (plist->haveFreshList)
774     {
775         plist->haveFreshList = false;
776         /* Mark that pruning has been done */
777         nb->timers->interaction[iloc].didPrune = true;
778     }
779     else
780     {
781         /* Mark that rolling pruning has been done */
782         nb->timers->interaction[iloc].didRollingPrune = true;
783     }
784
785     if (bDoTime)
786     {
787         timer->closeTimingRegion(deviceStream);
788     }
789
790     if (GMX_NATIVE_WINDOWS)
791     {
792         /* Windows: force flushing WDDM queue */
793         cudaStreamQuery(deviceStream.stream());
794     }
795 }
796
797 void gpu_launch_cpyback(NbnxmGpu*                nb,
798                         nbnxn_atomdata_t*        nbatom,
799                         const gmx::StepWorkload& stepWork,
800                         const AtomLocality       atomLocality)
801 {
802     GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");
803
804     int adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */
805
806     /* determine interaction locality from atom locality */
807     const InteractionLocality iloc = gpuAtomToInteractionLocality(atomLocality);
808     GMX_ASSERT(iloc == InteractionLocality::Local
809                        || (iloc == InteractionLocality::NonLocal && nb->bNonLocalStreamDoneMarked == false),
810                "Non-local stream is indicating that the copy back event is enqueued at the "
811                "beginning of the copy back function.");
812
813     /* extract the data */
814     cu_atomdata_t*      adat         = nb->atdat;
815     cu_timers_t*        t            = nb->timers;
816     bool                bDoTime      = nb->bDoTime;
817     const DeviceStream& deviceStream = *nb->deviceStreams[iloc];
818
819     /* don't launch non-local copy-back if there was no non-local work to do */
820     if ((iloc == InteractionLocality::NonLocal) && !haveGpuShortRangeWork(*nb, iloc))
821     {
822         nb->bNonLocalStreamDoneMarked = false;
823         return;
824     }
825
826     getGpuAtomRange(adat, atomLocality, &adat_begin, &adat_len);
827
828     /* beginning of timed D2H section */
829     if (bDoTime)
830     {
831         t->xf[atomLocality].nb_d2h.openTimingRegion(deviceStream);
832     }
833
834     /* With DD the local D2H transfer can only start after the non-local
835        kernel has finished. */
836     if (iloc == InteractionLocality::Local && nb->bNonLocalStreamDoneMarked)
837     {
838         nb->nonlocal_done.enqueueWaitEvent(deviceStream);
839         nb->bNonLocalStreamDoneMarked = false;
840     }
841
842     /* DtoH f
843      * Skip if buffer ops / reduction is offloaded to the GPU.
844      */
845     if (!stepWork.useGpuFBufferOps)
846     {
847         static_assert(
848                 sizeof(adat->f[0]) == sizeof(float3),
849                 "The size of the force buffer element should be equal to the size of float3.");
850         copyFromDeviceBuffer(reinterpret_cast<float3*>(nbatom->out[0].f.data()) + adat_begin,
851                              &adat->f,
852                              adat_begin,
853                              adat_len,
854                              deviceStream,
855                              GpuApiCallBehavior::Async,
856                              nullptr);
857     }
858
859     /* After the non-local D2H is launched the nonlocal_done event can be
860        recorded which signals that the local D2H can proceed. This event is not
861        placed after the non-local kernel because we want the non-local data
862        back first. */
863     if (iloc == InteractionLocality::NonLocal)
864     {
865         nb->nonlocal_done.markEvent(deviceStream);
866         nb->bNonLocalStreamDoneMarked = true;
867     }
868
869     /* only transfer energies in the local stream */
870     if (iloc == InteractionLocality::Local)
871     {
872         /* DtoH fshift when virial is needed */
873         if (stepWork.computeVirial)
874         {
875             static_assert(sizeof(nb->nbst.fshift[0]) == sizeof(adat->fshift[0]),
876                           "Sizes of host- and device-side shift vectors should be the same.");
877             copyFromDeviceBuffer(
878                     nb->nbst.fshift, &adat->fshift, 0, SHIFTS, deviceStream, GpuApiCallBehavior::Async, nullptr);
879         }
880
881         /* DtoH energies */
882         if (stepWork.computeEnergy)
883         {
884             static_assert(sizeof(nb->nbst.e_lj[0]) == sizeof(adat->e_lj[0]),
885                           "Sizes of host- and device-side LJ energy terms should be the same.");
886             copyFromDeviceBuffer(
887                     nb->nbst.e_lj, &adat->e_lj, 0, 1, deviceStream, GpuApiCallBehavior::Async, nullptr);
888             static_assert(sizeof(nb->nbst.e_el[0]) == sizeof(adat->e_el[0]),
889                           "Sizes of host- and device-side electrostatic energy terms should be the "
890                           "same.");
891             copyFromDeviceBuffer(
892                     nb->nbst.e_el, &adat->e_el, 0, 1, deviceStream, GpuApiCallBehavior::Async, nullptr);
893         }
894     }
895
896     if (bDoTime)
897     {
898         t->xf[atomLocality].nb_d2h.closeTimingRegion(deviceStream);
899     }
900 }
901
902 void cuda_set_cacheconfig()
903 {
904     cudaError_t stat;
905
906     for (int i = 0; i < c_numElecTypes; i++)
907     {
908         for (int j = 0; j < c_numVdwTypes; j++)
909         {
910             /* Default kernel 32/32 kB Shared/L1 */
911             cudaFuncSetCacheConfig(nb_kfunc_ener_prune_ptr[i][j], cudaFuncCachePreferEqual);
912             cudaFuncSetCacheConfig(nb_kfunc_ener_noprune_ptr[i][j], cudaFuncCachePreferEqual);
913             cudaFuncSetCacheConfig(nb_kfunc_noener_prune_ptr[i][j], cudaFuncCachePreferEqual);
914             stat = cudaFuncSetCacheConfig(nb_kfunc_noener_noprune_ptr[i][j], cudaFuncCachePreferEqual);
915             CU_RET_ERR(stat, "cudaFuncSetCacheConfig failed");
916         }
917     }
918 }
919
920 /* X buffer operations on the GPU: convert coordinates from rvec to the nbnxm xq layout. */
921 void nbnxn_gpu_x_to_nbat_x(const Nbnxm::Grid&        grid,
922                            bool                      setFillerCoords,
923                            NbnxmGpu*                 nb,
924                            DeviceBuffer<gmx::RVec>   d_x,
925                            GpuEventSynchronizer*     xReadyOnDevice,
926                            const Nbnxm::AtomLocality locality,
927                            int                       gridId,
928                            int                       numColumnsMax)
929 {
930     GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");
931
932     cu_atomdata_t* adat = nb->atdat;
933
934     const int                  numColumns      = grid.numColumns();
935     const int                  cellOffset      = grid.cellOffset();
936     const int                  numAtomsPerCell = grid.numAtomsPerCell();
937     Nbnxm::InteractionLocality interactionLoc  = gpuAtomToInteractionLocality(locality);
938
939     const DeviceStream& deviceStream = *nb->deviceStreams[interactionLoc];
940
941     int numAtoms = grid.srcAtomEnd() - grid.srcAtomBegin();
942     // avoid empty kernel launch, skip to inserting stream dependency
943     if (numAtoms != 0)
944     {
945         // TODO: This will only work with CUDA
946         GMX_ASSERT(d_x, "Need a valid device pointer");
947
948         // ensure that coordinates are ready on the device before launching the kernel
949         GMX_ASSERT(xReadyOnDevice, "Need a valid GpuEventSynchronizer object");
950         xReadyOnDevice->enqueueWaitEvent(deviceStream);
951
952         KernelLaunchConfig config;
953         config.blockSize[0] = c_bufOpsThreadsPerBlock;
954         config.blockSize[1] = 1;
955         config.blockSize[2] = 1;
956         config.gridSize[0] = (grid.numCellsColumnMax() * numAtomsPerCell + c_bufOpsThreadsPerBlock - 1)
957                              / c_bufOpsThreadsPerBlock;
958         config.gridSize[1] = numColumns;
959         config.gridSize[2] = 1;
960         GMX_ASSERT(config.gridSize[0] > 0,
961                    "Can not have empty grid, early return above avoids this");
962         config.sharedMemorySize = 0;
963
964         auto kernelFn = setFillerCoords ? nbnxn_gpu_x_to_nbat_x_kernel<true>
965                                         : nbnxn_gpu_x_to_nbat_x_kernel<false>;
966         float4*    d_xq          = adat->xq;
967         float3*    d_xFloat3     = asFloat3(d_x);
968         const int* d_atomIndices = nb->atomIndices;
969         const int* d_cxy_na      = &nb->cxy_na[numColumnsMax * gridId];
970         const int* d_cxy_ind     = &nb->cxy_ind[numColumnsMax * gridId];
971         const auto kernelArgs    = prepareGpuKernelArguments(kernelFn,
972                                                           config,
973                                                           &numColumns,
974                                                           &d_xq,
975                                                           &d_xFloat3,
976                                                           &d_atomIndices,
977                                                           &d_cxy_na,
978                                                           &d_cxy_ind,
979                                                           &cellOffset,
980                                                           &numAtomsPerCell);
981         launchGpuKernel(kernelFn, config, deviceStream, nullptr, "XbufferOps", kernelArgs);
982     }
983
984     // TODO: note that this is not necessary when there are no local atoms, that is:
985     // (numAtoms == 0 && interactionLoc == InteractionLocality::Local)
986     // but for now we avoid that optimization
987     nbnxnInsertNonlocalGpuDependency(nb, interactionLoc);
988 }
989
990 void* getGpuForces(NbnxmGpu* nb)
991 {
992     return nb->atdat->f;
993 }
994
995 } // namespace Nbnxm