Revert "Wrap more device pointers in DeviceBuffer" (!1244)
[alexxy/gromacs.git] / src / gromacs / nbnxm / cuda / nbnxm_cuda.cu
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
 * Copyright (c) 2017,2018,2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \file
 *  \brief Define CUDA implementation of nbnxn_gpu.h
 *
 *  \author Szilard Pall <pall.szilard@gmail.com>
 */
#include "gmxpre.h"

#include "config.h"

#include <assert.h>
#include <stdlib.h>

#include "gromacs/nbnxm/nbnxm_gpu.h"

#if defined(_MSC_VER)
#    include <limits>
#endif


#include "nbnxm_cuda.h"

#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
#include "gromacs/gpu_utils/typecasts.cuh"
#include "gromacs/gpu_utils/vectype_ops.cuh"
#include "gromacs/hardware/device_information.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/nbnxm/atomdata.h"
#include "gromacs/nbnxm/gpu_common.h"
#include "gromacs/nbnxm/gpu_common_utils.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/grid.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/pairlist.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/gmxassert.h"

#include "nbnxm_buffer_ops_kernels.cuh"
#include "nbnxm_cuda_types.h"

/***** The kernel declarations/definitions come here *****/

/* Top-level kernel declaration generation: will generate through multiple
 * inclusion the following flavors for all kernel declarations:
 * - force-only output;
 * - force and energy output;
 * - force-only with pair list pruning;
 * - force and energy output with pair list pruning.
 */
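/* For example, for plain cut-off electrostatics with standard LJ this yields
 * the four flavors used in the lookup tables further below:
 *   nbnxn_kernel_ElecCut_VdwLJ_F_cuda          (force only)
 *   nbnxn_kernel_ElecCut_VdwLJ_VF_cuda         (force + energy)
 *   nbnxn_kernel_ElecCut_VdwLJ_F_prune_cuda    (force, with pruning)
 *   nbnxn_kernel_ElecCut_VdwLJ_VF_prune_cuda   (force + energy, with pruning)
 */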
#define FUNCTION_DECLARATION_ONLY
/** Force only **/
#include "nbnxm_cuda_kernels.cuh"
/** Force & energy **/
#define CALC_ENERGIES
#include "nbnxm_cuda_kernels.cuh"
#undef CALC_ENERGIES

/*** Pair-list pruning kernels ***/
/** Force only **/
#define PRUNE_NBL
#include "nbnxm_cuda_kernels.cuh"
/** Force & energy **/
#define CALC_ENERGIES
#include "nbnxm_cuda_kernels.cuh"
#undef CALC_ENERGIES
#undef PRUNE_NBL

/* Prune-only kernels */
#include "nbnxm_cuda_kernel_pruneonly.cuh"
#undef FUNCTION_DECLARATION_ONLY

/* Now generate the function definitions if we are using a single compilation unit. */
#if GMX_CUDA_NB_SINGLE_COMPILATION_UNIT
#    include "nbnxm_cuda_kernel_F_noprune.cu"
#    include "nbnxm_cuda_kernel_F_prune.cu"
#    include "nbnxm_cuda_kernel_VF_noprune.cu"
#    include "nbnxm_cuda_kernel_VF_prune.cu"
#    include "nbnxm_cuda_kernel_pruneonly.cu"
#endif /* GMX_CUDA_NB_SINGLE_COMPILATION_UNIT */

namespace Nbnxm
{

//! Number of CUDA threads in a block
// TODO Optimize this through experimentation
constexpr static int c_bufOpsThreadsPerBlock = 128;

/*! Nonbonded kernel function pointer type */
typedef void (*nbnxn_cu_kfunc_ptr_t)(const NBAtomData, const NBParamGpu, const gpu_plist, bool);

/*********************************/

/*! Returns the number of blocks to be used for the nonbonded GPU kernel. */
static inline int calc_nb_kernel_nblock(int nwork_units, const DeviceInformation* deviceInfo)
{
    int max_grid_x_size;

    assert(deviceInfo);
    /* CUDA does not accept a grid dimension of 0 (which can happen e.g. with an
       empty domain) and that case should be handled before this point. */
    assert(nwork_units > 0);

    max_grid_x_size = deviceInfo->prop.maxGridSize[0];

    /* do we exceed the grid x dimension limit? */
    if (nwork_units > max_grid_x_size)
    {
        gmx_fatal(FARGS,
                  "Watch out, the input system is too large to simulate!\n"
                  "The number of nonbonded work units (=number of super-clusters) exceeds the "
                  "maximum grid size in x dimension (%d > %d)!",
                  nwork_units,
                  max_grid_x_size);
    }

    return nwork_units;
}
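
/* For reference: on the CUDA devices targeted by these kernels, prop.maxGridSize[0]
 * is typically 2^31 - 1, so in practice this limit is only reachable for extremely
 * large systems (one work unit per pair-list super-cluster). */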


/* Constant arrays listing all kernel function pointers and enabling selection
   of a kernel in an elegant manner. */

/*! Pointers to the non-bonded kernels organized in 2-dim arrays by:
 *  electrostatics and VDW type.
 *
 *  Note that the row- and column-order of function pointers has to match the
 *  order of corresponding enumerated electrostatics and vdw types, resp.,
 *  defined in nbnxn_cuda_types.h.
 */

/*! Force-only kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_noprune_ptr[c_numElecTypes][c_numVdwTypes] = {
    { nbnxn_kernel_ElecCut_VdwLJ_F_cuda,
      nbnxn_kernel_ElecCut_VdwLJCombGeom_F_cuda,
      nbnxn_kernel_ElecCut_VdwLJCombLB_F_cuda,
      nbnxn_kernel_ElecCut_VdwLJFsw_F_cuda,
      nbnxn_kernel_ElecCut_VdwLJPsw_F_cuda,
      nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_cuda,
      nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_F_cuda,
      nbnxn_kernel_ElecRF_VdwLJCombGeom_F_cuda,
      nbnxn_kernel_ElecRF_VdwLJCombLB_F_cuda,
      nbnxn_kernel_ElecRF_VdwLJFsw_F_cuda,
      nbnxn_kernel_ElecRF_VdwLJPsw_F_cuda,
      nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_cuda,
      nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_F_cuda,
      nbnxn_kernel_ElecEw_VdwLJCombGeom_F_cuda,
      nbnxn_kernel_ElecEw_VdwLJCombLB_F_cuda,
      nbnxn_kernel_ElecEw_VdwLJFsw_F_cuda,
      nbnxn_kernel_ElecEw_VdwLJPsw_F_cuda,
      nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_cuda,
      nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_cuda }
};

/*! Force + energy kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_noprune_ptr[c_numElecTypes][c_numVdwTypes] = {
    { nbnxn_kernel_ElecCut_VdwLJ_VF_cuda,
      nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_cuda,
      nbnxn_kernel_ElecCut_VdwLJCombLB_VF_cuda,
      nbnxn_kernel_ElecCut_VdwLJFsw_VF_cuda,
      nbnxn_kernel_ElecCut_VdwLJPsw_VF_cuda,
      nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_cuda,
      nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_VF_cuda,
      nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_cuda,
      nbnxn_kernel_ElecRF_VdwLJCombLB_VF_cuda,
      nbnxn_kernel_ElecRF_VdwLJFsw_VF_cuda,
      nbnxn_kernel_ElecRF_VdwLJPsw_VF_cuda,
      nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_cuda,
      nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_VF_cuda,
      nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_cuda,
      nbnxn_kernel_ElecEw_VdwLJCombLB_VF_cuda,
      nbnxn_kernel_ElecEw_VdwLJFsw_VF_cuda,
      nbnxn_kernel_ElecEw_VdwLJPsw_VF_cuda,
      nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_cuda,
      nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_cuda }
};

/*! Force + pruning kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_prune_ptr[c_numElecTypes][c_numVdwTypes] = {
    { nbnxn_kernel_ElecCut_VdwLJ_F_prune_cuda,
      nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_cuda,
      nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_cuda,
      nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_cuda,
      nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_F_prune_cuda,
      nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_cuda,
      nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_cuda,
      nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_cuda,
      nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_F_prune_cuda,
      nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_cuda,
      nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_cuda,
      nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_cuda,
      nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_cuda }
};

/*! Force + energy + pruning kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_prune_ptr[c_numElecTypes][c_numVdwTypes] = {
    { nbnxn_kernel_ElecCut_VdwLJ_VF_prune_cuda,
      nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_cuda,
      nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_cuda,
      nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_cuda,
      nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_VF_prune_cuda,
      nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_cuda,
      nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_cuda,
      nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_cuda,
      nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_VF_prune_cuda,
      nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_cuda,
      nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_cuda,
      nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_cuda,
      nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_cuda,
      nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_cuda }
};

/*! Return a pointer to the kernel version to be executed at the current step. */
static inline nbnxn_cu_kfunc_ptr_t select_nbnxn_kernel(enum ElecType           elecType,
                                                       enum VdwType            vdwType,
                                                       bool                    bDoEne,
                                                       bool                    bDoPrune,
                                                       const DeviceInformation gmx_unused* deviceInfo)
{
    const int elecTypeIdx = static_cast<int>(elecType);
    const int vdwTypeIdx  = static_cast<int>(vdwType);

    GMX_ASSERT(elecTypeIdx < c_numElecTypes,
               "The electrostatics type requested is not implemented in the CUDA kernels.");
    GMX_ASSERT(vdwTypeIdx < c_numVdwTypes,
               "The VdW type requested is not implemented in the CUDA kernels.");

    /* assert assumptions made by the kernels */
    GMX_ASSERT(c_nbnxnGpuClusterSize * c_nbnxnGpuClusterSize / c_nbnxnGpuClusterpairSplit
                       == deviceInfo->prop.warpSize,
               "The CUDA kernels require the "
               "cluster_size_i*cluster_size_j/nbnxn_gpu_clusterpair_split to match the warp size "
               "of the architecture targeted.");

    if (bDoEne)
    {
        if (bDoPrune)
        {
            return nb_kfunc_ener_prune_ptr[elecTypeIdx][vdwTypeIdx];
        }
        else
        {
            return nb_kfunc_ener_noprune_ptr[elecTypeIdx][vdwTypeIdx];
        }
    }
    else
    {
        if (bDoPrune)
        {
            return nb_kfunc_noener_prune_ptr[elecTypeIdx][vdwTypeIdx];
        }
        else
        {
            return nb_kfunc_noener_noprune_ptr[elecTypeIdx][vdwTypeIdx];
        }
    }
}
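
/* A sketch of the lookup above, assuming ElecType::Cut and VdwType::Cut are the
 * first enumerated values (matching the row/column order of the tables):
 *   select_nbnxn_kernel(ElecType::Cut, VdwType::Cut, false, false, deviceInfo)
 * returns nb_kfunc_noener_noprune_ptr[0][0], i.e. nbnxn_kernel_ElecCut_VdwLJ_F_cuda.
 */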

/*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use. */
static inline int calc_shmem_required_nonbonded(const int               num_threads_z,
                                                const DeviceInformation gmx_unused* deviceInfo,
                                                const NBParamGpu*                   nbp)
{
    int shmem;

    assert(deviceInfo);

    /* size of shmem (force-buffers/xq/atom type preloading) */
    /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
    /* i-atom x+q in shared memory */
    shmem = c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(float4);
    /* cj in shared memory, for each warp separately */
    shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);

    if (nbp->vdwType == VdwType::CutCombGeom || nbp->vdwType == VdwType::CutCombLB)
    {
        /* i-atom LJ combination parameters in shared memory */
        shmem += c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(float2);
    }
    else
    {
        /* i-atom types in shared memory */
        shmem += c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(int);
    }

    return shmem;
}
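
/* A worked example of the sizes above, assuming the usual CUDA cluster setup
 * (c_clSize = 8, c_nbnxnGpuNumClusterPerSupercluster = 8,
 * c_nbnxnGpuClusterpairSplit = 2, c_nbnxnGpuJgroupSize = 4) and num_threads_z = 1:
 *   xq preload:    8 * 8 * sizeof(float4)  = 1024 bytes
 *   cj preload:    1 * 2 * 4 * sizeof(int) =   32 bytes
 *   i-atom types:  8 * 8 * sizeof(int)     =  256 bytes
 * i.e. roughly 1.3 kB of shared memory per thread block. */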

void nbnxnInsertNonlocalGpuDependency(NbnxmGpu* nb, const InteractionLocality interactionLocality)
{
    const DeviceStream& deviceStream = *nb->deviceStreams[interactionLocality];

    /* When we get here all misc operations issued in the local stream as well as
       the local xq H2D are done, so we record that in the local stream and wait
       for it in the nonlocal one. This wait needs to precede any PP tasks,
       bonded or nonbonded, that may compute on interactions between local and
       nonlocal atoms.
     */
    if (nb->bUseTwoStreams)
    {
        if (interactionLocality == InteractionLocality::Local)
        {
            nb->misc_ops_and_local_H2D_done.markEvent(deviceStream);
        }
        else
        {
            nb->misc_ops_and_local_H2D_done.enqueueWaitEvent(deviceStream);
        }
    }
}
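
/* Illustrative call sequence (a sketch, not copied from the call sites): with
 * two streams in use, the pair
 *   nbnxnInsertNonlocalGpuDependency(nb, InteractionLocality::Local);    // record event in local stream
 *   nbnxnInsertNonlocalGpuDependency(nb, InteractionLocality::NonLocal); // make non-local stream wait on it
 * orders the non-local stream after the local xq H2D; with bUseTwoStreams
 * false both calls are no-ops. */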

/*! As we execute the nonbonded workload in separate streams, before launching
   the kernel we need to make sure that the following operations have completed:
   - atomdata allocation and related H2D transfers (every nstlist step);
   - pair list H2D transfer (every nstlist step);
   - shift vector H2D transfer (every nstlist step);
   - force (+shift force and energy) output clearing (every step).

   These operations are issued in the local stream at the beginning of the step
   and therefore always complete before the local kernel launch. The non-local
   kernel is launched after the local one on the same device/context, hence it is
   inherently scheduled after the operations in the local stream (including the
   above "misc_ops") on pre-GK110 devices with a single hardware queue, but on later
   devices with multiple hardware queues the dependency needs to be enforced.
   We use the misc_ops_and_local_H2D_done event to record the point where
   the local x+q H2D (and all preceding) tasks are complete and synchronize
   with this event in the non-local stream before launching the non-bonded kernel.
 */
void gpu_launch_kernel(NbnxmGpu* nb, const gmx::StepWorkload& stepWork, const InteractionLocality iloc)
{
    NBAtomData*         adat         = nb->atdat;
    NBParamGpu*         nbp          = nb->nbparam;
    gpu_plist*          plist        = nb->plist[iloc];
    Nbnxm::GpuTimers*   timers       = nb->timers;
    const DeviceStream& deviceStream = *nb->deviceStreams[iloc];

    bool bDoTime = nb->bDoTime;

    /* Don't launch the non-local kernel if there is no work to do.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local kernel, and later (not in
       this function) the stream wait, local f copyback and the f buffer
       clearing. All these operations, except for the local interaction kernel,
       are needed for the non-local interactions. The skip of the local kernel
       call is taken care of later in this function. */
    if (canSkipNonbondedWork(*nb, iloc))
    {
        plist->haveFreshList = false;

        return;
    }

    if (nbp->useDynamicPruning && plist->haveFreshList)
    {
        /* Prunes for rlistOuter and rlistInner, sets plist->haveFreshList=false
           (TODO: ATM that's the way the timing accounting can distinguish between
           separate prune kernel and combined force+prune, maybe we need a better way?).
         */
        gpu_launch_kernel_pruneonly(nb, iloc, 1);
    }

    if (plist->nsci == 0)
    {
        /* Don't launch an empty local kernel (not allowed with CUDA) */
        return;
    }

    /* beginning of timed nonbonded calculation section */
    if (bDoTime)
    {
        timers->interaction[iloc].nb_k.openTimingRegion(deviceStream);
    }

    /* Kernel launch config:
     * - The thread block dimensions match the size of i-clusters, j-clusters,
     *   and j-cluster concurrency, in x, y, and z, respectively.
     * - The 1D block-grid contains as many blocks as super-clusters.
     */
    int num_threads_z = 1;
    if (nb->deviceContext_->deviceInfo().prop.major == 3 && nb->deviceContext_->deviceInfo().prop.minor == 7)
    {
        num_threads_z = 2;
    }
    int nblock = calc_nb_kernel_nblock(plist->nsci, &nb->deviceContext_->deviceInfo());


    KernelLaunchConfig config;
    config.blockSize[0] = c_clSize;
    config.blockSize[1] = c_clSize;
    config.blockSize[2] = num_threads_z;
    config.gridSize[0]  = nblock;
    config.sharedMemorySize =
            calc_shmem_required_nonbonded(num_threads_z, &nb->deviceContext_->deviceInfo(), nbp);

    if (debug)
    {
        fprintf(debug,
                "Non-bonded GPU launch configuration:\n\tThread block: %zux%zux%zu\n\t"
                "\tGrid: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n"
                "\tShMem: %zu\n",
                config.blockSize[0],
                config.blockSize[1],
                config.blockSize[2],
                config.gridSize[0],
                config.gridSize[1],
                plist->nsci * c_nbnxnGpuNumClusterPerSupercluster,
                c_nbnxnGpuNumClusterPerSupercluster,
                plist->na_c,
                config.sharedMemorySize);
    }

    auto*      timingEvent = bDoTime ? timers->interaction[iloc].nb_k.fetchNextEvent() : nullptr;
    const auto kernel =
            select_nbnxn_kernel(nbp->elecType,
                                nbp->vdwType,
                                stepWork.computeEnergy,
                                (plist->haveFreshList && !nb->timers->interaction[iloc].didPrune),
                                &nb->deviceContext_->deviceInfo());
    const auto kernelArgs =
            prepareGpuKernelArguments(kernel, config, adat, nbp, plist, &stepWork.computeVirial);
    launchGpuKernel(kernel, config, deviceStream, timingEvent, "k_calc_nb", kernelArgs);

    if (bDoTime)
    {
        timers->interaction[iloc].nb_k.closeTimingRegion(deviceStream);
    }

    if (GMX_NATIVE_WINDOWS)
    {
        /* Windows: force flushing WDDM queue */
        cudaStreamQuery(deviceStream.stream());
    }
}

/*! Calculates the amount of shared memory required by the CUDA prune kernel in use. */
static inline int calc_shmem_required_prune(const int num_threads_z)
{
    int shmem;

    /* i-atom x in shared memory */
    shmem = c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(float4);
    /* cj in shared memory, for each warp separately */
    shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);

    return shmem;
}
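
/* With the same assumed cluster setup as in the example after
 * calc_shmem_required_nonbonded() and num_threads_z = c_pruneKernelJ4Concurrency
 * (assumed here to be 4), this amounts to
 * 8 * 8 * 16 + 4 * 2 * 4 * 4 = 1024 + 128 = 1152 bytes per block. */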

void gpu_launch_kernel_pruneonly(NbnxmGpu* nb, const InteractionLocality iloc, const int numParts)
{
    NBAtomData*         adat         = nb->atdat;
    NBParamGpu*         nbp          = nb->nbparam;
    gpu_plist*          plist        = nb->plist[iloc];
    Nbnxm::GpuTimers*   timers       = nb->timers;
    const DeviceStream& deviceStream = *nb->deviceStreams[iloc];

    bool bDoTime = nb->bDoTime;

    if (plist->haveFreshList)
    {
        GMX_ASSERT(numParts == 1, "With first pruning we expect a single part");

        /* Set rollingPruningNumParts to zero to signal that it is not set */
        plist->rollingPruningNumParts = 0;
        plist->rollingPruningPart     = 0;
    }
    else
    {
        if (plist->rollingPruningNumParts == 0)
        {
            plist->rollingPruningNumParts = numParts;
        }
        else
        {
            GMX_ASSERT(numParts == plist->rollingPruningNumParts,
                       "It is not allowed to change numParts between list generation steps");
        }
    }

    /* Use a local variable for the part index and update it in plist here, so that
     * we can return early below without duplicating the increment code.
     */
    int part = plist->rollingPruningPart;

    plist->rollingPruningPart++;
    if (plist->rollingPruningPart >= plist->rollingPruningNumParts)
    {
        plist->rollingPruningPart = 0;
    }
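
    /* For example, with numParts = 2 successive rolling-prune calls use
     * part = 0, 1, 0, 1, ..., so each pass covers roughly half of the pair-list
     * entries and the whole list gets pruned over every two calls. */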

    /* Compute the number of list entries to prune in this pass */
    int numSciInPart = (plist->nsci - part) / numParts;

    /* Don't launch the kernel if there is no work to do (an empty launch is not allowed with CUDA) */
    if (numSciInPart <= 0)
    {
        plist->haveFreshList = false;

        return;
    }

    GpuRegionTimer* timer = nullptr;
    if (bDoTime)
    {
        timer = &(plist->haveFreshList ? timers->interaction[iloc].prune_k
                                       : timers->interaction[iloc].rollingPrune_k);
    }

    /* beginning of timed prune calculation section */
    if (bDoTime)
    {
        timer->openTimingRegion(deviceStream);
    }

    /* Kernel launch config:
     * - The thread block dimensions match the size of i-clusters, j-clusters,
     *   and j-cluster concurrency, in x, y, and z, respectively.
     * - The 1D block-grid contains as many blocks as super-clusters.
     */
    int num_threads_z = c_pruneKernelJ4Concurrency;
    int nblock        = calc_nb_kernel_nblock(numSciInPart, &nb->deviceContext_->deviceInfo());
    KernelLaunchConfig config;
    config.blockSize[0]     = c_clSize;
    config.blockSize[1]     = c_clSize;
    config.blockSize[2]     = num_threads_z;
    config.gridSize[0]      = nblock;
    config.sharedMemorySize = calc_shmem_required_prune(num_threads_z);

    if (debug)
    {
        fprintf(debug,
                "Pruning GPU kernel launch configuration:\n\tThread block: %zux%zux%zu\n\t"
                "\tGrid: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n"
                "\tShMem: %zu\n",
                config.blockSize[0],
                config.blockSize[1],
                config.blockSize[2],
                config.gridSize[0],
                config.gridSize[1],
                numSciInPart * c_nbnxnGpuNumClusterPerSupercluster,
                c_nbnxnGpuNumClusterPerSupercluster,
                plist->na_c,
                config.sharedMemorySize);
    }

    auto*          timingEvent  = bDoTime ? timer->fetchNextEvent() : nullptr;
    constexpr char kernelName[] = "k_pruneonly";
    const auto     kernel =
            plist->haveFreshList ? nbnxn_kernel_prune_cuda<true> : nbnxn_kernel_prune_cuda<false>;
    const auto kernelArgs = prepareGpuKernelArguments(kernel, config, adat, nbp, plist, &numParts, &part);
    launchGpuKernel(kernel, config, deviceStream, timingEvent, kernelName, kernelArgs);

    /* TODO: consider a more elegant way to track which kernel has been called
       (combined or separate 1st pass prune, rolling prune). */
    if (plist->haveFreshList)
    {
        plist->haveFreshList = false;
        /* Mark that pruning has been done */
        nb->timers->interaction[iloc].didPrune = true;
    }
    else
    {
        /* Mark that rolling pruning has been done */
        nb->timers->interaction[iloc].didRollingPrune = true;
    }

    if (bDoTime)
    {
        timer->closeTimingRegion(deviceStream);
    }

    if (GMX_NATIVE_WINDOWS)
    {
        /* Windows: force flushing WDDM queue */
        cudaStreamQuery(deviceStream.stream());
    }
}

void gpu_launch_cpyback(NbnxmGpu*                nb,
                        nbnxn_atomdata_t*        nbatom,
                        const gmx::StepWorkload& stepWork,
                        const AtomLocality       atomLocality)
{
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");

    /* determine interaction locality from atom locality */
    const InteractionLocality iloc = gpuAtomToInteractionLocality(atomLocality);
    GMX_ASSERT(iloc == InteractionLocality::Local
                       || (iloc == InteractionLocality::NonLocal && nb->bNonLocalStreamDoneMarked == false),
               "Non-local stream is indicating that the copy back event is enqueued at the "
               "beginning of the copy back function.");

    /* extract the data */
    NBAtomData*         adat         = nb->atdat;
    Nbnxm::GpuTimers*   timers       = nb->timers;
    bool                bDoTime      = nb->bDoTime;
    const DeviceStream& deviceStream = *nb->deviceStreams[iloc];

    /* don't launch non-local copy-back if there was no non-local work to do */
    if ((iloc == InteractionLocality::NonLocal) && !haveGpuShortRangeWork(*nb, iloc))
    {
        nb->bNonLocalStreamDoneMarked = false;
        return;
    }

    /* local/nonlocal offset and length used for xq and f */
    auto atomsRange = getGpuAtomRange(adat, atomLocality);

    /* beginning of timed D2H section */
    if (bDoTime)
    {
        timers->xf[atomLocality].nb_d2h.openTimingRegion(deviceStream);
    }

    /* With DD the local D2H transfer can only start after the non-local
       kernel has finished. */
    if (iloc == InteractionLocality::Local && nb->bNonLocalStreamDoneMarked)
    {
        nb->nonlocal_done.enqueueWaitEvent(deviceStream);
        nb->bNonLocalStreamDoneMarked = false;
    }

    /* DtoH f
     * Skip if buffer ops / reduction is offloaded to the GPU.
     */
    if (!stepWork.useGpuFBufferOps)
    {
        static_assert(
                sizeof(adat->f[0]) == sizeof(Float3),
                "The size of the force buffer element should be equal to the size of float3.");
        copyFromDeviceBuffer(reinterpret_cast<Float3*>(nbatom->out[0].f.data()) + atomsRange.begin(),
                             &adat->f,
                             atomsRange.begin(),
                             atomsRange.size(),
                             deviceStream,
                             GpuApiCallBehavior::Async,
                             nullptr);
    }

    /* After the non-local D2H is launched the nonlocal_done event can be
       recorded which signals that the local D2H can proceed. This event is not
       placed after the non-local kernel because we want the non-local data
       back first. */
    if (iloc == InteractionLocality::NonLocal)
    {
        nb->nonlocal_done.markEvent(deviceStream);
        nb->bNonLocalStreamDoneMarked = true;
    }

    /* only transfer energies in the local stream */
    if (iloc == InteractionLocality::Local)
    {
        /* DtoH fshift when virial is needed */
        if (stepWork.computeVirial)
        {
            static_assert(sizeof(nb->nbst.fShift[0]) == sizeof(adat->fShift[0]),
                          "Sizes of host- and device-side shift vectors should be the same.");
            copyFromDeviceBuffer(
                    nb->nbst.fShift, &adat->fShift, 0, SHIFTS, deviceStream, GpuApiCallBehavior::Async, nullptr);
        }

        /* DtoH energies */
        if (stepWork.computeEnergy)
        {
            static_assert(sizeof(nb->nbst.eLJ[0]) == sizeof(adat->eLJ[0]),
                          "Sizes of host- and device-side LJ energy terms should be the same.");
            copyFromDeviceBuffer(
                    nb->nbst.eLJ, &adat->eLJ, 0, 1, deviceStream, GpuApiCallBehavior::Async, nullptr);
            static_assert(sizeof(nb->nbst.eElec[0]) == sizeof(adat->eElec[0]),
                          "Sizes of host- and device-side electrostatic energy terms should be the "
                          "same.");
            copyFromDeviceBuffer(
                    nb->nbst.eElec, &adat->eElec, 0, 1, deviceStream, GpuApiCallBehavior::Async, nullptr);
        }
    }

    if (bDoTime)
    {
        timers->xf[atomLocality].nb_d2h.closeTimingRegion(deviceStream);
    }
}

void cuda_set_cacheconfig()
{
    cudaError_t stat;

    for (int i = 0; i < c_numElecTypes; i++)
    {
        for (int j = 0; j < c_numVdwTypes; j++)
        {
            /* Default kernel 32/32 kB Shared/L1 */
            cudaFuncSetCacheConfig(nb_kfunc_ener_prune_ptr[i][j], cudaFuncCachePreferEqual);
            cudaFuncSetCacheConfig(nb_kfunc_ener_noprune_ptr[i][j], cudaFuncCachePreferEqual);
            cudaFuncSetCacheConfig(nb_kfunc_noener_prune_ptr[i][j], cudaFuncCachePreferEqual);
            stat = cudaFuncSetCacheConfig(nb_kfunc_noener_noprune_ptr[i][j], cudaFuncCachePreferEqual);
            CU_RET_ERR(stat, "cudaFuncSetCacheConfig failed");
        }
    }
}

/* X buffer operations on GPU: performs conversion from rvec to nb format. */
void nbnxn_gpu_x_to_nbat_x(const Nbnxm::Grid&        grid,
                           bool                      setFillerCoords,
                           NbnxmGpu*                 nb,
                           DeviceBuffer<gmx::RVec>   d_x,
                           GpuEventSynchronizer*     xReadyOnDevice,
                           const Nbnxm::AtomLocality locality,
                           int                       gridId,
                           int                       numColumnsMax)
{
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");

    NBAtomData* adat = nb->atdat;

    const int                  numColumns      = grid.numColumns();
    const int                  cellOffset      = grid.cellOffset();
    const int                  numAtomsPerCell = grid.numAtomsPerCell();
    Nbnxm::InteractionLocality interactionLoc  = gpuAtomToInteractionLocality(locality);

    const DeviceStream& deviceStream = *nb->deviceStreams[interactionLoc];

    int numAtoms = grid.srcAtomEnd() - grid.srcAtomBegin();
    // avoid empty kernel launch, skip to inserting stream dependency
    if (numAtoms != 0)
    {
        // TODO: This will only work with CUDA
        GMX_ASSERT(d_x, "Need a valid device pointer");

        // ensure that coordinates are ready on the device before launching the kernel
        GMX_ASSERT(xReadyOnDevice, "Need a valid GpuEventSynchronizer object");
        xReadyOnDevice->enqueueWaitEvent(deviceStream);

        KernelLaunchConfig config;
        config.blockSize[0] = c_bufOpsThreadsPerBlock;
        config.blockSize[1] = 1;
        config.blockSize[2] = 1;
        config.gridSize[0] = (grid.numCellsColumnMax() * numAtomsPerCell + c_bufOpsThreadsPerBlock - 1)
                             / c_bufOpsThreadsPerBlock;
        config.gridSize[1] = numColumns;
        config.gridSize[2] = 1;
        GMX_ASSERT(config.gridSize[0] > 0,
                   "Can not have empty grid, early return above avoids this");
        config.sharedMemorySize = 0;

        auto kernelFn = setFillerCoords ? nbnxn_gpu_x_to_nbat_x_kernel<true>
                                        : nbnxn_gpu_x_to_nbat_x_kernel<false>;
        float4*    d_xq          = adat->xq;
        float3*    d_xFloat3     = asFloat3(d_x);
        const int* d_atomIndices = nb->atomIndices;
        const int* d_cxy_na      = &nb->cxy_na[numColumnsMax * gridId];
        const int* d_cxy_ind     = &nb->cxy_ind[numColumnsMax * gridId];
        const auto kernelArgs    = prepareGpuKernelArguments(kernelFn,
                                                          config,
                                                          &numColumns,
                                                          &d_xq,
                                                          &d_xFloat3,
                                                          &d_atomIndices,
                                                          &d_cxy_na,
                                                          &d_cxy_ind,
                                                          &cellOffset,
                                                          &numAtomsPerCell);
        launchGpuKernel(kernelFn, config, deviceStream, nullptr, "XbufferOps", kernelArgs);
    }

    // TODO: note that this is not necessary when there are no local atoms, that is:
    // (numAtoms == 0 && interactionLoc == InteractionLocality::Local)
    // but for now we avoid that optimization
    nbnxnInsertNonlocalGpuDependency(nb, interactionLoc);
}

void* getGpuForces(NbnxmGpu* nb)
{
    return nb->atdat->f;
}

} // namespace Nbnxm