/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief Define CUDA implementation of nbnxn_gpu.h
 *
 * \author Szilard Pall <pall.szilard@gmail.com>
 */
#include "gromacs/mdlib/nbnxn_gpu.h"

#include <assert.h>
#include <stdlib.h>
/* <limits> is needed for the numeric_limits NaN fallback used in the
   polling-synchronization setup below when no NAN macro is available. */
#include <limits>

#include "thread_mpi/atomic.h"

#include "gromacs/gmxlib/cuda_tools/cudautils.cuh"
#include "gromacs/legacyheaders/types/force_flags.h"
#include "gromacs/legacyheaders/types/simple.h"
#include "gromacs/mdlib/nb_verlet.h"
#include "gromacs/mdlib/nbnxn_consts.h"
#include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
#include "gromacs/mdlib/nbnxn_pairlist.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/cstringutil.h"

#include "nbnxn_cuda_types.h"
/*! Texture reference for LJ C6/C12 parameters; bound to cu_nbparam_t.nbfp */
texture<float, 1, cudaReadModeElementType> nbfp_texref;

/*! Texture reference for LJ-PME parameters; bound to cu_nbparam_t.nbfp_comb */
texture<float, 1, cudaReadModeElementType> nbfp_comb_texref;

/*! Texture reference for Ewald coulomb force table; bound to cu_nbparam_t.coulomb_tab */
texture<float, 1, cudaReadModeElementType> coulomb_tab_texref;
/* Convenience defines */
#define NCL_PER_SUPERCL     (NBNXN_GPU_NCLUSTER_PER_SUPERCLUSTER)
#define CL_SIZE             (NBNXN_GPU_CLUSTER_SIZE)
/* NTHREAD_Z controls the number of j-clusters processed concurrently on NTHREAD_Z
 * warp-pairs per block.
 *
 * - On CC 2.0-3.5, 5.0, and 5.2, NTHREAD_Z == 1, translating to 64 th/block with 16
 *   blocks/multiproc, is the fastest even though this setup gives low occupancy.
 *   NTHREAD_Z > 1 results in excessive register spilling unless the minimum blocks
 *   per multiprocessor is reduced proportionally to get the original number of max
 *   threads in flight (and slightly lower performance).
 * - On CC 3.7 there are enough registers to double the number of threads; using
 *   NTHREAD_Z == 2 is fastest with 16 blocks (TODO: test with RF and other kernels
 *   with low-register use).
 *
 * Note that the current kernel implementation only supports NTHREAD_Z > 1 with
 * shuffle-based reduction, hence CC >= 3.0.
 */
/* Kernel launch bounds as function of NTHREAD_Z.
 * - CC 3.5/5.2: NTHREAD_Z=1, (64, 16) bounds
 * - CC 3.7:     NTHREAD_Z=2, (128, 16) bounds
 */
#if __CUDA_ARCH__ == 370
#define NTHREAD_Z           (2)
#define MIN_BLOCKS_PER_MP   (16)
#else
#define NTHREAD_Z           (1)
#define MIN_BLOCKS_PER_MP   (16)
#endif
#define THREADS_PER_BLOCK   (CL_SIZE*CL_SIZE*NTHREAD_Z)
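
/* Illustrative sketch (never compiled, hence the #if 0 guard): this is roughly
 * how the macros above feed the launch bounds of the generated kernels. The
 * kernel name and empty body are placeholders, not one of the real generated
 * flavors.
 */
#if 0
__global__ void
__launch_bounds__(THREADS_PER_BLOCK, MIN_BLOCKS_PER_MP)
example_kernel_placeholder(const cu_atomdata_t atdat,
                           const cu_nbparam_t  nbparam,
                           const cu_plist_t    plist,
                           bool                bCalcFshift)
{
    /* blockDim == (CL_SIZE, CL_SIZE, NTHREAD_Z): on CC 3.7 each block runs
       two warp-pairs (128 threads), on the other supported architectures
       a single warp-pair (64 threads). */
}
#endif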
/***** The kernels come here *****/
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel_utils.cuh"
/* Top-level kernel generation: will generate through multiple inclusion the
 * following flavors for all kernels:
 * - force-only output;
 * - force and energy output;
 * - force-only with pair list pruning;
 * - force and energy output with pair list pruning.
 */

/** Force only **/
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernels.cuh"
/** Force & energy **/
#define CALC_ENERGIES
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernels.cuh"
#undef CALC_ENERGIES
/*** Pair-list pruning kernels ***/
/** Force only **/
#define PRUNE_NBL
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernels.cuh"
/** Force & energy **/
#define CALC_ENERGIES
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernels.cuh"
#undef CALC_ENERGIES
#undef PRUNE_NBL
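
/* Conceptual view of the multiple-inclusion trick above (illustrative only;
 * the actual name assembly lives in nbnxn_cuda_kernels.cuh): each inclusion
 * compiles the same kernel body under a different preprocessor state,
 * roughly like
 *
 *   no defines                -> nbnxn_kernel_..._F_cuda
 *   CALC_ENERGIES             -> nbnxn_kernel_..._VF_cuda
 *   PRUNE_NBL                 -> nbnxn_kernel_..._F_prune_cuda
 *   CALC_ENERGIES + PRUNE_NBL -> nbnxn_kernel_..._VF_prune_cuda
 */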
/*! Nonbonded kernel function pointer type */
typedef void (*nbnxn_cu_kfunc_ptr_t)(const cu_atomdata_t,
                                     const cu_nbparam_t,
                                     const cu_plist_t,
                                     bool);
/*********************************/

/* XXX always/never run the energy/pruning kernels -- only for benchmarking purposes */
static bool always_ener  = (getenv("GMX_GPU_ALWAYS_ENER") != NULL);
static bool never_ener   = (getenv("GMX_GPU_NEVER_ENER") != NULL);
static bool always_prune = (getenv("GMX_GPU_ALWAYS_PRUNE") != NULL);
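
/* Example use (benchmarking only), by setting the variable in the environment
 * of whatever mdrun invocation is being timed:
 *   GMX_GPU_ALWAYS_ENER=1  -> always run the energy flavor of the kernels
 *   GMX_GPU_NEVER_ENER=1   -> never run the energy flavor
 *   GMX_GPU_ALWAYS_PRUNE=1 -> prune the pair list on every step
 */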
/* Bit-pattern used for polling-based GPU synchronization. It is used as a float
 * and corresponds to having the exponent set to the maximum (127 -- single
 * precision) and the mantissa to 0.
 */
static unsigned int poll_wait_pattern = (0x7FU << 23);
/*! Returns the number of blocks to be used for the nonbonded GPU kernel. */
static inline int calc_nb_kernel_nblock(int nwork_units, gmx_device_info_t *dinfo)
{
    int max_grid_x_size;

    /* CUDA does not accept grid dimension of 0 (which can happen e.g. with an
       empty domain) and that case should be handled before this point. */
    assert(nwork_units > 0);

    max_grid_x_size = dinfo->prop.maxGridSize[0];

    /* do we exceed the grid x dimension limit? */
    if (nwork_units > max_grid_x_size)
    {
        gmx_fatal(FARGS, "Watch out, the input system is too large to simulate!\n"
                  "The number of nonbonded work units (=number of super-clusters) exceeds the "
                  "maximum grid size in x dimension (%d > %d)!", nwork_units, max_grid_x_size);
    }

    return nwork_units;
}
/* Constant arrays listing all kernel function pointers and enabling selection
   of a kernel in an elegant manner. */

/*! Pointers to the non-bonded kernels organized in 2-dim arrays by:
 * electrostatics and VDW type.
 *
 * Note that the row- and column-order of function pointers has to match the
 * order of corresponding enumerated electrostatics and vdw types, resp.,
 * defined in nbnxn_cuda_types.h.
 */
/*! Force-only kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_noprune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_F_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_F_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_F_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_F_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_F_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_cuda }
};
/*! Force + energy kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_noprune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_VF_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_VF_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_cuda }
};
/*! Force + pruning kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_prune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_cuda }
};
/*! Force + energy + pruning kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_prune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_cuda }
};
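
/* Example lookup (sketch; the concrete enum constants live in
 * nbnxn_cuda_types.h): the force-only, no-prune kernel for electrostatics
 * type eeltype and VDW type evdwtype is
 *
 *   nbnxn_cu_kfunc_ptr_t k = nb_kfunc_noener_noprune_ptr[eeltype][evdwtype];
 *
 * which is exactly what select_nbnxn_kernel() below does for each flavor.
 */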
/*! Return a pointer to the kernel version to be executed at the current step. */
static inline nbnxn_cu_kfunc_ptr_t select_nbnxn_kernel(int  eeltype,
                                                       int  evdwtype,
                                                       bool bDoEne,
                                                       bool bDoPrune)
{
    nbnxn_cu_kfunc_ptr_t res;

    assert(eeltype < eelCuNR);
    assert(evdwtype < evdwCuNR);

    if (bDoEne)
    {
        if (bDoPrune)
        {
            res = nb_kfunc_ener_prune_ptr[eeltype][evdwtype];
        }
        else
        {
            res = nb_kfunc_ener_noprune_ptr[eeltype][evdwtype];
        }
    }
    else
    {
        if (bDoPrune)
        {
            res = nb_kfunc_noener_prune_ptr[eeltype][evdwtype];
        }
        else
        {
            res = nb_kfunc_noener_noprune_ptr[eeltype][evdwtype];
        }
    }

    return res;
}
/*! Calculates the amount of shared memory required by the CUDA kernel in use. */
static inline int calc_shmem_required(const int num_threads_z, gmx_device_info_t gmx_unused *dinfo)
{
    int shmem;

    assert(dinfo);

    /* size of shmem (force-buffers/xq/atom type preloading) */
    /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
    /* i-atom x+q in shared memory */
    shmem  = NCL_PER_SUPERCL * CL_SIZE * sizeof(float4);
    /* cj in shared memory, for each warp separately */
    shmem += num_threads_z * 2 * NBNXN_GPU_JGROUP_SIZE * sizeof(int);
    /* CUDA versions below 4.2 won't generate code for sm>=3.0 */
#if GMX_CUDA_VERSION >= 4200
    if (dinfo->prop.major >= 3)
    {
        /* i-atom types in shared memory */
        shmem += NCL_PER_SUPERCL * CL_SIZE * sizeof(int);
    }
    if (dinfo->prop.major < 3)
#endif
    {
        /* force reduction buffers in shared memory */
        shmem += CL_SIZE * CL_SIZE * 3 * sizeof(float);
    }

    return shmem;
}
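
/* Worked example for the arithmetic above (assuming the usual values
 * CL_SIZE == 8, NCL_PER_SUPERCL == 8, NBNXN_GPU_JGROUP_SIZE == 4 and
 * num_threads_z == 1, on an sm_3x device built with CUDA >= 4.2):
 *   xq preload:    8 * 8 * sizeof(float4)  = 1024 B
 *   cj preload:    1 * 2 * 4 * sizeof(int) =   32 B
 *   i-atom types:  8 * 8 * sizeof(int)     =  256 B
 *   total: 1312 B of shared memory per thread block.
 */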
/*! As we execute nonbonded workload in separate streams, before launching
   the kernel we need to make sure that the following operations have completed:
   - atomdata allocation and related H2D transfers (every nstlist step);
   - pair list H2D transfer (every nstlist step);
   - shift vector H2D transfer (every nstlist step);
   - force (+shift force and energy) output clearing (every step).

   These operations are issued in the local stream at the beginning of the step
   and therefore always complete before the local kernel launch. The non-local
   kernel is launched after the local one on the same device/context, hence it is
   inherently scheduled after the operations in the local stream (including the
   above "misc_ops") on pre-GK110 devices with a single hardware queue, but on later
   devices with multiple hardware queues the dependency needs to be enforced.
   We use the misc_ops_and_local_H2D_done event to record the point where
   the local x+q H2D (and all preceding) tasks are complete and synchronize
   with this event in the non-local stream before launching the non-bonded kernel.
 */
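/* The dependency described above boils down to the following event pattern
 * (sketch only; the real calls, with error checking, are in
 * nbnxn_gpu_launch_kernel below):
 *
 *   cudaEventRecord(nb->misc_ops_and_local_H2D_done, nb->stream[eintLocal]);
 *   cudaStreamWaitEvent(nb->stream[eintNonlocal], nb->misc_ops_and_local_H2D_done, 0);
 */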
void nbnxn_gpu_launch_kernel(gmx_nbnxn_cuda_t       *nb,
                             const nbnxn_atomdata_t *nbatom,
                             int                     flags,
                             int                     iloc)
{
    cudaError_t stat;
    int         adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */
    /* CUDA kernel launch-related stuff */
    int         shmem, nblock;
    dim3        dim_block, dim_grid;
    nbnxn_cu_kfunc_ptr_t nb_kernel = NULL; /* fn pointer to the nonbonded kernel */

    cu_atomdata_t *adat   = nb->atdat;
    cu_nbparam_t  *nbp    = nb->nbparam;
    cu_plist_t    *plist  = nb->plist[iloc];
    cu_timers_t   *t      = nb->timers;
    cudaStream_t   stream = nb->stream[iloc];

    bool bCalcEner   = flags & GMX_FORCE_ENERGY;
    bool bCalcFshift = flags & GMX_FORCE_VIRIAL;
    bool bDoTime     = nb->bDoTime;
    /* turn energy calculation always on/off (for debugging/testing only) */
    bCalcEner = (bCalcEner || always_ener) && !never_ener;
    /* Don't launch the non-local kernel if there is no work to do.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local kernel, the local x+q copy, and later (not in
       this function) the stream wait, local f copyback and the f buffer
       clearing. All these operations, except for the local interaction kernel,
       are needed for the non-local interactions. The skip of the local kernel
       call is taken care of later in this function. */
    if (iloc == eintNonlocal && plist->nsci == 0)
    {
        return;
    }
    /* calculate the atom data index range based on locality */
    if (LOCAL_I(iloc))
    {
        adat_begin = 0;
        adat_len   = adat->natoms_local;
    }
    else
    {
        adat_begin = adat->natoms_local;
        adat_len   = adat->natoms - adat->natoms_local;
    }
    /* beginning of timed HtoD section */
    if (bDoTime)
    {
        stat = cudaEventRecord(t->start_nb_h2d[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

    /* HtoD x, q */
    cu_copy_H2D_async(adat->xq + adat_begin, nbatom->x + adat_begin * 4,
                      adat_len * sizeof(*adat->xq), stream);
    /* When we get here all misc operations issued in the local stream as well as
       the local xq H2D are done,
       so we record that in the local stream and wait for it in the nonlocal one. */
    if (nb->bUseTwoStreams)
    {
        if (iloc == eintLocal)
        {
            stat = cudaEventRecord(nb->misc_ops_and_local_H2D_done, stream);
            CU_RET_ERR(stat, "cudaEventRecord on misc_ops_and_local_H2D_done failed");
        }
        else
        {
            stat = cudaStreamWaitEvent(stream, nb->misc_ops_and_local_H2D_done, 0);
            CU_RET_ERR(stat, "cudaStreamWaitEvent on misc_ops_and_local_H2D_done failed");
        }
    }

    if (bDoTime)
    {
        stat = cudaEventRecord(t->stop_nb_h2d[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }
    if (plist->nsci == 0)
    {
        /* Don't launch an empty local kernel (not allowed with CUDA) */
        return;
    }
    /* beginning of timed nonbonded calculation section */
    if (bDoTime)
    {
        stat = cudaEventRecord(t->start_nb_k[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

    /* get the pointer to the kernel flavor we need to use */
    nb_kernel = select_nbnxn_kernel(nbp->eeltype,
                                    nbp->vdwtype,
                                    bCalcEner,
                                    plist->bDoPrune || always_prune);
    /* Kernel launch config:
     * - The thread block dimensions match the size of i-clusters, j-clusters,
     *   and j-cluster concurrency, in x, y, and z, respectively.
     * - The 1D block-grid contains as many blocks as super-clusters.
     */
    int num_threads_z = 1;
    if (nb->dev_info->prop.major == 3 && nb->dev_info->prop.minor == 7)
    {
        num_threads_z = 2;
    }
    nblock    = calc_nb_kernel_nblock(plist->nsci, nb->dev_info);
    dim_block = dim3(CL_SIZE, CL_SIZE, num_threads_z);
    dim_grid  = dim3(nblock, 1, 1);
    shmem     = calc_shmem_required(num_threads_z, nb->dev_info);
    if (debug)
    {
        fprintf(debug, "GPU launch configuration:\n\tThread block: %dx%dx%d\n\t"
                "Grid: %dx%d\n\t#Super-clusters/clusters: %d/%d (%d)\n"
                "\tShMem: %d\n",
                dim_block.x, dim_block.y, dim_block.z,
                dim_grid.x, dim_grid.y, plist->nsci*NCL_PER_SUPERCL,
                NCL_PER_SUPERCL, plist->na_c,
                shmem);
    }
    nb_kernel<<< dim_grid, dim_block, shmem, stream>>> (*adat, *nbp, *plist, bCalcFshift);
    CU_LAUNCH_ERR("k_calc_nb");

    if (bDoTime)
    {
        stat = cudaEventRecord(t->stop_nb_k[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }
}
void nbnxn_gpu_launch_cpyback(gmx_nbnxn_cuda_t       *nb,
                              const nbnxn_atomdata_t *nbatom,
                              int                     flags,
                              int                     aloc)
{
    cudaError_t stat;
    int         adat_begin, adat_len, adat_end; /* local/nonlocal offset and length used for xq and f */
    int         iloc = -1;

    /* determine interaction locality from atom locality */
    if (LOCAL_A(aloc))
    {
        iloc = eintLocal;
    }
    else if (NONLOCAL_A(aloc))
    {
        iloc = eintNonlocal;
    }
    else
    {
        char stmp[STRLEN];
        sprintf(stmp, "Invalid atom locality passed (%d); valid here is only "
                "local (%d) or nonlocal (%d)", aloc, eatLocal, eatNonlocal);
        gmx_incons(stmp);
    }

    cu_atomdata_t *adat    = nb->atdat;
    cu_timers_t   *t       = nb->timers;
    bool           bDoTime = nb->bDoTime;
    cudaStream_t   stream  = nb->stream[iloc];

    bool bCalcEner   = flags & GMX_FORCE_ENERGY;
    bool bCalcFshift = flags & GMX_FORCE_VIRIAL;
    /* don't launch non-local copy-back if there was no non-local work to do */
    if (iloc == eintNonlocal && nb->plist[iloc]->nsci == 0)
    {
        return;
    }

    /* calculate the atom data index range based on locality */
    if (LOCAL_A(aloc))
    {
        adat_begin = 0;
        adat_len   = adat->natoms_local;
        adat_end   = nb->atdat->natoms_local;
    }
    else
    {
        adat_begin = adat->natoms_local;
        adat_len   = adat->natoms - adat->natoms_local;
        adat_end   = nb->atdat->natoms;
    }
    /* beginning of timed D2H section */
    if (bDoTime)
    {
        stat = cudaEventRecord(t->start_nb_d2h[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }
    if (!nb->bUseStreamSync)
    {
        /* For safety reasons set a few (5%) forces to NaN. This way even if the
           polling "hack" fails with some future NVIDIA driver we'll get a crash. */
        for (int i = adat_begin; i < 3*adat_end + 2; i += adat_len/20)
        {
#ifdef NAN
            nbatom->out[0].f[i] = NAN;
#else
            if (std::numeric_limits<float>::has_quiet_NaN)
            {
                nbatom->out[0].f[i] = std::numeric_limits<float>::quiet_NaN();
            }
            else
            {
                /* no NaN available, fall back to the largest representable float */
                nbatom->out[0].f[i] = GMX_REAL_MAX;
            }
#endif
        }

        /* Set the last four bytes of the force array to a bit pattern
           which can't be the result of the force calculation:
           max exponent (127) and zero mantissa. */
        *(unsigned int*)&nbatom->out[0].f[adat_end*3 - 1] = poll_wait_pattern;
    }
    /* With DD the local D2H transfer can only start after the non-local
       kernel has finished. */
    if (iloc == eintLocal && nb->bUseTwoStreams)
    {
        stat = cudaStreamWaitEvent(stream, nb->nonlocal_done, 0);
        CU_RET_ERR(stat, "cudaStreamWaitEvent on nonlocal_done failed");
    }

    /* DtoH f */
    cu_copy_D2H_async(nbatom->out[0].f + adat_begin * 3, adat->f + adat_begin,
                      (adat_len)*sizeof(*adat->f), stream);
    /* After the non-local D2H is launched the nonlocal_done event can be
       recorded which signals that the local D2H can proceed. This event is not
       placed after the non-local kernel because we want the non-local data
       back first. */
    if (iloc == eintNonlocal)
    {
        stat = cudaEventRecord(nb->nonlocal_done, stream);
        CU_RET_ERR(stat, "cudaEventRecord on nonlocal_done failed");
    }
    /* only transfer energies in the local stream */
    if (LOCAL_I(iloc))
    {
        /* DtoH fshift */
        if (bCalcFshift)
        {
            cu_copy_D2H_async(nb->nbst.fshift, adat->fshift,
                              SHIFTS * sizeof(*nb->nbst.fshift), stream);
        }

        /* DtoH energies */
        if (bCalcEner)
        {
            cu_copy_D2H_async(nb->nbst.e_lj, adat->e_lj,
                              sizeof(*nb->nbst.e_lj), stream);
            cu_copy_D2H_async(nb->nbst.e_el, adat->e_el,
                              sizeof(*nb->nbst.e_el), stream);
        }
    }

    if (bDoTime)
    {
        stat = cudaEventRecord(t->stop_nb_d2h[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }
}
/* Atomic compare-exchange operation on unsigned values. It is used in
 * polling wait for the GPU.
 */
static inline bool atomic_cas(volatile unsigned int *ptr,
                              unsigned int           oldval,
                              unsigned int           newval)
{
    assert(ptr);

#ifdef TMPI_ATOMICS
    return tMPI_Atomic_cas((tMPI_Atomic_t *)ptr, oldval, newval);
#else
    gmx_incons("Atomic operations not available, atomic_cas() should not have been called!");
    return true;
#endif
}
void nbnxn_gpu_wait_for_gpu(gmx_nbnxn_cuda_t       *nb,
                            const nbnxn_atomdata_t *nbatom,
                            int flags, int aloc,
                            real *e_lj, real *e_el, rvec *fshift)
{
    /* NOTE: only implemented for single-precision at this time */
    cudaError_t            stat;
    int                    i, adat_end, iloc = -1;
    volatile unsigned int *poll_word;

    /* determine interaction locality from atom locality */
    if (LOCAL_A(aloc))
    {
        iloc = eintLocal;
    }
    else if (NONLOCAL_A(aloc))
    {
        iloc = eintNonlocal;
    }
    else
    {
        char stmp[STRLEN];
        sprintf(stmp, "Invalid atom locality passed (%d); valid here is only "
                "local (%d) or nonlocal (%d)", aloc, eatLocal, eatNonlocal);
        gmx_incons(stmp);
    }

    cu_plist_t                 *plist   = nb->plist[iloc];
    cu_timers_t                *timers  = nb->timers;
    struct gmx_wallclock_gpu_t *timings = nb->timings;
    nb_staging                  nbst    = nb->nbst;

    bool bCalcEner   = flags & GMX_FORCE_ENERGY;
    bool bCalcFshift = flags & GMX_FORCE_VIRIAL;
    /* turn energy calculation always on/off (for debugging/testing only) */
    bCalcEner = (bCalcEner || always_ener) && !never_ener;
    /* Launch wait/update timers & counters, unless doing the non-local phase
       when there is not actually work to do. This is consistent with
       nbnxn_gpu_launch_kernel.

       NOTE: if timing with multiple GPUs (streams) becomes possible, the
       counters could end up being inconsistent due to not being incremented
       on some of the nodes! */
    if (iloc == eintNonlocal && nb->plist[iloc]->nsci == 0)
    {
        return;
    }
    /* calculate the atom data index range based on locality */
    if (LOCAL_A(aloc))
    {
        adat_end = nb->atdat->natoms_local;
    }
    else
    {
        adat_end = nb->atdat->natoms;
    }
    if (nb->bUseStreamSync)
    {
        stat = cudaStreamSynchronize(nb->stream[iloc]);
        CU_RET_ERR(stat, "cudaStreamSynchronize failed in cu_blockwait_nb");
    }
    else
    {
        /* Busy-wait until we get the signal pattern set in last byte
         * of the l/nl float vector. This pattern corresponds to a floating
         * point number which can't be the result of the force calculation
         * (maximum, 127 exponent and 0 mantissa).
         * The polling uses atomic compare-exchange.
         */
        poll_word = (volatile unsigned int*)&nbatom->out[0].f[adat_end*3 - 1];
        while (atomic_cas(poll_word, poll_wait_pattern, poll_wait_pattern))
        {
        }
    }
    /* timing data accumulation */
    if (nb->bDoTime)
    {
        /* only increase counter once (at local F wait) */
        if (LOCAL_I(iloc))
        {
            timings->nb_c++;
            timings->ktime[plist->bDoPrune ? 1 : 0][bCalcEner ? 1 : 0].c += 1;
        }

        /* kernel timings */
        timings->ktime[plist->bDoPrune ? 1 : 0][bCalcEner ? 1 : 0].t +=
            cu_event_elapsed(timers->start_nb_k[iloc], timers->stop_nb_k[iloc]);

        /* X/q H2D and F D2H timings */
        timings->nb_h2d_t += cu_event_elapsed(timers->start_nb_h2d[iloc],
                                              timers->stop_nb_h2d[iloc]);
        timings->nb_d2h_t += cu_event_elapsed(timers->start_nb_d2h[iloc],
                                              timers->stop_nb_d2h[iloc]);

        /* only count atdat and pair-list H2D at pair-search step */
        if (plist->bDoPrune)
        {
            /* atdat transfer timing (add only once, at local F wait) */
            if (LOCAL_A(aloc))
            {
                timings->pl_h2d_c++;
                timings->pl_h2d_t += cu_event_elapsed(timers->start_atdat,
                                                      timers->stop_atdat);
            }

            timings->pl_h2d_t += cu_event_elapsed(timers->start_pl_h2d[iloc],
                                                  timers->stop_pl_h2d[iloc]);
        }
    }
    /* add up energies and shift forces (only once at local F wait) */
    if (LOCAL_I(iloc))
    {
        if (bCalcEner)
        {
            *e_lj += *nbst.e_lj;
            *e_el += *nbst.e_el;
        }

        if (bCalcFshift)
        {
            for (i = 0; i < SHIFTS; i++)
            {
                fshift[i][0] += nbst.fshift[i].x;
                fshift[i][1] += nbst.fshift[i].y;
                fshift[i][2] += nbst.fshift[i].z;
            }
        }
    }

    /* turn off pruning (doesn't matter if this is pair-search step or not) */
    plist->bDoPrune = false;
}
/*! Return the reference to the nbfp texture. */
const struct texture<float, 1, cudaReadModeElementType> &nbnxn_cuda_get_nbfp_texref()
{
    return nbfp_texref;
}

/*! Return the reference to the nbfp_comb texture. */
const struct texture<float, 1, cudaReadModeElementType> &nbnxn_cuda_get_nbfp_comb_texref()
{
    return nbfp_comb_texref;
}

/*! Return the reference to the coulomb_tab texture. */
const struct texture<float, 1, cudaReadModeElementType> &nbnxn_cuda_get_coulomb_tab_texref()
{
    return coulomb_tab_texref;
}
/*! Set up the cache configuration for the non-bonded kernels. */
void nbnxn_cuda_set_cacheconfig(gmx_device_info_t *devinfo)
{
    cudaError_t stat;

    for (int i = 0; i < eelCuNR; i++)
    {
        for (int j = 0; j < evdwCuNR; j++)
        {
            if (devinfo->prop.major >= 3)
            {
                /* Default kernel on sm 3.x: 48/16 kB Shared/L1 */
                cudaFuncSetCacheConfig(nb_kfunc_ener_prune_ptr[i][j], cudaFuncCachePreferShared);
                cudaFuncSetCacheConfig(nb_kfunc_ener_noprune_ptr[i][j], cudaFuncCachePreferShared);
                cudaFuncSetCacheConfig(nb_kfunc_noener_prune_ptr[i][j], cudaFuncCachePreferShared);
                stat = cudaFuncSetCacheConfig(nb_kfunc_noener_noprune_ptr[i][j], cudaFuncCachePreferShared);
            }
            else
            {
                /* On Fermi, preferring L1 gives ~2% higher performance */
                /* Default kernel on sm_2.x: 16/48 kB Shared/L1 */
                cudaFuncSetCacheConfig(nb_kfunc_ener_prune_ptr[i][j], cudaFuncCachePreferL1);
                cudaFuncSetCacheConfig(nb_kfunc_ener_noprune_ptr[i][j], cudaFuncCachePreferL1);
                cudaFuncSetCacheConfig(nb_kfunc_noener_prune_ptr[i][j], cudaFuncCachePreferL1);
                stat = cudaFuncSetCacheConfig(nb_kfunc_noener_noprune_ptr[i][j], cudaFuncCachePreferL1);
            }
            CU_RET_ERR(stat, "cudaFuncSetCacheConfig failed");
        }
    }
}