/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "nbnxn_cuda.h"

#include "config.h"

#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#if defined(_MSVC)
#include <limits>
#endif

#include "thread_mpi/atomic.h"

#include "gromacs/gmxlib/cuda_tools/cudautils.cuh"
#include "gromacs/legacyheaders/types/force_flags.h"
#include "gromacs/legacyheaders/types/simple.h"
#include "gromacs/mdlib/nb_verlet.h"
#include "gromacs/mdlib/nbnxn_consts.h"
#include "gromacs/mdlib/nbnxn_pairlist.h"
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_data_mgmt.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/fatalerror.h"

#include "nbnxn_cuda_types.h"
#if defined TEXOBJ_SUPPORTED && __CUDA_ARCH__ >= 300
#define USE_TEXOBJ
#endif

/*! Texture reference for LJ C6/C12 parameters; bound to cu_nbparam_t.nbfp */
texture<float, 1, cudaReadModeElementType> nbfp_texref;

/*! Texture reference for LJ-PME parameters; bound to cu_nbparam_t.nbfp_comb */
texture<float, 1, cudaReadModeElementType> nbfp_comb_texref;

/*! Texture reference for Ewald coulomb force table; bound to cu_nbparam_t.coulomb_tab */
texture<float, 1, cudaReadModeElementType> coulomb_tab_texref;
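
/* Illustrative sketch only (not part of the original file): the references
 * above are bound to device arrays by the data-management code declared in
 * nbnxn_cuda_data_mgmt.h. Binding a table of nfloats floats to nbfp_texref
 * would look roughly as below; the helper name is hypothetical. Kept under
 * #if 0 so it is never compiled.
 */
#if 0
static void bind_nbfp_texref_sketch(const float *d_nbfp, int nfloats)
{
    cudaChannelFormatDesc cd   = cudaCreateChannelDesc<float>();
    cudaError_t           stat = cudaBindTexture(NULL, &nbfp_texref, d_nbfp,
                                                 &cd, nfloats * sizeof(*d_nbfp));
    CU_RET_ERR(stat, "cudaBindTexture on nbfp_texref failed");
}
#endif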
/* Convenience defines */
#define NCL_PER_SUPERCL         (NBNXN_GPU_NCLUSTER_PER_SUPERCLUSTER)
#define CL_SIZE                 (NBNXN_GPU_CLUSTER_SIZE)
/***** The kernels come here *****/
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel_utils.cuh"
/* Top-level kernel generation: will generate through multiple inclusion the
 * following flavors for all kernels:
 * - force-only output;
 * - force and energy output;
 * - force-only with pair list pruning;
 * - force and energy output with pair list pruning.
 */
/** Force only **/
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernels.cuh"
/** Force & energy **/
#define CALC_ENERGIES
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernels.cuh"
#undef CALC_ENERGIES

/*** Pair-list pruning kernels ***/
/** Force only **/
#define PRUNE_NBL
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernels.cuh"
/** Force & energy **/
#define CALC_ENERGIES
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernels.cuh"
#undef CALC_ENERGIES
#undef PRUNE_NBL
/*! Nonbonded kernel function pointer type */
typedef void (*nbnxn_cu_kfunc_ptr_t)(const cu_atomdata_t,
                                     const cu_nbparam_t,
                                     const cu_plist_t,
                                     bool);
/*********************************/
/* XXX always/never run the energy/pruning kernels -- only for benchmarking purposes */
static bool always_ener  = (getenv("GMX_GPU_ALWAYS_ENER") != NULL);
static bool never_ener   = (getenv("GMX_GPU_NEVER_ENER") != NULL);
static bool always_prune = (getenv("GMX_GPU_ALWAYS_PRUNE") != NULL);
/* Bit-pattern used for polling-based GPU synchronization. It is used as a float
 * and corresponds to having the exponent set to the maximum (127 -- single
 * precision) and the mantissa to 0.
 */
static unsigned int poll_wait_pattern = (0x7FU << 23);
/*! Returns the number of blocks to be used for the nonbonded GPU kernel. */
static inline int calc_nb_kernel_nblock(int nwork_units, cuda_dev_info_t *dinfo)
{
    int max_grid_x_size;

    max_grid_x_size = dinfo->prop.maxGridSize[0];

    /* do we exceed the grid x dimension limit? */
    if (nwork_units > max_grid_x_size)
    {
        gmx_fatal(FARGS, "Watch out, the input system is too large to simulate!\n"
                  "The number of nonbonded work units (=number of super-clusters) exceeds the "
                  "maximum grid size in x dimension (%d > %d)!", nwork_units, max_grid_x_size);
    }

    return nwork_units;
}
/* Constant arrays listing all kernel function pointers and enabling selection
   of a kernel in an elegant manner. */

/*! Pointers to the non-bonded kernels organized in 2-dim arrays by:
 *  electrostatics and VDW type.
 *
 *  Note that the row- and column-order of function pointers has to match the
 *  order of corresponding enumerated electrostatics and vdw types, resp.,
 *  defined in nbnxn_cuda_types.h.
 */
/*! Force-only kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_noprune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_F_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_F_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_F_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_F_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_F_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_cuda }
};
/*! Force + energy kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_noprune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_VF_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_VF_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_cuda }
};
/*! Force + pruning kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_prune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_cuda }
};
/*! Force + energy + pruning kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_prune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_cuda }
};
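
/* Illustrative lookup (enum names assumed from nbnxn_cuda_types.h, e.g.
 * eelCuEWALD_TAB for tabulated Ewald electrostatics and evdwCuCUT for plain
 * cut-off LJ): the force+energy, no-pruning flavor of that combination is
 *
 *     nb_kfunc_ener_noprune_ptr[eelCuEWALD_TAB][evdwCuCUT]
 *
 * which resolves to nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_cuda above; this is
 * exactly the indexing that select_nbnxn_kernel() below performs.
 */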
/*! Return a pointer to the kernel version to be executed at the current step. */
static inline nbnxn_cu_kfunc_ptr_t select_nbnxn_kernel(int  eeltype,
                                                       int  evdwtype,
                                                       bool bDoEne,
                                                       bool bDoPrune)
{
    nbnxn_cu_kfunc_ptr_t res;

    assert(eeltype < eelCuNR);
    assert(evdwtype < evdwCuNR);

    if (bDoEne)
    {
        res = bDoPrune ? nb_kfunc_ener_prune_ptr[eeltype][evdwtype] :
                         nb_kfunc_ener_noprune_ptr[eeltype][evdwtype];
    }
    else
    {
        res = bDoPrune ? nb_kfunc_noener_prune_ptr[eeltype][evdwtype] :
                         nb_kfunc_noener_noprune_ptr[eeltype][evdwtype];
    }

    return res;
}
/*! Calculates the amount of shared memory required by the CUDA kernel in use. */
static inline int calc_shmem_required()
{
    int shmem;

    /* size of shmem (force-buffers/xq/atom type preloading) */
    /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
    /* i-atom x+q in shared memory */
    shmem  = NCL_PER_SUPERCL * CL_SIZE * sizeof(float4);
    /* cj in shared memory, for both warps separately */
    shmem += 2 * NBNXN_GPU_JGROUP_SIZE * sizeof(int);

    /* i-atom types in shared memory */
    shmem += NCL_PER_SUPERCL * CL_SIZE * sizeof(int);

#if __CUDA_ARCH__ < 300
    /* force reduction buffers in shared memory */
    shmem += CL_SIZE * CL_SIZE * 3 * sizeof(float);
#endif

    return shmem;
}
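
/* Worked example (illustrative; assumes the usual values from nbnxn_consts.h,
 * i.e. NCL_PER_SUPERCL == 8, CL_SIZE == 8, NBNXN_GPU_JGROUP_SIZE == 4):
 *   xq preload:            8*8 * sizeof(float4) = 1024 bytes
 *   cj for two warps:      2*4 * sizeof(int)    =   32 bytes
 *   i-atom types:          8*8 * sizeof(int)    =  256 bytes
 *   sm_2.x reduction:    8*8*3 * sizeof(float)  =  768 bytes
 * i.e. roughly 1.3 kB per thread block on sm_3.x and 2 kB on sm_2.x.
 */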
/*! As we execute nonbonded workload in separate streams, before launching
   the kernel we need to make sure that the following operations have completed:
    - atomdata allocation and related H2D transfers (every nstlist step);
    - pair list H2D transfer (every nstlist step);
    - shift vector H2D transfer (every nstlist step);
    - force (+shift force and energy) output clearing (every step).

   These operations are issued in the local stream at the beginning of the step
   and therefore always complete before the local kernel launch. The non-local
   kernel is launched after the local on the same device/context, so this is
   inherently scheduled after the operations in the local stream (including the
   above "misc_ops").
   However, for the sake of having a future-proof implementation, we use the
   misc_ops_done event to record the point in time when the above operations
   are finished and synchronize with this event in the non-local stream.
 */
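/* The underlying CUDA pattern, as a minimal sketch (illustrative only;
 * the stream/event variable names here are hypothetical):
 *
 *     cudaEventRecord(misc_ops_done, local_stream);
 *     cudaStreamWaitEvent(nonlocal_stream, misc_ops_done, 0);
 *
 * Everything issued into nonlocal_stream after the wait executes only once
 * the work recorded in local_stream has completed.
 */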
void nbnxn_cuda_launch_kernel(nbnxn_cuda_ptr_t        cu_nb,
                              const nbnxn_atomdata_t *nbatom,
                              int                     flags,
                              int                     iloc)
{
    cudaError_t          stat;
    int                  adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */
    /* CUDA kernel launch-related stuff */
    int                  shmem, nblock;
    dim3                 dim_block, dim_grid;
    nbnxn_cu_kfunc_ptr_t nb_kernel = NULL; /* fn pointer to the nonbonded kernel */

    cu_atomdata_t       *adat   = cu_nb->atdat;
    cu_nbparam_t        *nbp    = cu_nb->nbparam;
    cu_plist_t          *plist  = cu_nb->plist[iloc];
    cu_timers_t         *t      = cu_nb->timers;
    cudaStream_t         stream = cu_nb->stream[iloc];
    bool bCalcEner   = flags & GMX_FORCE_VIRIAL;
    bool bCalcFshift = flags & GMX_FORCE_VIRIAL;
    bool bDoTime     = cu_nb->bDoTime;

    /* turn energy calculation always on/off (for debugging/testing only) */
    bCalcEner = (bCalcEner || always_ener) && !never_ener;
    /* don't launch the kernel if there is no work to do */
    if (plist->nsci == 0)
    {
        return;
    }

    /* calculate the atom data index range based on locality */
    if (LOCAL_I(iloc))
    {
        adat_begin = 0;
        adat_len   = adat->natoms_local;
    }
    else
    {
        adat_begin = adat->natoms_local;
        adat_len   = adat->natoms - adat->natoms_local;
    }
    /* When we get here all misc operations issued in the local stream are done,
       so we record that in the local stream and wait for it in the nonlocal one. */
    if (cu_nb->bUseTwoStreams)
    {
        if (iloc == eintLocal)
        {
            stat = cudaEventRecord(cu_nb->misc_ops_done, stream);
            CU_RET_ERR(stat, "cudaEventRecord on misc_ops_done failed");
        }
        else
        {
            stat = cudaStreamWaitEvent(stream, cu_nb->misc_ops_done, 0);
            CU_RET_ERR(stat, "cudaStreamWaitEvent on misc_ops_done failed");
        }
    }
    /* beginning of timed HtoD section */
    if (bDoTime)
    {
        stat = cudaEventRecord(t->start_nb_h2d[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

    /* HtoD x, q */
    cu_copy_H2D_async(adat->xq + adat_begin, nbatom->x + adat_begin * 4,
                      adat_len * sizeof(*adat->xq), stream);

    if (bDoTime)
    {
        stat = cudaEventRecord(t->stop_nb_h2d[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }
    /* beginning of timed nonbonded calculation section */
    if (bDoTime)
    {
        stat = cudaEventRecord(t->start_nb_k[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

    /* get the pointer to the kernel flavor we need to use */
    nb_kernel = select_nbnxn_kernel(nbp->eeltype,
                                    nbp->vdwtype,
                                    bCalcEner,
                                    plist->bDoPrune || always_prune);
    /* kernel launch config */
    nblock    = calc_nb_kernel_nblock(plist->nsci, cu_nb->dev_info);
    dim_block = dim3(CL_SIZE, CL_SIZE, 1);
    dim_grid  = dim3(nblock, 1, 1);
    shmem     = calc_shmem_required();
    if (debug)
    {
        fprintf(debug, "GPU launch configuration:\n\tThread block: %dx%dx%d\n\t"
                "Grid: %dx%d\n\t#Super-clusters/clusters: %d/%d (%d)\n",
                dim_block.x, dim_block.y, dim_block.z,
                dim_grid.x, dim_grid.y, plist->nsci*NCL_PER_SUPERCL,
                NCL_PER_SUPERCL, plist->na_c);
    }
    nb_kernel<<< dim_grid, dim_block, shmem, stream>>> (*adat, *nbp, *plist, bCalcFshift);
    CU_LAUNCH_ERR("k_calc_nb");

    if (bDoTime)
    {
        stat = cudaEventRecord(t->stop_nb_k[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }
}
void nbnxn_cuda_launch_cpyback(nbnxn_cuda_ptr_t        cu_nb,
                               const nbnxn_atomdata_t *nbatom,
                               int                     flags,
                               int                     aloc)
{
    cudaError_t stat;
    int         adat_begin, adat_len, adat_end; /* local/nonlocal offset and length used for xq and f */
    int         iloc = -1;

    /* determine interaction locality from atom locality */
    if (LOCAL_A(aloc))
    {
        iloc = eintLocal;
    }
    else if (NONLOCAL_A(aloc))
    {
        iloc = eintNonlocal;
    }
    else
    {
        char stmp[STRLEN];
        sprintf(stmp, "Invalid atom locality passed (%d); valid here is only "
                "local (%d) or nonlocal (%d)", aloc, eatLocal, eatNonlocal);
        gmx_incons(stmp);
    }
    cu_atomdata_t *adat    = cu_nb->atdat;
    cu_timers_t   *t       = cu_nb->timers;
    bool           bDoTime = cu_nb->bDoTime;
    cudaStream_t   stream  = cu_nb->stream[iloc];

    bool bCalcEner   = flags & GMX_FORCE_VIRIAL;
    bool bCalcFshift = flags & GMX_FORCE_VIRIAL;
    /* don't launch copy-back if there was no work to do */
    if (cu_nb->plist[iloc]->nsci == 0)
    {
        return;
    }

    /* calculate the atom data index range based on locality */
    if (LOCAL_A(aloc))
    {
        adat_begin = 0;
        adat_len   = adat->natoms_local;
        adat_end   = cu_nb->atdat->natoms_local;
    }
    else
    {
        adat_begin = adat->natoms_local;
        adat_len   = adat->natoms - adat->natoms_local;
        adat_end   = cu_nb->atdat->natoms;
    }
    /* beginning of timed D2H section */
    if (bDoTime)
    {
        stat = cudaEventRecord(t->start_nb_d2h[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }
    if (!cu_nb->bUseStreamSync)
    {
        /* For safety reasons set a few (5%) forces to NaN. This way even if the
           polling "hack" fails with some future NVIDIA driver we'll get a crash. */
        for (int i = adat_begin; i < 3*adat_end + 2; i += adat_len/20)
        {
#ifdef NAN
            nbatom->out[0].f[i] = NAN;
#else
#  ifdef _MSVC
            if (std::numeric_limits<float>::has_quiet_NaN)
            {
                nbatom->out[0].f[i] = std::numeric_limits<float>::quiet_NaN();
            }
            else
#  endif
            {
                nbatom->out[0].f[i] = GMX_REAL_MAX;
            }
#endif
        }

        /* Set the last four bytes of the force array to a bit pattern
           which can't be the result of the force calculation:
           max exponent (127) and zero mantissa. */
        *(unsigned int*)&nbatom->out[0].f[adat_end*3 - 1] = poll_wait_pattern;
    }
    /* With DD the local D2H transfer can only start after the non-local
       has been launched. */
    if (iloc == eintLocal && cu_nb->bUseTwoStreams)
    {
        stat = cudaStreamWaitEvent(stream, cu_nb->nonlocal_done, 0);
        CU_RET_ERR(stat, "cudaStreamWaitEvent on nonlocal_done failed");
    }
    cu_copy_D2H_async(nbatom->out[0].f + adat_begin * 3, adat->f + adat_begin,
                      (adat_len)*sizeof(*adat->f), stream);
    /* After the non-local D2H is launched the nonlocal_done event can be
       recorded which signals that the local D2H can proceed. This event is not
       placed after the non-local kernel because we first need the non-local
       data back. */
    if (iloc == eintNonlocal)
    {
        stat = cudaEventRecord(cu_nb->nonlocal_done, stream);
        CU_RET_ERR(stat, "cudaEventRecord on nonlocal_done failed");
    }
    /* only transfer energies in the local stream */
    if (LOCAL_I(iloc))
    {
        /* DtoH fshift */
        if (bCalcFshift)
        {
            cu_copy_D2H_async(cu_nb->nbst.fshift, adat->fshift,
                              SHIFTS * sizeof(*cu_nb->nbst.fshift), stream);
        }

        /* DtoH energies */
        if (bCalcEner)
        {
            cu_copy_D2H_async(cu_nb->nbst.e_lj, adat->e_lj,
                              sizeof(*cu_nb->nbst.e_lj), stream);
            cu_copy_D2H_async(cu_nb->nbst.e_el, adat->e_el,
                              sizeof(*cu_nb->nbst.e_el), stream);
        }
    }

    if (bDoTime)
    {
        stat = cudaEventRecord(t->stop_nb_d2h[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }
}
/* Atomic compare-exchange operation on unsigned values. It is used in
 * polling wait for the GPU.
 */
static inline bool atomic_cas(volatile unsigned int *ptr,
                              unsigned int           oldval,
                              unsigned int           newval)
{
    assert(ptr);

#ifdef TMPI_ATOMICS
    return tMPI_Atomic_cas((tMPI_Atomic_t *)ptr, oldval, newval);
#else
    gmx_incons("Atomic operations not available, atomic_cas() should not have been called!");
    return true;
#endif
}
void nbnxn_cuda_wait_gpu(nbnxn_cuda_ptr_t        cu_nb,
                         const nbnxn_atomdata_t *nbatom,
                         int flags, int aloc,
                         real *e_lj, real *e_el, rvec *fshift)
{
    /* NOTE: only implemented for single-precision at this time */
    cudaError_t            stat;
    int                    i, adat_end, iloc = -1;
    volatile unsigned int *poll_word;

    /* determine interaction locality from atom locality */
    if (LOCAL_A(aloc))
    {
        iloc = eintLocal;
    }
    else if (NONLOCAL_A(aloc))
    {
        iloc = eintNonlocal;
    }
    else
    {
        char stmp[STRLEN];
        sprintf(stmp, "Invalid atom locality passed (%d); valid here is only "
                "local (%d) or nonlocal (%d)", aloc, eatLocal, eatNonlocal);
        gmx_incons(stmp);
    }
    cu_plist_t      *plist   = cu_nb->plist[iloc];
    cu_timers_t     *timers  = cu_nb->timers;
    wallclock_gpu_t *timings = cu_nb->timings;
    nb_staging       nbst    = cu_nb->nbst;

    bool bCalcEner   = flags & GMX_FORCE_VIRIAL;
    bool bCalcFshift = flags & GMX_FORCE_VIRIAL;

    /* turn energy calculation always on/off (for debugging/testing only) */
    bCalcEner = (bCalcEner || always_ener) && !never_ener;
    /* don't launch wait/update timers & counters if there was no work to do

       NOTE: if timing with multiple GPUs (streams) becomes possible, the
       counters could end up being inconsistent due to not being incremented
       on some of the nodes! */
    if (cu_nb->plist[iloc]->nsci == 0)
    {
        return;
    }

    /* calculate the atom data index range based on locality */
    if (LOCAL_A(aloc))
    {
        adat_end = cu_nb->atdat->natoms_local;
    }
    else
    {
        adat_end = cu_nb->atdat->natoms;
    }
    if (cu_nb->bUseStreamSync)
    {
        stat = cudaStreamSynchronize(cu_nb->stream[iloc]);
        CU_RET_ERR(stat, "cudaStreamSynchronize failed in cu_blockwait_nb");
    }
    else
    {
        /* Busy-wait until we get the signal pattern set in last byte
         * of the l/nl float vector. This pattern corresponds to a floating
         * point number which can't be the result of the force calculation
         * (maximum, 127 exponent and 0 mantissa).
         * The polling uses atomic compare-exchange.
         */
        poll_word = (volatile unsigned int*)&nbatom->out[0].f[adat_end*3 - 1];
        while (atomic_cas(poll_word, poll_wait_pattern, poll_wait_pattern))
        {
            /* the CAS in the condition is the whole loop body: spin until
               the sentinel word is overwritten by the D2H force transfer */
        }
    }
    /* timing data accumulation */
    if (cu_nb->bDoTime)
    {
        /* only increase counter once (at local F wait) */
        if (LOCAL_I(iloc))
        {
            timings->nb_c++;
            timings->ktime[plist->bDoPrune ? 1 : 0][bCalcEner ? 1 : 0].c += 1;
        }

        /* kernel timings */
        timings->ktime[plist->bDoPrune ? 1 : 0][bCalcEner ? 1 : 0].t +=
            cu_event_elapsed(timers->start_nb_k[iloc], timers->stop_nb_k[iloc]);

        /* X/q H2D and F D2H timings */
        timings->nb_h2d_t += cu_event_elapsed(timers->start_nb_h2d[iloc],
                                              timers->stop_nb_h2d[iloc]);
        timings->nb_d2h_t += cu_event_elapsed(timers->start_nb_d2h[iloc],
                                              timers->stop_nb_d2h[iloc]);

        /* only count atdat and pair-list H2D at pair-search step */
        if (plist->bDoPrune)
        {
            /* atdat transfer timing (add only once, at local F wait) */
            if (LOCAL_A(aloc))
            {
                timings->pl_h2d_c++;
                timings->pl_h2d_t += cu_event_elapsed(timers->start_atdat,
                                                      timers->stop_atdat);
            }

            timings->pl_h2d_t += cu_event_elapsed(timers->start_pl_h2d[iloc],
                                                  timers->stop_pl_h2d[iloc]);
        }
    }
    /* add up energies and shift forces (only once at local F wait) */
    if (LOCAL_I(iloc))
    {
        if (bCalcEner)
        {
            *e_lj += *nbst.e_lj;
            *e_el += *nbst.e_el;
        }

        if (bCalcFshift)
        {
            for (i = 0; i < SHIFTS; i++)
            {
                fshift[i][0] += nbst.fshift[i].x;
                fshift[i][1] += nbst.fshift[i].y;
                fshift[i][2] += nbst.fshift[i].z;
            }
        }
    }

    /* turn off pruning (doesn't matter if this is pair-search step or not) */
    plist->bDoPrune = false;
}
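
/* Typical per-step call sequence from the host side, as a minimal sketch
 * (illustrative only; the mdrun bookkeeping around it is omitted and the
 * variable names are hypothetical):
 *
 *     nbnxn_cuda_launch_kernel (cu_nb, nbatom, flags, eintLocal);
 *     nbnxn_cuda_launch_cpyback(cu_nb, nbatom, flags, eatLocal);
 *     // ... CPU work overlapping with the GPU ...
 *     nbnxn_cuda_wait_gpu(cu_nb, nbatom, flags, eatLocal,
 *                         &e_lj, &e_el, fshift);
 *
 * With domain decomposition the same launch/copy-back/wait sequence is also
 * issued for the non-local part (eintNonlocal/eatNonlocal), and the events
 * recorded above enforce the required inter-stream ordering.
 */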
/*! Return the reference to the nbfp texture. */
const struct texture<float, 1, cudaReadModeElementType> &nbnxn_cuda_get_nbfp_texref()
{
    return nbfp_texref;
}

/*! Return the reference to the nbfp_comb texture. */
const struct texture<float, 1, cudaReadModeElementType> &nbnxn_cuda_get_nbfp_comb_texref()
{
    return nbfp_comb_texref;
}

/*! Return the reference to the coulomb_tab texture. */
const struct texture<float, 1, cudaReadModeElementType> &nbnxn_cuda_get_coulomb_tab_texref()
{
    return coulomb_tab_texref;
}
/*! Set up the cache configuration for the non-bonded kernels. */
void nbnxn_cuda_set_cacheconfig(cuda_dev_info_t *devinfo)
{
    cudaError_t stat;

    for (int i = 0; i < eelCuNR; i++)
    {
        for (int j = 0; j < evdwCuNR; j++)
        {
            if (devinfo->prop.major >= 3)
            {
                /* Default kernel on sm 3.x: 48/16 kB Shared/L1 */
                cudaFuncSetCacheConfig(nb_kfunc_ener_prune_ptr[i][j], cudaFuncCachePreferShared);
                cudaFuncSetCacheConfig(nb_kfunc_ener_noprune_ptr[i][j], cudaFuncCachePreferShared);
                cudaFuncSetCacheConfig(nb_kfunc_noener_prune_ptr[i][j], cudaFuncCachePreferShared);
                stat = cudaFuncSetCacheConfig(nb_kfunc_noener_noprune_ptr[i][j], cudaFuncCachePreferShared);
            }
            else
            {
                /* On Fermi prefer L1 gives 2% higher performance */
                /* Default kernel on sm_2.x: 16/48 kB Shared/L1 */
                cudaFuncSetCacheConfig(nb_kfunc_ener_prune_ptr[i][j], cudaFuncCachePreferL1);
                cudaFuncSetCacheConfig(nb_kfunc_ener_noprune_ptr[i][j], cudaFuncCachePreferL1);
                cudaFuncSetCacheConfig(nb_kfunc_noener_prune_ptr[i][j], cudaFuncCachePreferL1);
                stat = cudaFuncSetCacheConfig(nb_kfunc_noener_noprune_ptr[i][j], cudaFuncCachePreferL1);
            }
            CU_RET_ERR(stat, "cudaFuncSetCacheConfig failed");
        }
    }
}