/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "nbnxn_cuda.h"

#include <assert.h>
#include <stdlib.h>

#if defined(_MSVC)
#include <limits>
#endif

#include <cuda.h>

#include "thread_mpi/atomic.h"

#include "gromacs/gmxlib/cuda_tools/cudautils.cuh"
#include "gromacs/legacyheaders/types/force_flags.h"
#include "gromacs/legacyheaders/types/simple.h"
#include "gromacs/mdlib/nb_verlet.h"
#include "gromacs/mdlib/nbnxn_consts.h"
#include "gromacs/mdlib/nbnxn_pairlist.h"
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_data_mgmt.h"
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_types.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/utility/cstringutil.h"
#if defined TEXOBJ_SUPPORTED && __CUDA_ARCH__ >= 300
#define USE_TEXOBJ
#endif

/*! Texture reference for LJ C6/C12 parameters; bound to cu_nbparam_t.nbfp */
texture<float, 1, cudaReadModeElementType> nbfp_texref;

/*! Texture reference for LJ-PME parameters; bound to cu_nbparam_t.nbfp_comb */
texture<float, 1, cudaReadModeElementType> nbfp_comb_texref;

/*! Texture reference for Ewald coulomb force table; bound to cu_nbparam_t.coulomb_tab */
texture<float, 1, cudaReadModeElementType> coulomb_tab_texref;
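
/* Illustration only (not part of the original sources): how a texture
 * reference like nbfp_texref is typically bound on the host and fetched on
 * the device. The device pointer and size below are hypothetical
 * placeholders; the real binding is done in the data-management code.
 */
#if 0
static void bind_nbfp_texture_example(const float *d_nbfp, size_t nbfp_bytes)
{
    cudaChannelFormatDesc cd = cudaCreateChannelDesc<float>();
    /* bind the linear device buffer to the texture reference */
    cudaBindTexture(NULL, &nbfp_texref, d_nbfp, &cd, nbfp_bytes);
}

static __device__ float fetch_nbfp_example(int i)
{
    /* cached 1D fetch through the texture unit */
    return tex1Dfetch(nbfp_texref, i);
}
#endif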
/* Convenience defines */
#define NCL_PER_SUPERCL         (NBNXN_GPU_NCLUSTER_PER_SUPERCLUSTER)
#define CL_SIZE                 (NBNXN_GPU_CLUSTER_SIZE)
/***** The kernels come here *****/
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel_utils.cuh"

/* Top-level kernel generation: will generate through multiple inclusion the
 * following flavors for all kernels:
 * - force-only output;
 * - force and energy output;
 * - force-only with pair list pruning;
 * - force and energy output with pair list pruning.
 */
/** Force only **/
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernels.cuh"
/** Force & energy **/
#define CALC_ENERGIES
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernels.cuh"
#undef CALC_ENERGIES

/*** Pair-list pruning kernels ***/
/** Force only **/
#define PRUNE_NBL
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernels.cuh"
/** Force & energy **/
#define CALC_ENERGIES
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernels.cuh"
#undef CALC_ENERGIES
#undef PRUNE_NBL
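
/* A minimal sketch (illustration only) of the multiple-inclusion idiom used
 * above: the included header compiles a different kernel flavor depending on
 * which of CALC_ENERGIES/PRUNE_NBL are defined at the point of inclusion.
 * The names below are hypothetical; the real header also mangles the kernel
 * names per flavor so the definitions do not collide.
 */
#if 0
/* ---- flavor_kernel_example.cuh ---- */
#ifdef CALC_ENERGIES
__global__ void example_kernel_VF() { /* compute forces + energies */ }
#else
__global__ void example_kernel_F()  { /* compute forces only */ }
#endif
#endif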
/*! Nonbonded kernel function pointer type */
typedef void (*nbnxn_cu_kfunc_ptr_t)(const cu_atomdata_t,
                                     const cu_nbparam_t,
                                     const cu_plist_t,
                                     bool);
/*********************************/

/* XXX always/never run the energy/pruning kernels -- only for benchmarking purposes */
static bool always_ener  = (getenv("GMX_GPU_ALWAYS_ENER") != NULL);
static bool never_ener   = (getenv("GMX_GPU_NEVER_ENER") != NULL);
static bool always_prune = (getenv("GMX_GPU_ALWAYS_PRUNE") != NULL);
/* Bit-pattern used for polling-based GPU synchronization. It is used as a float
 * and corresponds to having the exponent set to the maximum (127 -- single
 * precision) and the mantissa to 0.
 */
static unsigned int poll_wait_pattern = (0x7FU << 23);
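
/* Illustration only (not part of the original sources): how the pattern above
 * decomposes in IEEE-754 single precision. (0x7FU << 23) equals 0x3F800000:
 * sign bit 0, biased exponent field 0x7F, mantissa 0.
 */
#if 0
static void poll_wait_pattern_layout_example()
{
    unsigned int u = poll_wait_pattern;
    assert(u == 0x3F800000U);
    assert(((u >> 23) & 0xFFU) == 0x7FU); /* 8-bit exponent field */
    assert((u & 0x007FFFFFU) == 0U);      /* 23-bit mantissa field */
}
#endif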
/*! Returns the number of blocks to be used for the nonbonded GPU kernel. */
static inline int calc_nb_kernel_nblock(int nwork_units, cuda_dev_info_t *dinfo)
{
    int max_grid_x_size;

    assert(dinfo);

    max_grid_x_size = dinfo->prop.maxGridSize[0];

    /* do we exceed the grid x dimension limit? */
    if (nwork_units > max_grid_x_size)
    {
        gmx_fatal(FARGS, "Watch out, the input system is too large to simulate!\n"
                  "The number of nonbonded work units (=number of super-clusters) exceeds the "
                  "maximum grid size in x dimension (%d > %d)!", nwork_units, max_grid_x_size);
    }

    return nwork_units;
}
/* Constant arrays listing all kernel function pointers and enabling selection
   of a kernel in an elegant manner. */

/*! Pointers to the non-bonded kernels organized in 2-dim arrays by:
 *  electrostatics and VDW type.
 *
 *  Note that the row- and column-order of function pointers has to match the
 *  order of corresponding enumerated electrostatics and vdw types, resp.,
 *  defined in nbnxn_cuda_types.h.
 */
/*! Force-only kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_noprune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_F_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_F_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_F_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_F_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_F_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_cuda }
};
/*! Force + energy kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_noprune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_VF_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_VF_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_cuda }
};
/*! Force + pruning kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_prune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_cuda }
};
/*! Force + energy + pruning kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_prune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_cuda }
};
/*! Return a pointer to the kernel version to be executed at the current step. */
static inline nbnxn_cu_kfunc_ptr_t select_nbnxn_kernel(int  eeltype,
                                                       int  evdwtype,
                                                       bool bDoEne,
                                                       bool bDoPrune)
{
    nbnxn_cu_kfunc_ptr_t res;

    assert(eeltype < eelCuNR);
    assert(evdwtype < evdwCuNR);

    if (bDoEne)
    {
        if (bDoPrune)
        {
            res = nb_kfunc_ener_prune_ptr[eeltype][evdwtype];
        }
        else
        {
            res = nb_kfunc_ener_noprune_ptr[eeltype][evdwtype];
        }
    }
    else
    {
        if (bDoPrune)
        {
            res = nb_kfunc_noener_prune_ptr[eeltype][evdwtype];
        }
        else
        {
            res = nb_kfunc_noener_noprune_ptr[eeltype][evdwtype];
        }
    }

    return res;
}
/*! Calculates the amount of shared memory required by the CUDA kernel in use. */
static inline int calc_shmem_required()
{
    int shmem;

    /* size of shmem (force-buffers/xq/atom type preloading) */
    /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
    /* i-atom x+q in shared memory */
    shmem  = NCL_PER_SUPERCL * CL_SIZE * sizeof(float4);
    /* cj in shared memory, for both warps separately */
    shmem += 2 * NBNXN_GPU_JGROUP_SIZE * sizeof(int);
#ifdef IATYPE_SHMEM
    /* i-atom types in shared memory */
    shmem += NCL_PER_SUPERCL * CL_SIZE * sizeof(int);
#endif
#if __CUDA_ARCH__ < 300
    /* force reduction buffers in shared memory */
    shmem += CL_SIZE * CL_SIZE * 3 * sizeof(float);
#endif

    return shmem;
}
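
/* Worked example (illustration only), assuming the usual geometry of these
 * kernels: NCL_PER_SUPERCL = 8, CL_SIZE = 8, NBNXN_GPU_JGROUP_SIZE = 4.
 *   xq preload:     8 * 8 * sizeof(float4) = 1024 bytes
 *   cj (two warps): 2 * 4 * sizeof(int)    =   32 bytes
 * i.e. just over 1 kB of dynamic shared memory per thread block on sm_3.0,
 * plus the i-atom type and force-reduction buffers on architectures where the
 * conditional branches above apply.
 */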
/*! As we execute nonbonded workload in separate streams, before launching
   the kernel we need to make sure that the following operations have completed:
   - atomdata allocation and related H2D transfers (every nstlist step);
   - pair list H2D transfer (every nstlist step);
   - shift vector H2D transfer (every nstlist step);
   - force (+shift force and energy) output clearing (every step).

   These operations are issued in the local stream at the beginning of the step
   and therefore always complete before the local kernel launch. The non-local
   kernel is launched after the local on the same device/context, so this is
   inherently scheduled after the operations in the local stream (including the
   above operations).
   However, for the sake of having a future-proof implementation, we use the
   misc_ops_done event to record the point in time when the above operations
   are finished and synchronize with this event in the non-local stream.
 */
void nbnxn_cuda_launch_kernel(nbnxn_cuda_ptr_t        cu_nb,
                              const nbnxn_atomdata_t *nbatom,
                              int                     flags,
                              int                     iloc)
{
    cudaError_t          stat;
    int                  adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */
    /* CUDA kernel launch-related stuff */
    int                  shmem, nblock;
    dim3                 dim_block, dim_grid;
    nbnxn_cu_kfunc_ptr_t nb_kernel = NULL; /* fn pointer to the nonbonded kernel */

    cu_atomdata_t       *adat   = cu_nb->atdat;
    cu_nbparam_t        *nbp    = cu_nb->nbparam;
    cu_plist_t          *plist  = cu_nb->plist[iloc];
    cu_timers_t         *t      = cu_nb->timers;
    cudaStream_t         stream = cu_nb->stream[iloc];
    bool bCalcEner   = flags & GMX_FORCE_VIRIAL;
    bool bCalcFshift = flags & GMX_FORCE_VIRIAL;
    bool bDoTime     = cu_nb->bDoTime;

    /* turn energy calculation always on/off (for debugging/testing only) */
    bCalcEner = (bCalcEner || always_ener) && !never_ener;
    /* don't launch the kernel if there is no work to do */
    if (plist->nsci == 0)
    {
        return;
    }

    /* calculate the atom data index range based on locality */
    if (LOCAL_I(iloc))
    {
        adat_begin = 0;
        adat_len   = adat->natoms_local;
    }
    else
    {
        adat_begin = adat->natoms_local;
        adat_len   = adat->natoms - adat->natoms_local;
    }
    /* When we get here all misc operations issued in the local stream are done,
       so we record that in the local stream and wait for it in the nonlocal one. */
    if (cu_nb->bUseTwoStreams)
    {
        if (iloc == eintLocal)
        {
            stat = cudaEventRecord(cu_nb->misc_ops_done, stream);
            CU_RET_ERR(stat, "cudaEventRecord on misc_ops_done failed");
        }
        else
        {
            stat = cudaStreamWaitEvent(stream, cu_nb->misc_ops_done, 0);
            CU_RET_ERR(stat, "cudaStreamWaitEvent on misc_ops_done failed");
        }
    }
    /* beginning of timed HtoD section */
    if (bDoTime)
    {
        stat = cudaEventRecord(t->start_nb_h2d[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

    /* HtoD x, q */
    cu_copy_H2D_async(adat->xq + adat_begin, nbatom->x + adat_begin * 4,
                      adat_len * sizeof(*adat->xq), stream);

    if (bDoTime)
    {
        stat = cudaEventRecord(t->stop_nb_h2d[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }
    /* beginning of timed nonbonded calculation section */
    if (bDoTime)
    {
        stat = cudaEventRecord(t->start_nb_k[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

    /* get the pointer to the kernel flavor we need to use */
    nb_kernel = select_nbnxn_kernel(nbp->eeltype,
                                    nbp->vdwtype,
                                    bCalcEner,
                                    plist->bDoPrune || always_prune);
    /* kernel launch config */
    nblock    = calc_nb_kernel_nblock(plist->nsci, cu_nb->dev_info);
    dim_block = dim3(CL_SIZE, CL_SIZE, 1);
    dim_grid  = dim3(nblock, 1, 1);
    shmem     = calc_shmem_required();

    if (debug)
    {
        fprintf(debug, "GPU launch configuration:\n\tThread block: %dx%dx%d\n\t"
                "Grid: %dx%d\n\t#Super-clusters/clusters: %d/%d (%d)\n",
                dim_block.x, dim_block.y, dim_block.z,
                dim_grid.x, dim_grid.y, plist->nsci*NCL_PER_SUPERCL,
                NCL_PER_SUPERCL, plist->na_c);
    }
    nb_kernel<<< dim_grid, dim_block, shmem, stream>>> (*adat, *nbp, *plist, bCalcFshift);
    CU_LAUNCH_ERR("k_calc_nb");

    if (bDoTime)
    {
        stat = cudaEventRecord(t->stop_nb_k[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }
}
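
/* Worked example (illustration only): with CL_SIZE = 8 the launch above uses
 * 8x8 = 64 threads per block (two warps), one block per super-cluster in the
 * pair list (grid x dimension = plist->nsci), and shmem extra bytes of
 * dynamic shared memory per block, all issued asynchronously into the
 * locality-specific stream.
 */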
void nbnxn_cuda_launch_cpyback(nbnxn_cuda_ptr_t        cu_nb,
                               const nbnxn_atomdata_t *nbatom,
                               int                     flags,
                               int                     aloc)
{
    cudaError_t stat;
    int         adat_begin, adat_len, adat_end; /* local/nonlocal offset and length used for xq and f */
    int         iloc = -1;

    /* determine interaction locality from atom locality */
    if (LOCAL_A(aloc))
    {
        iloc = eintLocal;
    }
    else if (NONLOCAL_A(aloc))
    {
        iloc = eintNonlocal;
    }
    else
    {
        char stmp[STRLEN];
        sprintf(stmp, "Invalid atom locality passed (%d); valid here is only "
                "local (%d) or nonlocal (%d)", aloc, eatLocal, eatNonlocal);
        gmx_incons(stmp);
    }
    cu_atomdata_t *adat    = cu_nb->atdat;
    cu_timers_t   *t       = cu_nb->timers;
    bool           bDoTime = cu_nb->bDoTime;
    cudaStream_t   stream  = cu_nb->stream[iloc];

    bool bCalcEner   = flags & GMX_FORCE_VIRIAL;
    bool bCalcFshift = flags & GMX_FORCE_VIRIAL;
    /* don't launch copy-back if there was no work to do */
    if (cu_nb->plist[iloc]->nsci == 0)
    {
        return;
    }

    /* calculate the atom data index range based on locality */
    if (LOCAL_A(aloc))
    {
        adat_begin = 0;
        adat_len   = adat->natoms_local;
        adat_end   = cu_nb->atdat->natoms_local;
    }
    else
    {
        adat_begin = adat->natoms_local;
        adat_len   = adat->natoms - adat->natoms_local;
        adat_end   = cu_nb->atdat->natoms;
    }
    /* beginning of timed D2H section */
    if (bDoTime)
    {
        stat = cudaEventRecord(t->start_nb_d2h[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

    if (!cu_nb->bUseStreamSync)
    {
        /* For safety reasons set a few (5%) forces to NaN. This way even if the
           polling "hack" fails with some future NVIDIA driver we'll get a crash. */
        for (int i = adat_begin; i < 3*adat_end + 2; i += adat_len/20)
        {
#ifdef NAN
            nbatom->out[0].f[i] = NAN;
#else
            if (numeric_limits<float>::has_quiet_NaN)
            {
                nbatom->out[0].f[i] = numeric_limits<float>::quiet_NaN();
            }
            else
            {
                nbatom->out[0].f[i] = GMX_REAL_MAX;
            }
#endif
        }

        /* Set the last four bytes of the force array to a bit pattern
           which can't be the result of the force calculation:
           max exponent (127) and zero mantissa. */
        *(unsigned int*)&nbatom->out[0].f[adat_end*3 - 1] = poll_wait_pattern;
    }
    /* With DD the local D2H transfer can only start after the non-local
       has been launched. */
    if (iloc == eintLocal && cu_nb->bUseTwoStreams)
    {
        stat = cudaStreamWaitEvent(stream, cu_nb->nonlocal_done, 0);
        CU_RET_ERR(stat, "cudaStreamWaitEvent on nonlocal_done failed");
    }

    /* DtoH f */
    cu_copy_D2H_async(nbatom->out[0].f + adat_begin * 3, adat->f + adat_begin,
                      (adat_len)*sizeof(*adat->f), stream);
    /* After the non-local D2H is launched the nonlocal_done event can be
       recorded which signals that the local D2H can proceed. This event is not
       placed after the non-local kernel because we need the non-local data
       back first. */
    if (iloc == eintNonlocal)
    {
        stat = cudaEventRecord(cu_nb->nonlocal_done, stream);
        CU_RET_ERR(stat, "cudaEventRecord on nonlocal_done failed");
    }
    /* only transfer energies in the local stream */
    if (LOCAL_I(iloc))
    {
        /* DtoH fshift */
        if (bCalcFshift)
        {
            cu_copy_D2H_async(cu_nb->nbst.fshift, adat->fshift,
                              SHIFTS * sizeof(*cu_nb->nbst.fshift), stream);
        }

        /* DtoH energies */
        if (bCalcEner)
        {
            cu_copy_D2H_async(cu_nb->nbst.e_lj, adat->e_lj,
                              sizeof(*cu_nb->nbst.e_lj), stream);
            cu_copy_D2H_async(cu_nb->nbst.e_el, adat->e_el,
                              sizeof(*cu_nb->nbst.e_el), stream);
        }
    }

    if (bDoTime)
    {
        stat = cudaEventRecord(t->stop_nb_d2h[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }
}
/* Atomic compare-exchange operation on unsigned values. It is used in
 * polling wait for the GPU.
 */
static inline bool atomic_cas(volatile unsigned int *ptr,
                              unsigned int           oldval,
                              unsigned int           newval)
{
    assert(ptr);

#ifdef TMPI_ATOMICS
    return tMPI_Atomic_cas((tMPI_Atomic_t *)ptr, oldval, newval);
#else
    gmx_incons("Atomic operations not available, atomic_cas() should not have been called!");
    return true;
#endif
}
void nbnxn_cuda_wait_gpu(nbnxn_cuda_ptr_t        cu_nb,
                         const nbnxn_atomdata_t *nbatom,
                         int flags, int aloc,
                         real *e_lj, real *e_el, rvec *fshift)
{
    /* NOTE: only implemented for single-precision at this time */
    cudaError_t            stat;
    int                    i, adat_end, iloc = -1;
    volatile unsigned int *poll_word;

    /* determine interaction locality from atom locality */
    if (LOCAL_A(aloc))
    {
        iloc = eintLocal;
    }
    else if (NONLOCAL_A(aloc))
    {
        iloc = eintNonlocal;
    }
    else
    {
        char stmp[STRLEN];
        sprintf(stmp, "Invalid atom locality passed (%d); valid here is only "
                "local (%d) or nonlocal (%d)", aloc, eatLocal, eatNonlocal);
        gmx_incons(stmp);
    }
    cu_plist_t      *plist   = cu_nb->plist[iloc];
    cu_timers_t     *timers  = cu_nb->timers;
    wallclock_gpu_t *timings = cu_nb->timings;
    nb_staging       nbst    = cu_nb->nbst;

    bool bCalcEner   = flags & GMX_FORCE_VIRIAL;
    bool bCalcFshift = flags & GMX_FORCE_VIRIAL;

    /* turn energy calculation always on/off (for debugging/testing only) */
    bCalcEner = (bCalcEner || always_ener) && !never_ener;
    /* don't launch wait/update timers & counters if there was no work to do

       NOTE: if timing with multiple GPUs (streams) becomes possible, the
       counters could end up being inconsistent due to not being incremented
       on some of the nodes! */
    if (cu_nb->plist[iloc]->nsci == 0)
    {
        return;
    }

    /* calculate the atom data index range based on locality */
    if (LOCAL_A(aloc))
    {
        adat_end = cu_nb->atdat->natoms_local;
    }
    else
    {
        adat_end = cu_nb->atdat->natoms;
    }
    if (cu_nb->bUseStreamSync)
    {
        stat = cudaStreamSynchronize(cu_nb->stream[iloc]);
        CU_RET_ERR(stat, "cudaStreamSynchronize failed in cu_blockwait_nb");
    }
    else
    {
        /* Busy-wait until we get the signal pattern set in last byte
         * of the l/nl float vector. This pattern corresponds to a floating
         * point number which can't be the result of the force calculation
         * (maximum, 127 exponent and 0 mantissa).
         * The polling uses atomic compare-exchange.
         */
        poll_word = (volatile unsigned int*)&nbatom->out[0].f[adat_end*3 - 1];
        while (atomic_cas(poll_word, poll_wait_pattern, poll_wait_pattern))
        {
        }
    }
    /* timing data accumulation */
    if (cu_nb->bDoTime)
    {
        /* only increase counter once (at local F wait) */
        if (LOCAL_I(iloc))
        {
            timings->nb_c++;
            timings->ktime[plist->bDoPrune ? 1 : 0][bCalcEner ? 1 : 0].c += 1;
        }

        /* kernel timings */
        timings->ktime[plist->bDoPrune ? 1 : 0][bCalcEner ? 1 : 0].t +=
            cu_event_elapsed(timers->start_nb_k[iloc], timers->stop_nb_k[iloc]);

        /* X/q H2D and F D2H timings */
        timings->nb_h2d_t += cu_event_elapsed(timers->start_nb_h2d[iloc],
                                              timers->stop_nb_h2d[iloc]);
        timings->nb_d2h_t += cu_event_elapsed(timers->start_nb_d2h[iloc],
                                              timers->stop_nb_d2h[iloc]);

        /* only count atdat and pair-list H2D at pair-search step */
        if (plist->bDoPrune)
        {
            /* atdat transfer timing (add only once, at local F wait) */
            if (LOCAL_A(aloc))
            {
                timings->pl_h2d_t += cu_event_elapsed(timers->start_atdat,
                                                      timers->stop_atdat);
            }

            timings->pl_h2d_t += cu_event_elapsed(timers->start_pl_h2d[iloc],
                                                  timers->stop_pl_h2d[iloc]);
        }
    }
    /* add up energies and shift forces (only once at local F wait) */
    if (LOCAL_I(iloc))
    {
        if (bCalcEner)
        {
            *e_lj += *nbst.e_lj;
            *e_el += *nbst.e_el;
        }

        if (bCalcFshift)
        {
            for (i = 0; i < SHIFTS; i++)
            {
                fshift[i][0] += nbst.fshift[i].x;
                fshift[i][1] += nbst.fshift[i].y;
                fshift[i][2] += nbst.fshift[i].z;
            }
        }
    }

    /* turn off pruning (doesn't matter if this is pair-search step or not) */
    plist->bDoPrune = false;
}
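
/* Illustration only: a hypothetical per-step driver sequence for the three
 * entry points above when domain decomposition is active (the real call
 * sites live elsewhere in mdlib). Kernel launches and copy-backs are
 * asynchronous; only nbnxn_cuda_wait_gpu() blocks the host, so independent
 * CPU work can be overlapped before the waits.
 */
#if 0
static void step_driver_example(nbnxn_cuda_ptr_t cu_nb, const nbnxn_atomdata_t *nbatom,
                                int flags, real *e_lj, real *e_el, rvec *fshift)
{
    nbnxn_cuda_launch_kernel(cu_nb, nbatom, flags, eintLocal);
    nbnxn_cuda_launch_kernel(cu_nb, nbatom, flags, eintNonlocal);
    nbnxn_cuda_launch_cpyback(cu_nb, nbatom, flags, eatNonlocal);
    nbnxn_cuda_launch_cpyback(cu_nb, nbatom, flags, eatLocal);
    /* ... overlapped CPU work would go here ... */
    nbnxn_cuda_wait_gpu(cu_nb, nbatom, flags, eatNonlocal, e_lj, e_el, fshift);
    nbnxn_cuda_wait_gpu(cu_nb, nbatom, flags, eatLocal, e_lj, e_el, fshift);
}
#endif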
/*! Return the reference to the nbfp texture. */
const struct texture<float, 1, cudaReadModeElementType> &nbnxn_cuda_get_nbfp_texref()
{
    return nbfp_texref;
}

/*! Return the reference to the nbfp_comb texture. */
const struct texture<float, 1, cudaReadModeElementType> &nbnxn_cuda_get_nbfp_comb_texref()
{
    return nbfp_comb_texref;
}

/*! Return the reference to the coulomb_tab texture. */
const struct texture<float, 1, cudaReadModeElementType> &nbnxn_cuda_get_coulomb_tab_texref()
{
    return coulomb_tab_texref;
}
/*! Set up the cache configuration for the non-bonded kernels.
 */
void nbnxn_cuda_set_cacheconfig(cuda_dev_info_t *devinfo)
{
    cudaError_t stat;

    for (int i = 0; i < eelCuNR; i++)
    {
        for (int j = 0; j < evdwCuNR; j++)
        {
            if (devinfo->prop.major >= 3)
            {
                /* Default kernel on sm 3.x: 48/16 kB Shared/L1 */
                cudaFuncSetCacheConfig(nb_kfunc_ener_prune_ptr[i][j], cudaFuncCachePreferShared);
                cudaFuncSetCacheConfig(nb_kfunc_ener_noprune_ptr[i][j], cudaFuncCachePreferShared);
                cudaFuncSetCacheConfig(nb_kfunc_noener_prune_ptr[i][j], cudaFuncCachePreferShared);
                stat = cudaFuncSetCacheConfig(nb_kfunc_noener_noprune_ptr[i][j], cudaFuncCachePreferShared);
            }
            else
            {
                /* On Fermi prefer L1 gives 2% higher performance */
                /* Default kernel on sm_2.x: 16/48 kB Shared/L1 */
                cudaFuncSetCacheConfig(nb_kfunc_ener_prune_ptr[i][j], cudaFuncCachePreferL1);
                cudaFuncSetCacheConfig(nb_kfunc_ener_noprune_ptr[i][j], cudaFuncCachePreferL1);
                cudaFuncSetCacheConfig(nb_kfunc_noener_prune_ptr[i][j], cudaFuncCachePreferL1);
                stat = cudaFuncSetCacheConfig(nb_kfunc_noener_noprune_ptr[i][j], cudaFuncCachePreferL1);
            }
            CU_RET_ERR(stat, "cudaFuncSetCacheConfig failed");
        }
    }
}