/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include <stdlib.h>
#include <assert.h>

#if defined(_MSVC)
#include <limits>
#endif

#include "types/simple.h"
#include "types/nbnxn_pairlist.h"
#include "types/nb_verlet.h"
#include "types/ishift.h"
#include "types/force_flags.h"
#include "../nbnxn_consts.h"
#ifdef TMPI_ATOMICS
#include "thread_mpi/atomic.h"
#endif
#include "nbnxn_cuda_types.h"
#include "../../gmxlib/cuda_tools/cudautils.cuh"
#include "nbnxn_cuda.h"
#include "nbnxn_cuda_data_mgmt.h"
#if defined TEXOBJ_SUPPORTED && __CUDA_ARCH__ >= 300
#define USE_TEXOBJ
#endif
/*! Texture reference for nonbonded parameters; bound to cu_nbparam_t.nbfp */
texture<float, 1, cudaReadModeElementType> nbfp_texref;

/*! Texture reference for Ewald coulomb force table; bound to cu_nbparam_t.coulomb_tab */
texture<float, 1, cudaReadModeElementType> coulomb_tab_texref;
/* Convenience defines */
#define NCL_PER_SUPERCL (NBNXN_GPU_NCLUSTER_PER_SUPERCLUSTER)
#define CL_SIZE         (NBNXN_GPU_CLUSTER_SIZE)
/***** The kernels come here *****/
#include "nbnxn_cuda_kernel_utils.cuh"
/* Top-level kernel generation: will generate through multiple inclusion the
 * following flavors for all kernels:
 * - force-only output;
 * - force and energy output;
 * - force-only with pair list pruning;
 * - force and energy output with pair list pruning.
 */
/** Force only **/
#include "nbnxn_cuda_kernels.cuh"
/** Force & energy **/
#define CALC_ENERGIES
#include "nbnxn_cuda_kernels.cuh"
#undef CALC_ENERGIES

/*** Pair-list pruning kernels ***/
/** Force only **/
#define PRUNE_NBL
#include "nbnxn_cuda_kernels.cuh"
/** Force & energy **/
#define CALC_ENERGIES
#include "nbnxn_cuda_kernels.cuh"
#undef CALC_ENERGIES
#undef PRUNE_NBL
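/* Sketch (not compiled) of how the multiple inclusion above works: each
 * re-inclusion of nbnxn_cuda_kernels.cuh sees a different combination of
 * CALC_ENERGIES and PRUNE_NBL and emits a correspondingly named kernel
 * flavor. The header contents shown here are schematic, not the real code. */
#if 0
/* inside nbnxn_cuda_kernels.cuh (schematically, for one flavor): */
#ifdef PRUNE_NBL
#ifdef CALC_ENERGIES
__global__ void k_nbnxn_rf_ener_prune(/* ... */) { /* ... */ }
#else
__global__ void k_nbnxn_rf_prune(/* ... */) { /* ... */ }
#endif
#endif
#endif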
/*! Nonbonded kernel function pointer type */
typedef void (*nbnxn_cu_kfunc_ptr_t)(const cu_atomdata_t,
                                     const cu_nbparam_t,
                                     const cu_plist_t,
                                     bool);
/*********************************/
/* XXX always/never run the energy/pruning kernels -- only for benchmarking purposes */
static bool always_ener  = (getenv("GMX_GPU_ALWAYS_ENER") != NULL);
static bool never_ener   = (getenv("GMX_GPU_NEVER_ENER") != NULL);
static bool always_prune = (getenv("GMX_GPU_ALWAYS_PRUNE") != NULL);
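/* Example (debugging/benchmarking only): force the energy kernel flavor on
 * every step by setting the variable before launching mdrun:
 *   GMX_GPU_ALWAYS_ENER=1 mdrun ...
 * Note that only the presence of the variable is tested, not its value. */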
/* Bit-pattern used for polling-based GPU synchronization. It is used as a float
 * and corresponds to having the exponent set to the maximum (127 -- single
 * precision) and the mantissa to 0.
 */
static unsigned int poll_wait_pattern = (0x7FU << 23);
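/* Sketch (not compiled) of the polling protocol built on this pattern, as
 * used in nbnxn_cuda_launch_cpyback/nbnxn_cuda_wait_gpu below; f, natoms and
 * pw stand in for the buffers and pointers used there. The host stamps the
 * pattern into the last force word before enqueueing the D2H copy, then
 * spins until the transfer overwrites it. */
#if 0
*(unsigned int *)&f[natoms*3 - 1] = poll_wait_pattern;          /* stamp */
while (atomic_cas(pw, poll_wait_pattern, poll_wait_pattern)) {} /* spin  */
#endif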
/*! Returns the number of blocks to be used for the nonbonded GPU kernel. */
static inline int calc_nb_kernel_nblock(int nwork_units, cuda_dev_info_t *dinfo)
{
    int max_grid_x_size;

    assert(dinfo);

    max_grid_x_size = dinfo->prop.maxGridSize[0];

    /* do we exceed the grid x dimension limit? */
    if (nwork_units > max_grid_x_size)
    {
        gmx_fatal(FARGS, "Watch out, the input system is too large to simulate!\n"
                  "The number of nonbonded work units (=number of super-clusters) exceeds the "
                  "maximum grid size in x dimension (%d > %d)!", nwork_units, max_grid_x_size);
    }

    return nwork_units;
}
/* Constant arrays listing all kernel function pointers and enabling selection
   of a kernel in an elegant manner. */

static const int nEnergyKernelTypes = 2; /* 0 - no energy, 1 - energy */
static const int nPruneKernelTypes  = 2; /* 0 - no prune, 1 - prune */
/*! Pointers to the default kernels organized in a 3 dim array by:
 *  electrostatics type, energy calculation on/off, and pruning on/off.
 *
 *  Note that the order of electrostatics (1st dimension) has to match the
 *  order of corresponding enumerated types defined in nbnxn_cuda_types.h.
 */
static const nbnxn_cu_kfunc_ptr_t
    nb_default_kfunc_ptr[eelCuNR][nEnergyKernelTypes][nPruneKernelTypes] =
{
    { { k_nbnxn_cutoff,              k_nbnxn_cutoff_prune },
      { k_nbnxn_cutoff_ener,         k_nbnxn_cutoff_ener_prune } },
    { { k_nbnxn_rf,                  k_nbnxn_rf_prune },
      { k_nbnxn_rf_ener,             k_nbnxn_rf_ener_prune } },
    { { k_nbnxn_ewald_tab,           k_nbnxn_ewald_tab_prune },
      { k_nbnxn_ewald_tab_ener,      k_nbnxn_ewald_tab_ener_prune } },
    { { k_nbnxn_ewald_tab_twin,      k_nbnxn_ewald_tab_twin_prune },
      { k_nbnxn_ewald_tab_twin_ener, k_nbnxn_ewald_tab_twin_ener_prune } },
    { { k_nbnxn_ewald,               k_nbnxn_ewald_prune },
      { k_nbnxn_ewald_ener,          k_nbnxn_ewald_ener_prune } },
    { { k_nbnxn_ewald_twin,          k_nbnxn_ewald_twin_prune },
      { k_nbnxn_ewald_twin_ener,     k_nbnxn_ewald_twin_ener_prune } },
};
/*! Return a pointer to the kernel version to be executed at the current step. */
static inline nbnxn_cu_kfunc_ptr_t select_nbnxn_kernel(int  eeltype,
                                                       bool bDoEne,
                                                       bool bDoPrune)
{
    assert(eeltype < eelCuNR);

    return nb_default_kfunc_ptr[eeltype][bDoEne][bDoPrune];
}
/*! Calculates the amount of shared memory required by the CUDA kernel in use. */
static inline int calc_shmem_required()
{
    int shmem;

    /* size of shmem (force-buffers/xq/atom type preloading) */
    /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
    /* i-atom x+q in shared memory */
    shmem  = NCL_PER_SUPERCL * CL_SIZE * sizeof(float4);
    /* cj in shared memory, for both warps separately */
    shmem += 2 * NBNXN_GPU_JGROUP_SIZE * sizeof(int);
#ifdef IATYPE_SHMEM
    /* i-atom types in shared memory */
    shmem += NCL_PER_SUPERCL * CL_SIZE * sizeof(int);
#endif
#if __CUDA_ARCH__ < 300
    /* force reduction buffers in shared memory */
    shmem += CL_SIZE * CL_SIZE * 3 * sizeof(float);
#endif

    return shmem;
}
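/* Worked example (assuming the usual values NCL_PER_SUPERCL == 8,
 * CL_SIZE == 8 and NBNXN_GPU_JGROUP_SIZE == 4, with IATYPE_SHMEM unset):
 * xq preloading takes 8*8*sizeof(float4) = 1024 B and cj 2*4*sizeof(int)
 * = 32 B, i.e. 1056 B per block on sm_3.x; on sm_2.x the reduction buffers
 * add 8*8*3*sizeof(float) = 768 B, for 1824 B per block in total. */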
/*! As we execute nonbonded workload in separate streams, before launching
   the kernel we need to make sure that the following operations have completed:
   - atomdata allocation and related H2D transfers (every nstlist step);
   - pair list H2D transfer (every nstlist step);
   - shift vector H2D transfer (every nstlist step);
   - force (+shift force and energy) output clearing (every step).

   These operations are issued in the local stream at the beginning of the step
   and therefore always complete before the local kernel launch. The non-local
   kernel is launched after the local on the same device/context, so this is
   inherently scheduled after the operations in the local stream (including the
   above tasks).

   However, for the sake of having a future-proof implementation, we use the
   misc_ops_done event to record the point in time when the above operations
   are finished and synchronize with this event in the non-local stream.
 */
void nbnxn_cuda_launch_kernel(nbnxn_cuda_ptr_t        cu_nb,
                              const nbnxn_atomdata_t *nbatom,
                              int                     flags,
                              int                     iloc)
{
    cudaError_t          stat;
    int                  adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */
    /* CUDA kernel launch-related stuff */
    int                  shmem, nblock;
    dim3                 dim_block, dim_grid;
    nbnxn_cu_kfunc_ptr_t nb_kernel = NULL; /* fn pointer to the nonbonded kernel */

    cu_atomdata_t       *adat   = cu_nb->atdat;
    cu_nbparam_t        *nbp    = cu_nb->nbparam;
    cu_plist_t          *plist  = cu_nb->plist[iloc];
    cu_timers_t         *t      = cu_nb->timers;
    cudaStream_t         stream = cu_nb->stream[iloc];

    bool                 bCalcEner   = flags & GMX_FORCE_VIRIAL;
    bool                 bCalcFshift = flags & GMX_FORCE_VIRIAL;
    bool                 bDoTime     = cu_nb->bDoTime;

    /* turn energy calculation always on/off (for debugging/testing only) */
    bCalcEner = (bCalcEner || always_ener) && !never_ener;

    /* don't launch the kernel if there is no work to do */
    if (plist->nsci == 0)
    {
        return;
    }

    /* calculate the atom data index range based on locality */
    if (LOCAL_I(iloc))
    {
        adat_begin = 0;
        adat_len   = adat->natoms_local;
    }
    else
    {
        adat_begin = adat->natoms_local;
        adat_len   = adat->natoms - adat->natoms_local;
    }

    /* When we get here all misc operations issued in the local stream are done,
       so we record that in the local stream and wait for it in the nonlocal one. */
    if (cu_nb->bUseTwoStreams)
    {
        if (iloc == eintLocal)
        {
            stat = cudaEventRecord(cu_nb->misc_ops_done, stream);
            CU_RET_ERR(stat, "cudaEventRecord on misc_ops_done failed");
        }
        else
        {
            stat = cudaStreamWaitEvent(stream, cu_nb->misc_ops_done, 0);
            CU_RET_ERR(stat, "cudaStreamWaitEvent on misc_ops_done failed");
        }
    }

    /* beginning of timed HtoD section */
    if (bDoTime)
    {
        stat = cudaEventRecord(t->start_nb_h2d[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

    /* HtoD x, q */
    cu_copy_H2D_async(adat->xq + adat_begin, nbatom->x + adat_begin * 4,
                      adat_len * sizeof(*adat->xq), stream);

    if (bDoTime)
    {
        stat = cudaEventRecord(t->stop_nb_h2d[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

    /* beginning of timed nonbonded calculation section */
    if (bDoTime)
    {
        stat = cudaEventRecord(t->start_nb_k[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

    /* get the pointer to the kernel flavor we need to use */
    nb_kernel = select_nbnxn_kernel(nbp->eeltype, bCalcEner,
                                    plist->bDoPrune || always_prune);

    /* kernel launch config */
    nblock    = calc_nb_kernel_nblock(plist->nsci, cu_nb->dev_info);
    dim_block = dim3(CL_SIZE, CL_SIZE, 1);
    dim_grid  = dim3(nblock, 1, 1);
    shmem     = calc_shmem_required();

    if (debug)
    {
        fprintf(debug, "GPU launch configuration:\n\tThread block: %dx%dx%d\n\t"
                "Grid: %dx%d\n\t#Super-clusters/clusters: %d/%d (%d)\n",
                dim_block.x, dim_block.y, dim_block.z,
                dim_grid.x, dim_grid.y, plist->nsci*NCL_PER_SUPERCL,
                NCL_PER_SUPERCL, plist->na_c);
    }

    nb_kernel<<<dim_grid, dim_block, shmem, stream>>>(*adat, *nbp, *plist, bCalcFshift);
    CU_LAUNCH_ERR("k_calc_nb");

    if (bDoTime)
    {
        stat = cudaEventRecord(t->stop_nb_k[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }
}
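/* Typical per-step call sequence into this module (a simplified sketch of
 * what the nonbonded force code does; domain decomposition, the non-local
 * calls and error handling are omitted):
 *   nbnxn_cuda_launch_kernel (cu_nb, nbatom, flags, eintLocal);
 *   nbnxn_cuda_launch_cpyback(cu_nb, nbatom, flags, eatLocal);
 *   ... overlapped CPU work ...
 *   nbnxn_cuda_wait_gpu(cu_nb, nbatom, flags, eatLocal, e_lj, e_el, fshift);
 */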
void nbnxn_cuda_launch_cpyback(nbnxn_cuda_ptr_t        cu_nb,
                               const nbnxn_atomdata_t *nbatom,
                               int                     flags,
                               int                     aloc)
{
    cudaError_t stat;
    int         adat_begin, adat_len, adat_end; /* local/nonlocal offset and length used for xq and f */
    int         iloc = -1;

    /* determine interaction locality from atom locality */
    if (LOCAL_A(aloc))
    {
        iloc = eintLocal;
    }
    else if (NONLOCAL_A(aloc))
    {
        iloc = eintNonlocal;
    }
    else
    {
        char stmp[STRLEN];
        sprintf(stmp, "Invalid atom locality passed (%d); valid here is only "
                "local (%d) or nonlocal (%d)", aloc, eatLocal, eatNonlocal);
        gmx_incons(stmp);
    }

    cu_atomdata_t *adat    = cu_nb->atdat;
    cu_timers_t   *t       = cu_nb->timers;
    bool           bDoTime = cu_nb->bDoTime;
    cudaStream_t   stream  = cu_nb->stream[iloc];

    bool bCalcEner   = flags & GMX_FORCE_VIRIAL;
    bool bCalcFshift = flags & GMX_FORCE_VIRIAL;

    /* don't launch copy-back if there was no work to do */
    if (cu_nb->plist[iloc]->nsci == 0)
    {
        return;
    }

    /* calculate the atom data index range based on locality */
    if (LOCAL_A(aloc))
    {
        adat_begin = 0;
        adat_len   = adat->natoms_local;
        adat_end   = cu_nb->atdat->natoms_local;
    }
    else
    {
        adat_begin = adat->natoms_local;
        adat_len   = adat->natoms - adat->natoms_local;
        adat_end   = cu_nb->atdat->natoms;
    }

    /* beginning of timed D2H section */
    if (bDoTime)
    {
        stat = cudaEventRecord(t->start_nb_d2h[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

    if (!cu_nb->bUseStreamSync)
    {
        /* For safety reasons set a few (5%) forces to NaN. This way even if the
           polling "hack" fails with some future NVIDIA driver we'll get a crash. */
        for (int i = adat_begin; i < 3*adat_end + 2; i += adat_len/20)
        {
#ifdef NAN
            nbatom->out[0].f[i] = NAN;
#else
#  ifdef _MSVC
            if (numeric_limits<float>::has_quiet_NaN)
            {
                nbatom->out[0].f[i] = numeric_limits<float>::quiet_NaN();
            }
            else
#  endif
            {
                nbatom->out[0].f[i] = GMX_REAL_MAX;
            }
#endif
        }

        /* Set the last four bytes of the force array to a bit pattern
           which can't be the result of the force calculation:
           max exponent (127) and zero mantissa. */
        *(unsigned int*)&nbatom->out[0].f[adat_end*3 - 1] = poll_wait_pattern;
    }

    /* With DD the local D2H transfer can only start after the non-local
       has been launched. */
    if (iloc == eintLocal && cu_nb->bUseTwoStreams)
    {
        stat = cudaStreamWaitEvent(stream, cu_nb->nonlocal_done, 0);
        CU_RET_ERR(stat, "cudaStreamWaitEvent on nonlocal_done failed");
    }

    /* DtoH f */
    cu_copy_D2H_async(nbatom->out[0].f + adat_begin * 3, adat->f + adat_begin,
                      (adat_len)*sizeof(*adat->f), stream);

    /* After the non-local D2H is launched the nonlocal_done event can be
       recorded which signals that the local D2H can proceed. This event is not
       placed after the non-local kernel because we need the non-local data
       back first. */
    if (iloc == eintNonlocal)
    {
        stat = cudaEventRecord(cu_nb->nonlocal_done, stream);
        CU_RET_ERR(stat, "cudaEventRecord on nonlocal_done failed");
    }

    /* only transfer energies in the local stream */
    if (LOCAL_I(iloc))
    {
        /* DtoH fshift */
        if (bCalcFshift)
        {
            cu_copy_D2H_async(cu_nb->nbst.fshift, adat->fshift,
                              SHIFTS * sizeof(*cu_nb->nbst.fshift), stream);
        }

        /* DtoH energies */
        if (bCalcEner)
        {
            cu_copy_D2H_async(cu_nb->nbst.e_lj, adat->e_lj,
                              sizeof(*cu_nb->nbst.e_lj), stream);
            cu_copy_D2H_async(cu_nb->nbst.e_el, adat->e_el,
                              sizeof(*cu_nb->nbst.e_el), stream);
        }
    }

    if (bDoTime)
    {
        stat = cudaEventRecord(t->stop_nb_d2h[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }
}
/* Atomic compare-exchange operation on unsigned values. It is used in
 * polling wait for the GPU.
 */
static inline bool atomic_cas(volatile unsigned int *ptr,
                              unsigned int           oldval,
                              unsigned int           newval)
{
    assert(ptr);

#ifdef TMPI_ATOMICS
    return tMPI_Atomic_cas((tMPI_Atomic_t *)ptr, oldval, newval);
#else
    gmx_incons("Atomic operations not available, atomic_cas() should not have been called!");
    return true;
#endif
}
void nbnxn_cuda_wait_gpu(nbnxn_cuda_ptr_t        cu_nb,
                         const nbnxn_atomdata_t *nbatom,
                         int flags, int aloc,
                         real *e_lj, real *e_el, rvec *fshift)
{
    /* NOTE: only implemented for single-precision at this time */
    cudaError_t            stat;
    int                    i, adat_end, iloc = -1;
    volatile unsigned int *poll_word;

    /* determine interaction locality from atom locality */
    if (LOCAL_A(aloc))
    {
        iloc = eintLocal;
    }
    else if (NONLOCAL_A(aloc))
    {
        iloc = eintNonlocal;
    }
    else
    {
        char stmp[STRLEN];
        sprintf(stmp, "Invalid atom locality passed (%d); valid here is only "
                "local (%d) or nonlocal (%d)", aloc, eatLocal, eatNonlocal);
        gmx_incons(stmp);
    }

    cu_plist_t      *plist   = cu_nb->plist[iloc];
    cu_timers_t     *timers  = cu_nb->timers;
    wallclock_gpu_t *timings = cu_nb->timings;
    nb_staging       nbst    = cu_nb->nbst;

    bool bCalcEner   = flags & GMX_FORCE_VIRIAL;
    bool bCalcFshift = flags & GMX_FORCE_VIRIAL;

    /* turn energy calculation always on/off (for debugging/testing only) */
    bCalcEner = (bCalcEner || always_ener) && !never_ener;

    /* don't launch wait/update timers & counters if there was no work to do

       NOTE: if timing with multiple GPUs (streams) becomes possible, the
       counters could end up being inconsistent due to not being incremented
       on some of the nodes! */
    if (cu_nb->plist[iloc]->nsci == 0)
    {
        return;
    }

    /* calculate the atom data index range based on locality */
    if (LOCAL_A(aloc))
    {
        adat_end = cu_nb->atdat->natoms_local;
    }
    else
    {
        adat_end = cu_nb->atdat->natoms;
    }

    if (cu_nb->bUseStreamSync)
    {
        stat = cudaStreamSynchronize(cu_nb->stream[iloc]);
        CU_RET_ERR(stat, "cudaStreamSynchronize failed in cu_blockwait_nb");
    }
    else
    {
        /* Busy-wait until we get the signal pattern set in the last byte
         * of the l/nl float vector. This pattern corresponds to a floating
         * point number which can't be the result of the force calculation
         * (maximum, 127 exponent and 0 mantissa).
         * The polling uses atomic compare-exchange.
         */
        poll_word = (volatile unsigned int*)&nbatom->out[0].f[adat_end*3 - 1];
        while (atomic_cas(poll_word, poll_wait_pattern, poll_wait_pattern)) {}
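        /* Note: the compare-and-swap above exchanges the pattern with itself,
         * so it never modifies the word; it acts as an atomic read that
         * returns true while the word still holds poll_wait_pattern, i.e.
         * until the D2H copy of the real forces has overwritten it. */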
    }

    /* timing data accumulation */
    if (cu_nb->bDoTime)
    {
        /* only increase counter once (at local F wait) */
        if (LOCAL_I(iloc))
        {
            timings->nb_c++;
            timings->ktime[plist->bDoPrune ? 1 : 0][bCalcEner ? 1 : 0].c += 1;
        }

        /* kernel timings */
        timings->ktime[plist->bDoPrune ? 1 : 0][bCalcEner ? 1 : 0].t +=
            cu_event_elapsed(timers->start_nb_k[iloc], timers->stop_nb_k[iloc]);

        /* X/q H2D and F D2H timings */
        timings->nb_h2d_t += cu_event_elapsed(timers->start_nb_h2d[iloc],
                                              timers->stop_nb_h2d[iloc]);
        timings->nb_d2h_t += cu_event_elapsed(timers->start_nb_d2h[iloc],
                                              timers->stop_nb_d2h[iloc]);

        /* only count atdat and pair-list H2D at pair-search step */
        if (plist->bDoPrune)
        {
            /* atdat transfer timing (add only once, at local F wait) */
            if (LOCAL_A(aloc))
            {
                timings->pl_h2d_c++;
                timings->pl_h2d_t += cu_event_elapsed(timers->start_atdat,
                                                      timers->stop_atdat);
            }

            timings->pl_h2d_t += cu_event_elapsed(timers->start_pl_h2d[iloc],
                                                  timers->stop_pl_h2d[iloc]);
        }
    }

    /* add up energies and shift forces (only once at local F wait) */
    if (LOCAL_I(iloc))
    {
        if (bCalcEner)
        {
            *e_lj += *nbst.e_lj;
            *e_el += *nbst.e_el;
        }

        if (bCalcFshift)
        {
            for (i = 0; i < SHIFTS; i++)
            {
                fshift[i][0] += nbst.fshift[i].x;
                fshift[i][1] += nbst.fshift[i].y;
                fshift[i][2] += nbst.fshift[i].z;
            }
        }
    }

    /* turn off pruning (doesn't matter if this is pair-search step or not) */
    plist->bDoPrune = false;
}
/*! Return the reference to the nbfp texture. */
const struct texture<float, 1, cudaReadModeElementType> &nbnxn_cuda_get_nbfp_texref()
{
    return nbfp_texref;
}

/*! Return the reference to the coulomb_tab texture. */
const struct texture<float, 1, cudaReadModeElementType> &nbnxn_cuda_get_coulomb_tab_texref()
{
    return coulomb_tab_texref;
}
/*! Set up the cache configuration for the non-bonded kernels. */
void nbnxn_cuda_set_cacheconfig(cuda_dev_info_t *devinfo)
{
    cudaError_t stat;

    for (int i = 0; i < eelCuNR; i++)
    {
        for (int j = 0; j < nEnergyKernelTypes; j++)
        {
            for (int k = 0; k < nPruneKernelTypes; k++)
            {
                if (devinfo->prop.major >= 3)
                {
                    /* Default kernel on sm 3.x: 48/16 kB Shared/L1 */
                    stat = cudaFuncSetCacheConfig(nb_default_kfunc_ptr[i][j][k], cudaFuncCachePreferShared);
                }
                else
                {
                    /* On Fermi, preferring L1 gives 2% higher performance */
                    /* Default kernel on sm_2.x: 16/48 kB Shared/L1 */
                    stat = cudaFuncSetCacheConfig(nb_default_kfunc_ptr[i][j][k], cudaFuncCachePreferL1);
                }
                CU_RET_ERR(stat, "cudaFuncSetCacheConfig failed");
            }
        }
    }
}