/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 *
 *                This source code is part of
 *
 *                 G   R   O   M   A   C   S
 *
 *          GROningen MAchine for Chemical Simulations
 *
 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2012, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * If you want to redistribute modifications, please consider that
 * scientific software is very special. Version control is crucial -
 * bugs must be traceable. We will be happy to consider code for
 * inclusion in the official distribution, but derived work must not
 * be called official GROMACS. Details are found in the README & COPYING
 * files - if they are missing, get the official version at www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the papers on the package - you can find them in the top README file.
 *
 * For more info, check our website at http://www.gromacs.org
 *
 * And Hey:
 * Gallium Rubidium Oxygen Manganese Argon Carbon Silicon
 */
#include <stdlib.h>
#include <assert.h>

#if defined(_MSVC)
#include <limits>

using namespace std;
#endif

#include "types/simple.h"
#include "types/nbnxn_pairlist.h"
#include "types/nb_verlet.h"
#include "types/ishift.h"
#include "types/force_flags.h"
#include "../nbnxn_consts.h"

#ifdef TMPI_ATOMICS
#include "thread_mpi/atomic.h"
#endif

#include "nbnxn_cuda_types.h"
#include "../../gmxlib/cuda_tools/cudautils.cuh"
#include "nbnxn_cuda.h"
#include "nbnxn_cuda_data_mgmt.h"
#if defined TEXOBJ_SUPPORTED && __CUDA_ARCH__ >= 300
#define USE_TEXOBJ
#endif

/*! Texture reference for nonbonded parameters; bound to cu_nbparam_t.nbfp */
texture<float, 1, cudaReadModeElementType> nbfp_texref;

/*! Texture reference for Ewald coulomb force table; bound to cu_nbparam_t.coulomb_tab */
texture<float, 1, cudaReadModeElementType> coulomb_tab_texref;
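/* When texture objects are not available (USE_TEXOBJ unset), the kernels are
   expected to fetch through the two references above with tex1Dfetch(); the
   references get bound to the respective device buffers during data
   initialization (see the getters at the bottom of this file). */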
/* Convenience defines */
#define NCL_PER_SUPERCL (NBNXN_GPU_NCLUSTER_PER_SUPERCLUSTER)
#define CL_SIZE         (NBNXN_GPU_CLUSTER_SIZE)
/***** The kernels come here *****/
#include "nbnxn_cuda_kernel_utils.cuh"

/* Top-level kernel generation: will generate through multiple inclusion the
 * following flavors for all kernels:
 * - force-only output;
 * - force and energy output;
 * - force-only with pair list pruning;
 * - force and energy output with pair list pruning.
 */
/** Force only **/
#include "nbnxn_cuda_kernels.cuh"
/** Force & energy **/
#define CALC_ENERGIES
#include "nbnxn_cuda_kernels.cuh"
#undef CALC_ENERGIES

/*** Pair-list pruning kernels ***/
/** Force only **/
#define PRUNE_NBL
#include "nbnxn_cuda_kernels.cuh"
/** Force & energy **/
#define CALC_ENERGIES
#include "nbnxn_cuda_kernels.cuh"
#undef CALC_ENERGIES
#undef PRUNE_NBL
/*! Nonbonded kernel function pointer type */
typedef void (*nbnxn_cu_kfunc_ptr_t)(const cu_atomdata_t,
                                     const cu_nbparam_t,
                                     const cu_plist_t,
                                     bool);

/*********************************/
/* XXX always/never run the energy/pruning kernels -- only for benchmarking purposes */
static bool always_ener  = (getenv("GMX_GPU_ALWAYS_ENER") != NULL);
static bool never_ener   = (getenv("GMX_GPU_NEVER_ENER") != NULL);
static bool always_prune = (getenv("GMX_GPU_ALWAYS_PRUNE") != NULL);
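/* For example, running with GMX_GPU_ALWAYS_ENER=1 in the environment forces
   the energy kernel flavor on every step, while GMX_GPU_NEVER_ENER suppresses
   it; only the presence of the variable matters, not its value. */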
/* Bit-pattern used for polling-based GPU synchronization. It is used as a float
 * and corresponds to having the exponent set to the maximum (127 -- single
 * precision) and the mantissa to 0.
 */
static unsigned int poll_wait_pattern = (0x7FU << 23);
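/* The resulting pattern is 0x3F800000: 0x7F sits in the 8-bit exponent field
   (bits 23-30) and all 23 mantissa bits are zero. */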
/*! Returns the number of blocks to be used for the nonbonded GPU kernel. */
static inline int calc_nb_kernel_nblock(int nwork_units, cuda_dev_info_t *dinfo)
{
    int max_grid_x_size;

    assert(dinfo);
    max_grid_x_size = dinfo->prop.maxGridSize[0];

    /* do we exceed the grid x dimension limit? */
    if (nwork_units > max_grid_x_size)
    {
        gmx_fatal(FARGS, "Watch out, the input system is too large to simulate!\n"
                  "The number of nonbonded work units (=number of super-clusters) exceeds the "
                  "maximum grid size in x dimension (%d > %d)!", nwork_units, max_grid_x_size);
    }

    return nwork_units;
}
/* Constant arrays listing all kernel function pointers and enabling selection
   of a kernel in an elegant manner. */

static const int nEnergyKernelTypes = 2; /* 0 - no energy, 1 - energy */
static const int nPruneKernelTypes  = 2; /* 0 - no prune, 1 - prune */
/*! Pointers to the default kernels organized in a 3 dim array by:
 *  electrostatics type, energy calculation on/off, and pruning on/off.
 *
 *  Note that the order of electrostatics (1st dimension) has to match the
 *  order of corresponding enumerated types defined in nbnxn_cuda_types.h.
 */
static const nbnxn_cu_kfunc_ptr_t
    nb_default_kfunc_ptr[eelCuNR][nEnergyKernelTypes][nPruneKernelTypes] =
{
    { { k_nbnxn_cutoff,              k_nbnxn_cutoff_prune },
      { k_nbnxn_cutoff_ener,         k_nbnxn_cutoff_ener_prune } },
    { { k_nbnxn_rf,                  k_nbnxn_rf_prune },
      { k_nbnxn_rf_ener,             k_nbnxn_rf_ener_prune } },
    { { k_nbnxn_ewald_tab,           k_nbnxn_ewald_tab_prune },
      { k_nbnxn_ewald_tab_ener,      k_nbnxn_ewald_tab_ener_prune } },
    { { k_nbnxn_ewald_tab_twin,      k_nbnxn_ewald_tab_twin_prune },
      { k_nbnxn_ewald_tab_twin_ener, k_nbnxn_ewald_tab_twin_ener_prune } },
    { { k_nbnxn_ewald,               k_nbnxn_ewald_prune },
      { k_nbnxn_ewald_ener,          k_nbnxn_ewald_ener_prune } },
    { { k_nbnxn_ewald_twin,          k_nbnxn_ewald_twin_prune },
      { k_nbnxn_ewald_twin_ener,     k_nbnxn_ewald_twin_ener_prune } },
};
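/* Example of the indexing scheme: nb_default_kfunc_ptr[eelCuEWALD_TAB][1][0]
   is k_nbnxn_ewald_tab_ener, i.e. tabulated Ewald electrostatics with energy
   output and no pair-list pruning. */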
/*! Pointers to the legacy kernels organized in a 3 dim array by:
 *  electrostatics type, energy calculation on/off, and pruning on/off.
 *
 *  Note that the order of electrostatics (1st dimension) has to match the
 *  order of corresponding enumerated types defined in nbnxn_cuda_types.h.
 */
static const nbnxn_cu_kfunc_ptr_t
    nb_legacy_kfunc_ptr[eelCuNR][nEnergyKernelTypes][nPruneKernelTypes] =
{
    { { k_nbnxn_cutoff_legacy,              k_nbnxn_cutoff_prune_legacy },
      { k_nbnxn_cutoff_ener_legacy,         k_nbnxn_cutoff_ener_prune_legacy } },
    { { k_nbnxn_rf_legacy,                  k_nbnxn_rf_prune_legacy },
      { k_nbnxn_rf_ener_legacy,             k_nbnxn_rf_ener_prune_legacy } },
    { { k_nbnxn_ewald_tab_legacy,           k_nbnxn_ewald_tab_prune_legacy },
      { k_nbnxn_ewald_tab_ener_legacy,      k_nbnxn_ewald_tab_ener_prune_legacy } },
    { { k_nbnxn_ewald_tab_twin_legacy,      k_nbnxn_ewald_tab_twin_prune_legacy },
      { k_nbnxn_ewald_tab_twin_ener_legacy, k_nbnxn_ewald_tab_twin_ener_prune_legacy } },
};
/*! Return a pointer to the kernel version to be executed at the current step. */
static inline nbnxn_cu_kfunc_ptr_t select_nbnxn_kernel(int kver, int eeltype,
                                                       bool bDoEne, bool bDoPrune)
{
    assert(kver < eNbnxnCuKNR);
    assert(eeltype < eelCuNR);

    if (NBNXN_KVER_LEGACY(kver))
    {
        /* no analytical Ewald with legacy kernels */
        assert(eeltype <= eelCuEWALD_TAB_TWIN);

        return nb_legacy_kfunc_ptr[eeltype][bDoEne][bDoPrune];
    }
    else
    {
        return nb_default_kfunc_ptr[eeltype][bDoEne][bDoPrune];
    }
}
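/* E.g. on a prune step with reaction-field electrostatics and energy output
   requested, a non-legacy kver yields nb_default_kfunc_ptr[eelCuRF][1][1],
   i.e. k_nbnxn_rf_ener_prune. */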
/*! Calculates the amount of shared memory required for kernel version in use. */
static inline int calc_shmem_required(int kver)
{
    int shmem;

    /* size of shmem (force-buffers/xq/atom type preloading) */
    if (NBNXN_KVER_LEGACY(kver))
    {
        /* i-atom x+q in shared memory */
        shmem  = NCL_PER_SUPERCL * CL_SIZE * sizeof(float4);
        /* force reduction buffers in shared memory */
        shmem += CL_SIZE * CL_SIZE * 3 * sizeof(float);
    }
    else
    {
        /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
        /* i-atom x+q in shared memory */
        shmem  = NCL_PER_SUPERCL * CL_SIZE * sizeof(float4);
        /* cj in shared memory, for both warps separately */
        shmem += 2 * NBNXN_GPU_JGROUP_SIZE * sizeof(int);
        /* i-atom types in shared memory */
        shmem += NCL_PER_SUPERCL * CL_SIZE * sizeof(int);
#if __CUDA_ARCH__ < 300
        /* force reduction buffers in shared memory */
        shmem += CL_SIZE * CL_SIZE * 3 * sizeof(float);
#endif
    }

    return shmem;
}
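/* Worked example, assuming the constants in nbnxn_consts.h at the time of
   writing (CL_SIZE == 8, NCL_PER_SUPERCL == 8, NBNXN_GPU_JGROUP_SIZE == 4):
   the default kernel needs 8*8*16 + 2*4*4 + 8*8*4 = 1312 bytes per block on
   sm_3.0, plus another 8*8*3*4 = 768 bytes of reduction buffers below sm_3.0;
   the legacy kernel needs 1024 + 768 = 1792 bytes. */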
/*! As we execute nonbonded workload in separate streams, before launching
   the kernel we need to make sure that the following operations have completed:
    - atomdata allocation and related H2D transfers (every nstlist step);
    - pair list H2D transfer (every nstlist step);
    - shift vector H2D transfer (every nstlist step);
    - force (+shift force and energy) output clearing (every step).

   These operations are issued in the local stream at the beginning of the step
   and therefore always complete before the local kernel launch. The non-local
   kernel is launched after the local on the same device/context, so it is
   inherently scheduled after the operations in the local stream (including the
   ones listed above).

   However, for the sake of having a future-proof implementation, we use the
   misc_ops_done event to record the point in time when the above operations
   are finished and synchronize with this event in the non-local stream.
 */
void nbnxn_cuda_launch_kernel(nbnxn_cuda_ptr_t cu_nb,
                              const nbnxn_atomdata_t *nbatom,
                              int flags,
                              int iloc)
{
    cudaError_t stat;
    int adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */
    /* CUDA kernel launch-related stuff */
    int  nblock, shmem;
    dim3 dim_block, dim_grid;
    nbnxn_cu_kfunc_ptr_t nb_kernel = NULL; /* fn pointer to the nonbonded kernel */

    cu_atomdata_t *adat   = cu_nb->atdat;
    cu_nbparam_t  *nbp    = cu_nb->nbparam;
    cu_plist_t    *plist  = cu_nb->plist[iloc];
    cu_timers_t   *t      = cu_nb->timers;
    cudaStream_t   stream = cu_nb->stream[iloc];

    bool bCalcEner   = flags & GMX_FORCE_VIRIAL;
    bool bCalcFshift = flags & GMX_FORCE_VIRIAL;
    bool bDoTime     = cu_nb->bDoTime;

    /* turn energy calculation always on/off (for debugging/testing only) */
    bCalcEner = (bCalcEner || always_ener) && !never_ener;

    /* don't launch the kernel if there is no work to do */
    if (plist->nsci == 0)
    {
        return;
    }

    /* calculate the atom data index range based on locality */
    if (LOCAL_I(iloc))
    {
        adat_begin = 0;
        adat_len   = adat->natoms_local;
    }
    else
    {
        adat_begin = adat->natoms_local;
        adat_len   = adat->natoms - adat->natoms_local;
    }

    /* When we get here all misc operations issued in the local stream are done,
       so we record that in the local stream and wait for it in the nonlocal one. */
    if (cu_nb->bUseTwoStreams)
    {
        if (iloc == eintLocal)
        {
            stat = cudaEventRecord(cu_nb->misc_ops_done, stream);
            CU_RET_ERR(stat, "cudaEventRecord on misc_ops_done failed");
        }
        else
        {
            stat = cudaStreamWaitEvent(stream, cu_nb->misc_ops_done, 0);
            CU_RET_ERR(stat, "cudaStreamWaitEvent on misc_ops_done failed");
        }
    }

    /* beginning of timed HtoD section */
    if (bDoTime)
    {
        stat = cudaEventRecord(t->start_nb_h2d[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

    /* HtoD x, q */
    cu_copy_H2D_async(adat->xq + adat_begin, nbatom->x + adat_begin * 4,
                      adat_len * sizeof(*adat->xq), stream);

    if (bDoTime)
    {
        stat = cudaEventRecord(t->stop_nb_h2d[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

    /* beginning of timed nonbonded calculation section */
    if (bDoTime)
    {
        stat = cudaEventRecord(t->start_nb_k[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

    /* get the pointer to the kernel flavor we need to use */
    nb_kernel = select_nbnxn_kernel(cu_nb->kernel_ver, nbp->eeltype, bCalcEner,
                                    plist->bDoPrune || always_prune);

    /* kernel launch config */
    nblock    = calc_nb_kernel_nblock(plist->nsci, cu_nb->dev_info);
    dim_block = dim3(CL_SIZE, CL_SIZE, 1);
    dim_grid  = dim3(nblock, 1, 1);
    shmem     = calc_shmem_required(cu_nb->kernel_ver);

    if (debug)
    {
        fprintf(debug, "GPU launch configuration:\n\tThread block: %dx%dx%d\n\t"
                "Grid: %dx%d\n\t#Super-clusters/clusters: %d/%d (%d)\n",
                dim_block.x, dim_block.y, dim_block.z,
                dim_grid.x, dim_grid.y, plist->nsci*NCL_PER_SUPERCL,
                NCL_PER_SUPERCL, plist->na_c);
    }

    nb_kernel<<<dim_grid, dim_block, shmem, stream>>>(*adat, *nbp, *plist, bCalcFshift);
    CU_LAUNCH_ERR("k_calc_nb");

    if (bDoTime)
    {
        stat = cudaEventRecord(t->stop_nb_k[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }
}
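/* Note on the launch configuration above: one thread block is mapped to each
   pair-list entry (super-cluster), with CL_SIZE x CL_SIZE threads per block;
   with the current cluster size that amounts to two warps, which the kernels
   treat separately (see the cj pre-loading in calc_shmem_required()). */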
void nbnxn_cuda_launch_cpyback(nbnxn_cuda_ptr_t cu_nb,
                               const nbnxn_atomdata_t *nbatom,
                               int flags,
                               int aloc)
{
    cudaError_t stat;
    int adat_begin, adat_len, adat_end; /* local/nonlocal offset and length used for xq and f */
    int iloc = -1;

    /* determine interaction locality from atom locality */
    if (LOCAL_A(aloc))
    {
        iloc = eintLocal;
    }
    else if (NONLOCAL_A(aloc))
    {
        iloc = eintNonlocal;
    }
    else
    {
        char stmp[STRLEN];
        sprintf(stmp, "Invalid atom locality passed (%d); valid here is only "
                "local (%d) or nonlocal (%d)", aloc, eatLocal, eatNonlocal);
        gmx_incons(stmp);
    }

    cu_atomdata_t *adat    = cu_nb->atdat;
    cu_timers_t   *t       = cu_nb->timers;
    bool           bDoTime = cu_nb->bDoTime;
    cudaStream_t   stream  = cu_nb->stream[iloc];

    bool bCalcEner   = flags & GMX_FORCE_VIRIAL;
    bool bCalcFshift = flags & GMX_FORCE_VIRIAL;

    /* don't launch copy-back if there was no work to do */
    if (cu_nb->plist[iloc]->nsci == 0)
    {
        return;
    }

    /* calculate the atom data index range based on locality */
    if (LOCAL_A(aloc))
    {
        adat_begin = 0;
        adat_len   = adat->natoms_local;
        adat_end   = cu_nb->atdat->natoms_local;
    }
    else
    {
        adat_begin = adat->natoms_local;
        adat_len   = adat->natoms - adat->natoms_local;
        adat_end   = cu_nb->atdat->natoms;
    }

    /* beginning of timed D2H section */
    if (bDoTime)
    {
        stat = cudaEventRecord(t->start_nb_d2h[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

    if (!cu_nb->bUseStreamSync)
    {
        /* For safety reasons set a few (5%) forces to NaN. This way even if the
           polling "hack" fails with some future NVIDIA driver we'll get a crash. */
        for (int i = adat_begin; i < 3*adat_end + 2; i += adat_len/20)
        {
#ifdef NAN
            nbatom->out[0].f[i] = NAN;
#else
#  ifdef _MSVC
            if (numeric_limits<float>::has_quiet_NaN)
            {
                nbatom->out[0].f[i] = numeric_limits<float>::quiet_NaN();
            }
            else
#  endif
            {
                nbatom->out[0].f[i] = GMX_REAL_MAX;
            }
#endif
        }

        /* Set the last four bytes of the force array to a bit pattern
           which can't be the result of the force calculation:
           max exponent (127) and zero mantissa. */
        *(unsigned int*)&nbatom->out[0].f[adat_end*3 - 1] = poll_wait_pattern;
    }

    /* With DD the local D2H transfer can only start after the non-local
       has been launched. */
    if (iloc == eintLocal && cu_nb->bUseTwoStreams)
    {
        stat = cudaStreamWaitEvent(stream, cu_nb->nonlocal_done, 0);
        CU_RET_ERR(stat, "cudaStreamWaitEvent on nonlocal_done failed");
    }

    /* DtoH f */
    cu_copy_D2H_async(nbatom->out[0].f + adat_begin * 3, adat->f + adat_begin,
                      (adat_len)*sizeof(*adat->f), stream);

    /* After the non-local D2H is launched the nonlocal_done event can be
       recorded which signals that the local D2H can proceed. This event is not
       placed after the non-local kernel because we first need the non-local
       data back. */
    if (iloc == eintNonlocal)
    {
        stat = cudaEventRecord(cu_nb->nonlocal_done, stream);
        CU_RET_ERR(stat, "cudaEventRecord on nonlocal_done failed");
    }

    /* only transfer energies in the local stream */
    if (LOCAL_I(iloc))
    {
        /* DtoH fshift */
        if (bCalcFshift)
        {
            cu_copy_D2H_async(cu_nb->nbst.fshift, adat->fshift,
                              SHIFTS * sizeof(*cu_nb->nbst.fshift), stream);
        }
        /* DtoH energies */
        if (bCalcEner)
        {
            cu_copy_D2H_async(cu_nb->nbst.e_lj, adat->e_lj,
                              sizeof(*cu_nb->nbst.e_lj), stream);
            cu_copy_D2H_async(cu_nb->nbst.e_el, adat->e_el,
                              sizeof(*cu_nb->nbst.e_el), stream);
        }
    }

    if (bDoTime)
    {
        stat = cudaEventRecord(t->stop_nb_d2h[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }
}
/* Atomic compare-exchange operation on unsigned values. It is used in
 * polling wait for the GPU.
 */
static inline bool atomic_cas(volatile unsigned int *ptr,
                              unsigned int oldval,
                              unsigned int newval)
{
    assert(ptr);

#ifdef TMPI_ATOMICS
    return tMPI_Atomic_cas((tMPI_Atomic_t *)ptr, oldval, newval);
#else
    gmx_incons("Atomic operations not available, atomic_cas() should not have been called!");
    return true;
#endif
}
void nbnxn_cuda_wait_gpu(nbnxn_cuda_ptr_t cu_nb,
                         const nbnxn_atomdata_t *nbatom,
                         int flags, int aloc,
                         real *e_lj, real *e_el, rvec *fshift)
{
    /* NOTE: only implemented for single-precision at this time */
    cudaError_t stat;
    int i, adat_end, iloc = -1;
    volatile unsigned int *poll_word;

    /* determine interaction locality from atom locality */
    if (LOCAL_A(aloc))
    {
        iloc = eintLocal;
    }
    else if (NONLOCAL_A(aloc))
    {
        iloc = eintNonlocal;
    }
    else
    {
        char stmp[STRLEN];
        sprintf(stmp, "Invalid atom locality passed (%d); valid here is only "
                "local (%d) or nonlocal (%d)", aloc, eatLocal, eatNonlocal);
        gmx_incons(stmp);
    }

    cu_plist_t      *plist   = cu_nb->plist[iloc];
    cu_timers_t     *timers  = cu_nb->timers;
    wallclock_gpu_t *timings = cu_nb->timings;
    nb_staging       nbst    = cu_nb->nbst;

    bool bCalcEner   = flags & GMX_FORCE_VIRIAL;
    bool bCalcFshift = flags & GMX_FORCE_VIRIAL;

    /* turn energy calculation always on/off (for debugging/testing only) */
    bCalcEner = (bCalcEner || always_ener) && !never_ener;

    /* don't launch wait/update timers & counters if there was no work to do

       NOTE: if timing with multiple GPUs (streams) becomes possible, the
       counters could end up being inconsistent due to not being incremented
       on some of the nodes! */
    if (cu_nb->plist[iloc]->nsci == 0)
    {
        return;
    }

    /* calculate the atom data index range based on locality */
    if (LOCAL_A(aloc))
    {
        adat_end = cu_nb->atdat->natoms_local;
    }
    else
    {
        adat_end = cu_nb->atdat->natoms;
    }

    if (cu_nb->bUseStreamSync)
    {
        stat = cudaStreamSynchronize(cu_nb->stream[iloc]);
        CU_RET_ERR(stat, "cudaStreamSynchronize failed in cu_blockwait_nb");
    }
    else
    {
        /* Busy-wait until we get the signal pattern set in last byte
         * of the l/nl float vector. This pattern corresponds to a floating
         * point number which can't be the result of the force calculation
         * (maximum, 127 exponent and 0 mantissa).
         * The polling uses atomic compare-exchange.
         */
        poll_word = (volatile unsigned int*)&nbatom->out[0].f[adat_end*3 - 1];
        while (atomic_cas(poll_word, poll_wait_pattern, poll_wait_pattern)) {}
    }

    /* timing data accumulation */
    if (cu_nb->bDoTime)
    {
        /* only increase counter once (at local F wait) */
        if (LOCAL_I(iloc))
        {
            timings->nb_c++;
            timings->ktime[plist->bDoPrune ? 1 : 0][bCalcEner ? 1 : 0].c += 1;
        }

        /* kernel timings */
        timings->ktime[plist->bDoPrune ? 1 : 0][bCalcEner ? 1 : 0].t +=
            cu_event_elapsed(timers->start_nb_k[iloc], timers->stop_nb_k[iloc]);

        /* X/q H2D and F D2H timings */
        timings->nb_h2d_t += cu_event_elapsed(timers->start_nb_h2d[iloc],
                                              timers->stop_nb_h2d[iloc]);
        timings->nb_d2h_t += cu_event_elapsed(timers->start_nb_d2h[iloc],
                                              timers->stop_nb_d2h[iloc]);

        /* only count atdat and pair-list H2D at pair-search step */
        if (plist->bDoPrune)
        {
            /* atdat transfer timing (add only once, at local F wait) */
            if (LOCAL_A(aloc))
            {
                timings->pl_h2d_c++;
                timings->pl_h2d_t += cu_event_elapsed(timers->start_atdat,
                                                      timers->stop_atdat);
            }

            timings->pl_h2d_t += cu_event_elapsed(timers->start_pl_h2d[iloc],
                                                  timers->stop_pl_h2d[iloc]);
        }
    }

    /* add up energies and shift forces (only once at local F wait) */
    if (LOCAL_I(iloc))
    {
        if (bCalcEner)
        {
            *e_lj += *nbst.e_lj;
            *e_el += *nbst.e_el;
        }

        if (bCalcFshift)
        {
            for (i = 0; i < SHIFTS; i++)
            {
                fshift[i][0] += nbst.fshift[i].x;
                fshift[i][1] += nbst.fshift[i].y;
                fshift[i][2] += nbst.fshift[i].z;
            }
        }
    }

    /* turn off pruning (doesn't matter if this is pair-search step or not) */
    plist->bDoPrune = false;
}
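/* A minimal sketch of the per-step call sequence this file's API assumes
   (single GPU, no domain decomposition; error handling omitted):

       nbnxn_cuda_launch_kernel (cu_nb, nbatom, flags, eintLocal);
       nbnxn_cuda_launch_cpyback(cu_nb, nbatom, flags, eatLocal);
       ... overlapped CPU work ...
       nbnxn_cuda_wait_gpu(cu_nb, nbatom, flags, eatLocal, &e_lj, &e_el, fshift);
 */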
/*! Return the reference to the nbfp texture. */
const struct texture<float, 1, cudaReadModeElementType> &nbnxn_cuda_get_nbfp_texref()
{
    return nbfp_texref;
}

/*! Return the reference to the coulomb_tab. */
const struct texture<float, 1, cudaReadModeElementType> &nbnxn_cuda_get_coulomb_tab_texref()
{
    return coulomb_tab_texref;
}
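/* These getters exist because texture references must be defined at file scope
   in the translation unit that uses them. A minimal sketch of how a caller can
   bind one, assuming a hypothetical float *d_nbfp device buffer of nnbfp
   elements:

       cudaChannelFormatDesc cd = cudaCreateChannelDesc<float>();
       stat = cudaBindTexture(NULL, &nbnxn_cuda_get_nbfp_texref(),
                              d_nbfp, &cd, nnbfp * sizeof(float));
       CU_RET_ERR(stat, "cudaBindTexture on nbfp failed");
 */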
/*! Set up the cache configuration for the non-bonded kernels. */
void nbnxn_cuda_set_cacheconfig(cuda_dev_info_t *devinfo)
{
    cudaError_t stat;

    for (int i = 0; i < eelCuNR; i++)
    {
        for (int j = 0; j < nEnergyKernelTypes; j++)
        {
            for (int k = 0; k < nPruneKernelTypes; k++)
            {
                /* Legacy kernel 16/48 kB Shared/L1
                 * No analytical Ewald!
                 */
                if (i != eelCuEWALD_ANA && i != eelCuEWALD_ANA_TWIN)
                {
                    stat = cudaFuncSetCacheConfig(nb_legacy_kfunc_ptr[i][j][k], cudaFuncCachePreferL1);
                    CU_RET_ERR(stat, "cudaFuncSetCacheConfig failed");
                }

                if (devinfo->prop.major >= 3)
                {
                    /* Default kernel on sm 3.x 48/16 kB Shared/L1 */
                    stat = cudaFuncSetCacheConfig(nb_default_kfunc_ptr[i][j][k], cudaFuncCachePreferShared);
                }
                else
                {
                    /* On Fermi prefer L1 gives 2% higher performance */
                    /* Default kernel on sm_2.x 16/48 kB Shared/L1 */
                    stat = cudaFuncSetCacheConfig(nb_default_kfunc_ptr[i][j][k], cudaFuncCachePreferL1);
                }
                CU_RET_ERR(stat, "cudaFuncSetCacheConfig failed");
            }
        }
    }
}