/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 *
 *                This source code is part of
 *
 *          GROningen MAchine for Chemical Simulations
 *
 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2012, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * If you want to redistribute modifications, please consider that
 * scientific software is very special. Version control is crucial -
 * bugs must be traceable. We will be happy to consider code for
 * inclusion in the official distribution, but derived work must not
 * be called official GROMACS. Details are found in the README & COPYING
 * files - if they are missing, get the official version at www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the papers on the package - you can find them in the top README file.
 *
 * For more info, check our website at http://www.gromacs.org
 *
 * And Hey:
 * Gallium Rubidium Oxygen Manganese Argon Carbon Silicon
 */
#include <stdlib.h>
#include <assert.h>
#include <limits>

#include "types/simple.h"
#include "types/nbnxn_pairlist.h"
#include "types/nb_verlet.h"
#include "types/ishift.h"
#include "types/force_flags.h"
#include "../nbnxn_consts.h"

#ifdef TMPI_ATOMICS
#include "thread_mpi/atomic.h"
#endif

#include "nbnxn_cuda_types.h"
#include "../../gmxlib/cuda_tools/cudautils.cuh"
#include "nbnxn_cuda.h"
#include "nbnxn_cuda_data_mgmt.h"
/*! Texture reference for nonbonded parameters; bound to cu_nbparam_t.nbfp */
texture<float, 1, cudaReadModeElementType> tex_nbfp;

/*! Texture reference for Ewald coulomb force table; bound to cu_nbparam_t.coulomb_tab */
texture<float, 1, cudaReadModeElementType> tex_coulomb_tab;
/* Convenience defines */
#define NCL_PER_SUPERCL (NBNXN_GPU_NCLUSTER_PER_SUPERCLUSTER)
#define CL_SIZE         (NBNXN_GPU_CLUSTER_SIZE)
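
/* For orientation (values live in nbnxn_consts.h and are quoted here for
 * reference only): both constants are 8 in the current GPU setup, i.e. a
 * super-cluster groups 8 clusters of 8 atoms each, and the CL_SIZE x CL_SIZE
 * thread block used at launch time below is 8x8 = 64 threads, exactly two
 * warps.
 */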
/***** The kernels come here *****/
#include "nbnxn_cuda_kernel_utils.cuh"
/* Top-level kernel generation: will generate through multiple inclusion the
 * following flavors for all kernels:
 * - force-only output;
 * - force and energy output;
 * - force-only with pair list pruning;
 * - force and energy output with pair list pruning.
 */
/** Force only **/
#include "nbnxn_cuda_kernels.cuh"
/** Force & energy **/
#define CALC_ENERGIES
#include "nbnxn_cuda_kernels.cuh"
#undef CALC_ENERGIES

/*** Pair-list pruning kernels ***/
/** Force only **/
#define PRUNE_NBL
#include "nbnxn_cuda_kernels.cuh"
/** Force & energy **/
#define CALC_ENERGIES
#include "nbnxn_cuda_kernels.cuh"
#undef CALC_ENERGIES
#undef PRUNE_NBL
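
/* A minimal sketch of how the multiple inclusion above works (the macro and
 * suffix names here are illustrative only; the real name composition happens
 * inside nbnxn_cuda_kernels.cuh): each pass pastes a different suffix onto
 * the generated kernel names depending on which flavor macros are defined,
 * e.g.
 *
 *   #if defined CALC_ENERGIES && defined PRUNE_NBL
 *   #define KERNEL_SUFFIX _ener_prune
 *   #elif defined CALC_ENERGIES
 *   #define KERNEL_SUFFIX _ener
 *   #elif defined PRUNE_NBL
 *   #define KERNEL_SUFFIX _prune
 *   #endif
 *
 * which yields the four flavors per electrostatics type listed in the
 * function pointer tables below.
 */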
/*! Nonbonded kernel function pointer type */
typedef void (*nbnxn_cu_kfunc_ptr_t)(const cu_atomdata_t, const cu_nbparam_t,
                                     const cu_plist_t, bool);
/*********************************/

/* XXX always/never run the energy/pruning kernels -- only for benchmarking purposes */
static bool always_ener  = (getenv("GMX_GPU_ALWAYS_ENER") != NULL);
static bool never_ener   = (getenv("GMX_GPU_NEVER_ENER") != NULL);
static bool always_prune = (getenv("GMX_GPU_ALWAYS_PRUNE") != NULL);
/* Bit-pattern used for polling-based GPU synchronization. It is used as a float
 * and corresponds to having the exponent set to the maximum (127 -- single
 * precision) and the mantissa to 0.
 */
static unsigned int poll_wait_pattern = (0x7FU << 23);
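
/* For reference: in IEEE-754 single precision bit 31 is the sign, bits 30-23
 * hold the biased exponent, and bits 22-0 the mantissa, so (0x7FU << 23)
 * sets the exponent field to 127 with sign and mantissa zero. The pattern is
 * written into the last word of the force buffer before the D2H copy and is
 * polled for in nbnxn_cuda_wait_gpu() below.
 */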
/*! Returns the number of blocks to be used for the nonbonded GPU kernel. */
static inline int calc_nb_kernel_nblock(int nwork_units, cuda_dev_info_t *dinfo)
{
    int max_grid_x_size;

    assert(dinfo);
    max_grid_x_size = dinfo->prop.maxGridSize[0];

    /* do we exceed the grid x dimension limit? */
    if (nwork_units > max_grid_x_size)
    {
        gmx_fatal(FARGS, "Watch out, the input system is too large to simulate!\n"
                  "The number of nonbonded work units (=number of super-clusters) exceeds the "
                  "maximum grid size in x dimension (%d > %d)!", nwork_units, max_grid_x_size);
    }

    return nwork_units;
}
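
/* For scale: maxGridSize[0] is 65535 on compute capability 2.x devices and
 * 2^31-1 from compute capability 3.0 on. With one super-cluster per block and
 * the usual 64 atoms per super-cluster, the 2.x limit is only reached by
 * systems of several million atoms.
 */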
/* Constant arrays listing all kernel function pointers and enabling selection
   of a kernel in an elegant manner. */

static const int nEnergyKernelTypes = 2; /* 0 - no energy, 1 - energy */
static const int nPruneKernelTypes  = 2; /* 0 - no prune, 1 - prune */
/*! Pointers to the default kernels organized in a 3 dim array by:
 *  electrostatics type, energy calculation on/off, and pruning on/off.
 *
 *  Note that the order of electrostatics (1st dimension) has to match the
 *  order of corresponding enumerated types defined in nbnxn_cuda_types.h.
 */
static const nbnxn_cu_kfunc_ptr_t
    nb_default_kfunc_ptr[eelCuNR][nEnergyKernelTypes][nPruneKernelTypes] =
{
    { { k_nbnxn_cutoff,              k_nbnxn_cutoff_prune },
      { k_nbnxn_cutoff_ener,         k_nbnxn_cutoff_ener_prune } },
    { { k_nbnxn_rf,                  k_nbnxn_rf_prune },
      { k_nbnxn_rf_ener,             k_nbnxn_rf_ener_prune } },
    { { k_nbnxn_ewald_tab,           k_nbnxn_ewald_tab_prune },
      { k_nbnxn_ewald_tab_ener,      k_nbnxn_ewald_tab_ener_prune } },
    { { k_nbnxn_ewald_tab_twin,      k_nbnxn_ewald_tab_twin_prune },
      { k_nbnxn_ewald_tab_twin_ener, k_nbnxn_ewald_tab_twin_ener_prune } },
    { { k_nbnxn_ewald,               k_nbnxn_ewald_prune },
      { k_nbnxn_ewald_ener,          k_nbnxn_ewald_ener_prune } },
    { { k_nbnxn_ewald_twin,          k_nbnxn_ewald_twin_prune },
      { k_nbnxn_ewald_twin_ener,     k_nbnxn_ewald_twin_ener_prune } }
};
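
/* Indexing example: the layout is [eeltype][energy][prune], so e.g.
 * nb_default_kfunc_ptr[eelCuEWALD_TAB_TWIN][1][1] selects
 * k_nbnxn_ewald_tab_twin_ener_prune -- the tabulated twin-range Ewald kernel
 * that also computes energies and prunes the pair list.
 */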
/*! Pointers to the legacy kernels organized in a 3 dim array by:
 *  electrostatics type, energy calculation on/off, and pruning on/off.
 *
 *  Note that the order of electrostatics (1st dimension) has to match the
 *  order of corresponding enumerated types defined in nbnxn_cuda_types.h.
 *  There are no legacy kernels for the analytical Ewald types.
 */
static const nbnxn_cu_kfunc_ptr_t
    nb_legacy_kfunc_ptr[eelCuNR][nEnergyKernelTypes][nPruneKernelTypes] =
{
    { { k_nbnxn_cutoff_legacy,              k_nbnxn_cutoff_prune_legacy },
      { k_nbnxn_cutoff_ener_legacy,         k_nbnxn_cutoff_ener_prune_legacy } },
    { { k_nbnxn_rf_legacy,                  k_nbnxn_rf_prune_legacy },
      { k_nbnxn_rf_ener_legacy,             k_nbnxn_rf_ener_prune_legacy } },
    { { k_nbnxn_ewald_tab_legacy,           k_nbnxn_ewald_tab_prune_legacy },
      { k_nbnxn_ewald_tab_ener_legacy,      k_nbnxn_ewald_tab_ener_prune_legacy } },
    { { k_nbnxn_ewald_tab_twin_legacy,      k_nbnxn_ewald_tab_twin_prune_legacy },
      { k_nbnxn_ewald_tab_twin_ener_legacy, k_nbnxn_ewald_tab_twin_ener_prune_legacy } }
};
/*! Returns a pointer to the kernel version to be executed at the current step. */
static inline nbnxn_cu_kfunc_ptr_t select_nbnxn_kernel(int kver, int eeltype,
                                                       bool bDoEne, bool bDoPrune)
{
    assert(kver < eNbnxnCuKNR);
    assert(eeltype < eelCuNR);

    if (NBNXN_KVER_LEGACY(kver))
    {
        /* no analytical Ewald with legacy kernels */
        assert(eeltype <= eelCuEWALD_TAB_TWIN);

        return nb_legacy_kfunc_ptr[eeltype][bDoEne][bDoPrune];
    }
    else
    {
        return nb_default_kfunc_ptr[eeltype][bDoEne][bDoPrune];
    }
}
/*! Calculates the amount of shared memory required for the kernel version in use. */
static inline int calc_shmem_required(int kver)
{
    int shmem;

    /* size of shmem (force-buffers/xq/atom type preloading) */
    if (NBNXN_KVER_LEGACY(kver))
    {
        /* i-atom x+q in shared memory */
        shmem  = NCL_PER_SUPERCL * CL_SIZE * sizeof(float4);
        /* force reduction buffers in shared memory */
        shmem += CL_SIZE * CL_SIZE * 3 * sizeof(float);
    }
    else
    {
        /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
        /* i-atom x+q in shared memory */
        shmem  = NCL_PER_SUPERCL * CL_SIZE * sizeof(float4);
        /* cj in shared memory, for both warps separately */
        shmem += 2 * NBNXN_GPU_JGROUP_SIZE * sizeof(int);
        /* i-atom types in shared memory */
        shmem += NCL_PER_SUPERCL * CL_SIZE * sizeof(int);
#if __CUDA_ARCH__ < 300
        /* force reduction buffers in shared memory */
        shmem += CL_SIZE * CL_SIZE * 3 * sizeof(float);
#endif
    }

    return shmem;
}
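
/* Worked example, assuming the usual CL_SIZE = NCL_PER_SUPERCL = 8 and
 * NBNXN_GPU_JGROUP_SIZE = 4 (see nbnxn_consts.h):
 *   xq preload:      8 * 8 * sizeof(float4)    = 1024 B
 *   cj preload:      2 * 4 * sizeof(int)       =   32 B
 *   i-atom types:    8 * 8 * sizeof(int)       =  256 B
 *   force reduction: 8 * 8 * 3 * sizeof(float) =  768 B
 * i.e. 1792 B per block for the legacy kernel, and 2080 B (sm_2.x) or
 * 1312 B (sm_3.0, no reduction buffer) for the default kernel.
 */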
/*! As we execute nonbonded workload in separate streams, before launching
   the kernel we need to make sure that the following operations have completed:
   - atomdata allocation and related H2D transfers (every nstlist step);
   - pair list H2D transfer (every nstlist step);
   - shift vector H2D transfer (every nstlist step);
   - force (+shift force and energy) output clearing (every step).

   These operations are issued in the local stream at the beginning of the step
   and therefore always complete before the local kernel launch. The non-local
   kernel is launched after the local one on the same device/context, so it is
   inherently scheduled after the operations in the local stream (including the
   above "misc_ops").
   However, for the sake of having a future-proof implementation, we use the
   misc_ops_done event to record the point in time when the above operations
   are finished and synchronize with this event in the non-local stream.
 */
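
/* The event idiom used below, for reference; both calls are asynchronous with
   respect to the host:

     cudaEventRecord(ev, producer_stream);        // mark a point in the stream
     cudaStreamWaitEvent(consumer_stream, ev, 0); // order consumer after it

   Everything queued into consumer_stream after the wait executes only once the
   producer work queued before the record has completed. */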
void nbnxn_cuda_launch_kernel(nbnxn_cuda_ptr_t cu_nb,
                              const nbnxn_atomdata_t *nbatom,
                              int flags,
                              int iloc)
{
    cudaError_t stat;
    int adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */
    /* CUDA kernel launch-related stuff */
    int nblock, shmem;
    dim3 dim_block, dim_grid;
    nbnxn_cu_kfunc_ptr_t nb_kernel = NULL; /* fn pointer to the nonbonded kernel */

    cu_atomdata_t *adat   = cu_nb->atdat;
    cu_nbparam_t  *nbp    = cu_nb->nbparam;
    cu_plist_t    *plist  = cu_nb->plist[iloc];
    cu_timers_t   *t      = cu_nb->timers;
    cudaStream_t   stream = cu_nb->stream[iloc];

    bool bCalcEner   = flags & GMX_FORCE_VIRIAL;
    bool bCalcFshift = flags & GMX_FORCE_VIRIAL;
    bool bDoTime     = cu_nb->bDoTime;

    /* turn energy calculation always on/off (for debugging/testing only) */
    bCalcEner = (bCalcEner || always_ener) && !never_ener;

    /* don't launch the kernel if there is no work to do */
    if (plist->nsci == 0)
    {
        return;
    }

    /* calculate the atom data index range based on locality */
    if (LOCAL_I(iloc))
    {
        adat_begin = 0;
        adat_len   = adat->natoms_local;
    }
    else
    {
        adat_begin = adat->natoms_local;
        adat_len   = adat->natoms - adat->natoms_local;
    }
    /* When we get here all misc operations issued in the local stream are done,
       so we record that in the local stream and wait for it in the nonlocal one. */
    if (cu_nb->bUseTwoStreams)
    {
        if (iloc == eintLocal)
        {
            stat = cudaEventRecord(cu_nb->misc_ops_done, stream);
            CU_RET_ERR(stat, "cudaEventRecord on misc_ops_done failed");
        }
        else
        {
            stat = cudaStreamWaitEvent(stream, cu_nb->misc_ops_done, 0);
            CU_RET_ERR(stat, "cudaStreamWaitEvent on misc_ops_done failed");
        }
    }
    /* beginning of timed HtoD section */
    if (bDoTime)
    {
        stat = cudaEventRecord(t->start_nb_h2d[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

    /* HtoD x, q */
    cu_copy_H2D_async(adat->xq + adat_begin, nbatom->x + adat_begin * 4,
                      adat_len * sizeof(*adat->xq), stream);

    if (bDoTime)
    {
        stat = cudaEventRecord(t->stop_nb_h2d[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

    /* beginning of timed nonbonded calculation section */
    if (bDoTime)
    {
        stat = cudaEventRecord(t->start_nb_k[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

    /* get the pointer to the kernel flavor we need to use */
    nb_kernel = select_nbnxn_kernel(cu_nb->kernel_ver, nbp->eeltype, bCalcEner,
                                    plist->bDoPrune || always_prune);

    /* kernel launch config */
    nblock    = calc_nb_kernel_nblock(plist->nsci, cu_nb->dev_info);
    dim_block = dim3(CL_SIZE, CL_SIZE, 1);
    dim_grid  = dim3(nblock, 1, 1);
    shmem     = calc_shmem_required(cu_nb->kernel_ver);
    if (debug)
    {
        fprintf(debug, "GPU launch configuration:\n\tThread block: %dx%dx%d\n\t"
                "Grid: %dx%d\n\t#Super-clusters/clusters: %d/%d (%d)\n",
                dim_block.x, dim_block.y, dim_block.z,
                dim_grid.x, dim_grid.y, plist->nsci*NCL_PER_SUPERCL,
                NCL_PER_SUPERCL, plist->na_c);
    }

    nb_kernel<<<dim_grid, dim_block, shmem, stream>>>(*adat, *nbp, *plist, bCalcFshift);
    CU_LAUNCH_ERR("k_calc_nb");

    if (bDoTime)
    {
        stat = cudaEventRecord(t->stop_nb_k[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }
}
void nbnxn_cuda_launch_cpyback(nbnxn_cuda_ptr_t cu_nb,
                               const nbnxn_atomdata_t *nbatom,
                               int flags,
                               int aloc)
{
    cudaError_t stat;
    int adat_begin, adat_len, adat_end; /* local/nonlocal offset and length used for xq and f */
    int iloc = -1;

    /* determine interaction locality from atom locality */
    if (LOCAL_A(aloc))
    {
        iloc = eintLocal;
    }
    else if (NONLOCAL_A(aloc))
    {
        iloc = eintNonlocal;
    }
    else
    {
        char stmp[STRLEN];
        sprintf(stmp, "Invalid atom locality passed (%d); valid here is only "
                "local (%d) or nonlocal (%d)", aloc, eatLocal, eatNonlocal);
        gmx_incons(stmp);
    }

    cu_atomdata_t *adat    = cu_nb->atdat;
    cu_timers_t   *t       = cu_nb->timers;
    bool           bDoTime = cu_nb->bDoTime;
    cudaStream_t   stream  = cu_nb->stream[iloc];

    bool bCalcEner   = flags & GMX_FORCE_VIRIAL;
    bool bCalcFshift = flags & GMX_FORCE_VIRIAL;

    /* don't launch copy-back if there was no work to do */
    if (cu_nb->plist[iloc]->nsci == 0)
    {
        return;
    }

    /* calculate the atom data index range based on locality */
    if (LOCAL_A(aloc))
    {
        adat_begin = 0;
        adat_len   = adat->natoms_local;
        adat_end   = cu_nb->atdat->natoms_local;
    }
    else
    {
        adat_begin = adat->natoms_local;
        adat_len   = adat->natoms - adat->natoms_local;
        adat_end   = cu_nb->atdat->natoms;
    }
    /* beginning of timed D2H section */
    if (bDoTime)
    {
        stat = cudaEventRecord(t->start_nb_d2h[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

    if (!cu_nb->bUseStreamSync)
    {
        /* For safety reasons set a few (5%) forces to NaN. This way even if the
           polling "hack" fails with some future NVIDIA driver we'll get a crash. */
        for (int i = adat_begin; i < 3*adat_end + 2; i += adat_len/20)
        {
#ifdef NAN
            nbatom->out[0].f[i] = NAN;
#else
            if (std::numeric_limits<float>::has_quiet_NaN)
            {
                nbatom->out[0].f[i] = std::numeric_limits<float>::quiet_NaN();
            }
            else
            {
                nbatom->out[0].f[i] = GMX_REAL_MAX;
            }
#endif
        }

        /* Set the last four bytes of the force array to a bit pattern
           which can't be the result of the force calculation:
           max exponent (127) and zero mantissa. */
        *(unsigned int*)&nbatom->out[0].f[adat_end*3 - 1] = poll_wait_pattern;
    }
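
    /* Note: the type-punning store above relies on compiler leniency towards
     * strict-aliasing violations; a standards-clean equivalent (a sketch, not
     * what this code does) would go through memcpy:
     *
     *   unsigned int pattern = poll_wait_pattern;
     *   memcpy(&nbatom->out[0].f[adat_end*3 - 1], &pattern, sizeof(pattern));
     */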
    /* With DD the local D2H transfer can only start after the non-local
       one has been launched. */
    if (iloc == eintLocal && cu_nb->bUseTwoStreams)
    {
        stat = cudaStreamWaitEvent(stream, cu_nb->nonlocal_done, 0);
        CU_RET_ERR(stat, "cudaStreamWaitEvent on nonlocal_done failed");
    }

    /* DtoH f */
    cu_copy_D2H_async(nbatom->out[0].f + adat_begin * 3, adat->f + adat_begin,
                      (adat_len)*sizeof(*adat->f), stream);

    /* After the non-local D2H is launched the nonlocal_done event can be
       recorded which signals that the local D2H can proceed. This event is not
       placed after the non-local kernel because we need the non-local
       data back first. */
    if (iloc == eintNonlocal)
    {
        stat = cudaEventRecord(cu_nb->nonlocal_done, stream);
        CU_RET_ERR(stat, "cudaEventRecord on nonlocal_done failed");
    }
    /* only transfer energies in the local stream */
    if (LOCAL_I(iloc))
    {
        /* DtoH fshift */
        if (bCalcFshift)
        {
            cu_copy_D2H_async(cu_nb->nbst.fshift, adat->fshift,
                              SHIFTS * sizeof(*cu_nb->nbst.fshift), stream);
        }
        /* DtoH energies */
        if (bCalcEner)
        {
            cu_copy_D2H_async(cu_nb->nbst.e_lj, adat->e_lj,
                              sizeof(*cu_nb->nbst.e_lj), stream);
            cu_copy_D2H_async(cu_nb->nbst.e_el, adat->e_el,
                              sizeof(*cu_nb->nbst.e_el), stream);
        }
    }

    if (bDoTime)
    {
        stat = cudaEventRecord(t->stop_nb_d2h[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }
}
/* Atomic compare-exchange operation on unsigned values; it is used in the
 * polling wait for the GPU.
 */
static inline bool atomic_cas(volatile unsigned int *ptr,
                              unsigned int oldval, unsigned int newval)
{
#ifdef TMPI_ATOMICS
    return tMPI_Atomic_cas((tMPI_Atomic_t *)ptr, oldval, newval);
#else
    gmx_incons("Atomic operations not available, atomic_cas() should not have been called!");
    return true;
#endif
}
void nbnxn_cuda_wait_gpu(nbnxn_cuda_ptr_t cu_nb,
                         const nbnxn_atomdata_t *nbatom,
                         int flags, int aloc,
                         real *e_lj, real *e_el, rvec *fshift)
{
    /* NOTE: only implemented for single-precision at this time */
    cudaError_t stat;
    int i, adat_end, iloc = -1;
    volatile unsigned int *poll_word;

    /* determine interaction locality from atom locality */
    if (LOCAL_A(aloc))
    {
        iloc = eintLocal;
    }
    else if (NONLOCAL_A(aloc))
    {
        iloc = eintNonlocal;
    }
    else
    {
        char stmp[STRLEN];
        sprintf(stmp, "Invalid atom locality passed (%d); valid here is only "
                "local (%d) or nonlocal (%d)", aloc, eatLocal, eatNonlocal);
        gmx_incons(stmp);
    }

    cu_plist_t      *plist   = cu_nb->plist[iloc];
    cu_timers_t     *timers  = cu_nb->timers;
    wallclock_gpu_t *timings = cu_nb->timings;
    nb_staging       nbst    = cu_nb->nbst;

    bool bCalcEner   = flags & GMX_FORCE_VIRIAL;
    bool bCalcFshift = flags & GMX_FORCE_VIRIAL;

    /* turn energy calculation always on/off (for debugging/testing only) */
    bCalcEner = (bCalcEner || always_ener) && !never_ener;
    /* don't launch wait/update timers & counters if there was no work to do

       NOTE: if timing with multiple GPUs (streams) becomes possible, the
       counters could end up being inconsistent due to not being incremented
       on some of the nodes! */
    if (cu_nb->plist[iloc]->nsci == 0)
    {
        return;
    }

    /* calculate the atom data index range based on locality */
    if (LOCAL_A(aloc))
    {
        adat_end = cu_nb->atdat->natoms_local;
    }
    else
    {
        adat_end = cu_nb->atdat->natoms;
    }

    if (cu_nb->bUseStreamSync)
    {
        stat = cudaStreamSynchronize(cu_nb->stream[iloc]);
        CU_RET_ERR(stat, "cudaStreamSynchronize failed in nbnxn_cuda_wait_gpu");
    }
    else
    {
        /* Busy-wait until we get the signal pattern set in the last 4 bytes
         * of the l/nl float vector. This pattern corresponds to a floating
         * point number which can't be the result of the force calculation
         * (maximum exponent (127) and zero mantissa).
         * The polling uses atomic compare-exchange.
         */
        poll_word = (volatile unsigned int*)&nbatom->out[0].f[adat_end*3 - 1];
        while (atomic_cas(poll_word, poll_wait_pattern, poll_wait_pattern)) {}
    }
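
    /* How the poll above works: atomic_cas(p, old, new) writes new only if *p
     * still equals old and reports whether it did. With old == new ==
     * poll_wait_pattern the call is a side-effect-free test that returns true
     * exactly as long as the word still holds the pattern, so the loop exits
     * once the GPU's force D2H copy has overwritten it with real force data.
     */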
    /* timing data accumulation */
    if (cu_nb->bDoTime)
    {
        /* only increase counter once (at local F wait) */
        if (LOCAL_I(iloc))
        {
            timings->nb_c++;
            timings->ktime[plist->bDoPrune ? 1 : 0][bCalcEner ? 1 : 0].c += 1;
        }
        /* kernel timings */
        timings->ktime[plist->bDoPrune ? 1 : 0][bCalcEner ? 1 : 0].t +=
            cu_event_elapsed(timers->start_nb_k[iloc], timers->stop_nb_k[iloc]);
        /* X/q H2D and F D2H timings */
        timings->nb_h2d_t += cu_event_elapsed(timers->start_nb_h2d[iloc],
                                              timers->stop_nb_h2d[iloc]);
        timings->nb_d2h_t += cu_event_elapsed(timers->start_nb_d2h[iloc],
                                              timers->stop_nb_d2h[iloc]);

        /* only count atdat and pair-list H2D at pair-search step */
        if (plist->bDoPrune)
        {
            /* atdat transfer timing (add only once, at local F wait) */
            if (LOCAL_A(aloc))
            {
                timings->pl_h2d_c++;
                timings->pl_h2d_t += cu_event_elapsed(timers->start_atdat,
                                                      timers->stop_atdat);
            }
            timings->pl_h2d_t += cu_event_elapsed(timers->start_pl_h2d[iloc],
                                                  timers->stop_pl_h2d[iloc]);
        }
    }

    /* add up energies and shift forces (only once at local F wait) */
    if (LOCAL_I(iloc))
    {
        if (bCalcEner)
        {
            *e_lj += *nbst.e_lj;
            *e_el += *nbst.e_el;
        }
        if (bCalcFshift)
        {
            for (i = 0; i < SHIFTS; i++)
            {
                fshift[i][0] += nbst.fshift[i].x;
                fshift[i][1] += nbst.fshift[i].y;
                fshift[i][2] += nbst.fshift[i].z;
            }
        }
    }

    /* turn off pruning (doesn't matter if this is pair-search step or not) */
    plist->bDoPrune = false;
}
/*! Returns the reference to the nbfp texture. */
const struct texture<float, 1, cudaReadModeElementType>& nbnxn_cuda_get_nbfp_texref()
{
    return tex_nbfp;
}

/*! Returns the reference to the coulomb_tab texture. */
const struct texture<float, 1, cudaReadModeElementType>& nbnxn_cuda_get_coulomb_tab_texref()
{
    return tex_coulomb_tab;
}
/*! Sets up the cache configuration for the non-bonded kernels.
 */
void nbnxn_cuda_set_cacheconfig(cuda_dev_info_t *devinfo)
{
    cudaError_t stat;

    for (int i = 0; i < eelCuNR; i++)
    {
        for (int j = 0; j < nEnergyKernelTypes; j++)
        {
            for (int k = 0; k < nPruneKernelTypes; k++)
            {
                /* Legacy kernels: 16/48 kB Shared/L1.
                 * No analytical Ewald! */
                if (i != eelCuEWALD_ANA && i != eelCuEWALD_ANA_TWIN)
                {
                    stat = cudaFuncSetCacheConfig(nb_legacy_kfunc_ptr[i][j][k], cudaFuncCachePreferL1);
                    CU_RET_ERR(stat, "cudaFuncSetCacheConfig failed");
                }

                if (devinfo->prop.major >= 3)
                {
                    /* Default kernel on sm 3.x: 48/16 kB Shared/L1 */
                    stat = cudaFuncSetCacheConfig(nb_default_kfunc_ptr[i][j][k], cudaFuncCachePreferShared);
                }
                else
                {
                    /* On Fermi prefer L1 gives 2% higher performance */
                    /* Default kernel on sm_2.x: 16/48 kB Shared/L1 */
                    stat = cudaFuncSetCacheConfig(nb_default_kfunc_ptr[i][j][k], cudaFuncCachePreferL1);
                }
                CU_RET_ERR(stat, "cudaFuncSetCacheConfig failed");
            }
        }
    }
}
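
/* Typical usage (an assumption based on the data-management module, not shown
 * here): called once per device during nonbonded setup, before any kernel
 * launch, e.g.
 *
 *   nbnxn_cuda_set_cacheconfig(cu_nb->dev_info);
 */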