/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 *
 * This source code is part of
 *
 * GROningen MAchine for Chemical Simulations
 *
 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2012, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * If you want to redistribute modifications, please consider that
 * scientific software is very special. Version control is crucial -
 * bugs must be traceable. We will be happy to consider code for
 * inclusion in the official distribution, but derived work must not
 * be called official GROMACS. Details are found in the README & COPYING
 * files - if they are missing, get the official version at www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the papers on the package - you can find them in the top README file.
 *
 * For more info, check our website at http://www.gromacs.org
 *
 * And Hey:
 * Gallium Rubidium Oxygen Manganese Argon Carbon Silicon
 */
#include <stdlib.h>
#include <assert.h>

#if defined(_MSVC)
#include <limits>
#endif

#include "types/simple.h"
#include "types/nbnxn_pairlist.h"
#include "types/nb_verlet.h"
#include "types/ishift.h"
#include "types/force_flags.h"
#include "../nbnxn_consts.h"

#ifdef TMPI_ATOMICS
#include "thread_mpi/atomic.h"
#endif

#include "nbnxn_cuda_types.h"
#include "../../gmxlib/cuda_tools/cudautils.cuh"
#include "nbnxn_cuda.h"
#include "nbnxn_cuda_data_mgmt.h"
/*! Texture reference for nonbonded parameters; bound to cu_nbparam_t.nbfp */
texture<float, 1, cudaReadModeElementType> tex_nbfp;

/*! Texture reference for Ewald coulomb force table; bound to cu_nbparam_t.coulomb_tab */
texture<float, 1, cudaReadModeElementType> tex_coulomb_tab;
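
/* The actual binding to device memory happens in the data management module;
   schematically it follows the standard texture reference API (a sketch only:
   error handling is omitted and the buffer/size names are hypothetical):

       cudaChannelFormatDesc cd = cudaCreateChannelDesc<float>();
       cudaBindTexture(NULL, &tex_nbfp, nbfp_dev_ptr, &cd, nbfp_bytes);
 */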
/* Convenience defines */
#define NCL_PER_SUPERCL (NBNXN_GPU_NCLUSTER_PER_SUPERCLUSTER)
#define CL_SIZE         (NBNXN_GPU_CLUSTER_SIZE)
/***** The kernels come here *****/
#include "nbnxn_cuda_kernel_utils.cuh"

/* Generate all combinations of kernels through multiple inclusion:
   F, F + E, F + prune, F + E + prune. */
/** Force only **/
#include "nbnxn_cuda_kernels.cuh"
/** Force & energy **/
#define CALC_ENERGIES
#include "nbnxn_cuda_kernels.cuh"
#undef CALC_ENERGIES

/*** Pair-list pruning kernels ***/
/** Force only **/
#define PRUNE_NBL
#include "nbnxn_cuda_kernels.cuh"
/** Force & energy **/
#define CALC_ENERGIES
#include "nbnxn_cuda_kernels.cuh"
#undef CALC_ENERGIES
#undef PRUNE_NBL
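
/* The mapping from the defined macros to the generated kernel flavors
   (suffixes as used in the function pointer tables below; the exact name
   assembly lives in nbnxn_cuda_kernels.cuh):
       (none)                    -> k_nbnxn_<elec>
       CALC_ENERGIES             -> k_nbnxn_<elec>_ener
       PRUNE_NBL                 -> k_nbnxn_<elec>_prune
       CALC_ENERGIES + PRUNE_NBL -> k_nbnxn_<elec>_ener_prune
 */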
/*! Nonbonded kernel function pointer type */
typedef void (*nbnxn_cu_kfunc_ptr_t)(const cu_atomdata_t,
                                     const cu_nbparam_t,
                                     const cu_plist_t,
                                     bool);
/*********************************/

/* XXX always/never run the energy/pruning kernels -- only for benchmarking purposes */
static bool always_ener  = (getenv("GMX_GPU_ALWAYS_ENER") != NULL);
static bool never_ener   = (getenv("GMX_GPU_NEVER_ENER") != NULL);
static bool always_prune = (getenv("GMX_GPU_ALWAYS_PRUNE") != NULL);
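
/* Only the presence of these variables matters, not their value; e.g. a
   benchmarking run that computes energies on every step could be started
   as (illustrative shell invocation):
       GMX_GPU_ALWAYS_ENER=1 mdrun -nb gpu
 */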
/* Bit-pattern used for polling-based GPU synchronization. It is used as a float
 * and corresponds to having the exponent set to the maximum (127 -- single
 * precision) and the mantissa to 0.
 */
static unsigned int poll_wait_pattern = (0x7FU << 23);
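
/* For reference, the IEEE-754 single precision fields this pattern sets:
 *   bit  31      sign     -> 0
 *   bits 30-23   exponent -> 0x7F (the value shifted into place above)
 *   bits 22-0    mantissa -> 0
 * The word is written into, and later compared against, the force array
 * through an unsigned int alias; schematically:
 *
 *   union { unsigned int u; float f; } w;
 *   w.u = poll_wait_pattern;   // reinterpret the bits as a float
 */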
/*! Returns the number of blocks to be used for the nonbonded GPU kernel. */
static inline int calc_nb_kernel_nblock(int nwork_units, cuda_dev_info_t *dinfo)
{
    int max_grid_x_size;

    assert(dinfo);

    max_grid_x_size = dinfo->prop.maxGridSize[0];

    /* do we exceed the grid x dimension limit? */
    if (nwork_units > max_grid_x_size)
    {
        gmx_fatal(FARGS, "Watch out, the input system is too large to simulate!\n"
                  "The number of nonbonded work units (=number of super-clusters) exceeds the "
                  "maximum grid size in x dimension (%d > %d)!", nwork_units, max_grid_x_size);
    }

    return nwork_units;
}
/* Constant arrays listing all kernel function pointers and enabling selection
   of a kernel in an elegant manner. */

static const int nEnergyKernelTypes = 2; /* 0 - no energy, 1 - energy */
static const int nPruneKernelTypes  = 2; /* 0 - no prune, 1 - prune */

/* Default kernels */
static const nbnxn_cu_kfunc_ptr_t
nb_default_kfunc_ptr[eelCuNR][nEnergyKernelTypes][nPruneKernelTypes] =
{
    { { k_nbnxn_ewald,           k_nbnxn_ewald_prune },
      { k_nbnxn_ewald_ener,      k_nbnxn_ewald_ener_prune } },
    { { k_nbnxn_ewald_twin,      k_nbnxn_ewald_twin_prune },
      { k_nbnxn_ewald_twin_ener, k_nbnxn_ewald_twin_ener_prune } },
    { { k_nbnxn_rf,              k_nbnxn_rf_prune },
      { k_nbnxn_rf_ener,         k_nbnxn_rf_ener_prune } },
    { { k_nbnxn_cutoff,          k_nbnxn_cutoff_prune },
      { k_nbnxn_cutoff_ener,     k_nbnxn_cutoff_ener_prune } },
};
/* Legacy kernels */
static const nbnxn_cu_kfunc_ptr_t
nb_legacy_kfunc_ptr[eelCuNR][nEnergyKernelTypes][nPruneKernelTypes] =
{
    { { k_nbnxn_ewald_legacy,           k_nbnxn_ewald_prune_legacy },
      { k_nbnxn_ewald_ener_legacy,      k_nbnxn_ewald_ener_prune_legacy } },
    { { k_nbnxn_ewald_twin_legacy,      k_nbnxn_ewald_twin_prune_legacy },
      { k_nbnxn_ewald_twin_ener_legacy, k_nbnxn_ewald_twin_ener_prune_legacy } },
    { { k_nbnxn_rf_legacy,              k_nbnxn_rf_prune_legacy },
      { k_nbnxn_rf_ener_legacy,         k_nbnxn_rf_ener_prune_legacy } },
    { { k_nbnxn_cutoff_legacy,          k_nbnxn_cutoff_prune_legacy },
      { k_nbnxn_cutoff_ener_legacy,     k_nbnxn_cutoff_ener_prune_legacy } },
};
/*! Return a pointer to the kernel version to be executed at the current step. */
static inline nbnxn_cu_kfunc_ptr_t select_nbnxn_kernel(int kver, int eeltype,
                                                       bool bDoEne, bool bDoPrune)
{
    assert(kver < eNbnxnCuKNR);
    assert(eeltype < eelCuNR);

    if (NBNXN_KVER_LEGACY(kver))
    {
        return nb_legacy_kfunc_ptr[eeltype][bDoEne][bDoPrune];
    }
    else
    {
        return nb_default_kfunc_ptr[eeltype][bDoEne][bDoPrune];
    }
}
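
/* For instance, with a default (non-legacy) kernel version, reaction-field
   electrostatics, energies requested and no pruning, the lookup resolves to
   nb_default_kfunc_ptr[eeltype][1][0], i.e. k_nbnxn_rf_ener in the table
   above (with eeltype assumed to be the reaction-field entry of the eelCu
   enum). */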
/*! Calculates the amount of shared memory required for the kernel version in use. */
static inline int calc_shmem_required(int kver)
{
    int shmem;

    /* size of shmem (force-buffers/xq/atom type preloading) */
    if (NBNXN_KVER_LEGACY(kver))
    {
        /* i-atom x+q in shared memory */
        shmem  = NCL_PER_SUPERCL * CL_SIZE * sizeof(float4);
        /* force reduction buffers in shared memory */
        shmem += CL_SIZE * CL_SIZE * 3 * sizeof(float);
    }
    else
    {
        /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
        /* i-atom x+q in shared memory */
        shmem  = NCL_PER_SUPERCL * CL_SIZE * sizeof(float4);
        /* cj in shared memory, for both warps separately */
        shmem += 2 * NBNXN_GPU_JGROUP_SIZE * sizeof(int);
#ifdef IATYPE_SHMEM
        /* i-atom types in shared memory */
        shmem += NCL_PER_SUPERCL * CL_SIZE * sizeof(int);
#endif
#if __CUDA_ARCH__ < 300
        /* force reduction buffers in shared memory */
        shmem += CL_SIZE * CL_SIZE * 3 * sizeof(float);
#endif
    }

    return shmem;
}
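
/* A worked example, assuming the usual CL_SIZE == 8, NCL_PER_SUPERCL == 8
   and NBNXN_GPU_JGROUP_SIZE == 4 (see nbnxn_consts.h for the actual values).
   For the legacy kernel:
       xq preload:      8 * 8 * sizeof(float4)    = 1024 bytes
       force reduction: 8 * 8 * 3 * sizeof(float) =  768 bytes
       total:                                       1792 bytes per block
   The default kernel on sm_3.0 needs only the 1024-byte xq preload plus
   2 * 4 * sizeof(int) = 32 bytes for the cj preload. */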
/*! As we execute nonbonded workload in separate streams, before launching
   the kernel we need to make sure that the following operations have completed:
    - atomdata allocation and related H2D transfers (every nstlist step);
    - pair list H2D transfer (every nstlist step);
    - shift vector H2D transfer (every nstlist step);
    - force (+shift force and energy) output clearing (every step).

   These operations are issued in the local stream at the beginning of the step
   and therefore always complete before the local kernel launch. The non-local
   kernel is launched after the local on the same device/context, so this is
   inherently scheduled after the operations in the local stream (including the
   above "misc_ops").
   However, for the sake of having a future-proof implementation, we use the
   misc_ops_done event to record the point in time when the above operations
   are finished and synchronize with this event in the non-local stream.
 */
void nbnxn_cuda_launch_kernel(nbnxn_cuda_ptr_t cu_nb,
                              const nbnxn_atomdata_t *nbatom,
                              int flags,
                              int iloc)
{
    cudaError_t stat;
    int adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */
    /* CUDA kernel launch-related stuff */
    int shmem, nblock;
    dim3 dim_block, dim_grid;
    nbnxn_cu_kfunc_ptr_t nb_kernel = NULL; /* fn pointer to the nonbonded kernel */

    cu_atomdata_t *adat   = cu_nb->atdat;
    cu_nbparam_t  *nbp    = cu_nb->nbparam;
    cu_plist_t    *plist  = cu_nb->plist[iloc];
    cu_timers_t   *t      = cu_nb->timers;
    cudaStream_t   stream = cu_nb->stream[iloc];

    bool bCalcEner   = flags & GMX_FORCE_VIRIAL;
    bool bCalcFshift = flags & GMX_FORCE_VIRIAL;
    bool bDoTime     = cu_nb->bDoTime;

    /* turn energy calculation always on/off (for debugging/testing only) */
    bCalcEner = (bCalcEner || always_ener) && !never_ener;
    /* don't launch the kernel if there is no work to do */
    if (plist->nsci == 0)
    {
        return;
    }

    /* calculate the atom data index range based on locality */
    if (LOCAL_I(iloc))
    {
        adat_begin = 0;
        adat_len   = adat->natoms_local;
    }
    else
    {
        adat_begin = adat->natoms_local;
        adat_len   = adat->natoms - adat->natoms_local;
    }
    /* When we get here all misc operations issued in the local stream are done,
       so we record that in the local stream and wait for it in the nonlocal one. */
    if (cu_nb->bUseTwoStreams)
    {
        if (iloc == eintLocal)
        {
            stat = cudaEventRecord(cu_nb->misc_ops_done, stream);
            CU_RET_ERR(stat, "cudaEventRecord on misc_ops_done failed");
        }
        else
        {
            stat = cudaStreamWaitEvent(stream, cu_nb->misc_ops_done, 0);
            CU_RET_ERR(stat, "cudaStreamWaitEvent on misc_ops_done failed");
        }
    }
    /* beginning of timed HtoD section */
    if (bDoTime)
    {
        stat = cudaEventRecord(t->start_nb_h2d[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

    /* HtoD x, q */
    cu_copy_H2D_async(adat->xq + adat_begin, nbatom->x + adat_begin * 4,
                      adat_len * sizeof(*adat->xq), stream);

    if (bDoTime)
    {
        stat = cudaEventRecord(t->stop_nb_h2d[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

    /* beginning of timed nonbonded calculation section */
    if (bDoTime)
    {
        stat = cudaEventRecord(t->start_nb_k[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }
    /* get the pointer to the kernel flavor we need to use */
    nb_kernel = select_nbnxn_kernel(cu_nb->kernel_ver, nbp->eeltype, bCalcEner,
                                    plist->bDoPrune || always_prune);

    /* kernel launch config */
    nblock    = calc_nb_kernel_nblock(plist->nsci, cu_nb->dev_info);
    dim_block = dim3(CL_SIZE, CL_SIZE, 1);
    dim_grid  = dim3(nblock, 1, 1);
    shmem     = calc_shmem_required(cu_nb->kernel_ver);
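
    /* With the usual CL_SIZE == 8 this gives 8x8 = 64 threads per block and
       one block per super-cluster, i.e. nsci blocks in total. */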
    if (debug)
    {
        fprintf(debug, "GPU launch configuration:\n\tThread block: %dx%dx%d\n\t"
                "Grid: %dx%d\n\t#Super-clusters/clusters: %d/%d (%d)\n",
                dim_block.x, dim_block.y, dim_block.z,
                dim_grid.x, dim_grid.y, plist->nsci*NCL_PER_SUPERCL,
                NCL_PER_SUPERCL, plist->na_c);
    }

    nb_kernel<<<dim_grid, dim_block, shmem, stream>>>(*adat, *nbp, *plist, bCalcFshift);
    CU_LAUNCH_ERR("k_calc_nb");

    if (bDoTime)
    {
        stat = cudaEventRecord(t->stop_nb_k[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }
}
void nbnxn_cuda_launch_cpyback(nbnxn_cuda_ptr_t cu_nb,
                               const nbnxn_atomdata_t *nbatom,
                               int flags,
                               int aloc)
{
    cudaError_t stat;
    int adat_begin, adat_len, adat_end; /* local/nonlocal offset and length used for xq and f */
    int iloc = -1;

    /* determine interaction locality from atom locality */
    if (LOCAL_A(aloc))
    {
        iloc = eintLocal;
    }
    else if (NONLOCAL_A(aloc))
    {
        iloc = eintNonlocal;
    }
    else
    {
        char stmp[STRLEN];
        sprintf(stmp, "Invalid atom locality passed (%d); valid here is only "
                "local (%d) or nonlocal (%d)", aloc, eatLocal, eatNonlocal);
        gmx_incons(stmp);
    }

    cu_atomdata_t *adat   = cu_nb->atdat;
    cu_timers_t   *t      = cu_nb->timers;
    bool           bDoTime = cu_nb->bDoTime;
    cudaStream_t   stream = cu_nb->stream[iloc];

    bool bCalcEner   = flags & GMX_FORCE_VIRIAL;
    bool bCalcFshift = flags & GMX_FORCE_VIRIAL;
    /* don't launch copy-back if there was no work to do */
    if (cu_nb->plist[iloc]->nsci == 0)
    {
        return;
    }

    /* calculate the atom data index range based on locality */
    if (LOCAL_A(aloc))
    {
        adat_begin = 0;
        adat_len   = adat->natoms_local;
        adat_end   = cu_nb->atdat->natoms_local;
    }
    else
    {
        adat_begin = adat->natoms_local;
        adat_len   = adat->natoms - adat->natoms_local;
        adat_end   = cu_nb->atdat->natoms;
    }
    /* beginning of timed D2H section */
    if (bDoTime)
    {
        stat = cudaEventRecord(t->start_nb_d2h[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

    if (!cu_nb->bUseStreamSync)
    {
        /* For safety reasons set a few (5%) forces to NaN. This way even if the
           polling "hack" fails with some future NVIDIA driver we'll get a crash. */
        for (int i = adat_begin; i < 3*adat_end + 2; i += adat_len/20)
        {
#ifdef NAN
            nbatom->out[0].f[i] = NAN;
#else
#  ifdef _MSVC
            if (numeric_limits<float>::has_quiet_NaN)
            {
                nbatom->out[0].f[i] = numeric_limits<float>::quiet_NaN();
            }
            else
#  endif
            {
                nbatom->out[0].f[i] = GMX_REAL_MAX;
            }
#endif
        }

        /* Set the last four bytes of the force array to a bit pattern
           which can't be the result of the force calculation:
           max exponent (127) and zero mantissa. */
        *(unsigned int*)&nbatom->out[0].f[adat_end*3 - 1] = poll_wait_pattern;
    }
    /* With DD the local D2H transfer can only start after the non-local
       has been launched. */
    if (iloc == eintLocal && cu_nb->bUseTwoStreams)
    {
        stat = cudaStreamWaitEvent(stream, cu_nb->nonlocal_done, 0);
        CU_RET_ERR(stat, "cudaStreamWaitEvent on nonlocal_done failed");
    }

    /* DtoH f */
    cu_copy_D2H_async(nbatom->out[0].f + adat_begin * 3, adat->f + adat_begin,
                      (adat_len)*sizeof(*adat->f), stream);

    /* After the non-local D2H is launched the nonlocal_done event can be
       recorded which signals that the local D2H can proceed. This event is not
       placed after the non-local kernel because we first need the non-local
       data to arrive back. */
    if (iloc == eintNonlocal)
    {
        stat = cudaEventRecord(cu_nb->nonlocal_done, stream);
        CU_RET_ERR(stat, "cudaEventRecord on nonlocal_done failed");
    }
    /* only transfer energies in the local stream */
    if (LOCAL_I(iloc))
    {
        /* DtoH fshift */
        if (bCalcFshift)
        {
            cu_copy_D2H_async(cu_nb->nbst.fshift, adat->fshift,
                              SHIFTS * sizeof(*cu_nb->nbst.fshift), stream);
        }

        /* DtoH energies */
        if (bCalcEner)
        {
            cu_copy_D2H_async(cu_nb->nbst.e_lj, adat->e_lj,
                              sizeof(*cu_nb->nbst.e_lj), stream);
            cu_copy_D2H_async(cu_nb->nbst.e_el, adat->e_el,
                              sizeof(*cu_nb->nbst.e_el), stream);
        }
    }

    if (bDoTime)
    {
        stat = cudaEventRecord(t->stop_nb_d2h[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }
}
/* Atomic compare-exchange operation on unsigned values. It is used in
 * polling wait for the GPU.
 */
static inline bool atomic_cas(volatile unsigned int *ptr,
                              unsigned int oldval,
                              unsigned int newval)
{
    assert(ptr != NULL);

#ifdef TMPI_ATOMICS
    return tMPI_Atomic_cas((tMPI_Atomic_t *)ptr, oldval, newval);
#else
    gmx_incons("Atomic operations not available, atomic_cas() should not have been called!");
    return true;
#endif
}
void nbnxn_cuda_wait_gpu(nbnxn_cuda_ptr_t cu_nb,
                         const nbnxn_atomdata_t *nbatom,
                         int flags, int aloc,
                         float *e_lj, float *e_el, rvec *fshift)
{
    cudaError_t stat;
    int i, adat_end, iloc = -1;
    volatile unsigned int *poll_word;

    /* determine interaction locality from atom locality */
    if (LOCAL_A(aloc))
    {
        iloc = eintLocal;
    }
    else if (NONLOCAL_A(aloc))
    {
        iloc = eintNonlocal;
    }
    else
    {
        char stmp[STRLEN];
        sprintf(stmp, "Invalid atom locality passed (%d); valid here is only "
                "local (%d) or nonlocal (%d)", aloc, eatLocal, eatNonlocal);
        gmx_incons(stmp);
    }

    cu_plist_t      *plist   = cu_nb->plist[iloc];
    cu_timers_t     *timers  = cu_nb->timers;
    wallclock_gpu_t *timings = cu_nb->timings;
    nb_staging       nbst    = cu_nb->nbst;

    bool bCalcEner   = flags & GMX_FORCE_VIRIAL;
    bool bCalcFshift = flags & GMX_FORCE_VIRIAL;
    /* turn energy calculation always on/off (for debugging/testing only) */
    bCalcEner = (bCalcEner || always_ener) && !never_ener;

    /* don't launch wait/update timers & counters if there was no work to do

       NOTE: if timing with multiple GPUs (streams) becomes possible, the
       counters could end up being inconsistent due to not being incremented
       on some of the nodes! */
    if (cu_nb->plist[iloc]->nsci == 0)
    {
        return;
    }

    /* calculate the atom data index range based on locality */
    if (LOCAL_A(aloc))
    {
        adat_end = cu_nb->atdat->natoms_local;
    }
    else
    {
        adat_end = cu_nb->atdat->natoms;
    }
    if (cu_nb->bUseStreamSync)
    {
        stat = cudaStreamSynchronize(cu_nb->stream[iloc]);
        CU_RET_ERR(stat, "cudaStreamSynchronize failed in cu_blockwait_nb");
    }
    else
    {
        /* Busy-wait until we get the signal pattern set in the last byte
         * of the l/nl float vector. This pattern corresponds to a floating
         * point number which can't be the result of the force calculation
         * (maximum, 127 exponent and 0 mantissa).
         * The polling uses atomic compare-exchange.
         */
        poll_word = (volatile unsigned int*)&nbatom->out[0].f[adat_end*3 - 1];
        while (atomic_cas(poll_word, poll_wait_pattern, poll_wait_pattern)) {}
    }
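
    /* Note on the loop above: the compare-exchange succeeds (and returns true)
       as long as the word still holds poll_wait_pattern, so the host spins
       until the D2H force transfer overwrites the last word with force data. */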
    /* timing data accumulation */
    if (cu_nb->bDoTime)
    {
        /* only increase counter once (at local F wait) */
        if (LOCAL_I(iloc))
        {
            timings->nb_c++;
            timings->ktime[plist->bDoPrune ? 1 : 0][bCalcEner ? 1 : 0].c += 1;
        }

        /* kernel timings */
        timings->ktime[plist->bDoPrune ? 1 : 0][bCalcEner ? 1 : 0].t +=
            cu_event_elapsed(timers->start_nb_k[iloc], timers->stop_nb_k[iloc]);

        /* X/q H2D and F D2H timings */
        timings->nb_h2d_t += cu_event_elapsed(timers->start_nb_h2d[iloc],
                                              timers->stop_nb_h2d[iloc]);
        timings->nb_d2h_t += cu_event_elapsed(timers->start_nb_d2h[iloc],
                                              timers->stop_nb_d2h[iloc]);

        /* only count atdat and pair-list H2D at pair-search step */
        if (plist->bDoPrune)
        {
            /* atdat transfer timing (add only once, at local F wait) */
            if (LOCAL_A(aloc))
            {
                timings->pl_h2d_c++;
                timings->pl_h2d_t += cu_event_elapsed(timers->start_atdat,
                                                      timers->stop_atdat);
            }

            timings->pl_h2d_t += cu_event_elapsed(timers->start_pl_h2d[iloc],
                                                  timers->stop_pl_h2d[iloc]);
        }
    }
    /* add up energies and shift forces (only once at local F wait) */
    if (LOCAL_I(iloc))
    {
        if (bCalcEner)
        {
            *e_lj += *nbst.e_lj;
            *e_el += *nbst.e_el;
        }

        if (bCalcFshift)
        {
            for (i = 0; i < SHIFTS; i++)
            {
                fshift[i][0] += nbst.fshift[i].x;
                fshift[i][1] += nbst.fshift[i].y;
                fshift[i][2] += nbst.fshift[i].z;
            }
        }
    }

    /* turn off pruning (doesn't matter if this is pair-search step or not) */
    plist->bDoPrune = false;
}
/*! Return the reference to the nbfp texture. */
const struct texture<float, 1, cudaReadModeElementType> &nbnxn_cuda_get_nbfp_texref()
{
    return tex_nbfp;
}

/*! Return the reference to the coulomb_tab texture. */
const struct texture<float, 1, cudaReadModeElementType> &nbnxn_cuda_get_coulomb_tab_texref()
{
    return tex_coulomb_tab;
}
/*! Set up the cache configuration for the non-bonded kernels. */
void nbnxn_cuda_set_cacheconfig(cuda_dev_info_t *devinfo)
{
    cudaError_t stat;

    for (int i = 0; i < eelCuNR; i++)
    {
        for (int j = 0; j < nEnergyKernelTypes; j++)
        {
            for (int k = 0; k < nPruneKernelTypes; k++)
            {
                /* Legacy kernel 16/48 kB Shared/L1 */
                stat = cudaFuncSetCacheConfig(nb_legacy_kfunc_ptr[i][j][k], cudaFuncCachePreferL1);
                CU_RET_ERR(stat, "cudaFuncSetCacheConfig failed");

                if (devinfo->prop.major >= 3)
                {
                    /* Default kernel on sm 3.x: 48/16 kB Shared/L1 */
                    stat = cudaFuncSetCacheConfig(nb_default_kfunc_ptr[i][j][k], cudaFuncCachePreferShared);
                }
                else
                {
                    /* On Fermi prefer L1 gives 2% higher performance */
                    /* Default kernel on sm_2.x: 16/48 kB Shared/L1 */
                    stat = cudaFuncSetCacheConfig(nb_default_kfunc_ptr[i][j][k], cudaFuncCachePreferL1);
                }
                CU_RET_ERR(stat, "cudaFuncSetCacheConfig failed");
            }
        }
    }
}