#include "types/interaction_const.h"
#include "types/force_flags.h"
#include "../nbnxn_consts.h"
+#include "gmx_detect_hardware.h"
#include "nbnxn_cuda_types.h"
#include "../../gmxlib/cuda_tools/cudautils.cuh"
and the table GPU array. If called with an already allocated table,
it just re-uploads the table.
*/
-static void init_ewald_coulomb_force_table(cu_nbparam_t *nbp)
+static void init_ewald_coulomb_force_table(cu_nbparam_t *nbp,
+ const cuda_dev_info_t *dev_info)
{
float *ftmp, *coul_tab;
int tabsize;
cudaError_t stat;
nbp->coulomb_tab = coul_tab;
- cudaChannelFormatDesc cd = cudaCreateChannelDesc<float>();
- stat = cudaBindTexture(NULL, &nbnxn_cuda_get_coulomb_tab_texref(),
- coul_tab, &cd, tabsize*sizeof(*coul_tab));
- CU_RET_ERR(stat, "cudaBindTexture on coul_tab failed");
+#ifdef TEXOBJ_SUPPORTED
+ /* Only devices with CC >= 3.0 (Kepler and later) support texture objects */
+ if (dev_info->prop.major >= 3)
+ {
+ cudaResourceDesc rd;
+ memset(&rd, 0, sizeof(rd));
+ rd.resType = cudaResourceTypeLinear;
+ rd.res.linear.devPtr = nbp->coulomb_tab;
+ rd.res.linear.desc.f = cudaChannelFormatKindFloat;
+ rd.res.linear.desc.x = 32;
+ rd.res.linear.sizeInBytes = tabsize*sizeof(*coul_tab);
+
+ cudaTextureDesc td;
+ memset(&td, 0, sizeof(td));
+ td.readMode = cudaReadModeElementType;
+ stat = cudaCreateTextureObject(&nbp->coulomb_tab_texobj, &rd, &td, NULL);
+ CU_RET_ERR(stat, "cudaCreateTextureObject on coulomb_tab_texobj failed");
+ }
+ else
+#endif
+ {
+ cudaChannelFormatDesc cd = cudaCreateChannelDesc<float>();
+ stat = cudaBindTexture(NULL, &nbnxn_cuda_get_coulomb_tab_texref(),
+ coul_tab, &cd, tabsize*sizeof(*coul_tab));
+ CU_RET_ERR(stat, "cudaBindTexture on coulomb_tab_texref failed");
+ }
}
cu_copy_H2D(coul_tab, ftmp, tabsize*sizeof(*coul_tab));
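
/* Illustrative sketch, not part of this change: how device code can consume
 * the table through either path set up above. The kernels resolve the choice
 * at compile time via __CUDA_ARCH__, mirroring the runtime CC >= 3.0 check on
 * the host side. The helper name fetch_coulomb_F and the texture reference
 * name coulomb_tab_texref are assumptions for illustration.
 */
static __forceinline__ __device__
float fetch_coulomb_F(const cu_nbparam_t nbparam, int index)
{
#if defined TEXOBJ_SUPPORTED && defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
    /* texture object: travels with the parameter struct, no global state */
    return tex1Dfetch<float>(nbparam.coulomb_tab_texobj, index);
#else
    /* legacy texture reference: module-scope state bound by the host */
    return tex1Dfetch(coulomb_tab_texref, index);
#endif
}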
/*! Initializes the nonbonded parameter data structure. */
static void init_nbparam(cu_nbparam_t *nbp,
const interaction_const_t *ic,
- const nonbonded_verlet_t *nbv,
+ const nbnxn_atomdata_t *nbat,
const cuda_dev_info_t *dev_info)
{
cudaError_t stat;
int ntypes, nnbfp;
- ntypes = nbv->grp[0].nbat->ntype;
+ ntypes = nbat->ntype;
nbp->ewald_beta = ic->ewaldcoeff;
nbp->sh_ewald = ic->sh_ewald;
nbp->coulomb_tab = NULL;
if (nbp->eeltype == eelCuEWALD_TAB || nbp->eeltype == eelCuEWALD_TAB_TWIN)
{
- init_ewald_coulomb_force_table(nbp);
+ init_ewald_coulomb_force_table(nbp, dev_info);
}
nnbfp = 2*ntypes*ntypes;
stat = cudaMalloc((void **)&nbp->nbfp, nnbfp*sizeof(*nbp->nbfp));
CU_RET_ERR(stat, "cudaMalloc failed on nbp->nbfp");
- cu_copy_H2D(nbp->nbfp, nbv->grp[0].nbat->nbfp, nnbfp*sizeof(*nbp->nbfp));
+ cu_copy_H2D(nbp->nbfp, nbat->nbfp, nnbfp*sizeof(*nbp->nbfp));
- cudaChannelFormatDesc cd = cudaCreateChannelDesc<float>();
- stat = cudaBindTexture(NULL, &nbnxn_cuda_get_nbfp_texref(),
- nbp->nbfp, &cd, nnbfp*sizeof(*nbp->nbfp));
- CU_RET_ERR(stat, "cudaBindTexture on nbfp failed");
+#ifdef TEXOBJ_SUPPORTED
+ /* Only devices with CC >= 3.0 (Kepler and later) support texture objects */
+ if (dev_info->prop.major >= 3)
+ {
+ cudaResourceDesc rd;
+ memset(&rd, 0, sizeof(rd));
+ rd.resType = cudaResourceTypeLinear;
+ rd.res.linear.devPtr = nbp->nbfp;
+ rd.res.linear.desc.f = cudaChannelFormatKindFloat;
+ rd.res.linear.desc.x = 32;
+ rd.res.linear.sizeInBytes = nnbfp*sizeof(*nbp->nbfp);
+
+ cudaTextureDesc td;
+ memset(&td, 0, sizeof(td));
+ td.readMode = cudaReadModeElementType;
+ stat = cudaCreateTextureObject(&nbp->nbfp_texobj, &rd, &td, NULL);
+ CU_RET_ERR(stat, "cudaCreateTextureObject on nbfp_texobj failed");
+ }
+ else
+#endif
+ {
+ cudaChannelFormatDesc cd = cudaCreateChannelDesc<float>();
+ stat = cudaBindTexture(NULL, &nbnxn_cuda_get_nbfp_texref(),
+ nbp->nbfp, &cd, nnbfp*sizeof(*nbp->nbfp));
+ CU_RET_ERR(stat, "cudaBindTexture on nbfp_texref failed");
+ }
}
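
/* Sketch only, not part of the patch: the texture-object set-up now appears
 * twice (coulomb_tab above and nbfp here), so it could be factored into a
 * helper along these lines. The name bind_float_texobj is hypothetical. Note
 * that assigning cudaCreateChannelDesc<float>() to rd.res.linear.desc is
 * equivalent to setting desc.f and desc.x by hand as the patch does.
 */
static void bind_float_texobj(cudaTextureObject_t *texobj,
                              void *devptr, size_t n_bytes)
{
    cudaError_t      stat;
    cudaResourceDesc rd;
    cudaTextureDesc  td;

    memset(&rd, 0, sizeof(rd));
    rd.resType                = cudaResourceTypeLinear;
    rd.res.linear.devPtr      = devptr;
    rd.res.linear.desc        = cudaCreateChannelDesc<float>();
    rd.res.linear.sizeInBytes = n_bytes;

    memset(&td, 0, sizeof(td));
    td.readMode = cudaReadModeElementType;

    stat = cudaCreateTextureObject(texobj, &rd, &td, NULL);
    CU_RET_ERR(stat, "cudaCreateTextureObject failed");
}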
/*! Re-generates the GPU Ewald force table, resets rlist, and updates the
nbp->eeltype = pick_ewald_kernel_type(ic->rcoulomb != ic->rvdw,
cu_nb->dev_info);
- init_ewald_coulomb_force_table(cu_nb->nbparam);
+ init_ewald_coulomb_force_table(cu_nb->nbparam, cu_nb->dev_info);
}
/*! Initializes the pair list data structure. */
}
bCUDA32 = bCUDA40 = false;
-#if CUDA_VERSION == 3200
+#if CUDA_VERSION == 3020
bCUDA32 = true;
sprintf(sbuf, "3.2");
#elif CUDA_VERSION == 4000
void nbnxn_cuda_init(FILE *fplog,
nbnxn_cuda_ptr_t *p_cu_nb,
- gmx_gpu_info_t *gpu_info, int my_gpu_index,
+ const gmx_gpu_info_t *gpu_info,
+ const gmx_gpu_opt_t *gpu_opt,
+ int my_gpu_index,
gmx_bool bLocalAndNonlocal)
{
cudaError_t stat;
init_plist(nb->plist[eintLocal]);
+ /* set the device info; just point it to the right GPU among the detected ones */
+ nb->dev_info = &gpu_info->cuda_dev[get_gpu_device_id(gpu_info, gpu_opt, my_gpu_index)];
+
/* local/non-local GPU streams */
stat = cudaStreamCreate(&nb->stream[eintLocal]);
CU_RET_ERR(stat, "cudaStreamCreate on stream[eintLocal] failed");
if (nb->bUseTwoStreams)
{
init_plist(nb->plist[eintNonlocal]);
+
+ /* CUDA stream priorities are available in the CUDA RT 5.5 API.
+ * Note that the device we're running on does not have to support
+ * priorities, because we are querying the priority range which in this
+ * case will be a single value.
+ */
+#if CUDA_VERSION >= 5050
+ {
+ int highest_priority;
+ stat = cudaDeviceGetStreamPriorityRange(NULL, &highest_priority);
+ CU_RET_ERR(stat, "cudaDeviceGetStreamPriorityRange failed");
+
+ stat = cudaStreamCreateWithPriority(&nb->stream[eintNonlocal],
+ cudaStreamDefault,
+ highest_priority);
+ CU_RET_ERR(stat, "cudaStreamCreateWithPriority on stream[eintNonlocal] failed");
+ }
+#else
stat = cudaStreamCreate(&nb->stream[eintNonlocal]);
CU_RET_ERR(stat, "cudaStreamCreate on stream[eintNonlocal] failed");
+#endif
}
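
/* Illustrative sketch, not part of the change: querying both ends of the
 * priority range. A numerically lower value means a higher priority, so the
 * second out-parameter (the greatest priority) is the one passed above; the
 * two endpoints coincide on devices without priority support. The helper
 * name print_stream_priority_range is hypothetical.
 */
#if CUDA_VERSION >= 5050
static void print_stream_priority_range(FILE *fp)
{
    int         lowest, highest;
    cudaError_t stat;

    stat = cudaDeviceGetStreamPriorityRange(&lowest, &highest);
    CU_RET_ERR(stat, "cudaDeviceGetStreamPriorityRange failed");

    /* highest <= lowest numerically; values outside [highest, lowest] given
     * to cudaStreamCreateWithPriority are clamped, not rejected. */
    fprintf(fp, "stream priority range: %d (highest) .. %d (lowest)\n",
            highest, lowest);
}
#endif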
/* init events for synchronization (timing disabled for performance reasons!) */
stat = cudaEventCreateWithFlags(&nb->misc_ops_done, cudaEventDisableTiming);
CU_RET_ERR(stat, "cudaEventCreate on misc_ops_done failed");
- /* set device info, just point it to the right GPU among the detected ones */
- nb->dev_info = &gpu_info->cuda_dev[get_gpu_device_id(gpu_info, my_gpu_index)];
-
/* On GPUs with ECC enabled, cudaStreamSynchronize shows a large overhead
* (which increases with shorter time/step) caused by a known CUDA driver bug.
* To work around the issue we'll use an (admittedly fragile) memory polling
bTMPIAtomics = false;
#endif
-#if defined(i386) || defined(__x86_64__)
+#ifdef GMX_TARGET_X86
bX86 = true;
#else
bX86 = false;
stat = cudaDriverGetVersion(&cuda_drv_ver);
CU_RET_ERR(stat, "cudaDriverGetVersion failed");
+
bOldDriver = (cuda_drv_ver < 5000);
- if (nb->dev_info->prop.ECCEnabled == 1)
+ if ((nb->dev_info->prop.ECCEnabled == 1) && bOldDriver)
{
+ /* Polling wait should be used instead of cudaStreamSynchronize only if:
+ * - ECC is ON & driver is old (checked above),
+ * - we're on x86/x86_64,
+ * - atomics are available, and
+ * - GPUs are not being shared.
+ */
+ bool bShouldUsePollSync = (bX86 && bTMPIAtomics &&
+ (gmx_count_gpu_dev_shared(gpu_opt) < 1));
+
if (bStreamSync)
{
nb->bUseStreamSync = true;
/* only warn if polling should be used */
- if (bOldDriver && !gpu_info->bDevShare)
+ if (bShouldUsePollSync)
{
md_print_warn(fplog,
"NOTE: Using a GPU with ECC enabled and CUDA driver API version <5.0, but\n"
}
else
{
- /* Can/should turn of cudaStreamSynchronize wait only if
- * - we're on x86/x86_64
- * - atomics are available
- * - GPUs are not being shared
- * - and driver is old. */
- nb->bUseStreamSync =
- (bX86 && bTMPIAtomics && !gpu_info->bDevShare && bOldDriver) ?
- true : false;
-
- if (nb->bUseStreamSync)
+ nb->bUseStreamSync = !bShouldUsePollSync;
+
+ if (bShouldUsePollSync)
{
md_print_warn(fplog,
"NOTE: Using a GPU with ECC enabled and CUDA driver API version <5.0, known to\n"
- " cause performance loss. Switching to the alternative polling GPU waiting.\n"
+ " cause performance loss. Switching to the alternative polling GPU wait.\n"
" If you encounter issues, switch back to standard GPU waiting by setting\n"
" the GMX_CUDA_STREAMSYNC environment variable.\n");
}
- else if (bOldDriver)
+ else
{
/* Tell the user that the ECC+old driver combination can be bad */
sprintf(sbuf,
- "NOTE: Using a GPU with ECC enabled and CUDA driver API version <5.0. A bug in this\n"
- " driver can cause performance loss.\n"
- " However, the polling waiting workaround can not be used because\n%s\n"
+ "NOTE: Using a GPU with ECC enabled and CUDA driver API version <5.0.\n"
+ " A known bug in this driver version can cause performance loss.\n"
+ " However, the polling wait workaround can not be used because\n%s\n"
" Consider updating the driver or turning ECC off.",
- (!bX86 || !bTMPIAtomics) ?
- " atomic operations are not supported by the platform/CPU+compiler." :
- " GPU(s) are being oversubscribed.");
+ (bX86 && bTMPIAtomics) ?
+ " GPU(s) are being oversubscribed." :
+ " atomic operations are not supported by the platform/CPU+compiler.");
md_print_warn(fplog, sbuf);
}
}
}
}
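
/* Condensed view of the wait-mode decision in the ECC + old-driver branch
 * above (editorial summary, not part of the patch):
 *
 *   GMX_CUDA_STREAMSYNC set?   polling wait viable?   result
 *   yes                        yes                    stream sync, with a note
 *   yes                        no                     stream sync, silent
 *   no                         yes                    polling wait, with a note
 *   no                         no                     stream sync, ECC warning
 *
 * "Viable" means x86/x86_64, thread-MPI atomics available, and no GPU shared
 * between ranks (gmx_count_gpu_dev_shared(gpu_opt) < 1).
 */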
-void nbnxn_cuda_init_const(nbnxn_cuda_ptr_t cu_nb,
- const interaction_const_t *ic,
- const nonbonded_verlet_t *nbv)
+void nbnxn_cuda_init_const(nbnxn_cuda_ptr_t cu_nb,
+ const interaction_const_t *ic,
+ const nonbonded_verlet_group_t *nbv_group)
{
- init_atomdata_first(cu_nb->atdat, nbv->grp[0].nbat->ntype);
- init_nbparam(cu_nb->nbparam, ic, nbv, cu_nb->dev_info);
+ init_atomdata_first(cu_nb->atdat, nbv_group[0].nbat->ntype);
+ init_nbparam(cu_nb->nbparam, ic, nbv_group[0].nbat, cu_nb->dev_info);
/* clear energy and shift force outputs */
nbnxn_cuda_clear_e_fshift(cu_nb);
}
}
-void nbnxn_cuda_free(FILE *fplog, nbnxn_cuda_ptr_t cu_nb)
+void nbnxn_cuda_free(nbnxn_cuda_ptr_t cu_nb)
{
cudaError_t stat;
cu_atomdata_t *atdat;
if (nbparam->eeltype == eelCuEWALD_TAB || nbparam->eeltype == eelCuEWALD_TAB_TWIN)
{
- stat = cudaUnbindTexture(nbnxn_cuda_get_coulomb_tab_texref());
- CU_RET_ERR(stat, "cudaUnbindTexture on coulomb_tab failed");
- cu_free_buffered(nbparam->coulomb_tab, &nbparam->coulomb_tab_size);
+
+#ifdef TEXOBJ_SUPPORTED
+ /* Only devices with CC >= 3.0 (Kepler and later) support texture objects */
+ if (cu_nb->dev_info->prop.major >= 3)
+ {
+ stat = cudaDestroyTextureObject(nbparam->coulomb_tab_texobj);
+ CU_RET_ERR(stat, "cudaDestroyTextureObject on coulomb_tab_texobj failed");
+ }
+ else
+#endif
+ {
+ stat = cudaUnbindTexture(nbnxn_cuda_get_coulomb_tab_texref());
+ CU_RET_ERR(stat, "cudaUnbindTexture on coulomb_tab_texref failed");
+ }
+ cu_free_buffered(nbparam->coulomb_tab, &nbparam->coulomb_tab_size);
}
stat = cudaEventDestroy(cu_nb->nonlocal_done);
}
}
- stat = cudaUnbindTexture(nbnxn_cuda_get_nbfp_texref());
- CU_RET_ERR(stat, "cudaUnbindTexture on coulomb_tab failed");
+#ifdef TEXOBJ_SUPPORTED
+ /* Only devices with CC >= 3.0 (Kepler and later) support texture objects */
+ if (cu_nb->dev_info->prop.major >= 3)
+ {
+ stat = cudaDestroyTextureObject(nbparam->nbfp_texobj);
+ CU_RET_ERR(stat, "cudaDestroyTextureObject on nbfp_texobj failed");
+ }
+ else
+#endif
+ {
+ stat = cudaUnbindTexture(nbnxn_cuda_get_nbfp_texref());
+ CU_RET_ERR(stat, "cudaUnbindTexture on nbfp_texref failed");
+ }
cu_free_buffered(nbparam->nbfp);
stat = cudaFree(atdat->shift_vec);
gpu_min_ci_balanced_factor*cu_nb->dev_info->prop.multiProcessorCount : 0;
}
+
+gmx_bool nbnxn_cuda_is_kernel_ewald_analytical(const nbnxn_cuda_ptr_t cu_nb)
+{
+ return ((cu_nb->nbparam->eeltype == eelCuEWALD_ANA) ||
+ (cu_nb->nbparam->eeltype == eelCuEWALD_ANA_TWIN));
+}
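
/* Hypothetical caller sketch: the new query lets host code skip building the
 * tabulated Ewald correction when the analytical kernels are selected (only
 * the eelCuEWALD_TAB flavors need the table). make_ewald_tables is a stand-in
 * name, not an existing function.
 */
#if 0
    if (!nbnxn_cuda_is_kernel_ewald_analytical(cu_nb))
    {
        make_ewald_tables(ic); /* only the tabulated kernels need the table */
    }
#endif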