#include "types/interaction_const.h"
#include "types/force_flags.h"
#include "../nbnxn_consts.h"
+#include "gmx_detect_hardware.h"
#include "nbnxn_cuda_types.h"
#include "../../gmxlib/cuda_tools/cudautils.cuh"
and the table GPU array. If called with an already allocated table,
it just re-uploads the table.
*/
-static void init_ewald_coulomb_force_table(cu_nbparam_t *nbp)
+static void init_ewald_coulomb_force_table(cu_nbparam_t *nbp,
+ const cuda_dev_info_t *dev_info)
{
float *ftmp, *coul_tab;
int tabsize;
nbp->coulomb_tab = coul_tab;
- cudaChannelFormatDesc cd = cudaCreateChannelDesc<float>();
- stat = cudaBindTexture(NULL, &nbnxn_cuda_get_coulomb_tab_texref(),
- coul_tab, &cd, tabsize*sizeof(*coul_tab));
- CU_RET_ERR(stat, "cudaBindTexture on coul_tab failed");
+#ifdef TEXOBJ_SUPPORTED
+ /* Only device CC >= 3.0 (Kepler and later) support texture objects */
+ if (dev_info->prop.major >= 3)
+ {
+ cudaResourceDesc rd;
+ memset(&rd, 0, sizeof(rd));
+ rd.resType = cudaResourceTypeLinear;
+ rd.res.linear.devPtr = nbp->coulomb_tab;
+ rd.res.linear.desc.f = cudaChannelFormatKindFloat;
+ rd.res.linear.desc.x = 32;
+ rd.res.linear.sizeInBytes = tabsize*sizeof(*coul_tab);
+
+ cudaTextureDesc td;
+ memset(&td, 0, sizeof(td));
+ td.readMode = cudaReadModeElementType;
+ stat = cudaCreateTextureObject(&nbp->coulomb_tab_texobj, &rd, &td, NULL);
+ CU_RET_ERR(stat, "cudaCreateTextureObject on coulomb_tab_texobj failed");
+ }
+ else
+#endif
+ {
+ cudaChannelFormatDesc cd = cudaCreateChannelDesc<float>();
+ stat = cudaBindTexture(NULL, &nbnxn_cuda_get_coulomb_tab_texref(),
+ coul_tab, &cd, tabsize*sizeof(*coul_tab));
+ CU_RET_ERR(stat, "cudaBindTexture on coulomb_tab_texref failed");
+ }
}
cu_copy_H2D(coul_tab, ftmp, tabsize*sizeof(*coul_tab));
nbp->coulomb_tab = NULL;
if (nbp->eeltype == eelCuEWALD_TAB || nbp->eeltype == eelCuEWALD_TAB_TWIN)
{
- init_ewald_coulomb_force_table(nbp);
+ init_ewald_coulomb_force_table(nbp, dev_info);
}
nnbfp = 2*ntypes*ntypes;
CU_RET_ERR(stat, "cudaMalloc failed on nbp->nbfp");
cu_copy_H2D(nbp->nbfp, nbat->nbfp, nnbfp*sizeof(*nbp->nbfp));
- cudaChannelFormatDesc cd = cudaCreateChannelDesc<float>();
- stat = cudaBindTexture(NULL, &nbnxn_cuda_get_nbfp_texref(),
- nbp->nbfp, &cd, nnbfp*sizeof(*nbp->nbfp));
- CU_RET_ERR(stat, "cudaBindTexture on nbfp failed");
+#ifdef TEXOBJ_SUPPORTED
+ /* Only device CC >= 3.0 (Kepler and later) support texture objects */
+ if (dev_info->prop.major >= 3)
+ {
+ cudaResourceDesc rd;
+ memset(&rd, 0, sizeof(rd));
+ rd.resType = cudaResourceTypeLinear;
+ rd.res.linear.devPtr = nbp->nbfp;
+ rd.res.linear.desc.f = cudaChannelFormatKindFloat;
+ rd.res.linear.desc.x = 32;
+ rd.res.linear.sizeInBytes = nnbfp*sizeof(*nbp->nbfp);
+
+ cudaTextureDesc td;
+ memset(&td, 0, sizeof(td));
+ td.readMode = cudaReadModeElementType;
+ stat = cudaCreateTextureObject(&nbp->nbfp_texobj, &rd, &td, NULL);
+ CU_RET_ERR(stat, "cudaCreateTextureObject on nbfp_texobj failed");
+ }
+ else
+#endif
+ {
+ cudaChannelFormatDesc cd = cudaCreateChannelDesc<float>();
+ stat = cudaBindTexture(NULL, &nbnxn_cuda_get_nbfp_texref(),
+ nbp->nbfp, &cd, nnbfp*sizeof(*nbp->nbfp));
+ CU_RET_ERR(stat, "cudaBindTexture on nbfp_texref failed");
+ }
}
/*! Re-generate the GPU Ewald force table, resets rlist, and update the
nbp->eeltype = pick_ewald_kernel_type(ic->rcoulomb != ic->rvdw,
cu_nb->dev_info);
- init_ewald_coulomb_force_table(cu_nb->nbparam);
+ init_ewald_coulomb_force_table(cu_nb->nbparam, cu_nb->dev_info);
}
/*! Initializes the pair list data structure. */
}
bCUDA32 = bCUDA40 = false;
-#if CUDA_VERSION == 3200
+#if CUDA_VERSION == 3020
bCUDA32 = true;
sprintf(sbuf, "3.2");
#elif CUDA_VERSION == 4000
void nbnxn_cuda_init(FILE *fplog,
nbnxn_cuda_ptr_t *p_cu_nb,
- gmx_gpu_info_t *gpu_info, int my_gpu_index,
+ const gmx_gpu_info_t *gpu_info,
+ const gmx_gpu_opt_t *gpu_opt,
+ int my_gpu_index,
gmx_bool bLocalAndNonlocal)
{
cudaError_t stat;
init_plist(nb->plist[eintLocal]);
+ /* set device info, just point it to the right GPU among the detected ones */
+ nb->dev_info = &gpu_info->cuda_dev[get_gpu_device_id(gpu_info, gpu_opt, my_gpu_index)];
+
/* local/non-local GPU streams */
stat = cudaStreamCreate(&nb->stream[eintLocal]);
CU_RET_ERR(stat, "cudaStreamCreate on stream[eintLocal] failed");
if (nb->bUseTwoStreams)
{
init_plist(nb->plist[eintNonlocal]);
+
+ /* CUDA stream priority available in the CUDA RT 5.5 API.
+ * Note that the device we're running on does not have to support
+ * priorities, because we are querying the priority range which in this
+ * case will be a single value.
+ */
+#if CUDA_VERSION >= 5050
+ {
+ int highest_priority;
+ stat = cudaDeviceGetStreamPriorityRange(NULL, &highest_priority);
+ CU_RET_ERR(stat, "cudaDeviceGetStreamPriorityRange failed");
+
+ stat = cudaStreamCreateWithPriority(&nb->stream[eintNonlocal],
+ cudaStreamDefault,
+ highest_priority);
+ CU_RET_ERR(stat, "cudaStreamCreateWithPriority on stream[eintNonlocal] failed");
+ }
+#else
stat = cudaStreamCreate(&nb->stream[eintNonlocal]);
CU_RET_ERR(stat, "cudaStreamCreate on stream[eintNonlocal] failed");
+#endif
}
/* init events for synchronization (timing disabled for performance reasons!) */
stat = cudaEventCreateWithFlags(&nb->misc_ops_done, cudaEventDisableTiming);
CU_RET_ERR(stat, "cudaEventCreate on misc_ops_one failed");
- /* set device info, just point it to the right GPU among the detected ones */
- nb->dev_info = &gpu_info->cuda_dev[get_gpu_device_id(gpu_info, my_gpu_index)];
-
/* On GPUs with ECC enabled, cudaStreamSynchronize shows a large overhead
* (which increases with shorter time/step) caused by a known CUDA driver bug.
* To work around the issue we'll use an (admittedly fragile) memory polling
bTMPIAtomics = false;
#endif
-#if defined(i386) || defined(__x86_64__)
+#ifdef GMX_TARGET_X86
bX86 = true;
#else
bX86 = false;
* - atomics are available, and
* - GPUs are not being shared.
*/
- bool bShouldUsePollSync = (bX86 && bTMPIAtomics && !gpu_info->bDevShare);
+ bool bShouldUsePollSync = (bX86 && bTMPIAtomics &&
+ (gmx_count_gpu_dev_shared(gpu_opt) < 1));
if (bStreamSync)
{
}
}
-void nbnxn_cuda_free(FILE *fplog, nbnxn_cuda_ptr_t cu_nb)
+void nbnxn_cuda_free(nbnxn_cuda_ptr_t cu_nb)
{
cudaError_t stat;
cu_atomdata_t *atdat;
if (nbparam->eeltype == eelCuEWALD_TAB || nbparam->eeltype == eelCuEWALD_TAB_TWIN)
{
- stat = cudaUnbindTexture(nbnxn_cuda_get_coulomb_tab_texref());
- CU_RET_ERR(stat, "cudaUnbindTexture on coulomb_tab failed");
- cu_free_buffered(nbparam->coulomb_tab, &nbparam->coulomb_tab_size);
+
+#ifdef TEXOBJ_SUPPORTED
+ /* Only device CC >= 3.0 (Kepler and later) support texture objects */
+ if (cu_nb->dev_info->prop.major >= 3)
+ {
+ stat = cudaDestroyTextureObject(nbparam->coulomb_tab_texobj);
+ CU_RET_ERR(stat, "cudaDestroyTextureObject on coulomb_tab_texobj failed");
+ }
+ else
+#endif
+ {
+ stat = cudaUnbindTexture(nbnxn_cuda_get_coulomb_tab_texref());
+ CU_RET_ERR(stat, "cudaUnbindTexture on coulomb_tab_texref failed");
+ }
+ cu_free_buffered(nbparam->coulomb_tab, &nbparam->coulomb_tab_size);
}
stat = cudaEventDestroy(cu_nb->nonlocal_done);
}
}
- stat = cudaUnbindTexture(nbnxn_cuda_get_nbfp_texref());
- CU_RET_ERR(stat, "cudaUnbindTexture on coulomb_tab failed");
+#ifdef TEXOBJ_SUPPORTED
+ /* Only device CC >= 3.0 (Kepler and later) support texture objects */
+ if (cu_nb->dev_info->prop.major >= 3)
+ {
+ stat = cudaDestroyTextureObject(nbparam->nbfp_texobj);
+ CU_RET_ERR(stat, "cudaDestroyTextureObject on nbfp_texobj failed");
+ }
+ else
+#endif
+ {
+ stat = cudaUnbindTexture(nbnxn_cuda_get_nbfp_texref());
+ CU_RET_ERR(stat, "cudaUnbindTexture on nbfp_texref failed");
+ }
cu_free_buffered(nbparam->nbfp);
stat = cudaFree(atdat->shift_vec);
gpu_min_ci_balanced_factor*cu_nb->dev_info->prop.multiProcessorCount : 0;
}
+
+/* Returns whether the selected CUDA electrostatics kernel uses the
+ * analytical Ewald correction (eelCuEWALD_ANA / eelCuEWALD_ANA_TWIN)
+ * rather than the tabulated one. Purely a query; does not touch the GPU. */
+gmx_bool nbnxn_cuda_is_kernel_ewald_analytical(const nbnxn_cuda_ptr_t cu_nb)
+{
+    return ((cu_nb->nbparam->eeltype == eelCuEWALD_ANA) ||
+            (cu_nb->nbparam->eeltype == eelCuEWALD_ANA_TWIN));
+}