#include "config.h"
-#include <assert.h>
-#include <errno.h>
-#include <stdlib.h>
-#include <string.h>
+#include <cerrno>
+#include <cstdlib>
+#include <cstring>
+#include <algorithm>
#include <string>
#include <vector>
#ifdef GMX_GPU
-const gmx_bool bGPUBinary = TRUE;
+
+// Compiled with GPU support
+static const bool bGPUBinary = true;
+
# ifdef GMX_USE_OPENCL
-const char *gpu_implementation = "OpenCL";
+
+static const char *gpu_implementation = "OpenCL";
/* Our current OpenCL implementation only supports using exactly one
 * GPU per PP rank, so sharing is impossible */
-const gmx_bool bGpuSharingSupported = FALSE;
+static const bool bGpuSharingSupported = false;
/* Our current OpenCL implementation is not known to handle
 * concurrency correctly (at context creation, JIT compilation, or JIT
 * cache-management stages). OpenCL runtimes need not support it
 * either; library MPI segfaults when creating OpenCL contexts;
 * thread-MPI seems to work but is not yet known to be safe. */
-const gmx_bool bMultiGpuPerNodeSupported = FALSE;
-# else
-const char *gpu_implementation = "CUDA";
-const gmx_bool bGpuSharingSupported = TRUE;
-const gmx_bool bMultiGpuPerNodeSupported = TRUE;
-# endif
-#else
-const gmx_bool bGPUBinary = FALSE;
-const char *gpu_implementation = "non-GPU";
-const gmx_bool bGpuSharingSupported = FALSE;
-const gmx_bool bMultiGpuPerNodeSupported = FALSE;
-#endif
+static const bool bMultiGpuPerNodeSupported = false;
+
+# else /* GMX_USE_OPENCL */
+
+// Our CUDA implementation supports everything
+static const char *gpu_implementation = "CUDA";
+static const bool bGpuSharingSupported = true;
+static const bool bMultiGpuPerNodeSupported = true;
+
+# endif /* GMX_USE_OPENCL */
+
+#else /* GMX_GPU */
+
+// Not compiled with GPU support
+static const bool bGPUBinary = false;
+static const char *gpu_implementation = "non-GPU";
+static const bool bGpuSharingSupported = false;
+static const bool bMultiGpuPerNodeSupported = false;
+
+#endif /* GMX_GPU */
/* Names of the GPU detection/check results (see e_gpu_detect_res_t in hw_info.h). */
const char * const gpu_detect_res_str[egpuNR] =
static int gmx_count_gpu_dev_unique(const gmx_gpu_info_t *gpu_info,
const gmx_gpu_opt_t *gpu_opt);
+/*! \brief Return whether multiple compatible GPUs may be used on one node.
+ *
+ * Reports the compile-time capability constant; false for the current
+ * OpenCL implementation and for non-GPU builds, true for CUDA builds
+ * (see the constants defined under GMX_GPU above). */
+gmx_bool gmx_multiple_gpu_per_node_supported()
+{
+    return bMultiGpuPerNodeSupported;
+}
+
+/*! \brief Return whether one GPU may be shared by several PP ranks.
+ *
+ * Reports the compile-time capability constant; false for the current
+ * OpenCL implementation (exactly one GPU per PP rank) and for non-GPU
+ * builds, true for CUDA builds (see the constants under GMX_GPU above). */
+gmx_bool gmx_gpu_sharing_supported()
+{
+    return bGpuSharingSupported;
+}
+
static void sprint_gpus(char *sbuf, const gmx_gpu_info_t *gpu_info)
{
int i, ndev;
#if defined GMX_MPI && !defined GMX_THREAD_MPI
/* We only print the detection on one, of possibly multiple, nodes */
- strncpy(onhost, " on host ", 10);
+ std::strncpy(onhost, " on host ", 10);
gmx_gethostname(onhost + 9, HOSTNAMELEN);
#else
/* We detect all relevant GPUs */
- strncpy(onhost, "", 1);
+ std::strncpy(onhost, "", 1);
#endif
if (ngpu > 0)
char th_or_proc[STRLEN], th_or_proc_plural[STRLEN], pernode[STRLEN];
gmx_bool btMPI, bMPI, bNthreadsAuto, bEmulateGPU;
- assert(hwinfo);
- assert(cr);
+ GMX_RELEASE_ASSERT(hwinfo, "hwinfo must be a non-NULL pointer");
+ GMX_RELEASE_ASSERT(cr, "cr must be a non-NULL pointer");
/* Below we only do consistency checks for PP and GPUs,
* this is irrelevant for PME only nodes, so in that case we return
}
else
{
- if (ngpu_comp > npppn)
+ /* TODO Should we have a gpu_opt->n_dev_supported field? */
+ if (ngpu_comp > npppn && gmx_multiple_gpu_per_node_supported())
{
md_print_warn(cr, fplog,
"NOTE: potentially sub-optimal launch configuration, %s started with less\n"
*/
if (cr->rank_pp_intranode == 0)
{
+ std::string reasonForLimit;
+ if (ngpu_comp > 1 &&
+ ngpu_use == 1 &&
+ !gmx_multiple_gpu_per_node_supported())
+ {
+ reasonForLimit = "can be used by ";
+ reasonForLimit += gpu_implementation;
+ reasonForLimit += " in GROMACS";
+ }
+ else
+ {
+ reasonForLimit = "was detected";
+ }
gmx_fatal(FARGS,
"Incorrect launch configuration: mismatching number of PP %s%s and GPUs%s.\n"
- "%s was started with %d PP %s%s%s, but only %d GPU%s were detected.",
+ "%s was started with %d PP %s%s%s, but only %d GPU%s %s.",
th_or_proc, btMPI ? "s" : "es", pernode,
ShortProgram(), npppn, th_or_proc,
th_or_proc_plural, pernode,
- ngpu_use, gpu_use_plural);
+ ngpu_use, gpu_use_plural, reasonForLimit.c_str());
}
}
}
int i, uniq_count, ngpu;
int *uniq_ids;
- assert(gpu_info);
- assert(gpu_opt);
+ GMX_RELEASE_ASSERT(gpu_info, "gpu_info must be a non-NULL pointer");
+ GMX_RELEASE_ASSERT(gpu_opt, "gpu_opt must be a non-NULL pointer");
ngpu = gpu_info->n_dev;
{
int device_id;
- device_id = bGpuSharingSupported ? get_gpu_device_id(gpu_info, gpu_opt, i) : i;
+ device_id = gmx_gpu_sharing_supported() ? get_gpu_device_id(gpu_info, gpu_opt, i) : i;
uniq_ids[device_id] = 1;
}
/* Count the devices used. */
MPI_Comm_rank(physicalnode_comm, &rank_local);
#else
/* Here there should be only one process, check this */
- assert(cr->nnodes == 1 && cr->sim_nodeid == 0);
+ GMX_RELEASE_ASSERT(cr->nnodes == 1 && cr->sim_nodeid == 0, "Only a single (master) process should execute here");
rank_local = 0;
#endif
parse_digits_from_plain_string(env,
&gpu_opt->n_dev_use,
&gpu_opt->dev_use);
- if (!bMultiGpuPerNodeSupported && 1 < gpu_opt->n_dev_use)
+ if (!gmx_multiple_gpu_per_node_supported() && 1 < gpu_opt->n_dev_use)
{
gmx_fatal(FARGS, "The %s implementation only supports using exactly one PP rank per node", gpu_implementation);
}
- if (!bGpuSharingSupported && anyGpuIdIsRepeated(gpu_opt))
+ if (!gmx_gpu_sharing_supported() && anyGpuIdIsRepeated(gpu_opt))
{
gmx_fatal(FARGS, "The %s implementation only supports using exactly one PP rank per GPU", gpu_implementation);
}
{
if (nrank % gpu_opt->n_dev_compatible == 0)
{
- nshare = bGpuSharingSupported ? nrank/gpu_opt->n_dev_compatible : 1;
+ nshare = gmx_gpu_sharing_supported() ? nrank/gpu_opt->n_dev_compatible : 1;
}
else
{
/* Here we will waste GPUs when nrank < gpu_opt->n_dev_compatible */
gpu_opt->n_dev_use = std::min(gpu_opt->n_dev_compatible*nshare, nrank);
- if (!bMultiGpuPerNodeSupported)
+ if (!gmx_multiple_gpu_per_node_supported())
{
gpu_opt->n_dev_use = std::min(gpu_opt->n_dev_use, 1);
}