1 # If the user did not set GMX_GPU we'll consider this option to be
2 # in "auto" mode meaning that we will:
3 # - search for CUDA and set GMX_GPU=ON if we find it
4 # - check whether GPUs are present
5 # - if CUDA is not found but GPUs were detected, issue a warning
6 if (NOT DEFINED GMX_GPU)
# GMX_GPU_AUTO is an INTERNAL cache flag: TRUE only when the user left
# GMX_GPU unset, i.e. auto-detection mode is in effect.
7 set(GMX_GPU_AUTO TRUE CACHE INTERNAL "GPU acceleration will be selected automatically")
# NOTE(review): the closing endif() for the block above (embedded line 8)
# is not visible in this excerpt -- confirm against the full file.
9 option(GMX_GPU "Enable GPU acceleration" OFF)
# A user-requested GPU build combined with double precision is a hard error...
11 if(GMX_GPU AND GMX_DOUBLE)
12 message(FATAL_ERROR "GPU acceleration is not available in double precision!")
# ...whereas in auto mode we merely warn and switch both flags off.
# (NOTE(review): the endif() on embedded line 13 is missing here.)
14 if(GMX_GPU_AUTO AND GMX_DOUBLE)
15 message(WARNING "GPU acceleration is not available in double precision, disabled!")
# set_property on a CACHE entry changes only the value, preserving the
# entry's type and help string (unlike set(... CACHE ... FORCE)).
16 set_property(CACHE GMX_GPU PROPERTY VALUE OFF)
17 set_property(CACHE GMX_GPU_AUTO PROPERTY VALUE OFF)
20 # detect GPUs in the build host machine
# NOTE: in CMake if(), AND binds tighter than OR, so this condition reads
# GMX_GPU OR (GMX_GPU_AUTO AND NOT GMX_GPU_DETECTION_DONE).
# NOTE(review): the detection body (embedded lines 22-25) is missing from
# this excerpt, as is the matching endif().
21 if (GMX_GPU OR GMX_GPU_AUTO AND NOT GMX_GPU_DETECTION_DONE)
26 # We need to call find_package even when we've already done the detection/setup
27 if(GMX_GPU OR GMX_GPU_AUTO)
28 # We support CUDA >=v3.2 on *nix, but <= v4.1 doesn't work with MSVC
# Not REQUIRED: a missing toolkit is handled explicitly below via CUDA_FOUND.
# NOTE(review): the MSVC branch condition (embedded line 29) and its else()
# (embedded line 31) are missing here; presumably MSVC requires >=4.1 and
# other platforms only >=3.2.
30 find_package(CUDA 4.1)
32 find_package(CUDA 3.2)
36 # Depending on the current value of GMX_GPU and GMX_GPU_AUTO:
37 # - OFF, FALSE: Will skip this detection/setup.
38 # - OFF, TRUE : Will keep GMX_GPU=OFF if no CUDA is detected, but will assemble
39 # a warning message which will be issued at the end of the
40 # configuration if GPU(s) were found in the build system.
41 # - ON , FALSE: The user requested GPU builds, will require CUDA and will fail
42 # if it is not available.
43 # - ON , TRUE : Can't happen (GMX_GPU=ON can only be user-set at this point)
# Same guard as above: GMX_GPU OR (GMX_GPU_AUTO AND NOT GMX_GPU_DETECTION_DONE).
44 if(GMX_GPU OR GMX_GPU_AUTO AND NOT GMX_GPU_DETECTION_DONE)
# Re-validate the (possibly cached) toolkit path so a stale cache entry
# pointing at a removed installation does not count as "found".
45 if (EXISTS ${CUDA_TOOLKIT_ROOT_DIR})
46 set(CUDA_FOUND TRUE CACHE INTERNAL "Whether the CUDA toolkit was found" FORCE)
# NOTE(review): the else() branch line (embedded line 47) is missing from
# this excerpt; line 48 below is presumably the not-found branch.
48 set(CUDA_FOUND FALSE CACHE INTERNAL "Whether the CUDA toolkit was found" FORCE)
# Build up human-readable text about the GPU/CUDA situation.  In auto mode
# this becomes a deferred warning; with GMX_GPU=ON it becomes a fatal error.
# NOTE(review): several embedded lines (53, 55-64, 67, 70-72) are missing
# from this excerpt; lines 54, 68-69 below are continuations of quoted
# message strings whose opening lines are not visible.
51 # assemble warning/error message
52 if (GMX_DETECT_GPU_AVAILABLE)
54 ${GMX_DETECT_GPU_COUNT} NVIDIA GPU(s) found in the system")
57 if (NOT GMX_DETECT_GPU_INFO STREQUAL "")
59 foreach(gpu ${GMX_DETECT_GPU_INFO})
65 # TODO remove the second part of the message when we'll have compute
66 # capability information from the detection.
68 Compute capability information not available, consult the NVIDIA website:
69 https://developer.nvidia.com/cuda-gpus
# CUDA_NOTFOUND_MESSAGE is consumed below: emitted as FATAL_ERROR when the
# user explicitly requested GPU support, and as a deferred notice otherwise.
73 set(CUDA_NOTFOUND_MESSAGE "
74 mdrun supports native GPU acceleration on NVIDIA hardware with compute
75 capability >=2.0. This requires the NVIDIA CUDA library, which was not
76 found; the location can be hinted by setting CUDA_TOOLKIT_ROOT_DIR as
77 a CMake option (It does not work as an environment variable).
78 The typical location would be /usr/local/cuda[-version].
79 Note that CPU or GPU acceleration can be selected at runtime!
# Decide the final GMX_GPU value based on whether CUDA was found.
# NOTE(review): the enclosing condition lines (e.g. if(NOT CUDA_FOUND),
# the GMX_GPU_AUTO test, and intermediate else() lines -- embedded lines
# 80-85, 90, 93-95, 98) are not visible in this excerpt.
86 # Disable GPU acceleration in auto mode
87 message(STATUS "Disabling native GPU acceleration")
88 set_property(CACHE GMX_GPU PROPERTY VALUE OFF)
# Remember that auto-detection failed so the assembled warning can be
# issued at the end of configuration.
89 set(CUDA_NOTFOUND_AUTO ON)
91 # the user requested CUDA, but it wasn't found
92 message(FATAL_ERROR "${CUDA_NOTFOUND_MESSAGE}")
# CUDA was found: turn GPU acceleration on (value-only cache update).
96 message(STATUS "Enabling native GPU acceleration")
97 set_property(CACHE GMX_GPU PROPERTY VALUE ON)
99 endif() # NOT CUDA_FOUND
101 # Annoyingly enough, FindCUDA leaves a few variables behind as non-advanced.
102 # We need to mark these advanced outside the conditional, otherwise, if the
103 # user turns GMX_GPU=OFF after a failed cmake pass, these variables will be
104 # left behind in the cache.
# mark_as_advanced only hides these FindCUDA cache entries from the basic
# cmake-gui/ccmake view; it is harmless if an entry is not defined.
105 mark_as_advanced(CUDA_BUILD_CUBIN CUDA_BUILD_EMULATION CUDA_SDK_ROOT_DIR CUDA_VERBOSE_BUILD)
108 # set up nvcc options
# gmxManageNvccConfig is a project-local CMake module (not visible here);
# presumably it assembles the nvcc host-compiler and flag configuration.
109 include(gmxManageNvccConfig)
111 # Version info (semicolon used as line separator) for nvcc.
# get_nvcc_version_info() is a helper command, presumably defined by the
# module included above -- TODO confirm against gmxManageNvccConfig.cmake.
112 get_nvcc_version_info()
114 # Atomic operations used for polling wait for GPU
115 # (to avoid the cudaStreamSynchronize + ECC bug).
116 # ThreadMPI is now always included. Thus, we don't check for Atomics anymore here.
# NOTE(review): the condition and message() opening for the warning below
# (embedded lines 119-120) are missing from this excerpt; lines 121-125
# are the body of that quoted warning string, so no comments may be
# inserted among them.
118 # no OpenMP is no good!
121 To use GPU acceleration efficiently, mdrun requires OpenMP multi-threading.
122 With no OpenMP a single CPU core can be used with a GPU which is not optimal.
123 Note that with MPI multiple processes can be forced to use a single GPU, but this
124 typically inefficient. Note that you need to set both C and C++ compilers that
125 support OpenMP (CC and CXX environment variables, respectively) when using GPUs.")