1 # If the user did not set GMX_GPU we'll consider this option to be
2 # in "auto" mode meaning that we will:
3 # - search for CUDA and set GMX_GPU=ON if we find it
4 # - check whether GPUs are present
5 # - if CUDA is not found but GPUs were detected issue a warning
6 if (NOT DEFINED GMX_GPU)
7 set(GMX_GPU_AUTO TRUE CACHE INTERNAL "GPU acceleration will be selected automatically")
9 option(GMX_GPU "Enable GPU acceleration" OFF)
# GPU acceleration is single-precision only. A user-forced GMX_GPU=ON together
# with GMX_DOUBLE is a hard configuration error, whereas auto mode (GMX_GPU_AUTO)
# just downgrades to a CPU-only build with a warning.
# NOTE(review): the else()/endif() lines between the branches below are elided
# in this excerpt — confirm the exact control flow against the full file.
11 if(GMX_GPU AND GMX_DOUBLE)
12 message(FATAL_ERROR "GPU acceleration is not available in double precision!")
14 if(GMX_GPU_AUTO AND GMX_DOUBLE)
15 message(WARNING "GPU acceleration is not available in double precision, disabled!")
# Auto mode with double precision: switch both the GPU build and any further
# automatic selection off in the cache so later passes stay consistent.
16 set_property(CACHE GMX_GPU PROPERTY VALUE OFF)
17 set_property(CACHE GMX_GPU_AUTO PROPERTY VALUE OFF)
20 # detect GPUs in the build host machine
21 if ((GMX_GPU OR GMX_GPU_AUTO) AND NOT GMX_GPU_DETECTION_DONE)
26 # We need to call find_package even when we've already done the detection/setup
27 if(GMX_GPU OR GMX_GPU_AUTO)
28 if(NOT GMX_GPU AND NOT GMX_DETECT_GPU_AVAILABLE)
29 # Stay quiet when detection has occurred and found no GPU.
30 # Noise is acceptable when there is a GPU or the user required one.
31 set(FIND_CUDA_QUIETLY QUIET)
33 # We support CUDA >=v3.2 on *nix, but <= v4.1 doesn't work with MSVC
# NOTE(review): the compiler-dependent branching (presumably if(MSVC)/else())
# around the two find_package calls below is elided in this excerpt — the 4.1
# minimum applies to MSVC and the 3.2 minimum elsewhere; confirm in full file.
35 find_package(CUDA 4.1 ${FIND_CUDA_QUIETLY})
37 find_package(CUDA 3.2 ${FIND_CUDA_QUIETLY})
41 # Depending on the current value of GMX_GPU and GMX_GPU_AUTO:
42 # - OFF, FALSE: Will skip this detection/setup.
43 # - OFF, TRUE : Will keep GMX_GPU=OFF if no CUDA is detected, but will assemble
44 # a warning message which will be issued at the end of the
45 # configuration if GPU(s) were found in the build system.
46 # - ON , FALSE: The user requested GPU build and this requires CUDA, so we will
47 # fail if it is not available.
48 # - ON , TRUE : Can't happen (GMX_GPU=ON can only be user-set at this point)
49 if((GMX_GPU OR GMX_GPU_AUTO) AND NOT GMX_GPU_DETECTION_DONE)
# Record whether a CUDA toolkit root exists; FORCE keeps the cached flag in
# sync with the current detection result on every re-configuration pass.
50 if (EXISTS ${CUDA_TOOLKIT_ROOT_DIR})
51 set(CUDA_FOUND TRUE CACHE INTERNAL "Whether the CUDA toolkit was found" FORCE)
53 set(CUDA_FOUND FALSE CACHE INTERNAL "Whether the CUDA toolkit was found" FORCE)
# NOTE(review): below, the opening of the message-assembly set() command
# (original line 58) and the loop body (65-69) are elided; the bare text lines
# are interiors of multi-line string literals — do not edit them as code.
56 # assemble warning/error message
57 if (GMX_DETECT_GPU_AVAILABLE)
59 ${GMX_DETECT_GPU_COUNT} NVIDIA GPU(s) found in the system")
62 if (NOT GMX_DETECT_GPU_INFO STREQUAL "")
64 foreach(gpu ${GMX_DETECT_GPU_INFO})
70 # TODO remove the second part of the message when we'll have compute
71 # capability information from the detection.
73 Compute capability information not available, consult the NVIDIA website:
74 https://developer.nvidia.com/cuda-gpus
# Compose the help text used when CUDA is required but missing: issued as a
# FATAL_ERROR for an explicit GMX_GPU=ON request, or deferred as a warning in
# auto mode. (The string's closing quote and the if/else skeleton separating
# the three outcomes below sit on lines elided from this excerpt.)
78 set(CUDA_NOTFOUND_MESSAGE "
79 mdrun supports native GPU acceleration on NVIDIA hardware with compute
80 capability >=2.0 (Fermi or later). This requires the NVIDIA CUDA toolkit,
81 which was not found. Its location can be hinted by setting the
82 CUDA_TOOLKIT_ROOT_DIR CMake option (does not work as an environment variable).
83 The typical location would be /usr/local/cuda[-version].
84 Note that CPU or GPU acceleration can be selected at runtime!
91 # Disable GPU acceleration in auto mode
92 message(STATUS "No compatible CUDA toolkit found (v3.2+), disabling native GPU acceleration")
93 set_property(CACHE GMX_GPU PROPERTY VALUE OFF)
# Remember that auto-detection came up empty so the deferred warning can be
# issued at the end of configuration.
94 set(CUDA_NOTFOUND_AUTO ON)
96 # the user requested CUDA, but it wasn't found
97 message(FATAL_ERROR "${CUDA_NOTFOUND_MESSAGE}")
# CUDA was found: commit GMX_GPU=ON to the cache (auto mode resolved to GPU).
101 message(STATUS "Enabling native GPU acceleration")
102 set_property(CACHE GMX_GPU PROPERTY VALUE ON)
104 endif() # NOT CUDA_FOUND
106 # Annoyingly enough, FindCUDA leaves a few variables behind as non-advanced.
107 # We need to mark these advanced outside the conditional, otherwise, if the
108 # user turns GMX_GPU=OFF after a failed cmake pass, these variables will be
109 # left behind in the cache.
110 mark_as_advanced(CUDA_BUILD_CUBIN CUDA_BUILD_EMULATION CUDA_SDK_ROOT_DIR CUDA_VERBOSE_BUILD)
# CUDA_TOOLKIT_ROOT_DIR is marked separately — presumably under a condition on
# the elided original line 111 (not visible in this excerpt); confirm there.
112 mark_as_advanced(CUDA_TOOLKIT_ROOT_DIR)
115 # Try to execute ${CUDA_NVCC_EXECUTABLE} --version and set the output
116 # (or an error string) in the argument variable.
117 # Note that semicolon is used as separator for nvcc.
120 # COMPILER_INFO - [output variable] string with compiler path, ID and
121 # some compiler-provided information
122 # COMPILER_FLAGS - [output variable] flags for the compiler
# Implemented as a macro (no new variable scope), so the SET(${COMPILER_INFO} ...)
# assignments below land directly in the caller's scope.
# NOTE(review): the else()/endif()/endmacro() lines (original 143, 145, 148+)
# are elided from this excerpt — the macro body is incomplete as shown.
124 macro(get_cuda_compiler_info COMPILER_INFO COMPILER_FLAGS)
125 if(CUDA_NVCC_EXECUTABLE)
127 # Get the nvcc version string. This is multi-line, but since it is only 4 lines
128 # and might change in the future it is better to store than trying to parse out
129 # the version from the current format.
130 execute_process(COMMAND ${CUDA_NVCC_EXECUTABLE} --version
131 RESULT_VARIABLE _nvcc_version_res
132 OUTPUT_VARIABLE _nvcc_version_out
133 ERROR_VARIABLE _nvcc_version_err
134 OUTPUT_STRIP_TRAILING_WHITESPACE)
# nvcc exited with status 0: record executable path plus flattened banner.
135 if (${_nvcc_version_res} EQUAL 0)
136 # Fix multi-line mess: Replace newline with ";" so we can use it in a define
137 string(REPLACE "\n" ";" _nvcc_info_singleline ${_nvcc_version_out})
138 SET(${COMPILER_INFO} "${CUDA_NVCC_EXECUTABLE} ${_nvcc_info_singleline}")
# Pick up the build-type-specific nvcc flags (e.g. CUDA_NVCC_FLAGS_RELEASE).
139 string(TOUPPER ${CMAKE_BUILD_TYPE} _build_type)
140 SET(_compiler_flags "${CUDA_NVCC_FLAGS_${_build_type}}")
141 if(CUDA_PROPAGATE_HOST_FLAGS)
# Host C++ flags are propagated to nvcc; convert the space-separated flag
# string into a CMake list so it concatenates cleanly below.
142 string(REGEX REPLACE "[ ]+" ";" _cxx_flags_nospace "${BUILD_CXXFLAGS}")
144 SET(${COMPILER_FLAGS} "${CUDA_NVCC_FLAGS}${CUDA_NVCC_FLAGS_${_build_type}}; ${_cxx_flags_nospace}")
# Fallback branch: nvcc is unavailable or --version failed, report "N/A".
146 SET(${COMPILER_INFO} "N/A")
147 SET(${COMPILER_FLAGS} "N/A")
153 # set up nvcc options
154 include(gmxManageNvccConfig)
156 # Atomic operations used for polling wait for GPU
157 # (to avoid the cudaStreamSynchronize + ECC bug).
158 # ThreadMPI is now always included. Thus, we don't check for Atomics anymore here.
# NOTE(review): the message() call that opens the warning text below (elided
# original lines 161-162) is not visible in this excerpt; the bare lines that
# follow are the interior of that string literal — treat them as data, not code.
160 # no OpenMP is no good!
163 To use GPU acceleration efficiently, mdrun requires OpenMP multi-threading.
164 Without OpenMP a single CPU core can be used with a GPU which is not optimal.
165 Note that with MPI multiple processes can be forced to use a single GPU, but this
166 typically inefficient. Note that you need to set both C and C++ compilers that
167 support OpenMP (CC and CXX environment variables, respectively) when using GPUs.")