3 # This file is part of the GROMACS molecular simulation package.
5 # Copyright (c) 2020,2021, by the GROMACS development team, led by
6 # Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
7 # and including many others, as listed in the AUTHORS file in the
8 # top-level source directory and at http://www.gromacs.org.
10 # GROMACS is free software; you can redistribute it and/or
11 # modify it under the terms of the GNU Lesser General Public License
12 # as published by the Free Software Foundation; either version 2.1
13 # of the License, or (at your option) any later version.
15 # GROMACS is distributed in the hope that it will be useful,
16 # but WITHOUT ANY WARRANTY; without even the implied warranty of
17 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 # Lesser General Public License for more details.
20 # You should have received a copy of the GNU Lesser General Public
21 # License along with GROMACS; if not, see
22 # http://www.gnu.org/licenses, or write to the Free Software Foundation,
23 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
25 # If you want to redistribute modifications to GROMACS, please
26 # consider that scientific software is very special. Version
27 # control is crucial - bugs must be traceable. We will be happy to
28 # consider code for inclusion in the official distribution, but
29 # derived work must not be called official GROMACS. Details are found
30 # in the README & COPYING files - if they are missing, get the
31 # official version at http://www.gromacs.org.
33 # To help us fund GROMACS development, we humbly ask that you cite
34 # the research papers on the package. Check out http://www.gromacs.org.
36 """Building block based Dockerfile generation for CI testing images.
38 Generates a set of docker images used for running GROMACS CI on Gitlab.
39 The images are prepared according to a selection of build configuration targets
40 that hope to cover a broad enough scope of different possible systems,
41 allowing us to check compiler types and versions, as well as libraries used
42 for accelerators and parallel communication systems. Each combination is
43 described as an entry in the build_configs dictionary, with the script
44 analysing the logic and adding build stages as needed.
46 Based on the example script provided by the NVidia HPCCM repository.
49 `NVidia HPC Container Maker <https://github.com/NVIDIA/hpc-container-maker>`__
52 * Paul Bauer <paul.bauer.q@gmail.com>
53 * Eric Irrgang <ericirrgang@gmail.com>
54 * Joe Jordan <e.jjordan12@gmail.com>
55 * Mark Abraham <mark.j.abraham@gmail.com>
59 $ python3 scripted_gmx_docker_builds.py --help
60 $ python3 scripted_gmx_docker_builds.py --format docker > Dockerfile && docker build .
61 $ python3 scripted_gmx_docker_builds.py | docker build -
71 from distutils.version import StrictVersion
75 from hpccm.building_blocks.base import bb_base
81 'This module assumes availability of supporting modules in the same directory. Add the directory to '
82 'PYTHONPATH or invoke Python from within the module directory so module location can be resolved.')
84 # Basic packages for all final images.
# NOTE(review): the package lists below appear truncated in this view of the
# file (internal line numbering is discontinuous) — entries left byte-identical;
# confirm the full lists against the upstream GROMACS repository.
85 _common_packages = ['build-essential',
# Packages needed for OpenCL builds; the Intel and ROCm package sources they
# come from are documented in the comments inside the list.
103 _opencl_extra_packages = [
105 # The following require apt_ppas=['ppa:intel-opencl/intel-opencl']
107 'ocl-icd-libopencl1',
108 'ocl-icd-opencl-dev',
110 # The following require
111 # apt_keys=['http://repo.radeon.com/rocm/apt/debian/rocm.gpg.key'],
112 # apt_repositories=['deb [arch=amd64] http://repo.radeon.com/rocm/apt/debian/ xenial main']
119 # Extra packages needed to build Intel Compute Runtime
120 _intel_compute_runtime_extra_packages = ['pkg-config',
129 'ocl-icd-libopencl1',
130 'ocl-icd-opencl-dev',
133 # Extra packages needed to build Python installations from source.
134 _python_extra_packages = ['build-essential',
153 # Extra packages needed for images for building documentation.
154 _docs_extra_packages = ['autoconf',
170 'texlive-latex-base',
171 'texlive-latex-extra',
172 'texlive-fonts-recommended',
173 'texlive-fonts-extra']
175 # Parse command line arguments
# The parser inherits common image-selection options from utility.parser
# (defined in a sibling module — presumably compiler/MPI/CUDA selections;
# verify in that module).
176 parser = argparse.ArgumentParser(description='GROMACS CI image creation script',
177 parents=[utility.parser])
# Choose whether to emit a Dockerfile or a Singularity definition file.
179 parser.add_argument('--format', type=str, default='docker',
180 choices=['docker', 'singularity'],
181 help='Container specification format (default: docker)')
def base_image_tag(args) -> str:
    """Return the Docker base image tag implied by the CLI arguments.

    CUDA builds start from the ``nvidia/cuda`` development images (which
    bundle the CUDA toolkit); otherwise a plain CentOS or Ubuntu image is
    used. Exactly one Linux distribution must be selected.

    Raises:
        RuntimeError: If neither ``args.centos`` nor ``args.ubuntu`` is set.
    """
    # Check if we use CUDA images or plain linux images.
    # NOTE(review): the 'else:' lines were missing from the mangled source,
    # leaving the raise statements detached; restored per upstream logic.
    if args.cuda is not None:
        cuda_version_tag = 'nvidia/cuda:' + args.cuda + '-devel'
        if args.centos is not None:
            cuda_version_tag += '-centos' + args.centos
        elif args.ubuntu is not None:
            cuda_version_tag += '-ubuntu' + args.ubuntu
        else:
            raise RuntimeError('Logic error: no Linux distribution selected.')
        base_image_tag = cuda_version_tag
    else:
        if args.centos is not None:
            base_image_tag = 'centos:centos' + args.centos
        elif args.ubuntu is not None:
            base_image_tag = 'ubuntu:' + args.ubuntu
        else:
            raise RuntimeError('Logic error: no Linux distribution selected.')
    return base_image_tag
def get_llvm_packages(args) -> typing.Iterable[str]:
    """Return the extra OS packages needed when using distribution LLVM.

    When LLVM comes from distribution packages (``--llvm`` given without
    ``--tsan``), the OpenMP runtime and the clang developer tools must be
    installed separately; hipSYCL builds additionally need the LLVM/Clang
    development packages. TSAN builds compile their own LLVM, so no extra
    packages are needed in that case.
    """
    # NOTE(review): the trailing 'return packages' / 'else: return []' lines
    # were missing from the mangled source; restored per upstream logic.
    if (args.llvm is not None) and (args.tsan is None):
        packages = [f'libomp-{args.llvm}-dev',
                    f'libomp5-{args.llvm}',
                    'clang-format-' + str(args.llvm),
                    'clang-tidy-' + str(args.llvm)]
        if args.hipsycl is not None:
            packages += [f'llvm-{args.llvm}-dev',
                         f'libclang-{args.llvm}-dev',
                         f'lld-{args.llvm}']
        return packages
    else:
        return []
def get_opencl_packages(args) -> typing.Iterable[str]:
    """Return the extra OS packages needed for OpenCL builds.

    The OpenCL packages are only added when neither documentation
    (``--doxygen``) nor oneAPI images are being built.
    """
    # NOTE(review): the 'else: return []' lines were missing from the mangled
    # source; restored per upstream logic.
    if (args.doxygen is None) and (args.oneapi is None):
        return _opencl_extra_packages
    else:
        return []
# Select the compiler toolchain building block for the image:
# LLVM (optionally the custom TSAN build), Intel oneAPI, or GCC.
# The TSAN and oneAPI variants reuse a compiler prepared in
# *compiler_build_stage* instead of installing distribution packages.
# NOTE(review): several 'else:' lines and the tail of the gnu(...) call are
# missing from this view of the file (internal numbering is discontinuous);
# code below left byte-identical.
227 def get_compiler(args, compiler_build_stage: hpccm.Stage = None) -> bb_base:
229 if args.llvm is not None:
230 # Build our own version instead to get TSAN + OMP
231 if args.tsan is not None:
232 if compiler_build_stage is not None:
233 compiler = compiler_build_stage.runtime(_from='tsan')
235 raise RuntimeError('No TSAN compiler build stage!')
236 # Build the default compiler if we don't need special support
238 compiler = hpccm.building_blocks.llvm(extra_repository=True, version=args.llvm)
240 elif args.oneapi is not None:
241 if compiler_build_stage is not None:
242 compiler = compiler_build_stage.runtime(_from='oneapi')
243 # Prepare the toolchain (needed only for builds done within the Dockerfile, e.g.
244 # OpenMPI builds, which don't currently work for other reasons)
245 oneapi_toolchain = hpccm.toolchain(CC=f'/opt/intel/oneapi/compiler/{args.oneapi}/linux/bin/intel64/icx',
246 CXX=f'/opt/intel/oneapi/compiler/{args.oneapi}/linux/bin/intel64/icpx')
247 setattr(compiler, 'toolchain', oneapi_toolchain)
250 raise RuntimeError('No oneAPI compiler build stage!')
252 elif args.gcc is not None:
253 compiler = hpccm.building_blocks.gnu(extra_repository=True,
257 raise RuntimeError('Logic error: no compiler toolchain selected.')
# Return an MPI building block for the image, or raise RuntimeError for
# unsupported or unknown configurations. Only OpenMPI is implemented; it is
# built with the selected compiler's toolchain and with CUDA support when a
# CUDA image is requested.
# NOTE(review): the 'use_cuda' assignments and some 'else:' lines are missing
# from this view of the file; code below left byte-identical.
261 def get_mpi(args, compiler):
262 # If needed, add MPI to the image
263 if args.mpi is not None:
264 if args.mpi == 'openmpi':
266 if args.cuda is not None:
269 if hasattr(compiler, 'toolchain'):
270 if args.oneapi is not None:
271 raise RuntimeError('oneAPI building OpenMPI is not supported')
272 return hpccm.building_blocks.openmpi(toolchain=compiler.toolchain, cuda=use_cuda, infiniband=False)
274 raise RuntimeError('compiler is not an HPCCM compiler building block!')
276 elif args.mpi == 'impi':
277 # TODO Intel MPI from the oneAPI repo is not working reliably,
278 # reasons are unclear. When solved, add packages called:
279 # 'intel-oneapi-mpi', 'intel-oneapi-mpi-devel'
280 # during the compiler stage.
281 # TODO also consider hpccm's intel_mpi package if that doesn't need
283 raise RuntimeError('Intel MPI recipe not implemented yet.')
285 raise RuntimeError('Requested unknown MPI implementation.')
# Body of the clFFT helper: when --clfft is given, build clFFT from source at
# the requested git branch and install it under /usr/local.
# NOTE(review): the enclosing 'def get_clfft(args):' line is missing from this
# view of the file; body left byte-identical.
291 if (args.clfft is not None):
292 return hpccm.building_blocks.generic_cmake(
293 repository='https://github.com/clMathLibraries/clFFT.git',
294 prefix='/usr/local', recursive=True, branch=args.clfft, directory='clFFT/src')
# Build hipSYCL from source at the requested commit, targeting the ROCm
# backend (and additionally the CUDA backend when a CUDA image is selected).
# Requires a distribution LLVM/Clang (--llvm) to compile against.
# NOTE(review): the early return and the 'postinstall = [...]' list openings
# are missing from this view of the file; code below left byte-identical.
298 def get_hipsycl(args):
299 if args.hipsycl is None:
301 if args.llvm is None:
302 raise RuntimeError('Can not build hipSYCL without llvm')
304 cmake_opts = [f'-DLLVM_DIR=/usr/lib/llvm-{args.llvm}/cmake',
305 f'-DCLANG_EXECUTABLE_PATH=/usr/bin/clang++-{args.llvm}',
306 '-DCMAKE_PREFIX_PATH=/opt/rocm/lib/cmake',
307 '-DWITH_ROCM_BACKEND=ON']
308 if args.cuda is not None:
309 cmake_opts += [f'-DCUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda',
310 '-DWITH_CUDA_BACKEND=ON']
# Post-install workaround: symlink ROCm bitcode files to the names hipSYCL
# expects (see the linked upstream issue).
313 # https://github.com/illuhad/hipSYCL/issues/361#issuecomment-718943645
314 'for f in /opt/rocm/amdgcn/bitcode/*.bc; do ln -s "$f" "/opt/rocm/lib/$(basename $f .bc).amdgcn.bc"; done'
316 if args.cuda is not None:
318 # https://github.com/illuhad/hipSYCL/issues/410#issuecomment-743301929
319 f'sed s/_OPENMP/__OPENMP_NVPTX__/ -i /usr/lib/llvm-{args.llvm}/lib/clang/*/include/__clang_cuda_complex_builtins.h',
320 # Not needed unless we're building with CUDA 11.x, but no harm in doing always
321 f'ln -s /usr/local/cuda/compat/* /usr/local/cuda/lib64/'
324 return hpccm.building_blocks.generic_cmake(
325 repository='https://github.com/illuhad/hipSYCL.git',
326 directory='/var/tmp/hipSYCL',
327 prefix='/usr/local', recursive=True, commit=args.hipsycl,
328 cmake_opts=['-DCMAKE_BUILD_TYPE=Release', *cmake_opts],
329 postinstall=postinstall)
# Build the Intel (NEO) OpenCL Compute Runtime from source at the requested
# git branch and install it under /usr/local.
# NOTE(review): the early return and part of the cmake_opts list are missing
# from this view of the file; code below left byte-identical.
331 def get_intel_compute_runtime(args):
332 # The only reason we need to build Compute Runtime ourselves is because Intel packages have no DG1 support
333 # Otherwise, we could have just installed DEB packages from GitHub or Intel PPA
334 if args.intel_compute_runtime is None:
337 cmake_opts = ['-DCMAKE_BUILD_TYPE=Release',
338 '-DSKIP_UNIT_TESTS=TRUE',
339 '-DSUPPORT_GEN8=0', '-DSUPPORT_GEN9=1', '-DSUPPORT_GEN11=1', '-DSUPPORT_GEN12LP=1', '-DSUPPORT_DG1=1',
342 return hpccm.building_blocks.generic_cmake(
343 repository='https://github.com/intel/compute-runtime.git',
344 directory='compute-runtime',
345 prefix='/usr/local', recursive=True, branch=args.intel_compute_runtime,
346 cmake_opts=cmake_opts,
# ldconfig so the freshly installed ICD/libraries are found at runtime.
347 postinstall=['ldconfig'])
# NOTE(review): a few lines are missing from this view of the file (internal
# numbering is discontinuous); code below left byte-identical apart from
# restoring the docstring terminator.
349 def add_tsan_compiler_build_stage(input_args, output_stages: typing.Mapping[str, hpccm.Stage]):
350 """Isolate the expensive TSAN preparation stage.
352 This is a very expensive stage, but has few and disjoint dependencies, and
353 its output is easily compartmentalized (/usr/local) so we can isolate this
354 build stage to maximize build cache hits and reduce rebuild time, bookkeeping,
355 and final image size.
"""
357 if not isinstance(output_stages, collections.abc.MutableMapping):
358 raise RuntimeError('Need output_stages container.')
359 tsan_stage = hpccm.Stage()
360 tsan_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as='tsan')
362 tsan_stage += hpccm.building_blocks.packages(ospackages=['git', 'ca-certificates', 'build-essential', 'cmake'])
363 # CMake will get duplicated later, but this is an expensive image, and it isn't worth optimizing
364 # out that duplication...
365 tsan_stage += hpccm.building_blocks.python(python3=True, python2=False, devel=False)
# Check out the LLVM release branch matching the requested major version and
# build clang plus the TSAN-enabled OpenMP runtime from source.
367 compiler_branch = 'release/' + str(input_args.llvm) + '.x'
368 tsan_stage += hpccm.building_blocks.generic_cmake(
369 repository='https://github.com/llvm/llvm-project.git',
370 directory='/var/tmp/llvm-project/llvm/',
371 prefix='/usr/local', recursive=True, branch=compiler_branch,
372 cmake_opts=['-D CMAKE_BUILD_TYPE=Release', '-D LLVM_ENABLE_PROJECTS="clang;openmp;clang-tools-extra;compiler-rt;lld"',
373 '-D LIBOMP_TSAN_SUPPORT=on'],
# Provide version-suffixed tool names so the rest of the recipe can refer to
# the tools the same way as it does for distribution packages.
374 postinstall=['ln -s /usr/local/bin/clang++ /usr/local/bin/clang++-' + str(input_args.llvm),
375 'ln -s /usr/local/bin/clang-format /usr/local/bin/clang-format-' + str(input_args.llvm),
376 'ln -s /usr/local/bin/clang-tidy /usr/local/bin/clang-tidy-' + str(input_args.llvm),
377 'ln -s /usr/local/share/clang/run-clang-tidy.py /usr/local/bin/run-clang-tidy-' + str(input_args.llvm) + '.py',
378 'ln -s /usr/local/bin/run-clang-tidy-' + str(input_args.llvm) + '.py /usr/local/bin/run-clang-tidy-' + str(input_args.llvm),
379 'ln -s /usr/local/libexec/c++-analyzer /usr/local/bin/c++-analyzer-' + str(input_args.llvm)])
# Register under the shared key used by build_stages()/get_compiler().
380 output_stages['compiler_build'] = tsan_stage
def oneapi_runtime(_from='0'):
    """Return a stage that copies the oneAPI runtime files from the build stage.

    Installed as the ``runtime`` attribute of the oneAPI build stage so that
    later stages pick up only /opt/intel and the bash configuration.
    """
    runtime_stage = hpccm.Stage()
    # Copy the oneAPI installation and the bashrc (which sources setvars.sh)
    # from the 'oneapi-build' stage into the consuming stage.
    copied_files = {"/opt/intel": "/opt/intel",
                    "/etc/bash.bashrc": "/etc/bash.bashrc"}
    runtime_stage += hpccm.primitives.copy(_from='oneapi-build', files=copied_files)
    return runtime_stage
# NOTE(review): a few lines (closing parentheses, blank lines) are missing
# from this view of the file; code below left byte-identical apart from
# restoring the docstring terminator.
389 def add_oneapi_compiler_build_stage(input_args, output_stages: typing.Mapping[str, hpccm.Stage]):
390 """Isolate the oneAPI preparation stage.
392 This stage is isolated so that its installed components are minimized in the
393 final image (chiefly /opt/intel) and its environment setup script can be
394 sourced. This also helps with rebuild time and final image size.
"""
396 if not isinstance(output_stages, collections.abc.MutableMapping):
397 raise RuntimeError('Need output_stages container.')
398 oneapi_stage = hpccm.Stage()
399 oneapi_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as='oneapi-build')
401 version = str(input_args.oneapi)
403 # Add required components for the next stage (both for hpccm and Intel's setvars.sh script)
404 oneapi_stage += hpccm.building_blocks.packages(ospackages=['wget', 'gnupg2', 'ca-certificates', 'lsb-release'])
405 oneapi_stage += hpccm.building_blocks.packages(
406 apt_keys=['https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB'],
407 apt_repositories=['deb https://apt.repos.intel.com/oneapi all main'],
408 # Add minimal packages (not the whole HPC toolkit!)
409 ospackages=[f'intel-oneapi-dpcpp-cpp-{version}',
410 f'intel-oneapi-openmp-{version}',
411 f'intel-oneapi-mkl-{version}',
412 f'intel-oneapi-mkl-devel-{version}']
414 # Ensure that all bash shells on the final container will have access to oneAPI
415 oneapi_stage += hpccm.primitives.shell(
416 commands=['echo "source /opt/intel/oneapi/setvars.sh" >> /etc/bash.bashrc',
# Re-point the 'latest' symlink at the requested compiler version so the
# toolchain paths used elsewhere in this script resolve consistently.
417 'unlink /opt/intel/oneapi/compiler/latest',
418 f'ln -sf /opt/intel/oneapi/compiler/{version} /opt/intel/oneapi/compiler/latest']
# Attach the custom runtime() hook so consuming stages copy only /opt/intel
# and the bash configuration (see oneapi_runtime).
420 setattr(oneapi_stage, 'runtime', oneapi_runtime)
422 output_stages['compiler_build'] = oneapi_stage
# NOTE(review): distutils is deprecated (PEP 632; removed in Python 3.12) —
# consider packaging.version.Version when the script's Python floor allows it.
# Also, the middle of the pip package list and the final 'return commands'
# are missing from this view of the file; code below left byte-identical.
424 def prepare_venv(version: StrictVersion) -> typing.Sequence[str]:
425 """Get shell commands to set up the venv for the requested Python version."""
426 major = version.version[0]
427 minor = version.version[1]  # type: int
# pyenv is installed into the root home directory by add_python_stages.
429 pyenv = '$HOME/.pyenv/bin/pyenv'
431 py_ver = f'{major}.{minor}'
432 venv_path = f'$HOME/venv/py{py_ver}'
# Create the venv from the pyenv-managed interpreter of the same version.
433 commands = [f'$({pyenv} prefix `{pyenv} whence python{py_ver}`)/bin/python -m venv {venv_path}']
435 commands.append(f'{venv_path}/bin/python -m pip install --upgrade pip setuptools')
436 # Install dependencies for building and testing gmxapi Python package.
437 # WARNING: Please keep this list synchronized with python_packaging/requirements-test.txt
438 # TODO: Get requirements.txt from an input argument.
439 commands.append(f"""{venv_path}/bin/python -m pip install --upgrade \
450 'scikit-build>=0.10' \
452 'sphinxcontrib-plantuml>=0.14'""")
454 # TODO: Remove 'importlib_resources' dependency when Python >=3.7 is required.
456 commands.append(f"""{venv_path}/bin/python -m pip install --upgrade \
457 'importlib_resources'""")
# NOTE(review): the 'input_args' parameter line and several other lines are
# missing from this view of the file (internal numbering is discontinuous);
# code below left byte-identical apart from restoring the docstring terminator.
462 def add_python_stages(building_blocks: typing.Mapping[str, bb_base],
464 output_stages: typing.MutableMapping[str, hpccm.Stage]):
465 """Add the stage(s) necessary for the requested venvs.
467 One intermediate build stage is created for each venv (see --venv option).
469 Each stage partially populates Python installations and venvs in the home
470 directory. The home directory is collected by the 'pyenv' stage for use by
471 the main build stage.
"""
473 if len(input_args.venvs) < 1:
474 raise RuntimeError('No venvs to build...')
475 if output_stages is None or not isinstance(output_stages, collections.abc.Mapping):
476 raise RuntimeError('Need a container for output stages.')
478 # Main Python stage that collects the environments from individual stages.
479 # We collect the stages individually, rather than chaining them, because the
480 # copy is a bit slow and wastes local Docker image space for each filesystem
482 pyenv_stage = hpccm.Stage()
483 pyenv_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as='pyenv')
484 pyenv_stage += building_blocks['compiler']
485 pyenv_stage += building_blocks['mpi']
486 pyenv_stage += hpccm.building_blocks.packages(ospackages=_python_extra_packages)
# One intermediate stage per requested Python version, built via pyenv.
488 for version in [StrictVersion(py_ver) for py_ver in sorted(input_args.venvs)]:
489 stage_name = 'py' + str(version)
490 stage = hpccm.Stage()
491 stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as=stage_name)
492 stage += building_blocks['compiler']
493 stage += building_blocks['mpi']
494 stage += hpccm.building_blocks.packages(ospackages=_python_extra_packages)
496 # TODO: Use a non-root user for testing and Python virtual environments.
497 stage += hpccm.primitives.shell(commands=[
498 'curl https://pyenv.run | bash',
499 """echo 'export PYENV_ROOT="$HOME/.pyenv"' >> $HOME/.bashrc""",
500 """echo 'export PATH="$PYENV_ROOT/bin:$PATH"' >> $HOME/.bashrc""",
501 """echo 'eval "$(pyenv init -)"' >> $HOME/.bashrc""",
502 """echo 'eval "$(pyenv virtualenv-init -)"' >> $HOME/.bashrc"""])
503 pyenv = '$HOME/.pyenv/bin/pyenv'
# Build the requested CPython version with shared-library support.
504 commands = [f'PYTHON_CONFIGURE_OPTS="--enable-shared" {pyenv} install -s {version}']
505 stage += hpccm.primitives.shell(commands=commands)
507 commands = prepare_venv(version)
508 stage += hpccm.primitives.shell(commands=commands)
510 # TODO: Update user home directory.
511 pyenv_stage += hpccm.primitives.copy(_from=stage_name, _mkdir=True, src=['/root/'],
514 # Add the intermediate build stage to the sequence
515 output_stages[stage_name] = stage
517 # TODO: If we activate pyenv for login shells, the `global` "version" should be full-featured.
518 # # `version` should be a system installation or pyenv environment (or pyenv-virtualenv)
519 # # with the dependencies for all of the Python aspects of CMake-driven builds.
520 # commands = '{pyenv} global {version}'.format(
523 # pyenv_stage += hpccm.primitives.shell(commands=commands)
525 # Add the aggregating build stage to the sequence. This allows the main stage to copy
526 # the files in a single stage, potentially reducing the overall output image size.
527 output_stages['pyenv'] = pyenv_stage
# NOTE(review): the early return, the doxygen configure options, and the
# 'commands = [' opening are missing from this view of the file; code below
# left byte-identical.
530 def add_documentation_dependencies(input_args,
531 output_stages: typing.MutableMapping[str, hpccm.Stage]):
532 """Add appropriate layers according to doxygen input arguments."""
533 if input_args.doxygen is None:
# Remove the ImageMagick policy entries that disable ghostscript-backed
# formats (XPS/PDF/PS/EPS), which the documentation build needs.
535 output_stages['main'] += hpccm.primitives.shell(
536 commands=['sed -i \'/\"XPS\"/d;/\"PDF\"/d;/\"PS\"/d;/\"EPS\"/d;/disable ghostscript format types/d\' /etc/ImageMagick-6/policy.xml'])
# Doxygen 1.8.5 is built from a pinned source commit (with a pinned flex).
537 if input_args.doxygen == '1.8.5':
538 doxygen_commit = 'ed4ed873ab0e7f15116e2052119a6729d4589f7a'
539 output_stages['main'] += hpccm.building_blocks.generic_autotools(
540 repository='https://github.com/westes/flex.git',
541 commit='f7788a9a0ecccdc953ed12043ccb59ca25714018',
542 prefix='/tmp/install-of-flex',
543 configure_opts=['--disable-shared'],
544 preconfigure=['./autogen.sh'])
545 output_stages['main'] += hpccm.building_blocks.generic_autotools(
546 repository='https://github.com/doxygen/doxygen.git',
547 commit=doxygen_commit,
550 '--flex /tmp/install-of-flex/bin/flex',
# Other doxygen versions are installed from the prebuilt Linux binary archive.
553 version = input_args.doxygen
554 archive_name = f'doxygen-{version}.linux.bin.tar.gz'
555 archive_url = f'https://sourceforge.net/projects/doxygen/files/rel-{version}/{archive_name}'
556 binary_path = f'doxygen-{version}/bin/doxygen'
558 'mkdir doxygen && cd doxygen',
559 f'wget {archive_url}',
560 f'tar xf {archive_name} {binary_path}',
561 f'cp {binary_path} /usr/local/bin/',
562 'cd .. && rm -rf doxygen'
564 output_stages['main'] += hpccm.primitives.shell(commands=commands)
# Assemble the ordered mapping of build stages for the requested
# configuration: optional compiler build stages, reusable building blocks,
# optional Python venv stages, and the final 'main' stage.
# NOTE(review): several lines (list/paren closings, some loop bodies and
# blank lines) are missing from this view of the file (internal numbering is
# discontinuous); code below left byte-identical except for comment typo fixes.
567 def build_stages(args) -> typing.Iterable[hpccm.Stage]:
568 """Define and sequence the stages for the recipe corresponding to *args*."""
570 # A Dockerfile or Singularity recipe can have multiple build stages.
571 # The main build stage can copy files from previous stages, though only
572 # the last stage is included in the tagged output image. This means that
573 # large or expensive sets of build instructions can be isolated in
574 # local/temporary images, but all of the stages need to be output by this
575 # script, and need to occur in the correct order, so we create a sequence
576 # object early in this function.
577 stages = collections.OrderedDict()
579 # If we need TSAN or oneAPI support the early build is more complex,
580 # so that our compiler images don't have all the cruft needed to get those things
582 if args.llvm is not None and args.tsan is not None:
583 add_tsan_compiler_build_stage(input_args=args, output_stages=stages)
584 if args.oneapi is not None:
585 add_oneapi_compiler_build_stage(input_args=args, output_stages=stages)
587 # Building blocks are chunks of container-builder instructions that can be
588 # copied to any build stage with the addition operator.
589 building_blocks = collections.OrderedDict()
590 building_blocks['base_packages'] = hpccm.building_blocks.packages(
591 ospackages=_common_packages)
593 # These are the most expensive and most reusable layers, so we put them first.
594 building_blocks['compiler'] = get_compiler(args, compiler_build_stage=stages.get('compiler_build'))
595 building_blocks['mpi'] = get_mpi(args, building_blocks['compiler'])
# Install each requested CMake version side by side under /usr/local.
596 for i, cmake in enumerate(args.cmake):
597 building_blocks['cmake' + str(i)] = hpccm.building_blocks.cmake(
599 prefix=f'/usr/local/cmake-{cmake}',
602 # Install additional packages early in the build to optimize Docker build layer cache.
603 os_packages = list(get_llvm_packages(args)) + get_opencl_packages(args)
604 if args.doxygen is not None:
605 os_packages += _docs_extra_packages
606 if args.oneapi is not None:
607 os_packages += ['lsb-release']
608 if args.hipsycl is not None:
609 os_packages += ['libboost-fiber-dev']
610 if args.intel_compute_runtime is not None:
611 os_packages += _intel_compute_runtime_extra_packages
612 building_blocks['extra_packages'] = hpccm.building_blocks.packages(
613 ospackages=os_packages,
614 apt_ppas=['ppa:intel-opencl/intel-opencl'],
615 apt_keys=['http://repo.radeon.com/rocm/apt/debian/rocm.gpg.key'],
616 apt_repositories=['deb [arch=amd64] http://repo.radeon.com/rocm/apt/debian/ xenial main']
619 if args.cuda is not None and args.llvm is not None:
620 # Hack to tell clang what version of CUDA we're using
621 # based on https://github.com/llvm/llvm-project/blob/1fdec59bffc11ae37eb51a1b9869f0696bfd5312/clang/lib/Driver/ToolChains/Cuda.cpp#L43
622 cuda_version_split = args.cuda.split('.')
623 # LLVM requires having the version in x.y.z format, while args.cuda may be either x.y or x.y.z
624 cuda_version_str = '{}.{}.{}'.format(
625 cuda_version_split[0],
626 cuda_version_split[1],
627 cuda_version_split[2] if len(cuda_version_split) > 2 else 0
629 building_blocks['cuda-clang-workaround'] = hpccm.primitives.shell(commands=[
630 f'echo "CUDA Version {cuda_version_str}" > /usr/local/cuda/version.txt'
633 building_blocks['clfft'] = get_clfft(args)
635 building_blocks['hipSYCL'] = get_hipsycl(args)
637 building_blocks['intel-compute-runtime'] = get_intel_compute_runtime(args)
639 # Add Python environments to MPI images, only, so we don't have to worry
640 # about whether to install mpi4py.
641 if args.mpi is not None and len(args.venvs) > 0:
642 add_python_stages(building_blocks=building_blocks, input_args=args, output_stages=stages)
644 # Create the stage from which the targeted image will be tagged.
645 stages['main'] = hpccm.Stage()
647 stages['main'] += hpccm.primitives.baseimage(image=base_image_tag(args))
648 for bb in building_blocks.values():
652 # We always add Python3 and Pip
653 stages['main'] += hpccm.building_blocks.python(python3=True, python2=False)
655 # Add documentation requirements (doxygen and sphinx + misc).
656 if args.doxygen is not None:
657 add_documentation_dependencies(args, stages)
# Collect the per-version Python environments prepared by add_python_stages.
659 if 'pyenv' in stages and stages['pyenv'] is not None:
660 stages['main'] += hpccm.primitives.copy(_from='pyenv', _mkdir=True, src=['/root/.pyenv/'],
662 stages['main'] += hpccm.primitives.copy(_from='pyenv', _mkdir=True, src=['/root/venv/'],
664 # TODO: Update user home directory.
665 # TODO: If we activate pyenv for login shells, the `global` "version" should be full-featured.
666 # stages['main'] += hpccm.primitives.copy(_from='pyenv', src=['/root/.bashrc'],
669 # Make sure that `python` resolves to something.
670 stages['main'] += hpccm.primitives.shell(commands=['test -x /usr/bin/python || '
671 'update-alternatives --install /usr/bin/python python /usr/bin/python3 1 && '
672 '/usr/bin/python --version'])
674 # Note that the list of stages should be sorted in dependency order.
675 for build_stage in stages.values():
676 if build_stage is not None:
# Script entry point: parse the CLI arguments, select the hpccm output format
# (Dockerfile or Singularity definition), and emit each recipe stage in order.
# NOTE(review): the body that prints each stage runs past the end of this
# view of the file; code below left byte-identical.
680 if __name__ == '__main__':
681 args = parser.parse_args()
683 # Set container specification output format
684 hpccm.config.set_container_format(args.format)
686 container_recipe = build_stages(args)
688 # Output container specification
689 for stage in container_recipe: