# This file is part of the GROMACS molecular simulation package.
#
# Copyright (c) 2020,2021, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
#
# GROMACS is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# GROMACS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with GROMACS; if not, see
# http://www.gnu.org/licenses, or write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# If you want to redistribute modifications to GROMACS, please
# consider that scientific software is very special. Version
# control is crucial - bugs must be traceable. We will be happy to
# consider code for inclusion in the official distribution, but
# derived work must not be called official GROMACS. Details are found
# in the README & COPYING files - if they are missing, get the
# official version at http://www.gromacs.org.
#
# To help us fund GROMACS development, we humbly ask that you cite
# the research papers on the package. Check out http://www.gromacs.org.
36 """Building block based Dockerfile generation for CI testing images.
38 Generates a set of docker images used for running GROMACS CI on Gitlab.
39 The images are prepared according to a selection of build configuration targets
40 that hope to cover a broad enough scope of different possible systems,
41 allowing us to check compiler types and versions, as well as libraries used
42 for accelerators and parallel communication systems. Each combinations is
43 described as an entry in the build_configs dictionary, with the script
44 analysing the logic and adding build stages as needed.
46 Based on the example script provided by the NVidia HPCCM repository.
49 `NVidia HPC Container Maker <https://github.com/NVIDIA/hpc-container-maker>`__
52 * Paul Bauer <paul.bauer.q@gmail.com>
53 * Eric Irrgang <ericirrgang@gmail.com>
54 * Joe Jordan <e.jjordan12@gmail.com>
55 * Mark Abraham <mark.j.abraham@gmail.com>
59 $ python3 scripted_gmx_docker_builds.py --help
60 $ python3 scripted_gmx_docker_builds.py --format docker > Dockerfile && docker build .
61 $ python3 scripted_gmx_docker_builds.py | docker build -
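
A typical CI image build might combine a base distribution, a compiler, and an MPI
flavour (illustrative example only; the accepted option names and values are defined
by utility.parser together with the --format option added below)::

    $ python3 scripted_gmx_docker_builds.py --ubuntu 20.04 --gcc 9 --mpi openmpi | docker build -

"""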

import argparse
import collections
import collections.abc
import typing
from distutils.version import StrictVersion

import hpccm
import hpccm.config
from hpccm.building_blocks.base import bb_base

try:
    import utility
except ImportError:
    raise RuntimeError(
        'This module assumes availability of supporting modules in the same directory. Add the directory to '
        'PYTHONPATH or invoke Python from within the module directory so module location can be resolved.')

# Basic packages for all final images.
_common_packages = ['build-essential',

_opencl_extra_packages = [
    # The following require apt_ppas=['ppa:intel-opencl/intel-opencl']
    'ocl-icd-libopencl1',
    'ocl-icd-opencl-dev',

_rocm_extra_packages = [
    # The following require
    # apt_keys=['http://repo.radeon.com/rocm/rocm.gpg.key'],
    # apt_repositories=['deb [arch=amd64] http://repo.radeon.com/rocm/apt/4.0.1/ xenial main']

# Extra packages needed to build Intel Compute Runtime
_intel_compute_runtime_extra_packages = ['pkg-config',
                                         'ocl-icd-libopencl1',
                                         'ocl-icd-opencl-dev',

# Extra packages needed to build Python installations from source.
_python_extra_packages = ['build-essential',

# Extra packages needed for images used to build the documentation.
_docs_extra_packages = ['autoconf',
                        'texlive-latex-base',
                        'texlive-latex-extra',
                        'texlive-fonts-recommended',
                        'texlive-fonts-extra']

# Parse command line arguments
parser = argparse.ArgumentParser(description='GROMACS CI image creation script',
                                 parents=[utility.parser])

parser.add_argument('--format', type=str, default='docker',
                    choices=['docker', 'singularity'],
                    help='Container specification format (default: docker)')


def base_image_tag(args) -> str:
    # Check if we use CUDA images or plain linux images
    if args.cuda is not None:
        cuda_version_tag = 'nvidia/cuda:' + args.cuda + '-devel'
        if args.centos is not None:
            cuda_version_tag += '-centos' + args.centos
        elif args.ubuntu is not None:
            cuda_version_tag += '-ubuntu' + args.ubuntu
        else:
            raise RuntimeError('Logic error: no Linux distribution selected.')

        base_image_tag = cuda_version_tag
    else:
        if args.centos is not None:
            base_image_tag = 'centos:centos' + args.centos
        elif args.ubuntu is not None:
            base_image_tag = 'ubuntu:' + args.ubuntu
        else:
            raise RuntimeError('Logic error: no Linux distribution selected.')
    return base_image_tag
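
# For illustration, base_image_tag() maps argument combinations to tags such as
#   --cuda 11.0 --ubuntu 20.04  ->  'nvidia/cuda:11.0-devel-ubuntu20.04'
#   --ubuntu 20.04 (no CUDA)    ->  'ubuntu:20.04'
#   --centos 7 (no CUDA)        ->  'centos:centos7'
# (example values only; the supported versions are whatever utility.parser accepts).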


def get_llvm_packages(args) -> typing.Iterable[str]:
    # If we use the package version of LLVM, we need to install extra packages for it.
    if (args.llvm is not None) and (args.tsan is None):
        packages = [f'libomp-{args.llvm}-dev',
                    f'libomp5-{args.llvm}',
                    'clang-format-' + str(args.llvm),
                    'clang-tidy-' + str(args.llvm)]
        if args.hipsycl is not None:
            packages += [f'llvm-{args.llvm}-dev',
                         f'libclang-{args.llvm}-dev',


def get_opencl_packages(args) -> typing.Iterable[str]:
    if (args.doxygen is None) and (args.oneapi is None):
        return _opencl_extra_packages
    else:
        return []


def get_rocm_packages(args) -> typing.Iterable[str]:
    if (args.rocm is None):
        return []
    else:
        return _rocm_extra_packages


def get_compiler(args, compiler_build_stage: hpccm.Stage = None) -> bb_base:
    if args.llvm is not None:
        # Build our own version instead to get TSAN + OMP
        if args.tsan is not None:
            if compiler_build_stage is not None:
                compiler = compiler_build_stage.runtime(_from='tsan')
            else:
                raise RuntimeError('No TSAN compiler build stage!')
        # Build the default compiler if we don't need special support
        else:
            compiler = hpccm.building_blocks.llvm(extra_repository=True, version=args.llvm)

    elif args.oneapi is not None:
        if compiler_build_stage is not None:
            compiler = compiler_build_stage.runtime(_from='oneapi')
            # Prepare the toolchain (needed only for builds done within the Dockerfile, e.g.
            # OpenMPI builds, which don't currently work for other reasons)
            oneapi_toolchain = hpccm.toolchain(CC=f'/opt/intel/oneapi/compiler/{args.oneapi}/linux/bin/intel64/icx',
                                               CXX=f'/opt/intel/oneapi/compiler/{args.oneapi}/linux/bin/intel64/icpx')
            setattr(compiler, 'toolchain', oneapi_toolchain)
        else:
            raise RuntimeError('No oneAPI compiler build stage!')

    elif args.gcc is not None:
        compiler = hpccm.building_blocks.gnu(extra_repository=True,
                                             version=args.gcc)
    else:
        raise RuntimeError('Logic error: no compiler toolchain selected.')
    return compiler
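
# For illustration: with --llvm 11 and no TSAN request, get_compiler() resolves to hpccm's
# `llvm` building block (which installs clang-11 from the configured repositories), whereas
# --tsan switches to the runtime of the custom 'tsan' compiler build stage defined below.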


def get_mpi(args, compiler):
    # If needed, add MPI to the image
    if args.mpi is not None:
        if args.mpi == 'openmpi':
            use_cuda = False
            if args.cuda is not None:
                use_cuda = True

            if hasattr(compiler, 'toolchain'):
                if args.oneapi is not None:
                    raise RuntimeError('oneAPI building OpenMPI is not supported')
                return hpccm.building_blocks.openmpi(toolchain=compiler.toolchain, cuda=use_cuda, infiniband=False)
            else:
                raise RuntimeError('compiler is not an HPCCM compiler building block!')

        elif args.mpi == 'impi':
            # TODO Intel MPI from the oneAPI repo is not working reliably,
            # reasons are unclear. When solved, add packages called:
            # 'intel-oneapi-mpi', 'intel-oneapi-mpi-devel'
            # during the compiler stage.
            # TODO: also consider hpccm's intel_mpi package if that doesn't need a license.
            raise RuntimeError('Intel MPI recipe not implemented yet.')
        else:
            raise RuntimeError('Requested unknown MPI implementation.')
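
# For illustration: with --mpi openmpi and a GCC or LLVM compiler block, get_mpi() returns
# hpccm's `openmpi` building block configured with that compiler's toolchain, enabling
# CUDA-aware MPI only when --cuda was also requested and leaving InfiniBand support off.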


def get_clfft(args):
    if (args.clfft is not None):
        return hpccm.building_blocks.generic_cmake(
            repository='https://github.com/clMathLibraries/clFFT.git',
            prefix='/usr/local', recursive=True, branch=args.clfft, directory='clFFT/src')
    else:
        return None


def get_hipsycl(args):
    if args.hipsycl is None:
        return None
    if args.llvm is None:
        raise RuntimeError('Cannot build hipSYCL without LLVM')

    if args.rocm is None:
        raise RuntimeError('hipSYCL requires the rocm packages')

    cmake_opts = [f'-DLLVM_DIR=/usr/lib/llvm-{args.llvm}/cmake',
                  f'-DCLANG_EXECUTABLE_PATH=/usr/bin/clang++-{args.llvm}',
                  '-DCMAKE_PREFIX_PATH=/opt/rocm/lib/cmake',
                  '-DWITH_ROCM_BACKEND=ON']
    if args.cuda is not None:
        cmake_opts += [f'-DCUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda',
                       '-DWITH_CUDA_BACKEND=ON']

    postinstall = [
        # https://github.com/illuhad/hipSYCL/issues/361#issuecomment-718943645
        'for f in /opt/rocm/amdgcn/bitcode/*.bc; do ln -s "$f" "/opt/rocm/lib/$(basename $f .bc).amdgcn.bc"; done'
    ]
    if args.cuda is not None:
        postinstall += [
            # https://github.com/illuhad/hipSYCL/issues/410#issuecomment-743301929
            f'sed s/_OPENMP/__OPENMP_NVPTX__/ -i /usr/lib/llvm-{args.llvm}/lib/clang/*/include/__clang_cuda_complex_builtins.h',
            # Not needed unless we're building with CUDA 11.x, but there is no harm in always doing it
            f'ln -s /usr/local/cuda/compat/* /usr/local/cuda/lib64/'
        ]

    return hpccm.building_blocks.generic_cmake(
        repository='https://github.com/illuhad/hipSYCL.git',
        directory='/var/tmp/hipSYCL',
        prefix='/usr/local', recursive=True, commit=args.hipsycl,
        cmake_opts=['-DCMAKE_BUILD_TYPE=Release', *cmake_opts],
        postinstall=postinstall)


def get_intel_compute_runtime(args):
    # The only reason we need to build Compute Runtime ourselves is that the Intel packages have no DG1 support.
    # Otherwise, we could have just installed DEB packages from GitHub or the Intel PPA.
    if args.intel_compute_runtime is None:
        return None

    cmake_opts = ['-DCMAKE_BUILD_TYPE=Release',
                  '-DSKIP_UNIT_TESTS=TRUE',
                  '-DSUPPORT_GEN8=0', '-DSUPPORT_GEN9=1', '-DSUPPORT_GEN11=1', '-DSUPPORT_GEN12LP=1', '-DSUPPORT_DG1=1',

    return hpccm.building_blocks.generic_cmake(
        repository='https://github.com/intel/compute-runtime.git',
        directory='compute-runtime',
        prefix='/usr/local', recursive=True, branch=args.intel_compute_runtime,
        cmake_opts=cmake_opts,
        postinstall=['ldconfig'])


def add_tsan_compiler_build_stage(input_args, output_stages: typing.Mapping[str, hpccm.Stage]):
    """Isolate the expensive TSAN preparation stage.

    This is a very expensive stage, but has few and disjoint dependencies, and
    its output is easily compartmentalized (/usr/local) so we can isolate this
    build stage to maximize build cache hits and reduce rebuild time, bookkeeping,
    and final image size.
    """
    if not isinstance(output_stages, collections.abc.MutableMapping):
        raise RuntimeError('Need output_stages container.')
    tsan_stage = hpccm.Stage()
    tsan_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as='tsan')

    tsan_stage += hpccm.building_blocks.packages(ospackages=['git', 'ca-certificates', 'build-essential', 'cmake'])
    # CMake will get duplicated later, but this is an expensive image, and it isn't worth optimizing
    # out that duplication...
    tsan_stage += hpccm.building_blocks.python(python3=True, python2=False, devel=False)

    compiler_branch = 'release/' + str(input_args.llvm) + '.x'
    tsan_stage += hpccm.building_blocks.generic_cmake(
        repository='https://github.com/llvm/llvm-project.git',
        directory='/var/tmp/llvm-project/llvm/',
        prefix='/usr/local', recursive=True, branch=compiler_branch,
        cmake_opts=['-D CMAKE_BUILD_TYPE=Release',
                    '-D LLVM_ENABLE_PROJECTS="clang;openmp;clang-tools-extra;compiler-rt;lld"',
                    '-D LIBOMP_TSAN_SUPPORT=on'],
        postinstall=['ln -s /usr/local/bin/clang++ /usr/local/bin/clang++-' + str(input_args.llvm),
                     'ln -s /usr/local/bin/clang-format /usr/local/bin/clang-format-' + str(input_args.llvm),
                     'ln -s /usr/local/bin/clang-tidy /usr/local/bin/clang-tidy-' + str(input_args.llvm),
                     'ln -s /usr/local/share/clang/run-clang-tidy.py /usr/local/bin/run-clang-tidy-' + str(input_args.llvm) + '.py',
                     'ln -s /usr/local/bin/run-clang-tidy-' + str(input_args.llvm) + '.py /usr/local/bin/run-clang-tidy-' + str(input_args.llvm),
                     'ln -s /usr/local/libexec/c++-analyzer /usr/local/bin/c++-analyzer-' + str(input_args.llvm)])
    output_stages['compiler_build'] = tsan_stage
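
# For illustration: with --llvm 11 and a TSAN build requested, the symlinks created above make
# clang++-11, clang-format-11, clang-tidy-11, and run-clang-tidy-11 resolve to the freshly built
# TSAN-enabled toolchain in /usr/local, mirroring the names that distribution packages provide.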


def oneapi_runtime(_from='0'):
    oneapi_runtime_stage = hpccm.Stage()
    oneapi_runtime_stage += hpccm.primitives.copy(_from='oneapi-build',
                                                  files={"/opt/intel": "/opt/intel",
                                                         "/etc/bash.bashrc": "/etc/bash.bashrc"})
    return oneapi_runtime_stage


def add_oneapi_compiler_build_stage(input_args, output_stages: typing.Mapping[str, hpccm.Stage]):
    """Isolate the oneAPI preparation stage.

    This stage is isolated so that its installed components are minimized in the
    final image (chiefly /opt/intel) and its environment setup script can be
    sourced. This also helps with rebuild time and final image size.
    """
    if not isinstance(output_stages, collections.abc.MutableMapping):
        raise RuntimeError('Need output_stages container.')
    oneapi_stage = hpccm.Stage()
    oneapi_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as='oneapi-build')

    version = str(input_args.oneapi)

    # Add required components for the next stage (both for hpccm and Intel's setvars.sh script)
    oneapi_stage += hpccm.building_blocks.packages(ospackages=['wget', 'gnupg2', 'ca-certificates', 'lsb-release'])
    oneapi_stage += hpccm.building_blocks.packages(
        apt_keys=['https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB'],
        apt_repositories=['deb https://apt.repos.intel.com/oneapi all main'],
        # Add minimal packages (not the whole HPC toolkit!)
        ospackages=[f'intel-oneapi-dpcpp-cpp-{version}',
                    f'intel-oneapi-openmp-{version}',
                    f'intel-oneapi-mkl-{version}',
                    f'intel-oneapi-mkl-devel-{version}'])

    # Ensure that all bash shells on the final container will have access to oneAPI
    oneapi_stage += hpccm.primitives.shell(
        commands=['echo "source /opt/intel/oneapi/setvars.sh" >> /etc/bash.bashrc',
                  'unlink /opt/intel/oneapi/compiler/latest',
                  f'ln -sf /opt/intel/oneapi/compiler/{version} /opt/intel/oneapi/compiler/latest'])

    setattr(oneapi_stage, 'runtime', oneapi_runtime)

    output_stages['compiler_build'] = oneapi_stage


def prepare_venv(version: StrictVersion) -> typing.Sequence[str]:
    """Get shell commands to set up the venv for the requested Python version."""
    major = version.version[0]
    minor = version.version[1]  # type: int

    pyenv = '$HOME/.pyenv/bin/pyenv'

    py_ver = f'{major}.{minor}'
    venv_path = f'$HOME/venv/py{py_ver}'
    commands = [f'$({pyenv} prefix `{pyenv} whence python{py_ver}`)/bin/python -m venv {venv_path}']

    commands.append(f'{venv_path}/bin/python -m pip install --upgrade pip setuptools')
    # Install dependencies for building and testing the gmxapi Python package.
    # WARNING: Please keep this list synchronized with python_packaging/requirements-test.txt
    # TODO: Get requirements.txt from an input argument.
    commands.append(f"""{venv_path}/bin/python -m pip install --upgrade \
        'scikit-build>=0.10' \
        'sphinxcontrib-plantuml>=0.14'""")

    # TODO: Remove 'importlib_resources' dependency when Python >=3.7 is required.
    if minor == 6:
        commands.append(f"""{venv_path}/bin/python -m pip install --upgrade \
            'importlib_resources'""")

    return commands
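
# For illustration: prepare_venv(StrictVersion('3.7.7')) creates $HOME/venv/py3.7 from the
# pyenv-built CPython 3.7 interpreter, upgrades pip and setuptools in it, and then installs
# the gmxapi build/test dependencies listed above.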


def add_python_stages(building_blocks: typing.Mapping[str, bb_base],
                      input_args,
                      output_stages: typing.MutableMapping[str, hpccm.Stage]):
    """Add the stage(s) necessary for the requested venvs.

    One intermediate build stage is created for each venv (see --venv option).

    Each stage partially populates Python installations and venvs in the home
    directory. The home directory is collected by the 'pyenv' stage for use by
    the main build stage.
    """
    if len(input_args.venvs) < 1:
        raise RuntimeError('No venvs to build...')
    if output_stages is None or not isinstance(output_stages, collections.abc.Mapping):
        raise RuntimeError('Need a container for output stages.')

    # Main Python stage that collects the environments from individual stages.
    # We collect the stages individually, rather than chaining them, because the
    # copy is a bit slow and wastes local Docker image space for each filesystem
    # layer.
    pyenv_stage = hpccm.Stage()
    pyenv_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as='pyenv')
    pyenv_stage += building_blocks['compiler']
    pyenv_stage += building_blocks['mpi']
    pyenv_stage += hpccm.building_blocks.packages(ospackages=_python_extra_packages)

    for version in [StrictVersion(py_ver) for py_ver in sorted(input_args.venvs)]:
        stage_name = 'py' + str(version)
        stage = hpccm.Stage()
        stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as=stage_name)
        stage += building_blocks['compiler']
        stage += building_blocks['mpi']
        stage += hpccm.building_blocks.packages(ospackages=_python_extra_packages)

        # TODO: Use a non-root user for testing and Python virtual environments.
        stage += hpccm.primitives.shell(commands=[
            'curl https://pyenv.run | bash',
            """echo 'export PYENV_ROOT="$HOME/.pyenv"' >> $HOME/.bashrc""",
            """echo 'export PATH="$PYENV_ROOT/bin:$PATH"' >> $HOME/.bashrc""",
            """echo 'eval "$(pyenv init -)"' >> $HOME/.bashrc""",
            """echo 'eval "$(pyenv virtualenv-init -)"' >> $HOME/.bashrc"""])

        pyenv = '$HOME/.pyenv/bin/pyenv'
        commands = [f'PYTHON_CONFIGURE_OPTS="--enable-shared" {pyenv} install -s {version}']
        stage += hpccm.primitives.shell(commands=commands)

        commands = prepare_venv(version)
        stage += hpccm.primitives.shell(commands=commands)

        # TODO: Update user home directory.
        pyenv_stage += hpccm.primitives.copy(_from=stage_name, _mkdir=True, src=['/root/'],
                                             dest='/root')

        # Add the intermediate build stage to the sequence
        output_stages[stage_name] = stage

    # TODO: If we activate pyenv for login shells, the `global` "version" should be full-featured.
    # # `version` should be a system installation or pyenv environment (or pyenv-virtualenv)
    # # with the dependencies for all of the Python aspects of CMake-driven builds.
    # commands = '{pyenv} global {version}'.format(
    # pyenv_stage += hpccm.primitives.shell(commands=commands)

    # Add the aggregating build stage to the sequence. This allows the main stage to copy
    # the files in a single stage, potentially reducing the overall output image size.
    output_stages['pyenv'] = pyenv_stage
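
# For illustration: requesting venvs for Python 3.6.10 and 3.7.7 produces intermediate stages
# named 'py3.6.10' and 'py3.7.7' plus the aggregating 'pyenv' stage, whose /root contents the
# main stage later copies in a single step.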


def add_documentation_dependencies(input_args,
                                   output_stages: typing.MutableMapping[str, hpccm.Stage]):
    """Add appropriate layers according to doxygen input arguments."""
    if input_args.doxygen is None:
        return
    # Always clone the same version of linkchecker (latest release at June 1, 2021)
    output_stages['main'] += hpccm.building_blocks.pip(pip='pip3', packages=['git+https://github.com/linkchecker/linkchecker.git@v10.0.1'])
    output_stages['main'] += hpccm.primitives.shell(
        commands=['sed -i \'/\"XPS\"/d;/\"PDF\"/d;/\"PS\"/d;/\"EPS\"/d;/disable ghostscript format types/d\' /etc/ImageMagick-6/policy.xml'])
    if input_args.doxygen == '1.8.5':
        doxygen_commit = 'ed4ed873ab0e7f15116e2052119a6729d4589f7a'
        output_stages['main'] += hpccm.building_blocks.generic_autotools(
            repository='https://github.com/westes/flex.git',
            commit='f7788a9a0ecccdc953ed12043ccb59ca25714018',
            prefix='/tmp/install-of-flex',
            configure_opts=['--disable-shared'],
            preconfigure=['./autogen.sh'])
        output_stages['main'] += hpccm.building_blocks.generic_autotools(
            repository='https://github.com/doxygen/doxygen.git',
            commit=doxygen_commit,
            configure_opts=[
                '--flex /tmp/install-of-flex/bin/flex',
    else:
        version = input_args.doxygen
        archive_name = f'doxygen-{version}.linux.bin.tar.gz'
        archive_url = f'https://sourceforge.net/projects/doxygen/files/rel-{version}/{archive_name}'
        binary_path = f'doxygen-{version}/bin/doxygen'
        commands = [
            'mkdir doxygen && cd doxygen',
            f'wget {archive_url}',
            f'tar xf {archive_name} {binary_path}',
            f'cp {binary_path} /usr/local/bin/',
            'cd .. && rm -rf doxygen'
        ]
        output_stages['main'] += hpccm.primitives.shell(commands=commands)
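
# For illustration: for a doxygen version other than 1.8.5, e.g. 1.8.17, the commands above
# fetch doxygen-1.8.17.linux.bin.tar.gz from the SourceForge rel-1.8.17 folder and copy only
# the doxygen-1.8.17/bin/doxygen binary into /usr/local/bin.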


def build_stages(args) -> typing.Iterable[hpccm.Stage]:
    """Define and sequence the stages for the recipe corresponding to *args*."""

    # A Dockerfile or Singularity recipe can have multiple build stages.
    # The main build stage can copy files from previous stages, though only
    # the last stage is included in the tagged output image. This means that
    # large or expensive sets of build instructions can be isolated in
    # local/temporary images, but all of the stages need to be output by this
    # script, and need to occur in the correct order, so we create a sequence
    # object early in this function.
    stages = collections.OrderedDict()

    # If we need TSAN or oneAPI support, the early build is more complex,
    # so that our compiler images don't have all the cruft needed to get those things installed.
    if args.llvm is not None and args.tsan is not None:
        add_tsan_compiler_build_stage(input_args=args, output_stages=stages)
    if args.oneapi is not None:
        add_oneapi_compiler_build_stage(input_args=args, output_stages=stages)

    # Building blocks are chunks of container-builder instructions that can be
    # copied to any build stage with the addition operator.
    building_blocks = collections.OrderedDict()
    building_blocks['base_packages'] = hpccm.building_blocks.packages(
        ospackages=_common_packages)

    # These are the most expensive and most reusable layers, so we put them first.
    building_blocks['compiler'] = get_compiler(args, compiler_build_stage=stages.get('compiler_build'))
    building_blocks['mpi'] = get_mpi(args, building_blocks['compiler'])
    for i, cmake in enumerate(args.cmake):
        building_blocks['cmake' + str(i)] = hpccm.building_blocks.cmake(
            prefix=f'/usr/local/cmake-{cmake}',
            version=cmake)

    # Install additional packages early in the build to optimize Docker build layer cache.
    os_packages = list(get_llvm_packages(args)) + get_opencl_packages(args) + get_rocm_packages(args)
    if args.doxygen is not None:
        os_packages += _docs_extra_packages
    if args.oneapi is not None:
        os_packages += ['lsb-release']
    if args.hipsycl is not None:
        os_packages += ['libboost-fiber-dev']
    if args.intel_compute_runtime is not None:
        os_packages += _intel_compute_runtime_extra_packages
    building_blocks['extra_packages'] = []
    if args.rocm is not None:
        building_blocks['extra_packages'] += hpccm.building_blocks.packages(
            apt_keys=['http://repo.radeon.com/rocm/rocm.gpg.key'],
            apt_repositories=[f'deb [arch=amd64] http://repo.radeon.com/rocm/apt/{args.rocm}/ xenial main'])
    building_blocks['extra_packages'] += hpccm.building_blocks.packages(
        ospackages=os_packages,
        apt_ppas=['ppa:intel-opencl/intel-opencl'])

    if args.cuda is not None and args.llvm is not None:
        # Hack to tell clang what version of CUDA we're using,
        # based on https://github.com/llvm/llvm-project/blob/1fdec59bffc11ae37eb51a1b9869f0696bfd5312/clang/lib/Driver/ToolChains/Cuda.cpp#L43
        cuda_version_split = args.cuda.split('.')
        # LLVM requires the version in x.y.z format, while args.cuda may be either x.y or x.y.z
        cuda_version_str = '{}.{}.{}'.format(
            cuda_version_split[0],
            cuda_version_split[1],
            cuda_version_split[2] if len(cuda_version_split) > 2 else 0)
        building_blocks['cuda-clang-workaround'] = hpccm.primitives.shell(commands=[
            f'echo "CUDA Version {cuda_version_str}" > /usr/local/cuda/version.txt'])

    building_blocks['clfft'] = get_clfft(args)

    building_blocks['hipSYCL'] = get_hipsycl(args)

    building_blocks['intel-compute-runtime'] = get_intel_compute_runtime(args)

    # Add Python environments only to MPI images, so we don't have to worry
    # about whether to install mpi4py.
    if args.mpi is not None and len(args.venvs) > 0:
        add_python_stages(building_blocks=building_blocks, input_args=args, output_stages=stages)

    # Create the stage from which the targeted image will be tagged.
    stages['main'] = hpccm.Stage()

    stages['main'] += hpccm.primitives.baseimage(image=base_image_tag(args))
    for bb in building_blocks.values():
        if bb is not None:
            stages['main'] += bb

    # We always add Python3 and Pip
    stages['main'] += hpccm.building_blocks.python(python3=True, python2=False)

    # Add documentation requirements (doxygen and sphinx + misc).
    if args.doxygen is not None:
        add_documentation_dependencies(args, stages)

    if 'pyenv' in stages and stages['pyenv'] is not None:
        stages['main'] += hpccm.primitives.copy(_from='pyenv', _mkdir=True, src=['/root/.pyenv/'],
                                                dest='/root/.pyenv')
        stages['main'] += hpccm.primitives.copy(_from='pyenv', _mkdir=True, src=['/root/venv/'],
                                                dest='/root/venv')
        # TODO: Update user home directory.
        # TODO: If we activate pyenv for login shells, the `global` "version" should be full-featured.
        # stages['main'] += hpccm.primitives.copy(_from='pyenv', src=['/root/.bashrc'],
        #                                         dest='/root/')

    # Make sure that `python` resolves to something.
    stages['main'] += hpccm.primitives.shell(commands=['test -x /usr/bin/python || '
                                                       'update-alternatives --install /usr/bin/python python /usr/bin/python3 1 && '
                                                       '/usr/bin/python --version'])

    # Note that the list of stages should be sorted in dependency order.
    for build_stage in stages.values():
        if build_stage is not None:
            yield build_stage


if __name__ == '__main__':
    args = parser.parse_args()

    # Set container specification output format
    hpccm.config.set_container_format(args.format)

    container_recipe = build_stages(args)

    # Output container specification
    for stage in container_recipe:
        print(stage)