3 # This file is part of the GROMACS molecular simulation package.
5 # Copyright (c) 2020,2021, by the GROMACS development team, led by
6 # Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
7 # and including many others, as listed in the AUTHORS file in the
8 # top-level source directory and at http://www.gromacs.org.
10 # GROMACS is free software; you can redistribute it and/or
11 # modify it under the terms of the GNU Lesser General Public License
12 # as published by the Free Software Foundation; either version 2.1
13 # of the License, or (at your option) any later version.
15 # GROMACS is distributed in the hope that it will be useful,
16 # but WITHOUT ANY WARRANTY; without even the implied warranty of
17 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 # Lesser General Public License for more details.
20 # You should have received a copy of the GNU Lesser General Public
21 # License along with GROMACS; if not, see
22 # http://www.gnu.org/licenses, or write to the Free Software Foundation,
23 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
25 # If you want to redistribute modifications to GROMACS, please
26 # consider that scientific software is very special. Version
27 # control is crucial - bugs must be traceable. We will be happy to
28 # consider code for inclusion in the official distribution, but
29 # derived work must not be called official GROMACS. Details are found
30 # in the README & COPYING files - if they are missing, get the
31 # official version at http://www.gromacs.org.
33 # To help us fund GROMACS development, we humbly ask that you cite
34 # the research papers on the package. Check out http://www.gromacs.org.
36 """Building block based Dockerfile generation for CI testing images.
38 Generates a set of docker images used for running GROMACS CI on Gitlab.
39 The images are prepared according to a selection of build configuration targets
40 that hope to cover a broad enough scope of different possible systems,
41 allowing us to check compiler types and versions, as well as libraries used
42 for accelerators and parallel communication systems. Each combination is
43 described as an entry in the build_configs dictionary, with the script
44 analysing the logic and adding build stages as needed.
46 Based on the example script provided by the NVidia HPCCM repository.
49 `NVidia HPC Container Maker <https://github.com/NVIDIA/hpc-container-maker>`__
52 * Paul Bauer <paul.bauer.q@gmail.com>
53 * Eric Irrgang <ericirrgang@gmail.com>
54 * Joe Jordan <e.jjordan12@gmail.com>
55 * Mark Abraham <mark.j.abraham@gmail.com>
56 * Gaurav Garg <gaugarg@nvidia.com>
60 $ python3 scripted_gmx_docker_builds.py --help
61 $ python3 scripted_gmx_docker_builds.py --format docker > Dockerfile && docker build .
62 $ python3 scripted_gmx_docker_builds.py | docker build -
72 from distutils.version import StrictVersion
76 from hpccm.building_blocks.base import bb_base
82 'This module assumes availability of supporting modules in the same directory. Add the directory to '
83 'PYTHONPATH or invoke Python from within the module directory so module location can be resolved.')
85 # Basic packages for all final images.
86 _common_packages = ['build-essential',
# OpenCL runtime/ICD packages; returned by get_opencl_packages() below when OpenCL support is wanted.
104 _opencl_extra_packages = [
106     # The following require apt_ppas=['ppa:intel-opencl/intel-opencl']
108     'ocl-icd-libopencl1',
109     'ocl-icd-opencl-dev',
# ROCm user-space packages; returned by get_rocm_packages() below for --rocm builds.
113 _rocm_extra_packages = [
114     # The following require
115     # apt_keys=['http://repo.radeon.com/rocm/rocm.gpg.key'],
116     # apt_repositories=['deb [arch=amd64] http://repo.radeon.com/rocm/apt/4.0.1/ xenial main']
126 # Extra packages needed to build Intel Compute Runtime
127 _intel_compute_runtime_extra_packages = ['pkg-config',
136                                          'ocl-icd-libopencl1',
137                                          'ocl-icd-opencl-dev',
140 # Extra packages needed to build Python installations from source.
141 _python_extra_packages = ['build-essential',
160 # Extra packages needed for images for building documentation.
161 _docs_extra_packages = ['autoconf',
176                         'texlive-latex-base',
177                         'texlive-latex-extra',
178                         'texlive-fonts-recommended',
179                         'texlive-fonts-extra',
182 # Parse command line arguments
# NOTE(review): utility.parser presumably supplies the shared CI options
# (--llvm, --cuda, --ubuntu, --centos, --mpi, ...) that the functions below
# read from args — confirm against the utility module.
183 parser = argparse.ArgumentParser(description='GROMACS CI image creation script',
184 parents=[utility.parser])
186 parser.add_argument('--format', type=str, default='docker',
187                     choices=['docker', 'singularity'],
188                     help='Container specification format (default: docker)')
191 def base_image_tag(args) -> str:
    """Return the container base image tag for the selected distro (plus CUDA).

    With --cuda, an 'nvidia/cuda:<ver>-devel' image suffixed with the distro is
    used; otherwise a plain 'centos:' or 'ubuntu:' image.

    Raises RuntimeError when neither a CentOS nor an Ubuntu distribution was
    selected.
    """
192 # Check if we use CUDA images or plain linux images
193 if args.cuda is not None:
194 cuda_version_tag = 'nvidia/cuda:' + args.cuda + '-devel'
195 if args.centos is not None:
196 cuda_version_tag += '-centos' + args.centos
197 elif args.ubuntu is not None:
198 cuda_version_tag += '-ubuntu' + args.ubuntu
200 raise RuntimeError('Logic error: no Linux distribution selected.')
202 base_image_tag = cuda_version_tag
204 if args.centos is not None:
205 base_image_tag = 'centos:centos' + args.centos
206 elif args.ubuntu is not None:
207 base_image_tag = 'ubuntu:' + args.ubuntu
209 raise RuntimeError('Logic error: no Linux distribution selected.')
210 return base_image_tag
212 # Convert the linux distribution variables into something that hpccm understands
# (consumed by hpccm.config.set_linux_distro() in build_stages below).
214 def hpccm_distro_name(args) -> str:
    """Map the CLI distro selection to hpccm's distribution name.

    Raises RuntimeError for unsupported CentOS/Ubuntu versions or when no
    distribution was selected.
    """
215 if args.centos is not None:
216 name_mapping = { '7': 'centos7',
218 if args.centos in name_mapping:
219 hpccm_name = name_mapping[args.centos]
221 raise RuntimeError('Logic error: unsupported CentOS distribution selected.')
222 elif args.ubuntu is not None:
223 name_mapping = { '20.04': 'ubuntu20',
225                  '16.04': 'ubuntu16' }
226 if args.ubuntu in name_mapping:
227 hpccm_name = name_mapping[args.ubuntu]
229 raise RuntimeError('Logic error: unsupported Ubuntu distribution selected.')
231 raise RuntimeError('Logic error: no Linux distribution selected.')
234 def get_llvm_packages(args) -> typing.Iterable[str]:
    """Return the OS packages needed alongside an apt-provided LLVM toolchain.

    TSAN configurations compile their own LLVM from source (see
    add_tsan_compiler_build_stage), so no apt packages are requested there.
    hipSYCL builds additionally need the LLVM/libclang development packages.
    """
235 # If we use the package version of LLVM, we need to install extra packages for it.
236 if (args.llvm is not None) and (args.tsan is None):
237 packages = [f'libomp-{args.llvm}-dev',
238 f'libomp5-{args.llvm}',
239 'clang-format-' + str(args.llvm),
240 'clang-tidy-' + str(args.llvm)]
241 if args.hipsycl is not None:
242 packages += [f'llvm-{args.llvm}-dev',
243 f'libclang-{args.llvm}-dev',
249 def get_opencl_packages(args) -> typing.Iterable[str]:
    """Return OpenCL ICD/dev packages, except for doxygen or oneAPI builds."""
250 if (args.doxygen is None) and (args.oneapi is None):
251 return _opencl_extra_packages
255 def get_rocm_packages(args) -> typing.Iterable[str]:
    """Return the ROCm OS packages when --rocm was requested."""
256 if (args.rocm is None):
259 return _rocm_extra_packages
261 def get_compiler(args, compiler_build_stage: hpccm.Stage = None) -> bb_base:
    """Return the hpccm building block for the selected compiler toolchain.

    LLVM, oneAPI and GCC are supported. For TSAN and oneAPI configurations the
    compiler is imported from the runtime() of a previously prepared
    *compiler_build_stage*; RuntimeError is raised when that stage is missing
    or no toolchain was selected.
    """
263 if args.llvm is not None:
264 # Build our own version instead to get TSAN + OMP
265 if args.tsan is not None:
266 if compiler_build_stage is not None:
267 compiler = compiler_build_stage.runtime(_from='tsan')
269 raise RuntimeError('No TSAN compiler build stage!')
270 # Build the default compiler if we don't need special support
272 # Currently the focal apt repositories do not contain
273 # llvm higher than 11, so we work around that. This will
274 # need further work when we start supporting ubuntu 22.04
275 compiler = hpccm.building_blocks.llvm(version=args.llvm, upstream=True if int(args.llvm) > 11 else False)
277 elif args.oneapi is not None:
278 if compiler_build_stage is not None:
279 compiler = compiler_build_stage.runtime(_from='oneapi')
280 # Prepare the toolchain (needed only for builds done within the Dockerfile, e.g.
281 # OpenMPI builds, which don't currently work for other reasons)
282 oneapi_toolchain = hpccm.toolchain(CC=f'/opt/intel/oneapi/compiler/{args.oneapi}/linux/bin/intel64/icx',
283 CXX=f'/opt/intel/oneapi/compiler/{args.oneapi}/linux/bin/intel64/icpx')
284 setattr(compiler, 'toolchain', oneapi_toolchain)
287 raise RuntimeError('No oneAPI compiler build stage!')
289 elif args.gcc is not None:
290 compiler = hpccm.building_blocks.gnu(extra_repository=True,
294 raise RuntimeError('Logic error: no compiler toolchain selected.')
298 def get_gdrcopy(args, compiler):
    """Return a gdrcopy building block for CUDA builds (built with the
    compiler's toolchain); raises if *compiler* exposes no toolchain."""
299 if args.cuda is not None:
300 if hasattr(compiler, 'toolchain'):
301 # Version last updated June 7, 2021
302 return hpccm.building_blocks.gdrcopy(toolchain=compiler.toolchain, version="2.2")
304 raise RuntimeError('compiler is not an HPCCM compiler building block!')
308 def get_ucx(args, compiler, gdrcopy):
    """Return a CUDA-enabled UCX building block for CUDA builds; gdrcopy
    support is switched on when a gdrcopy block was created."""
309 if args.cuda is not None:
310 if hasattr(compiler, 'toolchain'):
311 use_gdrcopy = (gdrcopy is not None)
312 # Version last updated June 7, 2021
313 return hpccm.building_blocks.ucx(toolchain=compiler.toolchain, gdrcopy=use_gdrcopy, version="1.10.1", cuda=True)
315 raise RuntimeError('compiler is not an HPCCM compiler building block!')
319 def get_mpi(args, compiler, ucx):
    """Return an MPI building block for --mpi=openmpi (UCX/CUDA aware).

    Raises RuntimeError for oneAPI+OpenMPI, for a compiler without a
    toolchain, for the not-yet-implemented Intel MPI recipe, and for unknown
    MPI implementations.
    """
320 # If needed, add MPI to the image
321 if args.mpi is not None:
322 if args.mpi == 'openmpi':
323 if hasattr(compiler, 'toolchain'):
324 if args.oneapi is not None:
325 raise RuntimeError('oneAPI building OpenMPI is not supported')
326 use_cuda = (args.cuda is not None)
327 use_ucx = (ucx is not None)
328 # Version last updated June 7, 2021
329 return hpccm.building_blocks.openmpi(toolchain=compiler.toolchain, version="4.1.1", cuda=use_cuda, ucx=use_ucx, infiniband=False)
331 raise RuntimeError('compiler is not an HPCCM compiler building block!')
333 elif args.mpi == 'impi':
334 # TODO Intel MPI from the oneAPI repo is not working reliably,
335 # reasons are unclear. When solved, add packages called:
336 # 'intel-oneapi-mpi', 'intel-oneapi-mpi-devel'
337 # during the compiler stage.
338 # TODO also consider hpccm's intel_mpi package if that doesn't need
340 raise RuntimeError('Intel MPI recipe not implemented yet.')
342 raise RuntimeError('Requested unknown MPI implementation.')
# NOTE(review): this appears to be the body of get_clfft() (its def line is not
# visible in this listing; build_stages() below calls get_clfft(args)).
# Builds clFFT from the requested git branch when --clfft is given.
348 if (args.clfft is not None):
349 return hpccm.building_blocks.generic_cmake(
350 repository='https://github.com/clMathLibraries/clFFT.git',
351 prefix='/usr/local', recursive=True, branch=args.clfft, directory='clFFT/src')
355 def get_heffte(args):
    """Build heFFTe (CUDA backend on, FFTW off) at the requested commit."""
356 if (args.heffte is not None):
357 return hpccm.building_blocks.generic_cmake(
358 cmake_opts=['-D CMAKE_BUILD_TYPE=Release',
359 '-D CUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda',
360 '-D Heffte_ENABLE_CUDA=ON',
361 '-D Heffte_ENABLE_FFTW=OFF',
362 '-D BUILD_SHARED_LIBS=ON'],
363 repository='https://bitbucket.org/icl/heffte.git',
364 prefix='/usr/local', recursive=True, commit=args.heffte, directory='heffte')
368 def get_hipsycl(args):
    """Build hipSYCL against the ROCm backend (and the CUDA backend when
    --cuda is also given). Requires both --llvm and --rocm; raises otherwise.
    """
369 if args.hipsycl is None:
371 if args.llvm is None:
372 raise RuntimeError('Can not build hipSYCL without llvm')
374 if args.rocm is None:
375 raise RuntimeError('hipSYCL requires the rocm packages')
377 cmake_opts = [f'-DLLVM_DIR=/opt/rocm/llvm/lib/cmake/llvm',
378 '-DCMAKE_PREFIX_PATH=/opt/rocm/lib/cmake',
379 '-DWITH_ROCM_BACKEND=ON']
380 if args.cuda is not None:
381 cmake_opts += [f'-DCUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda',
382 '-DWITH_CUDA_BACKEND=ON']
# Post-install shell commands work around known hipSYCL packaging issues
# (see the linked upstream issues below).
385 # https://github.com/illuhad/hipSYCL/issues/361#issuecomment-718943645
386 'for f in /opt/rocm/amdgcn/bitcode/*.bc; do ln -s "$f" "/opt/rocm/lib/$(basename $f .bc).amdgcn.bc"; done'
388 if args.cuda is not None:
390 # https://github.com/illuhad/hipSYCL/issues/410#issuecomment-743301929
391 f'sed s/_OPENMP/__OPENMP_NVPTX__/ -i /usr/lib/llvm-{args.llvm}/lib/clang/*/include/__clang_cuda_complex_builtins.h',
392 # Not needed unless we're building with CUDA 11.x, but no harm in doing always
393 f'ln -s /usr/local/cuda/compat/* /usr/local/cuda/lib64/'
396 return hpccm.building_blocks.generic_cmake(
397 repository='https://github.com/illuhad/hipSYCL.git',
398 directory='/var/tmp/hipSYCL',
399 prefix='/usr/local', recursive=True, commit=args.hipsycl,
400 cmake_opts=['-DCMAKE_BUILD_TYPE=Release', *cmake_opts],
401 postinstall=postinstall)
403 def get_intel_compute_runtime(args):
    """Build the Intel Compute Runtime from source at the requested branch."""
404 # The only reason we need to build Compute Runtime ourselves is because Intel packages have no DG1 support
405 # Otherwise, we could have just installed DEB packages from GitHub or Intel PPA
406 if args.intel_compute_runtime is None:
409 cmake_opts = ['-DCMAKE_BUILD_TYPE=Release',
410 '-DSKIP_UNIT_TESTS=TRUE',
411 '-DSUPPORT_GEN8=0', '-DSUPPORT_GEN9=1', '-DSUPPORT_GEN11=1', '-DSUPPORT_GEN12LP=1', '-DSUPPORT_DG1=1',
414 return hpccm.building_blocks.generic_cmake(
415 repository='https://github.com/intel/compute-runtime.git',
416 directory='compute-runtime',
417 prefix='/usr/local', recursive=True, branch=args.intel_compute_runtime,
418 cmake_opts=cmake_opts,
419 postinstall=['ldconfig'])
421 def add_tsan_compiler_build_stage(input_args, output_stages: typing.Mapping[str, hpccm.Stage]):
422 """Isolate the expensive TSAN preparation stage.
424 This is a very expensive stage, but has few and disjoint dependencies, and
425 its output is easily compartmentalized (/usr/local) so we can isolate this
426 build stage to maximize build cache hits and reduce rebuild time, bookkeeping,
427 and final image size.
    """
429 if not isinstance(output_stages, collections.abc.MutableMapping):
430 raise RuntimeError('Need output_stages container.')
431 tsan_stage = hpccm.Stage()
432 tsan_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as='tsan')
434 tsan_stage += hpccm.building_blocks.packages(ospackages=['git', 'ca-certificates', 'build-essential', 'cmake'])
435 # CMake will get duplicated later, but this is an expensive image, and it isn't worth optimizing
436 # out that duplication...
437 tsan_stage += hpccm.building_blocks.python(python3=True, python2=False, devel=False)
439 compiler_branch = 'release/' + str(input_args.llvm) + '.x'
# Build clang/openmp/clang-tools from source with TSAN-enabled libomp, then
# symlink the versionless tools to the versioned names the CI expects.
440 tsan_stage += hpccm.building_blocks.generic_cmake(
441 repository='https://github.com/llvm/llvm-project.git',
442 directory='/var/tmp/llvm-project/llvm/',
443 prefix='/usr/local', recursive=True, branch=compiler_branch,
444 cmake_opts=['-D CMAKE_BUILD_TYPE=Release', '-D LLVM_ENABLE_PROJECTS="clang;openmp;clang-tools-extra;compiler-rt;lld"',
445 '-D LIBOMP_TSAN_SUPPORT=on'],
446 postinstall=['ln -s /usr/local/bin/clang++ /usr/local/bin/clang++-' + str(input_args.llvm),
447 'ln -s /usr/local/bin/clang-format /usr/local/bin/clang-format-' + str(input_args.llvm),
448 'ln -s /usr/local/bin/clang-tidy /usr/local/bin/clang-tidy-' + str(input_args.llvm),
449 'ln -s /usr/local/share/clang/run-clang-tidy.py /usr/local/bin/run-clang-tidy-' + str(input_args.llvm) + '.py',
450 'ln -s /usr/local/bin/run-clang-tidy-' + str(input_args.llvm) + '.py /usr/local/bin/run-clang-tidy-' + str(input_args.llvm),
451 'ln -s /usr/local/libexec/c++-analyzer /usr/local/bin/c++-analyzer-' + str(input_args.llvm)])
452 output_stages['compiler_build'] = tsan_stage
def oneapi_runtime(_from='0'):
    """Build a runtime stage carrying the oneAPI installation forward.

    Returns a fresh hpccm.Stage that copies the oneAPI toolchain
    (/opt/intel) and the bash startup file from the 'oneapi-build' stage.
    The *_from* parameter is accepted for interface compatibility with
    hpccm runtime() callables but is not used.
    """
    stage = hpccm.Stage()
    payload = {"/opt/intel": "/opt/intel",
               "/etc/bash.bashrc": "/etc/bash.bashrc"}
    stage += hpccm.primitives.copy(_from='oneapi-build', files=payload)
    return stage
461 def add_oneapi_compiler_build_stage(input_args, output_stages: typing.Mapping[str, hpccm.Stage]):
462 """Isolate the oneAPI preparation stage.
464 This stage is isolated so that its installed components are minimized in the
465 final image (chiefly /opt/intel) and its environment setup script can be
466 sourced. This also helps with rebuild time and final image size.
    """
468 if not isinstance(output_stages, collections.abc.MutableMapping):
469 raise RuntimeError('Need output_stages container.')
470 oneapi_stage = hpccm.Stage()
471 oneapi_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as='oneapi-build')
473 version = str(input_args.oneapi)
475 # Add required components for the next stage (both for hpccm and Intel's setvars.sh script)
476 oneapi_stage += hpccm.building_blocks.packages(ospackages=['wget', 'gnupg2', 'ca-certificates', 'lsb-release'])
477 oneapi_stage += hpccm.building_blocks.packages(
478 apt_keys=['https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB'],
479 apt_repositories=['deb https://apt.repos.intel.com/oneapi all main'],
480 # Add minimal packages (not the whole HPC toolkit!)
481 ospackages=[f'intel-oneapi-dpcpp-cpp-{version}',
482 f'intel-oneapi-openmp-{version}',
483 f'intel-oneapi-mkl-{version}',
484 f'intel-oneapi-mkl-devel-{version}']
486 # Ensure that all bash shells on the final container will have access to oneAPI
487 oneapi_stage += hpccm.primitives.shell(
488 commands=['echo "source /opt/intel/oneapi/setvars.sh" >> /etc/bash.bashrc',
489 'unlink /opt/intel/oneapi/compiler/latest',
490 f'ln -sf /opt/intel/oneapi/compiler/{version} /opt/intel/oneapi/compiler/latest']
# Expose a runtime() callable so get_compiler() can import the toolchain later.
492 setattr(oneapi_stage, 'runtime', oneapi_runtime)
494 output_stages['compiler_build'] = oneapi_stage
496 def prepare_venv(version: StrictVersion) -> typing.Sequence[str]:
497 """Get shell commands to set up the venv for the requested Python version."""
# NOTE(review): distutils (StrictVersion) is deprecated since Python 3.10 and
# removed in 3.12 — consider migrating to packaging.version; TODO confirm.
498 major = version.version[0]
499 minor = version.version[1] # type: int
501 pyenv = '$HOME/.pyenv/bin/pyenv'
503 py_ver = f'{major}.{minor}'
504 venv_path = f'$HOME/venv/py{py_ver}'
# Create the venv from the pyenv-managed interpreter of the same version.
505 commands = [f'$({pyenv} prefix `{pyenv} whence python{py_ver}`)/bin/python -m venv {venv_path}']
507 commands.append(f'{venv_path}/bin/python -m pip install --upgrade pip setuptools')
508 # Install dependencies for building and testing gmxapi Python package.
509 # WARNING: Please keep this list synchronized with python_packaging/requirements-test.txt
510 # TODO: Get requirements.txt from an input argument.
511 commands.append(f"""{venv_path}/bin/python -m pip install --upgrade \
524 'sphinxcontrib-plantuml>=0.14'""")
526 # TODO: Remove 'importlib_resources' dependency when Python >=3.7 is required.
528 commands.append(f"""{venv_path}/bin/python -m pip install --upgrade \
529 'importlib_resources'""")
534 def add_python_stages(building_blocks: typing.Mapping[str, bb_base],
536                       output_stages: typing.MutableMapping[str, hpccm.Stage]):
537 """Add the stage(s) necessary for the requested venvs.
539 One intermediate build stage is created for each venv (see --venv option).
541 Each stage partially populates Python installations and venvs in the home
542 directory. The home directory is collected by the 'pyenv' stage for use by
543 the main build stage.
    """
545 if len(input_args.venvs) < 1:
546 raise RuntimeError('No venvs to build...')
547 if output_stages is None or not isinstance(output_stages, collections.abc.Mapping):
548 raise RuntimeError('Need a container for output stages.')
550 # Main Python stage that collects the environments from individual stages.
551 # We collect the stages individually, rather than chaining them, because the
552 # copy is a bit slow and wastes local Docker image space for each filesystem
554 pyenv_stage = hpccm.Stage()
555 pyenv_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as='pyenv')
556 pyenv_stage += building_blocks['compiler']
557 if building_blocks['gdrcopy'] is not None:
558 pyenv_stage += building_blocks['gdrcopy']
559 if building_blocks['ucx'] is not None:
560 pyenv_stage += building_blocks['ucx']
561 pyenv_stage += building_blocks['mpi']
562 pyenv_stage += hpccm.building_blocks.packages(ospackages=_python_extra_packages)
# One intermediate stage per requested Python version, built with pyenv.
564 for version in [StrictVersion(py_ver) for py_ver in sorted(input_args.venvs)]:
565 stage_name = 'py' + str(version)
566 stage = hpccm.Stage()
567 stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as=stage_name)
568 stage += building_blocks['compiler']
569 if building_blocks['gdrcopy'] is not None:
570 stage += building_blocks['gdrcopy']
571 if building_blocks['ucx'] is not None:
572 stage += building_blocks['ucx']
573 stage += building_blocks['mpi']
574 stage += hpccm.building_blocks.packages(ospackages=_python_extra_packages)
576 # TODO: Use a non-root user for testing and Python virtual environments.
577 stage += hpccm.primitives.shell(commands=[
578 'curl https://pyenv.run | bash',
579 """echo 'export PYENV_ROOT="$HOME/.pyenv"' >> $HOME/.bashrc""",
580 """echo 'export PATH="$PYENV_ROOT/bin:$PATH"' >> $HOME/.bashrc""",
581 """echo 'eval "$(pyenv init -)"' >> $HOME/.bashrc""",
582 """echo 'eval "$(pyenv virtualenv-init -)"' >> $HOME/.bashrc"""])
583 pyenv = '$HOME/.pyenv/bin/pyenv'
584 commands = [f'PYTHON_CONFIGURE_OPTS="--enable-shared" {pyenv} install -s {version}']
585 stage += hpccm.primitives.shell(commands=commands)
587 commands = prepare_venv(version)
588 stage += hpccm.primitives.shell(commands=commands)
590 # TODO: Update user home directory.
591 pyenv_stage += hpccm.primitives.copy(_from=stage_name, _mkdir=True, src=['/root/'],
594 # Add the intermediate build stage to the sequence
595 output_stages[stage_name] = stage
597 # TODO: If we activate pyenv for login shells, the `global` "version" should be full-featured.
598 # # `version` should be a system installation or pyenv environment (or pyenv-virtualenv)
599 # # with the dependencies for all of the Python aspects of CMake-driven builds.
600 # commands = '{pyenv} global {version}'.format(
603 # pyenv_stage += hpccm.primitives.shell(commands=commands)
605 # Add the aggregating build stage to the sequence. This allows the main stage to copy
606 # the files in a single stage, potentially reducing the overall output image size.
607 output_stages['pyenv'] = pyenv_stage
610 def add_documentation_dependencies(input_args,
611                                    output_stages: typing.MutableMapping[str, hpccm.Stage]):
612 """Add appropriate layers according to doxygen input arguments."""
613 if input_args.doxygen is None:
615 # Always clone the same version of linkchecker (latest release at June 1, 2021)
616 output_stages['main'] += hpccm.building_blocks.pip(pip='pip3', packages=['git+https://github.com/linkchecker/linkchecker.git@v10.0.1'])
# Relax ImageMagick's security policy so the docs build can convert PS/PDF/EPS.
617 output_stages['main'] += hpccm.primitives.shell(
618 commands=['sed -i \'/\"XPS\"/d;/\"PDF\"/d;/\"PS\"/d;/\"EPS\"/d;/disable ghostscript format types/d\' /etc/ImageMagick-6/policy.xml'])
# Doxygen 1.8.5 is built from a pinned source commit (needs a pinned flex, too);
# newer versions are installed from the upstream binary archive below.
619 if input_args.doxygen == '1.8.5':
620 doxygen_commit = 'ed4ed873ab0e7f15116e2052119a6729d4589f7a'
621 output_stages['main'] += hpccm.building_blocks.generic_autotools(
622 repository='https://github.com/westes/flex.git',
623 commit='f7788a9a0ecccdc953ed12043ccb59ca25714018',
624 prefix='/tmp/install-of-flex',
625 configure_opts=['--disable-shared'],
626 preconfigure=['./autogen.sh'])
627 output_stages['main'] += hpccm.building_blocks.generic_autotools(
628 repository='https://github.com/doxygen/doxygen.git',
629 commit=doxygen_commit,
632 '--flex /tmp/install-of-flex/bin/flex',
635 version = input_args.doxygen
636 archive_name = f'doxygen-{version}.linux.bin.tar.gz'
637 archive_url = f'https://sourceforge.net/projects/doxygen/files/rel-{version}/{archive_name}'
638 binary_path = f'doxygen-{version}/bin/doxygen'
640 'mkdir doxygen && cd doxygen',
641 f'wget {archive_url}',
642 f'tar xf {archive_name} {binary_path}',
643 f'cp {binary_path} /usr/local/bin/',
644 'cd .. && rm -rf doxygen'
646 output_stages['main'] += hpccm.primitives.shell(commands=commands)
649 def build_stages(args) -> typing.Iterable[hpccm.Stage]:
650 """Define and sequence the stages for the recipe corresponding to *args*."""
652 # A Dockerfile or Singularity recipe can have multiple build stages.
653 # The main build stage can copy files from previous stages, though only
654 # the last stage is included in the tagged output image. This means that
655 # large or expensive sets of build instructions can be isolated in
656 # local/temporary images, but all of the stages need to be output by this
657 # script, and need to occur in the correct order, so we create a sequence
658 # object early in this function.
659 stages = collections.OrderedDict()
661 # If we need TSAN or oneAPI support the early build is more complex,
662 # so that our compiler images don't have all the cruft needed to get those things
664 if args.llvm is not None and args.tsan is not None:
665 add_tsan_compiler_build_stage(input_args=args, output_stages=stages)
666 if args.oneapi is not None:
667 add_oneapi_compiler_build_stage(input_args=args, output_stages=stages)
669 # Building blocks are chunks of container-builder instructions that can be
670 # copied to any build stage with the addition operator.
671 building_blocks = collections.OrderedDict()
672 building_blocks['base_packages'] = hpccm.building_blocks.packages(
673 ospackages=_common_packages)
675 # Normally in hpccm the first call to baseimage sets the context
676 # for other packages, e.g. for which apt repository to
677 # use. We want to set that early on.
678 hpccm.config.set_linux_distro(hpccm_distro_name(args))
680 # These are the most expensive and most reusable layers, so we put them first.
681 building_blocks['compiler'] = get_compiler(args, compiler_build_stage=stages.get('compiler_build'))
682 building_blocks['gdrcopy'] = get_gdrcopy(args, building_blocks['compiler'])
683 building_blocks['ucx'] = get_ucx(args, building_blocks['compiler'], building_blocks['gdrcopy'])
684 building_blocks['mpi'] = get_mpi(args, building_blocks['compiler'], building_blocks['ucx'])
# Each requested CMake version gets its own versioned install prefix.
685 for i, cmake in enumerate(args.cmake):
686 building_blocks['cmake' + str(i)] = hpccm.building_blocks.cmake(
688 prefix=f'/usr/local/cmake-{cmake}',
691 # Install additional packages early in the build to optimize Docker build layer cache.
692 os_packages = list(get_llvm_packages(args)) + get_opencl_packages(args) + get_rocm_packages(args)
693 if args.doxygen is not None:
694 os_packages += _docs_extra_packages
695 if args.oneapi is not None:
696 os_packages += ['lsb-release']
697 if args.hipsycl is not None:
698 os_packages += ['libboost-fiber-dev']
699 if args.intel_compute_runtime is not None:
700 os_packages += _intel_compute_runtime_extra_packages
701 building_blocks['extra_packages'] = []
702 if args.rocm is not None:
703 building_blocks['extra_packages'] += hpccm.building_blocks.packages(
704 apt_keys=['http://repo.radeon.com/rocm/rocm.gpg.key'],
705 apt_repositories=[f'deb [arch=amd64] http://repo.radeon.com/rocm/apt/{args.rocm}/ xenial main']
707 building_blocks['extra_packages'] += hpccm.building_blocks.packages(
708 ospackages=os_packages,
709 apt_ppas=['ppa:intel-opencl/intel-opencl'])
711 if args.cuda is not None and args.llvm is not None:
712 # Hack to tell clang what version of CUDA we're using
713 # based on https://github.com/llvm/llvm-project/blob/1fdec59bffc11ae37eb51a1b9869f0696bfd5312/clang/lib/Driver/ToolChains/Cuda.cpp#L43
714 cuda_version_split = args.cuda.split('.')
715 # LLVM requires having the version in x.y.z format, while args.cuda can be either x.y or x.y.z
716 cuda_version_str = '{}.{}.{}'.format(
717 cuda_version_split[0],
718 cuda_version_split[1],
719 cuda_version_split[2] if len(cuda_version_split) > 2 else 0
721 building_blocks['cuda-clang-workaround'] = hpccm.primitives.shell(commands=[
722 f'echo "CUDA Version {cuda_version_str}" > /usr/local/cuda/version.txt'
725 building_blocks['clfft'] = get_clfft(args)
727 building_blocks['heffte'] = get_heffte(args)
729 building_blocks['hipSYCL'] = get_hipsycl(args)
731 building_blocks['intel-compute-runtime'] = get_intel_compute_runtime(args)
733 # Add Python environments to MPI images, only, so we don't have to worry
734 # about whether to install mpi4py.
735 if args.mpi is not None and len(args.venvs) > 0:
736 add_python_stages(building_blocks=building_blocks, input_args=args, output_stages=stages)
738 # Create the stage from which the targeted image will be tagged.
739 stages['main'] = hpccm.Stage()
741 stages['main'] += hpccm.primitives.baseimage(image=base_image_tag(args))
742 for bb in building_blocks.values():
746 # We always add Python3 and Pip
747 stages['main'] += hpccm.building_blocks.python(python3=True, python2=False)
749 # Add documentation requirements (doxygen and sphinx + misc).
750 if args.doxygen is not None:
751 add_documentation_dependencies(args, stages)
# Collect the pyenv installations and venvs built by the intermediate stages.
753 if 'pyenv' in stages and stages['pyenv'] is not None:
754 stages['main'] += hpccm.primitives.copy(_from='pyenv', _mkdir=True, src=['/root/.pyenv/'],
756 stages['main'] += hpccm.primitives.copy(_from='pyenv', _mkdir=True, src=['/root/venv/'],
758 # TODO: Update user home directory.
759 # TODO: If we activate pyenv for login shells, the `global` "version" should be full-featured.
760 # stages['main'] += hpccm.primitives.copy(_from='pyenv', src=['/root/.bashrc'],
763 # Make sure that `python` resolves to something.
764 stages['main'] += hpccm.primitives.shell(commands=['test -x /usr/bin/python || '
765 'update-alternatives --install /usr/bin/python python /usr/bin/python3 1 && '
766 '/usr/bin/python --version'])
768 # Note that the list of stages should be sorted in dependency order.
769 for build_stage in stages.values():
770 if build_stage is not None:
774 if __name__ == '__main__':
775 args = parser.parse_args()
777 # Set container specification output format
778 hpccm.config.set_container_format(args.format)
780 container_recipe = build_stages(args)
782 # Output container specification
783 for stage in container_recipe: