3 # This file is part of the GROMACS molecular simulation package.
5 # Copyright (c) 2020,2021, by the GROMACS development team, led by
6 # Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
7 # and including many others, as listed in the AUTHORS file in the
8 # top-level source directory and at http://www.gromacs.org.
10 # GROMACS is free software; you can redistribute it and/or
11 # modify it under the terms of the GNU Lesser General Public License
12 # as published by the Free Software Foundation; either version 2.1
13 # of the License, or (at your option) any later version.
15 # GROMACS is distributed in the hope that it will be useful,
16 # but WITHOUT ANY WARRANTY; without even the implied warranty of
17 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 # Lesser General Public License for more details.
20 # You should have received a copy of the GNU Lesser General Public
21 # License along with GROMACS; if not, see
22 # http://www.gnu.org/licenses, or write to the Free Software Foundation,
23 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
25 # If you want to redistribute modifications to GROMACS, please
26 # consider that scientific software is very special. Version
27 # control is crucial - bugs must be traceable. We will be happy to
28 # consider code for inclusion in the official distribution, but
29 # derived work must not be called official GROMACS. Details are found
30 # in the README & COPYING files - if they are missing, get the
31 # official version at http://www.gromacs.org.
33 # To help us fund GROMACS development, we humbly ask that you cite
34 # the research papers on the package. Check out http://www.gromacs.org.
36 """Building block based Dockerfile generation for CI testing images.
Generates a set of Docker images used for running GROMACS CI on GitLab.
The images are prepared according to a selection of build configuration targets
intended to cover a broad enough range of systems, allowing us to check compiler
types and versions as well as the libraries used for accelerators and parallel
communication. Each combination is described as an entry in the build_configs
dictionary, with the script analysing the logic and adding build stages as needed.
46 Based on the example script provided by the NVidia HPCCM repository.
49 `NVidia HPC Container Maker <https://github.com/NVIDIA/hpc-container-maker>`__
52 * Paul Bauer <paul.bauer.q@gmail.com>
53 * Eric Irrgang <ericirrgang@gmail.com>
54 * Joe Jordan <e.jjordan12@gmail.com>
55 * Mark Abraham <mark.j.abraham@gmail.com>
59 $ python3 scripted_gmx_docker_builds.py --help
60 $ python3 scripted_gmx_docker_builds.py --format docker > Dockerfile && docker build .
61 $ python3 scripted_gmx_docker_builds.py | docker build -
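For example, a CUDA-enabled Ubuntu image (a sketch only; option names are assumed
to follow the argparse definitions in utility.parser, e.g. --ubuntu, --gcc, --cuda):

$ python3 scripted_gmx_docker_builds.py --ubuntu 20.04 --gcc 9 --cuda 11.0 | docker build -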
71 from distutils.version import StrictVersion
75 from hpccm.building_blocks.base import bb_base
81 'This module assumes availability of supporting modules in the same directory. Add the directory to '
82 'PYTHONPATH or invoke Python from within the module directory so module location can be resolved.')
84 # Basic packages for all final images.
85 _common_packages = ['build-essential',
103 _opencl_extra_packages = [
105 # The following require apt_ppas=['ppa:intel-opencl/intel-opencl']
107 'ocl-icd-libopencl1',
108 'ocl-icd-opencl-dev',
112 _rocm_extra_packages = [
113 # The following require
114 # apt_keys=['http://repo.radeon.com/rocm/rocm.gpg.key'],
115 # apt_repositories=['deb [arch=amd64] http://repo.radeon.com/rocm/apt/4.0.1/ xenial main']
125 # Extra packages needed to build Intel Compute Runtime
126 _intel_compute_runtime_extra_packages = ['pkg-config',
135 'ocl-icd-libopencl1',
136 'ocl-icd-opencl-dev',
139 # Extra packages needed to build Python installations from source.
140 _python_extra_packages = ['build-essential',
159 # Extra packages needed for images for building documentation.
160 _docs_extra_packages = ['autoconf',
175 'texlive-latex-base',
176 'texlive-latex-extra',
177 'texlive-fonts-recommended',
178 'texlive-fonts-extra',
181 # Parse command line arguments
182 parser = argparse.ArgumentParser(description='GROMACS CI image creation script',
183 parents=[utility.parser])
185 parser.add_argument('--format', type=str, default='docker',
186 choices=['docker', 'singularity'],
187 help='Container specification format (default: docker)')
190 def base_image_tag(args) -> str:
191 # Check if we use CUDA images or plain linux images
192 if args.cuda is not None:
193 cuda_version_tag = 'nvidia/cuda:' + args.cuda + '-devel'
194 if args.centos is not None:
195 cuda_version_tag += '-centos' + args.centos
196 elif args.ubuntu is not None:
197 cuda_version_tag += '-ubuntu' + args.ubuntu
199 raise RuntimeError('Logic error: no Linux distribution selected.')
201 base_image_tag = cuda_version_tag
203 if args.centos is not None:
204 base_image_tag = 'centos:centos' + args.centos
205 elif args.ubuntu is not None:
206 base_image_tag = 'ubuntu:' + args.ubuntu
208 raise RuntimeError('Logic error: no Linux distribution selected.')
209 return base_image_tag
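# For example, args.cuda='11.0' with args.ubuntu='20.04' yields
# 'nvidia/cuda:11.0-devel-ubuntu20.04', while a plain args.centos='7' build yields 'centos:centos7'.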
# Convert the Linux distribution variables into something that hpccm understands.
213 def hpccm_distro_name(args) -> str:
214 if args.centos is not None:
215 name_mapping = { '7': 'centos7',
217 if args.centos in name_mapping:
218 hpccm_name = name_mapping[args.centos]
220 raise RuntimeError('Logic error: unsupported CentOS distribution selected.')
221 elif args.ubuntu is not None:
222 name_mapping = { '20.04': 'ubuntu20',
224 '16.04': 'ubuntu16' }
225 if args.ubuntu in name_mapping:
226 hpccm_name = name_mapping[args.ubuntu]
228 raise RuntimeError('Logic error: unsupported Ubuntu distribution selected.')
230 raise RuntimeError('Logic error: no Linux distribution selected.')
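# For example, args.ubuntu='20.04' maps to 'ubuntu20' and args.centos='7' maps to 'centos7'.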
233 def get_llvm_packages(args) -> typing.Iterable[str]:
234 # If we use the package version of LLVM, we need to install extra packages for it.
235 if (args.llvm is not None) and (args.tsan is None):
236 packages = [f'libomp-{args.llvm}-dev',
237 f'libomp5-{args.llvm}',
238 'clang-format-' + str(args.llvm),
239 'clang-tidy-' + str(args.llvm)]
240 if args.hipsycl is not None:
241 packages += [f'llvm-{args.llvm}-dev',
242 f'libclang-{args.llvm}-dev',
248 def get_opencl_packages(args) -> typing.Iterable[str]:
249 if (args.doxygen is None) and (args.oneapi is None):
250 return _opencl_extra_packages
254 def get_rocm_packages(args) -> typing.Iterable[str]:
255 if (args.rocm is None):
258 return _rocm_extra_packages
260 def get_compiler(args, compiler_build_stage: hpccm.Stage = None) -> bb_base:
262 if args.llvm is not None:
263 # Build our own version instead to get TSAN + OMP
264 if args.tsan is not None:
265 if compiler_build_stage is not None:
266 compiler = compiler_build_stage.runtime(_from='tsan')
268 raise RuntimeError('No TSAN compiler build stage!')
269 # Build the default compiler if we don't need special support
271 # Currently the focal apt repositories do not contain
272 # llvm higher than 11, so we work around that. This will
273 # need further work when we start supporting ubuntu 22.04
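# upstream=True tells hpccm to take the packages from the upstream LLVM apt
# repository rather than the Ubuntu archive, so versions newer than 11 stay available.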
compiler = hpccm.building_blocks.llvm(version=args.llvm, upstream=(int(args.llvm) > 11))
276 elif args.oneapi is not None:
277 if compiler_build_stage is not None:
278 compiler = compiler_build_stage.runtime(_from='oneapi')
279 # Prepare the toolchain (needed only for builds done within the Dockerfile, e.g.
280 # OpenMPI builds, which don't currently work for other reasons)
281 oneapi_toolchain = hpccm.toolchain(CC=f'/opt/intel/oneapi/compiler/{args.oneapi}/linux/bin/intel64/icx',
282 CXX=f'/opt/intel/oneapi/compiler/{args.oneapi}/linux/bin/intel64/icpx')
283 setattr(compiler, 'toolchain', oneapi_toolchain)
286 raise RuntimeError('No oneAPI compiler build stage!')
288 elif args.gcc is not None:
289 compiler = hpccm.building_blocks.gnu(extra_repository=True,
293 raise RuntimeError('Logic error: no compiler toolchain selected.')
297 def get_gdrcopy(args, compiler):
298 if args.cuda is not None:
299 if hasattr(compiler, 'toolchain'):
300 # Version last updated June 7, 2021
301 return hpccm.building_blocks.gdrcopy(toolchain=compiler.toolchain, version="2.2")
303 raise RuntimeError('compiler is not an HPCCM compiler building block!')
307 def get_ucx(args, compiler, gdrcopy):
308 if args.cuda is not None:
309 if hasattr(compiler, 'toolchain'):
310 use_gdrcopy = (gdrcopy is not None)
311 # Version last updated June 7, 2021
312 return hpccm.building_blocks.ucx(toolchain=compiler.toolchain, gdrcopy=use_gdrcopy, version="1.10.1", cuda=True)
314 raise RuntimeError('compiler is not an HPCCM compiler building block!')
318 def get_mpi(args, compiler, ucx):
319 # If needed, add MPI to the image
320 if args.mpi is not None:
321 if args.mpi == 'openmpi':
322 if hasattr(compiler, 'toolchain'):
323 if args.oneapi is not None:
324 raise RuntimeError('oneAPI building OpenMPI is not supported')
325 use_cuda = (args.cuda is not None)
326 use_ucx = (ucx is not None)
327 # Version last updated June 7, 2021
328 return hpccm.building_blocks.openmpi(toolchain=compiler.toolchain, version="4.1.1", cuda=use_cuda, ucx=use_ucx, infiniband=False)
330 raise RuntimeError('compiler is not an HPCCM compiler building block!')
332 elif args.mpi == 'impi':
333 # TODO Intel MPI from the oneAPI repo is not working reliably,
# reasons are unclear. When solved, add packages called:
335 # 'intel-oneapi-mpi', 'intel-oneapi-mpi-devel'
336 # during the compiler stage.
337 # TODO also consider hpccm's intel_mpi package if that doesn't need
339 raise RuntimeError('Intel MPI recipe not implemented yet.')
341 raise RuntimeError('Requested unknown MPI implementation.')
347 if (args.clfft is not None):
348 return hpccm.building_blocks.generic_cmake(
349 repository='https://github.com/clMathLibraries/clFFT.git',
350 prefix='/usr/local', recursive=True, branch=args.clfft, directory='clFFT/src')
354 def get_hipsycl(args):
355 if args.hipsycl is None:
357 if args.llvm is None:
358 raise RuntimeError('Can not build hipSYCL without llvm')
360 if args.rocm is None:
361 raise RuntimeError('hipSYCL requires the rocm packages')
cmake_opts = ['-DLLVM_DIR=/opt/rocm/llvm/lib/cmake/llvm',
364 '-DCMAKE_PREFIX_PATH=/opt/rocm/lib/cmake',
365 '-DWITH_ROCM_BACKEND=ON']
366 if args.cuda is not None:
cmake_opts += ['-DCUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda',
368 '-DWITH_CUDA_BACKEND=ON']
371 # https://github.com/illuhad/hipSYCL/issues/361#issuecomment-718943645
372 'for f in /opt/rocm/amdgcn/bitcode/*.bc; do ln -s "$f" "/opt/rocm/lib/$(basename $f .bc).amdgcn.bc"; done'
374 if args.cuda is not None:
376 # https://github.com/illuhad/hipSYCL/issues/410#issuecomment-743301929
377 f'sed s/_OPENMP/__OPENMP_NVPTX__/ -i /usr/lib/llvm-{args.llvm}/lib/clang/*/include/__clang_cuda_complex_builtins.h',
# Not needed unless we're building with CUDA 11.x, but there is no harm in always doing it
'ln -s /usr/local/cuda/compat/* /usr/local/cuda/lib64/'
382 return hpccm.building_blocks.generic_cmake(
383 repository='https://github.com/illuhad/hipSYCL.git',
384 directory='/var/tmp/hipSYCL',
385 prefix='/usr/local', recursive=True, commit=args.hipsycl,
386 cmake_opts=['-DCMAKE_BUILD_TYPE=Release', *cmake_opts],
387 postinstall=postinstall)
389 def get_intel_compute_runtime(args):
# The only reason we need to build Compute Runtime ourselves is that the Intel packages have no DG1 support.
# Otherwise, we could just install the DEB packages from GitHub or the Intel PPA.
392 if args.intel_compute_runtime is None:
395 cmake_opts = ['-DCMAKE_BUILD_TYPE=Release',
396 '-DSKIP_UNIT_TESTS=TRUE',
397 '-DSUPPORT_GEN8=0', '-DSUPPORT_GEN9=1', '-DSUPPORT_GEN11=1', '-DSUPPORT_GEN12LP=1', '-DSUPPORT_DG1=1',
400 return hpccm.building_blocks.generic_cmake(
401 repository='https://github.com/intel/compute-runtime.git',
402 directory='compute-runtime',
403 prefix='/usr/local', recursive=True, branch=args.intel_compute_runtime,
404 cmake_opts=cmake_opts,
405 postinstall=['ldconfig'])
407 def add_tsan_compiler_build_stage(input_args, output_stages: typing.Mapping[str, hpccm.Stage]):
408 """Isolate the expensive TSAN preparation stage.
410 This is a very expensive stage, but has few and disjoint dependencies, and
411 its output is easily compartmentalized (/usr/local) so we can isolate this
412 build stage to maximize build cache hits and reduce rebuild time, bookkeeping,
413 and final image size.
415 if not isinstance(output_stages, collections.abc.MutableMapping):
416 raise RuntimeError('Need output_stages container.')
417 tsan_stage = hpccm.Stage()
418 tsan_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as='tsan')
420 tsan_stage += hpccm.building_blocks.packages(ospackages=['git', 'ca-certificates', 'build-essential', 'cmake'])
421 # CMake will get duplicated later, but this is an expensive image, and it isn't worth optimizing
422 # out that duplication...
423 tsan_stage += hpccm.building_blocks.python(python3=True, python2=False, devel=False)
425 compiler_branch = 'release/' + str(input_args.llvm) + '.x'
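# Build clang, the clang tools, and libomp with TSAN support from the matching LLVM
# release branch, then create the versioned symlinks that the CI jobs expect to find.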
426 tsan_stage += hpccm.building_blocks.generic_cmake(
427 repository='https://github.com/llvm/llvm-project.git',
428 directory='/var/tmp/llvm-project/llvm/',
429 prefix='/usr/local', recursive=True, branch=compiler_branch,
430 cmake_opts=['-D CMAKE_BUILD_TYPE=Release', '-D LLVM_ENABLE_PROJECTS="clang;openmp;clang-tools-extra;compiler-rt;lld"',
431 '-D LIBOMP_TSAN_SUPPORT=on'],
432 postinstall=['ln -s /usr/local/bin/clang++ /usr/local/bin/clang++-' + str(input_args.llvm),
433 'ln -s /usr/local/bin/clang-format /usr/local/bin/clang-format-' + str(input_args.llvm),
434 'ln -s /usr/local/bin/clang-tidy /usr/local/bin/clang-tidy-' + str(input_args.llvm),
435 'ln -s /usr/local/share/clang/run-clang-tidy.py /usr/local/bin/run-clang-tidy-' + str(input_args.llvm) + '.py',
436 'ln -s /usr/local/bin/run-clang-tidy-' + str(input_args.llvm) + '.py /usr/local/bin/run-clang-tidy-' + str(input_args.llvm),
437 'ln -s /usr/local/libexec/c++-analyzer /usr/local/bin/c++-analyzer-' + str(input_args.llvm)])
438 output_stages['compiler_build'] = tsan_stage
440 def oneapi_runtime(_from='0'):
441 oneapi_runtime_stage = hpccm.Stage()
442 oneapi_runtime_stage += hpccm.primitives.copy(_from='oneapi-build',
443 files={"/opt/intel": "/opt/intel",
444 "/etc/bash.bashrc": "/etc/bash.bashrc"})
445 return oneapi_runtime_stage
447 def add_oneapi_compiler_build_stage(input_args, output_stages: typing.Mapping[str, hpccm.Stage]):
448 """Isolate the oneAPI preparation stage.
450 This stage is isolated so that its installed components are minimized in the
451 final image (chiefly /opt/intel) and its environment setup script can be
452 sourced. This also helps with rebuild time and final image size.
454 if not isinstance(output_stages, collections.abc.MutableMapping):
455 raise RuntimeError('Need output_stages container.')
456 oneapi_stage = hpccm.Stage()
457 oneapi_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as='oneapi-build')
459 version = str(input_args.oneapi)
461 # Add required components for the next stage (both for hpccm and Intel's setvars.sh script)
462 oneapi_stage += hpccm.building_blocks.packages(ospackages=['wget', 'gnupg2', 'ca-certificates', 'lsb-release'])
463 oneapi_stage += hpccm.building_blocks.packages(
464 apt_keys=['https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB'],
465 apt_repositories=['deb https://apt.repos.intel.com/oneapi all main'],
466 # Add minimal packages (not the whole HPC toolkit!)
467 ospackages=[f'intel-oneapi-dpcpp-cpp-{version}',
468 f'intel-oneapi-openmp-{version}',
469 f'intel-oneapi-mkl-{version}',
470 f'intel-oneapi-mkl-devel-{version}']
472 # Ensure that all bash shells on the final container will have access to oneAPI
473 oneapi_stage += hpccm.primitives.shell(
474 commands=['echo "source /opt/intel/oneapi/setvars.sh" >> /etc/bash.bashrc',
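# Re-point the 'latest' symlink at the compiler version installed above, so that
# paths hard-coded to /opt/intel/oneapi/compiler/latest resolve to the requested version.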
475 'unlink /opt/intel/oneapi/compiler/latest',
476 f'ln -sf /opt/intel/oneapi/compiler/{version} /opt/intel/oneapi/compiler/latest']
478 setattr(oneapi_stage, 'runtime', oneapi_runtime)
480 output_stages['compiler_build'] = oneapi_stage
482 def prepare_venv(version: StrictVersion) -> typing.Sequence[str]:
483 """Get shell commands to set up the venv for the requested Python version."""
484 major = version.version[0]
485 minor = version.version[1] # type: int
487 pyenv = '$HOME/.pyenv/bin/pyenv'
489 py_ver = f'{major}.{minor}'
490 venv_path = f'$HOME/venv/py{py_ver}'
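# 'pyenv whence' finds the installation that provides e.g. python3.9, 'pyenv prefix'
# resolves its install prefix, and that interpreter is then used to create the venv.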
491 commands = [f'$({pyenv} prefix `{pyenv} whence python{py_ver}`)/bin/python -m venv {venv_path}']
493 commands.append(f'{venv_path}/bin/python -m pip install --upgrade pip setuptools')
494 # Install dependencies for building and testing gmxapi Python package.
495 # WARNING: Please keep this list synchronized with python_packaging/requirements-test.txt
496 # TODO: Get requirements.txt from an input argument.
497 commands.append(f"""{venv_path}/bin/python -m pip install --upgrade \
508 'scikit-build>=0.10' \
510 'sphinxcontrib-plantuml>=0.14'""")
512 # TODO: Remove 'importlib_resources' dependency when Python >=3.7 is required.
514 commands.append(f"""{venv_path}/bin/python -m pip install --upgrade \
515 'importlib_resources'""")
520 def add_python_stages(building_blocks: typing.Mapping[str, bb_base],
522 output_stages: typing.MutableMapping[str, hpccm.Stage]):
523 """Add the stage(s) necessary for the requested venvs.
525 One intermediate build stage is created for each venv (see --venv option).
527 Each stage partially populates Python installations and venvs in the home
528 directory. The home directory is collected by the 'pyenv' stage for use by
529 the main build stage.
531 if len(input_args.venvs) < 1:
532 raise RuntimeError('No venvs to build...')
533 if output_stages is None or not isinstance(output_stages, collections.abc.Mapping):
534 raise RuntimeError('Need a container for output stages.')
536 # Main Python stage that collects the environments from individual stages.
537 # We collect the stages individually, rather than chaining them, because the
# copy is a bit slow and wastes local Docker image space for each filesystem layer.
540 pyenv_stage = hpccm.Stage()
541 pyenv_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as='pyenv')
542 pyenv_stage += building_blocks['compiler']
543 if building_blocks['gdrcopy'] is not None:
544 pyenv_stage += building_blocks['gdrcopy']
545 if building_blocks['ucx'] is not None:
546 pyenv_stage += building_blocks['ucx']
547 pyenv_stage += building_blocks['mpi']
548 pyenv_stage += hpccm.building_blocks.packages(ospackages=_python_extra_packages)
550 for version in [StrictVersion(py_ver) for py_ver in sorted(input_args.venvs)]:
551 stage_name = 'py' + str(version)
552 stage = hpccm.Stage()
553 stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as=stage_name)
554 stage += building_blocks['compiler']
555 if building_blocks['gdrcopy'] is not None:
556 stage += building_blocks['gdrcopy']
557 if building_blocks['ucx'] is not None:
558 stage += building_blocks['ucx']
559 stage += building_blocks['mpi']
560 stage += hpccm.building_blocks.packages(ospackages=_python_extra_packages)
562 # TODO: Use a non-root user for testing and Python virtual environments.
563 stage += hpccm.primitives.shell(commands=[
564 'curl https://pyenv.run | bash',
565 """echo 'export PYENV_ROOT="$HOME/.pyenv"' >> $HOME/.bashrc""",
566 """echo 'export PATH="$PYENV_ROOT/bin:$PATH"' >> $HOME/.bashrc""",
567 """echo 'eval "$(pyenv init -)"' >> $HOME/.bashrc""",
568 """echo 'eval "$(pyenv virtualenv-init -)"' >> $HOME/.bashrc"""])
569 pyenv = '$HOME/.pyenv/bin/pyenv'
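# Build the requested CPython with a shared libpython; '-s' skips the build if
# this version is already installed.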
570 commands = [f'PYTHON_CONFIGURE_OPTS="--enable-shared" {pyenv} install -s {version}']
571 stage += hpccm.primitives.shell(commands=commands)
573 commands = prepare_venv(version)
574 stage += hpccm.primitives.shell(commands=commands)
576 # TODO: Update user home directory.
577 pyenv_stage += hpccm.primitives.copy(_from=stage_name, _mkdir=True, src=['/root/'],
580 # Add the intermediate build stage to the sequence
581 output_stages[stage_name] = stage
583 # TODO: If we activate pyenv for login shells, the `global` "version" should be full-featured.
584 # # `version` should be a system installation or pyenv environment (or pyenv-virtualenv)
585 # # with the dependencies for all of the Python aspects of CMake-driven builds.
586 # commands = '{pyenv} global {version}'.format(
589 # pyenv_stage += hpccm.primitives.shell(commands=commands)
591 # Add the aggregating build stage to the sequence. This allows the main stage to copy
592 # the files in a single stage, potentially reducing the overall output image size.
593 output_stages['pyenv'] = pyenv_stage
596 def add_documentation_dependencies(input_args,
597 output_stages: typing.MutableMapping[str, hpccm.Stage]):
598 """Add appropriate layers according to doxygen input arguments."""
599 if input_args.doxygen is None:
# Always clone the same version of linkchecker (the latest release as of June 1, 2021)
602 output_stages['main'] += hpccm.building_blocks.pip(pip='pip3', packages=['git+https://github.com/linkchecker/linkchecker.git@v10.0.1'])
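# Relax the ImageMagick security policy: remove the lines that block the
# ghostscript-handled formats (PS, EPS, PDF, XPS) needed when building the documentation.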
603 output_stages['main'] += hpccm.primitives.shell(
604 commands=['sed -i \'/\"XPS\"/d;/\"PDF\"/d;/\"PS\"/d;/\"EPS\"/d;/disable ghostscript format types/d\' /etc/ImageMagick-6/policy.xml'])
605 if input_args.doxygen == '1.8.5':
606 doxygen_commit = 'ed4ed873ab0e7f15116e2052119a6729d4589f7a'
607 output_stages['main'] += hpccm.building_blocks.generic_autotools(
608 repository='https://github.com/westes/flex.git',
609 commit='f7788a9a0ecccdc953ed12043ccb59ca25714018',
610 prefix='/tmp/install-of-flex',
611 configure_opts=['--disable-shared'],
612 preconfigure=['./autogen.sh'])
613 output_stages['main'] += hpccm.building_blocks.generic_autotools(
614 repository='https://github.com/doxygen/doxygen.git',
615 commit=doxygen_commit,
618 '--flex /tmp/install-of-flex/bin/flex',
621 version = input_args.doxygen
622 archive_name = f'doxygen-{version}.linux.bin.tar.gz'
623 archive_url = f'https://sourceforge.net/projects/doxygen/files/rel-{version}/{archive_name}'
624 binary_path = f'doxygen-{version}/bin/doxygen'
626 'mkdir doxygen && cd doxygen',
627 f'wget {archive_url}',
628 f'tar xf {archive_name} {binary_path}',
629 f'cp {binary_path} /usr/local/bin/',
630 'cd .. && rm -rf doxygen'
632 output_stages['main'] += hpccm.primitives.shell(commands=commands)
635 def build_stages(args) -> typing.Iterable[hpccm.Stage]:
636 """Define and sequence the stages for the recipe corresponding to *args*."""
638 # A Dockerfile or Singularity recipe can have multiple build stages.
639 # The main build stage can copy files from previous stages, though only
640 # the last stage is included in the tagged output image. This means that
641 # large or expensive sets of build instructions can be isolated in
642 # local/temporary images, but all of the stages need to be output by this
643 # script, and need to occur in the correct order, so we create a sequence
644 # object early in this function.
645 stages = collections.OrderedDict()
# If we need TSAN or oneAPI support, the early build is more complex, so that our
# compiler images don't have all the cruft needed to get those things installed.
650 if args.llvm is not None and args.tsan is not None:
651 add_tsan_compiler_build_stage(input_args=args, output_stages=stages)
652 if args.oneapi is not None:
653 add_oneapi_compiler_build_stage(input_args=args, output_stages=stages)
655 # Building blocks are chunks of container-builder instructions that can be
656 # copied to any build stage with the addition operator.
657 building_blocks = collections.OrderedDict()
658 building_blocks['base_packages'] = hpccm.building_blocks.packages(
659 ospackages=_common_packages)
661 # Normally in hpccm the first call to baseimage sets the context
# for other packages, e.g. for which apt repository to
663 # use. We want to set that early on.
664 hpccm.config.set_linux_distro(hpccm_distro_name(args))
666 # These are the most expensive and most reusable layers, so we put them first.
667 building_blocks['compiler'] = get_compiler(args, compiler_build_stage=stages.get('compiler_build'))
668 building_blocks['gdrcopy'] = get_gdrcopy(args, building_blocks['compiler'])
669 building_blocks['ucx'] = get_ucx(args, building_blocks['compiler'], building_blocks['gdrcopy'])
670 building_blocks['mpi'] = get_mpi(args, building_blocks['compiler'], building_blocks['ucx'])
671 for i, cmake in enumerate(args.cmake):
672 building_blocks['cmake' + str(i)] = hpccm.building_blocks.cmake(
674 prefix=f'/usr/local/cmake-{cmake}',
677 # Install additional packages early in the build to optimize Docker build layer cache.
678 os_packages = list(get_llvm_packages(args)) + get_opencl_packages(args) + get_rocm_packages(args)
679 if args.doxygen is not None:
680 os_packages += _docs_extra_packages
681 if args.oneapi is not None:
682 os_packages += ['lsb-release']
683 if args.hipsycl is not None:
684 os_packages += ['libboost-fiber-dev']
685 if args.intel_compute_runtime is not None:
686 os_packages += _intel_compute_runtime_extra_packages
687 building_blocks['extra_packages'] = []
688 if args.rocm is not None:
689 building_blocks['extra_packages'] += hpccm.building_blocks.packages(
690 apt_keys=['http://repo.radeon.com/rocm/rocm.gpg.key'],
691 apt_repositories=[f'deb [arch=amd64] http://repo.radeon.com/rocm/apt/{args.rocm}/ xenial main']
693 building_blocks['extra_packages'] += hpccm.building_blocks.packages(
694 ospackages=os_packages,
695 apt_ppas=['ppa:intel-opencl/intel-opencl'])
697 if args.cuda is not None and args.llvm is not None:
698 # Hack to tell clang what version of CUDA we're using
699 # based on https://github.com/llvm/llvm-project/blob/1fdec59bffc11ae37eb51a1b9869f0696bfd5312/clang/lib/Driver/ToolChains/Cuda.cpp#L43
700 cuda_version_split = args.cuda.split('.')
# LLVM requires having the version in x.y.z format, while args.cuda may be either x.y or x.y.z
702 cuda_version_str = '{}.{}.{}'.format(
703 cuda_version_split[0],
704 cuda_version_split[1],
705 cuda_version_split[2] if len(cuda_version_split) > 2 else 0
707 building_blocks['cuda-clang-workaround'] = hpccm.primitives.shell(commands=[
708 f'echo "CUDA Version {cuda_version_str}" > /usr/local/cuda/version.txt'
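# For example, args.cuda='11.0' produces 'CUDA Version 11.0.0', the string clang's
# CUDA detection expects to read from version.txt.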
711 building_blocks['clfft'] = get_clfft(args)
713 building_blocks['hipSYCL'] = get_hipsycl(args)
715 building_blocks['intel-compute-runtime'] = get_intel_compute_runtime(args)
# Add Python environments to MPI images only, so we don't have to worry
718 # about whether to install mpi4py.
719 if args.mpi is not None and len(args.venvs) > 0:
720 add_python_stages(building_blocks=building_blocks, input_args=args, output_stages=stages)
722 # Create the stage from which the targeted image will be tagged.
723 stages['main'] = hpccm.Stage()
725 stages['main'] += hpccm.primitives.baseimage(image=base_image_tag(args))
726 for bb in building_blocks.values():
730 # We always add Python3 and Pip
731 stages['main'] += hpccm.building_blocks.python(python3=True, python2=False)
733 # Add documentation requirements (doxygen and sphinx + misc).
734 if args.doxygen is not None:
735 add_documentation_dependencies(args, stages)
737 if 'pyenv' in stages and stages['pyenv'] is not None:
738 stages['main'] += hpccm.primitives.copy(_from='pyenv', _mkdir=True, src=['/root/.pyenv/'],
740 stages['main'] += hpccm.primitives.copy(_from='pyenv', _mkdir=True, src=['/root/venv/'],
742 # TODO: Update user home directory.
743 # TODO: If we activate pyenv for login shells, the `global` "version" should be full-featured.
744 # stages['main'] += hpccm.primitives.copy(_from='pyenv', src=['/root/.bashrc'],
747 # Make sure that `python` resolves to something.
748 stages['main'] += hpccm.primitives.shell(commands=['test -x /usr/bin/python || '
749 'update-alternatives --install /usr/bin/python python /usr/bin/python3 1 && '
750 '/usr/bin/python --version'])
752 # Note that the list of stages should be sorted in dependency order.
753 for build_stage in stages.values():
754 if build_stage is not None:
758 if __name__ == '__main__':
759 args = parser.parse_args()
761 # Set container specification output format
762 hpccm.config.set_container_format(args.format)
764 container_recipe = build_stages(args)
# Output container specification
for stage in container_recipe:
    print(stage)