#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2020, by the GROMACS development team, led by
+# Copyright (c) 2020,2021, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
# To help us fund GROMACS development, we humbly ask that you cite
# the research papers on the package. Check out http://www.gromacs.org.
-"""
+"""Building block based Dockerfile generation for CI testing images.
+
Generates a set of docker images used for running GROMACS CI on Gitlab.
The images are prepared according to a selection of build configuration targets
that hope to cover a broad enough scope of different possible systems,
Based on the example script provided by the NVidia HPCCM repository.
+Reference:
+ `NVidia HPC Container Maker <https://github.com/NVIDIA/hpc-container-maker>`__
+
Authors:
* Paul Bauer <paul.bauer.q@gmail.com>
* Eric Irrgang <ericirrgang@gmail.com>
* Joe Jordan <e.jjordan12@gmail.com>
+ * Mark Abraham <mark.j.abraham@gmail.com>
+ * Gaurav Garg <gaugarg@nvidia.com>
Usage::
$ python3 scripted_gmx_docker_builds.py --format docker > Dockerfile && docker build .
$ python3 scripted_gmx_docker_builds.py | docker build -
+See Also:
+ :file:`buildall.sh`
+
"""
import argparse
'ca-certificates',
'ccache',
'git',
+ 'gnupg',
+ 'gpg-agent',
'libfftw3-dev',
'libhwloc-dev',
'liblapack-dev',
'wget',
'xsltproc']
+_opencl_extra_packages = [
+ 'nvidia-opencl-dev',
+ # The following require apt_ppas=['ppa:intel-opencl/intel-opencl']
+ 'intel-opencl-icd',
+ 'ocl-icd-libopencl1',
+ 'ocl-icd-opencl-dev',
+ 'opencl-headers',
+]
+
+_rocm_extra_packages = [
+ # The following require
+ # apt_keys=['http://repo.radeon.com/rocm/rocm.gpg.key'],
+ # apt_repositories=['deb [arch=amd64] http://repo.radeon.com/rocm/apt/4.0.1/ xenial main']
+ 'libelf1',
+ 'rocm-opencl',
+ 'rocm-dev',
+ 'clinfo',
+ 'rocfft',
+ 'hipfft',
+]
+
+
+# Extra packages needed to build Intel Compute Runtime
+_intel_compute_runtime_extra_packages = ['pkg-config',
+ 'libxml2',
+ 'libxml2-dev',
+ 'libigc',
+ 'libigc-dev',
+ 'libigdgmm11',
+ 'libigdgmm-dev',
+ 'libze-loader',
+ 'libze-loader-dev',
+ 'ocl-icd-libopencl1',
+ 'ocl-icd-opencl-dev',
+ 'opencl-headers']
+
# Extra packages needed to build Python installations from source.
_python_extra_packages = ['build-essential',
'ca-certificates',
'help2man',
'imagemagick',
'libtool',
- 'linkchecker',
'mscgen',
'm4',
+ 'openssh-client',
'texinfo',
'texlive-latex-base',
'texlive-latex-extra',
'texlive-fonts-recommended',
- 'texlive-fonts-extra']
-
-# Supported Python versions for maintained branches.
-# TODO: Remove '3.5.9' from defaults in master once script in release-2020 diverges.
-_python_versions = ['3.5.9', '3.6.10', '3.7.7', '3.8.2']
+ 'texlive-fonts-extra',
+ 'tex-gyre']
# Parse command line arguments
-parser = argparse.ArgumentParser(description='GROMACS CI image creation script', parents=[utility.parser])
+parser = argparse.ArgumentParser(description='GROMACS CI image creation script',
+ parents=[utility.parser])
parser.add_argument('--format', type=str, default='docker',
choices=['docker', 'singularity'],
help='Container specification format (default: docker)')
-parser.add_argument('--venvs', nargs='*', type=str, default=_python_versions,
- help='List of Python versions ("major.minor.patch") for which to install venvs. '
- 'Default: {}'.format(' '.join(_python_versions)))
def base_image_tag(args) -> str:
raise RuntimeError('Logic error: no Linux distribution selected.')
return base_image_tag
+# Convert the linux distribution variables into something that hpccm
+# understands.
+def hpccm_distro_name(args) -> str:
+ if args.centos is not None:
+ name_mapping = { '7': 'centos7',
+ '8': 'centos8' }
+ if args.centos in name_mapping:
+ hpccm_name = name_mapping[args.centos]
+ else:
+ raise RuntimeError('Logic error: unsupported CentOS distribution selected.')
+ elif args.ubuntu is not None:
+ name_mapping = { '20.04': 'ubuntu20',
+ '18.04': 'ubuntu18',
+ '16.04': 'ubuntu16' }
+ if args.ubuntu in name_mapping:
+ hpccm_name = name_mapping[args.ubuntu]
+ else:
+ raise RuntimeError('Logic error: unsupported Ubuntu distribution selected.')
+ else:
+ raise RuntimeError('Logic error: no Linux distribution selected.')
+ return hpccm_name
def get_llvm_packages(args) -> typing.Iterable[str]:
# If we use the package version of LLVM, we need to install extra packages for it.
if (args.llvm is not None) and (args.tsan is None):
- return ['libomp-dev',
- 'clang-format-' + str(args.llvm),
- 'clang-tidy-' + str(args.llvm)]
+ packages = [f'libomp-{args.llvm}-dev',
+ f'libomp5-{args.llvm}',
+ 'clang-format-' + str(args.llvm),
+ 'clang-tidy-' + str(args.llvm)]
+ if args.hipsycl is not None:
+ packages += [f'llvm-{args.llvm}-dev',
+ f'libclang-{args.llvm}-dev',
+ f'lld-{args.llvm}']
+ return packages
else:
return []
+def get_opencl_packages(args) -> typing.Iterable[str]:
+ if (args.doxygen is None) and (args.oneapi is None):
+ return _opencl_extra_packages
+ else:
+ return []
-def get_compiler(args, tsan_stage: hpccm.Stage = None) -> bb_base:
- # Compiler
- if args.icc is not None:
- raise RuntimeError('Intel compiler toolchain recipe not implemented yet')
+def get_rocm_packages(args) -> typing.Iterable[str]:
+ if (args.rocm is None):
+ return []
+ else:
+ return _rocm_extra_packages
+def get_compiler(args, compiler_build_stage: hpccm.Stage = None) -> bb_base:
+ # Compiler
if args.llvm is not None:
# Build our own version instead to get TSAN + OMP
if args.tsan is not None:
- if tsan_stage is not None:
- compiler = tsan_stage.runtime(_from='tsan')
+ if compiler_build_stage is not None:
+ compiler = compiler_build_stage.runtime(_from='tsan')
else:
- raise RuntimeError('No TSAN stage!')
+ raise RuntimeError('No TSAN compiler build stage!')
# Build the default compiler if we don't need special support
else:
- compiler = hpccm.building_blocks.llvm(extra_repository=True, version=args.llvm)
+ # Currently the focal apt repositories do not contain
+ # llvm higher than 11, so we work around that. This will
+ # need further work when we start supporting ubuntu 22.04
+ compiler = hpccm.building_blocks.llvm(version=args.llvm, upstream=True if int(args.llvm) > 11 else False)
+
+ elif args.oneapi is not None:
+ if compiler_build_stage is not None:
+ compiler = compiler_build_stage.runtime(_from='oneapi')
+ # Prepare the toolchain (needed only for builds done within the Dockerfile, e.g.
+ # OpenMPI builds, which don't currently work for other reasons)
+ oneapi_toolchain = hpccm.toolchain(CC=f'/opt/intel/oneapi/compiler/{args.oneapi}/linux/bin/intel64/icx',
+ CXX=f'/opt/intel/oneapi/compiler/{args.oneapi}/linux/bin/intel64/icpx')
+ setattr(compiler, 'toolchain', oneapi_toolchain)
+
+ else:
+ raise RuntimeError('No oneAPI compiler build stage!')
- elif (args.gcc is not None):
+ elif args.gcc is not None:
compiler = hpccm.building_blocks.gnu(extra_repository=True,
version=args.gcc,
fortran=False)
return compiler
-def get_mpi(args, compiler):
+def get_gdrcopy(args, compiler):
+ if args.cuda is not None:
+ if hasattr(compiler, 'toolchain'):
+ # Version last updated June 7, 2021
+ return hpccm.building_blocks.gdrcopy(toolchain=compiler.toolchain, version="2.2")
+ else:
+ raise RuntimeError('compiler is not an HPCCM compiler building block!')
+ else:
+ return None
+
+def get_ucx(args, compiler, gdrcopy):
+ if args.cuda is not None:
+ if hasattr(compiler, 'toolchain'):
+ use_gdrcopy = (gdrcopy is not None)
+ # Version last updated June 7, 2021
+ return hpccm.building_blocks.ucx(toolchain=compiler.toolchain, gdrcopy=use_gdrcopy, version="1.10.1", cuda=True)
+ else:
+ raise RuntimeError('compiler is not an HPCCM compiler building block!')
+ else:
+ return None
+
+def get_mpi(args, compiler, ucx):
# If needed, add MPI to the image
if args.mpi is not None:
if args.mpi == 'openmpi':
- use_cuda = False
- if args.cuda is not None:
- use_cuda = True
-
if hasattr(compiler, 'toolchain'):
- return hpccm.building_blocks.openmpi(toolchain=compiler.toolchain, cuda=use_cuda, infiniband=False)
+ if args.oneapi is not None:
+ raise RuntimeError('oneAPI building OpenMPI is not supported')
+ use_cuda = (args.cuda is not None)
+ use_ucx = (ucx is not None)
+ # Version last updated June 7, 2021
+ return hpccm.building_blocks.openmpi(toolchain=compiler.toolchain, version="4.1.1", cuda=use_cuda, ucx=use_ucx, infiniband=False)
else:
raise RuntimeError('compiler is not an HPCCM compiler building block!')
elif args.mpi == 'impi':
+ # TODO Intel MPI from the oneAPI repo is not working reliably,
+    # reasons are unclear. When solved, add packages called:
+ # 'intel-oneapi-mpi', 'intel-oneapi-mpi-devel'
+ # during the compiler stage.
+ # TODO also consider hpccm's intel_mpi package if that doesn't need
+ # a license to run.
raise RuntimeError('Intel MPI recipe not implemented yet.')
else:
raise RuntimeError('Requested unknown MPI implementation.')
return None
-def get_opencl(args):
- # Add OpenCL environment if needed
- if (args.opencl is not None):
- if args.opencl == 'nvidia':
- if (args.cuda is None):
- raise RuntimeError('Need Nvidia environment for Nvidia OpenCL image')
-
- return hpccm.building_blocks.packages(ospackages=['nvidia-opencl-dev'])
-
- elif args.opencl == 'intel':
- return hpccm.building_blocks.packages(ospackages=['ocl-icd-opencl-dev', 'opencl-headers',
- 'beignet-opencl-icd'])
- elif args.opencl == 'amd':
- # Due to the wisdom of AMD, this needs to be done differently for the OS and version! Hurray!
- # And they don't allow wget, so this branch is not taken for now! AMD, please allow me to use wget.
- raise RuntimeError(
- 'AMD recipe can not be generated because they do not allow wget for getting the packages.')
- # if args.ubuntu:
- # if args.ubuntu is not '16.04':
- # Stage0 += hpccm.building_blocks.generic_build(url='https://www2.ati.com/drivers/linux/ubuntu/'+args.ubuntu+'/amdgpu-pro-18.30-641594.tar.xz',
- # install=['./amdgpu-install --opencl=legacy --headless -y'])
- # elif:
- # Stage0 += hpccm.building_blocks.generic_build(url='https://www2.ati.com/drivers/linux/ubuntu/amdgpu-pro-18.30-641594.tar.xz',
- # install=['./amdgpu-install --opencl=legacy --headless -y'])
- # elif args.centos:
- # Stage0 += hpccm.building_blocks.generic_build(url='https://www2.ati.com/drivers/linux/rhel'+args.centos'/amdgpu-pro-18.30-641594.tar.xz',
- # install=['./amdgpu-install --opencl=legacy --headless -y'])
- else:
- return None
-
-
def get_clfft(args):
if (args.clfft is not None):
return hpccm.building_blocks.generic_cmake(
else:
return None
+def get_heffte(args):
+ if (args.heffte is not None):
+ return hpccm.building_blocks.generic_cmake(
+ cmake_opts=['-D CMAKE_BUILD_TYPE=Release',
+ '-D CUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda',
+ '-D Heffte_ENABLE_CUDA=ON',
+ '-D Heffte_ENABLE_FFTW=OFF',
+ '-D BUILD_SHARED_LIBS=ON'],
+ repository='https://bitbucket.org/icl/heffte.git',
+ prefix='/usr/local', recursive=True, commit=args.heffte, directory='heffte')
+ else:
+ return None
+
+def get_hipsycl(args):
+ if args.hipsycl is None:
+ return None
+ if args.llvm is None:
+ raise RuntimeError('Can not build hipSYCL without llvm')
+
+ if args.rocm is None:
+ raise RuntimeError('hipSYCL requires the rocm packages')
+
+ cmake_opts = [f'-DLLVM_DIR=/opt/rocm/llvm/lib/cmake/llvm',
+ '-DCMAKE_PREFIX_PATH=/opt/rocm/lib/cmake',
+ '-DWITH_ROCM_BACKEND=ON']
+ if args.cuda is not None:
+ cmake_opts += [f'-DCUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda',
+ '-DWITH_CUDA_BACKEND=ON']
+
+ postinstall = [
+ # https://github.com/illuhad/hipSYCL/issues/361#issuecomment-718943645
+ 'for f in /opt/rocm/amdgcn/bitcode/*.bc; do ln -s "$f" "/opt/rocm/lib/$(basename $f .bc).amdgcn.bc"; done'
+ ]
+ if args.cuda is not None:
+ postinstall += [
+ # https://github.com/illuhad/hipSYCL/issues/410#issuecomment-743301929
+ f'sed s/_OPENMP/__OPENMP_NVPTX__/ -i /usr/lib/llvm-{args.llvm}/lib/clang/*/include/__clang_cuda_complex_builtins.h',
+ # Not needed unless we're building with CUDA 11.x, but no harm in doing always
+ f'ln -s /usr/local/cuda/compat/* /usr/local/cuda/lib64/'
+ ]
+
+ return hpccm.building_blocks.generic_cmake(
+ repository='https://github.com/illuhad/hipSYCL.git',
+ directory='/var/tmp/hipSYCL',
+ prefix='/usr/local', recursive=True, commit=args.hipsycl,
+ cmake_opts=['-DCMAKE_BUILD_TYPE=Release', *cmake_opts],
+ postinstall=postinstall)
+
+def get_intel_compute_runtime(args):
+    # The only reason we need to build Compute Runtime ourselves is that Intel packages have no DG1 support
+ # Otherwise, we could have just installed DEB packages from GitHub or Intel PPA
+ if args.intel_compute_runtime is None:
+ return None
+
+ cmake_opts = ['-DCMAKE_BUILD_TYPE=Release',
+ '-DSKIP_UNIT_TESTS=TRUE',
+ '-DSUPPORT_GEN8=0', '-DSUPPORT_GEN9=1', '-DSUPPORT_GEN11=1', '-DSUPPORT_GEN12LP=1', '-DSUPPORT_DG1=1',
+ '-DBUILD_WITH_L0=1']
-def add_tsan_stage(input_args, output_stages: typing.Mapping[str, hpccm.Stage]):
+ return hpccm.building_blocks.generic_cmake(
+ repository='https://github.com/intel/compute-runtime.git',
+ directory='compute-runtime',
+ prefix='/usr/local', recursive=True, branch=args.intel_compute_runtime,
+ cmake_opts=cmake_opts,
+ postinstall=['ldconfig'])
+
+def add_tsan_compiler_build_stage(input_args, output_stages: typing.Mapping[str, hpccm.Stage]):
"""Isolate the expensive TSAN preparation stage.
This is a very expensive stage, but has few and disjoint dependencies, and
# out that duplication...
tsan_stage += hpccm.building_blocks.python(python3=True, python2=False, devel=False)
- compiler_branch = 'release_' + str(input_args.llvm) + '0'
+ compiler_branch = 'release/' + str(input_args.llvm) + '.x'
tsan_stage += hpccm.building_blocks.generic_cmake(
- repository='https://git.llvm.org/git/llvm.git',
+ repository='https://github.com/llvm/llvm-project.git',
+ directory='/var/tmp/llvm-project/llvm/',
prefix='/usr/local', recursive=True, branch=compiler_branch,
- cmake_opts=['-D CMAKE_BUILD_TYPE=Release', '-D LLVM_ENABLE_PROJECTS="clang;openmp;clang-tools-extra"',
+ cmake_opts=['-D CMAKE_BUILD_TYPE=Release', '-D LLVM_ENABLE_PROJECTS="clang;openmp;clang-tools-extra;compiler-rt;lld"',
'-D LIBOMP_TSAN_SUPPORT=on'],
- preconfigure=['export branch=' + compiler_branch,
- '(cd projects; git clone --depth=1 --branch $branch https://git.llvm.org/git/libcxx.git)',
- '(cd projects; git clone --depth=1 --branch $branch https://git.llvm.org/git/libcxxabi.git)',
- '(cd projects; git clone --depth=1 --branch $branch https://git.llvm.org/git/compiler-rt.git)',
- '(cd ..; git clone --depth=1 --branch $branch https://git.llvm.org/git/openmp.git)',
- '(cd ..; git clone --depth=1 --branch $branch https://git.llvm.org/git/clang.git)',
- '(cd ..; git clone --depth=1 --branch $branch https://git.llvm.org/git/clang-tools-extra.git)'],
postinstall=['ln -s /usr/local/bin/clang++ /usr/local/bin/clang++-' + str(input_args.llvm),
'ln -s /usr/local/bin/clang-format /usr/local/bin/clang-format-' + str(input_args.llvm),
'ln -s /usr/local/bin/clang-tidy /usr/local/bin/clang-tidy-' + str(input_args.llvm),
+ 'ln -s /usr/local/share/clang/run-clang-tidy.py /usr/local/bin/run-clang-tidy-' + str(input_args.llvm) + '.py',
+ 'ln -s /usr/local/bin/run-clang-tidy-' + str(input_args.llvm) + '.py /usr/local/bin/run-clang-tidy-' + str(input_args.llvm),
'ln -s /usr/local/libexec/c++-analyzer /usr/local/bin/c++-analyzer-' + str(input_args.llvm)])
- output_stages['tsan'] = tsan_stage
+ output_stages['compiler_build'] = tsan_stage
+
+def oneapi_runtime(_from='0'):
+ oneapi_runtime_stage = hpccm.Stage()
+ oneapi_runtime_stage += hpccm.primitives.copy(_from='oneapi-build',
+ files={"/opt/intel": "/opt/intel",
+ "/etc/bash.bashrc": "/etc/bash.bashrc"})
+ return oneapi_runtime_stage
+def add_oneapi_compiler_build_stage(input_args, output_stages: typing.Mapping[str, hpccm.Stage]):
+ """Isolate the oneAPI preparation stage.
+
+ This stage is isolated so that its installed components are minimized in the
+ final image (chiefly /opt/intel) and its environment setup script can be
+ sourced. This also helps with rebuild time and final image size.
+ """
+ if not isinstance(output_stages, collections.abc.MutableMapping):
+ raise RuntimeError('Need output_stages container.')
+ oneapi_stage = hpccm.Stage()
+ oneapi_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as='oneapi-build')
+
+ version = str(input_args.oneapi)
+
+ # Add required components for the next stage (both for hpccm and Intel's setvars.sh script)
+ oneapi_stage += hpccm.building_blocks.packages(ospackages=['wget', 'gnupg2', 'ca-certificates', 'lsb-release'])
+ oneapi_stage += hpccm.building_blocks.packages(
+ apt_keys=['https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB'],
+ apt_repositories=['deb https://apt.repos.intel.com/oneapi all main'],
+ # Add minimal packages (not the whole HPC toolkit!)
+ ospackages=[f'intel-oneapi-dpcpp-cpp-{version}',
+ f'intel-oneapi-openmp-{version}',
+ f'intel-oneapi-mkl-{version}',
+ f'intel-oneapi-mkl-devel-{version}']
+ )
+ # Ensure that all bash shells on the final container will have access to oneAPI
+ oneapi_stage += hpccm.primitives.shell(
+ commands=['echo "source /opt/intel/oneapi/setvars.sh" >> /etc/bash.bashrc',
+ 'unlink /opt/intel/oneapi/compiler/latest',
+ f'ln -sf /opt/intel/oneapi/compiler/{version} /opt/intel/oneapi/compiler/latest']
+ )
+ setattr(oneapi_stage, 'runtime', oneapi_runtime)
+
+ output_stages['compiler_build'] = oneapi_stage
def prepare_venv(version: StrictVersion) -> typing.Sequence[str]:
"""Get shell commands to set up the venv for the requested Python version."""
major = version.version[0]
- minor = version.version[1]
+ minor = version.version[1] # type: int
pyenv = '$HOME/.pyenv/bin/pyenv'
- py_ver = '{}.{}'.format(major, minor)
- venv_path = '$HOME/venv/py{}'.format(py_ver)
- commands = ['$({pyenv} prefix `{pyenv} whence python{py_ver}`)/bin/python -m venv {path}'.format(
- pyenv=pyenv,
- py_ver=py_ver,
- path=venv_path
- )]
-
- commands.append('{path}/bin/python -m pip install --upgrade pip setuptools'.format(
- path=venv_path
- ))
+ py_ver = f'{major}.{minor}'
+ venv_path = f'$HOME/venv/py{py_ver}'
+ commands = [f'$({pyenv} prefix `{pyenv} whence python{py_ver}`)/bin/python -m venv {venv_path}']
+
+ commands.append(f'{venv_path}/bin/python -m pip install --upgrade pip setuptools')
# Install dependencies for building and testing gmxapi Python package.
# WARNING: Please keep this list synchronized with python_packaging/requirements-test.txt
# TODO: Get requirements.txt from an input argument.
- commands.append("""{path}/bin/python -m pip install --upgrade \
- 'cmake>=3.9.6' \
+ commands.append(f"""{venv_path}/bin/python -m pip install --upgrade \
+ 'breathe' \
+ 'cmake>=3.16.3' \
'flake8>=3.7.7' \
- 'mpi4py>=2' \
+ 'gcovr>=4.2' \
+ 'mpi4py>=3.0.3' \
'networkx>=2.0' \
'numpy>=1' \
'pip>=10.1' \
+ 'Pygments>=2.2.0' \
'pytest>=3.9' \
- 'setuptools>=28.0.0' \
- 'scikit-build>=0.7'""".format(path=venv_path))
+ 'setuptools>=42' \
+ 'Sphinx>=1.6.3' \
+ 'sphinxcontrib-plantuml>=0.14'""")
+
+ # TODO: Remove 'importlib_resources' dependency when Python >=3.7 is required.
+ if minor == 6:
+ commands.append(f"""{venv_path}/bin/python -m pip install --upgrade \
+ 'importlib_resources'""")
return commands
pyenv_stage = hpccm.Stage()
pyenv_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as='pyenv')
pyenv_stage += building_blocks['compiler']
+ if building_blocks['gdrcopy'] is not None:
+ pyenv_stage += building_blocks['gdrcopy']
+ if building_blocks['ucx'] is not None:
+ pyenv_stage += building_blocks['ucx']
pyenv_stage += building_blocks['mpi']
pyenv_stage += hpccm.building_blocks.packages(ospackages=_python_extra_packages)
stage = hpccm.Stage()
stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as=stage_name)
stage += building_blocks['compiler']
+ if building_blocks['gdrcopy'] is not None:
+ stage += building_blocks['gdrcopy']
+ if building_blocks['ucx'] is not None:
+ stage += building_blocks['ucx']
stage += building_blocks['mpi']
stage += hpccm.building_blocks.packages(ospackages=_python_extra_packages)
"""echo 'eval "$(pyenv init -)"' >> $HOME/.bashrc""",
"""echo 'eval "$(pyenv virtualenv-init -)"' >> $HOME/.bashrc"""])
pyenv = '$HOME/.pyenv/bin/pyenv'
- commands = ['PYTHON_CONFIGURE_OPTS="--enable-shared" {pyenv} install -s {version}'.format(
- pyenv=pyenv,
- version=str(version))]
+ commands = [f'PYTHON_CONFIGURE_OPTS="--enable-shared" {pyenv} install -s {version}']
stage += hpccm.primitives.shell(commands=commands)
commands = prepare_venv(version)
output_stages['pyenv'] = pyenv_stage
+def add_documentation_dependencies(input_args,
+ output_stages: typing.MutableMapping[str, hpccm.Stage]):
+ """Add appropriate layers according to doxygen input arguments."""
+ if input_args.doxygen is None:
+ return
+ # Always clone the same version of linkchecker (latest release at June 1, 2021)
+ output_stages['main'] += hpccm.building_blocks.pip(pip='pip3', packages=['git+https://github.com/linkchecker/linkchecker.git@v10.0.1'])
+ output_stages['main'] += hpccm.primitives.shell(
+ commands=['sed -i \'/\"XPS\"/d;/\"PDF\"/d;/\"PS\"/d;/\"EPS\"/d;/disable ghostscript format types/d\' /etc/ImageMagick-6/policy.xml'])
+ if input_args.doxygen == '1.8.5':
+ doxygen_commit = 'ed4ed873ab0e7f15116e2052119a6729d4589f7a'
+ output_stages['main'] += hpccm.building_blocks.generic_autotools(
+ repository='https://github.com/westes/flex.git',
+ commit='f7788a9a0ecccdc953ed12043ccb59ca25714018',
+ prefix='/tmp/install-of-flex',
+ configure_opts=['--disable-shared'],
+ preconfigure=['./autogen.sh'])
+ output_stages['main'] += hpccm.building_blocks.generic_autotools(
+ repository='https://github.com/doxygen/doxygen.git',
+ commit=doxygen_commit,
+ prefix='',
+ configure_opts=[
+ '--flex /tmp/install-of-flex/bin/flex',
+ '--static'])
+ else:
+ version = input_args.doxygen
+ archive_name = f'doxygen-{version}.linux.bin.tar.gz'
+ archive_url = f'https://sourceforge.net/projects/doxygen/files/rel-{version}/{archive_name}'
+ binary_path = f'doxygen-{version}/bin/doxygen'
+ commands = [
+ 'mkdir doxygen && cd doxygen',
+ f'wget {archive_url}',
+ f'tar xf {archive_name} {binary_path}',
+ f'cp {binary_path} /usr/local/bin/',
+ 'cd .. && rm -rf doxygen'
+ ]
+ output_stages['main'] += hpccm.primitives.shell(commands=commands)
+
+
def build_stages(args) -> typing.Iterable[hpccm.Stage]:
"""Define and sequence the stages for the recipe corresponding to *args*."""
# object early in this function.
stages = collections.OrderedDict()
- # If we need the TSAN compilers, the early build is more involved.
+ # If we need TSAN or oneAPI support the early build is more complex,
+ # so that our compiler images don't have all the cruft needed to get those things
+ # installed.
if args.llvm is not None and args.tsan is not None:
- add_tsan_stage(input_args=args, output_stages=stages)
+ add_tsan_compiler_build_stage(input_args=args, output_stages=stages)
+ if args.oneapi is not None:
+ add_oneapi_compiler_build_stage(input_args=args, output_stages=stages)
# Building blocks are chunks of container-builder instructions that can be
# copied to any build stage with the addition operator.
building_blocks = collections.OrderedDict()
+ building_blocks['base_packages'] = hpccm.building_blocks.packages(
+ ospackages=_common_packages)
+
+ # Normally in hpccm the first call to baseimage sets the context
+    # for other packages, e.g. for which apt repository to
+ # use. We want to set that early on.
+ hpccm.config.set_linux_distro(hpccm_distro_name(args))
# These are the most expensive and most reusable layers, so we put them first.
- building_blocks['compiler'] = get_compiler(args, tsan_stage=stages.get('tsan'))
- building_blocks['mpi'] = get_mpi(args, building_blocks['compiler'])
+ building_blocks['compiler'] = get_compiler(args, compiler_build_stage=stages.get('compiler_build'))
+ building_blocks['gdrcopy'] = get_gdrcopy(args, building_blocks['compiler'])
+ building_blocks['ucx'] = get_ucx(args, building_blocks['compiler'], building_blocks['gdrcopy'])
+ building_blocks['mpi'] = get_mpi(args, building_blocks['compiler'], building_blocks['ucx'])
+ for i, cmake in enumerate(args.cmake):
+ building_blocks['cmake' + str(i)] = hpccm.building_blocks.cmake(
+ eula=True,
+ prefix=f'/usr/local/cmake-{cmake}',
+ version=cmake)
# Install additional packages early in the build to optimize Docker build layer cache.
- os_packages = _common_packages + get_llvm_packages(args)
+ os_packages = list(get_llvm_packages(args)) + get_opencl_packages(args) + get_rocm_packages(args)
if args.doxygen is not None:
os_packages += _docs_extra_packages
- building_blocks['ospackages'] = hpccm.building_blocks.packages(ospackages=os_packages)
+ if args.oneapi is not None:
+ os_packages += ['lsb-release']
+ if args.hipsycl is not None:
+ os_packages += ['libboost-fiber-dev']
+ if args.intel_compute_runtime is not None:
+ os_packages += _intel_compute_runtime_extra_packages
+ building_blocks['extra_packages'] = []
+ if args.rocm is not None:
+ building_blocks['extra_packages'] += hpccm.building_blocks.packages(
+ apt_keys=['http://repo.radeon.com/rocm/rocm.gpg.key'],
+ apt_repositories=[f'deb [arch=amd64] http://repo.radeon.com/rocm/apt/{args.rocm}/ xenial main']
+ )
+ building_blocks['extra_packages'] += hpccm.building_blocks.packages(
+ ospackages=os_packages,
+ apt_ppas=['ppa:intel-opencl/intel-opencl'])
+
+ if args.cuda is not None and args.llvm is not None:
+ # Hack to tell clang what version of CUDA we're using
+ # based on https://github.com/llvm/llvm-project/blob/1fdec59bffc11ae37eb51a1b9869f0696bfd5312/clang/lib/Driver/ToolChains/Cuda.cpp#L43
+ cuda_version_split = args.cuda.split('.')
+        # LLVM requires having the version in x.y.z format, while args.cuda may be either x.y or x.y.z
+ cuda_version_str = '{}.{}.{}'.format(
+ cuda_version_split[0],
+ cuda_version_split[1],
+ cuda_version_split[2] if len(cuda_version_split) > 2 else 0
+ )
+ building_blocks['cuda-clang-workaround'] = hpccm.primitives.shell(commands=[
+ f'echo "CUDA Version {cuda_version_str}" > /usr/local/cuda/version.txt'
+ ])
- building_blocks['cmake'] = hpccm.building_blocks.cmake(eula=True, version=args.cmake)
- building_blocks['opencl'] = get_opencl(args)
building_blocks['clfft'] = get_clfft(args)
+ building_blocks['heffte'] = get_heffte(args)
+
+ building_blocks['hipSYCL'] = get_hipsycl(args)
+
+ building_blocks['intel-compute-runtime'] = get_intel_compute_runtime(args)
+
# Add Python environments to MPI images, only, so we don't have to worry
# about whether to install mpi4py.
if args.mpi is not None and len(args.venvs) > 0:
stages['main'] += bb
# We always add Python3 and Pip
- stages['main'] += hpccm.building_blocks.python(python3=True, python2=False, devel=True)
- stages['main'] += hpccm.building_blocks.pip(upgrade=True, pip='pip3',
- packages=['pytest', 'networkx', 'numpy'])
+ stages['main'] += hpccm.building_blocks.python(python3=True, python2=False)
# Add documentation requirements (doxygen and sphinx + misc).
- if (args.doxygen is not None):
- if (args.doxygen == '1.8.5'):
- doxygen_commit = 'ed4ed873ab0e7f15116e2052119a6729d4589f7a'
- else:
- doxygen_commit = 'a6d4f4df45febe588c38de37641513fd576b998f'
- stages['main'] += hpccm.building_blocks.generic_autotools(
- repository='https://github.com/westes/flex.git',
- commit='f7788a9a0ecccdc953ed12043ccb59ca25714018',
- prefix='/tmp/install-of-flex',
- configure_opts=['--disable-shared'],
- preconfigure=['./autogen.sh'])
- stages['main'] += hpccm.building_blocks.generic_autotools(
- repository='https://github.com/doxygen/doxygen.git',
- commit=doxygen_commit,
- prefix='',
- configure_opts=[
- '--flex /tmp/install-of-flex/bin/flex',
- '--static'],
- postinstall=[
- 'sed -i \'/\"XPS\"/d;/\"PDF\"/d;/\"PS\"/d;/\"EPS\"/d;/disable ghostscript format types/d\' /etc/ImageMagick-6/policy.xml'])
- stages['main'] += hpccm.building_blocks.pip(pip='pip3', packages=['sphinx==1.6.1'])
+ if args.doxygen is not None:
+ add_documentation_dependencies(args, stages)
if 'pyenv' in stages and stages['pyenv'] is not None:
stages['main'] += hpccm.primitives.copy(_from='pyenv', _mkdir=True, src=['/root/.pyenv/'],
# stages['main'] += hpccm.primitives.copy(_from='pyenv', src=['/root/.bashrc'],
# dest='/root/')
+ # Make sure that `python` resolves to something.
+ stages['main'] += hpccm.primitives.shell(commands=['test -x /usr/bin/python || '
+ 'update-alternatives --install /usr/bin/python python /usr/bin/python3 1 && '
+ '/usr/bin/python --version'])
+
# Note that the list of stages should be sorted in dependency order.
for build_stage in stages.values():
if build_stage is not None: