* Eric Irrgang <ericirrgang@gmail.com>
* Joe Jordan <e.jjordan12@gmail.com>
* Mark Abraham <mark.j.abraham@gmail.com>
+ * Gaurav Garg <gaugarg@nvidia.com>
Usage::
import argparse
import collections
+import collections.abc
import typing
from distutils.version import StrictVersion
]
_rocm_extra_packages = [
- # The following require
- # apt_keys=['http://repo.radeon.com/rocm/rocm.gpg.key'],
- # apt_repositories=['deb [arch=amd64] http://repo.radeon.com/rocm/apt/4.0.1/ xenial main']
- 'libelf1',
- 'rocm-opencl',
- 'rocm-dev',
- 'clinfo'
+ # The following require
+ # apt_keys=['http://repo.radeon.com/rocm/rocm.gpg.key'],
+ # apt_repositories=['deb [arch=amd64] http://repo.radeon.com/rocm/apt/4.0.1/ xenial main']
+ 'clinfo',
+ 'hipfft',
+ 'libelf1',
+ 'rocfft',
+ 'rocm-opencl',
+ 'rocm-dev',
]
-
-# Extra packages needed to build Intel Compute Runtime
+# Extra packages needed to use Intel Compute Runtime
-_intel_compute_runtime_extra_packages = ['pkg-config',
- 'libxml2',
- 'libxml2-dev',
- 'libigc',
- 'libigc-dev',
- 'libigdgmm11',
- 'libigdgmm-dev',
- 'libze-loader',
- 'libze-loader-dev',
- 'ocl-icd-libopencl1',
- 'ocl-icd-opencl-dev',
- 'opencl-headers']
+_intel_compute_runtime_extra_packages = ['intel-opencl-icd',
+ 'intel-level-zero-gpu',
+ 'level-zero',
+ 'libmfx1']
# Extra packages needed to build Python installations from source.
_python_extra_packages = ['build-essential',
'texlive-latex-base',
'texlive-latex-extra',
'texlive-fonts-recommended',
- 'texlive-fonts-extra']
+ 'texlive-fonts-extra',
+ 'tex-gyre']
# Parse command line arguments
parser = argparse.ArgumentParser(description='GROMACS CI image creation script',
def base_image_tag(args) -> str:
+ """Generate *image* for hpccm.baseimage()."""
# Check if we use CUDA images or plain linux images
if args.cuda is not None:
cuda_version_tag = 'nvidia/cuda:' + args.cuda + '-devel'
return base_image_tag
+def hpccm_distro_name(args) -> str:
+ """Generate *_distro* for hpccm.baseimage().
+
+ Convert the linux distribution variables into something that hpccm
+ understands.
+
+ The same format is used by the lower level hpccm.config.set_linux_distro().
+ """
+ if args.centos is not None:
+ name_mapping = {'7': 'centos7',
+ '8': 'centos8'}
+ if args.centos in name_mapping:
+ hpccm_name = name_mapping[args.centos]
+ else:
+ raise RuntimeError('Logic error: unsupported CentOS distribution selected.')
+ elif args.ubuntu is not None:
+ name_mapping = {'20.04': 'ubuntu20',
+ '18.04': 'ubuntu18',
+ '16.04': 'ubuntu16'}
+ if args.ubuntu in name_mapping:
+ hpccm_name = name_mapping[args.ubuntu]
+ else:
+ raise RuntimeError('Logic error: unsupported Ubuntu distribution selected.')
+ else:
+ raise RuntimeError('Logic error: no Linux distribution selected.')
+ return hpccm_name
+
+
def get_llvm_packages(args) -> typing.Iterable[str]:
# If we use the package version of LLVM, we need to install extra packages for it.
if (args.llvm is not None) and (args.tsan is None):
else:
return []
-def get_opencl_packages(args) -> typing.Iterable[str]:
+
+def get_opencl_packages(args) -> typing.List[str]:
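+ """Provide OpenCL packages unless a Doxygen or oneAPI build is requested."""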
if (args.doxygen is None) and (args.oneapi is None):
return _opencl_extra_packages
else:
return []
-def get_rocm_packages(args) -> typing.Iterable[str]:
+
+def get_rocm_packages(args) -> typing.List[str]:
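+ """Provide the ROCm package list when a ROCm build is requested."""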
if (args.rocm is None):
return []
else:
return _rocm_extra_packages
+
def get_compiler(args, compiler_build_stage: hpccm.Stage = None) -> bb_base:
# Compiler
if args.llvm is not None:
raise RuntimeError('No TSAN compiler build stage!')
# Build the default compiler if we don't need special support
else:
- compiler = hpccm.building_blocks.llvm(extra_repository=True, version=args.llvm)
+ # Currently the focal apt repositories do not contain
+ # LLVM newer than 11, so for those versions we use the upstream
+ # LLVM apt repository instead. This will need further work when
+ # we start supporting Ubuntu 22.04.
+ compiler = hpccm.building_blocks.llvm(version=args.llvm, upstream=(int(args.llvm) > 11))
elif args.oneapi is not None:
if compiler_build_stage is not None:
return compiler
-def get_mpi(args, compiler):
+def get_gdrcopy(args, compiler):
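+ """Provide the gdrcopy building block for CUDA builds, or None otherwise."""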
+ if args.cuda is not None:
+ if hasattr(compiler, 'toolchain'):
+ # Version last updated June 7, 2021
+ return hpccm.building_blocks.gdrcopy(toolchain=compiler.toolchain, version="2.2")
+ else:
+ raise RuntimeError('compiler is not an HPCCM compiler building block!')
+ else:
+ return None
+
+
+def get_ucx(args, compiler, gdrcopy):
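+ """Provide a CUDA-enabled UCX building block (using gdrcopy when available), or None for non-CUDA builds."""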
+ if args.cuda is not None:
+ if hasattr(compiler, 'toolchain'):
+ use_gdrcopy = (gdrcopy is not None)
+ # Version last updated June 7, 2021
+ return hpccm.building_blocks.ucx(toolchain=compiler.toolchain, gdrcopy=use_gdrcopy, version="1.10.1",
+ cuda=True)
+ else:
+ raise RuntimeError('compiler is not an HPCCM compiler building block!')
+ else:
+ return None
+
+
+def get_mpi(args, compiler, ucx):
# If needed, add MPI to the image
if args.mpi is not None:
if args.mpi == 'openmpi':
- use_cuda = False
- if args.cuda is not None:
- use_cuda = True
-
if hasattr(compiler, 'toolchain'):
if args.oneapi is not None:
raise RuntimeError('oneAPI building OpenMPI is not supported')
- return hpccm.building_blocks.openmpi(toolchain=compiler.toolchain, cuda=use_cuda, infiniband=False)
+ use_cuda = (args.cuda is not None)
+ use_ucx = (ucx is not None)
+ # Version last updated June 7, 2021
+ return hpccm.building_blocks.openmpi(toolchain=compiler.toolchain, version="4.1.1", cuda=use_cuda,
+ ucx=use_ucx, infiniband=False)
else:
raise RuntimeError('compiler is not an HPCCM compiler building block!')
else:
return None
+
+def get_heffte(args):
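+ """Build heFFTe at the requested commit with the CUDA backend, or return None if not requested."""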
+ if (args.heffte is not None):
+ return hpccm.building_blocks.generic_cmake(
+ cmake_opts=['-D CMAKE_BUILD_TYPE=Release',
+ '-D CUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda',
+ '-D Heffte_ENABLE_CUDA=ON',
+ '-D Heffte_ENABLE_FFTW=OFF',
+ '-D BUILD_SHARED_LIBS=ON'],
+ repository='https://bitbucket.org/icl/heffte.git',
+ prefix='/usr/local', recursive=True, commit=args.heffte, directory='heffte')
+ else:
+ return None
+
+
def get_hipsycl(args):
if args.hipsycl is None:
return None
if args.rocm is None:
raise RuntimeError('hipSYCL requires the rocm packages')
- cmake_opts = [f'-DLLVM_DIR=/usr/lib/llvm-{args.llvm}/cmake',
- f'-DCLANG_EXECUTABLE_PATH=/usr/bin/clang++-{args.llvm}',
+ cmake_opts = ['-DLLVM_DIR=/opt/rocm/llvm/lib/cmake/llvm',
'-DCMAKE_PREFIX_PATH=/opt/rocm/lib/cmake',
'-DWITH_ROCM_BACKEND=ON']
if args.cuda is not None:
- cmake_opts += [f'-DCUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda',
+ cmake_opts += ['-DCUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda',
'-DWITH_CUDA_BACKEND=ON']
postinstall = [
- # https://github.com/illuhad/hipSYCL/issues/361#issuecomment-718943645
- 'for f in /opt/rocm/amdgcn/bitcode/*.bc; do ln -s "$f" "/opt/rocm/lib/$(basename $f .bc).amdgcn.bc"; done'
- ]
+ # https://github.com/illuhad/hipSYCL/issues/361#issuecomment-718943645
+ 'for f in /opt/rocm/amdgcn/bitcode/*.bc; do ln -s "$f" "/opt/rocm/lib/$(basename $f .bc).amdgcn.bc"; done'
+ ]
if args.cuda is not None:
postinstall += [
# https://github.com/illuhad/hipSYCL/issues/410#issuecomment-743301929
f'sed s/_OPENMP/__OPENMP_NVPTX__/ -i /usr/lib/llvm-{args.llvm}/lib/clang/*/include/__clang_cuda_complex_builtins.h',
# Not needed unless we're building with CUDA 11.x, but no harm in doing always
- f'ln -s /usr/local/cuda/compat/* /usr/local/cuda/lib64/'
- ]
+ 'ln -s /usr/local/cuda/compat/* /usr/local/cuda/lib64/'
+ ]
return hpccm.building_blocks.generic_cmake(
repository='https://github.com/illuhad/hipSYCL.git',
cmake_opts=['-DCMAKE_BUILD_TYPE=Release', *cmake_opts],
postinstall=postinstall)
-def get_intel_compute_runtime(args):
- # The only reason we need to build Compute Runtime ourselves is because Intel packages have no DG1 support
- # Otherwise, we could have just installed DEB packages from GitHub or Intel PPA
- if args.intel_compute_runtime is None:
- return None
-
- cmake_opts = ['-DCMAKE_BUILD_TYPE=Release',
- '-DSKIP_UNIT_TESTS=TRUE',
- '-DSUPPORT_GEN8=0', '-DSUPPORT_GEN9=1', '-DSUPPORT_GEN11=1', '-DSUPPORT_GEN12LP=1', '-DSUPPORT_DG1=1',
- '-DBUILD_WITH_L0=1']
-
- return hpccm.building_blocks.generic_cmake(
- repository='https://github.com/intel/compute-runtime.git',
- directory='compute-runtime',
- prefix='/usr/local', recursive=True, branch=args.intel_compute_runtime,
- cmake_opts=cmake_opts,
- postinstall=['ldconfig'])
def add_tsan_compiler_build_stage(input_args, output_stages: typing.Mapping[str, hpccm.Stage]):
"""Isolate the expensive TSAN preparation stage.
"""
if not isinstance(output_stages, collections.abc.MutableMapping):
raise RuntimeError('Need output_stages container.')
+ if 'compiler_build' in output_stages:
+ raise RuntimeError('"compiler_build" output stage is already present.')
tsan_stage = hpccm.Stage()
- tsan_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as='tsan')
+ tsan_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args),
+ _distro=hpccm_distro_name(input_args),
+ _as='tsan')
tsan_stage += hpccm.building_blocks.packages(ospackages=['git', 'ca-certificates', 'build-essential', 'cmake'])
# CMake will get duplicated later, but this is an expensive image, and it isn't worth optimizing
repository='https://github.com/llvm/llvm-project.git',
directory='/var/tmp/llvm-project/llvm/',
prefix='/usr/local', recursive=True, branch=compiler_branch,
- cmake_opts=['-D CMAKE_BUILD_TYPE=Release', '-D LLVM_ENABLE_PROJECTS="clang;openmp;clang-tools-extra;compiler-rt;lld"',
+ cmake_opts=['-D CMAKE_BUILD_TYPE=Release',
+ '-D LLVM_ENABLE_PROJECTS="clang;openmp;clang-tools-extra;compiler-rt;lld"',
'-D LIBOMP_TSAN_SUPPORT=on'],
postinstall=['ln -s /usr/local/bin/clang++ /usr/local/bin/clang++-' + str(input_args.llvm),
'ln -s /usr/local/bin/clang-format /usr/local/bin/clang-format-' + str(input_args.llvm),
'ln -s /usr/local/bin/clang-tidy /usr/local/bin/clang-tidy-' + str(input_args.llvm),
- 'ln -s /usr/local/share/clang/run-clang-tidy.py /usr/local/bin/run-clang-tidy-' + str(input_args.llvm) + '.py',
- 'ln -s /usr/local/bin/run-clang-tidy-' + str(input_args.llvm) + '.py /usr/local/bin/run-clang-tidy-' + str(input_args.llvm),
+ 'ln -s /usr/local/share/clang/run-clang-tidy.py /usr/local/bin/run-clang-tidy-'
+ + str(input_args.llvm) + '.py',
+ 'ln -s /usr/local/bin/run-clang-tidy-'
+ + str(input_args.llvm) + '.py /usr/local/bin/run-clang-tidy-' + str(input_args.llvm),
'ln -s /usr/local/libexec/c++-analyzer /usr/local/bin/c++-analyzer-' + str(input_args.llvm)])
output_stages['compiler_build'] = tsan_stage
+
def oneapi_runtime(_from='0'):
oneapi_runtime_stage = hpccm.Stage()
oneapi_runtime_stage += hpccm.primitives.copy(_from='oneapi-build',
"/etc/bash.bashrc": "/etc/bash.bashrc"})
return oneapi_runtime_stage
+
def add_oneapi_compiler_build_stage(input_args, output_stages: typing.Mapping[str, hpccm.Stage]):
"""Isolate the oneAPI preparation stage.
"""
if not isinstance(output_stages, collections.abc.MutableMapping):
raise RuntimeError('Need output_stages container.')
+ if 'compiler_build' in output_stages:
+ raise RuntimeError('"compiler_build" output stage is already present.')
oneapi_stage = hpccm.Stage()
- oneapi_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as='oneapi-build')
+ oneapi_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args),
+ _distro=hpccm_distro_name(input_args),
+ _as='oneapi-build')
version = str(input_args.oneapi)
apt_repositories=['deb https://apt.repos.intel.com/oneapi all main'],
# Add minimal packages (not the whole HPC toolkit!)
ospackages=[f'intel-oneapi-dpcpp-cpp-{version}',
- f'intel-oneapi-openmp-{version}',
- f'intel-oneapi-mkl-{version}',
- f'intel-oneapi-mkl-devel-{version}']
+ f'intel-oneapi-openmp-{version}',
+ f'intel-oneapi-mkl-{version}',
+ f'intel-oneapi-mkl-devel-{version}']
)
# Ensure that all bash shells on the final container will have access to oneAPI
oneapi_stage += hpccm.primitives.shell(
- commands=['echo "source /opt/intel/oneapi/setvars.sh" >> /etc/bash.bashrc',
- 'unlink /opt/intel/oneapi/compiler/latest',
- f'ln -sf /opt/intel/oneapi/compiler/{version} /opt/intel/oneapi/compiler/latest']
- )
+ commands=['echo "source /opt/intel/oneapi/setvars.sh" >> /etc/bash.bashrc',
+ 'unlink /opt/intel/oneapi/compiler/latest',
+ f'ln -sf /opt/intel/oneapi/compiler/{version} /opt/intel/oneapi/compiler/latest']
+ )
setattr(oneapi_stage, 'runtime', oneapi_runtime)
output_stages['compiler_build'] = oneapi_stage
+
def prepare_venv(version: StrictVersion) -> typing.Sequence[str]:
"""Get shell commands to set up the venv for the requested Python version."""
major = version.version[0]
# WARNING: Please keep this list synchronized with python_packaging/requirements-test.txt
# TODO: Get requirements.txt from an input argument.
commands.append(f"""{venv_path}/bin/python -m pip install --upgrade \
+ 'breathe' \
'cmake>=3.16.3' \
'flake8>=3.7.7' \
'gcovr>=4.2' \
'networkx>=2.0' \
'numpy>=1' \
'pip>=10.1' \
+ 'pybind11>2.6' \
'Pygments>=2.2.0' \
'pytest>=3.9' \
'setuptools>=42' \
'scikit-build>=0.10' \
'Sphinx>=1.6.3' \
- 'sphinxcontrib-plantuml>=0.14'""")
-
- # TODO: Remove 'importlib_resources' dependency when Python >=3.7 is required.
- if minor == 6:
- commands.append(f"""{venv_path}/bin/python -m pip install --upgrade \
- 'importlib_resources'""")
-
+ 'sphinxcontrib-plantuml>=0.14' \
+ 'wheel'""")
return commands
-def add_python_stages(building_blocks: typing.Mapping[str, bb_base],
- input_args,
+def add_python_stages(input_args: argparse.Namespace, *,
+ base: str,
output_stages: typing.MutableMapping[str, hpccm.Stage]):
"""Add the stage(s) necessary for the requested venvs.
# copy is a bit slow and wastes local Docker image space for each filesystem
# layer.
pyenv_stage = hpccm.Stage()
- pyenv_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as='pyenv')
- pyenv_stage += building_blocks['compiler']
- pyenv_stage += building_blocks['mpi']
+ pyenv_stage += hpccm.primitives.baseimage(image=base,
+ _distro=hpccm_distro_name(input_args),
+ _as='pyenv')
pyenv_stage += hpccm.building_blocks.packages(ospackages=_python_extra_packages)
for version in [StrictVersion(py_ver) for py_ver in sorted(input_args.venvs)]:
stage_name = 'py' + str(version)
stage = hpccm.Stage()
- stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as=stage_name)
- stage += building_blocks['compiler']
- stage += building_blocks['mpi']
+ stage += hpccm.primitives.baseimage(image=base,
+ _distro=hpccm_distro_name(input_args),
+ _as=stage_name)
stage += hpccm.building_blocks.packages(ospackages=_python_extra_packages)
# TODO: Use a non-root user for testing and Python virtual environments.
if input_args.doxygen is None:
return
# Always clone the same version of linkchecker (latest release at June 1, 2021)
- output_stages['main'] += hpccm.building_blocks.pip(pip='pip3', packages=['git+https://github.com/linkchecker/linkchecker.git@v10.0.1'])
+ output_stages['main'] += hpccm.building_blocks.pip(pip='pip3', packages=[
+ 'git+https://github.com/linkchecker/linkchecker.git@v10.0.1'])
output_stages['main'] += hpccm.primitives.shell(
- commands=['sed -i \'/\"XPS\"/d;/\"PDF\"/d;/\"PS\"/d;/\"EPS\"/d;/disable ghostscript format types/d\' /etc/ImageMagick-6/policy.xml'])
+ commands=[
+ 'sed -i \'/\"XPS\"/d;/\"PDF\"/d;/\"PS\"/d;/\"EPS\"/d;/disable ghostscript format types/d\' /etc/ImageMagick-6/policy.xml'])
if input_args.doxygen == '1.8.5':
doxygen_commit = 'ed4ed873ab0e7f15116e2052119a6729d4589f7a'
output_stages['main'] += hpccm.building_blocks.generic_autotools(
output_stages['main'] += hpccm.primitives.shell(commands=commands)
+def add_base_stage(name: str,
+ input_args,
+ output_stages: typing.MutableMapping[str, hpccm.Stage]):
+ """Establish dependencies that are shared by multiple parallel stages."""
+ # Building blocks are chunks of container-builder instructions that can be
+ # copied to any build stage with the addition operator.
+ building_blocks = collections.OrderedDict()
+ building_blocks['base_packages'] = hpccm.building_blocks.packages(
+ ospackages=_common_packages)
+
+ # These are the most expensive and most reusable layers, so we put them first.
+ building_blocks['compiler'] = get_compiler(input_args, compiler_build_stage=output_stages.get('compiler_build'))
+ building_blocks['gdrcopy'] = get_gdrcopy(input_args, building_blocks['compiler'])
+ building_blocks['ucx'] = get_ucx(input_args, building_blocks['compiler'], building_blocks['gdrcopy'])
+ building_blocks['mpi'] = get_mpi(input_args, building_blocks['compiler'], building_blocks['ucx'])
+
+ # Create the stage from which the targeted image will be tagged.
+ output_stages[name] = hpccm.Stage()
+
+ output_stages[name] += hpccm.primitives.baseimage(image=base_image_tag(input_args),
+ _distro=hpccm_distro_name(input_args),
+ _as=name)
+ for bb in building_blocks.values():
+ if bb is not None:
+ output_stages[name] += bb
+
+
def build_stages(args) -> typing.Iterable[hpccm.Stage]:
"""Define and sequence the stages for the recipe corresponding to *args*."""
if args.oneapi is not None:
add_oneapi_compiler_build_stage(input_args=args, output_stages=stages)
+ add_base_stage(name='build_base', input_args=args, output_stages=stages)
+
+ # Add Python environments to MPI images, only, so we don't have to worry
+ # about whether to install mpi4py.
+ if args.mpi is not None and len(args.venvs) > 0:
+ add_python_stages(base='build_base', input_args=args, output_stages=stages)
+
# Building blocks are chunks of container-builder instructions that can be
# copied to any build stage with the addition operator.
building_blocks = collections.OrderedDict()
- building_blocks['base_packages'] = hpccm.building_blocks.packages(
- ospackages=_common_packages)
- # These are the most expensive and most reusable layers, so we put them first.
- building_blocks['compiler'] = get_compiler(args, compiler_build_stage=stages.get('compiler_build'))
- building_blocks['mpi'] = get_mpi(args, building_blocks['compiler'])
for i, cmake in enumerate(args.cmake):
building_blocks['cmake' + str(i)] = hpccm.building_blocks.cmake(
eula=True,
os_packages += ['lsb-release']
if args.hipsycl is not None:
os_packages += ['libboost-fiber-dev']
- if args.intel_compute_runtime is not None:
- os_packages += _intel_compute_runtime_extra_packages
building_blocks['extra_packages'] = []
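+ # Intel Compute Runtime packages now come from Intel's graphics apt
+ # repository instead of being built from source.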
+ if args.intel_compute_runtime is not None:
+ building_blocks['extra_packages'] += hpccm.building_blocks.packages(
+ apt_keys=['https://repositories.intel.com/graphics/intel-graphics.key'],
+ apt_repositories=['deb [arch=amd64] https://repositories.intel.com/graphics/ubuntu focal main']
+ )
+ os_packages += _intel_compute_runtime_extra_packages
if args.rocm is not None:
building_blocks['extra_packages'] += hpccm.building_blocks.packages(
apt_keys=['http://repo.radeon.com/rocm/rocm.gpg.key'],
)
building_blocks['cuda-clang-workaround'] = hpccm.primitives.shell(commands=[
f'echo "CUDA Version {cuda_version_str}" > /usr/local/cuda/version.txt'
- ])
+ ])
building_blocks['clfft'] = get_clfft(args)
- building_blocks['hipSYCL'] = get_hipsycl(args)
+ building_blocks['heffte'] = get_heffte(args)
- building_blocks['intel-compute-runtime'] = get_intel_compute_runtime(args)
+ building_blocks['hipSYCL'] = get_hipsycl(args)
# Add Python environments to MPI images, only, so we don't have to worry
# about whether to install mpi4py.
# Create the stage from which the targeted image will be tagged.
stages['main'] = hpccm.Stage()
- stages['main'] += hpccm.primitives.baseimage(image=base_image_tag(args))
+ stages['main'] += hpccm.primitives.baseimage(image='build_base',
+ _distro=hpccm_distro_name(args),
+ _as='main')
for bb in building_blocks.values():
if bb is not None:
stages['main'] += bb