* Eric Irrgang <ericirrgang@gmail.com>
* Joe Jordan <e.jjordan12@gmail.com>
* Mark Abraham <mark.j.abraham@gmail.com>
+ * Gaurav Garg <gaugarg@nvidia.com>
Usage::
'ocl-icd-libopencl1',
'ocl-icd-opencl-dev',
'opencl-headers',
- # The following require
- # apt_keys=['http://repo.radeon.com/rocm/rocm.gpg.key'],
- # apt_repositories=['deb [arch=amd64] http://repo.radeon.com/rocm/apt/4.0.1/ xenial main']
- 'libelf1',
- 'rocm-opencl',
- 'rocm-dev',
- 'clinfo'
]
+_rocm_extra_packages = [
+    # The following packages require
+ # apt_keys=['http://repo.radeon.com/rocm/rocm.gpg.key'],
+ # apt_repositories=['deb [arch=amd64] http://repo.radeon.com/rocm/apt/4.0.1/ xenial main']
+ 'libelf1',
+ 'rocm-opencl',
+ 'rocm-dev',
+ 'clinfo',
+ 'rocfft',
+ 'hipfft',
+]
+
+
# Extra packages needed to build Intel Compute Runtime
_intel_compute_runtime_extra_packages = ['pkg-config',
'libxml2',
'texlive-latex-base',
'texlive-latex-extra',
'texlive-fonts-recommended',
- 'texlive-fonts-extra']
+ 'texlive-fonts-extra',
+ 'tex-gyre']
# Parse command line arguments
parser = argparse.ArgumentParser(description='GROMACS CI image creation script',
raise RuntimeError('Logic error: no Linux distribution selected.')
return base_image_tag
+# Convert the linux distribution variables into something that hpccm
+# understands.
+def hpccm_distro_name(args) -> str:
+ if args.centos is not None:
+ name_mapping = { '7': 'centos7',
+ '8': 'centos8' }
+ if args.centos in name_mapping:
+ hpccm_name = name_mapping[args.centos]
+ else:
+ raise RuntimeError('Logic error: unsupported CentOS distribution selected.')
+ elif args.ubuntu is not None:
+ name_mapping = { '20.04': 'ubuntu20',
+ '18.04': 'ubuntu18',
+ '16.04': 'ubuntu16' }
+ if args.ubuntu in name_mapping:
+ hpccm_name = name_mapping[args.ubuntu]
+ else:
+ raise RuntimeError('Logic error: unsupported Ubuntu distribution selected.')
+ else:
+ raise RuntimeError('Logic error: no Linux distribution selected.')
+ return hpccm_name
def get_llvm_packages(args) -> typing.Iterable[str]:
# If we use the package version of LLVM, we need to install extra packages for it.
else:
return []
+def get_rocm_packages(args) -> typing.Iterable[str]:
+ if (args.rocm is None):
+ return []
+ else:
+ return _rocm_extra_packages
+
def get_compiler(args, compiler_build_stage: hpccm.Stage = None) -> bb_base:
# Compiler
if args.llvm is not None:
raise RuntimeError('No TSAN compiler build stage!')
# Build the default compiler if we don't need special support
else:
- compiler = hpccm.building_blocks.llvm(extra_repository=True, version=args.llvm)
+            # Currently the focal apt repositories do not contain
+            # llvm higher than 11, so we work around that. This will
+            # need further work when we start supporting Ubuntu 22.04.
+ compiler = hpccm.building_blocks.llvm(version=args.llvm, upstream=True if int(args.llvm) > 11 else False)
elif args.oneapi is not None:
if compiler_build_stage is not None:
return compiler
-def get_mpi(args, compiler):
+def get_gdrcopy(args, compiler):
+ if args.cuda is not None:
+ if hasattr(compiler, 'toolchain'):
+ # Version last updated June 7, 2021
+ return hpccm.building_blocks.gdrcopy(toolchain=compiler.toolchain, version="2.2")
+ else:
+ raise RuntimeError('compiler is not an HPCCM compiler building block!')
+ else:
+ return None
+
+def get_ucx(args, compiler, gdrcopy):
+ if args.cuda is not None:
+ if hasattr(compiler, 'toolchain'):
+ use_gdrcopy = (gdrcopy is not None)
+ # Version last updated June 7, 2021
+ return hpccm.building_blocks.ucx(toolchain=compiler.toolchain, gdrcopy=use_gdrcopy, version="1.10.1", cuda=True)
+ else:
+ raise RuntimeError('compiler is not an HPCCM compiler building block!')
+ else:
+ return None
+
+def get_mpi(args, compiler, ucx):
# If needed, add MPI to the image
if args.mpi is not None:
if args.mpi == 'openmpi':
- use_cuda = False
- if args.cuda is not None:
- use_cuda = True
-
if hasattr(compiler, 'toolchain'):
if args.oneapi is not None:
raise RuntimeError('oneAPI building OpenMPI is not supported')
- return hpccm.building_blocks.openmpi(toolchain=compiler.toolchain, cuda=use_cuda, infiniband=False)
+ use_cuda = (args.cuda is not None)
+ use_ucx = (ucx is not None)
+ # Version last updated June 7, 2021
+ return hpccm.building_blocks.openmpi(toolchain=compiler.toolchain, version="4.1.1", cuda=use_cuda, ucx=use_ucx, infiniband=False)
else:
raise RuntimeError('compiler is not an HPCCM compiler building block!')
else:
return None
+def get_heffte(args):
+ if (args.heffte is not None):
+ return hpccm.building_blocks.generic_cmake(
+ cmake_opts=['-D CMAKE_BUILD_TYPE=Release',
+ '-D CUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda',
+ '-D Heffte_ENABLE_CUDA=ON',
+ '-D Heffte_ENABLE_FFTW=OFF',
+ '-D BUILD_SHARED_LIBS=ON'],
+ repository='https://bitbucket.org/icl/heffte.git',
+ prefix='/usr/local', recursive=True, commit=args.heffte, directory='heffte')
+ else:
+ return None
+
def get_hipsycl(args):
if args.hipsycl is None:
return None
if args.llvm is None:
raise RuntimeError('Can not build hipSYCL without llvm')
- cmake_opts = [f'-DLLVM_DIR=/usr/lib/llvm-{args.llvm}/cmake',
- f'-DCLANG_EXECUTABLE_PATH=/usr/bin/clang++-{args.llvm}',
+ if args.rocm is None:
+ raise RuntimeError('hipSYCL requires the rocm packages')
+
+ cmake_opts = [f'-DLLVM_DIR=/opt/rocm/llvm/lib/cmake/llvm',
'-DCMAKE_PREFIX_PATH=/opt/rocm/lib/cmake',
'-DWITH_ROCM_BACKEND=ON']
if args.cuda is not None:
# WARNING: Please keep this list synchronized with python_packaging/requirements-test.txt
# TODO: Get requirements.txt from an input argument.
commands.append(f"""{venv_path}/bin/python -m pip install --upgrade \
+ 'breathe' \
'cmake>=3.16.3' \
'flake8>=3.7.7' \
'gcovr>=4.2' \
'Pygments>=2.2.0' \
'pytest>=3.9' \
'setuptools>=42' \
- 'scikit-build>=0.10' \
'Sphinx>=1.6.3' \
'sphinxcontrib-plantuml>=0.14'""")
pyenv_stage = hpccm.Stage()
pyenv_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as='pyenv')
pyenv_stage += building_blocks['compiler']
+ if building_blocks['gdrcopy'] is not None:
+ pyenv_stage += building_blocks['gdrcopy']
+ if building_blocks['ucx'] is not None:
+ pyenv_stage += building_blocks['ucx']
pyenv_stage += building_blocks['mpi']
pyenv_stage += hpccm.building_blocks.packages(ospackages=_python_extra_packages)
stage = hpccm.Stage()
stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as=stage_name)
stage += building_blocks['compiler']
+ if building_blocks['gdrcopy'] is not None:
+ stage += building_blocks['gdrcopy']
+ if building_blocks['ucx'] is not None:
+ stage += building_blocks['ucx']
stage += building_blocks['mpi']
stage += hpccm.building_blocks.packages(ospackages=_python_extra_packages)
building_blocks['base_packages'] = hpccm.building_blocks.packages(
ospackages=_common_packages)
+ # Normally in hpccm the first call to baseimage sets the context
+    # for other packages, e.g. for which apt repository to
+ # use. We want to set that early on.
+ hpccm.config.set_linux_distro(hpccm_distro_name(args))
+
# These are the most expensive and most reusable layers, so we put them first.
building_blocks['compiler'] = get_compiler(args, compiler_build_stage=stages.get('compiler_build'))
- building_blocks['mpi'] = get_mpi(args, building_blocks['compiler'])
+ building_blocks['gdrcopy'] = get_gdrcopy(args, building_blocks['compiler'])
+ building_blocks['ucx'] = get_ucx(args, building_blocks['compiler'], building_blocks['gdrcopy'])
+ building_blocks['mpi'] = get_mpi(args, building_blocks['compiler'], building_blocks['ucx'])
for i, cmake in enumerate(args.cmake):
building_blocks['cmake' + str(i)] = hpccm.building_blocks.cmake(
eula=True,
version=cmake)
# Install additional packages early in the build to optimize Docker build layer cache.
- os_packages = list(get_llvm_packages(args)) + get_opencl_packages(args)
+ os_packages = list(get_llvm_packages(args)) + get_opencl_packages(args) + get_rocm_packages(args)
if args.doxygen is not None:
os_packages += _docs_extra_packages
if args.oneapi is not None:
os_packages += ['libboost-fiber-dev']
if args.intel_compute_runtime is not None:
os_packages += _intel_compute_runtime_extra_packages
- building_blocks['extra_packages'] = hpccm.building_blocks.packages(
+ building_blocks['extra_packages'] = []
+ if args.rocm is not None:
+ building_blocks['extra_packages'] += hpccm.building_blocks.packages(
+ apt_keys=['http://repo.radeon.com/rocm/rocm.gpg.key'],
+ apt_repositories=[f'deb [arch=amd64] http://repo.radeon.com/rocm/apt/{args.rocm}/ xenial main']
+ )
+ building_blocks['extra_packages'] += hpccm.building_blocks.packages(
ospackages=os_packages,
- apt_ppas=['ppa:intel-opencl/intel-opencl'],
- apt_keys=['http://repo.radeon.com/rocm/rocm.gpg.key'],
- apt_repositories=['deb [arch=amd64] http://repo.radeon.com/rocm/apt/4.0.1/ xenial main']
- )
+ apt_ppas=['ppa:intel-opencl/intel-opencl'])
if args.cuda is not None and args.llvm is not None:
# Hack to tell clang what version of CUDA we're using
building_blocks['clfft'] = get_clfft(args)
+ building_blocks['heffte'] = get_heffte(args)
+
building_blocks['hipSYCL'] = get_hipsycl(args)
building_blocks['intel-compute-runtime'] = get_intel_compute_runtime(args)