Support testing CUDA-aware MPI in CI
[alexxy/gromacs.git] admin/containers/scripted_gmx_docker_builds.py
#!/usr/bin/env python
#
# This file is part of the GROMACS molecular simulation package.
#
# Copyright (c) 2020,2021, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
#
# GROMACS is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# GROMACS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with GROMACS; if not, see
# http://www.gnu.org/licenses, or write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
#
# If you want to redistribute modifications to GROMACS, please
# consider that scientific software is very special. Version
# control is crucial - bugs must be traceable. We will be happy to
# consider code for inclusion in the official distribution, but
# derived work must not be called official GROMACS. Details are found
# in the README & COPYING files - if they are missing, get the
# official version at http://www.gromacs.org.
#
# To help us fund GROMACS development, we humbly ask that you cite
# the research papers on the package. Check out http://www.gromacs.org.

36 """Building block based Dockerfile generation for CI testing images.
37
38 Generates a set of docker images used for running GROMACS CI on Gitlab.
39 The images are prepared according to a selection of build configuration targets
40 that hope to cover a broad enough scope of different possible systems,
41 allowing us to check compiler types and versions, as well as libraries used
42 for accelerators and parallel communication systems. Each combinations is
43 described as an entry in the build_configs dictionary, with the script
44 analysing the logic and adding build stages as needed.
45
46 Based on the example script provided by the NVidia HPCCM repository.
47
48 Reference:
49     `NVidia HPC Container Maker <https://github.com/NVIDIA/hpc-container-maker>`__
50
51 Authors:
52     * Paul Bauer <paul.bauer.q@gmail.com>
53     * Eric Irrgang <ericirrgang@gmail.com>
54     * Joe Jordan <e.jjordan12@gmail.com>
55     * Mark Abraham <mark.j.abraham@gmail.com>
56
57 Usage::
58
59     $ python3 scripted_gmx_docker_builds.py --help
60     $ python3 scripted_gmx_docker_builds.py --format docker > Dockerfile && docker build .
61     $ python3 scripted_gmx_docker_builds.py | docker build -
62
63 See Also:
64     :file:`buildall.sh`
65
66 """

import argparse
import collections
import typing
from distutils.version import StrictVersion

import hpccm
import hpccm.config
from hpccm.building_blocks.base import bb_base

try:
    import utility
except ImportError:
    raise RuntimeError(
        'This module assumes availability of supporting modules in the same directory. Add the directory to '
        'PYTHONPATH or invoke Python from within the module directory so module location can be resolved.')

# Basic packages for all final images.
_common_packages = ['build-essential',
                    'ca-certificates',
                    'ccache',
                    'git',
                    'gnupg',
                    'gpg-agent',
                    'libfftw3-dev',
                    'libhwloc-dev',
                    'liblapack-dev',
                    'libx11-dev',
                    'moreutils',
                    'ninja-build',
                    'rsync',
                    'valgrind',
                    'vim',
                    'wget',
                    'xsltproc']

_opencl_extra_packages = [
    'nvidia-opencl-dev',
    # The following require apt_ppas=['ppa:intel-opencl/intel-opencl']
    'intel-opencl-icd',
    'ocl-icd-libopencl1',
    'ocl-icd-opencl-dev',
    'opencl-headers',
]

_rocm_extra_packages = [
        # The following require
        #             apt_keys=['http://repo.radeon.com/rocm/rocm.gpg.key'],
        #             apt_repositories=['deb [arch=amd64] http://repo.radeon.com/rocm/apt/4.0.1/ xenial main']
        'libelf1',
        'rocm-opencl',
        'rocm-dev',
        'clinfo'
]

# Extra packages needed to build Intel Compute Runtime
_intel_compute_runtime_extra_packages = ['pkg-config',
                                         'libxml2',
                                         'libxml2-dev',
                                         'libigc',
                                         'libigc-dev',
                                         'libigdgmm11',
                                         'libigdgmm-dev',
                                         'libze-loader',
                                         'libze-loader-dev',
                                         'ocl-icd-libopencl1',
                                         'ocl-icd-opencl-dev',
                                         'opencl-headers']

# Extra packages needed to build Python installations from source.
_python_extra_packages = ['build-essential',
                          'ca-certificates',
                          'ccache',
                          'curl',
                          'git',
                          'libbz2-dev',
                          'libffi-dev',
                          'liblzma-dev',
                          'libncurses5-dev',
                          'libncursesw5-dev',
                          'libreadline-dev',
                          'libsqlite3-dev',
                          'libssl-dev',
                          'llvm',
                          'python-openssl',
                          'vim',
                          'wget',
                          'zlib1g-dev']

# Extra packages needed for images for building documentation.
_docs_extra_packages = ['autoconf',
                        'automake',
                        'autopoint',
                        'autotools-dev',
                        'bison',
                        'flex',
                        'ghostscript',
                        'graphviz',
                        'help2man',
                        'imagemagick',
                        'libtool',
                        'mscgen',
                        'm4',
                        'openssh-client',
                        'texinfo',
                        'texlive-latex-base',
                        'texlive-latex-extra',
                        'texlive-fonts-recommended',
                        'texlive-fonts-extra']

# Parse command line arguments
parser = argparse.ArgumentParser(description='GROMACS CI image creation script',
                                 parents=[utility.parser])

parser.add_argument('--format', type=str, default='docker',
                    choices=['docker', 'singularity'],
                    help='Container specification format (default: docker)')


def base_image_tag(args) -> str:
    # Check if we use CUDA images or plain linux images
    if args.cuda is not None:
        cuda_version_tag = 'nvidia/cuda:' + args.cuda + '-devel'
        if args.centos is not None:
            cuda_version_tag += '-centos' + args.centos
        elif args.ubuntu is not None:
            cuda_version_tag += '-ubuntu' + args.ubuntu
        else:
            raise RuntimeError('Logic error: no Linux distribution selected.')

        base_image_tag = cuda_version_tag
    else:
        if args.centos is not None:
            base_image_tag = 'centos:centos' + args.centos
        elif args.ubuntu is not None:
            base_image_tag = 'ubuntu:' + args.ubuntu
        else:
            raise RuntimeError('Logic error: no Linux distribution selected.')
    return base_image_tag

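# For example (hypothetical argument values), ``--cuda 11.0 --ubuntu 18.04``
# yields the base image tag 'nvidia/cuda:11.0-devel-ubuntu18.04', while plain
# ``--ubuntu 20.04`` yields 'ubuntu:20.04'.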

def get_llvm_packages(args) -> typing.Iterable[str]:
    # If we use the package version of LLVM, we need to install extra packages for it.
    if (args.llvm is not None) and (args.tsan is None):
        packages = [f'libomp-{args.llvm}-dev',
                    f'libomp5-{args.llvm}',
                    'clang-format-' + str(args.llvm),
                    'clang-tidy-' + str(args.llvm)]
        if args.hipsycl is not None:
            packages += [f'llvm-{args.llvm}-dev',
                         f'libclang-{args.llvm}-dev',
                         f'lld-{args.llvm}']
        return packages
    else:
        return []
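
# For example, ``--llvm 11`` (without --tsan) selects 'libomp-11-dev',
# 'libomp5-11', 'clang-format-11', and 'clang-tidy-11'; adding --hipsycl
# also pulls in 'llvm-11-dev', 'libclang-11-dev', and 'lld-11'.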

def get_opencl_packages(args) -> typing.Iterable[str]:
    if (args.doxygen is None) and (args.oneapi is None):
        return _opencl_extra_packages
    else:
        return []

def get_rocm_packages(args) -> typing.Iterable[str]:
    if (args.rocm is None):
        return []
    else:
        return _rocm_extra_packages

def get_compiler(args, compiler_build_stage: hpccm.Stage = None) -> bb_base:
    # Compiler
    if args.llvm is not None:
        # Build our own version instead to get TSAN + OMP
        if args.tsan is not None:
            if compiler_build_stage is not None:
                compiler = compiler_build_stage.runtime(_from='tsan')
            else:
                raise RuntimeError('No TSAN compiler build stage!')
        # Build the default compiler if we don't need special support
        else:
            compiler = hpccm.building_blocks.llvm(extra_repository=True, version=args.llvm)

    elif args.oneapi is not None:
        if compiler_build_stage is not None:
            compiler = compiler_build_stage.runtime(_from='oneapi')
            # Prepare the toolchain (needed only for builds done within the Dockerfile, e.g.
            # OpenMPI builds, which don't currently work for other reasons)
            oneapi_toolchain = hpccm.toolchain(CC=f'/opt/intel/oneapi/compiler/{args.oneapi}/linux/bin/intel64/icx',
                                               CXX=f'/opt/intel/oneapi/compiler/{args.oneapi}/linux/bin/intel64/icpx')
            setattr(compiler, 'toolchain', oneapi_toolchain)

        else:
            raise RuntimeError('No oneAPI compiler build stage!')

    elif args.gcc is not None:
        compiler = hpccm.building_blocks.gnu(extra_repository=True,
                                             version=args.gcc,
                                             fortran=False)
    else:
        raise RuntimeError('Logic error: no compiler toolchain selected.')
    return compiler


def get_gdrcopy(args, compiler):
    if args.cuda is not None:
        if hasattr(compiler, 'toolchain'):
            # Version last updated June 7, 2021
            return hpccm.building_blocks.gdrcopy(toolchain=compiler.toolchain, version="2.2")
        else:
            raise RuntimeError('compiler is not an HPCCM compiler building block!')
    else:
        return None

def get_ucx(args, compiler, gdrcopy):
    if args.cuda is not None:
        if hasattr(compiler, 'toolchain'):
            use_gdrcopy = (gdrcopy is not None)
            # Version last updated June 7, 2021
            return hpccm.building_blocks.ucx(toolchain=compiler.toolchain, gdrcopy=use_gdrcopy, version="1.10.1", cuda=True)
        else:
            raise RuntimeError('compiler is not an HPCCM compiler building block!')
    else:
        return None

def get_mpi(args, compiler, ucx):
    # If needed, add MPI to the image
    if args.mpi is not None:
        if args.mpi == 'openmpi':
            if hasattr(compiler, 'toolchain'):
                if args.oneapi is not None:
                    raise RuntimeError('oneAPI building OpenMPI is not supported')
                use_cuda = (args.cuda is not None)
                use_ucx = (ucx is not None)
                # Version last updated June 7, 2021
                return hpccm.building_blocks.openmpi(toolchain=compiler.toolchain, version="4.1.1", cuda=use_cuda, ucx=use_ucx, infiniband=False)
            else:
                raise RuntimeError('compiler is not an HPCCM compiler building block!')

        elif args.mpi == 'impi':
            # TODO Intel MPI from the oneAPI repo is not working reliably,
            # reasons are unclear. When solved, add packages called:
            # 'intel-oneapi-mpi', 'intel-oneapi-mpi-devel'
            # during the compiler stage.
            # TODO also consider hpccm's intel_mpi package if that doesn't need
            # a license to run.
            raise RuntimeError('Intel MPI recipe not implemented yet.')
        else:
            raise RuntimeError('Requested unknown MPI implementation.')
    else:
        return None

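# When --cuda is combined with --mpi openmpi, the three building blocks above
# chain together to provide a CUDA-aware MPI stack: gdrcopy is built first,
# UCX is then built with CUDA and gdrcopy support, and OpenMPI is finally
# built against that UCX (cuda=True, ucx=True, infiniband=False).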

def get_clfft(args):
    if (args.clfft is not None):
        return hpccm.building_blocks.generic_cmake(
            repository='https://github.com/clMathLibraries/clFFT.git',
            prefix='/usr/local', recursive=True, branch=args.clfft, directory='clFFT/src')
    else:
        return None

def get_hipsycl(args):
    if args.hipsycl is None:
        return None
    if args.llvm is None:
        raise RuntimeError('Cannot build hipSYCL without LLVM')

    if args.rocm is None:
        raise RuntimeError('hipSYCL requires the rocm packages')

    cmake_opts = [f'-DLLVM_DIR=/usr/lib/llvm-{args.llvm}/cmake',
                  f'-DCLANG_EXECUTABLE_PATH=/usr/bin/clang++-{args.llvm}',
                  '-DCMAKE_PREFIX_PATH=/opt/rocm/lib/cmake',
                  '-DWITH_ROCM_BACKEND=ON']
    if args.cuda is not None:
        cmake_opts += ['-DCUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda',
                       '-DWITH_CUDA_BACKEND=ON']

    postinstall = [
        # https://github.com/illuhad/hipSYCL/issues/361#issuecomment-718943645
        'for f in /opt/rocm/amdgcn/bitcode/*.bc; do ln -s "$f" "/opt/rocm/lib/$(basename $f .bc).amdgcn.bc"; done'
    ]
    if args.cuda is not None:
        postinstall += [
            # https://github.com/illuhad/hipSYCL/issues/410#issuecomment-743301929
            f'sed s/_OPENMP/__OPENMP_NVPTX__/ -i /usr/lib/llvm-{args.llvm}/lib/clang/*/include/__clang_cuda_complex_builtins.h',
            # Not needed unless we're building with CUDA 11.x, but no harm in always doing it
            'ln -s /usr/local/cuda/compat/* /usr/local/cuda/lib64/'
        ]

    return hpccm.building_blocks.generic_cmake(
        repository='https://github.com/illuhad/hipSYCL.git',
        directory='/var/tmp/hipSYCL',
        prefix='/usr/local', recursive=True, commit=args.hipsycl,
        cmake_opts=['-DCMAKE_BUILD_TYPE=Release', *cmake_opts],
        postinstall=postinstall)
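
# Note: building hipSYCL here requires both --llvm and --rocm (see the checks
# above); e.g. (hypothetical values) ``--llvm 11 --rocm 4.0.1 --hipsycl <commit>``
# enables the ROCm backend, and adding --cuda also enables the CUDA backend.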

def get_intel_compute_runtime(args):
    # The only reason we need to build Compute Runtime ourselves is that Intel packages have no DG1 support.
    # Otherwise, we could have just installed DEB packages from GitHub or Intel PPA.
    if args.intel_compute_runtime is None:
        return None

    cmake_opts = ['-DCMAKE_BUILD_TYPE=Release',
                  '-DSKIP_UNIT_TESTS=TRUE',
                  '-DSUPPORT_GEN8=0', '-DSUPPORT_GEN9=1', '-DSUPPORT_GEN11=1', '-DSUPPORT_GEN12LP=1', '-DSUPPORT_DG1=1',
                  '-DBUILD_WITH_L0=1']

    return hpccm.building_blocks.generic_cmake(
        repository='https://github.com/intel/compute-runtime.git',
        directory='compute-runtime',
        prefix='/usr/local', recursive=True, branch=args.intel_compute_runtime,
        cmake_opts=cmake_opts,
        postinstall=['ldconfig'])

def add_tsan_compiler_build_stage(input_args, output_stages: typing.Mapping[str, hpccm.Stage]):
    """Isolate the expensive TSAN preparation stage.

    This is a very expensive stage, but has few and disjoint dependencies, and
    its output is easily compartmentalized (/usr/local) so we can isolate this
    build stage to maximize build cache hits and reduce rebuild time, bookkeeping,
    and final image size.
    """
    if not isinstance(output_stages, collections.abc.MutableMapping):
        raise RuntimeError('Need output_stages container.')
    tsan_stage = hpccm.Stage()
    tsan_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as='tsan')

    tsan_stage += hpccm.building_blocks.packages(ospackages=['git', 'ca-certificates', 'build-essential', 'cmake'])
    # CMake will get duplicated later, but this is an expensive image, and it isn't worth optimizing
    # out that duplication...
    tsan_stage += hpccm.building_blocks.python(python3=True, python2=False, devel=False)

    compiler_branch = 'release/' + str(input_args.llvm) + '.x'
    tsan_stage += hpccm.building_blocks.generic_cmake(
        repository='https://github.com/llvm/llvm-project.git',
        directory='/var/tmp/llvm-project/llvm/',
        prefix='/usr/local', recursive=True, branch=compiler_branch,
        cmake_opts=['-D CMAKE_BUILD_TYPE=Release', '-D LLVM_ENABLE_PROJECTS="clang;openmp;clang-tools-extra;compiler-rt;lld"',
                    '-D LIBOMP_TSAN_SUPPORT=on'],
        postinstall=['ln -s /usr/local/bin/clang++ /usr/local/bin/clang++-' + str(input_args.llvm),
                     'ln -s /usr/local/bin/clang-format /usr/local/bin/clang-format-' + str(input_args.llvm),
                     'ln -s /usr/local/bin/clang-tidy /usr/local/bin/clang-tidy-' + str(input_args.llvm),
                     'ln -s /usr/local/share/clang/run-clang-tidy.py /usr/local/bin/run-clang-tidy-' + str(input_args.llvm) + '.py',
                     'ln -s /usr/local/bin/run-clang-tidy-' + str(input_args.llvm) + '.py /usr/local/bin/run-clang-tidy-' + str(input_args.llvm),
                     'ln -s /usr/local/libexec/c++-analyzer /usr/local/bin/c++-analyzer-' + str(input_args.llvm)])
    output_stages['compiler_build'] = tsan_stage

def oneapi_runtime(_from='0'):
    oneapi_runtime_stage = hpccm.Stage()
    oneapi_runtime_stage += hpccm.primitives.copy(_from='oneapi-build',
                                                  files={"/opt/intel": "/opt/intel",
                                                         "/etc/bash.bashrc": "/etc/bash.bashrc"})
    return oneapi_runtime_stage

def add_oneapi_compiler_build_stage(input_args, output_stages: typing.Mapping[str, hpccm.Stage]):
    """Isolate the oneAPI preparation stage.

    This stage is isolated so that its installed components are minimized in the
    final image (chiefly /opt/intel) and its environment setup script can be
    sourced. This also helps with rebuild time and final image size.
    """
    if not isinstance(output_stages, collections.abc.MutableMapping):
        raise RuntimeError('Need output_stages container.')
    oneapi_stage = hpccm.Stage()
    oneapi_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as='oneapi-build')

    version = str(input_args.oneapi)

    # Add required components for the next stage (both for hpccm and Intel's setvars.sh script)
    oneapi_stage += hpccm.building_blocks.packages(ospackages=['wget', 'gnupg2', 'ca-certificates', 'lsb-release'])
    oneapi_stage += hpccm.building_blocks.packages(
        apt_keys=['https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB'],
        apt_repositories=['deb https://apt.repos.intel.com/oneapi all main'],
        # Add minimal packages (not the whole HPC toolkit!)
        ospackages=[f'intel-oneapi-dpcpp-cpp-{version}',
            f'intel-oneapi-openmp-{version}',
            f'intel-oneapi-mkl-{version}',
            f'intel-oneapi-mkl-devel-{version}']
    )
    # Ensure that all bash shells on the final container will have access to oneAPI
    oneapi_stage += hpccm.primitives.shell(
            commands=['echo "source /opt/intel/oneapi/setvars.sh" >> /etc/bash.bashrc',
                      'unlink /opt/intel/oneapi/compiler/latest',
                     f'ln -sf /opt/intel/oneapi/compiler/{version} /opt/intel/oneapi/compiler/latest']
            )
    setattr(oneapi_stage, 'runtime', oneapi_runtime)

    output_stages['compiler_build'] = oneapi_stage

def prepare_venv(version: StrictVersion) -> typing.Sequence[str]:
    """Get shell commands to set up the venv for the requested Python version."""
    major = version.version[0]
    minor = version.version[1]  # type: int

    pyenv = '$HOME/.pyenv/bin/pyenv'

    py_ver = f'{major}.{minor}'
    venv_path = f'$HOME/venv/py{py_ver}'
    commands = [f'$({pyenv} prefix `{pyenv} whence python{py_ver}`)/bin/python -m venv {venv_path}']

    commands.append(f'{venv_path}/bin/python -m pip install --upgrade pip setuptools')
    # Install dependencies for building and testing gmxapi Python package.
    # WARNING: Please keep this list synchronized with python_packaging/requirements-test.txt
    # TODO: Get requirements.txt from an input argument.
    commands.append(f"""{venv_path}/bin/python -m pip install --upgrade \
            'cmake>=3.16.3' \
            'flake8>=3.7.7' \
            'gcovr>=4.2' \
            'mpi4py>=3.0.3' \
            'networkx>=2.0' \
            'numpy>=1' \
            'pip>=10.1' \
            'Pygments>=2.2.0' \
            'pytest>=3.9' \
            'setuptools>=42' \
            'scikit-build>=0.10' \
            'Sphinx>=1.6.3' \
            'sphinxcontrib-plantuml>=0.14'""")

    # TODO: Remove 'importlib_resources' dependency when Python >=3.7 is required.
    if minor == 6:
        commands.append(f"""{venv_path}/bin/python -m pip install --upgrade \
                'importlib_resources'""")

    return commands

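# For example, prepare_venv(StrictVersion('3.9')) returns the shell commands
# that create $HOME/venv/py3.9 from a pyenv-installed CPython 3.9 and install
# the gmxapi build/test requirements listed above into it.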

def add_python_stages(building_blocks: typing.Mapping[str, bb_base],
                      input_args,
                      output_stages: typing.MutableMapping[str, hpccm.Stage]):
    """Add the stage(s) necessary for the requested venvs.

    One intermediate build stage is created for each venv (see --venv option).

    Each stage partially populates Python installations and venvs in the home
    directory. The home directory is collected by the 'pyenv' stage for use by
    the main build stage.
    """
    if len(input_args.venvs) < 1:
        raise RuntimeError('No venvs to build...')
    if output_stages is None or not isinstance(output_stages, collections.abc.Mapping):
        raise RuntimeError('Need a container for output stages.')

    # Main Python stage that collects the environments from individual stages.
    # We collect the stages individually, rather than chaining them, because the
    # copy is a bit slow and wastes local Docker image space for each filesystem
    # layer.
    pyenv_stage = hpccm.Stage()
    pyenv_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as='pyenv')
    pyenv_stage += building_blocks['compiler']
    if building_blocks['gdrcopy'] is not None:
        pyenv_stage += building_blocks['gdrcopy']
    if building_blocks['ucx'] is not None:
        pyenv_stage += building_blocks['ucx']
    pyenv_stage += building_blocks['mpi']
    pyenv_stage += hpccm.building_blocks.packages(ospackages=_python_extra_packages)

    for version in [StrictVersion(py_ver) for py_ver in sorted(input_args.venvs)]:
        stage_name = 'py' + str(version)
        stage = hpccm.Stage()
        stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as=stage_name)
        stage += building_blocks['compiler']
        if building_blocks['gdrcopy'] is not None:
            stage += building_blocks['gdrcopy']
        if building_blocks['ucx'] is not None:
            stage += building_blocks['ucx']
        stage += building_blocks['mpi']
        stage += hpccm.building_blocks.packages(ospackages=_python_extra_packages)

        # TODO: Use a non-root user for testing and Python virtual environments.
        stage += hpccm.primitives.shell(commands=[
            'curl https://pyenv.run | bash',
            """echo 'export PYENV_ROOT="$HOME/.pyenv"' >> $HOME/.bashrc""",
            """echo 'export PATH="$PYENV_ROOT/bin:$PATH"' >> $HOME/.bashrc""",
            """echo 'eval "$(pyenv init -)"' >> $HOME/.bashrc""",
            """echo 'eval "$(pyenv virtualenv-init -)"' >> $HOME/.bashrc"""])
        pyenv = '$HOME/.pyenv/bin/pyenv'
        commands = [f'PYTHON_CONFIGURE_OPTS="--enable-shared" {pyenv} install -s {version}']
        stage += hpccm.primitives.shell(commands=commands)

        commands = prepare_venv(version)
        stage += hpccm.primitives.shell(commands=commands)

        # TODO: Update user home directory.
        pyenv_stage += hpccm.primitives.copy(_from=stage_name, _mkdir=True, src=['/root/'],
                                             dest='/root')

        # Add the intermediate build stage to the sequence
        output_stages[stage_name] = stage

    # TODO: If we activate pyenv for login shells, the `global` "version" should be full-featured.
    # # `version` should be a system installation or pyenv environment (or pyenv-virtualenv)
    # # with the dependencies for all of the Python aspects of CMake-driven builds.
    # commands = '{pyenv} global {version}'.format(
    #             pyenv=pyenv,
    #             version=...)
    # pyenv_stage += hpccm.primitives.shell(commands=commands)

    # Add the aggregating build stage to the sequence. This allows the main stage to copy
    # the files in a single stage, potentially reducing the overall output image size.
    output_stages['pyenv'] = pyenv_stage


def add_documentation_dependencies(input_args,
                                   output_stages: typing.MutableMapping[str, hpccm.Stage]):
    """Add appropriate layers according to doxygen input arguments."""
    if input_args.doxygen is None:
        return
    # Always clone the same version of linkchecker (latest release at June 1, 2021)
    output_stages['main'] += hpccm.building_blocks.pip(pip='pip3', packages=['git+https://github.com/linkchecker/linkchecker.git@v10.0.1'])
    output_stages['main'] += hpccm.primitives.shell(
        commands=['sed -i \'/\"XPS\"/d;/\"PDF\"/d;/\"PS\"/d;/\"EPS\"/d;/disable ghostscript format types/d\' /etc/ImageMagick-6/policy.xml'])
    if input_args.doxygen == '1.8.5':
        doxygen_commit = 'ed4ed873ab0e7f15116e2052119a6729d4589f7a'
        output_stages['main'] += hpccm.building_blocks.generic_autotools(
            repository='https://github.com/westes/flex.git',
            commit='f7788a9a0ecccdc953ed12043ccb59ca25714018',
            prefix='/tmp/install-of-flex',
            configure_opts=['--disable-shared'],
            preconfigure=['./autogen.sh'])
        output_stages['main'] += hpccm.building_blocks.generic_autotools(
            repository='https://github.com/doxygen/doxygen.git',
            commit=doxygen_commit,
            prefix='',
            configure_opts=[
                '--flex /tmp/install-of-flex/bin/flex',
                '--static'])
    else:
        version = input_args.doxygen
        archive_name = f'doxygen-{version}.linux.bin.tar.gz'
        archive_url = f'https://sourceforge.net/projects/doxygen/files/rel-{version}/{archive_name}'
        binary_path = f'doxygen-{version}/bin/doxygen'
        commands = [
            'mkdir doxygen && cd doxygen',
            f'wget {archive_url}',
            f'tar xf {archive_name} {binary_path}',
            f'cp {binary_path} /usr/local/bin/',
            'cd .. && rm -rf doxygen'
        ]
        output_stages['main'] += hpccm.primitives.shell(commands=commands)


def build_stages(args) -> typing.Iterable[hpccm.Stage]:
    """Define and sequence the stages for the recipe corresponding to *args*."""

    # A Dockerfile or Singularity recipe can have multiple build stages.
    # The main build stage can copy files from previous stages, though only
    # the last stage is included in the tagged output image. This means that
    # large or expensive sets of build instructions can be isolated in
    # local/temporary images, but all of the stages need to be output by this
    # script, and need to occur in the correct order, so we create a sequence
    # object early in this function.
    stages = collections.OrderedDict()

    # If we need TSAN or oneAPI support the early build is more complex,
    # so that our compiler images don't have all the cruft needed to get those things
    # installed.
    if args.llvm is not None and args.tsan is not None:
        add_tsan_compiler_build_stage(input_args=args, output_stages=stages)
    if args.oneapi is not None:
        add_oneapi_compiler_build_stage(input_args=args, output_stages=stages)

    # Building blocks are chunks of container-builder instructions that can be
    # copied to any build stage with the addition operator.
    building_blocks = collections.OrderedDict()
    building_blocks['base_packages'] = hpccm.building_blocks.packages(
        ospackages=_common_packages)

    # These are the most expensive and most reusable layers, so we put them first.
    building_blocks['compiler'] = get_compiler(args, compiler_build_stage=stages.get('compiler_build'))
    building_blocks['gdrcopy'] = get_gdrcopy(args, building_blocks['compiler'])
    building_blocks['ucx'] = get_ucx(args, building_blocks['compiler'], building_blocks['gdrcopy'])
    building_blocks['mpi'] = get_mpi(args, building_blocks['compiler'], building_blocks['ucx'])
    for i, cmake in enumerate(args.cmake):
        building_blocks['cmake' + str(i)] = hpccm.building_blocks.cmake(
            eula=True,
            prefix=f'/usr/local/cmake-{cmake}',
            version=cmake)

    # Install additional packages early in the build to optimize Docker build layer cache.
    os_packages = list(get_llvm_packages(args)) + get_opencl_packages(args) + get_rocm_packages(args)
    if args.doxygen is not None:
        os_packages += _docs_extra_packages
    if args.oneapi is not None:
        os_packages += ['lsb-release']
    if args.hipsycl is not None:
        os_packages += ['libboost-fiber-dev']
    if args.intel_compute_runtime is not None:
        os_packages += _intel_compute_runtime_extra_packages
    building_blocks['extra_packages'] = []
    if args.rocm is not None:
        building_blocks['extra_packages'] += hpccm.building_blocks.packages(
            apt_keys=['http://repo.radeon.com/rocm/rocm.gpg.key'],
            apt_repositories=[f'deb [arch=amd64] http://repo.radeon.com/rocm/apt/{args.rocm}/ xenial main']
        )
    building_blocks['extra_packages'] += hpccm.building_blocks.packages(
        ospackages=os_packages,
        apt_ppas=['ppa:intel-opencl/intel-opencl'])

    if args.cuda is not None and args.llvm is not None:
        # Hack to tell clang what version of CUDA we're using
        # based on https://github.com/llvm/llvm-project/blob/1fdec59bffc11ae37eb51a1b9869f0696bfd5312/clang/lib/Driver/ToolChains/Cuda.cpp#L43
        cuda_version_split = args.cuda.split('.')
        # LLVM requires the version in x.y.z format, while args.cuda can be either x.y or x.y.z
        cuda_version_str = '{}.{}.{}'.format(
            cuda_version_split[0],
            cuda_version_split[1],
            cuda_version_split[2] if len(cuda_version_split) > 2 else 0
        )
        building_blocks['cuda-clang-workaround'] = hpccm.primitives.shell(commands=[
            f'echo "CUDA Version {cuda_version_str}" > /usr/local/cuda/version.txt'
            ])
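        # For example, --cuda 11.0 produces a version.txt containing
        # 'CUDA Version 11.0.0', which is the format clang knows how to parse.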

    building_blocks['clfft'] = get_clfft(args)

    building_blocks['hipSYCL'] = get_hipsycl(args)

    building_blocks['intel-compute-runtime'] = get_intel_compute_runtime(args)

    # Add Python environments to MPI images, only, so we don't have to worry
    # about whether to install mpi4py.
    if args.mpi is not None and len(args.venvs) > 0:
        add_python_stages(building_blocks=building_blocks, input_args=args, output_stages=stages)

    # Create the stage from which the targeted image will be tagged.
    stages['main'] = hpccm.Stage()

    stages['main'] += hpccm.primitives.baseimage(image=base_image_tag(args))
    for bb in building_blocks.values():
        if bb is not None:
            stages['main'] += bb

    # We always add Python3 and Pip
    stages['main'] += hpccm.building_blocks.python(python3=True, python2=False)

    # Add documentation requirements (doxygen and sphinx + misc).
    if args.doxygen is not None:
        add_documentation_dependencies(args, stages)

    if 'pyenv' in stages and stages['pyenv'] is not None:
        stages['main'] += hpccm.primitives.copy(_from='pyenv', _mkdir=True, src=['/root/.pyenv/'],
                                                dest='/root/.pyenv')
        stages['main'] += hpccm.primitives.copy(_from='pyenv', _mkdir=True, src=['/root/venv/'],
                                                dest='/root/venv')
        # TODO: Update user home directory.
        # TODO: If we activate pyenv for login shells, the `global` "version" should be full-featured.
        # stages['main'] += hpccm.primitives.copy(_from='pyenv', src=['/root/.bashrc'],
        #                                         dest='/root/')

    # Make sure that `python` resolves to something.
    stages['main'] += hpccm.primitives.shell(commands=['test -x /usr/bin/python || '
                                                       'update-alternatives --install /usr/bin/python python /usr/bin/python3 1 && '
                                                       '/usr/bin/python --version'])

    # Note that the list of stages should be sorted in dependency order.
    for build_stage in stages.values():
        if build_stage is not None:
            yield build_stage

if __name__ == '__main__':
    args = parser.parse_args()

    # Set container specification output format
    hpccm.config.set_container_format(args.format)
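    # For example, ``--format singularity`` emits a Singularity definition
    # instead of a Dockerfile; hpccm renders the same recipe in either format.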

    container_recipe = build_stages(args)

    # Output container specification
    for stage in container_recipe:
        print(stage)