#!/usr/bin/env python
#
# This file is part of the GROMACS molecular simulation package.
#
# Copyright (c) 2020,2021, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
#
# GROMACS is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# GROMACS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with GROMACS; if not, see
# http://www.gnu.org/licenses, or write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
#
# If you want to redistribute modifications to GROMACS, please
# consider that scientific software is very special. Version
# control is crucial - bugs must be traceable. We will be happy to
# consider code for inclusion in the official distribution, but
# derived work must not be called official GROMACS. Details are found
# in the README & COPYING files - if they are missing, get the
# official version at http://www.gromacs.org.
#
# To help us fund GROMACS development, we humbly ask that you cite
# the research papers on the package. Check out http://www.gromacs.org.

"""Building block based Dockerfile generation for CI testing images.

Generates a set of docker images used for running GROMACS CI on Gitlab.
The images are prepared according to a selection of build configuration targets
that aim to cover a broad enough range of possible systems, allowing us to
check compiler types and versions, as well as libraries used for accelerators
and parallel communication systems. Each combination is described as an entry
in the build_configs dictionary, with the script analysing the logic and
adding build stages as needed.

Based on the example script provided by the NVidia HPCCM repository.

Reference:
    `NVidia HPC Container Maker <https://github.com/NVIDIA/hpc-container-maker>`__

Authors:
    * Paul Bauer <paul.bauer.q@gmail.com>
    * Eric Irrgang <ericirrgang@gmail.com>
    * Joe Jordan <e.jjordan12@gmail.com>
    * Mark Abraham <mark.j.abraham@gmail.com>

Usage::

    $ python3 scripted_gmx_docker_builds.py --help
    $ python3 scripted_gmx_docker_builds.py --format docker > Dockerfile && docker build .
    $ python3 scripted_gmx_docker_builds.py | docker build -
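    $ # Or emit a Singularity definition via --format (output file name is illustrative):
    $ python3 scripted_gmx_docker_builds.py --format singularity > Singularity.def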

See Also:
    :file:`buildall.sh`

"""

import argparse
import collections
import typing
from distutils.version import StrictVersion

import hpccm
import hpccm.config
from hpccm.building_blocks.base import bb_base

try:
    import utility
except ImportError:
    raise RuntimeError(
        'This module assumes availability of supporting modules in the same directory. Add the directory to '
        'PYTHONPATH or invoke Python from within the module directory so module location can be resolved.')

# Basic packages for all final images.
_common_packages = ['build-essential',
                    'ca-certificates',
                    'ccache',
                    'git',
                    'gnupg',
                    'gpg-agent',
                    'libfftw3-dev',
                    'libhwloc-dev',
                    'liblapack-dev',
                    'libx11-dev',
                    'moreutils',
                    'ninja-build',
                    'rsync',
                    'valgrind',
                    'vim',
                    'wget',
                    'xsltproc']

_opencl_extra_packages = [
    'nvidia-opencl-dev',
    # The following require apt_ppas=['ppa:intel-opencl/intel-opencl']
    'intel-opencl-icd',
    'ocl-icd-libopencl1',
    'ocl-icd-opencl-dev',
    'opencl-headers',
]

_rocm_extra_packages = [
    # The following require
    #             apt_keys=['http://repo.radeon.com/rocm/rocm.gpg.key'],
    #             apt_repositories=['deb [arch=amd64] http://repo.radeon.com/rocm/apt/4.0.1/ xenial main']
    'libelf1',
    'rocm-opencl',
    'rocm-dev',
    'clinfo',
    'rocfft',
    'hipfft',
]

# Extra packages needed to build Intel Compute Runtime
_intel_compute_runtime_extra_packages = ['pkg-config',
                                         'libxml2',
                                         'libxml2-dev',
                                         'libigc',
                                         'libigc-dev',
                                         'libigdgmm11',
                                         'libigdgmm-dev',
                                         'libze-loader',
                                         'libze-loader-dev',
                                         'ocl-icd-libopencl1',
                                         'ocl-icd-opencl-dev',
                                         'opencl-headers']

# Extra packages needed to build Python installations from source.
_python_extra_packages = ['build-essential',
                          'ca-certificates',
                          'ccache',
                          'curl',
                          'git',
                          'libbz2-dev',
                          'libffi-dev',
                          'liblzma-dev',
                          'libncurses5-dev',
                          'libncursesw5-dev',
                          'libreadline-dev',
                          'libsqlite3-dev',
                          'libssl-dev',
                          'llvm',
                          'python-openssl',
                          'vim',
                          'wget',
                          'zlib1g-dev']

# Extra packages needed for images for building documentation.
_docs_extra_packages = ['autoconf',
                        'automake',
                        'autopoint',
                        'autotools-dev',
                        'bison',
                        'flex',
                        'ghostscript',
                        'graphviz',
                        'help2man',
                        'imagemagick',
                        'libtool',
                        'mscgen',
                        'm4',
                        'openssh-client',
                        'texinfo',
                        'texlive-latex-base',
                        'texlive-latex-extra',
                        'texlive-fonts-recommended',
                        'texlive-fonts-extra',
                        'tex-gyre']

# Parse command line arguments
parser = argparse.ArgumentParser(description='GROMACS CI image creation script',
                                 parents=[utility.parser])

parser.add_argument('--format', type=str, default='docker',
                    choices=['docker', 'singularity'],
                    help='Container specification format (default: docker)')


def base_image_tag(args) -> str:
    # Check if we use CUDA images or plain Linux images
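    # Illustrative example, derived from the string construction below:
    # args.cuda='11.0' with args.ubuntu='20.04' yields 'nvidia/cuda:11.0-devel-ubuntu20.04'.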
    if args.cuda is not None:
        cuda_version_tag = 'nvidia/cuda:' + args.cuda + '-devel'
        if args.centos is not None:
            cuda_version_tag += '-centos' + args.centos
        elif args.ubuntu is not None:
            cuda_version_tag += '-ubuntu' + args.ubuntu
        else:
            raise RuntimeError('Logic error: no Linux distribution selected.')

        base_image_tag = cuda_version_tag
    else:
        if args.centos is not None:
            base_image_tag = 'centos:centos' + args.centos
        elif args.ubuntu is not None:
            base_image_tag = 'ubuntu:' + args.ubuntu
        else:
            raise RuntimeError('Logic error: no Linux distribution selected.')
    return base_image_tag

# Convert the Linux distribution variables into something that hpccm
# understands.
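# For example (derived from the mappings below), --ubuntu 20.04 maps to
# 'ubuntu20' and --centos 7 maps to 'centos7'.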
def hpccm_distro_name(args) -> str:
    if args.centos is not None:
        name_mapping = {'7': 'centos7',
                        '8': 'centos8'}
        if args.centos in name_mapping:
            hpccm_name = name_mapping[args.centos]
        else:
            raise RuntimeError('Logic error: unsupported CentOS distribution selected.')
    elif args.ubuntu is not None:
        name_mapping = {'20.04': 'ubuntu20',
                        '18.04': 'ubuntu18',
                        '16.04': 'ubuntu16'}
        if args.ubuntu in name_mapping:
            hpccm_name = name_mapping[args.ubuntu]
        else:
            raise RuntimeError('Logic error: unsupported Ubuntu distribution selected.')
    else:
        raise RuntimeError('Logic error: no Linux distribution selected.')
    return hpccm_name

def get_llvm_packages(args) -> typing.Iterable[str]:
    # If we use the package version of LLVM, we need to install extra packages for it.
    if (args.llvm is not None) and (args.tsan is None):
        packages = [f'libomp-{args.llvm}-dev',
                    f'libomp5-{args.llvm}',
                    f'clang-format-{args.llvm}',
                    f'clang-tidy-{args.llvm}']
        if args.hipsycl is not None:
            packages += [f'llvm-{args.llvm}-dev',
                         f'libclang-{args.llvm}-dev',
                         f'lld-{args.llvm}']
        return packages
    else:
        return []

def get_opencl_packages(args) -> typing.Iterable[str]:
    if (args.doxygen is None) and (args.oneapi is None):
        return _opencl_extra_packages
    else:
        return []

def get_rocm_packages(args) -> typing.Iterable[str]:
    if args.rocm is None:
        return []
    else:
        return _rocm_extra_packages

def get_compiler(args, compiler_build_stage: hpccm.Stage = None) -> bb_base:
    # Compiler
    if args.llvm is not None:
        # Build our own version instead to get TSAN + OMP
        if args.tsan is not None:
            if compiler_build_stage is not None:
                compiler = compiler_build_stage.runtime(_from='tsan')
            else:
                raise RuntimeError('No TSAN compiler build stage!')
        # Build the default compiler if we don't need special support
        else:
            # Currently the focal apt repositories do not contain
            # llvm higher than 11, so we work around that. This will
            # need further work when we start supporting ubuntu 22.04
            compiler = hpccm.building_blocks.llvm(version=args.llvm, upstream=(int(args.llvm) > 11))
    elif args.oneapi is not None:
        if compiler_build_stage is not None:
            compiler = compiler_build_stage.runtime(_from='oneapi')
            # Prepare the toolchain (needed only for builds done within the Dockerfile, e.g.
            # OpenMPI builds, which don't currently work for other reasons)
            oneapi_toolchain = hpccm.toolchain(CC=f'/opt/intel/oneapi/compiler/{args.oneapi}/linux/bin/intel64/icx',
                                               CXX=f'/opt/intel/oneapi/compiler/{args.oneapi}/linux/bin/intel64/icpx')
            setattr(compiler, 'toolchain', oneapi_toolchain)
        else:
            raise RuntimeError('No oneAPI compiler build stage!')
    elif args.gcc is not None:
        compiler = hpccm.building_blocks.gnu(extra_repository=True,
                                             version=args.gcc,
                                             fortran=False)
    else:
        raise RuntimeError('Logic error: no compiler toolchain selected.')
    return compiler


def get_gdrcopy(args, compiler):
    if args.cuda is not None:
        if hasattr(compiler, 'toolchain'):
            # Version last updated June 7, 2021
            return hpccm.building_blocks.gdrcopy(toolchain=compiler.toolchain, version="2.2")
        else:
            raise RuntimeError('compiler is not an HPCCM compiler building block!')
    else:
        return None

def get_ucx(args, compiler, gdrcopy):
    if args.cuda is not None:
        if hasattr(compiler, 'toolchain'):
            use_gdrcopy = (gdrcopy is not None)
            # Version last updated June 7, 2021
            return hpccm.building_blocks.ucx(toolchain=compiler.toolchain, gdrcopy=use_gdrcopy, version="1.10.1", cuda=True)
        else:
            raise RuntimeError('compiler is not an HPCCM compiler building block!')
    else:
        return None

def get_mpi(args, compiler, ucx):
    # If needed, add MPI to the image
    if args.mpi is not None:
        if args.mpi == 'openmpi':
            if hasattr(compiler, 'toolchain'):
                if args.oneapi is not None:
                    raise RuntimeError('oneAPI building OpenMPI is not supported')
                use_cuda = (args.cuda is not None)
                use_ucx = (ucx is not None)
                # Version last updated June 7, 2021
                return hpccm.building_blocks.openmpi(toolchain=compiler.toolchain, version="4.1.1", cuda=use_cuda, ucx=use_ucx, infiniband=False)
            else:
                raise RuntimeError('compiler is not an HPCCM compiler building block!')
        elif args.mpi == 'impi':
            # TODO Intel MPI from the oneAPI repo is not working reliably,
            # reasons are unclear. When solved, add packages called:
            # 'intel-oneapi-mpi', 'intel-oneapi-mpi-devel'
            # during the compiler stage.
            # TODO also consider hpccm's intel_mpi package if that doesn't need
            # a license to run.
            raise RuntimeError('Intel MPI recipe not implemented yet.')
        else:
            raise RuntimeError('Requested unknown MPI implementation.')
    else:
        return None


def get_clfft(args):
    if args.clfft is not None:
        return hpccm.building_blocks.generic_cmake(
            repository='https://github.com/clMathLibraries/clFFT.git',
            prefix='/usr/local', recursive=True, branch=args.clfft, directory='clFFT/src')
    else:
        return None

def get_hipsycl(args):
    if args.hipsycl is None:
        return None
    if args.llvm is None:
        raise RuntimeError('Cannot build hipSYCL without LLVM')

    if args.rocm is None:
        raise RuntimeError('hipSYCL requires the rocm packages')

    cmake_opts = ['-DLLVM_DIR=/opt/rocm/llvm/lib/cmake/llvm',
                  '-DCMAKE_PREFIX_PATH=/opt/rocm/lib/cmake',
                  '-DWITH_ROCM_BACKEND=ON']
    if args.cuda is not None:
        cmake_opts += ['-DCUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda',
                       '-DWITH_CUDA_BACKEND=ON']

    postinstall = [
        # https://github.com/illuhad/hipSYCL/issues/361#issuecomment-718943645
        'for f in /opt/rocm/amdgcn/bitcode/*.bc; do ln -s "$f" "/opt/rocm/lib/$(basename $f .bc).amdgcn.bc"; done'
    ]
    if args.cuda is not None:
        postinstall += [
            # https://github.com/illuhad/hipSYCL/issues/410#issuecomment-743301929
            f'sed s/_OPENMP/__OPENMP_NVPTX__/ -i /usr/lib/llvm-{args.llvm}/lib/clang/*/include/__clang_cuda_complex_builtins.h',
            # Not needed unless we're building with CUDA 11.x, but there is no harm in always doing it
            'ln -s /usr/local/cuda/compat/* /usr/local/cuda/lib64/'
        ]

    return hpccm.building_blocks.generic_cmake(
        repository='https://github.com/illuhad/hipSYCL.git',
        directory='/var/tmp/hipSYCL',
        prefix='/usr/local', recursive=True, commit=args.hipsycl,
        cmake_opts=['-DCMAKE_BUILD_TYPE=Release', *cmake_opts],
        postinstall=postinstall)

def get_intel_compute_runtime(args):
    # The only reason we need to build Compute Runtime ourselves is that the Intel packages have no DG1 support.
    # Otherwise, we could just install DEB packages from GitHub or the Intel PPA.
    if args.intel_compute_runtime is None:
        return None

    cmake_opts = ['-DCMAKE_BUILD_TYPE=Release',
                  '-DSKIP_UNIT_TESTS=TRUE',
                  '-DSUPPORT_GEN8=0', '-DSUPPORT_GEN9=1', '-DSUPPORT_GEN11=1', '-DSUPPORT_GEN12LP=1', '-DSUPPORT_DG1=1',
                  '-DBUILD_WITH_L0=1']

    return hpccm.building_blocks.generic_cmake(
        repository='https://github.com/intel/compute-runtime.git',
        directory='compute-runtime',
        prefix='/usr/local', recursive=True, branch=args.intel_compute_runtime,
        cmake_opts=cmake_opts,
        postinstall=['ldconfig'])

def add_tsan_compiler_build_stage(input_args, output_stages: typing.Mapping[str, hpccm.Stage]):
    """Isolate the expensive TSAN preparation stage.

    This is a very expensive stage, but has few and disjoint dependencies, and
    its output is easily compartmentalized (/usr/local) so we can isolate this
    build stage to maximize build cache hits and reduce rebuild time, bookkeeping,
    and final image size.
    """
    if not isinstance(output_stages, collections.abc.MutableMapping):
        raise RuntimeError('Need output_stages container.')
    tsan_stage = hpccm.Stage()
    tsan_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as='tsan')

    tsan_stage += hpccm.building_blocks.packages(ospackages=['git', 'ca-certificates', 'build-essential', 'cmake'])
    # CMake will get duplicated later, but this is an expensive image, and it isn't worth optimizing
    # out that duplication...
    tsan_stage += hpccm.building_blocks.python(python3=True, python2=False, devel=False)

    compiler_branch = 'release/' + str(input_args.llvm) + '.x'
    tsan_stage += hpccm.building_blocks.generic_cmake(
        repository='https://github.com/llvm/llvm-project.git',
        directory='/var/tmp/llvm-project/llvm/',
        prefix='/usr/local', recursive=True, branch=compiler_branch,
        cmake_opts=['-D CMAKE_BUILD_TYPE=Release', '-D LLVM_ENABLE_PROJECTS="clang;openmp;clang-tools-extra;compiler-rt;lld"',
                    '-D LIBOMP_TSAN_SUPPORT=on'],
        postinstall=['ln -s /usr/local/bin/clang++ /usr/local/bin/clang++-' + str(input_args.llvm),
                     'ln -s /usr/local/bin/clang-format /usr/local/bin/clang-format-' + str(input_args.llvm),
                     'ln -s /usr/local/bin/clang-tidy /usr/local/bin/clang-tidy-' + str(input_args.llvm),
                     'ln -s /usr/local/share/clang/run-clang-tidy.py /usr/local/bin/run-clang-tidy-' + str(input_args.llvm) + '.py',
                     'ln -s /usr/local/bin/run-clang-tidy-' + str(input_args.llvm) + '.py /usr/local/bin/run-clang-tidy-' + str(input_args.llvm),
                     'ln -s /usr/local/libexec/c++-analyzer /usr/local/bin/c++-analyzer-' + str(input_args.llvm)])
    output_stages['compiler_build'] = tsan_stage

def oneapi_runtime(_from='0'):
    oneapi_runtime_stage = hpccm.Stage()
    oneapi_runtime_stage += hpccm.primitives.copy(_from='oneapi-build',
                                                  files={"/opt/intel": "/opt/intel",
                                                         "/etc/bash.bashrc": "/etc/bash.bashrc"})
    return oneapi_runtime_stage

def add_oneapi_compiler_build_stage(input_args, output_stages: typing.Mapping[str, hpccm.Stage]):
    """Isolate the oneAPI preparation stage.

    This stage is isolated so that its installed components are minimized in the
    final image (chiefly /opt/intel) and its environment setup script can be
    sourced. This also helps with rebuild time and final image size.
    """
    if not isinstance(output_stages, collections.abc.MutableMapping):
        raise RuntimeError('Need output_stages container.')
    oneapi_stage = hpccm.Stage()
    oneapi_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as='oneapi-build')

    version = str(input_args.oneapi)

    # Add required components for the next stage (both for hpccm and Intel's setvars.sh script)
    oneapi_stage += hpccm.building_blocks.packages(ospackages=['wget', 'gnupg2', 'ca-certificates', 'lsb-release'])
    oneapi_stage += hpccm.building_blocks.packages(
        apt_keys=['https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB'],
        apt_repositories=['deb https://apt.repos.intel.com/oneapi all main'],
        # Add minimal packages (not the whole HPC toolkit!)
        ospackages=[f'intel-oneapi-dpcpp-cpp-{version}',
                    f'intel-oneapi-openmp-{version}',
                    f'intel-oneapi-mkl-{version}',
                    f'intel-oneapi-mkl-devel-{version}']
    )
    # Ensure that all bash shells on the final container will have access to oneAPI
    oneapi_stage += hpccm.primitives.shell(
        commands=['echo "source /opt/intel/oneapi/setvars.sh" >> /etc/bash.bashrc',
                  'unlink /opt/intel/oneapi/compiler/latest',
                  f'ln -sf /opt/intel/oneapi/compiler/{version} /opt/intel/oneapi/compiler/latest']
    )
    setattr(oneapi_stage, 'runtime', oneapi_runtime)

    output_stages['compiler_build'] = oneapi_stage

def prepare_venv(version: StrictVersion) -> typing.Sequence[str]:
    """Get shell commands to set up the venv for the requested Python version."""
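    # Illustrative example, derived from the path construction below:
    # StrictVersion('3.7.7') produces a venv at $HOME/venv/py3.7.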
    major = version.version[0]
    minor = version.version[1]  # type: int

    pyenv = '$HOME/.pyenv/bin/pyenv'

    py_ver = f'{major}.{minor}'
    venv_path = f'$HOME/venv/py{py_ver}'
    commands = [f'$({pyenv} prefix `{pyenv} whence python{py_ver}`)/bin/python -m venv {venv_path}']

    commands.append(f'{venv_path}/bin/python -m pip install --upgrade pip setuptools')
    # Install dependencies for building and testing gmxapi Python package.
    # WARNING: Please keep this list synchronized with python_packaging/requirements-test.txt
    # TODO: Get requirements.txt from an input argument.
    commands.append(f"""{venv_path}/bin/python -m pip install --upgrade \
            'breathe' \
            'cmake>=3.16.3' \
            'flake8>=3.7.7' \
            'gcovr>=4.2' \
            'mpi4py>=3.0.3' \
            'networkx>=2.0' \
            'numpy>=1' \
            'pip>=10.1' \
            'Pygments>=2.2.0' \
            'pytest>=3.9' \
            'setuptools>=42' \
            'scikit-build>=0.10' \
            'Sphinx>=1.6.3' \
            'sphinxcontrib-plantuml>=0.14'""")

    # TODO: Remove 'importlib_resources' dependency when Python >=3.7 is required.
    if minor == 6:
        commands.append(f"""{venv_path}/bin/python -m pip install --upgrade \
                'importlib_resources'""")

    return commands


def add_python_stages(building_blocks: typing.Mapping[str, bb_base],
                      input_args,
                      output_stages: typing.MutableMapping[str, hpccm.Stage]):
    """Add the stage(s) necessary for the requested venvs.

    One intermediate build stage is created for each venv (see --venv option).

    Each stage partially populates Python installations and venvs in the home
    directory. The home directory is collected by the 'pyenv' stage for use by
    the main build stage.
    """
    if len(input_args.venvs) < 1:
        raise RuntimeError('No venvs to build...')
    if output_stages is None or not isinstance(output_stages, collections.abc.Mapping):
        raise RuntimeError('Need a container for output stages.')

    # Main Python stage that collects the environments from individual stages.
    # We collect the stages individually, rather than chaining them, because the
    # copy is a bit slow and wastes local Docker image space for each filesystem
    # layer.
    pyenv_stage = hpccm.Stage()
    pyenv_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as='pyenv')
    pyenv_stage += building_blocks['compiler']
    if building_blocks['gdrcopy'] is not None:
        pyenv_stage += building_blocks['gdrcopy']
    if building_blocks['ucx'] is not None:
        pyenv_stage += building_blocks['ucx']
    pyenv_stage += building_blocks['mpi']
    pyenv_stage += hpccm.building_blocks.packages(ospackages=_python_extra_packages)

    for version in [StrictVersion(py_ver) for py_ver in sorted(input_args.venvs)]:
        stage_name = 'py' + str(version)
        stage = hpccm.Stage()
        stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as=stage_name)
        stage += building_blocks['compiler']
        if building_blocks['gdrcopy'] is not None:
            stage += building_blocks['gdrcopy']
        if building_blocks['ucx'] is not None:
            stage += building_blocks['ucx']
        stage += building_blocks['mpi']
        stage += hpccm.building_blocks.packages(ospackages=_python_extra_packages)

        # TODO: Use a non-root user for testing and Python virtual environments.
        stage += hpccm.primitives.shell(commands=[
            'curl https://pyenv.run | bash',
            """echo 'export PYENV_ROOT="$HOME/.pyenv"' >> $HOME/.bashrc""",
            """echo 'export PATH="$PYENV_ROOT/bin:$PATH"' >> $HOME/.bashrc""",
            """echo 'eval "$(pyenv init -)"' >> $HOME/.bashrc""",
            """echo 'eval "$(pyenv virtualenv-init -)"' >> $HOME/.bashrc"""])
        pyenv = '$HOME/.pyenv/bin/pyenv'
        commands = [f'PYTHON_CONFIGURE_OPTS="--enable-shared" {pyenv} install -s {version}']
        stage += hpccm.primitives.shell(commands=commands)

        commands = prepare_venv(version)
        stage += hpccm.primitives.shell(commands=commands)

        # TODO: Update user home directory.
        pyenv_stage += hpccm.primitives.copy(_from=stage_name, _mkdir=True, src=['/root/'],
                                             dest='/root')

        # Add the intermediate build stage to the sequence
        output_stages[stage_name] = stage

    # TODO: If we activate pyenv for login shells, the `global` "version" should be full-featured.
    # # `version` should be a system installation or pyenv environment (or pyenv-virtualenv)
    # # with the dependencies for all of the Python aspects of CMake-driven builds.
    # commands = '{pyenv} global {version}'.format(
    #             pyenv=pyenv,
    #             version=...)
    # pyenv_stage += hpccm.primitives.shell(commands=commands)

    # Add the aggregating build stage to the sequence. This allows the main stage to copy
    # the files in a single stage, potentially reducing the overall output image size.
    output_stages['pyenv'] = pyenv_stage

def add_documentation_dependencies(input_args,
                                   output_stages: typing.MutableMapping[str, hpccm.Stage]):
    """Add appropriate layers according to doxygen input arguments."""
    if input_args.doxygen is None:
        return
    # Always clone the same version of linkchecker (the latest release as of June 1, 2021)
    output_stages['main'] += hpccm.building_blocks.pip(pip='pip3', packages=['git+https://github.com/linkchecker/linkchecker.git@v10.0.1'])
    output_stages['main'] += hpccm.primitives.shell(
        commands=['sed -i \'/\"XPS\"/d;/\"PDF\"/d;/\"PS\"/d;/\"EPS\"/d;/disable ghostscript format types/d\' /etc/ImageMagick-6/policy.xml'])
    if input_args.doxygen == '1.8.5':
        doxygen_commit = 'ed4ed873ab0e7f15116e2052119a6729d4589f7a'
        output_stages['main'] += hpccm.building_blocks.generic_autotools(
            repository='https://github.com/westes/flex.git',
            commit='f7788a9a0ecccdc953ed12043ccb59ca25714018',
            prefix='/tmp/install-of-flex',
            configure_opts=['--disable-shared'],
            preconfigure=['./autogen.sh'])
        output_stages['main'] += hpccm.building_blocks.generic_autotools(
            repository='https://github.com/doxygen/doxygen.git',
            commit=doxygen_commit,
            prefix='',
            configure_opts=[
                '--flex /tmp/install-of-flex/bin/flex',
                '--static'])
    else:
        version = input_args.doxygen
        archive_name = f'doxygen-{version}.linux.bin.tar.gz'
        archive_url = f'https://sourceforge.net/projects/doxygen/files/rel-{version}/{archive_name}'
        binary_path = f'doxygen-{version}/bin/doxygen'
        commands = [
            'mkdir doxygen && cd doxygen',
            f'wget {archive_url}',
            f'tar xf {archive_name} {binary_path}',
            f'cp {binary_path} /usr/local/bin/',
            'cd .. && rm -rf doxygen'
        ]
        output_stages['main'] += hpccm.primitives.shell(commands=commands)


def build_stages(args) -> typing.Iterable[hpccm.Stage]:
    """Define and sequence the stages for the recipe corresponding to *args*."""

    # A Dockerfile or Singularity recipe can have multiple build stages.
    # The main build stage can copy files from previous stages, though only
    # the last stage is included in the tagged output image. This means that
    # large or expensive sets of build instructions can be isolated in
    # local/temporary images, but all of the stages need to be output by this
    # script, and need to occur in the correct order, so we create a sequence
    # object early in this function.
    stages = collections.OrderedDict()

    # If we need TSAN or oneAPI support, the early build is more complex and is
    # isolated in its own stage, so that our compiler images don't have all the
    # cruft needed to get those things installed.
    if args.llvm is not None and args.tsan is not None:
        add_tsan_compiler_build_stage(input_args=args, output_stages=stages)
    if args.oneapi is not None:
        add_oneapi_compiler_build_stage(input_args=args, output_stages=stages)

    # Building blocks are chunks of container-builder instructions that can be
    # copied to any build stage with the addition operator.
    building_blocks = collections.OrderedDict()
    building_blocks['base_packages'] = hpccm.building_blocks.packages(
        ospackages=_common_packages)

    # Normally in hpccm the first call to baseimage sets the context
    # for other packages, e.g. for which apt repository to
    # use. We want to set that early on.
    hpccm.config.set_linux_distro(hpccm_distro_name(args))

    # These are the most expensive and most reusable layers, so we put them first.
    building_blocks['compiler'] = get_compiler(args, compiler_build_stage=stages.get('compiler_build'))
    building_blocks['gdrcopy'] = get_gdrcopy(args, building_blocks['compiler'])
    building_blocks['ucx'] = get_ucx(args, building_blocks['compiler'], building_blocks['gdrcopy'])
    building_blocks['mpi'] = get_mpi(args, building_blocks['compiler'], building_blocks['ucx'])
    for i, cmake in enumerate(args.cmake):
        building_blocks['cmake' + str(i)] = hpccm.building_blocks.cmake(
            eula=True,
            prefix=f'/usr/local/cmake-{cmake}',
            version=cmake)

    # Install additional packages early in the build to optimize Docker build layer cache.
    os_packages = list(get_llvm_packages(args)) + get_opencl_packages(args) + get_rocm_packages(args)
    if args.doxygen is not None:
        os_packages += _docs_extra_packages
    if args.oneapi is not None:
        os_packages += ['lsb-release']
    if args.hipsycl is not None:
        os_packages += ['libboost-fiber-dev']
    if args.intel_compute_runtime is not None:
        os_packages += _intel_compute_runtime_extra_packages
    building_blocks['extra_packages'] = []
    if args.rocm is not None:
        building_blocks['extra_packages'] += hpccm.building_blocks.packages(
            apt_keys=['http://repo.radeon.com/rocm/rocm.gpg.key'],
            apt_repositories=[f'deb [arch=amd64] http://repo.radeon.com/rocm/apt/{args.rocm}/ xenial main']
        )
    building_blocks['extra_packages'] += hpccm.building_blocks.packages(
        ospackages=os_packages,
        apt_ppas=['ppa:intel-opencl/intel-opencl'])

    if args.cuda is not None and args.llvm is not None:
        # Hack to tell clang what version of CUDA we're using
        # based on https://github.com/llvm/llvm-project/blob/1fdec59bffc11ae37eb51a1b9869f0696bfd5312/clang/lib/Driver/ToolChains/Cuda.cpp#L43
        cuda_version_split = args.cuda.split('.')
        # LLVM requires the version in x.y.z format, while args.cuda can be either x.y or x.y.z
        cuda_version_str = '{}.{}.{}'.format(
            cuda_version_split[0],
            cuda_version_split[1],
            cuda_version_split[2] if len(cuda_version_split) > 2 else 0
        )
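        # Illustrative example, derived from the formatting above:
        # args.cuda == '11.0' yields cuda_version_str == '11.0.0'.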
        building_blocks['cuda-clang-workaround'] = hpccm.primitives.shell(commands=[
            f'echo "CUDA Version {cuda_version_str}" > /usr/local/cuda/version.txt'
        ])

    building_blocks['clfft'] = get_clfft(args)

    building_blocks['hipSYCL'] = get_hipsycl(args)

    building_blocks['intel-compute-runtime'] = get_intel_compute_runtime(args)

    # Add Python environments to MPI images, only, so we don't have to worry
    # about whether to install mpi4py.
    if args.mpi is not None and len(args.venvs) > 0:
        add_python_stages(building_blocks=building_blocks, input_args=args, output_stages=stages)

    # Create the stage from which the targeted image will be tagged.
    stages['main'] = hpccm.Stage()

    stages['main'] += hpccm.primitives.baseimage(image=base_image_tag(args))
    for bb in building_blocks.values():
        if bb is not None:
            stages['main'] += bb

    # We always add Python3 and Pip
    stages['main'] += hpccm.building_blocks.python(python3=True, python2=False)

    # Add documentation requirements (doxygen and sphinx + misc).
    if args.doxygen is not None:
        add_documentation_dependencies(args, stages)

    if 'pyenv' in stages and stages['pyenv'] is not None:
        stages['main'] += hpccm.primitives.copy(_from='pyenv', _mkdir=True, src=['/root/.pyenv/'],
                                                dest='/root/.pyenv')
        stages['main'] += hpccm.primitives.copy(_from='pyenv', _mkdir=True, src=['/root/venv/'],
                                                dest='/root/venv')
        # TODO: Update user home directory.
        # TODO: If we activate pyenv for login shells, the `global` "version" should be full-featured.
        # stages['main'] += hpccm.primitives.copy(_from='pyenv', src=['/root/.bashrc'],
        #                                         dest='/root/')

    # Make sure that `python` resolves to something.
    stages['main'] += hpccm.primitives.shell(commands=['test -x /usr/bin/python || '
                                                       'update-alternatives --install /usr/bin/python python /usr/bin/python3 1 && '
                                                       '/usr/bin/python --version'])

    # Note that the list of stages should be sorted in dependency order.
    for build_stage in stages.values():
        if build_stage is not None:
            yield build_stage


if __name__ == '__main__':
    args = parser.parse_args()

    # Set container specification output format
    hpccm.config.set_container_format(args.format)

    container_recipe = build_stages(args)

    # Output container specification
    for stage in container_recipe:
        print(stage)