#!/usr/bin/env python
#
# This file is part of the GROMACS molecular simulation package.
#
# Copyright (c) 2020,2021, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
#
# GROMACS is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# GROMACS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with GROMACS; if not, see
# http://www.gnu.org/licenses, or write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
#
# If you want to redistribute modifications to GROMACS, please
# consider that scientific software is very special. Version
# control is crucial - bugs must be traceable. We will be happy to
# consider code for inclusion in the official distribution, but
# derived work must not be called official GROMACS. Details are found
# in the README & COPYING files - if they are missing, get the
# official version at http://www.gromacs.org.
#
# To help us fund GROMACS development, we humbly ask that you cite
# the research papers on the package. Check out http://www.gromacs.org.

"""Building block based Dockerfile generation for CI testing images.

Generates a set of docker images used for running GROMACS CI on Gitlab.
The images are prepared according to a selection of build configuration targets
that aim to cover a broad enough range of different possible systems,
allowing us to check compiler types and versions, as well as libraries used
for accelerators and parallel communication systems. Each combination is
described as an entry in the build_configs dictionary, with the script
analysing the logic and adding build stages as needed.

Based on the example script provided by the NVidia HPCCM repository.

Reference:
    `NVidia HPC Container Maker <https://github.com/NVIDIA/hpc-container-maker>`__

Authors:
    * Paul Bauer <paul.bauer.q@gmail.com>
    * Eric Irrgang <ericirrgang@gmail.com>
    * Joe Jordan <e.jjordan12@gmail.com>
    * Mark Abraham <mark.j.abraham@gmail.com>

Usage::

    $ python3 scripted_gmx_docker_builds.py --help
    $ python3 scripted_gmx_docker_builds.py --format docker > Dockerfile && docker build .
    $ python3 scripted_gmx_docker_builds.py | docker build -
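    # With --format singularity (a declared choice of the --format option
    # below), the script emits a Singularity recipe; the filename is illustrative:
    $ python3 scripted_gmx_docker_builds.py --format singularity > Singularity.def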

See Also:
    :file:`buildall.sh`

"""

import argparse
import collections
import collections.abc
import typing
from distutils.version import StrictVersion

import hpccm
import hpccm.config
from hpccm.building_blocks.base import bb_base

try:
    import utility
except ImportError:
    raise RuntimeError(
        'This module assumes availability of supporting modules in the same directory. Add the directory to '
        'PYTHONPATH or invoke Python from within the module directory so module location can be resolved.')

# Basic packages for all final images.
_common_packages = ['build-essential',
                    'ca-certificates',
                    'ccache',
                    'git',
                    'gnupg',
                    'gpg-agent',
                    'libfftw3-dev',
                    'libhwloc-dev',
                    'liblapack-dev',
                    'libx11-dev',
                    'moreutils',
                    'ninja-build',
                    'rsync',
                    'valgrind',
                    'vim',
                    'wget',
                    'xsltproc']

_opencl_extra_packages = [
    'nvidia-opencl-dev',
    # The following require apt_ppas=['ppa:intel-opencl/intel-opencl']
    'intel-opencl-icd',
    'ocl-icd-libopencl1',
    'ocl-icd-opencl-dev',
    'opencl-headers',
    # The following require
    #             apt_keys=['http://repo.radeon.com/rocm/apt/debian/rocm.gpg.key'],
    #             apt_repositories=['deb [arch=amd64] http://repo.radeon.com/rocm/apt/debian/ xenial main']
    'libelf1',
    'rocm-opencl',
    'rocm-dev',
    'clinfo'
]

# Extra packages needed to build Intel Compute Runtime
_intel_compute_runtime_extra_packages = ['pkg-config',
                                         'libxml2',
                                         'libxml2-dev',
                                         'libigc',
                                         'libigc-dev',
                                         'libigdgmm11',
                                         'libigdgmm-dev',
                                         'libze-loader',
                                         'libze-loader-dev',
                                         'ocl-icd-libopencl1',
                                         'ocl-icd-opencl-dev',
                                         'opencl-headers']

# Extra packages needed to build Python installations from source.
_python_extra_packages = ['build-essential',
                          'ca-certificates',
                          'ccache',
                          'curl',
                          'git',
                          'libbz2-dev',
                          'libffi-dev',
                          'liblzma-dev',
                          'libncurses5-dev',
                          'libncursesw5-dev',
                          'libreadline-dev',
                          'libsqlite3-dev',
                          'libssl-dev',
                          'llvm',
                          'python-openssl',
                          'vim',
                          'wget',
                          'zlib1g-dev']

# Extra packages needed for images for building documentation.
_docs_extra_packages = ['autoconf',
                        'automake',
                        'autopoint',
                        'autotools-dev',
                        'bison',
                        'flex',
                        'ghostscript',
                        'graphviz',
                        'help2man',
                        'imagemagick',
                        'libtool',
                        'linkchecker',
                        'mscgen',
                        'm4',
                        'openssh-client',
                        'texinfo',
                        'texlive-latex-base',
                        'texlive-latex-extra',
                        'texlive-fonts-recommended',
                        'texlive-fonts-extra']

# Parse command line arguments
parser = argparse.ArgumentParser(description='GROMACS CI image creation script',
                                 parents=[utility.parser])

parser.add_argument('--format', type=str, default='docker',
                    choices=['docker', 'singularity'],
                    help='Container specification format (default: docker)')
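
# A hedged invocation example: flag names such as --ubuntu and --gcc are
# assumed from the shared utility.parser and from the args attributes used
# below; run the script with --help for the authoritative list.
#   $ python3 scripted_gmx_docker_builds.py --ubuntu 18.04 --gcc 9 --format docker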


def base_image_tag(args) -> str:
    # Check if we use CUDA images or plain Linux images.
    if args.cuda is not None:
        cuda_version_tag = 'nvidia/cuda:' + args.cuda + '-devel'
        if args.centos is not None:
            cuda_version_tag += '-centos' + args.centos
        elif args.ubuntu is not None:
            cuda_version_tag += '-ubuntu' + args.ubuntu
        else:
            raise RuntimeError('Logic error: no Linux distribution selected.')

        base_image_tag = cuda_version_tag
    else:
        if args.centos is not None:
            base_image_tag = 'centos:centos' + args.centos
        elif args.ubuntu is not None:
            base_image_tag = 'ubuntu:' + args.ubuntu
        else:
            raise RuntimeError('Logic error: no Linux distribution selected.')
    return base_image_tag
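
# For illustration only (assumed argument values, not part of the CI configs):
# base_image_tag() yields 'nvidia/cuda:11.0-devel-ubuntu18.04' for
# args.cuda='11.0', args.ubuntu='18.04', and 'centos:centos7' for
# args.cuda=None, args.centos='7'.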


def get_llvm_packages(args) -> typing.Iterable[str]:
    # If we use the package version of LLVM, we need to install extra packages for it.
    if (args.llvm is not None) and (args.tsan is None):
        packages = [f'libomp-{args.llvm}-dev',
                    f'libomp5-{args.llvm}',
                    f'clang-format-{args.llvm}',
                    f'clang-tidy-{args.llvm}']
        if args.hipsycl is not None:
            packages += [f'llvm-{args.llvm}-dev',
                         f'libclang-{args.llvm}-dev',
                         f'lld-{args.llvm}']
        return packages
    else:
        return []
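
# For illustration (assumed values): with args.llvm=11, args.tsan=None and
# args.hipsycl=None, get_llvm_packages() returns
# ['libomp-11-dev', 'libomp5-11', 'clang-format-11', 'clang-tidy-11'].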


def get_opencl_packages(args) -> typing.Iterable[str]:
    if (args.doxygen is None) and (args.oneapi is None):
        return _opencl_extra_packages
    else:
        return []


def get_compiler(args, compiler_build_stage: hpccm.Stage = None) -> bb_base:
    # Compiler
    if args.llvm is not None:
        # Build our own version instead to get TSAN + OMP
        if args.tsan is not None:
            if compiler_build_stage is not None:
                compiler = compiler_build_stage.runtime(_from='tsan')
            else:
                raise RuntimeError('No TSAN compiler build stage!')
        # Build the default compiler if we don't need special support
        else:
            compiler = hpccm.building_blocks.llvm(extra_repository=True, version=args.llvm)

    elif args.oneapi is not None:
        if compiler_build_stage is not None:
            compiler = compiler_build_stage.runtime(_from='oneapi')
            # Prepare the toolchain (needed only for builds done within the Dockerfile, e.g.
            # OpenMPI builds, which don't currently work for other reasons)
            oneapi_toolchain = hpccm.toolchain(CC=f'/opt/intel/oneapi/compiler/{args.oneapi}/linux/bin/intel64/icx',
                                               CXX=f'/opt/intel/oneapi/compiler/{args.oneapi}/linux/bin/intel64/icpx')
            setattr(compiler, 'toolchain', oneapi_toolchain)

        else:
            raise RuntimeError('No oneAPI compiler build stage!')

    elif args.gcc is not None:
        compiler = hpccm.building_blocks.gnu(extra_repository=True,
                                             version=args.gcc,
                                             fortran=False)
    else:
        raise RuntimeError('Logic error: no compiler toolchain selected.')
    return compiler


def get_mpi(args, compiler):
    # If needed, add MPI to the image.
    if args.mpi is not None:
        if args.mpi == 'openmpi':
            use_cuda = args.cuda is not None

            if hasattr(compiler, 'toolchain'):
                if args.oneapi is not None:
                    raise RuntimeError('oneAPI building OpenMPI is not supported')
                return hpccm.building_blocks.openmpi(toolchain=compiler.toolchain, cuda=use_cuda, infiniband=False)
            else:
                raise RuntimeError('compiler is not an HPCCM compiler building block!')

        elif args.mpi == 'impi':
            # TODO: Intel MPI from the oneAPI repo is not working reliably;
            # the reasons are unclear. When solved, add packages called
            # 'intel-oneapi-mpi' and 'intel-oneapi-mpi-devel'
            # during the compiler stage.
            # TODO: also consider hpccm's intel_mpi package if that doesn't need
            # a license to run.
            raise RuntimeError('Intel MPI recipe not implemented yet.')
        else:
            raise RuntimeError('Requested unknown MPI implementation.')
    else:
        return None


def get_clfft(args):
    if args.clfft is not None:
        return hpccm.building_blocks.generic_cmake(
            repository='https://github.com/clMathLibraries/clFFT.git',
            prefix='/usr/local', recursive=True, branch=args.clfft, directory='clFFT/src')
    else:
        return None


def get_hipsycl(args):
    if args.hipsycl is None:
        return None
    if args.llvm is None:
        raise RuntimeError('Cannot build hipSYCL without llvm')

    cmake_opts = [f'-DLLVM_DIR=/usr/lib/llvm-{args.llvm}/cmake',
                  f'-DCLANG_EXECUTABLE_PATH=/usr/bin/clang++-{args.llvm}',
                  '-DCMAKE_PREFIX_PATH=/opt/rocm/lib/cmake',
                  '-DWITH_ROCM_BACKEND=ON']
    if args.cuda is not None:
        cmake_opts += ['-DCUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda',
                       '-DWITH_CUDA_BACKEND=ON']

    postinstall = [
        # https://github.com/illuhad/hipSYCL/issues/361#issuecomment-718943645
        'for f in /opt/rocm/amdgcn/bitcode/*.bc; do ln -s "$f" "/opt/rocm/lib/$(basename $f .bc).amdgcn.bc"; done'
    ]
    if args.cuda is not None:
        postinstall += [
            # https://github.com/illuhad/hipSYCL/issues/410#issuecomment-743301929
            f'sed s/_OPENMP/__OPENMP_NVPTX__/ -i /usr/lib/llvm-{args.llvm}/lib/clang/*/include/__clang_cuda_complex_builtins.h',
            # Not needed unless we're building with CUDA 11.x, but there is no harm in always doing it.
            'ln -s /usr/local/cuda/compat/* /usr/local/cuda/lib64/'
        ]

    return hpccm.building_blocks.generic_cmake(
        repository='https://github.com/illuhad/hipSYCL.git',
        directory='/var/tmp/hipSYCL',
        prefix='/usr/local', recursive=True, commit=args.hipsycl,
        cmake_opts=['-DCMAKE_BUILD_TYPE=Release', *cmake_opts],
        postinstall=postinstall)


def get_intel_compute_runtime(args):
    # The only reason we need to build Compute Runtime ourselves is that the Intel packages have no DG1 support.
    # Otherwise, we could have just installed the DEB packages from GitHub or the Intel PPA.
    if args.intel_compute_runtime is None:
        return None

    cmake_opts = ['-DCMAKE_BUILD_TYPE=Release',
                  '-DSKIP_UNIT_TESTS=TRUE',
                  '-DSUPPORT_GEN8=0', '-DSUPPORT_GEN9=1', '-DSUPPORT_GEN11=1', '-DSUPPORT_GEN12LP=1', '-DSUPPORT_DG1=1',
                  '-DBUILD_WITH_L0=1']

    return hpccm.building_blocks.generic_cmake(
        repository='https://github.com/intel/compute-runtime.git',
        directory='compute-runtime',
        prefix='/usr/local', recursive=True, branch=args.intel_compute_runtime,
        cmake_opts=cmake_opts,
        postinstall=['ldconfig'])


def add_tsan_compiler_build_stage(input_args, output_stages: typing.MutableMapping[str, hpccm.Stage]):
    """Isolate the expensive TSAN preparation stage.

    This is a very expensive stage, but has few and disjoint dependencies, and
    its output is easily compartmentalized (/usr/local) so we can isolate this
    build stage to maximize build cache hits and reduce rebuild time, bookkeeping,
    and final image size.
    """
    if not isinstance(output_stages, collections.abc.MutableMapping):
        raise RuntimeError('Need output_stages container.')
    tsan_stage = hpccm.Stage()
    tsan_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as='tsan')

    tsan_stage += hpccm.building_blocks.packages(ospackages=['git', 'ca-certificates', 'build-essential', 'cmake'])
    # CMake will get duplicated later, but this is an expensive image, and it isn't worth optimizing
    # out that duplication...
    tsan_stage += hpccm.building_blocks.python(python3=True, python2=False, devel=False)

    compiler_branch = 'release/' + str(input_args.llvm) + '.x'
    tsan_stage += hpccm.building_blocks.generic_cmake(
        repository='https://github.com/llvm/llvm-project.git',
        directory='/var/tmp/llvm-project/llvm/',
        prefix='/usr/local', recursive=True, branch=compiler_branch,
        cmake_opts=['-D CMAKE_BUILD_TYPE=Release', '-D LLVM_ENABLE_PROJECTS="clang;openmp;clang-tools-extra;compiler-rt;lld"',
                    '-D LIBOMP_TSAN_SUPPORT=on'],
        postinstall=['ln -s /usr/local/bin/clang++ /usr/local/bin/clang++-' + str(input_args.llvm),
                     'ln -s /usr/local/bin/clang-format /usr/local/bin/clang-format-' + str(input_args.llvm),
                     'ln -s /usr/local/bin/clang-tidy /usr/local/bin/clang-tidy-' + str(input_args.llvm),
                     'ln -s /usr/local/share/clang/run-clang-tidy.py /usr/local/bin/run-clang-tidy-' + str(input_args.llvm) + '.py',
                     'ln -s /usr/local/bin/run-clang-tidy-' + str(input_args.llvm) + '.py /usr/local/bin/run-clang-tidy-' + str(input_args.llvm),
                     'ln -s /usr/local/libexec/c++-analyzer /usr/local/bin/c++-analyzer-' + str(input_args.llvm)])
    output_stages['compiler_build'] = tsan_stage


def oneapi_runtime(_from='0'):
    oneapi_runtime_stage = hpccm.Stage()
    oneapi_runtime_stage += hpccm.primitives.copy(_from='oneapi-build',
                                                  files={"/opt/intel": "/opt/intel",
                                                         "/etc/bash.bashrc": "/etc/bash.bashrc"})
    return oneapi_runtime_stage


def add_oneapi_compiler_build_stage(input_args, output_stages: typing.MutableMapping[str, hpccm.Stage]):
    """Isolate the oneAPI preparation stage.

    This stage is isolated so that its installed components are minimized in the
    final image (chiefly /opt/intel) and its environment setup script can be
    sourced. This also helps with rebuild time and final image size.
    """
    if not isinstance(output_stages, collections.abc.MutableMapping):
        raise RuntimeError('Need output_stages container.')
    oneapi_stage = hpccm.Stage()
    oneapi_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as='oneapi-build')

    version = str(input_args.oneapi)

    # Add required components for the next stage (both for hpccm and Intel's setvars.sh script)
    oneapi_stage += hpccm.building_blocks.packages(ospackages=['wget', 'gnupg2', 'ca-certificates', 'lsb-release'])
    oneapi_stage += hpccm.building_blocks.packages(
        apt_keys=['https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB'],
        apt_repositories=['deb https://apt.repos.intel.com/oneapi all main'],
        # Add minimal packages (not the whole HPC toolkit!)
        ospackages=[f'intel-oneapi-dpcpp-cpp-{version}',
                    f'intel-oneapi-openmp-{version}',
                    f'intel-oneapi-mkl-{version}',
                    f'intel-oneapi-mkl-devel-{version}']
    )
    # Ensure that all bash shells on the final container will have access to oneAPI
    oneapi_stage += hpccm.primitives.shell(
        commands=['echo "source /opt/intel/oneapi/setvars.sh" >> /etc/bash.bashrc',
                  'unlink /opt/intel/oneapi/compiler/latest',
                  f'ln -sf /opt/intel/oneapi/compiler/{version} /opt/intel/oneapi/compiler/latest']
    )
    setattr(oneapi_stage, 'runtime', oneapi_runtime)

    output_stages['compiler_build'] = oneapi_stage


def prepare_venv(version: StrictVersion) -> typing.Sequence[str]:
    """Get shell commands to set up the venv for the requested Python version."""
    major = version.version[0]
    minor = version.version[1]  # type: int

    pyenv = '$HOME/.pyenv/bin/pyenv'

    py_ver = f'{major}.{minor}'
    venv_path = f'$HOME/venv/py{py_ver}'
    commands = [f'$({pyenv} prefix `{pyenv} whence python{py_ver}`)/bin/python -m venv {venv_path}']

    commands.append(f'{venv_path}/bin/python -m pip install --upgrade pip setuptools')
    # Install dependencies for building and testing gmxapi Python package.
    # WARNING: Please keep this list synchronized with python_packaging/requirements-test.txt
    # TODO: Get requirements.txt from an input argument.
    commands.append(f"""{venv_path}/bin/python -m pip install --upgrade \
            'cmake>=3.16.3' \
            'flake8>=3.7.7' \
            'gcovr>=4.2' \
            'mpi4py>=3.0.3' \
            'networkx>=2.0' \
            'numpy>=1' \
            'pip>=10.1' \
            'Pygments>=2.2.0' \
            'pytest>=3.9' \
            'setuptools>=42' \
            'scikit-build>=0.10' \
            'Sphinx>=1.6.3' \
            'sphinxcontrib-plantuml>=0.14'""")

    # TODO: Remove 'importlib_resources' dependency when Python >=3.7 is required.
    if minor == 6:
        commands.append(f"""{venv_path}/bin/python -m pip install --upgrade \
                'importlib_resources'""")

    return commands
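
# A hedged illustration (the exact shell expansions depend on the pyenv
# installation created in add_python_stages): prepare_venv(StrictVersion('3.7.7'))
# returns commands that create $HOME/venv/py3.7 from the pyenv-built python3.7
# and pip-install the pinned testing dependencies into it.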


def add_python_stages(building_blocks: typing.Mapping[str, bb_base],
                      input_args,
                      output_stages: typing.MutableMapping[str, hpccm.Stage]):
    """Add the stage(s) necessary for the requested venvs.

    One intermediate build stage is created for each venv (see --venv option).

    Each stage partially populates Python installations and venvs in the home
    directory. The home directory is collected by the 'pyenv' stage for use by
    the main build stage.
    """
    if len(input_args.venvs) < 1:
        raise RuntimeError('No venvs to build...')
    if output_stages is None or not isinstance(output_stages, collections.abc.MutableMapping):
        raise RuntimeError('Need a container for output stages.')

    # Main Python stage that collects the environments from individual stages.
    # We collect the stages individually, rather than chaining them, because the
    # copy is a bit slow and wastes local Docker image space for each filesystem
    # layer.
    pyenv_stage = hpccm.Stage()
    pyenv_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as='pyenv')
    pyenv_stage += building_blocks['compiler']
    pyenv_stage += building_blocks['mpi']
    pyenv_stage += hpccm.building_blocks.packages(ospackages=_python_extra_packages)

    for version in [StrictVersion(py_ver) for py_ver in sorted(input_args.venvs)]:
        stage_name = 'py' + str(version)
        stage = hpccm.Stage()
        stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as=stage_name)
        stage += building_blocks['compiler']
        stage += building_blocks['mpi']
        stage += hpccm.building_blocks.packages(ospackages=_python_extra_packages)

        # TODO: Use a non-root user for testing and Python virtual environments.
        stage += hpccm.primitives.shell(commands=[
            'curl https://pyenv.run | bash',
            """echo 'export PYENV_ROOT="$HOME/.pyenv"' >> $HOME/.bashrc""",
            """echo 'export PATH="$PYENV_ROOT/bin:$PATH"' >> $HOME/.bashrc""",
            """echo 'eval "$(pyenv init -)"' >> $HOME/.bashrc""",
            """echo 'eval "$(pyenv virtualenv-init -)"' >> $HOME/.bashrc"""])
        pyenv = '$HOME/.pyenv/bin/pyenv'
        commands = [f'PYTHON_CONFIGURE_OPTS="--enable-shared" {pyenv} install -s {version}']
        stage += hpccm.primitives.shell(commands=commands)

        commands = prepare_venv(version)
        stage += hpccm.primitives.shell(commands=commands)

        # TODO: Update user home directory.
        pyenv_stage += hpccm.primitives.copy(_from=stage_name, _mkdir=True, src=['/root/'],
                                             dest='/root')

        # Add the intermediate build stage to the sequence.
        output_stages[stage_name] = stage

    # TODO: If we activate pyenv for login shells, the `global` "version" should be full-featured.
    # # `version` should be a system installation or pyenv environment (or pyenv-virtualenv)
    # # with the dependencies for all of the Python aspects of CMake-driven builds.
    # commands = '{pyenv} global {version}'.format(
    #             pyenv=pyenv,
    #             version=...)
    # pyenv_stage += hpccm.primitives.shell(commands=commands)

    # Add the aggregating build stage to the sequence. This allows the main stage to copy
    # the files in a single stage, potentially reducing the overall output image size.
    output_stages['pyenv'] = pyenv_stage


def add_documentation_dependencies(input_args,
                                   output_stages: typing.MutableMapping[str, hpccm.Stage]):
    """Add appropriate layers according to doxygen input arguments."""
    if input_args.doxygen is None:
        return
    output_stages['main'] += hpccm.primitives.shell(
        commands=['sed -i \'/\"XPS\"/d;/\"PDF\"/d;/\"PS\"/d;/\"EPS\"/d;/disable ghostscript format types/d\' /etc/ImageMagick-6/policy.xml'])
    if input_args.doxygen == '1.8.5':
        doxygen_commit = 'ed4ed873ab0e7f15116e2052119a6729d4589f7a'
        output_stages['main'] += hpccm.building_blocks.generic_autotools(
            repository='https://github.com/westes/flex.git',
            commit='f7788a9a0ecccdc953ed12043ccb59ca25714018',
            prefix='/tmp/install-of-flex',
            configure_opts=['--disable-shared'],
            preconfigure=['./autogen.sh'])
        output_stages['main'] += hpccm.building_blocks.generic_autotools(
            repository='https://github.com/doxygen/doxygen.git',
            commit=doxygen_commit,
            prefix='',
            configure_opts=[
                '--flex /tmp/install-of-flex/bin/flex',
                '--static'])
    else:
        version = input_args.doxygen
        archive_name = f'doxygen-{version}.linux.bin.tar.gz'
        archive_url = f'https://sourceforge.net/projects/doxygen/files/rel-{version}/{archive_name}'
        binary_path = f'doxygen-{version}/bin/doxygen'
        commands = [
            'mkdir doxygen && cd doxygen',
            f'wget {archive_url}',
            f'tar xf {archive_name} {binary_path}',
            f'cp {binary_path} /usr/local/bin/',
            'cd .. && rm -rf doxygen'
        ]
        output_stages['main'] += hpccm.primitives.shell(commands=commands)


def build_stages(args) -> typing.Iterable[hpccm.Stage]:
    """Define and sequence the stages for the recipe corresponding to *args*."""

    # A Dockerfile or Singularity recipe can have multiple build stages.
    # The main build stage can copy files from previous stages, though only
    # the last stage is included in the tagged output image. This means that
    # large or expensive sets of build instructions can be isolated in
    # local/temporary images, but all of the stages need to be output by this
    # script, and need to occur in the correct order, so we create a sequence
    # object early in this function.
    stages = collections.OrderedDict()

    # If we need TSAN or oneAPI support, the early build is more complex: the
    # compiler is built in a separate stage, so that our final images don't
    # carry all the cruft needed to get those toolchains installed.
    if args.llvm is not None and args.tsan is not None:
        add_tsan_compiler_build_stage(input_args=args, output_stages=stages)
    if args.oneapi is not None:
        add_oneapi_compiler_build_stage(input_args=args, output_stages=stages)

    # Building blocks are chunks of container-builder instructions that can be
    # copied to any build stage with the addition operator.
    building_blocks = collections.OrderedDict()
    building_blocks['base_packages'] = hpccm.building_blocks.packages(
        ospackages=_common_packages)

    # These are the most expensive and most reusable layers, so we put them first.
    building_blocks['compiler'] = get_compiler(args, compiler_build_stage=stages.get('compiler_build'))
    building_blocks['mpi'] = get_mpi(args, building_blocks['compiler'])
    for i, cmake in enumerate(args.cmake):
        building_blocks['cmake' + str(i)] = hpccm.building_blocks.cmake(
            eula=True,
            prefix=f'/usr/local/cmake-{cmake}',
            version=cmake)

    # Install additional packages early in the build to optimize Docker build layer cache.
    os_packages = list(get_llvm_packages(args)) + get_opencl_packages(args)
    if args.doxygen is not None:
        os_packages += _docs_extra_packages
    if args.oneapi is not None:
        os_packages += ['lsb-release']
    if args.hipsycl is not None:
        os_packages += ['libboost-fiber-dev']
    if args.intel_compute_runtime is not None:
        os_packages += _intel_compute_runtime_extra_packages
    building_blocks['extra_packages'] = hpccm.building_blocks.packages(
        ospackages=os_packages,
        apt_ppas=['ppa:intel-opencl/intel-opencl'],
        apt_keys=['http://repo.radeon.com/rocm/apt/debian/rocm.gpg.key'],
        apt_repositories=['deb [arch=amd64] http://repo.radeon.com/rocm/apt/debian/ xenial main']
    )

    if args.cuda is not None and args.llvm is not None:
        # Hack to tell clang what version of CUDA we're using,
        # based on https://github.com/llvm/llvm-project/blob/1fdec59bffc11ae37eb51a1b9869f0696bfd5312/clang/lib/Driver/ToolChains/Cuda.cpp#L43
        cuda_version_split = args.cuda.split('.')
        # LLVM requires the version in x.y.z format, while args.cuda may be either x.y or x.y.z.
        cuda_version_str = '{}.{}.{}'.format(
            cuda_version_split[0],
            cuda_version_split[1],
            cuda_version_split[2] if len(cuda_version_split) > 2 else 0
        )
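        # For example (illustrative values): '11.0' -> '11.0.0'; '10.1.243' is already x.y.z and is kept.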
        building_blocks['cuda-clang-workaround'] = hpccm.primitives.shell(commands=[
            f'echo "CUDA Version {cuda_version_str}" > /usr/local/cuda/version.txt'
        ])

    building_blocks['clfft'] = get_clfft(args)

    building_blocks['hipSYCL'] = get_hipsycl(args)

    building_blocks['intel-compute-runtime'] = get_intel_compute_runtime(args)

    # Add Python environments to MPI images only, so we don't have to worry
    # about whether to install mpi4py.
    if args.mpi is not None and len(args.venvs) > 0:
        add_python_stages(building_blocks=building_blocks, input_args=args, output_stages=stages)

    # Create the stage from which the targeted image will be tagged.
    stages['main'] = hpccm.Stage()

    stages['main'] += hpccm.primitives.baseimage(image=base_image_tag(args))
    for bb in building_blocks.values():
        if bb is not None:
            stages['main'] += bb

    # We always add Python3 and Pip.
    stages['main'] += hpccm.building_blocks.python(python3=True, python2=False)

    # Add documentation requirements (doxygen and sphinx + misc).
    if args.doxygen is not None:
        add_documentation_dependencies(args, stages)

    if 'pyenv' in stages and stages['pyenv'] is not None:
        stages['main'] += hpccm.primitives.copy(_from='pyenv', _mkdir=True, src=['/root/.pyenv/'],
                                                dest='/root/.pyenv')
        stages['main'] += hpccm.primitives.copy(_from='pyenv', _mkdir=True, src=['/root/venv/'],
                                                dest='/root/venv')
        # TODO: Update user home directory.
        # TODO: If we activate pyenv for login shells, the `global` "version" should be full-featured.
        # stages['main'] += hpccm.primitives.copy(_from='pyenv', src=['/root/.bashrc'],
        #                                         dest='/root/')

    # Make sure that `python` resolves to something.
    stages['main'] += hpccm.primitives.shell(commands=['test -x /usr/bin/python || '
                                                       'update-alternatives --install /usr/bin/python python /usr/bin/python3 1 && '
                                                       '/usr/bin/python --version'])

    # Note that the list of stages should be sorted in dependency order.
    for build_stage in stages.values():
        if build_stage is not None:
            yield build_stage


if __name__ == '__main__':
    args = parser.parse_args()

    # Set container specification output format.
    hpccm.config.set_container_format(args.format)

    container_recipe = build_stages(args)

    # Output container specification.
    for stage in container_recipe:
        print(stage)